Merge branch 'master' of ssh://gerrit.onlab.us:29418/onos-next
Conflicts:
providers/openflow/device/src/main/java/org/onlab/onos/provider/of/device/impl/OpenFlowDeviceProvider.java
diff --git a/apps/calendar/pom.xml b/apps/calendar/pom.xml
new file mode 100644
index 0000000..e1c0553
--- /dev/null
+++ b/apps/calendar/pom.xml
@@ -0,0 +1,81 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+
+ <parent>
+ <groupId>org.onlab.onos</groupId>
+ <artifactId>onos-apps</artifactId>
+ <version>1.0.0-SNAPSHOT</version>
+ <relativePath>../pom.xml</relativePath>
+ </parent>
+
+ <artifactId>onos-app-calendar</artifactId>
+ <packaging>bundle</packaging>
+
+ <description>ONOS simple calendaring REST interface for intents</description>
+
+ <properties>
+ <web.context>/onos/calendar</web.context>
+ </properties>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.onlab.onos</groupId>
+ <artifactId>onlab-rest</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+
+ <dependency>
+ <groupId>com.sun.jersey</groupId>
+ <artifactId>jersey-servlet</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>com.sun.jersey.jersey-test-framework</groupId>
+ <artifactId>jersey-test-framework-core</artifactId>
+ <version>1.18.1</version>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>com.sun.jersey.jersey-test-framework</groupId>
+ <artifactId>jersey-test-framework-grizzly2</artifactId>
+ <version>1.18.1</version>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.osgi</groupId>
+ <artifactId>org.osgi.core</artifactId>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.felix</groupId>
+ <artifactId>maven-bundle-plugin</artifactId>
+ <extensions>true</extensions>
+ <configuration>
+ <instructions>
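+                        <!-- Packaged as a web application bundle (WAB); the webapp under
+                             src/main/webapp is served at the ${web.context} context path. -->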
+ <_wab>src/main/webapp/</_wab>
+ <Bundle-SymbolicName>
+ ${project.groupId}.${project.artifactId}
+ </Bundle-SymbolicName>
+ <Import-Package>
+ org.osgi.framework,
+ javax.ws.rs,javax.ws.rs.core,
+ com.sun.jersey.api.core,
+ com.sun.jersey.spi.container.servlet,
+ com.sun.jersey.server.impl.container.servlet,
+ org.onlab.packet.*,
+ org.onlab.rest.*,
+ org.onlab.onos.*
+ </Import-Package>
+ <Web-ContextPath>${web.context}</Web-ContextPath>
+ </instructions>
+ </configuration>
+ </plugin>
+ </plugins>
+ </build>
+
+</project>
diff --git a/apps/calendar/src/main/java/org/onlab/onos/calendar/BandwidthCalendarResource.java b/apps/calendar/src/main/java/org/onlab/onos/calendar/BandwidthCalendarResource.java
new file mode 100644
index 0000000..b05c34e
--- /dev/null
+++ b/apps/calendar/src/main/java/org/onlab/onos/calendar/BandwidthCalendarResource.java
@@ -0,0 +1,43 @@
+package org.onlab.onos.calendar;
+
+import org.onlab.onos.net.ConnectPoint;
+import org.onlab.onos.net.DeviceId;
+import org.onlab.onos.net.intent.IntentService;
+import org.onlab.rest.BaseResource;
+
+import javax.ws.rs.POST;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.core.Response;
+import java.net.URI;
+
+import static org.onlab.onos.net.PortNumber.portNumber;
+
+/**
+ * Web resource for triggering calendared intents.
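+ * <p>
+ * Combined with the bundle's web context (/onos/calendar) and the servlet mapping in
+ * WEB-INF/web.xml (/rs/*), the endpoint is reachable at
+ * POST /onos/calendar/rs/intent/{src}/{dst}/{srcPort}/{dstPort}/{bandwidth}.
+ * <p>
+ * Hypothetical invocation sketch using the Jersey 1.x client API (host and port are
+ * placeholders, not defined by this bundle):
+ * <pre>
+ * Client client = Client.create();
+ * String reply = client
+ *         .resource("http://localhost:8181/onos/calendar/rs/intent/"
+ *                 + "00:00:00:00:00:00:00:01/00:00:00:00:00:00:00:02/1/2/100")
+ *         .post(String.class);
+ * </pre>
+ * The handler currently returns a placeholder response; see the TODO below.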
+ */
+@Path("intent")
+public class BandwidthCalendarResource extends BaseResource {
+
+ @POST
+ @Path("{src}/{dst}/{srcPort}/{dstPort}/{bandwidth}")
+ public Response createIntent(@PathParam("src") String src,
+ @PathParam("dst") String dst,
+ @PathParam("srcPort") String srcPort,
+ @PathParam("dstPort") String dstPort,
+ @PathParam("bandwidth") String bandwidth) {
+ // TODO: implement calls to intent framework
+ IntentService service = get(IntentService.class);
+
+ ConnectPoint srcPoint = new ConnectPoint(deviceId(src), portNumber(srcPort));
+ ConnectPoint dstPoint = new ConnectPoint(deviceId(dst), portNumber(dstPort));
+
+ return Response.ok("Yo! We got src=" + srcPoint + "; dst=" + dstPoint +
+ "; bw=" + bandwidth + "; intent service " + service).build();
+ }
+
+ private DeviceId deviceId(String dpid) {
+ return DeviceId.deviceId(URI.create("of:" + dpid));
+ }
+
+}
diff --git a/apps/calendar/src/main/java/org/onlab/onos/calendar/package-info.java b/apps/calendar/src/main/java/org/onlab/onos/calendar/package-info.java
new file mode 100644
index 0000000..a959f3e
--- /dev/null
+++ b/apps/calendar/src/main/java/org/onlab/onos/calendar/package-info.java
@@ -0,0 +1,5 @@
+/**
+ * Application providing integration between OSCARS and the ONOS intent
+ * framework via a REST API.
+ */
+package org.onlab.onos.calendar;
\ No newline at end of file
diff --git a/apps/calendar/src/main/webapp/WEB-INF/web.xml b/apps/calendar/src/main/webapp/WEB-INF/web.xml
new file mode 100644
index 0000000..7aa6418
--- /dev/null
+++ b/apps/calendar/src/main/webapp/WEB-INF/web.xml
@@ -0,0 +1,23 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<web-app xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://java.sun.com/xml/ns/javaee"
+ xmlns:web="http://java.sun.com/xml/ns/javaee/web-app_2_5.xsd"
+ xsi:schemaLocation="http://java.sun.com/xml/ns/javaee http://java.sun.com/xml/ns/javaee/web-app_2_5.xsd"
+ id="ONOS" version="2.5">
+  <display-name>ONOS Calendar App</display-name>
+
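+  <!-- Jersey servlet serving the JAX-RS resources in org.onlab.onos.calendar;
+       combined with the bundle's Web-ContextPath (/onos/calendar), the API is
+       exposed under /onos/calendar/rs/. -->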
+ <servlet>
+ <servlet-name>JAX-RS Service</servlet-name>
+ <servlet-class>com.sun.jersey.spi.container.servlet.ServletContainer</servlet-class>
+ <init-param>
+ <param-name>com.sun.jersey.config.property.packages</param-name>
+ <param-value>org.onlab.onos.calendar</param-value>
+ </init-param>
+ <load-on-startup>10</load-on-startup>
+ </servlet>
+
+ <servlet-mapping>
+ <servlet-name>JAX-RS Service</servlet-name>
+ <url-pattern>/rs/*</url-pattern>
+ </servlet-mapping>
+
+</web-app>
\ No newline at end of file
diff --git a/apps/fwd/pom.xml b/apps/fwd/pom.xml
index 4ee2dc3..b203121 100644
--- a/apps/fwd/pom.xml
+++ b/apps/fwd/pom.xml
@@ -16,4 +16,11 @@
<description>ONOS simple reactive forwarding app</description>
+ <dependencies>
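+    <!-- org.osgi.compendium provides org.osgi.service.component.ComponentContext,
+         used by the @Modified callback in ReactiveForwarding. -->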
+ <dependency>
+ <groupId>org.osgi</groupId>
+ <artifactId>org.osgi.compendium</artifactId>
+ </dependency>
+ </dependencies>
+
</project>
diff --git a/apps/fwd/src/main/java/org/onlab/onos/fwd/ReactiveForwarding.java b/apps/fwd/src/main/java/org/onlab/onos/fwd/ReactiveForwarding.java
index 8ead67f..62b0b84 100644
--- a/apps/fwd/src/main/java/org/onlab/onos/fwd/ReactiveForwarding.java
+++ b/apps/fwd/src/main/java/org/onlab/onos/fwd/ReactiveForwarding.java
@@ -1,12 +1,10 @@
package org.onlab.onos.fwd;
-import static org.slf4j.LoggerFactory.getLogger;
-
-import java.util.Set;
-
import org.apache.felix.scr.annotations.Activate;
import org.apache.felix.scr.annotations.Component;
import org.apache.felix.scr.annotations.Deactivate;
+import org.apache.felix.scr.annotations.Modified;
+import org.apache.felix.scr.annotations.Property;
import org.apache.felix.scr.annotations.Reference;
import org.apache.felix.scr.annotations.ReferenceCardinality;
import org.onlab.onos.ApplicationId;
@@ -29,8 +27,14 @@
import org.onlab.onos.net.packet.PacketService;
import org.onlab.onos.net.topology.TopologyService;
import org.onlab.packet.Ethernet;
+import org.osgi.service.component.ComponentContext;
import org.slf4j.Logger;
+import java.util.Dictionary;
+import java.util.Set;
+
+import static org.slf4j.LoggerFactory.getLogger;
+
/**
* Sample reactive forwarding application.
*/
@@ -61,6 +65,10 @@
private ApplicationId appId;
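+    // Configurable at runtime (e.g. via the OSGi Configuration Admin service);
+    // changes take effect in the modified() callback below.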
+ @Property(name = "enabled", boolValue = true,
+ label = "Enable forwarding; default is true")
+ private boolean isEnabled = true;
+
@Activate
public void activate() {
appId = coreService.registerApplication("org.onlab.onos.fwd");
@@ -76,6 +84,22 @@
log.info("Stopped");
}
+ @Modified
+ public void modified(ComponentContext context) {
+ Dictionary properties = context.getProperties();
+ String flag = (String) properties.get("enabled");
+ if (flag != null) {
+ boolean enabled = flag.equals("true");
+ if (isEnabled != enabled) {
+ isEnabled = enabled;
+ if (!isEnabled) {
+ flowRuleService.removeFlowRulesById(appId);
+ }
+ log.info("Reconfigured. Forwarding is {}",
+ isEnabled ? "enabled" : "disabled");
+ }
+ }
+ }
/**
* Packet processor responsible for forwarding packets along their paths.
@@ -86,7 +110,7 @@
public void process(PacketContext context) {
// Stop processing if the packet has been handled, since we
// can't do any more to it.
- if (context.isHandled()) {
+ if (!isEnabled || context.isHandled()) {
return;
}
@@ -114,8 +138,8 @@
// Otherwise, get a set of paths that lead from here to the
// destination edge switch.
Set<Path> paths = topologyService.getPaths(topologyService.currentTopology(),
- pkt.receivedFrom().deviceId(),
- dst.location().deviceId());
+ pkt.receivedFrom().deviceId(),
+ dst.location().deviceId());
if (paths.isEmpty()) {
// If there are no paths, flood and bail.
flood(context);
@@ -127,8 +151,8 @@
Path path = pickForwardPath(paths, pkt.receivedFrom().port());
if (path == null) {
log.warn("Doh... don't know where to go... {} -> {} received on {}",
- ethPkt.getSourceMAC(), ethPkt.getDestinationMAC(),
- pkt.receivedFrom());
+ ethPkt.getSourceMAC(), ethPkt.getDestinationMAC(),
+ pkt.receivedFrom());
flood(context);
return;
}
@@ -152,7 +176,7 @@
// Floods the specified packet if permissible.
private void flood(PacketContext context) {
if (topologyService.isBroadcastPoint(topologyService.currentTopology(),
- context.inPacket().receivedFrom())) {
+ context.inPacket().receivedFrom())) {
packetOut(context, PortNumber.FLOOD);
} else {
context.block();
@@ -174,18 +198,17 @@
Ethernet inPkt = context.inPacket().parsed();
TrafficSelector.Builder builder = DefaultTrafficSelector.builder();
builder.matchEthType(inPkt.getEtherType())
- .matchEthSrc(inPkt.getSourceMAC())
- .matchEthDst(inPkt.getDestinationMAC())
- .matchInport(context.inPacket().receivedFrom().port());
+ .matchEthSrc(inPkt.getSourceMAC())
+ .matchEthDst(inPkt.getDestinationMAC())
+ .matchInport(context.inPacket().receivedFrom().port());
TrafficTreatment.Builder treat = DefaultTrafficTreatment.builder();
treat.setOutput(portNumber);
FlowRule f = new DefaultFlowRule(context.inPacket().receivedFrom().deviceId(),
- builder.build(), treat.build(), PRIORITY, appId, TIMEOUT);
+ builder.build(), treat.build(), PRIORITY, appId, TIMEOUT);
flowRuleService.applyFlowRules(f);
-
}
}
diff --git a/apps/optical/pom.xml b/apps/optical/pom.xml
new file mode 100644
index 0000000..0264f8a
--- /dev/null
+++ b/apps/optical/pom.xml
@@ -0,0 +1,47 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+
+ <parent>
+ <groupId>org.onlab.onos</groupId>
+ <artifactId>onos-apps</artifactId>
+ <version>1.0.0-SNAPSHOT</version>
+ <relativePath>../pom.xml</relativePath>
+ </parent>
+
+ <artifactId>onos-app-optical</artifactId>
+ <packaging>bundle</packaging>
+
+ <description>ONOS application for packet/optical deployments</description>
+
+ <dependencies>
+
+ <dependency>
+ <groupId>org.onlab.onos</groupId>
+ <artifactId>onos-cli</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.karaf.shell</groupId>
+ <artifactId>org.apache.karaf.shell.console</artifactId>
+ </dependency>
+
+ <dependency>
+ <groupId>org.codehaus.jackson</groupId>
+ <artifactId>jackson-core-asl</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.codehaus.jackson</groupId>
+ <artifactId>jackson-mapper-asl</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>com.fasterxml.jackson.core</groupId>
+ <artifactId>jackson-annotations</artifactId>
+ <scope>provided</scope>
+ </dependency>
+
+ </dependencies>
+
+</project>
diff --git a/apps/optical/src/main/java/org/onlab/onos/optical/cfg/OpticalConfigProvider.java b/apps/optical/src/main/java/org/onlab/onos/optical/cfg/OpticalConfigProvider.java
new file mode 100644
index 0000000..86c1c0b
--- /dev/null
+++ b/apps/optical/src/main/java/org/onlab/onos/optical/cfg/OpticalConfigProvider.java
@@ -0,0 +1,338 @@
+package org.onlab.onos.optical.cfg;
+
+import static org.onlab.onos.net.DeviceId.deviceId;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.felix.scr.annotations.Activate;
+import org.apache.felix.scr.annotations.Component;
+import org.apache.felix.scr.annotations.Deactivate;
+import org.apache.felix.scr.annotations.Reference;
+import org.apache.felix.scr.annotations.ReferenceCardinality;
+import org.codehaus.jackson.JsonNode;
+import org.codehaus.jackson.JsonParseException;
+import org.codehaus.jackson.annotate.JsonIgnoreProperties;
+import org.codehaus.jackson.map.JsonMappingException;
+import org.codehaus.jackson.map.ObjectMapper;
+import org.onlab.onos.net.ConnectPoint;
+import org.onlab.onos.net.DefaultAnnotations;
+import org.onlab.onos.net.Device;
+import org.onlab.onos.net.DeviceId;
+import org.onlab.onos.net.Link;
+import org.onlab.onos.net.MastershipRole;
+import org.onlab.onos.net.PortNumber;
+import org.onlab.onos.net.device.DefaultDeviceDescription;
+import org.onlab.onos.net.device.DeviceDescription;
+import org.onlab.onos.net.device.DeviceProvider;
+import org.onlab.onos.net.device.DeviceProviderRegistry;
+import org.onlab.onos.net.device.DeviceProviderService;
+import org.onlab.onos.net.link.DefaultLinkDescription;
+import org.onlab.onos.net.link.LinkProvider;
+import org.onlab.onos.net.link.LinkProviderRegistry;
+import org.onlab.onos.net.link.LinkProviderService;
+import org.onlab.onos.net.provider.AbstractProvider;
+import org.onlab.onos.net.provider.ProviderId;
+import org.onlab.packet.ChassisId;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * OpticalConfigProvider emulates the southbound (SB) network provider for optical
+ * switches, optical links and any other state that needs to be configured for
+ * correct network operation.
+ */
+
+@JsonIgnoreProperties(ignoreUnknown = true)
+@Component(immediate = true)
+public class OpticalConfigProvider extends AbstractProvider implements DeviceProvider, LinkProvider {
+
+ protected static final Logger log = LoggerFactory
+ .getLogger(OpticalConfigProvider.class);
+
+ // TODO: fix hard coded file path later.
+ private static final String DEFAULT_CONFIG_FILE =
+ "/opt/onos/config/demo-3-roadm-2-ps.json";
+ private String configFileName = DEFAULT_CONFIG_FILE;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected LinkProviderRegistry linkProviderRegistry;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected DeviceProviderRegistry deviceProviderRegistry;
+
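+    // Prefix for annotation keys published on optical devices and links,
+    // e.g. "optical.switchType", "optical.linkType".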
+ private static final String OPTICAL_ANNOTATION = "optical.";
+
+ private LinkProviderService linkProviderService;
+ private DeviceProviderService deviceProviderService;
+
+ private static final List<Roadm> RAW_ROADMS = new ArrayList<>();
+ private static final List<WdmLink> RAW_WDMLINKS = new ArrayList<>();
+ private static final List<PktOptLink> RAW_PKTOPTLINKS = new ArrayList<>();
+
+ private static final String ROADM = "Roadm";
+ private static final String WDM_LINK = "wdmLink";
+ private static final String PKT_OPT_LINK = "pktOptLink";
+
+ protected OpticalNetworkConfig opticalNetworkConfig;
+
+ public OpticalConfigProvider() {
+ super(new ProviderId("of", "org.onlab.onos.provider.opticalConfig", true));
+ }
+
+ @Activate
+ protected void activate() {
+ linkProviderService = linkProviderRegistry.register(this);
+ deviceProviderService = deviceProviderRegistry.register(this);
+ log.info("Starting optical network configuration process...");
+ log.info("Optical config file set to {}", configFileName);
+
+ loadOpticalConfig();
+ parseOpticalConfig();
+ publishOpticalConfig();
+ }
+
+ @Deactivate
+ protected void deactivate() {
+ linkProviderRegistry.unregister(this);
+ linkProviderService = null;
+ deviceProviderRegistry.unregister(this);
+ deviceProviderService = null;
+ RAW_ROADMS.clear();
+ RAW_WDMLINKS.clear();
+ RAW_PKTOPTLINKS.clear();
+ log.info("Stopped");
+ }
+
+ private void loadOpticalConfig() {
+ ObjectMapper mapper = new ObjectMapper();
+ opticalNetworkConfig = new OpticalNetworkConfig();
+ try {
+ opticalNetworkConfig = mapper.readValue(new File(configFileName), OpticalNetworkConfig.class);
+ } catch (JsonParseException e) {
+ String err = String.format("JsonParseException while loading network "
+ + "config from file: %s: %s", configFileName, e.getMessage());
+ log.error(err, e);
+ } catch (JsonMappingException e) {
+ String err = String.format(
+ "JsonMappingException while loading network config "
+ + "from file: %s: %s", configFileName, e.getMessage());
+ log.error(err, e);
+ } catch (IOException e) {
+ String err = String.format("IOException while loading network config "
+ + "from file: %s %s", configFileName, e.getMessage());
+ log.error(err, e);
+ }
+ }
+
+ private void parseOpticalConfig() {
+ List<OpticalSwitchDescription> swList = opticalNetworkConfig.getOpticalSwitches();
+ List<OpticalLinkDescription> lkList = opticalNetworkConfig.getOpticalLinks();
+
+ for (OpticalSwitchDescription sw : swList) {
+ String swtype = sw.getType();
+ boolean allow = sw.isAllowed();
+ if (swtype.equals(ROADM) && allow) {
+ int regNum = 0;
+ Set<Map.Entry<String, JsonNode>> m = sw.params.entrySet();
+ for (Map.Entry<String, JsonNode> e : m) {
+ String key = e.getKey();
+ JsonNode j = e.getValue();
+ if (key.equals("numRegen")) {
+ regNum = j.asInt();
+ }
+ }
+
+ Roadm newRoadm = new Roadm();
+ newRoadm.setName(sw.name);
+ newRoadm.setNodeId(sw.nodeDpid);
+ newRoadm.setLongtitude(sw.longitude);
+ newRoadm.setLatitude(sw.latitude);
+ newRoadm.setRegenNum(regNum);
+
+ RAW_ROADMS.add(newRoadm);
+ log.info(newRoadm.toString());
+ }
+ }
+
+ for (OpticalLinkDescription lk : lkList) {
+ String lktype = lk.getType();
+ switch (lktype) {
+ case WDM_LINK:
+ WdmLink newWdmLink = new WdmLink();
+ newWdmLink.setSrcNodeId(lk.getNodeDpid1());
+ newWdmLink.setSnkNodeId(lk.getNodeDpid2());
+ newWdmLink.setAdminWeight(1000); // default weight for each WDM link.
+ Set<Map.Entry<String, JsonNode>> m = lk.params.entrySet();
+ for (Map.Entry<String, JsonNode> e : m) {
+ String key = e.getKey();
+ JsonNode j = e.getValue();
+ if (key.equals("nodeName1")) {
+ newWdmLink.setSrcNodeName(j.asText());
+ } else if (key.equals("nodeName2")) {
+ newWdmLink.setSnkNodeName(j.asText());
+ } else if (key.equals("port1")) {
+ newWdmLink.setSrcPort(j.asInt());
+ } else if (key.equals("port2")) {
+ newWdmLink.setSnkPort(j.asInt());
+ } else if (key.equals("distKms")) {
+ newWdmLink.setDistance(j.asDouble());
+ } else if (key.equals("numWaves")) {
+ newWdmLink.setWavelengthNumber(j.asInt());
+ } else {
+                        log.error("Unrecognized WDM link parameter: {}", key);
+ // TODO add exception processing;
+ }
+ }
+ RAW_WDMLINKS.add(newWdmLink);
+ log.info(newWdmLink.toString());
+
+ break;
+
+ case PKT_OPT_LINK:
+ PktOptLink newPktOptLink = new PktOptLink();
+ newPktOptLink.setSrcNodeId(lk.getNodeDpid1());
+ newPktOptLink.setSnkNodeId(lk.getNodeDpid2());
+ newPktOptLink.setAdminWeight(10); // default weight for each packet-optical link.
+ Set<Map.Entry<String, JsonNode>> ptm = lk.params.entrySet();
+ for (Map.Entry<String, JsonNode> e : ptm) {
+ String key = e.getKey();
+ JsonNode j = e.getValue();
+ if (key.equals("nodeName1")) {
+ newPktOptLink.setSrcNodeName(j.asText());
+ } else if (key.equals("nodeName2")) {
+ newPktOptLink.setSnkNodeName(j.asText());
+ } else if (key.equals("port1")) {
+ newPktOptLink.setSrcPort(j.asInt());
+ } else if (key.equals("port2")) {
+ newPktOptLink.setSnkPort(j.asInt());
+ } else if (key.equals("bandWidth")) {
+ newPktOptLink.setBandwdith(j.asDouble());
+ } else {
+                        log.error("Unrecognized packet-optical link parameter: {}", key);
+ // TODO add exception processing;
+ }
+ }
+
+ RAW_PKTOPTLINKS.add(newPktOptLink);
+ log.info(newPktOptLink.toString());
+ break;
+ default:
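+                    // Unrecognized link types are ignored.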
+ }
+ }
+ }
+
+ private void publishOpticalConfig() {
+ if (deviceProviderService == null || linkProviderService == null) {
+ return;
+ }
+
+ // Discover the optical ROADM objects
+ Iterator<Roadm> iterWdmNode = RAW_ROADMS.iterator();
+ while (iterWdmNode.hasNext()) {
+ Roadm value = iterWdmNode.next();
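+            // Build the OpenFlow-style device URI by stripping ':' from the DPID,
+            // e.g. "00:00:ff:ff:ff:ff:ff:01" becomes of:0000ffffffffff01.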
+ DeviceId did = deviceId("of:" + value.getNodeId().replace(":", ""));
+ ChassisId cid = new ChassisId(value.getNodeId());
+ DefaultAnnotations extendedAttributes = DefaultAnnotations.builder()
+ .set(OPTICAL_ANNOTATION + "switchType", "ROADM")
+ .set(OPTICAL_ANNOTATION + "switchName", value.getName())
+ .set(OPTICAL_ANNOTATION + "latitude", Double.toString(value.getLatitude()))
+ .set(OPTICAL_ANNOTATION + "longtitude", Double.toString(value.getLongtitude()))
+ .set(OPTICAL_ANNOTATION + "regNum", Integer.toString(value.getRegenNum()))
+ .build();
+
+ DeviceDescription description =
+ new DefaultDeviceDescription(did.uri(),
+ Device.Type.SWITCH,
+ "",
+ "",
+ "",
+ "",
+ cid,
+ extendedAttributes);
+ deviceProviderService.deviceConnected(did, description);
+ }
+
+ // Discover the optical WDM link objects
+ Iterator<WdmLink> iterWdmlink = RAW_WDMLINKS.iterator();
+ while (iterWdmlink.hasNext()) {
+ WdmLink value = iterWdmlink.next();
+
+ DeviceId srcNodeId = deviceId("of:" + value.getSrcNodeId().replace(":", ""));
+ DeviceId snkNodeId = deviceId("of:" + value.getSnkNodeId().replace(":", ""));
+
+ PortNumber srcPort = PortNumber.portNumber(value.getSrcPort());
+ PortNumber snkPort = PortNumber.portNumber(value.getSnkPort());
+
+ ConnectPoint srcPoint = new ConnectPoint(srcNodeId, srcPort);
+ ConnectPoint snkPoint = new ConnectPoint(snkNodeId, snkPort);
+
+ DefaultAnnotations extendedAttributes = DefaultAnnotations.builder()
+ .set(OPTICAL_ANNOTATION + "linkType", "WDM")
+ .set(OPTICAL_ANNOTATION + "distance", Double.toString(value.getDistance()))
+ .set(OPTICAL_ANNOTATION + "cost", Double.toString(value.getDistance()))
+ .set(OPTICAL_ANNOTATION + "adminWeight", Double.toString(value.getAdminWeight()))
+ .set(OPTICAL_ANNOTATION + "wavelengthNum", Integer.toString(value.getWavelengthNumber()))
+ .build();
+
+ DefaultLinkDescription linkDescription =
+ new DefaultLinkDescription(srcPoint,
+ snkPoint,
+ Link.Type.DIRECT,
+ extendedAttributes);
+
+ linkProviderService.linkDetected(linkDescription);
+ log.info(String.format("WDM link: %s : %s",
+ linkDescription.src().toString(), linkDescription.dst().toString()));
+ }
+
+ // Discover the packet optical link objects
+ Iterator<PktOptLink> iterPktOptlink = RAW_PKTOPTLINKS.iterator();
+ while (iterPktOptlink.hasNext()) {
+ PktOptLink value = iterPktOptlink.next();
+ DeviceId srcNodeId = deviceId("of:" + value.getSrcNodeId().replace(":", ""));
+ DeviceId snkNodeId = deviceId("of:" + value.getSnkNodeId().replace(":", ""));
+
+ PortNumber srcPort = PortNumber.portNumber(value.getSrcPort());
+ PortNumber snkPort = PortNumber.portNumber(value.getSnkPort());
+
+ ConnectPoint srcPoint = new ConnectPoint(srcNodeId, srcPort);
+ ConnectPoint snkPoint = new ConnectPoint(snkNodeId, snkPort);
+
+ DefaultAnnotations extendedAttributes = DefaultAnnotations.builder()
+ .set(OPTICAL_ANNOTATION + "linkType", "PktOptLink")
+ .set(OPTICAL_ANNOTATION + "bandwidth", Double.toString(value.getBandwidth()))
+ .set(OPTICAL_ANNOTATION + "cost", Double.toString(value.getBandwidth()))
+ .set(OPTICAL_ANNOTATION + "adminWeight", Double.toString(value.getAdminWeight()))
+ .build();
+
+ DefaultLinkDescription linkDescription =
+ new DefaultLinkDescription(srcPoint,
+ snkPoint,
+ Link.Type.DIRECT,
+ extendedAttributes);
+
+ linkProviderService.linkDetected(linkDescription);
+ log.info(String.format("Packet-optical link: %s : %s",
+ linkDescription.src().toString(), linkDescription.dst().toString()));
+ }
+
+ }
+
+ @Override
+ public void triggerProbe(Device device) {
+ // TODO We may want to consider re-reading config files and publishing them based on this event.
+ }
+
+ @Override
+ public void roleChanged(Device device, MastershipRole newRole) {
+ // TODO Auto-generated method stub.
+ }
+
+}
diff --git a/apps/optical/src/main/java/org/onlab/onos/optical/cfg/OpticalLinkDescription.java b/apps/optical/src/main/java/org/onlab/onos/optical/cfg/OpticalLinkDescription.java
new file mode 100644
index 0000000..af616ef
--- /dev/null
+++ b/apps/optical/src/main/java/org/onlab/onos/optical/cfg/OpticalLinkDescription.java
@@ -0,0 +1,89 @@
+package org.onlab.onos.optical.cfg;
+
+import java.util.Map;
+import org.codehaus.jackson.JsonNode;
+import org.onlab.util.HexString;
+
+/**
+ * Public class corresponding to the JSON-described data model.
+ */
+public class OpticalLinkDescription {
+ protected String type;
+ protected Boolean allowed;
+ protected long dpid1;
+ protected long dpid2;
+ protected String nodeDpid1;
+ protected String nodeDpid2;
+ protected Map<String, JsonNode> params;
+ protected Map<String, String> publishAttributes;
+
+ public String getType() {
+ return type;
+ }
+
+ public void setType(String type) {
+ this.type = type;
+ }
+
+ public Boolean isAllowed() {
+ return allowed;
+ }
+
+ public void setAllowed(Boolean allowed) {
+ this.allowed = allowed;
+ }
+
+ public String getNodeDpid1() {
+ return nodeDpid1;
+ }
+
+ public void setNodeDpid1(String nodeDpid1) {
+ this.nodeDpid1 = nodeDpid1;
+ this.dpid1 = HexString.toLong(nodeDpid1);
+ }
+
+ public String getNodeDpid2() {
+ return nodeDpid2;
+ }
+
+ public void setNodeDpid2(String nodeDpid2) {
+ this.nodeDpid2 = nodeDpid2;
+ this.dpid2 = HexString.toLong(nodeDpid2);
+ }
+
+ public long getDpid1() {
+ return dpid1;
+ }
+
+ public void setDpid1(long dpid1) {
+ this.dpid1 = dpid1;
+ this.nodeDpid1 = HexString.toHexString(dpid1);
+ }
+
+ public long getDpid2() {
+ return dpid2;
+ }
+
+ public void setDpid2(long dpid2) {
+ this.dpid2 = dpid2;
+ this.nodeDpid2 = HexString.toHexString(dpid2);
+ }
+
+ public Map<String, JsonNode> getParams() {
+ return params;
+ }
+
+ public void setParams(Map<String, JsonNode> params) {
+ this.params = params;
+ }
+
+ public Map<String, String> getPublishAttributes() {
+ return publishAttributes;
+ }
+
+ public void setPublishAttributes(Map<String, String> publishAttributes) {
+ this.publishAttributes = publishAttributes;
+ }
+
+}
+
diff --git a/apps/optical/src/main/java/org/onlab/onos/optical/cfg/OpticalNetworkConfig.java b/apps/optical/src/main/java/org/onlab/onos/optical/cfg/OpticalNetworkConfig.java
new file mode 100644
index 0000000..a34f843
--- /dev/null
+++ b/apps/optical/src/main/java/org/onlab/onos/optical/cfg/OpticalNetworkConfig.java
@@ -0,0 +1,40 @@
+package org.onlab.onos.optical.cfg;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Public class corresponding to the JSON-described data model.
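+ * The expected top-level JSON shape (see, e.g., demo-3-roadm-2-ps.json under
+ * src/main/resources) is an object with "opticalSwitches" and "opticalLinks" arrays.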
+ */
+public class OpticalNetworkConfig {
+ protected static final Logger log = LoggerFactory.getLogger(OpticalNetworkConfig.class);
+
+ private List<OpticalSwitchDescription> opticalSwitches;
+ private List<OpticalLinkDescription> opticalLinks;
+
+ public OpticalNetworkConfig() {
+ opticalSwitches = new ArrayList<OpticalSwitchDescription>();
+ opticalLinks = new ArrayList<OpticalLinkDescription>();
+ }
+
+ public List<OpticalSwitchDescription> getOpticalSwitches() {
+ return opticalSwitches;
+ }
+
+ public void setOpticalSwitches(List<OpticalSwitchDescription> switches) {
+ this.opticalSwitches = switches;
+ }
+
+ public List<OpticalLinkDescription> getOpticalLinks() {
+ return opticalLinks;
+ }
+
+ public void setOpticalLinks(List<OpticalLinkDescription> links) {
+ this.opticalLinks = links;
+ }
+
+}
+
diff --git a/apps/optical/src/main/java/org/onlab/onos/optical/cfg/OpticalSwitchDescription.java b/apps/optical/src/main/java/org/onlab/onos/optical/cfg/OpticalSwitchDescription.java
new file mode 100644
index 0000000..18a3982
--- /dev/null
+++ b/apps/optical/src/main/java/org/onlab/onos/optical/cfg/OpticalSwitchDescription.java
@@ -0,0 +1,100 @@
+package org.onlab.onos.optical.cfg;
+
+import java.util.Map;
+import org.codehaus.jackson.JsonNode;
+import org.codehaus.jackson.annotate.JsonProperty;
+import org.onlab.util.HexString;
+
+/**
+ * Public class corresponding to the JSON-described data model.
+ */
+public class OpticalSwitchDescription {
+ protected String name;
+ protected long dpid;
+ protected String nodeDpid;
+ protected String type;
+ protected double latitude;
+ protected double longitude;
+ protected boolean allowed;
+ protected Map<String, JsonNode> params;
+ protected Map<String, String> publishAttributes;
+
+ public String getName() {
+ return name;
+ }
+ @JsonProperty("name")
+ public void setName(String name) {
+ this.name = name;
+ }
+
+ public long getDpid() {
+ return dpid;
+ }
+ @JsonProperty("dpid")
+ public void setDpid(long dpid) {
+ this.dpid = dpid;
+ this.nodeDpid = HexString.toHexString(dpid);
+ }
+
+ public String getNodeDpid() {
+ return nodeDpid;
+ }
+
+ public String getHexDpid() {
+ return nodeDpid;
+ }
+
+ public void setNodeDpid(String nodeDpid) {
+ this.nodeDpid = nodeDpid;
+ this.dpid = HexString.toLong(nodeDpid);
+ }
+
+ public String getType() {
+ return type;
+ }
+
+ public void setType(String type) {
+ this.type = type;
+ }
+
+ public double getLatitude() {
+ return latitude;
+ }
+
+ public void setLatitude(double latitude) {
+ this.latitude = latitude;
+ }
+
+ public double getLongitude() {
+ return longitude;
+ }
+
+ public void setLongitude(double longitude) {
+ this.longitude = longitude;
+ }
+
+ public boolean isAllowed() {
+ return allowed;
+ }
+
+ public void setAllowed(boolean allowed) {
+ this.allowed = allowed;
+ }
+
+ public Map<String, JsonNode> getParams() {
+ return params;
+ }
+
+ public void setParams(Map<String, JsonNode> params) {
+ this.params = params;
+ }
+
+ public Map<String, String> getPublishAttributes() {
+ return publishAttributes;
+ }
+
+ public void setPublishAttributes(Map<String, String> publishAttributes) {
+ this.publishAttributes = publishAttributes;
+ }
+
+}
diff --git a/apps/optical/src/main/java/org/onlab/onos/optical/cfg/PktOptLink.java b/apps/optical/src/main/java/org/onlab/onos/optical/cfg/PktOptLink.java
new file mode 100644
index 0000000..206109f
--- /dev/null
+++ b/apps/optical/src/main/java/org/onlab/onos/optical/cfg/PktOptLink.java
@@ -0,0 +1,110 @@
+package org.onlab.onos.optical.cfg;
+
+/**
+ * Packet-optical link Java data object.
+ */
+class PktOptLink {
+ private String srcNodeName;
+ private String snkNodeName;
+ private String srcNodeId;
+ private String snkNodeId;
+ private int srcPort;
+ private int snkPort;
+ private double bandwidth;
+ private double cost;
+ private long adminWeight;
+
+ public PktOptLink(String srcName, String snkName) {
+ this.srcNodeName = srcName;
+ this.snkNodeName = snkName;
+ }
+
+ public PktOptLink() {
+ // TODO Auto-generated constructor stub
+ }
+
+ public void setSrcNodeName(String name) {
+ this.srcNodeName = name;
+ }
+
+ public String getSrcNodeName() {
+ return this.srcNodeName;
+ }
+
+ public void setSnkNodeName(String name) {
+ this.snkNodeName = name;
+ }
+
+ public String getSnkNodeName() {
+ return this.snkNodeName;
+ }
+
+ public void setSrcNodeId(String nodeId) {
+ this.srcNodeId = nodeId;
+ }
+
+ public String getSrcNodeId() {
+ return this.srcNodeId;
+ }
+
+ public void setSnkNodeId(String nodeId) {
+ this.snkNodeId = nodeId;
+ }
+
+ public String getSnkNodeId() {
+ return this.snkNodeId;
+ }
+
+ public void setSrcPort(int port) {
+ this.srcPort = port;
+ }
+
+ public int getSrcPort() {
+ return this.srcPort;
+ }
+
+ public void setSnkPort(int port) {
+ this.snkPort = port;
+ }
+
+ public int getSnkPort() {
+ return this.snkPort;
+ }
+
+ public void setBandwdith(double x) {
+ this.bandwidth = x;
+ }
+
+ public double getBandwidth() {
+ return this.bandwidth;
+ }
+
+ public void setCost(double x) {
+ this.cost = x;
+ }
+
+ public double getCost() {
+ return this.cost;
+ }
+
+ public void setAdminWeight(long x) {
+ this.adminWeight = x;
+ }
+
+ public long getAdminWeight() {
+ return this.adminWeight;
+ }
+
+ @Override
+ public String toString() {
+ return new StringBuilder(" srcNodeName: ").append(this.srcNodeName)
+ .append(" snkNodeName: ").append(this.snkNodeName)
+ .append(" srcNodeId: ").append(this.srcNodeId)
+ .append(" snkNodeId: ").append(this.snkNodeId)
+ .append(" srcPort: ").append(this.srcPort)
+ .append(" snkPort: ").append(this.snkPort)
+ .append(" bandwidth: ").append(this.bandwidth)
+ .append(" cost: ").append(this.cost)
+ .append(" adminWeight: ").append(this.adminWeight).toString();
+ }
+}
diff --git a/apps/optical/src/main/java/org/onlab/onos/optical/cfg/Roadm.java b/apps/optical/src/main/java/org/onlab/onos/optical/cfg/Roadm.java
new file mode 100644
index 0000000..beca5af
--- /dev/null
+++ b/apps/optical/src/main/java/org/onlab/onos/optical/cfg/Roadm.java
@@ -0,0 +1,106 @@
+package org.onlab.onos.optical.cfg;
+
+/**
+ * ROADM Java data object converted from a JSON file.
+ */
+class Roadm {
+ private String name;
+ private String nodeID;
+ private double longtitude;
+ private double latitude;
+ private int regenNum;
+
+ //TODO use the following attributes when needed for configurations
+ private int tPort10G;
+ private int tPort40G;
+ private int tPort100G;
+ private int wPort;
+
+ public Roadm() {
+ }
+
+ public Roadm(String name) {
+ this.name = name;
+ }
+
+ public void setName(String name) {
+ this.name = name;
+ }
+
+ public String getName() {
+ return this.name;
+ }
+
+ public void setNodeId(String nameId) {
+ this.nodeID = nameId;
+ }
+
+ public String getNodeId() {
+ return this.nodeID;
+ }
+
+ public void setLongtitude(double x) {
+ this.longtitude = x;
+ }
+
+ public double getLongtitude() {
+ return this.longtitude;
+ }
+
+ public void setLatitude(double y) {
+ this.latitude = y;
+ }
+
+ public double getLatitude() {
+ return this.latitude;
+ }
+
+ public void setRegenNum(int num) {
+ this.regenNum = num;
+ }
+ public int getRegenNum() {
+ return this.regenNum;
+ }
+
+ public void setTport10GNum(int num) {
+ this.tPort10G = num;
+ }
+ public int getTport10GNum() {
+ return this.tPort10G;
+ }
+
+ public void setTport40GNum(int num) {
+ this.tPort40G = num;
+ }
+ public int getTport40GNum() {
+ return this.tPort40G;
+ }
+
+ public void setTport100GNum(int num) {
+ this.tPort100G = num;
+ }
+ public int getTport100GNum() {
+ return this.tPort100G;
+ }
+
+ public void setWportNum(int num) {
+ this.wPort = num;
+ }
+ public int getWportNum() {
+ return this.wPort;
+ }
+
+ @Override
+ public String toString() {
+ return new StringBuilder(" ROADM Name: ").append(this.name)
+ .append(" nodeID: ").append(this.nodeID)
+ .append(" longtitude: ").append(this.longtitude)
+ .append(" latitude: ").append(this.latitude)
+ .append(" regenNum: ").append(this.regenNum)
+ .append(" 10GTportNum: ").append(this.tPort10G)
+ .append(" 40GTportNum: ").append(this.tPort40G)
+ .append(" 100GTportNum: ").append(this.tPort100G)
+ .append(" WportNum: ").append(this.wPort).toString();
+ }
+}
+
diff --git a/apps/optical/src/main/java/org/onlab/onos/optical/cfg/WdmLink.java b/apps/optical/src/main/java/org/onlab/onos/optical/cfg/WdmLink.java
new file mode 100644
index 0000000..5e7b468
--- /dev/null
+++ b/apps/optical/src/main/java/org/onlab/onos/optical/cfg/WdmLink.java
@@ -0,0 +1,121 @@
+package org.onlab.onos.optical.cfg;
+
+/**
+ * WDM Link Java data object converted from a JSON file.
+ */
+class WdmLink {
+ private String srcNodeName;
+ private String snkNodeName;
+ private String srcNodeId;
+ private String snkNodeId;
+ private int srcPort;
+ private int snkPort;
+ private double distance;
+ private double cost;
+ private int wavelengthNumber;
+ private long adminWeight;
+
+ public WdmLink(String name1, String name2) {
+ this.srcNodeName = name1;
+ this.snkNodeName = name2;
+ }
+
+ public WdmLink() {
+ // TODO Auto-generated constructor stub
+ }
+
+ public void setSrcNodeName(String name) {
+ this.srcNodeName = name;
+ }
+
+ public String getSrcNodeName() {
+ return this.srcNodeName;
+ }
+
+ public void setSnkNodeName(String name) {
+ this.snkNodeName = name;
+ }
+
+ public String getSnkNodeName() {
+ return this.snkNodeName;
+ }
+
+ public void setSrcNodeId(String nodeId) {
+ this.srcNodeId = nodeId;
+ }
+
+ public String getSrcNodeId() {
+ return this.srcNodeId;
+ }
+
+ public void setSnkNodeId(String nodeId) {
+ this.snkNodeId = nodeId;
+ }
+
+ public String getSnkNodeId() {
+ return this.snkNodeId;
+ }
+
+ public void setSrcPort(int port) {
+ this.srcPort = port;
+ }
+
+ public int getSrcPort() {
+ return this.srcPort;
+ }
+
+ public void setSnkPort(int port) {
+ this.snkPort = port;
+ }
+
+ public int getSnkPort() {
+ return this.snkPort;
+ }
+
+ public void setDistance(double x) {
+ this.distance = x;
+ }
+
+ public double getDistance() {
+ return this.distance;
+ }
+
+ public void setCost(double x) {
+ this.cost = x;
+ }
+
+ public double getCost() {
+ return this.cost;
+ }
+
+ public void setWavelengthNumber(int x) {
+ this.wavelengthNumber = x;
+ }
+
+ public int getWavelengthNumber() {
+ return this.wavelengthNumber;
+ }
+
+ public void setAdminWeight(long x) {
+ this.adminWeight = x;
+ }
+
+ public long getAdminWeight() {
+ return this.adminWeight;
+ }
+
+ @Override
+ public String toString() {
+ return new StringBuilder(" srcNodeName: ").append(this.srcNodeName)
+ .append(" snkNodeName: ").append(this.snkNodeName)
+ .append(" srcNodeId: ").append(this.srcNodeId)
+ .append(" snkNodeId: ").append(this.snkNodeId)
+ .append(" srcPort: ").append(this.srcPort)
+ .append(" snkPort: ").append(this.snkPort)
+ .append(" distance: ").append(this.distance)
+ .append(" cost: ").append(this.cost)
+ .append(" wavelengthNumber: ").append(this.wavelengthNumber)
+ .append(" adminWeight: ").append(this.adminWeight).toString();
+ }
+}
+
diff --git a/apps/optical/src/main/resources/demo-10-roadm-6-ps.json b/apps/optical/src/main/resources/demo-10-roadm-6-ps.json
new file mode 100644
index 0000000..e4e1122
--- /dev/null
+++ b/apps/optical/src/main/resources/demo-10-roadm-6-ps.json
@@ -0,0 +1,391 @@
+{
+ "opticalSwitches": [
+ {
+ "allowed": true,
+ "latitude": 37.6,
+ "longitude": 122.3,
+ "name": "SFO-W10",
+ "nodeDpid": "00:00:ff:ff:ff:ff:ff:01",
+ "params": {
+ "numRegen": 0
+ },
+ "type": "Roadm"
+ },
+
+ {
+ "allowed": true,
+ "latitude": 37.3,
+ "longitude": 121.9,
+ "name": "SJC-W10",
+ "nodeDpid": "00:00:ff:ff:ff:ff:ff:02",
+ "params": {
+ "numRegen": 0
+ },
+ "type": "Roadm"
+ },
+
+ {
+ "allowed": true,
+ "latitude": 33.9,
+        "longitude": 118.4,
+ "name": "LAX-W10",
+ "nodeDpid": "00:00:ff:ff:ff:ff:ff:03",
+ "params": {
+ "numRegen": 0
+ },
+ "type": "Roadm"
+ },
+
+ {
+ "allowed": true,
+ "latitude": 32.8,
+ "longitude": 117.1,
+ "name": "SDG-W10",
+ "nodeDpid": "00:00:ff:ff:ff:ff:ff:04",
+ "params": {
+ "numRegen": 3
+ },
+ "type": "Roadm"
+ },
+
+ {
+ "allowed": true,
+ "latitude": 44.8,
+ "longitude": 93.1,
+ "name": "MSP-M10",
+ "nodeDpid": "00:00:ff:ff:ff:ff:ff:05",
+ "params": {
+ "numRegen": 3
+ },
+ "type": "Roadm"
+ },
+
+ {
+ "allowed": true,
+ "latitude": 32.8,
+ "longitude": 97.1,
+ "name": "DFW-M10",
+ "nodeDpid": "00:00:ff:ff:ff:ff:ff:06",
+ "params": {
+ "numRegen": 3
+ },
+ "type": "Roadm"
+ },
+
+ {
+ "allowed": true,
+ "latitude": 41.8,
+ "longitude": 120.1,
+ "name": "CHG-N10",
+ "nodeDpid": "00:00:ff:ff:ff:ff:ff:07",
+ "params": {
+ "numRegen": 3
+ },
+ "type": "Roadm"
+ },
+
+ {
+ "allowed": true,
+ "latitude": 38.8,
+ "longitude": 77.1,
+ "name": "IAD-M10",
+ "nodeDpid": "00:00:ff:ff:ff:ff:ff:08",
+ "params": {
+ "numRegen": 3
+ },
+ "type": "Roadm"
+ },
+
+ {
+ "allowed": true,
+ "latitude": 40.8,
+ "longitude": 73.1,
+ "name": "JFK-E10",
+ "nodeDpid": "00:00:ff:ff:ff:ff:ff:09",
+ "params": {
+ "numRegen": 0
+ },
+ "type": "Roadm"
+
+ },
+
+ {
+ "allowed": true,
+ "latitude": 33.8,
+ "longitude": 84.1,
+ "name": "ATL-S10",
+ "nodeDpid": "00:00:ff:ff:ff:ff:ff:0A",
+ "params": {
+ "numRegen": 0
+ },
+ "type": "Roadm"
+ }
+
+ ],
+
+ "opticalLinks": [
+ {
+ "allowed": true,
+ "nodeDpid1": "00:00:ff:ff:ff:ff:ff:01",
+ "nodeDpid2": "00:00:ff:ff:ff:ff:ff:02",
+ "params": {
+ "distKms": 1000,
+ "nodeName1": "SFO-W10",
+ "nodeName2": "SJC-W10",
+ "numWaves": 80,
+ "port1": 10,
+ "port2": 10
+ },
+ "type": "wdmLink"
+ },
+
+ {
+ "allowed": true,
+ "nodeDpid1": "00:00:ff:ff:ff:ff:ff:02",
+ "nodeDpid2": "00:00:ff:ff:ff:ff:ff:03",
+ "params": {
+ "distKms": 1000,
+ "nodeName1": "SJC-W10",
+ "nodeName2": "LAX-W10",
+ "numWaves": 80,
+ "port1": 20,
+ "port2": 10
+ },
+ "type": "wdmLink"
+ },
+
+ {
+ "allowed": true,
+ "nodeDpid1": "00:00:ff:ff:ff:ff:ff:03",
+ "nodeDpid2": "00:00:ff:ff:ff:ff:ff:04",
+ "params": {
+ "distKms": 1000,
+ "nodeName1": "LAX-W10",
+ "nodeName2": "SDG-W10",
+ "numWaves": 80,
+ "port1": 30,
+ "port2": 10
+ },
+ "type": "wdmLink"
+ },
+
+ {
+ "allowed": true,
+ "nodeDpid1": "00:00:ff:ff:ff:ff:ff:02",
+ "nodeDpid2": "00:00:ff:ff:ff:ff:ff:05",
+ "params": {
+ "distKms": 4000,
+ "nodeName1": "SJC-W10",
+ "nodeName2": "MSP-M10",
+ "numWaves": 80,
+ "port1": 20,
+ "port2": 10
+ },
+ "type": "wdmLink"
+ },
+
+ {
+
+ "allowed": true,
+ "nodeDpid1": "00:00:ff:ff:ff:ff:ff:03",
+ "nodeDpid2": "00:00:ff:ff:ff:ff:ff:06",
+ "params": {
+ "distKms": 5000,
+ "nodeName1": "LAX-W10",
+ "nodeName2": "DFW-M10",
+ "numWaves": 80,
+ "port1": 20,
+ "port2": 10
+ },
+ "type": "wdmLink"
+ },
+
+ {
+ "allowed": true,
+ "nodeDpid1": "00:00:ff:ff:ff:ff:ff:05",
+ "nodeDpid2": "00:00:ff:ff:ff:ff:ff:06",
+ "params": {
+ "distKms": 3000,
+ "nodeName1": "MSP-M10",
+ "nodeName2": "DFW-M10",
+ "numWaves": 80,
+ "port1": 30,
+ "port2": 20
+ },
+ "type": "wdmLink"
+ },
+
+ {
+ "allowed": true,
+ "nodeDpid1": "00:00:ff:ff:ff:ff:ff:05",
+ "nodeDpid2": "00:00:ff:ff:ff:ff:ff:07",
+ "params": {
+ "distKms": 3000,
+ "nodeName1": "MSP-M10",
+ "nodeName2": "CHG-N10",
+ "numWaves": 80,
+ "port1": 20,
+ "port2": 21
+ },
+ "type": "wdmLink"
+ },
+
+ {
+
+ "allowed": true,
+ "nodeDpid1": "00:00:ff:ff:ff:ff:ff:06",
+ "nodeDpid2": "00:00:ff:ff:ff:ff:ff:08",
+ "params": {
+ "distKms": 4000,
+ "nodeName1": "DFW-M10",
+ "nodeName2": "IAD-M10",
+ "numWaves": 80,
+ "port1": 30,
+ "port2": 10
+ },
+ "type": "wdmLink"
+ },
+
+ {
+
+ "allowed": true,
+ "nodeDpid1": "00:00:ff:ff:ff:ff:ff:07",
+ "nodeDpid2": "00:00:ff:ff:ff:ff:ff:08",
+ "params": {
+ "distKms": 4000,
+        "nodeName1": "CHG-N10",
+ "nodeName2": "IAD-M10",
+ "numWaves": 80,
+ "port1": 30,
+ "port2": 20
+ },
+ "type": "wdmLink"
+ },
+
+ {
+ "allowed": true,
+ "nodeDpid1": "00:00:ff:ff:ff:ff:ff:07",
+ "nodeDpid2": "00:00:ff:ff:ff:ff:ff:09",
+ "params": {
+ "distKms": 5000,
+        "nodeName1": "CHG-N10",
+ "nodeName2": "JFK-E10",
+ "numWaves": 80,
+ "port1": 20,
+ "port2": 10
+ },
+ "type": "wdmLink"
+ },
+
+ {
+ "allowed": true,
+ "nodeDpid1": "00:00:ff:ff:ff:ff:ff:08",
+ "nodeDpid2": "00:00:ff:ff:ff:ff:ff:0A",
+ "params": {
+ "distKms": 3000,
+ "nodeName1": "IAD-M10",
+ "nodeName2": "ATL-S10",
+ "numWaves": 80,
+ "port1": 30,
+ "port2": 10
+ },
+ "type": "wdmLink"
+ },
+
+ {
+
+ "allowed": true,
+ "nodeDpid1": "00:00:ff:ff:ff:ff:ff:09",
+ "nodeDpid2": "00:00:ff:ff:ff:ff:ff:0A",
+ "params": {
+ "distKms": 4000,
+ "nodeName1": "JFK-E10",
+ "nodeName2": "ATL-S10",
+ "numWaves": 80,
+ "port1": 20,
+ "port2": 20
+ },
+ "type": "wdmLink"
+ },
+
+
+ {
+ "allowed": true,
+ "nodeDpid1": "00:00:ff:ff:ff:ff:00:01",
+ "nodeDpid2": "00:00:ff:ff:ff:ff:ff:01",
+ "params": {
+ "nodeName1": "SFO-R10",
+ "nodeName2": "SFO-W10",
+ "port1": 10,
+ "port2": 1
+ },
+ "type": "pktOptLink"
+ },
+
+ {
+ "allowed": true,
+ "nodeDpid1": "00:00:ff:ff:ff:ff:00:03",
+ "nodeDpid2": "00:00:ff:ff:ff:ff:ff:03",
+ "params": {
+ "nodeName1": "LAX-R10",
+ "nodeName2": "LAX-W10",
+ "port1": 10,
+ "port2": 1
+ },
+ "type": "pktOptLink"
+ },
+
+ {
+ "allowed": true,
+ "nodeDpid1": "00:00:ff:ff:ff:ff:00:04",
+ "nodeDpid2": "00:00:ff:ff:ff:ff:ff:04",
+ "params": {
+ "nodeName1": "SDG-R10",
+ "nodeName2": "SDG-W10",
+ "port1": 10,
+ "port2": 1
+ },
+ "type": "pktOptLink"
+ },
+
+ {
+ "allowed": true,
+ "nodeDpid1": "00:00:ff:ff:ff:ff:00:07",
+ "nodeDpid2": "00:00:ff:ff:ff:ff:ff:07",
+ "params": {
+ "nodeName1": "CHG-R10",
+ "nodeName2": "CHG-W10",
+ "port1": 10,
+ "port2": 1
+ },
+ "type": "pktOptLink"
+ },
+
+ {
+ "allowed": true,
+ "nodeDpid1": "00:00:ff:ff:ff:ff:00:09",
+ "nodeDpid2": "00:00:ff:ff:ff:ff:ff:09",
+ "params": {
+ "nodeName1": "JFK-R10",
+ "nodeName2": "JFK-W10",
+ "port1": 10,
+ "port2": 1
+ },
+ "type": "pktOptLink"
+ },
+
+ {
+ "allowed": true,
+ "nodeDpid1": "00:00:ff:ff:ff:ff:00:0A",
+ "nodeDpid2": "00:00:ff:ff:ff:ff:ff:0A",
+ "params": {
+ "nodeName1": "ATL-R10",
+ "nodeName2": "ATL-W10",
+ "port1": 10,
+ "port2": 1
+ },
+ "type": "pktOptLink"
+    }
+
+ ]
+}
diff --git a/apps/optical/src/main/resources/demo-3-roadm-2-ps.json b/apps/optical/src/main/resources/demo-3-roadm-2-ps.json
new file mode 100644
index 0000000..6f2c2f5
--- /dev/null
+++ b/apps/optical/src/main/resources/demo-3-roadm-2-ps.json
@@ -0,0 +1,101 @@
+{
+ "opticalSwitches": [
+ {
+ "allowed": true,
+ "latitude": 37.6,
+ "longitude": 122.3,
+ "name": "ROADM1",
+ "nodeDpid": "00:00:ff:ff:ff:ff:ff:01",
+ "params": {
+ "numRegen": 0
+ },
+ "type": "Roadm"
+ },
+
+ {
+ "allowed": true,
+ "latitude": 37.3,
+ "longitude": 121.9,
+ "name": "ROADM2",
+ "nodeDpid": "00:00:ff:ff:ff:ff:ff:02",
+ "params": {
+ "numRegen": 0
+ },
+ "type": "Roadm"
+ },
+
+ {
+ "allowed": true,
+ "latitude": 33.9,
+ "longitude": 118.4,
+ "name": "ROADM3",
+ "nodeDpid": "00:00:ff:ff:ff:ff:ff:03",
+ "params": {
+ "numRegen": 2
+ },
+ "type": "Roadm"
+ }
+ ],
+
+ "opticalLinks": [
+ {
+ "allowed": true,
+ "nodeDpid1": "00:00:ff:ff:ff:ff:ff:01",
+ "nodeDpid2": "00:00:ff:ff:ff:ff:ff:03",
+ "params": {
+ "distKms": 1000,
+ "nodeName1": "ROADM1",
+ "nodeName2": "ROADM3",
+ "numWaves": 80,
+ "port1": 10,
+ "port2": 30
+ },
+ "type": "wdmLink"
+ },
+
+ {
+ "allowed": true,
+ "nodeDpid1": "00:00:ff:ff:ff:ff:ff:03",
+ "nodeDpid2": "00:00:ff:ff:ff:ff:ff:02",
+ "params": {
+ "distKms": 2000,
+ "nodeName1": "ROADM3",
+ "nodeName2": "ROADM2",
+ "numWaves": 80,
+ "port1": 31,
+ "port2": 20
+ },
+ "type": "wdmLink"
+ },
+
+
+ {
+ "allowed": true,
+ "nodeDpid1": "00:00:ff:ff:ff:ff:00:01",
+ "nodeDpid2": "00:00:ff:ff:ff:ff:ff:01",
+ "params": {
+ "nodeName1": "ROUTER1",
+ "nodeName2": "ROADM1",
+ "bandWidth": 100000,
+ "port1": 10,
+ "port2": 11
+ },
+ "type": "pktOptLink"
+ },
+
+ {
+ "allowed": true,
+ "nodeDpid1": "00:00:ff:ff:ff:ff:00:02",
+ "nodeDpid2": "00:00:ff:ff:ff:ff:ff:02",
+ "params": {
+ "nodeName1": "ROUTER2",
+ "nodeName2": "ROADM2",
+ "bandWidth": 100000,
+ "port1": 10,
+ "port2": 21
+ },
+ "type": "pktOptLink"
+ }
+
+ ]
+}
diff --git a/apps/pom.xml b/apps/pom.xml
index eeff7b4..e812c47 100644
--- a/apps/pom.xml
+++ b/apps/pom.xml
@@ -25,6 +25,8 @@
<module>proxyarp</module>
<module>config</module>
<module>sdnip</module>
+ <module>calendar</module>
+ <module>optical</module>
</modules>
<properties>
diff --git a/apps/sdnip/pom.xml b/apps/sdnip/pom.xml
index c8db20d..ce77bf7 100644
--- a/apps/sdnip/pom.xml
+++ b/apps/sdnip/pom.xml
@@ -36,6 +36,36 @@
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
</dependency>
+
+ <dependency>
+ <groupId>org.onlab.onos</groupId>
+ <artifactId>onlab-thirdparty</artifactId>
+ </dependency>
+
+ <dependency>
+ <groupId>org.onlab.onos</groupId>
+ <artifactId>onlab-misc</artifactId>
+ </dependency>
+
+ <dependency>
+ <groupId>org.onlab.onos</groupId>
+ <artifactId>onos-cli</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.karaf.shell</groupId>
+ <artifactId>org.apache.karaf.shell.console</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.osgi</groupId>
+ <artifactId>org.osgi.core</artifactId>
+ </dependency>
+
+ <dependency>
+ <groupId>org.easymock</groupId>
+ <artifactId>easymock</artifactId>
+ <scope>test</scope>
+ </dependency>
</dependencies>
</project>
diff --git a/apps/sdnip/src/main/java/org/onlab/onos/sdnip/HostServiceBasedInterfaceService.java b/apps/sdnip/src/main/java/org/onlab/onos/sdnip/HostToInterfaceAdaptor.java
similarity index 90%
rename from apps/sdnip/src/main/java/org/onlab/onos/sdnip/HostServiceBasedInterfaceService.java
rename to apps/sdnip/src/main/java/org/onlab/onos/sdnip/HostToInterfaceAdaptor.java
index d6ad3c4..7a6e6bb 100644
--- a/apps/sdnip/src/main/java/org/onlab/onos/sdnip/HostServiceBasedInterfaceService.java
+++ b/apps/sdnip/src/main/java/org/onlab/onos/sdnip/HostToInterfaceAdaptor.java
@@ -18,11 +18,11 @@
/**
  * Provides InterfaceService using PortAddresses data from the HostService.
*/
-public class HostServiceBasedInterfaceService implements InterfaceService {
+public class HostToInterfaceAdaptor implements InterfaceService {
private final HostService hostService;
- public HostServiceBasedInterfaceService(HostService hostService) {
+ public HostToInterfaceAdaptor(HostService hostService) {
this.hostService = checkNotNull(hostService);
}
diff --git a/apps/sdnip/src/main/java/org/onlab/onos/sdnip/PeerConnectivity.java b/apps/sdnip/src/main/java/org/onlab/onos/sdnip/PeerConnectivityManager.java
similarity index 95%
rename from apps/sdnip/src/main/java/org/onlab/onos/sdnip/PeerConnectivity.java
rename to apps/sdnip/src/main/java/org/onlab/onos/sdnip/PeerConnectivityManager.java
index e17206d..3917f5a 100644
--- a/apps/sdnip/src/main/java/org/onlab/onos/sdnip/PeerConnectivity.java
+++ b/apps/sdnip/src/main/java/org/onlab/onos/sdnip/PeerConnectivityManager.java
@@ -25,10 +25,10 @@
/**
* Manages the connectivity requirements between peers.
*/
-public class PeerConnectivity {
+public class PeerConnectivityManager {
private static final Logger log = LoggerFactory.getLogger(
- PeerConnectivity.class);
+ PeerConnectivityManager.class);
// TODO these shouldn't be defined here
private static final short BGP_PORT = 179;
@@ -41,7 +41,7 @@
// TODO this sucks.
private int intentId = 0;
- public PeerConnectivity(SdnIpConfigService configInfoService,
+ public PeerConnectivityManager(SdnIpConfigService configInfoService,
InterfaceService interfaceService, IntentService intentService) {
this.configInfoService = configInfoService;
this.interfaceService = interfaceService;
@@ -126,8 +126,8 @@
TrafficSelector selector = DefaultTrafficSelector.builder()
.matchEthType(Ethernet.TYPE_IPV4)
.matchIPProtocol(IPv4.PROTOCOL_TCP)
- .matchIPSrc(IpPrefix.valueOf(bgpdAddress.toRealInt(), IPV4_BIT_LENGTH))
- .matchIPDst(IpPrefix.valueOf(bgpdPeerAddress.toRealInt(), IPV4_BIT_LENGTH))
+ .matchIPSrc(IpPrefix.valueOf(bgpdAddress.toInt(), IPV4_BIT_LENGTH))
+ .matchIPDst(IpPrefix.valueOf(bgpdPeerAddress.toInt(), IPV4_BIT_LENGTH))
.matchTcpDst(BGP_PORT)
.build();
@@ -147,8 +147,8 @@
selector = DefaultTrafficSelector.builder()
.matchEthType(Ethernet.TYPE_IPV4)
.matchIPProtocol(IPv4.PROTOCOL_TCP)
- .matchIPSrc(IpPrefix.valueOf(bgpdAddress.toRealInt(), IPV4_BIT_LENGTH))
- .matchIPDst(IpPrefix.valueOf(bgpdPeerAddress.toRealInt(), IPV4_BIT_LENGTH))
+ .matchIPSrc(IpPrefix.valueOf(bgpdAddress.toInt(), IPV4_BIT_LENGTH))
+ .matchIPDst(IpPrefix.valueOf(bgpdPeerAddress.toInt(), IPV4_BIT_LENGTH))
.matchTcpSrc(BGP_PORT)
.build();
@@ -165,8 +165,8 @@
selector = DefaultTrafficSelector.builder()
.matchEthType(Ethernet.TYPE_IPV4)
.matchIPProtocol(IPv4.PROTOCOL_TCP)
- .matchIPSrc(IpPrefix.valueOf(bgpdPeerAddress.toRealInt(), IPV4_BIT_LENGTH))
- .matchIPDst(IpPrefix.valueOf(bgpdAddress.toRealInt(), IPV4_BIT_LENGTH))
+ .matchIPSrc(IpPrefix.valueOf(bgpdPeerAddress.toInt(), IPV4_BIT_LENGTH))
+ .matchIPDst(IpPrefix.valueOf(bgpdAddress.toInt(), IPV4_BIT_LENGTH))
.matchTcpDst(BGP_PORT)
.build();
@@ -183,8 +183,8 @@
selector = DefaultTrafficSelector.builder()
.matchEthType(Ethernet.TYPE_IPV4)
.matchIPProtocol(IPv4.PROTOCOL_TCP)
- .matchIPSrc(IpPrefix.valueOf(bgpdPeerAddress.toRealInt(), IPV4_BIT_LENGTH))
- .matchIPDst(IpPrefix.valueOf(bgpdAddress.toRealInt(), IPV4_BIT_LENGTH))
+ .matchIPSrc(IpPrefix.valueOf(bgpdPeerAddress.toInt(), IPV4_BIT_LENGTH))
+ .matchIPDst(IpPrefix.valueOf(bgpdAddress.toInt(), IPV4_BIT_LENGTH))
.matchTcpSrc(BGP_PORT)
.build();
@@ -251,8 +251,8 @@
TrafficSelector selector = DefaultTrafficSelector.builder()
.matchEthType(Ethernet.TYPE_IPV4)
.matchIPProtocol(IPv4.PROTOCOL_ICMP)
- .matchIPSrc(IpPrefix.valueOf(bgpdAddress.toRealInt(), IPV4_BIT_LENGTH))
- .matchIPDst(IpPrefix.valueOf(bgpdPeerAddress.toRealInt(), IPV4_BIT_LENGTH))
+ .matchIPSrc(IpPrefix.valueOf(bgpdAddress.toInt(), IPV4_BIT_LENGTH))
+ .matchIPDst(IpPrefix.valueOf(bgpdPeerAddress.toInt(), IPV4_BIT_LENGTH))
.build();
TrafficTreatment treatment = DefaultTrafficTreatment.builder()
@@ -269,8 +269,8 @@
selector = DefaultTrafficSelector.builder()
.matchEthType(Ethernet.TYPE_IPV4)
.matchIPProtocol(IPv4.PROTOCOL_ICMP)
- .matchIPSrc(IpPrefix.valueOf(bgpdPeerAddress.toRealInt(), IPV4_BIT_LENGTH))
- .matchIPDst(IpPrefix.valueOf(bgpdAddress.toRealInt(), IPV4_BIT_LENGTH))
+ .matchIPSrc(IpPrefix.valueOf(bgpdPeerAddress.toInt(), IPV4_BIT_LENGTH))
+ .matchIPDst(IpPrefix.valueOf(bgpdAddress.toInt(), IPV4_BIT_LENGTH))
.build();
PointToPointIntent reversedIntent = new PointToPointIntent(
diff --git a/apps/sdnip/src/main/java/org/onlab/onos/sdnip/RouteEntry.java b/apps/sdnip/src/main/java/org/onlab/onos/sdnip/RouteEntry.java
new file mode 100644
index 0000000..f8d659c
--- /dev/null
+++ b/apps/sdnip/src/main/java/org/onlab/onos/sdnip/RouteEntry.java
@@ -0,0 +1,100 @@
+package org.onlab.onos.sdnip;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+
+import java.util.Objects;
+
+import org.onlab.packet.IpAddress;
+import org.onlab.packet.IpPrefix;
+
+import com.google.common.base.MoreObjects;
+
+/**
+ * Represents a route entry for an IP prefix.
+ */
+public class RouteEntry {
+ private final IpPrefix prefix; // The IP prefix
+ private final IpAddress nextHop; // Next-hop IP address
+
+ /**
+ * Class constructor.
+ *
+ * @param prefix the IP prefix of the route
+ * @param nextHop the next hop IP address for the route
+ */
+ public RouteEntry(IpPrefix prefix, IpAddress nextHop) {
+ this.prefix = checkNotNull(prefix);
+ this.nextHop = checkNotNull(nextHop);
+ }
+
+ /**
+ * Returns the IP prefix of the route.
+ *
+ * @return the IP prefix of the route
+ */
+ public IpPrefix prefix() {
+ return prefix;
+ }
+
+ /**
+ * Returns the next hop IP address for the route.
+ *
+ * @return the next hop IP address for the route
+ */
+ public IpAddress nextHop() {
+ return nextHop;
+ }
+
+ /**
+ * Creates the binary string representation of an IPv4 prefix.
+ * The string length is equal to the prefix length.
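+     * For example, the prefix 10.0.0.0/8 maps to the string "00001010".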
+ *
+ * @param ip4Prefix the IPv4 prefix to use
+ * @return the binary string representation
+ */
+ static String createBinaryString(IpPrefix ip4Prefix) {
+ if (ip4Prefix.prefixLength() == 0) {
+ return "";
+ }
+
+ StringBuilder result = new StringBuilder(ip4Prefix.prefixLength());
+ long value = ip4Prefix.toInt();
+ for (int i = 0; i < ip4Prefix.prefixLength(); i++) {
+            long mask = 1L << (IpAddress.MAX_INET_MASK - 1 - i);
+ result.append(((value & mask) == 0) ? "0" : "1");
+ }
+ return result.toString();
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ if (this == other) {
+ return true;
+ }
+
+ //
+ // NOTE: Subclasses are considered as change of identity, hence
+ // equals() will return false if the class type doesn't match.
+ //
+ if (other == null || getClass() != other.getClass()) {
+ return false;
+ }
+
+ RouteEntry otherRoute = (RouteEntry) other;
+ return Objects.equals(this.prefix, otherRoute.prefix) &&
+ Objects.equals(this.nextHop, otherRoute.nextHop);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(prefix, nextHop);
+ }
+
+ @Override
+ public String toString() {
+ return MoreObjects.toStringHelper(getClass())
+ .add("prefix", prefix)
+ .add("nextHop", nextHop)
+ .toString();
+ }
+}
diff --git a/apps/sdnip/src/main/java/org/onlab/onos/sdnip/RouteListener.java b/apps/sdnip/src/main/java/org/onlab/onos/sdnip/RouteListener.java
new file mode 100644
index 0000000..424e348
--- /dev/null
+++ b/apps/sdnip/src/main/java/org/onlab/onos/sdnip/RouteListener.java
@@ -0,0 +1,13 @@
+package org.onlab.onos.sdnip;
+
+/**
+ * An interface to receive route updates from route providers.
+ */
+public interface RouteListener {
+ /**
+ * Receives a route update from a route provider.
+ *
+ * @param routeUpdate the updated route information
+ */
+ public void update(RouteUpdate routeUpdate);
+}
diff --git a/apps/sdnip/src/main/java/org/onlab/onos/sdnip/RouteUpdate.java b/apps/sdnip/src/main/java/org/onlab/onos/sdnip/RouteUpdate.java
new file mode 100644
index 0000000..a134a7a
--- /dev/null
+++ b/apps/sdnip/src/main/java/org/onlab/onos/sdnip/RouteUpdate.java
@@ -0,0 +1,91 @@
+package org.onlab.onos.sdnip;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+
+import java.util.Objects;
+
+import com.google.common.base.MoreObjects;
+
+/**
+ * Represents a change in routing information.
+ */
+public class RouteUpdate {
+ private final Type type; // The route update type
+ private final RouteEntry routeEntry; // The updated route entry
+
+ /**
+ * Specifies the type of a route update.
+ * <p/>
+ * Route updates can either provide updated information for a route, or
+     * withdraw a previously announced route.
+ */
+ public enum Type {
+ /**
+ * The update contains updated route information for a route.
+ */
+ UPDATE,
+ /**
+ * The update withdraws the route, meaning any previous information is
+ * no longer valid.
+ */
+ DELETE
+ }
+
+ /**
+ * Class constructor.
+ *
+ * @param type the type of the route update
+ * @param routeEntry the route entry with the update
+ */
+ public RouteUpdate(Type type, RouteEntry routeEntry) {
+ this.type = type;
+ this.routeEntry = checkNotNull(routeEntry);
+ }
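+    // For example, announcing a new route:
+    //   new RouteUpdate(Type.UPDATE,
+    //                   new RouteEntry(IpPrefix.valueOf("172.16.20.0/24"),
+    //                                  IpAddress.valueOf("192.168.10.1")));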
+
+ /**
+ * Returns the type of the route update.
+ *
+ * @return the type of the update
+ */
+ public Type type() {
+ return type;
+ }
+
+ /**
+ * Returns the route entry the route update is for.
+ *
+ * @return the route entry the route update is for
+ */
+ public RouteEntry routeEntry() {
+ return routeEntry;
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ if (other == this) {
+ return true;
+ }
+
+ if (!(other instanceof RouteUpdate)) {
+ return false;
+ }
+
+ RouteUpdate otherUpdate = (RouteUpdate) other;
+
+ return Objects.equals(this.type, otherUpdate.type) &&
+ Objects.equals(this.routeEntry, otherUpdate.routeEntry);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(type, routeEntry);
+ }
+
+ @Override
+ public String toString() {
+ return MoreObjects.toStringHelper(getClass())
+ .add("type", type)
+ .add("routeEntry", routeEntry)
+ .toString();
+ }
+}
diff --git a/apps/sdnip/src/main/java/org/onlab/onos/sdnip/Router.java b/apps/sdnip/src/main/java/org/onlab/onos/sdnip/Router.java
new file mode 100644
index 0000000..d610361
--- /dev/null
+++ b/apps/sdnip/src/main/java/org/onlab/onos/sdnip/Router.java
@@ -0,0 +1,771 @@
+package org.onlab.onos.sdnip;
+
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.Semaphore;
+
+import org.apache.commons.lang3.tuple.Pair;
+import org.onlab.onos.net.ConnectPoint;
+import org.onlab.onos.net.Host;
+import org.onlab.onos.net.flow.DefaultTrafficSelector;
+import org.onlab.onos.net.flow.DefaultTrafficTreatment;
+import org.onlab.onos.net.flow.TrafficSelector;
+import org.onlab.onos.net.flow.TrafficTreatment;
+import org.onlab.onos.net.flow.criteria.Criteria.IPCriterion;
+import org.onlab.onos.net.flow.criteria.Criterion;
+import org.onlab.onos.net.flow.criteria.Criterion.Type;
+import org.onlab.onos.net.host.HostEvent;
+import org.onlab.onos.net.host.HostListener;
+import org.onlab.onos.net.host.HostService;
+import org.onlab.onos.net.intent.Intent;
+import org.onlab.onos.net.intent.IntentId;
+import org.onlab.onos.net.intent.IntentService;
+import org.onlab.onos.net.intent.MultiPointToSinglePointIntent;
+import org.onlab.onos.sdnip.config.BgpPeer;
+import org.onlab.onos.sdnip.config.Interface;
+import org.onlab.onos.sdnip.config.SdnIpConfigService;
+import org.onlab.packet.Ethernet;
+import org.onlab.packet.IpAddress;
+import org.onlab.packet.IpPrefix;
+import org.onlab.packet.MacAddress;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Objects;
+import com.google.common.collect.HashMultimap;
+import com.google.common.collect.Multimaps;
+import com.google.common.collect.SetMultimap;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import com.googlecode.concurrenttrees.common.KeyValuePair;
+import com.googlecode.concurrenttrees.radix.node.concrete.DefaultByteArrayNodeFactory;
+import com.googlecode.concurrenttrees.radixinverted.ConcurrentInvertedRadixTree;
+import com.googlecode.concurrenttrees.radixinverted.InvertedRadixTree;
+
+/**
+ * This class processes BGP route updates, translates each update into an
+ * intent and submits the intent.
+ *
+ * TODO: Make it thread-safe.
+ */
+public class Router implements RouteListener {
+
+ private static final Logger log = LoggerFactory.getLogger(Router.class);
+
+    // Stores all route entries in a radix tree.
+    // The key in this tree is the binary string of the route's prefix.
+ private InvertedRadixTree<RouteEntry> bgpRoutes;
+
+ // Stores all incoming route updates in a queue.
+ private BlockingQueue<RouteUpdate> routeUpdates;
+
+    // The IpAddress key is the next hop address of each route update.
+ private SetMultimap<IpAddress, RouteEntry> routesWaitingOnArp;
+ private ConcurrentHashMap<IpPrefix, MultiPointToSinglePointIntent> pushedRouteIntents;
+
+ private IntentService intentService;
+ //private IProxyArpService proxyArp;
+ private HostService hostService;
+ private SdnIpConfigService configInfoService;
+ private InterfaceService interfaceService;
+
+ private ExecutorService bgpUpdatesExecutor;
+ private ExecutorService bgpIntentsSynchronizerExecutor;
+
+ // TODO temporary
+ private int intentId = Integer.MAX_VALUE / 2;
+
+ //
+ // State to deal with SDN-IP Leader election and pushing Intents
+ //
+ private Semaphore intentsSynchronizerSemaphore = new Semaphore(0);
+ private volatile boolean isElectedLeader = false;
+ private volatile boolean isActivatedLeader = false;
+
+    // For routes announced by the local BGP daemon in the SDN network,
+    // the next hop will be 0.0.0.0.
+ public static final IpAddress LOCAL_NEXT_HOP = IpAddress.valueOf("0.0.0.0");
+
+ /**
+ * Class constructor.
+ *
+ * @param intentService the intent service
+ * @param hostService the host service
+ * @param configInfoService the configuration service
+ * @param interfaceService the interface service
+ */
+ public Router(IntentService intentService, HostService hostService,
+ SdnIpConfigService configInfoService, InterfaceService interfaceService) {
+
+ this.intentService = intentService;
+ this.hostService = hostService;
+ this.configInfoService = configInfoService;
+ this.interfaceService = interfaceService;
+
+ bgpRoutes = new ConcurrentInvertedRadixTree<>(
+ new DefaultByteArrayNodeFactory());
+ routeUpdates = new LinkedBlockingQueue<>();
+ routesWaitingOnArp = Multimaps.synchronizedSetMultimap(
+ HashMultimap.<IpAddress, RouteEntry>create());
+ pushedRouteIntents = new ConcurrentHashMap<>();
+
+ bgpUpdatesExecutor = Executors.newSingleThreadExecutor(
+ new ThreadFactoryBuilder().setNameFormat("bgp-updates-%d").build());
+ bgpIntentsSynchronizerExecutor = Executors.newSingleThreadExecutor(
+ new ThreadFactoryBuilder()
+ .setNameFormat("bgp-intents-synchronizer-%d").build());
+
+ this.hostService.addListener(new InternalHostListener());
+ }
+
+ /**
+ * Starts the Router.
+ */
+ public void start() {
+
+ // TODO hack to enable SDN-IP now for testing
+ isElectedLeader = true;
+ isActivatedLeader = true;
+
+ bgpUpdatesExecutor.execute(new Runnable() {
+ @Override
+ public void run() {
+ doUpdatesThread();
+ }
+ });
+
+ bgpIntentsSynchronizerExecutor.execute(new Runnable() {
+ @Override
+ public void run() {
+ doIntentSynchronizationThread();
+ }
+ });
+ }
+
+ //@Override TODO hook this up to something
+ public void leaderChanged(boolean isLeader) {
+ log.debug("Leader changed: {}", isLeader);
+
+ if (!isLeader) {
+ this.isElectedLeader = false;
+ this.isActivatedLeader = false;
+ return; // Nothing to do
+ }
+ this.isActivatedLeader = false;
+ this.isElectedLeader = true;
+
+ //
+ // Tell the Intents Synchronizer thread to start the synchronization
+ //
+ intentsSynchronizerSemaphore.release();
+ }
+
+ @Override
+ public void update(RouteUpdate routeUpdate) {
+ log.debug("Received new route Update: {}", routeUpdate);
+
+ try {
+ routeUpdates.put(routeUpdate);
+ } catch (InterruptedException e) {
+ log.debug("Interrupted while putting on routeUpdates queue", e);
+ Thread.currentThread().interrupt();
+ }
+ }
+
+ /**
+ * Thread for Intent Synchronization.
+ */
+ private void doIntentSynchronizationThread() {
+ boolean interrupted = false;
+ try {
+ while (!interrupted) {
+ try {
+ intentsSynchronizerSemaphore.acquire();
+ //
+ // Drain all permits, because a single synchronization is
+ // sufficient.
+ //
+ intentsSynchronizerSemaphore.drainPermits();
+ } catch (InterruptedException e) {
+ log.debug("Interrupted while waiting to become " +
+ "Intent Synchronization leader");
+ interrupted = true;
+ break;
+ }
+ syncIntents();
+ }
+ } finally {
+ if (interrupted) {
+ Thread.currentThread().interrupt();
+ }
+ }
+ }
+
+ /**
+ * Thread for handling route updates.
+ */
+ private void doUpdatesThread() {
+ boolean interrupted = false;
+ try {
+ while (!interrupted) {
+ try {
+ RouteUpdate update = routeUpdates.take();
+ switch (update.type()) {
+ case UPDATE:
+ processRouteAdd(update.routeEntry());
+ break;
+ case DELETE:
+ processRouteDelete(update.routeEntry());
+ break;
+ default:
+ log.error("Unknown update Type: {}", update.type());
+ break;
+ }
+ } catch (InterruptedException e) {
+ log.debug("Interrupted while taking from updates queue", e);
+ interrupted = true;
+ } catch (Exception e) {
+ log.debug("exception", e);
+ }
+ }
+ } finally {
+ if (interrupted) {
+ Thread.currentThread().interrupt();
+ }
+ }
+ }
+
+ /**
+ * Performs Intents Synchronization between the internally stored Route
+ * Intents and the installed Route Intents.
+ */
+ private void syncIntents() {
+ synchronized (this) {
+ if (!isElectedLeader) {
+ return; // Nothing to do: not the leader anymore
+ }
+ log.debug("Syncing SDN-IP Route Intents...");
+
+ Map<IpPrefix, MultiPointToSinglePointIntent> fetchedIntents =
+ new HashMap<>();
+
+ //
+            // Fetch all intents, and classify the Multi-Point-to-Single-Point
+            // Intents based on the matching prefix.
+ //
+ for (Intent intent : intentService.getIntents()) {
+ //
+ // TODO: Ignore all intents that are not installed by
+ // the SDN-IP application.
+ //
+ if (!(intent instanceof MultiPointToSinglePointIntent)) {
+ continue;
+ }
+ MultiPointToSinglePointIntent mp2pIntent =
+ (MultiPointToSinglePointIntent) intent;
+ /*Match match = mp2pIntent.getMatch();
+ if (!(match instanceof PacketMatch)) {
+ continue;
+ }
+ PacketMatch packetMatch = (PacketMatch) match;
+ Ip4Prefix prefix = packetMatch.getDstIpAddress();
+ if (prefix == null) {
+ continue;
+ }
+ fetchedIntents.put(prefix, mp2pIntent);*/
+ for (Criterion criterion : mp2pIntent.selector().criteria()) {
+ if (criterion.type() == Type.IPV4_DST) {
+ IPCriterion ipCriterion = (IPCriterion) criterion;
+ fetchedIntents.put(ipCriterion.ip(), mp2pIntent);
+ }
+ }
+
+ }
+
+ //
+ // Compare for each prefix the local IN-MEMORY Intents with the
+ // FETCHED Intents:
+            //  - If the IN-MEMORY Intent is the same as the FETCHED Intent,
+            //    store the FETCHED Intent in the local memory (i.e., override
+            //    the IN-MEMORY Intent) to preserve the original Intent ID
+            //  - If the IN-MEMORY Intent is not the same as the FETCHED
+            //    Intent, delete the FETCHED Intent, and push/install the
+            //    IN-MEMORY Intent.
+            //  - If there is an IN-MEMORY Intent for a prefix, but no FETCHED
+            //    Intent for the same prefix, then push/install the IN-MEMORY
+            //    Intent.
+            //  - If there is a FETCHED Intent for a prefix, but no IN-MEMORY
+            //    Intent for the same prefix, then delete/withdraw the FETCHED
+ // Intent.
+ //
+ Collection<Pair<IpPrefix, MultiPointToSinglePointIntent>>
+ storeInMemoryIntents = new LinkedList<>();
+ Collection<Pair<IpPrefix, MultiPointToSinglePointIntent>>
+ addIntents = new LinkedList<>();
+ Collection<Pair<IpPrefix, MultiPointToSinglePointIntent>>
+ deleteIntents = new LinkedList<>();
+ for (Map.Entry<IpPrefix, MultiPointToSinglePointIntent> entry :
+ pushedRouteIntents.entrySet()) {
+ IpPrefix prefix = entry.getKey();
+ MultiPointToSinglePointIntent inMemoryIntent =
+ entry.getValue();
+ MultiPointToSinglePointIntent fetchedIntent =
+ fetchedIntents.get(prefix);
+
+ if (fetchedIntent == null) {
+ //
+                    // No FETCHED Intent for the same prefix: push the
+                    // IN-MEMORY Intent.
+ //
+ addIntents.add(Pair.of(prefix, inMemoryIntent));
+ continue;
+ }
+
+ //
+                // If the IN-MEMORY Intent is the same as the FETCHED Intent,
+                // store the FETCHED Intent in the local memory.
+ //
+ if (compareMultiPointToSinglePointIntents(inMemoryIntent,
+ fetchedIntent)) {
+ storeInMemoryIntents.add(Pair.of(prefix, fetchedIntent));
+ } else {
+ //
+                    // The IN-MEMORY Intent is not the same as the FETCHED
+                    // Intent, hence delete the FETCHED Intent and install the
+ // IN-MEMORY Intent.
+ //
+ deleteIntents.add(Pair.of(prefix, fetchedIntent));
+ addIntents.add(Pair.of(prefix, inMemoryIntent));
+ }
+ fetchedIntents.remove(prefix);
+ }
+
+ //
+ // Any remaining FETCHED Intents have to be deleted/withdrawn
+ //
+ for (Map.Entry<IpPrefix, MultiPointToSinglePointIntent> entry :
+ fetchedIntents.entrySet()) {
+ IpPrefix prefix = entry.getKey();
+ MultiPointToSinglePointIntent fetchedIntent = entry.getValue();
+ deleteIntents.add(Pair.of(prefix, fetchedIntent));
+ }
+
+ //
+ // Perform the actions:
+            // 1. Store in memory fetched intents that are the same. This can
+            //    be done even if we are not the leader anymore
+ // 2. Delete intents: check if the leader before each operation
+ // 3. Add intents: check if the leader before each operation
+ //
+ for (Pair<IpPrefix, MultiPointToSinglePointIntent> pair :
+ storeInMemoryIntents) {
+ IpPrefix prefix = pair.getLeft();
+ MultiPointToSinglePointIntent intent = pair.getRight();
+ log.debug("Intent synchronization: updating in-memory " +
+ "Intent for prefix: {}", prefix);
+ pushedRouteIntents.put(prefix, intent);
+ }
+ //
+ isActivatedLeader = true; // Allow push of Intents
+ for (Pair<IpPrefix, MultiPointToSinglePointIntent> pair :
+ deleteIntents) {
+ IpPrefix prefix = pair.getLeft();
+ MultiPointToSinglePointIntent intent = pair.getRight();
+ if (!isElectedLeader) {
+ isActivatedLeader = false;
+ return;
+ }
+ log.debug("Intent synchronization: deleting Intent for " +
+ "prefix: {}", prefix);
+ intentService.withdraw(intent);
+ }
+ //
+ for (Pair<IpPrefix, MultiPointToSinglePointIntent> pair :
+ addIntents) {
+ IpPrefix prefix = pair.getLeft();
+ MultiPointToSinglePointIntent intent = pair.getRight();
+ if (!isElectedLeader) {
+ isActivatedLeader = false;
+ return;
+ }
+ log.debug("Intent synchronization: adding Intent for " +
+ "prefix: {}", prefix);
+ intentService.submit(intent);
+ }
+ if (!isElectedLeader) {
+ isActivatedLeader = false;
+ }
+ log.debug("Syncing SDN-IP routes completed.");
+ }
+ }
+
+ /**
+     * Compares two multi-point to single-point intents to determine whether
+     * they represent the same logical intention.
+     *
+     * @param intent1 the first Intent to compare
+     * @param intent2 the second Intent to compare
+     * @return true if both Intents represent the same logical intention,
+     * otherwise false
+ */
+ private boolean compareMultiPointToSinglePointIntents(
+ MultiPointToSinglePointIntent intent1,
+ MultiPointToSinglePointIntent intent2) {
+ /*Match match1 = intent1.getMatch();
+ Match match2 = intent2.getMatch();
+ Action action1 = intent1.getAction();
+ Action action2 = intent2.getAction();
+ Set<SwitchPort> ingressPorts1 = intent1.getIngressPorts();
+ Set<SwitchPort> ingressPorts2 = intent2.getIngressPorts();
+ SwitchPort egressPort1 = intent1.getEgressPort();
+ SwitchPort egressPort2 = intent2.getEgressPort();
+
+ return Objects.equal(match1, match2) &&
+ Objects.equal(action1, action2) &&
+ Objects.equal(egressPort1, egressPort2) &&
+ Objects.equal(ingressPorts1, ingressPorts2);*/
+ return Objects.equal(intent1.selector(), intent2.selector()) &&
+ Objects.equal(intent1.treatment(), intent2.treatment()) &&
+ Objects.equal(intent1.ingressPoints(), intent2.ingressPoints()) &&
+ Objects.equal(intent1.egressPoint(), intent2.egressPoint());
+ }
+
+ /**
+ * Processes adding a route entry.
+ * <p/>
+     * Puts the new route entry into the radix tree. If there was an existing
+     * next hop for this prefix and the new next hop is different, the old
+     * route entry is deleted first. If the next hop is within the SDN domain,
+     * we do not handle it at the moment. Otherwise, the route is added.
+ *
+ * @param routeEntry the route entry to add
+ */
+ protected void processRouteAdd(RouteEntry routeEntry) {
+ synchronized (this) {
+ log.debug("Processing route add: {}", routeEntry);
+
+ IpPrefix prefix = routeEntry.prefix();
+ IpAddress nextHop = null;
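+            // put() returns the entry previously stored for this prefix (if
+            // any), which tells us whether the prefix was already known.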
+ RouteEntry foundRouteEntry =
+ bgpRoutes.put(RouteEntry.createBinaryString(prefix),
+ routeEntry);
+ if (foundRouteEntry != null) {
+ nextHop = foundRouteEntry.nextHop();
+ }
+
+ if (nextHop != null && !nextHop.equals(routeEntry.nextHop())) {
+                // There was an existing next hop for this prefix. This
+                // update supersedes it, so we need to remove the old intent
+                // for this prefix from the switches.
+ executeRouteDelete(routeEntry);
+ }
+ if (nextHop != null && nextHop.equals(routeEntry.nextHop())) {
+ return;
+ }
+
+ if (routeEntry.nextHop().equals(LOCAL_NEXT_HOP)) {
+ // Route originated by SDN domain
+ // We don't handle these at the moment
+ log.debug("Own route {} to {}",
+ routeEntry.prefix(), routeEntry.nextHop());
+ return;
+ }
+
+ executeRouteAdd(routeEntry);
+ }
+ }
+
+ /**
+ * Executes adding a route entry.
+ * <p/>
+     * Finds the egress interface and the MAC address of the next hop router
+     * for this route entry. If the MAC address cannot be found in the ARP
+     * cache, the prefix is put into the routesWaitingOnArp queue. Otherwise,
+     * a new route intent is created and installed.
+ *
+ * @param routeEntry the route entry to add
+ */
+ private void executeRouteAdd(RouteEntry routeEntry) {
+ log.debug("Executing route add: {}", routeEntry);
+
+ // See if we know the MAC address of the next hop
+ //MacAddress nextHopMacAddress =
+ //proxyArp.getMacAddress(routeEntry.getNextHop());
+ MacAddress nextHopMacAddress = null;
+ Set<Host> hosts = hostService.getHostsByIp(
+ routeEntry.nextHop().toPrefix());
+ if (!hosts.isEmpty()) {
+ // TODO how to handle if multiple hosts are returned?
+ nextHopMacAddress = hosts.iterator().next().mac();
+ }
+
+ if (nextHopMacAddress == null) {
+ routesWaitingOnArp.put(routeEntry.nextHop(), routeEntry);
+ //proxyArp.sendArpRequest(routeEntry.getNextHop(), this, true);
+ // TODO maybe just do this for every prefix anyway
+ hostService.startMonitoringIp(routeEntry.nextHop());
+ return;
+ }
+
+ addRouteIntentToNextHop(routeEntry.prefix(),
+ routeEntry.nextHop(),
+ nextHopMacAddress);
+ }
+
+ /**
+ * Adds a route intent given a prefix and a next hop IP address. This
+ * method will find the egress interface for the intent.
+ *
+ * @param prefix IP prefix of the route to add
+ * @param nextHopIpAddress IP address of the next hop
+ * @param nextHopMacAddress MAC address of the next hop
+ */
+ private void addRouteIntentToNextHop(IpPrefix prefix,
+ IpAddress nextHopIpAddress,
+ MacAddress nextHopMacAddress) {
+
+ // Find the attachment point (egress interface) of the next hop
+ Interface egressInterface;
+ if (configInfoService.getBgpPeers().containsKey(nextHopIpAddress)) {
+ // Route to a peer
+ log.debug("Route to peer {}", nextHopIpAddress);
+ BgpPeer peer =
+ configInfoService.getBgpPeers().get(nextHopIpAddress);
+ egressInterface =
+ interfaceService.getInterface(peer.connectPoint());
+ } else {
+ // Route to non-peer
+ log.debug("Route to non-peer {}", nextHopIpAddress);
+ egressInterface =
+ interfaceService.getMatchingInterface(nextHopIpAddress);
+ if (egressInterface == null) {
+ log.warn("No outgoing interface found for {}",
+ nextHopIpAddress);
+ return;
+ }
+ }
+
+ doAddRouteIntent(prefix, egressInterface, nextHopMacAddress);
+ }
+
+ /**
+ * Installs a route intent for a prefix.
+ * <p/>
+     * The intent matches the destination IP prefix and rewrites the
+     * destination MAC address at all other border switches, then forwards
+     * packets according to the destination MAC address.
+ *
+ * @param prefix IP prefix from route
+ * @param egressInterface egress Interface connected to next hop router
+ * @param nextHopMacAddress MAC address of next hop router
+ */
+ private void doAddRouteIntent(IpPrefix prefix, Interface egressInterface,
+ MacAddress nextHopMacAddress) {
+ log.debug("Adding intent for prefix {}, next hop mac {}",
+ prefix, nextHopMacAddress);
+
+ MultiPointToSinglePointIntent pushedIntent =
+ pushedRouteIntents.get(prefix);
+
+ // Just for testing.
+ if (pushedIntent != null) {
+ log.error("There should not be a pushed intent: {}", pushedIntent);
+ }
+
+ ConnectPoint egressPort = egressInterface.connectPoint();
+
+ Set<ConnectPoint> ingressPorts = new HashSet<>();
+
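+        // Use every configured interface except the egress one as an
+        // ingress point of the multipoint-to-single-point intent.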
+ for (Interface intf : interfaceService.getInterfaces()) {
+ if (!intf.equals(egressInterface)) {
+ ConnectPoint srcPort = intf.connectPoint();
+ ingressPorts.add(srcPort);
+ }
+ }
+
+ // Match the destination IP prefix at the first hop
+ //PacketMatchBuilder builder = new PacketMatchBuilder();
+ //builder.setEtherType(Ethernet.TYPE_IPV4).setDstIpNet(prefix);
+ //PacketMatch packetMatch = builder.build();
+ TrafficSelector selector = DefaultTrafficSelector.builder()
+ .matchEthType(Ethernet.TYPE_IPV4)
+ .matchIPDst(prefix)
+ .build();
+
+ // Rewrite the destination MAC address
+ //ModifyDstMacAction modifyDstMacAction =
+ //new ModifyDstMacAction(nextHopMacAddress);
+ TrafficTreatment treatment = DefaultTrafficTreatment.builder()
+ .setEthDst(nextHopMacAddress)
+ .build();
+
+ MultiPointToSinglePointIntent intent =
+ new MultiPointToSinglePointIntent(nextIntentId(),
+ selector, treatment, ingressPorts, egressPort);
+
+ if (isElectedLeader && isActivatedLeader) {
+ log.debug("Intent installation: adding Intent for prefix: {}",
+ prefix);
+ intentService.submit(intent);
+ }
+
+ // Maintain the Intent
+ pushedRouteIntents.put(prefix, intent);
+ }
+
+ /**
+     * Processes deleting a route entry.
+     * <p/>
+     * Removes the prefix from the radix tree, and if successful, tries to
+     * delete the related intent.
+ *
+ * @param routeEntry the route entry to delete
+ */
+ protected void processRouteDelete(RouteEntry routeEntry) {
+ synchronized (this) {
+ log.debug("Processing route delete: {}", routeEntry);
+ IpPrefix prefix = routeEntry.prefix();
+
+ // TODO check the change of logic here - remove doesn't check that
+ // the route entry was what we expected (and we can't do this
+ // concurrently)
+
+ if (bgpRoutes.remove(RouteEntry.createBinaryString(prefix))) {
+ //
+                // Only delete the intent if an entry was actually removed
+                // from the tree. If no entry was removed, the <prefix, next
+                // hop> wasn't there, so it has probably already been removed
+                // and we don't need to do anything.
+ //
+ executeRouteDelete(routeEntry);
+ }
+
+ routesWaitingOnArp.remove(routeEntry.nextHop(), routeEntry);
+ // TODO cancel the request in the ARP manager as well
+ }
+ }
+
+ /**
+     * Executes deleting a route entry.
+ *
+ * @param routeEntry the route entry to delete
+ */
+ private void executeRouteDelete(RouteEntry routeEntry) {
+ log.debug("Executing route delete: {}", routeEntry);
+
+ IpPrefix prefix = routeEntry.prefix();
+
+ MultiPointToSinglePointIntent intent =
+ pushedRouteIntents.remove(prefix);
+
+ if (intent == null) {
+ log.debug("There is no intent in pushedRouteIntents to delete " +
+ "for prefix: {}", prefix);
+ } else {
+ if (isElectedLeader && isActivatedLeader) {
+ log.debug("Intent installation: deleting Intent for prefix: {}",
+ prefix);
+ intentService.withdraw(intent);
+ }
+ }
+ }
+
+ /**
+     * This method handles the prefixes that are waiting for ARP replies for
+     * the MAC addresses of their next hops.
+     *
+     * @param ipAddress the next hop router IP address for which we sent the
+     * ARP request
+     * @param macAddress the MAC address corresponding to the ipAddress
+ */
+ //@Override
+ // TODO change name
+ public void arpResponse(IpAddress ipAddress, MacAddress macAddress) {
+ log.debug("Received ARP response: {} => {}", ipAddress, macAddress);
+
+ // We synchronize on this to prevent changes to the radix tree
+ // while we're pushing intents. If the tree changes, the
+ // tree and intents could get out of sync.
+ synchronized (this) {
+
+ Set<RouteEntry> routesToPush =
+ routesWaitingOnArp.removeAll(ipAddress);
+
+ for (RouteEntry routeEntry : routesToPush) {
+ // These will always be adds
+ IpPrefix prefix = routeEntry.prefix();
+ String binaryString = RouteEntry.createBinaryString(prefix);
+ RouteEntry foundRouteEntry =
+ bgpRoutes.getValueForExactKey(binaryString);
+ if (foundRouteEntry != null &&
+ foundRouteEntry.nextHop().equals(routeEntry.nextHop())) {
+ log.debug("Pushing prefix {} next hop {}",
+ routeEntry.prefix(), routeEntry.nextHop());
+ // We only push prefix flows if the prefix is still in the
+ // radix tree and the next hop is the same as our
+ // update.
+ // The prefix could have been removed while we were waiting
+ // for the ARP, or the next hop could have changed.
+ addRouteIntentToNextHop(prefix, ipAddress, macAddress);
+ } else {
+ log.debug("Received ARP response, but {}/{} is no longer in"
+ + " the radix tree", routeEntry.prefix(),
+ routeEntry.nextHop());
+ }
+ }
+ }
+ }
+
+ /**
+ * Gets the SDN-IP routes.
+ *
+ * @return the SDN-IP routes
+ */
+ public Collection<RouteEntry> getRoutes() {
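+        // Keys starting with the empty string match every key, so this
+        // iterates over all routes stored in the radix tree.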
+ Iterator<KeyValuePair<RouteEntry>> it =
+ bgpRoutes.getKeyValuePairsForKeysStartingWith("").iterator();
+
+ List<RouteEntry> routes = new LinkedList<>();
+
+ while (it.hasNext()) {
+ KeyValuePair<RouteEntry> entry = it.next();
+ routes.add(entry.getValue());
+ }
+
+ return routes;
+ }
+
+ /**
+ * Generates a new unique intent ID.
+ *
+ * @return the new intent ID.
+ */
+ private IntentId nextIntentId() {
+ return new IntentId(intentId++);
+ }
+
+ /**
+ * Listener for host events.
+ */
+ class InternalHostListener implements HostListener {
+ @Override
+ public void event(HostEvent event) {
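+            // A discovered or updated host carries its MAC address, so treat
+            // the event like an ARP response for any routes waiting on that
+            // IP address.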
+ if (event.type() == HostEvent.Type.HOST_ADDED ||
+ event.type() == HostEvent.Type.HOST_UPDATED) {
+ Host host = event.subject();
+ for (IpPrefix ip : host.ipAddresses()) {
+ arpResponse(ip.toIpAddress(), host.mac());
+ }
+ }
+ }
+ }
+}
diff --git a/apps/sdnip/src/main/java/org/onlab/onos/sdnip/SdnIp.java b/apps/sdnip/src/main/java/org/onlab/onos/sdnip/SdnIp.java
index 25b13f1..f2f4646 100644
--- a/apps/sdnip/src/main/java/org/onlab/onos/sdnip/SdnIp.java
+++ b/apps/sdnip/src/main/java/org/onlab/onos/sdnip/SdnIp.java
@@ -2,21 +2,30 @@
import static org.slf4j.LoggerFactory.getLogger;
+import java.util.Collection;
+
import org.apache.felix.scr.annotations.Activate;
import org.apache.felix.scr.annotations.Component;
import org.apache.felix.scr.annotations.Deactivate;
import org.apache.felix.scr.annotations.Reference;
import org.apache.felix.scr.annotations.ReferenceCardinality;
+import org.apache.felix.scr.annotations.Service;
import org.onlab.onos.net.host.HostService;
import org.onlab.onos.net.intent.IntentService;
+import org.onlab.onos.sdnip.RouteUpdate.Type;
+import org.onlab.onos.sdnip.bgp.BgpRouteEntry;
+import org.onlab.onos.sdnip.bgp.BgpSessionManager;
import org.onlab.onos.sdnip.config.SdnIpConfigReader;
+import org.onlab.packet.IpAddress;
+import org.onlab.packet.IpPrefix;
import org.slf4j.Logger;
/**
- * Placeholder SDN-IP component.
+ * Component for the SDN-IP peering application.
*/
@Component(immediate = true)
-public class SdnIp {
+@Service
+public class SdnIp implements SdnIpService {
private final Logger log = getLogger(getClass());
@@ -27,7 +36,9 @@
protected HostService hostService;
private SdnIpConfigReader config;
- private PeerConnectivity peerConnectivity;
+ private PeerConnectivityManager peerConnectivity;
+ private Router router;
+ private BgpSessionManager bgpSessionManager;
@Activate
protected void activate() {
@@ -36,15 +47,40 @@
config = new SdnIpConfigReader();
config.init();
- InterfaceService interfaceService = new HostServiceBasedInterfaceService(hostService);
+ InterfaceService interfaceService = new HostToInterfaceAdaptor(hostService);
- peerConnectivity = new PeerConnectivity(config, interfaceService, intentService);
+ peerConnectivity = new PeerConnectivityManager(config, interfaceService, intentService);
peerConnectivity.start();
+ router = new Router(intentService, hostService, config, interfaceService);
+ router.start();
+
+ bgpSessionManager = new BgpSessionManager(router);
+ bgpSessionManager.startUp(2000); // TODO
+
+ // TODO need to disable link discovery on external ports
+
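+        // Seed the router with a hard-coded route update (testing aid).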
+ router.update(new RouteUpdate(Type.UPDATE, new RouteEntry(
+ IpPrefix.valueOf("172.16.20.0/24"),
+ IpAddress.valueOf("192.168.10.1"))));
}
@Deactivate
protected void deactivate() {
log.info("Stopped");
}
+
+ @Override
+ public Collection<BgpRouteEntry> getBgpRoutes() {
+ return bgpSessionManager.getBgpRoutes();
+ }
+
+ @Override
+ public Collection<RouteEntry> getRoutes() {
+ return router.getRoutes();
+ }
+
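+    // Builds an ONOS device URI from a colon-separated DPID string, e.g.
+    // "00:00:00:00:00:00:00:01" becomes "of:0000000000000001".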
+ static String dpidToUri(String dpid) {
+ return "of:" + dpid.replace(":", "");
+ }
}
diff --git a/apps/sdnip/src/main/java/org/onlab/onos/sdnip/SdnIpService.java b/apps/sdnip/src/main/java/org/onlab/onos/sdnip/SdnIpService.java
new file mode 100644
index 0000000..187ebb2
--- /dev/null
+++ b/apps/sdnip/src/main/java/org/onlab/onos/sdnip/SdnIpService.java
@@ -0,0 +1,24 @@
+package org.onlab.onos.sdnip;
+
+import java.util.Collection;
+
+import org.onlab.onos.sdnip.bgp.BgpRouteEntry;
+
+/**
+ * Service interface exported by SDN-IP.
+ */
+public interface SdnIpService {
+ /**
+ * Gets the BGP routes.
+ *
+ * @return the BGP routes
+ */
+ public Collection<BgpRouteEntry> getBgpRoutes();
+
+ /**
+ * Gets all the routes known to SDN-IP.
+ *
+ * @return the SDN-IP routes
+ */
+ public Collection<RouteEntry> getRoutes();
+}
diff --git a/apps/sdnip/src/main/java/org/onlab/onos/sdnip/bgp/BgpConstants.java b/apps/sdnip/src/main/java/org/onlab/onos/sdnip/bgp/BgpConstants.java
new file mode 100644
index 0000000..1703138
--- /dev/null
+++ b/apps/sdnip/src/main/java/org/onlab/onos/sdnip/bgp/BgpConstants.java
@@ -0,0 +1,368 @@
+package org.onlab.onos.sdnip.bgp;
+
+/**
+ * BGP related constants.
+ */
+public final class BgpConstants {
+ /**
+ * Default constructor.
+ * <p>
+ * The constructor is private to prevent creating an instance of
+ * this utility class.
+ */
+ private BgpConstants() {
+ }
+
+ /** BGP port number (RFC 4271). */
+ public static final int BGP_PORT = 179;
+
+ /** BGP version. */
+ public static final int BGP_VERSION = 4;
+
+ /** BGP OPEN message type. */
+ public static final int BGP_TYPE_OPEN = 1;
+
+ /** BGP UPDATE message type. */
+ public static final int BGP_TYPE_UPDATE = 2;
+
+ /** BGP NOTIFICATION message type. */
+ public static final int BGP_TYPE_NOTIFICATION = 3;
+
+ /** BGP KEEPALIVE message type. */
+ public static final int BGP_TYPE_KEEPALIVE = 4;
+
+ /** BGP Header Marker field length. */
+ public static final int BGP_HEADER_MARKER_LENGTH = 16;
+
+    /** BGP Header length: 16-octet Marker, 2-octet Length, 1-octet Type. */
+ public static final int BGP_HEADER_LENGTH = 19;
+
+ /** BGP message maximum length. */
+ public static final int BGP_MESSAGE_MAX_LENGTH = 4096;
+
+ /** BGP OPEN message minimum length (BGP Header included). */
+ public static final int BGP_OPEN_MIN_LENGTH = 29;
+
+ /** BGP UPDATE message minimum length (BGP Header included). */
+ public static final int BGP_UPDATE_MIN_LENGTH = 23;
+
+ /** BGP NOTIFICATION message minimum length (BGP Header included). */
+ public static final int BGP_NOTIFICATION_MIN_LENGTH = 21;
+
+ /** BGP KEEPALIVE message expected length (BGP Header included). */
+ public static final int BGP_KEEPALIVE_EXPECTED_LENGTH = 19;
+
+ /** BGP KEEPALIVE messages transmitted per Hold interval. */
+ public static final int BGP_KEEPALIVE_PER_HOLD_INTERVAL = 3;
+
+ /** BGP KEEPALIVE messages minimum Holdtime (in seconds). */
+ public static final int BGP_KEEPALIVE_MIN_HOLDTIME = 3;
+
+ /** BGP KEEPALIVE messages minimum transmission interval (in seconds). */
+ public static final int BGP_KEEPALIVE_MIN_INTERVAL = 1;
+
+ /** BGP AS 0 (zero) value. See draft-ietf-idr-as0-06.txt Internet Draft. */
+ public static final long BGP_AS_0 = 0;
+
+ /**
+ * BGP UPDATE related constants.
+ */
+ public static final class Update {
+ /**
+ * Default constructor.
+ * <p>
+ * The constructor is private to prevent creating an instance of
+ * this utility class.
+ */
+ private Update() {
+ }
+
+ /**
+ * BGP UPDATE: ORIGIN related constants.
+ */
+ public static final class Origin {
+ /**
+ * Default constructor.
+ * <p>
+ * The constructor is private to prevent creating an instance of
+ * this utility class.
+ */
+ private Origin() {
+ }
+
+ /** BGP UPDATE Attributes Type Code ORIGIN. */
+ public static final int TYPE = 1;
+
+ /** BGP UPDATE Attributes Type Code ORIGIN length. */
+ public static final int LENGTH = 1;
+
+ /** BGP UPDATE ORIGIN: IGP. */
+ public static final int IGP = 0;
+
+ /** BGP UPDATE ORIGIN: EGP. */
+ public static final int EGP = 1;
+
+ /** BGP UPDATE ORIGIN: INCOMPLETE. */
+ public static final int INCOMPLETE = 2;
+ }
+
+ /**
+ * BGP UPDATE: AS_PATH related constants.
+ */
+ public static final class AsPath {
+ /**
+ * Default constructor.
+ * <p>
+ * The constructor is private to prevent creating an instance of
+ * this utility class.
+ */
+ private AsPath() {
+ }
+
+ /** BGP UPDATE Attributes Type Code AS_PATH. */
+ public static final int TYPE = 2;
+
+ /** BGP UPDATE AS_PATH Type: AS_SET. */
+ public static final int AS_SET = 1;
+
+ /** BGP UPDATE AS_PATH Type: AS_SEQUENCE. */
+ public static final int AS_SEQUENCE = 2;
+ }
+
+ /**
+ * BGP UPDATE: NEXT_HOP related constants.
+ */
+ public static final class NextHop {
+ /**
+ * Default constructor.
+ * <p>
+ * The constructor is private to prevent creating an instance of
+ * this utility class.
+ */
+ private NextHop() {
+ }
+
+ /** BGP UPDATE Attributes Type Code NEXT_HOP. */
+ public static final int TYPE = 3;
+
+ /** BGP UPDATE Attributes Type Code NEXT_HOP length. */
+ public static final int LENGTH = 4;
+ }
+
+ /**
+ * BGP UPDATE: MULTI_EXIT_DISC related constants.
+ */
+ public static final class MultiExitDisc {
+ /**
+ * Default constructor.
+ * <p>
+ * The constructor is private to prevent creating an instance of
+ * this utility class.
+ */
+ private MultiExitDisc() {
+ }
+
+ /** BGP UPDATE Attributes Type Code MULTI_EXIT_DISC. */
+ public static final int TYPE = 4;
+
+ /** BGP UPDATE Attributes Type Code MULTI_EXIT_DISC length. */
+ public static final int LENGTH = 4;
+
+ /** BGP UPDATE Attributes lowest MULTI_EXIT_DISC value. */
+ public static final int LOWEST_MULTI_EXIT_DISC = 0;
+ }
+
+ /**
+ * BGP UPDATE: LOCAL_PREF related constants.
+ */
+ public static final class LocalPref {
+ /**
+ * Default constructor.
+ * <p>
+ * The constructor is private to prevent creating an instance of
+ * this utility class.
+ */
+ private LocalPref() {
+ }
+
+ /** BGP UPDATE Attributes Type Code LOCAL_PREF. */
+ public static final int TYPE = 5;
+
+ /** BGP UPDATE Attributes Type Code LOCAL_PREF length. */
+ public static final int LENGTH = 4;
+ }
+
+ /**
+ * BGP UPDATE: ATOMIC_AGGREGATE related constants.
+ */
+ public static final class AtomicAggregate {
+ /**
+ * Default constructor.
+ * <p>
+ * The constructor is private to prevent creating an instance of
+ * this utility class.
+ */
+ private AtomicAggregate() {
+ }
+
+ /** BGP UPDATE Attributes Type Code ATOMIC_AGGREGATE. */
+ public static final int TYPE = 6;
+
+ /** BGP UPDATE Attributes Type Code ATOMIC_AGGREGATE length. */
+ public static final int LENGTH = 0;
+ }
+
+ /**
+ * BGP UPDATE: AGGREGATOR related constants.
+ */
+ public static final class Aggregator {
+ /**
+ * Default constructor.
+ * <p>
+ * The constructor is private to prevent creating an instance of
+ * this utility class.
+ */
+ private Aggregator() {
+ }
+
+ /** BGP UPDATE Attributes Type Code AGGREGATOR. */
+ public static final int TYPE = 7;
+
+ /** BGP UPDATE Attributes Type Code AGGREGATOR length. */
+ public static final int LENGTH = 6;
+ }
+ }
+
+ /**
+ * BGP NOTIFICATION related constants.
+ */
+ public static final class Notifications {
+ /**
+ * Default constructor.
+ * <p>
+ * The constructor is private to prevent creating an instance of
+ * this utility class.
+ */
+ private Notifications() {
+ }
+
+ /**
+ * BGP NOTIFICATION: Message Header Error constants.
+ */
+ public static final class MessageHeaderError {
+ /**
+ * Default constructor.
+ * <p>
+ * The constructor is private to prevent creating an instance of
+ * this utility class.
+ */
+ private MessageHeaderError() {
+ }
+
+ /** Message Header Error code. */
+ public static final int ERROR_CODE = 1;
+
+ /** Message Header Error subcode: Connection Not Synchronized. */
+ public static final int CONNECTION_NOT_SYNCHRONIZED = 1;
+
+ /** Message Header Error subcode: Bad Message Length. */
+ public static final int BAD_MESSAGE_LENGTH = 2;
+
+ /** Message Header Error subcode: Bad Message Type. */
+ public static final int BAD_MESSAGE_TYPE = 3;
+ }
+
+ /**
+ * BGP NOTIFICATION: OPEN Message Error constants.
+ */
+ public static final class OpenMessageError {
+ /**
+ * Default constructor.
+ * <p>
+ * The constructor is private to prevent creating an instance of
+ * this utility class.
+ */
+ private OpenMessageError() {
+ }
+
+ /** OPEN Message Error code. */
+ public static final int ERROR_CODE = 2;
+
+ /** OPEN Message Error subcode: Unsupported Version Number. */
+ public static final int UNSUPPORTED_VERSION_NUMBER = 1;
+
+ /** OPEN Message Error subcode: Bad PEER AS. */
+ public static final int BAD_PEER_AS = 2;
+
+ /** OPEN Message Error subcode: Unacceptable Hold Time. */
+ public static final int UNACCEPTABLE_HOLD_TIME = 6;
+ }
+
+ /**
+ * BGP NOTIFICATION: UPDATE Message Error constants.
+ */
+ public static final class UpdateMessageError {
+ /**
+ * Default constructor.
+ * <p>
+ * The constructor is private to prevent creating an instance of
+ * this utility class.
+ */
+ private UpdateMessageError() {
+ }
+
+ /** UPDATE Message Error code. */
+ public static final int ERROR_CODE = 3;
+
+ /** UPDATE Message Error subcode: Malformed Attribute List. */
+ public static final int MALFORMED_ATTRIBUTE_LIST = 1;
+
+ /** UPDATE Message Error subcode: Unrecognized Well-known Attribute. */
+ public static final int UNRECOGNIZED_WELL_KNOWN_ATTRIBUTE = 2;
+
+ /** UPDATE Message Error subcode: Missing Well-known Attribute. */
+ public static final int MISSING_WELL_KNOWN_ATTRIBUTE = 3;
+
+ /** UPDATE Message Error subcode: Attribute Flags Error. */
+ public static final int ATTRIBUTE_FLAGS_ERROR = 4;
+
+ /** UPDATE Message Error subcode: Attribute Length Error. */
+ public static final int ATTRIBUTE_LENGTH_ERROR = 5;
+
+ /** UPDATE Message Error subcode: Invalid ORIGIN Attribute. */
+ public static final int INVALID_ORIGIN_ATTRIBUTE = 6;
+
+ /** UPDATE Message Error subcode: Invalid NEXT_HOP Attribute. */
+ public static final int INVALID_NEXT_HOP_ATTRIBUTE = 8;
+
+ /** UPDATE Message Error subcode: Optional Attribute Error. Unused. */
+ public static final int OPTIONAL_ATTRIBUTE_ERROR = 9;
+
+ /** UPDATE Message Error subcode: Invalid Network Field. */
+ public static final int INVALID_NETWORK_FIELD = 10;
+
+ /** UPDATE Message Error subcode: Malformed AS_PATH. */
+ public static final int MALFORMED_AS_PATH = 11;
+ }
+
+ /**
+ * BGP NOTIFICATION: Hold Timer Expired constants.
+ */
+ public static final class HoldTimerExpired {
+ /**
+ * Default constructor.
+ * <p>
+ * The constructor is private to prevent creating an instance of
+ * this utility class.
+ */
+ private HoldTimerExpired() {
+ }
+
+ /** Hold Timer Expired code. */
+ public static final int ERROR_CODE = 4;
+ }
+
+ /** BGP NOTIFICATION message Error subcode: Unspecific. */
+ public static final int ERROR_SUBCODE_UNSPECIFIC = 0;
+ }
+}
diff --git a/apps/sdnip/src/main/java/org/onlab/onos/sdnip/bgp/BgpFrameDecoder.java b/apps/sdnip/src/main/java/org/onlab/onos/sdnip/bgp/BgpFrameDecoder.java
new file mode 100644
index 0000000..938d975
--- /dev/null
+++ b/apps/sdnip/src/main/java/org/onlab/onos/sdnip/bgp/BgpFrameDecoder.java
@@ -0,0 +1,162 @@
+package org.onlab.onos.sdnip.bgp;
+
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.jboss.netty.buffer.ChannelBuffers;
+import org.jboss.netty.channel.Channel;
+import org.jboss.netty.channel.ChannelHandlerContext;
+import org.jboss.netty.handler.codec.frame.FrameDecoder;
+import org.onlab.onos.sdnip.bgp.BgpConstants.Notifications.MessageHeaderError;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Class for handling the decoding of the BGP messages.
+ */
+class BgpFrameDecoder extends FrameDecoder {
+ private static final Logger log =
+ LoggerFactory.getLogger(BgpFrameDecoder.class);
+
+ private final BgpSession bgpSession;
+
+ /**
+ * Constructor for a given BGP Session.
+ *
+ * @param bgpSession the BGP session state to use.
+ */
+ BgpFrameDecoder(BgpSession bgpSession) {
+ this.bgpSession = bgpSession;
+ }
+
+ @Override
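+    // NOTE: Netty's FrameDecoder treats a null return value from decode() as
+    // "no message decoded": either not enough bytes have arrived yet, or the
+    // message was consumed and fully handled within decode() itself.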
+ protected Object decode(ChannelHandlerContext ctx,
+ Channel channel,
+ ChannelBuffer buf) throws Exception {
+ //
+ // NOTE: If we close the channel during the decoding, we might still
+        // see some incoming messages until the channel close completes.
+ //
+ if (bgpSession.isClosed()) {
+ return null;
+ }
+
+ log.trace("BGP Peer: decode(): remoteAddr = {} localAddr = {} " +
+ "messageSize = {}",
+ ctx.getChannel().getRemoteAddress(),
+ ctx.getChannel().getLocalAddress(),
+ buf.readableBytes());
+
+ // Test for minimum length of the BGP message
+ if (buf.readableBytes() < BgpConstants.BGP_HEADER_LENGTH) {
+            // Not enough data received
+ return null;
+ }
+
+ //
+ // Mark the current buffer position in case we haven't received
+ // the whole message.
+ //
+ buf.markReaderIndex();
+
+ //
+ // Read and check the BGP message Marker field: it must be all ones
+ // (See RFC 4271, Section 4.1)
+ //
+ byte[] marker = new byte[BgpConstants.BGP_HEADER_MARKER_LENGTH];
+ buf.readBytes(marker);
+ for (int i = 0; i < marker.length; i++) {
+ if (marker[i] != (byte) 0xff) {
+ log.debug("BGP RX Error: invalid marker {} at position {}",
+ marker[i], i);
+ //
+ // ERROR: Connection Not Synchronized
+ //
+ // Send NOTIFICATION and close the connection
+ int errorCode = MessageHeaderError.ERROR_CODE;
+ int errorSubcode =
+ MessageHeaderError.CONNECTION_NOT_SYNCHRONIZED;
+ ChannelBuffer txMessage =
+ bgpSession.prepareBgpNotification(errorCode, errorSubcode,
+ null);
+ ctx.getChannel().write(txMessage);
+ bgpSession.closeChannel(ctx);
+ return null;
+ }
+ }
+
+ //
+ // Read and check the BGP message Length field
+ //
+ int length = buf.readUnsignedShort();
+ if ((length < BgpConstants.BGP_HEADER_LENGTH) ||
+ (length > BgpConstants.BGP_MESSAGE_MAX_LENGTH)) {
+ log.debug("BGP RX Error: invalid Length field {}. " +
+ "Must be between {} and {}",
+ length,
+ BgpConstants.BGP_HEADER_LENGTH,
+ BgpConstants.BGP_MESSAGE_MAX_LENGTH);
+ //
+ // ERROR: Bad Message Length
+ //
+ // Send NOTIFICATION and close the connection
+ ChannelBuffer txMessage =
+ bgpSession.prepareBgpNotificationBadMessageLength(length);
+ ctx.getChannel().write(txMessage);
+ bgpSession.closeChannel(ctx);
+ return null;
+ }
+
+ //
+        // Test whether the rest of the message has been received:
+ // So far we have read the Marker (16 octets) and the
+ // Length (2 octets) fields.
+ //
+ int remainingMessageLen =
+ length - BgpConstants.BGP_HEADER_MARKER_LENGTH - 2;
+ if (buf.readableBytes() < remainingMessageLen) {
+            // Not enough data received
+ buf.resetReaderIndex();
+ return null;
+ }
+
+ //
+ // Read the BGP message Type field, and process based on that type
+ //
+ int type = buf.readUnsignedByte();
+ remainingMessageLen--; // Adjust after reading the type
+ ChannelBuffer message = buf.readBytes(remainingMessageLen);
+
+ //
+        // Process the remainder of the message based on the message type
+ //
+ switch (type) {
+ case BgpConstants.BGP_TYPE_OPEN:
+ bgpSession.processBgpOpen(ctx, message);
+ break;
+ case BgpConstants.BGP_TYPE_UPDATE:
+ bgpSession.processBgpUpdate(ctx, message);
+ break;
+ case BgpConstants.BGP_TYPE_NOTIFICATION:
+ bgpSession.processBgpNotification(ctx, message);
+ break;
+ case BgpConstants.BGP_TYPE_KEEPALIVE:
+ bgpSession.processBgpKeepalive(ctx, message);
+ break;
+ default:
+ //
+ // ERROR: Bad Message Type
+ //
+ // Send NOTIFICATION and close the connection
+ int errorCode = MessageHeaderError.ERROR_CODE;
+ int errorSubcode = MessageHeaderError.BAD_MESSAGE_TYPE;
+ ChannelBuffer data = ChannelBuffers.buffer(1);
+ data.writeByte(type);
+ ChannelBuffer txMessage =
+ bgpSession.prepareBgpNotification(errorCode, errorSubcode,
+ data);
+ ctx.getChannel().write(txMessage);
+ bgpSession.closeChannel(ctx);
+ return null;
+ }
+ return null;
+ }
+}
diff --git a/apps/sdnip/src/main/java/org/onlab/onos/sdnip/bgp/BgpRouteEntry.java b/apps/sdnip/src/main/java/org/onlab/onos/sdnip/bgp/BgpRouteEntry.java
new file mode 100644
index 0000000..890328a
--- /dev/null
+++ b/apps/sdnip/src/main/java/org/onlab/onos/sdnip/bgp/BgpRouteEntry.java
@@ -0,0 +1,432 @@
+package org.onlab.onos.sdnip.bgp;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+
+import java.util.ArrayList;
+import java.util.Objects;
+
+import org.onlab.onos.sdnip.RouteEntry;
+import org.onlab.packet.IpAddress;
+import org.onlab.packet.IpPrefix;
+
+import com.google.common.base.MoreObjects;
+
+/**
+ * Represents a route in BGP.
+ */
+public class BgpRouteEntry extends RouteEntry {
+ private final BgpSession bgpSession; // The BGP Session the route was
+ // received on
+ private final byte origin; // Route ORIGIN: IGP, EGP, INCOMPLETE
+ private final AsPath asPath; // The AS Path
+ private final long localPref; // The local preference for the route
+ private long multiExitDisc =
+ BgpConstants.Update.MultiExitDisc.LOWEST_MULTI_EXIT_DISC;
+
+ /**
+ * Class constructor.
+ *
+ * @param bgpSession the BGP Session the route was received on
+ * @param prefix the prefix of the route
+ * @param nextHop the next hop of the route
+ * @param origin the route origin: 0=IGP, 1=EGP, 2=INCOMPLETE
+ * @param asPath the AS path
+ * @param localPref the route local preference
+ */
+ public BgpRouteEntry(BgpSession bgpSession, IpPrefix prefix,
+ IpAddress nextHop, byte origin,
+ BgpRouteEntry.AsPath asPath, long localPref) {
+ super(prefix, nextHop);
+ this.bgpSession = checkNotNull(bgpSession);
+ this.origin = origin;
+ this.asPath = checkNotNull(asPath);
+ this.localPref = localPref;
+ }
+
+ /**
+ * Gets the BGP Session the route was received on.
+ *
+ * @return the BGP Session the route was received on
+ */
+ public BgpSession getBgpSession() {
+ return bgpSession;
+ }
+
+ /**
+ * Gets the route origin: 0=IGP, 1=EGP, 2=INCOMPLETE.
+ *
+ * @return the route origin: 0=IGP, 1=EGP, 2=INCOMPLETE
+ */
+ public byte getOrigin() {
+ return origin;
+ }
+
+ /**
+ * Gets the route AS path.
+ *
+ * @return the route AS path
+ */
+ public BgpRouteEntry.AsPath getAsPath() {
+ return asPath;
+ }
+
+ /**
+ * Gets the route local preference.
+ *
+ * @return the route local preference
+ */
+ public long getLocalPref() {
+ return localPref;
+ }
+
+ /**
+ * Gets the route MED (Multi-Exit Discriminator).
+ *
+ * @return the route MED (Multi-Exit Discriminator)
+ */
+ public long getMultiExitDisc() {
+ return multiExitDisc;
+ }
+
+ /**
+ * Sets the route MED (Multi-Exit Discriminator).
+ *
+ * @param multiExitDisc the route MED (Multi-Exit Discriminator) to set
+ */
+ void setMultiExitDisc(long multiExitDisc) {
+ this.multiExitDisc = multiExitDisc;
+ }
+
+ /**
+ * Tests whether the route is originated from the local AS.
+ * <p/>
+ * The route is considered originated from the local AS if the AS Path
+ * is empty or if it begins with an AS_SET.
+ *
+ * @return true if the route is originated from the local AS, otherwise
+ * false
+ */
+ boolean isLocalRoute() {
+ if (asPath.getPathSegments().isEmpty()) {
+ return true;
+ }
+ PathSegment firstPathSegment = asPath.getPathSegments().get(0);
+ if (firstPathSegment.getType() == BgpConstants.Update.AsPath.AS_SET) {
+ return true;
+ }
+ return false;
+ }
+
+ /**
+ * Gets the BGP Neighbor AS number the route was received from.
+ * <p/>
+     * If the route is originated from the local AS, the return value is
+     * zero (BGP_AS_0).
+ *
+ * @return the BGP Neighbor AS number the route was received from.
+ */
+ long getNeighborAs() {
+ if (isLocalRoute()) {
+ return BgpConstants.BGP_AS_0;
+ }
+ PathSegment firstPathSegment = asPath.getPathSegments().get(0);
+ if (firstPathSegment.getSegmentAsNumbers().isEmpty()) {
+ // TODO: Shouldn't happen. Should check during the parsing.
+ return BgpConstants.BGP_AS_0;
+ }
+ return firstPathSegment.getSegmentAsNumbers().get(0);
+ }
+
+ /**
+ * Tests whether the AS Path contains a loop.
+ * <p/>
+ * The test is done by comparing whether the AS Path contains the
+ * local AS number.
+ *
+ * @param localAsNumber the local AS number to compare against
+ * @return true if the AS Path contains a loop, otherwise false
+ */
+ boolean hasAsPathLoop(long localAsNumber) {
+ for (PathSegment pathSegment : asPath.getPathSegments()) {
+ for (Long asNumber : pathSegment.getSegmentAsNumbers()) {
+ if (asNumber.equals(localAsNumber)) {
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
+ /**
+ * Compares this BGP route against another BGP route by using the
+ * BGP Decision Process.
+ * <p/>
+     * NOTE: The comparison needs to be performed only on routes that have
+     * the same IP Prefix.
+     *
+     * @param other the BGP route to compare against
+     * @return true if this BGP route is better than or the same as the other
+     * BGP route, otherwise false
+ */
+ boolean isBetterThan(BgpRouteEntry other) {
+ if (this == other) {
+ return true; // Return true if same route
+ }
+
+ // Compare the LOCAL_PREF values: larger is better
+ if (getLocalPref() != other.getLocalPref()) {
+ return (getLocalPref() > other.getLocalPref());
+ }
+
+ // Compare the AS number in the path: smaller is better
+ if (getAsPath().getAsPathLength() !=
+ other.getAsPath().getAsPathLength()) {
+ return getAsPath().getAsPathLength() <
+ other.getAsPath().getAsPathLength();
+ }
+
+ // Compare the Origin number: lower is better
+ if (getOrigin() != other.getOrigin()) {
+ return (getOrigin() < other.getOrigin());
+ }
+
+        // Compare the MED if the neighbor AS is the same: larger is better
+ medLabel: {
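+            // Labeled block: "break medLabel" skips the MED comparison when
+            // the two routes were not learned from the same neighbor AS.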
+ boolean thisIsLocalRoute = isLocalRoute();
+ if (thisIsLocalRoute != other.isLocalRoute()) {
+ break medLabel; // AS number is different
+ }
+ if (!thisIsLocalRoute) {
+ long thisNeighborAs = getNeighborAs();
+ if (thisNeighborAs != other.getNeighborAs()) {
+ break medLabel; // AS number is different
+ }
+ if (thisNeighborAs == BgpConstants.BGP_AS_0) {
+ break medLabel; // Invalid AS number
+ }
+ }
+
+ // Compare the MED
+ if (getMultiExitDisc() != other.getMultiExitDisc()) {
+ return (getMultiExitDisc() > other.getMultiExitDisc());
+ }
+ }
+
+ // Compare the peer BGP ID: lower is better
+ IpAddress peerBgpId = getBgpSession().getRemoteBgpId();
+ IpAddress otherPeerBgpId = other.getBgpSession().getRemoteBgpId();
+ if (!peerBgpId.equals(otherPeerBgpId)) {
+ return (peerBgpId.compareTo(otherPeerBgpId) < 0);
+ }
+
+ // Compare the peer BGP address: lower is better
+ IpAddress peerAddress = getBgpSession().getRemoteIp4Address();
+ IpAddress otherPeerAddress =
+ other.getBgpSession().getRemoteIp4Address();
+ if (!peerAddress.equals(otherPeerAddress)) {
+ return (peerAddress.compareTo(otherPeerAddress) < 0);
+ }
+
+        return true; // Routes are the same. Shouldn't happen?
+ }
+
+ /**
+     * A class to represent an AS Path Segment.
+ */
+ public static class PathSegment {
+ private final byte type; // Segment type: AS_SET, AS_SEQUENCE
+ private final ArrayList<Long> segmentAsNumbers; // Segment AS numbers
+
+ /**
+ * Constructor.
+ *
+ * @param type the Path Segment Type: 1=AS_SET, 2=AS_SEQUENCE
+ * @param segmentAsNumbers the Segment AS numbers
+ */
+ PathSegment(byte type, ArrayList<Long> segmentAsNumbers) {
+ this.type = type;
+ this.segmentAsNumbers = checkNotNull(segmentAsNumbers);
+ }
+
+ /**
+ * Gets the Path Segment Type: AS_SET, AS_SEQUENCE.
+ *
+ * @return the Path Segment Type: AS_SET, AS_SEQUENCE
+ */
+ public byte getType() {
+ return type;
+ }
+
+ /**
+ * Gets the Path Segment AS Numbers.
+ *
+ * @return the Path Segment AS Numbers
+ */
+ public ArrayList<Long> getSegmentAsNumbers() {
+ return segmentAsNumbers;
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ if (this == other) {
+ return true;
+ }
+
+ if (!(other instanceof PathSegment)) {
+ return false;
+ }
+
+ PathSegment otherPathSegment = (PathSegment) other;
+ return Objects.equals(this.type, otherPathSegment.type) &&
+ Objects.equals(this.segmentAsNumbers,
+ otherPathSegment.segmentAsNumbers);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(type, segmentAsNumbers);
+ }
+
+ @Override
+ public String toString() {
+ return MoreObjects.toStringHelper(getClass())
+ .add("type", this.type)
+ .add("segmentAsNumbers", this.segmentAsNumbers)
+ .toString();
+ }
+ }
+
+ /**
+     * A class to represent an AS Path.
+ */
+ public static class AsPath {
+ private final ArrayList<PathSegment> pathSegments;
+ private final int asPathLength; // Precomputed AS Path Length
+
+ /**
+ * Constructor.
+ *
+ * @param pathSegments the Path Segments of the Path
+ */
+ AsPath(ArrayList<PathSegment> pathSegments) {
+ this.pathSegments = checkNotNull(pathSegments);
+
+ //
+ // Precompute the AS Path Length:
+ // - AS_SET counts as 1
+ //
+ int pl = 0;
+ for (PathSegment pathSegment : pathSegments) {
+ if (pathSegment.getType() ==
+ BgpConstants.Update.AsPath.AS_SET) {
+ pl++;
+ continue;
+ }
+ pl += pathSegment.getSegmentAsNumbers().size();
+ }
+ asPathLength = pl;
+ }
+
+ /**
+ * Gets the AS Path Segments.
+ *
+ * @return the AS Path Segments
+ */
+ public ArrayList<PathSegment> getPathSegments() {
+ return pathSegments;
+ }
+
+ /**
+ * Gets the AS Path Length as considered by the BGP Decision Process.
+ *
+ * @return the AS Path Length as considered by the BGP Decision Process
+ */
+ int getAsPathLength() {
+ return asPathLength;
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ if (this == other) {
+ return true;
+ }
+
+ if (!(other instanceof AsPath)) {
+ return false;
+ }
+
+ AsPath otherAsPath = (AsPath) other;
+ return Objects.equals(this.pathSegments, otherAsPath.pathSegments);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(pathSegments);
+ }
+
+ @Override
+ public String toString() {
+ return MoreObjects.toStringHelper(getClass())
+ .add("pathSegments", this.pathSegments)
+ .toString();
+ }
+ }
+
+ /**
+ * Compares whether two objects are equal.
+ * <p/>
+ * NOTE: The bgpSession field is excluded from the comparison.
+ *
+ * @return true if the two objects are equal, otherwise false.
+ */
+ @Override
+ public boolean equals(Object other) {
+ if (this == other) {
+ return true;
+ }
+
+ //
+        // NOTE: Subclasses are considered a change of identity, hence
+        // equals() will return false if the class type doesn't match.
+ //
+ if (other == null || getClass() != other.getClass()) {
+ return false;
+ }
+
+ if (!super.equals(other)) {
+ return false;
+ }
+
+ // NOTE: The bgpSession field is excluded from the comparison
+ BgpRouteEntry otherRoute = (BgpRouteEntry) other;
+ return (this.origin == otherRoute.origin) &&
+ Objects.equals(this.asPath, otherRoute.asPath) &&
+ (this.localPref == otherRoute.localPref) &&
+ (this.multiExitDisc == otherRoute.multiExitDisc);
+ }
+
+ /**
+ * Computes the hash code.
+ * <p/>
+     * NOTE: We return the base class hash code to avoid expensive computation.
+ *
+ * @return the object hash code
+ */
+ @Override
+ public int hashCode() {
+ return super.hashCode();
+ }
+
+ @Override
+ public String toString() {
+ return MoreObjects.toStringHelper(getClass())
+ .add("prefix", prefix())
+ .add("nextHop", nextHop())
+ .add("bgpId", bgpSession.getRemoteBgpId())
+ .add("origin", origin)
+ .add("asPath", asPath)
+ .add("localPref", localPref)
+ .add("multiExitDisc", multiExitDisc)
+ .toString();
+ }
+}
diff --git a/apps/sdnip/src/main/java/org/onlab/onos/sdnip/bgp/BgpSession.java b/apps/sdnip/src/main/java/org/onlab/onos/sdnip/bgp/BgpSession.java
new file mode 100644
index 0000000..0b39bb7
--- /dev/null
+++ b/apps/sdnip/src/main/java/org/onlab/onos/sdnip/bgp/BgpSession.java
@@ -0,0 +1,1840 @@
+package org.onlab.onos.sdnip.bgp;
+
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.net.SocketAddress;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.commons.lang3.tuple.Pair;
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.jboss.netty.buffer.ChannelBuffers;
+import org.jboss.netty.channel.ChannelHandlerContext;
+import org.jboss.netty.channel.ChannelStateEvent;
+import org.jboss.netty.channel.SimpleChannelHandler;
+import org.jboss.netty.util.HashedWheelTimer;
+import org.jboss.netty.util.Timeout;
+import org.jboss.netty.util.Timer;
+import org.jboss.netty.util.TimerTask;
+import org.onlab.onos.sdnip.bgp.BgpConstants.Notifications;
+import org.onlab.onos.sdnip.bgp.BgpConstants.Notifications.HoldTimerExpired;
+import org.onlab.onos.sdnip.bgp.BgpConstants.Notifications.MessageHeaderError;
+import org.onlab.onos.sdnip.bgp.BgpConstants.Notifications.OpenMessageError;
+import org.onlab.onos.sdnip.bgp.BgpConstants.Notifications.UpdateMessageError;
+import org.onlab.packet.IpAddress;
+import org.onlab.packet.IpPrefix;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Class for handling the BGP peer sessions.
+ * There is one instance per BGP peer session.
+ */
+public class BgpSession extends SimpleChannelHandler {
+ private static final Logger log =
+ LoggerFactory.getLogger(BgpSession.class);
+
+ private final BgpSessionManager bgpSessionManager;
+
+ // Local flag to indicate the session is closed.
+    // It is used to avoid Netty's asynchronous closing of the channel.
+ private boolean isClosed = false;
+
+ private SocketAddress remoteAddress; // Peer IP addr/port
+ private IpAddress remoteIp4Address; // Peer IPv4 address
+ private int remoteBgpVersion; // 1 octet
+ private long remoteAs; // 2 octets
+ private long remoteHoldtime; // 2 octets
+ private IpAddress remoteBgpId; // 4 octets -> IPv4 address
+ //
+ private SocketAddress localAddress; // Local IP addr/port
+ private IpAddress localIp4Address; // Local IPv4 address
+ private int localBgpVersion; // 1 octet
+ private long localAs; // 2 octets
+ private long localHoldtime; // 2 octets
+ private IpAddress localBgpId; // 4 octets -> IPv4 address
+ //
+ private long localKeepaliveInterval; // Keepalive interval
+
+ // Timers state
+ private Timer timer = new HashedWheelTimer();
+ private volatile Timeout keepaliveTimeout; // Periodic KEEPALIVE
+ private volatile Timeout sessionTimeout; // Session timeout
+
+ // BGP RIB-IN routing entries from this peer
+ private ConcurrentMap<IpPrefix, BgpRouteEntry> bgpRibIn =
+ new ConcurrentHashMap<>();
+
+ /**
+ * Constructor for a given BGP Session Manager.
+ *
+ * @param bgpSessionManager the BGP Session Manager to use
+ */
+ BgpSession(BgpSessionManager bgpSessionManager) {
+ this.bgpSessionManager = bgpSessionManager;
+ }
+
+ /**
+ * Gets the BGP RIB-IN routing entries.
+ *
+ * @return the BGP RIB-IN routing entries
+ */
+ public Collection<BgpRouteEntry> getBgpRibIn() {
+ return bgpRibIn.values();
+ }
+
+ /**
+ * Finds a BGP routing entry in the BGP RIB-IN.
+ *
+ * @param prefix the prefix of the route to search for
+ * @return the BGP routing entry if found, otherwise null
+ */
+ public BgpRouteEntry findBgpRouteEntry(IpPrefix prefix) {
+ return bgpRibIn.get(prefix);
+ }
+
+ /**
+ * Gets the BGP session remote address.
+ *
+ * @return the BGP session remote address
+ */
+ public SocketAddress getRemoteAddress() {
+ return remoteAddress;
+ }
+
+ /**
+ * Gets the BGP session remote IPv4 address.
+ *
+ * @return the BGP session remote IPv4 address
+ */
+ public IpAddress getRemoteIp4Address() {
+ return remoteIp4Address;
+ }
+
+ /**
+ * Gets the BGP session remote BGP version.
+ *
+ * @return the BGP session remote BGP version
+ */
+ public int getRemoteBgpVersion() {
+ return remoteBgpVersion;
+ }
+
+ /**
+ * Gets the BGP session remote AS number.
+ *
+ * @return the BGP session remote AS number
+ */
+ public long getRemoteAs() {
+ return remoteAs;
+ }
+
+ /**
+ * Gets the BGP session remote Holdtime.
+ *
+ * @return the BGP session remote Holdtime
+ */
+ public long getRemoteHoldtime() {
+ return remoteHoldtime;
+ }
+
+ /**
+ * Gets the BGP session remote BGP Identifier as an IPv4 address.
+ *
+ * @return the BGP session remote BGP Identifier as an IPv4 address
+ */
+ public IpAddress getRemoteBgpId() {
+ return remoteBgpId;
+ }
+
+ /**
+ * Gets the BGP session local address.
+ *
+ * @return the BGP session local address
+ */
+ public SocketAddress getLocalAddress() {
+ return localAddress;
+ }
+
+ /**
+ * Gets the BGP session local BGP version.
+ *
+ * @return the BGP session local BGP version
+ */
+ public int getLocalBgpVersion() {
+ return localBgpVersion;
+ }
+
+ /**
+ * Gets the BGP session local AS number.
+ *
+ * @return the BGP session local AS number
+ */
+ public long getLocalAs() {
+ return localAs;
+ }
+
+ /**
+ * Gets the BGP session local Holdtime.
+ *
+ * @return the BGP session local Holdtime
+ */
+ public long getLocalHoldtime() {
+ return localHoldtime;
+ }
+
+ /**
+ * Gets the BGP session local BGP Identifier as an IPv4 address.
+ *
+ * @return the BGP session local BGP Identifier as an IPv4 address
+ */
+ public IpAddress getLocalBgpId() {
+ return localBgpId;
+ }
+
+ /**
+ * Tests whether the session is closed.
+ * <p/>
+     * NOTE: We use this method to avoid Netty's asynchronous closing
+     * of a channel.
+     *
+     * @return true if the session is closed
+ */
+ boolean isClosed() {
+ return isClosed;
+ }
+
+ /**
+ * Closes the channel.
+ *
+ * @param ctx the Channel Handler Context
+ */
+ void closeChannel(ChannelHandlerContext ctx) {
+ isClosed = true;
+ timer.stop();
+ ctx.getChannel().close();
+ }
+
+ @Override
+ public void channelConnected(ChannelHandlerContext ctx,
+ ChannelStateEvent channelEvent) {
+ localAddress = ctx.getChannel().getLocalAddress();
+ remoteAddress = ctx.getChannel().getRemoteAddress();
+
+ // Assign the local and remote IPv4 addresses
+ InetAddress inetAddr;
+ if (localAddress instanceof InetSocketAddress) {
+ inetAddr = ((InetSocketAddress) localAddress).getAddress();
+ localIp4Address = IpAddress.valueOf(inetAddr.getAddress());
+ }
+ if (remoteAddress instanceof InetSocketAddress) {
+ inetAddr = ((InetSocketAddress) remoteAddress).getAddress();
+ remoteIp4Address = IpAddress.valueOf(inetAddr.getAddress());
+ }
+
+ log.debug("BGP Session Connected from {} on {}",
+ remoteAddress, localAddress);
+ if (!bgpSessionManager.peerConnected(this)) {
+ log.debug("Cannot setup BGP Session Connection from {}. Closing...",
+ remoteAddress);
+ ctx.getChannel().close();
+ }
+ }
+
+ @Override
+ public void channelDisconnected(ChannelHandlerContext ctx,
+ ChannelStateEvent channelEvent) {
+ log.debug("BGP Session Disconnected from {} on {}",
+ ctx.getChannel().getRemoteAddress(),
+ ctx.getChannel().getLocalAddress());
+
+ //
+ // Withdraw the routes advertised by this BGP peer
+ //
+ // NOTE: We must initialize the RIB-IN before propagating the withdraws
+ // for further processing. Otherwise, the BGP Decision Process
+ // will use those routes again.
+ //
+ Collection<BgpRouteEntry> deletedRoutes = bgpRibIn.values();
+ bgpRibIn = new ConcurrentHashMap<>();
+
+ // Push the updates to the BGP Merged RIB
+ BgpSessionManager.BgpRouteSelector bgpRouteSelector =
+ bgpSessionManager.getBgpRouteSelector();
+ Collection<BgpRouteEntry> addedRoutes = Collections.emptyList();
+ bgpRouteSelector.routeUpdates(this, addedRoutes, deletedRoutes);
+
+ bgpSessionManager.peerDisconnected(this);
+ }
+
+ /**
+ * Processes BGP OPEN message.
+ *
+ * @param ctx the Channel Handler Context
+ * @param message the message to process
+ */
+ void processBgpOpen(ChannelHandlerContext ctx, ChannelBuffer message) {
+ int minLength =
+ BgpConstants.BGP_OPEN_MIN_LENGTH - BgpConstants.BGP_HEADER_LENGTH;
+ if (message.readableBytes() < minLength) {
+ log.debug("BGP RX OPEN Error from {}: " +
+ "Message length {} too short. Must be at least {}",
+ remoteAddress, message.readableBytes(), minLength);
+ //
+ // ERROR: Bad Message Length
+ //
+ // Send NOTIFICATION and close the connection
+ ChannelBuffer txMessage = prepareBgpNotificationBadMessageLength(
+ message.readableBytes() + BgpConstants.BGP_HEADER_LENGTH);
+ ctx.getChannel().write(txMessage);
+ closeChannel(ctx);
+ return;
+ }
+
+ //
+ // Parse the OPEN message
+ //
+ // Remote BGP version
+ remoteBgpVersion = message.readUnsignedByte();
+ if (remoteBgpVersion != BgpConstants.BGP_VERSION) {
+ log.debug("BGP RX OPEN Error from {}: " +
+ "Unsupported BGP version {}. Should be {}",
+ remoteAddress, remoteBgpVersion,
+ BgpConstants.BGP_VERSION);
+ //
+ // ERROR: Unsupported Version Number
+ //
+ // Send NOTIFICATION and close the connection
+ int errorCode = OpenMessageError.ERROR_CODE;
+ int errorSubcode = OpenMessageError.UNSUPPORTED_VERSION_NUMBER;
+ ChannelBuffer data = ChannelBuffers.buffer(2);
+ data.writeShort(BgpConstants.BGP_VERSION);
+ ChannelBuffer txMessage =
+ prepareBgpNotification(errorCode, errorSubcode, data);
+ ctx.getChannel().write(txMessage);
+ closeChannel(ctx);
+ return;
+ }
+
+ // Remote AS number
+ remoteAs = message.readUnsignedShort();
+ //
+ // Verify that the AS number is same for all other BGP Sessions
+ // NOTE: This check applies only for our use-case where all BGP
+ // sessions are iBGP.
+ //
+ for (BgpSession bgpSession : bgpSessionManager.getBgpSessions()) {
+ if (remoteAs != bgpSession.getRemoteAs()) {
+ log.debug("BGP RX OPEN Error from {}: Bad Peer AS {}. " +
+ "Expected {}",
+ remoteAddress, remoteAs, bgpSession.getRemoteAs());
+ //
+ // ERROR: Bad Peer AS
+ //
+ // Send NOTIFICATION and close the connection
+ int errorCode = OpenMessageError.ERROR_CODE;
+ int errorSubcode = OpenMessageError.BAD_PEER_AS;
+ ChannelBuffer txMessage =
+ prepareBgpNotification(errorCode, errorSubcode, null);
+ ctx.getChannel().write(txMessage);
+ closeChannel(ctx);
+ return;
+ }
+ }
+
+ // Remote Hold Time
+ remoteHoldtime = message.readUnsignedShort();
+ if ((remoteHoldtime != 0) &&
+ (remoteHoldtime < BgpConstants.BGP_KEEPALIVE_MIN_HOLDTIME)) {
+ log.debug("BGP RX OPEN Error from {}: " +
+ "Unacceptable Hold Time field {}. " +
+ "Should be 0 or at least {}",
+ remoteAddress, remoteHoldtime,
+ BgpConstants.BGP_KEEPALIVE_MIN_HOLDTIME);
+ //
+ // ERROR: Unacceptable Hold Time
+ //
+ // Send NOTIFICATION and close the connection
+ int errorCode = OpenMessageError.ERROR_CODE;
+ int errorSubcode = OpenMessageError.UNACCEPTABLE_HOLD_TIME;
+ ChannelBuffer txMessage =
+ prepareBgpNotification(errorCode, errorSubcode, null);
+ ctx.getChannel().write(txMessage);
+ closeChannel(ctx);
+ return;
+ }
+
+ // Remote BGP Identifier
+ remoteBgpId = IpAddress.valueOf((int) message.readUnsignedInt());
+
+ // Optional Parameters
+ int optParamLen = message.readUnsignedByte();
+ if (message.readableBytes() < optParamLen) {
+ log.debug("BGP RX OPEN Error from {}: " +
+ "Invalid Optional Parameter Length field {}. " +
+ "Remaining Optional Parameters {}",
+ remoteAddress, optParamLen, message.readableBytes());
+ //
+ // ERROR: Invalid Optional Parameter Length field: Unspecific
+ //
+ // Send NOTIFICATION and close the connection
+ int errorCode = OpenMessageError.ERROR_CODE;
+ int errorSubcode = Notifications.ERROR_SUBCODE_UNSPECIFIC;
+ ChannelBuffer txMessage =
+ prepareBgpNotification(errorCode, errorSubcode, null);
+ ctx.getChannel().write(txMessage);
+ closeChannel(ctx);
+ return;
+ }
+ // TODO: Parse the optional parameters (if needed)
+ message.readBytes(optParamLen); // NOTE: data ignored
+
+ //
+ // Copy some of the remote peer's state/setup to the local setup:
+ // - BGP version
+ // - AS number (NOTE: the peer setup is always iBGP)
+ // - Holdtime
+ // Also, assign the local BGP ID based on the local setup
+ //
+ localBgpVersion = remoteBgpVersion;
+ localAs = remoteAs;
+ localHoldtime = remoteHoldtime;
+ localBgpId = bgpSessionManager.getMyBgpId();
+
+ // Set the Keepalive interval
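+        // NOTE: The interval is derived from the negotiated Hold Time
+        // (holdtime / BGP_KEEPALIVE_PER_HOLD_INTERVAL), but never less than
+        // BGP_KEEPALIVE_MIN_INTERVAL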
+ if (localHoldtime == 0) {
+ localKeepaliveInterval = 0;
+ } else {
+ localKeepaliveInterval = Math.max(localHoldtime /
+ BgpConstants.BGP_KEEPALIVE_PER_HOLD_INTERVAL,
+ BgpConstants.BGP_KEEPALIVE_MIN_INTERVAL);
+ }
+
+ log.debug("BGP RX OPEN message from {}: " +
+ "BGPv{} AS {} BGP-ID {} Holdtime {}",
+ remoteAddress, remoteBgpVersion, remoteAs,
+ remoteBgpId, remoteHoldtime);
+
+ // Send my OPEN followed by KEEPALIVE
+ ChannelBuffer txMessage = prepareBgpOpen();
+ ctx.getChannel().write(txMessage);
+ //
+ txMessage = prepareBgpKeepalive();
+ ctx.getChannel().write(txMessage);
+
+ // Start the KEEPALIVE timer
+ restartKeepaliveTimer(ctx);
+
+ // Start the Session Timeout timer
+ restartSessionTimeoutTimer(ctx);
+ }
+
+ /**
+ * Processes BGP UPDATE message.
+ *
+ * @param ctx the Channel Handler Context
+ * @param message the message to process
+ */
+ void processBgpUpdate(ChannelHandlerContext ctx, ChannelBuffer message) {
+ Collection<BgpRouteEntry> addedRoutes = null;
+ Map<IpPrefix, BgpRouteEntry> deletedRoutes = new HashMap<>();
+
+ int minLength =
+ BgpConstants.BGP_UPDATE_MIN_LENGTH - BgpConstants.BGP_HEADER_LENGTH;
+ if (message.readableBytes() < minLength) {
+ log.debug("BGP RX UPDATE Error from {}: " +
+ "Message length {} too short. Must be at least {}",
+ remoteAddress, message.readableBytes(), minLength);
+ //
+ // ERROR: Bad Message Length
+ //
+ // Send NOTIFICATION and close the connection
+ ChannelBuffer txMessage = prepareBgpNotificationBadMessageLength(
+ message.readableBytes() + BgpConstants.BGP_HEADER_LENGTH);
+ ctx.getChannel().write(txMessage);
+ closeChannel(ctx);
+ return;
+ }
+
+ log.debug("BGP RX UPDATE message from {}", remoteAddress);
+
+ //
+ // Parse the UPDATE message
+ //
+
+ //
+ // Parse the Withdrawn Routes
+ //
+ int withdrawnRoutesLength = message.readUnsignedShort();
+ if (withdrawnRoutesLength > message.readableBytes()) {
+ // ERROR: Malformed Attribute List
+ actionsBgpUpdateMalformedAttributeList(ctx);
+ return;
+ }
+ Collection<IpPrefix> withdrawnPrefixes = null;
+ try {
+ withdrawnPrefixes = parsePackedPrefixes(withdrawnRoutesLength,
+ message);
+ } catch (BgpParseException e) {
+ // ERROR: Invalid Network Field
+ log.debug("Exception parsing Withdrawn Prefixes from BGP peer {}: ",
+ remoteBgpId, e);
+ actionsBgpUpdateInvalidNetworkField(ctx);
+ return;
+ }
+ for (IpPrefix prefix : withdrawnPrefixes) {
+ log.debug("BGP RX UPDATE message WITHDRAWN from {}: {}",
+ remoteAddress, prefix);
+ BgpRouteEntry bgpRouteEntry = bgpRibIn.get(prefix);
+ if (bgpRouteEntry != null) {
+ deletedRoutes.put(prefix, bgpRouteEntry);
+ }
+ }
+
+ //
+ // Parse the Path Attributes
+ //
+ try {
+ addedRoutes = parsePathAttributes(ctx, message);
+ } catch (BgpParseException e) {
+ log.debug("Exception parsing Path Attributes from BGP peer {}: ",
+ remoteBgpId, e);
+ // NOTE: The session was already closed, so nothing else to do
+ return;
+ }
+ // Ignore WITHDRAWN routes that are ADDED
+ for (BgpRouteEntry bgpRouteEntry : addedRoutes) {
+ deletedRoutes.remove(bgpRouteEntry.prefix());
+ }
+
+ // Update the BGP RIB-IN
+ for (BgpRouteEntry bgpRouteEntry : deletedRoutes.values()) {
+ bgpRibIn.remove(bgpRouteEntry.prefix());
+ }
+ for (BgpRouteEntry bgpRouteEntry : addedRoutes) {
+ bgpRibIn.put(bgpRouteEntry.prefix(), bgpRouteEntry);
+ }
+
+ // Push the updates to the BGP Merged RIB
+ BgpSessionManager.BgpRouteSelector bgpRouteSelector =
+ bgpSessionManager.getBgpRouteSelector();
+ bgpRouteSelector.routeUpdates(this, addedRoutes,
+ deletedRoutes.values());
+
+ // Start the Session Timeout timer
+ restartSessionTimeoutTimer(ctx);
+ }
+
+ /**
+     * Parses the BGP Path Attributes from the BGP UPDATE message.
+ *
+ * @param ctx the Channel Handler Context
+ * @param message the message to parse
+ * @return a collection of the result BGP Route Entries
+ * @throws BgpParseException
+ */
+ private Collection<BgpRouteEntry> parsePathAttributes(
+ ChannelHandlerContext ctx,
+ ChannelBuffer message)
+ throws BgpParseException {
+ Map<IpPrefix, BgpRouteEntry> addedRoutes = new HashMap<>();
+
+ //
+ // Parsed values
+ //
+ Short origin = -1; // Mandatory
+ BgpRouteEntry.AsPath asPath = null; // Mandatory
+ IpAddress nextHop = null; // Mandatory
+ long multiExitDisc = // Optional
+ BgpConstants.Update.MultiExitDisc.LOWEST_MULTI_EXIT_DISC;
+ Long localPref = null; // Mandatory
+ Long aggregatorAsNumber = null; // Optional: unused
+ IpAddress aggregatorIpAddress = null; // Optional: unused
+
+ //
+ // Get and verify the Path Attributes Length
+ //
+ int pathAttributeLength = message.readUnsignedShort();
+ if (pathAttributeLength > message.readableBytes()) {
+ // ERROR: Malformed Attribute List
+ actionsBgpUpdateMalformedAttributeList(ctx);
+ String errorMsg = "Malformed Attribute List";
+ throw new BgpParseException(errorMsg);
+ }
+ if (pathAttributeLength == 0) {
+ return addedRoutes.values();
+ }
+
+ //
+ // Parse the Path Attributes
+ //
+ int pathAttributeEnd = message.readerIndex() + pathAttributeLength;
+ while (message.readerIndex() < pathAttributeEnd) {
+ int attrFlags = message.readUnsignedByte();
+ if (message.readerIndex() >= pathAttributeEnd) {
+ // ERROR: Malformed Attribute List
+ actionsBgpUpdateMalformedAttributeList(ctx);
+ String errorMsg = "Malformed Attribute List";
+ throw new BgpParseException(errorMsg);
+ }
+ int attrTypeCode = message.readUnsignedByte();
+
+ // The Attribute Flags
+ boolean optionalBit = ((0x80 & attrFlags) != 0);
+ boolean transitiveBit = ((0x40 & attrFlags) != 0);
+ boolean partialBit = ((0x20 & attrFlags) != 0);
+ boolean extendedLengthBit = ((0x10 & attrFlags) != 0);
+
+ // The Attribute Length
+ int attrLen = 0;
+ int attrLenOctets = 1;
+ if (extendedLengthBit) {
+ attrLenOctets = 2;
+ }
+ if (message.readerIndex() + attrLenOctets > pathAttributeEnd) {
+ // ERROR: Malformed Attribute List
+ actionsBgpUpdateMalformedAttributeList(ctx);
+ String errorMsg = "Malformed Attribute List";
+ throw new BgpParseException(errorMsg);
+ }
+ if (extendedLengthBit) {
+ attrLen = message.readUnsignedShort();
+ } else {
+ attrLen = message.readUnsignedByte();
+ }
+ if (message.readerIndex() + attrLen > pathAttributeEnd) {
+ // ERROR: Malformed Attribute List
+ actionsBgpUpdateMalformedAttributeList(ctx);
+ String errorMsg = "Malformed Attribute List";
+ throw new BgpParseException(errorMsg);
+ }
+
+ //
+ // Verify the Attribute Flags
+ //
+ verifyBgpUpdateAttributeFlags(ctx, attrTypeCode, attrLen,
+ attrFlags, message);
+
+ //
+ // Extract the Attribute Value based on the Attribute Type Code
+ //
+ switch (attrTypeCode) {
+
+ case BgpConstants.Update.Origin.TYPE:
+ // Attribute Type Code ORIGIN
+ origin = parseAttributeTypeOrigin(ctx, attrTypeCode, attrLen,
+ attrFlags, message);
+ break;
+
+ case BgpConstants.Update.AsPath.TYPE:
+ // Attribute Type Code AS_PATH
+ asPath = parseAttributeTypeAsPath(ctx, attrTypeCode, attrLen,
+ attrFlags, message);
+ break;
+
+ case BgpConstants.Update.NextHop.TYPE:
+ // Attribute Type Code NEXT_HOP
+ nextHop = parseAttributeTypeNextHop(ctx, attrTypeCode, attrLen,
+ attrFlags, message);
+ break;
+
+ case BgpConstants.Update.MultiExitDisc.TYPE:
+ // Attribute Type Code MULTI_EXIT_DISC
+ multiExitDisc =
+ parseAttributeTypeMultiExitDisc(ctx, attrTypeCode, attrLen,
+ attrFlags, message);
+ break;
+
+ case BgpConstants.Update.LocalPref.TYPE:
+ // Attribute Type Code LOCAL_PREF
+ localPref =
+ parseAttributeTypeLocalPref(ctx, attrTypeCode, attrLen,
+ attrFlags, message);
+ break;
+
+ case BgpConstants.Update.AtomicAggregate.TYPE:
+ // Attribute Type Code ATOMIC_AGGREGATE
+ parseAttributeTypeAtomicAggregate(ctx, attrTypeCode, attrLen,
+ attrFlags, message);
+ // Nothing to do: this attribute is primarily informational
+ break;
+
+ case BgpConstants.Update.Aggregator.TYPE:
+ // Attribute Type Code AGGREGATOR
+ Pair<Long, IpAddress> aggregator =
+ parseAttributeTypeAggregator(ctx, attrTypeCode, attrLen,
+ attrFlags, message);
+ aggregatorAsNumber = aggregator.getLeft();
+ aggregatorIpAddress = aggregator.getRight();
+ break;
+
+ default:
+ // TODO: Parse any new Attribute Types if needed
+ if (!optionalBit) {
+ // ERROR: Unrecognized Well-known Attribute
+ actionsBgpUpdateUnrecognizedWellKnownAttribute(
+ ctx, attrTypeCode, attrLen, attrFlags, message);
+ String errorMsg = "Unrecognized Well-known Attribute: " +
+ attrTypeCode;
+ throw new BgpParseException(errorMsg);
+ }
+
+ // Skip the data from the unrecognized attribute
+ log.debug("BGP RX UPDATE message from {}: " +
+ "Unrecognized Attribute Type {}",
+ remoteAddress, attrTypeCode);
+ message.skipBytes(attrLen);
+ break;
+ }
+ }
+
+ //
+ // Verify the Well-known Attributes
+ //
+ verifyBgpUpdateWellKnownAttributes(ctx, origin, asPath, nextHop,
+ localPref);
+
+ //
+ // Parse the NLRI (Network Layer Reachability Information)
+ //
+ Collection<IpPrefix> addedPrefixes = null;
+ int nlriLength = message.readableBytes();
+ try {
+ addedPrefixes = parsePackedPrefixes(nlriLength, message);
+ } catch (BgpParseException e) {
+ // ERROR: Invalid Network Field
+ log.debug("Exception parsing NLRI from BGP peer {}: ",
+ remoteBgpId, e);
+ actionsBgpUpdateInvalidNetworkField(ctx);
+ // Rethrow the exception
+ throw e;
+ }
+
+ // Generate the added routes
+ for (IpPrefix prefix : addedPrefixes) {
+ BgpRouteEntry bgpRouteEntry =
+ new BgpRouteEntry(this, prefix, nextHop,
+ origin.byteValue(), asPath, localPref);
+ bgpRouteEntry.setMultiExitDisc(multiExitDisc);
+ if (bgpRouteEntry.hasAsPathLoop(localAs)) {
+ log.debug("BGP RX UPDATE message IGNORED from {}: {} " +
+ "nextHop {}: contains AS Path loop",
+ remoteAddress, prefix, nextHop);
+ continue;
+ } else {
+ log.debug("BGP RX UPDATE message ADDED from {}: {} nextHop {}",
+ remoteAddress, prefix, nextHop);
+ }
+ addedRoutes.put(prefix, bgpRouteEntry);
+ }
+
+ return addedRoutes.values();
+ }
+
+ /**
+ * Verifies BGP UPDATE Well-known Attributes.
+ *
+ * @param ctx the Channel Handler Context
+ * @param origin the ORIGIN well-known mandatory attribute
+ * @param asPath the AS_PATH well-known mandatory attribute
+ * @param nextHop the NEXT_HOP well-known mandatory attribute
+ * @param localPref the LOCAL_PREF required attribute
+ * @throws BgpParseException
+ */
+ private void verifyBgpUpdateWellKnownAttributes(
+ ChannelHandlerContext ctx,
+ Short origin,
+ BgpRouteEntry.AsPath asPath,
+ IpAddress nextHop,
+ Long localPref)
+ throws BgpParseException {
+ //
+ // Check for Missing Well-known Attributes
+ //
+ if ((origin == null) || (origin == -1)) {
+ // Missing Attribute Type Code ORIGIN
+ int type = BgpConstants.Update.Origin.TYPE;
+ actionsBgpUpdateMissingWellKnownAttribute(ctx, type);
+ String errorMsg = "Missing Well-known Attribute: ORIGIN";
+ throw new BgpParseException(errorMsg);
+ }
+ if (asPath == null) {
+ // Missing Attribute Type Code AS_PATH
+ int type = BgpConstants.Update.AsPath.TYPE;
+ actionsBgpUpdateMissingWellKnownAttribute(ctx, type);
+ String errorMsg = "Missing Well-known Attribute: AS_PATH";
+ throw new BgpParseException(errorMsg);
+ }
+ if (nextHop == null) {
+ // Missing Attribute Type Code NEXT_HOP
+ int type = BgpConstants.Update.NextHop.TYPE;
+ actionsBgpUpdateMissingWellKnownAttribute(ctx, type);
+ String errorMsg = "Missing Well-known Attribute: NEXT_HOP";
+ throw new BgpParseException(errorMsg);
+ }
+ if (localPref == null) {
+ // Missing Attribute Type Code LOCAL_PREF
+ // NOTE: Required for iBGP
+ int type = BgpConstants.Update.LocalPref.TYPE;
+ actionsBgpUpdateMissingWellKnownAttribute(ctx, type);
+ String errorMsg = "Missing Well-known Attribute: LOCAL_PREF";
+ throw new BgpParseException(errorMsg);
+ }
+ }
+
+ /**
+ * Verifies the BGP UPDATE Attribute Flags.
+ *
+ * @param ctx the Channel Handler Context
+ * @param attrTypeCode the attribute type code
+ * @param attrLen the attribute length (in octets)
+ * @param attrFlags the attribute flags
+ * @param message the message to parse
+ * @throws BgpParseException
+ */
+ private void verifyBgpUpdateAttributeFlags(
+ ChannelHandlerContext ctx,
+ int attrTypeCode,
+ int attrLen,
+ int attrFlags,
+ ChannelBuffer message)
+ throws BgpParseException {
+
+ //
+ // Assign the Attribute Type Name and the Well-known flag
+ //
+ String typeName = "UNKNOWN";
+ boolean isWellKnown = false;
+ switch (attrTypeCode) {
+ case BgpConstants.Update.Origin.TYPE:
+ isWellKnown = true;
+ typeName = "ORIGIN";
+ break;
+ case BgpConstants.Update.AsPath.TYPE:
+ isWellKnown = true;
+ typeName = "AS_PATH";
+ break;
+ case BgpConstants.Update.NextHop.TYPE:
+ isWellKnown = true;
+ typeName = "NEXT_HOP";
+ break;
+ case BgpConstants.Update.MultiExitDisc.TYPE:
+ isWellKnown = false;
+ typeName = "MULTI_EXIT_DISC";
+ break;
+ case BgpConstants.Update.LocalPref.TYPE:
+ isWellKnown = true;
+ typeName = "LOCAL_PREF";
+ break;
+ case BgpConstants.Update.AtomicAggregate.TYPE:
+ isWellKnown = true;
+ typeName = "ATOMIC_AGGREGATE";
+ break;
+ case BgpConstants.Update.Aggregator.TYPE:
+ isWellKnown = false;
+ typeName = "AGGREGATOR";
+ break;
+ default:
+ isWellKnown = false;
+ typeName = "UNKNOWN(" + attrTypeCode + ")";
+ break;
+ }
+
+ //
+ // Verify the Attribute Flags
+ //
+ boolean optionalBit = ((0x80 & attrFlags) != 0);
+ boolean transitiveBit = ((0x40 & attrFlags) != 0);
+ boolean partialBit = ((0x20 & attrFlags) != 0);
+ if ((isWellKnown && optionalBit) ||
+ (isWellKnown && (!transitiveBit)) ||
+ (isWellKnown && partialBit) ||
+ (optionalBit && (!transitiveBit) && partialBit)) {
+ //
+ // ERROR: The Optional bit cannot be set for Well-known attributes
+            // ERROR: The Transitive bit MUST be 1 for well-known attributes
+ // ERROR: The Partial bit MUST be 0 for well-known attributes
+ // ERROR: The Partial bit MUST be 0 for optional non-transitive
+ // attributes
+ //
+ actionsBgpUpdateAttributeFlagsError(
+ ctx, attrTypeCode, attrLen, attrFlags, message);
+ String errorMsg = "Attribute Flags Error for " + typeName + ": " +
+ attrFlags;
+ throw new BgpParseException(errorMsg);
+ }
+ }
+
+ /**
+ * Parses BGP UPDATE Attribute Type ORIGIN.
+ *
+ * @param ctx the Channel Handler Context
+ * @param attrTypeCode the attribute type code
+ * @param attrLen the attribute length (in octets)
+ * @param attrFlags the attribute flags
+ * @param message the message to parse
+ * @return the parsed ORIGIN value
+ * @throws BgpParseException
+ */
+ private short parseAttributeTypeOrigin(
+ ChannelHandlerContext ctx,
+ int attrTypeCode,
+ int attrLen,
+ int attrFlags,
+ ChannelBuffer message)
+ throws BgpParseException {
+
+ // Check the Attribute Length
+ if (attrLen != BgpConstants.Update.Origin.LENGTH) {
+ // ERROR: Attribute Length Error
+ actionsBgpUpdateAttributeLengthError(
+ ctx, attrTypeCode, attrLen, attrFlags, message);
+ String errorMsg = "Attribute Length Error";
+ throw new BgpParseException(errorMsg);
+ }
+
+ message.markReaderIndex();
+ short origin = message.readUnsignedByte();
+ switch (origin) {
+ case BgpConstants.Update.Origin.IGP:
+ // FALLTHROUGH
+ case BgpConstants.Update.Origin.EGP:
+ // FALLTHROUGH
+ case BgpConstants.Update.Origin.INCOMPLETE:
+ break;
+ default:
+ // ERROR: Invalid ORIGIN Attribute
+ message.resetReaderIndex();
+ actionsBgpUpdateInvalidOriginAttribute(
+ ctx, attrTypeCode, attrLen, attrFlags, message, origin);
+ String errorMsg = "Invalid ORIGIN Attribute: " + origin;
+ throw new BgpParseException(errorMsg);
+ }
+
+ return origin;
+ }
+
+ /**
+     * Parses BGP UPDATE Attribute Type AS_PATH.
+ *
+ * @param ctx the Channel Handler Context
+ * @param attrTypeCode the attribute type code
+ * @param attrLen the attribute length (in octets)
+ * @param attrFlags the attribute flags
+ * @param message the message to parse
+ * @return the parsed AS Path
+ * @throws BgpParseException
+ */
+ private BgpRouteEntry.AsPath parseAttributeTypeAsPath(
+ ChannelHandlerContext ctx,
+ int attrTypeCode,
+ int attrLen,
+ int attrFlags,
+ ChannelBuffer message)
+ throws BgpParseException {
+ ArrayList<BgpRouteEntry.PathSegment> pathSegments = new ArrayList<>();
+
+ //
+ // Parse the message
+ //
+ while (attrLen > 0) {
+ if (attrLen < 2) {
+ // ERROR: Malformed AS_PATH
+ actionsBgpUpdateMalformedAsPath(ctx);
+ String errorMsg = "Malformed AS Path";
+ throw new BgpParseException(errorMsg);
+ }
+ // Get the Path Segment Type and Length (in number of ASes)
+ short pathSegmentType = message.readUnsignedByte();
+ short pathSegmentLength = message.readUnsignedByte();
+ attrLen -= 2;
+
+ // Verify the Path Segment Type
+ switch (pathSegmentType) {
+ case BgpConstants.Update.AsPath.AS_SET:
+ // FALLTHROUGH
+ case BgpConstants.Update.AsPath.AS_SEQUENCE:
+ break;
+ default:
+ // ERROR: Invalid Path Segment Type
+ //
+ // NOTE: The BGP Spec (RFC 4271) doesn't contain Error Subcode
+ // for "Invalid Path Segment Type", hence we return
+ // the error as "Malformed AS_PATH".
+ //
+ actionsBgpUpdateMalformedAsPath(ctx);
+ String errorMsg =
+ "Invalid AS Path Segment Type: " + pathSegmentType;
+ throw new BgpParseException(errorMsg);
+ }
+
+ // Parse the AS numbers
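+            // NOTE: Each AS number is read as 2 octets; 4-octet AS numbers
+            // are not supported by this parser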
+ if (2 * pathSegmentLength > attrLen) {
+ // ERROR: Malformed AS_PATH
+ actionsBgpUpdateMalformedAsPath(ctx);
+ String errorMsg = "Malformed AS Path";
+ throw new BgpParseException(errorMsg);
+ }
+ attrLen -= (2 * pathSegmentLength);
+ ArrayList<Long> segmentAsNumbers = new ArrayList<>();
+ while (pathSegmentLength-- > 0) {
+ long asNumber = message.readUnsignedShort();
+ segmentAsNumbers.add(asNumber);
+ }
+
+ BgpRouteEntry.PathSegment pathSegment =
+ new BgpRouteEntry.PathSegment((byte) pathSegmentType,
+ segmentAsNumbers);
+ pathSegments.add(pathSegment);
+ }
+
+ return new BgpRouteEntry.AsPath(pathSegments);
+ }
+
+ /**
+ * Parses BGP UPDATE Attribute Type NEXT_HOP.
+ *
+ * @param ctx the Channel Handler Context
+ * @param attrTypeCode the attribute type code
+ * @param attrLen the attribute length (in octets)
+ * @param attrFlags the attribute flags
+ * @param message the message to parse
+ * @return the parsed NEXT_HOP value
+ * @throws BgpParseException
+ */
+ private IpAddress parseAttributeTypeNextHop(
+ ChannelHandlerContext ctx,
+ int attrTypeCode,
+ int attrLen,
+ int attrFlags,
+ ChannelBuffer message)
+ throws BgpParseException {
+
+ // Check the Attribute Length
+ if (attrLen != BgpConstants.Update.NextHop.LENGTH) {
+ // ERROR: Attribute Length Error
+ actionsBgpUpdateAttributeLengthError(
+ ctx, attrTypeCode, attrLen, attrFlags, message);
+ String errorMsg = "Attribute Length Error";
+ throw new BgpParseException(errorMsg);
+ }
+
+ message.markReaderIndex();
+ long address = message.readUnsignedInt();
+ IpAddress nextHopAddress = IpAddress.valueOf((int) address);
+ //
+ // Check whether the NEXT_HOP IP address is semantically correct.
+ // As per RFC 4271, Section 6.3:
+ //
+ // a) It MUST NOT be the IP address of the receiving speaker
+ // b) In the case of an EBGP ....
+ //
+ // Here we check only (a), because (b) doesn't apply for us: all our
+ // peers are iBGP.
+ //
+ if (nextHopAddress.equals(localIp4Address)) {
+ // ERROR: Invalid NEXT_HOP Attribute
+ message.resetReaderIndex();
+ actionsBgpUpdateInvalidNextHopAttribute(
+ ctx, attrTypeCode, attrLen, attrFlags, message,
+ nextHopAddress);
+ String errorMsg = "Invalid NEXT_HOP Attribute: " + nextHopAddress;
+ throw new BgpParseException(errorMsg);
+ }
+
+ return nextHopAddress;
+ }
+
+ /**
+ * Parses BGP UPDATE Attribute Type MULTI_EXIT_DISC.
+ *
+ * @param ctx the Channel Handler Context
+ * @param attrTypeCode the attribute type code
+ * @param attrLen the attribute length (in octets)
+ * @param attrFlags the attribute flags
+ * @param message the message to parse
+ * @return the parsed MULTI_EXIT_DISC value
+ * @throws BgpParseException
+ */
+ private long parseAttributeTypeMultiExitDisc(
+ ChannelHandlerContext ctx,
+ int attrTypeCode,
+ int attrLen,
+ int attrFlags,
+ ChannelBuffer message)
+ throws BgpParseException {
+
+ // Check the Attribute Length
+ if (attrLen != BgpConstants.Update.MultiExitDisc.LENGTH) {
+ // ERROR: Attribute Length Error
+ actionsBgpUpdateAttributeLengthError(
+ ctx, attrTypeCode, attrLen, attrFlags, message);
+ String errorMsg = "Attribute Length Error";
+ throw new BgpParseException(errorMsg);
+ }
+
+ long multiExitDisc = message.readUnsignedInt();
+ return multiExitDisc;
+ }
+
+ /**
+ * Parses BGP UPDATE Attribute Type LOCAL_PREF.
+ *
+ * @param ctx the Channel Handler Context
+ * @param attrTypeCode the attribute type code
+ * @param attrLen the attribute length (in octets)
+ * @param attrFlags the attribute flags
+ * @param message the message to parse
+ * @return the parsed LOCAL_PREF value
+ * @throws BgpParseException
+ */
+ private long parseAttributeTypeLocalPref(
+ ChannelHandlerContext ctx,
+ int attrTypeCode,
+ int attrLen,
+ int attrFlags,
+ ChannelBuffer message)
+ throws BgpParseException {
+
+ // Check the Attribute Length
+ if (attrLen != BgpConstants.Update.LocalPref.LENGTH) {
+ // ERROR: Attribute Length Error
+ actionsBgpUpdateAttributeLengthError(
+ ctx, attrTypeCode, attrLen, attrFlags, message);
+ String errorMsg = "Attribute Length Error";
+ throw new BgpParseException(errorMsg);
+ }
+
+ long localPref = message.readUnsignedInt();
+ return localPref;
+ }
+
+ /**
+ * Parses BGP UPDATE Attribute Type ATOMIC_AGGREGATE.
+ *
+ * @param ctx the Channel Handler Context
+ * @param attrTypeCode the attribute type code
+ * @param attrLen the attribute length (in octets)
+ * @param attrFlags the attribute flags
+ * @param message the message to parse
+ * @throws BgpParseException
+ */
+ private void parseAttributeTypeAtomicAggregate(
+ ChannelHandlerContext ctx,
+ int attrTypeCode,
+ int attrLen,
+ int attrFlags,
+ ChannelBuffer message)
+ throws BgpParseException {
+
+ // Check the Attribute Length
+ if (attrLen != BgpConstants.Update.AtomicAggregate.LENGTH) {
+ // ERROR: Attribute Length Error
+ actionsBgpUpdateAttributeLengthError(
+ ctx, attrTypeCode, attrLen, attrFlags, message);
+ String errorMsg = "Attribute Length Error";
+ throw new BgpParseException(errorMsg);
+ }
+
+ // Nothing to do: this attribute is primarily informational
+ }
+
+ /**
+ * Parses BGP UPDATE Attribute Type AGGREGATOR.
+ *
+ * @param ctx the Channel Handler Context
+ * @param attrTypeCode the attribute type code
+ * @param attrLen the attribute length (in octets)
+ * @param attrFlags the attribute flags
+ * @param message the message to parse
+     * @return the parsed AGGREGATOR value: a tuple of &lt;AS-Number, IP-Address&gt;
+ * @throws BgpParseException
+ */
+ private Pair<Long, IpAddress> parseAttributeTypeAggregator(
+ ChannelHandlerContext ctx,
+ int attrTypeCode,
+ int attrLen,
+ int attrFlags,
+ ChannelBuffer message)
+ throws BgpParseException {
+
+ // Check the Attribute Length
+ if (attrLen != BgpConstants.Update.Aggregator.LENGTH) {
+ // ERROR: Attribute Length Error
+ actionsBgpUpdateAttributeLengthError(
+ ctx, attrTypeCode, attrLen, attrFlags, message);
+ String errorMsg = "Attribute Length Error";
+ throw new BgpParseException(errorMsg);
+ }
+
+ // The AGGREGATOR AS number
+ long aggregatorAsNumber = message.readUnsignedShort();
+ // The AGGREGATOR IP address
+ long aggregatorAddress = message.readUnsignedInt();
+ IpAddress aggregatorIpAddress =
+ IpAddress.valueOf((int) aggregatorAddress);
+
+ Pair<Long, IpAddress> aggregator = Pair.of(aggregatorAsNumber,
+ aggregatorIpAddress);
+ return aggregator;
+ }
+
+ /**
+ * Parses a message that contains encoded IPv4 network prefixes.
+     * <p/>
+     * The IPv4 prefixes are encoded in the form:
+     * &lt;Length, Prefix&gt; where Length is the length in bits of the IPv4
+     * prefix, and Prefix is the IPv4 prefix (padded with trailing bits to
+     * the end of an octet).
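+     * For example, the prefix 10.1.0.0/16 is encoded as the three octets
+     * 0x10 0x0A 0x01.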
+ *
+ * @param totalLength the total length of the data to parse
+ * @param message the message with data to parse
+ * @return a collection of parsed IPv4 network prefixes
+ * @throws BgpParseException
+ */
+ private Collection<IpPrefix> parsePackedPrefixes(int totalLength,
+ ChannelBuffer message)
+ throws BgpParseException {
+ Collection<IpPrefix> result = new ArrayList<>();
+
+ if (totalLength == 0) {
+ return result;
+ }
+
+ // Parse the data
+ int dataEnd = message.readerIndex() + totalLength;
+ while (message.readerIndex() < dataEnd) {
+ int prefixBitlen = message.readUnsignedByte();
+ int prefixBytelen = (prefixBitlen + 7) / 8; // Round-up
+ if (message.readerIndex() + prefixBytelen > dataEnd) {
+ String errorMsg = "Malformed Network Prefixes";
+ throw new BgpParseException(errorMsg);
+ }
+
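+            // Assemble the prefix octets (network byte order) into the
+            // high-order bits of a 32-bit value; extraShift accounts for
+            // the octets omitted by the packed encoding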
+ long address = 0;
+ long extraShift = (4 - prefixBytelen) * 8;
+ while (prefixBytelen > 0) {
+ address <<= 8;
+ address |= message.readUnsignedByte();
+ prefixBytelen--;
+ }
+ address <<= extraShift;
+ IpPrefix prefix =
+ IpPrefix.valueOf(IpAddress.valueOf((int) address).toInt(),
+ (short) prefixBitlen);
+ result.add(prefix);
+ }
+
+ return result;
+ }
+
+ /**
+ * Applies the appropriate actions after detecting BGP UPDATE
+ * Invalid Network Field Error: send NOTIFICATION and close the channel.
+ *
+ * @param ctx the Channel Handler Context
+ */
+ private void actionsBgpUpdateInvalidNetworkField(
+ ChannelHandlerContext ctx) {
+ log.debug("BGP RX UPDATE Error from {}: Invalid Network Field",
+ remoteAddress);
+
+ //
+ // ERROR: Invalid Network Field
+ //
+ // Send NOTIFICATION and close the connection
+ int errorCode = UpdateMessageError.ERROR_CODE;
+ int errorSubcode = UpdateMessageError.INVALID_NETWORK_FIELD;
+ ChannelBuffer txMessage =
+ prepareBgpNotification(errorCode, errorSubcode, null);
+ ctx.getChannel().write(txMessage);
+ closeChannel(ctx);
+ }
+
+ /**
+ * Applies the appropriate actions after detecting BGP UPDATE
+ * Malformed Attribute List Error: send NOTIFICATION and close the channel.
+ *
+ * @param ctx the Channel Handler Context
+ */
+ private void actionsBgpUpdateMalformedAttributeList(
+ ChannelHandlerContext ctx) {
+ log.debug("BGP RX UPDATE Error from {}: Malformed Attribute List",
+ remoteAddress);
+
+ //
+ // ERROR: Malformed Attribute List
+ //
+ // Send NOTIFICATION and close the connection
+ int errorCode = UpdateMessageError.ERROR_CODE;
+ int errorSubcode = UpdateMessageError.MALFORMED_ATTRIBUTE_LIST;
+ ChannelBuffer txMessage =
+ prepareBgpNotification(errorCode, errorSubcode, null);
+ ctx.getChannel().write(txMessage);
+ closeChannel(ctx);
+ }
+
+ /**
+ * Applies the appropriate actions after detecting BGP UPDATE
+ * Missing Well-known Attribute Error: send NOTIFICATION and close the
+ * channel.
+ *
+ * @param ctx the Channel Handler Context
+ * @param missingAttrTypeCode the missing attribute type code
+ */
+ private void actionsBgpUpdateMissingWellKnownAttribute(
+ ChannelHandlerContext ctx,
+ int missingAttrTypeCode) {
+ log.debug("BGP RX UPDATE Error from {}: Missing Well-known Attribute: {}",
+ remoteAddress, missingAttrTypeCode);
+
+ //
+ // ERROR: Missing Well-known Attribute
+ //
+ // Send NOTIFICATION and close the connection
+ int errorCode = UpdateMessageError.ERROR_CODE;
+ int errorSubcode = UpdateMessageError.MISSING_WELL_KNOWN_ATTRIBUTE;
+ ChannelBuffer data = ChannelBuffers.buffer(1);
+ data.writeByte(missingAttrTypeCode);
+ ChannelBuffer txMessage =
+ prepareBgpNotification(errorCode, errorSubcode, data);
+ ctx.getChannel().write(txMessage);
+ closeChannel(ctx);
+ }
+
+ /**
+ * Applies the appropriate actions after detecting BGP UPDATE
+ * Invalid ORIGIN Attribute Error: send NOTIFICATION and close the channel.
+ *
+ * @param ctx the Channel Handler Context
+ * @param attrTypeCode the attribute type code
+ * @param attrLen the attribute length (in octets)
+ * @param attrFlags the attribute flags
+ * @param message the message with the data
+ * @param origin the ORIGIN attribute value
+ */
+ private void actionsBgpUpdateInvalidOriginAttribute(
+ ChannelHandlerContext ctx,
+ int attrTypeCode,
+ int attrLen,
+ int attrFlags,
+ ChannelBuffer message,
+ short origin) {
+ log.debug("BGP RX UPDATE Error from {}: Invalid ORIGIN Attribute",
+ remoteAddress);
+
+ //
+ // ERROR: Invalid ORIGIN Attribute
+ //
+ // Send NOTIFICATION and close the connection
+ int errorCode = UpdateMessageError.ERROR_CODE;
+ int errorSubcode = UpdateMessageError.INVALID_ORIGIN_ATTRIBUTE;
+ ChannelBuffer data =
+ prepareBgpUpdateNotificationDataPayload(attrTypeCode, attrLen,
+ attrFlags, message);
+ ChannelBuffer txMessage =
+ prepareBgpNotification(errorCode, errorSubcode, data);
+ ctx.getChannel().write(txMessage);
+ closeChannel(ctx);
+ }
+
+ /**
+ * Applies the appropriate actions after detecting BGP UPDATE
+ * Attribute Flags Error: send NOTIFICATION and close the channel.
+ *
+ * @param ctx the Channel Handler Context
+ * @param attrTypeCode the attribute type code
+ * @param attrLen the attribute length (in octets)
+ * @param attrFlags the attribute flags
+ * @param message the message with the data
+ */
+ private void actionsBgpUpdateAttributeFlagsError(
+ ChannelHandlerContext ctx,
+ int attrTypeCode,
+ int attrLen,
+ int attrFlags,
+ ChannelBuffer message) {
+ log.debug("BGP RX UPDATE Error from {}: Attribute Flags Error",
+ remoteAddress);
+
+ //
+ // ERROR: Attribute Flags Error
+ //
+ // Send NOTIFICATION and close the connection
+ int errorCode = UpdateMessageError.ERROR_CODE;
+ int errorSubcode = UpdateMessageError.ATTRIBUTE_FLAGS_ERROR;
+ ChannelBuffer data =
+ prepareBgpUpdateNotificationDataPayload(attrTypeCode, attrLen,
+ attrFlags, message);
+ ChannelBuffer txMessage =
+ prepareBgpNotification(errorCode, errorSubcode, data);
+ ctx.getChannel().write(txMessage);
+ closeChannel(ctx);
+ }
+
+ /**
+ * Applies the appropriate actions after detecting BGP UPDATE
+ * Invalid NEXT_HOP Attribute Error: send NOTIFICATION and close the
+ * channel.
+ *
+ * @param ctx the Channel Handler Context
+ * @param attrTypeCode the attribute type code
+ * @param attrLen the attribute length (in octets)
+ * @param attrFlags the attribute flags
+ * @param message the message with the data
+ * @param nextHop the NEXT_HOP attribute value
+ */
+ private void actionsBgpUpdateInvalidNextHopAttribute(
+ ChannelHandlerContext ctx,
+ int attrTypeCode,
+ int attrLen,
+ int attrFlags,
+ ChannelBuffer message,
+ IpAddress nextHop) {
+ log.debug("BGP RX UPDATE Error from {}: Invalid NEXT_HOP Attribute {}",
+ remoteAddress, nextHop);
+
+ //
+        // ERROR: Invalid NEXT_HOP Attribute
+ //
+ // Send NOTIFICATION and close the connection
+ int errorCode = UpdateMessageError.ERROR_CODE;
+ int errorSubcode = UpdateMessageError.INVALID_NEXT_HOP_ATTRIBUTE;
+ ChannelBuffer data =
+ prepareBgpUpdateNotificationDataPayload(attrTypeCode, attrLen,
+ attrFlags, message);
+ ChannelBuffer txMessage =
+ prepareBgpNotification(errorCode, errorSubcode, data);
+ ctx.getChannel().write(txMessage);
+ closeChannel(ctx);
+ }
+
+ /**
+ * Applies the appropriate actions after detecting BGP UPDATE
+ * Unrecognized Well-known Attribute Error: send NOTIFICATION and close
+ * the channel.
+ *
+ * @param ctx the Channel Handler Context
+ * @param attrTypeCode the attribute type code
+ * @param attrLen the attribute length (in octets)
+ * @param attrFlags the attribute flags
+ * @param message the message with the data
+ */
+ private void actionsBgpUpdateUnrecognizedWellKnownAttribute(
+ ChannelHandlerContext ctx,
+ int attrTypeCode,
+ int attrLen,
+ int attrFlags,
+ ChannelBuffer message) {
+ log.debug("BGP RX UPDATE Error from {}: " +
+ "Unrecognized Well-known Attribute Error: {}",
+ remoteAddress, attrTypeCode);
+
+ //
+ // ERROR: Unrecognized Well-known Attribute
+ //
+ // Send NOTIFICATION and close the connection
+ int errorCode = UpdateMessageError.ERROR_CODE;
+ int errorSubcode =
+ UpdateMessageError.UNRECOGNIZED_WELL_KNOWN_ATTRIBUTE;
+ ChannelBuffer data =
+ prepareBgpUpdateNotificationDataPayload(attrTypeCode, attrLen,
+ attrFlags, message);
+ ChannelBuffer txMessage =
+ prepareBgpNotification(errorCode, errorSubcode, data);
+ ctx.getChannel().write(txMessage);
+ closeChannel(ctx);
+ }
+
+ /**
+ * Applies the appropriate actions after detecting BGP UPDATE
+ * Attribute Length Error: send NOTIFICATION and close the channel.
+ *
+ * @param ctx the Channel Handler Context
+ * @param attrTypeCode the attribute type code
+ * @param attrLen the attribute length (in octets)
+ * @param attrFlags the attribute flags
+ * @param message the message with the data
+ */
+ private void actionsBgpUpdateAttributeLengthError(
+ ChannelHandlerContext ctx,
+ int attrTypeCode,
+ int attrLen,
+ int attrFlags,
+ ChannelBuffer message) {
+ log.debug("BGP RX UPDATE Error from {}: Attribute Length Error",
+ remoteAddress);
+
+ //
+ // ERROR: Attribute Length Error
+ //
+ // Send NOTIFICATION and close the connection
+ int errorCode = UpdateMessageError.ERROR_CODE;
+ int errorSubcode = UpdateMessageError.ATTRIBUTE_LENGTH_ERROR;
+ ChannelBuffer data =
+ prepareBgpUpdateNotificationDataPayload(attrTypeCode, attrLen,
+ attrFlags, message);
+ ChannelBuffer txMessage =
+ prepareBgpNotification(errorCode, errorSubcode, data);
+ ctx.getChannel().write(txMessage);
+ closeChannel(ctx);
+ }
+
+ /**
+ * Applies the appropriate actions after detecting BGP UPDATE
+ * Malformed AS_PATH Error: send NOTIFICATION and close the channel.
+ *
+ * @param ctx the Channel Handler Context
+ */
+ private void actionsBgpUpdateMalformedAsPath(
+ ChannelHandlerContext ctx) {
+ log.debug("BGP RX UPDATE Error from {}: Malformed AS Path",
+ remoteAddress);
+
+ //
+ // ERROR: Malformed AS_PATH
+ //
+ // Send NOTIFICATION and close the connection
+ int errorCode = UpdateMessageError.ERROR_CODE;
+ int errorSubcode = UpdateMessageError.MALFORMED_AS_PATH;
+ ChannelBuffer txMessage =
+ prepareBgpNotification(errorCode, errorSubcode, null);
+ ctx.getChannel().write(txMessage);
+ closeChannel(ctx);
+ }
+
+ /**
+ * Processes BGP NOTIFICATION message.
+ *
+ * @param ctx the Channel Handler Context
+ * @param message the message to process
+ */
+ void processBgpNotification(ChannelHandlerContext ctx,
+ ChannelBuffer message) {
+ int minLength =
+ BgpConstants.BGP_NOTIFICATION_MIN_LENGTH - BgpConstants.BGP_HEADER_LENGTH;
+ if (message.readableBytes() < minLength) {
+ log.debug("BGP RX NOTIFICATION Error from {}: " +
+ "Message length {} too short. Must be at least {}",
+ remoteAddress, message.readableBytes(), minLength);
+ //
+ // ERROR: Bad Message Length
+ //
+ // NOTE: We do NOT send NOTIFICATION in response to a notification
+ return;
+ }
+
+ //
+ // Parse the NOTIFICATION message
+ //
+ int errorCode = message.readUnsignedByte();
+ int errorSubcode = message.readUnsignedByte();
+ int dataLength = message.readableBytes();
+
+ log.debug("BGP RX NOTIFICATION message from {}: Error Code {} " +
+ "Error Subcode {} Data Length {}",
+ remoteAddress, errorCode, errorSubcode, dataLength);
+
+ //
+ // NOTE: If the peer sent a NOTIFICATION, we leave it to the peer to
+ // close the connection.
+ //
+
+ // Start the Session Timeout timer
+ restartSessionTimeoutTimer(ctx);
+ }
+
+ /**
+ * Processes BGP KEEPALIVE message.
+ *
+ * @param ctx the Channel Handler Context
+ * @param message the message to process
+ */
+ void processBgpKeepalive(ChannelHandlerContext ctx,
+ ChannelBuffer message) {
+ if (message.readableBytes() + BgpConstants.BGP_HEADER_LENGTH !=
+ BgpConstants.BGP_KEEPALIVE_EXPECTED_LENGTH) {
+ log.debug("BGP RX KEEPALIVE Error from {}: " +
+ "Invalid total message length {}. Expected {}",
+ remoteAddress,
+ message.readableBytes() + BgpConstants.BGP_HEADER_LENGTH,
+ BgpConstants.BGP_KEEPALIVE_EXPECTED_LENGTH);
+ //
+ // ERROR: Bad Message Length
+ //
+ // Send NOTIFICATION and close the connection
+ ChannelBuffer txMessage = prepareBgpNotificationBadMessageLength(
+ message.readableBytes() + BgpConstants.BGP_HEADER_LENGTH);
+ ctx.getChannel().write(txMessage);
+ closeChannel(ctx);
+ return;
+ }
+
+ //
+ // Parse the KEEPALIVE message: nothing to do
+ //
+ log.debug("BGP RX KEEPALIVE message from {}", remoteAddress);
+
+ // Start the Session Timeout timer
+ restartSessionTimeoutTimer(ctx);
+ }
+
+ /**
+ * Prepares BGP OPEN message.
+ *
+ * @return the message to transmit (BGP header included)
+ */
+ private ChannelBuffer prepareBgpOpen() {
+ ChannelBuffer message =
+ ChannelBuffers.buffer(BgpConstants.BGP_MESSAGE_MAX_LENGTH);
+
+ //
+ // Prepare the OPEN message payload
+ //
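+        // Fields: Version (1 octet), My Autonomous System (2 octets),
+        // Hold Time (2 octets), BGP Identifier (4 octets),
+        // Optional Parameters Length (1 octet, zero here)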
+ message.writeByte(localBgpVersion);
+ message.writeShort((int) localAs);
+ message.writeShort((int) localHoldtime);
+ message.writeInt(bgpSessionManager.getMyBgpId().toInt());
+ message.writeByte(0); // No Optional Parameters
+ return prepareBgpMessage(BgpConstants.BGP_TYPE_OPEN, message);
+ }
+
+ /**
+ * Prepares BGP KEEPALIVE message.
+ *
+ * @return the message to transmit (BGP header included)
+ */
+ private ChannelBuffer prepareBgpKeepalive() {
+ ChannelBuffer message =
+ ChannelBuffers.buffer(BgpConstants.BGP_MESSAGE_MAX_LENGTH);
+
+ //
+ // Prepare the KEEPALIVE message payload: nothing to do
+ //
+ return prepareBgpMessage(BgpConstants.BGP_TYPE_KEEPALIVE, message);
+ }
+
+ /**
+ * Prepares BGP NOTIFICATION message.
+ *
+ * @param errorCode the BGP NOTIFICATION Error Code
+ * @param errorSubcode the BGP NOTIFICATION Error Subcode if applicable,
+ * otherwise BgpConstants.Notifications.ERROR_SUBCODE_UNSPECIFIC
+     * @param data the BGP NOTIFICATION Data if applicable, otherwise null
+ * @return the message to transmit (BGP header included)
+ */
+ ChannelBuffer prepareBgpNotification(int errorCode, int errorSubcode,
+ ChannelBuffer data) {
+ ChannelBuffer message =
+ ChannelBuffers.buffer(BgpConstants.BGP_MESSAGE_MAX_LENGTH);
+
+ //
+ // Prepare the NOTIFICATION message payload
+ //
+ message.writeByte(errorCode);
+ message.writeByte(errorSubcode);
+ if (data != null) {
+ message.writeBytes(data);
+ }
+ return prepareBgpMessage(BgpConstants.BGP_TYPE_NOTIFICATION, message);
+ }
+
+ /**
+ * Prepares BGP NOTIFICATION message: Bad Message Length.
+ *
+ * @param length the erroneous Length field
+ * @return the message to transmit (BGP header included)
+ */
+ ChannelBuffer prepareBgpNotificationBadMessageLength(int length) {
+ int errorCode = MessageHeaderError.ERROR_CODE;
+ int errorSubcode = MessageHeaderError.BAD_MESSAGE_LENGTH;
+ ChannelBuffer data = ChannelBuffers.buffer(2);
+ data.writeShort(length);
+
+ return prepareBgpNotification(errorCode, errorSubcode, data);
+ }
+
+ /**
+ * Prepares BGP UPDATE Notification data payload.
+ *
+ * @param attrTypeCode the attribute type code
+ * @param attrLen the attribute length (in octets)
+ * @param attrFlags the attribute flags
+ * @param message the message with the data
+ * @return the buffer with the data payload for the BGP UPDATE Notification
+ */
+ private ChannelBuffer prepareBgpUpdateNotificationDataPayload(
+ int attrTypeCode,
+ int attrLen,
+ int attrFlags,
+ ChannelBuffer message) {
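+        //
+        // The payload carries the erroneous attribute: the Attribute Type
+        // octet, the Attribute Length field (1 or 2 octets depending on the
+        // Extended Length flag), and the attribute value itself
+        //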
+ // Compute the attribute length field octets
+ boolean extendedLengthBit = ((0x10 & attrFlags) != 0);
+ int attrLenOctets = 1;
+ if (extendedLengthBit) {
+ attrLenOctets = 2;
+ }
+ ChannelBuffer data =
+ ChannelBuffers.buffer(attrLen + attrLenOctets + 1);
+ data.writeByte(attrTypeCode);
+ if (extendedLengthBit) {
+ data.writeShort(attrLen);
+ } else {
+ data.writeByte(attrLen);
+ }
+ data.writeBytes(message, attrLen);
+ return data;
+ }
+
+ /**
+ * Prepares BGP message.
+ *
+ * @param type the BGP message type
+ * @param payload the message payload to transmit (BGP header excluded)
+ * @return the message to transmit (BGP header included)
+ */
+ private ChannelBuffer prepareBgpMessage(int type, ChannelBuffer payload) {
+ ChannelBuffer message =
+ ChannelBuffers.buffer(BgpConstants.BGP_HEADER_LENGTH +
+ payload.readableBytes());
+
+ // Write the marker
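+        // (BGP_HEADER_MARKER_LENGTH octets, each set to all ones)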
+ for (int i = 0; i < BgpConstants.BGP_HEADER_MARKER_LENGTH; i++) {
+ message.writeByte(0xff);
+ }
+
+ // Write the rest of the BGP header
+ message.writeShort(BgpConstants.BGP_HEADER_LENGTH +
+ payload.readableBytes());
+ message.writeByte(type);
+
+ // Write the payload
+ message.writeBytes(payload);
+ return message;
+ }
+
+ /**
+     * Restarts the BGP KeepaliveTimer.
+     *
+     * @param ctx the Channel Handler Context to use
+     */
+ private void restartKeepaliveTimer(ChannelHandlerContext ctx) {
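+        // NOTE: The timeout is one-shot; TransmitKeepaliveTask re-arms it
+        // after each KEEPALIVE transmission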
+ if (localKeepaliveInterval == 0) {
+ return; // Nothing to do
+ }
+ keepaliveTimeout = timer.newTimeout(new TransmitKeepaliveTask(ctx),
+ localKeepaliveInterval,
+ TimeUnit.SECONDS);
+ }
+
+ /**
+ * Task class for transmitting KEEPALIVE messages.
+ */
+ private final class TransmitKeepaliveTask implements TimerTask {
+ private final ChannelHandlerContext ctx;
+
+ /**
+ * Constructor for given Channel Handler Context.
+ *
+ * @param ctx the Channel Handler Context to use
+ */
+ TransmitKeepaliveTask(ChannelHandlerContext ctx) {
+ this.ctx = ctx;
+ }
+
+ @Override
+ public void run(Timeout timeout) throws Exception {
+ if (timeout.isCancelled()) {
+ return;
+ }
+ if (!ctx.getChannel().isOpen()) {
+ return;
+ }
+
+ // Transmit the KEEPALIVE
+ ChannelBuffer txMessage = prepareBgpKeepalive();
+ ctx.getChannel().write(txMessage);
+
+ // Restart the KEEPALIVE timer
+ restartKeepaliveTimer(ctx);
+ }
+ }
+
+ /**
+     * Restarts the BGP Session Timeout Timer.
+     *
+     * @param ctx the Channel Handler Context to use
+     */
+ private void restartSessionTimeoutTimer(ChannelHandlerContext ctx) {
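+        // NOTE: Cancels any pending timeout and re-arms it with the peer's
+        // Hold Time; a zero Hold Time disables the timer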
+ if (remoteHoldtime == 0) {
+ return; // Nothing to do
+ }
+ if (sessionTimeout != null) {
+ sessionTimeout.cancel();
+ }
+ sessionTimeout = timer.newTimeout(new SessionTimeoutTask(ctx),
+ remoteHoldtime,
+ TimeUnit.SECONDS);
+ }
+
+ /**
+ * Task class for BGP Session timeout.
+ */
+ private final class SessionTimeoutTask implements TimerTask {
+ private final ChannelHandlerContext ctx;
+
+ /**
+ * Constructor for given Channel Handler Context.
+ *
+ * @param ctx the Channel Handler Context to use
+ */
+ SessionTimeoutTask(ChannelHandlerContext ctx) {
+ this.ctx = ctx;
+ }
+
+ @Override
+ public void run(Timeout timeout) throws Exception {
+ if (timeout.isCancelled()) {
+ return;
+ }
+ if (!ctx.getChannel().isOpen()) {
+ return;
+ }
+
+ log.debug("BGP Session Timeout: peer {}", remoteAddress);
+ //
+            // ERROR: BGP Hold Timer Expired
+ //
+ // Send NOTIFICATION and close the connection
+ int errorCode = HoldTimerExpired.ERROR_CODE;
+ int errorSubcode = Notifications.ERROR_SUBCODE_UNSPECIFIC;
+ ChannelBuffer txMessage =
+ prepareBgpNotification(errorCode, errorSubcode, null);
+ ctx.getChannel().write(txMessage);
+ closeChannel(ctx);
+ }
+ }
+
+ /**
+ * An exception indicating a parsing error of the BGP message.
+ */
+ private static class BgpParseException extends Exception {
+ /**
+ * Default constructor.
+ */
+ public BgpParseException() {
+ super();
+ }
+
+ /**
+ * Constructor for a specific exception details message.
+ *
+ * @param message the message with the exception details
+ */
+ public BgpParseException(String message) {
+ super(message);
+ }
+ }
+}
diff --git a/apps/sdnip/src/main/java/org/onlab/onos/sdnip/bgp/BgpSessionManager.java b/apps/sdnip/src/main/java/org/onlab/onos/sdnip/bgp/BgpSessionManager.java
new file mode 100644
index 0000000..097c002
--- /dev/null
+++ b/apps/sdnip/src/main/java/org/onlab/onos/sdnip/bgp/BgpSessionManager.java
@@ -0,0 +1,355 @@
+package org.onlab.onos.sdnip.bgp;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.net.SocketAddress;
+import java.util.Collection;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.Executors;
+
+import org.jboss.netty.bootstrap.ServerBootstrap;
+import org.jboss.netty.channel.Channel;
+import org.jboss.netty.channel.ChannelException;
+import org.jboss.netty.channel.ChannelFactory;
+import org.jboss.netty.channel.ChannelPipeline;
+import org.jboss.netty.channel.ChannelPipelineFactory;
+import org.jboss.netty.channel.Channels;
+import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory;
+import org.onlab.onos.sdnip.RouteListener;
+import org.onlab.onos.sdnip.RouteUpdate;
+import org.onlab.packet.IpAddress;
+import org.onlab.packet.IpPrefix;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * BGP Session Manager class.
+ */
+public class BgpSessionManager {
+ private static final Logger log =
+ LoggerFactory.getLogger(BgpSessionManager.class);
+ private Channel serverChannel; // Listener for incoming BGP connections
+ private ConcurrentMap<SocketAddress, BgpSession> bgpSessions =
+ new ConcurrentHashMap<>();
+ private IpAddress myBgpId; // Same BGP ID for all peers
+
+ private BgpRouteSelector bgpRouteSelector = new BgpRouteSelector();
+ private ConcurrentMap<IpPrefix, BgpRouteEntry> bgpRoutes =
+ new ConcurrentHashMap<>();
+
+ private final RouteListener routeListener;
+
+ /**
+ * Constructor for given route listener.
+ *
+ * @param routeListener the route listener to use
+ */
+ public BgpSessionManager(RouteListener routeListener) {
+ this.routeListener = checkNotNull(routeListener);
+ }
+
+ /**
+ * Gets the BGP sessions.
+ *
+ * @return the BGP sessions
+ */
+ public Collection<BgpSession> getBgpSessions() {
+ return bgpSessions.values();
+ }
+
+ /**
+ * Gets the BGP routes.
+ *
+ * @return the BGP routes
+ */
+ public Collection<BgpRouteEntry> getBgpRoutes() {
+ return bgpRoutes.values();
+ }
+
+ /**
+ * Processes the connection from a BGP peer.
+ *
+ * @param bgpSession the BGP session for the peer
+ * @return true if the connection can be established, otherwise false
+ */
+ boolean peerConnected(BgpSession bgpSession) {
+
+ // Test whether there is already a session from the same remote
+ if (bgpSessions.get(bgpSession.getRemoteAddress()) != null) {
+ return false; // Duplicate BGP session
+ }
+ bgpSessions.put(bgpSession.getRemoteAddress(), bgpSession);
+
+ //
+        // If this is the first connection, set my BGP ID to the local
+        // address of the socket.
+ //
+ if (bgpSession.getLocalAddress() instanceof InetSocketAddress) {
+ InetAddress inetAddr =
+ ((InetSocketAddress) bgpSession.getLocalAddress()).getAddress();
+ IpAddress ip4Address = IpAddress.valueOf(inetAddr.getAddress());
+ updateMyBgpId(ip4Address);
+ }
+ return true;
+ }
+
+ /**
+ * Processes the disconnection from a BGP peer.
+ *
+ * @param bgpSession the BGP session for the peer
+ */
+ void peerDisconnected(BgpSession bgpSession) {
+ bgpSessions.remove(bgpSession.getRemoteAddress());
+ }
+
+ /**
+ * Conditionally updates the local BGP ID if it wasn't set already.
+ * <p/>
+     * NOTE: A BGP instance should use the same BGP ID across all BGP sessions.
+ *
+ * @param ip4Address the IPv4 address to use as BGP ID
+ */
+ private synchronized void updateMyBgpId(IpAddress ip4Address) {
+ if (myBgpId == null) {
+ myBgpId = ip4Address;
+ log.debug("BGP: My BGP ID is {}", myBgpId);
+ }
+ }
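+
+    // Illustrative example of the BGP ID selection above (example values, not
+    // taken from a real deployment): if the first peer connection arrives on
+    // the local socket address 10.0.0.5:179, the BGP ID becomes 10.0.0.5 and
+    // is reused for all subsequent BGP sessions handled by this manager.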
+
+ /**
+ * Gets the local BGP Identifier as an IPv4 address.
+ *
+ * @return the local BGP Identifier as an IPv4 address
+ */
+ IpAddress getMyBgpId() {
+ return myBgpId;
+ }
+
+ /**
+ * Gets the BGP Route Selector.
+ *
+ * @return the BGP Route Selector
+ */
+ BgpRouteSelector getBgpRouteSelector() {
+ return bgpRouteSelector;
+ }
+
+ /**
+ * Starts up BGP Session Manager operation.
+ *
+ * @param listenPortNumber the port number to listen on. By default
+ * it should be BgpConstants.BGP_PORT (179)
+ */
+ public void startUp(int listenPortNumber) {
+ log.debug("BGP Session Manager startUp()");
+
+ ChannelFactory channelFactory =
+ new NioServerSocketChannelFactory(Executors.newCachedThreadPool(),
+ Executors.newCachedThreadPool());
+ ChannelPipelineFactory pipelineFactory = new ChannelPipelineFactory() {
+ @Override
+ public ChannelPipeline getPipeline() throws Exception {
+ // Allocate a new session per connection
+ BgpSession bgpSessionHandler =
+ new BgpSession(BgpSessionManager.this);
+ BgpFrameDecoder bgpFrameDecoder =
+ new BgpFrameDecoder(bgpSessionHandler);
+
+ // Setup the processing pipeline
+ ChannelPipeline pipeline = Channels.pipeline();
+ pipeline.addLast("BgpFrameDecoder", bgpFrameDecoder);
+ pipeline.addLast("BgpSession", bgpSessionHandler);
+ return pipeline;
+ }
+ };
+ InetSocketAddress listenAddress =
+ new InetSocketAddress(listenPortNumber);
+
+ ServerBootstrap serverBootstrap = new ServerBootstrap(channelFactory);
+ // serverBootstrap.setOptions("reuseAddr", true);
+ serverBootstrap.setOption("child.keepAlive", true);
+ serverBootstrap.setOption("child.tcpNoDelay", true);
+ serverBootstrap.setPipelineFactory(pipelineFactory);
+ try {
+ serverChannel = serverBootstrap.bind(listenAddress);
+ } catch (ChannelException e) {
+ log.debug("Exception binding to BGP port {}: ",
+ listenAddress.getPort(), e);
+ }
+ }
+
+ /**
+ * Shuts down the BGP Session Manager operation.
+ */
+ public void shutDown() {
+ // TODO: Complete the implementation: remove routes, etc.
+ if (serverChannel != null) {
+ serverChannel.close();
+ }
+ }
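+
+    // Usage sketch (illustrative only; the actual wiring inside SDN-IP may
+    // differ):
+    //
+    //   BgpSessionManager manager = new BgpSessionManager(routeListener);
+    //   manager.startUp(BgpConstants.BGP_PORT); // listen for incoming peers
+    //   ...                                     // peers connect; route updates
+    //                                           // are pushed to routeListener
+    //   manager.shutDown();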
+
+ /**
+ * Class to receive and process the BGP routes from each BGP Session/Peer.
+ */
+ class BgpRouteSelector {
+ /**
+ * Processes route entry updates: added/updated and deleted route
+ * entries.
+ *
+ * @param bgpSession the BGP session the route entry updates were
+ * received on
+ * @param addedBgpRouteEntries the added/updated route entries to
+ * process
+ * @param deletedBgpRouteEntries the deleted route entries to process
+ */
+ synchronized void routeUpdates(BgpSession bgpSession,
+ Collection<BgpRouteEntry> addedBgpRouteEntries,
+ Collection<BgpRouteEntry> deletedBgpRouteEntries) {
+ //
+ // TODO: Merge the updates from different BGP Peers,
+ // by choosing the best route.
+ //
+
+ // Process the deleted route entries
+ for (BgpRouteEntry bgpRouteEntry : deletedBgpRouteEntries) {
+ processDeletedRoute(bgpSession, bgpRouteEntry);
+ }
+
+ // Process the added/updated route entries
+ for (BgpRouteEntry bgpRouteEntry : addedBgpRouteEntries) {
+ processAddedRoute(bgpSession, bgpRouteEntry);
+ }
+ }
+
+ /**
+ * Processes an added/updated route entry.
+ *
+ * @param bgpSession the BGP session the route entry update was
+ * received on
+ * @param bgpRouteEntry the added/updated route entry
+ */
+ private void processAddedRoute(BgpSession bgpSession,
+ BgpRouteEntry bgpRouteEntry) {
+ RouteUpdate routeUpdate;
+ BgpRouteEntry bestBgpRouteEntry =
+ bgpRoutes.get(bgpRouteEntry.prefix());
+
+ //
+ // Install the new route entry if it is better than the
+ // current best route.
+ //
+ if ((bestBgpRouteEntry == null) ||
+ bgpRouteEntry.isBetterThan(bestBgpRouteEntry)) {
+ bgpRoutes.put(bgpRouteEntry.prefix(), bgpRouteEntry);
+ routeUpdate =
+ new RouteUpdate(RouteUpdate.Type.UPDATE, bgpRouteEntry);
+                // Forward the resulting route update to the Route Listener
+ routeListener.update(routeUpdate);
+ return;
+ }
+
+ //
+ // If the route entry arrived on the same BGP Session as
+ // the current best route, then elect the next best route
+ // and install it.
+ //
+ if (bestBgpRouteEntry.getBgpSession() !=
+ bgpRouteEntry.getBgpSession()) {
+ return;
+ }
+
+ // Find the next best route
+ bestBgpRouteEntry = findBestBgpRoute(bgpRouteEntry.prefix());
+ if (bestBgpRouteEntry == null) {
+ //
+                // TODO: Shouldn't happen. Install the new route as a
+                // precaution.
+ //
+ log.debug("BGP next best route for prefix {} is missing. " +
+ "Adding the route that is currently processed.",
+ bgpRouteEntry.prefix());
+ bestBgpRouteEntry = bgpRouteEntry;
+ }
+ // Install the next best route
+ bgpRoutes.put(bestBgpRouteEntry.prefix(), bestBgpRouteEntry);
+ routeUpdate = new RouteUpdate(RouteUpdate.Type.UPDATE,
+ bestBgpRouteEntry);
+            // Forward the resulting route update to the Route Listener
+ routeListener.update(routeUpdate);
+ }
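+
+        // Note on the fall-through above: if the new route is not better than
+        // the stored best but arrived on the same BGP session as that best,
+        // the stored best can no longer be trusted, so the next best route is
+        // recomputed across all sessions via findBestBgpRoute() and installed.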
+
+ /**
+ * Processes a deleted route entry.
+ *
+ * @param bgpSession the BGP session the route entry update was
+ * received on
+ * @param bgpRouteEntry the deleted route entry
+ */
+ private void processDeletedRoute(BgpSession bgpSession,
+ BgpRouteEntry bgpRouteEntry) {
+ RouteUpdate routeUpdate;
+ BgpRouteEntry bestBgpRouteEntry =
+ bgpRoutes.get(bgpRouteEntry.prefix());
+
+ //
+ // Remove the route entry only if it was the best one.
+            // Install the next best route if it exists.
+ //
+ // NOTE: We intentionally use "==" instead of method equals(),
+            // because we need to check whether this is the same object.
+ //
+ if (bgpRouteEntry != bestBgpRouteEntry) {
+ return; // Nothing to do
+ }
+
+ //
+ // Find the next best route
+ //
+ bestBgpRouteEntry = findBestBgpRoute(bgpRouteEntry.prefix());
+ if (bestBgpRouteEntry != null) {
+ // Install the next best route
+ bgpRoutes.put(bestBgpRouteEntry.prefix(),
+ bestBgpRouteEntry);
+ routeUpdate = new RouteUpdate(RouteUpdate.Type.UPDATE,
+ bestBgpRouteEntry);
+                // Forward the resulting route update to the Route Listener
+ routeListener.update(routeUpdate);
+ return;
+ }
+
+ //
+ // No route found. Remove the route entry
+ //
+ bgpRoutes.remove(bgpRouteEntry.prefix());
+ routeUpdate = new RouteUpdate(RouteUpdate.Type.DELETE,
+ bgpRouteEntry);
+            // Forward the resulting route update to the Route Listener
+ routeListener.update(routeUpdate);
+ }
+
+ /**
+ * Finds the best route entry among all BGP Sessions.
+ *
+ * @param prefix the prefix of the route
+ * @return the best route if found, otherwise null
+ */
+ private BgpRouteEntry findBestBgpRoute(IpPrefix prefix) {
+ BgpRouteEntry bestRoute = null;
+
+ // Iterate across all BGP Sessions and select the best route
+ for (BgpSession bgpSession : bgpSessions.values()) {
+ BgpRouteEntry route = bgpSession.findBgpRouteEntry(prefix);
+ if (route == null) {
+ continue;
+ }
+ if ((bestRoute == null) || route.isBetterThan(bestRoute)) {
+ bestRoute = route;
+ }
+ }
+ return bestRoute;
+ }
+ }
+}
diff --git a/apps/sdnip/src/main/java/org/onlab/onos/sdnip/bgp/package-info.java b/apps/sdnip/src/main/java/org/onlab/onos/sdnip/bgp/package-info.java
new file mode 100644
index 0000000..e39d7e0
--- /dev/null
+++ b/apps/sdnip/src/main/java/org/onlab/onos/sdnip/bgp/package-info.java
@@ -0,0 +1,4 @@
+/**
+ * Implementation of the BGP protocol.
+ */
+package org.onlab.onos.sdnip.bgp;
\ No newline at end of file
diff --git a/apps/sdnip/src/main/java/org/onlab/onos/sdnip/cli/BgpRoutesListCommand.java b/apps/sdnip/src/main/java/org/onlab/onos/sdnip/cli/BgpRoutesListCommand.java
new file mode 100644
index 0000000..63cc305
--- /dev/null
+++ b/apps/sdnip/src/main/java/org/onlab/onos/sdnip/cli/BgpRoutesListCommand.java
@@ -0,0 +1,57 @@
+package org.onlab.onos.sdnip.cli;
+
+import org.apache.karaf.shell.commands.Command;
+import org.onlab.onos.cli.AbstractShellCommand;
+import org.onlab.onos.sdnip.SdnIpService;
+import org.onlab.onos.sdnip.bgp.BgpConstants;
+import org.onlab.onos.sdnip.bgp.BgpRouteEntry;
+
+/**
+ * Command to show the routes learned through BGP.
+ */
+@Command(scope = "onos", name = "bgp-routes",
+ description = "Lists all routes received from BGP")
+public class BgpRoutesListCommand extends AbstractShellCommand {
+
+ private static final String FORMAT =
+ "prefix=%s, nexthop=%s, origin=%s, localpref=%s, med=%s, aspath=%s, bgpid=%s";
+
+ @Override
+ protected void execute() {
+ SdnIpService service = get(SdnIpService.class);
+
+ for (BgpRouteEntry route : service.getBgpRoutes()) {
+ printRoute(route);
+ }
+ }
+
+ private void printRoute(BgpRouteEntry route) {
+ if (route != null) {
+ print(FORMAT, route.prefix(), route.nextHop(),
+ originToString(route.getOrigin()), route.getLocalPref(),
+ route.getMultiExitDisc(), route.getAsPath(),
+ route.getBgpSession().getRemoteBgpId());
+ }
+ }
+
+ private static String originToString(int origin) {
+ String originString = "UNKNOWN";
+
+ switch (origin) {
+ case BgpConstants.Update.Origin.IGP:
+ originString = "IGP";
+ break;
+ case BgpConstants.Update.Origin.EGP:
+ originString = "EGP";
+ break;
+ case BgpConstants.Update.Origin.INCOMPLETE:
+ originString = "INCOMPLETE";
+ break;
+ default:
+ break;
+ }
+
+ return originString;
+ }
+
+}
diff --git a/apps/sdnip/src/main/java/org/onlab/onos/sdnip/cli/RoutesListCommand.java b/apps/sdnip/src/main/java/org/onlab/onos/sdnip/cli/RoutesListCommand.java
new file mode 100644
index 0000000..1af409a
--- /dev/null
+++ b/apps/sdnip/src/main/java/org/onlab/onos/sdnip/cli/RoutesListCommand.java
@@ -0,0 +1,32 @@
+package org.onlab.onos.sdnip.cli;
+
+import org.apache.karaf.shell.commands.Command;
+import org.onlab.onos.cli.AbstractShellCommand;
+import org.onlab.onos.sdnip.RouteEntry;
+import org.onlab.onos.sdnip.SdnIpService;
+
+/**
+ * Command to show the list of routes in SDN-IP's routing table.
+ */
+@Command(scope = "onos", name = "routes",
+ description = "Lists all routes known to SDN-IP")
+public class RoutesListCommand extends AbstractShellCommand {
+
+ private static final String FORMAT =
+ "prefix=%s, nexthop=%s";
+
+ @Override
+ protected void execute() {
+ SdnIpService service = get(SdnIpService.class);
+
+ for (RouteEntry route : service.getRoutes()) {
+ printRoute(route);
+ }
+ }
+
+ private void printRoute(RouteEntry route) {
+ if (route != null) {
+ print(FORMAT, route.prefix(), route.nextHop());
+ }
+ }
+}
diff --git a/apps/sdnip/src/main/resources/OSGI-INF/blueprint/shell-config.xml b/apps/sdnip/src/main/resources/OSGI-INF/blueprint/shell-config.xml
new file mode 100644
index 0000000..62fccec
--- /dev/null
+++ b/apps/sdnip/src/main/resources/OSGI-INF/blueprint/shell-config.xml
@@ -0,0 +1,11 @@
+<blueprint xmlns="http://www.osgi.org/xmlns/blueprint/v1.0.0">
+
+ <command-bundle xmlns="http://karaf.apache.org/xmlns/shell/v1.1.0">
+ <command>
+ <action class="org.onlab.onos.sdnip.cli.BgpRoutesListCommand"/>
+ </command>
+ <command>
+ <action class="org.onlab.onos.sdnip.cli.RoutesListCommand"/>
+ </command>
+ </command-bundle>
+</blueprint>
diff --git a/apps/sdnip/src/test/java/org/onlab/onos/sdnip/PeerConnectivityManagerTest.java b/apps/sdnip/src/test/java/org/onlab/onos/sdnip/PeerConnectivityManagerTest.java
new file mode 100644
index 0000000..0ecea00
--- /dev/null
+++ b/apps/sdnip/src/test/java/org/onlab/onos/sdnip/PeerConnectivityManagerTest.java
@@ -0,0 +1,711 @@
+package org.onlab.onos.sdnip;
+
+import static org.easymock.EasyMock.createMock;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.replay;
+import static org.easymock.EasyMock.reportMatcher;
+import static org.easymock.EasyMock.reset;
+import static org.easymock.EasyMock.verify;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+
+import org.easymock.IArgumentMatcher;
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Test;
+import org.onlab.onos.net.ConnectPoint;
+import org.onlab.onos.net.DeviceId;
+import org.onlab.onos.net.PortNumber;
+import org.onlab.onos.net.flow.DefaultTrafficSelector;
+import org.onlab.onos.net.flow.DefaultTrafficTreatment;
+import org.onlab.onos.net.flow.TrafficSelector;
+import org.onlab.onos.net.flow.TrafficTreatment;
+import org.onlab.onos.net.intent.IntentId;
+import org.onlab.onos.net.intent.IntentService;
+import org.onlab.onos.net.intent.PointToPointIntent;
+import org.onlab.onos.sdnip.bgp.BgpConstants;
+import org.onlab.onos.sdnip.config.BgpPeer;
+import org.onlab.onos.sdnip.config.BgpSpeaker;
+import org.onlab.onos.sdnip.config.Interface;
+import org.onlab.onos.sdnip.config.InterfaceAddress;
+import org.onlab.onos.sdnip.config.SdnIpConfigService;
+import org.onlab.packet.Ethernet;
+import org.onlab.packet.IPv4;
+import org.onlab.packet.IpAddress;
+import org.onlab.packet.IpPrefix;
+import org.onlab.packet.MacAddress;
+
+import com.google.common.collect.Sets;
+
+/**
+ * Unit tests for PeerConnectivityManager interface.
+ */
+public class PeerConnectivityManagerTest {
+
+ private PeerConnectivityManager peerConnectivityManager;
+ private IntentService intentService;
+ private SdnIpConfigService configInfoService;
+ private InterfaceService interfaceService;
+
+ private Map<String, BgpSpeaker> bgpSpeakers;
+ private Map<String, Interface> interfaces;
+ private Map<IpAddress, BgpPeer> peers;
+
+ private Map<String, BgpSpeaker> configuredBgpSpeakers;
+ private Map<String, Interface> configuredInterfaces;
+ private Map<IpAddress, BgpPeer> configuredPeers;
+ private List<PointToPointIntent> intentList;
+
+ private final String dpid1 = "00:00:00:00:00:00:00:01";
+ private final String dpid2 = "00:00:00:00:00:00:00:02";
+
+ private final DeviceId deviceId1 =
+ DeviceId.deviceId(SdnIp.dpidToUri(dpid1));
+ private final DeviceId deviceId2 =
+ DeviceId.deviceId(SdnIp.dpidToUri(dpid2));
+
+ // Interfaces connected to BGP speakers
+ private final ConnectPoint s1Eth100 =
+ new ConnectPoint(deviceId1, PortNumber.portNumber(100));
+ private final ConnectPoint s2Eth100 =
+ new ConnectPoint(deviceId2, PortNumber.portNumber(100));
+
+ // Interfaces connected to BGP peers
+ private final ConnectPoint s1Eth1 =
+ new ConnectPoint(deviceId1, PortNumber.portNumber(1));
+ private final ConnectPoint s2Eth1 =
+ new ConnectPoint(deviceId2, PortNumber.portNumber(1));
+
+ // We don't compare the intent ID so all expected intents can use the same ID
+ private final IntentId testIntentId = new IntentId(0);
+
+ private final TrafficTreatment noTreatment =
+ DefaultTrafficTreatment.builder().build();
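+
+    // Test topology summary (as configured by the setUp*() methods below):
+    //   - bgpSpeaker1 and bgpSpeaker2 attach to switch 1 port 100 (s1Eth100);
+    //     bgpSpeaker3 attaches to switch 2 port 100 (s2Eth100).
+    //   - BGP peer 192.168.10.1 is reached via switch 1 port 1 (s1Eth1);
+    //     peers 192.168.20.1 and 192.168.20.2 share switch 2 port 1 (s2Eth1).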
+
+ @Before
+ public void setUp() throws Exception {
+ bgpSpeakers = Collections.unmodifiableMap(setUpBgpSpeakers());
+ interfaces = Collections.unmodifiableMap(setUpInterfaces());
+ peers = Collections.unmodifiableMap(setUpPeers());
+
+ initPeerConnectivity();
+ intentList = setUpIntentList();
+ }
+
+ /**
+ * Sets up BGP speakers.
+ *
+ * @return configured BGP speakers as a map from speaker name to speaker
+ */
+ private Map<String, BgpSpeaker> setUpBgpSpeakers() {
+
+ configuredBgpSpeakers = new HashMap<>();
+
+ BgpSpeaker bgpSpeaker1 = new BgpSpeaker(
+ "bgpSpeaker1",
+ "00:00:00:00:00:00:00:01", 100,
+ "00:00:00:00:00:01");
+ List<InterfaceAddress> interfaceAddresses1 =
+ new LinkedList<InterfaceAddress>();
+ interfaceAddresses1.add(new InterfaceAddress(dpid1, 1, "192.168.10.101"));
+ interfaceAddresses1.add(new InterfaceAddress(dpid2, 1, "192.168.20.101"));
+ bgpSpeaker1.setInterfaceAddresses(interfaceAddresses1);
+ configuredBgpSpeakers.put(bgpSpeaker1.name(), bgpSpeaker1);
+
+        // BGP speaker2 is attached to the same switch port as speaker1
+ BgpSpeaker bgpSpeaker2 = new BgpSpeaker(
+ "bgpSpeaker2",
+ "00:00:00:00:00:00:00:01", 100,
+ "00:00:00:00:00:02");
+ List<InterfaceAddress> interfaceAddresses2 =
+ new LinkedList<InterfaceAddress>();
+ interfaceAddresses2.add(new InterfaceAddress(dpid1, 1, "192.168.10.102"));
+ interfaceAddresses2.add(new InterfaceAddress(dpid2, 1, "192.168.20.102"));
+ bgpSpeaker2.setInterfaceAddresses(interfaceAddresses2);
+ configuredBgpSpeakers.put(bgpSpeaker2.name(), bgpSpeaker2);
+
+ BgpSpeaker bgpSpeaker3 = new BgpSpeaker(
+ "bgpSpeaker3",
+ "00:00:00:00:00:00:00:02", 100,
+ "00:00:00:00:00:03");
+ List<InterfaceAddress> interfaceAddresses3 =
+ new LinkedList<InterfaceAddress>();
+ interfaceAddresses3.add(new InterfaceAddress(dpid1, 1, "192.168.10.103"));
+ interfaceAddresses3.add(new InterfaceAddress(dpid2, 1, "192.168.20.103"));
+ bgpSpeaker3.setInterfaceAddresses(interfaceAddresses3);
+ configuredBgpSpeakers.put(bgpSpeaker3.name(), bgpSpeaker3);
+
+ return configuredBgpSpeakers;
+ }
+
+ /**
+     * Sets up logical interfaces, which emulate the interfaces configured
+     * in the SDN-IP application.
+     *
+     * @return configured interfaces as a map from interface name to Interface
+ */
+ private Map<String, Interface> setUpInterfaces() {
+
+ configuredInterfaces = new HashMap<>();
+
+ String interfaceSw1Eth1 = "s1-eth1";
+ Interface intfsw1eth1 = new Interface(s1Eth1,
+ Collections.singleton(IpPrefix.valueOf("192.168.10.0/24")),
+ MacAddress.valueOf("00:00:00:00:00:01"));
+
+ configuredInterfaces.put(interfaceSw1Eth1, intfsw1eth1);
+ String interfaceSw2Eth1 = "s2-eth1";
+ Interface intfsw2eth1 = new Interface(s2Eth1,
+ Collections.singleton(IpPrefix.valueOf("192.168.20.0/24")),
+ MacAddress.valueOf("00:00:00:00:00:02"));
+ configuredInterfaces.put(interfaceSw2Eth1, intfsw2eth1);
+
+ interfaceService = createMock(InterfaceService.class);
+
+ expect(interfaceService.getInterface(s1Eth1))
+ .andReturn(intfsw1eth1).anyTimes();
+ expect(interfaceService.getInterface(s2Eth1))
+ .andReturn(intfsw2eth1).anyTimes();
+
+ // Non-existent interface used during one of the tests
+ expect(interfaceService.getInterface(new ConnectPoint(
+ DeviceId.deviceId(SdnIp.dpidToUri("00:00:00:00:00:00:01:00")),
+ PortNumber.portNumber(1))))
+ .andReturn(null).anyTimes();
+
+ expect(interfaceService.getInterfaces()).andReturn(
+ Sets.newHashSet(configuredInterfaces.values())).anyTimes();
+ replay(interfaceService);
+
+ return configuredInterfaces;
+ }
+
+ /**
+ * Sets up BGP daemon peers.
+ *
+     * @return configured BGP peers as a map from peer IP address to BgpPeer
+ */
+ private Map<IpAddress, BgpPeer> setUpPeers() {
+
+ configuredPeers = new HashMap<>();
+
+ String peerSw1Eth1 = "192.168.10.1";
+ configuredPeers.put(IpAddress.valueOf(peerSw1Eth1),
+ new BgpPeer(dpid1, 1, peerSw1Eth1));
+
+ // Two BGP peers are connected to switch 2 port 1.
+ String peer1Sw2Eth1 = "192.168.20.1";
+ configuredPeers.put(IpAddress.valueOf(peer1Sw2Eth1),
+ new BgpPeer(dpid2, 1, peer1Sw2Eth1));
+
+ String peer2Sw2Eth1 = "192.168.20.2";
+ configuredPeers.put(IpAddress.valueOf(peer2Sw2Eth1),
+ new BgpPeer(dpid2, 1, peer2Sw2Eth1));
+
+ return configuredPeers;
+ }
+
+ /**
+     * Sets up the expected point-to-point intent list.
+     *
+     * @return the expected point-to-point intent list
+ */
+ private List<PointToPointIntent> setUpIntentList() {
+
+ intentList = new ArrayList<PointToPointIntent>();
+
+ setUpBgpIntents();
+ setUpIcmpIntents();
+
+ return intentList;
+
+ }
+
+ /**
+     * Constructs a BGP intent and puts it into the intentList.
+     * <p/>
+     * The purpose of this method is to simplify the setUpBgpIntents() method
+     * and to make setUpBgpIntents() easier to read.
+ *
+ * @param srcPrefix source IP prefix to match
+ * @param dstPrefix destination IP prefix to match
+ * @param srcTcpPort source TCP port to match
+ * @param dstTcpPort destination TCP port to match
+ * @param srcConnectPoint source connect point for PointToPointIntent
+ * @param dstConnectPoint destination connect point for PointToPointIntent
+ */
+ private void bgpPathintentConstructor(String srcPrefix, String dstPrefix,
+ Short srcTcpPort, Short dstTcpPort,
+ ConnectPoint srcConnectPoint, ConnectPoint dstConnectPoint) {
+
+ TrafficSelector.Builder builder = DefaultTrafficSelector.builder()
+ .matchEthType(Ethernet.TYPE_IPV4)
+ .matchIPProtocol(IPv4.PROTOCOL_TCP)
+ .matchIPSrc(IpPrefix.valueOf(srcPrefix))
+ .matchIPDst(IpPrefix.valueOf(dstPrefix));
+
+ if (srcTcpPort != null) {
+ builder.matchTcpSrc(srcTcpPort);
+ }
+ if (dstTcpPort != null) {
+ builder.matchTcpDst(dstTcpPort);
+ }
+
+ PointToPointIntent intent = new PointToPointIntent(
+ testIntentId, builder.build(), noTreatment,
+ srcConnectPoint, dstConnectPoint);
+
+ intentList.add(intent);
+ }
+
+ /**
+ * Sets up intents for BGP paths.
+ */
+ private void setUpBgpIntents() {
+
+ Short bgpPort = Short.valueOf((short) BgpConstants.BGP_PORT);
+
+ // Start to build intents between BGP speaker1 and BGP peer1
+ bgpPathintentConstructor(
+ "192.168.10.101/32", "192.168.10.1/32", null, bgpPort,
+ s1Eth100, s1Eth1);
+ bgpPathintentConstructor(
+ "192.168.10.101/32", "192.168.10.1/32", bgpPort, null,
+ s1Eth100, s1Eth1);
+
+ bgpPathintentConstructor(
+ "192.168.10.1/32", "192.168.10.101/32", null, bgpPort,
+ s1Eth1, s1Eth100);
+ bgpPathintentConstructor(
+ "192.168.10.1/32", "192.168.10.101/32", bgpPort, null,
+ s1Eth1, s1Eth100);
+
+ // Start to build intents between BGP speaker1 and BGP peer2
+ bgpPathintentConstructor(
+ "192.168.20.101/32", "192.168.20.1/32", null, bgpPort,
+ s1Eth100, s2Eth1);
+ bgpPathintentConstructor(
+ "192.168.20.101/32", "192.168.20.1/32", bgpPort, null,
+ s1Eth100, s2Eth1);
+
+ bgpPathintentConstructor(
+ "192.168.20.1/32", "192.168.20.101/32", null, bgpPort,
+ s2Eth1, s1Eth100);
+ bgpPathintentConstructor(
+ "192.168.20.1/32", "192.168.20.101/32", bgpPort, null,
+ s2Eth1, s1Eth100);
+
+ // Start to build intents between BGP speaker1 and BGP peer3
+ bgpPathintentConstructor(
+ "192.168.20.101/32", "192.168.20.2/32", null, bgpPort,
+ s1Eth100, s2Eth1);
+ bgpPathintentConstructor(
+ "192.168.20.101/32", "192.168.20.2/32", bgpPort, null,
+ s1Eth100, s2Eth1);
+
+ bgpPathintentConstructor(
+ "192.168.20.2/32", "192.168.20.101/32", null, bgpPort,
+ s2Eth1, s1Eth100);
+ bgpPathintentConstructor(
+ "192.168.20.2/32", "192.168.20.101/32", bgpPort, null,
+ s2Eth1, s1Eth100);
+
+ //
+ // Start to build intents between BGP speaker2 and BGP peer1
+ bgpPathintentConstructor(
+ "192.168.10.102/32", "192.168.10.1/32", null, bgpPort,
+ s1Eth100, s1Eth1);
+ bgpPathintentConstructor(
+ "192.168.10.102/32", "192.168.10.1/32", bgpPort, null,
+ s1Eth100, s1Eth1);
+
+ bgpPathintentConstructor(
+ "192.168.10.1/32", "192.168.10.102/32", null, bgpPort,
+ s1Eth1, s1Eth100);
+ bgpPathintentConstructor(
+ "192.168.10.1/32", "192.168.10.102/32", bgpPort, null,
+ s1Eth1, s1Eth100);
+ // Start to build intents between BGP speaker2 and BGP peer2
+ bgpPathintentConstructor(
+ "192.168.20.102/32", "192.168.20.1/32", null, bgpPort,
+ s1Eth100, s2Eth1);
+ bgpPathintentConstructor(
+ "192.168.20.102/32", "192.168.20.1/32", bgpPort, null,
+ s1Eth100, s2Eth1);
+
+ bgpPathintentConstructor(
+ "192.168.20.1/32", "192.168.20.102/32", null, bgpPort,
+ s2Eth1, s1Eth100);
+ bgpPathintentConstructor(
+ "192.168.20.1/32", "192.168.20.102/32", bgpPort, null,
+ s2Eth1, s1Eth100);
+
+ // Start to build intents between BGP speaker2 and BGP peer3
+ bgpPathintentConstructor(
+ "192.168.20.102/32", "192.168.20.2/32", null, bgpPort,
+ s1Eth100, s2Eth1);
+ bgpPathintentConstructor(
+ "192.168.20.102/32", "192.168.20.2/32", bgpPort, null,
+ s1Eth100, s2Eth1);
+
+ bgpPathintentConstructor(
+ "192.168.20.2/32", "192.168.20.102/32", null, bgpPort,
+ s2Eth1, s1Eth100);
+ bgpPathintentConstructor(
+ "192.168.20.2/32", "192.168.20.102/32", bgpPort, null,
+ s2Eth1, s1Eth100);
+
+ //
+ // Start to build intents between BGP speaker3 and BGP peer1
+ bgpPathintentConstructor(
+ "192.168.10.103/32", "192.168.10.1/32", null, bgpPort,
+ s2Eth100, s1Eth1);
+ bgpPathintentConstructor(
+ "192.168.10.103/32", "192.168.10.1/32", bgpPort, null,
+ s2Eth100, s1Eth1);
+
+ bgpPathintentConstructor(
+ "192.168.10.1/32", "192.168.10.103/32", null, bgpPort,
+ s1Eth1, s2Eth100);
+ bgpPathintentConstructor(
+ "192.168.10.1/32", "192.168.10.103/32", bgpPort, null,
+ s1Eth1, s2Eth100);
+
+ // Start to build intents between BGP speaker3 and BGP peer2
+ bgpPathintentConstructor(
+ "192.168.20.103/32", "192.168.20.1/32", null, bgpPort,
+ s2Eth100, s2Eth1);
+ bgpPathintentConstructor(
+ "192.168.20.103/32", "192.168.20.1/32", bgpPort, null,
+ s2Eth100, s2Eth1);
+
+ bgpPathintentConstructor(
+ "192.168.20.1/32", "192.168.20.103/32", null, bgpPort,
+ s2Eth1, s2Eth100);
+ bgpPathintentConstructor(
+ "192.168.20.1/32", "192.168.20.103/32", bgpPort, null,
+ s2Eth1, s2Eth100);
+
+ // Start to build intents between BGP speaker3 and BGP peer3
+ bgpPathintentConstructor(
+ "192.168.20.103/32", "192.168.20.2/32", null, bgpPort,
+ s2Eth100, s2Eth1);
+ bgpPathintentConstructor(
+ "192.168.20.103/32", "192.168.20.2/32", bgpPort, null,
+ s2Eth100, s2Eth1);
+
+ bgpPathintentConstructor(
+ "192.168.20.2/32", "192.168.20.103/32", null, bgpPort,
+ s2Eth1, s2Eth100);
+ bgpPathintentConstructor(
+ "192.168.20.2/32", "192.168.20.103/32", bgpPort, null,
+ s2Eth1, s2Eth100);
+ }
+
+ /**
+     * Constructs an ICMP intent and puts it into the intentList.
+     * <p/>
+     * The purpose of this method is to simplify the setUpIcmpIntents() method
+     * and to make setUpIcmpIntents() easier to read.
+ *
+ * @param srcPrefix source IP prefix to match
+ * @param dstPrefix destination IP prefix to match
+ * @param srcConnectPoint source connect point for PointToPointIntent
+ * @param dstConnectPoint destination connect point for PointToPointIntent
+ */
+ private void icmpPathintentConstructor(String srcPrefix, String dstPrefix,
+ ConnectPoint srcConnectPoint, ConnectPoint dstConnectPoint) {
+
+ TrafficSelector selector = DefaultTrafficSelector.builder()
+ .matchEthType(Ethernet.TYPE_IPV4)
+ .matchIPProtocol(IPv4.PROTOCOL_ICMP)
+ .matchIPSrc(IpPrefix.valueOf(srcPrefix))
+ .matchIPDst(IpPrefix.valueOf(dstPrefix))
+ .build();
+
+ PointToPointIntent intent = new PointToPointIntent(
+ testIntentId, selector, noTreatment,
+ srcConnectPoint, dstConnectPoint);
+
+ intentList.add(intent);
+ }
+
+ /**
+ * Sets up intents for ICMP paths.
+ */
+ private void setUpIcmpIntents() {
+
+ // Start to build intents between BGP speaker1 and BGP peer1
+ icmpPathintentConstructor(
+ "192.168.10.101/32", "192.168.10.1/32", s1Eth100, s1Eth1);
+ icmpPathintentConstructor(
+ "192.168.10.1/32", "192.168.10.101/32", s1Eth1, s1Eth100);
+
+ // Start to build intents between BGP speaker1 and BGP peer2
+ icmpPathintentConstructor(
+ "192.168.20.101/32", "192.168.20.1/32", s1Eth100, s2Eth1);
+ icmpPathintentConstructor(
+ "192.168.20.1/32", "192.168.20.101/32", s2Eth1, s1Eth100);
+
+ // Start to build intents between BGP speaker1 and BGP peer3
+ icmpPathintentConstructor(
+ "192.168.20.101/32", "192.168.20.2/32", s1Eth100, s2Eth1);
+ icmpPathintentConstructor(
+ "192.168.20.2/32", "192.168.20.101/32", s2Eth1, s1Eth100);
+
+ //
+ // Start to build intents between BGP speaker2 and BGP peer1
+ icmpPathintentConstructor(
+ "192.168.10.102/32", "192.168.10.1/32", s1Eth100, s1Eth1);
+ icmpPathintentConstructor(
+ "192.168.10.1/32", "192.168.10.102/32", s1Eth1, s1Eth100);
+
+ // Start to build intents between BGP speaker2 and BGP peer2
+ icmpPathintentConstructor(
+ "192.168.20.102/32", "192.168.20.1/32", s1Eth100, s2Eth1);
+ icmpPathintentConstructor(
+ "192.168.20.1/32", "192.168.20.102/32", s2Eth1, s1Eth100);
+
+ // Start to build intents between BGP speaker2 and BGP peer3
+ icmpPathintentConstructor(
+ "192.168.20.102/32", "192.168.20.2/32", s1Eth100, s2Eth1);
+ icmpPathintentConstructor(
+ "192.168.20.2/32", "192.168.20.102/32", s2Eth1, s1Eth100);
+
+ //
+ // Start to build intents between BGP speaker3 and BGP peer1
+ icmpPathintentConstructor(
+ "192.168.10.103/32", "192.168.10.1/32", s2Eth100, s1Eth1);
+ icmpPathintentConstructor(
+ "192.168.10.1/32", "192.168.10.103/32", s1Eth1, s2Eth100);
+
+ // Start to build intents between BGP speaker3 and BGP peer2
+ icmpPathintentConstructor(
+ "192.168.20.103/32", "192.168.20.1/32", s2Eth100, s2Eth1);
+ icmpPathintentConstructor(
+ "192.168.20.1/32", "192.168.20.103/32", s2Eth1, s2Eth100);
+
+ // Start to build intents between BGP speaker3 and BGP peer3
+ icmpPathintentConstructor(
+ "192.168.20.103/32", "192.168.20.2/32", s2Eth100, s2Eth1);
+ icmpPathintentConstructor(
+ "192.168.20.2/32", "192.168.20.103/32", s2Eth1, s2Eth100);
+
+ }
+
+ /**
+ * Initializes peer connectivity testing environment.
+ */
+ private void initPeerConnectivity() {
+
+ configInfoService = createMock(SdnIpConfigService.class);
+ expect(configInfoService.getBgpPeers()).andReturn(peers).anyTimes();
+ expect(configInfoService.getBgpSpeakers()).andReturn(bgpSpeakers).anyTimes();
+ replay(configInfoService);
+
+ intentService = createMock(IntentService.class);
+ replay(intentService);
+
+ peerConnectivityManager = new PeerConnectivityManager(configInfoService,
+ interfaceService, intentService);
+ }
+
+    /**
+     * EasyMock matcher that matches {@link PointToPointIntent}s but
+     * ignores the {@link IntentId} when matching.
+     * <p/>
+     * The normal intent equals() method requires the intent IDs to be equal;
+     * however, in these tests we cannot know the intent IDs in advance, so we
+     * cannot set up expected intents with the correct IDs. The solution is an
+     * EasyMock matcher that verifies that all the value properties of the
+     * provided intent match the expected values, but ignores the intent ID
+     * when testing equality.
+     */
+ private static final class IdAgnosticPointToPointIntentMatcher implements
+ IArgumentMatcher {
+
+ private final PointToPointIntent intent;
+ private String providedIntentString;
+
+ /**
+ * Constructor taking the expected intent to match against.
+ *
+ * @param intent the expected intent
+ */
+ public IdAgnosticPointToPointIntentMatcher(PointToPointIntent intent) {
+ this.intent = intent;
+ }
+
+ @Override
+ public void appendTo(StringBuffer strBuffer) {
+ strBuffer.append("PointToPointIntentMatcher unable to match: "
+ + providedIntentString);
+ }
+
+ @Override
+ public boolean matches(Object object) {
+ if (!(object instanceof PointToPointIntent)) {
+ return false;
+ }
+
+ PointToPointIntent providedIntent = (PointToPointIntent) object;
+ providedIntentString = providedIntent.toString();
+
+ PointToPointIntent matchIntent =
+ new PointToPointIntent(providedIntent.id(),
+ intent.selector(), intent.treatment(),
+ intent.ingressPoint(), intent.egressPoint());
+
+ return matchIntent.equals(providedIntent);
+ }
+ }
+
+ /**
+     * Matcher method to set an expected intent to match against (ignoring
+     * the intent ID).
+ *
+ * @param intent the expected intent
+     * @return null; the matcher is registered with EasyMock as a side effect
+ */
+ private static PointToPointIntent eqExceptId(
+ PointToPointIntent intent) {
+ reportMatcher(new IdAgnosticPointToPointIntentMatcher(intent));
+ return null;
+ }
+
+ /**
+     * Tests whether the peer connectivity manager can set up the correct BGP
+     * and ICMP intents for a specific configuration.
+     * <p/>
+     * Two tricky cases included in the configuration are: two peers on the
+     * same switch port, and a peer on the same switch as the BGP speaker.
+ */
+ @Test
+ public void testConnectionSetup() {
+
+ reset(intentService);
+
+ // Sets up the expected PointToPoint intents.
+ for (int i = 0; i < intentList.size(); i++) {
+ intentService.submit(eqExceptId(intentList.get(i)));
+ }
+
+ replay(intentService);
+
+        // Run the component under test.
+ peerConnectivityManager.start();
+
+ verify(intentService);
+
+ }
+
+ /**
+ * Tests a corner case, when there are no interfaces in the configuration.
+ */
+ @Test
+ public void testNullInterfaces() {
+ reset(interfaceService);
+ expect(interfaceService.getInterfaces()).andReturn(
+ Sets.<Interface>newHashSet()).anyTimes();
+ expect(interfaceService.getInterface(s2Eth1))
+ .andReturn(null).anyTimes();
+ expect(interfaceService.getInterface(s1Eth1))
+ .andReturn(null).anyTimes();
+ replay(interfaceService);
+
+ reset(configInfoService);
+ expect(configInfoService.getBgpPeers()).andReturn(peers).anyTimes();
+ expect(configInfoService.getBgpSpeakers()).andReturn(bgpSpeakers).anyTimes();
+ replay(configInfoService);
+
+ reset(intentService);
+ replay(intentService);
+ peerConnectivityManager.start();
+ verify(intentService);
+ }
+
+ /**
+ * Tests a corner case, when there are no BGP peers in the configuration.
+ */
+ @Test
+ public void testNullBgpPeers() {
+ reset(interfaceService);
+ expect(interfaceService.getInterfaces()).andReturn(
+ Sets.newHashSet(interfaces.values())).anyTimes();
+ replay(interfaceService);
+
+ reset(configInfoService);
+ expect(configInfoService.getBgpPeers()).andReturn(
+ new HashMap<IpAddress, BgpPeer>()).anyTimes();
+ expect(configInfoService.getBgpSpeakers()).andReturn(
+ bgpSpeakers).anyTimes();
+ replay(configInfoService);
+
+ reset(intentService);
+ replay(intentService);
+ peerConnectivityManager.start();
+ verify(intentService);
+ }
+
+ /**
+     * Tests a corner case, when there are no BGP speakers in the configuration.
+ */
+ @Test
+ public void testNullBgpSpeakers() {
+ reset(interfaceService);
+ expect(interfaceService.getInterfaces()).andReturn(
+ Sets.newHashSet(interfaces.values())).anyTimes();
+ replay(interfaceService);
+
+ reset(configInfoService);
+ expect(configInfoService.getBgpPeers()).andReturn(
+ peers).anyTimes();
+ expect(configInfoService.getBgpSpeakers()).andReturn(
+ null).anyTimes();
+ replay(configInfoService);
+
+ reset(intentService);
+ replay(intentService);
+ peerConnectivityManager.start();
+ verify(intentService);
+ }
+
+ /**
+ * Tests a corner case, when there is no Interface configured for one BGP
+ * peer.
+ */
+ @Test
+ public void testNoPeerInterface() {
+ String peerSw100Eth1 = "192.168.200.1";
+ configuredPeers.put(IpAddress.valueOf(peerSw100Eth1),
+ new BgpPeer("00:00:00:00:00:00:01:00", 1, peerSw100Eth1));
+ testConnectionSetup();
+ }
+
+ /**
+ * Tests a corner case, when there is no Interface configured for one BGP
+ * speaker.
+ * TODO: we should add a configuration correctness checking module/method
+ * before testing this corner case.
+ */
+ @Ignore
+ @Test
+ public void testNoSpeakerInterface() {
+ BgpSpeaker bgpSpeaker100 = new BgpSpeaker(
+ "bgpSpeaker100",
+ "00:00:00:00:00:00:01:00", 100,
+ "00:00:00:00:01:00");
+ List<InterfaceAddress> interfaceAddresses100 =
+ new LinkedList<InterfaceAddress>();
+ interfaceAddresses100.add(new InterfaceAddress(dpid1, 1, "192.168.10.201"));
+ interfaceAddresses100.add(new InterfaceAddress(dpid2, 1, "192.168.20.201"));
+ bgpSpeaker100.setInterfaceAddresses(interfaceAddresses100);
+ configuredBgpSpeakers.put(bgpSpeaker100.name(), bgpSpeaker100);
+ testConnectionSetup();
+ }
+}
diff --git a/apps/sdnip/src/test/java/org/onlab/onos/sdnip/RouteEntryTest.java b/apps/sdnip/src/test/java/org/onlab/onos/sdnip/RouteEntryTest.java
new file mode 100644
index 0000000..45371f7
--- /dev/null
+++ b/apps/sdnip/src/test/java/org/onlab/onos/sdnip/RouteEntryTest.java
@@ -0,0 +1,143 @@
+package org.onlab.onos.sdnip;
+
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.not;
+import static org.junit.Assert.assertThat;
+
+import org.junit.Test;
+import org.onlab.packet.IpAddress;
+import org.onlab.packet.IpPrefix;
+
+/**
+ * Unit tests for the RouteEntry class.
+ */
+public class RouteEntryTest {
+ /**
+ * Tests valid class constructor.
+ */
+ @Test
+ public void testConstructor() {
+ IpPrefix prefix = IpPrefix.valueOf("1.2.3.0/24");
+ IpAddress nextHop = IpAddress.valueOf("5.6.7.8");
+
+ RouteEntry routeEntry = new RouteEntry(prefix, nextHop);
+ assertThat(routeEntry.toString(),
+ is("RouteEntry{prefix=1.2.3.0/24, nextHop=5.6.7.8}"));
+ }
+
+ /**
+ * Tests invalid class constructor for null IPv4 prefix.
+ */
+ @Test(expected = NullPointerException.class)
+ public void testInvalidConstructorNullPrefix() {
+ IpPrefix prefix = null;
+ IpAddress nextHop = IpAddress.valueOf("5.6.7.8");
+
+ new RouteEntry(prefix, nextHop);
+ }
+
+ /**
+ * Tests invalid class constructor for null IPv4 next-hop.
+ */
+ @Test(expected = NullPointerException.class)
+ public void testInvalidConstructorNullNextHop() {
+ IpPrefix prefix = IpPrefix.valueOf("1.2.3.0/24");
+ IpAddress nextHop = null;
+
+ new RouteEntry(prefix, nextHop);
+ }
+
+ /**
+ * Tests getting the fields of a route entry.
+ */
+ @Test
+ public void testGetFields() {
+ IpPrefix prefix = IpPrefix.valueOf("1.2.3.0/24");
+ IpAddress nextHop = IpAddress.valueOf("5.6.7.8");
+
+ RouteEntry routeEntry = new RouteEntry(prefix, nextHop);
+ assertThat(routeEntry.prefix(), is(prefix));
+ assertThat(routeEntry.nextHop(), is(nextHop));
+ }
+
+ /**
+ * Tests creating a binary string from IPv4 prefix.
+ */
+ @Test
+ public void testCreateBinaryString() {
+ IpPrefix prefix;
+
+ prefix = IpPrefix.valueOf("0.0.0.0/0");
+ assertThat(RouteEntry.createBinaryString(prefix), is(""));
+
+ prefix = IpPrefix.valueOf("192.168.166.0/22");
+ assertThat(RouteEntry.createBinaryString(prefix),
+ is("1100000010101000101001"));
+
+ prefix = IpPrefix.valueOf("192.168.166.0/23");
+ assertThat(RouteEntry.createBinaryString(prefix),
+ is("11000000101010001010011"));
+
+ prefix = IpPrefix.valueOf("192.168.166.0/24");
+ assertThat(RouteEntry.createBinaryString(prefix),
+ is("110000001010100010100110"));
+
+ prefix = IpPrefix.valueOf("130.162.10.1/25");
+ assertThat(RouteEntry.createBinaryString(prefix),
+ is("1000001010100010000010100"));
+
+ prefix = IpPrefix.valueOf("255.255.255.255/32");
+ assertThat(RouteEntry.createBinaryString(prefix),
+ is("11111111111111111111111111111111"));
+ }
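+
+    // The expected strings above are simply the first prefix-length bits of
+    // each address, most-significant bit first; e.g., 192.168.166.0/22 ->
+    // 11000000 10101000 101001.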
+
+ /**
+ * Tests equality of {@link RouteEntry}.
+ */
+ @Test
+ public void testEquality() {
+ IpPrefix prefix1 = IpPrefix.valueOf("1.2.3.0/24");
+ IpAddress nextHop1 = IpAddress.valueOf("5.6.7.8");
+ RouteEntry routeEntry1 = new RouteEntry(prefix1, nextHop1);
+
+ IpPrefix prefix2 = IpPrefix.valueOf("1.2.3.0/24");
+ IpAddress nextHop2 = IpAddress.valueOf("5.6.7.8");
+ RouteEntry routeEntry2 = new RouteEntry(prefix2, nextHop2);
+
+ assertThat(routeEntry1, is(routeEntry2));
+ }
+
+ /**
+ * Tests non-equality of {@link RouteEntry}.
+ */
+ @Test
+ public void testNonEquality() {
+ IpPrefix prefix1 = IpPrefix.valueOf("1.2.3.0/24");
+ IpAddress nextHop1 = IpAddress.valueOf("5.6.7.8");
+ RouteEntry routeEntry1 = new RouteEntry(prefix1, nextHop1);
+
+ IpPrefix prefix2 = IpPrefix.valueOf("1.2.3.0/25"); // Different
+ IpAddress nextHop2 = IpAddress.valueOf("5.6.7.8");
+ RouteEntry routeEntry2 = new RouteEntry(prefix2, nextHop2);
+
+ IpPrefix prefix3 = IpPrefix.valueOf("1.2.3.0/24");
+ IpAddress nextHop3 = IpAddress.valueOf("5.6.7.9"); // Different
+ RouteEntry routeEntry3 = new RouteEntry(prefix3, nextHop3);
+
+ assertThat(routeEntry1, is(not(routeEntry2)));
+ assertThat(routeEntry1, is(not(routeEntry3)));
+ }
+
+ /**
+ * Tests object string representation.
+ */
+ @Test
+ public void testToString() {
+ IpPrefix prefix = IpPrefix.valueOf("1.2.3.0/24");
+ IpAddress nextHop = IpAddress.valueOf("5.6.7.8");
+ RouteEntry routeEntry = new RouteEntry(prefix, nextHop);
+
+ assertThat(routeEntry.toString(),
+ is("RouteEntry{prefix=1.2.3.0/24, nextHop=5.6.7.8}"));
+ }
+}
diff --git a/apps/sdnip/src/test/java/org/onlab/onos/sdnip/bgp/AsPathTest.java b/apps/sdnip/src/test/java/org/onlab/onos/sdnip/bgp/AsPathTest.java
new file mode 100644
index 0000000..c14c20b
--- /dev/null
+++ b/apps/sdnip/src/test/java/org/onlab/onos/sdnip/bgp/AsPathTest.java
@@ -0,0 +1,169 @@
+package org.onlab.onos.sdnip.bgp;
+
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.not;
+import static org.junit.Assert.assertThat;
+
+import java.util.ArrayList;
+
+import org.junit.Test;
+
+/**
+ * Unit tests for the BgpRouteEntry.AsPath class.
+ */
+public class AsPathTest {
+ /**
+ * Generates an AS Path.
+ *
+ * @return a generated AS Path
+ */
+ private BgpRouteEntry.AsPath generateAsPath() {
+ ArrayList<BgpRouteEntry.PathSegment> pathSegments = new ArrayList<>();
+ byte pathSegmentType1 = (byte) BgpConstants.Update.AsPath.AS_SEQUENCE;
+ ArrayList<Long> segmentAsNumbers1 = new ArrayList<>();
+ segmentAsNumbers1.add((long) 1);
+ segmentAsNumbers1.add((long) 2);
+ segmentAsNumbers1.add((long) 3);
+ BgpRouteEntry.PathSegment pathSegment1 =
+ new BgpRouteEntry.PathSegment(pathSegmentType1, segmentAsNumbers1);
+ pathSegments.add(pathSegment1);
+ //
+ byte pathSegmentType2 = (byte) BgpConstants.Update.AsPath.AS_SET;
+ ArrayList<Long> segmentAsNumbers2 = new ArrayList<>();
+ segmentAsNumbers2.add((long) 4);
+ segmentAsNumbers2.add((long) 5);
+ segmentAsNumbers2.add((long) 6);
+ BgpRouteEntry.PathSegment pathSegment2 =
+ new BgpRouteEntry.PathSegment(pathSegmentType2, segmentAsNumbers2);
+ pathSegments.add(pathSegment2);
+ //
+ BgpRouteEntry.AsPath asPath = new BgpRouteEntry.AsPath(pathSegments);
+
+ return asPath;
+ }
+
+ /**
+ * Tests valid class constructor.
+ */
+ @Test
+ public void testConstructor() {
+ BgpRouteEntry.AsPath asPath = generateAsPath();
+
+ String expectedString =
+ "AsPath{pathSegments=" +
+ "[PathSegment{type=2, segmentAsNumbers=[1, 2, 3]}, " +
+ "PathSegment{type=1, segmentAsNumbers=[4, 5, 6]}]}";
+ assertThat(asPath.toString(), is(expectedString));
+ }
+
+ /**
+ * Tests invalid class constructor for null Path Segments.
+ */
+ @Test(expected = NullPointerException.class)
+ public void testInvalidConstructorNullPathSegments() {
+ ArrayList<BgpRouteEntry.PathSegment> pathSegments = null;
+ new BgpRouteEntry.AsPath(pathSegments);
+ }
+
+ /**
+ * Tests getting the fields of an AS Path.
+ */
+ @Test
+ public void testGetFields() {
+ // Create the fields to compare against
+ ArrayList<BgpRouteEntry.PathSegment> pathSegments = new ArrayList<>();
+ byte pathSegmentType1 = (byte) BgpConstants.Update.AsPath.AS_SEQUENCE;
+ ArrayList<Long> segmentAsNumbers1 = new ArrayList<>();
+ segmentAsNumbers1.add((long) 1);
+ segmentAsNumbers1.add((long) 2);
+ segmentAsNumbers1.add((long) 3);
+ BgpRouteEntry.PathSegment pathSegment1 =
+ new BgpRouteEntry.PathSegment(pathSegmentType1, segmentAsNumbers1);
+ pathSegments.add(pathSegment1);
+ //
+ byte pathSegmentType2 = (byte) BgpConstants.Update.AsPath.AS_SET;
+ ArrayList<Long> segmentAsNumbers2 = new ArrayList<>();
+ segmentAsNumbers2.add((long) 4);
+ segmentAsNumbers2.add((long) 5);
+ segmentAsNumbers2.add((long) 6);
+ BgpRouteEntry.PathSegment pathSegment2 =
+ new BgpRouteEntry.PathSegment(pathSegmentType2, segmentAsNumbers2);
+ pathSegments.add(pathSegment2);
+
+ // Generate the entry to test
+ BgpRouteEntry.AsPath asPath = generateAsPath();
+
+ assertThat(asPath.getPathSegments(), is(pathSegments));
+ }
+
+ /**
+ * Tests getting the AS Path Length.
+ */
+ @Test
+ public void testGetAsPathLength() {
+ BgpRouteEntry.AsPath asPath = generateAsPath();
+ assertThat(asPath.getAsPathLength(), is(4));
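+        // The expected length of 4 is consistent with the usual BGP rule that
+        // an AS_SET counts as one hop while an AS_SEQUENCE contributes one per
+        // AS number (3 + 1 here).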
+
+ // Create an empty AS Path
+ ArrayList<BgpRouteEntry.PathSegment> pathSegments = new ArrayList<>();
+ asPath = new BgpRouteEntry.AsPath(pathSegments);
+ assertThat(asPath.getAsPathLength(), is(0));
+ }
+
+ /**
+ * Tests equality of {@link BgpRouteEntry.AsPath}.
+ */
+ @Test
+ public void testEquality() {
+ BgpRouteEntry.AsPath asPath1 = generateAsPath();
+ BgpRouteEntry.AsPath asPath2 = generateAsPath();
+
+ assertThat(asPath1, is(asPath2));
+ }
+
+ /**
+ * Tests non-equality of {@link BgpRouteEntry.AsPath}.
+ */
+ @Test
+ public void testNonEquality() {
+ BgpRouteEntry.AsPath asPath1 = generateAsPath();
+
+ // Setup AS Path 2
+ ArrayList<BgpRouteEntry.PathSegment> pathSegments = new ArrayList<>();
+ byte pathSegmentType1 = (byte) BgpConstants.Update.AsPath.AS_SEQUENCE;
+ ArrayList<Long> segmentAsNumbers1 = new ArrayList<>();
+ segmentAsNumbers1.add((long) 1);
+ segmentAsNumbers1.add((long) 2);
+ segmentAsNumbers1.add((long) 3);
+ BgpRouteEntry.PathSegment pathSegment1 =
+ new BgpRouteEntry.PathSegment(pathSegmentType1, segmentAsNumbers1);
+ pathSegments.add(pathSegment1);
+ //
+ byte pathSegmentType2 = (byte) BgpConstants.Update.AsPath.AS_SET;
+ ArrayList<Long> segmentAsNumbers2 = new ArrayList<>();
+ segmentAsNumbers2.add((long) 4);
+ segmentAsNumbers2.add((long) 55); // Different
+ segmentAsNumbers2.add((long) 6);
+ BgpRouteEntry.PathSegment pathSegment2 =
+ new BgpRouteEntry.PathSegment(pathSegmentType2, segmentAsNumbers2);
+ pathSegments.add(pathSegment2);
+ //
+ BgpRouteEntry.AsPath asPath2 = new BgpRouteEntry.AsPath(pathSegments);
+
+ assertThat(asPath1, is(not(asPath2)));
+ }
+
+ /**
+ * Tests object string representation.
+ */
+ @Test
+ public void testToString() {
+ BgpRouteEntry.AsPath asPath = generateAsPath();
+
+ String expectedString =
+ "AsPath{pathSegments=" +
+ "[PathSegment{type=2, segmentAsNumbers=[1, 2, 3]}, " +
+ "PathSegment{type=1, segmentAsNumbers=[4, 5, 6]}]}";
+ assertThat(asPath.toString(), is(expectedString));
+ }
+}
diff --git a/apps/sdnip/src/test/java/org/onlab/onos/sdnip/bgp/BgpRouteEntryTest.java b/apps/sdnip/src/test/java/org/onlab/onos/sdnip/bgp/BgpRouteEntryTest.java
new file mode 100644
index 0000000..52a0214
--- /dev/null
+++ b/apps/sdnip/src/test/java/org/onlab/onos/sdnip/bgp/BgpRouteEntryTest.java
@@ -0,0 +1,498 @@
+package org.onlab.onos.sdnip.bgp;
+
+import static org.easymock.EasyMock.createMock;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.replay;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.not;
+import static org.junit.Assert.assertThat;
+
+import java.util.ArrayList;
+
+import org.junit.Before;
+import org.junit.Test;
+import org.onlab.packet.IpAddress;
+import org.onlab.packet.IpPrefix;
+
+/**
+ * Unit tests for the BgpRouteEntry class.
+ */
+public class BgpRouteEntryTest {
+ private BgpSession bgpSession;
+ private static final IpAddress BGP_SESSION_BGP_ID =
+ IpAddress.valueOf("10.0.0.1");
+ private static final IpAddress BGP_SESSION_IP_ADDRESS =
+ IpAddress.valueOf("20.0.0.1");
+
+ private BgpSession bgpSession2;
+ private static final IpAddress BGP_SESSION_BGP_ID2 =
+ IpAddress.valueOf("10.0.0.2");
+ private static final IpAddress BGP_SESSION_IP_ADDRESS2 =
+ IpAddress.valueOf("20.0.0.1");
+
+ private BgpSession bgpSession3;
+ private static final IpAddress BGP_SESSION_BGP_ID3 =
+ IpAddress.valueOf("10.0.0.1");
+ private static final IpAddress BGP_SESSION_IP_ADDRESS3 =
+ IpAddress.valueOf("20.0.0.2");
+
+ @Before
+ public void setUp() throws Exception {
+ // Mock objects for testing
+ bgpSession = createMock(BgpSession.class);
+ bgpSession2 = createMock(BgpSession.class);
+ bgpSession3 = createMock(BgpSession.class);
+
+ // Setup the BGP Sessions
+ expect(bgpSession.getRemoteBgpId())
+ .andReturn(BGP_SESSION_BGP_ID).anyTimes();
+ expect(bgpSession.getRemoteIp4Address())
+ .andReturn(BGP_SESSION_IP_ADDRESS).anyTimes();
+ //
+ expect(bgpSession2.getRemoteBgpId())
+ .andReturn(BGP_SESSION_BGP_ID2).anyTimes();
+ expect(bgpSession2.getRemoteIp4Address())
+ .andReturn(BGP_SESSION_IP_ADDRESS2).anyTimes();
+ //
+ expect(bgpSession3.getRemoteBgpId())
+ .andReturn(BGP_SESSION_BGP_ID3).anyTimes();
+ expect(bgpSession3.getRemoteIp4Address())
+ .andReturn(BGP_SESSION_IP_ADDRESS3).anyTimes();
+
+ replay(bgpSession);
+ replay(bgpSession2);
+ replay(bgpSession3);
+ }
+
+ /**
+ * Generates a BGP Route Entry.
+ *
+ * @return a generated BGP Route Entry
+ */
+ private BgpRouteEntry generateBgpRouteEntry() {
+ IpPrefix prefix = IpPrefix.valueOf("1.2.3.0/24");
+ IpAddress nextHop = IpAddress.valueOf("5.6.7.8");
+ byte origin = BgpConstants.Update.Origin.IGP;
+ // Setup the AS Path
+ ArrayList<BgpRouteEntry.PathSegment> pathSegments = new ArrayList<>();
+ byte pathSegmentType1 = (byte) BgpConstants.Update.AsPath.AS_SEQUENCE;
+ ArrayList<Long> segmentAsNumbers1 = new ArrayList<>();
+ segmentAsNumbers1.add((long) 1);
+ segmentAsNumbers1.add((long) 2);
+ segmentAsNumbers1.add((long) 3);
+ BgpRouteEntry.PathSegment pathSegment1 =
+ new BgpRouteEntry.PathSegment(pathSegmentType1, segmentAsNumbers1);
+ pathSegments.add(pathSegment1);
+ //
+ byte pathSegmentType2 = (byte) BgpConstants.Update.AsPath.AS_SET;
+ ArrayList<Long> segmentAsNumbers2 = new ArrayList<>();
+ segmentAsNumbers2.add((long) 4);
+ segmentAsNumbers2.add((long) 5);
+ segmentAsNumbers2.add((long) 6);
+ BgpRouteEntry.PathSegment pathSegment2 =
+ new BgpRouteEntry.PathSegment(pathSegmentType2, segmentAsNumbers2);
+ pathSegments.add(pathSegment2);
+ //
+ BgpRouteEntry.AsPath asPath = new BgpRouteEntry.AsPath(pathSegments);
+ //
+ long localPref = 100;
+ long multiExitDisc = 20;
+
+ BgpRouteEntry bgpRouteEntry =
+ new BgpRouteEntry(bgpSession, prefix, nextHop, origin, asPath,
+ localPref);
+ bgpRouteEntry.setMultiExitDisc(multiExitDisc);
+
+ return bgpRouteEntry;
+ }
+
+ /**
+ * Tests valid class constructor.
+ */
+ @Test
+ public void testConstructor() {
+ BgpRouteEntry bgpRouteEntry = generateBgpRouteEntry();
+
+ String expectedString =
+ "BgpRouteEntry{prefix=1.2.3.0/24, nextHop=5.6.7.8, " +
+ "bgpId=10.0.0.1, origin=0, asPath=AsPath{pathSegments=" +
+ "[PathSegment{type=2, segmentAsNumbers=[1, 2, 3]}, " +
+ "PathSegment{type=1, segmentAsNumbers=[4, 5, 6]}]}, " +
+ "localPref=100, multiExitDisc=20}";
+ assertThat(bgpRouteEntry.toString(), is(expectedString));
+ }
+
+ /**
+ * Tests invalid class constructor for null BGP Session.
+ */
+ @Test(expected = NullPointerException.class)
+ public void testInvalidConstructorNullBgpSession() {
+ BgpSession bgpSessionNull = null;
+ IpPrefix prefix = IpPrefix.valueOf("1.2.3.0/24");
+ IpAddress nextHop = IpAddress.valueOf("5.6.7.8");
+ byte origin = BgpConstants.Update.Origin.IGP;
+ // Setup the AS Path
+ ArrayList<BgpRouteEntry.PathSegment> pathSegments = new ArrayList<>();
+ BgpRouteEntry.AsPath asPath = new BgpRouteEntry.AsPath(pathSegments);
+ //
+ long localPref = 100;
+
+ new BgpRouteEntry(bgpSessionNull, prefix, nextHop, origin, asPath,
+ localPref);
+ }
+
+ /**
+ * Tests invalid class constructor for null AS Path.
+ */
+ @Test(expected = NullPointerException.class)
+ public void testInvalidConstructorNullAsPath() {
+ IpPrefix prefix = IpPrefix.valueOf("1.2.3.0/24");
+ IpAddress nextHop = IpAddress.valueOf("5.6.7.8");
+ byte origin = BgpConstants.Update.Origin.IGP;
+ BgpRouteEntry.AsPath asPath = null;
+ long localPref = 100;
+
+ new BgpRouteEntry(bgpSession, prefix, nextHop, origin, asPath,
+ localPref);
+ }
+
+ /**
+ * Tests getting the fields of a BGP route entry.
+ */
+ @Test
+ public void testGetFields() {
+ // Create the fields to compare against
+ IpPrefix prefix = IpPrefix.valueOf("1.2.3.0/24");
+ IpAddress nextHop = IpAddress.valueOf("5.6.7.8");
+ byte origin = BgpConstants.Update.Origin.IGP;
+ // Setup the AS Path
+ ArrayList<BgpRouteEntry.PathSegment> pathSegments = new ArrayList<>();
+ byte pathSegmentType1 = (byte) BgpConstants.Update.AsPath.AS_SEQUENCE;
+ ArrayList<Long> segmentAsNumbers1 = new ArrayList<>();
+ segmentAsNumbers1.add((long) 1);
+ segmentAsNumbers1.add((long) 2);
+ segmentAsNumbers1.add((long) 3);
+ BgpRouteEntry.PathSegment pathSegment1 =
+ new BgpRouteEntry.PathSegment(pathSegmentType1, segmentAsNumbers1);
+ pathSegments.add(pathSegment1);
+ //
+ byte pathSegmentType2 = (byte) BgpConstants.Update.AsPath.AS_SET;
+ ArrayList<Long> segmentAsNumbers2 = new ArrayList<>();
+ segmentAsNumbers2.add((long) 4);
+ segmentAsNumbers2.add((long) 5);
+ segmentAsNumbers2.add((long) 6);
+ BgpRouteEntry.PathSegment pathSegment2 =
+ new BgpRouteEntry.PathSegment(pathSegmentType2, segmentAsNumbers2);
+ pathSegments.add(pathSegment2);
+ //
+ BgpRouteEntry.AsPath asPath = new BgpRouteEntry.AsPath(pathSegments);
+ //
+ long localPref = 100;
+ long multiExitDisc = 20;
+
+ // Generate the entry to test
+ BgpRouteEntry bgpRouteEntry = generateBgpRouteEntry();
+
+ assertThat(bgpRouteEntry.prefix(), is(prefix));
+ assertThat(bgpRouteEntry.nextHop(), is(nextHop));
+ assertThat(bgpRouteEntry.getBgpSession(), is(bgpSession));
+ assertThat(bgpRouteEntry.getOrigin(), is(origin));
+ assertThat(bgpRouteEntry.getAsPath(), is(asPath));
+ assertThat(bgpRouteEntry.getLocalPref(), is(localPref));
+ assertThat(bgpRouteEntry.getMultiExitDisc(), is(multiExitDisc));
+ }
+
+ /**
+ * Tests whether a BGP route entry is a local route.
+ */
+ @Test
+ public void testIsLocalRoute() {
+ //
+ // Test non-local route
+ //
+ BgpRouteEntry bgpRouteEntry = generateBgpRouteEntry();
+ assertThat(bgpRouteEntry.isLocalRoute(), is(false));
+
+ //
+ // Test local route with AS Path that begins with AS_SET
+ //
+ IpPrefix prefix = IpPrefix.valueOf("1.2.3.0/24");
+ IpAddress nextHop = IpAddress.valueOf("5.6.7.8");
+ byte origin = BgpConstants.Update.Origin.IGP;
+ // Setup the AS Path
+ ArrayList<BgpRouteEntry.PathSegment> pathSegments = new ArrayList<>();
+ byte pathSegmentType1 = (byte) BgpConstants.Update.AsPath.AS_SET;
+ ArrayList<Long> segmentAsNumbers1 = new ArrayList<>();
+ segmentAsNumbers1.add((long) 1);
+ segmentAsNumbers1.add((long) 2);
+ segmentAsNumbers1.add((long) 3);
+ BgpRouteEntry.PathSegment pathSegment1 =
+ new BgpRouteEntry.PathSegment(pathSegmentType1, segmentAsNumbers1);
+ pathSegments.add(pathSegment1);
+ //
+ byte pathSegmentType2 = (byte) BgpConstants.Update.AsPath.AS_SEQUENCE;
+ ArrayList<Long> segmentAsNumbers2 = new ArrayList<>();
+ segmentAsNumbers2.add((long) 4);
+ segmentAsNumbers2.add((long) 5);
+ segmentAsNumbers2.add((long) 6);
+ BgpRouteEntry.PathSegment pathSegment2 =
+ new BgpRouteEntry.PathSegment(pathSegmentType2, segmentAsNumbers2);
+ pathSegments.add(pathSegment2);
+ //
+ BgpRouteEntry.AsPath asPath = new BgpRouteEntry.AsPath(pathSegments);
+ //
+ long localPref = 100;
+ long multiExitDisc = 20;
+ //
+ bgpRouteEntry =
+ new BgpRouteEntry(bgpSession, prefix, nextHop, origin, asPath,
+ localPref);
+ bgpRouteEntry.setMultiExitDisc(multiExitDisc);
+ assertThat(bgpRouteEntry.isLocalRoute(), is(true));
+
+ //
+ // Test local route with empty AS Path
+ //
+ pathSegments = new ArrayList<>();
+ asPath = new BgpRouteEntry.AsPath(pathSegments);
+ bgpRouteEntry =
+ new BgpRouteEntry(bgpSession, prefix, nextHop, origin, asPath,
+ localPref);
+ bgpRouteEntry.setMultiExitDisc(multiExitDisc);
+ assertThat(bgpRouteEntry.isLocalRoute(), is(true));
+ }
+
+ /**
+ * Tests getting the BGP Neighbor AS number for a route.
+ */
+ @Test
+ public void testGetNeighborAs() {
+ //
+ // Get neighbor AS for non-local route
+ //
+ BgpRouteEntry bgpRouteEntry = generateBgpRouteEntry();
+ assertThat(bgpRouteEntry.getNeighborAs(), is((long) 1));
+
+ //
+ // Get neighbor AS for a local route
+ //
+ IpPrefix prefix = IpPrefix.valueOf("1.2.3.0/24");
+ IpAddress nextHop = IpAddress.valueOf("5.6.7.8");
+ byte origin = BgpConstants.Update.Origin.IGP;
+ // Setup the AS Path
+ ArrayList<BgpRouteEntry.PathSegment> pathSegments = new ArrayList<>();
+ BgpRouteEntry.AsPath asPath = new BgpRouteEntry.AsPath(pathSegments);
+ //
+ long localPref = 100;
+ long multiExitDisc = 20;
+ //
+ bgpRouteEntry =
+ new BgpRouteEntry(bgpSession, prefix, nextHop, origin, asPath,
+ localPref);
+ bgpRouteEntry.setMultiExitDisc(multiExitDisc);
+ assertThat(bgpRouteEntry.getNeighborAs(), is(BgpConstants.BGP_AS_0));
+ }
+
+ /**
+ * Tests whether a BGP route entry has AS Path loop.
+ */
+ @Test
+ public void testHasAsPathLoop() {
+ BgpRouteEntry bgpRouteEntry = generateBgpRouteEntry();
+
+ // Test for loops: test each AS number in the interval [1, 6]
+ for (int i = 1; i <= 6; i++) {
+ assertThat(bgpRouteEntry.hasAsPathLoop(i), is(true));
+ }
+
+ // Test for non-loops
+ assertThat(bgpRouteEntry.hasAsPathLoop(500), is(false));
+ }
+
+ /**
+ * Tests the BGP Decision Process comparison of BGP routes.
+ */
+ @Test
+ public void testBgpDecisionProcessComparison() {
+ BgpRouteEntry bgpRouteEntry1 = generateBgpRouteEntry();
+ BgpRouteEntry bgpRouteEntry2 = generateBgpRouteEntry();
+
+ //
+        // Compare two routes that are the same
+ //
+ assertThat(bgpRouteEntry1.isBetterThan(bgpRouteEntry2), is(true));
+ assertThat(bgpRouteEntry2.isBetterThan(bgpRouteEntry1), is(true));
+
+ //
+ // Compare two routes with different LOCAL_PREF
+ //
+ IpPrefix prefix = IpPrefix.valueOf("1.2.3.0/24");
+ IpAddress nextHop = IpAddress.valueOf("5.6.7.8");
+ byte origin = BgpConstants.Update.Origin.IGP;
+ // Setup the AS Path
+ ArrayList<BgpRouteEntry.PathSegment> pathSegments = new ArrayList<>();
+ byte pathSegmentType1 = (byte) BgpConstants.Update.AsPath.AS_SEQUENCE;
+ ArrayList<Long> segmentAsNumbers1 = new ArrayList<>();
+ segmentAsNumbers1.add((long) 1);
+ segmentAsNumbers1.add((long) 2);
+ segmentAsNumbers1.add((long) 3);
+ BgpRouteEntry.PathSegment pathSegment1 =
+ new BgpRouteEntry.PathSegment(pathSegmentType1, segmentAsNumbers1);
+ pathSegments.add(pathSegment1);
+ //
+ byte pathSegmentType2 = (byte) BgpConstants.Update.AsPath.AS_SET;
+ ArrayList<Long> segmentAsNumbers2 = new ArrayList<>();
+ segmentAsNumbers2.add((long) 4);
+ segmentAsNumbers2.add((long) 5);
+ segmentAsNumbers2.add((long) 6);
+ BgpRouteEntry.PathSegment pathSegment2 =
+ new BgpRouteEntry.PathSegment(pathSegmentType2, segmentAsNumbers2);
+ pathSegments.add(pathSegment2);
+ //
+ BgpRouteEntry.AsPath asPath = new BgpRouteEntry.AsPath(pathSegments);
+ //
+ long localPref = 50; // Different
+ long multiExitDisc = 20;
+ bgpRouteEntry2 =
+ new BgpRouteEntry(bgpSession, prefix, nextHop, origin, asPath,
+ localPref);
+ bgpRouteEntry2.setMultiExitDisc(multiExitDisc);
+ //
+ assertThat(bgpRouteEntry1.isBetterThan(bgpRouteEntry2), is(true));
+ assertThat(bgpRouteEntry2.isBetterThan(bgpRouteEntry1), is(false));
+ localPref = bgpRouteEntry1.getLocalPref(); // Restore
+
+ //
+ // Compare two routes with different AS_PATH length
+ //
+ ArrayList<BgpRouteEntry.PathSegment> pathSegments2 = new ArrayList<>();
+ pathSegments2.add(pathSegment1);
+ // Different AS Path
+ BgpRouteEntry.AsPath asPath2 = new BgpRouteEntry.AsPath(pathSegments2);
+ bgpRouteEntry2 =
+ new BgpRouteEntry(bgpSession, prefix, nextHop, origin, asPath2,
+ localPref);
+ bgpRouteEntry2.setMultiExitDisc(multiExitDisc);
+ //
+ assertThat(bgpRouteEntry1.isBetterThan(bgpRouteEntry2), is(false));
+ assertThat(bgpRouteEntry2.isBetterThan(bgpRouteEntry1), is(true));
+
+ //
+ // Compare two routes with different ORIGIN
+ //
+ origin = BgpConstants.Update.Origin.EGP; // Different
+ bgpRouteEntry2 =
+ new BgpRouteEntry(bgpSession, prefix, nextHop, origin, asPath,
+ localPref);
+ bgpRouteEntry2.setMultiExitDisc(multiExitDisc);
+ //
+ assertThat(bgpRouteEntry1.isBetterThan(bgpRouteEntry2), is(true));
+ assertThat(bgpRouteEntry2.isBetterThan(bgpRouteEntry1), is(false));
+ origin = bgpRouteEntry1.getOrigin(); // Restore
+
+ //
+ // Compare two routes with different MULTI_EXIT_DISC
+ //
+ multiExitDisc = 10; // Different
+ bgpRouteEntry2 =
+ new BgpRouteEntry(bgpSession, prefix, nextHop, origin, asPath,
+ localPref);
+ bgpRouteEntry2.setMultiExitDisc(multiExitDisc);
+ //
+ assertThat(bgpRouteEntry1.isBetterThan(bgpRouteEntry2), is(true));
+ assertThat(bgpRouteEntry2.isBetterThan(bgpRouteEntry1), is(false));
+ multiExitDisc = bgpRouteEntry1.getMultiExitDisc(); // Restore
+
+ //
+ // Compare two routes with different BGP ID
+ //
+ bgpRouteEntry2 =
+ new BgpRouteEntry(bgpSession2, prefix, nextHop, origin, asPath,
+ localPref);
+ bgpRouteEntry2.setMultiExitDisc(multiExitDisc);
+ //
+ assertThat(bgpRouteEntry1.isBetterThan(bgpRouteEntry2), is(true));
+ assertThat(bgpRouteEntry2.isBetterThan(bgpRouteEntry1), is(false));
+
+ //
+ // Compare two routes with different BGP address
+ //
+ bgpRouteEntry2 =
+ new BgpRouteEntry(bgpSession3, prefix, nextHop, origin, asPath,
+ localPref);
+ bgpRouteEntry2.setMultiExitDisc(multiExitDisc);
+ //
+ assertThat(bgpRouteEntry1.isBetterThan(bgpRouteEntry2), is(true));
+ assertThat(bgpRouteEntry2.isBetterThan(bgpRouteEntry1), is(false));
+ }
+
+ /**
+ * Tests equality of {@link BgpRouteEntry}.
+ */
+ @Test
+ public void testEquality() {
+ BgpRouteEntry bgpRouteEntry1 = generateBgpRouteEntry();
+ BgpRouteEntry bgpRouteEntry2 = generateBgpRouteEntry();
+
+ assertThat(bgpRouteEntry1, is(bgpRouteEntry2));
+ }
+
+ /**
+ * Tests non-equality of {@link BgpRouteEntry}.
+ */
+ @Test
+ public void testNonEquality() {
+ BgpRouteEntry bgpRouteEntry1 = generateBgpRouteEntry();
+
+ // Setup BGP Route 2
+ IpPrefix prefix = IpPrefix.valueOf("1.2.3.0/24");
+ IpAddress nextHop = IpAddress.valueOf("5.6.7.8");
+ byte origin = BgpConstants.Update.Origin.IGP;
+ // Setup the AS Path
+ ArrayList<BgpRouteEntry.PathSegment> pathSegments = new ArrayList<>();
+ byte pathSegmentType1 = (byte) BgpConstants.Update.AsPath.AS_SEQUENCE;
+ ArrayList<Long> segmentAsNumbers1 = new ArrayList<>();
+ segmentAsNumbers1.add((long) 1);
+ segmentAsNumbers1.add((long) 2);
+ segmentAsNumbers1.add((long) 3);
+ BgpRouteEntry.PathSegment pathSegment1 =
+ new BgpRouteEntry.PathSegment(pathSegmentType1, segmentAsNumbers1);
+ pathSegments.add(pathSegment1);
+ //
+ byte pathSegmentType2 = (byte) BgpConstants.Update.AsPath.AS_SET;
+ ArrayList<Long> segmentAsNumbers2 = new ArrayList<>();
+ segmentAsNumbers2.add((long) 4);
+ segmentAsNumbers2.add((long) 5);
+ segmentAsNumbers2.add((long) 6);
+ BgpRouteEntry.PathSegment pathSegment2 =
+ new BgpRouteEntry.PathSegment(pathSegmentType2, segmentAsNumbers2);
+ pathSegments.add(pathSegment2);
+ //
+ BgpRouteEntry.AsPath asPath = new BgpRouteEntry.AsPath(pathSegments);
+ //
+ long localPref = 500; // Different
+ long multiExitDisc = 20;
+ BgpRouteEntry bgpRouteEntry2 =
+ new BgpRouteEntry(bgpSession, prefix, nextHop, origin, asPath,
+ localPref);
+ bgpRouteEntry2.setMultiExitDisc(multiExitDisc);
+
+ assertThat(bgpRouteEntry1, is(not(bgpRouteEntry2)));
+ }
+
+ /**
+ * Tests object string representation.
+ */
+ @Test
+ public void testToString() {
+ BgpRouteEntry bgpRouteEntry = generateBgpRouteEntry();
+
+ String expectedString =
+ "BgpRouteEntry{prefix=1.2.3.0/24, nextHop=5.6.7.8, " +
+ "bgpId=10.0.0.1, origin=0, asPath=AsPath{pathSegments=" +
+ "[PathSegment{type=2, segmentAsNumbers=[1, 2, 3]}, " +
+ "PathSegment{type=1, segmentAsNumbers=[4, 5, 6]}]}, " +
+ "localPref=100, multiExitDisc=20}";
+ assertThat(bgpRouteEntry.toString(), is(expectedString));
+ }
+}
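Editorial note: the decision-process test above exercises the preference order LOCAL_PREF, AS path length, ORIGIN, MULTI_EXIT_DISC, and finally the peer's BGP ID and address. The standalone sketch below restates that ordering as a Java Comparator for readers unfamiliar with the BGP decision process; the Route value type and its field names are illustrative placeholders, not the BgpRouteEntry API, and the final router-ID tie-breaks are omitted.

import java.util.Comparator;

final class BgpPreferenceSketch {

    /** Minimal placeholder value type used only by this sketch. */
    static final class Route {
        final long localPref;
        final int asPathLength;
        final int origin;          // IGP(0) < EGP(1) < INCOMPLETE(2)
        final long multiExitDisc;

        Route(long localPref, int asPathLength, int origin, long multiExitDisc) {
            this.localPref = localPref;
            this.asPathLength = asPathLength;
            this.origin = origin;
            this.multiExitDisc = multiExitDisc;
        }
    }

    // Higher LOCAL_PREF wins, then shorter AS path, then lower ORIGIN,
    // then lower MULTI_EXIT_DISC; the remaining ties (BGP ID, peer address)
    // tested above are not modelled here.
    static final Comparator<Route> DECISION_PROCESS =
            Comparator.<Route>comparingLong(r -> r.localPref).reversed()
                    .thenComparingInt(r -> r.asPathLength)
                    .thenComparingInt(r -> r.origin)
                    .thenComparingLong(r -> r.multiExitDisc);
}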
diff --git a/apps/sdnip/src/test/java/org/onlab/onos/sdnip/bgp/BgpSessionManagerTest.java b/apps/sdnip/src/test/java/org/onlab/onos/sdnip/bgp/BgpSessionManagerTest.java
new file mode 100644
index 0000000..99e3be1
--- /dev/null
+++ b/apps/sdnip/src/test/java/org/onlab/onos/sdnip/bgp/BgpSessionManagerTest.java
@@ -0,0 +1,403 @@
+package org.onlab.onos.sdnip.bgp;
+
+import static org.hamcrest.Matchers.hasItem;
+import static org.hamcrest.Matchers.hasSize;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.junit.Assert.assertThat;
+
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.net.SocketAddress;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.LinkedList;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+import org.jboss.netty.bootstrap.ClientBootstrap;
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.jboss.netty.channel.Channel;
+import org.jboss.netty.channel.ChannelFactory;
+import org.jboss.netty.channel.ChannelPipeline;
+import org.jboss.netty.channel.ChannelPipelineFactory;
+import org.jboss.netty.channel.Channels;
+import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.onlab.onos.sdnip.RouteListener;
+import org.onlab.onos.sdnip.RouteUpdate;
+import org.onlab.packet.IpAddress;
+import org.onlab.packet.IpPrefix;
+import org.onlab.util.TestUtils;
+import org.onlab.util.TestUtils.TestUtilsException;
+
+import com.google.common.net.InetAddresses;
+
+/**
+ * Unit tests for the BgpSessionManager class.
+ */
+public class BgpSessionManagerTest {
+ private static final IpAddress IP_LOOPBACK_ID =
+ IpAddress.valueOf("127.0.0.1");
+ private static final IpAddress BGP_PEER1_ID = IpAddress.valueOf("10.0.0.1");
+ private static final long DEFAULT_LOCAL_PREF = 10;
+ private static final long DEFAULT_MULTI_EXIT_DISC = 20;
+
+ // The BGP Session Manager to test
+ private BgpSessionManager bgpSessionManager;
+
+ // Remote Peer state
+ private ClientBootstrap peerBootstrap;
+ private TestBgpPeerChannelHandler peerChannelHandler =
+ new TestBgpPeerChannelHandler(BGP_PEER1_ID, DEFAULT_LOCAL_PREF);
+ private TestBgpPeerFrameDecoder peerFrameDecoder =
+ new TestBgpPeerFrameDecoder();
+
+ // The socket that the Remote Peer should connect to
+ private InetSocketAddress connectToSocket;
+
+ private final DummyRouteListener dummyRouteListener =
+ new DummyRouteListener();
+
+ /**
+ * Dummy implementation for the RouteListener interface.
+ */
+ private class DummyRouteListener implements RouteListener {
+ @Override
+ public void update(RouteUpdate routeUpdate) {
+ // Nothing to do
+ }
+ }
+
+ @Before
+ public void setUp() throws Exception {
+ //
+ // Setup the BGP Session Manager to test, and start listening for BGP
+ // connections.
+ //
+ bgpSessionManager = new BgpSessionManager(dummyRouteListener);
+ // NOTE: We use port 0 to bind on any available port
+ bgpSessionManager.startUp(0);
+
+ // Get the port number the BGP Session Manager is listening on
+ Channel serverChannel = TestUtils.getField(bgpSessionManager,
+ "serverChannel");
+ SocketAddress socketAddress = serverChannel.getLocalAddress();
+ InetSocketAddress inetSocketAddress =
+ (InetSocketAddress) socketAddress;
+
+ //
+ // Setup the BGP Peer, i.e., the "remote" BGP router that will
+ // initiate the BGP connection, send BGP UPDATE messages, etc.
+ //
+ ChannelFactory channelFactory =
+ new NioClientSocketChannelFactory(Executors.newCachedThreadPool(),
+ Executors.newCachedThreadPool());
+ ChannelPipelineFactory pipelineFactory = new ChannelPipelineFactory() {
+ @Override
+ public ChannelPipeline getPipeline() throws Exception {
+ // Setup the transmitting pipeline
+ ChannelPipeline pipeline = Channels.pipeline();
+ pipeline.addLast("TestBgpPeerFrameDecoder",
+ peerFrameDecoder);
+ pipeline.addLast("TestBgpPeerChannelHandler",
+ peerChannelHandler);
+ return pipeline;
+ }
+ };
+
+ peerBootstrap = new ClientBootstrap(channelFactory);
+ peerBootstrap.setOption("child.keepAlive", true);
+ peerBootstrap.setOption("child.tcpNoDelay", true);
+ peerBootstrap.setPipelineFactory(pipelineFactory);
+
+ InetAddress connectToAddress = InetAddresses.forString("127.0.0.1");
+ connectToSocket = new InetSocketAddress(connectToAddress,
+ inetSocketAddress.getPort());
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ bgpSessionManager.shutDown();
+ bgpSessionManager = null;
+ }
+
+ /**
+ * Gets BGP RIB-IN routes by waiting until they are received.
+ * <p/>
+ * NOTE: The number of received routes is checked once a second, for up
+ * to 5 seconds.
+ *
+ * @param bgpSession the BGP session that is expected to receive the
+ * routes
+ * @param expectedRoutes the expected number of routes
+ * @return the BGP RIB-IN routes as received within the expected
+ * time interval
+ */
+ private Collection<BgpRouteEntry> waitForBgpRibIn(BgpSession bgpSession,
+ long expectedRoutes)
+ throws InterruptedException {
+ Collection<BgpRouteEntry> bgpRibIn = bgpSession.getBgpRibIn();
+
+ final int maxChecks = 5; // Max wait of 5 seconds
+ for (int i = 0; i < maxChecks; i++) {
+ if (bgpRibIn.size() == expectedRoutes) {
+ break;
+ }
+ Thread.sleep(1000);
+ bgpRibIn = bgpSession.getBgpRibIn();
+ }
+
+ return bgpRibIn;
+ }
+
+ /**
+ * Gets BGP merged routes by waiting until they are received.
+ * <p/>
+ * NOTE: The number of received routes is checked once a second, for up
+ * to 5 seconds.
+ *
+ * @param expectedRoutes the expected number of routes
+ * @return the BGP Session Manager routes as received within the expected
+ * time interval
+ */
+ private Collection<BgpRouteEntry> waitForBgpRoutes(long expectedRoutes)
+ throws InterruptedException {
+ Collection<BgpRouteEntry> bgpRoutes = bgpSessionManager.getBgpRoutes();
+
+ final int maxChecks = 5; // Max wait of 5 seconds
+ for (int i = 0; i < maxChecks; i++) {
+ if (bgpRoutes.size() == expectedRoutes) {
+ break;
+ }
+ Thread.sleep(1000);
+ bgpRoutes = bgpSessionManager.getBgpRoutes();
+ }
+
+ return bgpRoutes;
+ }
+
+ /**
+ * Tests that the BGP OPEN messages have been exchanged, followed by
+ * KEEPALIVE.
+ * <p>
+ * The BGP Peer opens the session and transmits an OPEN message, eventually
+ * followed by a KEEPALIVE. The tested BGP listener should respond with an
+ * OPEN message, followed by a KEEPALIVE.
+ *
+ * @throws TestUtilsException TestUtils error
+ * @throws InterruptedException if the test was interrupted
+ */
+ @Test
+ public void testExchangedBgpOpenMessages()
+ throws InterruptedException, TestUtilsException {
+ // Initiate the connection
+ peerBootstrap.connect(connectToSocket);
+
+ // Wait until the OPEN message is received
+ peerFrameDecoder.receivedOpenMessageLatch.await(2000,
+ TimeUnit.MILLISECONDS);
+ // Wait until the KEEPALIVE message is received
+ peerFrameDecoder.receivedKeepaliveMessageLatch.await(2000,
+ TimeUnit.MILLISECONDS);
+
+ //
+ // Test the fields from the BGP OPEN message:
+ // BGP version, AS number, BGP ID
+ //
+ assertThat(peerFrameDecoder.remoteBgpVersion,
+ is(BgpConstants.BGP_VERSION));
+ assertThat(peerFrameDecoder.remoteAs,
+ is(TestBgpPeerChannelHandler.PEER_AS));
+ assertThat(peerFrameDecoder.remoteBgpIdentifier, is(IP_LOOPBACK_ID));
+
+ //
+ // Test that a BgpSession instance has been created
+ //
+ assertThat(bgpSessionManager.getMyBgpId(), is(IP_LOOPBACK_ID));
+ assertThat(bgpSessionManager.getBgpSessions(), hasSize(1));
+ BgpSession bgpSession =
+ bgpSessionManager.getBgpSessions().iterator().next();
+ assertThat(bgpSession, notNullValue());
+ long sessionAs = TestUtils.getField(bgpSession, "localAs");
+ assertThat(sessionAs, is(TestBgpPeerChannelHandler.PEER_AS));
+ }
+
+ /**
+ * Tests that the BGP UPDATE messages have been received and processed.
+ */
+ @Test
+ public void testProcessedBgpUpdateMessages() throws InterruptedException {
+ BgpSession bgpSession;
+ IpAddress nextHopRouter;
+ BgpRouteEntry bgpRouteEntry;
+ ChannelBuffer message;
+ Collection<BgpRouteEntry> bgpRibIn;
+ Collection<BgpRouteEntry> bgpRoutes;
+
+ // Initiate the connection
+ peerBootstrap.connect(connectToSocket);
+
+ // Wait until the OPEN message is received
+ peerFrameDecoder.receivedOpenMessageLatch.await(2000,
+ TimeUnit.MILLISECONDS);
+ // Wait until the KEEPALIVE message is received
+ peerFrameDecoder.receivedKeepaliveMessageLatch.await(2000,
+ TimeUnit.MILLISECONDS);
+
+ // Get the BGP Session handler
+ bgpSession = bgpSessionManager.getBgpSessions().iterator().next();
+
+ // Prepare routes to add/delete
+ nextHopRouter = IpAddress.valueOf("10.20.30.40");
+ Collection<IpPrefix> addedRoutes = new LinkedList<>();
+ Collection<IpPrefix> withdrawnRoutes = new LinkedList<>();
+ addedRoutes.add(IpPrefix.valueOf("0.0.0.0/0"));
+ addedRoutes.add(IpPrefix.valueOf("20.0.0.0/8"));
+ addedRoutes.add(IpPrefix.valueOf("30.0.0.0/16"));
+ addedRoutes.add(IpPrefix.valueOf("40.0.0.0/24"));
+ addedRoutes.add(IpPrefix.valueOf("50.0.0.0/32"));
+ withdrawnRoutes.add(IpPrefix.valueOf("60.0.0.0/8"));
+ withdrawnRoutes.add(IpPrefix.valueOf("70.0.0.0/16"));
+ withdrawnRoutes.add(IpPrefix.valueOf("80.0.0.0/24"));
+ withdrawnRoutes.add(IpPrefix.valueOf("90.0.0.0/32"));
+ // Write the routes
+ message = peerChannelHandler.prepareBgpUpdate(nextHopRouter,
+ addedRoutes,
+ withdrawnRoutes);
+ peerChannelHandler.savedCtx.getChannel().write(message);
+
+ // Check that the routes have been received, processed and stored
+ bgpRibIn = waitForBgpRibIn(bgpSession, 5);
+ assertThat(bgpRibIn, hasSize(5));
+ bgpRoutes = waitForBgpRoutes(5);
+ assertThat(bgpRoutes, hasSize(5));
+
+ // Setup the AS Path
+ ArrayList<BgpRouteEntry.PathSegment> pathSegments = new ArrayList<>();
+ byte pathSegmentType1 = (byte) BgpConstants.Update.AsPath.AS_SEQUENCE;
+ ArrayList<Long> segmentAsNumbers1 = new ArrayList<>();
+ segmentAsNumbers1.add((long) 65010);
+ segmentAsNumbers1.add((long) 65020);
+ segmentAsNumbers1.add((long) 65030);
+ BgpRouteEntry.PathSegment pathSegment1 =
+ new BgpRouteEntry.PathSegment(pathSegmentType1, segmentAsNumbers1);
+ pathSegments.add(pathSegment1);
+ //
+ byte pathSegmentType2 = (byte) BgpConstants.Update.AsPath.AS_SET;
+ ArrayList<Long> segmentAsNumbers2 = new ArrayList<>();
+ segmentAsNumbers2.add((long) 65041);
+ segmentAsNumbers2.add((long) 65042);
+ segmentAsNumbers2.add((long) 65043);
+ BgpRouteEntry.PathSegment pathSegment2 =
+ new BgpRouteEntry.PathSegment(pathSegmentType2, segmentAsNumbers2);
+ pathSegments.add(pathSegment2);
+ //
+ BgpRouteEntry.AsPath asPath = new BgpRouteEntry.AsPath(pathSegments);
+
+ //
+ bgpRouteEntry =
+ new BgpRouteEntry(bgpSession,
+ IpPrefix.valueOf("0.0.0.0/0"),
+ nextHopRouter,
+ (byte) BgpConstants.Update.Origin.IGP,
+ asPath,
+ DEFAULT_LOCAL_PREF);
+ bgpRouteEntry.setMultiExitDisc(DEFAULT_MULTI_EXIT_DISC);
+ assertThat(bgpRibIn, hasItem(bgpRouteEntry));
+ //
+ bgpRouteEntry =
+ new BgpRouteEntry(bgpSession,
+ IpPrefix.valueOf("20.0.0.0/8"),
+ nextHopRouter,
+ (byte) BgpConstants.Update.Origin.IGP,
+ asPath,
+ DEFAULT_LOCAL_PREF);
+ bgpRouteEntry.setMultiExitDisc(DEFAULT_MULTI_EXIT_DISC);
+ assertThat(bgpRibIn, hasItem(bgpRouteEntry));
+ //
+ bgpRouteEntry =
+ new BgpRouteEntry(bgpSession,
+ IpPrefix.valueOf("30.0.0.0/16"),
+ nextHopRouter,
+ (byte) BgpConstants.Update.Origin.IGP,
+ asPath,
+ DEFAULT_LOCAL_PREF);
+ bgpRouteEntry.setMultiExitDisc(DEFAULT_MULTI_EXIT_DISC);
+ assertThat(bgpRibIn, hasItem(bgpRouteEntry));
+ //
+ bgpRouteEntry =
+ new BgpRouteEntry(bgpSession,
+ IpPrefix.valueOf("40.0.0.0/24"),
+ nextHopRouter,
+ (byte) BgpConstants.Update.Origin.IGP,
+ asPath,
+ DEFAULT_LOCAL_PREF);
+ bgpRouteEntry.setMultiExitDisc(DEFAULT_MULTI_EXIT_DISC);
+ assertThat(bgpRibIn, hasItem(bgpRouteEntry));
+ //
+ bgpRouteEntry =
+ new BgpRouteEntry(bgpSession,
+ IpPrefix.valueOf("50.0.0.0/32"),
+ nextHopRouter,
+ (byte) BgpConstants.Update.Origin.IGP,
+ asPath,
+ DEFAULT_LOCAL_PREF);
+ bgpRouteEntry.setMultiExitDisc(DEFAULT_MULTI_EXIT_DISC);
+ assertThat(bgpRibIn, hasItem(bgpRouteEntry));
+
+ // Delete some routes
+ addedRoutes = new LinkedList<>();
+ withdrawnRoutes = new LinkedList<>();
+ withdrawnRoutes.add(IpPrefix.valueOf("0.0.0.0/0"));
+ withdrawnRoutes.add(IpPrefix.valueOf("50.0.0.0/32"));
+
+ // Write the routes
+ message = peerChannelHandler.prepareBgpUpdate(nextHopRouter,
+ addedRoutes,
+ withdrawnRoutes);
+ peerChannelHandler.savedCtx.getChannel().write(message);
+
+ // Check that the routes have been received, processed and stored
+ bgpRibIn = waitForBgpRibIn(bgpSession, 3);
+ assertThat(bgpRibIn, hasSize(3));
+ bgpRoutes = waitForBgpRoutes(3);
+ assertThat(bgpRoutes, hasSize(3));
+ //
+ bgpRouteEntry =
+ new BgpRouteEntry(bgpSession,
+ IpPrefix.valueOf("20.0.0.0/8"),
+ nextHopRouter,
+ (byte) BgpConstants.Update.Origin.IGP,
+ asPath,
+ DEFAULT_LOCAL_PREF);
+ bgpRouteEntry.setMultiExitDisc(DEFAULT_MULTI_EXIT_DISC);
+ assertThat(bgpRibIn, hasItem(bgpRouteEntry));
+ //
+ bgpRouteEntry =
+ new BgpRouteEntry(bgpSession,
+ IpPrefix.valueOf("30.0.0.0/16"),
+ nextHopRouter,
+ (byte) BgpConstants.Update.Origin.IGP,
+ asPath,
+ DEFAULT_LOCAL_PREF);
+ bgpRouteEntry.setMultiExitDisc(DEFAULT_MULTI_EXIT_DISC);
+ assertThat(bgpRibIn, hasItem(bgpRouteEntry));
+ //
+ bgpRouteEntry =
+ new BgpRouteEntry(bgpSession,
+ IpPrefix.valueOf("40.0.0.0/24"),
+ nextHopRouter,
+ (byte) BgpConstants.Update.Origin.IGP,
+ asPath,
+ DEFAULT_LOCAL_PREF);
+ bgpRouteEntry.setMultiExitDisc(DEFAULT_MULTI_EXIT_DISC);
+ assertThat(bgpRibIn, hasItem(bgpRouteEntry));
+
+ // Close the channel and test there are no routes
+ peerChannelHandler.closeChannel();
+ bgpRoutes = waitForBgpRoutes(0);
+ assertThat(bgpRoutes, hasSize(0));
+ }
+}
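Editorial note: the two wait helpers above share one pattern — re-read a collection once a second until it reaches the expected size or the five-second budget runs out, then let the caller assert on whatever was last read. A generic sketch of that pattern (names are illustrative; the test keeps its own specialized versions):

import java.util.concurrent.TimeUnit;
import java.util.function.Predicate;
import java.util.function.Supplier;

final class PollingSketch {
    // Re-reads a value once a second until the condition holds or the
    // timeout expires; the caller still performs the final assertion.
    static <T> T pollUntil(Supplier<T> reader, Predicate<T> condition,
                           int maxSeconds) throws InterruptedException {
        T value = reader.get();
        for (int i = 0; i < maxSeconds && !condition.test(value); i++) {
            TimeUnit.SECONDS.sleep(1);
            value = reader.get();
        }
        return value;
    }
}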
diff --git a/apps/sdnip/src/test/java/org/onlab/onos/sdnip/bgp/PathSegmentTest.java b/apps/sdnip/src/test/java/org/onlab/onos/sdnip/bgp/PathSegmentTest.java
new file mode 100644
index 0000000..636883e
--- /dev/null
+++ b/apps/sdnip/src/test/java/org/onlab/onos/sdnip/bgp/PathSegmentTest.java
@@ -0,0 +1,115 @@
+package org.onlab.onos.sdnip.bgp;
+
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.not;
+import static org.junit.Assert.assertThat;
+
+import java.util.ArrayList;
+
+import org.junit.Test;
+
+/**
+ * Unit tests for the BgpRouteEntry.PathSegment class.
+ */
+public class PathSegmentTest {
+ /**
+ * Generates a Path Segment.
+ *
+ * @return a generated PathSegment
+ */
+ private BgpRouteEntry.PathSegment generatePathSegment() {
+ byte pathSegmentType = (byte) BgpConstants.Update.AsPath.AS_SEQUENCE;
+ ArrayList<Long> segmentAsNumbers = new ArrayList<>();
+ segmentAsNumbers.add((long) 1);
+ segmentAsNumbers.add((long) 2);
+ segmentAsNumbers.add((long) 3);
+ BgpRouteEntry.PathSegment pathSegment =
+ new BgpRouteEntry.PathSegment(pathSegmentType, segmentAsNumbers);
+
+ return pathSegment;
+ }
+
+ /**
+ * Tests valid class constructor.
+ */
+ @Test
+ public void testConstructor() {
+ BgpRouteEntry.PathSegment pathSegment = generatePathSegment();
+
+ String expectedString =
+ "PathSegment{type=2, segmentAsNumbers=[1, 2, 3]}";
+ assertThat(pathSegment.toString(), is(expectedString));
+ }
+
+ /**
+ * Tests invalid class constructor for null Segment AS Numbers.
+ */
+ @Test(expected = NullPointerException.class)
+ public void testInvalidConstructorNullSegmentAsNumbers() {
+ byte pathSegmentType = (byte) BgpConstants.Update.AsPath.AS_SEQUENCE;
+ ArrayList<Long> segmentAsNumbers = null;
+ new BgpRouteEntry.PathSegment(pathSegmentType, segmentAsNumbers);
+ }
+
+ /**
+ * Tests getting the fields of a Path Segment.
+ */
+ @Test
+ public void testGetFields() {
+ // Create the fields to compare against
+ byte pathSegmentType = (byte) BgpConstants.Update.AsPath.AS_SEQUENCE;
+ ArrayList<Long> segmentAsNumbers = new ArrayList<>();
+ segmentAsNumbers.add((long) 1);
+ segmentAsNumbers.add((long) 2);
+ segmentAsNumbers.add((long) 3);
+
+ // Generate the entry to test
+ BgpRouteEntry.PathSegment pathSegment = generatePathSegment();
+
+ assertThat(pathSegment.getType(), is(pathSegmentType));
+ assertThat(pathSegment.getSegmentAsNumbers(), is(segmentAsNumbers));
+ }
+
+ /**
+ * Tests equality of {@link BgpRouteEntry.PathSegment}.
+ */
+ @Test
+ public void testEquality() {
+ BgpRouteEntry.PathSegment pathSegment1 = generatePathSegment();
+ BgpRouteEntry.PathSegment pathSegment2 = generatePathSegment();
+
+ assertThat(pathSegment1, is(pathSegment2));
+ }
+
+ /**
+ * Tests non-equality of {@link BgpRouteEntry.PathSegment}.
+ */
+ @Test
+ public void testNonEquality() {
+ BgpRouteEntry.PathSegment pathSegment1 = generatePathSegment();
+
+ // Setup Path Segment 2
+ byte pathSegmentType = (byte) BgpConstants.Update.AsPath.AS_SEQUENCE;
+ ArrayList<Long> segmentAsNumbers = new ArrayList<>();
+ segmentAsNumbers.add((long) 1);
+ segmentAsNumbers.add((long) 22); // Different
+ segmentAsNumbers.add((long) 3);
+ //
+ BgpRouteEntry.PathSegment pathSegment2 =
+ new BgpRouteEntry.PathSegment(pathSegmentType, segmentAsNumbers);
+
+ assertThat(pathSegment1, is(not(pathSegment2)));
+ }
+
+ /**
+ * Tests object string representation.
+ */
+ @Test
+ public void testToString() {
+ BgpRouteEntry.PathSegment pathSegment = generatePathSegment();
+
+ String expectedString =
+ "PathSegment{type=2, segmentAsNumbers=[1, 2, 3]}";
+ assertThat(pathSegment.toString(), is(expectedString));
+ }
+}
diff --git a/apps/sdnip/src/test/java/org/onlab/onos/sdnip/bgp/TestBgpPeerChannelHandler.java b/apps/sdnip/src/test/java/org/onlab/onos/sdnip/bgp/TestBgpPeerChannelHandler.java
new file mode 100644
index 0000000..1b51d52
--- /dev/null
+++ b/apps/sdnip/src/test/java/org/onlab/onos/sdnip/bgp/TestBgpPeerChannelHandler.java
@@ -0,0 +1,250 @@
+package org.onlab.onos.sdnip.bgp;
+
+import java.util.Collection;
+
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.jboss.netty.buffer.ChannelBuffers;
+import org.jboss.netty.channel.ChannelHandlerContext;
+import org.jboss.netty.channel.ChannelStateEvent;
+import org.jboss.netty.channel.SimpleChannelHandler;
+import org.onlab.packet.IpAddress;
+import org.onlab.packet.IpPrefix;
+
+/**
+ * Class for handling the remote BGP Peer session.
+ */
+class TestBgpPeerChannelHandler extends SimpleChannelHandler {
+ static final long PEER_AS = 65001;
+ static final int PEER_HOLDTIME = 120; // 120 seconds
+ final IpAddress bgpId; // The BGP ID
+ final long localPref; // Local preference for routes
+ final long multiExitDisc = 20; // MED value
+
+ ChannelHandlerContext savedCtx;
+
+ /**
+ * Constructor for given BGP ID.
+ *
+ * @param bgpId the BGP ID to use
+ * @param localPref the local preference for the routes to use
+ */
+ TestBgpPeerChannelHandler(IpAddress bgpId,
+ long localPref) {
+ this.bgpId = bgpId;
+ this.localPref = localPref;
+ }
+
+ /**
+ * Closes the channel.
+ */
+ void closeChannel() {
+ savedCtx.getChannel().close();
+ }
+
+ @Override
+ public void channelConnected(ChannelHandlerContext ctx,
+ ChannelStateEvent channelEvent) {
+ this.savedCtx = ctx;
+ // Prepare and transmit BGP OPEN message
+ ChannelBuffer message = prepareBgpOpen();
+ ctx.getChannel().write(message);
+
+ // Prepare and transmit BGP KEEPALIVE message
+ message = prepareBgpKeepalive();
+ ctx.getChannel().write(message);
+ }
+
+ @Override
+ public void channelDisconnected(ChannelHandlerContext ctx,
+ ChannelStateEvent channelEvent) {
+ // Nothing to do
+ }
+
+ /**
+ * Prepares BGP OPEN message.
+ *
+ * @return the message to transmit (BGP header included)
+ */
+ ChannelBuffer prepareBgpOpen() {
+ ChannelBuffer message =
+ ChannelBuffers.buffer(BgpConstants.BGP_MESSAGE_MAX_LENGTH);
+ message.writeByte(BgpConstants.BGP_VERSION);
+ message.writeShort((int) PEER_AS);
+ message.writeShort(PEER_HOLDTIME);
+ message.writeInt(bgpId.toInt());
+ message.writeByte(0); // No Optional Parameters
+ return prepareBgpMessage(BgpConstants.BGP_TYPE_OPEN, message);
+ }
+
+ /**
+ * Prepares BGP UPDATE message.
+ *
+ * @param nextHopRouter the next-hop router address for the routes to add
+ * @param addedRoutes the routes to add
+ * @param withdrawnRoutes the routes to withdraw
+ * @return the message to transmit (BGP header included)
+ */
+ ChannelBuffer prepareBgpUpdate(IpAddress nextHopRouter,
+ Collection<IpPrefix> addedRoutes,
+ Collection<IpPrefix> withdrawnRoutes) {
+ int attrFlags;
+ ChannelBuffer message =
+ ChannelBuffers.buffer(BgpConstants.BGP_MESSAGE_MAX_LENGTH);
+ ChannelBuffer pathAttributes =
+ ChannelBuffers.buffer(BgpConstants.BGP_MESSAGE_MAX_LENGTH);
+
+ // Encode the Withdrawn Routes
+ ChannelBuffer encodedPrefixes = encodePackedPrefixes(withdrawnRoutes);
+ message.writeShort(encodedPrefixes.readableBytes());
+ message.writeBytes(encodedPrefixes);
+
+ // Encode the Path Attributes
+ // ORIGIN: IGP
+ attrFlags = 0x40; // Transitive flag
+ pathAttributes.writeByte(attrFlags);
+ pathAttributes.writeByte(BgpConstants.Update.Origin.TYPE);
+ pathAttributes.writeByte(1); // Data length
+ pathAttributes.writeByte(BgpConstants.Update.Origin.IGP);
+ // AS_PATH: Two Path Segments of 3 ASes each
+ attrFlags = 0x40; // Transitive flag
+ pathAttributes.writeByte(attrFlags);
+ pathAttributes.writeByte(BgpConstants.Update.AsPath.TYPE);
+ pathAttributes.writeByte(16); // Data length
+ byte pathSegmentType1 = (byte) BgpConstants.Update.AsPath.AS_SEQUENCE;
+ pathAttributes.writeByte(pathSegmentType1);
+ pathAttributes.writeByte(3); // Three ASes
+ pathAttributes.writeShort(65010); // AS=65010
+ pathAttributes.writeShort(65020); // AS=65020
+ pathAttributes.writeShort(65030); // AS=65030
+ byte pathSegmentType2 = (byte) BgpConstants.Update.AsPath.AS_SET;
+ pathAttributes.writeByte(pathSegmentType2);
+ pathAttributes.writeByte(3); // Three ASes
+ pathAttributes.writeShort(65041); // AS=65041
+ pathAttributes.writeShort(65042); // AS=65042
+ pathAttributes.writeShort(65043); // AS=65043
+ // NEXT_HOP: nextHopRouter
+ attrFlags = 0x40; // Transitive flag
+ pathAttributes.writeByte(attrFlags);
+ pathAttributes.writeByte(BgpConstants.Update.NextHop.TYPE);
+ pathAttributes.writeByte(4); // Data length
+ pathAttributes.writeInt(nextHopRouter.toInt()); // Next-hop router
+ // LOCAL_PREF: localPref
+ attrFlags = 0x40; // Transitive flag
+ pathAttributes.writeByte(attrFlags);
+ pathAttributes.writeByte(BgpConstants.Update.LocalPref.TYPE);
+ pathAttributes.writeByte(4); // Data length
+ pathAttributes.writeInt((int) localPref); // Preference value
+ // MULTI_EXIT_DISC: multiExitDisc
+ attrFlags = 0x80; // Optional, non-transitive flag
+ pathAttributes.writeByte(attrFlags);
+ pathAttributes.writeByte(BgpConstants.Update.MultiExitDisc.TYPE);
+ pathAttributes.writeByte(4); // Data length
+ pathAttributes.writeInt((int) multiExitDisc); // MED value
+ // The NLRI prefixes
+ encodedPrefixes = encodePackedPrefixes(addedRoutes);
+
+ // Write the Path Attributes, beginning with its length
+ message.writeShort(pathAttributes.readableBytes());
+ message.writeBytes(pathAttributes);
+ message.writeBytes(encodedPrefixes);
+
+ return prepareBgpMessage(BgpConstants.BGP_TYPE_UPDATE, message);
+ }
+
+ /**
+ * Encodes a collection of IPv4 network prefixes in a packed format.
+ * <p>
+ * The IPv4 prefixes are encoded in the form:
+ * <Length, Prefix> where Length is the length in bits of the IPv4 prefix,
+ * and Prefix is the IPv4 prefix (padded with trailing bits to the end
+ * of an octet).
+ *
+ * @param prefixes the prefixes to encode
+ * @return the buffer with the encoded prefixes
+ */
+ private ChannelBuffer encodePackedPrefixes(Collection<IpPrefix> prefixes) {
+ ChannelBuffer message =
+ ChannelBuffers.buffer(BgpConstants.BGP_MESSAGE_MAX_LENGTH);
+
+ // Write each of the prefixes
+ for (IpPrefix prefix : prefixes) {
+ int prefixBitlen = prefix.prefixLength();
+ int prefixBytelen = (prefixBitlen + 7) / 8; // Round-up
+ message.writeByte(prefixBitlen);
+
+ IpAddress address = prefix.toIpAddress();
+ long value = address.toInt() & 0xffffffffL;
+ for (int i = 0; i < IpAddress.INET_LEN; i++) {
+ if (prefixBytelen-- == 0) {
+ break;
+ }
+ long nextByte =
+ (value >> ((IpAddress.INET_LEN - i - 1) * 8)) & 0xff;
+ message.writeByte((int) nextByte);
+ }
+ }
+
+ return message;
+ }
+
+ /**
+ * Prepares BGP KEEPALIVE message.
+ *
+ * @return the message to transmit (BGP header included)
+ */
+ ChannelBuffer prepareBgpKeepalive() {
+ ChannelBuffer message =
+ ChannelBuffers.buffer(BgpConstants.BGP_MESSAGE_MAX_LENGTH);
+ return prepareBgpMessage(BgpConstants.BGP_TYPE_KEEPALIVE, message);
+ }
+
+ /**
+ * Prepares BGP NOTIFICATION message.
+ *
+ * @param errorCode the BGP NOTIFICATION Error Code
+ * @param errorSubcode the BGP NOTIFICATION Error Subcode if applicable,
+ * otherwise BgpConstants.Notifications.ERROR_SUBCODE_UNSPECIFIC
+ * @param data the BGP NOTIFICATION Data if applicable, otherwise null
+ * @return the message to transmit (BGP header included)
+ */
+ ChannelBuffer prepareBgpNotification(int errorCode, int errorSubcode,
+ ChannelBuffer data) {
+ ChannelBuffer message =
+ ChannelBuffers.buffer(BgpConstants.BGP_MESSAGE_MAX_LENGTH);
+ // Prepare the NOTIFICATION message payload
+ message.writeByte(errorCode);
+ message.writeByte(errorSubcode);
+ if (data != null) {
+ message.writeBytes(data);
+ }
+ return prepareBgpMessage(BgpConstants.BGP_TYPE_NOTIFICATION, message);
+ }
+
+ /**
+ * Prepares BGP message.
+ *
+ * @param type the BGP message type
+ * @param payload the message payload to transmit (BGP header excluded)
+ * @return the message to transmit (BGP header included)
+ */
+ private ChannelBuffer prepareBgpMessage(int type, ChannelBuffer payload) {
+ ChannelBuffer message =
+ ChannelBuffers.buffer(BgpConstants.BGP_HEADER_LENGTH +
+ payload.readableBytes());
+
+ // Write the marker
+ for (int i = 0; i < BgpConstants.BGP_HEADER_MARKER_LENGTH; i++) {
+ message.writeByte(0xff);
+ }
+
+ // Write the rest of the BGP header
+ message.writeShort(BgpConstants.BGP_HEADER_LENGTH +
+ payload.readableBytes());
+ message.writeByte(type);
+
+ // Write the payload
+ message.writeBytes(payload);
+ return message;
+ }
+}
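Editorial note: encodePackedPrefixes() above writes each prefix as one length octet followed by just enough address octets to cover that many bits. The standalone sketch below restates the same rule; the class and method names are illustrative, and the result was checked by hand against the prefixes used in BgpSessionManagerTest (for example, 30.0.0.0/16 packs to 0x10 0x1E 0x00).

final class PrefixPackingSketch {
    // One length octet, then ceil(prefixBitLen / 8) address octets,
    // most-significant octet first.
    static byte[] packPrefix(int prefixBitLen, int ipv4AsInt) {
        int byteLen = (prefixBitLen + 7) / 8;   // round up to whole octets
        byte[] packed = new byte[1 + byteLen];
        packed[0] = (byte) prefixBitLen;        // length octet
        for (int i = 0; i < byteLen; i++) {
            packed[1 + i] = (byte) (ipv4AsInt >>> ((3 - i) * 8));
        }
        return packed;
    }
}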
diff --git a/apps/sdnip/src/test/java/org/onlab/onos/sdnip/bgp/TestBgpPeerFrameDecoder.java b/apps/sdnip/src/test/java/org/onlab/onos/sdnip/bgp/TestBgpPeerFrameDecoder.java
new file mode 100644
index 0000000..42bbb90
--- /dev/null
+++ b/apps/sdnip/src/test/java/org/onlab/onos/sdnip/bgp/TestBgpPeerFrameDecoder.java
@@ -0,0 +1,163 @@
+package org.onlab.onos.sdnip.bgp;
+
+import java.util.concurrent.CountDownLatch;
+
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.jboss.netty.channel.Channel;
+import org.jboss.netty.channel.ChannelHandlerContext;
+import org.jboss.netty.handler.codec.frame.FrameDecoder;
+import org.onlab.packet.IpAddress;
+
+/**
+ * Class for handling the decoding of the BGP messages at the remote
+ * BGP peer session.
+ */
+class TestBgpPeerFrameDecoder extends FrameDecoder {
+ int remoteBgpVersion; // 1 octet
+ long remoteAs; // 2 octets
+ long remoteHoldtime; // 2 octets
+ IpAddress remoteBgpIdentifier; // 4 octets -> IPv4 address
+
+ final CountDownLatch receivedOpenMessageLatch = new CountDownLatch(1);
+ final CountDownLatch receivedKeepaliveMessageLatch = new CountDownLatch(1);
+
+ @Override
+ protected Object decode(ChannelHandlerContext ctx,
+ Channel channel,
+ ChannelBuffer buf) throws Exception {
+ // Test for minimum length of the BGP message
+ if (buf.readableBytes() < BgpConstants.BGP_HEADER_LENGTH) {
+ // Not enough data received
+ return null;
+ }
+
+ //
+ // Mark the current buffer position in case we haven't received
+ // the whole message.
+ //
+ buf.markReaderIndex();
+
+ //
+ // Read and check the BGP message Marker field: it must be all ones
+ //
+ byte[] marker = new byte[BgpConstants.BGP_HEADER_MARKER_LENGTH];
+ buf.readBytes(marker);
+ for (int i = 0; i < marker.length; i++) {
+ if (marker[i] != (byte) 0xff) {
+ // ERROR: Connection Not Synchronized. Close the channel.
+ ctx.getChannel().close();
+ return null;
+ }
+ }
+
+ //
+ // Read and check the BGP message Length field
+ //
+ int length = buf.readUnsignedShort();
+ if ((length < BgpConstants.BGP_HEADER_LENGTH) ||
+ (length > BgpConstants.BGP_MESSAGE_MAX_LENGTH)) {
+ // ERROR: Bad Message Length. Close the channel.
+ ctx.getChannel().close();
+ return null;
+ }
+
+ //
+ // Test whether the rest of the message is received:
+ // So far we have read the Marker (16 octets) and the
+ // Length (2 octets) fields.
+ //
+ int remainingMessageLen =
+ length - BgpConstants.BGP_HEADER_MARKER_LENGTH - 2;
+ if (buf.readableBytes() < remainingMessageLen) {
+ // Not enough data received
+ buf.resetReaderIndex();
+ return null;
+ }
+
+ //
+ // Read the BGP message Type field, and process based on that type
+ //
+ int type = buf.readUnsignedByte();
+ remainingMessageLen--; // Adjust after reading the type
+ ChannelBuffer message = buf.readBytes(remainingMessageLen);
+
+ //
+ // Process the remainder of the message based on the message type
+ //
+ switch (type) {
+ case BgpConstants.BGP_TYPE_OPEN:
+ processBgpOpen(ctx, message);
+ break;
+ case BgpConstants.BGP_TYPE_UPDATE:
+ // NOTE: Not used as part of the test, because ONOS does not
+ // originate UPDATE messages.
+ break;
+ case BgpConstants.BGP_TYPE_NOTIFICATION:
+ // NOTE: Not used as part of the testing (yet)
+ break;
+ case BgpConstants.BGP_TYPE_KEEPALIVE:
+ processBgpKeepalive(ctx, message);
+ break;
+ default:
+ // ERROR: Bad Message Type. Close the channel.
+ ctx.getChannel().close();
+ return null;
+ }
+
+ return null;
+ }
+
+ /**
+ * Processes BGP OPEN message.
+ *
+ * @param ctx the Channel Handler Context.
+ * @param message the message to process.
+ */
+ private void processBgpOpen(ChannelHandlerContext ctx,
+ ChannelBuffer message) {
+ int minLength =
+ BgpConstants.BGP_OPEN_MIN_LENGTH - BgpConstants.BGP_HEADER_LENGTH;
+ if (message.readableBytes() < minLength) {
+ // ERROR: Bad Message Length. Close the channel.
+ ctx.getChannel().close();
+ return;
+ }
+
+ //
+ // Parse the OPEN message
+ //
+ remoteBgpVersion = message.readUnsignedByte();
+ remoteAs = message.readUnsignedShort();
+ remoteHoldtime = message.readUnsignedShort();
+ remoteBgpIdentifier = IpAddress.valueOf((int) message.readUnsignedInt());
+ // Optional Parameters
+ int optParamLen = message.readUnsignedByte();
+ if (message.readableBytes() < optParamLen) {
+ // ERROR: Bad Message Length. Close the channel.
+ ctx.getChannel().close();
+ return;
+ }
+ message.readBytes(optParamLen); // NOTE: data ignored
+
+ // BGP OPEN message successfully received
+ receivedOpenMessageLatch.countDown();
+ }
+
+ /**
+ * Processes BGP KEEPALIVE message.
+ *
+ * @param ctx the Channel Handler Context.
+ * @param message the message to process.
+ */
+ private void processBgpKeepalive(ChannelHandlerContext ctx,
+ ChannelBuffer message) {
+ if (message.readableBytes() + BgpConstants.BGP_HEADER_LENGTH !=
+ BgpConstants.BGP_KEEPALIVE_EXPECTED_LENGTH) {
+ // ERROR: Bad Message Length. Close the channel.
+ ctx.getChannel().close();
+ return;
+ }
+ // BGP KEEPALIVE message successfully received
+ receivedKeepaliveMessageLatch.countDown();
+ }
+}
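Editorial note: the header bookkeeping in decode() is easy to mis-read, so the sketch below spells it out for the smallest message, a KEEPALIVE, assuming the usual RFC 4271 sizes (16-octet marker, 2-octet length, 1-octet type, 19 octets total); the class name and variables are illustrative only.

final class HeaderMathSketch {
    public static void main(String[] args) {
        int markerLen = 16, lengthFieldLen = 2, typeLen = 1;
        int keepaliveTotal = 19;                                               // per RFC 4271
        // After the marker and length fields are consumed...
        int remainingMessageLen = keepaliveTotal - markerLen - lengthFieldLen; // 1
        // ...and after the type octet is read, the body is empty.
        int bodyLen = remainingMessageLen - typeLen;                           // 0
        System.out.println("remaining=" + remainingMessageLen + ", body=" + bodyLen);
    }
}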
diff --git a/cli/pom.xml b/cli/pom.xml
index cc1d9dd..8a11f34 100644
--- a/cli/pom.xml
+++ b/cli/pom.xml
@@ -26,6 +26,16 @@
<groupId>org.onlab.onos</groupId>
<artifactId>onlab-osgi</artifactId>
</dependency>
+
+ <dependency>
+ <groupId>com.fasterxml.jackson.core</groupId>
+ <artifactId>jackson-databind</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>com.fasterxml.jackson.core</groupId>
+ <artifactId>jackson-annotations</artifactId>
+ </dependency>
+
<dependency>
<groupId>org.osgi</groupId>
<artifactId>org.osgi.core</artifactId>
diff --git a/cli/src/main/java/org/onlab/onos/cli/AbstractShellCommand.java b/cli/src/main/java/org/onlab/onos/cli/AbstractShellCommand.java
index 184a7e6..839d2841 100644
--- a/cli/src/main/java/org/onlab/onos/cli/AbstractShellCommand.java
+++ b/cli/src/main/java/org/onlab/onos/cli/AbstractShellCommand.java
@@ -1,5 +1,6 @@
package org.onlab.onos.cli;
+import org.apache.karaf.shell.commands.Option;
import org.apache.karaf.shell.console.OsgiCommandSupport;
import org.onlab.osgi.DefaultServiceDirectory;
import org.onlab.osgi.ServiceNotFoundException;
@@ -9,6 +10,10 @@
*/
public abstract class AbstractShellCommand extends OsgiCommandSupport {
+ @Option(name = "-j", aliases = "--json", description = "Output JSON",
+ required = false, multiValued = false)
+ private boolean json = false;
+
/**
* Returns the reference to the implementation of the specified service.
*
@@ -46,6 +51,15 @@
*/
protected abstract void execute();
+ /**
+ * Indicates whether JSON format should be output.
+ *
+ * @return true if JSON is requested
+ */
+ protected boolean outputJson() {
+ return json;
+ }
+
@Override
protected Object doExecute() throws Exception {
try {
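Editorial note: a minimal sketch of how a command would build on the new -j/--json option, mirroring the pattern the commands changed below follow; the scope, name, and printed fields are illustrative only.

import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.karaf.shell.commands.Command;
import org.onlab.onos.cli.AbstractShellCommand;

@Command(scope = "onos", name = "example", description = "Illustrative JSON-aware command")
public class ExampleCommand extends AbstractShellCommand {
    @Override
    protected void execute() {
        if (outputJson()) {
            // JSON form, selected via -j/--json
            print("%s", new ObjectMapper().createObjectNode().put("message", "hello"));
        } else {
            // Plain-text form
            print("message=%s", "hello");
        }
    }
}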
diff --git a/cli/src/main/java/org/onlab/onos/cli/MastersListCommand.java b/cli/src/main/java/org/onlab/onos/cli/MastersListCommand.java
index 0f7857f..fff4955 100644
--- a/cli/src/main/java/org/onlab/onos/cli/MastersListCommand.java
+++ b/cli/src/main/java/org/onlab/onos/cli/MastersListCommand.java
@@ -1,7 +1,9 @@
package org.onlab.onos.cli;
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.node.ArrayNode;
import com.google.common.collect.Lists;
-
import org.apache.karaf.shell.commands.Command;
import org.onlab.onos.cluster.ClusterService;
import org.onlab.onos.cluster.ControllerNode;
@@ -26,15 +28,50 @@
MastershipService mastershipService = get(MastershipService.class);
List<ControllerNode> nodes = newArrayList(service.getNodes());
Collections.sort(nodes, Comparators.NODE_COMPARATOR);
+
+ if (outputJson()) {
+ print("%s", json(service, mastershipService, nodes));
+ } else {
+ for (ControllerNode node : nodes) {
+ List<DeviceId> ids = Lists.newArrayList(mastershipService.getDevicesOf(node.id()));
+ Collections.sort(ids, Comparators.ELEMENT_ID_COMPARATOR);
+ print("%s: %d devices", node.id(), ids.size());
+ for (DeviceId deviceId : ids) {
+ print(" %s", deviceId);
+ }
+ }
+ }
+ }
+
+ // Produces JSON structure.
+ private JsonNode json(ClusterService service, MastershipService mastershipService,
+ List<ControllerNode> nodes) {
+ ObjectMapper mapper = new ObjectMapper();
+ ArrayNode result = mapper.createArrayNode();
ControllerNode self = service.getLocalNode();
for (ControllerNode node : nodes) {
List<DeviceId> ids = Lists.newArrayList(mastershipService.getDevicesOf(node.id()));
- Collections.sort(ids, Comparators.ELEMENT_ID_COMPARATOR);
- print("%s: %d devices", node.id(), ids.size());
- for (DeviceId deviceId : ids) {
- print(" %s", deviceId);
- }
+ result.add(mapper.createObjectNode()
+ .put("id", node.id().toString())
+ .put("size", ids.size())
+ .set("devices", json(mapper, ids)));
}
+ return result;
+ }
+
+ /**
+ * Produces a JSON array containing the specified device identifiers.
+ *
+ * @param mapper object mapper
+ * @param ids collection of device identifiers
+ * @return JSON array
+ */
+ public static JsonNode json(ObjectMapper mapper, Iterable<DeviceId> ids) {
+ ArrayNode result = mapper.createArrayNode();
+ for (DeviceId deviceId : ids) {
+ result.add(deviceId.toString());
+ }
+ return result;
}
}
diff --git a/cli/src/main/java/org/onlab/onos/cli/NodesListCommand.java b/cli/src/main/java/org/onlab/onos/cli/NodesListCommand.java
index b7b4556..d9bdf94 100644
--- a/cli/src/main/java/org/onlab/onos/cli/NodesListCommand.java
+++ b/cli/src/main/java/org/onlab/onos/cli/NodesListCommand.java
@@ -1,5 +1,8 @@
package org.onlab.onos.cli;
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.node.ArrayNode;
import org.apache.karaf.shell.commands.Command;
import org.onlab.onos.cluster.ClusterService;
import org.onlab.onos.cluster.ControllerNode;
@@ -24,12 +27,32 @@
ClusterService service = get(ClusterService.class);
List<ControllerNode> nodes = newArrayList(service.getNodes());
Collections.sort(nodes, Comparators.NODE_COMPARATOR);
+ if (outputJson()) {
+ print("%s", json(service, nodes));
+ } else {
+ ControllerNode self = service.getLocalNode();
+ for (ControllerNode node : nodes) {
+ print(FMT, node.id(), node.ip(), node.tcpPort(),
+ service.getState(node.id()),
+ node.equals(self) ? "*" : "");
+ }
+ }
+ }
+
+ // Produces JSON structure.
+ private JsonNode json(ClusterService service, List<ControllerNode> nodes) {
+ ObjectMapper mapper = new ObjectMapper();
+ ArrayNode result = mapper.createArrayNode();
ControllerNode self = service.getLocalNode();
for (ControllerNode node : nodes) {
- print(FMT, node.id(), node.ip(), node.tcpPort(),
- service.getState(node.id()),
- node.equals(self) ? "*" : "");
+ result.add(mapper.createObjectNode()
+ .put("id", node.id().toString())
+ .put("ip", node.ip().toString())
+ .put("tcpPort", node.tcpPort())
+ .put("state", service.getState(node.id()).toString())
+ .put("self", node.equals(self)));
}
+ return result;
}
}
diff --git a/cli/src/main/java/org/onlab/onos/cli/RolesCommand.java b/cli/src/main/java/org/onlab/onos/cli/RolesCommand.java
new file mode 100644
index 0000000..0456e4a
--- /dev/null
+++ b/cli/src/main/java/org/onlab/onos/cli/RolesCommand.java
@@ -0,0 +1,74 @@
+package org.onlab.onos.cli;
+
+import static com.google.common.collect.Lists.newArrayList;
+
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.karaf.shell.commands.Command;
+import org.onlab.onos.cluster.NodeId;
+import org.onlab.onos.mastership.MastershipService;
+import org.onlab.onos.net.Device;
+import org.onlab.onos.net.DeviceId;
+import org.onlab.onos.net.device.DeviceService;
+
+
+/**
+ * Lists mastership roles of nodes for each device.
+ */
+@Command(scope = "onos", name = "roles",
+ description = "Lists mastership roles of nodes for each device.")
+public class RolesCommand extends AbstractShellCommand {
+
+ private static final String FMT_HDR = "%s: master=%s\nstandbys: %s nodes";
+ private static final String FMT_SB = "\t%s";
+
+ @Override
+ protected void execute() {
+ DeviceService deviceService = get(DeviceService.class);
+ MastershipService roleService = get(MastershipService.class);
+
+ for (Device d : getSortedDevices(deviceService)) {
+ DeviceId did = d.id();
+ printRoles(roleService, did);
+ }
+ }
+
+ /**
+ * Returns the list of devices sorted using the device ID URIs.
+ *
+ * @param service device service
+ * @return sorted device list
+ */
+ protected static List<Device> getSortedDevices(DeviceService service) {
+ List<Device> devices = newArrayList(service.getDevices());
+ Collections.sort(devices, Comparators.ELEMENT_COMPARATOR);
+ return devices;
+ }
+
+ /**
+ * Prints the role information for a device.
+ *
+ * @param service the mastership service
+ * @param deviceId the ID of the device
+ */
+ protected void printRoles(MastershipService service, DeviceId deviceId) {
+ List<NodeId> nodes = service.getNodesFor(deviceId);
+ NodeId first = null;
+ NodeId master = null;
+
+ if (!nodes.isEmpty()) {
+ first = nodes.get(0);
+ }
+ if (first != null &&
+ first.equals(service.getMasterFor(deviceId))) {
+ master = nodes.get(0);
+ nodes.remove(master);
+ }
+ print(FMT_HDR, deviceId, master == null ? "NONE" : master, nodes.size());
+
+ for (NodeId nid : nodes) {
+ print(FMT_SB, nid);
+ }
+ }
+}
diff --git a/cli/src/main/java/org/onlab/onos/cli/SummaryCommand.java b/cli/src/main/java/org/onlab/onos/cli/SummaryCommand.java
index 1597b55..180405b 100644
--- a/cli/src/main/java/org/onlab/onos/cli/SummaryCommand.java
+++ b/cli/src/main/java/org/onlab/onos/cli/SummaryCommand.java
@@ -1,5 +1,6 @@
package org.onlab.onos.cli;
+import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.karaf.shell.commands.Command;
import org.onlab.onos.CoreService;
import org.onlab.onos.cluster.ClusterService;
@@ -22,18 +23,32 @@
protected void execute() {
TopologyService topologyService = get(TopologyService.class);
Topology topology = topologyService.currentTopology();
- print("node=%s, version=%s",
- get(ClusterService.class).getLocalNode().ip(),
- get(CoreService.class).version().toString());
- print("nodes=%d, devices=%d, links=%d, hosts=%d, clusters=%s, paths=%d, flows=%d, intents=%d",
- get(ClusterService.class).getNodes().size(),
- get(DeviceService.class).getDeviceCount(),
- get(LinkService.class).getLinkCount(),
- get(HostService.class).getHostCount(),
- topologyService.getClusters(topology).size(),
- topology.pathCount(),
- get(FlowRuleService.class).getFlowRuleCount(),
- get(IntentService.class).getIntentCount());
+ if (outputJson()) {
+ print("%s", new ObjectMapper().createObjectNode()
+ .put("node", get(ClusterService.class).getLocalNode().ip().toString())
+ .put("version", get(CoreService.class).version().toString())
+ .put("nodes", get(ClusterService.class).getNodes().size())
+ .put("devices", get(DeviceService.class).getDeviceCount())
+ .put("links", get(LinkService.class).getLinkCount())
+ .put("hosts", get(HostService.class).getHostCount())
+ .put("clusters", topologyService.getClusters(topology).size())
+ .put("paths", topology.pathCount())
+ .put("flows", get(FlowRuleService.class).getFlowRuleCount())
+ .put("intents", get(IntentService.class).getIntentCount()));
+ } else {
+ print("node=%s, version=%s",
+ get(ClusterService.class).getLocalNode().ip(),
+ get(CoreService.class).version().toString());
+ print("nodes=%d, devices=%d, links=%d, hosts=%d, clusters=%s, paths=%d, flows=%d, intents=%d",
+ get(ClusterService.class).getNodes().size(),
+ get(DeviceService.class).getDeviceCount(),
+ get(LinkService.class).getLinkCount(),
+ get(HostService.class).getHostCount(),
+ topologyService.getClusters(topology).size(),
+ topology.pathCount(),
+ get(FlowRuleService.class).getFlowRuleCount(),
+ get(IntentService.class).getIntentCount());
+ }
}
}
diff --git a/cli/src/main/java/org/onlab/onos/cli/net/ClusterDevicesCommand.java b/cli/src/main/java/org/onlab/onos/cli/net/ClusterDevicesCommand.java
index f09b185..e03af45 100644
--- a/cli/src/main/java/org/onlab/onos/cli/net/ClusterDevicesCommand.java
+++ b/cli/src/main/java/org/onlab/onos/cli/net/ClusterDevicesCommand.java
@@ -1,5 +1,6 @@
package org.onlab.onos.cli.net;
+import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.Lists;
import org.apache.karaf.shell.commands.Argument;
import org.apache.karaf.shell.commands.Command;
@@ -10,6 +11,7 @@
import java.util.Collections;
import java.util.List;
+import static org.onlab.onos.cli.MastersListCommand.json;
import static org.onlab.onos.net.topology.ClusterId.clusterId;
/**
@@ -33,11 +35,14 @@
} else {
List<DeviceId> ids = Lists.newArrayList(service.getClusterDevices(topology, cluster));
Collections.sort(ids, Comparators.ELEMENT_ID_COMPARATOR);
- for (DeviceId deviceId : ids) {
- print("%s", deviceId);
+ if (outputJson()) {
+ print("%s", json(new ObjectMapper(), ids));
+ } else {
+ for (DeviceId deviceId : ids) {
+ print("%s", deviceId);
+ }
}
}
}
-
}
diff --git a/cli/src/main/java/org/onlab/onos/cli/net/ClusterLinksCommand.java b/cli/src/main/java/org/onlab/onos/cli/net/ClusterLinksCommand.java
index 2bbfb46..ed5be77 100644
--- a/cli/src/main/java/org/onlab/onos/cli/net/ClusterLinksCommand.java
+++ b/cli/src/main/java/org/onlab/onos/cli/net/ClusterLinksCommand.java
@@ -5,6 +5,7 @@
import org.onlab.onos.net.Link;
import org.onlab.onos.net.topology.TopologyCluster;
+import static org.onlab.onos.cli.net.LinksListCommand.json;
import static org.onlab.onos.cli.net.LinksListCommand.linkString;
import static org.onlab.onos.net.topology.ClusterId.clusterId;
@@ -26,6 +27,8 @@
TopologyCluster cluster = service.getCluster(topology, clusterId(cid));
if (cluster == null) {
error("No such cluster %s", cid);
+ } else if (outputJson()) {
+ print("%s", json(service.getClusterLinks(topology, cluster)));
} else {
for (Link link : service.getClusterLinks(topology, cluster)) {
print(linkString(link));
diff --git a/cli/src/main/java/org/onlab/onos/cli/net/ClustersListCommand.java b/cli/src/main/java/org/onlab/onos/cli/net/ClustersListCommand.java
index 2b2953b..f41f85e 100644
--- a/cli/src/main/java/org/onlab/onos/cli/net/ClustersListCommand.java
+++ b/cli/src/main/java/org/onlab/onos/cli/net/ClustersListCommand.java
@@ -1,5 +1,8 @@
package org.onlab.onos.cli.net;
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.node.ArrayNode;
import com.google.common.collect.Lists;
import org.apache.karaf.shell.commands.Command;
import org.onlab.onos.cli.Comparators;
@@ -24,9 +27,26 @@
List<TopologyCluster> clusters = Lists.newArrayList(service.getClusters(topology));
Collections.sort(clusters, Comparators.CLUSTER_COMPARATOR);
- for (TopologyCluster cluster : clusters) {
- print(FMT, cluster.id().index(), cluster.deviceCount(), cluster.linkCount());
+ if (outputJson()) {
+ print("%s", json(clusters));
+ } else {
+ for (TopologyCluster cluster : clusters) {
+ print(FMT, cluster.id().index(), cluster.deviceCount(), cluster.linkCount());
+ }
}
}
+ // Produces a JSON result.
+ private JsonNode json(Iterable<TopologyCluster> clusters) {
+ ObjectMapper mapper = new ObjectMapper();
+ ArrayNode result = mapper.createArrayNode();
+ for (TopologyCluster cluster : clusters) {
+ result.add(mapper.createObjectNode()
+ .put("id", cluster.id().index())
+ .put("deviceCount", cluster.deviceCount())
+ .put("linkCount", cluster.linkCount()));
+ }
+ return result;
+ }
+
}
diff --git a/cli/src/main/java/org/onlab/onos/cli/net/DevicePortsListCommand.java b/cli/src/main/java/org/onlab/onos/cli/net/DevicePortsListCommand.java
index f66cedd..32b7830 100644
--- a/cli/src/main/java/org/onlab/onos/cli/net/DevicePortsListCommand.java
+++ b/cli/src/main/java/org/onlab/onos/cli/net/DevicePortsListCommand.java
@@ -1,7 +1,12 @@
package org.onlab.onos.cli.net;
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.node.ArrayNode;
+import com.fasterxml.jackson.databind.node.ObjectNode;
import org.apache.karaf.shell.commands.Argument;
import org.apache.karaf.shell.commands.Command;
+import org.apache.karaf.shell.commands.Option;
import org.onlab.onos.cli.Comparators;
import org.onlab.onos.net.Device;
import org.onlab.onos.net.Port;
@@ -22,6 +27,14 @@
private static final String FMT = " port=%s, state=%s";
+ @Option(name = "-e", aliases = "--enabled", description = "Show only enabled ports",
+ required = false, multiValued = false)
+ private boolean enabled = false;
+
+ @Option(name = "-d", aliases = "--disabled", description = "Show only disabled ports",
+ required = false, multiValued = false)
+ private boolean disabled = false;
+
@Argument(index = 0, name = "uri", description = "Device ID",
required = false, multiValued = false)
String uri = null;
@@ -30,26 +43,78 @@
protected void execute() {
DeviceService service = get(DeviceService.class);
if (uri == null) {
- for (Device device : getSortedDevices(service)) {
- printDevice(service, device);
+ if (outputJson()) {
+ print("%s", jsonPorts(service, getSortedDevices(service)));
+ } else {
+ for (Device device : getSortedDevices(service)) {
+ printDevice(service, device);
+ }
}
+
} else {
Device device = service.getDevice(deviceId(uri));
if (device == null) {
error("No such device %s", uri);
+ } else if (outputJson()) {
+ print("%s", jsonPorts(service, new ObjectMapper(), device));
} else {
printDevice(service, device);
}
}
}
+ /**
+ * Produces JSON array containing ports of the specified devices.
+ *
+ * @param service device service
+ * @param devices collection of devices
+ * @return JSON array
+ */
+ public JsonNode jsonPorts(DeviceService service, Iterable<Device> devices) {
+ ObjectMapper mapper = new ObjectMapper();
+ ArrayNode result = mapper.createArrayNode();
+ for (Device device : devices) {
+ result.add(jsonPorts(service, mapper, device));
+ }
+ return result;
+ }
+
+ /**
+ * Produces JSON array containing ports of the specified device.
+ *
+ * @param service device service
+ * @param mapper object mapper
+ * @param device infrastructure devices
+ * @return JSON array
+ */
+ public JsonNode jsonPorts(DeviceService service, ObjectMapper mapper, Device device) {
+ ObjectNode result = mapper.createObjectNode();
+ ArrayNode ports = mapper.createArrayNode();
+ for (Port port : service.getPorts(device.id())) {
+ if (isIncluded(port)) {
+ ports.add(mapper.createObjectNode()
+ .put("port", port.number().toString())
+ .put("isEnabled", port.isEnabled()));
+ }
+ }
+ return result.put("device", device.id().toString()).set("ports", ports);
+ }
+
+ // Determines if a port should be included in output.
+ private boolean isIncluded(Port port) {
+ return enabled && port.isEnabled() || disabled && !port.isEnabled() ||
+ !enabled && !disabled;
+ }
+
@Override
protected void printDevice(DeviceService service, Device device) {
super.printDevice(service, device);
List<Port> ports = new ArrayList<>(service.getPorts(device.id()));
Collections.sort(ports, Comparators.PORT_COMPARATOR);
for (Port port : ports) {
- print(FMT, port.number(), port.isEnabled() ? "enabled" : "disabled");
+ if (isIncluded(port)) {
+ print(FMT, port.number(), port.isEnabled() ? "enabled" : "disabled");
+ }
}
}
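Editorial note: the isIncluded() filter above collapses to three simple cases; the standalone check below states them explicitly (illustrative only, not part of the command).

final class PortFilterSketch {
    // No flag: show everything; --enabled: only up ports; --disabled: only
    // down ports. Passing both flags matches either test, i.e. everything.
    static boolean isIncluded(boolean enabled, boolean disabled, boolean portUp) {
        return (enabled && portUp) || (disabled && !portUp) || (!enabled && !disabled);
    }
}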
diff --git a/cli/src/main/java/org/onlab/onos/cli/net/DevicesListCommand.java b/cli/src/main/java/org/onlab/onos/cli/net/DevicesListCommand.java
index f34f97e..b7a8acc 100644
--- a/cli/src/main/java/org/onlab/onos/cli/net/DevicesListCommand.java
+++ b/cli/src/main/java/org/onlab/onos/cli/net/DevicesListCommand.java
@@ -1,5 +1,9 @@
package org.onlab.onos.cli.net;
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.node.ArrayNode;
+import com.fasterxml.jackson.databind.node.ObjectNode;
import org.apache.karaf.shell.commands.Command;
import org.onlab.onos.cli.AbstractShellCommand;
import org.onlab.onos.cli.Comparators;
@@ -24,12 +28,55 @@
@Override
protected void execute() {
DeviceService service = get(DeviceService.class);
- for (Device device : getSortedDevices(service)) {
- printDevice(service, device);
+ if (outputJson()) {
+ print("%s", json(service, getSortedDevices(service)));
+ } else {
+ for (Device device : getSortedDevices(service)) {
+ printDevice(service, device);
+ }
}
}
/**
+ * Returns JSON node representing the specified devices.
+ *
+ * @param service device service
+ * @param devices collection of devices
+ * @return JSON node
+ */
+ public static JsonNode json(DeviceService service, Iterable<Device> devices) {
+ ObjectMapper mapper = new ObjectMapper();
+ ArrayNode result = mapper.createArrayNode();
+ for (Device device : devices) {
+ result.add(json(service, mapper, device));
+ }
+ return result;
+ }
+
+ /**
+ * Returns JSON node representing the specified device.
+ *
+ * @param service device service
+ * @param mapper object mapper
+ * @param device infrastructure device
+ * @return JSON node
+ */
+ public static ObjectNode json(DeviceService service, ObjectMapper mapper,
+ Device device) {
+ ObjectNode result = mapper.createObjectNode();
+ if (device != null) {
+ result.put("id", device.id().toString())
+ .put("available", service.isAvailable(device.id()))
+ .put("role", service.getRole(device.id()).toString())
+ .put("mfr", device.manufacturer())
+ .put("hw", device.hwVersion())
+ .put("sw", device.swVersion())
+ .put("serial", device.serialNumber());
+ }
+ return result;
+ }
+
+ /**
* Returns the list of devices sorted using the device ID URIs.
*
* @param service device service
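
Because json(DeviceService, ObjectMapper, Device) is public and static, other commands or tests can reuse it to embed a device summary in their own output. A hypothetical fragment in the CLI style used elsewhere in this patch, with an invented device id:

    // Illustrative only.
    ObjectMapper mapper = new ObjectMapper();
    DeviceService service = get(DeviceService.class);
    Device device = service.getDevice(DeviceId.deviceId("of:0000000000000001"));
    ObjectNode summary = DevicesListCommand.json(service, mapper, device);
    print("%s", summary);   // {"id":"of:...","available":true,"role":"MASTER",...}
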
diff --git a/cli/src/main/java/org/onlab/onos/cli/net/FlowsListCommand.java b/cli/src/main/java/org/onlab/onos/cli/net/FlowsListCommand.java
index 28309c5..39a8bc2 100644
--- a/cli/src/main/java/org/onlab/onos/cli/net/FlowsListCommand.java
+++ b/cli/src/main/java/org/onlab/onos/cli/net/FlowsListCommand.java
@@ -1,5 +1,9 @@
package org.onlab.onos.cli.net;
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.node.ArrayNode;
+import com.fasterxml.jackson.databind.node.ObjectNode;
import com.google.common.collect.Maps;
import org.apache.karaf.shell.commands.Argument;
import org.apache.karaf.shell.commands.Command;
@@ -12,6 +16,8 @@
import org.onlab.onos.net.flow.FlowEntry;
import org.onlab.onos.net.flow.FlowEntry.FlowEntryState;
import org.onlab.onos.net.flow.FlowRuleService;
+import org.onlab.onos.net.flow.criteria.Criterion;
+import org.onlab.onos.net.flow.instructions.Instruction;
import java.util.Collections;
import java.util.List;
@@ -48,12 +54,76 @@
DeviceService deviceService = get(DeviceService.class);
FlowRuleService service = get(FlowRuleService.class);
Map<Device, List<FlowEntry>> flows = getSortedFlows(deviceService, service);
- for (Device d : getSortedDevices(deviceService)) {
- printFlows(d, flows.get(d), coreService);
+
+ if (outputJson()) {
+ print("%s", json(coreService, getSortedDevices(deviceService), flows));
+ } else {
+ for (Device d : getSortedDevices(deviceService)) {
+ printFlows(d, flows.get(d), coreService);
+ }
}
}
/**
+ * Produces a JSON array of flows grouped by device.
+ *
+ * @param coreService core service
+ * @param devices collection of devices to group flows by
+ * @param flows collection of flows per device
+ * @return JSON array
+ */
+ private JsonNode json(CoreService coreService, Iterable<Device> devices,
+ Map<Device, List<FlowEntry>> flows) {
+ ObjectMapper mapper = new ObjectMapper();
+ ArrayNode result = mapper.createArrayNode();
+ for (Device device : devices) {
+ result.add(json(coreService, mapper, device, flows.get(device)));
+ }
+ return result;
+ }
+
+ // Produces JSON object with the flows of the given device.
+ private ObjectNode json(CoreService coreService, ObjectMapper mapper,
+ Device device, List<FlowEntry> flows) {
+ ObjectNode result = mapper.createObjectNode();
+ ArrayNode array = mapper.createArrayNode();
+
+ for (FlowEntry flow : flows) {
+ array.add(json(coreService, mapper, flow));
+ }
+
+ result.put("device", device.id().toString())
+ .put("flowCount", flows.size())
+ .set("flows", array);
+ return result;
+ }
+
+ // Produces JSON structure with the specified flow data.
+ private ObjectNode json(CoreService coreService, ObjectMapper mapper,
+ FlowEntry flow) {
+ ObjectNode result = mapper.createObjectNode();
+ ArrayNode crit = mapper.createArrayNode();
+ for (Criterion c : flow.selector().criteria()) {
+ crit.add(c.toString());
+ }
+
+ ArrayNode instr = mapper.createArrayNode();
+ for (Instruction i : flow.treatment().instructions()) {
+ instr.add(i.toString());
+ }
+
+ result.put("flowId", Long.toHexString(flow.id().value()))
+ .put("state", flow.state().toString())
+ .put("bytes", flow.bytes())
+ .put("packets", flow.packets())
+ .put("life", flow.life())
+ .put("appId", coreService.getAppId(flow.appId()).name());
+ result.set("selector", crit);
+ result.set("treatment", instr);
+ return result;
+ }
+
+ /**
* Returns the list of devices sorted using the device ID URIs.
*
* @param service device service
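
One detail worth noting: flowId is written as a hex string via Long.toHexString(), so ids with the sign bit set do not round-trip through Long.parseLong(). A small illustrative round-trip, assuming Java 8's Long.parseUnsignedLong() is available:

    // Illustrative only.
    long id = 0x2bL;
    String encoded = Long.toHexString(id);               // "2b", as emitted by the code above
    long decoded = Long.parseUnsignedLong(encoded, 16);  // works even when the top bit is set
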
diff --git a/cli/src/main/java/org/onlab/onos/cli/net/HostsListCommand.java b/cli/src/main/java/org/onlab/onos/cli/net/HostsListCommand.java
index cd9ba08..f431142 100644
--- a/cli/src/main/java/org/onlab/onos/cli/net/HostsListCommand.java
+++ b/cli/src/main/java/org/onlab/onos/cli/net/HostsListCommand.java
@@ -1,10 +1,15 @@
package org.onlab.onos.cli.net;
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.node.ArrayNode;
+import com.fasterxml.jackson.databind.node.ObjectNode;
import org.apache.karaf.shell.commands.Command;
import org.onlab.onos.cli.AbstractShellCommand;
import org.onlab.onos.cli.Comparators;
import org.onlab.onos.net.Host;
import org.onlab.onos.net.host.HostService;
+import org.onlab.packet.IpPrefix;
import java.util.Collections;
import java.util.List;
@@ -15,7 +20,7 @@
* Lists all currently-known hosts.
*/
@Command(scope = "onos", name = "hosts",
- description = "Lists all currently-known hosts.")
+ description = "Lists all currently-known hosts.")
public class HostsListCommand extends AbstractShellCommand {
private static final String FMT =
@@ -24,11 +29,42 @@
@Override
protected void execute() {
HostService service = get(HostService.class);
- for (Host host : getSortedHosts(service)) {
- printHost(host);
+ if (outputJson()) {
+ print("%s", json(getSortedHosts(service)));
+ } else {
+ for (Host host : getSortedHosts(service)) {
+ printHost(host);
+ }
}
}
+ // Produces a JSON array of the given hosts.
+ private static JsonNode json(Iterable<Host> hosts) {
+ ObjectMapper mapper = new ObjectMapper();
+ ArrayNode result = mapper.createArrayNode();
+ for (Host host : hosts) {
+ result.add(json(mapper, host));
+ }
+ return result;
+ }
+
+ // Produces a JSON object for the given host.
+ private static JsonNode json(ObjectMapper mapper, Host host) {
+ ObjectNode loc = LinksListCommand.json(mapper, host.location())
+ .put("time", host.location().time());
+ ArrayNode ips = mapper.createArrayNode();
+ for (IpPrefix ip : host.ipAddresses()) {
+ ips.add(ip.toString());
+ }
+ ObjectNode result = mapper.createObjectNode()
+ .put("id", host.id().toString())
+ .put("mac", host.mac().toString())
+ .put("vlan", host.vlan().toString());
+ result.set("location", loc);
+ result.set("ips", ips);
+ return result;
+ }
+
/**
* Returns the list of devices sorted using the device ID URIs.
*
@@ -44,14 +80,14 @@
/**
* Prints information about a host.
*
- * @param host
+ * @param host end-station host
*/
protected void printHost(Host host) {
if (host != null) {
print(FMT, host.id(), host.mac(),
- host.location().deviceId(),
- host.location().port(),
- host.vlan(), host.ipAddresses());
+ host.location().deviceId(),
+ host.location().port(),
+ host.vlan(), host.ipAddresses());
}
}
- }
+}
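
The host encoder builds its "location" field by reusing the connect-point encoder added to LinksListCommand in this same change and appending a "time" stamp. A stand-alone use of that helper, with invented identifiers and the imports added above assumed; it works because HostLocation is a ConnectPoint:

    // Illustrative only.
    ObjectMapper mapper = new ObjectMapper();
    HostLocation where = new HostLocation(
            DeviceId.deviceId("of:0000000000000001"), PortNumber.portNumber(1), 0L);
    ObjectNode location = LinksListCommand.json(mapper, where)  // {"device":"...","port":"1"}
            .put("time", where.time());
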
diff --git a/cli/src/main/java/org/onlab/onos/cli/net/IntentPushTestCommand.java b/cli/src/main/java/org/onlab/onos/cli/net/IntentPushTestCommand.java
index 60181bd..4c3ed8e 100644
--- a/cli/src/main/java/org/onlab/onos/cli/net/IntentPushTestCommand.java
+++ b/cli/src/main/java/org/onlab/onos/cli/net/IntentPushTestCommand.java
@@ -90,11 +90,15 @@
service.submit(intent);
}
try {
- latch.await(5, TimeUnit.SECONDS);
- printResults(count);
+ if (latch.await(10, TimeUnit.SECONDS)) {
+ printResults(count);
+ } else {
+ print("I FAIL MISERABLY -> %d", latch.getCount());
+ }
} catch (InterruptedException e) {
print(e.toString());
}
+
service.removeListener(this);
}
@@ -140,6 +144,8 @@
} else {
log.warn("install event latch is null");
}
+ } else {
+ log.info("I FAIL -> {}", event);
}
}
}
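
The await() change above works because CountDownLatch.await(timeout, unit) returns false when the timeout elapses before the count reaches zero; checking that return value is what distinguishes a timeout from success. A minimal sketch of the general pattern, not copied from this file:

    // Illustrative only; await() must be called inside a try/catch for InterruptedException.
    CountDownLatch latch = new CountDownLatch(expectedEvents);
    // ... a listener calls latch.countDown() once per event ...
    if (latch.await(10, TimeUnit.SECONDS)) {
        // all expected events arrived in time
    } else {
        long missing = latch.getCount();   // how many never arrived
    }
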
diff --git a/cli/src/main/java/org/onlab/onos/cli/net/LinksListCommand.java b/cli/src/main/java/org/onlab/onos/cli/net/LinksListCommand.java
index f5226b1..1c3287e 100644
--- a/cli/src/main/java/org/onlab/onos/cli/net/LinksListCommand.java
+++ b/cli/src/main/java/org/onlab/onos/cli/net/LinksListCommand.java
@@ -1,8 +1,13 @@
package org.onlab.onos.cli.net;
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.node.ArrayNode;
+import com.fasterxml.jackson.databind.node.ObjectNode;
import org.apache.karaf.shell.commands.Argument;
import org.apache.karaf.shell.commands.Command;
import org.onlab.onos.cli.AbstractShellCommand;
+import org.onlab.onos.net.ConnectPoint;
import org.onlab.onos.net.Link;
import org.onlab.onos.net.link.LinkService;
@@ -27,12 +32,58 @@
LinkService service = get(LinkService.class);
Iterable<Link> links = uri != null ?
service.getDeviceLinks(deviceId(uri)) : service.getLinks();
- for (Link link : links) {
- print(linkString(link));
+ if (outputJson()) {
+ print("%s", json(links));
+ } else {
+ for (Link link : links) {
+ print(linkString(link));
+ }
}
}
/**
+ * Produces a JSON array containing the specified links.
+ *
+ * @param links collection of links
+ * @return JSON array
+ */
+ public static JsonNode json(Iterable<Link> links) {
+ ObjectMapper mapper = new ObjectMapper();
+ ArrayNode result = mapper.createArrayNode();
+ for (Link link : links) {
+ result.add(json(mapper, link));
+ }
+ return result;
+ }
+
+ /**
+ * Produces a JSON object for the specified link.
+ *
+ * @param mapper object mapper
+ * @param link link to encode
+ * @return JSON object
+ */
+ public static ObjectNode json(ObjectMapper mapper, Link link) {
+ ObjectNode result = mapper.createObjectNode();
+ result.set("src", json(mapper, link.src()));
+ result.set("dst", json(mapper, link.dst()));
+ return result;
+ }
+
+ /**
+ * Produces a JSON object for the specified connect point.
+ *
+ * @param mapper object mapper
+ * @param connectPoint connection point to encode
+ * @return JSON object
+ */
+ public static ObjectNode json(ObjectMapper mapper, ConnectPoint connectPoint) {
+ return mapper.createObjectNode()
+ .put("device", connectPoint.deviceId().toString())
+ .put("port", connectPoint.port().toString());
+ }
+
+ /**
* Returns a formatted string representing the given link.
*
* @param link infrastructure link
diff --git a/cli/src/main/java/org/onlab/onos/cli/net/PathListCommand.java b/cli/src/main/java/org/onlab/onos/cli/net/PathListCommand.java
index 8bb808a..6777625 100644
--- a/cli/src/main/java/org/onlab/onos/cli/net/PathListCommand.java
+++ b/cli/src/main/java/org/onlab/onos/cli/net/PathListCommand.java
@@ -1,5 +1,8 @@
package org.onlab.onos.cli.net;
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.node.ArrayNode;
import org.apache.karaf.shell.commands.Argument;
import org.apache.karaf.shell.commands.Command;
import org.onlab.onos.net.Link;
@@ -32,12 +35,33 @@
protected void execute() {
init();
Set<Path> paths = service.getPaths(topology, deviceId(src), deviceId(dst));
- for (Path path : paths) {
- print(pathString(path));
+ if (outputJson()) {
+ print("%s", json(paths));
+ } else {
+ for (Path path : paths) {
+ print(pathString(path));
+ }
}
}
/**
+ * Produces a JSON array containing the specified paths.
+ *
+ * @param paths collection of paths
+ * @return JSON array
+ */
+ public static JsonNode json(Iterable<Path> paths) {
+ ObjectMapper mapper = new ObjectMapper();
+ ArrayNode result = mapper.createArrayNode();
+ for (Path path : paths) {
+ result.add(LinksListCommand.json(mapper, path)
+ .put("cost", path.cost())
+ .set("links", LinksListCommand.json(path.links())));
+ }
+ return result;
+ }
+
+ /**
* Produces a formatted string representing the specified path.
*
* @param path network path
diff --git a/cli/src/main/java/org/onlab/onos/cli/net/TopologyCommand.java b/cli/src/main/java/org/onlab/onos/cli/net/TopologyCommand.java
index 5c8310f..a7e6422 100644
--- a/cli/src/main/java/org/onlab/onos/cli/net/TopologyCommand.java
+++ b/cli/src/main/java/org/onlab/onos/cli/net/TopologyCommand.java
@@ -1,5 +1,6 @@
package org.onlab.onos.cli.net;
+import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.karaf.shell.commands.Command;
import org.onlab.onos.cli.AbstractShellCommand;
import org.onlab.onos.net.topology.Topology;
@@ -30,8 +31,17 @@
@Override
protected void execute() {
init();
- print(FMT, topology.time(), topology.deviceCount(), topology.linkCount(),
- topology.clusterCount(), topology.pathCount());
+ if (outputJson()) {
+ print("%s", new ObjectMapper().createObjectNode()
+ .put("time", topology.time())
+ .put("deviceCount", topology.deviceCount())
+ .put("linkCount", topology.linkCount())
+ .put("clusterCount", topology.clusterCount())
+ .put("pathCount", topology.pathCount()));
+ } else {
+ print(FMT, topology.time(), topology.deviceCount(), topology.linkCount(),
+ topology.clusterCount(), topology.pathCount());
+ }
}
}
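
Unlike the list commands above, this command emits a single JSON object rather than an array. A hypothetical consumer of the printed output, where jsonText is assumed to hold it and readTree() may throw IOException:

    // Illustrative only.
    JsonNode topo = new ObjectMapper().readTree(jsonText);
    long deviceCount = topo.get("deviceCount").asLong();
    long linkCount = topo.get("linkCount").asLong();
    long pathCount = topo.get("pathCount").asLong();
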
diff --git a/cli/src/main/resources/OSGI-INF/blueprint/shell-config.xml b/cli/src/main/resources/OSGI-INF/blueprint/shell-config.xml
index 9e6d396..3c75bf9 100644
--- a/cli/src/main/resources/OSGI-INF/blueprint/shell-config.xml
+++ b/cli/src/main/resources/OSGI-INF/blueprint/shell-config.xml
@@ -13,6 +13,10 @@
<command>
<action class="org.onlab.onos.cli.NodeRemoveCommand"/>
</command>
+
+ <command>
+ <action class="org.onlab.onos.cli.RolesCommand"/>
+ </command>
<command>
<action class="org.onlab.onos.cli.MastersListCommand"/>
<completers>
diff --git a/core/api/src/main/java/org/onlab/onos/cluster/ControllerNodeToNodeId.java b/core/api/src/main/java/org/onlab/onos/cluster/ControllerNodeToNodeId.java
index 0891494..0f79c68 100644
--- a/core/api/src/main/java/org/onlab/onos/cluster/ControllerNodeToNodeId.java
+++ b/core/api/src/main/java/org/onlab/onos/cluster/ControllerNodeToNodeId.java
@@ -12,7 +12,11 @@
@Override
public NodeId apply(ControllerNode input) {
- return input.id();
+ if (input == null) {
+ return null;
+ } else {
+ return input.id();
+ }
}
/**
diff --git a/core/api/src/main/java/org/onlab/onos/mastership/MastershipService.java b/core/api/src/main/java/org/onlab/onos/mastership/MastershipService.java
index 029e357..224bc05 100644
--- a/core/api/src/main/java/org/onlab/onos/mastership/MastershipService.java
+++ b/core/api/src/main/java/org/onlab/onos/mastership/MastershipService.java
@@ -1,5 +1,6 @@
package org.onlab.onos.mastership;
+import java.util.List;
import java.util.Set;
import org.onlab.onos.cluster.NodeId;
@@ -50,6 +51,15 @@
NodeId getMasterFor(DeviceId deviceId);
/**
+ * Returns controllers connected to a given device, in order of
+ * preference. The first entry in the list is the current master.
+ *
+ * @param deviceId the identifier of the device
+ * @return a list of controller IDs
+ */
+ List<NodeId> getNodesFor(DeviceId deviceId);
+
+ /**
* Returns the devices for which a controller is master.
*
* @param nodeId the ID of the controller
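
Since getNodesFor() promises the current master first, callers can derive both the master and the standby list from a single call. A hedged usage sketch with an invented device id; get() stands for whatever service lookup the caller uses, shown here in the CLI style used elsewhere in this patch:

    // Illustrative only.
    MastershipService mastership = get(MastershipService.class);
    List<NodeId> nodes = mastership.getNodesFor(DeviceId.deviceId("of:0000000000000001"));
    NodeId master = nodes.isEmpty() ? null : nodes.get(0);   // first entry is the current master
    List<NodeId> standbys = nodes.size() > 1
            ? nodes.subList(1, nodes.size()) : Collections.<NodeId>emptyList();
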
diff --git a/core/api/src/main/java/org/onlab/onos/mastership/MastershipStore.java b/core/api/src/main/java/org/onlab/onos/mastership/MastershipStore.java
index 0117d0d..5e7b0e4 100644
--- a/core/api/src/main/java/org/onlab/onos/mastership/MastershipStore.java
+++ b/core/api/src/main/java/org/onlab/onos/mastership/MastershipStore.java
@@ -1,5 +1,6 @@
package org.onlab.onos.mastership;
+import java.util.List;
import java.util.Set;
import org.onlab.onos.cluster.NodeId;
@@ -41,6 +42,15 @@
NodeId getMaster(DeviceId deviceId);
/**
+ * Returns the controllers connected to a device, in order of
+ * mastership preference. The first entry in the list is the current master.
+ *
+ * @param deviceId the device identifier
+ * @return an ordered list of controller IDs
+ */
+ List<NodeId> getNodes(DeviceId deviceId);
+
+ /**
* Returns the devices that a controller instance is master of.
*
* @param nodeId the instance identifier
@@ -48,6 +58,7 @@
*/
Set<DeviceId> getDevices(NodeId nodeId);
+
/**
* Sets a device's role for a specified controller instance.
*
diff --git a/core/api/src/main/java/org/onlab/onos/net/DefaultDevice.java b/core/api/src/main/java/org/onlab/onos/net/DefaultDevice.java
index 8b3eee1..53f2676 100644
--- a/core/api/src/main/java/org/onlab/onos/net/DefaultDevice.java
+++ b/core/api/src/main/java/org/onlab/onos/net/DefaultDevice.java
@@ -1,6 +1,7 @@
package org.onlab.onos.net;
import org.onlab.onos.net.provider.ProviderId;
+import org.onlab.packet.ChassisId;
import java.util.Objects;
@@ -16,6 +17,7 @@
private final String serialNumber;
private final String hwVersion;
private final String swVersion;
+ private final ChassisId chassisId;
// For serialization
private DefaultDevice() {
@@ -24,6 +26,7 @@
this.hwVersion = null;
this.swVersion = null;
this.serialNumber = null;
+ this.chassisId = null;
}
/**
@@ -40,13 +43,15 @@
*/
public DefaultDevice(ProviderId providerId, DeviceId id, Type type,
String manufacturer, String hwVersion, String swVersion,
- String serialNumber, Annotations... annotations) {
+ String serialNumber, ChassisId chassisId,
+ Annotations... annotations) {
super(providerId, id, annotations);
this.type = type;
this.manufacturer = manufacturer;
this.hwVersion = hwVersion;
this.swVersion = swVersion;
this.serialNumber = serialNumber;
+ this.chassisId = chassisId;
}
@Override
@@ -80,6 +85,11 @@
}
@Override
+ public ChassisId chassisId() {
+ return chassisId;
+ }
+
+ @Override
public int hashCode() {
return Objects.hash(id, type, manufacturer, hwVersion, swVersion, serialNumber);
}
diff --git a/core/api/src/main/java/org/onlab/onos/net/Device.java b/core/api/src/main/java/org/onlab/onos/net/Device.java
index 4e3478f..20737ac 100644
--- a/core/api/src/main/java/org/onlab/onos/net/Device.java
+++ b/core/api/src/main/java/org/onlab/onos/net/Device.java
@@ -1,5 +1,7 @@
package org.onlab.onos.net;
+import org.onlab.packet.ChassisId;
+
/**
* Representation of a network infrastructure device.
*/
@@ -54,6 +56,13 @@
*/
String serialNumber();
+ /**
+ * Returns the device chassis id.
+ *
+ * @return chassis id
+ */
+ ChassisId chassisId();
+
// Device realizedBy(); ?
// ports are not provided directly, but rather via DeviceService.getPorts(Device device);
diff --git a/core/api/src/main/java/org/onlab/onos/net/device/DefaultDeviceDescription.java b/core/api/src/main/java/org/onlab/onos/net/device/DefaultDeviceDescription.java
index ede2eb2..79710ae 100644
--- a/core/api/src/main/java/org/onlab/onos/net/device/DefaultDeviceDescription.java
+++ b/core/api/src/main/java/org/onlab/onos/net/device/DefaultDeviceDescription.java
@@ -2,6 +2,7 @@
import org.onlab.onos.net.AbstractDescription;
import org.onlab.onos.net.SparseAnnotations;
+import org.onlab.packet.ChassisId;
import java.net.URI;
@@ -20,6 +21,7 @@
private final String hwVersion;
private final String swVersion;
private final String serialNumber;
+ private final ChassisId chassisId;
/**
* Creates a device description using the supplied information.
@@ -34,7 +36,7 @@
*/
public DefaultDeviceDescription(URI uri, Type type, String manufacturer,
String hwVersion, String swVersion,
- String serialNumber,
+ String serialNumber, ChassisId chassis,
SparseAnnotations... annotations) {
super(annotations);
this.uri = checkNotNull(uri, "Device URI cannot be null");
@@ -43,6 +45,7 @@
this.hwVersion = hwVersion;
this.swVersion = swVersion;
this.serialNumber = serialNumber;
+ this.chassisId = chassis;
}
/**
@@ -54,7 +57,7 @@
SparseAnnotations... annotations) {
this(base.deviceURI(), base.type(), base.manufacturer(),
base.hwVersion(), base.swVersion(), base.serialNumber(),
- annotations);
+ base.chassisId(), annotations);
}
@Override
@@ -88,6 +91,11 @@
}
@Override
+ public ChassisId chassisId() {
+ return chassisId;
+ }
+
+ @Override
public String toString() {
return toStringHelper(this)
.add("uri", uri).add("type", type).add("mfr", manufacturer)
@@ -104,5 +112,6 @@
this.hwVersion = null;
this.swVersion = null;
this.serialNumber = null;
+ this.chassisId = null;
}
}
diff --git a/core/api/src/main/java/org/onlab/onos/net/device/DeviceDescription.java b/core/api/src/main/java/org/onlab/onos/net/device/DeviceDescription.java
index e32c19d..99b49ab 100644
--- a/core/api/src/main/java/org/onlab/onos/net/device/DeviceDescription.java
+++ b/core/api/src/main/java/org/onlab/onos/net/device/DeviceDescription.java
@@ -2,6 +2,7 @@
import org.onlab.onos.net.Description;
import org.onlab.onos.net.Device;
+import org.onlab.packet.ChassisId;
import java.net.URI;
@@ -54,4 +55,11 @@
*/
String serialNumber();
+ /**
+ * Returns a device chassis id.
+ *
+ * @return chassis id
+ */
+ ChassisId chassisId();
+
}
diff --git a/core/api/src/main/java/org/onlab/onos/net/flow/DefaultFlowEntry.java b/core/api/src/main/java/org/onlab/onos/net/flow/DefaultFlowEntry.java
index d4657d2..905469f 100644
--- a/core/api/src/main/java/org/onlab/onos/net/flow/DefaultFlowEntry.java
+++ b/core/api/src/main/java/org/onlab/onos/net/flow/DefaultFlowEntry.java
@@ -6,9 +6,10 @@
import org.onlab.onos.net.DeviceId;
import org.slf4j.Logger;
-public class DefaultFlowEntry extends DefaultFlowRule implements FlowEntry {
+public class DefaultFlowEntry extends DefaultFlowRule
+ implements FlowEntry, StoredFlowEntry {
- private final Logger log = getLogger(getClass());
+ private static final Logger log = getLogger(DefaultFlowEntry.class);
private long life;
private long packets;
diff --git a/core/api/src/main/java/org/onlab/onos/net/flow/DefaultFlowRule.java b/core/api/src/main/java/org/onlab/onos/net/flow/DefaultFlowRule.java
index e5504db..6ecbbbc 100644
--- a/core/api/src/main/java/org/onlab/onos/net/flow/DefaultFlowRule.java
+++ b/core/api/src/main/java/org/onlab/onos/net/flow/DefaultFlowRule.java
@@ -11,7 +11,7 @@
public class DefaultFlowRule implements FlowRule {
- private final Logger log = getLogger(getClass());
+ private static final Logger log = getLogger(DefaultFlowRule.class);
private final DeviceId deviceId;
private final int priority;
diff --git a/core/api/src/main/java/org/onlab/onos/net/flow/FlowEntry.java b/core/api/src/main/java/org/onlab/onos/net/flow/FlowEntry.java
index 882c9df..cdccaa9 100644
--- a/core/api/src/main/java/org/onlab/onos/net/flow/FlowEntry.java
+++ b/core/api/src/main/java/org/onlab/onos/net/flow/FlowEntry.java
@@ -65,6 +65,7 @@
*/
long bytes();
+ // TODO: consider removing this attribute
/**
* When this flow entry was last deemed active.
* @return epoch time of last activity
@@ -72,35 +73,6 @@
long lastSeen();
/**
- * Sets the last active epoch time.
- */
- void setLastSeen();
-
- /**
- * Sets the new state for this entry.
- * @param newState new flow entry state.
- */
- void setState(FlowEntryState newState);
-
- /**
- * Sets how long this entry has been entered in the system.
- * @param life epoch time
- */
- void setLife(long life);
-
- /**
- * Number of packets seen by this entry.
- * @param packets a long value
- */
- void setPackets(long packets);
-
- /**
- * Number of bytes seen by this rule.
- * @param bytes a long value
- */
- void setBytes(long bytes);
-
- /**
* Indicates the error type.
* @return an integer value of the error
*/
diff --git a/core/api/src/main/java/org/onlab/onos/net/flow/StoredFlowEntry.java b/core/api/src/main/java/org/onlab/onos/net/flow/StoredFlowEntry.java
new file mode 100644
index 0000000..e68ed68
--- /dev/null
+++ b/core/api/src/main/java/org/onlab/onos/net/flow/StoredFlowEntry.java
@@ -0,0 +1,35 @@
+package org.onlab.onos.net.flow;
+
+/** Flow entry whose statistics and state may be updated by the owning store. */
+public interface StoredFlowEntry extends FlowEntry {
+
+ /**
+ * Sets the last active epoch time.
+ */
+ void setLastSeen();
+
+ /**
+ * Sets the new state for this entry.
+ * @param newState new flow entry state.
+ */
+ void setState(FlowEntryState newState);
+
+ /**
+ * Sets how long this entry has been in the system.
+ * @param life time the entry has been alive
+ */
+ void setLife(long life);
+
+ /**
+ * Sets the number of packets seen by this entry.
+ * @param packets packet count
+ */
+ void setPackets(long packets);
+
+ /**
+ * Sets the number of bytes seen by this entry.
+ * @param bytes byte count
+ */
+ void setBytes(long bytes);
+
+}
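
Moving the setters onto StoredFlowEntry keeps FlowEntry read-only for API consumers; only store-side code that owns an entry is expected to update its statistics. A hedged sketch of that store-side pattern, not taken from this patch:

    // Illustrative only: copy counters from a switch-reported entry onto the stored one.
    static void updateStats(FlowEntry stored, FlowEntry reported) {
        if (stored instanceof StoredFlowEntry) {
            StoredFlowEntry writable = (StoredFlowEntry) stored;
            writable.setLife(reported.life());
            writable.setPackets(reported.packets());
            writable.setBytes(reported.bytes());
            writable.setLastSeen();
        }
    }
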
diff --git a/core/api/src/main/java/org/onlab/onos/net/host/DefaultHostDescription.java b/core/api/src/main/java/org/onlab/onos/net/host/DefaultHostDescription.java
index 2e92dad..71a952e 100644
--- a/core/api/src/main/java/org/onlab/onos/net/host/DefaultHostDescription.java
+++ b/core/api/src/main/java/org/onlab/onos/net/host/DefaultHostDescription.java
@@ -1,5 +1,8 @@
package org.onlab.onos.net.host;
+import java.util.Collections;
+import java.util.Set;
+
import org.onlab.onos.net.AbstractDescription;
import org.onlab.onos.net.HostLocation;
import org.onlab.onos.net.SparseAnnotations;
@@ -7,6 +10,8 @@
import org.onlab.packet.MacAddress;
import org.onlab.packet.VlanId;
+import com.google.common.collect.ImmutableSet;
+
import static com.google.common.base.MoreObjects.toStringHelper;
/**
@@ -18,7 +23,7 @@
private final MacAddress mac;
private final VlanId vlan;
private final HostLocation location;
- private final IpPrefix ip;
+ private final Set<IpPrefix> ip;
/**
* Creates a host description using the supplied information.
@@ -31,7 +36,7 @@
public DefaultHostDescription(MacAddress mac, VlanId vlan,
HostLocation location,
SparseAnnotations... annotations) {
- this(mac, vlan, location, null, annotations);
+ this(mac, vlan, location, Collections.<IpPrefix>emptySet(), annotations);
}
/**
@@ -46,11 +51,26 @@
public DefaultHostDescription(MacAddress mac, VlanId vlan,
HostLocation location, IpPrefix ip,
SparseAnnotations... annotations) {
+ this(mac, vlan, location, ImmutableSet.of(ip), annotations);
+ }
+
+ /**
+ * Creates a host description using the supplied information.
+ *
+ * @param mac host MAC address
+ * @param vlan host VLAN identifier
+ * @param location host location
+ * @param ip host IP addresses
+ * @param annotations optional key/value annotations map
+ */
+ public DefaultHostDescription(MacAddress mac, VlanId vlan,
+ HostLocation location, Set<IpPrefix> ip,
+ SparseAnnotations... annotations) {
super(annotations);
this.mac = mac;
this.vlan = vlan;
this.location = location;
- this.ip = ip;
+ this.ip = ImmutableSet.copyOf(ip);
}
@Override
@@ -69,7 +89,7 @@
}
@Override
- public IpPrefix ipAddress() {
+ public Set<IpPrefix> ipAddress() {
return ip;
}
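
With the IP field widened to a set, a host with several addresses can now be described in one object, and the old single-IP constructor simply wraps its argument in ImmutableSet.of(). A hypothetical construction with all values invented, assuming the usual onlab-packet factory methods:

    // Illustrative only.
    HostDescription description = new DefaultHostDescription(
            MacAddress.valueOf("00:00:00:00:00:01"),
            VlanId.vlanId((short) 10),
            new HostLocation(DeviceId.deviceId("of:0000000000000001"), PortNumber.portNumber(1), 0L),
            ImmutableSet.of(IpPrefix.valueOf("10.0.0.1/32"), IpPrefix.valueOf("10.0.1.1/32")));
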
diff --git a/core/api/src/main/java/org/onlab/onos/net/host/HostDescription.java b/core/api/src/main/java/org/onlab/onos/net/host/HostDescription.java
index fc16854..258ce3d 100644
--- a/core/api/src/main/java/org/onlab/onos/net/host/HostDescription.java
+++ b/core/api/src/main/java/org/onlab/onos/net/host/HostDescription.java
@@ -1,5 +1,7 @@
package org.onlab.onos.net.host;
+import java.util.Set;
+
import org.onlab.onos.net.Description;
import org.onlab.onos.net.HostLocation;
import org.onlab.packet.IpPrefix;
@@ -38,6 +40,6 @@
* @return host IP address
*/
// FIXME: Switch to IpAddress
- IpPrefix ipAddress();
+ Set<IpPrefix> ipAddress();
}
diff --git a/core/api/src/main/java/org/onlab/onos/net/intent/HostToHostIntent.java b/core/api/src/main/java/org/onlab/onos/net/intent/HostToHostIntent.java
index f420fc2..7a894be6 100644
--- a/core/api/src/main/java/org/onlab/onos/net/intent/HostToHostIntent.java
+++ b/core/api/src/main/java/org/onlab/onos/net/intent/HostToHostIntent.java
@@ -12,7 +12,7 @@
/**
* Abstraction of end-station to end-station bidirectional connectivity.
*/
-public class HostToHostIntent extends ConnectivityIntent {
+public final class HostToHostIntent extends ConnectivityIntent {
private final HostId one;
private final HostId two;
diff --git a/core/api/src/main/java/org/onlab/onos/net/intent/LinkCollectionIntent.java b/core/api/src/main/java/org/onlab/onos/net/intent/LinkCollectionIntent.java
index 78c95cf..6a8b002 100644
--- a/core/api/src/main/java/org/onlab/onos/net/intent/LinkCollectionIntent.java
+++ b/core/api/src/main/java/org/onlab/onos/net/intent/LinkCollectionIntent.java
@@ -4,6 +4,7 @@
import java.util.Objects;
import java.util.Set;
+import org.onlab.onos.net.ConnectPoint;
import org.onlab.onos.net.Link;
import org.onlab.onos.net.flow.TrafficSelector;
import org.onlab.onos.net.flow.TrafficTreatment;
@@ -14,10 +15,12 @@
* Abstraction of a connectivity intent that is implemented by a set of path
* segments.
*/
-public class LinkCollectionIntent extends ConnectivityIntent implements InstallableIntent {
+public final class LinkCollectionIntent extends ConnectivityIntent implements InstallableIntent {
private final Set<Link> links;
+ private final ConnectPoint egressPoint;
+
/**
* Creates a new point-to-point intent with the supplied ingress/egress
* ports and using the specified explicit path.
@@ -26,19 +29,23 @@
* @param selector traffic match
* @param treatment action
* @param links traversed links
+ * @param egressPoint egress point
* @throws NullPointerException {@code path} is null
*/
public LinkCollectionIntent(IntentId id,
TrafficSelector selector,
TrafficTreatment treatment,
- Set<Link> links) {
+ Set<Link> links,
+ ConnectPoint egressPoint) {
super(id, selector, treatment);
this.links = links;
+ this.egressPoint = egressPoint;
}
protected LinkCollectionIntent() {
super();
this.links = null;
+ this.egressPoint = null;
}
@Override
@@ -46,10 +53,25 @@
return links;
}
+ /**
+ * Returns the set of links that represent the network connections needed
+ * by this intent.
+ *
+ * @return Set of links for the network hops needed by this intent
+ */
public Set<Link> links() {
return links;
}
+ /**
+ * Returns the egress point of the intent.
+ *
+ * @return the egress point
+ */
+ public ConnectPoint egressPoint() {
+ return egressPoint;
+ }
+
@Override
public boolean equals(Object o) {
if (this == o) {
@@ -64,12 +86,13 @@
LinkCollectionIntent that = (LinkCollectionIntent) o;
- return Objects.equals(this.links, that.links);
+ return Objects.equals(this.links, that.links) &&
+ Objects.equals(this.egressPoint, that.egressPoint);
}
@Override
public int hashCode() {
- return Objects.hash(super.hashCode(), links);
+ return Objects.hash(super.hashCode(), links, egressPoint);
}
@Override
@@ -79,6 +102,7 @@
.add("match", selector())
.add("action", treatment())
.add("links", links())
+ .add("egress", egressPoint())
.toString();
}
}
diff --git a/core/api/src/main/java/org/onlab/onos/net/intent/MultiPointToSinglePointIntent.java b/core/api/src/main/java/org/onlab/onos/net/intent/MultiPointToSinglePointIntent.java
index be8d309..8ee4a9e 100644
--- a/core/api/src/main/java/org/onlab/onos/net/intent/MultiPointToSinglePointIntent.java
+++ b/core/api/src/main/java/org/onlab/onos/net/intent/MultiPointToSinglePointIntent.java
@@ -15,7 +15,7 @@
/**
* Abstraction of multiple source to single destination connectivity intent.
*/
-public class MultiPointToSinglePointIntent extends ConnectivityIntent {
+public final class MultiPointToSinglePointIntent extends ConnectivityIntent {
private final Set<ConnectPoint> ingressPoints;
private final ConnectPoint egressPoint;
diff --git a/core/api/src/main/java/org/onlab/onos/net/packet/DefaultInboundPacket.java b/core/api/src/main/java/org/onlab/onos/net/packet/DefaultInboundPacket.java
index fb31b10..1bf53d5 100644
--- a/core/api/src/main/java/org/onlab/onos/net/packet/DefaultInboundPacket.java
+++ b/core/api/src/main/java/org/onlab/onos/net/packet/DefaultInboundPacket.java
@@ -24,7 +24,7 @@
* @param parsed parsed ethernet frame
* @param unparsed unparsed raw bytes
*/
- public DefaultInboundPacket(ConnectPoint receivedFrom, Ethernet parsed,
+ public DefaultInboundPacket(ConnectPoint receivedFrom, Ethernet parsed,
ByteBuffer unparsed) {
this.receivedFrom = receivedFrom;
this.parsed = parsed;
diff --git a/core/api/src/main/java/org/onlab/onos/net/provider/AbstractProviderRegistry.java b/core/api/src/main/java/org/onlab/onos/net/provider/AbstractProviderRegistry.java
index d59bfd2..35aed74 100644
--- a/core/api/src/main/java/org/onlab/onos/net/provider/AbstractProviderRegistry.java
+++ b/core/api/src/main/java/org/onlab/onos/net/provider/AbstractProviderRegistry.java
@@ -62,6 +62,9 @@
((AbstractProviderService) service).invalidate();
services.remove(provider.id());
providers.remove(provider.id());
+ if (!provider.id().isAncillary()) {
+ providersByScheme.remove(provider.id().scheme());
+ }
}
}
diff --git a/core/api/src/main/java/org/onlab/onos/net/topology/Topology.java b/core/api/src/main/java/org/onlab/onos/net/topology/Topology.java
index f71a5ec..ea3c68d 100644
--- a/core/api/src/main/java/org/onlab/onos/net/topology/Topology.java
+++ b/core/api/src/main/java/org/onlab/onos/net/topology/Topology.java
@@ -7,6 +7,8 @@
*/
public interface Topology extends Provided {
+ // FIXME: Following is not true right now. It is actually System.nanoTime(),
+ // which has no relation to epoch time, wall clock, etc.
/**
* Returns the time, specified in milliseconds since start of epoch,
* when the topology became active and made available.
diff --git a/core/api/src/main/java/org/onlab/onos/store/cluster/messaging/ClusterCommunicationService.java b/core/api/src/main/java/org/onlab/onos/store/cluster/messaging/ClusterCommunicationService.java
index 8b966ed..6fc150c 100644
--- a/core/api/src/main/java/org/onlab/onos/store/cluster/messaging/ClusterCommunicationService.java
+++ b/core/api/src/main/java/org/onlab/onos/store/cluster/messaging/ClusterCommunicationService.java
@@ -37,6 +37,15 @@
boolean multicast(ClusterMessage message, Set<NodeId> nodeIds) throws IOException;
/**
+ * Sends a message and returns a handle for retrieving the reply.
+ * @param message message to send
+ * @param toNodeId recipient node identifier
+ * @return reply future from which the response payload can be obtained
+ * @throws IOException if the message could not be sent
+ */
+ ClusterMessageResponse sendAndReceive(ClusterMessage message, NodeId toNodeId) throws IOException;
+
+ /**
* Adds a new subscriber for the specified message subject.
*
* @param subject message subject
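
Paired with the ClusterMessageResponse type added below, sendAndReceive() gives a small request/reply primitive on top of the messaging service. A hedged sketch of a caller; the subject name and payload are invented, and the usual ClusterMessage(sender, subject, payload) constructor is assumed:

    // Illustrative only: send a request to one peer and wait briefly for its reply.
    byte[] request(ClusterCommunicationService communicator, NodeId self, NodeId peer,
                   byte[] payload) throws IOException, TimeoutException {
        ClusterMessage message = new ClusterMessage(self, new MessageSubject("flow-store-peek"), payload);
        ClusterMessageResponse response = communicator.sendAndReceive(message, peer);
        return response.get(500, TimeUnit.MILLISECONDS);   // block up to half a second
    }
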
diff --git a/core/api/src/main/java/org/onlab/onos/store/cluster/messaging/ClusterMessage.java b/core/api/src/main/java/org/onlab/onos/store/cluster/messaging/ClusterMessage.java
index b74f887..dd29f24 100644
--- a/core/api/src/main/java/org/onlab/onos/store/cluster/messaging/ClusterMessage.java
+++ b/core/api/src/main/java/org/onlab/onos/store/cluster/messaging/ClusterMessage.java
@@ -1,5 +1,7 @@
package org.onlab.onos.store.cluster.messaging;
+import java.io.IOException;
+
import org.onlab.onos.cluster.NodeId;
// TODO: Should payload type be ByteBuffer?
@@ -49,4 +51,14 @@
public byte[] payload() {
return payload;
}
+
+ /**
+ * Sends a response to the sender.
+ *
+ * @param data response payload
+ * @throws IOException if the response could not be sent
+ */
+ public void respond(byte[] data) throws IOException {
+ throw new IllegalStateException("One can only respond to messages received from others.");
+ }
}
diff --git a/core/api/src/main/java/org/onlab/onos/store/cluster/messaging/ClusterMessageResponse.java b/core/api/src/main/java/org/onlab/onos/store/cluster/messaging/ClusterMessageResponse.java
new file mode 100644
index 0000000..ae2089d
--- /dev/null
+++ b/core/api/src/main/java/org/onlab/onos/store/cluster/messaging/ClusterMessageResponse.java
@@ -0,0 +1,12 @@
+package org.onlab.onos.store.cluster.messaging;
+
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+import org.onlab.onos.cluster.NodeId;
+/** Future-like handle for the reply to a message sent with sendAndReceive. */
+public interface ClusterMessageResponse {
+ public NodeId sender();
+ public byte[] get(long timeout, TimeUnit timeunit) throws TimeoutException;
+ public byte[] get(long timeout) throws InterruptedException;
+}
diff --git a/core/api/src/test/java/org/onlab/onos/cluster/ControllerNodeToNodeIdTest.java b/core/api/src/test/java/org/onlab/onos/cluster/ControllerNodeToNodeIdTest.java
index 44261e8..b95dcfc 100644
--- a/core/api/src/test/java/org/onlab/onos/cluster/ControllerNodeToNodeIdTest.java
+++ b/core/api/src/test/java/org/onlab/onos/cluster/ControllerNodeToNodeIdTest.java
@@ -1,5 +1,6 @@
package org.onlab.onos.cluster;
+import static com.google.common.base.Predicates.notNull;
import static org.junit.Assert.*;
import static org.onlab.onos.cluster.ControllerNodeToNodeId.toNodeId;
@@ -30,12 +31,13 @@
@Test
public final void testToNodeId() {
- final Iterable<ControllerNode> nodes = Arrays.asList(CN1, CN2, CN3);
+ final Iterable<ControllerNode> nodes = Arrays.asList(CN1, CN2, CN3, null);
final List<NodeId> nodeIds = Arrays.asList(NID1, NID2, NID3);
assertEquals(nodeIds,
FluentIterable.from(nodes)
.transform(toNodeId())
+ .filter(notNull())
.toList());
}
diff --git a/core/api/src/test/java/org/onlab/onos/mastership/MastershipServiceAdapter.java b/core/api/src/test/java/org/onlab/onos/mastership/MastershipServiceAdapter.java
index 97b57e5..af376e8 100644
--- a/core/api/src/test/java/org/onlab/onos/mastership/MastershipServiceAdapter.java
+++ b/core/api/src/test/java/org/onlab/onos/mastership/MastershipServiceAdapter.java
@@ -4,6 +4,7 @@
import org.onlab.onos.net.DeviceId;
import org.onlab.onos.net.MastershipRole;
+import java.util.List;
import java.util.Set;
/**
@@ -46,4 +47,9 @@
public MastershipTermService requestTermService() {
return null;
}
+
+ @Override
+ public List<NodeId> getNodesFor(DeviceId deviceId) {
+ return null;
+ }
}
diff --git a/core/api/src/test/java/org/onlab/onos/net/DefaultDeviceTest.java b/core/api/src/test/java/org/onlab/onos/net/DefaultDeviceTest.java
index 329e128..63f1daa 100644
--- a/core/api/src/test/java/org/onlab/onos/net/DefaultDeviceTest.java
+++ b/core/api/src/test/java/org/onlab/onos/net/DefaultDeviceTest.java
@@ -3,6 +3,7 @@
import com.google.common.testing.EqualsTester;
import org.junit.Test;
import org.onlab.onos.net.provider.ProviderId;
+import org.onlab.packet.ChassisId;
import static org.junit.Assert.assertEquals;
import static org.onlab.onos.net.Device.Type.SWITCH;
@@ -21,14 +22,15 @@
static final String SW = "3.9.1";
static final String SN1 = "43311-12345";
static final String SN2 = "42346-43512";
+ static final ChassisId CID = new ChassisId();
@Test
public void testEquality() {
- Device d1 = new DefaultDevice(PID, DID1, SWITCH, MFR, HW, SW, SN1);
- Device d2 = new DefaultDevice(PID, DID1, SWITCH, MFR, HW, SW, SN1);
- Device d3 = new DefaultDevice(PID, DID2, SWITCH, MFR, HW, SW, SN2);
- Device d4 = new DefaultDevice(PID, DID2, SWITCH, MFR, HW, SW, SN2);
- Device d5 = new DefaultDevice(PID, DID2, SWITCH, MFR, HW, SW, SN1);
+ Device d1 = new DefaultDevice(PID, DID1, SWITCH, MFR, HW, SW, SN1, CID);
+ Device d2 = new DefaultDevice(PID, DID1, SWITCH, MFR, HW, SW, SN1, CID);
+ Device d3 = new DefaultDevice(PID, DID2, SWITCH, MFR, HW, SW, SN2, CID);
+ Device d4 = new DefaultDevice(PID, DID2, SWITCH, MFR, HW, SW, SN2, CID);
+ Device d5 = new DefaultDevice(PID, DID2, SWITCH, MFR, HW, SW, SN1, CID);
new EqualsTester().addEqualityGroup(d1, d2)
.addEqualityGroup(d3, d4)
@@ -38,13 +40,13 @@
@Test
public void basics() {
- Device device = new DefaultDevice(PID, DID1, SWITCH, MFR, HW, SW, SN1);
+ Device device = new DefaultDevice(PID, DID1, SWITCH, MFR, HW, SW, SN1, CID);
validate(device);
}
@Test
public void annotations() {
- Device device = new DefaultDevice(PID, DID1, SWITCH, MFR, HW, SW, SN1,
+ Device device = new DefaultDevice(PID, DID1, SWITCH, MFR, HW, SW, SN1, CID,
DefaultAnnotations.builder().set("foo", "bar").build());
validate(device);
assertEquals("incorrect provider", "bar", device.annotations().value("foo"));
diff --git a/core/api/src/test/java/org/onlab/onos/net/DefaultPortTest.java b/core/api/src/test/java/org/onlab/onos/net/DefaultPortTest.java
index 1069fd1..b9720ea 100644
--- a/core/api/src/test/java/org/onlab/onos/net/DefaultPortTest.java
+++ b/core/api/src/test/java/org/onlab/onos/net/DefaultPortTest.java
@@ -3,6 +3,7 @@
import com.google.common.testing.EqualsTester;
import org.junit.Test;
import org.onlab.onos.net.provider.ProviderId;
+import org.onlab.packet.ChassisId;
import static org.junit.Assert.assertEquals;
import static org.onlab.onos.net.Device.Type.SWITCH;
@@ -22,7 +23,8 @@
@Test
public void testEquality() {
- Device device = new DefaultDevice(PID, DID1, SWITCH, "m", "h", "s", "n");
+ Device device = new DefaultDevice(PID, DID1, SWITCH, "m", "h", "s", "n",
+ new ChassisId());
Port p1 = new DefaultPort(device, portNumber(1), true);
Port p2 = new DefaultPort(device, portNumber(1), true);
Port p3 = new DefaultPort(device, portNumber(2), true);
@@ -37,7 +39,8 @@
@Test
public void basics() {
- Device device = new DefaultDevice(PID, DID1, SWITCH, "m", "h", "s", "n");
+ Device device = new DefaultDevice(PID, DID1, SWITCH, "m", "h", "s", "n",
+ new ChassisId());
Port port = new DefaultPort(device, portNumber(1), true);
assertEquals("incorrect element", device, port.element());
assertEquals("incorrect number", portNumber(1), port.number());
diff --git a/core/api/src/test/java/org/onlab/onos/net/NetTestTools.java b/core/api/src/test/java/org/onlab/onos/net/NetTestTools.java
index 379ec7a..6fc38e3 100644
--- a/core/api/src/test/java/org/onlab/onos/net/NetTestTools.java
+++ b/core/api/src/test/java/org/onlab/onos/net/NetTestTools.java
@@ -1,6 +1,7 @@
package org.onlab.onos.net;
import org.onlab.onos.net.provider.ProviderId;
+import org.onlab.packet.ChassisId;
import org.onlab.packet.IpPrefix;
import java.util.ArrayList;
@@ -37,7 +38,7 @@
// Crates a new device with the specified id
public static Device device(String id) {
return new DefaultDevice(PID, did(id), Device.Type.SWITCH,
- "mfg", "1.0", "1.1", "1234");
+ "mfg", "1.0", "1.1", "1234", new ChassisId());
}
// Crates a new host with the specified id
@@ -47,10 +48,16 @@
new HashSet<IpPrefix>());
}
+ // Short-hand for creating a connection point.
+ public static ConnectPoint connectPoint(String id, int port) {
+ return new ConnectPoint(did(id), portNumber(port));
+ }
+
// Short-hand for creating a link.
public static Link link(String src, int sp, String dst, int dp) {
- return new DefaultLink(PID, new ConnectPoint(did(src), portNumber(sp)),
- new ConnectPoint(did(dst), portNumber(dp)),
+ return new DefaultLink(PID,
+ connectPoint(src, sp),
+ connectPoint(dst, dp),
Link.Type.DIRECT);
}
diff --git a/core/api/src/test/java/org/onlab/onos/net/device/DefaultDeviceDescriptionTest.java b/core/api/src/test/java/org/onlab/onos/net/device/DefaultDeviceDescriptionTest.java
index 9d06edf..243d148 100644
--- a/core/api/src/test/java/org/onlab/onos/net/device/DefaultDeviceDescriptionTest.java
+++ b/core/api/src/test/java/org/onlab/onos/net/device/DefaultDeviceDescriptionTest.java
@@ -1,6 +1,7 @@
package org.onlab.onos.net.device;
import org.junit.Test;
+import org.onlab.packet.ChassisId;
import java.net.URI;
@@ -18,12 +19,13 @@
private static final String HW = "1.1.x";
private static final String SW = "3.9.1";
private static final String SN = "43311-12345";
+ private static final ChassisId CID = new ChassisId();
@Test
public void basics() {
DeviceDescription device =
- new DefaultDeviceDescription(DURI, SWITCH, MFR, HW, SW, SN);
+ new DefaultDeviceDescription(DURI, SWITCH, MFR, HW, SW, SN, CID);
assertEquals("incorrect uri", DURI, device.deviceURI());
assertEquals("incorrect type", SWITCH, device.type());
assertEquals("incorrect manufacturer", MFR, device.manufacturer());
@@ -31,6 +33,7 @@
assertEquals("incorrect sw", SW, device.swVersion());
assertEquals("incorrect serial", SN, device.serialNumber());
assertTrue("incorrect toString", device.toString().contains("uri=of:foo"));
+ assertTrue("Incorrect chassis", device.chassisId().value() == 0);
}
}
diff --git a/core/api/src/test/java/org/onlab/onos/net/device/DeviceEventTest.java b/core/api/src/test/java/org/onlab/onos/net/device/DeviceEventTest.java
index 9c45b96..312b9c2 100644
--- a/core/api/src/test/java/org/onlab/onos/net/device/DeviceEventTest.java
+++ b/core/api/src/test/java/org/onlab/onos/net/device/DeviceEventTest.java
@@ -11,6 +11,7 @@
import org.onlab.onos.net.Port;
import org.onlab.onos.net.PortNumber;
import org.onlab.onos.net.provider.ProviderId;
+import org.onlab.packet.ChassisId;
/**
* Tests of the device event.
@@ -19,7 +20,7 @@
private Device createDevice() {
return new DefaultDevice(new ProviderId("of", "foo"), deviceId("of:foo"),
- Device.Type.SWITCH, "box", "hw", "sw", "sn");
+ Device.Type.SWITCH, "box", "hw", "sw", "sn", new ChassisId());
}
@Override
diff --git a/core/api/src/test/java/org/onlab/onos/net/host/DefualtHostDecriptionTest.java b/core/api/src/test/java/org/onlab/onos/net/host/DefualtHostDecriptionTest.java
index 5ae7c27..f2b9475 100644
--- a/core/api/src/test/java/org/onlab/onos/net/host/DefualtHostDecriptionTest.java
+++ b/core/api/src/test/java/org/onlab/onos/net/host/DefualtHostDecriptionTest.java
@@ -8,6 +8,8 @@
import org.onlab.packet.MacAddress;
import org.onlab.packet.VlanId;
+import com.google.common.collect.ImmutableSet;
+
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
@@ -33,7 +35,7 @@
assertEquals("incorrect mac", MAC, host.hwAddress());
assertEquals("incorrect vlan", VLAN, host.vlan());
assertEquals("incorrect location", LOC, host.location());
- assertEquals("incorrect ip's", IP, host.ipAddress());
+ assertEquals("incorrect ip's", ImmutableSet.of(IP), host.ipAddress());
assertTrue("incorrect toString", host.toString().contains("vlan=10"));
}
diff --git a/core/api/src/test/java/org/onlab/onos/net/topology/DefaultGraphDescriptionTest.java b/core/api/src/test/java/org/onlab/onos/net/topology/DefaultGraphDescriptionTest.java
index 5f7d47b..81e6393 100644
--- a/core/api/src/test/java/org/onlab/onos/net/topology/DefaultGraphDescriptionTest.java
+++ b/core/api/src/test/java/org/onlab/onos/net/topology/DefaultGraphDescriptionTest.java
@@ -18,9 +18,9 @@
private static final DeviceId D3 = deviceId("3");
- static final Device DEV1 = new DefaultDevice(PID, D1, SWITCH, "", "", "", "");
- static final Device DEV2 = new DefaultDevice(PID, D2, SWITCH, "", "", "", "");
- static final Device DEV3 = new DefaultDevice(PID, D3, SWITCH, "", "", "", "");
+ static final Device DEV1 = new DefaultDevice(PID, D1, SWITCH, "", "", "", "", null);
+ static final Device DEV2 = new DefaultDevice(PID, D2, SWITCH, "", "", "", "", null);
+ static final Device DEV3 = new DefaultDevice(PID, D3, SWITCH, "", "", "", "", null);
@Test
public void basics() {
diff --git a/core/store/hz/net/pom.xml b/core/json/pom.xml
similarity index 74%
rename from core/store/hz/net/pom.xml
rename to core/json/pom.xml
index 177e99e..63dfe57 100644
--- a/core/store/hz/net/pom.xml
+++ b/core/json/pom.xml
@@ -6,15 +6,15 @@
<parent>
<groupId>org.onlab.onos</groupId>
- <artifactId>onos-core-hz</artifactId>
+ <artifactId>onos-core</artifactId>
<version>1.0.0-SNAPSHOT</version>
<relativePath>../pom.xml</relativePath>
</parent>
- <artifactId>onos-core-hz-net</artifactId>
+ <artifactId>onos-json</artifactId>
<packaging>bundle</packaging>
- <description>ONOS Hazelcast based distributed store subsystems</description>
+ <description>ONOS JSON encode/decode facilities</description>
<dependencies>
<dependency>
@@ -23,24 +23,22 @@
</dependency>
<dependency>
<groupId>org.onlab.onos</groupId>
- <artifactId>onos-core-hz-common</artifactId>
- <version>${project.version}</version>
- </dependency>
- <dependency>
- <groupId>org.onlab.onos</groupId>
- <artifactId>onos-core-hz-common</artifactId>
+ <artifactId>onos-api</artifactId>
<classifier>tests</classifier>
<scope>test</scope>
- <version>${project.version}</version>
</dependency>
+
+ <dependency>
+ <groupId>org.onlab.onos</groupId>
+ <artifactId>onos-core-trivial</artifactId>
+ <version>${project.version}</version>
+ <scope>test</scope>
+ </dependency>
+
<dependency>
<groupId>org.apache.felix</groupId>
<artifactId>org.apache.felix.scr.annotations</artifactId>
</dependency>
- <dependency>
- <groupId>com.hazelcast</groupId>
- <artifactId>hazelcast</artifactId>
- </dependency>
</dependencies>
<build>
diff --git a/core/json/src/main/java/org/onlab/onos/json/impl/DeleteMe.java b/core/json/src/main/java/org/onlab/onos/json/impl/DeleteMe.java
new file mode 100644
index 0000000..d89f31b
--- /dev/null
+++ b/core/json/src/main/java/org/onlab/onos/json/impl/DeleteMe.java
@@ -0,0 +1,7 @@
+package org.onlab.onos.json.impl;
+
+/**
+ * Temporary placeholder class; to be deleted once real codec classes are added.
+ */
+public class DeleteMe {
+}
diff --git a/core/json/src/main/java/org/onlab/onos/json/impl/package-info.java b/core/json/src/main/java/org/onlab/onos/json/impl/package-info.java
new file mode 100644
index 0000000..406641f
--- /dev/null
+++ b/core/json/src/main/java/org/onlab/onos/json/impl/package-info.java
@@ -0,0 +1,4 @@
+/**
+ * Implementation of JSON codec factory and of the builtin codecs.
+ */
+package org.onlab.onos.json.impl;
\ No newline at end of file
diff --git a/core/net/pom.xml b/core/net/pom.xml
index 6518068..bff4cce 100644
--- a/core/net/pom.xml
+++ b/core/net/pom.xml
@@ -36,26 +36,9 @@
<scope>test</scope>
</dependency>
- <dependency>
- <groupId>org.easymock</groupId>
- <artifactId>easymock</artifactId>
- <scope>test</scope>
- </dependency>
-
- <!-- TODO Consider removing store dependency.
- Currently required for DistributedDeviceManagerTest. -->
<dependency>
- <groupId>org.onlab.onos</groupId>
- <artifactId>onos-core-hz-net</artifactId>
- <version>${project.version}</version>
- <scope>test</scope>
- </dependency>
- <dependency>
- <groupId>org.onlab.onos</groupId>
- <!-- FIXME: should be somewhere else -->
- <artifactId>onos-core-hz-common</artifactId>
- <version>${project.version}</version>
- <classifier>tests</classifier>
+ <groupId>org.easymock</groupId>
+ <artifactId>easymock</artifactId>
<scope>test</scope>
</dependency>
diff --git a/core/net/src/main/java/org/onlab/onos/cluster/impl/MastershipManager.java b/core/net/src/main/java/org/onlab/onos/cluster/impl/MastershipManager.java
index 59614da..7cf14fc 100644
--- a/core/net/src/main/java/org/onlab/onos/cluster/impl/MastershipManager.java
+++ b/core/net/src/main/java/org/onlab/onos/cluster/impl/MastershipManager.java
@@ -3,6 +3,7 @@
import static com.google.common.base.Preconditions.checkNotNull;
import static org.slf4j.LoggerFactory.getLogger;
+import java.util.List;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
@@ -103,7 +104,6 @@
MastershipEvent event = null;
event = store.relinquishRole(
clusterService.getLocalNode().id(), deviceId);
-
if (event != null) {
post(event);
}
@@ -127,6 +127,11 @@
return store.getDevices(nodeId);
}
+ @Override
+ public List<NodeId> getNodesFor(DeviceId deviceId) {
+ checkNotNull(deviceId, DEVICE_ID_NULL);
+ return store.getNodes(deviceId);
+ }
@Override
public MastershipTermService requestTermService() {
@@ -223,7 +228,8 @@
return true;
}
//else {
- //FIXME: break tie for equal-sized clusters, can we use hz's functions?
+ //FIXME: break tie for equal-sized clusters,
+ // maybe by number of connected switches
// }
return false;
}
diff --git a/core/net/src/main/java/org/onlab/onos/impl/package-info.java b/core/net/src/main/java/org/onlab/onos/impl/package-info.java
index bbe539f..2bf17b3 100644
--- a/core/net/src/main/java/org/onlab/onos/impl/package-info.java
+++ b/core/net/src/main/java/org/onlab/onos/impl/package-info.java
@@ -1,4 +1,4 @@
/**
- *
+ * Miscellaneous core system implementations.
*/
package org.onlab.onos.impl;
\ No newline at end of file
diff --git a/core/net/src/main/java/org/onlab/onos/net/device/impl/DeviceManager.java b/core/net/src/main/java/org/onlab/onos/net/device/impl/DeviceManager.java
index 28bdac1..b163f08 100644
--- a/core/net/src/main/java/org/onlab/onos/net/device/impl/DeviceManager.java
+++ b/core/net/src/main/java/org/onlab/onos/net/device/impl/DeviceManager.java
@@ -144,7 +144,7 @@
// Applies the specified role to the device; ignores NONE
private void applyRole(DeviceId deviceId, MastershipRole newRole) {
- if (newRole.equals(MastershipRole.NONE)) {
+ if (!newRole.equals(MastershipRole.NONE)) {
Device device = store.getDevice(deviceId);
// FIXME: Device might not be there yet. (eventual consistent)
if (device == null) {
@@ -161,6 +161,9 @@
@Override
public void removeDevice(DeviceId deviceId) {
checkNotNull(deviceId, DEVICE_ID_NULL);
+ // XXX is this intended to apply to the full global topology?
+ // if so, we probably don't want the fact that we aren't
+ // MASTER to get in the way, as it would do now.
DeviceEvent event = store.removeDevice(deviceId);
if (event != null) {
log.info("Device {} administratively removed", deviceId);
@@ -203,19 +206,21 @@
log.info("Device {} connected", deviceId);
// check my Role
MastershipRole role = mastershipService.requestRoleFor(deviceId);
-
+ log.info("## - our role for {} is {} [master is {}]", deviceId, role,
+ mastershipService.getMasterFor(deviceId));
if (role != MastershipRole.MASTER) {
// TODO: Do we need to explicitly tell the Provider that
// this instance is no longer the MASTER? probably not
return;
}
-
MastershipTerm term = mastershipService.requestTermService()
.getMastershipTerm(deviceId);
+
if (!term.master().equals(clusterService.getLocalNode().id())) {
// lost mastership after requestRole told this instance was MASTER.
return;
}
+
// tell clock provider if this instance is the master
deviceClockProviderService.setMastershipTerm(deviceId, term);
@@ -256,19 +261,38 @@
// but if I was the last STANDBY connection, etc. and no one else
// was there to mark the device offline, this instance may need to
// temporarily request for Master Role and mark offline.
+ log.info("## for {} role is {}", deviceId, mastershipService.getLocalRole(deviceId));
if (!mastershipService.getLocalRole(deviceId).equals(MastershipRole.MASTER)) {
log.debug("Device {} disconnected, but I am not the master", deviceId);
//let go of ability to be backup
mastershipService.relinquishMastership(deviceId);
return;
}
- DeviceEvent event = store.markOffline(deviceId);
- //relinquish master role and ability to be backup.
- mastershipService.relinquishMastership(deviceId);
- if (event != null) {
- log.info("Device {} disconnected", deviceId);
- post(event);
+ DeviceEvent event = null;
+ try {
+ event = store.markOffline(deviceId);
+ } catch (IllegalStateException e) {
+ // There are times when this node correctly has mastership, but that
+ // isn't reflected in the ClockManager before the device disconnects.
+ // We want to let go of the device anyway, so make sure that happens.
+
+ // FIXME: Come up with workaround for above scenario.
+ MastershipTerm term = termService.getMastershipTerm(deviceId);
+ final NodeId myNodeId = clusterService.getLocalNode().id();
+ // TODO: Move this type of check inside device clock manager, etc.
+ if (myNodeId.equals(term.master())) {
+ deviceClockProviderService.setMastershipTerm(deviceId, term);
+ event = store.markOffline(deviceId);
+ }
+ } finally {
+ //relinquish master role and ability to be backup.
+ mastershipService.relinquishMastership(deviceId);
+
+ if (event != null) {
+ log.info("Device {} disconnected", deviceId);
+ post(event);
+ }
}
}
@@ -279,7 +303,15 @@
checkNotNull(portDescriptions,
"Port descriptions list cannot be null");
checkValidity();
+ //XXX what's this doing here?
this.provider().id();
+
+ if (!mastershipService.getLocalRole(deviceId).equals(MastershipRole.MASTER)) {
+ // TODO If we become master, then we'll trigger something to update this
+ // info to fix any inconsistencies that may result during the handoff.
+ return;
+ }
+
List<DeviceEvent> events = store.updatePorts(this.provider().id(),
deviceId, portDescriptions);
for (DeviceEvent event : events) {
@@ -293,6 +325,12 @@
checkNotNull(deviceId, DEVICE_ID_NULL);
checkNotNull(portDescription, PORT_DESCRIPTION_NULL);
checkValidity();
+
+ if (!mastershipService.getLocalRole(deviceId).equals(MastershipRole.MASTER)) {
+ // TODO If we become master, then we'll trigger something to update this
+ // info to fix any inconsistencies that may result during the handoff.
+ return;
+ }
DeviceEvent event = store.updatePortStatus(this.provider().id(),
deviceId, portDescription);
if (event != null) {
@@ -328,27 +366,37 @@
final DeviceId did = event.subject();
final NodeId myNodeId = clusterService.getLocalNode().id();
+ log.info("## got Mastershipevent for dev {}", did);
if (myNodeId.equals(event.master())) {
MastershipTerm term = termService.getMastershipTerm(did);
- if (term.master().equals(myNodeId)) {
- // only set the new term if I am the master
- deviceClockProviderService.setMastershipTerm(did, term);
+ if (!myNodeId.equals(term.master())) {
+ // something went wrong in consistency, let go
+ mastershipService.relinquishMastership(did);
+ applyRole(did, MastershipRole.STANDBY);
+ return;
}
+ log.info("## setting term for CPS as new master for {}", did);
+ // only set the new term if I am the master
+ deviceClockProviderService.setMastershipTerm(did, term);
+
// FIXME: we should check that the device is connected on our end.
// currently, this is not straight forward as the actual switch
- // implementation is hidden from the registry.
- if (!isAvailable(did)) {
+ // implementation is hidden from the registry. Maybe we can ask the
+ // provider.
+ // if the device is null here, we are the first master to claim the
+ // device. No worries, the DeviceManager will create one soon.
+ Device device = getDevice(did);
+ if ((device != null) && !isAvailable(did)) {
//flag the device as online. Is there a better way to do this?
- Device device = getDevice(did);
store.createOrUpdateDevice(device.providerId(), did,
new DefaultDeviceDescription(
did.uri(), device.type(), device.manufacturer(),
device.hwVersion(), device.swVersion(),
- device.serialNumber()));
+ device.serialNumber(), device.chassisId()));
}
-
+ // TODO: Re-collect device information to fix potential staleness.
applyRole(did, MastershipRole.MASTER);
} else {
applyRole(did, MastershipRole.STANDBY);
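The guards added to updatePorts() and updatePortStatus(), together with the mastership listener above, all reduce to one test: act on a device only when the local node currently holds the MASTER role for it. The stand-alone sketch below illustrates that guard; the Role enum and MastershipLookup interface are simplified stand-ins for the ONOS MastershipRole and MastershipService types, included only so the example compiles on its own.

import java.util.Objects;

/** Sketch of the "act only when locally master" guard used by the manager above. */
public class MastershipGuardSketch {

    /** Stand-in for MastershipRole. */
    enum Role { MASTER, STANDBY, NONE }

    /** Stand-in for the injected mastership service. */
    interface MastershipLookup {
        Role localRole(String deviceId);
    }

    private final MastershipLookup mastership;

    public MastershipGuardSketch(MastershipLookup mastership) {
        this.mastership = Objects.requireNonNull(mastership);
    }

    /**
     * Applies a provider update only when this node is master for the device;
     * standby nodes drop the callback and rely on the master to update the store.
     */
    public void handleUpdate(String deviceId, Runnable applyToStore) {
        if (mastership.localRole(deviceId) != Role.MASTER) {
            return;
        }
        applyToStore.run();
    }
}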
diff --git a/core/net/src/main/java/org/onlab/onos/net/flow/impl/FlowRuleManager.java b/core/net/src/main/java/org/onlab/onos/net/flow/impl/FlowRuleManager.java
index fa8e947..9ea99c3 100644
--- a/core/net/src/main/java/org/onlab/onos/net/flow/impl/FlowRuleManager.java
+++ b/core/net/src/main/java/org/onlab/onos/net/flow/impl/FlowRuleManager.java
@@ -3,8 +3,8 @@
import static com.google.common.base.Preconditions.checkNotNull;
import static org.slf4j.LoggerFactory.getLogger;
-import java.util.Iterator;
import java.util.List;
+import java.util.Map;
import java.util.concurrent.CancellationException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
@@ -45,6 +45,7 @@
import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
import com.google.common.collect.Multimap;
/**
@@ -197,6 +198,8 @@
extends AbstractProviderService<FlowRuleProvider>
implements FlowRuleProviderService {
+ final Map<FlowEntry, Long> lastSeen = Maps.newConcurrentMap();
+
protected InternalFlowRuleProviderService(FlowRuleProvider provider) {
super(provider);
}
@@ -205,6 +208,7 @@
public void flowRemoved(FlowEntry flowEntry) {
checkNotNull(flowEntry, FLOW_RULE_NULL);
checkValidity();
+ lastSeen.remove(flowEntry);
FlowEntry stored = store.getFlowEntry(flowEntry);
if (stored == null) {
log.info("Rule already evicted from store: {}", flowEntry);
@@ -292,14 +296,25 @@
if (storedRule == null) {
return false;
}
- long timeout = storedRule.timeout() * 1000;
- Long currentTime = System.currentTimeMillis();
+ final long timeout = storedRule.timeout() * 1000;
+ final long currentTime = System.currentTimeMillis();
if (storedRule.packets() != swRule.packets()) {
- storedRule.setLastSeen();
+ lastSeen.put(storedRule, currentTime);
return true;
}
+ if (!lastSeen.containsKey(storedRule)) {
+ // checking for the first time
+ lastSeen.put(storedRule, storedRule.lastSeen());
+ // Use the following instead if the lastSeen attribute is removed:
+ //lastSeen.put(storedRule, currentTime);
+ }
+ Long last = lastSeen.get(storedRule);
+ if (last == null) {
+ // concurrently removed? let the liveness check fail
+ return false;
+ }
- if ((currentTime - storedRule.lastSeen()) <= timeout) {
+ if ((currentTime - last) <= timeout) {
return true;
}
return false;
@@ -316,10 +331,7 @@
public void pushFlowMetrics(DeviceId deviceId, Iterable<FlowEntry> flowEntries) {
List<FlowEntry> storedRules = Lists.newLinkedList(store.getFlowEntries(deviceId));
- Iterator<FlowEntry> switchRulesIterator = flowEntries.iterator();
-
- while (switchRulesIterator.hasNext()) {
- FlowEntry rule = switchRulesIterator.next();
+ for (FlowEntry rule : flowEntries) {
if (storedRules.remove(rule)) {
// we both have the rule, let's update some info then.
flowAdded(rule);
@@ -401,7 +413,7 @@
CompletedBatchOperation completed;
for (Future<CompletedBatchOperation> future : futures) {
completed = future.get();
- success = validateBatchOperation(failed, completed, future);
+ success = validateBatchOperation(failed, completed);
}
return finalizeBatchOperation(success, failed);
@@ -426,14 +438,13 @@
long now = System.nanoTime();
long thisTimeout = end - now;
completed = future.get(thisTimeout, TimeUnit.NANOSECONDS);
- success = validateBatchOperation(failed, completed, future);
+ success = validateBatchOperation(failed, completed);
}
return finalizeBatchOperation(success, failed);
}
private boolean validateBatchOperation(List<FlowEntry> failed,
- CompletedBatchOperation completed,
- Future<CompletedBatchOperation> future) {
+ CompletedBatchOperation completed) {
if (isCancelled()) {
throw new CancellationException();
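The liveness change above stops mutating the stored rule via setLastSeen() and instead records observation times in a concurrent side map keyed by the flow entry. The stand-alone sketch below mirrors that bookkeeping; the Entry interface and its method names are simplified stand-ins for the ONOS FlowEntry API, and the millisecond conversion follows the hunk's timeout() * 1000.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

/** Sketch of the lastSeen side-map liveness check used above. */
public class FlowLivenessSketch {

    /** Simplified stand-in for the relevant parts of a stored flow entry. */
    interface Entry {
        long packets();          // packet counter reported for the rule
        int timeoutSeconds();    // idle timeout configured on the rule
        long lastSeenMillis();   // timestamp previously recorded by the store
    }

    private final Map<Entry, Long> lastSeen = new ConcurrentHashMap<>();

    /** Refreshes the timestamp when counters move, otherwise compares against the timeout. */
    boolean isLive(Entry stored, Entry reported) {
        final long timeoutMillis = stored.timeoutSeconds() * 1000L;
        final long now = System.currentTimeMillis();
        if (stored.packets() != reported.packets()) {
            lastSeen.put(stored, now);                     // traffic observed: still live
            return true;
        }
        if (!lastSeen.containsKey(stored)) {
            lastSeen.put(stored, stored.lastSeenMillis()); // first time we check this rule
        }
        Long last = lastSeen.get(stored);
        if (last == null) {
            return false;                                  // removed concurrently: treat as expired
        }
        return (now - last) <= timeoutMillis;
    }
}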
diff --git a/core/net/src/main/java/org/onlab/onos/net/intent/impl/HostToHostIntentCompiler.java b/core/net/src/main/java/org/onlab/onos/net/intent/impl/HostToHostIntentCompiler.java
index de61e8e..50faf38 100644
--- a/core/net/src/main/java/org/onlab/onos/net/intent/impl/HostToHostIntentCompiler.java
+++ b/core/net/src/main/java/org/onlab/onos/net/intent/impl/HostToHostIntentCompiler.java
@@ -41,7 +41,7 @@
@Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
protected HostService hostService;
- private IdGenerator<IntentId> intentIdGenerator;
+ protected IdGenerator<IntentId> intentIdGenerator;
@Activate
public void activate() {
diff --git a/core/net/src/main/java/org/onlab/onos/net/intent/impl/LinkCollectionIntentInstaller.java b/core/net/src/main/java/org/onlab/onos/net/intent/impl/LinkCollectionIntentInstaller.java
index 51e0d2e..ec668dc 100644
--- a/core/net/src/main/java/org/onlab/onos/net/intent/impl/LinkCollectionIntentInstaller.java
+++ b/core/net/src/main/java/org/onlab/onos/net/intent/impl/LinkCollectionIntentInstaller.java
@@ -1,5 +1,8 @@
package org.onlab.onos.net.intent.impl;
+import static org.onlab.onos.net.flow.DefaultTrafficTreatment.builder;
+import static org.slf4j.LoggerFactory.getLogger;
+
import java.util.List;
import java.util.concurrent.Future;
@@ -10,7 +13,9 @@
import org.apache.felix.scr.annotations.ReferenceCardinality;
import org.onlab.onos.ApplicationId;
import org.onlab.onos.CoreService;
+import org.onlab.onos.net.DeviceId;
import org.onlab.onos.net.Link;
+import org.onlab.onos.net.PortNumber;
import org.onlab.onos.net.flow.CompletedBatchOperation;
import org.onlab.onos.net.flow.DefaultFlowRule;
import org.onlab.onos.net.flow.DefaultTrafficSelector;
@@ -29,9 +34,6 @@
import com.google.common.collect.Lists;
-import static org.onlab.onos.net.flow.DefaultTrafficTreatment.builder;
-import static org.slf4j.LoggerFactory.getLogger;
-
/**
* Installer for {@link org.onlab.onos.net.intent.LinkCollectionIntent}
* path segment intents.
@@ -79,15 +81,17 @@
DefaultTrafficSelector.builder(intent.selector());
List<FlowRuleBatchEntry> rules = Lists.newLinkedList();
for (Link link : intent.links()) {
- TrafficTreatment treatment = builder()
- .setOutput(link.src().port()).build();
-
- FlowRule rule = new DefaultFlowRule(link.src().deviceId(),
- builder.build(), treatment,
- 123, appId, 600);
- rules.add(new FlowRuleBatchEntry(FlowRuleOperation.ADD, rule));
+ rules.add(createBatchEntry(FlowRuleOperation.ADD,
+ builder.build(),
+ link.src().deviceId(),
+ link.src().port()));
}
+ rules.add(createBatchEntry(FlowRuleOperation.ADD,
+ builder.build(),
+ intent.egressPoint().deviceId(),
+ intent.egressPoint().port()));
+
return applyBatch(rules);
}
@@ -98,13 +102,39 @@
List<FlowRuleBatchEntry> rules = Lists.newLinkedList();
for (Link link : intent.links()) {
- TrafficTreatment treatment = builder()
- .setOutput(link.src().port()).build();
- FlowRule rule = new DefaultFlowRule(link.src().deviceId(),
- builder.build(), treatment,
- 123, appId, 600);
- rules.add(new FlowRuleBatchEntry(FlowRuleOperation.REMOVE, rule));
+ rules.add(createBatchEntry(FlowRuleOperation.REMOVE,
+ builder.build(),
+ link.src().deviceId(),
+ link.src().port()));
}
+
+ rules.add(createBatchEntry(FlowRuleOperation.REMOVE,
+ builder.build(),
+ intent.egressPoint().deviceId(),
+ intent.egressPoint().port()));
+
return applyBatch(rules);
}
+
+ /**
+ * Creates a FlowRuleBatchEntry based on the provided parameters.
+ *
+ * @param operation the FlowRuleOperation to use
+ * @param selector the traffic selector
+ * @param deviceId the device ID for the flow rule
+ * @param outPort the output port of the flow rule
+ * @return the new flow rule batch entry
+ */
+ private FlowRuleBatchEntry createBatchEntry(FlowRuleOperation operation,
+ TrafficSelector selector,
+ DeviceId deviceId,
+ PortNumber outPort) {
+
+ TrafficTreatment treatment = builder().setOutput(outPort).build();
+
+ FlowRule rule = new DefaultFlowRule(deviceId,
+ selector, treatment, 123, appId, 600);
+
+ return new FlowRuleBatchEntry(operation, rule);
+ }
}
diff --git a/core/net/src/main/java/org/onlab/onos/net/intent/impl/MultiPointToSinglePointIntentCompiler.java b/core/net/src/main/java/org/onlab/onos/net/intent/impl/MultiPointToSinglePointIntentCompiler.java
index 68c55dd..6ce12d6 100644
--- a/core/net/src/main/java/org/onlab/onos/net/intent/impl/MultiPointToSinglePointIntentCompiler.java
+++ b/core/net/src/main/java/org/onlab/onos/net/intent/impl/MultiPointToSinglePointIntentCompiler.java
@@ -37,7 +37,7 @@
@Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
protected PathService pathService;
- private IdGenerator<IntentId> intentIdGenerator;
+ protected IdGenerator<IntentId> intentIdGenerator;
@Activate
public void activate() {
@@ -62,7 +62,7 @@
Intent result = new LinkCollectionIntent(intentIdGenerator.getNewId(),
intent.selector(), intent.treatment(),
- links);
+ links, intent.egressPoint());
return Arrays.asList(result);
}
diff --git a/core/net/src/main/java/org/onlab/onos/net/intent/impl/PathIntentInstaller.java b/core/net/src/main/java/org/onlab/onos/net/intent/impl/PathIntentInstaller.java
index 8111681..dd6d7e5 100644
--- a/core/net/src/main/java/org/onlab/onos/net/intent/impl/PathIntentInstaller.java
+++ b/core/net/src/main/java/org/onlab/onos/net/intent/impl/PathIntentInstaller.java
@@ -96,7 +96,7 @@
FlowRule rule = new DefaultFlowRule(link.src().deviceId(),
builder.build(), treatment,
- 123, appId, 600);
+ 123, appId, 15);
rules.add(new FlowRuleBatchEntry(FlowRuleOperation.ADD, rule));
prev = link.dst();
}
diff --git a/core/net/src/main/java/org/onlab/onos/net/intent/impl/PointToPointIntentCompiler.java b/core/net/src/main/java/org/onlab/onos/net/intent/impl/PointToPointIntentCompiler.java
index 0bd1703..0556df0 100644
--- a/core/net/src/main/java/org/onlab/onos/net/intent/impl/PointToPointIntentCompiler.java
+++ b/core/net/src/main/java/org/onlab/onos/net/intent/impl/PointToPointIntentCompiler.java
@@ -43,7 +43,7 @@
@Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
protected HostService hostService;
- private IdGenerator<IntentId> intentIdGenerator;
+ protected IdGenerator<IntentId> intentIdGenerator;
@Activate
public void activate() {
diff --git a/core/net/src/main/java/org/onlab/onos/net/link/impl/LinkManager.java b/core/net/src/main/java/org/onlab/onos/net/link/impl/LinkManager.java
index 779e1ee..7b35f1c 100644
--- a/core/net/src/main/java/org/onlab/onos/net/link/impl/LinkManager.java
+++ b/core/net/src/main/java/org/onlab/onos/net/link/impl/LinkManager.java
@@ -16,6 +16,7 @@
import org.onlab.onos.net.ConnectPoint;
import org.onlab.onos.net.DeviceId;
import org.onlab.onos.net.Link;
+import org.onlab.onos.net.MastershipRole;
import org.onlab.onos.net.device.DeviceEvent;
import org.onlab.onos.net.device.DeviceListener;
import org.onlab.onos.net.device.DeviceService;
@@ -139,11 +140,17 @@
@Override
public void removeLinks(ConnectPoint connectPoint) {
+ if (deviceService.getRole(connectPoint.deviceId()) != MastershipRole.MASTER) {
+ return;
+ }
removeLinks(getLinks(connectPoint));
}
@Override
public void removeLinks(DeviceId deviceId) {
+ if (deviceService.getRole(deviceId) != MastershipRole.MASTER) {
+ return;
+ }
removeLinks(getDeviceLinks(deviceId));
}
@@ -189,6 +196,15 @@
public void linkDetected(LinkDescription linkDescription) {
checkNotNull(linkDescription, LINK_DESC_NULL);
checkValidity();
+
+ ConnectPoint src = linkDescription.src();
+ ConnectPoint dst = linkDescription.dst();
+ // If we aren't master for the devices at both ends of this link,
+ // we shouldn't be processing this event.
+ if ((deviceService.getRole(src.deviceId()) != MastershipRole.MASTER) ||
+ (deviceService.getRole(dst.deviceId()) != MastershipRole.MASTER)) {
+ return;
+ }
LinkEvent event = store.createOrUpdateLink(provider().id(),
linkDescription);
if (event != null) {
@@ -201,6 +217,15 @@
public void linkVanished(LinkDescription linkDescription) {
checkNotNull(linkDescription, LINK_DESC_NULL);
checkValidity();
+
+ ConnectPoint src = linkDescription.src();
+ ConnectPoint dst = linkDescription.dst();
+ // If we aren't master for the devices at both ends of this link,
+ // we shouldn't be processing this event.
+ if ((deviceService.getRole(src.deviceId()) != MastershipRole.MASTER) ||
+ (deviceService.getRole(dst.deviceId()) != MastershipRole.MASTER)) {
+ return;
+ }
LinkEvent event = store.removeLink(linkDescription.src(),
linkDescription.dst());
if (event != null) {
@@ -213,7 +238,13 @@
public void linksVanished(ConnectPoint connectPoint) {
checkNotNull(connectPoint, "Connect point cannot be null");
checkValidity();
+ // If we aren't master for the device associated with this ConnectPoint,
+ // we shouldn't be processing this event.
+ if (deviceService.getRole(connectPoint.deviceId()) != MastershipRole.MASTER) {
+ return;
+ }
log.info("Links for connection point {} vanished", connectPoint);
+ // FIXME: This will remove links registered by other providers
removeLinks(getLinks(connectPoint));
}
@@ -221,6 +252,11 @@
public void linksVanished(DeviceId deviceId) {
checkNotNull(deviceId, DEVICE_ID_NULL);
checkValidity();
+ // If we aren't master for this device,
+ // we shouldn't be processing this event.
+ if (deviceService.getRole(deviceId) != MastershipRole.MASTER) {
+ return;
+ }
log.info("Links for device {} vanished", deviceId);
removeLinks(getDeviceLinks(deviceId));
}
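The link-provider guards above accept linkDetected() and linkVanished() only when the local node is master for the devices at both ends of the link; the per-device variants check a single endpoint. A compact stand-alone sketch of the dual check follows; Role and RoleLookup are illustrative stand-ins for MastershipRole and the DeviceService role lookup used in the hunk.

/** Sketch of the dual-mastership check applied to link events above. */
public class LinkEventGuardSketch {

    /** Stand-in for MastershipRole. */
    enum Role { MASTER, STANDBY, NONE }

    /** Stand-in for the role lookup exposed by the device service. */
    interface RoleLookup {
        Role role(String deviceId);
    }

    private final RoleLookup roles;

    public LinkEventGuardSketch(RoleLookup roles) {
        this.roles = roles;
    }

    /** Processes a link event only when this node masters both endpoints. */
    public boolean shouldProcess(String srcDeviceId, String dstDeviceId) {
        return roles.role(srcDeviceId) == Role.MASTER
                && roles.role(dstDeviceId) == Role.MASTER;
    }
}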
diff --git a/core/net/src/main/java/org/onlab/onos/net/proxyarp/impl/ProxyArpManager.java b/core/net/src/main/java/org/onlab/onos/net/proxyarp/impl/ProxyArpManager.java
index 8a86544..81f42c8 100644
--- a/core/net/src/main/java/org/onlab/onos/net/proxyarp/impl/ProxyArpManager.java
+++ b/core/net/src/main/java/org/onlab/onos/net/proxyarp/impl/ProxyArpManager.java
@@ -167,6 +167,7 @@
return;
}
+ // TODO find the correct IP address
Ethernet arpReply = buildArpReply(dst.ipAddresses().iterator().next(),
dst.mac(), eth);
// TODO: check send status with host service.
@@ -355,7 +356,7 @@
arp.setTargetProtocolAddress(((ARP) request.getPayload())
.getSenderProtocolAddress());
- arp.setSenderProtocolAddress(srcIp.toRealInt());
+ arp.setSenderProtocolAddress(srcIp.toInt());
eth.setPayload(arp);
return eth;
}
diff --git a/core/net/src/main/java/org/onlab/onos/net/topology/impl/DefaultTopologyProvider.java b/core/net/src/main/java/org/onlab/onos/net/topology/impl/DefaultTopologyProvider.java
index 7ee6ddd..9631c66 100644
--- a/core/net/src/main/java/org/onlab/onos/net/topology/impl/DefaultTopologyProvider.java
+++ b/core/net/src/main/java/org/onlab/onos/net/topology/impl/DefaultTopologyProvider.java
@@ -23,6 +23,7 @@
import org.onlab.onos.net.topology.TopologyProviderService;
import org.slf4j.Logger;
+import java.util.Collections;
import java.util.List;
import java.util.Timer;
import java.util.concurrent.ExecutorService;
@@ -88,7 +89,7 @@
linkService.addListener(linkListener);
isStarted = true;
- triggerTopologyBuild(null);
+ triggerTopologyBuild(Collections.<Event>emptyList());
log.info("Started");
}
diff --git a/core/net/src/test/java/org/onlab/onos/net/device/impl/DeviceManagerTest.java b/core/net/src/test/java/org/onlab/onos/net/device/impl/DeviceManagerTest.java
index 0b017cf..a86502f 100644
--- a/core/net/src/test/java/org/onlab/onos/net/device/impl/DeviceManagerTest.java
+++ b/core/net/src/test/java/org/onlab/onos/net/device/impl/DeviceManagerTest.java
@@ -37,6 +37,7 @@
import org.onlab.onos.net.provider.AbstractProvider;
import org.onlab.onos.net.provider.ProviderId;
import org.onlab.onos.store.trivial.impl.SimpleDeviceStore;
+import org.onlab.packet.ChassisId;
import org.onlab.packet.IpPrefix;
import java.util.ArrayList;
@@ -62,6 +63,7 @@
private static final String SW1 = "3.8.1";
private static final String SW2 = "3.9.5";
private static final String SN = "43311-12345";
+ private static final ChassisId CID = new ChassisId();
private static final PortNumber P1 = PortNumber.portNumber(1);
private static final PortNumber P2 = PortNumber.portNumber(2);
@@ -111,7 +113,7 @@
private void connectDevice(DeviceId deviceId, String swVersion) {
DeviceDescription description =
new DefaultDeviceDescription(deviceId.uri(), SWITCH, MFR,
- HW, swVersion, SN);
+ HW, swVersion, SN, CID);
providerService.deviceConnected(deviceId, description);
assertNotNull("device should be found", service.getDevice(DID1));
}
diff --git a/core/net/src/test/java/org/onlab/onos/net/flow/impl/FlowRuleManagerTest.java b/core/net/src/test/java/org/onlab/onos/net/flow/impl/FlowRuleManagerTest.java
index 68223f0..dff740a 100644
--- a/core/net/src/test/java/org/onlab/onos/net/flow/impl/FlowRuleManagerTest.java
+++ b/core/net/src/test/java/org/onlab/onos/net/flow/impl/FlowRuleManagerTest.java
@@ -8,7 +8,9 @@
import java.util.ArrayList;
import java.util.Collections;
+import java.util.HashMap;
import java.util.List;
+import java.util.Map;
import java.util.Set;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
@@ -44,6 +46,7 @@
import org.onlab.onos.net.flow.FlowRuleProviderRegistry;
import org.onlab.onos.net.flow.FlowRuleProviderService;
import org.onlab.onos.net.flow.FlowRuleService;
+import org.onlab.onos.net.flow.StoredFlowEntry;
import org.onlab.onos.net.flow.TrafficSelector;
import org.onlab.onos.net.flow.TrafficTreatment;
import org.onlab.onos.net.flow.criteria.Criterion;
@@ -54,6 +57,7 @@
import org.onlab.onos.store.trivial.impl.SimpleFlowRuleStore;
import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
@@ -68,7 +72,7 @@
private static final DeviceId DID = DeviceId.deviceId("of:001");
private static final int TIMEOUT = 10;
private static final Device DEV = new DefaultDevice(
- PID, DID, Type.SWITCH, "", "", "", "");
+ PID, DID, Type.SWITCH, "", "", "", "", null);
private FlowRuleManager mgr;
@@ -166,16 +170,17 @@
}
+ // TODO: If preserving iteration order is a requirement, redo FlowRuleStore.
//backing store is sensitive to the order of additions/removals
- private boolean validateState(FlowEntryState... state) {
+ private boolean validateState(Map<FlowRule, FlowEntryState> expected) {
+ Map<FlowRule, FlowEntryState> expectedToCheck = new HashMap<>(expected);
Iterable<FlowEntry> rules = service.getFlowEntries(DID);
- int i = 0;
for (FlowEntry f : rules) {
- if (f.state() != state[i]) {
- return false;
- }
- i++;
+ assertTrue("Unexpected FlowRule " + f, expectedToCheck.containsKey(f));
+ assertEquals("FlowEntry" + f, expectedToCheck.get(f), f.state());
+ expectedToCheck.remove(f);
}
+ assertEquals(Collections.emptySet(), expectedToCheck.entrySet());
return true;
}
@@ -191,8 +196,10 @@
mgr.applyFlowRules(r1, r2, r3);
assertEquals("3 rules should exist", 3, flowCount());
assertTrue("Entries should be pending add.",
- validateState(FlowEntryState.PENDING_ADD, FlowEntryState.PENDING_ADD,
- FlowEntryState.PENDING_ADD));
+ validateState(ImmutableMap.of(
+ r1, FlowEntryState.PENDING_ADD,
+ r2, FlowEntryState.PENDING_ADD,
+ r3, FlowEntryState.PENDING_ADD)));
}
@Test
@@ -213,8 +220,10 @@
validateEvents();
assertEquals("3 rule should exist", 3, flowCount());
assertTrue("Entries should be pending remove.",
- validateState(FlowEntryState.PENDING_REMOVE, FlowEntryState.PENDING_REMOVE,
- FlowEntryState.ADDED));
+ validateState(ImmutableMap.of(
+ f1, FlowEntryState.PENDING_REMOVE,
+ f2, FlowEntryState.PENDING_REMOVE,
+ f3, FlowEntryState.ADDED)));
mgr.removeFlowRules(f1);
assertEquals("3 rule should still exist", 3, flowCount());
@@ -224,7 +233,7 @@
public void flowRemoved() {
FlowRule f1 = addFlowRule(1);
FlowRule f2 = addFlowRule(2);
- FlowEntry fe1 = new DefaultFlowEntry(f1);
+ StoredFlowEntry fe1 = new DefaultFlowEntry(f1);
FlowEntry fe2 = new DefaultFlowEntry(f2);
providerService.pushFlowMetrics(DID, ImmutableList.of(fe1, fe2));
service.removeFlowRules(f1);
@@ -263,8 +272,10 @@
providerService.pushFlowMetrics(DID, Lists.newArrayList(fe1, fe2));
assertTrue("Entries should be added.",
- validateState(FlowEntryState.ADDED, FlowEntryState.ADDED,
- FlowEntryState.PENDING_ADD));
+ validateState(ImmutableMap.of(
+ f1, FlowEntryState.ADDED,
+ f2, FlowEntryState.ADDED,
+ f3, FlowEntryState.PENDING_ADD)));
validateEvents(RULE_ADDED, RULE_ADDED);
}
@@ -336,7 +347,9 @@
//only check that we are in pending remove. Events and actual remove state will
// be set by flowRemoved call.
- validateState(FlowEntryState.PENDING_REMOVE, FlowEntryState.PENDING_REMOVE);
+ validateState(ImmutableMap.of(
+ f1, FlowEntryState.PENDING_REMOVE,
+ f2, FlowEntryState.PENDING_REMOVE));
}
@Test
@@ -360,7 +373,9 @@
Lists.newArrayList(fbe1, fbe2));
Future<CompletedBatchOperation> future = mgr.applyBatch(fbo);
assertTrue("Entries in wrong state",
- validateState(FlowEntryState.PENDING_REMOVE, FlowEntryState.PENDING_ADD));
+ validateState(ImmutableMap.of(
+ f1, FlowEntryState.PENDING_REMOVE,
+ f2, FlowEntryState.PENDING_ADD)));
CompletedBatchOperation completed = null;
try {
completed = future.get();
@@ -381,9 +396,18 @@
mgr.applyFlowRules(f1);
+ assertTrue("Entries in wrong state",
+ validateState(ImmutableMap.of(
+ f1, FlowEntryState.PENDING_ADD)));
+
FlowEntry fe1 = new DefaultFlowEntry(f1);
providerService.pushFlowMetrics(DID, Collections.<FlowEntry>singletonList(fe1));
+ assertTrue("Entries in wrong state",
+ validateState(ImmutableMap.of(
+ f1, FlowEntryState.ADDED)));
+
+
FlowRuleBatchEntry fbe1 = new FlowRuleBatchEntry(
FlowRuleBatchEntry.FlowRuleOperation.REMOVE, f1);
@@ -403,9 +427,9 @@
* state.
*/
assertTrue("Entries in wrong state",
- validateState(FlowEntryState.PENDING_REMOVE,
- FlowEntryState.PENDING_ADD));
-
+ validateState(ImmutableMap.of(
+ f2, FlowEntryState.PENDING_REMOVE,
+ f1, FlowEntryState.PENDING_ADD)));
}
diff --git a/core/net/src/test/java/org/onlab/onos/net/intent/IntentTestsMocks.java b/core/net/src/test/java/org/onlab/onos/net/intent/IntentTestsMocks.java
new file mode 100644
index 0000000..0f020a4
--- /dev/null
+++ b/core/net/src/test/java/org/onlab/onos/net/intent/IntentTestsMocks.java
@@ -0,0 +1,86 @@
+package org.onlab.onos.net.intent;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.onlab.onos.net.ElementId;
+import org.onlab.onos.net.Path;
+import org.onlab.onos.net.flow.TrafficSelector;
+import org.onlab.onos.net.flow.TrafficTreatment;
+import org.onlab.onos.net.flow.criteria.Criterion;
+import org.onlab.onos.net.flow.instructions.Instruction;
+import org.onlab.onos.net.topology.LinkWeight;
+import org.onlab.onos.net.topology.PathService;
+
+import static org.onlab.onos.net.NetTestTools.createPath;
+
+/**
+ * Common mocks used by the intent framework tests.
+ */
+public class IntentTestsMocks {
+ /**
+ * Mock traffic selector class used for satisfying API requirements.
+ */
+ public static class MockSelector implements TrafficSelector {
+ @Override
+ public Set<Criterion> criteria() {
+ return new HashSet<>();
+ }
+ }
+
+ /**
+ * Mock traffic treatment class used for satisfying API requirements.
+ */
+ public static class MockTreatment implements TrafficTreatment {
+ @Override
+ public List<Instruction> instructions() {
+ return new ArrayList<>();
+ }
+ }
+
+ /**
+ * Mock path service for creating paths within the test.
+ */
+ public static class MockPathService implements PathService {
+
+ final String[] pathHops;
+ final String[] reversePathHops;
+
+ /**
+ * Constructor that provides a set of hops to mock.
+ *
+ * @param pathHops path hops to mock
+ */
+ public MockPathService(String[] pathHops) {
+ this.pathHops = pathHops;
+ String[] reversed = pathHops.clone();
+ Collections.reverse(Arrays.asList(reversed));
+ reversePathHops = reversed;
+ }
+
+ @Override
+ public Set<Path> getPaths(ElementId src, ElementId dst) {
+ Set<Path> result = new HashSet<>();
+
+ String[] allHops = new String[pathHops.length];
+
+ if (src.toString().endsWith(pathHops[0])) {
+ System.arraycopy(pathHops, 0, allHops, 0, pathHops.length);
+ } else {
+ System.arraycopy(reversePathHops, 0, allHops, 0, pathHops.length);
+ }
+
+ result.add(createPath(allHops));
+ return result;
+ }
+
+ @Override
+ public Set<Path> getPaths(ElementId src, ElementId dst, LinkWeight weight) {
+ return getPaths(src, dst);
+ }
+ }
+}
diff --git a/core/net/src/test/java/org/onlab/onos/net/intent/LinksHaveEntryWithSourceDestinationPairMatcher.java b/core/net/src/test/java/org/onlab/onos/net/intent/LinksHaveEntryWithSourceDestinationPairMatcher.java
new file mode 100644
index 0000000..869cefe
--- /dev/null
+++ b/core/net/src/test/java/org/onlab/onos/net/intent/LinksHaveEntryWithSourceDestinationPairMatcher.java
@@ -0,0 +1,73 @@
+package org.onlab.onos.net.intent;
+
+import java.util.Collection;
+
+import org.hamcrest.Description;
+import org.hamcrest.TypeSafeMatcher;
+import org.onlab.onos.net.Link;
+
+/**
+ * Matcher to determine if a Collection of Links contains a path between a source
+ * and a destination.
+ */
+public class LinksHaveEntryWithSourceDestinationPairMatcher extends
+ TypeSafeMatcher<Collection<Link>> {
+ private final String source;
+ private final String destination;
+
+ /**
+ * Creates a matcher for a given path represented by a source and
+ * a destination.
+ *
+ * @param source string identifier for the source of the path
+ * @param destination string identifier for the destination of the path
+ */
+ LinksHaveEntryWithSourceDestinationPairMatcher(String source,
+ String destination) {
+ this.source = source;
+ this.destination = destination;
+ }
+
+ @Override
+ public boolean matchesSafely(Collection<Link> links) {
+ for (Link link : links) {
+ if (link.src().elementId().toString().endsWith(source) &&
+ link.dst().elementId().toString().endsWith(destination)) {
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ @Override
+ public void describeTo(Description description) {
+ description.appendText("link lookup for source \"");
+ description.appendText(source);
+ description.appendText(" and destination ");
+ description.appendText(destination);
+ description.appendText("\"");
+ }
+
+ @Override
+ public void describeMismatchSafely(Collection<Link> links,
+ Description mismatchDescription) {
+ mismatchDescription.appendText("was ").
+ appendText(links.toString());
+ }
+
+ /**
+ * Creates a matcher that checks for a path between the given source and destination.
+ *
+ * @param source string identifier for the source of the path
+ * @param destination string identifier for the destination of the path
+ * @return matcher to match the path
+ */
+ public static LinksHaveEntryWithSourceDestinationPairMatcher linksHasPath(
+ String source,
+ String destination) {
+ return new LinksHaveEntryWithSourceDestinationPairMatcher(source,
+ destination);
+ }
+}
+
diff --git a/core/net/src/test/java/org/onlab/onos/net/intent/TestHostToHostIntent.java b/core/net/src/test/java/org/onlab/onos/net/intent/TestHostToHostIntent.java
new file mode 100644
index 0000000..f5e2551
--- /dev/null
+++ b/core/net/src/test/java/org/onlab/onos/net/intent/TestHostToHostIntent.java
@@ -0,0 +1,112 @@
+package org.onlab.onos.net.intent;
+
+import org.junit.Test;
+import org.onlab.onos.net.HostId;
+import org.onlab.onos.net.flow.TrafficSelector;
+import org.onlab.onos.net.flow.TrafficTreatment;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.not;
+import static org.onlab.onos.net.NetTestTools.hid;
+
+/**
+ * Unit tests for the HostToHostIntent class.
+ */
+public class TestHostToHostIntent {
+
+ private TrafficSelector selector = new IntentTestsMocks.MockSelector();
+ private TrafficTreatment treatment = new IntentTestsMocks.MockTreatment();
+
+ private HostToHostIntent makeHostToHost(long id, HostId one, HostId two) {
+ return new HostToHostIntent(new IntentId(id),
+ one,
+ two,
+ selector,
+ treatment);
+ }
+
+ /**
+ * Tests the equals() method where two HostToHostIntents have references
+ * to the same hosts. These should compare equal.
+ */
+ @Test
+ public void testSameEquals() {
+
+ HostId one = hid("00:00:00:00:00:01/-1");
+ HostId two = hid("00:00:00:00:00:02/-1");
+ HostToHostIntent i1 = makeHostToHost(12, one, two);
+ HostToHostIntent i2 = makeHostToHost(12, one, two);
+
+ assertThat(i1, is(equalTo(i2)));
+ }
+
+ /**
+ * Tests the equals() method where two HostToHostIntents have references
+ * to different Hosts. These should compare not equal.
+ */
+ @Test
+ public void testLinksDifferentEquals() {
+
+ HostId one = hid("00:00:00:00:00:01/-1");
+ HostId two = hid("00:00:00:00:00:02/-1");
+ HostToHostIntent i1 = makeHostToHost(12, one, two);
+ HostToHostIntent i2 = makeHostToHost(12, two, one);
+
+ assertThat(i1, is(not(equalTo(i2))));
+ }
+
+ /**
+ * Tests the equals() method where two HostToHostIntents have different
+ * ids. These should compare not equal.
+ */
+
+ @Test
+ public void testBaseDifferentEquals() {
+ HostId one = hid("00:00:00:00:00:01/-1");
+ HostId two = hid("00:00:00:00:00:02/-1");
+ HostToHostIntent i1 = makeHostToHost(12, one, two);
+ HostToHostIntent i2 = makeHostToHost(11, one, two);
+
+ assertThat(i1, is(not(equalTo(i2))));
+ }
+
+ /**
+ * Tests that the hashCode() values for two equivalent HostToHostIntent
+ * objects are the same.
+ */
+
+ @Test
+ public void testHashCodeEquals() {
+ HostId one = hid("00:00:00:00:00:01/-1");
+ HostId two = hid("00:00:00:00:00:02/-1");
+ HostToHostIntent i1 = makeHostToHost(12, one, two);
+ HostToHostIntent i2 = makeHostToHost(12, one, two);
+
+ assertThat(i1.hashCode(), is(equalTo(i2.hashCode())));
+ }
+
+ /**
+ * Tests that the hashCode() values for two distinct HostToHostIntent
+ * objects are different.
+ */
+
+ @Test
+ public void testHashCodeDifferent() {
+ HostId one = hid("00:00:00:00:00:01/-1");
+ HostId two = hid("00:00:00:00:00:02/-1");
+ HostToHostIntent i1 = makeHostToHost(12, one, two);
+ HostToHostIntent i2 = makeHostToHost(112, one, two);
+
+ assertThat(i1.hashCode(), is(not(equalTo(i2.hashCode()))));
+ }
+
+ /**
+ * Checks that the HostToHostIntent class is immutable.
+ */
+ @Test
+ public void checkImmutability() {
+ ImmutableClassChecker.assertThatClassIsImmutable(HostToHostIntent.class);
+ }
+}
diff --git a/core/net/src/test/java/org/onlab/onos/net/intent/TestLinkCollectionIntent.java b/core/net/src/test/java/org/onlab/onos/net/intent/TestLinkCollectionIntent.java
index ba67a6a..a7082b4 100644
--- a/core/net/src/test/java/org/onlab/onos/net/intent/TestLinkCollectionIntent.java
+++ b/core/net/src/test/java/org/onlab/onos/net/intent/TestLinkCollectionIntent.java
@@ -1,47 +1,174 @@
package org.onlab.onos.net.intent;
-import java.util.ArrayList;
+import static org.hamcrest.CoreMatchers.not;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.onlab.onos.net.NetTestTools.link;
+
import java.util.HashSet;
-import java.util.List;
import java.util.Set;
+import org.junit.Before;
import org.junit.Test;
+import org.onlab.onos.net.ConnectPoint;
+import org.onlab.onos.net.DeviceId;
import org.onlab.onos.net.Link;
+import org.onlab.onos.net.PortNumber;
import org.onlab.onos.net.flow.TrafficSelector;
import org.onlab.onos.net.flow.TrafficTreatment;
-import org.onlab.onos.net.flow.criteria.Criterion;
-import org.onlab.onos.net.flow.instructions.Instruction;
-import static org.hamcrest.MatcherAssert.assertThat;
-import static org.hamcrest.Matchers.is;
-
+/**
+ * Unit tests for the LinkCollectionIntent class.
+ */
public class TestLinkCollectionIntent {
- private static class MockSelector implements TrafficSelector {
- @Override
- public Set<Criterion> criteria() {
- return new HashSet<Criterion>();
- }
+ private Link link1 = link("dev1", 1, "dev2", 2);
+ private Link link2 = link("dev1", 1, "dev3", 2);
+ private Link link3 = link("dev2", 1, "dev3", 2);
+
+ private Set<Link> links1;
+ private Set<Link> links2;
+
+ private ConnectPoint egress1 = new ConnectPoint(DeviceId.deviceId("dev1"),
+ PortNumber.portNumber(3));
+ private ConnectPoint egress2 = new ConnectPoint(DeviceId.deviceId("dev2"),
+ PortNumber.portNumber(3));
+
+ private TrafficSelector selector = new IntentTestsMocks.MockSelector();
+ private TrafficTreatment treatment = new IntentTestsMocks.MockTreatment();
+
+ private LinkCollectionIntent makeLinkCollection(long id, Set<Link> links,
+ ConnectPoint egress) {
+ return new LinkCollectionIntent(new IntentId(id),
+ selector, treatment, links, egress);
}
- private static class MockTreatment implements TrafficTreatment {
- @Override
- public List<Instruction> instructions() {
- return new ArrayList<>();
- }
+ @Before
+ public void setup() {
+ links1 = new HashSet<>();
+ links2 = new HashSet<>();
}
+ /**
+ * Tests the equals() method where two LinkCollectionIntents have references
+ * to the same Links in different orders. These should compare equal.
+ */
@Test
- public void testComparison() {
- TrafficSelector selector = new MockSelector();
- TrafficTreatment treatment = new MockTreatment();
- Set<Link> links = new HashSet<>();
- LinkCollectionIntent i1 = new LinkCollectionIntent(new IntentId(12),
- selector, treatment, links);
- LinkCollectionIntent i2 = new LinkCollectionIntent(new IntentId(12),
- selector, treatment, links);
+ public void testSameEquals() {
+ links1.add(link1);
+ links1.add(link2);
+ links1.add(link3);
- assertThat(i1.equals(i2), is(true));
+ links2.add(link3);
+ links2.add(link2);
+ links2.add(link1);
+
+ LinkCollectionIntent i1 = makeLinkCollection(12, links1, egress1);
+ LinkCollectionIntent i2 = makeLinkCollection(12, links2, egress1);
+
+ assertThat(i1, is(equalTo(i2)));
}
+ /**
+ * Tests the equals() method where two LinkCollectionIntents have references
+ * to different Links. These should compare not equal.
+ */
+ @Test
+ public void testLinksDifferentEquals() {
+ links1.add(link1);
+ links1.add(link2);
+
+ links2.add(link3);
+ links2.add(link1);
+
+ LinkCollectionIntent i1 = makeLinkCollection(12, links1, egress1);
+ LinkCollectionIntent i2 = makeLinkCollection(12, links2, egress1);
+
+ assertThat(i1, is(not(equalTo(i2))));
+ }
+
+ /**
+ * Tests the equals() method where two LinkCollectionIntents have references
+ * to the same Links but different egress points. These should compare not equal.
+ */
+ @Test
+ public void testEgressDifferentEquals() {
+ links1.add(link1);
+ links1.add(link2);
+ links1.add(link3);
+
+ links2.add(link3);
+ links2.add(link2);
+ links2.add(link1);
+
+ LinkCollectionIntent i1 = makeLinkCollection(12, links1, egress1);
+ LinkCollectionIntent i2 = makeLinkCollection(12, links2, egress2);
+
+ assertThat(i1, is(not(equalTo(i2))));
+ }
+
+ /**
+ * Tests the equals() method where two LinkCollectionIntents have different
+ * ids. These should compare not equal.
+ */
+ @Test
+ public void testBaseDifferentEquals() {
+ links1.add(link1);
+ links1.add(link2);
+
+ links2.add(link2);
+ links2.add(link1);
+
+ LinkCollectionIntent i1 = makeLinkCollection(1, links1, egress1);
+ LinkCollectionIntent i2 = makeLinkCollection(2, links2, egress1);
+
+ assertThat(i1, is(not(equalTo(i2))));
+ }
+
+ /**
+ * Tests that the hashCode() values for two equivalent LinkCollectionIntent
+ * objects are the same.
+ */
+ @Test
+ public void testHashCodeEquals() {
+ links1.add(link1);
+ links1.add(link2);
+ links1.add(link3);
+
+ links2.add(link3);
+ links2.add(link2);
+ links2.add(link1);
+
+ LinkCollectionIntent i1 = makeLinkCollection(1, links1, egress1);
+ LinkCollectionIntent i2 = makeLinkCollection(1, links2, egress1);
+
+ assertThat(i1.hashCode(), is(equalTo(i2.hashCode())));
+ }
+
+ /**
+ * Tests that the hashCode() values for two distinct LinkCollectionIntent
+ * objects are different.
+ */
+ @Test
+ public void testHashCodeDifferent() {
+ links1.add(link1);
+ links1.add(link2);
+
+ links2.add(link1);
+ links2.add(link3);
+
+ LinkCollectionIntent i1 = makeLinkCollection(1, links1, egress1);
+ LinkCollectionIntent i2 = makeLinkCollection(1, links2, egress2);
+
+ assertThat(i1.hashCode(), is(not(equalTo(i2.hashCode()))));
+ }
+
+ /**
+ * Checks that the LinkCollectionIntent class is immutable.
+ */
+ @Test
+ public void checkImmutability() {
+ ImmutableClassChecker.assertThatClassIsImmutable(LinkCollectionIntent.class);
+ }
}
diff --git a/core/net/src/test/java/org/onlab/onos/net/intent/TestMultiPointToSinglePointIntent.java b/core/net/src/test/java/org/onlab/onos/net/intent/TestMultiPointToSinglePointIntent.java
new file mode 100644
index 0000000..e921907
--- /dev/null
+++ b/core/net/src/test/java/org/onlab/onos/net/intent/TestMultiPointToSinglePointIntent.java
@@ -0,0 +1,157 @@
+package org.onlab.onos.net.intent;
+
+import java.util.HashSet;
+import java.util.Set;
+
+import org.junit.Before;
+import org.junit.Test;
+import org.onlab.onos.net.ConnectPoint;
+import org.onlab.onos.net.flow.TrafficSelector;
+import org.onlab.onos.net.flow.TrafficTreatment;
+
+import static org.hamcrest.CoreMatchers.not;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.onlab.onos.net.NetTestTools.connectPoint;
+
+/**
+ * Unit tests for the MultiPointToSinglePointIntent class.
+ */
+public class TestMultiPointToSinglePointIntent {
+
+ private ConnectPoint point1 = connectPoint("dev1", 1);
+ private ConnectPoint point2 = connectPoint("dev2", 1);
+ private ConnectPoint point3 = connectPoint("dev3", 1);
+
+ private TrafficSelector selector = new IntentTestsMocks.MockSelector();
+ private TrafficTreatment treatment = new IntentTestsMocks.MockTreatment();
+
+ Set<ConnectPoint> ingress1;
+ Set<ConnectPoint> ingress2;
+
+ /**
+ * Creates a MultiPointToSinglePointIntent object.
+ *
+ * @param id identifier to use for the new intent
+ * @param ingress set of ingress points
+ * @param egress egress point
+ * @return MultiPointToSinglePoint intent
+ */
+ private MultiPointToSinglePointIntent makeIntent(long id,
+ Set<ConnectPoint> ingress,
+ ConnectPoint egress) {
+ return new MultiPointToSinglePointIntent(new IntentId(id),
+ selector,
+ treatment,
+ ingress,
+ egress);
+ }
+
+ /**
+ * Initializes the ingress sets.
+ */
+ @Before
+ public void setup() {
+ ingress1 = new HashSet<>();
+ ingress2 = new HashSet<>();
+ }
+
+ /**
+ * Tests the equals() method where two MultiPointToSinglePointIntents have
+ * references to the same ingress points in different orders. These should compare equal.
+ */
+ @Test
+ public void testSameEquals() {
+
+ ingress1.add(point2);
+ ingress1.add(point3);
+
+ ingress2.add(point3);
+ ingress2.add(point2);
+
+ Intent i1 = makeIntent(12, ingress1, point1);
+ Intent i2 = makeIntent(12, ingress2, point1);
+
+ assertThat(i1, is(equalTo(i2)));
+ }
+
+ /**
+ * Tests the equals() method where two MultiPointToSinglePointIntents have
+ * references to different ingress points. These should compare not equal.
+ */
+ @Test
+ public void testLinksDifferentEquals() {
+ ingress1.add(point3);
+
+ ingress2.add(point3);
+ ingress2.add(point2);
+
+ Intent i1 = makeIntent(12, ingress1, point1);
+ Intent i2 = makeIntent(12, ingress2, point1);
+
+ assertThat(i1, is(not(equalTo(i2))));
+ }
+
+ /**
+ * Tests the equals() method where two MultiPointToSinglePoint have different
+ * ids. These should compare not equal.
+ */
+ @Test
+ public void testBaseDifferentEquals() {
+ ingress1.add(point3);
+ ingress2.add(point3);
+
+ Intent i1 = makeIntent(12, ingress1, point1);
+ Intent i2 = makeIntent(11, ingress2, point1);
+
+ assertThat(i1, is(not(equalTo(i2))));
+ }
+
+ /**
+ * Tests that the hashCode() values for two equivalent MultiPointToSinglePoint
+ * objects are the same.
+ */
+ @Test
+ public void testHashCodeEquals() {
+ ingress1.add(point2);
+ ingress1.add(point3);
+
+ ingress2.add(point3);
+ ingress2.add(point2);
+
+ Intent i1 = makeIntent(12, ingress1, point1);
+ Intent i2 = makeIntent(12, ingress2, point1);
+
+ assertThat(i1.hashCode(), is(equalTo(i2.hashCode())));
+ }
+
+ /**
+ * Tests that the hashCode() values for two distinct MultiPointToSinglePoint
+ * objects are different.
+ */
+ @Test
+ public void testHashCodeDifferent() {
+ ingress1.add(point2);
+
+ ingress2.add(point3);
+ ingress2.add(point2);
+
+ Intent i1 = makeIntent(12, ingress1, point1);
+ Intent i2 = makeIntent(12, ingress2, point1);
+
+
+ assertThat(i1.hashCode(), is(not(equalTo(i2.hashCode()))));
+ }
+
+ /**
+ * Checks that the MultiPointToSinglePointIntent class is immutable.
+ */
+ @Test
+ public void checkImmutability() {
+ ImmutableClassChecker.
+ assertThatClassIsImmutable(MultiPointToSinglePointIntent.class);
+ }
+}
diff --git a/core/net/src/test/java/org/onlab/onos/net/intent/TestPointToPointIntent.java b/core/net/src/test/java/org/onlab/onos/net/intent/TestPointToPointIntent.java
new file mode 100644
index 0000000..41769c6
--- /dev/null
+++ b/core/net/src/test/java/org/onlab/onos/net/intent/TestPointToPointIntent.java
@@ -0,0 +1,96 @@
+package org.onlab.onos.net.intent;
+
+import org.junit.Test;
+import org.onlab.onos.net.ConnectPoint;
+import org.onlab.onos.net.flow.TrafficSelector;
+import org.onlab.onos.net.flow.TrafficTreatment;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.not;
+import static org.onlab.onos.net.NetTestTools.connectPoint;
+
+/**
+ * Unit tests for the PointToPointIntent class.
+ */
+public class TestPointToPointIntent {
+
+ private TrafficSelector selector = new IntentTestsMocks.MockSelector();
+ private TrafficTreatment treatment = new IntentTestsMocks.MockTreatment();
+
+ private ConnectPoint point1 = connectPoint("dev1", 1);
+ private ConnectPoint point2 = connectPoint("dev2", 1);
+
+ private PointToPointIntent makePointToPoint(long id,
+ ConnectPoint ingress,
+ ConnectPoint egress) {
+ return new PointToPointIntent(new IntentId(id),
+ selector,
+ treatment,
+ ingress,
+ egress);
+ }
+
+ /**
+ * Tests the equals() method where two PointToPointIntents have references
+ * to the same ingress and egress points. These should compare equal.
+ */
+ @Test
+ public void testSameEquals() {
+ PointToPointIntent i1 = makePointToPoint(12, point1, point2);
+ PointToPointIntent i2 = makePointToPoint(12, point1, point2);
+
+ assertThat(i1, is(equalTo(i2)));
+ }
+
+ /**
+ * Tests the equals() method where two PointToPointIntents have their
+ * ingress and egress points swapped. These should compare not equal.
+ */
+ @Test
+ public void testLinksDifferentEquals() {
+
+ PointToPointIntent i1 = makePointToPoint(12, point1, point2);
+ PointToPointIntent i2 = makePointToPoint(12, point2, point1);
+
+ assertThat(i1, is(not(equalTo(i2))));
+ }
+
+ /**
+ * Tests the equals() method where two PointToPointIntents have different
+ * ids. These should compare not equal.
+ */
+ @Test
+ public void testBaseDifferentEquals() {
+ PointToPointIntent i1 = makePointToPoint(12, point1, point2);
+ PointToPointIntent i2 = makePointToPoint(11, point1, point2);
+
+
+ assertThat(i1, is(not(equalTo(i2))));
+ }
+
+ /**
+ * Tests that the hashCode() values for two equivalent PointToPointIntent
+ * objects are the same.
+ */
+ @Test
+ public void testHashCodeEquals() {
+ PointToPointIntent i1 = makePointToPoint(12, point1, point2);
+ PointToPointIntent i2 = makePointToPoint(12, point1, point2);
+
+ assertThat(i1.hashCode(), is(equalTo(i2.hashCode())));
+ }
+
+ /**
+ * Tests that the hashCode() values for two distinct PointToPointIntent
+ * objects are different.
+ */
+ @Test
+ public void testHashCodeDifferent() {
+ PointToPointIntent i1 = makePointToPoint(12, point1, point2);
+ PointToPointIntent i2 = makePointToPoint(22, point1, point2);
+
+ assertThat(i1.hashCode(), is(not(equalTo(i2.hashCode()))));
+ }
+}
diff --git a/core/net/src/test/java/org/onlab/onos/net/intent/impl/TestHostToHostIntentCompiler.java b/core/net/src/test/java/org/onlab/onos/net/intent/impl/TestHostToHostIntentCompiler.java
new file mode 100644
index 0000000..bd61b7a
--- /dev/null
+++ b/core/net/src/test/java/org/onlab/onos/net/intent/impl/TestHostToHostIntentCompiler.java
@@ -0,0 +1,151 @@
+package org.onlab.onos.net.intent.impl;
+
+import java.util.List;
+
+import org.hamcrest.Matchers;
+import org.junit.Before;
+import org.junit.Test;
+import org.onlab.onos.net.Host;
+import org.onlab.onos.net.HostId;
+import org.onlab.onos.net.flow.TrafficSelector;
+import org.onlab.onos.net.flow.TrafficTreatment;
+import org.onlab.onos.net.host.HostService;
+import org.onlab.onos.net.intent.HostToHostIntent;
+import org.onlab.onos.net.intent.Intent;
+import org.onlab.onos.net.intent.IntentId;
+import org.onlab.onos.net.intent.IntentTestsMocks;
+import org.onlab.onos.net.intent.PathIntent;
+import org.onlab.packet.MacAddress;
+import org.onlab.packet.VlanId;
+
+import static org.easymock.EasyMock.createMock;
+import static org.easymock.EasyMock.eq;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.replay;
+import static org.hamcrest.CoreMatchers.notNullValue;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.hasSize;
+import static org.hamcrest.Matchers.is;
+import static org.onlab.onos.net.NetTestTools.hid;
+import static org.onlab.onos.net.intent.LinksHaveEntryWithSourceDestinationPairMatcher.linksHasPath;
+
+/**
+ * Unit tests for the HostToHost intent compiler.
+ */
+public class TestHostToHostIntentCompiler {
+ private static final String HOST_ONE_MAC = "00:00:00:00:00:01";
+ private static final String HOST_TWO_MAC = "00:00:00:00:00:02";
+ private static final String HOST_ONE_VLAN = "-1";
+ private static final String HOST_TWO_VLAN = "-1";
+ private static final String HOST_ONE = HOST_ONE_MAC + "/" + HOST_ONE_VLAN;
+ private static final String HOST_TWO = HOST_TWO_MAC + "/" + HOST_TWO_VLAN;
+
+ private TrafficSelector selector = new IntentTestsMocks.MockSelector();
+ private TrafficTreatment treatment = new IntentTestsMocks.MockTreatment();
+
+ private HostId hostOneId = HostId.hostId(HOST_ONE);
+ private HostId hostTwoId = HostId.hostId(HOST_TWO);
+ private HostService mockHostService;
+
+ @Before
+ public void setup() {
+ Host hostOne = createMock(Host.class);
+ expect(hostOne.mac()).andReturn(new MacAddress(HOST_ONE_MAC.getBytes())).anyTimes();
+ expect(hostOne.vlan()).andReturn(VlanId.vlanId()).anyTimes();
+ replay(hostOne);
+
+ Host hostTwo = createMock(Host.class);
+ expect(hostTwo.mac()).andReturn(new MacAddress(HOST_TWO_MAC.getBytes())).anyTimes();
+ expect(hostTwo.vlan()).andReturn(VlanId.vlanId()).anyTimes();
+ replay(hostTwo);
+
+ mockHostService = createMock(HostService.class);
+ expect(mockHostService.getHost(eq(hostOneId))).andReturn(hostOne).anyTimes();
+ expect(mockHostService.getHost(eq(hostTwoId))).andReturn(hostTwo).anyTimes();
+ replay(mockHostService);
+ }
+
+ /**
+ * Creates a HostToHost intent based on two host Ids.
+ *
+ * @param oneIdString string for host one id
+ * @param twoIdString string for host two id
+ * @return HostToHostIntent for the two hosts
+ */
+ private HostToHostIntent makeIntent(String oneIdString, String twoIdString) {
+ return new HostToHostIntent(new IntentId(12),
+ hid(oneIdString),
+ hid(twoIdString),
+ selector,
+ treatment);
+ }
+
+ /**
+ * Creates a compiler for HostToHost intents.
+ *
+ * @param hops string array describing the path hops to use when compiling
+ * @return HostToHost intent compiler
+ */
+ private HostToHostIntentCompiler makeCompiler(String[] hops) {
+ HostToHostIntentCompiler compiler =
+ new HostToHostIntentCompiler();
+ compiler.pathService = new IntentTestsMocks.MockPathService(hops);
+ compiler.hostService = mockHostService;
+ IdBlockAllocator idBlockAllocator = new DummyIdBlockAllocator();
+ compiler.intentIdGenerator =
+ new IdBlockAllocatorBasedIntentIdGenerator(idBlockAllocator);
+ return compiler;
+ }
+
+
+ /**
+ * Tests a pair of hosts with 8 hops between them.
+ */
+ @Test
+ public void testSingleLongPathCompilation() {
+
+ HostToHostIntent intent = makeIntent(HOST_ONE,
+ HOST_TWO);
+ assertThat(intent, is(notNullValue()));
+
+ String[] hops = {HOST_ONE, "h1", "h2", "h3", "h4", "h5", "h6", "h7", "h8", HOST_TWO};
+ HostToHostIntentCompiler compiler = makeCompiler(hops);
+ assertThat(compiler, is(notNullValue()));
+
+ List<Intent> result = compiler.compile(intent);
+ assertThat(result, is(Matchers.notNullValue()));
+ assertThat(result, hasSize(2));
+ Intent forwardResultIntent = result.get(0);
+ assertThat(forwardResultIntent instanceof PathIntent, is(true));
+ Intent reverseResultIntent = result.get(1);
+ assertThat(reverseResultIntent instanceof PathIntent, is(true));
+
+ if (forwardResultIntent instanceof PathIntent) {
+ PathIntent forwardPathIntent = (PathIntent) forwardResultIntent;
+ assertThat(forwardPathIntent.path().links(), hasSize(9));
+ assertThat(forwardPathIntent.path().links(), linksHasPath(HOST_ONE, "h1"));
+ assertThat(forwardPathIntent.path().links(), linksHasPath("h1", "h2"));
+ assertThat(forwardPathIntent.path().links(), linksHasPath("h2", "h3"));
+ assertThat(forwardPathIntent.path().links(), linksHasPath("h3", "h4"));
+ assertThat(forwardPathIntent.path().links(), linksHasPath("h4", "h5"));
+ assertThat(forwardPathIntent.path().links(), linksHasPath("h5", "h6"));
+ assertThat(forwardPathIntent.path().links(), linksHasPath("h6", "h7"));
+ assertThat(forwardPathIntent.path().links(), linksHasPath("h7", "h8"));
+ assertThat(forwardPathIntent.path().links(), linksHasPath("h8", HOST_TWO));
+ }
+
+ if (reverseResultIntent instanceof PathIntent) {
+ PathIntent reversePathIntent = (PathIntent) reverseResultIntent;
+ assertThat(reversePathIntent.path().links(), hasSize(9));
+ assertThat(reversePathIntent.path().links(), linksHasPath("h1", HOST_ONE));
+ assertThat(reversePathIntent.path().links(), linksHasPath("h2", "h1"));
+ assertThat(reversePathIntent.path().links(), linksHasPath("h3", "h2"));
+ assertThat(reversePathIntent.path().links(), linksHasPath("h4", "h3"));
+ assertThat(reversePathIntent.path().links(), linksHasPath("h5", "h4"));
+ assertThat(reversePathIntent.path().links(), linksHasPath("h6", "h5"));
+ assertThat(reversePathIntent.path().links(), linksHasPath("h7", "h6"));
+ assertThat(reversePathIntent.path().links(), linksHasPath("h8", "h7"));
+ assertThat(reversePathIntent.path().links(), linksHasPath(HOST_TWO, "h8"));
+ }
+ }
+}
diff --git a/core/net/src/test/java/org/onlab/onos/net/intent/impl/TestMultiPointToSinglePointIntentCompiler.java b/core/net/src/test/java/org/onlab/onos/net/intent/impl/TestMultiPointToSinglePointIntentCompiler.java
new file mode 100644
index 0000000..8d286cf
--- /dev/null
+++ b/core/net/src/test/java/org/onlab/onos/net/intent/impl/TestMultiPointToSinglePointIntentCompiler.java
@@ -0,0 +1,214 @@
+package org.onlab.onos.net.intent.impl;
+
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.hamcrest.Matchers;
+import org.junit.Test;
+import org.onlab.onos.net.ConnectPoint;
+import org.onlab.onos.net.ElementId;
+import org.onlab.onos.net.Path;
+import org.onlab.onos.net.flow.TrafficSelector;
+import org.onlab.onos.net.flow.TrafficTreatment;
+import org.onlab.onos.net.intent.Intent;
+import org.onlab.onos.net.intent.IntentId;
+import org.onlab.onos.net.intent.IntentTestsMocks;
+import org.onlab.onos.net.intent.LinkCollectionIntent;
+import org.onlab.onos.net.intent.MultiPointToSinglePointIntent;
+import org.onlab.onos.net.topology.LinkWeight;
+import org.onlab.onos.net.topology.PathService;
+
+import static org.hamcrest.CoreMatchers.notNullValue;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.hasSize;
+import static org.hamcrest.Matchers.is;
+import static org.onlab.onos.net.NetTestTools.connectPoint;
+import static org.onlab.onos.net.NetTestTools.createPath;
+import static org.onlab.onos.net.intent.LinksHaveEntryWithSourceDestinationPairMatcher.linksHasPath;
+
+/**
+ * Unit tests for the MultiPointToSinglePoint intent compiler.
+ */
+public class TestMultiPointToSinglePointIntentCompiler {
+
+ private TrafficSelector selector = new IntentTestsMocks.MockSelector();
+ private TrafficTreatment treatment = new IntentTestsMocks.MockTreatment();
+
+ /**
+ * Mock path service for creating paths within the test.
+ */
+ private static class MockPathService implements PathService {
+
+ final String[] pathHops;
+
+ /**
+ * Constructor that provides a set of hops to mock.
+ *
+ * @param pathHops path hops to mock
+ */
+ MockPathService(String[] pathHops) {
+ this.pathHops = pathHops;
+ }
+
+ @Override
+ public Set<Path> getPaths(ElementId src, ElementId dst) {
+ Set<Path> result = new HashSet<>();
+
+ String[] allHops = new String[pathHops.length + 1];
+ allHops[0] = src.toString();
+ System.arraycopy(pathHops, 0, allHops, 1, pathHops.length);
+
+ result.add(createPath(allHops));
+ return result;
+ }
+
+ @Override
+ public Set<Path> getPaths(ElementId src, ElementId dst, LinkWeight weight) {
+ return null;
+ }
+ }
+
+ /**
+ * Creates a MultiPointToSinglePoint intent for a group of ingress points
+ * and an egress point.
+ *
+ * @param ingressIds array of ingress device ids
+ * @param egressId device id of the egress point
+ * @return MultiPointToSinglePoint intent
+ */
+ private MultiPointToSinglePointIntent makeIntent(String[] ingressIds, String egressId) {
+ Set<ConnectPoint> ingressPoints = new HashSet<>();
+ ConnectPoint egressPoint = connectPoint(egressId, 1);
+
+ for (String ingressId : ingressIds) {
+ ingressPoints.add(connectPoint(ingressId, 1));
+ }
+
+ return new MultiPointToSinglePointIntent(
+ new IntentId(12),
+ selector,
+ treatment,
+ ingressPoints,
+ egressPoint);
+ }
+
+ /**
+ * Creates a compiler for MultiPointToSinglePoint intents.
+ *
+ * @param hops hops to use while computing paths for this intent
+ * @return MultiPointToSinglePoint intent compiler
+ */
+ private MultiPointToSinglePointIntentCompiler makeCompiler(String[] hops) {
+ MultiPointToSinglePointIntentCompiler compiler =
+ new MultiPointToSinglePointIntentCompiler();
+ compiler.pathService = new MockPathService(hops);
+ IdBlockAllocator idBlockAllocator = new DummyIdBlockAllocator();
+ compiler.intentIdGenerator =
+ new IdBlockAllocatorBasedIntentIdGenerator(idBlockAllocator);
+ return compiler;
+ }
+
+ /**
+ * Tests a single ingress point with 8 hops to its egress point.
+ */
+ @Test
+ public void testSingleLongPathCompilation() {
+
+ String[] ingress = {"ingress"};
+ String egress = "egress";
+
+ MultiPointToSinglePointIntent intent = makeIntent(ingress, egress);
+ assertThat(intent, is(notNullValue()));
+
+ String[] hops = {"h1", "h2", "h3", "h4", "h5", "h6", "h7", "h8",
+ egress};
+ MultiPointToSinglePointIntentCompiler compiler = makeCompiler(hops);
+ assertThat(compiler, is(notNullValue()));
+
+ List<Intent> result = compiler.compile(intent);
+ assertThat(result, is(Matchers.notNullValue()));
+ assertThat(result, hasSize(1));
+ Intent resultIntent = result.get(0);
+ assertThat(resultIntent instanceof LinkCollectionIntent, is(true));
+
+ if (resultIntent instanceof LinkCollectionIntent) {
+ LinkCollectionIntent linkIntent = (LinkCollectionIntent) resultIntent;
+ assertThat(linkIntent.links(), hasSize(9));
+ assertThat(linkIntent.links(), linksHasPath("ingress", "h1"));
+ assertThat(linkIntent.links(), linksHasPath("h1", "h2"));
+ assertThat(linkIntent.links(), linksHasPath("h2", "h3"));
+ assertThat(linkIntent.links(), linksHasPath("h4", "h5"));
+ assertThat(linkIntent.links(), linksHasPath("h5", "h6"));
+ assertThat(linkIntent.links(), linksHasPath("h7", "h8"));
+ assertThat(linkIntent.links(), linksHasPath("h8", "egress"));
+ }
+ }
+
+ /**
+ * Tests a simple topology where two ingress points share some path segments
+ * and some path segments are not shared.
+ */
+ @Test
+ public void testTwoIngressCompilation() {
+ String[] ingress = {"ingress1", "ingress2"};
+ String egress = "egress";
+
+ MultiPointToSinglePointIntent intent = makeIntent(ingress, egress);
+ assertThat(intent, is(notNullValue()));
+
+ final String[] hops = {"inner1", "inner2", egress};
+ MultiPointToSinglePointIntentCompiler compiler = makeCompiler(hops);
+ assertThat(compiler, is(notNullValue()));
+
+ List<Intent> result = compiler.compile(intent);
+ assertThat(result, is(notNullValue()));
+ assertThat(result, hasSize(1));
+ Intent resultIntent = result.get(0);
+ assertThat(resultIntent instanceof LinkCollectionIntent, is(true));
+
+ if (resultIntent instanceof LinkCollectionIntent) {
+ LinkCollectionIntent linkIntent = (LinkCollectionIntent) resultIntent;
+ assertThat(linkIntent.links(), hasSize(4));
+ assertThat(linkIntent.links(), linksHasPath("ingress1", "inner1"));
+ assertThat(linkIntent.links(), linksHasPath("ingress2", "inner1"));
+ assertThat(linkIntent.links(), linksHasPath("inner1", "inner2"));
+ assertThat(linkIntent.links(), linksHasPath("inner2", "egress"));
+ }
+ }
+
+ /**
+ * Tests a large number of ingress points that share a common path to the
+ * egress point.
+ */
+ @Test
+ public void testMultiIngressCompilation() {
+ String[] ingress = {"i1", "i2", "i3", "i4", "i5",
+ "i6", "i7", "i8", "i9", "i10"};
+ String egress = "e";
+
+ MultiPointToSinglePointIntent intent = makeIntent(ingress, egress);
+ assertThat(intent, is(notNullValue()));
+
+ final String[] hops = {"n1", egress};
+ MultiPointToSinglePointIntentCompiler compiler = makeCompiler(hops);
+ assertThat(compiler, is(notNullValue()));
+
+ List<Intent> result = compiler.compile(intent);
+ assertThat(result, is(notNullValue()));
+ assertThat(result, hasSize(1));
+ Intent resultIntent = result.get(0);
+ assertThat(resultIntent instanceof LinkCollectionIntent, is(true));
+
+ if (resultIntent instanceof LinkCollectionIntent) {
+ LinkCollectionIntent linkIntent = (LinkCollectionIntent) resultIntent;
+ assertThat(linkIntent.links(), hasSize(ingress.length + 1));
+ for (String ingressToCheck : ingress) {
+ assertThat(linkIntent.links(),
+ linksHasPath(ingressToCheck,
+ "n1"));
+ }
+ assertThat(linkIntent.links(), linksHasPath("n1", egress));
+ }
+ }
+}
diff --git a/core/net/src/test/java/org/onlab/onos/net/intent/impl/TestPointToPointIntentCompiler.java b/core/net/src/test/java/org/onlab/onos/net/intent/impl/TestPointToPointIntentCompiler.java
new file mode 100644
index 0000000..e282347
--- /dev/null
+++ b/core/net/src/test/java/org/onlab/onos/net/intent/impl/TestPointToPointIntentCompiler.java
@@ -0,0 +1,127 @@
+package org.onlab.onos.net.intent.impl;
+
+import java.util.List;
+
+import org.hamcrest.Matchers;
+import org.junit.Test;
+import org.onlab.onos.net.flow.TrafficSelector;
+import org.onlab.onos.net.flow.TrafficTreatment;
+import org.onlab.onos.net.intent.Intent;
+import org.onlab.onos.net.intent.IntentId;
+import org.onlab.onos.net.intent.IntentTestsMocks;
+import org.onlab.onos.net.intent.PathIntent;
+import org.onlab.onos.net.intent.PointToPointIntent;
+
+import static org.hamcrest.CoreMatchers.notNullValue;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.hasSize;
+import static org.hamcrest.Matchers.is;
+import static org.onlab.onos.net.NetTestTools.connectPoint;
+import static org.onlab.onos.net.intent.LinksHaveEntryWithSourceDestinationPairMatcher.linksHasPath;
+
+/**
+ * Unit tests for the PointToPoint intent compiler.
+ */
+public class TestPointToPointIntentCompiler {
+
+ private TrafficSelector selector = new IntentTestsMocks.MockSelector();
+ private TrafficTreatment treatment = new IntentTestsMocks.MockTreatment();
+
+ /**
+ * Creates a PointToPoint intent based on ingress and egress device Ids.
+ *
+ * @param ingressIdString string for id of ingress device
+ * @param egressIdString string for id of egress device
+ * @return PointToPointIntent for the two devices
+ */
+ private PointToPointIntent makeIntent(String ingressIdString,
+ String egressIdString) {
+ return new PointToPointIntent(new IntentId(12),
+ selector,
+ treatment,
+ connectPoint(ingressIdString, 1),
+ connectPoint(egressIdString, 1));
+ }
+
+ /**
+ * Creates a compiler for PointToPoint intents.
+ *
+ * @param hops string array describing the path hops to use when compiling
+ * @return PointToPoint intent compiler
+ */
+ private PointToPointIntentCompiler makeCompiler(String[] hops) {
+ PointToPointIntentCompiler compiler =
+ new PointToPointIntentCompiler();
+ compiler.pathService = new IntentTestsMocks.MockPathService(hops);
+ IdBlockAllocator idBlockAllocator = new DummyIdBlockAllocator();
+ compiler.intentIdGenerator =
+ new IdBlockAllocatorBasedIntentIdGenerator(idBlockAllocator);
+ return compiler;
+ }
+
+
+ /**
+ * Tests a pair of devices in an 8 hop path, forward direction.
+ */
+ @Test
+ public void testForwardPathCompilation() {
+
+ PointToPointIntent intent = makeIntent("d1", "d8");
+ assertThat(intent, is(notNullValue()));
+
+ String[] hops = {"d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8"};
+ PointToPointIntentCompiler compiler = makeCompiler(hops);
+ assertThat(compiler, is(notNullValue()));
+
+ List<Intent> result = compiler.compile(intent);
+ assertThat(result, is(Matchers.notNullValue()));
+ assertThat(result, hasSize(1));
+ Intent forwardResultIntent = result.get(0);
+ assertThat(forwardResultIntent instanceof PathIntent, is(true));
+
+ if (forwardResultIntent instanceof PathIntent) {
+ PathIntent forwardPathIntent = (PathIntent) forwardResultIntent;
+ // 7 links between the hops, plus one default link each at the ingress and egress
+ assertThat(forwardPathIntent.path().links(), hasSize(hops.length + 1));
+ assertThat(forwardPathIntent.path().links(), linksHasPath("d1", "d2"));
+ assertThat(forwardPathIntent.path().links(), linksHasPath("d2", "d3"));
+ assertThat(forwardPathIntent.path().links(), linksHasPath("d3", "d4"));
+ assertThat(forwardPathIntent.path().links(), linksHasPath("d4", "d5"));
+ assertThat(forwardPathIntent.path().links(), linksHasPath("d5", "d6"));
+ assertThat(forwardPathIntent.path().links(), linksHasPath("d6", "d7"));
+ assertThat(forwardPathIntent.path().links(), linksHasPath("d7", "d8"));
+ }
+ }
+
+ /**
+ * Tests a pair of devices in an 8 hop path, reverse direction.
+ */
+ @Test
+ public void testReversePathCompilation() {
+
+ PointToPointIntent intent = makeIntent("d8", "d1");
+ assertThat(intent, is(notNullValue()));
+
+ String[] hops = {"d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8"};
+ PointToPointIntentCompiler compiler = makeCompiler(hops);
+ assertThat(compiler, is(notNullValue()));
+
+ List<Intent> result = compiler.compile(intent);
+ assertThat(result, is(Matchers.notNullValue()));
+ assertThat(result, hasSize(1));
+ Intent reverseResultIntent = result.get(0);
+ assertThat(reverseResultIntent instanceof PathIntent, is(true));
+
+ if (reverseResultIntent instanceof PathIntent) {
+ PathIntent reversePathIntent = (PathIntent) reverseResultIntent;
+ assertThat(reversePathIntent.path().links(), hasSize(hops.length + 1));
+ assertThat(reversePathIntent.path().links(), linksHasPath("d2", "d1"));
+ assertThat(reversePathIntent.path().links(), linksHasPath("d3", "d2"));
+ assertThat(reversePathIntent.path().links(), linksHasPath("d4", "d3"));
+ assertThat(reversePathIntent.path().links(), linksHasPath("d5", "d4"));
+ assertThat(reversePathIntent.path().links(), linksHasPath("d6", "d5"));
+ assertThat(reversePathIntent.path().links(), linksHasPath("d7", "d6"));
+ assertThat(reversePathIntent.path().links(), linksHasPath("d8", "d7"));
+ }
+ }
+}
diff --git a/core/net/src/test/java/org/onlab/onos/net/link/impl/LinkManagerTest.java b/core/net/src/test/java/org/onlab/onos/net/link/impl/LinkManagerTest.java
index 1d52ba3..9a16562 100644
--- a/core/net/src/test/java/org/onlab/onos/net/link/impl/LinkManagerTest.java
+++ b/core/net/src/test/java/org/onlab/onos/net/link/impl/LinkManagerTest.java
@@ -59,6 +59,7 @@
protected LinkProviderService providerService;
protected TestProvider provider;
protected TestListener listener = new TestListener();
+ protected DeviceManager devmgr = new TestDeviceManager();
@Before
public void setUp() {
@@ -68,7 +69,7 @@
registry = mgr;
mgr.store = new SimpleLinkStore();
mgr.eventDispatcher = new TestEventDispatcher();
- mgr.deviceService = new DeviceManager();
+ mgr.deviceService = devmgr;
mgr.activate();
service.addListener(listener);
@@ -259,4 +260,11 @@
}
}
+ private static class TestDeviceManager extends DeviceManager {
+ @Override
+ public MastershipRole getRole(DeviceId deviceId) {
+ return MastershipRole.MASTER;
+ }
+ }
+
}
diff --git a/core/pom.xml b/core/pom.xml
index afee0d0..c0f74cf 100644
--- a/core/pom.xml
+++ b/core/pom.xml
@@ -20,6 +20,7 @@
<module>api</module>
<module>net</module>
<module>store</module>
+ <module>json</module>
</modules>
<dependencies>
diff --git a/core/store/dist/pom.xml b/core/store/dist/pom.xml
index 33517c7..6482729 100644
--- a/core/store/dist/pom.xml
+++ b/core/store/dist/pom.xml
@@ -19,21 +19,10 @@
<dependencies>
<dependency>
<groupId>org.onlab.onos</groupId>
- <artifactId>onos-api</artifactId>
- </dependency>
- <dependency>
- <groupId>org.onlab.onos</groupId>
<artifactId>onos-core-serializers</artifactId>
<version>${project.version}</version>
</dependency>
-
- <dependency>
- <groupId>org.onlab.onos</groupId>
- <artifactId>onlab-nio</artifactId>
- <version>${project.version}</version>
- </dependency>
-
<dependency>
<groupId>org.onlab.onos</groupId>
<artifactId>onlab-netty</artifactId>
@@ -50,10 +39,6 @@
</dependency>
<dependency>
- <groupId>org.apache.felix</groupId>
- <artifactId>org.apache.felix.scr.annotations</artifactId>
- </dependency>
- <dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava-testlib</artifactId>
<scope>test</scope>
@@ -67,15 +52,12 @@
<artifactId>easymock</artifactId>
<scope>test</scope>
</dependency>
+ <dependency>
+ <groupId>org.onlab.onos</groupId>
+ <artifactId>onos-api</artifactId>
+ <classifier>tests</classifier>
+ <scope>test</scope>
+ </dependency>
</dependencies>
- <build>
- <plugins>
- <plugin>
- <groupId>org.apache.felix</groupId>
- <artifactId>maven-scr-plugin</artifactId>
- </plugin>
- </plugins>
- </build>
-
</project>
diff --git a/core/store/dist/src/main/java/org/onlab/onos/store/cluster/impl/DistributedClusterStore.java.bak b/core/store/dist/src/main/java/org/onlab/onos/store/cluster/impl/DistributedClusterStore.java.bak
index 5708e77..c781b23 100644
--- a/core/store/dist/src/main/java/org/onlab/onos/store/cluster/impl/DistributedClusterStore.java.bak
+++ b/core/store/dist/src/main/java/org/onlab/onos/store/cluster/impl/DistributedClusterStore.java.bak
@@ -53,7 +53,7 @@
@Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
private ClusterCommunicationAdminService clusterCommunicationAdminService;
- private final ClusterNodesDelegate nodesDelegate = new InnerNodesDelegate();
+ private final ClusterNodesDelegate nodesDelegate = new InternalNodesDelegate();
@Activate
public void activate() throws IOException {
@@ -150,7 +150,7 @@
}
// Entity to handle back calls from the connection manager.
- private class InnerNodesDelegate implements ClusterNodesDelegate {
+ private class InternalNodesDelegate implements ClusterNodesDelegate {
@Override
public DefaultControllerNode nodeDetected(NodeId nodeId, IpPrefix ip, int tcpPort) {
DefaultControllerNode node = nodes.get(nodeId);
diff --git a/core/store/dist/src/main/java/org/onlab/onos/store/cluster/messaging/impl/ClusterCommunicationManager.java b/core/store/dist/src/main/java/org/onlab/onos/store/cluster/messaging/impl/ClusterCommunicationManager.java
index c7852ae..55d8b1a 100644
--- a/core/store/dist/src/main/java/org/onlab/onos/store/cluster/messaging/impl/ClusterCommunicationManager.java
+++ b/core/store/dist/src/main/java/org/onlab/onos/store/cluster/messaging/impl/ClusterCommunicationManager.java
@@ -4,6 +4,9 @@
import java.io.IOException;
import java.util.Set;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
import org.apache.felix.scr.annotations.Activate;
import org.apache.felix.scr.annotations.Component;
import org.apache.felix.scr.annotations.Deactivate;
@@ -17,17 +20,19 @@
import org.onlab.onos.store.cluster.messaging.ClusterCommunicationService;
import org.onlab.onos.store.cluster.messaging.ClusterMessage;
import org.onlab.onos.store.cluster.messaging.ClusterMessageHandler;
+import org.onlab.onos.store.cluster.messaging.ClusterMessageResponse;
import org.onlab.onos.store.cluster.messaging.MessageSubject;
import org.onlab.onos.store.serializers.ClusterMessageSerializer;
-import org.onlab.onos.store.serializers.KryoPoolUtil;
+import org.onlab.onos.store.serializers.KryoNamespaces;
import org.onlab.onos.store.serializers.KryoSerializer;
import org.onlab.onos.store.serializers.MessageSubjectSerializer;
-import org.onlab.util.KryoPool;
+import org.onlab.util.KryoNamespace;
import org.onlab.netty.Endpoint;
import org.onlab.netty.Message;
import org.onlab.netty.MessageHandler;
import org.onlab.netty.MessagingService;
import org.onlab.netty.NettyMessagingService;
+import org.onlab.netty.Response;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -47,8 +52,8 @@
private static final KryoSerializer SERIALIZER = new KryoSerializer() {
@Override
protected void setupKryoPool() {
- serializerPool = KryoPool.newBuilder()
- .register(KryoPoolUtil.API)
+ serializerPool = KryoNamespace.newBuilder()
+ .register(KryoNamespaces.API)
.register(ClusterMessage.class, new ClusterMessageSerializer())
.register(ClusterMembershipEvent.class)
.register(byte[].class)
@@ -114,7 +119,23 @@
message.subject().value(), SERIALIZER.encode(message));
return true;
} catch (IOException e) {
- log.error("Failed to send cluster message to nodeId: " + toNodeId, e);
+ log.trace("Failed to send cluster message to nodeId: " + toNodeId, e);
+ throw e;
+ }
+ }
+
+ @Override
+ public ClusterMessageResponse sendAndReceive(ClusterMessage message, NodeId toNodeId) throws IOException {
+ ControllerNode node = clusterService.getNode(toNodeId);
+ checkArgument(node != null, "Unknown nodeId: %s", toNodeId);
+ Endpoint nodeEp = new Endpoint(node.ip().toString(), node.tcpPort());
+ try {
+ Response responseFuture =
+ messagingService.sendAndReceive(nodeEp, message.subject().value(), SERIALIZER.encode(message));
+ return new InternalClusterMessageResponse(toNodeId, responseFuture);
+
+ } catch (IOException e) {
+ log.error("Failed interaction with remote nodeId: " + toNodeId, e);
throw e;
}
}
@@ -137,11 +158,52 @@
public void handle(Message message) {
try {
ClusterMessage clusterMessage = SERIALIZER.decode(message.payload());
- handler.handle(clusterMessage);
+ handler.handle(new InternalClusterMessage(clusterMessage, message));
} catch (Exception e) {
log.error("Exception caught during ClusterMessageHandler", e);
throw e;
}
}
}
+
+ public static final class InternalClusterMessage extends ClusterMessage {
+
+ private final Message rawMessage;
+
+ public InternalClusterMessage(ClusterMessage clusterMessage, Message rawMessage) {
+ super(clusterMessage.sender(), clusterMessage.subject(), clusterMessage.payload());
+ this.rawMessage = rawMessage;
+ }
+
+ @Override
+ public void respond(byte[] response) throws IOException {
+ rawMessage.respond(response);
+ }
+ }
+
+ private static final class InternalClusterMessageResponse implements ClusterMessageResponse {
+
+ private final NodeId sender;
+ private final Response responseFuture;
+
+ public InternalClusterMessageResponse(NodeId sender, Response responseFuture) {
+ this.sender = sender;
+ this.responseFuture = responseFuture;
+ }
+ @Override
+ public NodeId sender() {
+ return sender;
+ }
+
+ @Override
+ public byte[] get(long timeout, TimeUnit timeunit)
+ throws TimeoutException {
+ return responseFuture.get(timeout, timeunit);
+ }
+
+ @Override
+ public byte[] get(long timeout) throws InterruptedException {
+ return responseFuture.get();
+ }
+ }
}
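For reference, a minimal sketch of how a caller might drive the sendAndReceive()/ClusterMessageResponse API introduced above. Only calls visible elsewhere in this change are used; the helper class name, the injected services and the serializer argument are placeholders for whatever the calling store already has, and error handling is reduced to the bare minimum.

// Hypothetical helper, not part of this change: synchronous request/response to a peer.
import java.io.IOException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

import org.onlab.onos.cluster.ClusterService;
import org.onlab.onos.cluster.NodeId;
import org.onlab.onos.store.cluster.messaging.ClusterCommunicationService;
import org.onlab.onos.store.cluster.messaging.ClusterMessage;
import org.onlab.onos.store.cluster.messaging.ClusterMessageResponse;
import org.onlab.onos.store.cluster.messaging.MessageSubject;
import org.onlab.onos.store.serializers.KryoSerializer;

public final class PeerRequestExample {

    private PeerRequestExample() {}

    // The serializer is assumed to be configured like the SERIALIZER instances in the stores above.
    public static byte[] askPeer(ClusterCommunicationService communicator,
                                 ClusterService cluster,
                                 KryoSerializer serializer,
                                 MessageSubject subject,
                                 NodeId peer,
                                 Object request)
            throws IOException, TimeoutException {
        ClusterMessage message = new ClusterMessage(
                cluster.getLocalNode().id(),     // local node is the sender
                subject,                         // e.g. one of the FlowStoreMessageSubjects
                serializer.encode(request));     // serialized request payload
        ClusterMessageResponse response = communicator.sendAndReceive(message, peer);
        // Block for at most one second; the remote handler answers via ClusterMessage.respond().
        return response.get(1000, TimeUnit.MILLISECONDS);
    }
}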
diff --git a/core/store/dist/src/main/java/org/onlab/onos/store/common/impl/package-info.java b/core/store/dist/src/main/java/org/onlab/onos/store/common/impl/package-info.java
deleted file mode 100644
index 992fd49..0000000
--- a/core/store/dist/src/main/java/org/onlab/onos/store/common/impl/package-info.java
+++ /dev/null
@@ -1,5 +0,0 @@
-/**
- * Common abstractions and facilities for implementing distributed store
- * using gossip protocol.
- */
-package org.onlab.onos.store.common.impl;
diff --git a/core/store/dist/src/main/java/org/onlab/onos/store/device/impl/DeviceClockManager.java b/core/store/dist/src/main/java/org/onlab/onos/store/device/impl/DeviceClockManager.java
index 48355cf..92af514 100644
--- a/core/store/dist/src/main/java/org/onlab/onos/store/device/impl/DeviceClockManager.java
+++ b/core/store/dist/src/main/java/org/onlab/onos/store/device/impl/DeviceClockManager.java
@@ -44,6 +44,8 @@
@Override
public Timestamp getTimestamp(DeviceId deviceId) {
MastershipTerm term = deviceMastershipTerms.get(deviceId);
+ log.trace("term info for {} is: {}", deviceId, term);
+
if (term == null) {
throw new IllegalStateException("Requesting timestamp for a deviceId without mastership");
}
@@ -52,6 +54,7 @@
@Override
public void setMastershipTerm(DeviceId deviceId, MastershipTerm term) {
+ log.info("adding term info {} {}", deviceId, term.master());
deviceMastershipTerms.put(deviceId, term);
}
}
diff --git a/core/store/dist/src/main/java/org/onlab/onos/store/device/impl/DeviceDescriptions.java b/core/store/dist/src/main/java/org/onlab/onos/store/device/impl/DeviceDescriptions.java
index 03c293a..43008d2 100644
--- a/core/store/dist/src/main/java/org/onlab/onos/store/device/impl/DeviceDescriptions.java
+++ b/core/store/dist/src/main/java/org/onlab/onos/store/device/impl/DeviceDescriptions.java
@@ -15,7 +15,7 @@
import org.onlab.onos.net.device.DeviceDescription;
import org.onlab.onos.net.device.PortDescription;
import org.onlab.onos.store.Timestamp;
-import org.onlab.onos.store.common.impl.Timestamped;
+import org.onlab.onos.store.impl.Timestamped;
/*
* Collection of Description of a Device and Ports, given from a Provider.
diff --git a/core/store/dist/src/main/java/org/onlab/onos/store/device/impl/GossipDeviceStore.java b/core/store/dist/src/main/java/org/onlab/onos/store/device/impl/GossipDeviceStore.java
index d923075..fdc0827 100644
--- a/core/store/dist/src/main/java/org/onlab/onos/store/device/impl/GossipDeviceStore.java
+++ b/core/store/dist/src/main/java/org/onlab/onos/store/device/impl/GossipDeviceStore.java
@@ -38,10 +38,11 @@
import org.onlab.onos.store.cluster.messaging.ClusterMessage;
import org.onlab.onos.store.cluster.messaging.ClusterMessageHandler;
import org.onlab.onos.store.cluster.messaging.MessageSubject;
-import org.onlab.onos.store.common.impl.Timestamped;
+import org.onlab.onos.store.impl.Timestamped;
import org.onlab.onos.store.serializers.KryoSerializer;
import org.onlab.onos.store.serializers.DistributedStoreSerializers;
-import org.onlab.util.KryoPool;
+import org.onlab.packet.ChassisId;
+import org.onlab.util.KryoNamespace;
import org.onlab.util.NewConcurrentHashMap;
import org.slf4j.Logger;
@@ -116,7 +117,7 @@
protected static final KryoSerializer SERIALIZER = new KryoSerializer() {
@Override
protected void setupKryoPool() {
- serializerPool = KryoPool.newBuilder()
+ serializerPool = KryoNamespace.newBuilder()
.register(DistributedStoreSerializers.COMMON)
.register(InternalDeviceEvent.class, new InternalDeviceEventSerializer())
@@ -390,6 +391,7 @@
List<PortDescription> portDescriptions) {
final Timestamp newTimestamp = deviceClockService.getTimestamp(deviceId);
+ log.info("timestamp for {} {}", deviceId, newTimestamp);
final Timestamped<List<PortDescription>> timestampedInput
= new Timestamped<>(portDescriptions, newTimestamp);
@@ -515,12 +517,12 @@
Map<PortNumber, Port> ports,
Set<PortNumber> processed) {
List<DeviceEvent> events = new ArrayList<>();
- Iterator<PortNumber> iterator = ports.keySet().iterator();
+ Iterator<Entry<PortNumber, Port>> iterator = ports.entrySet().iterator();
while (iterator.hasNext()) {
- PortNumber portNumber = iterator.next();
+ Entry<PortNumber, Port> e = iterator.next();
+ PortNumber portNumber = e.getKey();
if (!processed.contains(portNumber)) {
- events.add(new DeviceEvent(PORT_REMOVED, device,
- ports.get(portNumber)));
+ events.add(new DeviceEvent(PORT_REMOVED, device, e.getValue()));
iterator.remove();
}
}
@@ -745,6 +747,7 @@
String hwVersion = base.hwVersion();
String swVersion = base.swVersion();
String serialNumber = base.serialNumber();
+ ChassisId chassisId = base.chassisId();
DefaultAnnotations annotations = DefaultAnnotations.builder().build();
annotations = merge(annotations, base.annotations());
@@ -762,7 +765,8 @@
}
return new DefaultDevice(primary, deviceId , type, manufacturer,
- hwVersion, swVersion, serialNumber, annotations);
+ hwVersion, swVersion, serialNumber,
+ chassisId, annotations);
}
/**
@@ -1136,7 +1140,7 @@
try {
unicastMessage(peer, DEVICE_ADVERTISE, ad);
} catch (IOException e) {
- log.error("Failed to send anti-entropy advertisement", e);
+ log.debug("Failed to send anti-entropy advertisement to {}", peer);
return;
}
} catch (Exception e) {
diff --git a/core/store/dist/src/main/java/org/onlab/onos/store/device/impl/InitDeviceDescs.java b/core/store/dist/src/main/java/org/onlab/onos/store/device/impl/InitDeviceDescs.java
deleted file mode 100644
index 936723a..0000000
--- a/core/store/dist/src/main/java/org/onlab/onos/store/device/impl/InitDeviceDescs.java
+++ /dev/null
@@ -1,23 +0,0 @@
-package org.onlab.onos.store.device.impl;
-
-import static com.google.common.base.Preconditions.checkNotNull;
-
-import org.apache.commons.lang3.concurrent.ConcurrentException;
-import org.apache.commons.lang3.concurrent.ConcurrentInitializer;
-import org.onlab.onos.net.device.DeviceDescription;
-import org.onlab.onos.store.common.impl.Timestamped;
-
-// FIXME: consider removing this class
-public final class InitDeviceDescs
- implements ConcurrentInitializer<DeviceDescriptions> {
-
- private final Timestamped<DeviceDescription> deviceDesc;
-
- public InitDeviceDescs(Timestamped<DeviceDescription> deviceDesc) {
- this.deviceDesc = checkNotNull(deviceDesc);
- }
- @Override
- public DeviceDescriptions get() throws ConcurrentException {
- return new DeviceDescriptions(deviceDesc);
- }
-}
diff --git a/core/store/dist/src/main/java/org/onlab/onos/store/device/impl/InternalDeviceEvent.java b/core/store/dist/src/main/java/org/onlab/onos/store/device/impl/InternalDeviceEvent.java
index 344fe73..623a6b1 100644
--- a/core/store/dist/src/main/java/org/onlab/onos/store/device/impl/InternalDeviceEvent.java
+++ b/core/store/dist/src/main/java/org/onlab/onos/store/device/impl/InternalDeviceEvent.java
@@ -3,7 +3,7 @@
import org.onlab.onos.net.DeviceId;
import org.onlab.onos.net.device.DeviceDescription;
import org.onlab.onos.net.provider.ProviderId;
-import org.onlab.onos.store.common.impl.Timestamped;
+import org.onlab.onos.store.impl.Timestamped;
import com.google.common.base.MoreObjects;
diff --git a/core/store/dist/src/main/java/org/onlab/onos/store/device/impl/InternalDeviceEventSerializer.java b/core/store/dist/src/main/java/org/onlab/onos/store/device/impl/InternalDeviceEventSerializer.java
index 0d3d013..5f97afd 100644
--- a/core/store/dist/src/main/java/org/onlab/onos/store/device/impl/InternalDeviceEventSerializer.java
+++ b/core/store/dist/src/main/java/org/onlab/onos/store/device/impl/InternalDeviceEventSerializer.java
@@ -3,7 +3,7 @@
import org.onlab.onos.net.DeviceId;
import org.onlab.onos.net.device.DeviceDescription;
import org.onlab.onos.net.provider.ProviderId;
-import org.onlab.onos.store.common.impl.Timestamped;
+import org.onlab.onos.store.impl.Timestamped;
import com.esotericsoftware.kryo.Kryo;
import com.esotericsoftware.kryo.Serializer;
diff --git a/core/store/dist/src/main/java/org/onlab/onos/store/device/impl/InternalPortEvent.java b/core/store/dist/src/main/java/org/onlab/onos/store/device/impl/InternalPortEvent.java
index d1fc73a..e036ec6 100644
--- a/core/store/dist/src/main/java/org/onlab/onos/store/device/impl/InternalPortEvent.java
+++ b/core/store/dist/src/main/java/org/onlab/onos/store/device/impl/InternalPortEvent.java
@@ -5,7 +5,7 @@
import org.onlab.onos.net.DeviceId;
import org.onlab.onos.net.device.PortDescription;
import org.onlab.onos.net.provider.ProviderId;
-import org.onlab.onos.store.common.impl.Timestamped;
+import org.onlab.onos.store.impl.Timestamped;
import com.google.common.base.MoreObjects;
diff --git a/core/store/dist/src/main/java/org/onlab/onos/store/device/impl/InternalPortEventSerializer.java b/core/store/dist/src/main/java/org/onlab/onos/store/device/impl/InternalPortEventSerializer.java
index 6fff395..4f4a9e6 100644
--- a/core/store/dist/src/main/java/org/onlab/onos/store/device/impl/InternalPortEventSerializer.java
+++ b/core/store/dist/src/main/java/org/onlab/onos/store/device/impl/InternalPortEventSerializer.java
@@ -5,7 +5,7 @@
import org.onlab.onos.net.DeviceId;
import org.onlab.onos.net.device.PortDescription;
import org.onlab.onos.net.provider.ProviderId;
-import org.onlab.onos.store.common.impl.Timestamped;
+import org.onlab.onos.store.impl.Timestamped;
import com.esotericsoftware.kryo.Kryo;
import com.esotericsoftware.kryo.Serializer;
diff --git a/core/store/dist/src/main/java/org/onlab/onos/store/device/impl/InternalPortStatusEvent.java b/core/store/dist/src/main/java/org/onlab/onos/store/device/impl/InternalPortStatusEvent.java
index fd154da..ff59ab0 100644
--- a/core/store/dist/src/main/java/org/onlab/onos/store/device/impl/InternalPortStatusEvent.java
+++ b/core/store/dist/src/main/java/org/onlab/onos/store/device/impl/InternalPortStatusEvent.java
@@ -3,7 +3,7 @@
import org.onlab.onos.net.DeviceId;
import org.onlab.onos.net.device.PortDescription;
import org.onlab.onos.net.provider.ProviderId;
-import org.onlab.onos.store.common.impl.Timestamped;
+import org.onlab.onos.store.impl.Timestamped;
import com.google.common.base.MoreObjects;
diff --git a/core/store/dist/src/main/java/org/onlab/onos/store/device/impl/InternalPortStatusEventSerializer.java b/core/store/dist/src/main/java/org/onlab/onos/store/device/impl/InternalPortStatusEventSerializer.java
index 8f0c2b0..9d2db47 100644
--- a/core/store/dist/src/main/java/org/onlab/onos/store/device/impl/InternalPortStatusEventSerializer.java
+++ b/core/store/dist/src/main/java/org/onlab/onos/store/device/impl/InternalPortStatusEventSerializer.java
@@ -3,7 +3,7 @@
import org.onlab.onos.net.DeviceId;
import org.onlab.onos.net.device.PortDescription;
import org.onlab.onos.net.provider.ProviderId;
-import org.onlab.onos.store.common.impl.Timestamped;
+import org.onlab.onos.store.impl.Timestamped;
import com.esotericsoftware.kryo.Kryo;
import com.esotericsoftware.kryo.Serializer;
diff --git a/core/store/dist/src/main/java/org/onlab/onos/store/device/impl/package-info.java b/core/store/dist/src/main/java/org/onlab/onos/store/device/impl/package-info.java
index c1f5aad..572948e 100644
--- a/core/store/dist/src/main/java/org/onlab/onos/store/device/impl/package-info.java
+++ b/core/store/dist/src/main/java/org/onlab/onos/store/device/impl/package-info.java
@@ -1,4 +1,4 @@
/**
- * Implementation of device store using distributed distributed p2p synchronization protocol.
+ * Implementation of distributed device store using p2p synchronization protocol.
*/
package org.onlab.onos.store.device.impl;
diff --git a/core/store/dist/src/main/java/org/onlab/onos/store/flow/ReplicaInfo.java b/core/store/dist/src/main/java/org/onlab/onos/store/flow/ReplicaInfo.java
new file mode 100644
index 0000000..47706f9
--- /dev/null
+++ b/core/store/dist/src/main/java/org/onlab/onos/store/flow/ReplicaInfo.java
@@ -0,0 +1,54 @@
+package org.onlab.onos.store.flow;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+
+import java.util.Collection;
+import java.util.Collections;
+
+import org.onlab.onos.cluster.NodeId;
+
+import com.google.common.base.Optional;
+
+/**
+ * Represents placement information for the master and backup copies.
+ */
+public final class ReplicaInfo {
+
+ private final Optional<NodeId> master;
+ private final Collection<NodeId> backups;
+
+ /**
+ * Creates a ReplicaInfo instance.
+ *
+ * @param master NodeId of the node where the master copy should be
+ * @param backups collection of NodeId, where backup copies should be placed
+ */
+ public ReplicaInfo(NodeId master, Collection<NodeId> backups) {
+ this.master = Optional.fromNullable(master);
+ this.backups = checkNotNull(backups);
+ }
+
+ /**
+ * Returns the NodeId of the node where the master copy should be placed, if one exists.
+ *
+ * @return NodeId, where the master copy should be placed
+ */
+ public Optional<NodeId> master() {
+ return master;
+ }
+
+ /**
+ * Returns the collection of NodeId, where backup copies should be placed.
+ *
+ * @return collection of NodeId, where backup copies should be placed
+ */
+ public Collection<NodeId> backups() {
+ return backups;
+ }
+
+ // for Serializer
+ private ReplicaInfo() {
+ this.master = Optional.absent();
+ this.backups = Collections.emptyList();
+ }
+}
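Because master() returns a Guava Optional, callers are expected to check for an absent master before dereferencing it. Below is a minimal sketch under that assumption; the class name, the injected services and the device id are placeholders, not part of this change.

// Hypothetical fragment: deciding whether the local node is master for a device.
import org.onlab.onos.cluster.ClusterService;
import org.onlab.onos.net.DeviceId;
import org.onlab.onos.store.flow.ReplicaInfo;
import org.onlab.onos.store.flow.ReplicaInfoService;

public final class ReplicaInfoExample {

    private ReplicaInfoExample() {}

    public static boolean isLocalMaster(ReplicaInfoService replicaInfoService,
                                        ClusterService clusterService,
                                        DeviceId deviceId) {
        ReplicaInfo info = replicaInfoService.getReplicaInfoFor(deviceId);
        if (!info.master().isPresent()) {
            // No master placement is known yet; callers should back off or queue the operation.
            return false;
        }
        return info.master().get().equals(clusterService.getLocalNode().id());
    }
}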
diff --git a/core/store/dist/src/main/java/org/onlab/onos/store/flow/ReplicaInfoEvent.java b/core/store/dist/src/main/java/org/onlab/onos/store/flow/ReplicaInfoEvent.java
new file mode 100644
index 0000000..7cba127
--- /dev/null
+++ b/core/store/dist/src/main/java/org/onlab/onos/store/flow/ReplicaInfoEvent.java
@@ -0,0 +1,49 @@
+package org.onlab.onos.store.flow;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+
+import org.onlab.onos.event.AbstractEvent;
+import org.onlab.onos.net.DeviceId;
+
+/**
+ * Describes a device replica info event.
+ */
+public class ReplicaInfoEvent extends AbstractEvent<ReplicaInfoEvent.Type, DeviceId> {
+
+ private final ReplicaInfo replicaInfo;
+
+ /**
+ * Types of Replica info event.
+ */
+ public enum Type {
+ /**
+ * Event to notify that master placement should be changed.
+ */
+ MASTER_CHANGED,
+ //
+ // BACKUPS_CHANGED?
+ }
+
+
+ /**
+ * Creates an event of a given type and for the specified device,
+ * and replica info.
+ *
+ * @param type replica info event type
+ * @param device event device subject
+ * @param replicaInfo replica info for the device
+ */
+ public ReplicaInfoEvent(Type type, DeviceId device, ReplicaInfo replicaInfo) {
+ super(type, device);
+ this.replicaInfo = checkNotNull(replicaInfo);
+ }
+
+ /**
+ * Returns the current replica information for the subject.
+ *
+ * @return replica information for the subject
+ */
+ public ReplicaInfo replicaInfo() {
+ return replicaInfo;
+ }
+}
diff --git a/core/store/dist/src/main/java/org/onlab/onos/store/flow/ReplicaInfoEventListener.java b/core/store/dist/src/main/java/org/onlab/onos/store/flow/ReplicaInfoEventListener.java
new file mode 100644
index 0000000..44239dc
--- /dev/null
+++ b/core/store/dist/src/main/java/org/onlab/onos/store/flow/ReplicaInfoEventListener.java
@@ -0,0 +1,11 @@
+package org.onlab.onos.store.flow;
+
+import org.onlab.onos.event.EventListener;
+
+/**
+ * Entity capable of receiving Replica placement information-related events.
+ */
+public interface ReplicaInfoEventListener extends EventListener<ReplicaInfoEvent> {
+
+}
+
diff --git a/core/store/dist/src/main/java/org/onlab/onos/store/flow/ReplicaInfoService.java b/core/store/dist/src/main/java/org/onlab/onos/store/flow/ReplicaInfoService.java
new file mode 100644
index 0000000..e613348
--- /dev/null
+++ b/core/store/dist/src/main/java/org/onlab/onos/store/flow/ReplicaInfoService.java
@@ -0,0 +1,33 @@
+package org.onlab.onos.store.flow;
+
+import org.onlab.onos.net.DeviceId;
+
+/**
+ * Service to return where the replica should be placed.
+ */
+public interface ReplicaInfoService {
+
+ // returns where it should be.
+ /**
+ * Returns the placement information for the given device.
+ *
+ * @param deviceId identifier of the device
+ * @return placement information
+ */
+ ReplicaInfo getReplicaInfoFor(DeviceId deviceId);
+
+ /**
+ * Adds the specified replica placement info change listener.
+ *
+ * @param listener the replica placement info change listener
+ */
+ void addListener(ReplicaInfoEventListener listener);
+
+ /**
+ * Removes the specified replica placement info change listener.
+ *
+ * @param listener the replica placement info change listener
+ */
+ void removeListener(ReplicaInfoEventListener listener);
+
+}
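A short sketch of how a client could subscribe to the master placement changes published through this service, assuming the usual single event(...) callback of the ONOS EventListener interface (as used by the MastershipListener elsewhere in this change). The class name and the logging via System.out are placeholders.

// Hypothetical fragment: reacting to MASTER_CHANGED events.
import org.onlab.onos.store.flow.ReplicaInfoEvent;
import org.onlab.onos.store.flow.ReplicaInfoEventListener;
import org.onlab.onos.store.flow.ReplicaInfoService;

public class MasterChangeLogger {

    private final ReplicaInfoEventListener listener = new ReplicaInfoEventListener() {
        @Override
        public void event(ReplicaInfoEvent event) {
            // subject() is the DeviceId whose placement changed; replicaInfo() carries the new master.
            System.out.println("New master for " + event.subject()
                    + ": " + event.replicaInfo().master().orNull());
        }
    };

    public void start(ReplicaInfoService replicaInfoService) {
        replicaInfoService.addListener(listener);
    }

    public void stop(ReplicaInfoService replicaInfoService) {
        replicaInfoService.removeListener(listener);
    }
}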
diff --git a/core/store/dist/src/main/java/org/onlab/onos/store/flow/impl/DistributedFlowRuleStore.java b/core/store/dist/src/main/java/org/onlab/onos/store/flow/impl/DistributedFlowRuleStore.java
index 084435f..e5b2ed6 100644
--- a/core/store/dist/src/main/java/org/onlab/onos/store/flow/impl/DistributedFlowRuleStore.java
+++ b/core/store/dist/src/main/java/org/onlab/onos/store/flow/impl/DistributedFlowRuleStore.java
@@ -3,14 +3,20 @@
import static org.onlab.onos.net.flow.FlowRuleEvent.Type.RULE_REMOVED;
import static org.slf4j.LoggerFactory.getLogger;
+import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
import org.apache.felix.scr.annotations.Activate;
import org.apache.felix.scr.annotations.Component;
import org.apache.felix.scr.annotations.Deactivate;
+import org.apache.felix.scr.annotations.Reference;
+import org.apache.felix.scr.annotations.ReferenceCardinality;
import org.apache.felix.scr.annotations.Service;
import org.onlab.onos.ApplicationId;
+import org.onlab.onos.cluster.ClusterService;
import org.onlab.onos.net.DeviceId;
import org.onlab.onos.net.flow.DefaultFlowEntry;
import org.onlab.onos.net.flow.FlowEntry;
@@ -20,7 +26,16 @@
import org.onlab.onos.net.flow.FlowRuleEvent.Type;
import org.onlab.onos.net.flow.FlowRuleStore;
import org.onlab.onos.net.flow.FlowRuleStoreDelegate;
+import org.onlab.onos.net.flow.StoredFlowEntry;
import org.onlab.onos.store.AbstractStore;
+import org.onlab.onos.store.cluster.messaging.ClusterCommunicationService;
+import org.onlab.onos.store.cluster.messaging.ClusterMessage;
+import org.onlab.onos.store.cluster.messaging.ClusterMessageResponse;
+import org.onlab.onos.store.flow.ReplicaInfo;
+import org.onlab.onos.store.flow.ReplicaInfoService;
+import org.onlab.onos.store.serializers.DistributedStoreSerializers;
+import org.onlab.onos.store.serializers.KryoSerializer;
+import org.onlab.util.KryoNamespace;
import org.slf4j.Logger;
import com.google.common.collect.ArrayListMultimap;
@@ -28,9 +43,8 @@
import com.google.common.collect.Multimap;
/**
- * Manages inventory of flow rules using trivial in-memory implementation.
+ * Manages inventory of flow rules using a distributed state management protocol.
*/
-//FIXME I LIE. I AIN'T DISTRIBUTED
@Component(immediate = true)
@Service
public class DistributedFlowRuleStore
@@ -40,12 +54,34 @@
private final Logger log = getLogger(getClass());
// store entries as a pile of rules, no info about device tables
- private final Multimap<DeviceId, FlowEntry> flowEntries =
- ArrayListMultimap.<DeviceId, FlowEntry>create();
+ private final Multimap<DeviceId, StoredFlowEntry> flowEntries =
+ ArrayListMultimap.<DeviceId, StoredFlowEntry>create();
private final Multimap<Short, FlowRule> flowEntriesById =
ArrayListMultimap.<Short, FlowRule>create();
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ private ReplicaInfoService replicaInfoManager;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ private ClusterCommunicationService clusterCommunicator;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ private ClusterService clusterService;
+
+ protected static final KryoSerializer SERIALIZER = new KryoSerializer() {
+ @Override
+ protected void setupKryoPool() {
+ serializerPool = KryoNamespace.newBuilder()
+ .register(DistributedStoreSerializers.COMMON)
+ .build()
+ .populate(1);
+ }
+ };
+
+ // TODO: make this configurable
+ private static final long FLOW_RULE_STORE_TIMEOUT_MILLIS = 1000;
+
@Activate
public void activate() {
log.info("Started");
@@ -64,7 +100,11 @@
@Override
public synchronized FlowEntry getFlowEntry(FlowRule rule) {
- for (FlowEntry f : flowEntries.get(rule.deviceId())) {
+ return getFlowEntryInternal(rule);
+ }
+
+ private synchronized StoredFlowEntry getFlowEntryInternal(FlowRule rule) {
+ for (StoredFlowEntry f : flowEntries.get(rule.deviceId())) {
if (f.equals(rule)) {
return f;
}
@@ -74,7 +114,7 @@
@Override
public synchronized Iterable<FlowEntry> getFlowEntries(DeviceId deviceId) {
- Collection<FlowEntry> rules = flowEntries.get(deviceId);
+ Collection<? extends FlowEntry> rules = flowEntries.get(deviceId);
if (rules == null) {
return Collections.emptyList();
}
@@ -91,30 +131,96 @@
}
@Override
- public synchronized void storeFlowRule(FlowRule rule) {
- FlowEntry f = new DefaultFlowEntry(rule);
- DeviceId did = f.deviceId();
- if (!flowEntries.containsEntry(did, f)) {
- flowEntries.put(did, f);
- flowEntriesById.put(rule.appId(), f);
+ public void storeFlowRule(FlowRule rule) {
+ ReplicaInfo replicaInfo = replicaInfoManager.getReplicaInfoFor(rule.deviceId());
+ if (replicaInfo.master().get().equals(clusterService.getLocalNode().id())) {
+ storeFlowEntryInternal(rule);
+ return;
}
+
+ ClusterMessage message = new ClusterMessage(
+ clusterService.getLocalNode().id(),
+ FlowStoreMessageSubjects.STORE_FLOW_RULE,
+ SERIALIZER.encode(rule));
+
+ try {
+ ClusterMessageResponse response = clusterCommunicator.sendAndReceive(message, replicaInfo.master().get());
+ response.get(FLOW_RULE_STORE_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS);
+ } catch (IOException | TimeoutException e) {
+ // FIXME: throw a FlowStoreException
+ throw new RuntimeException(e);
+ }
+ }
+
+ private synchronized void storeFlowEntryInternal(FlowRule flowRule) {
+ StoredFlowEntry flowEntry = new DefaultFlowEntry(flowRule);
+ DeviceId deviceId = flowRule.deviceId();
+ // write to local copy.
+ if (!flowEntries.containsEntry(deviceId, flowEntry)) {
+ flowEntries.put(deviceId, flowEntry);
+ flowEntriesById.put(flowRule.appId(), flowEntry);
+ }
+ // write to backup.
+ // TODO: write to a hazelcast map.
}
@Override
public synchronized void deleteFlowRule(FlowRule rule) {
- FlowEntry entry = getFlowEntry(rule);
+ ReplicaInfo replicaInfo = replicaInfoManager.getReplicaInfoFor(rule.deviceId());
+ if (replicaInfo.master().get().equals(clusterService.getLocalNode().id())) {
+ deleteFlowRuleInternal(rule);
+ return;
+ }
+
+ ClusterMessage message = new ClusterMessage(
+ clusterService.getLocalNode().id(),
+ FlowStoreMessageSubjects.DELETE_FLOW_RULE,
+ SERIALIZER.encode(rule));
+
+ try {
+ ClusterMessageResponse response = clusterCommunicator.sendAndReceive(message, replicaInfo.master().get());
+ response.get(FLOW_RULE_STORE_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS);
+ } catch (IOException | TimeoutException e) {
+ // FIXME: throw a FlowStoreException
+ throw new RuntimeException(e);
+ }
+ }
+
+ private synchronized void deleteFlowRuleInternal(FlowRule flowRule) {
+ StoredFlowEntry entry = getFlowEntryInternal(flowRule);
if (entry == null) {
return;
}
entry.setState(FlowEntryState.PENDING_REMOVE);
+ // TODO: also update backup.
}
@Override
- public synchronized FlowRuleEvent addOrUpdateFlowRule(FlowEntry rule) {
+ public FlowRuleEvent addOrUpdateFlowRule(FlowEntry rule) {
+ ReplicaInfo replicaInfo = replicaInfoManager.getReplicaInfoFor(rule.deviceId());
+ if (replicaInfo.master().get().equals(clusterService.getLocalNode().id())) {
+ return addOrUpdateFlowRuleInternal(rule);
+ }
+
+ ClusterMessage message = new ClusterMessage(
+ clusterService.getLocalNode().id(),
+ FlowStoreMessageSubjects.ADD_OR_UPDATE_FLOW_RULE,
+ SERIALIZER.encode(rule));
+
+ try {
+ ClusterMessageResponse response = clusterCommunicator.sendAndReceive(message, replicaInfo.master().get());
+ return SERIALIZER.decode(response.get(FLOW_RULE_STORE_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS));
+ } catch (IOException | TimeoutException e) {
+ // FIXME: throw a FlowStoreException
+ throw new RuntimeException(e);
+ }
+ }
+
+ private synchronized FlowRuleEvent addOrUpdateFlowRuleInternal(FlowEntry rule) {
DeviceId did = rule.deviceId();
// check if this new rule is an update to an existing entry
- FlowEntry stored = getFlowEntry(rule);
+ StoredFlowEntry stored = getFlowEntryInternal(rule);
if (stored != null) {
stored.setBytes(rule.bytes());
stored.setLife(rule.life());
@@ -126,17 +232,42 @@
return new FlowRuleEvent(Type.RULE_UPDATED, rule);
}
- flowEntries.put(did, rule);
+ // TODO: Confirm if this behavior is correct. See SimpleFlowRuleStore
+ flowEntries.put(did, new DefaultFlowEntry(rule));
return null;
+
+ // TODO: also update backup.
}
@Override
- public synchronized FlowRuleEvent removeFlowRule(FlowEntry rule) {
+ public FlowRuleEvent removeFlowRule(FlowEntry rule) {
+ ReplicaInfo replicaInfo = replicaInfoManager.getReplicaInfoFor(rule.deviceId());
+ if (replicaInfo.master().get().equals(clusterService.getLocalNode().id())) {
+ // bypass and handle it locally
+ return removeFlowRuleInternal(rule);
+ }
+
+ ClusterMessage message = new ClusterMessage(
+ clusterService.getLocalNode().id(),
+ FlowStoreMessageSubjects.REMOVE_FLOW_RULE,
+ SERIALIZER.encode(rule));
+
+ try {
+ ClusterMessageResponse response = clusterCommunicator.sendAndReceive(message, replicaInfo.master().get());
+ return SERIALIZER.decode(response.get(FLOW_RULE_STORE_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS));
+ } catch (IOException | TimeoutException e) {
+ // FIXME: throw a FlowStoreException
+ throw new RuntimeException(e);
+ }
+ }
+
+ private synchronized FlowRuleEvent removeFlowRuleInternal(FlowEntry rule) {
// This is where one could mark a rule as removed and still keep it in the store.
if (flowEntries.remove(rule.deviceId(), rule)) {
return new FlowRuleEvent(RULE_REMOVED, rule);
} else {
return null;
}
+ // TODO: also update backup.
}
}
diff --git a/core/store/dist/src/main/java/org/onlab/onos/store/flow/impl/FlowStoreMessageSubjects.java b/core/store/dist/src/main/java/org/onlab/onos/store/flow/impl/FlowStoreMessageSubjects.java
new file mode 100644
index 0000000..a43dad6
--- /dev/null
+++ b/core/store/dist/src/main/java/org/onlab/onos/store/flow/impl/FlowStoreMessageSubjects.java
@@ -0,0 +1,15 @@
+package org.onlab.onos.store.flow.impl;
+
+import org.onlab.onos.store.cluster.messaging.MessageSubject;
+
+/**
+ * MessageSubjects used by DistributedFlowRuleStore peer-to-peer communication.
+ */
+public final class FlowStoreMessageSubjects {
+ private FlowStoreMessageSubjects() {}
+ public static final MessageSubject STORE_FLOW_RULE = new MessageSubject("peer-forward-store-flow-rule");
+ public static final MessageSubject DELETE_FLOW_RULE = new MessageSubject("peer-forward-delete-flow-rule");
+ public static final MessageSubject ADD_OR_UPDATE_FLOW_RULE =
+ new MessageSubject("peer-forward-add-or-update-flow-rule");
+ public static final MessageSubject REMOVE_FLOW_RULE = new MessageSubject("peer-forward-remove-flow-rule");
+}
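This change wires the non-master path (forwarding requests to the master via these subjects), but the corresponding master-side subscribers are not part of this diff. Below is a hedged sketch of what such a registration might look like inside DistributedFlowRuleStore.activate(), using only the ClusterCommunicationService, ClusterMessageHandler and respond() APIs shown elsewhere in this change; the Boolean acknowledgement payload is an assumption.

// Hypothetical registration inside DistributedFlowRuleStore.activate(), not part of this change.
clusterCommunicator.addSubscriber(FlowStoreMessageSubjects.STORE_FLOW_RULE,
        new ClusterMessageHandler() {
            @Override
            public void handle(ClusterMessage message) {
                // Decode the forwarded rule and apply it locally on the master...
                FlowRule rule = SERIALIZER.decode(message.payload());
                storeFlowEntryInternal(rule);
                try {
                    // ...then acknowledge so the caller's sendAndReceive() future completes.
                    message.respond(SERIALIZER.encode(Boolean.TRUE));
                } catch (IOException e) {
                    log.error("Failed to respond to peer", e);
                }
            }
        });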
diff --git a/core/store/dist/src/main/java/org/onlab/onos/store/flow/impl/ReplicaInfoManager.java b/core/store/dist/src/main/java/org/onlab/onos/store/flow/impl/ReplicaInfoManager.java
new file mode 100644
index 0000000..dcbdb4a
--- /dev/null
+++ b/core/store/dist/src/main/java/org/onlab/onos/store/flow/impl/ReplicaInfoManager.java
@@ -0,0 +1,93 @@
+package org.onlab.onos.store.flow.impl;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+import static org.slf4j.LoggerFactory.getLogger;
+import static org.onlab.onos.store.flow.ReplicaInfoEvent.Type.MASTER_CHANGED;
+
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.felix.scr.annotations.Activate;
+import org.apache.felix.scr.annotations.Component;
+import org.apache.felix.scr.annotations.Deactivate;
+import org.apache.felix.scr.annotations.Reference;
+import org.apache.felix.scr.annotations.ReferenceCardinality;
+import org.apache.felix.scr.annotations.Service;
+import org.onlab.onos.cluster.NodeId;
+import org.onlab.onos.event.AbstractListenerRegistry;
+import org.onlab.onos.event.EventDeliveryService;
+import org.onlab.onos.mastership.MastershipEvent;
+import org.onlab.onos.mastership.MastershipListener;
+import org.onlab.onos.mastership.MastershipService;
+import org.onlab.onos.net.DeviceId;
+import org.onlab.onos.store.flow.ReplicaInfo;
+import org.onlab.onos.store.flow.ReplicaInfoEvent;
+import org.onlab.onos.store.flow.ReplicaInfoEventListener;
+import org.onlab.onos.store.flow.ReplicaInfoService;
+import org.slf4j.Logger;
+
+/**
+ * Manages replica placement information.
+ */
+@Component(immediate = true)
+@Service
+public class ReplicaInfoManager implements ReplicaInfoService {
+
+ private final Logger log = getLogger(getClass());
+
+ private final MastershipListener mastershipListener = new InternalMastershipListener();
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected EventDeliveryService eventDispatcher;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected MastershipService mastershipService;
+
+ protected final AbstractListenerRegistry<ReplicaInfoEvent, ReplicaInfoEventListener>
+ listenerRegistry = new AbstractListenerRegistry<>();
+
+ @Activate
+ public void activate() {
+ eventDispatcher.addSink(ReplicaInfoEvent.class, listenerRegistry);
+ mastershipService.addListener(mastershipListener);
+ log.info("Started");
+ }
+
+ @Deactivate
+ public void deactivate() {
+ eventDispatcher.removeSink(ReplicaInfoEvent.class);
+ mastershipService.removeListener(mastershipListener);
+ log.info("Stopped");
+ }
+
+ @Override
+ public ReplicaInfo getReplicaInfoFor(DeviceId deviceId) {
+ // TODO: populate backup List when we reach the point we need them.
+ return new ReplicaInfo(mastershipService.getMasterFor(deviceId),
+ Collections.<NodeId>emptyList());
+ }
+
+ @Override
+ public void addListener(ReplicaInfoEventListener listener) {
+ listenerRegistry.addListener(checkNotNull(listener));
+ }
+
+ @Override
+ public void removeListener(ReplicaInfoEventListener listener) {
+ listenerRegistry.removeListener(checkNotNull(listener));
+ }
+
+ final class InternalMastershipListener implements MastershipListener {
+
+ @Override
+ public void event(MastershipEvent event) {
+ // TODO: distinguish standby list updates once MastershipService
+ // starts publishing them
+ final List<NodeId> standbyList = Collections.<NodeId>emptyList();
+ eventDispatcher.post(new ReplicaInfoEvent(MASTER_CHANGED,
+ event.subject(),
+ new ReplicaInfo(event.master(), standbyList)));
+ }
+ }
+
+}
diff --git a/core/store/dist/src/main/java/org/onlab/onos/store/flow/impl/package-info.java b/core/store/dist/src/main/java/org/onlab/onos/store/flow/impl/package-info.java
new file mode 100644
index 0000000..cedf68e
--- /dev/null
+++ b/core/store/dist/src/main/java/org/onlab/onos/store/flow/impl/package-info.java
@@ -0,0 +1,5 @@
+/**
+ * Implementation of the distributed flow rule store using p2p synchronization
+ * protocol.
+ */
+package org.onlab.onos.store.flow.impl;
diff --git a/core/store/dist/src/main/java/org/onlab/onos/store/host/impl/GossipHostStore.java b/core/store/dist/src/main/java/org/onlab/onos/store/host/impl/GossipHostStore.java
index 39bc770..e3d8fe0 100644
--- a/core/store/dist/src/main/java/org/onlab/onos/store/host/impl/GossipHostStore.java
+++ b/core/store/dist/src/main/java/org/onlab/onos/store/host/impl/GossipHostStore.java
@@ -1,10 +1,13 @@
package org.onlab.onos.store.host.impl;
+import com.google.common.collect.FluentIterable;
import com.google.common.collect.HashMultimap;
+import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Multimap;
import com.google.common.collect.Sets;
+import org.apache.commons.lang3.RandomUtils;
import org.apache.felix.scr.annotations.Activate;
import org.apache.felix.scr.annotations.Component;
import org.apache.felix.scr.annotations.Deactivate;
@@ -12,6 +15,8 @@
import org.apache.felix.scr.annotations.ReferenceCardinality;
import org.apache.felix.scr.annotations.Service;
import org.onlab.onos.cluster.ClusterService;
+import org.onlab.onos.cluster.ControllerNode;
+import org.onlab.onos.cluster.NodeId;
import org.onlab.onos.net.Annotations;
import org.onlab.onos.net.ConnectPoint;
import org.onlab.onos.net.DefaultHost;
@@ -19,6 +24,7 @@
import org.onlab.onos.net.Host;
import org.onlab.onos.net.HostId;
import org.onlab.onos.net.HostLocation;
+import org.onlab.onos.net.host.DefaultHostDescription;
import org.onlab.onos.net.host.HostClockService;
import org.onlab.onos.net.host.HostDescription;
import org.onlab.onos.net.host.HostEvent;
@@ -32,22 +38,29 @@
import org.onlab.onos.store.cluster.messaging.ClusterMessage;
import org.onlab.onos.store.cluster.messaging.ClusterMessageHandler;
import org.onlab.onos.store.cluster.messaging.MessageSubject;
-import org.onlab.onos.store.common.impl.Timestamped;
+import org.onlab.onos.store.impl.Timestamped;
import org.onlab.onos.store.serializers.DistributedStoreSerializers;
import org.onlab.onos.store.serializers.KryoSerializer;
import org.onlab.packet.IpPrefix;
import org.onlab.packet.MacAddress;
import org.onlab.packet.VlanId;
-import org.onlab.util.KryoPool;
+import org.onlab.util.KryoNamespace;
import org.slf4j.Logger;
import java.io.IOException;
+import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
+import java.util.Map.Entry;
import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import static java.util.concurrent.Executors.newSingleThreadScheduledExecutor;
+import static org.onlab.onos.cluster.ControllerNodeToNodeId.toNodeId;
import static org.onlab.onos.net.host.HostEvent.Type.*;
+import static org.onlab.util.Tools.namedThreads;
import static org.slf4j.LoggerFactory.getLogger;
//TODO: multi-provider, annotation not supported.
@@ -86,26 +99,60 @@
private static final KryoSerializer SERIALIZER = new KryoSerializer() {
@Override
protected void setupKryoPool() {
- serializerPool = KryoPool.newBuilder()
+ serializerPool = KryoNamespace.newBuilder()
.register(DistributedStoreSerializers.COMMON)
+ .register(InternalHostEvent.class)
.register(InternalHostRemovedEvent.class)
+ .register(HostFragmentId.class)
+ .register(HostAntiEntropyAdvertisement.class)
.build()
.populate(1);
}
};
+ private ScheduledExecutorService executor;
+
@Activate
public void activate() {
clusterCommunicator.addSubscriber(
- GossipHostStoreMessageSubjects.HOST_UPDATED, new InternalHostEventListener());
+ GossipHostStoreMessageSubjects.HOST_UPDATED,
+ new InternalHostEventListener());
clusterCommunicator.addSubscriber(
- GossipHostStoreMessageSubjects.HOST_REMOVED, new InternalHostRemovedEventListener());
+ GossipHostStoreMessageSubjects.HOST_REMOVED,
+ new InternalHostRemovedEventListener());
+ clusterCommunicator.addSubscriber(
+ GossipHostStoreMessageSubjects.HOST_ANTI_ENTROPY_ADVERTISEMENT,
+ new InternalHostAntiEntropyAdvertisementListener());
+
+ executor =
+ newSingleThreadScheduledExecutor(namedThreads("link-anti-entropy-%d"));
+
+ // TODO: Make these configurable
+ long initialDelaySec = 5;
+ long periodSec = 5;
+ // start anti-entropy thread
+ executor.scheduleAtFixedRate(new SendAdvertisementTask(),
+ initialDelaySec, periodSec, TimeUnit.SECONDS);
log.info("Started");
}
@Deactivate
public void deactivate() {
+ executor.shutdownNow();
+ try {
+ if (!executor.awaitTermination(5, TimeUnit.SECONDS)) {
+ log.error("Timeout during executor shutdown");
+ }
+ } catch (InterruptedException e) {
+ log.error("Error during executor shutdown", e);
+ }
+
+ hosts.clear();
+ removedHosts.clear();
+ locations.clear();
+ portAddresses.clear();
+
log.info("Stopped");
}
@@ -153,7 +200,7 @@
descr.hwAddress(),
descr.vlan(),
new Timestamped<>(descr.location(), timestamp),
- ImmutableSet.of(descr.ipAddress()));
+ ImmutableSet.copyOf(descr.ipAddress()));
hosts.put(hostId, newhost);
locations.put(descr.location(), newhost);
return new HostEvent(HOST_ADDED, newhost);
@@ -169,12 +216,12 @@
return new HostEvent(HOST_MOVED, host);
}
- if (host.ipAddresses().contains(descr.ipAddress())) {
+ if (host.ipAddresses().containsAll(descr.ipAddress())) {
return null;
}
Set<IpPrefix> addresses = new HashSet<>(host.ipAddresses());
- addresses.add(descr.ipAddress());
+ addresses.addAll(descr.ipAddress());
StoredHost updated = new StoredHost(providerId, host.id(),
host.mac(), host.vlan(),
host.location, addresses);
@@ -381,6 +428,10 @@
public HostLocation location() {
return location.value();
}
+
+ public Timestamp timestamp() {
+ return location.timestamp();
+ }
}
private void notifyPeers(InternalHostRemovedEvent event) throws IOException {
@@ -399,6 +450,16 @@
clusterCommunicator.broadcast(message);
}
+ private void unicastMessage(NodeId peer,
+ MessageSubject subject,
+ Object event) throws IOException {
+ ClusterMessage message = new ClusterMessage(
+ clusterService.getLocalNode().id(),
+ subject,
+ SERIALIZER.encode(event));
+ clusterCommunicator.unicast(message, peer);
+ }
+
private void notifyDelegateIfNotNull(HostEvent event) {
if (event != null) {
notifyDelegate(event);
@@ -434,4 +495,165 @@
notifyDelegateIfNotNull(removeHostInternal(hostId, timestamp));
}
}
+
+ private final class SendAdvertisementTask implements Runnable {
+
+ @Override
+ public void run() {
+ if (Thread.currentThread().isInterrupted()) {
+ log.info("Interrupted, quitting");
+ return;
+ }
+
+ try {
+ final NodeId self = clusterService.getLocalNode().id();
+ Set<ControllerNode> nodes = clusterService.getNodes();
+
+ ImmutableList<NodeId> nodeIds = FluentIterable.from(nodes)
+ .transform(toNodeId())
+ .toList();
+
+ if (nodeIds.size() == 1 && nodeIds.get(0).equals(self)) {
+ log.debug("No other peers in the cluster.");
+ return;
+ }
+
+ NodeId peer;
+ do {
+ int idx = RandomUtils.nextInt(0, nodeIds.size());
+ peer = nodeIds.get(idx);
+ } while (peer.equals(self));
+
+ HostAntiEntropyAdvertisement ad = createAdvertisement();
+
+ if (Thread.currentThread().isInterrupted()) {
+ log.info("Interrupted, quitting");
+ return;
+ }
+
+ try {
+ unicastMessage(peer, GossipHostStoreMessageSubjects.HOST_ANTI_ENTROPY_ADVERTISEMENT, ad);
+ } catch (IOException e) {
+ log.debug("Failed to send anti-entropy advertisement to {}", peer);
+ return;
+ }
+ } catch (Exception e) {
+ // catch all Exception to avoid Scheduled task being suppressed.
+ log.error("Exception thrown while sending advertisement", e);
+ }
+ }
+ }
+
+ private HostAntiEntropyAdvertisement createAdvertisement() {
+ final NodeId self = clusterService.getLocalNode().id();
+
+ Map<HostFragmentId, Timestamp> timestamps = new HashMap<>(hosts.size());
+ Map<HostId, Timestamp> tombstones = new HashMap<>(removedHosts.size());
+
+ for (Entry<HostId, StoredHost> e : hosts.entrySet()) {
+
+ final HostId hostId = e.getKey();
+ final StoredHost hostInfo = e.getValue();
+ final ProviderId providerId = hostInfo.providerId();
+ timestamps.put(new HostFragmentId(hostId, providerId), hostInfo.timestamp());
+ }
+
+ for (Entry<HostId, Timestamped<Host>> e : removedHosts.entrySet()) {
+ tombstones.put(e.getKey(), e.getValue().timestamp());
+ }
+
+ return new HostAntiEntropyAdvertisement(self, timestamps, tombstones);
+ }
+
+ private synchronized void handleAntiEntropyAdvertisement(HostAntiEntropyAdvertisement ad) {
+
+ final NodeId sender = ad.sender();
+
+ for (Entry<HostId, StoredHost> host : hosts.entrySet()) {
+ // for each locally live Hosts...
+ final HostId hostId = host.getKey();
+ final StoredHost localHost = host.getValue();
+ final ProviderId providerId = localHost.providerId();
+ final HostFragmentId hostFragId = new HostFragmentId(hostId, providerId);
+ final Timestamp localLiveTimestamp = localHost.timestamp();
+
+ Timestamp remoteTimestamp = ad.timestamps().get(hostFragId);
+ if (remoteTimestamp == null) {
+ remoteTimestamp = ad.tombstones().get(hostId);
+ }
+ if (remoteTimestamp == null ||
+ localLiveTimestamp.compareTo(remoteTimestamp) > 0) {
+
+ // local is more recent, push
+ // TODO: annotation is lost
+ final HostDescription desc = new DefaultHostDescription(
+ localHost.mac(),
+ localHost.vlan(),
+ localHost.location(),
+ localHost.ipAddresses());
+ try {
+ unicastMessage(sender, GossipHostStoreMessageSubjects.HOST_UPDATED,
+ new InternalHostEvent(providerId, hostId, desc, localHost.timestamp()));
+ } catch (IOException e1) {
+ log.debug("Failed to send advertisement response", e1);
+ }
+ }
+
+ final Timestamp remoteDeadTimestamp = ad.tombstones().get(hostId);
+ if (remoteDeadTimestamp != null &&
+ remoteDeadTimestamp.compareTo(localLiveTimestamp) > 0) {
+ // sender has recent remove
+ notifyDelegateIfNotNull(removeHostInternal(hostId, remoteDeadTimestamp));
+ }
+ }
+
+ for (Entry<HostId, Timestamped<Host>> dead : removedHosts.entrySet()) {
+ // for each locally dead Hosts
+ final HostId hostId = dead.getKey();
+ final Timestamp localDeadTimestamp = dead.getValue().timestamp();
+
+ // TODO: pick proper ProviderId, when supporting multi-provider
+ final ProviderId providerId = dead.getValue().value().providerId();
+ final HostFragmentId hostFragId = new HostFragmentId(hostId, providerId);
+
+ final Timestamp remoteLiveTimestamp = ad.timestamps().get(hostFragId);
+ if (remoteLiveTimestamp != null &&
+ localDeadTimestamp.compareTo(remoteLiveTimestamp) > 0) {
+ // sender has zombie, push
+ try {
+ unicastMessage(sender, GossipHostStoreMessageSubjects.HOST_REMOVED,
+ new InternalHostRemovedEvent(hostId, localDeadTimestamp));
+ } catch (IOException e1) {
+ log.debug("Failed to send advertisement response", e1);
+ }
+ }
+ }
+
+
+ for (Entry<HostId, Timestamp> e : ad.tombstones().entrySet()) {
+ // for each remote tombstone advertisement...
+ final HostId hostId = e.getKey();
+ final Timestamp adRemoveTimestamp = e.getValue();
+
+ final StoredHost storedHost = hosts.get(hostId);
+ if (storedHost == null) {
+ continue;
+ }
+ if (adRemoveTimestamp.compareTo(storedHost.timestamp()) > 0) {
+ // sender has recent remove info, locally remove
+ notifyDelegateIfNotNull(removeHostInternal(hostId, adRemoveTimestamp));
+ }
+ }
+ }
+
+ private final class InternalHostAntiEntropyAdvertisementListener implements
+ ClusterMessageHandler {
+
+ @Override
+ public void handle(ClusterMessage message) {
+ log.debug("Received Host Anti-Entropy advertisement from peer: {}", message.sender());
+ HostAntiEntropyAdvertisement advertisement = SERIALIZER.decode(message.payload());
+ handleAntiEntropyAdvertisement(advertisement);
+ }
+ }
}
diff --git a/core/store/dist/src/main/java/org/onlab/onos/store/host/impl/GossipHostStoreMessageSubjects.java b/core/store/dist/src/main/java/org/onlab/onos/store/host/impl/GossipHostStoreMessageSubjects.java
index 27cf4ce..0a9f0e0 100644
--- a/core/store/dist/src/main/java/org/onlab/onos/store/host/impl/GossipHostStoreMessageSubjects.java
+++ b/core/store/dist/src/main/java/org/onlab/onos/store/host/impl/GossipHostStoreMessageSubjects.java
@@ -4,6 +4,11 @@
public final class GossipHostStoreMessageSubjects {
private GossipHostStoreMessageSubjects() {}
- public static final MessageSubject HOST_UPDATED = new MessageSubject("peer-host-updated");
- public static final MessageSubject HOST_REMOVED = new MessageSubject("peer-host-removed");
+
+ public static final MessageSubject HOST_UPDATED
+ = new MessageSubject("peer-host-updated");
+ public static final MessageSubject HOST_REMOVED
+ = new MessageSubject("peer-host-removed");
+    public static final MessageSubject HOST_ANTI_ENTROPY_ADVERTISEMENT
+            = new MessageSubject("host-anti-entropy-advertisement");
}
diff --git a/core/store/dist/src/main/java/org/onlab/onos/store/host/impl/HostAntiEntropyAdvertisement.java b/core/store/dist/src/main/java/org/onlab/onos/store/host/impl/HostAntiEntropyAdvertisement.java
new file mode 100644
index 0000000..6139005
--- /dev/null
+++ b/core/store/dist/src/main/java/org/onlab/onos/store/host/impl/HostAntiEntropyAdvertisement.java
@@ -0,0 +1,48 @@
+package org.onlab.onos.store.host.impl;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+
+import java.util.Map;
+
+import org.onlab.onos.cluster.NodeId;
+import org.onlab.onos.net.HostId;
+import org.onlab.onos.store.Timestamp;
+
+/**
+ * Host AE Advertisement message.
+ */
+public final class HostAntiEntropyAdvertisement {
+
+ private final NodeId sender;
+ private final Map<HostFragmentId, Timestamp> timestamps;
+ private final Map<HostId, Timestamp> tombstones;
+
+
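+    /**
+     * Creates an anti-entropy advertisement.
+     *
+     * @param sender sender of this advertisement
+     * @param timestamps timestamps of live host fragments
+     * @param tombstones timestamps of removed hosts
+     */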
+ public HostAntiEntropyAdvertisement(NodeId sender,
+ Map<HostFragmentId, Timestamp> timestamps,
+ Map<HostId, Timestamp> tombstones) {
+ this.sender = checkNotNull(sender);
+ this.timestamps = checkNotNull(timestamps);
+ this.tombstones = checkNotNull(tombstones);
+ }
+
+ public NodeId sender() {
+ return sender;
+ }
+
+ public Map<HostFragmentId, Timestamp> timestamps() {
+ return timestamps;
+ }
+
+ public Map<HostId, Timestamp> tombstones() {
+ return tombstones;
+ }
+
+ // For serializer
+ @SuppressWarnings("unused")
+ private HostAntiEntropyAdvertisement() {
+ this.sender = null;
+ this.timestamps = null;
+ this.tombstones = null;
+ }
+}
diff --git a/core/store/dist/src/main/java/org/onlab/onos/store/host/impl/HostFragmentId.java b/core/store/dist/src/main/java/org/onlab/onos/store/host/impl/HostFragmentId.java
new file mode 100644
index 0000000..34dbff6
--- /dev/null
+++ b/core/store/dist/src/main/java/org/onlab/onos/store/host/impl/HostFragmentId.java
@@ -0,0 +1,62 @@
+package org.onlab.onos.store.host.impl;
+
+import java.util.Objects;
+
+import org.onlab.onos.net.HostId;
+import org.onlab.onos.net.provider.ProviderId;
+
+import com.google.common.base.MoreObjects;
+
+/**
+ * Identifier for HostDescription from a Provider.
+ */
+public final class HostFragmentId {
+ public final ProviderId providerId;
+ public final HostId hostId;
+
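+    /**
+     * Creates an identifier for a provider-specific fragment of a host.
+     *
+     * @param hostId host identifier
+     * @param providerId provider identifier
+     */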
+ public HostFragmentId(HostId hostId, ProviderId providerId) {
+ this.providerId = providerId;
+ this.hostId = hostId;
+ }
+
+ public HostId hostId() {
+ return hostId;
+ }
+
+ public ProviderId providerId() {
+ return providerId;
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(providerId, hostId);
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (!(obj instanceof HostFragmentId)) {
+ return false;
+ }
+ HostFragmentId that = (HostFragmentId) obj;
+ return Objects.equals(this.hostId, that.hostId) &&
+ Objects.equals(this.providerId, that.providerId);
+ }
+
+ @Override
+ public String toString() {
+ return MoreObjects.toStringHelper(getClass())
+ .add("providerId", providerId)
+ .add("hostId", hostId)
+ .toString();
+ }
+
+ // for serializer
+ @SuppressWarnings("unused")
+ private HostFragmentId() {
+ this.providerId = null;
+ this.hostId = null;
+ }
+}
diff --git a/core/store/dist/src/main/java/org/onlab/onos/store/host/impl/package-info.java b/core/store/dist/src/main/java/org/onlab/onos/store/host/impl/package-info.java
new file mode 100644
index 0000000..a5f7727
--- /dev/null
+++ b/core/store/dist/src/main/java/org/onlab/onos/store/host/impl/package-info.java
@@ -0,0 +1,4 @@
+/**
+ * Implementation of the distributed host store using p2p synchronization protocol.
+ */
+package org.onlab.onos.store.host.impl;
diff --git a/core/store/dist/src/main/java/org/onlab/onos/store/common/impl/Timestamped.java b/core/store/dist/src/main/java/org/onlab/onos/store/impl/Timestamped.java
similarity index 88%
rename from core/store/dist/src/main/java/org/onlab/onos/store/common/impl/Timestamped.java
rename to core/store/dist/src/main/java/org/onlab/onos/store/impl/Timestamped.java
index 8d2aee1..ae79831 100644
--- a/core/store/dist/src/main/java/org/onlab/onos/store/common/impl/Timestamped.java
+++ b/core/store/dist/src/main/java/org/onlab/onos/store/impl/Timestamped.java
@@ -1,4 +1,4 @@
-package org.onlab.onos.store.common.impl;
+package org.onlab.onos.store.impl;
import static com.google.common.base.Preconditions.checkNotNull;
@@ -58,12 +58,12 @@
}
/**
- * Tests if this timestamp is newer thatn the specified timestamp.
- * @param timestamp to compare agains
+ * Tests if this timestamp is newer than the specified timestamp.
+ * @param other timestamp to compare against
* @return true if this instance is newer
*/
- public boolean isNewer(Timestamp timestamp) {
- return this.timestamp.compareTo(checkNotNull(timestamp)) > 0;
+ public boolean isNewer(Timestamp other) {
+ return this.timestamp.compareTo(checkNotNull(other)) > 0;
}
@Override
diff --git a/core/store/dist/src/main/java/org/onlab/onos/store/link/impl/GossipLinkStore.java b/core/store/dist/src/main/java/org/onlab/onos/store/link/impl/GossipLinkStore.java
index a6c1660..312d072 100644
--- a/core/store/dist/src/main/java/org/onlab/onos/store/link/impl/GossipLinkStore.java
+++ b/core/store/dist/src/main/java/org/onlab/onos/store/link/impl/GossipLinkStore.java
@@ -1,7 +1,6 @@
package org.onlab.onos.store.link.impl;
import com.google.common.base.Function;
-import com.google.common.base.Predicate;
import com.google.common.collect.FluentIterable;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.ImmutableList;
@@ -27,7 +26,6 @@
import org.onlab.onos.net.SparseAnnotations;
import org.onlab.onos.net.Link.Type;
import org.onlab.onos.net.LinkKey;
-import org.onlab.onos.net.Provided;
import org.onlab.onos.net.device.DeviceClockService;
import org.onlab.onos.net.link.DefaultLinkDescription;
import org.onlab.onos.net.link.LinkDescription;
@@ -41,10 +39,10 @@
import org.onlab.onos.store.cluster.messaging.ClusterMessage;
import org.onlab.onos.store.cluster.messaging.ClusterMessageHandler;
import org.onlab.onos.store.cluster.messaging.MessageSubject;
-import org.onlab.onos.store.common.impl.Timestamped;
+import org.onlab.onos.store.impl.Timestamped;
import org.onlab.onos.store.serializers.DistributedStoreSerializers;
import org.onlab.onos.store.serializers.KryoSerializer;
-import org.onlab.util.KryoPool;
+import org.onlab.util.KryoNamespace;
import org.slf4j.Logger;
import java.io.IOException;
@@ -70,7 +68,9 @@
import static org.onlab.util.Tools.namedThreads;
import static org.slf4j.LoggerFactory.getLogger;
import static com.google.common.collect.Multimaps.synchronizedSetMultimap;
+import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.base.Predicates.notNull;
+import static org.onlab.onos.store.link.impl.GossipLinkStoreMessageSubjects.LINK_ANTI_ENTROPY_ADVERTISEMENT;
/**
* Manages inventory of infrastructure links in distributed data store
@@ -110,7 +110,7 @@
private static final KryoSerializer SERIALIZER = new KryoSerializer() {
@Override
protected void setupKryoPool() {
- serializerPool = KryoPool.newBuilder()
+ serializerPool = KryoNamespace.newBuilder()
.register(DistributedStoreSerializers.COMMON)
.register(InternalLinkEvent.class)
.register(InternalLinkRemovedEvent.class)
@@ -239,9 +239,9 @@
LinkKey key = linkKey(linkDescription.src(), linkDescription.dst());
final LinkEvent event;
final Timestamped<LinkDescription> mergedDesc;
- synchronized (getLinkDescriptions(key)) {
+ synchronized (getOrCreateLinkDescriptions(key)) {
event = createOrUpdateLinkInternal(providerId, deltaDesc);
- mergedDesc = getLinkDescriptions(key).get(providerId);
+ mergedDesc = getOrCreateLinkDescriptions(key).get(providerId);
}
if (event != null) {
@@ -265,7 +265,7 @@
LinkKey key = linkKey(linkDescription.value().src(),
linkDescription.value().dst());
- Map<ProviderId, Timestamped<LinkDescription>> descs = getLinkDescriptions(key);
+ Map<ProviderId, Timestamped<LinkDescription>> descs = getOrCreateLinkDescriptions(key);
synchronized (descs) {
// if the link was previously removed, we should proceed if and
@@ -296,7 +296,7 @@
ProviderId providerId,
Timestamped<LinkDescription> linkDescription) {
- // merge existing attributes and merge
+ // merge existing annotations
Timestamped<LinkDescription> existingLinkDescription = descs.get(providerId);
if (existingLinkDescription != null && existingLinkDescription.isNewer(linkDescription)) {
return null;
@@ -360,7 +360,14 @@
final LinkKey key = linkKey(src, dst);
DeviceId dstDeviceId = dst.deviceId();
- Timestamp timestamp = deviceClockService.getTimestamp(dstDeviceId);
+ Timestamp timestamp = null;
+ try {
+ timestamp = deviceClockService.getTimestamp(dstDeviceId);
+ } catch (IllegalStateException e) {
+ //there are times when this is called before mastership
+ // handoff correctly completes.
+ return null;
+ }
LinkEvent event = removeLinkInternal(key, timestamp);
@@ -377,14 +384,54 @@
return event;
}
- private LinkEvent removeLinkInternal(LinkKey key, Timestamp timestamp) {
- Map<ProviderId, Timestamped<LinkDescription>> linkDescriptions =
- getLinkDescriptions(key);
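+    /**
+     * Returns the description supplied by the primary (non-ancillary)
+     * provider, or null if only ancillary providers have reported this link.
+     */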
+ private static Timestamped<LinkDescription> getPrimaryDescription(
+ Map<ProviderId, Timestamped<LinkDescription>> linkDescriptions) {
+
synchronized (linkDescriptions) {
+ for (Entry<ProviderId, Timestamped<LinkDescription>>
+ e : linkDescriptions.entrySet()) {
+
+ if (!e.getKey().isAncillary()) {
+ return e.getValue();
+ }
+ }
+ }
+ return null;
+ }
+
+
+ // TODO: consider slicing out as Timestamp utils
+ /**
+     * Checks if the given timestamp is more recent than the timestamped object.
+     *
+     * @param timestamp timestamp to check
+     * @param timestamped object to be tested against
+     * @return true if {@code timestamp} is more recent than {@code timestamped},
+     *         or if {@code timestamped} is null
+ */
+ private static boolean isMoreRecent(Timestamp timestamp, Timestamped<?> timestamped) {
+ checkNotNull(timestamp);
+ if (timestamped == null) {
+ return true;
+ }
+ return timestamp.compareTo(timestamped.timestamp()) > 0;
+ }
+
+ private LinkEvent removeLinkInternal(LinkKey key, Timestamp timestamp) {
+ Map<ProviderId, Timestamped<LinkDescription>> linkDescriptions
+ = getOrCreateLinkDescriptions(key);
+
+ synchronized (linkDescriptions) {
+ if (linkDescriptions.isEmpty()) {
+                // never seen such a link before; keep the timestamp for the record
+ removedLinks.put(key, timestamp);
+ return null;
+ }
// accept removal request if given timestamp is newer than
// the latest Timestamp from Primary provider
- ProviderId primaryProviderId = pickPrimaryProviderId(linkDescriptions);
- if (linkDescriptions.get(primaryProviderId).isNewer(timestamp)) {
+ Timestamped<LinkDescription> prim = getPrimaryDescription(linkDescriptions);
+ if (!isMoreRecent(timestamp, prim)) {
+ // outdated remove request, ignore
return null;
}
removedLinks.put(key, timestamp);
@@ -406,12 +453,13 @@
/**
* @return primary ProviderID, or randomly chosen one if none exists
*/
- private ProviderId pickPrimaryProviderId(
+ private static ProviderId pickBaseProviderId(
Map<ProviderId, Timestamped<LinkDescription>> linkDescriptions) {
ProviderId fallBackPrimary = null;
for (Entry<ProviderId, Timestamped<LinkDescription>> e : linkDescriptions.entrySet()) {
if (!e.getKey().isAncillary()) {
+ // found primary
return e.getKey();
} else if (fallBackPrimary == null) {
// pick randomly as a fallback in case there is no primary
@@ -421,9 +469,10 @@
return fallBackPrimary;
}
+ // Guarded by linkDescs value (=locking each Link)
private Link composeLink(Map<ProviderId, Timestamped<LinkDescription>> descs) {
- ProviderId primaryProviderId = pickPrimaryProviderId(descs);
- Timestamped<LinkDescription> base = descs.get(primaryProviderId);
+ ProviderId baseProviderId = pickBaseProviderId(descs);
+ Timestamped<LinkDescription> base = descs.get(baseProviderId);
ConnectPoint src = base.value().src();
ConnectPoint dst = base.value().dst();
@@ -432,7 +481,7 @@
annotations = merge(annotations, base.value().annotations());
for (Entry<ProviderId, Timestamped<LinkDescription>> e : descs.entrySet()) {
- if (primaryProviderId.equals(e.getKey())) {
+ if (baseProviderId.equals(e.getKey())) {
continue;
}
@@ -445,10 +494,10 @@
annotations = merge(annotations, e.getValue().value().annotations());
}
- return new DefaultLink(primaryProviderId , src, dst, type, annotations);
+ return new DefaultLink(baseProviderId, src, dst, type, annotations);
}
- private Map<ProviderId, Timestamped<LinkDescription>> getLinkDescriptions(LinkKey key) {
+ private Map<ProviderId, Timestamped<LinkDescription>> getOrCreateLinkDescriptions(LinkKey key) {
Map<ProviderId, Timestamped<LinkDescription>> r;
r = linkDescs.get(key);
if (r != null) {
@@ -464,11 +513,11 @@
}
}
- private Timestamped<LinkDescription> getLinkDescription(LinkKey key, ProviderId providerId) {
- return getLinkDescriptions(key).get(providerId);
- }
-
private final Function<LinkKey, Link> lookupLink = new LookupLink();
+ /**
+     * Returns a Function to look up a Link instance by LinkKey from the cache.
+     *
+     * @return lookup function
+ */
private Function<LinkKey, Link> lookupLink() {
return lookupLink;
}
@@ -476,20 +525,11 @@
private final class LookupLink implements Function<LinkKey, Link> {
@Override
public Link apply(LinkKey input) {
- return links.get(input);
- }
- }
-
- private static final class IsPrimary implements Predicate<Provided> {
-
- private static final Predicate<Provided> IS_PRIMARY = new IsPrimary();
- public static final Predicate<Provided> isPrimary() {
- return IS_PRIMARY;
- }
-
- @Override
- public boolean apply(Provided input) {
- return !input.providerId().isAncillary();
+ if (input == null) {
+ return null;
+ } else {
+ return links.get(input);
+ }
}
}
@@ -499,7 +539,6 @@
}
}
- // TODO: should we be throwing exception?
private void broadcastMessage(MessageSubject subject, Object event) throws IOException {
ClusterMessage message = new ClusterMessage(
clusterService.getLocalNode().id(),
@@ -508,17 +547,12 @@
clusterCommunicator.broadcast(message);
}
- // TODO: should we be throwing exception?
- private void unicastMessage(NodeId recipient, MessageSubject subject, Object event) {
- try {
- ClusterMessage message = new ClusterMessage(
- clusterService.getLocalNode().id(),
- subject,
- SERIALIZER.encode(event));
- clusterCommunicator.unicast(message, recipient);
- } catch (IOException e) {
- log.error("Failed to send a {} message to {}", subject.value(), recipient);
- }
+ private void unicastMessage(NodeId recipient, MessageSubject subject, Object event) throws IOException {
+ ClusterMessage message = new ClusterMessage(
+ clusterService.getLocalNode().id(),
+ subject,
+ SERIALIZER.encode(event));
+ clusterCommunicator.unicast(message, recipient);
}
private void notifyPeers(InternalLinkEvent event) throws IOException {
@@ -529,12 +563,22 @@
broadcastMessage(GossipLinkStoreMessageSubjects.LINK_REMOVED, event);
}
+    // notify peer; failures are only logged at debug level
private void notifyPeer(NodeId peer, InternalLinkEvent event) {
- unicastMessage(peer, GossipLinkStoreMessageSubjects.LINK_UPDATE, event);
+ try {
+ unicastMessage(peer, GossipLinkStoreMessageSubjects.LINK_UPDATE, event);
+ } catch (IOException e) {
+ log.debug("Failed to notify peer {} with message {}", peer, event);
+ }
}
+    // notify peer; failures are only logged at debug level
private void notifyPeer(NodeId peer, InternalLinkRemovedEvent event) {
- unicastMessage(peer, GossipLinkStoreMessageSubjects.LINK_REMOVED, event);
+ try {
+ unicastMessage(peer, GossipLinkStoreMessageSubjects.LINK_REMOVED, event);
+ } catch (IOException e) {
+ log.debug("Failed to notify peer {} with message {}", peer, event);
+ }
}
private final class SendAdvertisementTask implements Runnable {
@@ -573,9 +617,9 @@
}
try {
- unicastMessage(peer, GossipLinkStoreMessageSubjects.LINK_ANTI_ENTROPY_ADVERTISEMENT, ad);
- } catch (Exception e) {
- log.error("Failed to send anti-entropy advertisement", e);
+ unicastMessage(peer, LINK_ANTI_ENTROPY_ADVERTISEMENT, ad);
+ } catch (IOException e) {
+ log.debug("Failed to send anti-entropy advertisement to {}", peer);
return;
}
} catch (Exception e) {
@@ -608,42 +652,75 @@
return new LinkAntiEntropyAdvertisement(self, linkTimestamps, linkTombstones);
}
- private void handleAntiEntropyAdvertisement(LinkAntiEntropyAdvertisement advertisement) {
+ private void handleAntiEntropyAdvertisement(LinkAntiEntropyAdvertisement ad) {
- NodeId peer = advertisement.sender();
+ final NodeId sender = ad.sender();
+ boolean localOutdated = false;
- Map<LinkFragmentId, Timestamp> linkTimestamps = advertisement.linkTimestamps();
- Map<LinkKey, Timestamp> linkTombstones = advertisement.linkTombstones();
- for (Map.Entry<LinkFragmentId, Timestamp> entry : linkTimestamps.entrySet()) {
- LinkFragmentId linkFragmentId = entry.getKey();
- Timestamp peerTimestamp = entry.getValue();
+ for (Entry<LinkKey, Map<ProviderId, Timestamped<LinkDescription>>>
+ l : linkDescs.entrySet()) {
- LinkKey key = linkFragmentId.linkKey();
- ProviderId providerId = linkFragmentId.providerId();
+ final LinkKey key = l.getKey();
+ final Map<ProviderId, Timestamped<LinkDescription>> link = l.getValue();
+ synchronized (link) {
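+                // start from the local tombstone (if any); the loop below advances
+                // localLatest to the newest timestamp reported by any local provider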
+ Timestamp localLatest = removedLinks.get(key);
- Timestamped<LinkDescription> linkDescription = getLinkDescription(key, providerId);
- if (linkDescription.isNewer(peerTimestamp)) {
- // I have more recent link description. update peer.
- notifyPeer(peer, new InternalLinkEvent(providerId, linkDescription));
- }
- // else TODO: Peer has more recent link description. request it.
+ for (Entry<ProviderId, Timestamped<LinkDescription>> p : link.entrySet()) {
+ final ProviderId providerId = p.getKey();
+ final Timestamped<LinkDescription> pDesc = p.getValue();
- Timestamp linkRemovedTimestamp = removedLinks.get(key);
- if (linkRemovedTimestamp != null && linkRemovedTimestamp.compareTo(peerTimestamp) > 0) {
- // peer has a zombie link. update peer.
- notifyPeer(peer, new InternalLinkRemovedEvent(key, linkRemovedTimestamp));
+ final LinkFragmentId fragId = new LinkFragmentId(key, providerId);
+ // remote
+ Timestamp remoteTimestamp = ad.linkTimestamps().get(fragId);
+ if (remoteTimestamp == null) {
+ remoteTimestamp = ad.linkTombstones().get(key);
+ }
+ if (remoteTimestamp == null ||
+ pDesc.isNewer(remoteTimestamp)) {
+ // I have more recent link description. update peer.
+ notifyPeer(sender, new InternalLinkEvent(providerId, pDesc));
+ } else {
+ final Timestamp remoteLive = ad.linkTimestamps().get(fragId);
+ if (remoteLive != null &&
+ remoteLive.compareTo(pDesc.timestamp()) > 0) {
+ // I have something outdated
+ localOutdated = true;
+ }
+ }
+
+ // search local latest along the way
+ if (localLatest == null ||
+ pDesc.isNewer(localLatest)) {
+ localLatest = pDesc.timestamp();
+ }
+ }
+                // Tests if the remote remove is more recent than the local latest.
+ final Timestamp remoteRemove = ad.linkTombstones().get(key);
+ if (remoteRemove != null) {
+ if (localLatest != null &&
+ localLatest.compareTo(remoteRemove) < 0) {
+ // remote remove is more recent
+ notifyDelegateIfNotNull(removeLinkInternal(key, remoteRemove));
+ }
+ }
}
}
- for (Map.Entry<LinkKey, Timestamp> entry : linkTombstones.entrySet()) {
- LinkKey key = entry.getKey();
- Timestamp peerTimestamp = entry.getValue();
+ // populate remove info if not known locally
+ for (Entry<LinkKey, Timestamp> remoteRm : ad.linkTombstones().entrySet()) {
+ final LinkKey key = remoteRm.getKey();
+ final Timestamp remoteRemove = remoteRm.getValue();
+ // relying on removeLinkInternal to ignore stale info
+ notifyDelegateIfNotNull(removeLinkInternal(key, remoteRemove));
+ }
- ProviderId primaryProviderId = pickPrimaryProviderId(getLinkDescriptions(key));
- if (primaryProviderId != null) {
- if (!getLinkDescription(key, primaryProviderId).isNewer(peerTimestamp)) {
- notifyDelegateIfNotNull(removeLinkInternal(key, peerTimestamp));
- }
+ if (localOutdated) {
+ // send back advertisement to speed up convergence
+ try {
+ unicastMessage(sender, LINK_ANTI_ENTROPY_ADVERTISEMENT,
+ createAdvertisement());
+ } catch (IOException e) {
+ log.debug("Failed to send back active advertisement");
}
}
}
@@ -652,7 +729,7 @@
@Override
public void handle(ClusterMessage message) {
- log.info("Received link event from peer: {}", message.sender());
+ log.trace("Received link event from peer: {}", message.sender());
InternalLinkEvent event = (InternalLinkEvent) SERIALIZER.decode(message.payload());
ProviderId providerId = event.providerId();
@@ -666,7 +743,7 @@
@Override
public void handle(ClusterMessage message) {
- log.info("Received link removed event from peer: {}", message.sender());
+ log.trace("Received link removed event from peer: {}", message.sender());
InternalLinkRemovedEvent event = (InternalLinkRemovedEvent) SERIALIZER.decode(message.payload());
LinkKey linkKey = event.linkKey();
diff --git a/core/store/dist/src/main/java/org/onlab/onos/store/link/impl/InternalLinkEvent.java b/core/store/dist/src/main/java/org/onlab/onos/store/link/impl/InternalLinkEvent.java
index 9bb3445..74694ba 100644
--- a/core/store/dist/src/main/java/org/onlab/onos/store/link/impl/InternalLinkEvent.java
+++ b/core/store/dist/src/main/java/org/onlab/onos/store/link/impl/InternalLinkEvent.java
@@ -4,7 +4,7 @@
import org.onlab.onos.net.link.LinkDescription;
import org.onlab.onos.net.provider.ProviderId;
-import org.onlab.onos.store.common.impl.Timestamped;
+import org.onlab.onos.store.impl.Timestamped;
/**
* Information published by GossipDeviceStore to notify peers of a device
diff --git a/core/store/dist/src/main/java/org/onlab/onos/store/link/impl/package-info.java b/core/store/dist/src/main/java/org/onlab/onos/store/link/impl/package-info.java
index c675f84..ab45e4f 100644
--- a/core/store/dist/src/main/java/org/onlab/onos/store/link/impl/package-info.java
+++ b/core/store/dist/src/main/java/org/onlab/onos/store/link/impl/package-info.java
@@ -1,4 +1,4 @@
/**
- * Implementation of link store using distributed p2p synchronization protocol.
+ * Implementation of distributed link store using p2p synchronization protocol.
*/
package org.onlab.onos.store.link.impl;
diff --git a/core/store/dist/src/main/java/org/onlab/onos/store/serializers/DistributedStoreSerializers.java b/core/store/dist/src/main/java/org/onlab/onos/store/serializers/DistributedStoreSerializers.java
index a04539b..2f03c87 100644
--- a/core/store/dist/src/main/java/org/onlab/onos/store/serializers/DistributedStoreSerializers.java
+++ b/core/store/dist/src/main/java/org/onlab/onos/store/serializers/DistributedStoreSerializers.java
@@ -1,17 +1,17 @@
package org.onlab.onos.store.serializers;
-import org.onlab.onos.store.common.impl.Timestamped;
import org.onlab.onos.store.impl.MastershipBasedTimestamp;
+import org.onlab.onos.store.impl.Timestamped;
import org.onlab.onos.store.impl.WallClockTimestamp;
-import org.onlab.util.KryoPool;
+import org.onlab.util.KryoNamespace;
public final class DistributedStoreSerializers {
/**
- * KryoPool which can serialize ON.lab misc classes.
+ * KryoNamespace which can serialize ON.lab misc classes.
*/
- public static final KryoPool COMMON = KryoPool.newBuilder()
- .register(KryoPoolUtil.API)
+ public static final KryoNamespace COMMON = KryoNamespace.newBuilder()
+ .register(KryoNamespaces.API)
.register(Timestamped.class)
.register(MastershipBasedTimestamp.class, new MastershipBasedTimestampSerializer())
.register(WallClockTimestamp.class)
diff --git a/core/store/dist/src/main/java/org/onlab/onos/store/topology/impl/package-info.java b/core/store/dist/src/main/java/org/onlab/onos/store/topology/impl/package-info.java
new file mode 100644
index 0000000..ec719d0
--- /dev/null
+++ b/core/store/dist/src/main/java/org/onlab/onos/store/topology/impl/package-info.java
@@ -0,0 +1,4 @@
+/**
+ * Implementation of distributed topology store using p2p synchronization protocol.
+ */
+package org.onlab.onos.store.topology.impl;
diff --git a/core/store/dist/src/test/java/org/onlab/onos/store/device/impl/GossipDeviceStoreTest.java b/core/store/dist/src/test/java/org/onlab/onos/store/device/impl/GossipDeviceStoreTest.java
index 141f2b2..55fa1f4 100644
--- a/core/store/dist/src/test/java/org/onlab/onos/store/device/impl/GossipDeviceStoreTest.java
+++ b/core/store/dist/src/test/java/org/onlab/onos/store/device/impl/GossipDeviceStoreTest.java
@@ -53,6 +53,7 @@
import org.onlab.onos.store.cluster.messaging.ClusterMessage;
import org.onlab.onos.store.cluster.messaging.ClusterMessageHandler;
import org.onlab.onos.store.cluster.messaging.MessageSubject;
+import org.onlab.packet.ChassisId;
import org.onlab.packet.IpPrefix;
import com.google.common.collect.Iterables;
@@ -74,6 +75,7 @@
private static final String SW1 = "3.8.1";
private static final String SW2 = "3.9.5";
private static final String SN = "43311-12345";
+ private static final ChassisId CID = new ChassisId();
private static final PortNumber P1 = PortNumber.portNumber(1);
private static final PortNumber P2 = PortNumber.portNumber(2);
@@ -158,7 +160,7 @@
SparseAnnotations... annotations) {
DeviceDescription description =
new DefaultDeviceDescription(deviceId.uri(), SWITCH, MFR,
- HW, swVersion, SN, annotations);
+ HW, swVersion, SN, CID, annotations);
reset(clusterCommunicator);
try {
expect(clusterCommunicator.broadcast(anyObject(ClusterMessage.class)))
@@ -175,7 +177,7 @@
SparseAnnotations... annotations) {
DeviceDescription description =
new DefaultDeviceDescription(deviceId.uri(), SWITCH, MFR,
- HW, swVersion, SN, annotations);
+ HW, swVersion, SN, CID, annotations);
deviceStore.createOrUpdateDevice(PIDA, deviceId, description);
}
@@ -315,7 +317,7 @@
public final void testCreateOrUpdateDevice() throws IOException {
DeviceDescription description =
new DefaultDeviceDescription(DID1.uri(), SWITCH, MFR,
- HW, SW1, SN);
+ HW, SW1, SN, CID);
Capture<ClusterMessage> bcast = new Capture<>();
resetCommunicatorExpectingSingleBroadcast(bcast);
@@ -328,7 +330,7 @@
DeviceDescription description2 =
new DefaultDeviceDescription(DID1.uri(), SWITCH, MFR,
- HW, SW2, SN);
+ HW, SW2, SN, CID);
resetCommunicatorExpectingSingleBroadcast(bcast);
DeviceEvent event2 = deviceStore.createOrUpdateDevice(PID, DID1, description2);
assertEquals(DEVICE_UPDATED, event2.type());
@@ -346,7 +348,7 @@
// add
DeviceDescription description =
new DefaultDeviceDescription(DID1.uri(), SWITCH, MFR,
- HW, SW1, SN, A2);
+ HW, SW1, SN, CID, A2);
Capture<ClusterMessage> bcast = new Capture<>();
resetCommunicatorExpectingSingleBroadcast(bcast);
@@ -362,7 +364,7 @@
// update from primary
DeviceDescription description2 =
new DefaultDeviceDescription(DID1.uri(), SWITCH, MFR,
- HW, SW2, SN, A1);
+ HW, SW2, SN, CID, A1);
resetCommunicatorExpectingSingleBroadcast(bcast);
DeviceEvent event2 = deviceStore.createOrUpdateDevice(PID, DID1, description2);
@@ -392,7 +394,7 @@
// But, Ancillary annotations will be in effect
DeviceDescription description3 =
new DefaultDeviceDescription(DID1.uri(), SWITCH, MFR,
- HW, SW1, SN, A2_2);
+ HW, SW1, SN, CID, A2_2);
resetCommunicatorExpectingSingleBroadcast(bcast);
DeviceEvent event3 = deviceStore.createOrUpdateDevice(PIDA, DID1, description3);
@@ -775,7 +777,7 @@
DeviceDescription description =
new DefaultDeviceDescription(DID1.uri(), SWITCH, MFR,
- HW, SW1, SN);
+ HW, SW1, SN, CID);
deviceStore.setDelegate(checkAdd);
deviceStore.createOrUpdateDevice(PID, DID1, description);
assertTrue("Add event fired", addLatch.await(1, TimeUnit.SECONDS));
@@ -783,7 +785,7 @@
DeviceDescription description2 =
new DefaultDeviceDescription(DID1.uri(), SWITCH, MFR,
- HW, SW2, SN);
+ HW, SW2, SN, CID);
deviceStore.unsetDelegate(checkAdd);
deviceStore.setDelegate(checkUpdate);
deviceStore.createOrUpdateDevice(PID, DID1, description2);
diff --git a/core/store/dist/src/test/java/org/onlab/onos/store/flow/impl/ReplicaInfoManagerTest.java b/core/store/dist/src/test/java/org/onlab/onos/store/flow/impl/ReplicaInfoManagerTest.java
new file mode 100644
index 0000000..105e37b
--- /dev/null
+++ b/core/store/dist/src/test/java/org/onlab/onos/store/flow/impl/ReplicaInfoManagerTest.java
@@ -0,0 +1,165 @@
+package org.onlab.onos.store.flow.impl;
+
+import static com.google.common.base.Preconditions.checkState;
+import static org.junit.Assert.*;
+
+import java.util.Collections;
+import java.util.Map;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.onlab.onos.cluster.NodeId;
+import org.onlab.onos.event.AbstractListenerRegistry;
+import org.onlab.onos.event.DefaultEventSinkRegistry;
+import org.onlab.onos.event.Event;
+import org.onlab.onos.event.EventDeliveryService;
+import org.onlab.onos.event.EventSink;
+import org.onlab.onos.mastership.MastershipEvent;
+import org.onlab.onos.mastership.MastershipEvent.Type;
+import org.onlab.onos.mastership.MastershipListener;
+import org.onlab.onos.mastership.MastershipService;
+import org.onlab.onos.mastership.MastershipServiceAdapter;
+import org.onlab.onos.net.DeviceId;
+import org.onlab.onos.store.flow.ReplicaInfo;
+import org.onlab.onos.store.flow.ReplicaInfoEvent;
+import org.onlab.onos.store.flow.ReplicaInfoEventListener;
+import org.onlab.onos.store.flow.ReplicaInfoService;
+
+import com.google.common.base.Optional;
+import com.google.common.collect.Maps;
+
+public class ReplicaInfoManagerTest {
+
+
+ private static final DeviceId DID1 = DeviceId.deviceId("of:1");
+ private static final DeviceId DID2 = DeviceId.deviceId("of:2");
+ private static final NodeId NID1 = new NodeId("foo");
+
+ private ReplicaInfoManager mgr;
+ private ReplicaInfoService service;
+
+ private AbstractListenerRegistry<MastershipEvent, MastershipListener>
+ mastershipListenerRegistry;
+ private TestEventDispatcher eventDispatcher;
+
+
+ @Before
+ public void setUp() throws Exception {
+ mastershipListenerRegistry = new AbstractListenerRegistry<>();
+
+ mgr = new ReplicaInfoManager();
+ service = mgr;
+
+ eventDispatcher = new TestEventDispatcher();
+ mgr.eventDispatcher = eventDispatcher;
+ mgr.mastershipService = new TestMastershipService();
+
+ // register dummy mastership event source
+ mgr.eventDispatcher.addSink(MastershipEvent.class, mastershipListenerRegistry);
+
+ mgr.activate();
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ mgr.deactivate();
+ }
+
+ @Test
+ public void testGetReplicaInfoFor() {
+ ReplicaInfo info1 = service.getReplicaInfoFor(DID1);
+ assertEquals(Optional.of(NID1), info1.master());
+ // backups are always empty for now
+ assertEquals(Collections.emptyList(), info1.backups());
+
+ ReplicaInfo info2 = service.getReplicaInfoFor(DID2);
+ assertEquals("There's no master", Optional.absent(), info2.master());
+ // backups are always empty for now
+ assertEquals(Collections.emptyList(), info2.backups());
+ }
+
+ @Test
+ public void testReplicaInfoEvent() throws InterruptedException {
+ final CountDownLatch latch = new CountDownLatch(1);
+ service.addListener(new MasterNodeCheck(latch, DID1, NID1));
+
+ // fake MastershipEvent
+ eventDispatcher.post(new MastershipEvent(Type.MASTER_CHANGED, DID1, NID1));
+
+ assertTrue(latch.await(1, TimeUnit.SECONDS));
+ }
+
+
+ private final class MasterNodeCheck implements ReplicaInfoEventListener {
+ private final CountDownLatch latch;
+ private Optional<NodeId> expectedMaster;
+ private DeviceId expectedDevice;
+
+
+ MasterNodeCheck(CountDownLatch latch, DeviceId did,
+ NodeId nid) {
+ this.latch = latch;
+ this.expectedMaster = Optional.fromNullable(nid);
+ this.expectedDevice = did;
+ }
+
+ @Override
+ public void event(ReplicaInfoEvent event) {
+ assertEquals(expectedDevice, event.subject());
+ assertEquals(expectedMaster, event.replicaInfo().master());
+ // backups are always empty for now
+ assertEquals(Collections.emptyList(), event.replicaInfo().backups());
+ latch.countDown();
+ }
+ }
+
+
+ private final class TestMastershipService
+ extends MastershipServiceAdapter
+ implements MastershipService {
+
+ private Map<DeviceId, NodeId> masters;
+
+ TestMastershipService() {
+ masters = Maps.newHashMap();
+ masters.put(DID1, NID1);
+ // DID2 has no master
+ }
+
+ @Override
+ public NodeId getMasterFor(DeviceId deviceId) {
+ return masters.get(deviceId);
+ }
+
+ @Override
+ public void addListener(MastershipListener listener) {
+ mastershipListenerRegistry.addListener(listener);
+ }
+
+ @Override
+ public void removeListener(MastershipListener listener) {
+ mastershipListenerRegistry.removeListener(listener);
+ }
+ }
+
+
+ // code clone
+ /**
+ * Implements event delivery system that delivers events synchronously, or
+ * in-line with the post method invocation.
+ */
+ private static class TestEventDispatcher extends DefaultEventSinkRegistry
+ implements EventDeliveryService {
+
+ @SuppressWarnings({ "rawtypes", "unchecked" })
+ @Override
+ public void post(Event event) {
+ EventSink sink = getSink(event.getClass());
+ checkState(sink != null, "No sink for event %s", event);
+ sink.process(event);
+ }
+ }
+}
diff --git a/core/store/dist/src/test/java/org/onlab/onos/store/impl/MastershipBasedTimestampTest.java b/core/store/dist/src/test/java/org/onlab/onos/store/impl/MastershipBasedTimestampTest.java
index b455989..ae36d78 100644
--- a/core/store/dist/src/test/java/org/onlab/onos/store/impl/MastershipBasedTimestampTest.java
+++ b/core/store/dist/src/test/java/org/onlab/onos/store/impl/MastershipBasedTimestampTest.java
@@ -7,7 +7,7 @@
import org.junit.Test;
import org.onlab.onos.store.Timestamp;
import org.onlab.onos.store.serializers.MastershipBasedTimestampSerializer;
-import org.onlab.util.KryoPool;
+import org.onlab.util.KryoNamespace;
import com.google.common.testing.EqualsTester;
@@ -63,7 +63,7 @@
@Test
public final void testKryoSerializable() {
final ByteBuffer buffer = ByteBuffer.allocate(1 * 1024 * 1024);
- final KryoPool kryos = KryoPool.newBuilder()
+ final KryoNamespace kryos = KryoNamespace.newBuilder()
.register(MastershipBasedTimestamp.class)
.build();
@@ -79,7 +79,7 @@
@Test
public final void testKryoSerializableWithHandcraftedSerializer() {
final ByteBuffer buffer = ByteBuffer.allocate(1 * 1024 * 1024);
- final KryoPool kryos = KryoPool.newBuilder()
+ final KryoNamespace kryos = KryoNamespace.newBuilder()
.register(MastershipBasedTimestamp.class, new MastershipBasedTimestampSerializer())
.build();
diff --git a/core/store/dist/src/test/java/org/onlab/onos/store/common/impl/TimestampedTest.java b/core/store/dist/src/test/java/org/onlab/onos/store/impl/TimestampedTest.java
similarity index 93%
rename from core/store/dist/src/test/java/org/onlab/onos/store/common/impl/TimestampedTest.java
rename to core/store/dist/src/test/java/org/onlab/onos/store/impl/TimestampedTest.java
index deb3e4d..23a97be 100644
--- a/core/store/dist/src/test/java/org/onlab/onos/store/common/impl/TimestampedTest.java
+++ b/core/store/dist/src/test/java/org/onlab/onos/store/impl/TimestampedTest.java
@@ -1,4 +1,4 @@
-package org.onlab.onos.store.common.impl;
+package org.onlab.onos.store.impl;
import static org.junit.Assert.*;
@@ -6,8 +6,7 @@
import org.junit.Test;
import org.onlab.onos.store.Timestamp;
-import org.onlab.onos.store.impl.MastershipBasedTimestamp;
-import org.onlab.util.KryoPool;
+import org.onlab.util.KryoNamespace;
import com.google.common.testing.EqualsTester;
@@ -78,7 +77,7 @@
@Test
public final void testKryoSerializable() {
final ByteBuffer buffer = ByteBuffer.allocate(1 * 1024 * 1024);
- final KryoPool kryos = KryoPool.newBuilder()
+ final KryoNamespace kryos = KryoNamespace.newBuilder()
.register(Timestamped.class,
MastershipBasedTimestamp.class)
.build();
diff --git a/core/store/dist/src/test/java/org/onlab/onos/store/impl/WallClockTimestampTest.java b/core/store/dist/src/test/java/org/onlab/onos/store/impl/WallClockTimestampTest.java
index 76e4be0..370c9f6 100644
--- a/core/store/dist/src/test/java/org/onlab/onos/store/impl/WallClockTimestampTest.java
+++ b/core/store/dist/src/test/java/org/onlab/onos/store/impl/WallClockTimestampTest.java
@@ -6,7 +6,7 @@
import org.junit.Test;
import org.onlab.onos.store.Timestamp;
-import org.onlab.util.KryoPool;
+import org.onlab.util.KryoNamespace;
import com.google.common.testing.EqualsTester;
@@ -30,7 +30,7 @@
public final void testKryoSerializable() {
WallClockTimestamp ts1 = new WallClockTimestamp();
final ByteBuffer buffer = ByteBuffer.allocate(1 * 1024 * 1024);
- final KryoPool kryos = KryoPool.newBuilder()
+ final KryoNamespace kryos = KryoNamespace.newBuilder()
.register(WallClockTimestamp.class)
.build();
diff --git a/core/store/hz/cluster/pom.xml b/core/store/hz/cluster/pom.xml
index d183aed..4ab347e 100644
--- a/core/store/hz/cluster/pom.xml
+++ b/core/store/hz/cluster/pom.xml
@@ -19,10 +19,6 @@
<dependencies>
<dependency>
<groupId>org.onlab.onos</groupId>
- <artifactId>onos-api</artifactId>
- </dependency>
- <dependency>
- <groupId>org.onlab.onos</groupId>
<artifactId>onos-core-serializers</artifactId>
<version>${project.version}</version>
</dependency>
@@ -38,23 +34,6 @@
<scope>test</scope>
<version>${project.version}</version>
</dependency>
- <dependency>
- <groupId>org.apache.felix</groupId>
- <artifactId>org.apache.felix.scr.annotations</artifactId>
- </dependency>
- <dependency>
- <groupId>com.hazelcast</groupId>
- <artifactId>hazelcast</artifactId>
- </dependency>
</dependencies>
- <build>
- <plugins>
- <plugin>
- <groupId>org.apache.felix</groupId>
- <artifactId>maven-scr-plugin</artifactId>
- </plugin>
- </plugins>
- </build>
-
</project>
diff --git a/core/store/hz/cluster/src/main/java/org/onlab/onos/store/mastership/impl/DistributedMastershipStore.java b/core/store/hz/cluster/src/main/java/org/onlab/onos/store/mastership/impl/DistributedMastershipStore.java
index e073b63..15f80f8 100644
--- a/core/store/hz/cluster/src/main/java/org/onlab/onos/store/mastership/impl/DistributedMastershipStore.java
+++ b/core/store/hz/cluster/src/main/java/org/onlab/onos/store/mastership/impl/DistributedMastershipStore.java
@@ -2,6 +2,9 @@
import static org.onlab.onos.mastership.MastershipEvent.Type.MASTER_CHANGED;
+import java.util.Collections;
+import java.util.LinkedList;
+import java.util.List;
import java.util.Map;
import java.util.Set;
@@ -20,11 +23,18 @@
import org.onlab.onos.net.DeviceId;
import org.onlab.onos.net.MastershipRole;
import org.onlab.onos.store.common.AbstractHazelcastStore;
+import org.onlab.onos.store.common.SMap;
+import org.onlab.onos.store.serializers.KryoNamespaces;
+import org.onlab.onos.store.serializers.KryoSerializer;
+import org.onlab.util.KryoNamespace;
import com.google.common.collect.ImmutableSet;
-import com.hazelcast.core.ILock;
-import com.hazelcast.core.IMap;
-import com.hazelcast.core.MultiMap;
+import com.hazelcast.core.EntryEvent;
+import com.hazelcast.core.EntryListener;
+import com.hazelcast.core.IAtomicLong;
+import com.hazelcast.core.MapEvent;
+
+import static org.onlab.onos.net.MastershipRole.*;
/**
* Distributed implementation of the mastership store. The store is
@@ -36,36 +46,42 @@
extends AbstractHazelcastStore<MastershipEvent, MastershipStoreDelegate>
implements MastershipStore {
- //arbitrary lock name
- private static final String LOCK = "lock";
//initial term/TTL value
private static final Integer INIT = 0;
- //devices to masters
- protected IMap<byte[], byte[]> masters;
+ //device to node roles
+ protected SMap<DeviceId, RoleValue> roleMap;
//devices to terms
- protected IMap<byte[], Integer> terms;
+ protected SMap<DeviceId, Integer> terms;
+ //last-known cluster size, used for tie-breaking when partitioning occurs
+ protected IAtomicLong clusterSize;
- //re-election related, disjoint-set structures:
- //device-nodes multiset of available nodes
- protected MultiMap<byte[], byte[]> standbys;
- //device-nodes multiset for nodes that have given up on device
- protected MultiMap<byte[], byte[]> unusable;
@Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
protected ClusterService clusterService;
+ @SuppressWarnings({ "unchecked", "rawtypes" })
@Override
@Activate
public void activate() {
super.activate();
- masters = theInstance.getMap("masters");
- terms = theInstance.getMap("terms");
- standbys = theInstance.getMultiMap("backups");
- unusable = theInstance.getMultiMap("unusable");
+ this.serializer = new KryoSerializer() {
+ @Override
+ protected void setupKryoPool() {
+ serializerPool = KryoNamespace.newBuilder()
+ .register(KryoNamespaces.API)
- masters.addEntryListener(new RemoteMasterShipEventHandler(), true);
+ .register(RoleValue.class, new RoleValueSerializer())
+ .build()
+ .populate(1);
+ }
+ };
+
+ roleMap = new SMap(theInstance.getMap("nodeRoles"), this.serializer);
+ terms = new SMap(theInstance.getMap("terms"), this.serializer);
+ clusterSize = theInstance.getAtomicLong("clustersize");
+ roleMap.addEntryListener((new RemoteMasterShipEventHandler()), true);
log.info("Started");
}
@@ -77,12 +93,9 @@
@Override
public MastershipRole getRole(NodeId nodeId, DeviceId deviceId) {
- byte[] did = serialize(deviceId);
- byte[] nid = serialize(nodeId);
-
- NodeId current = deserialize(masters.get(did));
+ NodeId current = getNode(MASTER, deviceId);
if (current == null) {
- if (standbys.containsEntry(did, nid)) {
+ if (isRole(STANDBY, nodeId, deviceId)) {
//was previously standby, or set to standby from master
return MastershipRole.STANDBY;
} else {
@@ -101,55 +114,79 @@
@Override
public MastershipEvent setMaster(NodeId nodeId, DeviceId deviceId) {
- byte [] did = serialize(deviceId);
- byte [] nid = serialize(nodeId);
- ILock lock = theInstance.getLock(LOCK);
- lock.lock();
+ MastershipRole role = getRole(nodeId, deviceId);
+ roleMap.lock(deviceId);
try {
- MastershipRole role = getRole(nodeId, deviceId);
+ RoleValue rv = getRoleValue(deviceId);
switch (role) {
case MASTER:
//reinforce mastership
- evict(nid, did);
+ rv.reassign(nodeId, STANDBY, NONE);
+ roleMap.put(deviceId, rv);
return null;
case STANDBY:
- //make current master standby
- byte [] current = masters.get(did);
+ NodeId current = rv.get(MASTER);
if (current != null) {
- backup(current, did);
+ //backup and replace current master
+ rv.reassign(nodeId, NONE, STANDBY);
+ rv.replace(current, nodeId, MASTER);
+ } else {
+ //no master before so just add.
+ rv.add(MASTER, nodeId);
}
- //assign specified node as new master
- masters.put(did, nid);
- evict(nid, did);
- updateTerm(did);
+ rv.reassign(nodeId, STANDBY, NONE);
+ roleMap.put(deviceId, rv);
+ updateTerm(deviceId);
return new MastershipEvent(MASTER_CHANGED, deviceId, nodeId);
case NONE:
- masters.put(did, nid);
- evict(nid, did);
- updateTerm(did);
+ rv.add(MASTER, nodeId);
+ rv.reassign(nodeId, STANDBY, NONE);
+ roleMap.put(deviceId, rv);
+ updateTerm(deviceId);
return new MastershipEvent(MASTER_CHANGED, deviceId, nodeId);
default:
log.warn("unknown Mastership Role {}", role);
return null;
}
} finally {
- lock.unlock();
+ roleMap.unlock(deviceId);
}
}
@Override
public NodeId getMaster(DeviceId deviceId) {
- return deserialize(masters.get(serialize(deviceId)));
+ return getNode(MASTER, deviceId);
+ }
+
+
+ @Override
+ public List<NodeId> getNodes(DeviceId deviceId) {
+ List<NodeId> nodes = new LinkedList<>();
+
+ //add current master to head - if there is one.
+ roleMap.lock(deviceId);
+ try {
+ RoleValue rv = getRoleValue(deviceId);
+ NodeId master = rv.get(MASTER);
+ if (master != null) {
+ nodes.add(master);
+ }
+ //We ignore NONE nodes.
+ nodes.addAll(rv.nodesOfRole(STANDBY));
+ return Collections.unmodifiableList(nodes);
+ } finally {
+ roleMap.unlock(deviceId);
+ }
}
@Override
public Set<DeviceId> getDevices(NodeId nodeId) {
ImmutableSet.Builder<DeviceId> builder = ImmutableSet.builder();
- for (Map.Entry<byte[], byte[]> entry : masters.entrySet()) {
- if (nodeId.equals(deserialize(entry.getValue()))) {
- builder.add((DeviceId) deserialize(entry.getKey()));
+ for (Map.Entry<DeviceId, RoleValue> el : roleMap.entrySet()) {
+ if (nodeId.equals(el.getValue().get(MASTER))) {
+ builder.add(el.getKey());
}
}
@@ -159,26 +196,28 @@
@Override
public MastershipRole requestRole(DeviceId deviceId) {
NodeId local = clusterService.getLocalNode().id();
- byte [] did = serialize(deviceId);
- byte [] lnid = serialize(local);
- ILock lock = theInstance.getLock(LOCK);
- lock.lock();
+ roleMap.lock(deviceId);
try {
+ RoleValue rv = getRoleValue(deviceId);
MastershipRole role = getRole(local, deviceId);
switch (role) {
case MASTER:
- evict(lnid, did);
+ rv.reassign(local, STANDBY, NONE);
+ roleMap.put(deviceId, rv);
break;
case STANDBY:
- backup(lnid, did);
- terms.putIfAbsent(did, INIT);
+ rv.reassign(local, NONE, STANDBY);
+ roleMap.put(deviceId, rv);
+ terms.putIfAbsent(deviceId, INIT);
+
break;
case NONE:
//claim mastership
- masters.put(did, lnid);
- evict(lnid, did);
- updateTerm(did);
+ rv.add(MASTER, local);
+ rv.reassign(local, STANDBY, NONE);
+ roleMap.put(deviceId, rv);
+ updateTerm(deviceId);
role = MastershipRole.MASTER;
break;
default:
@@ -186,152 +225,178 @@
}
return role;
} finally {
- lock.unlock();
+ roleMap.unlock(deviceId);
}
}
@Override
public MastershipTerm getTermFor(DeviceId deviceId) {
- byte[] did = serialize(deviceId);
- if ((masters.get(did) == null) ||
- (terms.get(did) == null)) {
+ RoleValue rv = getRoleValue(deviceId);
+ if ((rv.get(MASTER) == null) || (terms.get(deviceId) == null)) {
return null;
}
- return MastershipTerm.of(
- (NodeId) deserialize(masters.get(did)), terms.get(did));
+ return MastershipTerm.of(rv.get(MASTER), terms.get(deviceId));
}
@Override
public MastershipEvent setStandby(NodeId nodeId, DeviceId deviceId) {
- byte [] did = serialize(deviceId);
- byte [] nid = serialize(nodeId);
MastershipEvent event = null;
- ILock lock = theInstance.getLock(LOCK);
- lock.lock();
+ roleMap.lock(deviceId);
try {
+ RoleValue rv = getRoleValue(deviceId);
MastershipRole role = getRole(nodeId, deviceId);
switch (role) {
case MASTER:
- event = reelect(nodeId, deviceId);
- backup(nid, did);
- break;
+ event = reelect(nodeId, deviceId, rv);
+ //fall through to reinforce role
case STANDBY:
//fall through to reinforce role
case NONE:
- backup(nid, did);
+ rv.reassign(nodeId, NONE, STANDBY);
+ roleMap.put(deviceId, rv);
break;
default:
log.warn("unknown Mastership Role {}", role);
}
return event;
} finally {
- lock.unlock();
+ roleMap.unlock(deviceId);
}
}
@Override
public MastershipEvent relinquishRole(NodeId nodeId, DeviceId deviceId) {
- byte [] did = serialize(deviceId);
- byte [] nid = serialize(nodeId);
MastershipEvent event = null;
- ILock lock = theInstance.getLock(LOCK);
- lock.lock();
+ roleMap.lock(deviceId);
try {
+ RoleValue rv = getRoleValue(deviceId);
MastershipRole role = getRole(nodeId, deviceId);
switch (role) {
case MASTER:
- event = reelect(nodeId, deviceId);
- evict(nid, did);
- break;
+ event = reelect(nodeId, deviceId, rv);
+ //fall through to reinforce relinquishment
case STANDBY:
//fall through to reinforce relinquishment
case NONE:
- evict(nid, did);
+ rv.reassign(nodeId, STANDBY, NONE);
+ roleMap.put(deviceId, rv);
break;
default:
log.warn("unknown Mastership Role {}", role);
}
return event;
} finally {
- lock.unlock();
+ roleMap.unlock(deviceId);
}
}
//helper to fetch a new master candidate for a given device.
- private MastershipEvent reelect(NodeId current, DeviceId deviceId) {
- byte [] did = serialize(deviceId);
- byte [] nid = serialize(current);
+ private MastershipEvent reelect(
+ NodeId current, DeviceId deviceId, RoleValue rv) {
+        //if this were a queue it'd be neater.
- byte [] backup = null;
- for (byte [] n : standbys.get(serialize(deviceId))) {
- if (!current.equals(deserialize(n))) {
+ NodeId backup = null;
+ for (NodeId n : rv.nodesOfRole(STANDBY)) {
+ if (!current.equals(n)) {
backup = n;
break;
}
}
if (backup == null) {
- masters.remove(did, nid);
+ log.info("{} giving up and going to NONE for {}", current, deviceId);
+ rv.remove(MASTER, current);
+ roleMap.put(deviceId, rv);
return null;
} else {
- masters.put(did, backup);
- evict(backup, did);
- Integer term = terms.get(did);
- terms.put(did, ++term);
- return new MastershipEvent(
- MASTER_CHANGED, deviceId, (NodeId) deserialize(backup));
+ log.info("{} trying to pass mastership for {} to {}", current, deviceId, backup);
+ rv.replace(current, backup, MASTER);
+ rv.reassign(backup, STANDBY, NONE);
+ roleMap.put(deviceId, rv);
+ Integer term = terms.get(deviceId);
+ terms.put(deviceId, ++term);
+ return new MastershipEvent(MASTER_CHANGED, deviceId, backup);
}
}
- //adds node to pool(s) of backups and moves them from unusable.
- private void backup(byte [] nodeId, byte [] deviceId) {
- if (!standbys.containsEntry(deviceId, nodeId)) {
- standbys.put(deviceId, nodeId);
+ //return the RoleValue structure for a device, or create one
+ private RoleValue getRoleValue(DeviceId deviceId) {
+ RoleValue value = roleMap.get(deviceId);
+ if (value == null) {
+ value = new RoleValue();
+ roleMap.put(deviceId, value);
}
- if (unusable.containsEntry(deviceId, nodeId)) {
- unusable.remove(deviceId, nodeId);
- }
+ return value;
}
- //adds node to unusable and evicts it from backup pool.
- private void evict(byte [] nodeId, byte [] deviceId) {
- if (!unusable.containsEntry(deviceId, nodeId)) {
- unusable.put(deviceId, nodeId);
+ //get first applicable node out of store-unique structure.
+ private NodeId getNode(MastershipRole role, DeviceId deviceId) {
+ RoleValue value = roleMap.get(deviceId);
+ if (value != null) {
+ return value.get(role);
}
- if (standbys.containsEntry(deviceId, nodeId)) {
- standbys.remove(deviceId, nodeId);
+ return null;
+ }
+
+ //check if node is a certain role given a device
+ private boolean isRole(
+ MastershipRole role, NodeId nodeId, DeviceId deviceId) {
+ RoleValue value = roleMap.get(deviceId);
+ if (value != null) {
+ return value.contains(role, nodeId);
}
+ return false;
}
//adds or updates term information.
- private void updateTerm(byte [] deviceId) {
- Integer term = terms.get(deviceId);
- if (term == null) {
- terms.put(deviceId, INIT);
- } else {
- terms.put(deviceId, ++term);
+ private void updateTerm(DeviceId deviceId) {
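+        // lock the per-device entry so concurrent increments cannot race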
+ terms.lock(deviceId);
+ try {
+ Integer term = terms.get(deviceId);
+ if (term == null) {
+ terms.put(deviceId, INIT);
+ } else {
+ terms.put(deviceId, ++term);
+ }
+ } finally {
+ terms.unlock(deviceId);
}
}
- private class RemoteMasterShipEventHandler extends RemoteEventHandler<DeviceId, NodeId> {
+ private class RemoteMasterShipEventHandler implements EntryListener<DeviceId, RoleValue> {
@Override
- protected void onAdd(DeviceId deviceId, NodeId nodeId) {
- notifyDelegate(new MastershipEvent(MASTER_CHANGED, deviceId, nodeId));
+ public void entryAdded(EntryEvent<DeviceId, RoleValue> event) {
}
@Override
- protected void onRemove(DeviceId deviceId, NodeId nodeId) {
- //notifyDelegate(new MastershipEvent(MASTER_CHANGED, deviceId, nodeId));
+ public void entryRemoved(EntryEvent<DeviceId, RoleValue> event) {
}
@Override
- protected void onUpdate(DeviceId deviceId, NodeId oldNodeId, NodeId nodeId) {
- //only addition indicates a change in mastership
- //notifyDelegate(new MastershipEvent(MASTER_CHANGED, deviceId, nodeId));
+ public void entryUpdated(EntryEvent<DeviceId, RoleValue> event) {
+ NodeId myId = clusterService.getLocalNode().id();
+ NodeId node = event.getValue().get(MASTER);
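+            // if this node itself is the new master, skip the notification;
+            // the local store operation already produced the event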
+ if (myId.equals(node)) {
+ // XXX or do we just let it get sent and caught by ourself?
+ return;
+ }
+ notifyDelegate(new MastershipEvent(
+ MASTER_CHANGED, event.getKey(), event.getValue().get(MASTER)));
+ }
+
+ @Override
+ public void entryEvicted(EntryEvent<DeviceId, RoleValue> event) {
+ }
+
+ @Override
+ public void mapEvicted(MapEvent event) {
+ }
+
+ @Override
+ public void mapCleared(MapEvent event) {
}
}
diff --git a/core/store/hz/cluster/src/main/java/org/onlab/onos/store/mastership/impl/RoleValue.java b/core/store/hz/cluster/src/main/java/org/onlab/onos/store/mastership/impl/RoleValue.java
new file mode 100644
index 0000000..f2c3559
--- /dev/null
+++ b/core/store/hz/cluster/src/main/java/org/onlab/onos/store/mastership/impl/RoleValue.java
@@ -0,0 +1,112 @@
+package org.onlab.onos.store.mastership.impl;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+
+import org.onlab.onos.cluster.NodeId;
+import org.onlab.onos.net.MastershipRole;
+
+/**
+ * A structure that holds node mastership roles associated with a
+ * {@link DeviceId}. This structure needs to be locked through IMap.
+ */
+public class RoleValue {
+
+ protected Map<MastershipRole, List<NodeId>> value = new HashMap<>();
+
+ public RoleValue() {
+ value.put(MastershipRole.MASTER, new LinkedList<NodeId>());
+ value.put(MastershipRole.STANDBY, new LinkedList<NodeId>());
+ value.put(MastershipRole.NONE, new LinkedList<NodeId>());
+ }
+
+ public Map<MastershipRole, List<NodeId>> value() {
+ return Collections.unmodifiableMap(value);
+ }
+
+ public List<NodeId> nodesOfRole(MastershipRole type) {
+ return value.get(type);
+ }
+
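+    /**
+     * Returns the first node associated with the given role, or null if none.
+     *
+     * @param type the role
+     * @return first node for the role, or null
+     */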
+ public NodeId get(MastershipRole type) {
+ return value.get(type).isEmpty() ? null : value.get(type).get(0);
+ }
+
+ public boolean contains(MastershipRole type, NodeId nodeId) {
+ return value.get(type).contains(nodeId);
+ }
+
+ /**
+     * Associates a node with a certain role.
+ *
+ * @param type the role
+ * @param nodeId the node ID of the node to associate
+ */
+ public void add(MastershipRole type, NodeId nodeId) {
+ List<NodeId> nodes = value.get(type);
+
+ if (!nodes.contains(nodeId)) {
+ nodes.add(nodeId);
+ }
+ }
+
+ /**
+ * Removes a node from a certain role.
+ *
+ * @param type the role
+ * @param nodeId the ID of the node to remove
+     * @return true if the node was removed from the role
+ */
+ public boolean remove(MastershipRole type, NodeId nodeId) {
+ List<NodeId> nodes = value.get(type);
+ if (!nodes.isEmpty()) {
+ return nodes.remove(nodeId);
+ } else {
+ return false;
+ }
+ }
+
+ /**
+ * Reassigns a node from one role to another. If the node was not of the
+ * old role, it will still be assigned the new role.
+ *
+ * @param nodeId the Node ID of node changing roles
+ * @param from the old role
+ * @param to the new role
+ */
+ // might want to add anyways as default behavior
+ public void reassign(NodeId nodeId, MastershipRole from, MastershipRole to) {
+ remove(from, nodeId);
+ add(to, nodeId);
+ }
+
+ /**
+     * Replaces a node in one role with another node. Even if there is no node to
+     * replace, the new node is still associated with the role.
+ *
+ * @param from the old NodeId to replace
+ * @param to the new NodeId
+ * @param type the role associated with the old NodeId
+ */
+ // might want to add anyways as default behavior
+ public void replace(NodeId from, NodeId to, MastershipRole type) {
+ remove(type, from);
+ add(type, to);
+ }
+
+ @Override
+ public String toString() {
+ final StringBuilder builder = new StringBuilder();
+ for (Map.Entry<MastershipRole, List<NodeId>> el : value.entrySet()) {
+ builder.append(el.getKey().toString()).append(": [");
+ for (NodeId n : el.getValue()) {
+ builder.append(n);
+ }
+ builder.append("]\n");
+ }
+ return builder.toString();
+ }
+}
diff --git a/core/store/hz/cluster/src/main/java/org/onlab/onos/store/mastership/impl/RoleValueSerializer.java b/core/store/hz/cluster/src/main/java/org/onlab/onos/store/mastership/impl/RoleValueSerializer.java
new file mode 100644
index 0000000..22d1b35
--- /dev/null
+++ b/core/store/hz/cluster/src/main/java/org/onlab/onos/store/mastership/impl/RoleValueSerializer.java
@@ -0,0 +1,52 @@
+package org.onlab.onos.store.mastership.impl;
+
+import java.util.List;
+import java.util.Map;
+
+import org.onlab.onos.cluster.NodeId;
+import org.onlab.onos.net.MastershipRole;
+
+import com.esotericsoftware.kryo.Kryo;
+import com.esotericsoftware.kryo.Serializer;
+import com.esotericsoftware.kryo.io.Input;
+import com.esotericsoftware.kryo.io.Output;
+
+/**
+ * Serializer for RoleValues used by {@link DistributedMastershipStore}.
+ */
+public class RoleValueSerializer extends Serializer<RoleValue> {
+
+ // RoleValues are assumed to hold a Map of MastershipRoles (an enum)
+ // to a List of NodeIds.
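+ //
+ // Illustrative registration (an assumption about wiring, shown with the
+ // plain Kryo API; the store's actual serializer setup may differ):
+ //   Kryo kryo = new Kryo();
+ //   kryo.register(RoleValue.class, new RoleValueSerializer());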
+
+ @Override
+ public RoleValue read(Kryo kryo, Input input, Class<RoleValue> type) {
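+ // decodes the format produced by write(): an entry count, then per role
+ // its ordinal, its node count and the node ID strings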
+ RoleValue rv = new RoleValue();
+ int size = input.readInt();
+ for (int i = 0; i < size; i++) {
+ MastershipRole role = MastershipRole.values()[input.readInt()];
+ int s = input.readInt();
+ for (int j = 0; j < s; j++) {
+ rv.add(role, new NodeId(input.readString()));
+ }
+ }
+ return rv;
+ }
+
+ @Override
+ public void write(Kryo kryo, Output output, RoleValue type) {
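+ // wire format: <#role entries>, then per role <ordinal><#nodes><node IDs>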
+ output.writeInt(type.value().size());
+
+ for (Map.Entry<MastershipRole, List<NodeId>> el :
+ type.value().entrySet()) {
+ output.writeInt(el.getKey().ordinal());
+
+ List<NodeId> nodes = el.getValue();
+ output.writeInt(nodes.size());
+ for (NodeId n : nodes) {
+ output.writeString(n.toString());
+ }
+ }
+ }
+
+}
diff --git a/core/store/hz/cluster/src/test/java/org/onlab/onos/store/mastership/impl/DistributedMastershipStoreTest.java b/core/store/hz/cluster/src/test/java/org/onlab/onos/store/mastership/impl/DistributedMastershipStoreTest.java
index 89c4357..956b0be 100644
--- a/core/store/hz/cluster/src/test/java/org/onlab/onos/store/mastership/impl/DistributedMastershipStoreTest.java
+++ b/core/store/hz/cluster/src/test/java/org/onlab/onos/store/mastership/impl/DistributedMastershipStoreTest.java
@@ -27,6 +27,7 @@
import org.onlab.onos.mastership.MastershipTerm;
import org.onlab.onos.mastership.MastershipEvent.Type;
import org.onlab.onos.net.DeviceId;
+import org.onlab.onos.net.MastershipRole;
import org.onlab.onos.store.common.StoreManager;
import org.onlab.onos.store.common.StoreService;
import org.onlab.onos.store.common.TestStoreManager;
@@ -101,7 +102,7 @@
@Test
public void getMaster() {
- assertTrue("wrong store state:", dms.masters.isEmpty());
+ assertTrue("wrong store state:", dms.roleMap.isEmpty());
testStore.put(DID1, N1, true, false, false);
assertEquals("wrong master:", N1, dms.getMaster(DID1));
@@ -110,12 +111,11 @@
@Test
public void getDevices() {
- assertTrue("wrong store state:", dms.masters.isEmpty());
+ assertTrue("wrong store state:", dms.roleMap.isEmpty());
testStore.put(DID1, N1, true, false, false);
testStore.put(DID2, N1, true, false, false);
testStore.put(DID3, N2, true, false, false);
-
assertEquals("wrong devices",
Sets.newHashSet(DID1, DID2), dms.getDevices(N1));
}
@@ -161,7 +161,7 @@
assertEquals("wrong event:", Type.MASTER_CHANGED, dms.setMaster(N2, DID2).type());
assertEquals("wrong term", MastershipTerm.of(N2, 0), dms.getTermFor(DID2));
//disconnect and reconnect - sign of failing re-election or single-instance channel
- testStore.reset(true, false, false);
+ dms.roleMap.clear();
dms.setMaster(N2, DID2);
assertEquals("wrong term", MastershipTerm.of(N2, 1), dms.getTermFor(DID2));
}
@@ -191,13 +191,15 @@
assertEquals("wrong role for node:", NONE, dms.getRole(N2, DID1));
assertEquals("wrong role for node:", NONE, dms.getRole(N1, DID1));
- assertEquals("wrong number of retired nodes", 2, dms.unusable.size());
+ assertEquals("wrong number of retired nodes", 2,
+ dms.roleMap.get(DID1).nodesOfRole(NONE).size());
//bring nodes back
assertEquals("wrong role for NONE:", MASTER, dms.requestRole(DID1));
testStore.setCurrent(CN1);
assertEquals("wrong role for NONE:", STANDBY, dms.requestRole(DID1));
- assertEquals("wrong number of backup nodes", 1, dms.standbys.size());
+ assertEquals("wrong number of backup nodes", 1,
+ dms.roleMap.get(DID1).nodesOfRole(STANDBY).size());
//NONE - nothing happens
assertNull("wrong event:", dms.relinquishRole(N1, DID2));
@@ -238,55 +240,44 @@
//helper to populate master/backup structures
public void put(DeviceId dev, NodeId node,
boolean master, boolean backup, boolean term) {
- byte [] n = serialize(node);
- byte [] d = serialize(dev);
+ RoleValue rv = dms.roleMap.get(dev);
+ if (rv == null) {
+ rv = new RoleValue();
+ }
if (master) {
- dms.masters.put(d, n);
- dms.unusable.put(d, n);
- dms.standbys.remove(d, n);
+ rv.add(MASTER, node);
+ rv.reassign(node, STANDBY, NONE);
}
if (backup) {
- dms.standbys.put(d, n);
- dms.masters.remove(d, n);
- dms.unusable.remove(d, n);
+ rv.add(STANDBY, node);
+ rv.remove(MASTER, node);
+ rv.remove(NONE, node);
}
if (term) {
- dms.terms.put(d, 0);
+ dms.terms.put(dev, 0);
}
+ dms.roleMap.put(dev, rv);
}
//a dumb utility function.
public void dump() {
- System.out.println("standbys");
- for (Map.Entry<byte [], byte []> e : standbys.entrySet()) {
- System.out.println(deserialize(e.getKey()) + ":" + deserialize(e.getValue()));
- }
- System.out.println("unusable");
- for (Map.Entry<byte [], byte []> e : unusable.entrySet()) {
- System.out.println(deserialize(e.getKey()) + ":" + deserialize(e.getValue()));
- }
- }
-
- //clears structures
- public void reset(boolean store, boolean backup, boolean term) {
- if (store) {
- dms.masters.clear();
- dms.unusable.clear();
- }
- if (backup) {
- dms.standbys.clear();
- }
- if (term) {
- dms.terms.clear();
+ for (Map.Entry<DeviceId, RoleValue> el : dms.roleMap.entrySet()) {
+ System.out.println("DID: " + el.getKey());
+ for (MastershipRole role : MastershipRole.values()) {
+ System.out.println("\t" + role.toString() + ":");
+ for (NodeId n : el.getValue().nodesOfRole(role)) {
+ System.out.println("\t\t" + n);
+ }
+ }
}
}
//increment term for a device
public void increment(DeviceId dev) {
- Integer t = dms.terms.get(serialize(dev));
+ Integer t = dms.terms.get(dev);
if (t != null) {
- dms.terms.put(serialize(dev), ++t);
+ dms.terms.put(dev, ++t);
}
}
diff --git a/core/store/hz/cluster/src/test/java/org/onlab/onos/store/mastership/impl/RoleValueTest.java b/core/store/hz/cluster/src/test/java/org/onlab/onos/store/mastership/impl/RoleValueTest.java
new file mode 100644
index 0000000..93741b7
--- /dev/null
+++ b/core/store/hz/cluster/src/test/java/org/onlab/onos/store/mastership/impl/RoleValueTest.java
@@ -0,0 +1,31 @@
+package org.onlab.onos.store.mastership.impl;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.onlab.onos.net.MastershipRole.*;
+
+import org.junit.Test;
+import org.onlab.onos.cluster.NodeId;
+
+import com.google.common.collect.Sets;
+
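+/**
+ * Unit tests for the RoleValue structure.
+ */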
+public class RoleValueTest {
+
+ private static final RoleValue RV = new RoleValue();
+
+ private static final NodeId NID1 = new NodeId("node1");
+ private static final NodeId NID2 = new NodeId("node2");
+ private static final NodeId NID3 = new NodeId("node3");
+
+ @Test
+ public void add() {
+ assertEquals("faulty initialization: ", 3, RV.value.size());
+ RV.add(MASTER, NID1);
+ RV.add(STANDBY, NID2);
+ RV.add(STANDBY, NID3);
+
+ assertEquals("wrong nodeID: ", NID1, RV.get(MASTER));
+ assertTrue("wrong nodeIDs: ",
+ Sets.newHashSet(NID3, NID2).containsAll(RV.nodesOfRole(STANDBY)));
+ }
+}
diff --git a/core/store/hz/common/pom.xml b/core/store/hz/common/pom.xml
index 1d79206..a4ff179 100644
--- a/core/store/hz/common/pom.xml
+++ b/core/store/hz/common/pom.xml
@@ -19,34 +19,13 @@
<dependencies>
<dependency>
<groupId>org.onlab.onos</groupId>
- <artifactId>onos-api</artifactId>
- </dependency>
- <dependency>
- <groupId>org.onlab.onos</groupId>
<artifactId>onos-core-serializers</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
- <groupId>org.apache.felix</groupId>
- <artifactId>org.apache.felix.scr.annotations</artifactId>
- </dependency>
- <dependency>
- <groupId>com.hazelcast</groupId>
- <artifactId>hazelcast</artifactId>
- </dependency>
- <dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId>
</dependency>
</dependencies>
- <build>
- <plugins>
- <plugin>
- <groupId>org.apache.felix</groupId>
- <artifactId>maven-scr-plugin</artifactId>
- </plugin>
- </plugins>
- </build>
-
</project>
diff --git a/core/store/hz/net/src/main/java/org/onlab/onos/store/device/impl/DistributedDeviceStore.java b/core/store/hz/net/src/main/java/org/onlab/onos/store/device/impl/DistributedDeviceStore.java
deleted file mode 100644
index 0016939..0000000
--- a/core/store/hz/net/src/main/java/org/onlab/onos/store/device/impl/DistributedDeviceStore.java
+++ /dev/null
@@ -1,408 +0,0 @@
-package org.onlab.onos.store.device.impl;
-
-import static com.google.common.base.Predicates.notNull;
-
-import com.google.common.base.Optional;
-import com.google.common.cache.LoadingCache;
-import com.google.common.collect.FluentIterable;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.ImmutableSet.Builder;
-import com.hazelcast.core.IMap;
-import com.hazelcast.core.ISet;
-
-import org.apache.felix.scr.annotations.Activate;
-import org.apache.felix.scr.annotations.Component;
-import org.apache.felix.scr.annotations.Deactivate;
-import org.apache.felix.scr.annotations.Service;
-import org.onlab.onos.net.DefaultDevice;
-import org.onlab.onos.net.DefaultPort;
-import org.onlab.onos.net.Device;
-import org.onlab.onos.net.DeviceId;
-import org.onlab.onos.net.Port;
-import org.onlab.onos.net.PortNumber;
-import org.onlab.onos.net.device.DeviceDescription;
-import org.onlab.onos.net.device.DeviceEvent;
-import org.onlab.onos.net.device.DeviceStore;
-import org.onlab.onos.net.device.DeviceStoreDelegate;
-import org.onlab.onos.net.device.PortDescription;
-import org.onlab.onos.net.provider.ProviderId;
-import org.onlab.onos.store.common.AbsentInvalidatingLoadingCache;
-import org.onlab.onos.store.common.AbstractHazelcastStore;
-import org.onlab.onos.store.common.OptionalCacheLoader;
-import org.slf4j.Logger;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Objects;
-import java.util.Set;
-
-import static com.google.common.base.Preconditions.checkArgument;
-import static com.google.common.cache.CacheBuilder.newBuilder;
-import static org.onlab.onos.net.device.DeviceEvent.Type.*;
-import static org.slf4j.LoggerFactory.getLogger;
-
-//TODO: Add support for multiple provider and annotations
-/**
- * Manages inventory of infrastructure devices using Hazelcast-backed map.
- */
-@Component(immediate = true)
-@Service
-public class DistributedDeviceStore
- extends AbstractHazelcastStore<DeviceEvent, DeviceStoreDelegate>
- implements DeviceStore {
-
- private final Logger log = getLogger(getClass());
-
- public static final String DEVICE_NOT_FOUND = "Device with ID %s not found";
-
- // private IMap<DeviceId, DefaultDevice> cache;
- private IMap<byte[], byte[]> rawDevices;
- private LoadingCache<DeviceId, Optional<DefaultDevice>> devices;
-
- // private ISet<DeviceId> availableDevices;
- private ISet<byte[]> availableDevices;
-
- // TODO DevicePorts is very inefficient consider restructuring.
- // private IMap<DeviceId, Map<PortNumber, Port>> devicePorts;
- private IMap<byte[], byte[]> rawDevicePorts;
- private LoadingCache<DeviceId, Optional<Map<PortNumber, Port>>> devicePorts;
-
- private String devicesListener;
-
- private String portsListener;
-
- @Override
- @Activate
- public void activate() {
- super.activate();
-
- // IMap event handler needs value
- final boolean includeValue = true;
-
- // TODO decide on Map name scheme to avoid collision
- rawDevices = theInstance.getMap("devices");
- final OptionalCacheLoader<DeviceId, DefaultDevice> deviceLoader
- = new OptionalCacheLoader<>(serializer, rawDevices);
- devices = new AbsentInvalidatingLoadingCache<>(newBuilder().build(deviceLoader));
- // refresh/populate cache based on notification from other instance
- devicesListener = rawDevices.addEntryListener(new RemoteDeviceEventHandler(devices), includeValue);
-
- // TODO cache availableDevices
- availableDevices = theInstance.getSet("availableDevices");
-
- rawDevicePorts = theInstance.getMap("devicePorts");
- final OptionalCacheLoader<DeviceId, Map<PortNumber, Port>> devicePortLoader
- = new OptionalCacheLoader<>(serializer, rawDevicePorts);
- devicePorts = new AbsentInvalidatingLoadingCache<>(newBuilder().build(devicePortLoader));
- // refresh/populate cache based on notification from other instance
- portsListener = rawDevicePorts.addEntryListener(new RemotePortEventHandler(devicePorts), includeValue);
-
- loadDeviceCache();
- loadDevicePortsCache();
-
- log.info("Started");
- }
-
- @Deactivate
- public void deactivate() {
- rawDevicePorts.removeEntryListener(portsListener);
- rawDevices.removeEntryListener(devicesListener);
- log.info("Stopped");
- }
-
- @Override
- public int getDeviceCount() {
- return devices.asMap().size();
- }
-
- @Override
- public Iterable<Device> getDevices() {
- // TODO builder v.s. copyOf. Guava semms to be using copyOf?
- Builder<Device> builder = ImmutableSet.builder();
- for (Optional<DefaultDevice> e : devices.asMap().values()) {
- if (e.isPresent()) {
- builder.add(e.get());
- }
- }
- return builder.build();
- }
-
- private void loadDeviceCache() {
- for (byte[] keyBytes : rawDevices.keySet()) {
- final DeviceId id = deserialize(keyBytes);
- devices.refresh(id);
- }
- }
-
- private void loadDevicePortsCache() {
- for (byte[] keyBytes : rawDevicePorts.keySet()) {
- final DeviceId id = deserialize(keyBytes);
- devicePorts.refresh(id);
- }
- }
-
- @Override
- public Device getDevice(DeviceId deviceId) {
- // TODO revisit if ignoring exception is safe.
- return devices.getUnchecked(deviceId).orNull();
- }
-
- @Override
- public DeviceEvent createOrUpdateDevice(ProviderId providerId, DeviceId deviceId,
- DeviceDescription deviceDescription) {
- DefaultDevice device = devices.getUnchecked(deviceId).orNull();
- if (device == null) {
- return createDevice(providerId, deviceId, deviceDescription);
- }
- return updateDevice(providerId, device, deviceDescription);
- }
-
- // Creates the device and returns the appropriate event if necessary.
- private DeviceEvent createDevice(ProviderId providerId, DeviceId deviceId,
- DeviceDescription desc) {
- DefaultDevice device = new DefaultDevice(providerId, deviceId, desc.type(),
- desc.manufacturer(),
- desc.hwVersion(), desc.swVersion(),
- desc.serialNumber());
-
- synchronized (this) {
- final byte[] deviceIdBytes = serialize(deviceId);
- rawDevices.put(deviceIdBytes, serialize(device));
- devices.put(deviceId, Optional.of(device));
-
- availableDevices.add(deviceIdBytes);
- }
- return new DeviceEvent(DEVICE_ADDED, device, null);
- }
-
- // Updates the device and returns the appropriate event if necessary.
- private DeviceEvent updateDevice(ProviderId providerId, DefaultDevice device,
- DeviceDescription desc) {
- // We allow only certain attributes to trigger update
- if (!Objects.equals(device.hwVersion(), desc.hwVersion()) ||
- !Objects.equals(device.swVersion(), desc.swVersion())) {
-
- DefaultDevice updated = new DefaultDevice(providerId, device.id(),
- desc.type(),
- desc.manufacturer(),
- desc.hwVersion(),
- desc.swVersion(),
- desc.serialNumber());
- synchronized (this) {
- final byte[] deviceIdBytes = serialize(device.id());
- rawDevices.put(deviceIdBytes, serialize(updated));
- devices.put(device.id(), Optional.of(updated));
- availableDevices.add(serialize(device.id()));
- }
- return new DeviceEvent(DeviceEvent.Type.DEVICE_UPDATED, updated, null);
- }
-
- // Otherwise merely attempt to change availability
- synchronized (this) {
- boolean added = availableDevices.add(serialize(device.id()));
- return !added ? null :
- new DeviceEvent(DEVICE_AVAILABILITY_CHANGED, device, null);
- }
- }
-
- @Override
- public DeviceEvent markOffline(DeviceId deviceId) {
- synchronized (this) {
- Device device = devices.getUnchecked(deviceId).orNull();
- boolean removed = device != null && availableDevices.remove(serialize(deviceId));
- return !removed ? null :
- new DeviceEvent(DEVICE_AVAILABILITY_CHANGED, device, null);
- }
- }
-
- @Override
- public List<DeviceEvent> updatePorts(ProviderId providerId, DeviceId deviceId,
- List<PortDescription> portDescriptions) {
- List<DeviceEvent> events = new ArrayList<>();
- synchronized (this) {
- Device device = devices.getUnchecked(deviceId).orNull();
- checkArgument(device != null, DEVICE_NOT_FOUND, deviceId);
- Map<PortNumber, Port> ports = getPortMap(deviceId);
-
- // Add new ports
- Set<PortNumber> processed = new HashSet<>();
- for (PortDescription portDescription : portDescriptions) {
- Port port = ports.get(portDescription.portNumber());
- events.add(port == null ?
- createPort(device, portDescription, ports) :
- updatePort(device, port, portDescription, ports));
- processed.add(portDescription.portNumber());
- }
-
- updatePortMap(deviceId, ports);
-
- events.addAll(pruneOldPorts(device, ports, processed));
- }
- return FluentIterable.from(events).filter(notNull()).toList();
- }
-
- // Creates a new port based on the port description adds it to the map and
- // Returns corresponding event.
- //@GuardedBy("this")
- private DeviceEvent createPort(Device device, PortDescription portDescription,
- Map<PortNumber, Port> ports) {
- DefaultPort port = new DefaultPort(device, portDescription.portNumber(),
- portDescription.isEnabled());
- ports.put(port.number(), port);
- updatePortMap(device.id(), ports);
- return new DeviceEvent(PORT_ADDED, device, port);
- }
-
- // Checks if the specified port requires update and if so, it replaces the
- // existing entry in the map and returns corresponding event.
- //@GuardedBy("this")
- private DeviceEvent updatePort(Device device, Port port,
- PortDescription portDescription,
- Map<PortNumber, Port> ports) {
- if (port.isEnabled() != portDescription.isEnabled()) {
- DefaultPort updatedPort =
- new DefaultPort(device, portDescription.portNumber(),
- portDescription.isEnabled());
- ports.put(port.number(), updatedPort);
- updatePortMap(device.id(), ports);
- return new DeviceEvent(PORT_UPDATED, device, updatedPort);
- }
- return null;
- }
-
- // Prunes the specified list of ports based on which ports are in the
- // processed list and returns list of corresponding events.
- //@GuardedBy("this")
- private List<DeviceEvent> pruneOldPorts(Device device,
- Map<PortNumber, Port> ports,
- Set<PortNumber> processed) {
- List<DeviceEvent> events = new ArrayList<>();
- Iterator<PortNumber> iterator = ports.keySet().iterator();
- while (iterator.hasNext()) {
- PortNumber portNumber = iterator.next();
- if (!processed.contains(portNumber)) {
- events.add(new DeviceEvent(PORT_REMOVED, device,
- ports.get(portNumber)));
- iterator.remove();
- }
- }
- if (!events.isEmpty()) {
- updatePortMap(device.id(), ports);
- }
- return events;
- }
-
- // Gets the map of ports for the specified device; if one does not already
- // exist, it creates and registers a new one.
- // WARN: returned value is a copy, changes made to the Map
- // needs to be written back using updatePortMap
- //@GuardedBy("this")
- private Map<PortNumber, Port> getPortMap(DeviceId deviceId) {
- Map<PortNumber, Port> ports = devicePorts.getUnchecked(deviceId).orNull();
- if (ports == null) {
- ports = new HashMap<>();
- // this probably is waste of time in most cases.
- updatePortMap(deviceId, ports);
- }
- return ports;
- }
-
- //@GuardedBy("this")
- private void updatePortMap(DeviceId deviceId, Map<PortNumber, Port> ports) {
- rawDevicePorts.put(serialize(deviceId), serialize(ports));
- devicePorts.put(deviceId, Optional.of(ports));
- }
-
- @Override
- public DeviceEvent updatePortStatus(ProviderId providerId, DeviceId deviceId,
- PortDescription portDescription) {
- synchronized (this) {
- Device device = devices.getUnchecked(deviceId).orNull();
- checkArgument(device != null, DEVICE_NOT_FOUND, deviceId);
- Map<PortNumber, Port> ports = getPortMap(deviceId);
- Port port = ports.get(portDescription.portNumber());
- return updatePort(device, port, portDescription, ports);
- }
- }
-
- @Override
- public List<Port> getPorts(DeviceId deviceId) {
- Map<PortNumber, Port> ports = devicePorts.getUnchecked(deviceId).orNull();
- return ports == null ? Collections.<Port>emptyList() : ImmutableList.copyOf(ports.values());
- }
-
- @Override
- public Port getPort(DeviceId deviceId, PortNumber portNumber) {
- Map<PortNumber, Port> ports = devicePorts.getUnchecked(deviceId).orNull();
- return ports == null ? null : ports.get(portNumber);
- }
-
- @Override
- public boolean isAvailable(DeviceId deviceId) {
- return availableDevices.contains(serialize(deviceId));
- }
-
- @Override
- public DeviceEvent removeDevice(DeviceId deviceId) {
- synchronized (this) {
- byte[] deviceIdBytes = serialize(deviceId);
-
- // TODO conditional remove?
- Device device = deserialize(rawDevices.remove(deviceIdBytes));
- devices.invalidate(deviceId);
- return device == null ? null :
- new DeviceEvent(DEVICE_REMOVED, device, null);
- }
- }
-
- private class RemoteDeviceEventHandler extends RemoteCacheEventHandler<DeviceId, DefaultDevice> {
- public RemoteDeviceEventHandler(LoadingCache<DeviceId, Optional<DefaultDevice>> cache) {
- super(cache);
- }
-
- @Override
- protected void onAdd(DeviceId deviceId, DefaultDevice device) {
- notifyDelegate(new DeviceEvent(DEVICE_ADDED, device));
- }
-
- @Override
- protected void onRemove(DeviceId deviceId, DefaultDevice device) {
- notifyDelegate(new DeviceEvent(DEVICE_REMOVED, device));
- }
-
- @Override
- protected void onUpdate(DeviceId deviceId, DefaultDevice oldDevice, DefaultDevice device) {
- notifyDelegate(new DeviceEvent(DEVICE_UPDATED, device));
- }
- }
-
- private class RemotePortEventHandler extends RemoteCacheEventHandler<DeviceId, Map<PortNumber, Port>> {
- public RemotePortEventHandler(LoadingCache<DeviceId, Optional<Map<PortNumber, Port>>> cache) {
- super(cache);
- }
-
- @Override
- protected void onAdd(DeviceId deviceId, Map<PortNumber, Port> ports) {
-// notifyDelegate(new DeviceEvent(PORT_ADDED, getDevice(deviceId)));
- }
-
- @Override
- protected void onRemove(DeviceId deviceId, Map<PortNumber, Port> ports) {
-// notifyDelegate(new DeviceEvent(PORT_REMOVED, getDevice(deviceId)));
- }
-
- @Override
- protected void onUpdate(DeviceId deviceId, Map<PortNumber, Port> oldPorts, Map<PortNumber, Port> ports) {
-// notifyDelegate(new DeviceEvent(PORT_UPDATED, getDevice(deviceId)));
- }
- }
-
-
- // TODO cache serialized DeviceID if we suffer from serialization cost
-}
diff --git a/core/store/hz/net/src/main/java/org/onlab/onos/store/device/impl/NoOpClockProviderService.java b/core/store/hz/net/src/main/java/org/onlab/onos/store/device/impl/NoOpClockProviderService.java
deleted file mode 100644
index 4626fa4..0000000
--- a/core/store/hz/net/src/main/java/org/onlab/onos/store/device/impl/NoOpClockProviderService.java
+++ /dev/null
@@ -1,20 +0,0 @@
-package org.onlab.onos.store.device.impl;
-
-import org.apache.felix.scr.annotations.Component;
-import org.apache.felix.scr.annotations.Service;
-import org.onlab.onos.mastership.MastershipTerm;
-import org.onlab.onos.net.DeviceId;
-import org.onlab.onos.net.device.DeviceClockProviderService;
-
-// FIXME: Code clone in onos-core-trivial, onos-core-hz-net
-/**
- * Dummy implementation of {@link DeviceClockProviderService}.
- */
-@Component(immediate = true)
-@Service
-public class NoOpClockProviderService implements DeviceClockProviderService {
-
- @Override
- public void setMastershipTerm(DeviceId deviceId, MastershipTerm term) {
- }
-}
diff --git a/core/store/hz/net/src/main/java/org/onlab/onos/store/flow/impl/DistributedFlowRuleStore.java b/core/store/hz/net/src/main/java/org/onlab/onos/store/flow/impl/DistributedFlowRuleStore.java
deleted file mode 100644
index 084435f..0000000
--- a/core/store/hz/net/src/main/java/org/onlab/onos/store/flow/impl/DistributedFlowRuleStore.java
+++ /dev/null
@@ -1,142 +0,0 @@
-package org.onlab.onos.store.flow.impl;
-
-import static org.onlab.onos.net.flow.FlowRuleEvent.Type.RULE_REMOVED;
-import static org.slf4j.LoggerFactory.getLogger;
-
-import java.util.Collection;
-import java.util.Collections;
-
-import org.apache.felix.scr.annotations.Activate;
-import org.apache.felix.scr.annotations.Component;
-import org.apache.felix.scr.annotations.Deactivate;
-import org.apache.felix.scr.annotations.Service;
-import org.onlab.onos.ApplicationId;
-import org.onlab.onos.net.DeviceId;
-import org.onlab.onos.net.flow.DefaultFlowEntry;
-import org.onlab.onos.net.flow.FlowEntry;
-import org.onlab.onos.net.flow.FlowEntry.FlowEntryState;
-import org.onlab.onos.net.flow.FlowRule;
-import org.onlab.onos.net.flow.FlowRuleEvent;
-import org.onlab.onos.net.flow.FlowRuleEvent.Type;
-import org.onlab.onos.net.flow.FlowRuleStore;
-import org.onlab.onos.net.flow.FlowRuleStoreDelegate;
-import org.onlab.onos.store.AbstractStore;
-import org.slf4j.Logger;
-
-import com.google.common.collect.ArrayListMultimap;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.Multimap;
-
-/**
- * Manages inventory of flow rules using trivial in-memory implementation.
- */
-//FIXME I LIE. I AIN'T DISTRIBUTED
-@Component(immediate = true)
-@Service
-public class DistributedFlowRuleStore
- extends AbstractStore<FlowRuleEvent, FlowRuleStoreDelegate>
- implements FlowRuleStore {
-
- private final Logger log = getLogger(getClass());
-
- // store entries as a pile of rules, no info about device tables
- private final Multimap<DeviceId, FlowEntry> flowEntries =
- ArrayListMultimap.<DeviceId, FlowEntry>create();
-
- private final Multimap<Short, FlowRule> flowEntriesById =
- ArrayListMultimap.<Short, FlowRule>create();
-
- @Activate
- public void activate() {
- log.info("Started");
- }
-
- @Deactivate
- public void deactivate() {
- log.info("Stopped");
- }
-
-
- @Override
- public int getFlowRuleCount() {
- return flowEntries.size();
- }
-
- @Override
- public synchronized FlowEntry getFlowEntry(FlowRule rule) {
- for (FlowEntry f : flowEntries.get(rule.deviceId())) {
- if (f.equals(rule)) {
- return f;
- }
- }
- return null;
- }
-
- @Override
- public synchronized Iterable<FlowEntry> getFlowEntries(DeviceId deviceId) {
- Collection<FlowEntry> rules = flowEntries.get(deviceId);
- if (rules == null) {
- return Collections.emptyList();
- }
- return ImmutableSet.copyOf(rules);
- }
-
- @Override
- public synchronized Iterable<FlowRule> getFlowRulesByAppId(ApplicationId appId) {
- Collection<FlowRule> rules = flowEntriesById.get(appId.id());
- if (rules == null) {
- return Collections.emptyList();
- }
- return ImmutableSet.copyOf(rules);
- }
-
- @Override
- public synchronized void storeFlowRule(FlowRule rule) {
- FlowEntry f = new DefaultFlowEntry(rule);
- DeviceId did = f.deviceId();
- if (!flowEntries.containsEntry(did, f)) {
- flowEntries.put(did, f);
- flowEntriesById.put(rule.appId(), f);
- }
- }
-
- @Override
- public synchronized void deleteFlowRule(FlowRule rule) {
- FlowEntry entry = getFlowEntry(rule);
- if (entry == null) {
- return;
- }
- entry.setState(FlowEntryState.PENDING_REMOVE);
- }
-
- @Override
- public synchronized FlowRuleEvent addOrUpdateFlowRule(FlowEntry rule) {
- DeviceId did = rule.deviceId();
-
- // check if this new rule is an update to an existing entry
- FlowEntry stored = getFlowEntry(rule);
- if (stored != null) {
- stored.setBytes(rule.bytes());
- stored.setLife(rule.life());
- stored.setPackets(rule.packets());
- if (stored.state() == FlowEntryState.PENDING_ADD) {
- stored.setState(FlowEntryState.ADDED);
- return new FlowRuleEvent(Type.RULE_ADDED, rule);
- }
- return new FlowRuleEvent(Type.RULE_UPDATED, rule);
- }
-
- flowEntries.put(did, rule);
- return null;
- }
-
- @Override
- public synchronized FlowRuleEvent removeFlowRule(FlowEntry rule) {
- // This is where one could mark a rule as removed and still keep it in the store.
- if (flowEntries.remove(rule.deviceId(), rule)) {
- return new FlowRuleEvent(RULE_REMOVED, rule);
- } else {
- return null;
- }
- }
-}
diff --git a/core/store/hz/net/src/main/java/org/onlab/onos/store/host/impl/DistributedHostStore.java b/core/store/hz/net/src/main/java/org/onlab/onos/store/host/impl/DistributedHostStore.java
deleted file mode 100644
index 0ca4ae2..0000000
--- a/core/store/hz/net/src/main/java/org/onlab/onos/store/host/impl/DistributedHostStore.java
+++ /dev/null
@@ -1,302 +0,0 @@
-package org.onlab.onos.store.host.impl;
-
-import com.google.common.collect.HashMultimap;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.Multimap;
-import com.google.common.collect.Sets;
-import org.apache.felix.scr.annotations.Activate;
-import org.apache.felix.scr.annotations.Component;
-import org.apache.felix.scr.annotations.Deactivate;
-import org.apache.felix.scr.annotations.Service;
-import org.onlab.onos.net.Annotations;
-import org.onlab.onos.net.ConnectPoint;
-import org.onlab.onos.net.DefaultHost;
-import org.onlab.onos.net.DeviceId;
-import org.onlab.onos.net.Host;
-import org.onlab.onos.net.HostId;
-import org.onlab.onos.net.HostLocation;
-import org.onlab.onos.net.host.HostDescription;
-import org.onlab.onos.net.host.HostEvent;
-import org.onlab.onos.net.host.HostStore;
-import org.onlab.onos.net.host.HostStoreDelegate;
-import org.onlab.onos.net.host.PortAddresses;
-import org.onlab.onos.net.provider.ProviderId;
-import org.onlab.onos.store.AbstractStore;
-import org.onlab.packet.IpPrefix;
-import org.onlab.packet.MacAddress;
-import org.onlab.packet.VlanId;
-import org.slf4j.Logger;
-
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-
-import static org.onlab.onos.net.host.HostEvent.Type.*;
-import static org.slf4j.LoggerFactory.getLogger;
-
-/**
- * TEMPORARY: Manages inventory of end-station hosts using distributed
- * structures implementation.
- */
-//FIXME: I LIE I AM NOT DISTRIBUTED
-@Component(immediate = true)
-@Service
-public class DistributedHostStore
- extends AbstractStore<HostEvent, HostStoreDelegate>
- implements HostStore {
-
- private final Logger log = getLogger(getClass());
-
- // Host inventory
- private final Map<HostId, StoredHost> hosts = new ConcurrentHashMap<>(2000000, 0.75f, 16);
-
- // Hosts tracked by their location
- private final Multimap<ConnectPoint, Host> locations = HashMultimap.create();
-
- private final Map<ConnectPoint, PortAddresses> portAddresses =
- new ConcurrentHashMap<>();
-
- @Activate
- public void activate() {
- log.info("Started");
- }
-
- @Deactivate
- public void deactivate() {
- log.info("Stopped");
- }
-
- @Override
- public HostEvent createOrUpdateHost(ProviderId providerId, HostId hostId,
- HostDescription hostDescription) {
- StoredHost host = hosts.get(hostId);
- if (host == null) {
- return createHost(providerId, hostId, hostDescription);
- }
- return updateHost(providerId, host, hostDescription);
- }
-
- // creates a new host and sends HOST_ADDED
- private HostEvent createHost(ProviderId providerId, HostId hostId,
- HostDescription descr) {
- StoredHost newhost = new StoredHost(providerId, hostId,
- descr.hwAddress(),
- descr.vlan(),
- descr.location(),
- ImmutableSet.of(descr.ipAddress()));
- synchronized (this) {
- hosts.put(hostId, newhost);
- locations.put(descr.location(), newhost);
- }
- return new HostEvent(HOST_ADDED, newhost);
- }
-
- // checks for type of update to host, sends appropriate event
- private HostEvent updateHost(ProviderId providerId, StoredHost host,
- HostDescription descr) {
- HostEvent event;
- if (!host.location().equals(descr.location())) {
- host.setLocation(descr.location());
- return new HostEvent(HOST_MOVED, host);
- }
-
- if (host.ipAddresses().contains(descr.ipAddress())) {
- return null;
- }
-
- Set<IpPrefix> addresses = new HashSet<>(host.ipAddresses());
- addresses.add(descr.ipAddress());
- StoredHost updated = new StoredHost(providerId, host.id(),
- host.mac(), host.vlan(),
- descr.location(), addresses);
- event = new HostEvent(HOST_UPDATED, updated);
- synchronized (this) {
- hosts.put(host.id(), updated);
- locations.remove(host.location(), host);
- locations.put(updated.location(), updated);
- }
- return event;
- }
-
- @Override
- public HostEvent removeHost(HostId hostId) {
- synchronized (this) {
- Host host = hosts.remove(hostId);
- if (host != null) {
- locations.remove((host.location()), host);
- return new HostEvent(HOST_REMOVED, host);
- }
- return null;
- }
- }
-
- @Override
- public int getHostCount() {
- return hosts.size();
- }
-
- @Override
- public Iterable<Host> getHosts() {
- return ImmutableSet.<Host>copyOf(hosts.values());
- }
-
- @Override
- public Host getHost(HostId hostId) {
- return hosts.get(hostId);
- }
-
- @Override
- public Set<Host> getHosts(VlanId vlanId) {
- Set<Host> vlanset = new HashSet<>();
- for (Host h : hosts.values()) {
- if (h.vlan().equals(vlanId)) {
- vlanset.add(h);
- }
- }
- return vlanset;
- }
-
- @Override
- public Set<Host> getHosts(MacAddress mac) {
- Set<Host> macset = new HashSet<>();
- for (Host h : hosts.values()) {
- if (h.mac().equals(mac)) {
- macset.add(h);
- }
- }
- return macset;
- }
-
- @Override
- public Set<Host> getHosts(IpPrefix ip) {
- Set<Host> ipset = new HashSet<>();
- for (Host h : hosts.values()) {
- if (h.ipAddresses().contains(ip)) {
- ipset.add(h);
- }
- }
- return ipset;
- }
-
- @Override
- public Set<Host> getConnectedHosts(ConnectPoint connectPoint) {
- return ImmutableSet.copyOf(locations.get(connectPoint));
- }
-
- @Override
- public Set<Host> getConnectedHosts(DeviceId deviceId) {
- Set<Host> hostset = new HashSet<>();
- for (ConnectPoint p : locations.keySet()) {
- if (p.deviceId().equals(deviceId)) {
- hostset.addAll(locations.get(p));
- }
- }
- return hostset;
- }
-
- @Override
- public void updateAddressBindings(PortAddresses addresses) {
- synchronized (portAddresses) {
- PortAddresses existing = portAddresses.get(addresses.connectPoint());
- if (existing == null) {
- portAddresses.put(addresses.connectPoint(), addresses);
- } else {
- Set<IpPrefix> union = Sets.union(existing.ips(), addresses.ips())
- .immutableCopy();
-
- MacAddress newMac = (addresses.mac() == null) ? existing.mac()
- : addresses.mac();
-
- PortAddresses newAddresses =
- new PortAddresses(addresses.connectPoint(), union, newMac);
-
- portAddresses.put(newAddresses.connectPoint(), newAddresses);
- }
- }
- }
-
- @Override
- public void removeAddressBindings(PortAddresses addresses) {
- synchronized (portAddresses) {
- PortAddresses existing = portAddresses.get(addresses.connectPoint());
- if (existing != null) {
- Set<IpPrefix> difference =
- Sets.difference(existing.ips(), addresses.ips()).immutableCopy();
-
- // If they removed the existing mac, set the new mac to null.
- // Otherwise, keep the existing mac.
- MacAddress newMac = existing.mac();
- if (addresses.mac() != null && addresses.mac().equals(existing.mac())) {
- newMac = null;
- }
-
- PortAddresses newAddresses =
- new PortAddresses(addresses.connectPoint(), difference, newMac);
-
- portAddresses.put(newAddresses.connectPoint(), newAddresses);
- }
- }
- }
-
- @Override
- public void clearAddressBindings(ConnectPoint connectPoint) {
- synchronized (portAddresses) {
- portAddresses.remove(connectPoint);
- }
- }
-
- @Override
- public Set<PortAddresses> getAddressBindings() {
- synchronized (portAddresses) {
- return new HashSet<>(portAddresses.values());
- }
- }
-
- @Override
- public PortAddresses getAddressBindingsForPort(ConnectPoint connectPoint) {
- PortAddresses addresses;
-
- synchronized (portAddresses) {
- addresses = portAddresses.get(connectPoint);
- }
-
- if (addresses == null) {
- addresses = new PortAddresses(connectPoint, null, null);
- }
-
- return addresses;
- }
-
- // Auxiliary extension to allow location to mutate.
- private class StoredHost extends DefaultHost {
- private HostLocation location;
-
- /**
- * Creates an end-station host using the supplied information.
- *
- * @param providerId provider identity
- * @param id host identifier
- * @param mac host MAC address
- * @param vlan host VLAN identifier
- * @param location host location
- * @param ips host IP addresses
- * @param annotations optional key/value annotations
- */
- public StoredHost(ProviderId providerId, HostId id,
- MacAddress mac, VlanId vlan, HostLocation location,
- Set<IpPrefix> ips, Annotations... annotations) {
- super(providerId, id, mac, vlan, location, ips, annotations);
- this.location = location;
- }
-
- void setLocation(HostLocation location) {
- this.location = location;
- }
-
- @Override
- public HostLocation location() {
- return location;
- }
- }
-}
diff --git a/core/store/hz/net/src/main/java/org/onlab/onos/store/link/impl/DistributedLinkStore.java b/core/store/hz/net/src/main/java/org/onlab/onos/store/link/impl/DistributedLinkStore.java
deleted file mode 100644
index 90ae6fe..0000000
--- a/core/store/hz/net/src/main/java/org/onlab/onos/store/link/impl/DistributedLinkStore.java
+++ /dev/null
@@ -1,263 +0,0 @@
-package org.onlab.onos.store.link.impl;
-
-import static com.google.common.cache.CacheBuilder.newBuilder;
-import static org.onlab.onos.net.Link.Type.DIRECT;
-import static org.onlab.onos.net.Link.Type.INDIRECT;
-import static org.onlab.onos.net.LinkKey.linkKey;
-import static org.onlab.onos.net.link.LinkEvent.Type.LINK_ADDED;
-import static org.onlab.onos.net.link.LinkEvent.Type.LINK_REMOVED;
-import static org.onlab.onos.net.link.LinkEvent.Type.LINK_UPDATED;
-import static org.slf4j.LoggerFactory.getLogger;
-
-import java.util.HashSet;
-import java.util.Set;
-
-import org.apache.felix.scr.annotations.Activate;
-import org.apache.felix.scr.annotations.Component;
-import org.apache.felix.scr.annotations.Deactivate;
-import org.apache.felix.scr.annotations.Service;
-import org.onlab.onos.net.ConnectPoint;
-import org.onlab.onos.net.DefaultLink;
-import org.onlab.onos.net.DeviceId;
-import org.onlab.onos.net.Link;
-import org.onlab.onos.net.LinkKey;
-import org.onlab.onos.net.link.LinkDescription;
-import org.onlab.onos.net.link.LinkEvent;
-import org.onlab.onos.net.link.LinkStore;
-import org.onlab.onos.net.link.LinkStoreDelegate;
-import org.onlab.onos.net.provider.ProviderId;
-import org.onlab.onos.store.common.AbsentInvalidatingLoadingCache;
-import org.onlab.onos.store.common.AbstractHazelcastStore;
-import org.onlab.onos.store.common.OptionalCacheLoader;
-import org.slf4j.Logger;
-
-import com.google.common.base.Optional;
-import com.google.common.cache.LoadingCache;
-import com.google.common.collect.HashMultimap;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.Multimap;
-import com.google.common.collect.ImmutableSet.Builder;
-import com.hazelcast.core.IMap;
-
-//TODO: Add support for multiple provider and annotations
-/**
- * Manages inventory of infrastructure links using Hazelcast-backed map.
- */
-@Component(immediate = true)
-@Service
-public class DistributedLinkStore
- extends AbstractHazelcastStore<LinkEvent, LinkStoreDelegate>
- implements LinkStore {
-
- private final Logger log = getLogger(getClass());
-
- // Link inventory
- private IMap<byte[], byte[]> rawLinks;
- private LoadingCache<LinkKey, Optional<DefaultLink>> links;
-
- // TODO synchronize?
- // Egress and ingress link sets
- private final Multimap<DeviceId, Link> srcLinks = HashMultimap.create();
- private final Multimap<DeviceId, Link> dstLinks = HashMultimap.create();
-
- private String linksListener;
-
- @Override
- @Activate
- public void activate() {
- super.activate();
-
- boolean includeValue = true;
-
- // TODO decide on Map name scheme to avoid collision
- rawLinks = theInstance.getMap("links");
- final OptionalCacheLoader<LinkKey, DefaultLink> linkLoader
- = new OptionalCacheLoader<>(serializer, rawLinks);
- links = new AbsentInvalidatingLoadingCache<>(newBuilder().build(linkLoader));
- // refresh/populate cache based on notification from other instance
- linksListener = rawLinks.addEntryListener(new RemoteLinkEventHandler(links), includeValue);
-
- loadLinkCache();
-
- log.info("Started");
- }
-
- @Deactivate
- public void deactivate() {
- rawLinks.removeEntryListener(linksListener);
- log.info("Stopped");
- }
-
- private void loadLinkCache() {
- for (byte[] keyBytes : rawLinks.keySet()) {
- final LinkKey id = deserialize(keyBytes);
- links.refresh(id);
- }
- }
-
- @Override
- public int getLinkCount() {
- return links.asMap().size();
- }
-
- @Override
- public Iterable<Link> getLinks() {
- Builder<Link> builder = ImmutableSet.builder();
- for (Optional<DefaultLink> e : links.asMap().values()) {
- if (e.isPresent()) {
- builder.add(e.get());
- }
- }
- return builder.build();
- }
-
- @Override
- public Set<Link> getDeviceEgressLinks(DeviceId deviceId) {
- return ImmutableSet.copyOf(srcLinks.get(deviceId));
- }
-
- @Override
- public Set<Link> getDeviceIngressLinks(DeviceId deviceId) {
- return ImmutableSet.copyOf(dstLinks.get(deviceId));
- }
-
- @Override
- public Link getLink(ConnectPoint src, ConnectPoint dst) {
- return links.getUnchecked(linkKey(src, dst)).orNull();
- }
-
- @Override
- public Set<Link> getEgressLinks(ConnectPoint src) {
- Set<Link> egress = new HashSet<>();
- for (Link link : srcLinks.get(src.deviceId())) {
- if (link.src().equals(src)) {
- egress.add(link);
- }
- }
- return egress;
- }
-
- @Override
- public Set<Link> getIngressLinks(ConnectPoint dst) {
- Set<Link> ingress = new HashSet<>();
- for (Link link : dstLinks.get(dst.deviceId())) {
- if (link.dst().equals(dst)) {
- ingress.add(link);
- }
- }
- return ingress;
- }
-
- @Override
- public LinkEvent createOrUpdateLink(ProviderId providerId,
- LinkDescription linkDescription) {
- LinkKey key = linkKey(linkDescription.src(), linkDescription.dst());
- Optional<DefaultLink> link = links.getUnchecked(key);
- if (!link.isPresent()) {
- return createLink(providerId, key, linkDescription);
- }
- return updateLink(providerId, link.get(), key, linkDescription);
- }
-
- // Creates and stores the link and returns the appropriate event.
- private LinkEvent createLink(ProviderId providerId, LinkKey key,
- LinkDescription linkDescription) {
- DefaultLink link = new DefaultLink(providerId, key.src(), key.dst(),
- linkDescription.type());
- synchronized (this) {
- final byte[] keyBytes = serialize(key);
- rawLinks.put(keyBytes, serialize(link));
- links.asMap().putIfAbsent(key, Optional.of(link));
-
- addNewLink(link);
- }
- return new LinkEvent(LINK_ADDED, link);
- }
-
- // update Egress and ingress link sets
- private void addNewLink(DefaultLink link) {
- synchronized (this) {
- srcLinks.put(link.src().deviceId(), link);
- dstLinks.put(link.dst().deviceId(), link);
- }
- }
-
- // Updates, if necessary the specified link and returns the appropriate event.
- private LinkEvent updateLink(ProviderId providerId, DefaultLink link,
- LinkKey key, LinkDescription linkDescription) {
- // FIXME confirm Link update condition is OK
- if (link.type() == INDIRECT && linkDescription.type() == DIRECT) {
- synchronized (this) {
-
- DefaultLink updated =
- new DefaultLink(providerId, link.src(), link.dst(),
- linkDescription.type());
- final byte[] keyBytes = serialize(key);
- rawLinks.put(keyBytes, serialize(updated));
- links.asMap().replace(key, Optional.of(link), Optional.of(updated));
-
- replaceLink(link, updated);
- return new LinkEvent(LINK_UPDATED, updated);
- }
- }
- return null;
- }
-
- // update Egress and ingress link sets
- private void replaceLink(DefaultLink link, DefaultLink updated) {
- synchronized (this) {
- srcLinks.remove(link.src().deviceId(), link);
- dstLinks.remove(link.dst().deviceId(), link);
-
- srcLinks.put(link.src().deviceId(), updated);
- dstLinks.put(link.dst().deviceId(), updated);
- }
- }
-
- @Override
- public LinkEvent removeLink(ConnectPoint src, ConnectPoint dst) {
- synchronized (this) {
- LinkKey key = linkKey(src, dst);
- byte[] keyBytes = serialize(key);
- Link link = deserialize(rawLinks.remove(keyBytes));
- links.invalidate(key);
- if (link != null) {
- removeLink(link);
- return new LinkEvent(LINK_REMOVED, link);
- }
- return null;
- }
- }
-
- // update Egress and ingress link sets
- private void removeLink(Link link) {
- synchronized (this) {
- srcLinks.remove(link.src().deviceId(), link);
- dstLinks.remove(link.dst().deviceId(), link);
- }
- }
-
- private class RemoteLinkEventHandler extends RemoteCacheEventHandler<LinkKey, DefaultLink> {
- public RemoteLinkEventHandler(LoadingCache<LinkKey, Optional<DefaultLink>> cache) {
- super(cache);
- }
-
- @Override
- protected void onAdd(LinkKey key, DefaultLink newVal) {
- addNewLink(newVal);
- notifyDelegate(new LinkEvent(LINK_ADDED, newVal));
- }
-
- @Override
- protected void onUpdate(LinkKey key, DefaultLink oldVal, DefaultLink newVal) {
- replaceLink(oldVal, newVal);
- notifyDelegate(new LinkEvent(LINK_UPDATED, newVal));
- }
-
- @Override
- protected void onRemove(LinkKey key, DefaultLink val) {
- removeLink(val);
- notifyDelegate(new LinkEvent(LINK_REMOVED, val));
- }
- }
-}
diff --git a/core/store/hz/net/src/main/java/org/onlab/onos/store/topology/impl/DefaultTopology.java b/core/store/hz/net/src/main/java/org/onlab/onos/store/topology/impl/DefaultTopology.java
deleted file mode 100644
index 5574d27..0000000
--- a/core/store/hz/net/src/main/java/org/onlab/onos/store/topology/impl/DefaultTopology.java
+++ /dev/null
@@ -1,444 +0,0 @@
-package org.onlab.onos.store.topology.impl;
-
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.ImmutableSetMultimap;
-import org.onlab.graph.DijkstraGraphSearch;
-import org.onlab.graph.GraphPathSearch;
-import org.onlab.graph.TarjanGraphSearch;
-import org.onlab.onos.net.AbstractModel;
-import org.onlab.onos.net.ConnectPoint;
-import org.onlab.onos.net.DefaultPath;
-import org.onlab.onos.net.DeviceId;
-import org.onlab.onos.net.Link;
-import org.onlab.onos.net.Path;
-import org.onlab.onos.net.provider.ProviderId;
-import org.onlab.onos.net.topology.ClusterId;
-import org.onlab.onos.net.topology.DefaultTopologyCluster;
-import org.onlab.onos.net.topology.DefaultTopologyVertex;
-import org.onlab.onos.net.topology.GraphDescription;
-import org.onlab.onos.net.topology.LinkWeight;
-import org.onlab.onos.net.topology.Topology;
-import org.onlab.onos.net.topology.TopologyCluster;
-import org.onlab.onos.net.topology.TopologyEdge;
-import org.onlab.onos.net.topology.TopologyGraph;
-import org.onlab.onos.net.topology.TopologyVertex;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import static com.google.common.base.MoreObjects.toStringHelper;
-import static com.google.common.collect.ImmutableSetMultimap.Builder;
-import static org.onlab.graph.GraphPathSearch.Result;
-import static org.onlab.graph.TarjanGraphSearch.SCCResult;
-import static org.onlab.onos.net.Link.Type.INDIRECT;
-
-/**
- * Default implementation of the topology descriptor. This carries the
- * backing topology data.
- */
-public class DefaultTopology extends AbstractModel implements Topology {
-
- private static final DijkstraGraphSearch<TopologyVertex, TopologyEdge> DIJKSTRA =
- new DijkstraGraphSearch<>();
- private static final TarjanGraphSearch<TopologyVertex, TopologyEdge> TARJAN =
- new TarjanGraphSearch<>();
-
- private static final ProviderId PID = new ProviderId("core", "org.onlab.onos.net");
-
- private final long time;
- private final TopologyGraph graph;
-
- private final SCCResult<TopologyVertex, TopologyEdge> clusterResults;
- private final ImmutableMap<DeviceId, Result<TopologyVertex, TopologyEdge>> results;
- private final ImmutableSetMultimap<PathKey, Path> paths;
-
- private final ImmutableMap<ClusterId, TopologyCluster> clusters;
- private final ImmutableSet<ConnectPoint> infrastructurePoints;
- private final ImmutableSetMultimap<ClusterId, ConnectPoint> broadcastSets;
-
- private ImmutableMap<DeviceId, TopologyCluster> clustersByDevice;
- private ImmutableSetMultimap<TopologyCluster, DeviceId> devicesByCluster;
- private ImmutableSetMultimap<TopologyCluster, Link> linksByCluster;
-
-
- /**
- * Creates a topology descriptor attributed to the specified provider.
- *
- * @param providerId identity of the provider
- * @param description data describing the new topology
- */
- DefaultTopology(ProviderId providerId, GraphDescription description) {
- super(providerId);
- this.time = description.timestamp();
-
- // Build the graph
- this.graph = new DefaultTopologyGraph(description.vertexes(),
- description.edges());
-
- this.results = searchForShortestPaths();
- this.paths = buildPaths();
-
- this.clusterResults = searchForClusters();
- this.clusters = buildTopologyClusters();
-
- buildIndexes();
-
- this.broadcastSets = buildBroadcastSets();
- this.infrastructurePoints = findInfrastructurePoints();
- }
-
- @Override
- public long time() {
- return time;
- }
-
- @Override
- public int clusterCount() {
- return clusters.size();
- }
-
- @Override
- public int deviceCount() {
- return graph.getVertexes().size();
- }
-
- @Override
- public int linkCount() {
- return graph.getEdges().size();
- }
-
- @Override
- public int pathCount() {
- return paths.size();
- }
-
- /**
- * Returns the backing topology graph.
- *
- * @return topology graph
- */
- TopologyGraph getGraph() {
- return graph;
- }
-
- /**
- * Returns the set of topology clusters.
- *
- * @return set of clusters
- */
- Set<TopologyCluster> getClusters() {
- return ImmutableSet.copyOf(clusters.values());
- }
-
- /**
- * Returns the specified topology cluster.
- *
- * @param clusterId cluster identifier
- * @return topology cluster
- */
- TopologyCluster getCluster(ClusterId clusterId) {
- return clusters.get(clusterId);
- }
-
- /**
- * Returns the topology cluster that contains the given device.
- *
- * @param deviceId device identifier
- * @return topology cluster
- */
- TopologyCluster getCluster(DeviceId deviceId) {
- return clustersByDevice.get(deviceId);
- }
-
- /**
- * Returns the set of cluster devices.
- *
- * @param cluster topology cluster
- * @return cluster devices
- */
- Set<DeviceId> getClusterDevices(TopologyCluster cluster) {
- return devicesByCluster.get(cluster);
- }
-
- /**
- * Returns the set of cluster links.
- *
- * @param cluster topology cluster
- * @return cluster links
- */
- Set<Link> getClusterLinks(TopologyCluster cluster) {
- return linksByCluster.get(cluster);
- }
-
- /**
- * Indicates whether the given point is an infrastructure link end-point.
- *
- * @param connectPoint connection point
- * @return true if infrastructure
- */
- boolean isInfrastructure(ConnectPoint connectPoint) {
- return infrastructurePoints.contains(connectPoint);
- }
-
- /**
- * Indicates whether the given point is part of a broadcast set.
- *
- * @param connectPoint connection point
- * @return true if in broadcast set
- */
- boolean isBroadcastPoint(ConnectPoint connectPoint) {
- // Any non-infrastructure, i.e. edge points are assumed to be OK.
- if (!isInfrastructure(connectPoint)) {
- return true;
- }
-
- // Find the cluster to which the device belongs.
- TopologyCluster cluster = clustersByDevice.get(connectPoint.deviceId());
- if (cluster == null) {
- throw new IllegalArgumentException("No cluster found for device " + connectPoint.deviceId());
- }
-
- // If the broadcast set is null or empty, or if the point explicitly
- // belongs to it, return true;
- Set<ConnectPoint> points = broadcastSets.get(cluster.id());
- return points == null || points.isEmpty() || points.contains(connectPoint);
- }
-
- /**
- * Returns the size of the cluster broadcast set.
- *
- * @param clusterId cluster identifier
- * @return size of the cluster broadcast set
- */
- int broadcastSetSize(ClusterId clusterId) {
- return broadcastSets.get(clusterId).size();
- }
-
- /**
- * Returns the set of pre-computed shortest paths between source and
- * destination devices.
- *
- * @param src source device
- * @param dst destination device
- * @return set of shortest paths
- */
- Set<Path> getPaths(DeviceId src, DeviceId dst) {
- return paths.get(new PathKey(src, dst));
- }
-
- /**
- * Computes on-demand the set of shortest paths between source and
- * destination devices.
- *
- * @param src source device
- * @param dst destination device
- * @return set of shortest paths
- */
- Set<Path> getPaths(DeviceId src, DeviceId dst, LinkWeight weight) {
- GraphPathSearch.Result<TopologyVertex, TopologyEdge> result =
- DIJKSTRA.search(graph, new DefaultTopologyVertex(src),
- new DefaultTopologyVertex(dst), weight);
- ImmutableSet.Builder<Path> builder = ImmutableSet.builder();
- for (org.onlab.graph.Path<TopologyVertex, TopologyEdge> path : result.paths()) {
- builder.add(networkPath(path));
- }
- return builder.build();
- }
-
-
- // Searches the graph for all shortest paths and returns the search results.
- private ImmutableMap<DeviceId, Result<TopologyVertex, TopologyEdge>> searchForShortestPaths() {
- ImmutableMap.Builder<DeviceId, Result<TopologyVertex, TopologyEdge>> builder = ImmutableMap.builder();
-
- // Search graph paths for each source to all destinations.
- LinkWeight weight = new HopCountLinkWeight(graph.getVertexes().size());
- for (TopologyVertex src : graph.getVertexes()) {
- builder.put(src.deviceId(), DIJKSTRA.search(graph, src, null, weight));
- }
- return builder.build();
- }
-
- // Builds network paths from the graph path search results
- private ImmutableSetMultimap<PathKey, Path> buildPaths() {
- Builder<PathKey, Path> builder = ImmutableSetMultimap.builder();
- for (DeviceId deviceId : results.keySet()) {
- Result<TopologyVertex, TopologyEdge> result = results.get(deviceId);
- for (org.onlab.graph.Path<TopologyVertex, TopologyEdge> path : result.paths()) {
- builder.put(new PathKey(path.src().deviceId(), path.dst().deviceId()),
- networkPath(path));
- }
- }
- return builder.build();
- }
-
- // Converts graph path to a network path with the same cost.
- private Path networkPath(org.onlab.graph.Path<TopologyVertex, TopologyEdge> path) {
- List<Link> links = new ArrayList<>();
- for (TopologyEdge edge : path.edges()) {
- links.add(edge.link());
- }
- return new DefaultPath(PID, links, path.cost());
- }
-
-
- // Searches for SCC clusters in the network topology graph using Tarjan
- // algorithm.
- private SCCResult<TopologyVertex, TopologyEdge> searchForClusters() {
- return TARJAN.search(graph, new NoIndirectLinksWeight());
- }
-
- // Builds the topology clusters and returns the id-cluster bindings.
- private ImmutableMap<ClusterId, TopologyCluster> buildTopologyClusters() {
- ImmutableMap.Builder<ClusterId, TopologyCluster> clusterBuilder = ImmutableMap.builder();
- SCCResult<TopologyVertex, TopologyEdge> result =
- TARJAN.search(graph, new NoIndirectLinksWeight());
-
- // Extract both vertexes and edges from the results; the lists form
- // pairs along the same index.
- List<Set<TopologyVertex>> clusterVertexes = result.clusterVertexes();
- List<Set<TopologyEdge>> clusterEdges = result.clusterEdges();
-
- // Scan over the lists and create a cluster from the results.
- for (int i = 0, n = result.clusterCount(); i < n; i++) {
- Set<TopologyVertex> vertexSet = clusterVertexes.get(i);
- Set<TopologyEdge> edgeSet = clusterEdges.get(i);
-
- ClusterId cid = ClusterId.clusterId(i);
- DefaultTopologyCluster cluster =
- new DefaultTopologyCluster(cid, vertexSet.size(), edgeSet.size(),
- findRoot(vertexSet).deviceId());
- clusterBuilder.put(cid, cluster);
- }
- return clusterBuilder.build();
- }
-
- // Finds the vertex whose device id is the lexicographical minimum in the
- // specified set.
- private TopologyVertex findRoot(Set<TopologyVertex> vertexSet) {
- TopologyVertex minVertex = null;
- for (TopologyVertex vertex : vertexSet) {
- if (minVertex == null ||
- minVertex.deviceId().toString()
- .compareTo(minVertex.deviceId().toString()) < 0) {
- minVertex = vertex;
- }
- }
- return minVertex;
- }
-
- // Processes a map of broadcast sets for each cluster.
- private ImmutableSetMultimap<ClusterId, ConnectPoint> buildBroadcastSets() {
- Builder<ClusterId, ConnectPoint> builder = ImmutableSetMultimap.builder();
- for (TopologyCluster cluster : clusters.values()) {
- addClusterBroadcastSet(cluster, builder);
- }
- return builder.build();
- }
-
- // Finds all broadcast points for the cluster. These are those connection
- // points which lie along the shortest paths between the cluster root and
- // all other devices within the cluster.
- private void addClusterBroadcastSet(TopologyCluster cluster,
- Builder<ClusterId, ConnectPoint> builder) {
- // Use the graph root search results to build the broadcast set.
- Result<TopologyVertex, TopologyEdge> result = results.get(cluster.root());
- for (Map.Entry<TopologyVertex, Set<TopologyEdge>> entry : result.parents().entrySet()) {
- TopologyVertex vertex = entry.getKey();
-
- // Ignore any parents that lead outside the cluster.
- if (clustersByDevice.get(vertex.deviceId()) != cluster) {
- continue;
- }
-
- // Ignore any back-link sets that are empty.
- Set<TopologyEdge> parents = entry.getValue();
- if (parents.isEmpty()) {
- continue;
- }
-
- // Use the first back-link's source and destination to add to the
- // broadcast set.
- Link link = parents.iterator().next().link();
- builder.put(cluster.id(), link.src());
- builder.put(cluster.id(), link.dst());
- }
- }
-
- // Collects and returns a set of all infrastructure link end-points.
- private ImmutableSet<ConnectPoint> findInfrastructurePoints() {
- ImmutableSet.Builder<ConnectPoint> builder = ImmutableSet.builder();
- for (TopologyEdge edge : graph.getEdges()) {
- builder.add(edge.link().src());
- builder.add(edge.link().dst());
- }
- return builder.build();
- }
-
- // Builds cluster-devices, cluster-links and device-cluster indexes.
- private void buildIndexes() {
- // Prepare the index builders
- ImmutableMap.Builder<DeviceId, TopologyCluster> clusterBuilder = ImmutableMap.builder();
- ImmutableSetMultimap.Builder<TopologyCluster, DeviceId> devicesBuilder = ImmutableSetMultimap.builder();
- ImmutableSetMultimap.Builder<TopologyCluster, Link> linksBuilder = ImmutableSetMultimap.builder();
-
- // Now scan through all the clusters
- for (TopologyCluster cluster : clusters.values()) {
- int i = cluster.id().index();
-
- // Scan through all the cluster vertexes.
- for (TopologyVertex vertex : clusterResults.clusterVertexes().get(i)) {
- devicesBuilder.put(cluster, vertex.deviceId());
- clusterBuilder.put(vertex.deviceId(), cluster);
- }
-
- // Scan through all the cluster edges.
- for (TopologyEdge edge : clusterResults.clusterEdges().get(i)) {
- linksBuilder.put(cluster, edge.link());
- }
- }
-
- // Finalize all indexes.
- clustersByDevice = clusterBuilder.build();
- devicesByCluster = devicesBuilder.build();
- linksByCluster = linksBuilder.build();
- }
-
- // Link weight that measures link cost as hop count, with indirect links
- // made as expensive as traversing the entire graph to assume the worst.
- private static class HopCountLinkWeight implements LinkWeight {
- private final int indirectLinkCost;
-
- HopCountLinkWeight(int indirectLinkCost) {
- this.indirectLinkCost = indirectLinkCost;
- }
-
- @Override
- public double weight(TopologyEdge edge) {
- // To force a preference for direct paths, make indirect links as
- // expensive as the linear vertex traversal.
- return edge.link().type() == INDIRECT ? indirectLinkCost : 1;
- }
- }
-
- // Link weight for preventing traversal over indirect links.
- private static class NoIndirectLinksWeight implements LinkWeight {
- @Override
- public double weight(TopologyEdge edge) {
- return edge.link().type() == INDIRECT ? -1 : 1;
- }
- }
-
- @Override
- public String toString() {
- return toStringHelper(this)
- .add("time", time)
- .add("clusters", clusterCount())
- .add("devices", deviceCount())
- .add("links", linkCount())
- .add("pathCount", pathCount())
- .toString();
- }
-}
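A minimal, hypothetical sketch of how the link weights above combine with the Dijkstra search used at the top of this hunk; it assumes the graph exposes a getVertexes() accessor and that 'src' is a TopologyVertex, so the names are illustrative rather than taken from the patched sources.

    // Hop-count weight sized to the vertex count, then a single-source search
    // rooted at 'src' (a null destination computes paths to all other vertexes).
    LinkWeight weight = new HopCountLinkWeight(graph.getVertexes().size());
    Result<TopologyVertex, TopologyEdge> result =
            DIJKSTRA.search(graph, src, null, weight);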
diff --git a/core/store/hz/net/src/main/java/org/onlab/onos/store/topology/impl/DefaultTopologyGraph.java b/core/store/hz/net/src/main/java/org/onlab/onos/store/topology/impl/DefaultTopologyGraph.java
deleted file mode 100644
index 945ba05..0000000
--- a/core/store/hz/net/src/main/java/org/onlab/onos/store/topology/impl/DefaultTopologyGraph.java
+++ /dev/null
@@ -1,28 +0,0 @@
-package org.onlab.onos.store.topology.impl;
-
-import org.onlab.graph.AdjacencyListsGraph;
-import org.onlab.onos.net.topology.TopologyEdge;
-import org.onlab.onos.net.topology.TopologyGraph;
-import org.onlab.onos.net.topology.TopologyVertex;
-
-import java.util.Set;
-
-/**
- * Default implementation of an immutable topology graph based on a generic
- * implementation of adjacency lists graph.
- */
-public class DefaultTopologyGraph
- extends AdjacencyListsGraph<TopologyVertex, TopologyEdge>
- implements TopologyGraph {
-
- /**
- * Creates a topology graph comprising the specified vertexes and edges.
- *
- * @param vertexes set of graph vertexes
- * @param edges set of graph edges
- */
- public DefaultTopologyGraph(Set<TopologyVertex> vertexes, Set<TopologyEdge> edges) {
- super(vertexes, edges);
- }
-
-}
diff --git a/core/store/hz/net/src/main/java/org/onlab/onos/store/topology/impl/DistributedTopologyStore.java b/core/store/hz/net/src/main/java/org/onlab/onos/store/topology/impl/DistributedTopologyStore.java
deleted file mode 100644
index 04f5fce..0000000
--- a/core/store/hz/net/src/main/java/org/onlab/onos/store/topology/impl/DistributedTopologyStore.java
+++ /dev/null
@@ -1,142 +0,0 @@
-package org.onlab.onos.store.topology.impl;
-
-import static org.slf4j.LoggerFactory.getLogger;
-
-import java.util.List;
-import java.util.Set;
-
-import org.apache.felix.scr.annotations.Activate;
-import org.apache.felix.scr.annotations.Component;
-import org.apache.felix.scr.annotations.Deactivate;
-import org.apache.felix.scr.annotations.Service;
-import org.onlab.onos.event.Event;
-import org.onlab.onos.net.ConnectPoint;
-import org.onlab.onos.net.DeviceId;
-import org.onlab.onos.net.Link;
-import org.onlab.onos.net.Path;
-import org.onlab.onos.net.provider.ProviderId;
-import org.onlab.onos.net.topology.ClusterId;
-import org.onlab.onos.net.topology.GraphDescription;
-import org.onlab.onos.net.topology.LinkWeight;
-import org.onlab.onos.net.topology.Topology;
-import org.onlab.onos.net.topology.TopologyCluster;
-import org.onlab.onos.net.topology.TopologyEvent;
-import org.onlab.onos.net.topology.TopologyGraph;
-import org.onlab.onos.net.topology.TopologyStore;
-import org.onlab.onos.net.topology.TopologyStoreDelegate;
-import org.onlab.onos.store.AbstractStore;
-import org.slf4j.Logger;
-
-/**
- * TEMPORARY: Manages inventory of topology snapshots using a distributed
- * structures implementation.
- */
-//FIXME: I LIE I AM NOT DISTRIBUTED
-@Component(immediate = true)
-@Service
-public class DistributedTopologyStore
- extends AbstractStore<TopologyEvent, TopologyStoreDelegate>
- implements TopologyStore {
-
- private final Logger log = getLogger(getClass());
-
- private volatile DefaultTopology current;
-
- @Activate
- public void activate() {
- log.info("Started");
- }
-
- @Deactivate
- public void deactivate() {
- log.info("Stopped");
- }
-
- @Override
- public Topology currentTopology() {
- return current;
- }
-
- @Override
- public boolean isLatest(Topology topology) {
- // Topology is current only if it is the same as our current topology
- return topology == current;
- }
-
- @Override
- public TopologyGraph getGraph(Topology topology) {
- return defaultTopology(topology).getGraph();
- }
-
- @Override
- public Set<TopologyCluster> getClusters(Topology topology) {
- return defaultTopology(topology).getClusters();
- }
-
- @Override
- public TopologyCluster getCluster(Topology topology, ClusterId clusterId) {
- return defaultTopology(topology).getCluster(clusterId);
- }
-
- @Override
- public Set<DeviceId> getClusterDevices(Topology topology, TopologyCluster cluster) {
- return defaultTopology(topology).getClusterDevices(cluster);
- }
-
- @Override
- public Set<Link> getClusterLinks(Topology topology, TopologyCluster cluster) {
- return defaultTopology(topology).getClusterLinks(cluster);
- }
-
- @Override
- public Set<Path> getPaths(Topology topology, DeviceId src, DeviceId dst) {
- return defaultTopology(topology).getPaths(src, dst);
- }
-
- @Override
- public Set<Path> getPaths(Topology topology, DeviceId src, DeviceId dst,
- LinkWeight weight) {
- return defaultTopology(topology).getPaths(src, dst, weight);
- }
-
- @Override
- public boolean isInfrastructure(Topology topology, ConnectPoint connectPoint) {
- return defaultTopology(topology).isInfrastructure(connectPoint);
- }
-
- @Override
- public boolean isBroadcastPoint(Topology topology, ConnectPoint connectPoint) {
- return defaultTopology(topology).isBroadcastPoint(connectPoint);
- }
-
- @Override
- public TopologyEvent updateTopology(ProviderId providerId,
- GraphDescription graphDescription,
- List<Event> reasons) {
- // First off, make sure that what we're given is indeed newer than
- // what we already have.
- if (current != null && graphDescription.timestamp() < current.time()) {
- return null;
- }
-
- // Have the default topology construct self from the description data.
- DefaultTopology newTopology =
- new DefaultTopology(providerId, graphDescription);
-
- // Promote the new topology to current and return a ready-to-send event.
- synchronized (this) {
- current = newTopology;
- return new TopologyEvent(TopologyEvent.Type.TOPOLOGY_CHANGED,
- current, reasons);
- }
- }
-
- // Validates the specified topology and returns it as the default implementation.
- private DefaultTopology defaultTopology(Topology topology) {
- if (topology instanceof DefaultTopology) {
- return (DefaultTopology) topology;
- }
- throw new IllegalArgumentException("Topology class " + topology.getClass() +
- " not supported");
- }
-
-}
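A brief, hypothetical sketch of the intended update flow for the store above: updateTopology(...) returns null when the supplied description is not newer than the current snapshot, and a TOPOLOGY_CHANGED event otherwise. The 'store' and 'delegate' names below are assumptions for illustration, with 'delegate' standing in for the registered TopologyStoreDelegate.

    TopologyEvent event = store.updateTopology(providerId, graphDescription, reasons);
    if (event != null) {
        // Only a newer graph description yields an event worth dispatching.
        delegate.notify(event);
    }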
diff --git a/core/store/hz/net/src/main/java/org/onlab/onos/store/topology/impl/PathKey.java b/core/store/hz/net/src/main/java/org/onlab/onos/store/topology/impl/PathKey.java
deleted file mode 100644
index 60736b9..0000000
--- a/core/store/hz/net/src/main/java/org/onlab/onos/store/topology/impl/PathKey.java
+++ /dev/null
@@ -1,40 +0,0 @@
-package org.onlab.onos.store.topology.impl;
-
-import org.onlab.onos.net.DeviceId;
-
-import java.util.Objects;
-
-/**
- * Key for filing pre-computed paths between source and destination devices.
- */
-class PathKey {
- private final DeviceId src;
- private final DeviceId dst;
-
- /**
- * Creates a path key from the given source/dest pair.
- * @param src source device
- * @param dst destination device
- */
- PathKey(DeviceId src, DeviceId dst) {
- this.src = src;
- this.dst = dst;
- }
-
- @Override
- public int hashCode() {
- return Objects.hash(src, dst);
- }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj) {
- return true;
- }
- if (obj instanceof PathKey) {
- final PathKey other = (PathKey) obj;
- return Objects.equals(this.src, other.src) && Objects.equals(this.dst, other.dst);
- }
- return false;
- }
-}
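Because PathKey defines hashCode() and equals(), it can serve directly as the key of the ImmutableSetMultimap<PathKey, Path> built in buildPaths() earlier in this diff; the lookup below is a hypothetical sketch with illustrative names.

    // Fetch the pre-computed paths filed under the src/dst pair.
    Set<Path> precomputed = paths.get(new PathKey(srcDeviceId, dstDeviceId));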
diff --git a/core/store/hz/net/src/test/java/org/onlab/onos/store/device/impl/DistributedDeviceStoreTest.java b/core/store/hz/net/src/test/java/org/onlab/onos/store/device/impl/DistributedDeviceStoreTest.java
deleted file mode 100644
index 7e2924b..0000000
--- a/core/store/hz/net/src/test/java/org/onlab/onos/store/device/impl/DistributedDeviceStoreTest.java
+++ /dev/null
@@ -1,390 +0,0 @@
-package org.onlab.onos.store.device.impl;
-
-import static org.junit.Assert.*;
-import static org.onlab.onos.net.Device.Type.SWITCH;
-import static org.onlab.onos.net.DeviceId.deviceId;
-import static org.onlab.onos.net.device.DeviceEvent.Type.*;
-
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeUnit;
-
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Ignore;
-import org.junit.Test;
-import org.onlab.onos.net.Device;
-import org.onlab.onos.net.DeviceId;
-import org.onlab.onos.net.Port;
-import org.onlab.onos.net.PortNumber;
-import org.onlab.onos.net.device.DefaultDeviceDescription;
-import org.onlab.onos.net.device.DefaultPortDescription;
-import org.onlab.onos.net.device.DeviceDescription;
-import org.onlab.onos.net.device.DeviceEvent;
-import org.onlab.onos.net.device.DeviceStoreDelegate;
-import org.onlab.onos.net.device.PortDescription;
-import org.onlab.onos.net.provider.ProviderId;
-import org.onlab.onos.store.common.StoreManager;
-import org.onlab.onos.store.common.StoreService;
-import org.onlab.onos.store.common.TestStoreManager;
-import com.google.common.collect.Iterables;
-import com.google.common.collect.Sets;
-import com.hazelcast.config.Config;
-import com.hazelcast.core.Hazelcast;
-
-/**
- * Test of the Hazelcast based distributed DeviceStore implementation.
- */
-public class DistributedDeviceStoreTest {
-
- private static final ProviderId PID = new ProviderId("of", "foo");
- private static final DeviceId DID1 = deviceId("of:foo");
- private static final DeviceId DID2 = deviceId("of:bar");
- private static final String MFR = "whitebox";
- private static final String HW = "1.1.x";
- private static final String SW1 = "3.8.1";
- private static final String SW2 = "3.9.5";
- private static final String SN = "43311-12345";
-
- private static final PortNumber P1 = PortNumber.portNumber(1);
- private static final PortNumber P2 = PortNumber.portNumber(2);
- private static final PortNumber P3 = PortNumber.portNumber(3);
-
- private DistributedDeviceStore deviceStore;
-
- private StoreManager storeManager;
-
-
- @BeforeClass
- public static void setUpBeforeClass() throws Exception {
- }
-
- @AfterClass
- public static void tearDownAfterClass() throws Exception {
- }
-
-
- @Before
- public void setUp() throws Exception {
- // TODO should find a way to clean Hazelcast instance without shutdown.
- Config config = TestStoreManager.getTestConfig();
-
- storeManager = new TestStoreManager(Hazelcast.newHazelcastInstance(config));
- storeManager.activate();
-
- deviceStore = new TestDistributedDeviceStore(storeManager);
- deviceStore.activate();
- }
-
- @After
- public void tearDown() throws Exception {
- deviceStore.deactivate();
-
- storeManager.deactivate();
- }
-
- private void putDevice(DeviceId deviceId, String swVersion) {
- DeviceDescription description =
- new DefaultDeviceDescription(deviceId.uri(), SWITCH, MFR,
- HW, swVersion, SN);
- deviceStore.createOrUpdateDevice(PID, deviceId, description);
- }
-
- private static void assertDevice(DeviceId id, String swVersion, Device device) {
- assertNotNull(device);
- assertEquals(id, device.id());
- assertEquals(MFR, device.manufacturer());
- assertEquals(HW, device.hwVersion());
- assertEquals(swVersion, device.swVersion());
- assertEquals(SN, device.serialNumber());
- }
-
- @Test
- public final void testGetDeviceCount() {
- assertEquals("initially empty", 0, deviceStore.getDeviceCount());
-
- putDevice(DID1, SW1);
- putDevice(DID2, SW2);
- putDevice(DID1, SW1);
-
- assertEquals("expect 2 unique devices", 2, deviceStore.getDeviceCount());
- }
-
- @Test
- public final void testGetDevices() {
- assertEquals("initially empty", 0, Iterables.size(deviceStore.getDevices()));
-
- putDevice(DID1, SW1);
- putDevice(DID2, SW2);
- putDevice(DID1, SW1);
-
- assertEquals("expect 2 unique devices",
- 2, Iterables.size(deviceStore.getDevices()));
-
- Map<DeviceId, Device> devices = new HashMap<>();
- for (Device device : deviceStore.getDevices()) {
- devices.put(device.id(), device);
- }
-
- assertDevice(DID1, SW1, devices.get(DID1));
- assertDevice(DID2, SW2, devices.get(DID2));
-
- // add case for new node?
- }
-
- @Test
- public final void testGetDevice() {
-
- putDevice(DID1, SW1);
-
- assertDevice(DID1, SW1, deviceStore.getDevice(DID1));
- assertNull("DID2 shouldn't be there", deviceStore.getDevice(DID2));
- }
-
- @Test
- public final void testCreateOrUpdateDevice() {
- DeviceDescription description =
- new DefaultDeviceDescription(DID1.uri(), SWITCH, MFR,
- HW, SW1, SN);
- DeviceEvent event = deviceStore.createOrUpdateDevice(PID, DID1, description);
- assertEquals(DEVICE_ADDED, event.type());
- assertDevice(DID1, SW1, event.subject());
-
- DeviceDescription description2 =
- new DefaultDeviceDescription(DID1.uri(), SWITCH, MFR,
- HW, SW2, SN);
- DeviceEvent event2 = deviceStore.createOrUpdateDevice(PID, DID1, description2);
- assertEquals(DEVICE_UPDATED, event2.type());
- assertDevice(DID1, SW2, event2.subject());
-
- assertNull("No change expected", deviceStore.createOrUpdateDevice(PID, DID1, description2));
- }
-
- @Test
- public final void testMarkOffline() {
-
- putDevice(DID1, SW1);
- assertTrue(deviceStore.isAvailable(DID1));
-
- DeviceEvent event = deviceStore.markOffline(DID1);
- assertEquals(DEVICE_AVAILABILITY_CHANGED, event.type());
- assertDevice(DID1, SW1, event.subject());
- assertFalse(deviceStore.isAvailable(DID1));
-
- DeviceEvent event2 = deviceStore.markOffline(DID1);
- assertNull("No change, no event", event2);
- }
-
- @Test
- public final void testUpdatePorts() {
- putDevice(DID1, SW1);
- List<PortDescription> pds = Arrays.<PortDescription>asList(
- new DefaultPortDescription(P1, true),
- new DefaultPortDescription(P2, true)
- );
-
- List<DeviceEvent> events = deviceStore.updatePorts(PID, DID1, pds);
-
- Set<PortNumber> expectedPorts = Sets.newHashSet(P1, P2);
- for (DeviceEvent event : events) {
- assertEquals(PORT_ADDED, event.type());
- assertDevice(DID1, SW1, event.subject());
- assertTrue("PortNumber is one of the expected ports",
- expectedPorts.remove(event.port().number()));
- assertTrue("Port is enabled", event.port().isEnabled());
- }
- assertTrue("Events for all expected ports appeared", expectedPorts.isEmpty());
-
-
- List<PortDescription> pds2 = Arrays.<PortDescription>asList(
- new DefaultPortDescription(P1, false),
- new DefaultPortDescription(P2, true),
- new DefaultPortDescription(P3, true)
- );
-
- events = deviceStore.updatePorts(PID, DID1, pds2);
- assertFalse("event should be triggered", events.isEmpty());
- for (DeviceEvent event : events) {
- PortNumber num = event.port().number();
- if (P1.equals(num)) {
- assertEquals(PORT_UPDATED, event.type());
- assertDevice(DID1, SW1, event.subject());
- assertFalse("Port is disabled", event.port().isEnabled());
- } else if (P2.equals(num)) {
- fail("P2 event not expected.");
- } else if (P3.equals(num)) {
- assertEquals(PORT_ADDED, event.type());
- assertDevice(DID1, SW1, event.subject());
- assertTrue("Port is enabled", event.port().isEnabled());
- } else {
- fail("Unknown port number encountered: " + num);
- }
- }
-
- List<PortDescription> pds3 = Arrays.<PortDescription>asList(
- new DefaultPortDescription(P1, false),
- new DefaultPortDescription(P2, true)
- );
- events = deviceStore.updatePorts(PID, DID1, pds3);
- assertFalse("event should be triggered", events.isEmpty());
- for (DeviceEvent event : events) {
- PortNumber num = event.port().number();
- if (P1.equals(num)) {
- fail("P1 event not expected.");
- } else if (P2.equals(num)) {
- fail("P2 event not expected.");
- } else if (P3.equals(num)) {
- assertEquals(PORT_REMOVED, event.type());
- assertDevice(DID1, SW1, event.subject());
- assertTrue("Port was enabled", event.port().isEnabled());
- } else {
- fail("Unknown port number encountered: " + num);
- }
- }
-
- }
-
- @Test
- public final void testUpdatePortStatus() {
- putDevice(DID1, SW1);
- List<PortDescription> pds = Arrays.<PortDescription>asList(
- new DefaultPortDescription(P1, true)
- );
- deviceStore.updatePorts(PID, DID1, pds);
-
- DeviceEvent event = deviceStore.updatePortStatus(PID, DID1,
- new DefaultPortDescription(P1, false));
- assertEquals(PORT_UPDATED, event.type());
- assertDevice(DID1, SW1, event.subject());
- assertEquals(P1, event.port().number());
- assertFalse("Port is disabled", event.port().isEnabled());
- }
-
- @Test
- public final void testGetPorts() {
- putDevice(DID1, SW1);
- putDevice(DID2, SW1);
- List<PortDescription> pds = Arrays.<PortDescription>asList(
- new DefaultPortDescription(P1, true),
- new DefaultPortDescription(P2, true)
- );
- deviceStore.updatePorts(PID, DID1, pds);
-
- Set<PortNumber> expectedPorts = Sets.newHashSet(P1, P2);
- List<Port> ports = deviceStore.getPorts(DID1);
- for (Port port : ports) {
- assertTrue("Port is enabled", port.isEnabled());
- assertTrue("PortNumber is one of the expected ports",
- expectedPorts.remove(port.number()));
- }
- assertTrue("Events for all expected ports appeared", expectedPorts.isEmpty());
-
-
- assertTrue("DID2 has no ports", deviceStore.getPorts(DID2).isEmpty());
- }
-
- @Test
- public final void testGetPort() {
- putDevice(DID1, SW1);
- putDevice(DID2, SW1);
- List<PortDescription> pds = Arrays.<PortDescription>asList(
- new DefaultPortDescription(P1, true),
- new DefaultPortDescription(P2, false)
- );
- deviceStore.updatePorts(PID, DID1, pds);
-
- Port port1 = deviceStore.getPort(DID1, P1);
- assertEquals(P1, port1.number());
- assertTrue("Port is enabled", port1.isEnabled());
-
- Port port2 = deviceStore.getPort(DID1, P2);
- assertEquals(P2, port2.number());
- assertFalse("Port is disabled", port2.isEnabled());
-
- Port port3 = deviceStore.getPort(DID1, P3);
- assertNull("P3 not expected", port3);
- }
-
- @Test
- public final void testRemoveDevice() {
- putDevice(DID1, SW1);
- putDevice(DID2, SW1);
-
- assertEquals(2, deviceStore.getDeviceCount());
-
- DeviceEvent event = deviceStore.removeDevice(DID1);
- assertEquals(DEVICE_REMOVED, event.type());
- assertDevice(DID1, SW1, event.subject());
-
- assertEquals(1, deviceStore.getDeviceCount());
- }
-
- // TODO add test for Port events when we have them
- @Ignore("Ignore until Delegate spec. is clear.")
- @Test
- public final void testEvents() throws InterruptedException {
- final CountDownLatch addLatch = new CountDownLatch(1);
- DeviceStoreDelegate checkAdd = new DeviceStoreDelegate() {
- @Override
- public void notify(DeviceEvent event) {
- assertEquals(DEVICE_ADDED, event.type());
- assertDevice(DID1, SW1, event.subject());
- addLatch.countDown();
- }
- };
- final CountDownLatch updateLatch = new CountDownLatch(1);
- DeviceStoreDelegate checkUpdate = new DeviceStoreDelegate() {
- @Override
- public void notify(DeviceEvent event) {
- assertEquals(DEVICE_UPDATED, event.type());
- assertDevice(DID1, SW2, event.subject());
- updateLatch.countDown();
- }
- };
- final CountDownLatch removeLatch = new CountDownLatch(1);
- DeviceStoreDelegate checkRemove = new DeviceStoreDelegate() {
- @Override
- public void notify(DeviceEvent event) {
- assertEquals(DEVICE_REMOVED, event.type());
- assertDevice(DID1, SW2, event.subject());
- removeLatch.countDown();
- }
- };
-
- DeviceDescription description =
- new DefaultDeviceDescription(DID1.uri(), SWITCH, MFR,
- HW, SW1, SN);
- deviceStore.setDelegate(checkAdd);
- deviceStore.createOrUpdateDevice(PID, DID1, description);
- assertTrue("Add event fired", addLatch.await(1, TimeUnit.SECONDS));
-
-
- DeviceDescription description2 =
- new DefaultDeviceDescription(DID1.uri(), SWITCH, MFR,
- HW, SW2, SN);
- deviceStore.unsetDelegate(checkAdd);
- deviceStore.setDelegate(checkUpdate);
- deviceStore.createOrUpdateDevice(PID, DID1, description2);
- assertTrue("Update event fired", updateLatch.await(1, TimeUnit.SECONDS));
-
- deviceStore.unsetDelegate(checkUpdate);
- deviceStore.setDelegate(checkRemove);
- deviceStore.removeDevice(DID1);
- assertTrue("Remove event fired", removeLatch.await(1, TimeUnit.SECONDS));
- }
-
- private class TestDistributedDeviceStore extends DistributedDeviceStore {
- public TestDistributedDeviceStore(StoreService storeService) {
- this.storeService = storeService;
- }
- }
-}
diff --git a/core/store/hz/net/src/test/java/org/onlab/onos/store/link/impl/DistributedLinkStoreTest.java b/core/store/hz/net/src/test/java/org/onlab/onos/store/link/impl/DistributedLinkStoreTest.java
deleted file mode 100644
index 7415fed..0000000
--- a/core/store/hz/net/src/test/java/org/onlab/onos/store/link/impl/DistributedLinkStoreTest.java
+++ /dev/null
@@ -1,361 +0,0 @@
-package org.onlab.onos.store.link.impl;
-
-import static org.junit.Assert.*;
-import static org.onlab.onos.net.DeviceId.deviceId;
-import static org.onlab.onos.net.Link.Type.*;
-import static org.onlab.onos.net.LinkKey.linkKey;
-import static org.onlab.onos.net.link.LinkEvent.Type.*;
-
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeUnit;
-
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Ignore;
-import org.junit.Test;
-import org.onlab.onos.net.ConnectPoint;
-import org.onlab.onos.net.DeviceId;
-import org.onlab.onos.net.Link;
-import org.onlab.onos.net.LinkKey;
-import org.onlab.onos.net.PortNumber;
-import org.onlab.onos.net.Link.Type;
-import org.onlab.onos.net.link.DefaultLinkDescription;
-import org.onlab.onos.net.link.LinkEvent;
-import org.onlab.onos.net.link.LinkStoreDelegate;
-import org.onlab.onos.net.provider.ProviderId;
-import org.onlab.onos.store.common.StoreManager;
-import org.onlab.onos.store.common.StoreService;
-import org.onlab.onos.store.common.TestStoreManager;
-import com.google.common.collect.Iterables;
-import com.hazelcast.config.Config;
-import com.hazelcast.core.Hazelcast;
-
-/**
- * Test of the Hazelcast based distributed LinkStore implementation.
- */
-public class DistributedLinkStoreTest {
-
- private static final ProviderId PID = new ProviderId("of", "foo");
- private static final DeviceId DID1 = deviceId("of:foo");
- private static final DeviceId DID2 = deviceId("of:bar");
-
- private static final PortNumber P1 = PortNumber.portNumber(1);
- private static final PortNumber P2 = PortNumber.portNumber(2);
- private static final PortNumber P3 = PortNumber.portNumber(3);
-
- private StoreManager storeManager;
-
- private DistributedLinkStore linkStore;
-
- @BeforeClass
- public static void setUpBeforeClass() throws Exception {
- }
-
- @AfterClass
- public static void tearDownAfterClass() throws Exception {
- }
-
- @Before
- public void setUp() throws Exception {
- // TODO should find a way to clean Hazelcast instance without shutdown.
- Config config = TestStoreManager.getTestConfig();
-
- storeManager = new TestStoreManager(Hazelcast.newHazelcastInstance(config));
- storeManager.activate();
-
- linkStore = new TestDistributedLinkStore(storeManager);
- linkStore.activate();
- }
-
- @After
- public void tearDown() throws Exception {
- linkStore.deactivate();
- storeManager.deactivate();
- }
-
- private void putLink(DeviceId srcId, PortNumber srcNum,
- DeviceId dstId, PortNumber dstNum, Type type) {
- ConnectPoint src = new ConnectPoint(srcId, srcNum);
- ConnectPoint dst = new ConnectPoint(dstId, dstNum);
- linkStore.createOrUpdateLink(PID, new DefaultLinkDescription(src, dst, type));
- }
-
- private void putLink(LinkKey key, Type type) {
- putLink(key.src().deviceId(), key.src().port(),
- key.dst().deviceId(), key.dst().port(),
- type);
- }
-
- private static void assertLink(DeviceId srcId, PortNumber srcNum,
- DeviceId dstId, PortNumber dstNum, Type type,
- Link link) {
- assertEquals(srcId, link.src().deviceId());
- assertEquals(srcNum, link.src().port());
- assertEquals(dstId, link.dst().deviceId());
- assertEquals(dstNum, link.dst().port());
- assertEquals(type, link.type());
- }
-
- private static void assertLink(LinkKey key, Type type, Link link) {
- assertLink(key.src().deviceId(), key.src().port(),
- key.dst().deviceId(), key.dst().port(),
- type, link);
- }
-
- @Test
- public final void testGetLinkCount() {
- assertEquals("initially empty", 0, linkStore.getLinkCount());
-
- putLink(DID1, P1, DID2, P2, DIRECT);
- putLink(DID2, P2, DID1, P1, DIRECT);
- putLink(DID1, P1, DID2, P2, DIRECT);
-
- assertEquals("expecting 2 unique links", 2, linkStore.getLinkCount());
- }
-
- @Test
- public final void testGetLinks() {
- assertEquals("initially empty", 0,
- Iterables.size(linkStore.getLinks()));
-
- LinkKey linkId1 = linkKey(new ConnectPoint(DID1, P1), new ConnectPoint(DID2, P2));
- LinkKey linkId2 = linkKey(new ConnectPoint(DID2, P2), new ConnectPoint(DID1, P1));
-
- putLink(linkId1, DIRECT);
- putLink(linkId2, DIRECT);
- putLink(linkId1, DIRECT);
-
- assertEquals("expecting 2 unique links", 2,
- Iterables.size(linkStore.getLinks()));
-
- Map<LinkKey, Link> links = new HashMap<>();
- for (Link link : linkStore.getLinks()) {
- links.put(linkKey(link), link);
- }
-
- assertLink(linkId1, DIRECT, links.get(linkId1));
- assertLink(linkId2, DIRECT, links.get(linkId2));
- }
-
- @Test
- public final void testGetDeviceEgressLinks() {
- LinkKey linkId1 = linkKey(new ConnectPoint(DID1, P1), new ConnectPoint(DID2, P2));
- LinkKey linkId2 = linkKey(new ConnectPoint(DID2, P2), new ConnectPoint(DID1, P1));
- LinkKey linkId3 = linkKey(new ConnectPoint(DID1, P2), new ConnectPoint(DID2, P3));
-
- putLink(linkId1, DIRECT);
- putLink(linkId2, DIRECT);
- putLink(linkId3, DIRECT);
-
- // DID1,P1 => DID2,P2
- // DID2,P2 => DID1,P1
- // DID1,P2 => DID2,P3
-
- Set<Link> links1 = linkStore.getDeviceEgressLinks(DID1);
- assertEquals(2, links1.size());
- // check
-
- Set<Link> links2 = linkStore.getDeviceEgressLinks(DID2);
- assertEquals(1, links2.size());
- assertLink(linkId2, DIRECT, links2.iterator().next());
- }
-
- @Test
- public final void testGetDeviceIngressLinks() {
- LinkKey linkId1 = linkKey(new ConnectPoint(DID1, P1), new ConnectPoint(DID2, P2));
- LinkKey linkId2 = linkKey(new ConnectPoint(DID2, P2), new ConnectPoint(DID1, P1));
- LinkKey linkId3 = linkKey(new ConnectPoint(DID1, P2), new ConnectPoint(DID2, P3));
-
- putLink(linkId1, DIRECT);
- putLink(linkId2, DIRECT);
- putLink(linkId3, DIRECT);
-
- // DID1,P1 => DID2,P2
- // DID2,P2 => DID1,P1
- // DID1,P2 => DID2,P3
-
- Set<Link> links1 = linkStore.getDeviceIngressLinks(DID2);
- assertEquals(2, links1.size());
- // check
-
- Set<Link> links2 = linkStore.getDeviceIngressLinks(DID1);
- assertEquals(1, links2.size());
- assertLink(linkId2, DIRECT, links2.iterator().next());
- }
-
- @Test
- public final void testGetLink() {
- ConnectPoint src = new ConnectPoint(DID1, P1);
- ConnectPoint dst = new ConnectPoint(DID2, P2);
- LinkKey linkId1 = linkKey(src, dst);
-
- putLink(linkId1, DIRECT);
-
- Link link = linkStore.getLink(src, dst);
- assertLink(linkId1, DIRECT, link);
-
- assertNull("There shouldn't be a reverse link",
- linkStore.getLink(dst, src));
- }
-
- @Test
- public final void testGetEgressLinks() {
- final ConnectPoint d1P1 = new ConnectPoint(DID1, P1);
- final ConnectPoint d2P2 = new ConnectPoint(DID2, P2);
- LinkKey linkId1 = linkKey(d1P1, d2P2);
- LinkKey linkId2 = linkKey(d2P2, d1P1);
- LinkKey linkId3 = linkKey(new ConnectPoint(DID1, P2), new ConnectPoint(DID2, P3));
-
- putLink(linkId1, DIRECT);
- putLink(linkId2, DIRECT);
- putLink(linkId3, DIRECT);
-
- // DID1,P1 => DID2,P2
- // DID2,P2 => DID1,P1
- // DID1,P2 => DID2,P3
-
- Set<Link> links1 = linkStore.getEgressLinks(d1P1);
- assertEquals(1, links1.size());
- assertLink(linkId1, DIRECT, links1.iterator().next());
-
- Set<Link> links2 = linkStore.getEgressLinks(d2P2);
- assertEquals(1, links2.size());
- assertLink(linkId2, DIRECT, links2.iterator().next());
- }
-
- @Test
- public final void testGetIngressLinks() {
- final ConnectPoint d1P1 = new ConnectPoint(DID1, P1);
- final ConnectPoint d2P2 = new ConnectPoint(DID2, P2);
- LinkKey linkId1 = linkKey(d1P1, d2P2);
- LinkKey linkId2 = linkKey(d2P2, d1P1);
- LinkKey linkId3 = linkKey(new ConnectPoint(DID1, P2), new ConnectPoint(DID2, P3));
-
- putLink(linkId1, DIRECT);
- putLink(linkId2, DIRECT);
- putLink(linkId3, DIRECT);
-
- // DID1,P1 => DID2,P2
- // DID2,P2 => DID1,P1
- // DID1,P2 => DID2,P3
-
- Set<Link> links1 = linkStore.getIngressLinks(d2P2);
- assertEquals(1, links1.size());
- assertLink(linkId1, DIRECT, links1.iterator().next());
-
- Set<Link> links2 = linkStore.getIngressLinks(d1P1);
- assertEquals(1, links2.size());
- assertLink(linkId2, DIRECT, links2.iterator().next());
- }
-
- @Test
- public final void testCreateOrUpdateLink() {
- ConnectPoint src = new ConnectPoint(DID1, P1);
- ConnectPoint dst = new ConnectPoint(DID2, P2);
-
- // add link
- LinkEvent event = linkStore.createOrUpdateLink(PID,
- new DefaultLinkDescription(src, dst, INDIRECT));
-
- assertLink(DID1, P1, DID2, P2, INDIRECT, event.subject());
- assertEquals(LINK_ADDED, event.type());
-
- // update link type
- LinkEvent event2 = linkStore.createOrUpdateLink(PID,
- new DefaultLinkDescription(src, dst, DIRECT));
-
- assertLink(DID1, P1, DID2, P2, DIRECT, event2.subject());
- assertEquals(LINK_UPDATED, event2.type());
-
- // no change
- LinkEvent event3 = linkStore.createOrUpdateLink(PID,
- new DefaultLinkDescription(src, dst, DIRECT));
-
- assertNull("No change event expected", event3);
- }
-
- @Test
- public final void testRemoveLink() {
- final ConnectPoint d1P1 = new ConnectPoint(DID1, P1);
- final ConnectPoint d2P2 = new ConnectPoint(DID2, P2);
- LinkKey linkId1 = linkKey(d1P1, d2P2);
- LinkKey linkId2 = linkKey(d2P2, d1P1);
-
- putLink(linkId1, DIRECT);
- putLink(linkId2, DIRECT);
-
- // DID1,P1 => DID2,P2
- // DID2,P2 => DID1,P1
-
- LinkEvent event = linkStore.removeLink(d1P1, d2P2);
- assertEquals(LINK_REMOVED, event.type());
- LinkEvent event2 = linkStore.removeLink(d1P1, d2P2);
- assertNull(event2);
-
- assertLink(linkId2, DIRECT, linkStore.getLink(d2P2, d1P1));
- }
-
- @Ignore("Ignore until Delegate spec. is clear.")
- @Test
- public final void testEvents() throws InterruptedException {
-
- final ConnectPoint d1P1 = new ConnectPoint(DID1, P1);
- final ConnectPoint d2P2 = new ConnectPoint(DID2, P2);
- final LinkKey linkId1 = linkKey(d1P1, d2P2);
-
- final CountDownLatch addLatch = new CountDownLatch(1);
- LinkStoreDelegate checkAdd = new LinkStoreDelegate() {
- @Override
- public void notify(LinkEvent event) {
- assertEquals(LINK_ADDED, event.type());
- assertLink(linkId1, INDIRECT, event.subject());
- addLatch.countDown();
- }
- };
- final CountDownLatch updateLatch = new CountDownLatch(1);
- LinkStoreDelegate checkUpdate = new LinkStoreDelegate() {
- @Override
- public void notify(LinkEvent event) {
- assertEquals(LINK_UPDATED, event.type());
- assertLink(linkId1, DIRECT, event.subject());
- updateLatch.countDown();
- }
- };
- final CountDownLatch removeLatch = new CountDownLatch(1);
- LinkStoreDelegate checkRemove = new LinkStoreDelegate() {
- @Override
- public void notify(LinkEvent event) {
- assertEquals(LINK_REMOVED, event.type());
- assertLink(linkId1, DIRECT, event.subject());
- removeLatch.countDown();
- }
- };
-
- linkStore.setDelegate(checkAdd);
- putLink(linkId1, INDIRECT);
- assertTrue("Add event fired", addLatch.await(1, TimeUnit.SECONDS));
-
- linkStore.unsetDelegate(checkAdd);
- linkStore.setDelegate(checkUpdate);
- putLink(linkId1, DIRECT);
- assertTrue("Update event fired", updateLatch.await(1, TimeUnit.SECONDS));
-
- linkStore.unsetDelegate(checkUpdate);
- linkStore.setDelegate(checkRemove);
- linkStore.removeLink(d1P1, d2P2);
- assertTrue("Remove event fired", removeLatch.await(1, TimeUnit.SECONDS));
- }
-
-
- class TestDistributedLinkStore extends DistributedLinkStore {
- TestDistributedLinkStore(StoreService storeService) {
- this.storeService = storeService;
- }
- }
-}
diff --git a/core/store/hz/pom.xml b/core/store/hz/pom.xml
index d6aa1fe..dae40d4 100644
--- a/core/store/hz/pom.xml
+++ b/core/store/hz/pom.xml
@@ -17,35 +17,13 @@
<modules>
<module>common</module>
<module>cluster</module>
- <module>net</module>
</modules>
<dependencies>
<dependency>
- <groupId>com.google.guava</groupId>
- <artifactId>guava</artifactId>
- </dependency>
- <dependency>
- <groupId>org.onlab.onos</groupId>
- <artifactId>onlab-misc</artifactId>
- </dependency>
- <dependency>
- <groupId>org.onlab.onos</groupId>
- <artifactId>onlab-junit</artifactId>
- </dependency>
- <dependency>
<groupId>com.hazelcast</groupId>
<artifactId>hazelcast</artifactId>
</dependency>
</dependencies>
- <build>
- <plugins>
- <plugin>
- <groupId>org.apache.felix</groupId>
- <artifactId>maven-bundle-plugin</artifactId>
- </plugin>
- </plugins>
- </build>
-
</project>
diff --git a/core/store/pom.xml b/core/store/pom.xml
index b94b4fe..47e3162 100644
--- a/core/store/pom.xml
+++ b/core/store/pom.xml
@@ -1,5 +1,7 @@
<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
@@ -19,24 +21,17 @@
<module>dist</module>
<module>hz</module>
<module>serializers</module>
- </modules>
+ </modules>
<dependencies>
<dependency>
- <groupId>com.google.guava</groupId>
- <artifactId>guava</artifactId>
- </dependency>
- <dependency>
<groupId>org.onlab.onos</groupId>
- <artifactId>onlab-misc</artifactId>
+ <artifactId>onos-api</artifactId>
</dependency>
+
<dependency>
- <groupId>org.onlab.onos</groupId>
- <artifactId>onlab-junit</artifactId>
- </dependency>
- <dependency>
- <groupId>com.hazelcast</groupId>
- <artifactId>hazelcast</artifactId>
+ <groupId>org.apache.felix</groupId>
+ <artifactId>org.apache.felix.scr.annotations</artifactId>
</dependency>
</dependencies>
@@ -44,7 +39,7 @@
<plugins>
<plugin>
<groupId>org.apache.felix</groupId>
- <artifactId>maven-bundle-plugin</artifactId>
+ <artifactId>maven-scr-plugin</artifactId>
</plugin>
</plugins>
</build>
diff --git a/core/store/serializers/pom.xml b/core/store/serializers/pom.xml
index fe0a501..5f92ad1 100644
--- a/core/store/serializers/pom.xml
+++ b/core/store/serializers/pom.xml
@@ -18,14 +18,6 @@
<dependencies>
<dependency>
- <groupId>org.onlab.onos</groupId>
- <artifactId>onos-api</artifactId>
- </dependency>
- <dependency>
- <groupId>org.apache.felix</groupId>
- <artifactId>org.apache.felix.scr.annotations</artifactId>
- </dependency>
- <dependency>
<groupId>com.esotericsoftware</groupId>
<artifactId>kryo</artifactId>
</dependency>
@@ -36,13 +28,4 @@
</dependency>
</dependencies>
- <build>
- <plugins>
- <plugin>
- <groupId>org.apache.felix</groupId>
- <artifactId>maven-scr-plugin</artifactId>
- </plugin>
- </plugins>
- </build>
-
</project>
diff --git a/core/store/serializers/src/main/java/org/onlab/onos/store/serializers/HostLocationSerializer.java b/core/store/serializers/src/main/java/org/onlab/onos/store/serializers/HostLocationSerializer.java
new file mode 100644
index 0000000..2ef09ac
--- /dev/null
+++ b/core/store/serializers/src/main/java/org/onlab/onos/store/serializers/HostLocationSerializer.java
@@ -0,0 +1,40 @@
+package org.onlab.onos.store.serializers;
+
+import org.onlab.onos.net.DeviceId;
+import org.onlab.onos.net.HostLocation;
+import org.onlab.onos.net.PortNumber;
+
+import com.esotericsoftware.kryo.Kryo;
+import com.esotericsoftware.kryo.Serializer;
+import com.esotericsoftware.kryo.io.Input;
+import com.esotericsoftware.kryo.io.Output;
+
+/**
+* Kryo Serializer for {@link HostLocation}.
+*/
+public class HostLocationSerializer extends Serializer<HostLocation> {
+
+ /**
+ * Creates {@link HostLocation} serializer instance.
+ */
+ public HostLocationSerializer() {
+ // non-null, immutable
+ super(false, true);
+ }
+
+ @Override
+ public void write(Kryo kryo, Output output, HostLocation object) {
+ kryo.writeClassAndObject(output, object.deviceId());
+ kryo.writeClassAndObject(output, object.port());
+ output.writeLong(object.time());
+ }
+
+ @Override
+ public HostLocation read(Kryo kryo, Input input, Class<HostLocation> type) {
+ DeviceId deviceId = (DeviceId) kryo.readClassAndObject(input);
+ PortNumber portNumber = (PortNumber) kryo.readClassAndObject(input);
+ long time = input.readLong();
+ return new HostLocation(deviceId, portNumber, time);
+ }
+
+}
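A minimal, hypothetical registration sketch mirroring the KryoNamespaces.API builder pattern that appears later in this diff; a real namespace would also register the DeviceId and PortNumber types that this serializer writes.

    KryoNamespace ns = KryoNamespace.newBuilder()
            .register(HostLocation.class, new HostLocationSerializer())
            .build();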
diff --git a/core/store/serializers/src/main/java/org/onlab/onos/store/serializers/ImmutableListSerializer.java b/core/store/serializers/src/main/java/org/onlab/onos/store/serializers/ImmutableListSerializer.java
index 4bcc0a3..83a7c8a 100644
--- a/core/store/serializers/src/main/java/org/onlab/onos/store/serializers/ImmutableListSerializer.java
+++ b/core/store/serializers/src/main/java/org/onlab/onos/store/serializers/ImmutableListSerializer.java
@@ -1,6 +1,6 @@
package org.onlab.onos.store.serializers;
-import org.onlab.util.KryoPool.FamilySerializer;
+import org.onlab.util.KryoNamespace.FamilySerializer;
import com.esotericsoftware.kryo.Kryo;
import com.esotericsoftware.kryo.io.Input;
diff --git a/core/store/serializers/src/main/java/org/onlab/onos/store/serializers/ImmutableMapSerializer.java b/core/store/serializers/src/main/java/org/onlab/onos/store/serializers/ImmutableMapSerializer.java
index 734033f..cfa7ccc 100644
--- a/core/store/serializers/src/main/java/org/onlab/onos/store/serializers/ImmutableMapSerializer.java
+++ b/core/store/serializers/src/main/java/org/onlab/onos/store/serializers/ImmutableMapSerializer.java
@@ -4,7 +4,7 @@
import java.util.HashMap;
import java.util.Map;
-import org.onlab.util.KryoPool.FamilySerializer;
+import org.onlab.util.KryoNamespace.FamilySerializer;
import com.esotericsoftware.kryo.Kryo;
import com.esotericsoftware.kryo.io.Input;
diff --git a/core/store/serializers/src/main/java/org/onlab/onos/store/serializers/ImmutableSetSerializer.java b/core/store/serializers/src/main/java/org/onlab/onos/store/serializers/ImmutableSetSerializer.java
index 051a843..eb9fd06 100644
--- a/core/store/serializers/src/main/java/org/onlab/onos/store/serializers/ImmutableSetSerializer.java
+++ b/core/store/serializers/src/main/java/org/onlab/onos/store/serializers/ImmutableSetSerializer.java
@@ -3,7 +3,7 @@
import java.util.ArrayList;
import java.util.List;
-import org.onlab.util.KryoPool.FamilySerializer;
+import org.onlab.util.KryoNamespace.FamilySerializer;
import com.esotericsoftware.kryo.Kryo;
import com.esotericsoftware.kryo.io.Input;
diff --git a/core/store/serializers/src/main/java/org/onlab/onos/store/serializers/IpAddressSerializer.java b/core/store/serializers/src/main/java/org/onlab/onos/store/serializers/IpAddressSerializer.java
index b923df7..280da1e 100644
--- a/core/store/serializers/src/main/java/org/onlab/onos/store/serializers/IpAddressSerializer.java
+++ b/core/store/serializers/src/main/java/org/onlab/onos/store/serializers/IpAddressSerializer.java
@@ -20,8 +20,7 @@
}
@Override
- public void write(Kryo kryo, Output output,
- IpAddress object) {
+ public void write(Kryo kryo, Output output, IpAddress object) {
byte[] octs = object.toOctets();
output.writeInt(octs.length);
output.writeBytes(octs);
@@ -29,11 +28,10 @@
}
@Override
- public IpAddress read(Kryo kryo, Input input,
- Class<IpAddress> type) {
- int octLen = input.readInt();
+ public IpAddress read(Kryo kryo, Input input, Class<IpAddress> type) {
+ final int octLen = input.readInt();
byte[] octs = new byte[octLen];
- input.read(octs);
+ input.readBytes(octs);
int prefLen = input.readInt();
return IpAddress.valueOf(octs, prefLen);
}
diff --git a/core/store/serializers/src/main/java/org/onlab/onos/store/serializers/IpPrefixSerializer.java b/core/store/serializers/src/main/java/org/onlab/onos/store/serializers/IpPrefixSerializer.java
index 2e92692..cf82ebb 100644
--- a/core/store/serializers/src/main/java/org/onlab/onos/store/serializers/IpPrefixSerializer.java
+++ b/core/store/serializers/src/main/java/org/onlab/onos/store/serializers/IpPrefixSerializer.java
@@ -34,7 +34,7 @@
Class<IpPrefix> type) {
int octLen = input.readInt();
byte[] octs = new byte[octLen];
- input.read(octs);
+ input.readBytes(octs);
int prefLen = input.readInt();
return IpPrefix.valueOf(octs, prefLen);
}
diff --git a/core/store/serializers/src/main/java/org/onlab/onos/store/serializers/KryoPoolUtil.java b/core/store/serializers/src/main/java/org/onlab/onos/store/serializers/KryoNamespaces.java
similarity index 70%
rename from core/store/serializers/src/main/java/org/onlab/onos/store/serializers/KryoPoolUtil.java
rename to core/store/serializers/src/main/java/org/onlab/onos/store/serializers/KryoNamespaces.java
index b44c102..7c4cac5 100644
--- a/core/store/serializers/src/main/java/org/onlab/onos/store/serializers/KryoPoolUtil.java
+++ b/core/store/serializers/src/main/java/org/onlab/onos/store/serializers/KryoNamespaces.java
@@ -17,41 +17,52 @@
import org.onlab.onos.net.Device;
import org.onlab.onos.net.DeviceId;
import org.onlab.onos.net.Element;
+import org.onlab.onos.net.HostId;
+import org.onlab.onos.net.HostLocation;
import org.onlab.onos.net.Link;
import org.onlab.onos.net.LinkKey;
-import org.onlab.onos.net.MastershipRole;
import org.onlab.onos.net.Port;
import org.onlab.onos.net.PortNumber;
import org.onlab.onos.net.device.DefaultDeviceDescription;
import org.onlab.onos.net.device.DefaultPortDescription;
+import org.onlab.onos.net.flow.DefaultFlowRule;
+import org.onlab.onos.net.host.DefaultHostDescription;
+import org.onlab.onos.net.host.HostDescription;
import org.onlab.onos.net.link.DefaultLinkDescription;
import org.onlab.onos.net.provider.ProviderId;
import org.onlab.onos.store.Timestamp;
+import org.onlab.packet.ChassisId;
import org.onlab.packet.IpAddress;
import org.onlab.packet.IpPrefix;
-import org.onlab.util.KryoPool;
+import org.onlab.packet.MacAddress;
+import org.onlab.packet.VlanId;
+import org.onlab.util.KryoNamespace;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
-public final class KryoPoolUtil {
+public final class KryoNamespaces {
/**
- * KryoPool which can serialize ON.lab misc classes.
+ * KryoNamespace which can serialize ON.lab misc classes.
*/
- public static final KryoPool MISC = KryoPool.newBuilder()
+ public static final KryoNamespace MISC = KryoNamespace.newBuilder()
.register(IpPrefix.class, new IpPrefixSerializer())
.register(IpAddress.class, new IpAddressSerializer())
+ .register(MacAddress.class, new MacAddressSerializer())
+ .register(VlanId.class)
.build();
// TODO: Populate other classes
/**
- * KryoPool which can serialize API bundle classes.
+ * KryoNamespace which can serialize API bundle classes.
*/
- public static final KryoPool API = KryoPool.newBuilder()
+ public static final KryoNamespace API = KryoNamespace.newBuilder()
.register(MISC)
.register(ImmutableMap.class, new ImmutableMapSerializer())
.register(ImmutableList.class, new ImmutableListSerializer())
+ .register(ImmutableSet.class, new ImmutableSetSerializer())
.register(
//
ArrayList.class,
@@ -61,18 +72,21 @@
//
ControllerNode.State.class,
Device.Type.class,
+ ChassisId.class,
DefaultAnnotations.class,
DefaultControllerNode.class,
DefaultDevice.class,
DefaultDeviceDescription.class,
DefaultLinkDescription.class,
- MastershipRole.class,
Port.class,
DefaultPortDescription.class,
Element.class,
Link.Type.class,
- Timestamp.class
-
+ Timestamp.class,
+ HostId.class,
+ HostDescription.class,
+ DefaultHostDescription.class,
+ DefaultFlowRule.class
)
.register(URI.class, new URISerializer())
.register(NodeId.class, new NodeIdSerializer())
@@ -84,11 +98,11 @@
.register(ConnectPoint.class, new ConnectPointSerializer())
.register(DefaultLink.class, new DefaultLinkSerializer())
.register(MastershipTerm.class, new MastershipTermSerializer())
- .register(MastershipRole.class, new MastershipRoleSerializer())
+ .register(HostLocation.class, new HostLocationSerializer())
.build();
// not to be instantiated
- private KryoPoolUtil() {}
+ private KryoNamespaces() {}
}
diff --git a/core/store/serializers/src/main/java/org/onlab/onos/store/serializers/KryoSerializer.java b/core/store/serializers/src/main/java/org/onlab/onos/store/serializers/KryoSerializer.java
index 3920dd6..8142e57 100644
--- a/core/store/serializers/src/main/java/org/onlab/onos/store/serializers/KryoSerializer.java
+++ b/core/store/serializers/src/main/java/org/onlab/onos/store/serializers/KryoSerializer.java
@@ -1,6 +1,6 @@
package org.onlab.onos.store.serializers;
-import org.onlab.util.KryoPool;
+import org.onlab.util.KryoNamespace;
import java.nio.ByteBuffer;
/**
@@ -8,7 +8,7 @@
*/
public class KryoSerializer implements StoreSerializer {
- protected KryoPool serializerPool;
+ protected KryoNamespace serializerPool;
public KryoSerializer() {
setupKryoPool();
@@ -18,8 +18,8 @@
 * Sets up the common serializers pool.
*/
protected void setupKryoPool() {
- serializerPool = KryoPool.newBuilder()
- .register(KryoPoolUtil.API)
+ serializerPool = KryoNamespace.newBuilder()
+ .register(KryoNamespaces.API)
.build()
.populate(1);
}
diff --git a/core/store/serializers/src/main/java/org/onlab/onos/store/serializers/MacAddressSerializer.java b/core/store/serializers/src/main/java/org/onlab/onos/store/serializers/MacAddressSerializer.java
new file mode 100644
index 0000000..954c071
--- /dev/null
+++ b/core/store/serializers/src/main/java/org/onlab/onos/store/serializers/MacAddressSerializer.java
@@ -0,0 +1,32 @@
+package org.onlab.onos.store.serializers;
+
+import org.onlab.packet.MacAddress;
+
+import com.esotericsoftware.kryo.Kryo;
+import com.esotericsoftware.kryo.Serializer;
+import com.esotericsoftware.kryo.io.Input;
+import com.esotericsoftware.kryo.io.Output;
+
+/**
+ * Kryo Serializer for {@link MacAddress}.
+ */
+public class MacAddressSerializer extends Serializer<MacAddress> {
+
+ /**
+ * Creates {@link MacAddress} serializer instance.
+ */
+ public MacAddressSerializer() {
+ super(false, true);
+ }
+
+ @Override
+ public void write(Kryo kryo, Output output, MacAddress object) {
+ output.writeBytes(object.getAddress());
+ }
+
+ @Override
+ public MacAddress read(Kryo kryo, Input input, Class<MacAddress> type) {
+ return MacAddress.valueOf(input.readBytes(MacAddress.MAC_ADDRESS_LENGTH));
+ }
+
+}
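A hypothetical round trip through the KryoSerializer encode/decode API exercised by the test changes later in this diff; 'serializer' is assumed to be a KryoSerializer whose pool registers MacAddress.

    byte[] bytes = serializer.encode(MacAddress.valueOf("12:34:56:78:90:ab"));
    MacAddress copy = serializer.decode(bytes);   // equals() should hold for original and copy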
diff --git a/core/store/serializers/src/main/java/org/onlab/onos/store/serializers/MastershipRoleSerializer.java b/core/store/serializers/src/main/java/org/onlab/onos/store/serializers/MastershipRoleSerializer.java
deleted file mode 100644
index dab5aa8..0000000
--- a/core/store/serializers/src/main/java/org/onlab/onos/store/serializers/MastershipRoleSerializer.java
+++ /dev/null
@@ -1,34 +0,0 @@
-package org.onlab.onos.store.serializers;
-
-import org.onlab.onos.net.MastershipRole;
-
-import com.esotericsoftware.kryo.Kryo;
-import com.esotericsoftware.kryo.Serializer;
-import com.esotericsoftware.kryo.io.Input;
-import com.esotericsoftware.kryo.io.Output;
-
-/**
- * Kryo Serializer for {@link org.onlab.onos.net.MastershipRole}.
- */
-public class MastershipRoleSerializer extends Serializer<MastershipRole> {
-
- /**
- * Creates {@link MastershipRole} serializer instance.
- */
- public MastershipRoleSerializer() {
- // non-null, immutable
- super(false, true);
- }
-
- @Override
- public MastershipRole read(Kryo kryo, Input input, Class<MastershipRole> type) {
- final String role = kryo.readObject(input, String.class);
- return MastershipRole.valueOf(role);
- }
-
- @Override
- public void write(Kryo kryo, Output output, MastershipRole object) {
- kryo.writeObject(output, object.toString());
- }
-
-}
diff --git a/core/store/serializers/src/test/java/org/onlab/onos/store/serializers/KryoSerializerTest.java b/core/store/serializers/src/test/java/org/onlab/onos/store/serializers/KryoSerializerTest.java
index 58956d5..ac427cc 100644
--- a/core/store/serializers/src/test/java/org/onlab/onos/store/serializers/KryoSerializerTest.java
+++ b/core/store/serializers/src/test/java/org/onlab/onos/store/serializers/KryoSerializerTest.java
@@ -20,16 +20,19 @@
import org.onlab.onos.net.DefaultPort;
import org.onlab.onos.net.Device;
import org.onlab.onos.net.DeviceId;
+import org.onlab.onos.net.HostLocation;
import org.onlab.onos.net.Link;
import org.onlab.onos.net.LinkKey;
-import org.onlab.onos.net.MastershipRole;
import org.onlab.onos.net.PortNumber;
import org.onlab.onos.net.SparseAnnotations;
import org.onlab.onos.net.provider.ProviderId;
+import org.onlab.packet.ChassisId;
import org.onlab.packet.IpAddress;
import org.onlab.packet.IpPrefix;
-import org.onlab.util.KryoPool;
+import org.onlab.packet.MacAddress;
+import org.onlab.util.KryoNamespace;
+import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.testing.EqualsTester;
@@ -49,7 +52,9 @@
private static final String SW1 = "3.8.1";
private static final String SW2 = "3.9.5";
private static final String SN = "43311-12345";
- private static final Device DEV1 = new DefaultDevice(PID, DID1, Device.Type.SWITCH, MFR, HW, SW1, SN);
+ private static final ChassisId CID = new ChassisId();
+ private static final Device DEV1 = new DefaultDevice(PID, DID1, Device.Type.SWITCH, MFR, HW,
+ SW1, SN, CID);
private static final SparseAnnotations A1 = DefaultAnnotations.builder()
.set("A1", "a1")
.set("B1", "b1")
@@ -59,81 +64,147 @@
.set("B3", "b3")
.build();
- private static KryoPool kryos;
+ private KryoSerializer serializer;
@BeforeClass
public static void setUpBeforeClass() throws Exception {
- kryos = KryoPool.newBuilder()
- .register(KryoPoolUtil.API)
- .register(ImmutableMap.class, new ImmutableMapSerializer())
- .register(ImmutableSet.class, new ImmutableSetSerializer())
- .build();
}
@Before
public void setUp() throws Exception {
+ serializer = new KryoSerializer() {
+
+ @Override
+ protected void setupKryoPool() {
+ serializerPool = KryoNamespace.newBuilder()
+ .register(KryoNamespaces.API)
+ .build()
+ .populate(1);
+ }
+ };
}
@After
public void tearDown() throws Exception {
- // removing Kryo instance to use fresh Kryo on each tests
- kryos.getKryo();
}
- private static <T> void testSerialized(T original) {
+ private <T> void testSerialized(T original) {
ByteBuffer buffer = ByteBuffer.allocate(1 * 1024 * 1024);
- kryos.serialize(original, buffer);
+ serializer.encode(original, buffer);
buffer.flip();
- T copy = kryos.deserialize(buffer);
+ T copy = serializer.decode(buffer);
+
+ T copy2 = serializer.decode(serializer.encode(original));
new EqualsTester()
- .addEqualityGroup(original, copy)
+ .addEqualityGroup(original, copy, copy2)
.testEquals();
}
@Test
- public final void testSerialization() {
+ public void testConnectPoint() {
testSerialized(new ConnectPoint(DID1, P1));
- testSerialized(new DefaultLink(PID, CP1, CP2, Link.Type.DIRECT));
- testSerialized(new DefaultPort(DEV1, P1, true));
- testSerialized(new DefaultLink(PID, CP1, CP2, Link.Type.DIRECT, A1));
- testSerialized(new DefaultPort(DEV1, P1, true, A1_2));
- testSerialized(DID1);
- testSerialized(ImmutableMap.of(DID1, DEV1, DID2, DEV1));
- testSerialized(ImmutableMap.of(DID1, DEV1));
- testSerialized(ImmutableMap.of());
- testSerialized(ImmutableSet.of(DID1, DID2));
- testSerialized(ImmutableSet.of(DID1));
- testSerialized(ImmutableSet.of());
- testSerialized(IpPrefix.valueOf("192.168.0.1/24"));
- testSerialized(IpAddress.valueOf("192.168.0.1"));
- testSerialized(LinkKey.linkKey(CP1, CP2));
- testSerialized(new NodeId("SomeNodeIdentifier"));
- testSerialized(P1);
- testSerialized(PID);
- testSerialized(PIDA);
- testSerialized(new NodeId("bar"));
- testSerialized(MastershipTerm.of(new NodeId("foo"), 2));
- for (MastershipRole role : MastershipRole.values()) {
- testSerialized(role);
- }
}
@Test
- public final void testAnnotations() {
+ public void testDefaultLink() {
+ testSerialized(new DefaultLink(PID, CP1, CP2, Link.Type.DIRECT));
+ testSerialized(new DefaultLink(PID, CP1, CP2, Link.Type.DIRECT, A1));
+ }
+
+ @Test
+ public void testDefaultPort() {
+ testSerialized(new DefaultPort(DEV1, P1, true));
+ testSerialized(new DefaultPort(DEV1, P1, true, A1_2));
+ }
+
+ @Test
+ public void testDeviceId() {
+ testSerialized(DID1);
+ }
+
+ @Test
+ public void testImmutableMap() {
+ testSerialized(ImmutableMap.of(DID1, DEV1, DID2, DEV1));
+ testSerialized(ImmutableMap.of(DID1, DEV1));
+ testSerialized(ImmutableMap.of());
+ }
+
+ @Test
+ public void testImmutableSet() {
+ testSerialized(ImmutableSet.of(DID1, DID2));
+ testSerialized(ImmutableSet.of(DID1));
+ testSerialized(ImmutableSet.of());
+ }
+
+ @Test
+ public void testImmutableList() {
+ testSerialized(ImmutableList.of(DID1, DID2));
+ testSerialized(ImmutableList.of(DID1));
+ testSerialized(ImmutableList.of());
+ }
+
+ @Test
+ public void testIpPrefix() {
+ testSerialized(IpPrefix.valueOf("192.168.0.1/24"));
+ }
+
+ @Test
+ public void testIpAddress() {
+ testSerialized(IpAddress.valueOf("192.168.0.1"));
+ }
+
+ @Test
+ public void testMacAddress() {
+ testSerialized(MacAddress.valueOf("12:34:56:78:90:ab"));
+ }
+
+ @Test
+ public void testLinkKey() {
+ testSerialized(LinkKey.linkKey(CP1, CP2));
+ }
+
+ @Test
+ public void testNodeId() {
+ testSerialized(new NodeId("SomeNodeIdentifier"));
+ }
+
+ @Test
+ public void testPortNumber() {
+ testSerialized(P1);
+ }
+
+ @Test
+ public void testProviderId() {
+ testSerialized(PID);
+ testSerialized(PIDA);
+ }
+
+ @Test
+ public void testMastershipTerm() {
+ testSerialized(MastershipTerm.of(new NodeId("foo"), 2));
+ }
+
+ @Test
+ public void testHostLocation() {
+ testSerialized(new HostLocation(CP1, 1234L));
+ }
+
+ @Test
+ public void testAnnotations() {
// Annotations does not have equals defined, manually test equality
- final byte[] a1Bytes = kryos.serialize(A1);
- SparseAnnotations copiedA1 = kryos.deserialize(a1Bytes);
+ final byte[] a1Bytes = serializer.encode(A1);
+ SparseAnnotations copiedA1 = serializer.decode(a1Bytes);
assertAnnotationsEquals(copiedA1, A1);
- final byte[] a12Bytes = kryos.serialize(A1_2);
- SparseAnnotations copiedA12 = kryos.deserialize(a12Bytes);
+ final byte[] a12Bytes = serializer.encode(A1_2);
+ SparseAnnotations copiedA12 = serializer.decode(a12Bytes);
assertAnnotationsEquals(copiedA12, A1_2);
}
// code clone
- public static void assertAnnotationsEquals(Annotations actual, SparseAnnotations... annotations) {
+ protected static void assertAnnotationsEquals(Annotations actual, SparseAnnotations... annotations) {
SparseAnnotations expected = DefaultAnnotations.builder().build();
for (SparseAnnotations a : annotations) {
expected = DefaultAnnotations.union(expected, a);
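For reference, a minimal standalone sketch of the encode/decode round-trip check the test above performs, assuming a hypothetical Serializer interface and Guava's EqualsTester; this is illustrative only and not the actual ONOS serializer API.

import com.google.common.testing.EqualsTester;

import java.nio.ByteBuffer;

// Hypothetical minimal serializer contract, for illustration only.
interface Serializer {
    byte[] encode(Object obj);
    <T> T decode(byte[] data);
    <T> T decode(ByteBuffer buffer);
}

final class RoundTripCheck {

    private RoundTripCheck() {
    }

    // Serializes the original twice (via a direct ByteBuffer and via byte[])
    // and asserts that the original and both copies are mutually equal.
    static <T> void assertRoundTrip(Serializer serializer, T original) {
        ByteBuffer buffer = ByteBuffer.allocateDirect(1024 * 1024);
        buffer.put(serializer.encode(original));
        buffer.flip();
        T copy = serializer.decode(buffer);
        T copy2 = serializer.decode(serializer.encode(original));
        new EqualsTester()
                .addEqualityGroup(original, copy, copy2)
                .testEquals();
    }
}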
diff --git a/core/store/trivial/pom.xml b/core/store/trivial/pom.xml
index b35f6a50..d9f9154 100644
--- a/core/store/trivial/pom.xml
+++ b/core/store/trivial/pom.xml
@@ -18,26 +18,9 @@
<dependencies>
<dependency>
- <groupId>org.onlab.onos</groupId>
- <artifactId>onos-api</artifactId>
- </dependency>
- <dependency>
- <groupId>org.apache.felix</groupId>
- <artifactId>org.apache.felix.scr.annotations</artifactId>
- </dependency>
- <dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId>
</dependency>
</dependencies>
- <build>
- <plugins>
- <plugin>
- <groupId>org.apache.felix</groupId>
- <artifactId>maven-scr-plugin</artifactId>
- </plugin>
- </plugins>
- </build>
-
</project>
diff --git a/core/store/trivial/src/main/java/org/onlab/onos/store/trivial/impl/SimpleDeviceStore.java b/core/store/trivial/src/main/java/org/onlab/onos/store/trivial/impl/SimpleDeviceStore.java
index 514a22e..fbfaf9d 100644
--- a/core/store/trivial/src/main/java/org/onlab/onos/store/trivial/impl/SimpleDeviceStore.java
+++ b/core/store/trivial/src/main/java/org/onlab/onos/store/trivial/impl/SimpleDeviceStore.java
@@ -5,8 +5,6 @@
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
-import org.apache.commons.lang3.concurrent.ConcurrentException;
-import org.apache.commons.lang3.concurrent.ConcurrentInitializer;
import org.apache.felix.scr.annotations.Activate;
import org.apache.felix.scr.annotations.Component;
import org.apache.felix.scr.annotations.Deactivate;
@@ -30,11 +28,13 @@
import org.onlab.onos.net.device.PortDescription;
import org.onlab.onos.net.provider.ProviderId;
import org.onlab.onos.store.AbstractStore;
+import org.onlab.packet.ChassisId;
import org.onlab.util.NewConcurrentHashMap;
import org.slf4j.Logger;
import java.util.ArrayList;
import java.util.Collections;
+import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
@@ -71,8 +71,7 @@
public static final String DEVICE_NOT_FOUND = "Device with ID %s not found";
// collection of Description given from various providers
- private final ConcurrentMap<DeviceId,
- ConcurrentMap<ProviderId, DeviceDescriptions>>
+ private final ConcurrentMap<DeviceId, Map<ProviderId, DeviceDescriptions>>
deviceDescs = Maps.newConcurrentMap();
// cache of Device and Ports generated by compositing descriptions from providers
@@ -117,15 +116,16 @@
DeviceId deviceId,
DeviceDescription deviceDescription) {
- ConcurrentMap<ProviderId, DeviceDescriptions> providerDescs
- = getDeviceDescriptions(deviceId);
+ Map<ProviderId, DeviceDescriptions> providerDescs
+ = getOrCreateDeviceDescriptions(deviceId);
synchronized (providerDescs) {
// locking per device
DeviceDescriptions descs
- = createIfAbsentUnchecked(providerDescs, providerId,
- new InitDeviceDescs(deviceDescription));
+ = getOrCreateProviderDeviceDescriptions(providerDescs,
+ providerId,
+ deviceDescription);
Device oldDevice = devices.get(deviceId);
// update description
@@ -192,8 +192,8 @@
@Override
public DeviceEvent markOffline(DeviceId deviceId) {
- ConcurrentMap<ProviderId, DeviceDescriptions> providerDescs
- = getDeviceDescriptions(deviceId);
+ Map<ProviderId, DeviceDescriptions> providerDescs
+ = getOrCreateDeviceDescriptions(deviceId);
// locking device
synchronized (providerDescs) {
@@ -218,7 +218,7 @@
Device device = devices.get(deviceId);
checkArgument(device != null, DEVICE_NOT_FOUND, deviceId);
- ConcurrentMap<ProviderId, DeviceDescriptions> descsMap = deviceDescs.get(deviceId);
+ Map<ProviderId, DeviceDescriptions> descsMap = deviceDescs.get(deviceId);
checkArgument(descsMap != null, DEVICE_NOT_FOUND, deviceId);
List<DeviceEvent> events = new ArrayList<>();
@@ -287,12 +287,12 @@
Map<PortNumber, Port> ports,
Set<PortNumber> processed) {
List<DeviceEvent> events = new ArrayList<>();
- Iterator<PortNumber> iterator = ports.keySet().iterator();
+ Iterator<Entry<PortNumber, Port>> iterator = ports.entrySet().iterator();
while (iterator.hasNext()) {
- PortNumber portNumber = iterator.next();
+ Entry<PortNumber, Port> e = iterator.next();
+ PortNumber portNumber = e.getKey();
if (!processed.contains(portNumber)) {
- events.add(new DeviceEvent(PORT_REMOVED, device,
- ports.get(portNumber)));
+ events.add(new DeviceEvent(PORT_REMOVED, device, e.getValue()));
iterator.remove();
}
}
@@ -306,10 +306,36 @@
NewConcurrentHashMap.<PortNumber, Port>ifNeeded());
}
- private ConcurrentMap<ProviderId, DeviceDescriptions> getDeviceDescriptions(
+ private Map<ProviderId, DeviceDescriptions> getOrCreateDeviceDescriptions(
DeviceId deviceId) {
- return createIfAbsentUnchecked(deviceDescs, deviceId,
- NewConcurrentHashMap.<ProviderId, DeviceDescriptions>ifNeeded());
+ Map<ProviderId, DeviceDescriptions> r;
+ r = deviceDescs.get(deviceId);
+ if (r != null) {
+ return r;
+ }
+ r = new HashMap<>();
+ final Map<ProviderId, DeviceDescriptions> concurrentlyAdded;
+ concurrentlyAdded = deviceDescs.putIfAbsent(deviceId, r);
+ if (concurrentlyAdded != null) {
+ return concurrentlyAdded;
+ } else {
+ return r;
+ }
+ }
+
+ // Guarded by deviceDescs value (=Device lock)
+ private DeviceDescriptions getOrCreateProviderDeviceDescriptions(
+ Map<ProviderId, DeviceDescriptions> device,
+ ProviderId providerId, DeviceDescription deltaDesc) {
+
+ synchronized (device) {
+ DeviceDescriptions r = device.get(providerId);
+ if (r == null) {
+ r = new DeviceDescriptions(deltaDesc);
+ device.put(providerId, r);
+ }
+ return r;
+ }
}
@Override
@@ -318,12 +344,12 @@
Device device = devices.get(deviceId);
checkArgument(device != null, DEVICE_NOT_FOUND, deviceId);
- ConcurrentMap<ProviderId, DeviceDescriptions> descsMap = deviceDescs.get(deviceId);
+ Map<ProviderId, DeviceDescriptions> descsMap = deviceDescs.get(deviceId);
checkArgument(descsMap != null, DEVICE_NOT_FOUND, deviceId);
synchronized (descsMap) {
DeviceDescriptions descs = descsMap.get(providerId);
- // assuming all providers must to give DeviceDescription
+ // assuming all providers must give DeviceDescription first
checkArgument(descs != null,
"Device description for Device ID %s from Provider %s was not found",
deviceId, providerId);
@@ -367,7 +393,7 @@
@Override
public DeviceEvent removeDevice(DeviceId deviceId) {
- ConcurrentMap<ProviderId, DeviceDescriptions> descs = getDeviceDescriptions(deviceId);
+ Map<ProviderId, DeviceDescriptions> descs = getOrCreateDeviceDescriptions(deviceId);
synchronized (descs) {
Device device = devices.remove(deviceId);
// should DEVICE_REMOVED carry removed ports?
@@ -390,7 +416,7 @@
* @return Device instance
*/
private Device composeDevice(DeviceId deviceId,
- ConcurrentMap<ProviderId, DeviceDescriptions> providerDescs) {
+ Map<ProviderId, DeviceDescriptions> providerDescs) {
checkArgument(!providerDescs.isEmpty(), "No Device descriptions supplied");
@@ -404,6 +430,7 @@
String hwVersion = base.hwVersion();
String swVersion = base.swVersion();
String serialNumber = base.serialNumber();
+ ChassisId chassisId = base.chassisId();
DefaultAnnotations annotations = DefaultAnnotations.builder().build();
annotations = merge(annotations, base.annotations());
@@ -421,7 +448,8 @@
}
return new DefaultDevice(primary, deviceId , type, manufacturer,
- hwVersion, swVersion, serialNumber, annotations);
+ hwVersion, swVersion, serialNumber,
+ chassisId, annotations);
}
/**
@@ -429,14 +457,14 @@
*
* @param device device the port is on
* @param number port number
- * @param providerDescs Collection of Descriptions from multiple providers
+ * @param descsMap Collection of Descriptions from multiple providers
* @return Port instance
*/
private Port composePort(Device device, PortNumber number,
- ConcurrentMap<ProviderId, DeviceDescriptions> providerDescs) {
+ Map<ProviderId, DeviceDescriptions> descsMap) {
- ProviderId primary = pickPrimaryPID(providerDescs);
- DeviceDescriptions primDescs = providerDescs.get(primary);
+ ProviderId primary = pickPrimaryPID(descsMap);
+ DeviceDescriptions primDescs = descsMap.get(primary);
// if no primary, assume not enabled
// TODO: revisit this default port enabled/disabled behavior
boolean isEnabled = false;
@@ -448,7 +476,7 @@
annotations = merge(annotations, portDesc.annotations());
}
- for (Entry<ProviderId, DeviceDescriptions> e : providerDescs.entrySet()) {
+ for (Entry<ProviderId, DeviceDescriptions> e : descsMap.entrySet()) {
if (e.getKey().equals(primary)) {
continue;
}
@@ -470,10 +498,9 @@
/**
* @return primary ProviderID, or randomly chosen one if none exists
*/
- private ProviderId pickPrimaryPID(
- ConcurrentMap<ProviderId, DeviceDescriptions> providerDescs) {
+ private ProviderId pickPrimaryPID(Map<ProviderId, DeviceDescriptions> descsMap) {
ProviderId fallBackPrimary = null;
- for (Entry<ProviderId, DeviceDescriptions> e : providerDescs.entrySet()) {
+ for (Entry<ProviderId, DeviceDescriptions> e : descsMap.entrySet()) {
if (!e.getKey().isAncillary()) {
return e.getKey();
} else if (fallBackPrimary == null) {
@@ -484,21 +511,6 @@
return fallBackPrimary;
}
- public static final class InitDeviceDescs
- implements ConcurrentInitializer<DeviceDescriptions> {
-
- private final DeviceDescription deviceDesc;
-
- public InitDeviceDescs(DeviceDescription deviceDesc) {
- this.deviceDesc = checkNotNull(deviceDesc);
- }
- @Override
- public DeviceDescriptions get() throws ConcurrentException {
- return new DeviceDescriptions(deviceDesc);
- }
- }
-
-
/**
* Collection of Descriptions of a Device and its Ports given from a Provider.
*/
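As an aside, the getOrCreateDeviceDescriptions change above replaces commons-lang3's createIfAbsentUnchecked with a plain putIfAbsent race; callers then synchronize on the returned map, as the store does per device. A generic sketch of that pattern follows (names here are illustrative, not the store's API).

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

final class GetOrCreateSketch {

    private final ConcurrentMap<String, Map<String, String>> descriptions =
            new ConcurrentHashMap<>();

    // Returns the existing inner map, or atomically installs a fresh one.
    // If two threads race, putIfAbsent ensures both observe the same winning
    // instance; the loser's newly created map is simply discarded.
    Map<String, String> getOrCreate(String deviceId) {
        Map<String, String> existing = descriptions.get(deviceId);
        if (existing != null) {
            return existing;
        }
        Map<String, String> created = new HashMap<>();
        Map<String, String> raced = descriptions.putIfAbsent(deviceId, created);
        return (raced == null) ? created : raced;
    }
}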
diff --git a/core/store/trivial/src/main/java/org/onlab/onos/store/trivial/impl/SimpleFlowRuleStore.java b/core/store/trivial/src/main/java/org/onlab/onos/store/trivial/impl/SimpleFlowRuleStore.java
index 2d50851..8ca2ca0 100644
--- a/core/store/trivial/src/main/java/org/onlab/onos/store/trivial/impl/SimpleFlowRuleStore.java
+++ b/core/store/trivial/src/main/java/org/onlab/onos/store/trivial/impl/SimpleFlowRuleStore.java
@@ -2,9 +2,14 @@
import static org.onlab.onos.net.flow.FlowRuleEvent.Type.RULE_REMOVED;
import static org.slf4j.LoggerFactory.getLogger;
+import static org.apache.commons.lang3.concurrent.ConcurrentUtils.createIfAbsentUnchecked;
+import static java.util.Collections.unmodifiableCollection;
import java.util.Collection;
-import java.util.Collections;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
import org.apache.felix.scr.annotations.Activate;
import org.apache.felix.scr.annotations.Component;
@@ -15,18 +20,17 @@
import org.onlab.onos.net.flow.DefaultFlowEntry;
import org.onlab.onos.net.flow.FlowEntry;
import org.onlab.onos.net.flow.FlowEntry.FlowEntryState;
+import org.onlab.onos.net.flow.FlowId;
import org.onlab.onos.net.flow.FlowRule;
import org.onlab.onos.net.flow.FlowRuleEvent;
import org.onlab.onos.net.flow.FlowRuleEvent.Type;
import org.onlab.onos.net.flow.FlowRuleStore;
import org.onlab.onos.net.flow.FlowRuleStoreDelegate;
+import org.onlab.onos.net.flow.StoredFlowEntry;
import org.onlab.onos.store.AbstractStore;
+import org.onlab.util.NewConcurrentHashMap;
import org.slf4j.Logger;
-import com.google.common.collect.ArrayListMultimap;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.Multimap;
-
/**
* Manages inventory of flow rules using trivial in-memory implementation.
*/
@@ -38,12 +42,11 @@
private final Logger log = getLogger(getClass());
- // store entries as a pile of rules, no info about device tables
- private final Multimap<DeviceId, FlowEntry> flowEntries =
- ArrayListMultimap.<DeviceId, FlowEntry>create();
- private final Multimap<Short, FlowRule> flowEntriesById =
- ArrayListMultimap.<Short, FlowRule>create();
+ // inner Map is Device flow table
+ // Assumption: FlowId cannot have synonyms
+ private final ConcurrentMap<DeviceId, ConcurrentMap<FlowId, StoredFlowEntry>>
+ flowEntries = new ConcurrentHashMap<>();
@Activate
public void activate() {
@@ -52,88 +55,130 @@
@Deactivate
public void deactivate() {
+ flowEntries.clear();
log.info("Stopped");
}
@Override
public int getFlowRuleCount() {
- return flowEntries.size();
+ int sum = 0;
+ for (ConcurrentMap<FlowId, StoredFlowEntry> ft : flowEntries.values()) {
+ sum += ft.size();
+ }
+ return sum;
+ }
+
+ private static NewConcurrentHashMap<FlowId, StoredFlowEntry> lazyEmptyFlowTable() {
+ return NewConcurrentHashMap.<FlowId, StoredFlowEntry>ifNeeded();
+ }
+
+ /**
+ * Returns the flow table for the specified device.
+ *
+ * @param deviceId identifier of the device
+ * @return Map representing the flow table of the given device.
+ */
+ private ConcurrentMap<FlowId, StoredFlowEntry> getFlowTable(DeviceId deviceId) {
+ return createIfAbsentUnchecked(flowEntries,
+ deviceId, lazyEmptyFlowTable());
+ }
+
+ private StoredFlowEntry getFlowEntry(DeviceId deviceId, FlowId flowId) {
+ return getFlowTable(deviceId).get(flowId);
}
@Override
- public synchronized FlowEntry getFlowEntry(FlowRule rule) {
- for (FlowEntry f : flowEntries.get(rule.deviceId())) {
- if (f.equals(rule)) {
- return f;
+ public FlowEntry getFlowEntry(FlowRule rule) {
+ return getFlowEntry(rule.deviceId(), rule.id());
+ }
+
+ @Override
+ public Iterable<FlowEntry> getFlowEntries(DeviceId deviceId) {
+ return unmodifiableCollection((Collection<? extends FlowEntry>)
+ getFlowTable(deviceId).values());
+ }
+
+ @Override
+ public Iterable<FlowRule> getFlowRulesByAppId(ApplicationId appId) {
+
+ Set<FlowRule> rules = new HashSet<>();
+ for (DeviceId did : flowEntries.keySet()) {
+ ConcurrentMap<FlowId, StoredFlowEntry> ft = getFlowTable(did);
+ for (FlowEntry fe : ft.values()) {
+ if (fe.appId() == appId.id()) {
+ rules.add(fe);
+ }
}
}
- return null;
+ return rules;
}
@Override
- public synchronized Iterable<FlowEntry> getFlowEntries(DeviceId deviceId) {
- Collection<FlowEntry> rules = flowEntries.get(deviceId);
- if (rules == null) {
- return Collections.emptyList();
+ public void storeFlowRule(FlowRule rule) {
+ final boolean added = storeFlowRuleInternal(rule);
+ }
+
+ private boolean storeFlowRuleInternal(FlowRule rule) {
+ StoredFlowEntry f = new DefaultFlowEntry(rule);
+ final DeviceId did = f.deviceId();
+ final FlowId fid = f.id();
+ FlowEntry existing = getFlowTable(did).putIfAbsent(fid, f);
+ if (existing != null) {
+ // was already there? ignore
+ return false;
}
- return ImmutableSet.copyOf(rules);
+ // new flow rule added
+ // TODO: notify through delegate about remote event?
+ return true;
}
@Override
- public synchronized Iterable<FlowRule> getFlowRulesByAppId(ApplicationId appId) {
- Collection<FlowRule> rules = flowEntriesById.get(appId.id());
- if (rules == null) {
- return Collections.emptyList();
- }
- return ImmutableSet.copyOf(rules);
- }
+ public void deleteFlowRule(FlowRule rule) {
- @Override
- public synchronized void storeFlowRule(FlowRule rule) {
- FlowEntry f = new DefaultFlowEntry(rule);
- DeviceId did = f.deviceId();
- if (!flowEntries.containsEntry(did, f)) {
- flowEntries.put(did, f);
- flowEntriesById.put(rule.appId(), f);
- }
- }
-
- @Override
- public synchronized void deleteFlowRule(FlowRule rule) {
- FlowEntry entry = getFlowEntry(rule);
+ StoredFlowEntry entry = getFlowEntry(rule.deviceId(), rule.id());
if (entry == null) {
//log.warn("Cannot find rule {}", rule);
return;
}
- entry.setState(FlowEntryState.PENDING_REMOVE);
+ synchronized (entry) {
+ entry.setState(FlowEntryState.PENDING_REMOVE);
+ }
}
@Override
- public synchronized FlowRuleEvent addOrUpdateFlowRule(FlowEntry rule) {
- DeviceId did = rule.deviceId();
-
+ public FlowRuleEvent addOrUpdateFlowRule(FlowEntry rule) {
// check if this new rule is an update to an existing entry
- FlowEntry stored = getFlowEntry(rule);
+ StoredFlowEntry stored = getFlowEntry(rule.deviceId(), rule.id());
if (stored != null) {
- stored.setBytes(rule.bytes());
- stored.setLife(rule.life());
- stored.setPackets(rule.packets());
- if (stored.state() == FlowEntryState.PENDING_ADD) {
- stored.setState(FlowEntryState.ADDED);
- return new FlowRuleEvent(Type.RULE_ADDED, rule);
+ synchronized (stored) {
+ stored.setBytes(rule.bytes());
+ stored.setLife(rule.life());
+ stored.setPackets(rule.packets());
+ if (stored.state() == FlowEntryState.PENDING_ADD) {
+ stored.setState(FlowEntryState.ADDED);
+ // TODO: Do we need to change `rule` state?
+ return new FlowRuleEvent(Type.RULE_ADDED, rule);
+ }
+ return new FlowRuleEvent(Type.RULE_UPDATED, rule);
}
- return new FlowRuleEvent(Type.RULE_UPDATED, rule);
}
+ // should not reach here
+ // storeFlowRule was expected to be called
+ log.error("FlowRule was not found in store {} to update", rule);
+
//flowEntries.put(did, rule);
return null;
}
@Override
- public synchronized FlowRuleEvent removeFlowRule(FlowEntry rule) {
+ public FlowRuleEvent removeFlowRule(FlowEntry rule) {
// This is where one could mark a rule as removed and still keep it in the store.
- if (flowEntries.remove(rule.deviceId(), rule)) {
+ final DeviceId did = rule.deviceId();
+
+ ConcurrentMap<FlowId, StoredFlowEntry> ft = getFlowTable(did);
+ if (ft.remove(rule.id(), rule)) {
return new FlowRuleEvent(RULE_REMOVED, rule);
} else {
return null;
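The SimpleFlowRuleStore rewrite above swaps a synchronized Multimap for nested concurrent maps keyed by device and flow id. A compact sketch of that layout with String/Long stand-ins (not the ONOS types), showing the putIfAbsent store and the conditional remove(key, value) used by removeFlowRule:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

final class FlowTableSketch {

    // deviceId -> (flowId -> rule), mirroring the nested-map layout above
    private final ConcurrentMap<String, ConcurrentMap<Long, String>> tables =
            new ConcurrentHashMap<>();

    // Get-or-create the per-device table with the same putIfAbsent race pattern.
    ConcurrentMap<Long, String> tableFor(String deviceId) {
        ConcurrentMap<Long, String> created = new ConcurrentHashMap<>();
        ConcurrentMap<Long, String> raced = tables.putIfAbsent(deviceId, created);
        return (raced == null) ? created : raced;
    }

    // putIfAbsent: only the first store of a given flow id wins.
    boolean store(String deviceId, Long flowId, String rule) {
        return tableFor(deviceId).putIfAbsent(flowId, rule) == null;
    }

    // remove(key, value): remove only if the entry still equals the given rule,
    // so a concurrently updated entry is not silently dropped.
    boolean remove(String deviceId, Long flowId, String rule) {
        return tableFor(deviceId).remove(flowId, rule);
    }
}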
diff --git a/core/store/trivial/src/main/java/org/onlab/onos/store/trivial/impl/SimpleHostStore.java b/core/store/trivial/src/main/java/org/onlab/onos/store/trivial/impl/SimpleHostStore.java
index bf99227..ef80b72 100644
--- a/core/store/trivial/src/main/java/org/onlab/onos/store/trivial/impl/SimpleHostStore.java
+++ b/core/store/trivial/src/main/java/org/onlab/onos/store/trivial/impl/SimpleHostStore.java
@@ -84,7 +84,7 @@
descr.hwAddress(),
descr.vlan(),
descr.location(),
- ImmutableSet.of(descr.ipAddress()));
+ ImmutableSet.copyOf(descr.ipAddress()));
synchronized (this) {
hosts.put(hostId, newhost);
locations.put(descr.location(), newhost);
@@ -101,12 +101,12 @@
return new HostEvent(HOST_MOVED, host);
}
- if (host.ipAddresses().contains(descr.ipAddress())) {
+ if (host.ipAddresses().containsAll(descr.ipAddress())) {
return null;
}
Set<IpPrefix> addresses = new HashSet<>(host.ipAddresses());
- addresses.add(descr.ipAddress());
+ addresses.addAll(descr.ipAddress());
StoredHost updated = new StoredHost(providerId, host.id(),
host.mac(), host.vlan(),
descr.location(), addresses);
diff --git a/core/store/trivial/src/main/java/org/onlab/onos/store/trivial/impl/SimpleLinkStore.java b/core/store/trivial/src/main/java/org/onlab/onos/store/trivial/impl/SimpleLinkStore.java
index bcddda3..a9a0982 100644
--- a/core/store/trivial/src/main/java/org/onlab/onos/store/trivial/impl/SimpleLinkStore.java
+++ b/core/store/trivial/src/main/java/org/onlab/onos/store/trivial/impl/SimpleLinkStore.java
@@ -1,12 +1,10 @@
package org.onlab.onos.store.trivial.impl;
import com.google.common.base.Function;
-import com.google.common.base.Predicate;
import com.google.common.collect.FluentIterable;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.SetMultimap;
-import org.apache.commons.lang3.concurrent.ConcurrentUtils;
import org.apache.felix.scr.annotations.Activate;
import org.apache.felix.scr.annotations.Component;
import org.apache.felix.scr.annotations.Deactivate;
@@ -20,7 +18,6 @@
import org.onlab.onos.net.SparseAnnotations;
import org.onlab.onos.net.Link.Type;
import org.onlab.onos.net.LinkKey;
-import org.onlab.onos.net.Provided;
import org.onlab.onos.net.link.DefaultLinkDescription;
import org.onlab.onos.net.link.LinkDescription;
import org.onlab.onos.net.link.LinkEvent;
@@ -28,11 +25,12 @@
import org.onlab.onos.net.link.LinkStoreDelegate;
import org.onlab.onos.net.provider.ProviderId;
import org.onlab.onos.store.AbstractStore;
-import org.onlab.util.NewConcurrentHashMap;
import org.slf4j.Logger;
import java.util.Collections;
+import java.util.HashMap;
import java.util.HashSet;
+import java.util.Map;
import java.util.Set;
import java.util.Map.Entry;
import java.util.concurrent.ConcurrentHashMap;
@@ -47,6 +45,7 @@
import static org.slf4j.LoggerFactory.getLogger;
import static com.google.common.collect.Multimaps.synchronizedSetMultimap;
import static com.google.common.base.Predicates.notNull;
+import static com.google.common.base.Verify.verifyNotNull;
/**
* Manages inventory of infrastructure links using trivial in-memory structures
@@ -61,8 +60,7 @@
private final Logger log = getLogger(getClass());
// Link inventory
- private final ConcurrentMap<LinkKey,
- ConcurrentMap<ProviderId, LinkDescription>>
+ private final ConcurrentMap<LinkKey, Map<ProviderId, LinkDescription>>
linkDescs = new ConcurrentHashMap<>();
// Link instance cache
@@ -151,7 +149,7 @@
LinkDescription linkDescription) {
LinkKey key = linkKey(linkDescription.src(), linkDescription.dst());
- ConcurrentMap<ProviderId, LinkDescription> descs = getLinkDescriptions(key);
+ Map<ProviderId, LinkDescription> descs = getOrCreateLinkDescriptions(key);
synchronized (descs) {
final Link oldLink = links.get(key);
// update description
@@ -166,7 +164,7 @@
// Guarded by linkDescs value (=locking each Link)
private LinkDescription createOrUpdateLinkDescription(
- ConcurrentMap<ProviderId, LinkDescription> descs,
+ Map<ProviderId, LinkDescription> descs,
ProviderId providerId,
LinkDescription linkDescription) {
@@ -227,7 +225,7 @@
@Override
public LinkEvent removeLink(ConnectPoint src, ConnectPoint dst) {
final LinkKey key = linkKey(src, dst);
- ConcurrentMap<ProviderId, LinkDescription> descs = getLinkDescriptions(key);
+ Map<ProviderId, LinkDescription> descs = getOrCreateLinkDescriptions(key);
synchronized (descs) {
Link link = links.remove(key);
descs.clear();
@@ -247,8 +245,8 @@
/**
* @return primary ProviderID, or randomly chosen one if none exists
*/
- private ProviderId pickPrimaryPID(
- ConcurrentMap<ProviderId, LinkDescription> providerDescs) {
+ // Guarded by linkDescs value (=locking each Link)
+ private ProviderId getBaseProviderId(Map<ProviderId, LinkDescription> providerDescs) {
ProviderId fallBackPrimary = null;
for (Entry<ProviderId, LinkDescription> e : providerDescs.entrySet()) {
@@ -262,9 +260,10 @@
return fallBackPrimary;
}
- private Link composeLink(ConcurrentMap<ProviderId, LinkDescription> descs) {
- ProviderId primary = pickPrimaryPID(descs);
- LinkDescription base = descs.get(primary);
+ // Guarded by linkDescs value (=locking each Link)
+ private Link composeLink(Map<ProviderId, LinkDescription> descs) {
+ ProviderId primary = getBaseProviderId(descs);
+ LinkDescription base = descs.get(verifyNotNull(primary));
ConnectPoint src = base.src();
ConnectPoint dst = base.dst();
@@ -289,9 +288,20 @@
return new DefaultLink(primary , src, dst, type, annotations);
}
- private ConcurrentMap<ProviderId, LinkDescription> getLinkDescriptions(LinkKey key) {
- return ConcurrentUtils.createIfAbsentUnchecked(linkDescs, key,
- NewConcurrentHashMap.<ProviderId, LinkDescription>ifNeeded());
+ private Map<ProviderId, LinkDescription> getOrCreateLinkDescriptions(LinkKey key) {
+ Map<ProviderId, LinkDescription> r;
+ r = linkDescs.get(key);
+ if (r != null) {
+ return r;
+ }
+ r = new HashMap<>();
+ final Map<ProviderId, LinkDescription> concurrentlyAdded;
+ concurrentlyAdded = linkDescs.putIfAbsent(key, r);
+ if (concurrentlyAdded == null) {
+ return r;
+ } else {
+ return concurrentlyAdded;
+ }
}
private final Function<LinkKey, Link> lookupLink = new LookupLink();
@@ -302,20 +312,11 @@
private final class LookupLink implements Function<LinkKey, Link> {
@Override
public Link apply(LinkKey input) {
- return links.get(input);
- }
- }
-
- private static final Predicate<Provided> IS_PRIMARY = new IsPrimary();
- private static final Predicate<Provided> isPrimary() {
- return IS_PRIMARY;
- }
-
- private static final class IsPrimary implements Predicate<Provided> {
-
- @Override
- public boolean apply(Provided input) {
- return !input.providerId().isAncillary();
+ if (input == null) {
+ return null;
+ } else {
+ return links.get(input);
+ }
}
}
}
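The LookupLink change above makes the key-to-link Function null-safe before it is combined with a notNull() filter. A small self-contained sketch of that Guava lookup-and-filter pattern, with simplified generic types rather than the store's actual signatures:

import com.google.common.base.Function;
import com.google.common.collect.FluentIterable;

import java.util.Map;
import java.util.Set;

import static com.google.common.base.Predicates.notNull;

final class LookupFilterSketch {

    // Maps keys to cached values and drops keys with no mapping, as the store
    // does when resolving LinkKeys to cached Link instances.
    static <K, V> Set<V> resolve(final Map<K, V> cache, Iterable<K> keys) {
        return FluentIterable.from(keys)
                .transform(new Function<K, V>() {
                    @Override
                    public V apply(K input) {
                        // null-safe lookup, matching the guarded apply() above
                        return (input == null) ? null : cache.get(input);
                    }
                })
                .filter(notNull())
                .toSet();
    }
}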
diff --git a/core/store/trivial/src/main/java/org/onlab/onos/store/trivial/impl/SimpleMastershipStore.java b/core/store/trivial/src/main/java/org/onlab/onos/store/trivial/impl/SimpleMastershipStore.java
index aba77d0..f4b035c 100644
--- a/core/store/trivial/src/main/java/org/onlab/onos/store/trivial/impl/SimpleMastershipStore.java
+++ b/core/store/trivial/src/main/java/org/onlab/onos/store/trivial/impl/SimpleMastershipStore.java
@@ -2,9 +2,11 @@
import static org.slf4j.LoggerFactory.getLogger;
+import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
+import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
@@ -95,6 +97,18 @@
}
@Override
+ public List<NodeId> getNodes(DeviceId deviceId) {
+ List<NodeId> nodes = new ArrayList<>();
+
+ nodes.addAll(backups);
+ if (!nodes.contains(masterMap.get(deviceId))) {
+ nodes.add(masterMap.get(deviceId));
+ }
+
+ return Collections.unmodifiableList(nodes);
+ }
+
+ @Override
public Set<DeviceId> getDevices(NodeId nodeId) {
Set<DeviceId> ids = new HashSet<>();
for (Map.Entry<DeviceId, NodeId> d : masterMap.entrySet()) {
diff --git a/core/store/trivial/src/test/java/org/onlab/onos/store/trivial/impl/DefaultTopologyTest.java b/core/store/trivial/src/test/java/org/onlab/onos/store/trivial/impl/DefaultTopologyTest.java
index ef383c8..879f123 100644
--- a/core/store/trivial/src/test/java/org/onlab/onos/store/trivial/impl/DefaultTopologyTest.java
+++ b/core/store/trivial/src/test/java/org/onlab/onos/store/trivial/impl/DefaultTopologyTest.java
@@ -17,6 +17,7 @@
import org.onlab.onos.net.topology.LinkWeight;
import org.onlab.onos.net.topology.TopologyCluster;
import org.onlab.onos.net.topology.TopologyEdge;
+import org.onlab.packet.ChassisId;
import java.util.Set;
@@ -119,7 +120,7 @@
// Creates a new device with the specified id
public static Device device(String id) {
return new DefaultDevice(PID, did(id), Device.Type.SWITCH,
- "mfg", "1.0", "1.1", "1234");
+ "mfg", "1.0", "1.1", "1234", new ChassisId());
}
// Short-hand for producing a device id from a string
diff --git a/core/store/trivial/src/test/java/org/onlab/onos/store/trivial/impl/SimpleDeviceStoreTest.java b/core/store/trivial/src/test/java/org/onlab/onos/store/trivial/impl/SimpleDeviceStoreTest.java
index 146086a..8338a77 100644
--- a/core/store/trivial/src/test/java/org/onlab/onos/store/trivial/impl/SimpleDeviceStoreTest.java
+++ b/core/store/trivial/src/test/java/org/onlab/onos/store/trivial/impl/SimpleDeviceStoreTest.java
@@ -40,6 +40,7 @@
import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;
+import org.onlab.packet.ChassisId;
/**
* Test of the simple DeviceStore implementation.
@@ -55,6 +56,7 @@
private static final String SW1 = "3.8.1";
private static final String SW2 = "3.9.5";
private static final String SN = "43311-12345";
+ private static final ChassisId CID = new ChassisId();
private static final PortNumber P1 = PortNumber.portNumber(1);
private static final PortNumber P2 = PortNumber.portNumber(2);
@@ -107,7 +109,7 @@
SparseAnnotations... annotations) {
DeviceDescription description =
new DefaultDeviceDescription(deviceId.uri(), SWITCH, MFR,
- HW, swVersion, SN, annotations);
+ HW, swVersion, SN, CID, annotations);
deviceStore.createOrUpdateDevice(PID, deviceId, description);
}
@@ -115,7 +117,7 @@
SparseAnnotations... annotations) {
DeviceDescription description =
new DefaultDeviceDescription(deviceId.uri(), SWITCH, MFR,
- HW, swVersion, SN, annotations);
+ HW, swVersion, SN, CID, annotations);
deviceStore.createOrUpdateDevice(PIDA, deviceId, description);
}
@@ -193,14 +195,14 @@
public final void testCreateOrUpdateDevice() {
DeviceDescription description =
new DefaultDeviceDescription(DID1.uri(), SWITCH, MFR,
- HW, SW1, SN);
+ HW, SW1, SN, CID);
DeviceEvent event = deviceStore.createOrUpdateDevice(PID, DID1, description);
assertEquals(DEVICE_ADDED, event.type());
assertDevice(DID1, SW1, event.subject());
DeviceDescription description2 =
new DefaultDeviceDescription(DID1.uri(), SWITCH, MFR,
- HW, SW2, SN);
+ HW, SW2, SN, CID);
DeviceEvent event2 = deviceStore.createOrUpdateDevice(PID, DID1, description2);
assertEquals(DEVICE_UPDATED, event2.type());
assertDevice(DID1, SW2, event2.subject());
@@ -212,7 +214,7 @@
public final void testCreateOrUpdateDeviceAncillary() {
DeviceDescription description =
new DefaultDeviceDescription(DID1.uri(), SWITCH, MFR,
- HW, SW1, SN, A2);
+ HW, SW1, SN, CID, A2);
DeviceEvent event = deviceStore.createOrUpdateDevice(PIDA, DID1, description);
assertEquals(DEVICE_ADDED, event.type());
assertDevice(DID1, SW1, event.subject());
@@ -222,7 +224,7 @@
DeviceDescription description2 =
new DefaultDeviceDescription(DID1.uri(), SWITCH, MFR,
- HW, SW2, SN, A1);
+ HW, SW2, SN, CID, A1);
DeviceEvent event2 = deviceStore.createOrUpdateDevice(PID, DID1, description2);
assertEquals(DEVICE_UPDATED, event2.type());
assertDevice(DID1, SW2, event2.subject());
@@ -238,7 +240,7 @@
// But, Ancillary annotations will be in effect
DeviceDescription description3 =
new DefaultDeviceDescription(DID1.uri(), SWITCH, MFR,
- HW, SW1, SN, A2_2);
+ HW, SW1, SN, CID, A2_2);
DeviceEvent event3 = deviceStore.createOrUpdateDevice(PIDA, DID1, description3);
assertEquals(DEVICE_UPDATED, event3.type());
// basic information will be the one from Primary
@@ -508,7 +510,7 @@
DeviceDescription description =
new DefaultDeviceDescription(DID1.uri(), SWITCH, MFR,
- HW, SW1, SN);
+ HW, SW1, SN, CID);
deviceStore.setDelegate(checkAdd);
deviceStore.createOrUpdateDevice(PID, DID1, description);
assertTrue("Add event fired", addLatch.await(1, TimeUnit.SECONDS));
@@ -516,7 +518,7 @@
DeviceDescription description2 =
new DefaultDeviceDescription(DID1.uri(), SWITCH, MFR,
- HW, SW2, SN);
+ HW, SW2, SN, CID);
deviceStore.unsetDelegate(checkAdd);
deviceStore.setDelegate(checkUpdate);
deviceStore.createOrUpdateDevice(PID, DID1, description2);
diff --git a/features/features.xml b/features/features.xml
index 34c70bc..9636643 100644
--- a/features/features.xml
+++ b/features/features.xml
@@ -1,7 +1,8 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<features xmlns="http://karaf.apache.org/xmlns/features/v1.2.0"
name="onos-1.0.0">
- <repository>mvn:org.onlab.onos/onos-features/1.0.0-SNAPSHOT/xml/features</repository>
+ <repository>mvn:org.onlab.onos/onos-features/1.0.0-SNAPSHOT/xml/features
+ </repository>
<feature name="onos-thirdparty-base" version="1.0.0"
description="ONOS 3rd party dependencies">
@@ -28,20 +29,22 @@
<bundle>mvn:org.onlab.onos/onlab-nio/1.0.0-SNAPSHOT</bundle>
- <bundle>mvn:org.codehaus.jackson/jackson-core-asl/1.9.13</bundle>
- <bundle>mvn:org.codehaus.jackson/jackson-mapper-asl/1.9.13</bundle>
+ <bundle>mvn:org.codehaus.jackson/jackson-core-asl/1.9.13</bundle>
+ <bundle>mvn:org.codehaus.jackson/jackson-mapper-asl/1.9.13</bundle>
+ <bundle>mvn:org.onlab.onos/onlab-thirdparty/1.0.0-SNAPSHOT</bundle>
</feature>
<feature name="onos-thirdparty-web" version="1.0.0"
description="ONOS 3rd party dependencies">
<feature>war</feature>
<bundle>mvn:com.fasterxml.jackson.core/jackson-core/2.4.2</bundle>
- <bundle>mvn:com.fasterxml.jackson.core/jackson-annotations/2.4.2</bundle>
+ <bundle>mvn:com.fasterxml.jackson.core/jackson-annotations/2.4.2
+ </bundle>
<bundle>mvn:com.fasterxml.jackson.core/jackson-databind/2.4.2</bundle>
<bundle>mvn:com.sun.jersey/jersey-core/1.18.1</bundle>
<bundle>mvn:com.sun.jersey/jersey-server/1.18.1</bundle>
<bundle>mvn:com.sun.jersey/jersey-servlet/1.18.1</bundle>
-
+
</feature>
<feature name="onos-api" version="1.0.0"
@@ -95,15 +98,15 @@
</feature>
<feature name="onos-openflow" version="1.0.0"
- description="ONOS OpenFlow API, Controller & Providers">
+ description="ONOS OpenFlow API, Controller & Providers">
<feature>onos-api</feature>
<bundle>mvn:io.netty/netty/3.9.2.Final</bundle>
<bundle>mvn:org.onlab.onos/onos-of-api/1.0.0-SNAPSHOT</bundle>
<bundle>mvn:org.onlab.onos/onos-of-ctl/1.0.0-SNAPSHOT</bundle>
+ <bundle>mvn:org.onlab.onos/onos-lldp-provider/1.0.0-SNAPSHOT</bundle>
+ <bundle>mvn:org.onlab.onos/onos-host-provider/1.0.0-SNAPSHOT</bundle>
<bundle>mvn:org.onlab.onos/onos-of-provider-device/1.0.0-SNAPSHOT</bundle>
- <bundle>mvn:org.onlab.onos/onos-of-provider-link/1.0.0-SNAPSHOT</bundle>
- <bundle>mvn:org.onlab.onos/onos-of-provider-host/1.0.0-SNAPSHOT</bundle>
<bundle>mvn:org.onlab.onos/onos-of-provider-packet/1.0.0-SNAPSHOT</bundle>
<bundle>mvn:org.onlab.onos/onos-of-provider-flow/1.0.0-SNAPSHOT</bundle>
@@ -152,6 +155,13 @@
<feature>onos-api</feature>
<bundle>mvn:org.onlab.onos/onos-app-config/1.0.0-SNAPSHOT</bundle>
</feature>
+
+ <feature name="onos-app-optical" version="1.0.0"
+ description="ONOS optical network config">
+ <feature>onos-api</feature>
+ <bundle>mvn:org.onlab.onos/onos-app-optical/1.0.0-SNAPSHOT</bundle>
+ </feature>
+
<feature name="onos-app-sdnip" version="1.0.0"
description="SDN-IP peering application">
@@ -159,4 +169,11 @@
<bundle>mvn:org.onlab.onos/onos-app-sdnip/1.0.0-SNAPSHOT</bundle>
</feature>
+ <feature name="onos-app-calendar" version="1.0.0"
+ description="REST interface for scheduling intents from an external calendar">
+ <feature>onos-api</feature>
+ <feature>onos-thirdparty-web</feature>
+ <bundle>mvn:org.onlab.onos/onos-app-calendar/1.0.0-SNAPSHOT</bundle>
+ </feature>
+
</features>
diff --git a/pom.xml b/pom.xml
index 9b275ab..ea879ea 100644
--- a/pom.xml
+++ b/pom.xml
@@ -107,7 +107,13 @@
</dependency>
<dependency>
- <groupId>commons-lang</groupId>
+ <groupId>com.googlecode.concurrent-trees</groupId>
+ <artifactId>concurrent-trees</artifactId>
+ <version>2.4.0</version>
+ </dependency>
+
+ <dependency>
+ <groupId>commons-lang</groupId>
<artifactId>commons-lang</artifactId>
<version>2.6</version>
</dependency>
@@ -164,6 +170,12 @@
<scope>provided</scope>
</dependency>
<dependency>
+ <groupId>org.osgi</groupId>
+ <artifactId>org.osgi.compendium</artifactId>
+ <version>4.3.1</version>
+ <scope>provided</scope>
+ </dependency>
+ <dependency>
<groupId>org.apache.felix</groupId>
<artifactId>org.apache.felix.scr.annotations</artifactId>
<version>1.9.8</version>
@@ -260,6 +272,13 @@
<artifactId>onos-of-api</artifactId>
<version>${project.version}</version>
</dependency>
+
+ <dependency>
+ <groupId>org.onlab.onos</groupId>
+ <artifactId>onlab-thirdparty</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+
<dependency>
<groupId>org.onlab.onos</groupId>
<artifactId>onos-of-api</artifactId>
@@ -412,7 +431,7 @@
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-checkstyle-plugin</artifactId>
- <version>2.12.1</version>
+ <version>2.13</version>
<dependencies>
<dependency>
<groupId>org.onlab.tools</groupId>
@@ -502,7 +521,7 @@
<group>
<title>Core Subsystems</title>
<packages>
- org.onlab.onos.impl:org.onlab.onos.cluster.impl:org.onlab.onos.net.device.impl:org.onlab.onos.net.link.impl:org.onlab.onos.net.host.impl:org.onlab.onos.net.topology.impl:org.onlab.onos.net.packet.impl:org.onlab.onos.net.flow.impl:org.onlab.onos.store.trivial.*:org.onlab.onos.net.*.impl:org.onlab.onos.event.impl:org.onlab.onos.store.*:org.onlab.onos.net.intent.impl:org.onlab.onos.net.proxyarp.impl:org.onlab.onos.mastership.impl
+ org.onlab.onos.impl:org.onlab.onos.cluster.impl:org.onlab.onos.net.device.impl:org.onlab.onos.net.link.impl:org.onlab.onos.net.host.impl:org.onlab.onos.net.topology.impl:org.onlab.onos.net.packet.impl:org.onlab.onos.net.flow.impl:org.onlab.onos.store.trivial.*:org.onlab.onos.net.*.impl:org.onlab.onos.event.impl:org.onlab.onos.store.*:org.onlab.onos.net.intent.impl:org.onlab.onos.net.proxyarp.impl:org.onlab.onos.mastership.impl:org.onlab.onos.json:org.onlab.onos.json.*
</packages>
</group>
<group>
@@ -527,10 +546,11 @@
<group>
<title>Sample Applications</title>
<packages>
- org.onlab.onos.tvue:org.onlab.onos.fwd:org.onlab.onos.ifwd:org.onlab.onos.mobility:org.onlab.onos.proxyarp:org.onlab.onos.foo
+ org.onlab.onos.tvue:org.onlab.onos.fwd:org.onlab.onos.ifwd:org.onlab.onos.mobility:org.onlab.onos.proxyarp:org.onlab.onos.foo:org.onlab.onos.calendar
</packages>
</group>
</groups>
+ <excludePackageNames>org.onlab.thirdparty</excludePackageNames>
</configuration>
</plugin>
diff --git a/providers/host/bin/pom.xml b/providers/host/bin/pom.xml
new file mode 100644
index 0000000..518add8
--- /dev/null
+++ b/providers/host/bin/pom.xml
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+
+ <parent>
+ <groupId>org.onlab.onos</groupId>
+ <artifactId>onos-of-providers</artifactId>
+ <version>1.0.0-SNAPSHOT</version>
+ <relativePath>../pom.xml</relativePath>
+ </parent>
+
+ <artifactId>onos-of-provider-host</artifactId>
+ <packaging>bundle</packaging>
+
+ <description>ONOS OpenFlow protocol host provider</description>
+
+</project>
diff --git a/providers/host/pom.xml b/providers/host/pom.xml
new file mode 100644
index 0000000..1cd580f
--- /dev/null
+++ b/providers/host/pom.xml
@@ -0,0 +1,30 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+
+ <parent>
+ <groupId>org.onlab.onos</groupId>
+ <artifactId>onos-providers</artifactId>
+ <version>1.0.0-SNAPSHOT</version>
+ <relativePath>../pom.xml</relativePath>
+ </parent>
+
+
+ <artifactId>onos-host-provider</artifactId>
+ <packaging>bundle</packaging>
+
+ <description>ONOS host tracking provider</description>
+ <dependencies>
+ <dependency>
+ <groupId>org.onlab.onos</groupId>
+ <artifactId>onos-api</artifactId>
+ <classifier>tests</classifier>
+ <scope>test</scope>
+ </dependency>
+ </dependencies>
+
+
+
+</project>
diff --git a/providers/host/src/main/java/org/onlab/onos/provider/host/impl/HostLocationProvider.java b/providers/host/src/main/java/org/onlab/onos/provider/host/impl/HostLocationProvider.java
new file mode 100644
index 0000000..5a8a7be
--- /dev/null
+++ b/providers/host/src/main/java/org/onlab/onos/provider/host/impl/HostLocationProvider.java
@@ -0,0 +1,122 @@
+package org.onlab.onos.provider.host.impl;
+
+import static org.slf4j.LoggerFactory.getLogger;
+
+import org.apache.felix.scr.annotations.Activate;
+import org.apache.felix.scr.annotations.Component;
+import org.apache.felix.scr.annotations.Deactivate;
+import org.apache.felix.scr.annotations.Reference;
+import org.apache.felix.scr.annotations.ReferenceCardinality;
+import org.onlab.onos.net.ConnectPoint;
+import org.onlab.onos.net.Host;
+import org.onlab.onos.net.HostId;
+import org.onlab.onos.net.HostLocation;
+import org.onlab.onos.net.host.DefaultHostDescription;
+import org.onlab.onos.net.host.HostDescription;
+import org.onlab.onos.net.host.HostProvider;
+import org.onlab.onos.net.host.HostProviderRegistry;
+import org.onlab.onos.net.host.HostProviderService;
+import org.onlab.onos.net.packet.PacketContext;
+import org.onlab.onos.net.packet.PacketProcessor;
+import org.onlab.onos.net.packet.PacketService;
+import org.onlab.onos.net.provider.AbstractProvider;
+import org.onlab.onos.net.provider.ProviderId;
+import org.onlab.onos.net.topology.Topology;
+import org.onlab.onos.net.topology.TopologyService;
+import org.onlab.packet.ARP;
+import org.onlab.packet.Ethernet;
+import org.onlab.packet.IpPrefix;
+import org.onlab.packet.VlanId;
+import org.slf4j.Logger;
+
+/**
+ * Provider which uses the packet service to detect network
+ * end-station hosts.
+ */
+@Component(immediate = true)
+public class HostLocationProvider extends AbstractProvider implements HostProvider {
+
+ private final Logger log = getLogger(getClass());
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected HostProviderRegistry providerRegistry;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected PacketService pktService;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected TopologyService topologyService;
+
+ private HostProviderService providerService;
+
+ private final InternalHostProvider processor = new InternalHostProvider();
+
+
+ /**
+ * Creates a host location provider.
+ */
+ public HostLocationProvider() {
+ super(new ProviderId("of", "org.onlab.onos.provider.host"));
+ }
+
+ @Activate
+ public void activate() {
+ providerService = providerRegistry.register(this);
+ pktService.addProcessor(processor, 1);
+ log.info("Started");
+ }
+
+ @Deactivate
+ public void deactivate() {
+ providerRegistry.unregister(this);
+ pktService.removeProcessor(processor);
+ providerService = null;
+ log.info("Stopped");
+ }
+
+ @Override
+ public void triggerProbe(Host host) {
+ log.info("Triggering probe on device {}", host);
+ }
+
+ private class InternalHostProvider implements PacketProcessor {
+
+ @Override
+ public void process(PacketContext context) {
+ if (context == null) {
+ return;
+ }
+ Ethernet eth = context.inPacket().parsed();
+
+ VlanId vlan = VlanId.vlanId(eth.getVlanID());
+ ConnectPoint heardOn = context.inPacket().receivedFrom();
+
+ // If this is not an edge port, bail out.
+ Topology topology = topologyService.currentTopology();
+ if (topologyService.isInfrastructure(topology, heardOn)) {
+ return;
+ }
+
+ HostLocation hloc = new HostLocation(heardOn, System.currentTimeMillis());
+
+ HostId hid = HostId.hostId(eth.getSourceMAC(), vlan);
+
+ // Potentially a new or moved host
+ if (eth.getEtherType() == Ethernet.TYPE_ARP) {
+ ARP arp = (ARP) eth.getPayload();
+ IpPrefix ip = IpPrefix.valueOf(arp.getSenderProtocolAddress(),
+ IpPrefix.MAX_INET_MASK);
+ HostDescription hdescr =
+ new DefaultHostDescription(eth.getSourceMAC(), vlan, hloc, ip);
+ providerService.hostDetected(hid, hdescr);
+
+ } else if (eth.getEtherType() == Ethernet.TYPE_IPV4) {
+ // Do not learn new IP addresses from plain IPv4 packets.
+ HostDescription hdescr =
+ new DefaultHostDescription(eth.getSourceMAC(), vlan, hloc);
+ providerService.hostDetected(hid, hdescr);
+
+ }
+ }
+ }
+}
diff --git a/providers/host/src/main/java/org/onlab/onos/provider/host/impl/package-info.java b/providers/host/src/main/java/org/onlab/onos/provider/host/impl/package-info.java
new file mode 100644
index 0000000..25b5315
--- /dev/null
+++ b/providers/host/src/main/java/org/onlab/onos/provider/host/impl/package-info.java
@@ -0,0 +1,4 @@
+/**
+ * Provider that uses packet service as a means of host discovery and tracking.
+ */
+package org.onlab.onos.provider.host.impl;
diff --git a/providers/host/src/test/java/org/onlab/onos/provider/host/impl/HostLocationProviderTest.java b/providers/host/src/test/java/org/onlab/onos/provider/host/impl/HostLocationProviderTest.java
new file mode 100644
index 0000000..9ba6ce14
--- /dev/null
+++ b/providers/host/src/test/java/org/onlab/onos/provider/host/impl/HostLocationProviderTest.java
@@ -0,0 +1,237 @@
+package org.onlab.onos.provider.host.impl;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+
+import java.nio.ByteBuffer;
+import java.util.Set;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.onlab.onos.net.ConnectPoint;
+import org.onlab.onos.net.DeviceId;
+import org.onlab.onos.net.HostId;
+import org.onlab.onos.net.PortNumber;
+import org.onlab.onos.net.flow.TrafficTreatment;
+import org.onlab.onos.net.host.HostDescription;
+import org.onlab.onos.net.host.HostProvider;
+import org.onlab.onos.net.host.HostProviderRegistry;
+import org.onlab.onos.net.host.HostProviderService;
+import org.onlab.onos.net.packet.DefaultInboundPacket;
+import org.onlab.onos.net.packet.InboundPacket;
+import org.onlab.onos.net.packet.OutboundPacket;
+import org.onlab.onos.net.packet.PacketContext;
+import org.onlab.onos.net.packet.PacketProcessor;
+import org.onlab.onos.net.packet.PacketService;
+import org.onlab.onos.net.provider.AbstractProviderService;
+import org.onlab.onos.net.provider.ProviderId;
+import org.onlab.onos.net.topology.Topology;
+
+import org.onlab.onos.net.topology.TopologyServiceAdapter;
+import org.onlab.packet.ARP;
+import org.onlab.packet.Ethernet;
+import org.onlab.packet.MacAddress;
+import org.onlab.packet.VlanId;
+
+public class HostLocationProviderTest {
+
+ private static final Integer INPORT = 10;
+ private static final String DEV1 = "of:1";
+ private static final String DEV2 = "of:2";
+ private static final String DEV3 = "of:3";
+
+ private static final VlanId VLAN = VlanId.vlanId();
+ private static final MacAddress MAC = MacAddress.valueOf("00:00:11:00:00:01");
+ private static final MacAddress BCMAC = MacAddress.valueOf("ff:ff:ff:ff:ff:ff");
+ private static final byte[] IP = new byte[]{10, 0, 0, 1};
+
+ private final HostLocationProvider provider = new HostLocationProvider();
+ private final TestHostRegistry hostService = new TestHostRegistry();
+ private final TestTopologyService topoService = new TestTopologyService();
+ private final TestPacketService packetService = new TestPacketService();
+
+ private PacketProcessor testProcessor;
+ private TestHostProviderService providerService;
+
+ @Before
+ public void setUp() {
+ provider.providerRegistry = hostService;
+ provider.topologyService = topoService;
+ provider.pktService = packetService;
+
+ provider.activate();
+
+ }
+
+ @Test
+ public void basics() {
+ assertNotNull("registration expected", providerService);
+ assertEquals("incorrect provider", provider, providerService.provider());
+ }
+
+ @Test
+ public void events() {
+ // new host
+
+
+ testProcessor.process(new TestPacketContext(DEV1));
+ assertNotNull("new host expected", providerService.added);
+ assertNull("host motion unexpected", providerService.moved);
+
+ // the host moved to new switch
+ testProcessor.process(new TestPacketContext(DEV2));
+ assertNotNull("host motion expected", providerService.moved);
+
+ // the host was misheard on a spine
+ testProcessor.process(new TestPacketContext(DEV3));
+ assertNull("host misheard on spine switch", providerService.spine);
+ }
+
+ @After
+ public void tearDown() {
+ provider.deactivate();
+ provider.providerRegistry = null;
+
+ }
+
+ private class TestHostRegistry implements HostProviderRegistry {
+
+ @Override
+ public HostProviderService register(HostProvider provider) {
+ providerService = new TestHostProviderService(provider);
+ return providerService;
+ }
+
+ @Override
+ public void unregister(HostProvider provider) {
+ }
+
+ @Override
+ public Set<ProviderId> getProviders() {
+ return null;
+ }
+
+ }
+
+ private class TestHostProviderService
+ extends AbstractProviderService<HostProvider>
+ implements HostProviderService {
+
+ DeviceId added = null;
+ DeviceId moved = null;
+ DeviceId spine = null;
+
+ protected TestHostProviderService(HostProvider provider) {
+ super(provider);
+ }
+
+ @Override
+ public void hostDetected(HostId hostId, HostDescription hostDescription) {
+ DeviceId descr = hostDescription.location().deviceId();
+ if (added == null) {
+ added = descr;
+ } else if ((moved == null) && !descr.equals(added)) {
+ moved = descr;
+ } else {
+ spine = descr;
+ }
+ }
+
+ @Override
+ public void hostVanished(HostId hostId) {
+ }
+
+ }
+
+ private class TestPacketService implements PacketService {
+
+ @Override
+ public void addProcessor(PacketProcessor processor, int priority) {
+ testProcessor = processor;
+ }
+
+ @Override
+ public void removeProcessor(PacketProcessor processor) {
+
+ }
+
+ @Override
+ public void emit(OutboundPacket packet) {
+
+ }
+ }
+
+
+ private class TestTopologyService extends TopologyServiceAdapter {
+ @Override
+ public boolean isInfrastructure(Topology topology,
+ ConnectPoint connectPoint) {
+ // simulate DEV3 as an infrastructure switch
+ if ((connectPoint.deviceId()).equals(DeviceId.deviceId(DEV3))) {
+ return true;
+ }
+ return false;
+ }
+ }
+
+ private class TestPacketContext implements PacketContext {
+
+ private final String deviceId;
+
+ public TestPacketContext(String deviceId) {
+ this.deviceId = deviceId;
+ }
+
+ @Override
+ public long time() {
+ return 0;
+ }
+
+ @Override
+ public InboundPacket inPacket() {
+ ARP arp = new ARP();
+ arp.setSenderProtocolAddress(IP)
+ .setSenderHardwareAddress(MAC.toBytes())
+ .setTargetHardwareAddress(BCMAC.toBytes())
+ .setTargetProtocolAddress(IP);
+
+ Ethernet eth = new Ethernet();
+ eth.setEtherType(Ethernet.TYPE_ARP)
+ .setVlanID(VLAN.toShort())
+ .setSourceMACAddress(MAC.toBytes())
+ .setDestinationMACAddress(BCMAC.getAddress())
+ .setPayload(arp);
+ ConnectPoint receivedFrom = new ConnectPoint(DeviceId.deviceId(deviceId),
+ PortNumber.portNumber(INPORT));
+ return new DefaultInboundPacket(receivedFrom, eth,
+ ByteBuffer.wrap(eth.serialize()));
+ }
+
+ @Override
+ public OutboundPacket outPacket() {
+ return null;
+ }
+
+ @Override
+ public TrafficTreatment.Builder treatmentBuilder() {
+ return null;
+ }
+
+ @Override
+ public void send() {
+
+ }
+
+ @Override
+ public boolean block() {
+ return false;
+ }
+
+ @Override
+ public boolean isHandled() {
+ return false;
+ }
+ }
+}
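The HostLocationProviderTest above wires the provider to fake registry, packet, and topology services and captures the HostProviderService handed back at registration so the assertions can observe detected hosts. A stripped-down sketch of that capturing-registry idiom, using simplified stand-in interfaces rather than the ONOS provider API:

// Simplified stand-ins for a provider/registry pair, for illustration only.
interface Provider { }

interface ProviderService<P extends Provider> {
    P provider();
}

interface ProviderRegistry<P extends Provider, S extends ProviderService<P>> {
    S register(P provider);
    void unregister(P provider);
}

final class CapturingRegistry<P extends Provider>
        implements ProviderRegistry<P, ProviderService<P>> {

    // Captured on register() so a test can inspect callbacks made by the
    // provider under test.
    ProviderService<P> captured;

    @Override
    public ProviderService<P> register(final P provider) {
        captured = new ProviderService<P>() {
            @Override
            public P provider() {
                return provider;
            }
        };
        return captured;
    }

    @Override
    public void unregister(P provider) {
        captured = null;
    }
}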
diff --git a/providers/lldp/pom.xml b/providers/lldp/pom.xml
new file mode 100644
index 0000000..06c18bd
--- /dev/null
+++ b/providers/lldp/pom.xml
@@ -0,0 +1,20 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+
+
+ <parent>
+ <groupId>org.onlab.onos</groupId>
+ <artifactId>onos-providers</artifactId>
+ <version>1.0.0-SNAPSHOT</version>
+ <relativePath>../pom.xml</relativePath>
+ </parent>
+
+ <artifactId>onos-lldp-provider</artifactId>
+ <packaging>bundle</packaging>
+
+ <description>ONOS LLDP Link Discovery</description>
+
+</project>
diff --git a/providers/lldp/src/main/java/org/onlab/onos/provider/lldp/impl/LLDPLinkProvider.java b/providers/lldp/src/main/java/org/onlab/onos/provider/lldp/impl/LLDPLinkProvider.java
new file mode 100644
index 0000000..f611496
--- /dev/null
+++ b/providers/lldp/src/main/java/org/onlab/onos/provider/lldp/impl/LLDPLinkProvider.java
@@ -0,0 +1,170 @@
+package org.onlab.onos.provider.lldp.impl;
+
+import org.apache.felix.scr.annotations.Activate;
+import org.apache.felix.scr.annotations.Component;
+import org.apache.felix.scr.annotations.Deactivate;
+import org.apache.felix.scr.annotations.Reference;
+import org.apache.felix.scr.annotations.ReferenceCardinality;
+import org.onlab.onos.mastership.MastershipService;
+import org.onlab.onos.net.ConnectPoint;
+import org.onlab.onos.net.Device;
+import org.onlab.onos.net.DeviceId;
+import org.onlab.onos.net.Port;
+import org.onlab.onos.net.device.DeviceEvent;
+import org.onlab.onos.net.device.DeviceListener;
+import org.onlab.onos.net.device.DeviceService;
+import org.onlab.onos.net.link.LinkProvider;
+import org.onlab.onos.net.link.LinkProviderRegistry;
+import org.onlab.onos.net.link.LinkProviderService;
+import org.onlab.onos.net.packet.PacketContext;
+import org.onlab.onos.net.packet.PacketProcessor;
+import org.onlab.onos.net.packet.PacketService;
+import org.onlab.onos.net.provider.AbstractProvider;
+import org.onlab.onos.net.provider.ProviderId;
+import org.slf4j.Logger;
+
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+import static org.slf4j.LoggerFactory.getLogger;
+
+
+/**
+ * Provider which uses LLDP and BDDP packets to detect network
+ * infrastructure links.
+ */
+@Component(immediate = true)
+public class LLDPLinkProvider extends AbstractProvider implements LinkProvider {
+
+ private final Logger log = getLogger(getClass());
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected LinkProviderRegistry providerRegistry;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected DeviceService deviceService;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected PacketService packetSevice;
+
+ @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+ protected MastershipService masterService;
+
+ private LinkProviderService providerService;
+
+ private final boolean useBDDP = true;
+
+
+ private final InternalLinkProvider listener = new InternalLinkProvider();
+
+ protected final Map<DeviceId, LinkDiscovery> discoverers = new ConcurrentHashMap<>();
+
+ /**
+ * Creates an LLDP link provider.
+ */
+ public LLDPLinkProvider() {
+ super(new ProviderId("lldp", "org.onlab.onos.provider.lldp"));
+ }
+
+ @Activate
+ public void activate() {
+ providerService = providerRegistry.register(this);
+ deviceService.addListener(listener);
+ packetSevice.addProcessor(listener, 0);
+
+ log.info("Started");
+ }
+
+ @Deactivate
+ public void deactivate() {
+ for (LinkDiscovery ld : discoverers.values()) {
+ ld.stop();
+ }
+ providerRegistry.unregister(this);
+ deviceService.removeListener(listener);
+ packetSevice.removeProcessor(listener);
+ providerService = null;
+
+ log.info("Stopped");
+ }
+
+
+ private class InternalLinkProvider implements PacketProcessor, DeviceListener {
+
+ @Override
+ public void event(DeviceEvent event) {
+ LinkDiscovery ld = null;
+ Device device = event.subject();
+ Port port = event.port();
+ switch (event.type()) {
+ case DEVICE_ADDED:
+ discoverers.put(device.id(),
+ new LinkDiscovery(device, packetSevice, masterService,
+ providerService, useBDDP));
+ break;
+ case PORT_ADDED:
+ case PORT_UPDATED:
+ if (event.port().isEnabled()) {
+ ld = discoverers.get(device.id());
+ if (ld == null) {
+ return;
+ }
+ ld.addPort(port);
+ } else {
+ ConnectPoint point = new ConnectPoint(device.id(),
+ port.number());
+ providerService.linksVanished(point);
+ }
+ break;
+ case PORT_REMOVED:
+ ConnectPoint point = new ConnectPoint(device.id(),
+ port.number());
+ providerService.linksVanished(point);
+ break;
+ case DEVICE_REMOVED:
+ case DEVICE_SUSPENDED:
+ ld = discoverers.get(device.id());
+ if (ld == null) {
+ return;
+ }
+ ld.stop();
+ providerService.linksVanished(device.id());
+ break;
+ case DEVICE_AVAILABILITY_CHANGED:
+ ld = discoverers.get(device.id());
+ if (ld == null) {
+ return;
+ }
+ if (deviceService.isAvailable(device.id())) {
+ ld.start();
+ } else {
+ providerService.linksVanished(device.id());
+ ld.stop();
+ }
+ break;
+ case DEVICE_UPDATED:
+ case DEVICE_MASTERSHIP_CHANGED:
+ break;
+ default:
+ log.debug("Unknown event {}", event);
+ }
+ }
+
+ @Override
+ public void process(PacketContext context) {
+ if (context == null) {
+ return;
+ }
+ LinkDiscovery ld = discoverers.get(
+ context.inPacket().receivedFrom().deviceId());
+ if (ld == null) {
+ return;
+ }
+
+ if (ld.handleLLDP(context)) {
+ context.block();
+ }
+ }
+ }
+
+}
diff --git a/providers/lldp/src/main/java/org/onlab/onos/provider/lldp/impl/LinkDiscovery.java b/providers/lldp/src/main/java/org/onlab/onos/provider/lldp/impl/LinkDiscovery.java
new file mode 100644
index 0000000..3862437
--- /dev/null
+++ b/providers/lldp/src/main/java/org/onlab/onos/provider/lldp/impl/LinkDiscovery.java
@@ -0,0 +1,369 @@
+/*******************************************************************************
+ * Copyright 2014 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ ******************************************************************************/
+package org.onlab.onos.provider.lldp.impl;
+
+
+import static org.slf4j.LoggerFactory.getLogger;
+
+import java.nio.ByteBuffer;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.jboss.netty.util.Timeout;
+import org.jboss.netty.util.TimerTask;
+import org.onlab.onos.mastership.MastershipService;
+import org.onlab.onos.net.ConnectPoint;
+import org.onlab.onos.net.Device;
+import org.onlab.onos.net.DeviceId;
+import org.onlab.onos.net.Link.Type;
+import org.onlab.onos.net.MastershipRole;
+import org.onlab.onos.net.Port;
+import org.onlab.onos.net.PortNumber;
+import org.onlab.onos.net.flow.DefaultTrafficTreatment;
+import org.onlab.onos.net.link.DefaultLinkDescription;
+import org.onlab.onos.net.link.LinkDescription;
+import org.onlab.onos.net.link.LinkProviderService;
+import org.onlab.onos.net.packet.DefaultOutboundPacket;
+import org.onlab.onos.net.packet.OutboundPacket;
+import org.onlab.onos.net.packet.PacketContext;
+import org.onlab.onos.net.packet.PacketService;
+import org.onlab.packet.Ethernet;
+import org.onlab.packet.ONOSLLDP;
+import org.onlab.util.Timer;
+import org.slf4j.Logger;
+
+
+
+/**
+ * Runs the discovery process for a physical switch. Ports are initially labeled
+ * as slow ports. When an LLDP probe is acknowledged, the port is relabeled as
+ * fast. Every probeRate milliseconds an LLDP is sent out on every fast port,
+ * and a probe is also sent on the remaining slow ports. Based on the FlowVisor
+ * topology discovery implementation.
+ *
+ * TODO: add 'fast discovery' mode: drop LLDPs in destination switch but listen
+ * for flow_removed messages
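+ *
+ * A minimal usage sketch (illustrative only; it assumes the enclosing provider
+ * has already obtained the device along with the packet, mastership and
+ * link-provider services, and has a Port and PacketContext in hand):
+ * <pre>
+ *   LinkDiscovery ld = new LinkDiscovery(device, packetService,
+ *                                        mastershipService, providerService, true);
+ *   ld.addPort(port);                  // begin probing the port as a slow port
+ *   boolean consumed = ld.handleLLDP(context); // on an incoming LLDP/BDDP probe
+ *   ld.stop();                         // cancel the periodic probe timer
+ * </pre>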
+ */
+public class LinkDiscovery implements TimerTask {
+
+ private final Device device;
+ // send 1 probe every probeRate milliseconds
+ private final long probeRate;
+ private final Set<Long> slowPorts;
+ private final Set<Long> fastPorts;
+ // number of unacknowledged probes per port
+ private final Map<Long, AtomicInteger> portProbeCount;
+ // number of probes to send before link is removed
+ private static final short MAX_PROBE_COUNT = 3;
+ private final Logger log = getLogger(getClass());
+ private final ONOSLLDP lldpPacket;
+ private final Ethernet ethPacket;
+ private Ethernet bddpEth;
+ private final boolean useBDDP;
+ private final LinkProviderService linkProvider;
+ private final PacketService pktService;
+ private final MastershipService mastershipService;
+ private Timeout timeout;
+ private boolean isStopped;
+
+ /**
+     * Instantiates a discovery manager for the given physical switch. Creates a
+     * generic LLDP packet that will be customized for the port it is sent out on,
+     * and starts the timer for the discovery process.
+     *
+     * @param device the physical switch
+     * @param pktService the packet service used to emit probes
+     * @param masterService the mastership service used to gate probe emission
+     * @param providerService the link provider service to notify of discovered links
+     * @param useBDDP optional flag to also use BDDP for discovery
+ */
+ public LinkDiscovery(Device device, PacketService pktService,
+ MastershipService masterService, LinkProviderService providerService, Boolean... useBDDP) {
+ this.device = device;
+ this.probeRate = 3000;
+ this.linkProvider = providerService;
+ this.pktService = pktService;
+ this.mastershipService = masterService;
+ this.slowPorts = Collections.synchronizedSet(new HashSet<Long>());
+ this.fastPorts = Collections.synchronizedSet(new HashSet<Long>());
+ this.portProbeCount = new HashMap<>();
+ this.lldpPacket = new ONOSLLDP();
+ this.lldpPacket.setChassisId(device.chassisId());
+ this.lldpPacket.setDevice(device.id().toString());
+
+
+ this.ethPacket = new Ethernet();
+ this.ethPacket.setEtherType(Ethernet.TYPE_LLDP);
+ this.ethPacket.setDestinationMACAddress(ONOSLLDP.LLDP_NICIRA);
+ this.ethPacket.setPayload(this.lldpPacket);
+ this.ethPacket.setPad(true);
+ this.useBDDP = useBDDP.length > 0 ? useBDDP[0] : false;
+ if (this.useBDDP) {
+ this.bddpEth = new Ethernet();
+ this.bddpEth.setPayload(this.lldpPacket);
+ this.bddpEth.setEtherType(Ethernet.TYPE_BSN);
+ this.bddpEth.setDestinationMACAddress(ONOSLLDP.BDDP_MULTICAST);
+ this.bddpEth.setPad(true);
+ log.info("Using BDDP to discover network");
+ }
+
+ start();
+ this.log.debug("Started discovery manager for switch {}",
+ device.id());
+
+ }
+
+ /**
+     * Adds a physical port to the discovery process.
+     * Sends out an initial LLDP probe and labels the port as slow.
+ *
+ * @param port the port
+ */
+ public void addPort(final Port port) {
+ this.log.debug("sending init probe to port {}",
+ port.number().toLong());
+
+ sendProbes(port.number().toLong());
+
+ synchronized (this) {
+ this.slowPorts.add(port.number().toLong());
+ }
+
+
+ }
+
+ /**
+ * Removes physical port from discovery process.
+ *
+ * @param port the port
+ */
+ public void removePort(final Port port) {
+ // Ignore ports that are not on this switch
+
+ long portnum = port.number().toLong();
+ synchronized (this) {
+ if (this.slowPorts.contains(portnum)) {
+ this.slowPorts.remove(portnum);
+
+ } else if (this.fastPorts.contains(portnum)) {
+ this.fastPorts.remove(portnum);
+ this.portProbeCount.remove(portnum);
+ // no iterator to update
+ } else {
+ this.log.warn(
+ "tried to dynamically remove non-existing port {}",
+ portnum);
+ }
+ }
+ }
+
+ /**
+     * Acknowledges receipt of an LLDP probe sent from the given port. If the
+     * port is labeled slow, relabels it as fast. If it is already fast, resets
+     * its count of unacknowledged probes.
+ *
+ * @param portNumber the port
+ */
+ public void ackProbe(final Long portNumber) {
+
+ synchronized (this) {
+ if (this.slowPorts.contains(portNumber)) {
+ this.log.debug("Setting slow port to fast: {}:{}",
+ this.device.id(), portNumber);
+ this.slowPorts.remove(portNumber);
+ this.fastPorts.add(portNumber);
+ this.portProbeCount.put(portNumber, new AtomicInteger(0));
+ } else if (this.fastPorts.contains(portNumber)) {
+ this.portProbeCount.get(portNumber).set(0);
+ } else {
+ this.log.debug(
+ "Got ackProbe for non-existing port: {}",
+ portNumber);
+
+ }
+ }
+ }
+
+
+ /**
+     * Handles an incoming ONOS-generated LLDP or BDDP packet. Acknowledges the
+     * probe for the port encoded in the packet and reports the discovered link
+     * to the provider service.
+     *
+     * @param context the packet context carrying the received probe
+     * @return true if the packet was recognized as an ONOS probe and consumed
+ */
+ public boolean handleLLDP(PacketContext context) {
+ Ethernet eth = context.inPacket().parsed();
+ ONOSLLDP onoslldp = ONOSLLDP.parseONOSLLDP(eth);
+ if (onoslldp != null) {
+ final PortNumber dstPort =
+ context.inPacket().receivedFrom().port();
+ final PortNumber srcPort = PortNumber.portNumber(onoslldp.getPort());
+ final DeviceId srcDeviceId = DeviceId.deviceId(onoslldp.getDeviceString());
+ final DeviceId dstDeviceId = context.inPacket().receivedFrom().deviceId();
+ this.ackProbe(srcPort.toLong());
+ ConnectPoint src = new ConnectPoint(srcDeviceId, srcPort);
+ ConnectPoint dst = new ConnectPoint(dstDeviceId, dstPort);
+
+ LinkDescription ld;
+ if (eth.getEtherType() == Ethernet.TYPE_BSN) {
+ ld = new DefaultLinkDescription(src, dst, Type.INDIRECT);
+ } else {
+ ld = new DefaultLinkDescription(src, dst, Type.DIRECT);
+ }
+ linkProvider.linkDetected(ld);
+ return true;
+ }
+ return false;
+ }
+
+
+
+ /**
+     * Executed periodically by the timer (every probeRate milliseconds). Sends
+     * an LLDP probe on every port labeled as fast, reports links as vanished on
+     * fast ports whose probes have repeatedly gone unacknowledged, and then
+     * sends a probe on each remaining slow port.
+     *
+     * @param t the timeout that triggered this run
+ */
+ @Override
+ public void run(final Timeout t) {
+ this.log.debug("sending probes");
+ synchronized (this) {
+ final Iterator<Long> fastIterator = this.fastPorts.iterator();
+ Long portNumber;
+ Integer probeCount;
+ while (fastIterator.hasNext()) {
+ portNumber = fastIterator.next();
+ probeCount = this.portProbeCount.get(portNumber)
+ .getAndIncrement();
+
+ if (probeCount < LinkDiscovery.MAX_PROBE_COUNT) {
+ this.log.debug("sending fast probe to port");
+ sendProbes(portNumber);
+ } else {
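+                        // The probe went unacknowledged too many times: reset the
+                        // counter and report any link on this port as vanished; the
+                        // port stays labeled fast so probing continues.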
+ // Update fast and slow ports
+ //fastIterator.remove();
+ //this.slowPorts.add(portNumber);
+ //this.portProbeCount.remove(portNumber);
+ this.portProbeCount.get(portNumber).set(0);
+
+ ConnectPoint cp = new ConnectPoint(
+ device.id(),
+ PortNumber.portNumber(portNumber));
+ log.debug("Link down -> {}", cp);
+ linkProvider.linksVanished(cp);
+ }
+ }
+
+ // send a probe for the next slow port
+ if (!this.slowPorts.isEmpty()) {
+ Iterator<Long> slowIterator = this.slowPorts.iterator();
+ while (slowIterator.hasNext()) {
+ portNumber = slowIterator.next();
+ this.log.debug("sending slow probe to port {}", portNumber);
+
+ sendProbes(portNumber);
+
+ }
+ }
+ }
+
+ // reschedule timer
+ timeout = Timer.getTimer().newTimeout(this, this.probeRate,
+ TimeUnit.MILLISECONDS);
+ }
+
+ public void stop() {
+ timeout.cancel();
+ isStopped = true;
+ }
+
+ public void start() {
+ timeout = Timer.getTimer().newTimeout(this, 0,
+ TimeUnit.MILLISECONDS);
+ isStopped = false;
+ }
+
+ /**
+ * Creates packet_out LLDP for specified output port.
+ *
+ * @param port the port
+ * @return Packet_out message with LLDP data
+ */
+ private OutboundPacket createOutBoundLLDP(final Long port) {
+ if (port == null) {
+ return null;
+ }
+ this.lldpPacket.setPortId(port.intValue());
+ this.ethPacket.setSourceMACAddress("DE:AD:BE:EF:BA:11");
+
+ final byte[] lldp = this.ethPacket.serialize();
+ OutboundPacket outboundPacket = new DefaultOutboundPacket(
+ this.device.id(),
+ DefaultTrafficTreatment.builder().setOutput(
+ PortNumber.portNumber(port)).build(),
+ ByteBuffer.wrap(lldp));
+ return outboundPacket;
+ }
+
+ /**
+ * Creates packet_out BDDP for specified output port.
+ *
+ * @param port the port
+     * @return Packet_out message with BDDP data
+ */
+ private OutboundPacket createOutBoundBDDP(final Long port) {
+ if (port == null) {
+ return null;
+ }
+ this.lldpPacket.setPortId(port.intValue());
+ this.bddpEth.setSourceMACAddress("DE:AD:BE:EF:BA:11");
+
+ final byte[] bddp = this.bddpEth.serialize();
+ OutboundPacket outboundPacket = new DefaultOutboundPacket(
+ this.device.id(),
+ DefaultTrafficTreatment.builder()
+ .setOutput(PortNumber.portNumber(port)).build(),
+ ByteBuffer.wrap(bddp));
+ return outboundPacket;
+ }
+
+ private void sendProbes(Long portNumber) {
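+        // Only emit probes when this instance is the current master for the device.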
+ if (mastershipService.getLocalRole(this.device.id()) ==
+ MastershipRole.MASTER) {
+ OutboundPacket pkt = this.createOutBoundLLDP(portNumber);
+ pktService.emit(pkt);
+ if (useBDDP) {
+ OutboundPacket bpkt = this.createOutBoundBDDP(portNumber);
+ pktService.emit(bpkt);
+ }
+ }
+ }
+
+    public boolean containsPort(Long portNumber) {
+        return slowPorts.contains(portNumber) || fastPorts.contains(portNumber);
+    }
+
+ public boolean isStopped() {
+ return isStopped;
+ }
+
+}
diff --git a/providers/lldp/src/main/java/org/onlab/onos/provider/lldp/impl/package-info.java b/providers/lldp/src/main/java/org/onlab/onos/provider/lldp/impl/package-info.java
new file mode 100644
index 0000000..c7f5b61
--- /dev/null
+++ b/providers/lldp/src/main/java/org/onlab/onos/provider/lldp/impl/package-info.java
@@ -0,0 +1,4 @@
+/**
+ * Provider that uses the core packet service to emit and intercept LLDP (and
+ * optionally BDDP) probes as a means of infrastructure link inference.
+ */
+package org.onlab.onos.provider.lldp.impl;
diff --git a/providers/lldp/src/test/java/org/onlab/onos/provider/lldp/impl/LLDPLinkProviderTest.java b/providers/lldp/src/test/java/org/onlab/onos/provider/lldp/impl/LLDPLinkProviderTest.java
new file mode 100644
index 0000000..bbf2de2
--- /dev/null
+++ b/providers/lldp/src/test/java/org/onlab/onos/provider/lldp/impl/LLDPLinkProviderTest.java
@@ -0,0 +1,490 @@
+package org.onlab.onos.provider.lldp.impl;
+
+
+import com.google.common.collect.ArrayListMultimap;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.onlab.onos.cluster.NodeId;
+import org.onlab.onos.mastership.MastershipListener;
+import org.onlab.onos.mastership.MastershipService;
+import org.onlab.onos.mastership.MastershipTermService;
+import org.onlab.onos.net.ConnectPoint;
+import org.onlab.onos.net.DefaultDevice;
+import org.onlab.onos.net.DefaultPort;
+import org.onlab.onos.net.Device;
+import org.onlab.onos.net.DeviceId;
+import org.onlab.onos.net.MastershipRole;
+import org.onlab.onos.net.Port;
+import org.onlab.onos.net.PortNumber;
+import org.onlab.onos.net.device.DeviceEvent;
+import org.onlab.onos.net.device.DeviceListener;
+import org.onlab.onos.net.device.DeviceService;
+import org.onlab.onos.net.flow.TrafficTreatment;
+import org.onlab.onos.net.link.LinkDescription;
+import org.onlab.onos.net.link.LinkProvider;
+import org.onlab.onos.net.link.LinkProviderRegistry;
+import org.onlab.onos.net.link.LinkProviderService;
+import org.onlab.onos.net.packet.DefaultInboundPacket;
+import org.onlab.onos.net.packet.InboundPacket;
+import org.onlab.onos.net.packet.OutboundPacket;
+import org.onlab.onos.net.packet.PacketContext;
+import org.onlab.onos.net.packet.PacketProcessor;
+import org.onlab.onos.net.packet.PacketService;
+import org.onlab.onos.net.provider.AbstractProviderService;
+import org.onlab.onos.net.provider.ProviderId;
+import org.onlab.packet.ChassisId;
+import org.onlab.packet.Ethernet;
+import org.onlab.packet.ONOSLLDP;
+
+import java.nio.ByteBuffer;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import static org.junit.Assert.*;
+
+public class LLDPLinkProviderTest {
+
+ private static final DeviceId DID1 = DeviceId.deviceId("of:0000000000000001");
+ private static final DeviceId DID2 = DeviceId.deviceId("of:0000000000000002");
+
+ private static Port pd1;
+ private static Port pd2;
+ private static Port pd3;
+ private static Port pd4;
+
+ private final LLDPLinkProvider provider = new LLDPLinkProvider();
+ private final TestLinkRegistry linkService = new TestLinkRegistry();
+ private final TestPacketService packetService = new TestPacketService();
+ private final TestDeviceService deviceService = new TestDeviceService();
+ private final TestMasterShipService masterService = new TestMasterShipService();
+
+ private TestLinkProviderService providerService;
+
+ private PacketProcessor testProcessor;
+ private DeviceListener deviceListener;
+
+ @Before
+ public void setUp() {
+
+ provider.deviceService = deviceService;
+ provider.packetSevice = packetService;
+ provider.providerRegistry = linkService;
+ provider.masterService = masterService;
+
+
+ provider.activate();
+ }
+
+ @Test
+ public void basics() {
+ assertNotNull("registration expected", providerService);
+ assertEquals("incorrect provider", provider, providerService.provider());
+ }
+
+ @Test
+ public void switchAdd() {
+ DeviceEvent de = deviceEvent(DeviceEvent.Type.DEVICE_ADDED, DID1);
+ deviceListener.event(de);
+
+ assertFalse("Device not added", provider.discoverers.isEmpty());
+ }
+
+ @Test
+ public void switchRemove() {
+ deviceListener.event(deviceEvent(DeviceEvent.Type.DEVICE_ADDED, DID1));
+ deviceListener.event(deviceEvent(DeviceEvent.Type.DEVICE_REMOVED, DID1));
+
+ assertTrue("Discoverer is not gone", provider.discoverers.get(DID1).isStopped());
+ assertTrue("Device is not gone.", vanishedDpid(DID1));
+ }
+
+ @Test
+ public void portUp() {
+ deviceListener.event(deviceEvent(DeviceEvent.Type.DEVICE_ADDED, DID1));
+ deviceListener.event(portEvent(DeviceEvent.Type.PORT_ADDED, DID1, port(DID1, 3, true)));
+
+ assertTrue("Port not added to discoverer",
+ provider.discoverers.get(DID1).containsPort((long) 3));
+ }
+
+ @Test
+ public void portDown() {
+
+ deviceListener.event(deviceEvent(DeviceEvent.Type.DEVICE_ADDED, DID1));
+ deviceListener.event(portEvent(DeviceEvent.Type.PORT_ADDED, DID1, port(DID1, 1, false)));
+
+
+
+ assertFalse("Port added to discoverer",
+ provider.discoverers.get(DID1).containsPort((long) 1));
+ assertTrue("Port is not gone.", vanishedPort((long) 1));
+ }
+
+ @Test
+ public void portUnknown() {
+ deviceListener.event(deviceEvent(DeviceEvent.Type.DEVICE_ADDED, DID1));
+ deviceListener.event(portEvent(DeviceEvent.Type.PORT_ADDED, DID2, port(DID2, 1, false)));
+
+
+ assertNull("DPID exists",
+ provider.discoverers.get(DID2));
+ }
+
+ @Test
+ public void unknownPktCtx() {
+
+ PacketContext pktCtx = new TestPacketContext(deviceService.getDevice(DID2));
+
+ testProcessor.process(pktCtx);
+ assertFalse("Context should still be free", pktCtx.isHandled());
+ }
+
+ @Test
+ public void knownPktCtx() {
+ deviceListener.event(deviceEvent(DeviceEvent.Type.DEVICE_ADDED, DID1));
+ deviceListener.event(deviceEvent(DeviceEvent.Type.DEVICE_ADDED, DID2));
+ PacketContext pktCtx = new TestPacketContext(deviceService.getDevice(DID2));
+
+
+ testProcessor.process(pktCtx);
+
+ assertTrue("Link not detected", detectedLink(DID1, DID2));
+
+ }
+
+
+ @After
+ public void tearDown() {
+ provider.deactivate();
+ provider.providerRegistry = null;
+ provider.deviceService = null;
+ provider.packetSevice = null;
+ }
+
+ private DeviceEvent deviceEvent(DeviceEvent.Type type, DeviceId did) {
+ return new DeviceEvent(type, deviceService.getDevice(did));
+
+ }
+
+ private DeviceEvent portEvent(DeviceEvent.Type type, DeviceId did, PortNumber port) {
+ return new DeviceEvent(type, deviceService.getDevice(did),
+ deviceService.getPort(did, port));
+ }
+
+ private DeviceEvent portEvent(DeviceEvent.Type type, DeviceId did, Port port) {
+ return new DeviceEvent(type, deviceService.getDevice(did), port);
+ }
+
+ private Port port(DeviceId did, long port, boolean enabled) {
+ return new DefaultPort(deviceService.getDevice(did),
+ PortNumber.portNumber(port), enabled);
+ }
+
+
+ private boolean vanishedDpid(DeviceId... dids) {
+ for (int i = 0; i < dids.length; i++) {
+ if (!providerService.vanishedDpid.contains(dids[i])) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ private boolean vanishedPort(Long... ports) {
+ for (int i = 0; i < ports.length; i++) {
+ if (!providerService.vanishedPort.contains(ports[i])) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ private boolean detectedLink(DeviceId src, DeviceId dst) {
+ for (DeviceId key : providerService.discoveredLinks.keySet()) {
+ if (key.equals(src)) {
+ return providerService.discoveredLinks.get(src).equals(dst);
+ }
+ }
+ return false;
+ }
+
+
+ private class TestLinkRegistry implements LinkProviderRegistry {
+
+ @Override
+ public LinkProviderService register(LinkProvider provider) {
+ providerService = new TestLinkProviderService(provider);
+ return providerService;
+ }
+
+ @Override
+ public void unregister(LinkProvider provider) {
+ }
+
+ @Override
+ public Set<ProviderId> getProviders() {
+ return null;
+ }
+
+ }
+
+ private class TestLinkProviderService
+ extends AbstractProviderService<LinkProvider>
+ implements LinkProviderService {
+
+ List<DeviceId> vanishedDpid = Lists.newLinkedList();
+ List<Long> vanishedPort = Lists.newLinkedList();
+ Map<DeviceId, DeviceId> discoveredLinks = Maps.newHashMap();
+
+ protected TestLinkProviderService(LinkProvider provider) {
+ super(provider);
+ }
+
+ @Override
+ public void linkDetected(LinkDescription linkDescription) {
+ DeviceId sDid = linkDescription.src().deviceId();
+ DeviceId dDid = linkDescription.dst().deviceId();
+ discoveredLinks.put(sDid, dDid);
+ }
+
+ @Override
+ public void linkVanished(LinkDescription linkDescription) {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public void linksVanished(ConnectPoint connectPoint) {
+ vanishedPort.add(connectPoint.port().toLong());
+
+ }
+
+ @Override
+ public void linksVanished(DeviceId deviceId) {
+ vanishedDpid.add(deviceId);
+ }
+
+
+ }
+
+
+
+ private class TestPacketContext implements PacketContext {
+
+ protected Device device;
+ protected boolean blocked = false;
+
+ public TestPacketContext(Device dev) {
+ device = dev;
+ }
+
+ @Override
+ public long time() {
+ return 0;
+ }
+
+ @Override
+ public InboundPacket inPacket() {
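+            // Fabricate an ONOS LLDP that claims to originate from DID1, port 1 (pd1)
+            // and is received on this device's port 1 (pd3), so processing it should
+            // let the provider infer a link from DID1 to this device.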
+ ONOSLLDP lldp = new ONOSLLDP();
+ lldp.setChassisId(device.chassisId());
+ lldp.setPortId((int) pd1.number().toLong());
+ lldp.setDevice(deviceService.getDevice(DID1).id().toString());
+
+
+ Ethernet ethPacket = new Ethernet();
+ ethPacket.setEtherType(Ethernet.TYPE_LLDP);
+ ethPacket.setDestinationMACAddress(ONOSLLDP.LLDP_NICIRA);
+ ethPacket.setPayload(lldp);
+ ethPacket.setPad(true);
+
+
+
+ ethPacket.setSourceMACAddress("DE:AD:BE:EF:BA:11");
+
+ ConnectPoint cp = new ConnectPoint(device.id(), pd3.number());
+
+ return new DefaultInboundPacket(cp, ethPacket,
+ ByteBuffer.wrap(ethPacket.serialize()));
+
+ }
+
+ @Override
+ public OutboundPacket outPacket() {
+ return null;
+ }
+
+ @Override
+ public TrafficTreatment.Builder treatmentBuilder() {
+ return null;
+ }
+
+ @Override
+ public void send() {
+
+ }
+
+ @Override
+ public boolean block() {
+ blocked = true;
+ return blocked;
+ }
+
+ @Override
+ public boolean isHandled() {
+ return blocked;
+ }
+
+ }
+
+ private class TestPacketService implements PacketService {
+
+ @Override
+ public void addProcessor(PacketProcessor processor, int priority) {
+ testProcessor = processor;
+ }
+
+ @Override
+ public void removeProcessor(PacketProcessor processor) {
+
+ }
+
+ @Override
+ public void emit(OutboundPacket packet) {
+
+ }
+ }
+
+ private class TestDeviceService implements DeviceService {
+
+ private Map<DeviceId, Device> devices = new HashMap<>();
+ private final ArrayListMultimap<DeviceId, Port> ports =
+ ArrayListMultimap.create();
+
+ public TestDeviceService() {
+ Device d1 = new DefaultDevice(ProviderId.NONE, DID1, Device.Type.SWITCH,
+ "TESTMF", "TESTHW", "TESTSW", "TESTSN", new ChassisId());
+ Device d2 = new DefaultDevice(ProviderId.NONE, DID2, Device.Type.SWITCH,
+ "TESTMF", "TESTHW", "TESTSW", "TESTSN", new ChassisId());
+ devices.put(DID1, d1);
+ devices.put(DID2, d2);
+
+ pd1 = new DefaultPort(d1, PortNumber.portNumber(1), true);
+ pd2 = new DefaultPort(d1, PortNumber.portNumber(2), true);
+ pd3 = new DefaultPort(d2, PortNumber.portNumber(1), true);
+ pd4 = new DefaultPort(d2, PortNumber.portNumber(2), true);
+
+ ports.putAll(DID1, Lists.newArrayList(pd1, pd2));
+ ports.putAll(DID2, Lists.newArrayList(pd3, pd4));
+
+
+ }
+
+ @Override
+ public int getDeviceCount() {
+ return devices.values().size();
+ }
+
+ @Override
+ public Iterable<Device> getDevices() {
+ return devices.values();
+ }
+
+ @Override
+ public Device getDevice(DeviceId deviceId) {
+ return devices.get(deviceId);
+ }
+
+ @Override
+ public MastershipRole getRole(DeviceId deviceId) {
+ return MastershipRole.MASTER;
+ }
+
+ @Override
+ public List<Port> getPorts(DeviceId deviceId) {
+ return ports.get(deviceId);
+ }
+
+ @Override
+ public Port getPort(DeviceId deviceId, PortNumber portNumber) {
+ for (Port p : ports.get(deviceId)) {
+ if (p.number().equals(portNumber)) {
+ return p;
+ }
+ }
+ return null;
+ }
+
+ @Override
+ public boolean isAvailable(DeviceId deviceId) {
+ return true;
+ }
+
+ @Override
+ public void addListener(DeviceListener listener) {
+ deviceListener = listener;
+
+ }
+
+ @Override
+ public void removeListener(DeviceListener listener) {
+
+ }
+ }
+
+ private final class TestMasterShipService implements MastershipService {
+
+ @Override
+ public MastershipRole getLocalRole(DeviceId deviceId) {
+ return MastershipRole.MASTER;
+ }
+
+ @Override
+ public MastershipRole requestRoleFor(DeviceId deviceId) {
+ return null;
+ }
+
+ @Override
+ public void relinquishMastership(DeviceId deviceId) {
+
+ }
+
+ @Override
+ public NodeId getMasterFor(DeviceId deviceId) {
+ return null;
+ }
+
+ @Override
+ public Set<DeviceId> getDevicesOf(NodeId nodeId) {
+ return null;
+ }
+
+ @Override
+ public MastershipTermService requestTermService() {
+ return null;
+ }
+
+ @Override
+ public void addListener(MastershipListener listener) {
+
+ }
+
+ @Override
+ public void removeListener(MastershipListener listener) {
+
+ }
+
+ @Override
+ public List<NodeId> getNodesFor(DeviceId deviceId) {
+ return Collections.emptyList();
+ }
+ }
+
+
+}
diff --git a/providers/openflow/device/src/main/java/org/onlab/onos/provider/of/device/impl/OpenFlowDeviceProvider.java b/providers/openflow/device/src/main/java/org/onlab/onos/provider/of/device/impl/OpenFlowDeviceProvider.java
index 8dbff56..fcc7810 100644
--- a/providers/openflow/device/src/main/java/org/onlab/onos/provider/of/device/impl/OpenFlowDeviceProvider.java
+++ b/providers/openflow/device/src/main/java/org/onlab/onos/provider/of/device/impl/OpenFlowDeviceProvider.java
@@ -23,6 +23,7 @@
import org.onlab.onos.openflow.controller.OpenFlowSwitch;
import org.onlab.onos.openflow.controller.OpenFlowSwitchListener;
import org.onlab.onos.openflow.controller.RoleState;
+import org.onlab.packet.ChassisId;
import org.projectfloodlight.openflow.protocol.OFPortConfig;
import org.projectfloodlight.openflow.protocol.OFPortDesc;
import org.projectfloodlight.openflow.protocol.OFPortState;
@@ -120,12 +121,14 @@
Device.Type deviceType = sw.isOptical() ? Device.Type.ROADM :
Device.Type.SWITCH;
+ ChassisId cId = new ChassisId(dpid.value());
DeviceDescription description =
new DefaultDeviceDescription(did.uri(), deviceType,
sw.manfacturerDescription(),
sw.hardwareDescription(),
sw.softwareDescription(),
- sw.serialNumber());
+ sw.serialNumber(),
+ cId);
providerService.deviceConnected(did, description);
providerService.updatePorts(did, buildPortDescriptions(sw.getPorts()));
}
@@ -172,7 +175,7 @@
*/
private List<PortDescription> buildPortDescriptions(
List<OFPortDesc> ports) {
- final List<PortDescription> portDescs = new ArrayList<>();
+ final List<PortDescription> portDescs = new ArrayList<>(ports.size());
for (OFPortDesc port : ports) {
portDescs.add(buildPortDescription(port));
}
diff --git a/providers/openflow/device/src/test/java/org/onlab/onos/provider/of/device/impl/OpenFlowDeviceProviderTest.java b/providers/openflow/device/src/test/java/org/onlab/onos/provider/of/device/impl/OpenFlowDeviceProviderTest.java
index f0e1c73..f7fdbfb 100644
--- a/providers/openflow/device/src/test/java/org/onlab/onos/provider/of/device/impl/OpenFlowDeviceProviderTest.java
+++ b/providers/openflow/device/src/test/java/org/onlab/onos/provider/of/device/impl/OpenFlowDeviceProviderTest.java
@@ -59,7 +59,7 @@
private static final List<OFPortDesc> PLIST = Lists.newArrayList(PD1, PD2);
private static final Device DEV1 =
- new DefaultDevice(PID, DID1, SWITCH, "", "", "", "");
+ new DefaultDevice(PID, DID1, SWITCH, "", "", "", "", null);
private static final TestOpenFlowSwitch SW1 = new TestOpenFlowSwitch();
diff --git a/providers/openflow/flow/src/main/java/org/onlab/onos/provider/of/flow/impl/FlowEntryBuilder.java b/providers/openflow/flow/src/main/java/org/onlab/onos/provider/of/flow/impl/FlowEntryBuilder.java
index 14c2c22..9924a71 100644
--- a/providers/openflow/flow/src/main/java/org/onlab/onos/provider/of/flow/impl/FlowEntryBuilder.java
+++ b/providers/openflow/flow/src/main/java/org/onlab/onos/provider/of/flow/impl/FlowEntryBuilder.java
@@ -35,6 +35,7 @@
import org.projectfloodlight.openflow.protocol.match.Match;
import org.projectfloodlight.openflow.protocol.match.MatchField;
import org.projectfloodlight.openflow.types.IPv4Address;
+import org.projectfloodlight.openflow.types.Masked;
import org.slf4j.Logger;
import com.google.common.collect.Lists;
@@ -218,23 +219,35 @@
builder.matchEthType((short) ethType);
break;
case IPV4_DST:
- IPv4Address di = match.get(MatchField.IPV4_DST);
IpPrefix dip;
- if (di.isCidrMask()) {
- dip = IpPrefix.valueOf(di.getInt(), di.asCidrMaskLength());
+ if (match.isPartiallyMasked(MatchField.IPV4_DST)) {
+ Masked<IPv4Address> maskedIp = match.getMasked(MatchField.IPV4_DST);
+
+ dip = IpPrefix.valueOf(
+ maskedIp.getValue().getInt(),
+ maskedIp.getMask().asCidrMaskLength());
} else {
- dip = IpPrefix.valueOf(di.getInt());
+ dip = IpPrefix.valueOf(
+ match.get(MatchField.IPV4_DST).getInt(),
+ IpPrefix.MAX_INET_MASK);
}
+
builder.matchIPDst(dip);
break;
case IPV4_SRC:
- IPv4Address si = match.get(MatchField.IPV4_SRC);
IpPrefix sip;
- if (si.isCidrMask()) {
- sip = IpPrefix.valueOf(si.getInt(), si.asCidrMaskLength());
+ if (match.isPartiallyMasked(MatchField.IPV4_SRC)) {
+ Masked<IPv4Address> maskedIp = match.getMasked(MatchField.IPV4_SRC);
+
+ sip = IpPrefix.valueOf(
+ maskedIp.getValue().getInt(),
+ maskedIp.getMask().asCidrMaskLength());
} else {
- sip = IpPrefix.valueOf(si.getInt());
+ sip = IpPrefix.valueOf(
+ match.get(MatchField.IPV4_SRC).getInt(),
+ IpPrefix.MAX_INET_MASK);
}
+
builder.matchIPSrc(sip);
break;
case IP_PROTO:
@@ -249,6 +262,12 @@
VlanId vlanId = VlanId.vlanId(match.get(MatchField.VLAN_VID).getVlan());
builder.matchVlanId(vlanId);
break;
+ case TCP_DST:
+ builder.matchTcpDst((short) match.get(MatchField.TCP_DST).getPort());
+ break;
+ case TCP_SRC:
+ builder.matchTcpSrc((short) match.get(MatchField.TCP_SRC).getPort());
+ break;
case ARP_OP:
case ARP_SHA:
case ARP_SPA:
@@ -272,8 +291,6 @@
case MPLS_TC:
case SCTP_DST:
case SCTP_SRC:
- case TCP_DST:
- case TCP_SRC:
case TUNNEL_ID:
case UDP_DST:
case UDP_SRC:
diff --git a/providers/openflow/flow/src/main/java/org/onlab/onos/provider/of/flow/impl/FlowModBuilder.java b/providers/openflow/flow/src/main/java/org/onlab/onos/provider/of/flow/impl/FlowModBuilder.java
index c1c56c3..aa50833 100644
--- a/providers/openflow/flow/src/main/java/org/onlab/onos/provider/of/flow/impl/FlowModBuilder.java
+++ b/providers/openflow/flow/src/main/java/org/onlab/onos/provider/of/flow/impl/FlowModBuilder.java
@@ -15,6 +15,7 @@
import org.onlab.onos.net.flow.criteria.Criteria.IPCriterion;
import org.onlab.onos.net.flow.criteria.Criteria.IPProtocolCriterion;
import org.onlab.onos.net.flow.criteria.Criteria.PortCriterion;
+import org.onlab.onos.net.flow.criteria.Criteria.TcpPortCriterion;
import org.onlab.onos.net.flow.criteria.Criteria.VlanIdCriterion;
import org.onlab.onos.net.flow.criteria.Criteria.VlanPcpCriterion;
import org.onlab.onos.net.flow.criteria.Criterion;
@@ -42,6 +43,7 @@
import org.projectfloodlight.openflow.types.OFBufferId;
import org.projectfloodlight.openflow.types.OFPort;
import org.projectfloodlight.openflow.types.OFVlanVidMatch;
+import org.projectfloodlight.openflow.types.TransportPort;
import org.projectfloodlight.openflow.types.U64;
import org.projectfloodlight.openflow.types.VlanPcp;
import org.projectfloodlight.openflow.types.VlanVid;
@@ -161,10 +163,10 @@
switch (l3m.subtype()) {
case IP_DST:
ip = (ModIPInstruction) i;
- return factory.actions().setNwDst(IPv4Address.of(ip.ip().toRealInt()));
+ return factory.actions().setNwDst(IPv4Address.of(ip.ip().toInt()));
case IP_SRC:
ip = (ModIPInstruction) i;
- return factory.actions().setNwSrc(IPv4Address.of(ip.ip().toRealInt()));
+ return factory.actions().setNwSrc(IPv4Address.of(ip.ip().toInt()));
default:
log.warn("Unimplemented action type {}.", l3m.subtype());
break;
@@ -199,6 +201,7 @@
Match.Builder mBuilder = factory.buildMatch();
EthCriterion eth;
IPCriterion ip;
+ TcpPortCriterion tp;
for (Criterion c : selector.criteria()) {
switch (c.type()) {
case IN_PORT:
@@ -220,21 +223,21 @@
case IPV4_DST:
ip = (IPCriterion) c;
if (ip.ip().isMasked()) {
- Masked<IPv4Address> maskedIp = Masked.of(IPv4Address.of(ip.ip().toRealInt()),
- IPv4Address.of(ip.ip().netmask().toRealInt()));
+ Masked<IPv4Address> maskedIp = Masked.of(IPv4Address.of(ip.ip().toInt()),
+ IPv4Address.of(ip.ip().netmask().toInt()));
mBuilder.setMasked(MatchField.IPV4_DST, maskedIp);
} else {
- mBuilder.setExact(MatchField.IPV4_DST, IPv4Address.of(ip.ip().toRealInt()));
+ mBuilder.setExact(MatchField.IPV4_DST, IPv4Address.of(ip.ip().toInt()));
}
break;
case IPV4_SRC:
ip = (IPCriterion) c;
if (ip.ip().isMasked()) {
- Masked<IPv4Address> maskedIp = Masked.of(IPv4Address.of(ip.ip().toRealInt()),
- IPv4Address.of(ip.ip().netmask().toRealInt()));
+ Masked<IPv4Address> maskedIp = Masked.of(IPv4Address.of(ip.ip().toInt()),
+ IPv4Address.of(ip.ip().netmask().toInt()));
mBuilder.setMasked(MatchField.IPV4_SRC, maskedIp);
} else {
- mBuilder.setExact(MatchField.IPV4_SRC, IPv4Address.of(ip.ip().toRealInt()));
+ mBuilder.setExact(MatchField.IPV4_SRC, IPv4Address.of(ip.ip().toInt()));
}
break;
case IP_PROTO:
@@ -250,6 +253,14 @@
mBuilder.setExact(MatchField.VLAN_VID,
OFVlanVidMatch.ofVlanVid(VlanVid.ofVlan(vid.vlanId().toShort())));
break;
+ case TCP_DST:
+ tp = (TcpPortCriterion) c;
+ mBuilder.setExact(MatchField.TCP_DST, TransportPort.of(tp.tcpPort()));
+ break;
+ case TCP_SRC:
+ tp = (TcpPortCriterion) c;
+ mBuilder.setExact(MatchField.TCP_SRC, TransportPort.of(tp.tcpPort()));
+ break;
case ARP_OP:
case ARP_SHA:
case ARP_SPA:
@@ -276,8 +287,6 @@
case PBB_ISID:
case SCTP_DST:
case SCTP_SRC:
- case TCP_DST:
- case TCP_SRC:
case TUNNEL_ID:
case UDP_DST:
case UDP_SRC:
diff --git a/providers/openflow/flow/src/main/java/org/onlab/onos/provider/of/flow/impl/OpenFlowRuleProvider.java b/providers/openflow/flow/src/main/java/org/onlab/onos/provider/of/flow/impl/OpenFlowRuleProvider.java
index ac0bb61..94d1282 100644
--- a/providers/openflow/flow/src/main/java/org/onlab/onos/provider/of/flow/impl/OpenFlowRuleProvider.java
+++ b/providers/openflow/flow/src/main/java/org/onlab/onos/provider/of/flow/impl/OpenFlowRuleProvider.java
@@ -1,20 +1,9 @@
package org.onlab.onos.provider.of.flow.impl;
-import static org.slf4j.LoggerFactory.getLogger;
-
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import java.util.concurrent.atomic.AtomicBoolean;
-
+import com.google.common.collect.ArrayListMultimap;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Multimap;
import org.apache.felix.scr.annotations.Activate;
import org.apache.felix.scr.annotations.Component;
import org.apache.felix.scr.annotations.Deactivate;
@@ -68,10 +57,20 @@
import org.projectfloodlight.openflow.types.U32;
import org.slf4j.Logger;
-import com.google.common.collect.ArrayListMultimap;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import com.google.common.collect.Multimap;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static org.slf4j.LoggerFactory.getLogger;
/**
* Provider which uses an OpenFlow controller to detect network
@@ -166,6 +165,16 @@
for (FlowRuleBatchEntry fbe : batch.getOperations()) {
FlowRule flowRule = fbe.getTarget();
OpenFlowSwitch sw = controller.getSwitch(Dpid.dpid(flowRule.deviceId().uri()));
+ if (sw == null) {
+ /*
+ * if a switch we are supposed to install to is gone then
+ * cancel (ie. rollback) the work that has been done so far
+ * and return the associated future.
+ */
+ InstallationFuture failed = new InstallationFuture(sws, fmXids);
+ failed.cancel(true);
+ return failed;
+ }
sws.add(new Dpid(sw.getId()));
FlowModBuilder builder = new FlowModBuilder(flowRule, sw.factory());
switch (fbe.getOperator()) {
@@ -322,6 +331,7 @@
public void fail(OFErrorMsg msg, Dpid dpid) {
ok.set(false);
+ removeRequirement(dpid);
FlowEntry fe = null;
FlowRuleBatchEntry fbe = fms.get(msg.getXid());
FlowRule offending = fbe.getTarget();
@@ -374,11 +384,8 @@
public void satisfyRequirement(Dpid dpid) {
- log.warn("Satisfaction from switch {}", dpid);
- sws.remove(dpid);
- countDownLatch.countDown();
- cleanUp();
-
+ log.debug("Satisfaction from switch {}", dpid);
+ removeRequirement(dpid);
}
@@ -395,6 +402,7 @@
@Override
public boolean cancel(boolean mayInterruptIfRunning) {
+ ok.set(false);
this.state = BatchState.CANCELLED;
cleanUp();
for (FlowRuleBatchEntry fbe : fms.values()) {
@@ -438,7 +446,7 @@
}
private void cleanUp() {
- if (sws.isEmpty()) {
+ if (isDone() || isCancelled()) {
pendingFutures.remove(pendingXid);
for (Long xid : fms.keySet()) {
pendingFMs.remove(xid);
@@ -446,6 +454,12 @@
}
}
+ private void removeRequirement(Dpid dpid) {
+ countDownLatch.countDown();
+ sws.remove(dpid);
+ cleanUp();
+ }
+
}
}
diff --git a/providers/openflow/host/src/main/java/org/onlab/onos/provider/of/host/impl/OpenFlowHostProvider.java b/providers/openflow/host/src/main/java/org/onlab/onos/provider/of/host/impl/OpenFlowHostProvider.java
index 45a7bd8..845cc19 100644
--- a/providers/openflow/host/src/main/java/org/onlab/onos/provider/of/host/impl/OpenFlowHostProvider.java
+++ b/providers/openflow/host/src/main/java/org/onlab/onos/provider/of/host/impl/OpenFlowHostProvider.java
@@ -1,5 +1,9 @@
package org.onlab.onos.provider.of.host.impl;
+import static org.onlab.onos.net.DeviceId.deviceId;
+import static org.onlab.onos.net.PortNumber.portNumber;
+import static org.slf4j.LoggerFactory.getLogger;
+
import org.apache.felix.scr.annotations.Activate;
import org.apache.felix.scr.annotations.Component;
import org.apache.felix.scr.annotations.Deactivate;
@@ -29,15 +33,12 @@
import org.onlab.packet.VlanId;
import org.slf4j.Logger;
-import static org.onlab.onos.net.DeviceId.deviceId;
-import static org.onlab.onos.net.PortNumber.portNumber;
-import static org.slf4j.LoggerFactory.getLogger;
-
/**
* Provider which uses an OpenFlow controller to detect network
* end-station hosts.
*/
@Component(immediate = true)
+@Deprecated
public class OpenFlowHostProvider extends AbstractProvider implements HostProvider {
private final Logger log = getLogger(getClass());
@@ -109,14 +110,16 @@
// Potentially a new or moved host
if (eth.getEtherType() == Ethernet.TYPE_ARP) {
ARP arp = (ARP) eth.getPayload();
- IpPrefix ip = IpPrefix.valueOf(arp.getSenderProtocolAddress());
+ IpPrefix ip = IpPrefix.valueOf(arp.getSenderProtocolAddress(),
+ IpPrefix.MAX_INET_MASK);
HostDescription hdescr =
new DefaultHostDescription(eth.getSourceMAC(), vlan, hloc, ip);
providerService.hostDetected(hid, hdescr);
} else if (ipLearn && eth.getEtherType() == Ethernet.TYPE_IPV4) {
IPv4 pip = (IPv4) eth.getPayload();
- IpPrefix ip = IpPrefix.valueOf(pip.getSourceAddress());
+ IpPrefix ip = IpPrefix.valueOf(pip.getSourceAddress(),
+ IpPrefix.MAX_INET_MASK);
HostDescription hdescr =
new DefaultHostDescription(eth.getSourceMAC(), vlan, hloc, ip);
providerService.hostDetected(hid, hdescr);
diff --git a/providers/openflow/link/src/main/java/org/onlab/onos/provider/of/link/impl/LinkDiscovery.java b/providers/openflow/link/src/main/java/org/onlab/onos/provider/of/link/impl/LinkDiscovery.java
index 0c4502b..3319302 100644
--- a/providers/openflow/link/src/main/java/org/onlab/onos/provider/of/link/impl/LinkDiscovery.java
+++ b/providers/openflow/link/src/main/java/org/onlab/onos/provider/of/link/impl/LinkDiscovery.java
@@ -66,6 +66,7 @@
* TODO: add 'fast discovery' mode: drop LLDPs in destination switch but listen
* for flow_removed messages
*/
+@Deprecated
public class LinkDiscovery implements TimerTask {
private final OpenFlowSwitch sw;
@@ -339,9 +340,14 @@
final Iterator<Integer> fastIterator = this.fastPorts.iterator();
while (fastIterator.hasNext()) {
final Integer portNumber = fastIterator.next();
+ OFPortDesc port = findPort(portNumber);
+ if (port == null) {
+                // port can be null because #removePort modifies `ports`
+                // outside this synchronized block
+ continue;
+ }
final int probeCount = this.portProbeCount.get(portNumber)
.getAndIncrement();
- OFPortDesc port = findPort(portNumber);
if (probeCount < LinkDiscovery.MAX_PROBE_COUNT) {
this.log.debug("sending fast probe to port");
diff --git a/providers/openflow/link/src/main/java/org/onlab/onos/provider/of/link/impl/OpenFlowLinkProvider.java b/providers/openflow/link/src/main/java/org/onlab/onos/provider/of/link/impl/OpenFlowLinkProvider.java
index f1e3861..7f16eaa 100644
--- a/providers/openflow/link/src/main/java/org/onlab/onos/provider/of/link/impl/OpenFlowLinkProvider.java
+++ b/providers/openflow/link/src/main/java/org/onlab/onos/provider/of/link/impl/OpenFlowLinkProvider.java
@@ -35,6 +35,7 @@
* infrastructure links.
*/
@Component(immediate = true)
+@Deprecated
public class OpenFlowLinkProvider extends AbstractProvider implements LinkProvider {
private final Logger log = getLogger(getClass());
diff --git a/providers/openflow/packet/src/main/java/org/onlab/onos/provider/of/packet/impl/OpenFlowPacketProvider.java b/providers/openflow/packet/src/main/java/org/onlab/onos/provider/of/packet/impl/OpenFlowPacketProvider.java
index 94f7a33..8be1fec 100644
--- a/providers/openflow/packet/src/main/java/org/onlab/onos/provider/of/packet/impl/OpenFlowPacketProvider.java
+++ b/providers/openflow/packet/src/main/java/org/onlab/onos/provider/of/packet/impl/OpenFlowPacketProvider.java
@@ -28,7 +28,6 @@
import org.onlab.onos.openflow.controller.OpenFlowPacketContext;
import org.onlab.onos.openflow.controller.OpenFlowSwitch;
import org.onlab.onos.openflow.controller.PacketListener;
-import org.onlab.packet.Ethernet;
import org.projectfloodlight.openflow.protocol.OFPacketOut;
import org.projectfloodlight.openflow.protocol.OFPortDesc;
import org.projectfloodlight.openflow.protocol.action.OFAction;
@@ -96,13 +95,13 @@
return;
}
- Ethernet eth = new Ethernet();
- eth.deserialize(packet.data().array(), 0, packet.data().array().length);
+ //Ethernet eth = new Ethernet();
+ //eth.deserialize(packet.data().array(), 0, packet.data().array().length);
OFPortDesc p = null;
for (Instruction inst : packet.treatment().instructions()) {
if (inst.type().equals(Instruction.Type.OUTPUT)) {
p = portDesc(((OutputInstruction) inst).port());
- OFPacketOut po = packetOut(sw, eth, p.getPortNo());
+ OFPacketOut po = packetOut(sw, packet.data().array(), p.getPortNo());
sw.sendMsg(po);
}
}
@@ -116,7 +115,7 @@
return builder.build();
}
- private OFPacketOut packetOut(OpenFlowSwitch sw, Ethernet eth, OFPort out) {
+ private OFPacketOut packetOut(OpenFlowSwitch sw, byte[] eth, OFPort out) {
OFPacketOut.Builder builder = sw.factory().buildPacketOut();
OFAction act = sw.factory().actions()
.buildOutput()
@@ -126,7 +125,7 @@
.setBufferId(OFBufferId.NO_BUFFER)
.setInPort(OFPort.NO_MASK)
.setActions(Collections.singletonList(act))
- .setData(eth.serialize())
+ .setData(eth)
.build();
}
diff --git a/providers/pom.xml b/providers/pom.xml
index da63b72..b2ed2f1 100644
--- a/providers/pom.xml
+++ b/providers/pom.xml
@@ -18,6 +18,8 @@
<modules>
<module>openflow</module>
+ <module>lldp</module>
+ <module>host</module>
</modules>
<dependencies>
diff --git a/tools/build/envDefaults b/tools/build/envDefaults
index f1e1346..184ad33 100644
--- a/tools/build/envDefaults
+++ b/tools/build/envDefaults
@@ -5,8 +5,9 @@
# M2 repository and Karaf gold bits
export M2_REPO=${M2_REPO:-~/.m2/repository}
-export KARAF_ZIP=${KARAF_ZIP:-~/Downloads/apache-karaf-3.0.1.zip}
-export KARAF_TAR=${KARAF_TAR:-~/Downloads/apache-karaf-3.0.1.tar.gz}
+export KARAF_VERSION=${KARAF_VERSION:-3.0.1}
+export KARAF_ZIP=${KARAF_ZIP:-~/Downloads/apache-karaf-$KARAF_VERSION.zip}
+export KARAF_TAR=${KARAF_TAR:-~/Downloads/apache-karaf-$KARAF_VERSION.tar.gz}
export KARAF_DIST=$(basename $KARAF_ZIP .zip)
# Add ONOS-specific directories to the exectable PATH
@@ -25,6 +26,9 @@
export ONOS_TAR=$ONOS_STAGE.tar.gz
# Defaults for ONOS testing using remote machines.
+if [ -n "${ONOS_CELL}" -a -f $ONOS_ROOT/tools/test/cells/${ONOS_CELL} ]; then
+ . $ONOS_ROOT/tools/test/cells/${ONOS_CELL}
+fi
export ONOS_INSTALL_DIR="/opt/onos" # Installation directory on remote
export OCI="${OCI:-192.168.56.101}" # ONOS Controller Instance
export ONOS_USER="sdn" # ONOS user on remote system
diff --git a/tools/build/onos-package b/tools/build/onos-package
index 5ae80a2..9368cb4 100755
--- a/tools/build/onos-package
+++ b/tools/build/onos-package
@@ -13,13 +13,23 @@
# Make sure we have the original apache karaf bits first
[ ! -d $M2_REPO ] && echo "M2 repository $M2_REPO not found" && exit 1
-[ ! -f $KARAF_ZIP -a ! -f $KARAF_TAR ] && echo "Apache Karaf bits $KARAF_ZIP or $KARAF_TAR not found" && exit 1
[ -d $ONOS_STAGE ] && echo "ONOS stage $ONOS_STAGE already exists" && exit 1
# Create the stage directory and warp into it
mkdir -p $ONOS_STAGE
cd $ONOS_STAGE
+# Check if Apache Karaf bits are available and if not, fetch them.
+if [ ! -f $KARAF_ZIP -a ! -f $KARAF_TAR ]; then
+ echo "Downloading $KARAF_TAR..."
+ karafURL=$(curl -s http://www.apache.org/dyn/closer.cgi/karaf/$KARAF_VERSION/apache-karaf-$KARAF_VERSION.tar.gz |
+ grep "<a href=\".*apache-karaf-$KARAF_VERSION.tar.gz\"" |
+ head -n 1 | sed 's/.*<a href="//g;s/".*//g')
+ curl -s $karafURL > $KARAF_TAR
+fi
+[ ! -f $KARAF_ZIP -a ! -f $KARAF_TAR ] && \
+ echo "Apache Karaf bits $KARAF_ZIP or $KARAF_TAR not found" && exit 1
+
# Unroll the Apache Karaf bits, prune them and make ONOS top-level directories.
[ -f $KARAF_ZIP ] && unzip -q $KARAF_ZIP && rm -rf $KARAF_DIST/demos
[ -f $KARAF_TAR ] && tar zxf $KARAF_TAR && rm -rf $KARAF_DIST/demos
@@ -28,7 +38,13 @@
# Stage the ONOS admin scripts and patch in Karaf service wrapper extras
cp -r $ONOS_ROOT/tools/package/bin .
cp -r $ONOS_ROOT/tools/package/debian $ONOS_STAGE/debian
-cp -r $ONOS_ROOT/tools/package/etc/* $KARAF_DIST/etc
+cp -r $ONOS_ROOT/tools/package/etc/* $ONOS_STAGE/$KARAF_DIST/etc
+
+# Patch-in proper Karaf version into the startup script.
+sed "s/\$KARAF_VERSION/$KARAF_VERSION/g" \
+ $ONOS_ROOT/tools/package/bin/onos-service > bin/onos-service
+sed "s/\$KARAF_VERSION/$KARAF_VERSION/g" \
+ $ONOS_ROOT/tools/package/bin/onos > bin/onos
# Stage the ONOS bundles
mkdir -p $KARAF_DIST/system/org/onlab
@@ -36,16 +52,6 @@
export ONOS_FEATURES="${ONOS_FEATURES:-webconsole,onos-api,onos-core,onos-cli,onos-rest,onos-gui,onos-openflow,onos-app-fwd,onos-app-foo}"
-# Cellar Patching --------------------------------------------------------------
-
-# Patch the Apache Karaf distribution file to add Cellar features repository
-#perl -pi.old -e "s|^(featuresRepositories=.*)|\1,mvn:org.apache.karaf.cellar/apache-karaf-cellar/3.0.0/xml/features|" \
-# $ONOS_STAGE/$KARAF_DIST/etc/org.apache.karaf.features.cfg
-
-# Patch the Apache Karaf distribution file to load ONOS features
-#perl -pi.old -e 's|^(featuresBoot=.*)|\1,cellar|' \
-# $ONOS_STAGE/$KARAF_DIST/etc/org.apache.karaf.features.cfg
-
# ONOS Patching ----------------------------------------------------------------
# Patch the Apache Karaf distribution file to add ONOS features repository
diff --git a/tools/build/onos-test b/tools/build/onos-test
index 740e370..4ccb85e 100755
--- a/tools/build/onos-test
+++ b/tools/build/onos-test
@@ -9,6 +9,8 @@
nodes=$(env | sort | egrep "OC[0-9]+" | cut -d= -f2)
onos-package
+onos-verify-cell || exit 1
+
for node in $nodes; do onos-install -f $node 1>/dev/null & done
# Wait for shutdown before waiting for restart
diff --git a/tools/dev/bash_profile b/tools/dev/bash_profile
index e14c43b..5ee2392 100644
--- a/tools/dev/bash_profile
+++ b/tools/dev/bash_profile
@@ -6,7 +6,13 @@
export ONOS_ROOT=${ONOS_ROOT:-~/onos-next}
# Setup some environmental context for developers
-export JAVA_HOME=${JAVA_HOME:-$(/usr/libexec/java_home -v 1.7)}
+if [ -z "${JAVA_HOME}" ]; then
+ if [ -x /usr/libexec/java_home ]; then
+ export JAVA_HOME=$(/usr/libexec/java_home -v 1.7)
+ elif [ -d /usr/lib/jvm/java-7-openjdk-amd64 ]; then
+ export JAVA_HOME="/usr/lib/jvm/java-7-openjdk-amd64"
+ fi
+fi
export MAVEN=${MAVEN:-~/Applications/apache-maven-3.2.2}
export KARAF=${KARAF:-~/Applications/apache-karaf-3.0.1}
export KARAF_LOG=$KARAF/data/log/karaf.log
@@ -15,7 +21,6 @@
export PATH="$PATH:$ONOS_ROOT/tools/dev/bin:$ONOS_ROOT/tools/test/bin"
export PATH="$PATH:$ONOS_ROOT/tools/build"
export PATH="$PATH:$MAVEN/bin:$KARAF/bin"
-export PATH="$PATH:."
# Convenience utility to warp to various ONOS source projects
# e.g. 'o api', 'o dev', 'o'
@@ -40,6 +45,7 @@
# Short-hand for tailing the ONOS (karaf) log
alias tl='$ONOS_ROOT/tools/dev/bin/onos-local-log'
alias tlo='tl | grep --colour=always org.onlab'
+alias ll='less $KARAF_LOG'
# Pretty-print JSON output
alias pp='python -m json.tool'
@@ -62,13 +68,14 @@
[ ! -f $ONOS_ROOT/tools/test/cells/$1 ] && \
echo "No such cell: $1" >&2 && return 1
unset ONOS_CELL ONOS_NIC ONOS_FEATURES
- unset OC1 OC2 OC3 OC4 OC5 OC6 OC7 OC8 OC9 OCN OCI
+ unset OC0 OC1 OC2 OC3 OC4 OC5 OC6 OC7 OC8 OC9 OCN OCI
+ export ONOS_CELL=$1
. $ONOS_ROOT/tools/test/cells/$1
cell
else
env | egrep "ONOS_CELL"
env | egrep "OCI"
- env | egrep "OC[1-9]+" | sort
+ env | egrep "OC[0-9]+" | sort
env | egrep "OCN"
env | egrep "ONOS_" | egrep -v 'ONOS_ROOT|ONOS_CELL'
fi
diff --git a/tools/dev/onos.cshrc b/tools/dev/onos.cshrc
new file mode 100644
index 0000000..8dba4b2
--- /dev/null
+++ b/tools/dev/onos.cshrc
@@ -0,0 +1,36 @@
+#!/bin/tcsh
+# ONOS developer csh/tcsh profile conveniences
+# Simply include in your own $HOME/.cshrc file. E.g.:
+#
+# setenv ONOS_ROOT $HOME/onos
+# if ( -f $ONOS_ROOT/tools/dev/onos.cshrc ) then
+# source $ONOS_ROOT/tools/dev/onos.cshrc
+# endif
+#
+
+# Root of the ONOS source tree
+if ( ! $?ONOS_ROOT ) then
+ setenv ONOS_ROOT $HOME/onos
+endif
+
+# Setup some environmental context for developers
+if ( ! $?JAVA_HOME ) then
+ if ( -x /usr/libexec/java_home ) then
+ setenv JAVA_HOME `/usr/libexec/java_home -v 1.7`
+ else if ( -d /usr/lib/jvm/java-7-openjdk-amd64 ) then
+ setenv JAVA_HOME /usr/lib/jvm/java-7-openjdk-amd64
+ endif
+endif
+if ( ! $?MAVEN ) then
+ setenv MAVEN $HOME/Applications/apache-maven-3.2.2
+endif
+if ( ! $?KARAF ) then
+ setenv KARAF $HOME/Applications/apache-karaf-3.0.1
+endif
+setenv KARAF_LOG $KARAF/data/log/karaf.log
+
+alias onos-setup-cell ' ( $ONOS_ROOT/tools/test/bin/onos-show-cell \!^ ) && setenv ONOS_CELL \!^'
+
+set path=( $path $ONOS_ROOT/tools/dev/bin $ONOS_ROOT/tools/test/bin )
+set path=( $path $ONOS_ROOT/tools/build )
+set path=( $path $KARAF/bin )
diff --git a/tools/package/bin/onos b/tools/package/bin/onos
index 0489318..bd5059f 100755
--- a/tools/package/bin/onos
+++ b/tools/package/bin/onos
@@ -5,5 +5,5 @@
export JAVA_HOME=${JAVA_HOME:-/usr/lib/jvm/java-7-openjdk-amd64/}
-cd $(dirname $0)/../apache-karaf-*/bin
+cd $(dirname $0)/../apache-karaf-$KARAF_VERSION/bin
./client -h localhost "$@"
diff --git a/tools/package/bin/onos-service b/tools/package/bin/onos-service
index 7c8850f..299cb9a 100755
--- a/tools/package/bin/onos-service
+++ b/tools/package/bin/onos-service
@@ -7,4 +7,4 @@
export JAVA_OPTS="-Xms256M -Xmx2048M"
cd /opt/onos
-/opt/onos/apache-karaf-3.0.1/bin/karaf "$@"
+/opt/onos/apache-karaf-$KARAF_VERSION/bin/karaf "$@"
diff --git a/tools/test/bin/onos-service b/tools/test/bin/onos-service
index 1c62ae8..d5fabe5 100755
--- a/tools/test/bin/onos-service
+++ b/tools/test/bin/onos-service
@@ -6,4 +6,39 @@
[ ! -d "$ONOS_ROOT" ] && echo "ONOS_ROOT is not defined" >&2 && exit 1
. $ONOS_ROOT/tools/build/envDefaults
-ssh $ONOS_USER@${1:-$OCI} "sudo service onos ${2:-status}"
+function print_usage {
+ command_name=`basename $0`
+ echo "Remotely administer the ONOS service on a single node or the current ONOS cell."
+ echo
+ echo "Usage: $command_name <TARGET> [COMMAND]"
+ echo " $command_name [-h | --help]"
+ echo "Options:"
+ echo " TARGET The target of the command"
+ echo " COMMAND The command to execute. Default value is 'status'"
+ echo " [-h | --help] Print this help"
+ echo ""
+ echo "TARGET: <hostname | --cell>"
+ echo " hostname Execute on the specified host name"
+ echo " --cell Execute on the current ONOS cell"
+ echo ""
+ echo "COMMAND: [start|stop|restart|status]"
+ echo ""
+}
+
+# Print usage
+if [ "${1}" = "-h" -o "${1}" = "--help" ]; then
+ print_usage
+ exit 0
+fi
+
+# Select the target
+if [ "${1}" = "--cell" ]; then
+ nodes=$(env | sort | egrep "OC[0-9]+" | cut -d= -f2)
+else
+ nodes=${1:-$OCI}
+fi
+
+# Execute the remote commands
+for node in $nodes; do
+ ssh $ONOS_USER@${node} "sudo service onos ${2:-status}"
+done
diff --git a/tools/test/bin/onos-show-cell b/tools/test/bin/onos-show-cell
index d7e56c3..5aee338 100755
--- a/tools/test/bin/onos-show-cell
+++ b/tools/test/bin/onos-show-cell
@@ -42,7 +42,7 @@
echo "ONOS_CELL=${ONOS_CELL}"
echo "ONOS_NIC=${ONOS_NIC}"
-for n in {1..9}; do
+for n in {0..9}; do
ocn="OC${n}"
if [ -n "${!ocn}" ]; then
echo "$ocn=${!ocn}"
diff --git a/tools/test/cells/local b/tools/test/cells/local
index 2edb074..f506c08 100644
--- a/tools/test/cells/local
+++ b/tools/test/cells/local
@@ -8,4 +8,4 @@
export OCN="192.168.56.103"
export OCI="${OC1}"
-export ONOS_FEATURES=""
+export ONOS_FEATURES="${ONOS_FEATURES:-webconsole,onos-api,onos-core,onos-cli,onos-openflow,onos-app-fwd,onos-app-proxyarp,onos-app-tvue}"
diff --git a/tools/test/cells/office b/tools/test/cells/office
index 72520a0..7c6345b 100644
--- a/tools/test/cells/office
+++ b/tools/test/cells/office
@@ -2,8 +2,8 @@
export ONOS_CELL="office"
-export ONOS_NIC="10.128.4.*"
-export OC1="10.128.4.60"
+export ONOS_NIC="10.1.10.*"
+export OC1="10.1.10.223"
export OCI="${OC1}"
export ONOS_FEATURES="webconsole,onos-api,onos-core-trivial,onos-cli,onos-openflow,onos-app-fwd,onos-app-mobility,onos-app-tvue,onos-app-proxyarp"
diff --git a/tools/test/cells/single b/tools/test/cells/single
index 7c03ef4..6b13756 100644
--- a/tools/test/cells/single
+++ b/tools/test/cells/single
@@ -7,4 +7,4 @@
export OCN="192.168.56.103"
export OCI="${OC1}"
-export ONOS_FEATURES=""
+export ONOS_FEATURES="${ONOS_FEATURES:-webconsole,onos-api,onos-core-trivial,onos-cli,onos-openflow,onos-app-fwd,onos-app-proxyarp,onos-app-tvue}"
diff --git a/tools/test/cells/single_optical b/tools/test/cells/single_optical
new file mode 100644
index 0000000..61b0d24
--- /dev/null
+++ b/tools/test/cells/single_optical
@@ -0,0 +1,7 @@
+# Local VirtualBox-based single ONOS instance & ONOS mininet box
+
+export ONOS_NIC=192.168.56.*
+export OC1="192.168.56.101"
+export OCN="192.168.56.103"
+
+export ONOS_FEATURES=webconsole,onos-api,onos-core-trivial,onos-cli,onos-openflow,onos-app-fwd,onos-app-mobility,onos-app-tvue,onos-app-optical
diff --git a/utils/misc/src/main/java/org/onlab/packet/ChassisId.java b/utils/misc/src/main/java/org/onlab/packet/ChassisId.java
new file mode 100644
index 0000000..3029647
--- /dev/null
+++ b/utils/misc/src/main/java/org/onlab/packet/ChassisId.java
@@ -0,0 +1,74 @@
+package org.onlab.packet;
+
+/**
+ * Representation of a network device chassis identifier.
+ * This class is immutable.
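+ *
+ * Example (illustrative): {@code new ChassisId(0xabcdefL).toString()} yields
+ * {@code "abcdef"}, and {@code new ChassisId("255").value()} yields {@code 255}.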
+ */
+// TODO: Move this to a reasonable place.
+public final class ChassisId {
+
+ private static final long UNKNOWN = 0;
+ private final long value;
+
+ /**
+ * Default constructor.
+ */
+ public ChassisId() {
+ this.value = ChassisId.UNKNOWN;
+ }
+
+ /**
+ * Constructor from a long value.
+ *
+ * @param value the value to use.
+ */
+ public ChassisId(long value) {
+ this.value = value;
+ }
+
+ /**
+ * Constructor from a string.
+ *
+ * @param value the value to use.
+ */
+ public ChassisId(String value) {
+ this.value = Long.valueOf(value);
+ }
+
+ /**
+ * Get the value of the chassis id.
+ *
+ * @return the value of the chassis id.
+ */
+ public long value() {
+ return value;
+ }
+
+ /**
+     * Converts the Chassis Id value to a hexadecimal string.
+     *
+     * @return the Chassis Id value as a hexadecimal string.
+ */
+ @Override
+ public String toString() {
+ return Long.toHexString(this.value);
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ if (!(other instanceof ChassisId)) {
+ return false;
+ }
+
+ ChassisId otherChassisId = (ChassisId) other;
+
+ return value == otherChassisId.value;
+ }
+
+ @Override
+ public int hashCode() {
+ int hash = 17;
+ hash += 31 * hash + (int) (value ^ value >>> 32);
+ return hash;
+ }
+}
diff --git a/utils/misc/src/main/java/org/onlab/packet/Ethernet.java b/utils/misc/src/main/java/org/onlab/packet/Ethernet.java
index 7587a54..eecdb53 100644
--- a/utils/misc/src/main/java/org/onlab/packet/Ethernet.java
+++ b/utils/misc/src/main/java/org/onlab/packet/Ethernet.java
@@ -58,6 +58,7 @@
Ethernet.etherTypeClassMap.put(Ethernet.TYPE_RARP, ARP.class);
Ethernet.etherTypeClassMap.put(Ethernet.TYPE_IPV4, IPv4.class);
Ethernet.etherTypeClassMap.put(Ethernet.TYPE_LLDP, LLDP.class);
+ Ethernet.etherTypeClassMap.put(Ethernet.TYPE_BSN, LLDP.class);
}
protected MacAddress destinationMACAddress;
diff --git a/utils/misc/src/main/java/org/onlab/packet/IpAddress.java b/utils/misc/src/main/java/org/onlab/packet/IpAddress.java
index fef0cfd..a440b98 100644
--- a/utils/misc/src/main/java/org/onlab/packet/IpAddress.java
+++ b/utils/misc/src/main/java/org/onlab/packet/IpAddress.java
@@ -2,13 +2,15 @@
import java.util.Arrays;
+
+
/**
* A class representing an IPv4 address.
* <p/>
* TODO this class is a clone of IpPrefix and still needs to be modified to
* look more like an IpAddress.
*/
-public final class IpAddress {
+public final class IpAddress implements Comparable<IpAddress> {
// TODO a comparator for netmasks? E.g. for sorting by prefix match order.
@@ -121,7 +123,7 @@
int mask = DEFAULT_MASK;
if (parts.length == 2) {
- mask = Integer.valueOf(parts[1]);
+ mask = Integer.parseInt(parts[1]);
if (mask > MAX_INET_MASK) {
throw new IllegalArgumentException(
"Value of subnet mask cannot exceed "
@@ -174,14 +176,6 @@
* @return the IP address's value as an integer
*/
public int toInt() {
- int address = 0;
- for (int i = 0; i < INET_LEN; i++) {
- address |= octets[i] << ((INET_LEN - (i + 1)) * 8);
- }
- return address;
- }
-
- public int toRealInt() {
int val = 0;
for (int i = 0; i < octets.length; i++) {
val <<= 8;
@@ -191,6 +185,15 @@
}
/**
+ * Converts the IP address to a /32 IP prefix.
+ *
+ * @return the new IP prefix
+ */
+ public IpPrefix toPrefix() {
+ return IpPrefix.valueOf(octets, MAX_INET_MASK);
+ }
+
+ /**
* Helper for computing the mask value from CIDR.
*
* @return an integer bitmask
@@ -280,6 +283,13 @@
}
@Override
+ public int compareTo(IpAddress o) {
+ Long lv = ((long) this.toInt()) & 0xffffffffL;
+ Long rv = ((long) o.toInt()) & 0xffffffffL;
+ return lv.compareTo(rv);
+ }
+
+ @Override
public int hashCode() {
final int prime = 31;
int result = 1;
diff --git a/utils/misc/src/main/java/org/onlab/packet/IpPrefix.java b/utils/misc/src/main/java/org/onlab/packet/IpPrefix.java
index 84acb82..6e1ebb5 100644
--- a/utils/misc/src/main/java/org/onlab/packet/IpPrefix.java
+++ b/utils/misc/src/main/java/org/onlab/packet/IpPrefix.java
@@ -120,7 +120,7 @@
int mask = DEFAULT_MASK;
if (parts.length == 2) {
- mask = Integer.valueOf(parts[1]);
+ mask = Integer.parseInt(parts[1]);
if (mask > MAX_INET_MASK) {
throw new IllegalArgumentException(
"Value of subnet mask cannot exceed "
@@ -173,14 +173,6 @@
* @return the IP address's value as an integer
*/
public int toInt() {
- int address = 0;
- for (int i = 0; i < INET_LEN; i++) {
- address |= octets[i] << ((INET_LEN - (i + 1)) * 8);
- }
- return address;
- }
-
- public int toRealInt() {
int val = 0;
for (int i = 0; i < octets.length; i++) {
val <<= 8;
diff --git a/utils/misc/src/main/java/org/onlab/packet/LLDP.java b/utils/misc/src/main/java/org/onlab/packet/LLDP.java
index 105a9f3..7277cda 100644
--- a/utils/misc/src/main/java/org/onlab/packet/LLDP.java
+++ b/utils/misc/src/main/java/org/onlab/packet/LLDP.java
@@ -150,7 +150,7 @@
final ByteBuffer bb = ByteBuffer.wrap(data, offset, length);
LLDPTLV tlv;
do {
- tlv = new LLDPTLV().deserialize(bb);
+ tlv = new LLDPOrganizationalTLV().deserialize(bb);
// if there was a failure to deserialize stop processing TLVs
if (tlv == null) {
@@ -169,6 +169,7 @@
case 0x3:
this.ttl = tlv;
break;
+
default:
this.optionalTLVList.add(tlv);
break;
diff --git a/utils/misc/src/main/java/org/onlab/packet/LLDPOrganizationalTLV.java b/utils/misc/src/main/java/org/onlab/packet/LLDPOrganizationalTLV.java
index fb359a4..4d4e0a4 100644
--- a/utils/misc/src/main/java/org/onlab/packet/LLDPOrganizationalTLV.java
+++ b/utils/misc/src/main/java/org/onlab/packet/LLDPOrganizationalTLV.java
@@ -140,6 +140,9 @@
@Override
public byte[] serialize() {
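+ // Only organizational TLVs carry an OUI and subtype; other TLV types
+ // fall back to the base serialization.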
+ if (this.type != LLDPOrganizationalTLV.ORGANIZATIONAL_TLV_TYPE) {
+ return super.serialize();
+ }
final int valueLength = LLDPOrganizationalTLV.OUI_LENGTH
+ LLDPOrganizationalTLV.SUBTYPE_LENGTH + this.infoString.length;
this.value = new byte[valueLength];
@@ -152,7 +155,11 @@
@Override
public LLDPTLV deserialize(final ByteBuffer bb) {
- super.deserialize(bb);
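+ // Read the common TLV header first; TLVs that are not organizational
+ // are returned without OUI/subtype parsing.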
+ LLDPTLV tlv = super.deserialize(bb);
+ if (tlv.getType() != LLDPOrganizationalTLV.ORGANIZATIONAL_TLV_TYPE) {
+ return tlv;
+ }
+
final ByteBuffer optionalField = ByteBuffer.wrap(this.value);
final byte[] oui = new byte[LLDPOrganizationalTLV.OUI_LENGTH];
diff --git a/utils/misc/src/main/java/org/onlab/packet/LLDPTLV.java b/utils/misc/src/main/java/org/onlab/packet/LLDPTLV.java
index 04f89a0..16c9e31 100644
--- a/utils/misc/src/main/java/org/onlab/packet/LLDPTLV.java
+++ b/utils/misc/src/main/java/org/onlab/packet/LLDPTLV.java
@@ -111,6 +111,7 @@
sscratch = bb.getShort();
this.type = (byte) (sscratch >> 9 & 0x7f);
this.length = (short) (sscratch & 0x1ff);
+
if (this.length > 0) {
this.value = new byte[this.length];
@@ -120,6 +121,7 @@
}
bb.get(this.value);
}
+
return this;
}
diff --git a/utils/misc/src/main/java/org/onlab/packet/ONLabLddp.java b/utils/misc/src/main/java/org/onlab/packet/ONLabLddp.java
index 37213d0..ecfcbd8 100644
--- a/utils/misc/src/main/java/org/onlab/packet/ONLabLddp.java
+++ b/utils/misc/src/main/java/org/onlab/packet/ONLabLddp.java
@@ -30,6 +30,7 @@
* Refer to IEEE Std 802.1ABTM-2009 for more information.
*
*/
+@Deprecated
public class ONLabLddp extends LLDP {
private static final Logger LOG = LoggerFactory.getLogger(ONLabLddp.class);
diff --git a/utils/misc/src/main/java/org/onlab/packet/ONOSLLDP.java b/utils/misc/src/main/java/org/onlab/packet/ONOSLLDP.java
new file mode 100644
index 0000000..ec35de8
--- /dev/null
+++ b/utils/misc/src/main/java/org/onlab/packet/ONOSLLDP.java
@@ -0,0 +1,169 @@
+package org.onlab.packet;
+
+import com.google.common.collect.Lists;
+import org.apache.commons.lang.ArrayUtils;
+
+import java.nio.ByteBuffer;
+
+/**
+ * ONOS LLDP containing organizational TLVs for ONOS device discovery.
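+ * <p>
+ * Illustrative usage sketch (identifiers are examples only; {@code eth}
+ * denotes a received Ethernet frame):
+ * <pre>{@code
+ * ONOSLLDP lldp = new ONOSLLDP();
+ * lldp.setChassisId(new ChassisId(0x1L));
+ * lldp.setDevice("of:0000000000000001");
+ * lldp.setPortId(1);
+ *
+ * ONOSLLDP parsed = ONOSLLDP.parseONOSLLDP(eth); // null if not an ONOS LLDP
+ * }</pre>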
+ */
+public class ONOSLLDP extends LLDP {
+
+ public static final byte[] ONLAB_OUI = {(byte) 0xa4, 0x23, 0x05};
+ public static final String DEFAULT_DEVICE = "INVALID";
+ public static final String DEFAULT_NAME = "ONOS Discovery";
+
+ public static final byte[] LLDP_NICIRA = {0x01, 0x23, 0x20, 0x00, 0x00,
+ 0x01};
+ public static final byte[] LLDP_MULTICAST = {0x01, (byte) 0x80,
+ (byte) 0xc2, 0x00, 0x00, 0x0e};
+ public static final byte[] BDDP_MULTICAST = {(byte) 0xff, (byte) 0xff,
+ (byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0xff};
+
+ private static final byte NAME_SUBTYPE = 1;
+ private static final byte DEVICE_SUBTYPE = 2;
+ private static final short NAME_LENGTH = 4; //1 for subtype + 3 for OUI
+ private static final short DEVICE_LENGTH = 4; //1 for subtype + 3 for OUI
+ private final LLDPOrganizationalTLV nameTLV = new LLDPOrganizationalTLV();
+ private final LLDPOrganizationalTLV deviceTLV = new LLDPOrganizationalTLV();
+
+ // TLV constants: type, size and subtype
+ // Organizationally specific TLVs also have a packet offset and the
+ // contents of the TLV header
+ private static final byte CHASSIS_TLV_TYPE = 1;
+ private static final byte CHASSIS_TLV_SIZE = 7;
+ private static final byte CHASSIS_TLV_SUBTYPE = 4;
+
+ private static final byte PORT_TLV_TYPE = 2;
+ private static final byte PORT_TLV_SIZE = 5;
+ private static final byte PORT_TLV_SUBTYPE = 2;
+
+ private static final byte TTL_TLV_TYPE = 3;
+
+
+ private final byte[] ttlValue = new byte[] {0, 0x78};
+
+ public ONOSLLDP() {
+ super();
+ setName(DEFAULT_NAME);
+ setDevice(DEFAULT_DEVICE);
+ setOptionalTLVList(Lists.<LLDPTLV>newArrayList(nameTLV, deviceTLV));
+ setTtl(new LLDPTLV().setType((byte) TTL_TLV_TYPE)
+ .setLength((short) ttlValue.length)
+ .setValue(ttlValue));
+
+ }
+
+ private ONOSLLDP(LLDP lldp) {
+ this.portId = lldp.getPortId();
+ this.chassisId = lldp.getChassisId();
+ this.ttl = lldp.getTtl();
+ this.optionalTLVList = lldp.getOptionalTLVList();
+ }
+
+ public void setName(String name) {
+ nameTLV.setLength((short) (name.length() + NAME_LENGTH));
+ nameTLV.setInfoString(name);
+ nameTLV.setSubType(NAME_SUBTYPE);
+ nameTLV.setOUI(ONLAB_OUI);
+ }
+
+ public void setDevice(String device) {
+ deviceTLV.setInfoString(device);
+ deviceTLV.setLength((short) (device.length() + DEVICE_LENGTH));
+ deviceTLV.setSubType(DEVICE_SUBTYPE);
+ deviceTLV.setOUI(ONLAB_OUI);
+ }
+
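+ /**
+ * Sets the chassis id TLV, encoding the given chassis id as a MAC address
+ * (chassis id subtype 4).
+ *
+ * @param chassisId chassis id of the advertising device
+ */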
+ public void setChassisId(final ChassisId chassisId) {
+ MacAddress chassisMac = MacAddress.valueOf(chassisId.value());
+ byte[] chassis = ArrayUtils.addAll(new byte[] {CHASSIS_TLV_SUBTYPE},
+ chassisMac.getAddress());
+
+ LLDPTLV chassisTLV = new LLDPTLV();
+ chassisTLV.setLength(CHASSIS_TLV_SIZE);
+ chassisTLV.setType(CHASSIS_TLV_TYPE);
+ chassisTLV.setValue(chassis);
+ this.setChassisId(chassisTLV);
+ }
+
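+ /**
+ * Sets the port id TLV from the given port number.
+ *
+ * @param portNumber number of the port emitting this LLDP
+ */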
+ public void setPortId(final int portNumber) {
+ byte[] port = ArrayUtils.addAll(new byte[] {PORT_TLV_SUBTYPE},
+ ByteBuffer.allocate(4).putInt(portNumber).array());
+
+ LLDPTLV portTLV = new LLDPTLV();
+ portTLV.setLength(PORT_TLV_SIZE);
+ portTLV.setType(PORT_TLV_TYPE);
+ portTLV.setValue(port);
+ this.setPortId(portTLV);
+ }
+
+ public LLDPOrganizationalTLV getNameTLV() {
+ for (LLDPTLV tlv : this.getOptionalTLVList()) {
+ if (tlv.getType() == LLDPOrganizationalTLV.ORGANIZATIONAL_TLV_TYPE) {
+ LLDPOrganizationalTLV orgTLV = (LLDPOrganizationalTLV) tlv;
+ if (orgTLV.getSubType() == NAME_SUBTYPE) {
+ return orgTLV;
+ }
+ }
+ }
+ return null;
+ }
+
+ public LLDPOrganizationalTLV getDeviceTLV() {
+ for (LLDPTLV tlv : this.getOptionalTLVList()) {
+ if (tlv.getType() == LLDPOrganizationalTLV.ORGANIZATIONAL_TLV_TYPE) {
+ LLDPOrganizationalTLV orgTLV = (LLDPOrganizationalTLV) tlv;
+ if (orgTLV.getSubType() == DEVICE_SUBTYPE) {
+ return orgTLV;
+ }
+ }
+ }
+ return null;
+ }
+
+ public String getNameString() {
+ LLDPOrganizationalTLV tlv = getNameTLV();
+ if (tlv != null) {
+ return new String(tlv.getInfoString());
+ }
+ return null;
+ }
+
+ public String getDeviceString() {
+ LLDPOrganizationalTLV tlv = getDeviceTLV();
+ if (tlv != null) {
+ return new String(tlv.getInfoString());
+ }
+ return null;
+ }
+
+ public Integer getPort() {
+ ByteBuffer portBB = ByteBuffer.wrap(this.getPortId().getValue());
+ portBB.position(1);
+ return portBB.getInt();
+ }
+
+ /**
+ * Given an Ethernet packet, determines whether it is an LLDP emitted by
+ * ONOS and, if so, returns the parsed LLDP.
+ *
+ * @param eth an Ethernet packet
+ * @return the ONOS LLDP packet, or null if the packet is not an ONOS LLDP
+ */
+ public static ONOSLLDP parseONOSLLDP(Ethernet eth) {
+ if (eth.getEtherType() == Ethernet.TYPE_LLDP ||
+ eth.getEtherType() == Ethernet.TYPE_BSN) {
+ ONOSLLDP onosLldp = new ONOSLLDP((LLDP) eth.getPayload()); //(ONOSLLDP) eth.getPayload();
+ if (ONOSLLDP.DEFAULT_NAME.equals(onosLldp.getNameString())) {
+ return onosLldp;
+ }
+ }
+ return null;
+ }
+
+}
diff --git a/utils/misc/src/main/java/org/onlab/util/HexString.java b/utils/misc/src/main/java/org/onlab/util/HexString.java
new file mode 100644
index 0000000..db12aa3
--- /dev/null
+++ b/utils/misc/src/main/java/org/onlab/util/HexString.java
@@ -0,0 +1,94 @@
+package org.onlab.util;
+
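+/**
+ * Utility methods for converting between byte arrays or long values and
+ * ':' separated hexadecimal strings (e.g. OpenFlow datapath ids).
+ * <p>
+ * Illustrative usage sketch (values taken from the accompanying unit tests):
+ * <pre>{@code
+ * long dpid = HexString.toLong("00:00:00:23:20:2d:16:71");
+ * String str = HexString.toHexString(dpid); // "00:00:00:23:20:2d:16:71"
+ * byte[] mac = HexString.fromHexString("0f:ca:fe:de:ad:be:ef");
+ * }</pre>
+ */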
+public final class HexString {
+
+ private HexString() {
+
+ }
+
+ /**
+ * Convert an array of bytes to a ':' separated hex string.
+ *
+ * @param bytes byte array to convert
+ * @return hex string, e.g. "0f:ca:fe:de:ad:be:ef"
+ */
+ public static String toHexString(final byte[] bytes) {
+ int i;
+ StringBuilder ret = new StringBuilder();
+ String tmp;
+ for (i = 0; i < bytes.length; i++) {
+ if (i > 0) {
+ ret.append(':');
+ }
+ tmp = Integer.toHexString((bytes[i] & 0xff));
+ if (tmp.length() == 1) {
+ ret.append('0');
+ }
+ ret.append(tmp);
+ }
+ return ret.toString();
+ }
+
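+ /**
+ * Convert a long value to a ':' separated hex string, zero-padded to the
+ * given number of bytes.
+ *
+ * @param val value to convert
+ * @param padTo number of bytes the output should represent
+ * @return ':' separated hex string
+ */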
+ public static String toHexString(final long val, final int padTo) {
+ char[] arr = Long.toHexString(val).toCharArray();
+ String ret = "";
+ // prepend the right number of leading zeros
+ int i = 0;
+ for (; i < (padTo * 2 - arr.length); i++) {
+ ret += "0";
+ if ((i % 2) != 0) {
+ ret += ":";
+ }
+ }
+ for (int j = 0; j < arr.length; j++) {
+ ret += arr[j];
+ if ((((i + j) % 2) != 0) && (j < (arr.length - 1))) {
+ ret += ":";
+ }
+ }
+ return ret;
+ }
+
+ public static String toHexString(final long val) {
+ return toHexString(val, 8);
+ }
+
+ /**
+ * Convert a ':' separated hex string into an array of bytes.
+ *
+ * @param values hex string, e.g. "0f:ca:fe:de:ad:be:ef"
+ * @return the corresponding byte array
+ * @throws NumberFormatException if the string cannot be parsed
+ */
+ public static byte[] fromHexString(final String values) {
+ String[] octets = values.split(":");
+ byte[] ret = new byte[octets.length];
+
+ for (int i = 0; i < octets.length; i++) {
+ if (octets[i].length() > 2) {
+ throw new NumberFormatException("Invalid octet length");
+ }
+ ret[i] = Integer.valueOf(octets[i], 16).byteValue();
+ }
+ return ret;
+ }
+
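+ /**
+ * Convert a ':' separated hex string of at most 8 octets into a long.
+ *
+ * @param value hex string, e.g. "3e:1f:01:fc:72:8c:63:31"
+ * @return the corresponding long value
+ * @throws NumberFormatException if the string cannot be parsed
+ */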
+ public static long toLong(String value) {
+ String[] octets = value.split(":");
+ if (octets.length > 8) {
+ throw new NumberFormatException("Input string is too big to fit in long: " + value);
+ }
+ long l = 0;
+ for (String octet: octets) {
+ if (octet.length() > 2) {
+ throw new NumberFormatException(
+ "Each colon-separated byte component must consist of 1 or 2 hex digits: " + value);
+ }
+ short s = Short.parseShort(octet, 16);
+ l = (l << 8) + s;
+ }
+ return l;
+ }
+}
diff --git a/utils/misc/src/main/java/org/onlab/util/KryoPool.java b/utils/misc/src/main/java/org/onlab/util/KryoNamespace.java
similarity index 90%
rename from utils/misc/src/main/java/org/onlab/util/KryoPool.java
rename to utils/misc/src/main/java/org/onlab/util/KryoNamespace.java
index 3fae0c5..42d801b 100644
--- a/utils/misc/src/main/java/org/onlab/util/KryoPool.java
+++ b/utils/misc/src/main/java/org/onlab/util/KryoNamespace.java
@@ -13,6 +13,7 @@
import com.esotericsoftware.kryo.io.ByteBufferOutput;
import com.esotericsoftware.kryo.io.Input;
import com.esotericsoftware.kryo.io.Output;
+import com.esotericsoftware.kryo.pool.KryoFactory;
import com.google.common.collect.ImmutableList;
// TODO Add tests for this class.
@@ -20,7 +21,7 @@
* Pool of Kryo instances, with classes pre-registered.
*/
//@ThreadSafe
-public final class KryoPool {
+public final class KryoNamespace implements KryoFactory {
/**
* Default buffer size used for serialization.
@@ -34,7 +35,7 @@
private final boolean registrationRequired;
/**
- * KryoPool builder.
+ * KryoNamespace builder.
*/
//@NotThreadSafe
public static final class Builder {
@@ -42,12 +43,12 @@
private final List<Pair<Class<?>, Serializer<?>>> types = new ArrayList<>();
/**
- * Builds a {@link KryoPool} instance.
+ * Builds a {@link KryoNamespace} instance.
*
- * @return KryoPool
+ * @return KryoNamespace
*/
- public KryoPool build() {
- return new KryoPool(types);
+ public KryoNamespace build() {
+ return new KryoNamespace(types);
}
/**
@@ -76,19 +77,19 @@
}
/**
- * Registers all the class registered to given KryoPool.
+ * Registers all the classes registered to the given KryoNamespace.
*
- * @param pool KryoPool
+ * @param pool KryoNamespace
* @return this
*/
- public Builder register(final KryoPool pool) {
+ public Builder register(final KryoNamespace pool) {
types.addAll(pool.registeredTypes);
return this;
}
}
/**
- * Creates a new {@link KryoPool} builder.
+ * Creates a new {@link KryoNamespace} builder.
*
* @return builder
*/
@@ -101,7 +102,7 @@
*
* @param registerdTypes types to register
*/
- private KryoPool(final List<Pair<Class<?>, Serializer<?>>> registerdTypes) {
+ private KryoNamespace(final List<Pair<Class<?>, Serializer<?>>> registerdTypes) {
this.registeredTypes = ImmutableList.copyOf(registerdTypes);
// always true for now
this.registrationRequired = true;
@@ -113,10 +114,10 @@
* @param instances to add to the pool
* @return this
*/
- public KryoPool populate(int instances) {
+ public KryoNamespace populate(int instances) {
List<Kryo> kryos = new ArrayList<>(instances);
for (int i = 0; i < instances; ++i) {
- kryos.add(newKryoInstance());
+ kryos.add(create());
}
pool.addAll(kryos);
return this;
@@ -130,7 +131,7 @@
public Kryo getKryo() {
Kryo kryo = pool.poll();
if (kryo == null) {
- return newKryoInstance();
+ return create();
}
return kryo;
}
@@ -235,7 +236,8 @@
*
* @return Kryo instance
*/
- private Kryo newKryoInstance() {
+ @Override
+ public Kryo create() {
Kryo kryo = new Kryo();
kryo.setRegistrationRequired(registrationRequired);
for (Pair<Class<?>, Serializer<?>> registry : registeredTypes) {
diff --git a/utils/misc/src/main/java/org/onlab/util/TestUtils.java b/utils/misc/src/main/java/org/onlab/util/TestUtils.java
new file mode 100644
index 0000000..7e59564
--- /dev/null
+++ b/utils/misc/src/main/java/org/onlab/util/TestUtils.java
@@ -0,0 +1,169 @@
+package org.onlab.util;
+
+import java.lang.reflect.Constructor;
+import java.lang.reflect.Field;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+
+
+/**
+ * Utilities for testing.
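+ * <p>
+ * Illustrative usage sketch, assuming {@code subject} is the object under
+ * test (see {@code TestUtilsTest} for complete examples):
+ * <pre>{@code
+ * TestUtils.setField(subject, "privateField", 0xDEAD);
+ * Integer value = TestUtils.getField(subject, "privateField");
+ * }</pre>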
+ */
+public final class TestUtils {
+
+ /**
+ * Sets the field, bypassing scope restriction.
+ *
+ * @param subject Object where the field belongs
+ * @param fieldName name of the field to set
+ * @param value value to set to the field.
+ * @param <T> subject type
+ * @param <U> value type
+ * @throws TestUtilsException if there are reflection errors while setting
+ * the field
+ */
+ public static <T, U> void setField(T subject, String fieldName, U value)
+ throws TestUtilsException {
+ @SuppressWarnings("unchecked")
+ Class<T> clazz = (Class<T>) subject.getClass();
+ try {
+ Field field = clazz.getDeclaredField(fieldName);
+ field.setAccessible(true);
+ field.set(subject, value);
+ } catch (NoSuchFieldException | SecurityException |
+ IllegalArgumentException | IllegalAccessException e) {
+ throw new TestUtilsException("setField failed", e);
+ }
+ }
+
+ /**
+ * Gets the field, bypassing scope restriction.
+ *
+ * @param subject Object where the field belongs
+ * @param fieldName name of the field to get
+ * @return value of the field.
+ * @param <T> subject type
+ * @param <U> field value type
+ * @throws TestUtilsException if there are reflection errors while getting
+ * the field
+ */
+ public static <T, U> U getField(T subject, String fieldName)
+ throws TestUtilsException {
+ try {
+ @SuppressWarnings("unchecked")
+ Class<T> clazz = (Class<T>) subject.getClass();
+ Field field = clazz.getDeclaredField(fieldName);
+ field.setAccessible(true);
+
+ @SuppressWarnings("unchecked")
+ U result = (U) field.get(subject);
+ return result;
+ } catch (NoSuchFieldException | SecurityException |
+ IllegalArgumentException | IllegalAccessException e) {
+ throw new TestUtilsException("getField failed", e);
+ }
+ }
+
+ /**
+ * Calls the method, bypassing scope restriction.
+ *
+ * @param subject Object where the method belongs
+ * @param methodName name of the method to call
+ * @param paramTypes formal parameter type array
+ * @param args arguments
+ * @return return value or null if void
+ * @param <T> subject type
+ * @param <U> return value type
+ * @throws TestUtilsException if there are reflection errors while calling
+ * the method
+ */
+ public static <T, U> U callMethod(T subject, String methodName,
+ Class<?>[] paramTypes, Object...args) throws TestUtilsException {
+
+ try {
+ @SuppressWarnings("unchecked")
+ Class<T> clazz = (Class<T>) subject.getClass();
+ final Method method;
+ if (paramTypes == null || paramTypes.length == 0) {
+ method = clazz.getDeclaredMethod(methodName);
+ } else {
+ method = clazz.getDeclaredMethod(methodName, paramTypes);
+ }
+ method.setAccessible(true);
+
+ @SuppressWarnings("unchecked")
+ U result = (U) method.invoke(subject, args);
+ return result;
+ } catch (NoSuchMethodException | SecurityException |
+ IllegalAccessException | IllegalArgumentException |
+ InvocationTargetException e) {
+ throw new TestUtilsException("callMethod failed", e);
+ }
+ }
+
+ /**
+ * Calls the method, bypassing scope restriction.
+ *
+ * @param subject Object where the method belongs
+ * @param methodName name of the method to call
+ * @param paramType formal parameter type
+ * @param arg argument
+ * @return return value or null if void
+ * @param <T> subject type
+ * @param <U> return value type
+ * @throws TestUtilsException if there are reflection errors while calling
+ * the method
+ */
+ public static <T, U> U callMethod(T subject, String methodName,
+ Class<?> paramType, Object arg) throws TestUtilsException {
+ return callMethod(subject, methodName, new Class<?>[]{paramType}, arg);
+ }
+
+ /**
+ * Creates an object of type <T> by invoking the given constructor, even
+ * if it is not accessible (e.g. a private constructor).
+ *
+ * @param constructor Constructor to call
+ * @param <T> type of the object to create
+ * @return created object of type <T>
+ * @throws TestUtilsException if there are reflection errors while calling
+ * the constructor
+ */
+ public static <T> T callConstructor(Constructor<T> constructor)
+ throws TestUtilsException {
+ try {
+ constructor.setAccessible(true);
+ return constructor.newInstance();
+ } catch (InstantiationException | IllegalAccessException |
+ InvocationTargetException error) {
+ throw new TestUtilsException("callConstructor failed", error);
+ }
+ }
+
+ /**
+ * Avoid instantiation.
+ */
+ private TestUtils() {}
+
+ /**
+ * Exception that can be thrown if problems are encountered while executing
+ * the utility method. These are usually problems accessing fields/methods
+ * through reflection. The original exception can be found by examining the
+ * cause.
+ */
+ public static class TestUtilsException extends Exception {
+
+ private static final long serialVersionUID = 1L;
+
+ /**
+ * Constructs a new exception with the specified detail message and
+ * cause.
+ *
+ * @param message the detail message
+ * @param cause the original cause of this exception
+ */
+ public TestUtilsException(String message, Throwable cause) {
+ super(message, cause);
+ }
+ }
+}
diff --git a/utils/misc/src/test/java/org/onlab/util/HexStringTest.java b/utils/misc/src/test/java/org/onlab/util/HexStringTest.java
new file mode 100644
index 0000000..c20238f
--- /dev/null
+++ b/utils/misc/src/test/java/org/onlab/util/HexStringTest.java
@@ -0,0 +1,70 @@
+package org.onlab.util;
+
+import org.junit.Test;
+
+import com.esotericsoftware.minlog.Log;
+
+import junit.framework.TestCase;
+
+/**
+ * Tests of the HexString utility class.
+ *
+ */
+
+public class HexStringTest extends TestCase {
+
+ @Test
+ public void testMarshalling() throws Exception {
+ String dpidStr = "00:00:00:23:20:2d:16:71";
+ long dpid = HexString.toLong(dpidStr);
+ String testStr = HexString.toHexString(dpid);
+ TestCase.assertEquals(dpidStr, testStr);
+ }
+
+ @Test
+ public void testToLong() {
+ String dpidStr = "3e:1f:01:fc:72:8c:63:31";
+ long valid = 0x3e1f01fc728c6331L;
+ long testLong = HexString.toLong(dpidStr);
+ TestCase.assertEquals(valid, testLong);
+ }
+
+ @Test
+ public void testToLongMSB() {
+ String dpidStr = "ca:7c:5e:d1:64:7a:95:9b";
+ long valid = -3856102927509056101L;
+ long testLong = HexString.toLong(dpidStr);
+ TestCase.assertEquals(valid, testLong);
+ }
+
+ @Test
+ public void testToLongError() {
+ String dpidStr = "09:08:07:06:05:04:03:02:01";
+ try {
+ HexString.toLong(dpidStr);
+ fail("HexString.toLong() should have thrown a NumberFormatException");
+ } catch (NumberFormatException expected) {
+ Log.info("HexString.toLong() have thrown a NumberFormatException");
+ }
+ }
+
+ @Test
+ public void testToStringBytes() {
+ byte[] dpid = {0, 0, 0, 0, 0, 0, 0, -1 };
+ String valid = "00:00:00:00:00:00:00:ff";
+ String testString = HexString.toHexString(dpid);
+ TestCase.assertEquals(valid, testString);
+ }
+
+ @Test
+ public void testFromHexStringError() {
+ String invalidStr = "00:00:00:00:00:00:ffff";
+ try {
+ HexString.fromHexString(invalidStr);
+ fail("HexString.fromHexString() should have thrown a NumberFormatException");
+ } catch (NumberFormatException expected) {
+ Log.info("HexString.toLong() have thrown a NumberFormatException");
+ }
+ }
+}
+
diff --git a/utils/misc/src/test/java/org/onlab/util/TestUtilsTest.java b/utils/misc/src/test/java/org/onlab/util/TestUtilsTest.java
new file mode 100644
index 0000000..58e60c1
--- /dev/null
+++ b/utils/misc/src/test/java/org/onlab/util/TestUtilsTest.java
@@ -0,0 +1,170 @@
+package org.onlab.util;
+
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+
+import org.junit.Before;
+import org.junit.Test;
+import org.onlab.util.TestUtils.TestUtilsException;
+
+/**
+ * Test and usage examples for TestUtils.
+ */
+public class TestUtilsTest {
+
+ /**
+ * Test data.
+ */
+ private static final class TestClass {
+
+ @SuppressWarnings("unused")
+ private int privateField = 42;
+
+ @SuppressWarnings("unused")
+ protected int protectedField = 2501; // CHECKSTYLE IGNORE THIS LINE
+
+ /**
+ * Private method with multiple arguments.
+ *
+ * @param x value that is simply returned
+ * @param y not used
+ * @return x
+ */
+ @SuppressWarnings("unused")
+ private int privateMethod(Number x, Long y) {
+ return x.intValue();
+ }
+
+ /**
+ * Protected method with no arguments.
+ *
+ * @return a constant int value
+ */
+ @SuppressWarnings("unused")
+ protected int protectedMethod() {
+ return 42;
+ }
+
+ /**
+ * Method returning array.
+ *
+ * @param ary random array
+ * @return ary
+ */
+ @SuppressWarnings("unused")
+ private int[] arrayReturnMethod(int[] ary) {
+ return ary;
+ }
+
+ /**
+ * Method without return value.
+ *
+ * @param s ignored
+ */
+ @SuppressWarnings("unused")
+ private void voidMethod(String s) {
+ System.out.println(s);
+ }
+ }
+
+ private TestClass test;
+
+ /**
+ * Sets up the test fixture.
+ */
+ @Before
+ public void setUp() {
+ test = new TestClass();
+ }
+
+ /**
+ * Example of accessing a private field.
+ *
+ * @throws TestUtilsException TestUtils error
+ */
+ @Test
+ public void testSetGetPrivateField() throws TestUtilsException {
+
+ assertEquals(42, TestUtils.getField(test, "privateField"));
+ TestUtils.setField(test, "privateField", 0xDEAD);
+ assertEquals(0xDEAD, TestUtils.getField(test, "privateField"));
+ }
+
+ /**
+ * Example of accessing a protected field.
+ *
+ * @throws TestUtilsException TestUtils error
+ */
+ @Test
+ public void testSetGetProtectedField() throws TestUtilsException {
+
+ assertEquals(2501, TestUtils.getField(test, "protectedField"));
+ TestUtils.setField(test, "protectedField", 0xBEEF);
+ assertEquals(0xBEEF, TestUtils.getField(test, "protectedField"));
+ }
+
+ /**
+ * Example of calling a private method with multiple parameters.
+ * <p/>
+ * It also illustrates that paramTypes must match declared type,
+ * not the runtime types of arguments.
+ *
+ * @throws TestUtilsException TestUtils error
+ */
+ @Test
+ public void testCallPrivateMethod() throws TestUtilsException {
+
+ int result = TestUtils.callMethod(test, "privateMethod",
+ new Class<?>[] {Number.class, Long.class},
+ Long.valueOf(42), Long.valueOf(32));
+ assertEquals(42, result);
+ }
+
+ /**
+ * Example of calling a protected method with no parameters.
+ *
+ * @throws TestUtilsException TestUtils error
+ */
+ @Test
+ public void testCallProtectedMethod() throws TestUtilsException {
+
+ int result = TestUtils.callMethod(test, "protectedMethod",
+ new Class<?>[] {});
+ assertEquals(42, result);
+ }
+
+ /**
+ * Example of calling a method that returns an array.
+ * <p/>
+ * Note: the result does not need to be received as an Object; the
+ * assertions below simply verify that the returned array is not boxed.
+ *
+ * @throws TestUtilsException TestUtils error
+ */
+ @Test
+ public void testCallArrayReturnMethod() throws TestUtilsException {
+
+ int[] array = {1, 2, 3};
+ Object aryResult = TestUtils.callMethod(test, "arrayReturnMethod",
+ new Class<?>[] {int[].class}, array);
+ assertEquals(int[].class, aryResult.getClass());
+ assertArrayEquals(array, (int[]) aryResult);
+ }
+
+ /**
+ * Example of calling a method that returns void.
+ * <p/>
+ * Note: Return value will be null for void methods.
+ *
+ * @throws TestUtilsException TestUtils error
+ */
+ @Test
+ public void testCallVoidReturnMethod() throws TestUtilsException {
+
+ Object voidResult = TestUtils.callMethod(test, "voidMethod",
+ String.class, "foobar");
+ assertNull(voidResult);
+ }
+}
diff --git a/utils/netty/src/main/java/org/onlab/netty/KryoSerializer.java b/utils/netty/src/main/java/org/onlab/netty/KryoSerializer.java
index b8efb51..bc95fc6 100644
--- a/utils/netty/src/main/java/org/onlab/netty/KryoSerializer.java
+++ b/utils/netty/src/main/java/org/onlab/netty/KryoSerializer.java
@@ -1,6 +1,6 @@
package org.onlab.netty;
-import org.onlab.util.KryoPool;
+import org.onlab.util.KryoNamespace;
import java.nio.ByteBuffer;
import java.util.ArrayList;
@@ -12,7 +12,7 @@
*/
public class KryoSerializer {
- private KryoPool serializerPool;
+ private KryoNamespace serializerPool;
public KryoSerializer() {
setupKryoPool();
@@ -23,7 +23,7 @@
*/
protected void setupKryoPool() {
// FIXME Slice out types used in common to separate pool/namespace.
- serializerPool = KryoPool.newBuilder()
+ serializerPool = KryoNamespace.newBuilder()
.register(ArrayList.class,
HashMap.class,
ArrayList.class,
diff --git a/utils/pom.xml b/utils/pom.xml
index feb60e9..df7b97b 100644
--- a/utils/pom.xml
+++ b/utils/pom.xml
@@ -23,6 +23,7 @@
<module>nio</module>
<module>osgi</module>
<module>rest</module>
+ <module>thirdparty</module>
</modules>
<dependencies>
diff --git a/utils/thirdparty/pom.xml b/utils/thirdparty/pom.xml
new file mode 100644
index 0000000..59ab818
--- /dev/null
+++ b/utils/thirdparty/pom.xml
@@ -0,0 +1,73 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+
+ <parent>
+ <groupId>org.onlab.onos</groupId>
+ <artifactId>onlab-utils</artifactId>
+ <version>1.0.0-SNAPSHOT</version>
+ <relativePath>../pom.xml</relativePath>
+ </parent>
+
+ <artifactId>onlab-thirdparty</artifactId>
+ <packaging>bundle</packaging>
+
+ <description>ONLab third-party dependencies</description>
+
+ <dependencies>
+ <dependency>
+ <groupId>com.googlecode.concurrent-trees</groupId>
+ <artifactId>concurrent-trees</artifactId>
+ <version>2.4.0</version>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-shade-plugin</artifactId>
+ <version>2.3</version>
+ <configuration>
+ <filters>
+ <filter>
+ <artifact>com.googlecode.concurrent-trees:concurrent-trees</artifact>
+ <includes>
+ <include>com/googlecode/**</include>
+ </includes>
+
+ </filter>
+ <filter>
+ <artifact>com.google.guava:guava</artifact>
+ <excludes>
+ <exclude>**</exclude>
+ </excludes>
+ </filter>
+ </filters>
+ </configuration>
+ <executions>
+ <execution>
+ <phase>package</phase>
+ <goals>
+ <goal>shade</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.felix</groupId>
+ <artifactId>maven-bundle-plugin</artifactId>
+ <configuration>
+ <instructions>
+ <Export-Package>
+ com.googlecode.concurrenttrees.*
+ </Export-Package>
+ </instructions>
+ </configuration>
+ </plugin>
+ </plugins>
+ </build>
+
+</project>
diff --git a/utils/thirdparty/src/main/java/org/onlab/thirdparty/OnlabThirdparty.java b/utils/thirdparty/src/main/java/org/onlab/thirdparty/OnlabThirdparty.java
new file mode 100644
index 0000000..df7c48a
--- /dev/null
+++ b/utils/thirdparty/src/main/java/org/onlab/thirdparty/OnlabThirdparty.java
@@ -0,0 +1,11 @@
+package org.onlab.thirdparty;
+
+
+/**
+ * Empty class required to get the onlab-thirdparty module to build properly.
+ * <p/>
+ * TODO Figure out how to remove this.
+ */
+public class OnlabThirdparty {
+
+}
diff --git a/web/gui/src/main/webapp/index.html b/web/gui/src/main/webapp/index.html
index f959f93..d68a706 100644
--- a/web/gui/src/main/webapp/index.html
+++ b/web/gui/src/main/webapp/index.html
@@ -2,6 +2,9 @@
<html>
<head>
<title>ONOS GUI</title>
+
+ <script src="libs/d3.min.js"></script>
+ <script src="libs/jquery-2.1.1.min.js"></script>
</head>
<body>
<h1>ONOS GUI</h1>