Added support for firing up multiple raft partitions + Workaround for an issue where db calls time out when a raft cluster node is down.
Change-Id: I67406da34c8a96b8ab9371d4d9b14653edfd2e2d
diff --git a/core/store/dist/src/main/java/org/onosproject/store/cluster/impl/ClusterDefinitionStore.java b/core/store/dist/src/main/java/org/onosproject/store/cluster/impl/ClusterDefinitionStore.java
index da49a3f..a522a85 100644
--- a/core/store/dist/src/main/java/org/onosproject/store/cluster/impl/ClusterDefinitionStore.java
+++ b/core/store/dist/src/main/java/org/onosproject/store/cluster/impl/ClusterDefinitionStore.java
@@ -15,25 +15,12 @@
*/
package org.onosproject.store.cluster.impl;
-import com.fasterxml.jackson.core.JsonEncoding;
-import com.fasterxml.jackson.core.JsonFactory;
-import com.fasterxml.jackson.databind.JsonNode;
+import static com.google.common.base.Preconditions.checkNotNull;
+
import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.node.ArrayNode;
-import com.fasterxml.jackson.databind.node.ObjectNode;
-import com.fasterxml.jackson.databind.node.TextNode;
-
-import org.onosproject.cluster.DefaultControllerNode;
-import org.onosproject.cluster.NodeId;
-import org.onlab.packet.IpAddress;
-
import java.io.File;
import java.io.IOException;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.Set;
-//Not used right now
/**
* Allows for reading and writing cluster definition as a JSON file.
*/
@@ -43,54 +30,32 @@
/**
* Creates a reader/writer of the cluster definition file.
- *
* @param filePath location of the definition file
*/
public ClusterDefinitionStore(String filePath) {
file = new File(filePath);
}
- /*
- * Returns set of the controller nodes, including self.
- *
- * @return set of controller nodes
+ /**
+ * Returns the cluster definition.
+ * @return cluster definition
+ * @throws IOException when I/O exception of some sort has occurred
*/
public ClusterDefinition read() throws IOException {
- Set<DefaultControllerNode> nodes = new HashSet<>();
ObjectMapper mapper = new ObjectMapper();
- ObjectNode clusterNodeDef = (ObjectNode) mapper.readTree(file);
- Iterator<JsonNode> it = ((ArrayNode) clusterNodeDef.get("nodes")).elements();
- while (it.hasNext()) {
- ObjectNode nodeDef = (ObjectNode) it.next();
- nodes.add(new DefaultControllerNode(new NodeId(nodeDef.get("id").asText()),
- IpAddress.valueOf(nodeDef.get("ip").asText()),
- nodeDef.get("tcpPort").asInt(9876)));
- }
- String ipPrefix = clusterNodeDef.get("ipPrefix").asText();
-
- return ClusterDefinition.from(nodes, ipPrefix);
+ ClusterDefinition definition = mapper.readValue(file, ClusterDefinition.class);
+ return definition;
}
- /*
- * Writes the given cluster definition.
- *
- * @param cluster definition
+ /**
+ * Writes the specified cluster definition to file.
+ * @param definition cluster definition
+ * @throws IOException when I/O exception of some sort has occurred
*/
public void write(ClusterDefinition definition) throws IOException {
- ObjectMapper mapper = new ObjectMapper();
- ObjectNode clusterNodeDef = mapper.createObjectNode();
- clusterNodeDef.set("ipPrefix", new TextNode(definition.ipPrefix()));
- ArrayNode nodeDefs = mapper.createArrayNode();
- clusterNodeDef.set("nodes", nodeDefs);
- for (DefaultControllerNode node : definition.nodes()) {
- ObjectNode nodeDef = mapper.createObjectNode();
- nodeDef.put("id", node.id().toString())
- .put("ip", node.ip().toString())
- .put("tcpPort", node.tcpPort());
- nodeDefs.add(nodeDef);
- }
- mapper.writeTree(new JsonFactory().createGenerator(file, JsonEncoding.UTF8),
- clusterNodeDef);
+ checkNotNull(definition);
+ // write back to file
+ final ObjectMapper mapper = new ObjectMapper();
+ mapper.writeValue(file, definition);
}
-
-}
+}
\ No newline at end of file