package org.onlab.onos.store.link.impl;

import com.google.common.base.Function;
import com.google.common.base.Predicate;
import com.google.common.collect.FluentIterable;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Maps;
import com.google.common.collect.SetMultimap;

import org.apache.commons.lang3.RandomUtils;
import org.apache.commons.lang3.concurrent.ConcurrentUtils;
import org.apache.felix.scr.annotations.Activate;
import org.apache.felix.scr.annotations.Component;
import org.apache.felix.scr.annotations.Deactivate;
import org.apache.felix.scr.annotations.Reference;
import org.apache.felix.scr.annotations.ReferenceCardinality;
import org.apache.felix.scr.annotations.Service;
import org.onlab.onos.cluster.ClusterService;
import org.onlab.onos.cluster.ControllerNode;
import org.onlab.onos.cluster.NodeId;
import org.onlab.onos.net.AnnotationsUtil;
import org.onlab.onos.net.ConnectPoint;
import org.onlab.onos.net.DefaultAnnotations;
import org.onlab.onos.net.DefaultLink;
import org.onlab.onos.net.DeviceId;
import org.onlab.onos.net.Link;
import org.onlab.onos.net.SparseAnnotations;
import org.onlab.onos.net.Link.Type;
import org.onlab.onos.net.LinkKey;
import org.onlab.onos.net.Provided;
import org.onlab.onos.net.link.DefaultLinkDescription;
import org.onlab.onos.net.link.LinkDescription;
import org.onlab.onos.net.link.LinkEvent;
import org.onlab.onos.net.link.LinkStore;
import org.onlab.onos.net.link.LinkStoreDelegate;
import org.onlab.onos.net.provider.ProviderId;
import org.onlab.onos.store.AbstractStore;
import org.onlab.onos.store.ClockService;
import org.onlab.onos.store.Timestamp;
import org.onlab.onos.store.cluster.messaging.ClusterCommunicationService;
import org.onlab.onos.store.cluster.messaging.ClusterMessage;
import org.onlab.onos.store.cluster.messaging.ClusterMessageHandler;
import org.onlab.onos.store.cluster.messaging.MessageSubject;
import org.onlab.onos.store.common.impl.Timestamped;
import org.onlab.onos.store.serializers.DistributedStoreSerializers;
import org.onlab.onos.store.serializers.KryoSerializer;
import org.onlab.util.KryoPool;
import org.onlab.util.NewConcurrentHashMap;
import org.slf4j.Logger;

import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.Map.Entry;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

import static java.util.concurrent.Executors.newSingleThreadScheduledExecutor;
import static org.onlab.onos.cluster.ControllerNodeToNodeId.toNodeId;
import static org.onlab.onos.net.DefaultAnnotations.union;
import static org.onlab.onos.net.DefaultAnnotations.merge;
import static org.onlab.onos.net.Link.Type.DIRECT;
import static org.onlab.onos.net.Link.Type.INDIRECT;
import static org.onlab.onos.net.link.LinkEvent.Type.*;
import static org.onlab.util.Tools.namedThreads;
import static org.slf4j.LoggerFactory.getLogger;
import static com.google.common.collect.Multimaps.synchronizedSetMultimap;
import static com.google.common.base.Predicates.notNull;

/**
 * Manages the inventory of infrastructure links in a distributed data store
 * that uses optimistic replication and gossip-based techniques.
 */
@Component(immediate = true)
@Service
public class GossipLinkStore
        extends AbstractStore<LinkEvent, LinkStoreDelegate>
        implements LinkStore {

    private final Logger log = getLogger(getClass());

    // Link inventory
    private final ConcurrentMap<LinkKey, ConcurrentMap<ProviderId, Timestamped<LinkDescription>>> linkDescs =
            new ConcurrentHashMap<>();

    // Link instance cache
    private final ConcurrentMap<LinkKey, Link> links = new ConcurrentHashMap<>();

    // Egress and ingress link sets
    private final SetMultimap<DeviceId, LinkKey> srcLinks = createSynchronizedHashMultiMap();
    private final SetMultimap<DeviceId, LinkKey> dstLinks = createSynchronizedHashMultiMap();

    // Removed links, keyed by link and tagged with the removal timestamp
    private final Map<LinkKey, Timestamp> removedLinks = Maps.newHashMap();

    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
    protected ClockService clockService;

    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
    protected ClusterCommunicationService clusterCommunicator;

    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
    protected ClusterService clusterService;

    private static final KryoSerializer SERIALIZER = new KryoSerializer() {
        @Override
        protected void setupKryoPool() {
            serializerPool = KryoPool.newBuilder()
                    .register(DistributedStoreSerializers.COMMON)
                    .register(InternalLinkEvent.class)
                    .register(InternalLinkRemovedEvent.class)
                    .build()
                    .populate(1);
        }
    };

    private ScheduledExecutorService executor;

    @Activate
    public void activate() {

        clusterCommunicator.addSubscriber(
                GossipLinkStoreMessageSubjects.LINK_UPDATE,
                new InternalLinkEventListener());
        clusterCommunicator.addSubscriber(
                GossipLinkStoreMessageSubjects.LINK_REMOVED,
                new InternalLinkRemovedEventListener());
        clusterCommunicator.addSubscriber(
                GossipLinkStoreMessageSubjects.LINK_ANTI_ENTROPY_ADVERTISEMENT,
                new InternalLinkAntiEntropyAdvertisementListener());

        executor =
                newSingleThreadScheduledExecutor(namedThreads("link-anti-entropy-%d"));

        // TODO: Make these configurable
        long initialDelaySec = 5;
        long periodSec = 5;
        // start anti-entropy thread
        executor.scheduleAtFixedRate(new SendAdvertisementTask(),
                initialDelaySec, periodSec, TimeUnit.SECONDS);

        log.info("Started");
    }

    @Deactivate
    public void deactivate() {
        // stop the anti-entropy task before tearing down the local state
        executor.shutdownNow();
        linkDescs.clear();
        links.clear();
        srcLinks.clear();
        dstLinks.clear();
        log.info("Stopped");
    }

    @Override
    public int getLinkCount() {
        return links.size();
    }

    @Override
    public Iterable<Link> getLinks() {
        return Collections.unmodifiableCollection(links.values());
    }

    @Override
    public Set<Link> getDeviceEgressLinks(DeviceId deviceId) {
        // lock for iteration
        synchronized (srcLinks) {
            return FluentIterable.from(srcLinks.get(deviceId))
                    .transform(lookupLink())
                    .filter(notNull())
                    .toSet();
        }
    }

    @Override
    public Set<Link> getDeviceIngressLinks(DeviceId deviceId) {
        // lock for iteration
        synchronized (dstLinks) {
            return FluentIterable.from(dstLinks.get(deviceId))
                    .transform(lookupLink())
                    .filter(notNull())
                    .toSet();
        }
    }

    @Override
    public Link getLink(ConnectPoint src, ConnectPoint dst) {
        return links.get(new LinkKey(src, dst));
    }

    @Override
    public Set<Link> getEgressLinks(ConnectPoint src) {
        Set<Link> egress = new HashSet<>();
        for (LinkKey linkKey : srcLinks.get(src.deviceId())) {
            if (linkKey.src().equals(src)) {
                egress.add(links.get(linkKey));
            }
        }
        return egress;
    }

    @Override
    public Set<Link> getIngressLinks(ConnectPoint dst) {
        Set<Link> ingress = new HashSet<>();
        for (LinkKey linkKey : dstLinks.get(dst.deviceId())) {
            if (linkKey.dst().equals(dst)) {
                ingress.add(links.get(linkKey));
            }
        }
        return ingress;
    }

    @Override
    public LinkEvent createOrUpdateLink(ProviderId providerId,
                                        LinkDescription linkDescription) {

        DeviceId dstDeviceId = linkDescription.dst().deviceId();
        Timestamp newTimestamp = clockService.getTimestamp(dstDeviceId);

        final Timestamped<LinkDescription> deltaDesc = new Timestamped<>(linkDescription, newTimestamp);

        LinkEvent event = createOrUpdateLinkInternal(providerId, deltaDesc);

        if (event != null) {
            log.info("Notifying peers of a link update topology event from providerId: "
                    + "{} between src: {} and dst: {}",
                    providerId, linkDescription.src(), linkDescription.dst());
            try {
                notifyPeers(new InternalLinkEvent(providerId, deltaDesc));
            } catch (IOException e) {
                log.error("Failed to notify peers of a link update topology event from providerId: "
                        + "{} between src: {} and dst: {}",
                        providerId, linkDescription.src(), linkDescription.dst());
            }
        }
        return event;
    }

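    // Applies a timestamped link description (local or received from a peer),
    // honoring any newer removal tombstone, and returns the resulting event
    // or null if the description is stale.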
    private LinkEvent createOrUpdateLinkInternal(
            ProviderId providerId,
            Timestamped<LinkDescription> linkDescription) {

        LinkKey key = new LinkKey(linkDescription.value().src(), linkDescription.value().dst());
        ConcurrentMap<ProviderId, Timestamped<LinkDescription>> descs = getLinkDescriptions(key);

        synchronized (descs) {
            // if the link was previously removed, we should proceed if and
            // only if this request is more recent.
            Timestamp linkRemovedTimestamp = removedLinks.get(key);
            if (linkRemovedTimestamp != null) {
                if (linkDescription.isNewer(linkRemovedTimestamp)) {
                    removedLinks.remove(key);
                } else {
                    return null;
                }
            }

            final Link oldLink = links.get(key);
            // update description
            createOrUpdateLinkDescription(descs, providerId, linkDescription);
            final Link newLink = composeLink(descs);
            if (oldLink == null) {
                return createLink(key, newLink);
            }
            return updateLink(key, oldLink, newLink);
        }
    }

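    // Merges the incoming description with any existing one from the same provider,
    // unioning their annotations; a stale incoming description is discarded.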
    // Guarded by linkDescs value (=locking each Link)
    private Timestamped<LinkDescription> createOrUpdateLinkDescription(
            ConcurrentMap<ProviderId, Timestamped<LinkDescription>> existingLinkDescriptions,
            ProviderId providerId,
            Timestamped<LinkDescription> linkDescription) {

        // only overwrite an existing description if the incoming one is newer,
        // merging the annotations of the two
        Timestamped<LinkDescription> existingLinkDescription = existingLinkDescriptions.get(providerId);
        if (existingLinkDescription != null && existingLinkDescription.isNewer(linkDescription)) {
            return null;
        }
        Timestamped<LinkDescription> newLinkDescription = linkDescription;
        if (existingLinkDescription != null) {
            SparseAnnotations merged = union(existingLinkDescription.value().annotations(),
                    linkDescription.value().annotations());
            newLinkDescription = new Timestamped<LinkDescription>(
                    new DefaultLinkDescription(
                            linkDescription.value().src(),
                            linkDescription.value().dst(),
                            linkDescription.value().type(), merged),
                    linkDescription.timestamp());
        }
        return existingLinkDescriptions.put(providerId, newLinkDescription);
    }

    // Creates and stores the link and returns the appropriate event.
    // Guarded by linkDescs value (=locking each Link)
    private LinkEvent createLink(LinkKey key, Link newLink) {

        if (newLink.providerId().isAncillary()) {
            // TODO: revisit ancillary only Link handling

            // currently treating ancillary only as down (not visible outside)
            return null;
        }

        links.put(key, newLink);
        srcLinks.put(newLink.src().deviceId(), key);
        dstLinks.put(newLink.dst().deviceId(), key);
        return new LinkEvent(LINK_ADDED, newLink);
    }

    // Updates, if necessary, the specified link and returns the appropriate event.
    // Guarded by linkDescs value (=locking each Link)
    private LinkEvent updateLink(LinkKey key, Link oldLink, Link newLink) {

        if (newLink.providerId().isAncillary()) {
            // TODO: revisit ancillary only Link handling

            // currently treating ancillary only as down (not visible outside)
            return null;
        }

        if ((oldLink.type() == INDIRECT && newLink.type() == DIRECT) ||
            !AnnotationsUtil.isEqual(oldLink.annotations(), newLink.annotations())) {

            links.put(key, newLink);
            // strictly speaking, the following can be omitted
            srcLinks.put(oldLink.src().deviceId(), key);
            dstLinks.put(oldLink.dst().deviceId(), key);
            return new LinkEvent(LINK_UPDATED, newLink);
        }
        return null;
    }

    @Override
    public LinkEvent removeLink(ConnectPoint src, ConnectPoint dst) {
        final LinkKey key = new LinkKey(src, dst);

        DeviceId dstDeviceId = dst.deviceId();
        Timestamp timestamp = clockService.getTimestamp(dstDeviceId);

        LinkEvent event = removeLinkInternal(key, timestamp);

        if (event != null) {
            log.info("Notifying peers of a link removed topology event for a link "
                    + "between src: {} and dst: {}", src, dst);
            try {
                notifyPeers(new InternalLinkRemovedEvent(key, timestamp));
            } catch (IOException e) {
                log.error("Failed to notify peers of a link removed topology event for a link "
                        + "between src: {} and dst: {}", src, dst);
            }
        }
        return event;
    }

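    // Removes the link unless the primary provider holds a newer description,
    // recording a removal tombstone for anti-entropy.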
    private LinkEvent removeLinkInternal(LinkKey key, Timestamp timestamp) {
        ConcurrentMap<ProviderId, Timestamped<LinkDescription>> linkDescriptions =
                getLinkDescriptions(key);
        synchronized (linkDescriptions) {
            // accept removal request only if given timestamp is newer than
            // the latest Timestamp from the primary provider
            ProviderId primaryProviderId = pickPrimaryProviderId(linkDescriptions);
            if (primaryProviderId == null ||
                    linkDescriptions.get(primaryProviderId).isNewer(timestamp)) {
                // unknown link or stale removal request
                return null;
            }
            removedLinks.put(key, timestamp);
            Link link = links.remove(key);
            linkDescriptions.clear();
            if (link != null) {
                srcLinks.remove(link.src().deviceId(), key);
                dstLinks.remove(link.dst().deviceId(), key);
                return new LinkEvent(LINK_REMOVED, link);
            }
            return null;
        }
    }

    private static <K, V> SetMultimap<K, V> createSynchronizedHashMultiMap() {
        return synchronizedSetMultimap(HashMultimap.<K, V>create());
    }

    /**
     * Picks the primary provider for a link.
     *
     * @param providerDescs per-provider link descriptions
     * @return the primary ProviderId, an arbitrarily chosen ancillary one if no
     *         primary exists, or null if there are no descriptions at all
     */
    private ProviderId pickPrimaryProviderId(
            ConcurrentMap<ProviderId, Timestamped<LinkDescription>> providerDescs) {

        ProviderId fallBackPrimary = null;
        for (Entry<ProviderId, Timestamped<LinkDescription>> e : providerDescs.entrySet()) {
            if (!e.getKey().isAncillary()) {
                return e.getKey();
            } else if (fallBackPrimary == null) {
                // pick the first ancillary provider as a fallback in case there is no primary
                fallBackPrimary = e.getKey();
            }
        }
        return fallBackPrimary;
    }

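    // Composes the externally visible Link from the primary provider's description,
    // merging in annotations contributed by ancillary providers.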
    private Link composeLink(ConcurrentMap<ProviderId, Timestamped<LinkDescription>> linkDescriptions) {
        ProviderId primaryProviderId = pickPrimaryProviderId(linkDescriptions);
        Timestamped<LinkDescription> base = linkDescriptions.get(primaryProviderId);

        ConnectPoint src = base.value().src();
        ConnectPoint dst = base.value().dst();
        Type type = base.value().type();
        DefaultAnnotations annotations = DefaultAnnotations.builder().build();
        annotations = merge(annotations, base.value().annotations());

        for (Entry<ProviderId, Timestamped<LinkDescription>> e : linkDescriptions.entrySet()) {
            if (primaryProviderId.equals(e.getKey())) {
                continue;
            }

            // TODO: should keep track of Description timestamp
            // and only merge conflicting keys when timestamp is newer
            // Currently assuming there will never be a key conflict between
            // providers

            // annotation merging. not so efficient, should revisit later
            annotations = merge(annotations, e.getValue().value().annotations());
        }

        return new DefaultLink(primaryProviderId, src, dst, type, annotations);
    }

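    // Returns the per-provider description map for the given link, creating it if absent.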
    private ConcurrentMap<ProviderId, Timestamped<LinkDescription>> getLinkDescriptions(LinkKey key) {
        return ConcurrentUtils.createIfAbsentUnchecked(linkDescs, key,
                NewConcurrentHashMap.<ProviderId, Timestamped<LinkDescription>>ifNeeded());
    }

    private Timestamped<LinkDescription> getLinkDescription(LinkKey key, ProviderId providerId) {
        return getLinkDescriptions(key).get(providerId);
    }

    private final Function<LinkKey, Link> lookupLink = new LookupLink();
    private Function<LinkKey, Link> lookupLink() {
        return lookupLink;
    }

    private final class LookupLink implements Function<LinkKey, Link> {
        @Override
        public Link apply(LinkKey input) {
            return links.get(input);
        }
    }

    private static final Predicate<Provided> IS_PRIMARY = new IsPrimary();
    private static Predicate<Provided> isPrimary() {
        return IS_PRIMARY;
    }

    private static final class IsPrimary implements Predicate<Provided> {

        @Override
        public boolean apply(Provided input) {
            return !input.providerId().isAncillary();
        }
    }

    private void notifyDelegateIfNotNull(LinkEvent event) {
        if (event != null) {
            notifyDelegate(event);
        }
    }

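    // Serializes the given event and broadcasts it to the cluster.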
    // TODO: should we be throwing exception?
    private void broadcastMessage(MessageSubject subject, Object event) throws IOException {
        ClusterMessage message = new ClusterMessage(
                clusterService.getLocalNode().id(),
                subject,
                SERIALIZER.encode(event));
        clusterCommunicator.broadcast(message);
    }

    // TODO: should we be throwing exception?
    private void unicastMessage(NodeId recipient, MessageSubject subject, Object event) {
        try {
            ClusterMessage message = new ClusterMessage(
                    clusterService.getLocalNode().id(),
                    subject,
                    SERIALIZER.encode(event));
            clusterCommunicator.unicast(message, recipient);
        } catch (IOException e) {
            log.error("Failed to send a {} message to {}", subject.value(), recipient, e);
        }
    }

    private void notifyPeers(InternalLinkEvent event) throws IOException {
        broadcastMessage(GossipLinkStoreMessageSubjects.LINK_UPDATE, event);
    }

    private void notifyPeers(InternalLinkRemovedEvent event) throws IOException {
        broadcastMessage(GossipLinkStoreMessageSubjects.LINK_REMOVED, event);
    }

    private void notifyPeer(NodeId peer, InternalLinkEvent event) {
        unicastMessage(peer, GossipLinkStoreMessageSubjects.LINK_UPDATE, event);
    }

    private void notifyPeer(NodeId peer, InternalLinkRemovedEvent event) {
        unicastMessage(peer, GossipLinkStoreMessageSubjects.LINK_REMOVED, event);
    }

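    // Periodically picks a random peer and sends it a link anti-entropy advertisement.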
    private final class SendAdvertisementTask implements Runnable {

        @Override
        public void run() {
            if (Thread.currentThread().isInterrupted()) {
                log.info("Interrupted, quitting");
                return;
            }

            try {
                final NodeId self = clusterService.getLocalNode().id();
                Set<ControllerNode> nodes = clusterService.getNodes();

                ImmutableList<NodeId> nodeIds = FluentIterable.from(nodes)
                        .transform(toNodeId())
                        .toList();

                if (nodeIds.size() == 1 && nodeIds.get(0).equals(self)) {
                    log.info("No other peers in the cluster.");
                    return;
                }

                NodeId peer;
                do {
                    int idx = RandomUtils.nextInt(0, nodeIds.size());
                    peer = nodeIds.get(idx);
                } while (peer.equals(self));

                LinkAntiEntropyAdvertisement ad = createAdvertisement();

                if (Thread.currentThread().isInterrupted()) {
                    log.info("Interrupted, quitting");
                    return;
                }

                try {
                    unicastMessage(peer, GossipLinkStoreMessageSubjects.LINK_ANTI_ENTROPY_ADVERTISEMENT, ad);
                } catch (Exception e) {
                    log.error("Failed to send anti-entropy advertisement", e);
                    return;
                }
            } catch (Exception e) {
                // catch all exceptions to avoid having the scheduled task suppressed
                log.error("Exception thrown while sending advertisement", e);
            }
        }
    }

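    // Builds an advertisement carrying the timestamps of all known per-provider
    // link fragments plus the removal tombstones.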
    private LinkAntiEntropyAdvertisement createAdvertisement() {
        final NodeId self = clusterService.getLocalNode().id();

        Map<LinkFragmentId, Timestamp> linkTimestamps = new HashMap<>(linkDescs.size());
        Map<LinkKey, Timestamp> linkTombstones = new HashMap<>(removedLinks.size());

        for (Entry<LinkKey, ConcurrentMap<ProviderId, Timestamped<LinkDescription>>>
                provs : linkDescs.entrySet()) {

            final LinkKey linkKey = provs.getKey();
            final ConcurrentMap<ProviderId, Timestamped<LinkDescription>> linkDesc = provs.getValue();
            synchronized (linkDesc) {
                for (Map.Entry<ProviderId, Timestamped<LinkDescription>> e : linkDesc.entrySet()) {
                    linkTimestamps.put(new LinkFragmentId(linkKey, e.getKey()), e.getValue().timestamp());
                }
            }
        }

        linkTombstones.putAll(removedLinks);

        return new LinkAntiEntropyAdvertisement(self, linkTimestamps, linkTombstones);
    }

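    // Reconciles local link state against a peer's advertisement: fresher local
    // fragments and removals are pushed back to the peer, while newer peer
    // tombstones trigger local removal.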
    private void handleAntiEntropyAdvertisement(LinkAntiEntropyAdvertisement advertisement) {

        NodeId peer = advertisement.sender();

        Map<LinkFragmentId, Timestamp> linkTimestamps = advertisement.linkTimestamps();
        Map<LinkKey, Timestamp> linkTombstones = advertisement.linkTombstones();
        for (Map.Entry<LinkFragmentId, Timestamp> entry : linkTimestamps.entrySet()) {
            LinkFragmentId linkFragmentId = entry.getKey();
            Timestamp peerTimestamp = entry.getValue();

            LinkKey key = linkFragmentId.linkKey();
            ProviderId providerId = linkFragmentId.providerId();

            Timestamped<LinkDescription> linkDescription = getLinkDescription(key, providerId);
            if (linkDescription != null && linkDescription.isNewer(peerTimestamp)) {
                // I have more recent link description. update peer.
                notifyPeer(peer, new InternalLinkEvent(providerId, linkDescription));
            }
            // else TODO: Peer has more recent link description. request it.

            Timestamp linkRemovedTimestamp = removedLinks.get(key);
            if (linkRemovedTimestamp != null && linkRemovedTimestamp.compareTo(peerTimestamp) > 0) {
                // peer has a zombie link. update peer.
                notifyPeer(peer, new InternalLinkRemovedEvent(key, linkRemovedTimestamp));
            }
        }

        for (Map.Entry<LinkKey, Timestamp> entry : linkTombstones.entrySet()) {
            LinkKey key = entry.getKey();
            Timestamp peerTimestamp = entry.getValue();

            ProviderId primaryProviderId = pickPrimaryProviderId(getLinkDescriptions(key));
            if (primaryProviderId != null) {
                if (!getLinkDescription(key, primaryProviderId).isNewer(peerTimestamp)) {
                    notifyDelegateIfNotNull(removeLinkInternal(key, peerTimestamp));
                }
            }
        }
    }

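    // Applies link update events received from peers.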
    private class InternalLinkEventListener implements ClusterMessageHandler {
        @Override
        public void handle(ClusterMessage message) {

            log.info("Received link event from peer: {}", message.sender());
            InternalLinkEvent event = (InternalLinkEvent) SERIALIZER.decode(message.payload());

            ProviderId providerId = event.providerId();
            Timestamped<LinkDescription> linkDescription = event.linkDescription();

            notifyDelegateIfNotNull(createOrUpdateLinkInternal(providerId, linkDescription));
        }
    }

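    // Applies link removed events received from peers.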
    private class InternalLinkRemovedEventListener implements ClusterMessageHandler {
        @Override
        public void handle(ClusterMessage message) {

            log.info("Received link removed event from peer: {}", message.sender());
            InternalLinkRemovedEvent event = (InternalLinkRemovedEvent) SERIALIZER.decode(message.payload());

            LinkKey linkKey = event.linkKey();
            Timestamp timestamp = event.timestamp();

            notifyDelegateIfNotNull(removeLinkInternal(linkKey, timestamp));
        }
    }

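    // Processes anti-entropy advertisements received from peers.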
    private final class InternalLinkAntiEntropyAdvertisementListener implements ClusterMessageHandler {

        @Override
        public void handle(ClusterMessage message) {
            log.info("Received Link Anti-Entropy advertisement from peer: {}", message.sender());
            LinkAntiEntropyAdvertisement advertisement = SERIALIZER.decode(message.payload());
            handleAntiEntropyAdvertisement(advertisement);
        }
    }
}