Charles Chan | c91c878 | 2016-03-30 17:54:24 -0700 | [diff] [blame] | 1 | /* |
Pier Luigi | 69f774d | 2018-02-28 12:10:50 +0100 | [diff] [blame] | 2 | * Copyright 2018-present Open Networking Foundation |
Charles Chan | c91c878 | 2016-03-30 17:54:24 -0700 | [diff] [blame] | 3 | * |
| 4 | * Licensed under the Apache License, Version 2.0 (the "License"); |
| 5 | * you may not use this file except in compliance with the License. |
| 6 | * You may obtain a copy of the License at |
| 7 | * |
| 8 | * http://www.apache.org/licenses/LICENSE-2.0 |
| 9 | * |
| 10 | * Unless required by applicable law or agreed to in writing, software |
| 11 | * distributed under the License is distributed on an "AS IS" BASIS, |
| 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 13 | * See the License for the specific language governing permissions and |
| 14 | * limitations under the License. |
| 15 | */ |
| 16 | |
Pier Luigi | 69f774d | 2018-02-28 12:10:50 +0100 | [diff] [blame] | 17 | package org.onosproject.segmentrouting.mcast; |
Charles Chan | c91c878 | 2016-03-30 17:54:24 -0700 | [diff] [blame] | 18 | |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 19 | import com.google.common.base.Objects; |
Pier | 71c5577 | 2018-04-17 17:25:22 +0200 | [diff] [blame] | 20 | import com.google.common.collect.HashMultimap; |
Charles Chan | c91c878 | 2016-03-30 17:54:24 -0700 | [diff] [blame] | 21 | import com.google.common.collect.ImmutableSet; |
| 22 | import com.google.common.collect.Lists; |
Pier Luigi | 91573e1 | 2018-01-23 16:06:38 +0100 | [diff] [blame] | 23 | import com.google.common.collect.Maps; |
Pier | 71c5577 | 2018-04-17 17:25:22 +0200 | [diff] [blame] | 24 | import com.google.common.collect.Multimap; |
Charles Chan | c91c878 | 2016-03-30 17:54:24 -0700 | [diff] [blame] | 25 | import com.google.common.collect.Sets; |
Charles Chan | c91c878 | 2016-03-30 17:54:24 -0700 | [diff] [blame] | 26 | import org.onlab.packet.IpAddress; |
Charles Chan | c91c878 | 2016-03-30 17:54:24 -0700 | [diff] [blame] | 27 | import org.onlab.packet.VlanId; |
| 28 | import org.onlab.util.KryoNamespace; |
Pier | db27b8d | 2018-04-17 16:29:56 +0200 | [diff] [blame] | 29 | import org.onosproject.cluster.NodeId; |
Charles Chan | c91c878 | 2016-03-30 17:54:24 -0700 | [diff] [blame] | 30 | import org.onosproject.core.ApplicationId; |
| 31 | import org.onosproject.core.CoreService; |
Pier | 1f87aca | 2018-03-14 16:47:32 -0700 | [diff] [blame] | 32 | import org.onosproject.mcast.api.McastEvent; |
| 33 | import org.onosproject.mcast.api.McastRoute; |
Pier | 7b65716 | 2018-03-27 11:29:42 -0700 | [diff] [blame] | 34 | import org.onosproject.mcast.api.McastRouteData; |
Pier | 1f87aca | 2018-03-14 16:47:32 -0700 | [diff] [blame] | 35 | import org.onosproject.mcast.api.McastRouteUpdate; |
Harshada Chaundkar | 9204f31 | 2019-07-02 16:01:24 +0000 | [diff] [blame] | 36 | import org.onosproject.net.Device; |
Charles Chan | ba59dd6 | 2018-05-10 22:19:49 +0000 | [diff] [blame] | 37 | import org.onosproject.net.HostId; |
Charles Chan | c91c878 | 2016-03-30 17:54:24 -0700 | [diff] [blame] | 38 | import org.onosproject.net.ConnectPoint; |
| 39 | import org.onosproject.net.DeviceId; |
| 40 | import org.onosproject.net.Link; |
| 41 | import org.onosproject.net.Path; |
Harshada Chaundkar | 9204f31 | 2019-07-02 16:01:24 +0000 | [diff] [blame] | 42 | import org.onosproject.net.Port; |
Charles Chan | c91c878 | 2016-03-30 17:54:24 -0700 | [diff] [blame] | 43 | import org.onosproject.net.PortNumber; |
Charles Chan | 7277950 | 2016-04-23 17:36:10 -0700 | [diff] [blame] | 44 | import org.onosproject.net.flowobjective.DefaultObjectiveContext; |
Charles Chan | c91c878 | 2016-03-30 17:54:24 -0700 | [diff] [blame] | 45 | import org.onosproject.net.flowobjective.ForwardingObjective; |
| 46 | import org.onosproject.net.flowobjective.NextObjective; |
Charles Chan | 7277950 | 2016-04-23 17:36:10 -0700 | [diff] [blame] | 47 | import org.onosproject.net.flowobjective.ObjectiveContext; |
Pier Luigi | 69f774d | 2018-02-28 12:10:50 +0100 | [diff] [blame] | 48 | import org.onosproject.segmentrouting.SegmentRoutingManager; |
Charles Chan | c91c878 | 2016-03-30 17:54:24 -0700 | [diff] [blame] | 49 | import org.onosproject.store.serializers.KryoNamespaces; |
| 50 | import org.onosproject.store.service.ConsistentMap; |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 51 | import org.onosproject.store.service.ConsistentMultimap; |
Harshada Chaundkar | 9204f31 | 2019-07-02 16:01:24 +0000 | [diff] [blame] | 52 | import org.onosproject.store.service.DistributedSet; |
Charles Chan | c91c878 | 2016-03-30 17:54:24 -0700 | [diff] [blame] | 53 | import org.onosproject.store.service.Serializer; |
Andrea Campanella | 5b4cd65 | 2018-06-05 14:19:21 +0200 | [diff] [blame] | 54 | import org.onosproject.store.service.Versioned; |
Charles Chan | c91c878 | 2016-03-30 17:54:24 -0700 | [diff] [blame] | 55 | import org.slf4j.Logger; |
| 56 | import org.slf4j.LoggerFactory; |
| 57 | |
Pier Luigi | 35dab3f | 2018-01-25 16:16:02 +0100 | [diff] [blame] | 58 | import java.time.Instant; |
Charles Chan | c91c878 | 2016-03-30 17:54:24 -0700 | [diff] [blame] | 59 | import java.util.Collection; |
| 60 | import java.util.Collections; |
Pier Luigi | 91573e1 | 2018-01-23 16:06:38 +0100 | [diff] [blame] | 61 | import java.util.Comparator; |
Harshada Chaundkar | 9204f31 | 2019-07-02 16:01:24 +0000 | [diff] [blame] | 62 | import java.util.Iterator; |
Charles Chan | c91c878 | 2016-03-30 17:54:24 -0700 | [diff] [blame] | 63 | import java.util.List; |
Charles Chan | 7277950 | 2016-04-23 17:36:10 -0700 | [diff] [blame] | 64 | import java.util.Map; |
Pier | 1f87aca | 2018-03-14 16:47:32 -0700 | [diff] [blame] | 65 | import java.util.Map.Entry; |
Charles Chan | c91c878 | 2016-03-30 17:54:24 -0700 | [diff] [blame] | 66 | import java.util.Optional; |
| 67 | import java.util.Set; |
Pier Luigi | 35dab3f | 2018-01-25 16:16:02 +0100 | [diff] [blame] | 68 | import java.util.concurrent.ScheduledExecutorService; |
| 69 | import java.util.concurrent.TimeUnit; |
pier | c32ef42 | 2020-01-27 17:45:03 +0100 | [diff] [blame] | 70 | import java.util.concurrent.atomic.AtomicInteger; |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 71 | import java.util.concurrent.atomic.AtomicReference; |
Charles Chan | 7277950 | 2016-04-23 17:36:10 -0700 | [diff] [blame] | 72 | import java.util.stream.Collectors; |
| 73 | |
Pier Luigi | 35dab3f | 2018-01-25 16:16:02 +0100 | [diff] [blame] | 74 | import static java.util.concurrent.Executors.newScheduledThreadPool; |
| 75 | import static org.onlab.util.Tools.groupedThreads; |
Charles Chan | ba59dd6 | 2018-05-10 22:19:49 +0000 | [diff] [blame] | 76 | |
Pier | db27b8d | 2018-04-17 16:29:56 +0200 | [diff] [blame] | 77 | import static org.onosproject.mcast.api.McastEvent.Type.ROUTE_ADDED; |
Pier | 7b65716 | 2018-03-27 11:29:42 -0700 | [diff] [blame] | 78 | import static org.onosproject.mcast.api.McastEvent.Type.ROUTE_REMOVED; |
Andrea Campanella | ef30d7a | 2018-04-27 14:44:15 +0200 | [diff] [blame] | 79 | import static org.onosproject.mcast.api.McastEvent.Type.SOURCES_ADDED; |
| 80 | import static org.onosproject.mcast.api.McastEvent.Type.SOURCES_REMOVED; |
Charles Chan | ba59dd6 | 2018-05-10 22:19:49 +0000 | [diff] [blame] | 81 | import static org.onosproject.mcast.api.McastEvent.Type.SINKS_ADDED; |
| 82 | import static org.onosproject.mcast.api.McastEvent.Type.SINKS_REMOVED; |
| 83 | |
Pier | 979e61a | 2018-03-07 11:42:50 +0100 | [diff] [blame] | 84 | import static org.onosproject.segmentrouting.mcast.McastRole.EGRESS; |
| 85 | import static org.onosproject.segmentrouting.mcast.McastRole.INGRESS; |
| 86 | import static org.onosproject.segmentrouting.mcast.McastRole.TRANSIT; |
Charles Chan | c91c878 | 2016-03-30 17:54:24 -0700 | [diff] [blame] | 87 | |
| 88 | /** |
Pier Luigi | 69f774d | 2018-02-28 12:10:50 +0100 | [diff] [blame] | 89 | * Handles Multicast related events. |
Charles Chan | c91c878 | 2016-03-30 17:54:24 -0700 | [diff] [blame] | 90 | */ |
Charles Chan | 1eaf480 | 2016-04-18 13:44:03 -0700 | [diff] [blame] | 91 | public class McastHandler { |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 92 | // Internal elements |
Charles Chan | 1eaf480 | 2016-04-18 13:44:03 -0700 | [diff] [blame] | 93 | private static final Logger log = LoggerFactory.getLogger(McastHandler.class); |
Charles Chan | c91c878 | 2016-03-30 17:54:24 -0700 | [diff] [blame] | 94 | private final SegmentRoutingManager srManager; |
Pier | db27b8d | 2018-04-17 16:29:56 +0200 | [diff] [blame] | 95 | private final McastUtils mcastUtils; |
Charles Chan | 7277950 | 2016-04-23 17:36:10 -0700 | [diff] [blame] | 96 | private final ConsistentMap<McastStoreKey, NextObjective> mcastNextObjStore; |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 97 | private final ConsistentMap<McastRoleStoreKey, McastRole> mcastRoleStore; |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 98 | private final ConsistentMultimap<McastPathStoreKey, List<Link>> mcastPathStore; |
Harshada Chaundkar | 9204f31 | 2019-07-02 16:01:24 +0000 | [diff] [blame] | 99 | private final DistributedSet<McastFilteringObjStoreKey> mcastFilteringObjStore; |
Pier Luigi | 35dab3f | 2018-01-25 16:16:02 +0100 | [diff] [blame] | 100 | // Stability threshold for Mcast. Seconds |
| 101 | private static final long MCAST_STABLITY_THRESHOLD = 5; |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 102 | // Verify interval for Mcast bucket corrector |
Pier Luigi | 35dab3f | 2018-01-25 16:16:02 +0100 | [diff] [blame] | 103 | private static final long MCAST_VERIFY_INTERVAL = 30; |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 104 | // Max verify that can be processed at the same time |
| 105 | private static final int MAX_VERIFY_ON_FLIGHT = 10; |
| 106 | // Last change done |
| 107 | private AtomicReference<Instant> lastMcastChange = new AtomicReference<>(Instant.now()); |
| 108 | // Last bucker corrector execution |
| 109 | private AtomicReference<Instant> lastBktCorrExecution = new AtomicReference<>(Instant.now()); |
| 110 | // Executors for mcast bucket corrector and for the events |
| 111 | private ScheduledExecutorService mcastCorrector |
| 112 | = newScheduledThreadPool(1, groupedThreads("onos", "m-corrector", log)); |
| 113 | private ScheduledExecutorService mcastWorker |
| 114 | = newScheduledThreadPool(1, groupedThreads("onos", "m-worker-%d", log)); |
Pier Luigi | 35dab3f | 2018-01-25 16:16:02 +0100 | [diff] [blame] | 115 | |
Charles Chan | 7277950 | 2016-04-23 17:36:10 -0700 | [diff] [blame] | 116 | /** |
Charles Chan | c91c878 | 2016-03-30 17:54:24 -0700 | [diff] [blame] | 117 | * Constructs the McastEventHandler. |
| 118 | * |
| 119 | * @param srManager Segment Routing manager |
| 120 | */ |
Charles Chan | 1eaf480 | 2016-04-18 13:44:03 -0700 | [diff] [blame] | 121 | public McastHandler(SegmentRoutingManager srManager) { |
Pier | 7b65716 | 2018-03-27 11:29:42 -0700 | [diff] [blame] | 122 | ApplicationId coreAppId = srManager.coreService.getAppId(CoreService.CORE_APP_NAME); |
Charles Chan | c91c878 | 2016-03-30 17:54:24 -0700 | [diff] [blame] | 123 | this.srManager = srManager; |
Pier | 7b65716 | 2018-03-27 11:29:42 -0700 | [diff] [blame] | 124 | KryoNamespace.Builder mcastKryo = new KryoNamespace.Builder() |
Charles Chan | c91c878 | 2016-03-30 17:54:24 -0700 | [diff] [blame] | 125 | .register(KryoNamespaces.API) |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 126 | .register(new McastStoreKeySerializer(), McastStoreKey.class); |
Pier | 7b65716 | 2018-03-27 11:29:42 -0700 | [diff] [blame] | 127 | mcastNextObjStore = srManager.storageService |
Charles Chan | 7277950 | 2016-04-23 17:36:10 -0700 | [diff] [blame] | 128 | .<McastStoreKey, NextObjective>consistentMapBuilder() |
Charles Chan | c91c878 | 2016-03-30 17:54:24 -0700 | [diff] [blame] | 129 | .withName("onos-mcast-nextobj-store") |
Charles Chan | 4922a17 | 2016-05-23 16:45:45 -0700 | [diff] [blame] | 130 | .withSerializer(Serializer.using(mcastKryo.build("McastHandler-NextObj"))) |
Charles Chan | c91c878 | 2016-03-30 17:54:24 -0700 | [diff] [blame] | 131 | .build(); |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 132 | mcastKryo = new KryoNamespace.Builder() |
| 133 | .register(KryoNamespaces.API) |
| 134 | .register(new McastRoleStoreKeySerializer(), McastRoleStoreKey.class) |
| 135 | .register(McastRole.class); |
Pier | 7b65716 | 2018-03-27 11:29:42 -0700 | [diff] [blame] | 136 | mcastRoleStore = srManager.storageService |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 137 | .<McastRoleStoreKey, McastRole>consistentMapBuilder() |
Charles Chan | 7277950 | 2016-04-23 17:36:10 -0700 | [diff] [blame] | 138 | .withName("onos-mcast-role-store") |
Charles Chan | 4922a17 | 2016-05-23 16:45:45 -0700 | [diff] [blame] | 139 | .withSerializer(Serializer.using(mcastKryo.build("McastHandler-Role"))) |
Charles Chan | 7277950 | 2016-04-23 17:36:10 -0700 | [diff] [blame] | 140 | .build(); |
Harshada Chaundkar | 9204f31 | 2019-07-02 16:01:24 +0000 | [diff] [blame] | 141 | mcastKryo = new KryoNamespace.Builder() |
| 142 | .register(KryoNamespaces.API) |
| 143 | .register(new McastFilteringObjStoreSerializer(), McastFilteringObjStoreKey.class); |
| 144 | mcastFilteringObjStore = srManager.storageService |
| 145 | .<McastFilteringObjStoreKey>setBuilder() |
| 146 | .withName("onos-mcast-filtering-store") |
| 147 | .withSerializer(Serializer.using(mcastKryo.build("McastHandler-FilteringObj"))) |
| 148 | .build() |
| 149 | .asDistributedSet(); |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 150 | mcastKryo = new KryoNamespace.Builder() |
| 151 | .register(KryoNamespaces.API) |
| 152 | .register(new McastPathStoreKeySerializer(), McastPathStoreKey.class); |
| 153 | mcastPathStore = srManager.storageService |
| 154 | .<McastPathStoreKey, List<Link>>consistentMultimapBuilder() |
| 155 | .withName("onos-mcast-path-store") |
| 156 | .withSerializer(Serializer.using(mcastKryo.build("McastHandler-Path"))) |
| 157 | .build(); |
Pier | 7b65716 | 2018-03-27 11:29:42 -0700 | [diff] [blame] | 158 | mcastUtils = new McastUtils(srManager, coreAppId, log); |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 159 | // Init the executor for the buckets corrector |
| 160 | mcastCorrector.scheduleWithFixedDelay(new McastBucketCorrector(), 10, |
| 161 | MCAST_VERIFY_INTERVAL, TimeUnit.SECONDS); |
| 162 | } |
| 163 | |
| 164 | /** |
| 165 | * Determines if mcast in the network has been stable in the last |
| 166 | * MCAST_STABLITY_THRESHOLD seconds, by comparing the current time |
| 167 | * to the last mcast change timestamp. |
| 168 | * |
| 169 | * @return true if stable |
| 170 | */ |
| 171 | private boolean isMcastStable() { |
| 172 | long last = (long) (lastMcastChange.get().toEpochMilli() / 1000.0); |
| 173 | long now = (long) (Instant.now().toEpochMilli() / 1000.0); |
| 174 | log.trace("Multicast stable since {}s", now - last); |
| 175 | return (now - last) > MCAST_STABLITY_THRESHOLD; |
| 176 | } |
| 177 | |
| 178 | /** |
| 179 | * Assures there are always MCAST_VERIFY_INTERVAL seconds between each execution, |
| 180 | * by comparing the current time with the last corrector execution. |
| 181 | * |
| 182 | * @return true if stable |
| 183 | */ |
| 184 | private boolean wasBktCorrRunning() { |
| 185 | long last = (long) (lastBktCorrExecution.get().toEpochMilli() / 1000.0); |
| 186 | long now = (long) (Instant.now().toEpochMilli() / 1000.0); |
| 187 | log.trace("McastBucketCorrector executed {}s ago", now - last); |
| 188 | return (now - last) < MCAST_VERIFY_INTERVAL; |
Charles Chan | 7277950 | 2016-04-23 17:36:10 -0700 | [diff] [blame] | 189 | } |
| 190 | |
| 191 | /** |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 192 | * Read initial multicast configuration from mcast store. |
Charles Chan | 7277950 | 2016-04-23 17:36:10 -0700 | [diff] [blame] | 193 | */ |
Pier Luigi | 69f774d | 2018-02-28 12:10:50 +0100 | [diff] [blame] | 194 | public void init() { |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 195 | mcastWorker.execute(this::initInternal); |
| 196 | } |
| 197 | |
| 198 | private void initInternal() { |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 199 | srManager.multicastRouteService.getRoutes().forEach(mcastRoute -> { |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 200 | lastMcastChange.set(Instant.now()); |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 201 | log.debug("Init group {}", mcastRoute.group()); |
| 202 | if (!mcastUtils.isLeader(mcastRoute.group())) { |
| 203 | log.debug("Skip {} due to lack of leadership", mcastRoute.group()); |
| 204 | return; |
| 205 | } |
| 206 | McastRouteData mcastRouteData = srManager.multicastRouteService.routeData(mcastRoute); |
| 207 | // For each source process the mcast tree |
| 208 | srManager.multicastRouteService.sources(mcastRoute).forEach(source -> { |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 209 | McastPathStoreKey pathStoreKey = new McastPathStoreKey(mcastRoute.group(), source); |
| 210 | Collection<? extends List<Link>> storedPaths = Versioned.valueOrElse( |
| 211 | mcastPathStore.get(pathStoreKey), Lists.newArrayList()); |
| 212 | Map<ConnectPoint, List<ConnectPoint>> mcastPaths = buildMcastPaths(storedPaths, mcastRoute.group(), |
| 213 | source); |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 214 | // Get all the sinks and process them |
| 215 | Set<ConnectPoint> sinks = processSinksToBeAdded(source, mcastRoute.group(), |
| 216 | mcastRouteData.sinks()); |
| 217 | // Filter out all the working sinks, we do not want to move them |
| 218 | // TODO we need a better way to distinguish flows coming from different sources |
| 219 | sinks = sinks.stream() |
| 220 | .filter(sink -> !mcastPaths.containsKey(sink) || |
| 221 | !isSinkForSource(mcastRoute.group(), sink, source)) |
| 222 | .collect(Collectors.toSet()); |
| 223 | if (sinks.isEmpty()) { |
| 224 | log.debug("Skip {} for source {} nothing to do", mcastRoute.group(), source); |
Pier | db27b8d | 2018-04-17 16:29:56 +0200 | [diff] [blame] | 225 | return; |
| 226 | } |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 227 | Map<ConnectPoint, List<Path>> mcasTree = mcastUtils.computeSinkMcastTree(mcastRoute.group(), |
| 228 | source.deviceId(), sinks); |
| 229 | mcasTree.forEach((sink, paths) -> processSinkAddedInternal(source, sink, mcastRoute.group(), |
| 230 | null)); |
Pier | 7b65716 | 2018-03-27 11:29:42 -0700 | [diff] [blame] | 231 | }); |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 232 | }); |
Charles Chan | c91c878 | 2016-03-30 17:54:24 -0700 | [diff] [blame] | 233 | } |
| 234 | |
| 235 | /** |
Pier Luigi | 35dab3f | 2018-01-25 16:16:02 +0100 | [diff] [blame] | 236 | * Clean up when deactivating the application. |
| 237 | */ |
Pier Luigi | 69f774d | 2018-02-28 12:10:50 +0100 | [diff] [blame] | 238 | public void terminate() { |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 239 | mcastCorrector.shutdown(); |
| 240 | mcastWorker.shutdown(); |
Pier | 72d0e58 | 2018-04-20 14:14:34 +0200 | [diff] [blame] | 241 | mcastNextObjStore.destroy(); |
| 242 | mcastRoleStore.destroy(); |
Harshada Chaundkar | 9204f31 | 2019-07-02 16:01:24 +0000 | [diff] [blame] | 243 | mcastFilteringObjStore.destroy(); |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 244 | mcastPathStore.destroy(); |
Pier | 72d0e58 | 2018-04-20 14:14:34 +0200 | [diff] [blame] | 245 | mcastUtils.terminate(); |
| 246 | log.info("Terminated"); |
Pier Luigi | 35dab3f | 2018-01-25 16:16:02 +0100 | [diff] [blame] | 247 | } |
| 248 | |
| 249 | /** |
Pier Luigi | d29ca7c | 2018-02-28 17:24:03 +0100 | [diff] [blame] | 250 | * Processes the SOURCE_ADDED, SOURCE_UPDATED, SINK_ADDED, |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 251 | * SINK_REMOVED, ROUTE_ADDED and ROUTE_REMOVED events. |
Charles Chan | c91c878 | 2016-03-30 17:54:24 -0700 | [diff] [blame] | 252 | * |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 253 | * @param event the multicast event to be processed |
Charles Chan | c91c878 | 2016-03-30 17:54:24 -0700 | [diff] [blame] | 254 | */ |
Pier Luigi | d29ca7c | 2018-02-28 17:24:03 +0100 | [diff] [blame] | 255 | public void processMcastEvent(McastEvent event) { |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 256 | mcastWorker.execute(() -> processMcastEventInternal(event)); |
| 257 | } |
| 258 | |
| 259 | private void processMcastEventInternal(McastEvent event) { |
| 260 | lastMcastChange.set(Instant.now()); |
| 261 | // Current subject is null, for ROUTE_REMOVED events |
| 262 | final McastRouteUpdate mcastUpdate = event.subject(); |
| 263 | final McastRouteUpdate mcastPrevUpdate = event.prevSubject(); |
| 264 | IpAddress mcastIp = mcastPrevUpdate.route().group(); |
| 265 | Set<ConnectPoint> prevSinks = mcastPrevUpdate.sinks() |
| 266 | .values().stream().flatMap(Collection::stream).collect(Collectors.toSet()); |
| 267 | Set<ConnectPoint> prevSources = mcastPrevUpdate.sources() |
| 268 | .values().stream().flatMap(Collection::stream).collect(Collectors.toSet()); |
| 269 | Set<ConnectPoint> sources; |
| 270 | // Events handling |
Pier | db27b8d | 2018-04-17 16:29:56 +0200 | [diff] [blame] | 271 | if (event.type() == ROUTE_ADDED) { |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 272 | processRouteAddedInternal(mcastUpdate.route().group()); |
| 273 | } else if (event.type() == ROUTE_REMOVED) { |
| 274 | processRouteRemovedInternal(prevSources, mcastIp); |
| 275 | } else if (event.type() == SOURCES_ADDED) { |
| 276 | // Current subject and prev just differ for the source connect points |
| 277 | sources = mcastUpdate.sources() |
| 278 | .values().stream().flatMap(Collection::stream).collect(Collectors.toSet()); |
| 279 | Set<ConnectPoint> sourcesToBeAdded = Sets.difference(sources, prevSources); |
| 280 | processSourcesAddedInternal(sourcesToBeAdded, mcastIp, mcastUpdate.sinks()); |
| 281 | } else if (event.type() == SOURCES_REMOVED) { |
| 282 | // Current subject and prev just differ for the source connect points |
| 283 | sources = mcastUpdate.sources() |
| 284 | .values().stream().flatMap(Collection::stream).collect(Collectors.toSet()); |
| 285 | Set<ConnectPoint> sourcesToBeRemoved = Sets.difference(prevSources, sources); |
| 286 | processSourcesRemovedInternal(sourcesToBeRemoved, sources, mcastIp, mcastUpdate.sinks()); |
| 287 | } else if (event.type() == SINKS_ADDED) { |
| 288 | processSinksAddedInternal(prevSources, mcastIp, mcastUpdate.sinks(), prevSinks); |
| 289 | } else if (event.type() == SINKS_REMOVED) { |
| 290 | processSinksRemovedInternal(prevSources, mcastIp, mcastUpdate.sinks(), mcastPrevUpdate.sinks()); |
Pier | db27b8d | 2018-04-17 16:29:56 +0200 | [diff] [blame] | 291 | } else { |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 292 | log.warn("Event {} not handled", event); |
Pier | db27b8d | 2018-04-17 16:29:56 +0200 | [diff] [blame] | 293 | } |
Pier Luigi | 6786b92 | 2018-02-02 16:19:11 +0100 | [diff] [blame] | 294 | } |
| 295 | |
| 296 | /** |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 297 | * Process the SOURCES_ADDED event. |
| 298 | * |
| 299 | * @param sources the sources connect point |
| 300 | * @param mcastIp the group address |
| 301 | * @param sinks the sinks connect points |
| 302 | */ |
| 303 | private void processSourcesAddedInternal(Set<ConnectPoint> sources, IpAddress mcastIp, |
| 304 | Map<HostId, Set<ConnectPoint>> sinks) { |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 305 | lastMcastChange.set(Instant.now()); |
| 306 | log.info("Processing sources added {} for group {}", sources, mcastIp); |
| 307 | if (!mcastUtils.isLeader(mcastIp)) { |
| 308 | log.debug("Skip {} due to lack of leadership", mcastIp); |
| 309 | return; |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 310 | } |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 311 | if (sources.isEmpty()) { |
| 312 | log.debug("Skip {} due to empty sources to be added", mcastIp); |
| 313 | return; |
| 314 | } |
| 315 | sources.forEach(source -> { |
| 316 | Set<ConnectPoint> sinksToBeAdded = processSinksToBeAdded(source, mcastIp, sinks); |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 317 | Map<ConnectPoint, List<Path>> mcasTree = mcastUtils.computeSinkMcastTree(mcastIp, source.deviceId(), |
| 318 | sinksToBeAdded); |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 319 | mcasTree.forEach((sink, paths) -> processSinkAddedInternal(source, sink, mcastIp, paths)); |
| 320 | }); |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 321 | } |
| 322 | |
| 323 | /** |
| 324 | * Process the SOURCES_REMOVED event. |
| 325 | * |
| 326 | * @param sourcesToBeRemoved the source connect points to be removed |
| 327 | * @param remainingSources the remainig source connect points |
| 328 | * @param mcastIp the group address |
| 329 | * @param sinks the sinks connect points |
| 330 | */ |
| 331 | private void processSourcesRemovedInternal(Set<ConnectPoint> sourcesToBeRemoved, |
| 332 | Set<ConnectPoint> remainingSources, |
| 333 | IpAddress mcastIp, |
| 334 | Map<HostId, Set<ConnectPoint>> sinks) { |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 335 | lastMcastChange.set(Instant.now()); |
| 336 | log.info("Processing sources removed {} for group {}", sourcesToBeRemoved, mcastIp); |
| 337 | if (!mcastUtils.isLeader(mcastIp)) { |
| 338 | log.debug("Skip {} due to lack of leadership", mcastIp); |
| 339 | return; |
| 340 | } |
| 341 | if (remainingSources.isEmpty()) { |
| 342 | log.debug("There are no more sources for {}", mcastIp); |
| 343 | processRouteRemovedInternal(sourcesToBeRemoved, mcastIp); |
| 344 | return; |
| 345 | } |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 346 | // Let's heal the trees |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 347 | Set<Link> notAffectedLinks = Sets.newHashSet(); |
| 348 | Map<ConnectPoint, Set<Link>> affectedLinks = Maps.newHashMap(); |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 349 | Map<ConnectPoint, Set<ConnectPoint>> candidateSinks = Maps.newHashMap(); |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 350 | Set<ConnectPoint> totalSources = Sets.newHashSet(sourcesToBeRemoved); |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 351 | totalSources.addAll(remainingSources); |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 352 | // Calculate all the links used by the sources and the current sinks |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 353 | totalSources.forEach(source -> { |
| 354 | Set<ConnectPoint> currentSinks = sinks.values() |
| 355 | .stream().flatMap(Collection::stream) |
| 356 | .filter(sink -> isSinkForSource(mcastIp, sink, source)) |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 357 | .collect(Collectors.toSet()); |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 358 | candidateSinks.put(source, currentSinks); |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 359 | McastPathStoreKey pathStoreKey = new McastPathStoreKey(mcastIp, source); |
| 360 | Collection<? extends List<Link>> storedPaths = Versioned.valueOrElse( |
| 361 | mcastPathStore.get(pathStoreKey), Lists.newArrayList()); |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 362 | currentSinks.forEach(currentSink -> { |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 363 | Optional<? extends List<Link>> currentPath = mcastUtils.getStoredPath(currentSink.deviceId(), |
| 364 | storedPaths); |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 365 | if (currentPath.isPresent()) { |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 366 | if (!sourcesToBeRemoved.contains(source)) { |
| 367 | notAffectedLinks.addAll(currentPath.get()); |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 368 | } else { |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 369 | affectedLinks.compute(source, (k, v) -> { |
| 370 | v = v == null ? Sets.newHashSet() : v; |
| 371 | v.addAll(currentPath.get()); |
| 372 | return v; |
| 373 | }); |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 374 | } |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 375 | } |
| 376 | }); |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 377 | }); |
| 378 | // Clean transit links |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 379 | affectedLinks.forEach((source, currentCandidateLinks) -> { |
| 380 | Set<Link> linksToBeRemoved = Sets.difference(currentCandidateLinks, notAffectedLinks) |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 381 | .immutableCopy(); |
| 382 | if (!linksToBeRemoved.isEmpty()) { |
| 383 | currentCandidateLinks.forEach(link -> { |
| 384 | DeviceId srcLink = link.src().deviceId(); |
| 385 | // Remove ports only on links to be removed |
| 386 | if (linksToBeRemoved.contains(link)) { |
| 387 | removePortFromDevice(link.src().deviceId(), link.src().port(), mcastIp, |
| 388 | mcastUtils.assignedVlan(srcLink.equals(source.deviceId()) ? |
| 389 | source : null)); |
| 390 | } |
| 391 | // Remove role on the candidate links |
| 392 | mcastRoleStore.remove(new McastRoleStoreKey(mcastIp, srcLink, source)); |
| 393 | }); |
| 394 | } |
| 395 | }); |
| 396 | // Clean ingress and egress |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 397 | sourcesToBeRemoved.forEach(source -> { |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 398 | Set<ConnectPoint> currentSinks = candidateSinks.get(source); |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 399 | McastPathStoreKey pathStoreKey = new McastPathStoreKey(mcastIp, source); |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 400 | currentSinks.forEach(currentSink -> { |
| 401 | VlanId assignedVlan = mcastUtils.assignedVlan(source.deviceId().equals(currentSink.deviceId()) ? |
| 402 | source : null); |
| 403 | // Sinks co-located with the source |
| 404 | if (source.deviceId().equals(currentSink.deviceId())) { |
| 405 | if (source.port().equals(currentSink.port())) { |
| 406 | log.warn("Skip {} since sink {} is on the same port of source {}. Abort", |
| 407 | mcastIp, currentSink, source); |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 408 | return; |
| 409 | } |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 410 | // We need to check against the other sources and if it is |
| 411 | // necessary remove the port from the device - no overlap |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 412 | Set<VlanId> otherVlans = remainingSources.stream() |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 413 | // Only sources co-located and having this sink |
| 414 | .filter(remainingSource -> remainingSource.deviceId() |
| 415 | .equals(source.deviceId()) && candidateSinks.get(remainingSource) |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 416 | .contains(currentSink)) |
| 417 | .map(remainingSource -> mcastUtils.assignedVlan( |
| 418 | remainingSource.deviceId().equals(currentSink.deviceId()) ? |
| 419 | remainingSource : null)).collect(Collectors.toSet()); |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 420 | if (!otherVlans.contains(assignedVlan)) { |
| 421 | removePortFromDevice(currentSink.deviceId(), currentSink.port(), |
| 422 | mcastIp, assignedVlan); |
| 423 | } |
| 424 | mcastRoleStore.remove(new McastRoleStoreKey(mcastIp, currentSink.deviceId(), |
| 425 | source)); |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 426 | return; |
| 427 | } |
| 428 | Set<VlanId> otherVlans = remainingSources.stream() |
| 429 | .filter(remainingSource -> candidateSinks.get(remainingSource) |
| 430 | .contains(currentSink)) |
| 431 | .map(remainingSource -> mcastUtils.assignedVlan( |
| 432 | remainingSource.deviceId().equals(currentSink.deviceId()) ? |
| 433 | remainingSource : null)).collect(Collectors.toSet()); |
| 434 | // Sinks on other leaves |
| 435 | if (!otherVlans.contains(assignedVlan)) { |
| 436 | removePortFromDevice(currentSink.deviceId(), currentSink.port(), |
| 437 | mcastIp, assignedVlan); |
| 438 | } |
| 439 | mcastRoleStore.remove(new McastRoleStoreKey(mcastIp, currentSink.deviceId(), |
| 440 | source)); |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 441 | }); |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 442 | // Clean the mcast paths |
| 443 | mcastPathStore.removeAll(pathStoreKey); |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 444 | }); |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 445 | } |
| 446 | |
| 447 | /** |
Pier | db27b8d | 2018-04-17 16:29:56 +0200 | [diff] [blame] | 448 | * Process the ROUTE_ADDED event. |
Pier Luigi | e80d6b4 | 2018-02-26 12:31:38 +0100 | [diff] [blame] | 449 | * |
Pier | db27b8d | 2018-04-17 16:29:56 +0200 | [diff] [blame] | 450 | * @param mcastIp the group address |
Pier Luigi | e80d6b4 | 2018-02-26 12:31:38 +0100 | [diff] [blame] | 451 | */ |
Pier | db27b8d | 2018-04-17 16:29:56 +0200 | [diff] [blame] | 452 | private void processRouteAddedInternal(IpAddress mcastIp) { |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 453 | lastMcastChange.set(Instant.now()); |
| 454 | log.info("Processing route added for Multicast group {}", mcastIp); |
| 455 | // Just elect a new leader |
| 456 | mcastUtils.isLeader(mcastIp); |
Pier Luigi | e80d6b4 | 2018-02-26 12:31:38 +0100 | [diff] [blame] | 457 | } |
| 458 | |
| 459 | /** |
Pier Luigi | 6786b92 | 2018-02-02 16:19:11 +0100 | [diff] [blame] | 460 | * Removes the entire mcast tree related to this group. |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 461 | * @param sources the source connect points |
Pier Luigi | 6786b92 | 2018-02-02 16:19:11 +0100 | [diff] [blame] | 462 | * @param mcastIp multicast group IP address |
| 463 | */ |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 464 | private void processRouteRemovedInternal(Set<ConnectPoint> sources, IpAddress mcastIp) { |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 465 | lastMcastChange.set(Instant.now()); |
| 466 | log.info("Processing route removed for group {}", mcastIp); |
| 467 | if (!mcastUtils.isLeader(mcastIp)) { |
| 468 | log.debug("Skip {} due to lack of leadership", mcastIp); |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 469 | mcastUtils.withdrawLeader(mcastIp); |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 470 | return; |
Pier Luigi | 6786b92 | 2018-02-02 16:19:11 +0100 | [diff] [blame] | 471 | } |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 472 | sources.forEach(source -> { |
| 473 | // Find out the ingress, transit and egress device of the affected group |
| 474 | DeviceId ingressDevice = getDevice(mcastIp, INGRESS, source) |
| 475 | .stream().findFirst().orElse(null); |
| 476 | Set<DeviceId> transitDevices = getDevice(mcastIp, TRANSIT, source); |
| 477 | Set<DeviceId> egressDevices = getDevice(mcastIp, EGRESS, source); |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 478 | McastPathStoreKey pathStoreKey = new McastPathStoreKey(mcastIp, source); |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 479 | // If there are no egress and transit devices, sinks could be only on the ingress |
| 480 | if (!egressDevices.isEmpty()) { |
| 481 | egressDevices.forEach(deviceId -> { |
| 482 | removeGroupFromDevice(deviceId, mcastIp, mcastUtils.assignedVlan(null)); |
| 483 | mcastRoleStore.remove(new McastRoleStoreKey(mcastIp, deviceId, source)); |
| 484 | }); |
| 485 | } |
| 486 | if (!transitDevices.isEmpty()) { |
| 487 | transitDevices.forEach(deviceId -> { |
| 488 | removeGroupFromDevice(deviceId, mcastIp, mcastUtils.assignedVlan(null)); |
| 489 | mcastRoleStore.remove(new McastRoleStoreKey(mcastIp, deviceId, source)); |
| 490 | }); |
| 491 | } |
| 492 | if (ingressDevice != null) { |
| 493 | removeGroupFromDevice(ingressDevice, mcastIp, mcastUtils.assignedVlan(source)); |
| 494 | mcastRoleStore.remove(new McastRoleStoreKey(mcastIp, ingressDevice, source)); |
| 495 | } |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 496 | // Clean the mcast paths |
| 497 | mcastPathStore.removeAll(pathStoreKey); |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 498 | }); |
| 499 | // Finally, withdraw the leadership |
| 500 | mcastUtils.withdrawLeader(mcastIp); |
Pier Luigi | 6786b92 | 2018-02-02 16:19:11 +0100 | [diff] [blame] | 501 | } |
| 502 | |
Pier | 7b65716 | 2018-03-27 11:29:42 -0700 | [diff] [blame] | 503 | /** |
| 504 | * Process sinks to be removed. |
| 505 | * |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 506 | * @param sources the source connect points |
Pier | 7b65716 | 2018-03-27 11:29:42 -0700 | [diff] [blame] | 507 | * @param mcastIp the ip address of the group |
| 508 | * @param newSinks the new sinks to be processed |
Pier | 2816468 | 2018-04-17 15:50:43 +0200 | [diff] [blame] | 509 | * @param prevSinks the previous sinks |
Pier | 7b65716 | 2018-03-27 11:29:42 -0700 | [diff] [blame] | 510 | */ |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 511 | private void processSinksRemovedInternal(Set<ConnectPoint> sources, IpAddress mcastIp, |
Pier | 7b65716 | 2018-03-27 11:29:42 -0700 | [diff] [blame] | 512 | Map<HostId, Set<ConnectPoint>> newSinks, |
Pier | 2816468 | 2018-04-17 15:50:43 +0200 | [diff] [blame] | 513 | Map<HostId, Set<ConnectPoint>> prevSinks) { |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 514 | lastMcastChange.set(Instant.now()); |
| 515 | log.info("Processing sinks removed for group {} and for sources {}", |
| 516 | mcastIp, sources); |
| 517 | if (!mcastUtils.isLeader(mcastIp)) { |
| 518 | log.debug("Skip {} due to lack of leadership", mcastIp); |
| 519 | return; |
Pier | 7b65716 | 2018-03-27 11:29:42 -0700 | [diff] [blame] | 520 | } |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 521 | Map<ConnectPoint, Map<ConnectPoint, Optional<? extends List<Link>>>> treesToBeRemoved = Maps.newHashMap(); |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 522 | Map<ConnectPoint, Set<ConnectPoint>> treesToBeAdded = Maps.newHashMap(); |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 523 | Set<Link> goodLinks = Sets.newHashSet(); |
| 524 | Map<ConnectPoint, Set<DeviceId>> goodDevicesBySource = Maps.newHashMap(); |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 525 | sources.forEach(source -> { |
| 526 | // Save the path associated to the sinks to be removed |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 527 | Set<ConnectPoint> sinksToBeRemoved = processSinksToBeRemoved(mcastIp, prevSinks, |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 528 | newSinks, source); |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 529 | Map<ConnectPoint, Optional<? extends List<Link>>> treeToBeRemoved = Maps.newHashMap(); |
| 530 | McastPathStoreKey pathStoreKey = new McastPathStoreKey(mcastIp, source); |
| 531 | Collection<? extends List<Link>> storedPaths = Versioned.valueOrElse( |
| 532 | mcastPathStore.get(pathStoreKey), Lists.newArrayList()); |
| 533 | sinksToBeRemoved.forEach(sink -> treeToBeRemoved.put(sink, mcastUtils.getStoredPath(sink.deviceId(), |
| 534 | storedPaths))); |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 535 | treesToBeRemoved.put(source, treeToBeRemoved); |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 536 | // Save the good links and good devices |
| 537 | Set<DeviceId> goodDevices = Sets.newHashSet(); |
| 538 | Set<DeviceId> totalDevices = Sets.newHashSet(getDevice(mcastIp, EGRESS, source)); |
| 539 | totalDevices.addAll(getDevice(mcastIp, INGRESS, source)); |
| 540 | Set<ConnectPoint> notAffectedSinks = Sets.newHashSet(); |
| 541 | // Compute good sinks |
| 542 | totalDevices.forEach(device -> { |
| 543 | Set<ConnectPoint> sinks = getSinks(mcastIp, device, source); |
| 544 | notAffectedSinks.addAll(Sets.difference(sinks, sinksToBeRemoved)); |
| 545 | }); |
| 546 | // Compute good paths and good devices |
| 547 | notAffectedSinks.forEach(notAffectedSink -> { |
| 548 | Optional<? extends List<Link>> notAffectedPath = mcastUtils.getStoredPath(notAffectedSink.deviceId(), |
| 549 | storedPaths); |
| 550 | if (notAffectedPath.isPresent()) { |
| 551 | List<Link> goodPath = notAffectedPath.get(); |
| 552 | goodLinks.addAll(goodPath); |
| 553 | goodPath.forEach(link -> goodDevices.add(link.src().deviceId())); |
| 554 | } else { |
| 555 | goodDevices.add(notAffectedSink.deviceId()); |
| 556 | } |
| 557 | }); |
| 558 | goodDevicesBySource.compute(source, (k, v) -> { |
| 559 | v = v == null ? Sets.newHashSet() : v; |
| 560 | v.addAll(goodDevices); |
| 561 | return v; |
| 562 | }); |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 563 | // Recover the dual-homed sinks |
| 564 | Set<ConnectPoint> sinksToBeRecovered = processSinksToBeRecovered(mcastIp, newSinks, |
| 565 | prevSinks, source); |
| 566 | treesToBeAdded.put(source, sinksToBeRecovered); |
| 567 | }); |
| 568 | // Remove the sinks taking into account the multiple sources and the original paths |
| 569 | treesToBeRemoved.forEach((source, tree) -> |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 570 | tree.forEach((sink, path) -> processSinkRemovedInternal(source, sink, mcastIp, path, |
| 571 | goodLinks, goodDevicesBySource.get(source)))); |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 572 | // Add new sinks according to the recovery procedure |
| 573 | treesToBeAdded.forEach((source, sinks) -> |
| 574 | sinks.forEach(sink -> processSinkAddedInternal(source, sink, mcastIp, null))); |
Pier | 7b65716 | 2018-03-27 11:29:42 -0700 | [diff] [blame] | 575 | } |
| 576 | |
Pier Luigi | 6786b92 | 2018-02-02 16:19:11 +0100 | [diff] [blame] | 577 | /** |
Pier Luigi | 35dab3f | 2018-01-25 16:16:02 +0100 | [diff] [blame] | 578 | * Removes a path from source to sink for given multicast group. |
| 579 | * |
| 580 | * @param source connect point of the multicast source |
| 581 | * @param sink connection point of the multicast sink |
| 582 | * @param mcastIp multicast group IP address |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 583 | * @param mcastPath path associated to the sink |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 584 | * @param usedLinks links used by the other sinks |
| 585 | * @param usedDevices devices used by other sinks |
Pier Luigi | 35dab3f | 2018-01-25 16:16:02 +0100 | [diff] [blame] | 586 | */ |
| 587 | private void processSinkRemovedInternal(ConnectPoint source, ConnectPoint sink, |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 588 | IpAddress mcastIp, Optional<? extends List<Link>> mcastPath, |
| 589 | Set<Link> usedLinks, Set<DeviceId> usedDevices) { |
| 590 | |
| 591 | log.info("Used links {}", usedLinks); |
| 592 | log.info("Used devices {}", usedDevices); |
| 593 | |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 594 | lastMcastChange.set(Instant.now()); |
| 595 | log.info("Processing sink removed {} for group {} and for source {}", sink, mcastIp, source); |
| 596 | boolean isLast; |
| 597 | // When source and sink are on the same device |
| 598 | if (source.deviceId().equals(sink.deviceId())) { |
| 599 | // Source and sink are on even the same port. There must be something wrong. |
| 600 | if (source.port().equals(sink.port())) { |
| 601 | log.warn("Skip {} since sink {} is on the same port of source {}. Abort", mcastIp, sink, source); |
Pier Luigi | 35dab3f | 2018-01-25 16:16:02 +0100 | [diff] [blame] | 602 | return; |
| 603 | } |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 604 | isLast = removePortFromDevice(sink.deviceId(), sink.port(), mcastIp, mcastUtils.assignedVlan(source)); |
Pier Luigi | 35dab3f | 2018-01-25 16:16:02 +0100 | [diff] [blame] | 605 | if (isLast) { |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 606 | mcastRoleStore.remove(new McastRoleStoreKey(mcastIp, sink.deviceId(), source)); |
Pier Luigi | 35dab3f | 2018-01-25 16:16:02 +0100 | [diff] [blame] | 607 | } |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 608 | return; |
| 609 | } |
| 610 | // Process the egress device |
| 611 | isLast = removePortFromDevice(sink.deviceId(), sink.port(), mcastIp, mcastUtils.assignedVlan(null)); |
| 612 | if (isLast) { |
| 613 | mcastRoleStore.remove(new McastRoleStoreKey(mcastIp, sink.deviceId(), source)); |
| 614 | } |
| 615 | // If this is the last sink on the device, also update upstream |
| 616 | if (mcastPath.isPresent()) { |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 617 | List<Link> links = Lists.newArrayList(mcastPath.get()); |
| 618 | if (isLast) { |
| 619 | // Clean the path |
| 620 | McastPathStoreKey pathStoreKey = new McastPathStoreKey(mcastIp, source); |
| 621 | mcastPathStore.remove(pathStoreKey, mcastPath.get()); |
| 622 | Collections.reverse(links); |
| 623 | for (Link link : links) { |
| 624 | // If nobody is using the port remove |
| 625 | if (!usedLinks.contains(link)) { |
| 626 | removePortFromDevice(link.src().deviceId(), link.src().port(), mcastIp, |
| 627 | mcastUtils.assignedVlan(link.src().deviceId().equals(source.deviceId()) ? source : null)); |
| 628 | } |
| 629 | // If nobody is using the device |
| 630 | if (!usedDevices.contains(link.src().deviceId())) { |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 631 | mcastRoleStore.remove(new McastRoleStoreKey(mcastIp, link.src().deviceId(), source)); |
Pier Luigi | 35dab3f | 2018-01-25 16:16:02 +0100 | [diff] [blame] | 632 | } |
Charles Chan | c91c878 | 2016-03-30 17:54:24 -0700 | [diff] [blame] | 633 | } |
| 634 | } |
| 635 | } |
| 636 | } |
| 637 | |
Pier | 7b65716 | 2018-03-27 11:29:42 -0700 | [diff] [blame] | 638 | /** |
| 639 | * Process sinks to be added. |
| 640 | * |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 641 | * @param sources the source connect points |
Pier | 7b65716 | 2018-03-27 11:29:42 -0700 | [diff] [blame] | 642 | * @param mcastIp the group IP |
| 643 | * @param newSinks the new sinks to be processed |
| 644 | * @param allPrevSinks all previous sinks |
| 645 | */ |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 646 | private void processSinksAddedInternal(Set<ConnectPoint> sources, IpAddress mcastIp, |
Pier | 7b65716 | 2018-03-27 11:29:42 -0700 | [diff] [blame] | 647 | Map<HostId, Set<ConnectPoint>> newSinks, |
| 648 | Set<ConnectPoint> allPrevSinks) { |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 649 | lastMcastChange.set(Instant.now()); |
| 650 | log.info("Processing sinks added for group {} and for sources {}", mcastIp, sources); |
| 651 | if (!mcastUtils.isLeader(mcastIp)) { |
| 652 | log.debug("Skip {} due to lack of leadership", mcastIp); |
| 653 | return; |
Pier | 7b65716 | 2018-03-27 11:29:42 -0700 | [diff] [blame] | 654 | } |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 655 | sources.forEach(source -> { |
| 656 | Set<ConnectPoint> sinksToBeAdded = processSinksToBeAdded(source, mcastIp, newSinks); |
| 657 | sinksToBeAdded = Sets.difference(sinksToBeAdded, allPrevSinks); |
| 658 | sinksToBeAdded.forEach(sink -> processSinkAddedInternal(source, sink, mcastIp, null)); |
| 659 | }); |
Pier | 7b65716 | 2018-03-27 11:29:42 -0700 | [diff] [blame] | 660 | } |
| 661 | |
Charles Chan | c91c878 | 2016-03-30 17:54:24 -0700 | [diff] [blame] | 662 | /** |
| 663 | * Establishes a path from source to sink for given multicast group. |
| 664 | * |
| 665 | * @param source connect point of the multicast source |
| 666 | * @param sink connection point of the multicast sink |
| 667 | * @param mcastIp multicast group IP address |
| 668 | */ |
| 669 | private void processSinkAddedInternal(ConnectPoint source, ConnectPoint sink, |
Pier | 7b65716 | 2018-03-27 11:29:42 -0700 | [diff] [blame] | 670 | IpAddress mcastIp, List<Path> allPaths) { |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 671 | lastMcastChange.set(Instant.now()); |
| 672 | log.info("Processing sink added {} for group {} and for source {}", sink, mcastIp, source); |
| 673 | // Process the ingress device |
| 674 | McastFilteringObjStoreKey mcastFilterObjStoreKey = new McastFilteringObjStoreKey(source, |
| 675 | mcastUtils.assignedVlan(source), mcastIp.isIp4()); |
| 676 | addFilterToDevice(mcastFilterObjStoreKey, mcastIp, INGRESS); |
| 677 | if (source.deviceId().equals(sink.deviceId())) { |
| 678 | if (source.port().equals(sink.port())) { |
| 679 | log.warn("Skip {} since sink {} is on the same port of source {}. Abort", |
| 680 | mcastIp, sink, source); |
Pier Luigi | 35dab3f | 2018-01-25 16:16:02 +0100 | [diff] [blame] | 681 | return; |
| 682 | } |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 683 | addPortToDevice(sink.deviceId(), sink.port(), mcastIp, mcastUtils.assignedVlan(source)); |
| 684 | mcastRoleStore.put(new McastRoleStoreKey(mcastIp, sink.deviceId(), source), INGRESS); |
| 685 | return; |
| 686 | } |
| 687 | // Find a path. If present, create/update groups and flows for each hop |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 688 | Optional<Path> mcastPath = getPath(source.deviceId(), sink.deviceId(), mcastIp, allPaths); |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 689 | if (mcastPath.isPresent()) { |
| 690 | List<Link> links = mcastPath.get().links(); |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 691 | McastPathStoreKey pathStoreKey = new McastPathStoreKey(mcastIp, source); |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 692 | // Setup mcast role for ingress |
| 693 | mcastRoleStore.put(new McastRoleStoreKey(mcastIp, source.deviceId(), source), INGRESS); |
| 694 | // Setup properly the transit forwarding |
| 695 | links.forEach(link -> { |
| 696 | addPortToDevice(link.src().deviceId(), link.src().port(), mcastIp, |
| 697 | mcastUtils.assignedVlan(link.src().deviceId() |
| 698 | .equals(source.deviceId()) ? source : null)); |
| 699 | McastFilteringObjStoreKey filteringKey = new McastFilteringObjStoreKey(link.dst(), |
| 700 | mcastUtils.assignedVlan(null), mcastIp.isIp4()); |
| 701 | addFilterToDevice(filteringKey, mcastIp, null); |
| 702 | }); |
| 703 | // Setup mcast role for the transit |
| 704 | links.stream() |
| 705 | .filter(link -> !link.dst().deviceId().equals(sink.deviceId())) |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 706 | .forEach(link -> mcastRoleStore.put(new McastRoleStoreKey(mcastIp, link.dst().deviceId(), |
| 707 | source), TRANSIT)); |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 708 | // Process the egress device |
| 709 | addPortToDevice(sink.deviceId(), sink.port(), mcastIp, mcastUtils.assignedVlan(null)); |
| 710 | // Setup mcast role for egress |
| 711 | mcastRoleStore.put(new McastRoleStoreKey(mcastIp, sink.deviceId(), source), EGRESS); |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 712 | // Store the used path |
| 713 | mcastPathStore.put(pathStoreKey, links); |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 714 | } else { |
| 715 | log.warn("Unable to find a path from {} to {}. Abort sinkAdded", source.deviceId(), sink.deviceId()); |
Charles Chan | c91c878 | 2016-03-30 17:54:24 -0700 | [diff] [blame] | 716 | } |
| 717 | } |
| 718 | |
| 719 | /** |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 720 | * Processes the PORT_UPDATED event. |
Harshada Chaundkar | 9204f31 | 2019-07-02 16:01:24 +0000 | [diff] [blame] | 721 | * |
| 722 | * @param affectedDevice Affected device |
| 723 | * @param affectedPort Affected port |
| 724 | */ |
| 725 | public void processPortUpdate(Device affectedDevice, Port affectedPort) { |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 726 | mcastWorker.execute(() -> processPortUpdateInternal(affectedDevice, affectedPort)); |
| 727 | } |
| 728 | |
| 729 | private void processPortUpdateInternal(Device affectedDevice, Port affectedPort) { |
Harshada Chaundkar | 9204f31 | 2019-07-02 16:01:24 +0000 | [diff] [blame] | 730 | // Clean the filtering obj store. Edge port case. |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 731 | lastMcastChange.set(Instant.now()); |
Harshada Chaundkar | 9204f31 | 2019-07-02 16:01:24 +0000 | [diff] [blame] | 732 | ConnectPoint portDown = new ConnectPoint(affectedDevice.id(), affectedPort.number()); |
| 733 | if (!affectedPort.isEnabled()) { |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 734 | log.info("Processing port down {}", portDown); |
Harshada Chaundkar | 9204f31 | 2019-07-02 16:01:24 +0000 | [diff] [blame] | 735 | updateFilterObjStoreByPort(portDown); |
| 736 | } |
| 737 | } |
| 738 | |
| 739 | /** |
Charles Chan | 7277950 | 2016-04-23 17:36:10 -0700 | [diff] [blame] | 740 | * Processes the LINK_DOWN event. |
| 741 | * |
pier | eaddb18 | 2020-02-03 13:50:53 +0100 | [diff] [blame] | 742 | * @param linkDown Link that is going down |
Charles Chan | 7277950 | 2016-04-23 17:36:10 -0700 | [diff] [blame] | 743 | */ |
pier | eaddb18 | 2020-02-03 13:50:53 +0100 | [diff] [blame] | 744 | public void processLinkDown(Link linkDown) { |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 745 | mcastWorker.execute(() -> processLinkDownInternal(linkDown)); |
| 746 | } |
| 747 | |
| 748 | private void processLinkDownInternal(Link linkDown) { |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 749 | // Get mcast groups affected by the link going down |
| 750 | Set<IpAddress> affectedGroups = getAffectedGroups(linkDown); |
| 751 | log.info("Processing link down {} for groups {}", linkDown, affectedGroups); |
| 752 | affectedGroups.forEach(mcastIp -> { |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 753 | lastMcastChange.set(Instant.now()); |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 754 | log.debug("Processing link down {} for group {}", linkDown, mcastIp); |
| 755 | recoverFailure(mcastIp, linkDown); |
| 756 | }); |
Charles Chan | 7277950 | 2016-04-23 17:36:10 -0700 | [diff] [blame] | 757 | } |
| 758 | |
| 759 | /** |
Pier Luigi | 580fd8a | 2018-01-16 10:47:50 +0100 | [diff] [blame] | 760 | * Process the DEVICE_DOWN event. |
| 761 | * |
| 762 | * @param deviceDown device going down |
| 763 | */ |
Pier Luigi | 69f774d | 2018-02-28 12:10:50 +0100 | [diff] [blame] | 764 | public void processDeviceDown(DeviceId deviceDown) { |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 765 | mcastWorker.execute(() -> processDeviceDownInternal(deviceDown)); |
| 766 | } |
| 767 | |
| 768 | private void processDeviceDownInternal(DeviceId deviceDown) { |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 769 | // Get the mcast groups affected by the device going down |
| 770 | Set<IpAddress> affectedGroups = getAffectedGroups(deviceDown); |
| 771 | log.info("Processing device down {} for groups {}", deviceDown, affectedGroups); |
| 772 | updateFilterObjStoreByDevice(deviceDown); |
| 773 | affectedGroups.forEach(mcastIp -> { |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 774 | lastMcastChange.set(Instant.now()); |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 775 | log.debug("Processing device down {} for group {}", deviceDown, mcastIp); |
| 776 | recoverFailure(mcastIp, deviceDown); |
| 777 | }); |
Pier Luigi | 580fd8a | 2018-01-16 10:47:50 +0100 | [diff] [blame] | 778 | } |
| 779 | |
| 780 | /** |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 781 | * General failure recovery procedure. |
| 782 | * |
| 783 | * @param mcastIp the group to recover |
| 784 | * @param failedElement the failed element |
| 785 | */ |
| 786 | private void recoverFailure(IpAddress mcastIp, Object failedElement) { |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 787 | // Do not proceed if we are not the leaders |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 788 | if (!mcastUtils.isLeader(mcastIp)) { |
| 789 | log.debug("Skip {} due to lack of leadership", mcastIp); |
| 790 | return; |
| 791 | } |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 792 | // Skip if it is not an infra failure |
| 793 | Set<DeviceId> transitDevices = getDevice(mcastIp, TRANSIT); |
| 794 | if (!mcastUtils.isInfraFailure(transitDevices, failedElement)) { |
| 795 | log.debug("Skip {} not an infrastructure failure", mcastIp); |
| 796 | return; |
| 797 | } |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 798 | // Do not proceed if the sources of this group are missing |
| 799 | Set<ConnectPoint> sources = getSources(mcastIp); |
| 800 | if (sources.isEmpty()) { |
| 801 | log.warn("Missing sources for group {}", mcastIp); |
| 802 | return; |
| 803 | } |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 804 | // Get all the paths, affected paths, good links and good devices |
| 805 | Set<List<Link>> storedPaths = getStoredPaths(mcastIp); |
| 806 | Set<List<Link>> affectedPaths = mcastUtils.getAffectedPaths(storedPaths, failedElement); |
| 807 | Set<Link> goodLinks = Sets.newHashSet(); |
| 808 | Map<DeviceId, Set<DeviceId>> goodDevicesBySource = Maps.newHashMap(); |
| 809 | Map<DeviceId, Set<ConnectPoint>> processedSourcesByEgress = Maps.newHashMap(); |
| 810 | Sets.difference(storedPaths, affectedPaths).forEach(goodPath -> { |
| 811 | goodLinks.addAll(goodPath); |
| 812 | DeviceId srcDevice = goodPath.get(0).src().deviceId(); |
| 813 | Set<DeviceId> goodDevices = Sets.newHashSet(); |
| 814 | goodPath.forEach(link -> goodDevices.add(link.src().deviceId())); |
| 815 | goodDevicesBySource.compute(srcDevice, (k, v) -> { |
| 816 | v = v == null ? Sets.newHashSet() : v; |
| 817 | v.addAll(goodDevices); |
| 818 | return v; |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 819 | }); |
| 820 | }); |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 821 | affectedPaths.forEach(affectedPath -> { |
| 822 | // TODO remove |
| 823 | log.info("Good links {}", goodLinks); |
| 824 | // TODO remove |
| 825 | log.info("Good devices {}", goodDevicesBySource); |
| 826 | // TODO trace |
| 827 | log.info("Healing the path {}", affectedPath); |
| 828 | DeviceId srcDevice = affectedPath.get(0).src().deviceId(); |
| 829 | DeviceId dstDevice = affectedPath.get(affectedPath.size() - 1).dst().deviceId(); |
| 830 | // Fix in one shot multiple sources |
| 831 | Set<ConnectPoint> affectedSources = sources.stream() |
| 832 | .filter(device -> device.deviceId().equals(srcDevice)) |
| 833 | .collect(Collectors.toSet()); |
| 834 | Set<ConnectPoint> processedSources = processedSourcesByEgress.getOrDefault(dstDevice, |
| 835 | Collections.emptySet()); |
| 836 | Optional<Path> alternativePath = getPath(srcDevice, dstDevice, mcastIp, null); |
| 837 | // If an alternative is possible go ahead |
| 838 | if (alternativePath.isPresent()) { |
| 839 | // TODO trace |
| 840 | log.info("Alternative path {}", alternativePath.get().links()); |
| 841 | } else { |
| 842 | // Otherwise try to come up with an alternative |
| 843 | // TODO trace |
| 844 | log.info("No alternative path"); |
| 845 | Set<ConnectPoint> notAffectedSources = Sets.difference(sources, affectedSources); |
| 846 | Set<ConnectPoint> remainingSources = Sets.difference(notAffectedSources, processedSources); |
| 847 | alternativePath = recoverSinks(dstDevice, mcastIp, affectedSources, remainingSources); |
| 848 | processedSourcesByEgress.compute(dstDevice, (k, v) -> { |
| 849 | v = v == null ? Sets.newHashSet() : v; |
| 850 | v.addAll(affectedSources); |
| 851 | return v; |
| 852 | }); |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 853 | } |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 854 | // Recover from the failure if possible |
| 855 | Optional<Path> finalPath = alternativePath; |
| 856 | affectedSources.forEach(affectedSource -> { |
| 857 | // Update the mcastPath store |
| 858 | McastPathStoreKey mcastPathStoreKey = new McastPathStoreKey(mcastIp, affectedSource); |
| 859 | // Verify if there are local sinks |
| 860 | Set<DeviceId> localSinks = getSinks(mcastIp, srcDevice, affectedSource).stream() |
| 861 | .map(ConnectPoint::deviceId) |
| 862 | .collect(Collectors.toSet()); |
| 863 | Set<DeviceId> goodDevices = goodDevicesBySource.compute(affectedSource.deviceId(), (k, v) -> { |
| 864 | v = v == null ? Sets.newHashSet() : v; |
| 865 | v.addAll(localSinks); |
| 866 | return v; |
| 867 | }); |
| 868 | // TODO remove |
| 869 | log.info("Good devices {}", goodDevicesBySource); |
| 870 | Collection<? extends List<Link>> storedPathsBySource = Versioned.valueOrElse( |
| 871 | mcastPathStore.get(mcastPathStoreKey), Lists.newArrayList()); |
| 872 | Optional<? extends List<Link>> storedPath = storedPathsBySource.stream() |
| 873 | .filter(path -> path.equals(affectedPath)) |
| 874 | .findFirst(); |
| 875 | // Remove bad links |
| 876 | affectedPath.forEach(affectedLink -> { |
| 877 | DeviceId affectedDevice = affectedLink.src().deviceId(); |
| 878 | // If there is overlap with good paths - skip it |
| 879 | if (!goodLinks.contains(affectedLink)) { |
| 880 | removePortFromDevice(affectedDevice, affectedLink.src().port(), mcastIp, |
| 881 | mcastUtils.assignedVlan(affectedDevice.equals(affectedSource.deviceId()) ? |
| 882 | affectedSource : null)); |
| 883 | } |
| 884 | // Remove role on the affected links if last |
| 885 | if (!goodDevices.contains(affectedDevice)) { |
| 886 | mcastRoleStore.remove(new McastRoleStoreKey(mcastIp, affectedDevice, affectedSource)); |
Charles Chan | ba59dd6 | 2018-05-10 22:19:49 +0000 | [diff] [blame] | 887 | } |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 888 | }); |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 889 | // Sometimes the removal fails for serialization issue |
| 890 | // trying with the original object as workaround |
| 891 | if (storedPath.isPresent()) { |
| 892 | mcastPathStore.remove(mcastPathStoreKey, storedPath.get()); |
| 893 | } else { |
| 894 | log.warn("Unable to find the corresponding path - trying removeal"); |
| 895 | mcastPathStore.remove(mcastPathStoreKey, affectedPath); |
| 896 | } |
| 897 | // Program new links |
| 898 | if (finalPath.isPresent()) { |
| 899 | List<Link> links = finalPath.get().links(); |
| 900 | installPath(mcastIp, affectedSource, links); |
| 901 | mcastPathStore.put(mcastPathStoreKey, links); |
| 902 | links.forEach(link -> goodDevices.add(link.src().deviceId())); |
| 903 | goodDevicesBySource.compute(srcDevice, (k, v) -> { |
| 904 | v = v == null ? Sets.newHashSet() : v; |
| 905 | v.addAll(goodDevices); |
| 906 | return v; |
| 907 | }); |
| 908 | goodLinks.addAll(finalPath.get().links()); |
| 909 | } |
| 910 | }); |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 911 | }); |
| 912 | } |
| 913 | |
| 914 | /** |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 915 | * Try to recover sinks using alternative locations. |
Pier | 7b65716 | 2018-03-27 11:29:42 -0700 | [diff] [blame] | 916 | * |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 917 | * @param notRecovered the device not recovered |
Pier | 7b65716 | 2018-03-27 11:29:42 -0700 | [diff] [blame] | 918 | * @param mcastIp the group address |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 919 | * @param affectedSources affected sources |
| 920 | * @param goodSources sources not affected |
Pier | 7b65716 | 2018-03-27 11:29:42 -0700 | [diff] [blame] | 921 | */ |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 922 | private Optional<Path> recoverSinks(DeviceId notRecovered, IpAddress mcastIp, |
| 923 | Set<ConnectPoint> affectedSources, |
| 924 | Set<ConnectPoint> goodSources) { |
| 925 | log.debug("Processing recover sinks on {} for group {}", notRecovered, mcastIp); |
| 926 | Map<ConnectPoint, Set<ConnectPoint>> affectedSinksBySource = Maps.newHashMap(); |
| 927 | Map<ConnectPoint, Set<ConnectPoint>> sinksBySource = Maps.newHashMap(); |
| 928 | Set<ConnectPoint> sources = Sets.union(affectedSources, goodSources); |
| 929 | // Hosts influenced by the failure |
| 930 | Map<HostId, Set<ConnectPoint>> hostIdSetMap = mcastUtils.getAffectedSinks(notRecovered, mcastIp); |
| 931 | // Locations influenced by the failure |
| 932 | Set<ConnectPoint> affectedSinks = hostIdSetMap.values() |
| 933 | .stream() |
| 934 | .flatMap(Collection::stream) |
| 935 | .filter(connectPoint -> connectPoint.deviceId().equals(notRecovered)) |
| 936 | .collect(Collectors.toSet()); |
| 937 | // All locations |
| 938 | Set<ConnectPoint> sinks = hostIdSetMap.values() |
| 939 | .stream() |
| 940 | .flatMap(Collection::stream) |
| 941 | .collect(Collectors.toSet()); |
| 942 | // Maps sinks with the sources |
| 943 | sources.forEach(source -> { |
| 944 | Set<ConnectPoint> currentSinks = affectedSinks.stream() |
| 945 | .filter(sink -> isSinkForSource(mcastIp, sink, source)) |
| 946 | .collect(Collectors.toSet()); |
| 947 | affectedSinksBySource.put(source, currentSinks); |
Pier | 7b65716 | 2018-03-27 11:29:42 -0700 | [diff] [blame] | 948 | }); |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 949 | // Remove sinks one by one if they are not used by other sources |
| 950 | affectedSources.forEach(affectedSource -> { |
| 951 | Set<ConnectPoint> currentSinks = affectedSinksBySource.get(affectedSource); |
| 952 | log.info("Current sinks {} for source {}", currentSinks, affectedSource); |
| 953 | currentSinks.forEach(currentSink -> { |
| 954 | VlanId assignedVlan = mcastUtils.assignedVlan( |
| 955 | affectedSource.deviceId().equals(currentSink.deviceId()) ? affectedSource : null); |
| 956 | log.info("Assigned vlan {}", assignedVlan); |
| 957 | Set<VlanId> otherVlans = goodSources.stream() |
| 958 | .filter(remainingSource -> affectedSinksBySource.get(remainingSource).contains(currentSink)) |
| 959 | .map(remainingSource -> mcastUtils.assignedVlan( |
| 960 | remainingSource.deviceId().equals(currentSink.deviceId()) ? remainingSource : null)) |
| 961 | .collect(Collectors.toSet()); |
| 962 | log.info("Other vlans {}", otherVlans); |
| 963 | // Sinks on other leaves |
| 964 | if (!otherVlans.contains(assignedVlan)) { |
| 965 | removePortFromDevice(currentSink.deviceId(), currentSink.port(), mcastIp, assignedVlan); |
Pier | 7b65716 | 2018-03-27 11:29:42 -0700 | [diff] [blame] | 966 | } |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 967 | mcastRoleStore.remove(new McastRoleStoreKey(mcastIp, currentSink.deviceId(), affectedSource)); |
| 968 | }); |
Pier | 7b65716 | 2018-03-27 11:29:42 -0700 | [diff] [blame] | 969 | }); |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 970 | // Get the sinks to be added and the new egress |
| 971 | Set<DeviceId> newEgress = Sets.newHashSet(); |
| 972 | affectedSources.forEach(affectedSource -> { |
| 973 | Set<ConnectPoint> currentSinks = affectedSinksBySource.get(affectedSource); |
| 974 | Set<ConnectPoint> newSinks = Sets.difference(sinks, currentSinks); |
| 975 | sinksBySource.put(affectedSource, newSinks); |
| 976 | newSinks.stream() |
| 977 | .map(ConnectPoint::deviceId) |
| 978 | .forEach(newEgress::add); |
| 979 | }); |
| 980 | log.info("newEgress {}", newEgress); |
| 981 | // If there are more than one new egresses, return the problem |
| 982 | if (newEgress.size() != 1) { |
| 983 | log.warn("There are {} new egress, wrong configuration. Abort.", newEgress.size()); |
| 984 | return Optional.empty(); |
| 985 | } |
| 986 | DeviceId egress = newEgress.stream() |
| 987 | .findFirst() |
| 988 | .orElse(null); |
| 989 | DeviceId ingress = affectedSources.stream() |
| 990 | .map(ConnectPoint::deviceId) |
| 991 | .findFirst() |
| 992 | .orElse(null); |
| 993 | log.info("Ingress {}", ingress); |
| 994 | if (ingress == null) { |
| 995 | log.warn("No new ingress, wrong configuration. Abort."); |
| 996 | return Optional.empty(); |
| 997 | } |
| 998 | // Get an alternative path |
| 999 | Optional<Path> alternativePath = getPath(ingress, egress, mcastIp, null); |
| 1000 | // If there are new path install sinks and return path |
| 1001 | if (alternativePath.isPresent()) { |
| 1002 | log.info("Alternative path {}", alternativePath.get().links()); |
| 1003 | affectedSources.forEach(affectedSource -> { |
| 1004 | Set<ConnectPoint> newSinks = sinksBySource.get(affectedSource); |
| 1005 | newSinks.forEach(newSink -> { |
| 1006 | addPortToDevice(newSink.deviceId(), newSink.port(), mcastIp, mcastUtils.assignedVlan(null)); |
| 1007 | mcastRoleStore.put(new McastRoleStoreKey(mcastIp, newSink.deviceId(), affectedSource), EGRESS); |
| 1008 | }); |
| 1009 | }); |
| 1010 | return alternativePath; |
| 1011 | } |
| 1012 | // No new path but sinks co-located with sources install sinks and return empty |
| 1013 | if (ingress.equals(egress)) { |
| 1014 | log.info("No Alternative path but sinks co-located"); |
| 1015 | affectedSources.forEach(affectedSource -> { |
| 1016 | Set<ConnectPoint> newSinks = sinksBySource.get(affectedSource); |
| 1017 | newSinks.forEach(newSink -> { |
| 1018 | if (affectedSource.port().equals(newSink.port())) { |
| 1019 | log.warn("Skip {} since sink {} is on the same port of source {}. Abort", |
| 1020 | mcastIp, newSink, affectedSource); |
| 1021 | return; |
| 1022 | } |
| 1023 | addPortToDevice(newSink.deviceId(), newSink.port(), mcastIp, |
| 1024 | mcastUtils.assignedVlan(affectedSource)); |
| 1025 | mcastRoleStore.put(new McastRoleStoreKey(mcastIp, newSink.deviceId(), affectedSource), INGRESS); |
| 1026 | }); |
| 1027 | }); |
| 1028 | } |
| 1029 | return Optional.empty(); |
Pier | 7b65716 | 2018-03-27 11:29:42 -0700 | [diff] [blame] | 1030 | } |
| 1031 | |
| 1032 | /** |
Pier | 2816468 | 2018-04-17 15:50:43 +0200 | [diff] [blame] | 1033 | * Process all the sinks related to a mcast group and return |
| 1034 | * the ones to be removed. |
| 1035 | * |
| 1036 | * @param mcastIp the group address |
| 1037 | * @param prevsinks the previous sinks to be evaluated |
| 1038 | * @param newSinks the new sinks to be evaluted |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 1039 | * @param source the source connect point |
Pier | 2816468 | 2018-04-17 15:50:43 +0200 | [diff] [blame] | 1040 | * @return the set of the sinks to be removed |
| 1041 | */ |
| 1042 | private Set<ConnectPoint> processSinksToBeRemoved(IpAddress mcastIp, |
| 1043 | Map<HostId, Set<ConnectPoint>> prevsinks, |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 1044 | Map<HostId, Set<ConnectPoint>> newSinks, |
| 1045 | ConnectPoint source) { |
Pier | 2816468 | 2018-04-17 15:50:43 +0200 | [diff] [blame] | 1046 | final Set<ConnectPoint> sinksToBeProcessed = Sets.newHashSet(); |
pier | eaddb18 | 2020-02-03 13:50:53 +0100 | [diff] [blame] | 1047 | log.debug("Processing sinks to be removed for Multicast group {}, source {}", |
| 1048 | mcastIp, source); |
Pier | 2816468 | 2018-04-17 15:50:43 +0200 | [diff] [blame] | 1049 | prevsinks.forEach(((hostId, connectPoints) -> { |
Shekhar Aryan | 27bbe2a | 2019-06-20 14:03:07 +0000 | [diff] [blame] | 1050 | if (Objects.equal(HostId.NONE, hostId)) { |
Esin Karaman | f1f46e3 | 2019-03-05 13:49:02 +0000 | [diff] [blame] | 1051 | //in this case connect points are single homed sinks. |
| 1052 | //just found the difference btw previous and new sinks for this source. |
| 1053 | Set<ConnectPoint> difference = Sets.difference(connectPoints, newSinks.get(hostId)); |
| 1054 | sinksToBeProcessed.addAll(difference); |
| 1055 | return; |
| 1056 | } |
Pier | 2816468 | 2018-04-17 15:50:43 +0200 | [diff] [blame] | 1057 | // We have to check with the existing flows |
| 1058 | ConnectPoint sinkToBeProcessed = connectPoints.stream() |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 1059 | .filter(connectPoint -> isSinkForSource(mcastIp, connectPoint, source)) |
Pier | 2816468 | 2018-04-17 15:50:43 +0200 | [diff] [blame] | 1060 | .findFirst().orElse(null); |
| 1061 | if (sinkToBeProcessed != null) { |
| 1062 | // If the host has been removed or location has been removed |
| 1063 | if (!newSinks.containsKey(hostId) || |
| 1064 | !newSinks.get(hostId).contains(sinkToBeProcessed)) { |
| 1065 | sinksToBeProcessed.add(sinkToBeProcessed); |
| 1066 | } |
| 1067 | } |
| 1068 | })); |
| 1069 | // We have done, return the set |
| 1070 | return sinksToBeProcessed; |
| 1071 | } |
| 1072 | |
| 1073 | /** |
Pier | 7b65716 | 2018-03-27 11:29:42 -0700 | [diff] [blame] | 1074 | * Process new locations and return the set of sinks to be added |
| 1075 | * in the context of the recovery. |
| 1076 | * |
Pier | 2816468 | 2018-04-17 15:50:43 +0200 | [diff] [blame] | 1077 | * @param newSinks the remaining sinks |
| 1078 | * @param prevSinks the previous sinks |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 1079 | * @param source the source connect point |
Pier | 7b65716 | 2018-03-27 11:29:42 -0700 | [diff] [blame] | 1080 | * @return the set of the sinks to be processed |
| 1081 | */ |
Charles Chan | ba59dd6 | 2018-05-10 22:19:49 +0000 | [diff] [blame] | 1082 | private Set<ConnectPoint> processSinksToBeRecovered(IpAddress mcastIp, |
| 1083 | Map<HostId, Set<ConnectPoint>> newSinks, |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 1084 | Map<HostId, Set<ConnectPoint>> prevSinks, |
| 1085 | ConnectPoint source) { |
Pier | 7b65716 | 2018-03-27 11:29:42 -0700 | [diff] [blame] | 1086 | final Set<ConnectPoint> sinksToBeProcessed = Sets.newHashSet(); |
pier | eaddb18 | 2020-02-03 13:50:53 +0100 | [diff] [blame] | 1087 | log.debug("Processing sinks to be recovered for Multicast group {}, source {}", |
| 1088 | mcastIp, source); |
Pier | 2816468 | 2018-04-17 15:50:43 +0200 | [diff] [blame] | 1089 | newSinks.forEach((hostId, connectPoints) -> { |
Pier | 7b65716 | 2018-03-27 11:29:42 -0700 | [diff] [blame] | 1090 | // If it has more than 1 locations |
| 1091 | if (connectPoints.size() > 1 || connectPoints.size() == 0) { |
| 1092 | log.debug("Skip {} since sink {} has {} locations", |
| 1093 | mcastIp, hostId, connectPoints.size()); |
| 1094 | return; |
| 1095 | } |
Pier | 2816468 | 2018-04-17 15:50:43 +0200 | [diff] [blame] | 1096 | // If previously it had two locations, we need to recover it |
| 1097 | // Filter out if the remaining location is already served |
| 1098 | if (prevSinks.containsKey(hostId) && prevSinks.get(hostId).size() == 2) { |
Pier | 665b0fc | 2018-04-19 15:53:20 +0200 | [diff] [blame] | 1099 | ConnectPoint sinkToBeProcessed = connectPoints.stream() |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 1100 | .filter(connectPoint -> !isSinkForSource(mcastIp, connectPoint, source)) |
Pier | 665b0fc | 2018-04-19 15:53:20 +0200 | [diff] [blame] | 1101 | .findFirst().orElse(null); |
| 1102 | if (sinkToBeProcessed != null) { |
| 1103 | sinksToBeProcessed.add(sinkToBeProcessed); |
| 1104 | } |
Pier | 2816468 | 2018-04-17 15:50:43 +0200 | [diff] [blame] | 1105 | } |
Pier | 7b65716 | 2018-03-27 11:29:42 -0700 | [diff] [blame] | 1106 | }); |
| 1107 | return sinksToBeProcessed; |
| 1108 | } |
| 1109 | |
| 1110 | /** |
| 1111 | * Process all the sinks related to a mcast group and return |
| 1112 | * the ones to be processed. |
| 1113 | * |
| 1114 | * @param source the source connect point |
| 1115 | * @param mcastIp the group address |
| 1116 | * @param sinks the sinks to be evaluated |
| 1117 | * @return the set of the sinks to be processed |
| 1118 | */ |
| 1119 | private Set<ConnectPoint> processSinksToBeAdded(ConnectPoint source, IpAddress mcastIp, |
| 1120 | Map<HostId, Set<ConnectPoint>> sinks) { |
Pier | 7b65716 | 2018-03-27 11:29:42 -0700 | [diff] [blame] | 1121 | final Set<ConnectPoint> sinksToBeProcessed = Sets.newHashSet(); |
pier | eaddb18 | 2020-02-03 13:50:53 +0100 | [diff] [blame] | 1122 | log.debug("Processing sinks to be added for Multicast group {}, source {}", |
| 1123 | mcastIp, source); |
Pier | 7b65716 | 2018-03-27 11:29:42 -0700 | [diff] [blame] | 1124 | sinks.forEach(((hostId, connectPoints) -> { |
Esin Karaman | f1f46e3 | 2019-03-05 13:49:02 +0000 | [diff] [blame] | 1125 | //add all connect points that are not tied with any host |
Shekhar Aryan | 27bbe2a | 2019-06-20 14:03:07 +0000 | [diff] [blame] | 1126 | if (Objects.equal(HostId.NONE, hostId)) { |
Esin Karaman | f1f46e3 | 2019-03-05 13:49:02 +0000 | [diff] [blame] | 1127 | sinksToBeProcessed.addAll(connectPoints); |
| 1128 | return; |
| 1129 | } |
Pier | 7b65716 | 2018-03-27 11:29:42 -0700 | [diff] [blame] | 1130 | // If it has more than 2 locations |
| 1131 | if (connectPoints.size() > 2 || connectPoints.size() == 0) { |
| 1132 | log.debug("Skip {} since sink {} has {} locations", |
| 1133 | mcastIp, hostId, connectPoints.size()); |
| 1134 | return; |
| 1135 | } |
| 1136 | // If it has one location, just use it |
| 1137 | if (connectPoints.size() == 1) { |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 1138 | sinksToBeProcessed.add(connectPoints.stream().findFirst().orElse(null)); |
Pier | 7b65716 | 2018-03-27 11:29:42 -0700 | [diff] [blame] | 1139 | return; |
| 1140 | } |
| 1141 | // We prefer to reuse existing flows |
| 1142 | ConnectPoint sinkToBeProcessed = connectPoints.stream() |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 1143 | .filter(connectPoint -> { |
| 1144 | if (!isSinkForGroup(mcastIp, connectPoint, source)) { |
| 1145 | return false; |
| 1146 | } |
| 1147 | if (!isSinkReachable(mcastIp, connectPoint, source)) { |
| 1148 | return false; |
| 1149 | } |
| 1150 | ConnectPoint other = connectPoints.stream() |
Charles Chan | ba59dd6 | 2018-05-10 22:19:49 +0000 | [diff] [blame] | 1151 | .filter(remaining -> !remaining.equals(connectPoint)) |
| 1152 | .findFirst().orElse(null); |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 1153 | // We are already serving the sink |
| 1154 | return !isSinkForSource(mcastIp, other, source); |
| 1155 | }).findFirst().orElse(null); |
| 1156 | |
Pier | 7b65716 | 2018-03-27 11:29:42 -0700 | [diff] [blame] | 1157 | if (sinkToBeProcessed != null) { |
| 1158 | sinksToBeProcessed.add(sinkToBeProcessed); |
| 1159 | return; |
| 1160 | } |
| 1161 | // Otherwise we prefer to reuse existing egresses |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 1162 | Set<DeviceId> egresses = getDevice(mcastIp, EGRESS, source); |
Pier | 7b65716 | 2018-03-27 11:29:42 -0700 | [diff] [blame] | 1163 | sinkToBeProcessed = connectPoints.stream() |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 1164 | .filter(connectPoint -> { |
| 1165 | if (!egresses.contains(connectPoint.deviceId())) { |
| 1166 | return false; |
| 1167 | } |
| 1168 | if (!isSinkReachable(mcastIp, connectPoint, source)) { |
| 1169 | return false; |
| 1170 | } |
| 1171 | ConnectPoint other = connectPoints.stream() |
Charles Chan | ba59dd6 | 2018-05-10 22:19:49 +0000 | [diff] [blame] | 1172 | .filter(remaining -> !remaining.equals(connectPoint)) |
| 1173 | .findFirst().orElse(null); |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 1174 | return !isSinkForSource(mcastIp, other, source); |
| 1175 | }).findFirst().orElse(null); |
Pier | 7b65716 | 2018-03-27 11:29:42 -0700 | [diff] [blame] | 1176 | if (sinkToBeProcessed != null) { |
| 1177 | sinksToBeProcessed.add(sinkToBeProcessed); |
| 1178 | return; |
| 1179 | } |
| 1180 | // Otherwise we prefer a location co-located with the source (if it exists) |
| 1181 | sinkToBeProcessed = connectPoints.stream() |
| 1182 | .filter(connectPoint -> connectPoint.deviceId().equals(source.deviceId())) |
| 1183 | .findFirst().orElse(null); |
| 1184 | if (sinkToBeProcessed != null) { |
| 1185 | sinksToBeProcessed.add(sinkToBeProcessed); |
| 1186 | return; |
| 1187 | } |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 1188 | // Finally, we randomly pick a new location if it is reachable |
| 1189 | sinkToBeProcessed = connectPoints.stream() |
| 1190 | .filter(connectPoint -> { |
| 1191 | if (!isSinkReachable(mcastIp, connectPoint, source)) { |
| 1192 | return false; |
| 1193 | } |
| 1194 | ConnectPoint other = connectPoints.stream() |
Charles Chan | ba59dd6 | 2018-05-10 22:19:49 +0000 | [diff] [blame] | 1195 | .filter(remaining -> !remaining.equals(connectPoint)) |
| 1196 | .findFirst().orElse(null); |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 1197 | return !isSinkForSource(mcastIp, other, source); |
| 1198 | }).findFirst().orElse(null); |
| 1199 | if (sinkToBeProcessed != null) { |
| 1200 | sinksToBeProcessed.add(sinkToBeProcessed); |
| 1201 | } |
Pier | 7b65716 | 2018-03-27 11:29:42 -0700 | [diff] [blame] | 1202 | })); |
Pier | 7b65716 | 2018-03-27 11:29:42 -0700 | [diff] [blame] | 1203 | return sinksToBeProcessed; |
| 1204 | } |
| 1205 | |
| 1206 | /** |
Charles Chan | c91c878 | 2016-03-30 17:54:24 -0700 | [diff] [blame] | 1207 | * Adds a port to given multicast group on given device. This involves the |
| 1208 | * update of L3 multicast group and multicast routing table entry. |
| 1209 | * |
| 1210 | * @param deviceId device ID |
| 1211 | * @param port port to be added |
| 1212 | * @param mcastIp multicast group |
| 1213 | * @param assignedVlan assigned VLAN ID |
| 1214 | */ |
Charles Chan | ba59dd6 | 2018-05-10 22:19:49 +0000 | [diff] [blame] | 1215 | private void addPortToDevice(DeviceId deviceId, PortNumber port, |
| 1216 | IpAddress mcastIp, VlanId assignedVlan) { |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 1217 | // TODO trace |
| 1218 | log.info("Adding {} on {}/{} and vlan {}", mcastIp, deviceId, port, assignedVlan); |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 1219 | McastStoreKey mcastStoreKey = new McastStoreKey(mcastIp, deviceId, assignedVlan); |
Charles Chan | c91c878 | 2016-03-30 17:54:24 -0700 | [diff] [blame] | 1220 | ImmutableSet.Builder<PortNumber> portBuilder = ImmutableSet.builder(); |
Pier Luigi | 4f0dd21 | 2018-01-19 10:24:53 +0100 | [diff] [blame] | 1221 | NextObjective newNextObj; |
Charles Chan | 7277950 | 2016-04-23 17:36:10 -0700 | [diff] [blame] | 1222 | if (!mcastNextObjStore.containsKey(mcastStoreKey)) { |
Charles Chan | c91c878 | 2016-03-30 17:54:24 -0700 | [diff] [blame] | 1223 | // First time someone request this mcast group via this device |
| 1224 | portBuilder.add(port); |
Pier Luigi | 4f0dd21 | 2018-01-19 10:24:53 +0100 | [diff] [blame] | 1225 | // New nextObj |
Vignesh Ethiraj | 7579012 | 2019-08-26 12:18:42 +0000 | [diff] [blame] | 1226 | if (!srManager.deviceConfiguration().isConfigured(deviceId)) { |
| 1227 | log.debug("Passing 0 as nextId for unconfigured device {}", deviceId); |
| 1228 | newNextObj = mcastUtils.nextObjBuilder(mcastIp, assignedVlan, |
| 1229 | portBuilder.build(), 0).add(); |
| 1230 | } else { |
| 1231 | newNextObj = mcastUtils.nextObjBuilder(mcastIp, assignedVlan, |
| 1232 | portBuilder.build(), null).add(); |
| 1233 | } |
Pier Luigi | 4f0dd21 | 2018-01-19 10:24:53 +0100 | [diff] [blame] | 1234 | // Store the new port |
| 1235 | mcastNextObjStore.put(mcastStoreKey, newNextObj); |
Harshada Chaundkar | 9204f31 | 2019-07-02 16:01:24 +0000 | [diff] [blame] | 1236 | // Create, store and apply the new nextObj and fwdObj |
| 1237 | ObjectiveContext context = new DefaultObjectiveContext( |
| 1238 | (objective) -> log.debug("Successfully add {} on {}/{}, vlan {}", |
| 1239 | mcastIp, deviceId, port.toLong(), assignedVlan), |
| 1240 | (objective, error) -> { |
| 1241 | log.warn("Failed to add {} on {}/{}, vlan {}: {}", |
| 1242 | mcastIp, deviceId, port.toLong(), assignedVlan, error); |
pier | e23cd86 | 2020-03-04 14:36:41 +0100 | [diff] [blame] | 1243 | // Schedule the removal using directly the key |
| 1244 | mcastWorker.execute(() -> mcastNextObjStore.remove(mcastStoreKey)); |
Harshada Chaundkar | 9204f31 | 2019-07-02 16:01:24 +0000 | [diff] [blame] | 1245 | }); |
| 1246 | ForwardingObjective fwdObj = mcastUtils.fwdObjBuilder(mcastIp, assignedVlan, |
| 1247 | newNextObj.id()).add(context); |
| 1248 | if (!srManager.deviceConfiguration().isConfigured(deviceId)) { |
| 1249 | log.debug("skip next and forward flowobjective addition for device: {}", deviceId); |
| 1250 | } else { |
| 1251 | srManager.flowObjectiveService.next(deviceId, newNextObj); |
| 1252 | srManager.flowObjectiveService.forward(deviceId, fwdObj); |
| 1253 | } |
Charles Chan | c91c878 | 2016-03-30 17:54:24 -0700 | [diff] [blame] | 1254 | } else { |
| 1255 | // This device already serves some subscribers of this mcast group |
Charles Chan | 7277950 | 2016-04-23 17:36:10 -0700 | [diff] [blame] | 1256 | NextObjective nextObj = mcastNextObjStore.get(mcastStoreKey).value(); |
Charles Chan | c91c878 | 2016-03-30 17:54:24 -0700 | [diff] [blame] | 1257 | // Stop if the port is already in the nextobj |
Pier | 7b65716 | 2018-03-27 11:29:42 -0700 | [diff] [blame] | 1258 | Set<PortNumber> existingPorts = mcastUtils.getPorts(nextObj.next()); |
Charles Chan | c91c878 | 2016-03-30 17:54:24 -0700 | [diff] [blame] | 1259 | if (existingPorts.contains(port)) { |
pier | eaddb18 | 2020-02-03 13:50:53 +0100 | [diff] [blame] | 1260 | log.debug("Port {}/{} already exists for {}. Abort", deviceId, port, mcastIp); |
Charles Chan | c91c878 | 2016-03-30 17:54:24 -0700 | [diff] [blame] | 1261 | return; |
| 1262 | } |
Pier Luigi | 4f0dd21 | 2018-01-19 10:24:53 +0100 | [diff] [blame] | 1263 | // Let's add the port and reuse the previous one |
Yuta HIGUCHI | bef07b5 | 2018-02-09 18:05:23 -0800 | [diff] [blame] | 1264 | portBuilder.addAll(existingPorts).add(port); |
Pier Luigi | 4f0dd21 | 2018-01-19 10:24:53 +0100 | [diff] [blame] | 1265 | // Reuse previous nextObj |
Pier | 7b65716 | 2018-03-27 11:29:42 -0700 | [diff] [blame] | 1266 | newNextObj = mcastUtils.nextObjBuilder(mcastIp, assignedVlan, |
Pier Luigi | 4f0dd21 | 2018-01-19 10:24:53 +0100 | [diff] [blame] | 1267 | portBuilder.build(), nextObj.id()).addToExisting(); |
| 1268 | // Store the final next objective and send only the difference to the driver |
| 1269 | mcastNextObjStore.put(mcastStoreKey, newNextObj); |
| 1270 | // Add just the new port |
| 1271 | portBuilder = ImmutableSet.builder(); |
| 1272 | portBuilder.add(port); |
Pier | 7b65716 | 2018-03-27 11:29:42 -0700 | [diff] [blame] | 1273 | newNextObj = mcastUtils.nextObjBuilder(mcastIp, assignedVlan, |
Pier Luigi | 4f0dd21 | 2018-01-19 10:24:53 +0100 | [diff] [blame] | 1274 | portBuilder.build(), nextObj.id()).addToExisting(); |
Harshada Chaundkar | 9204f31 | 2019-07-02 16:01:24 +0000 | [diff] [blame] | 1275 | if (!srManager.deviceConfiguration().isConfigured(deviceId)) { |
| 1276 | log.debug("skip next flowobjective update for device: {}", deviceId); |
| 1277 | } else { |
| 1278 | // no need to update the flow here since we have updated the nextobjective/group |
| 1279 | // the existing flow will keep pointing to the updated nextobj |
| 1280 | srManager.flowObjectiveService.next(deviceId, newNextObj); |
| 1281 | } |
Vignesh Ethiraj | 7579012 | 2019-08-26 12:18:42 +0000 | [diff] [blame] | 1282 | } |
Charles Chan | c91c878 | 2016-03-30 17:54:24 -0700 | [diff] [blame] | 1283 | } |
| 1284 | |
| 1285 | /** |
| 1286 | * Removes a port from given multicast group on given device. |
| 1287 | * This involves the update of L3 multicast group and multicast routing |
| 1288 | * table entry. |
| 1289 | * |
| 1290 | * @param deviceId device ID |
| 1291 | * @param port port to be added |
| 1292 | * @param mcastIp multicast group |
| 1293 | * @param assignedVlan assigned VLAN ID |
| 1294 | * @return true if this is the last sink on this device |
| 1295 | */ |
Charles Chan | ba59dd6 | 2018-05-10 22:19:49 +0000 | [diff] [blame] | 1296 | private boolean removePortFromDevice(DeviceId deviceId, PortNumber port, |
| 1297 | IpAddress mcastIp, VlanId assignedVlan) { |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 1298 | // TODO trace |
| 1299 | log.info("Removing {} on {}/{} and vlan {}", mcastIp, deviceId, port, assignedVlan); |
Charles Chan | 7277950 | 2016-04-23 17:36:10 -0700 | [diff] [blame] | 1300 | McastStoreKey mcastStoreKey = |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 1301 | new McastStoreKey(mcastIp, deviceId, assignedVlan); |
Charles Chan | c91c878 | 2016-03-30 17:54:24 -0700 | [diff] [blame] | 1302 | // This device is not serving this multicast group |
Charles Chan | 7277950 | 2016-04-23 17:36:10 -0700 | [diff] [blame] | 1303 | if (!mcastNextObjStore.containsKey(mcastStoreKey)) { |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 1304 | return true; |
Charles Chan | c91c878 | 2016-03-30 17:54:24 -0700 | [diff] [blame] | 1305 | } |
Charles Chan | 7277950 | 2016-04-23 17:36:10 -0700 | [diff] [blame] | 1306 | NextObjective nextObj = mcastNextObjStore.get(mcastStoreKey).value(); |
Pier | 7b65716 | 2018-03-27 11:29:42 -0700 | [diff] [blame] | 1307 | Set<PortNumber> existingPorts = mcastUtils.getPorts(nextObj.next()); |
Charles Chan | 7277950 | 2016-04-23 17:36:10 -0700 | [diff] [blame] | 1308 | // This port does not serve this multicast group |
Charles Chan | c91c878 | 2016-03-30 17:54:24 -0700 | [diff] [blame] | 1309 | if (!existingPorts.contains(port)) { |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 1310 | if (!existingPorts.isEmpty()) { |
pier | eaddb18 | 2020-02-03 13:50:53 +0100 | [diff] [blame] | 1311 | log.debug("{} is not serving {} on port {}. Abort.", deviceId, mcastIp, port); |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 1312 | return false; |
| 1313 | } |
| 1314 | return true; |
Charles Chan | c91c878 | 2016-03-30 17:54:24 -0700 | [diff] [blame] | 1315 | } |
| 1316 | // Copy and modify the ImmutableSet |
| 1317 | existingPorts = Sets.newHashSet(existingPorts); |
| 1318 | existingPorts.remove(port); |
Charles Chan | c91c878 | 2016-03-30 17:54:24 -0700 | [diff] [blame] | 1319 | NextObjective newNextObj; |
Pier Luigi | 8cd46de | 2018-01-19 10:24:53 +0100 | [diff] [blame] | 1320 | ObjectiveContext context; |
Charles Chan | c91c878 | 2016-03-30 17:54:24 -0700 | [diff] [blame] | 1321 | ForwardingObjective fwdObj; |
| 1322 | if (existingPorts.isEmpty()) { |
Pier Luigi | 8cd46de | 2018-01-19 10:24:53 +0100 | [diff] [blame] | 1323 | context = new DefaultObjectiveContext( |
Charles Chan | 7277950 | 2016-04-23 17:36:10 -0700 | [diff] [blame] | 1324 | (objective) -> log.debug("Successfully remove {} on {}/{}, vlan {}", |
| 1325 | mcastIp, deviceId, port.toLong(), assignedVlan), |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 1326 | (objective, error) -> log.warn("Failed to remove {} on {}/{}, vlan {}: {}", |
Charles Chan | 7277950 | 2016-04-23 17:36:10 -0700 | [diff] [blame] | 1327 | mcastIp, deviceId, port.toLong(), assignedVlan, error)); |
Pier | 7b65716 | 2018-03-27 11:29:42 -0700 | [diff] [blame] | 1328 | fwdObj = mcastUtils.fwdObjBuilder(mcastIp, assignedVlan, nextObj.id()).remove(context); |
Harshada Chaundkar | 9204f31 | 2019-07-02 16:01:24 +0000 | [diff] [blame] | 1329 | if (!srManager.deviceConfiguration().isConfigured(deviceId)) { |
| 1330 | log.debug("skip forward flowobjective removal for device: {}", deviceId); |
| 1331 | } else { |
| 1332 | srManager.flowObjectiveService.forward(deviceId, fwdObj); |
| 1333 | } |
Charles Chan | 7277950 | 2016-04-23 17:36:10 -0700 | [diff] [blame] | 1334 | mcastNextObjStore.remove(mcastStoreKey); |
Charles Chan | c91c878 | 2016-03-30 17:54:24 -0700 | [diff] [blame] | 1335 | } else { |
Pier Luigi | 8cd46de | 2018-01-19 10:24:53 +0100 | [diff] [blame] | 1336 | // Here we store the next objective with the remaining port |
Pier | 7b65716 | 2018-03-27 11:29:42 -0700 | [diff] [blame] | 1337 | newNextObj = mcastUtils.nextObjBuilder(mcastIp, assignedVlan, |
Pier Luigi | 8cd46de | 2018-01-19 10:24:53 +0100 | [diff] [blame] | 1338 | existingPorts, nextObj.id()).removeFromExisting(); |
Charles Chan | 7277950 | 2016-04-23 17:36:10 -0700 | [diff] [blame] | 1339 | mcastNextObjStore.put(mcastStoreKey, newNextObj); |
Harshada Chaundkar | 9204f31 | 2019-07-02 16:01:24 +0000 | [diff] [blame] | 1340 | // Let's modify the next objective removing the bucket |
| 1341 | newNextObj = mcastUtils.nextObjBuilder(mcastIp, assignedVlan, |
Pier Luigi | 8cd46de | 2018-01-19 10:24:53 +0100 | [diff] [blame] | 1342 | ImmutableSet.of(port), nextObj.id()).removeFromExisting(); |
Harshada Chaundkar | 9204f31 | 2019-07-02 16:01:24 +0000 | [diff] [blame] | 1343 | if (!srManager.deviceConfiguration().isConfigured(deviceId)) { |
| 1344 | log.debug("skip next flowobjective update for device: {}", deviceId); |
| 1345 | } else { |
| 1346 | // no need to update the flow here since we have updated the next objective + group |
| 1347 | // the existing flow will keep pointing to the updated nextobj |
| 1348 | srManager.flowObjectiveService.next(deviceId, newNextObj); |
| 1349 | } |
Vignesh Ethiraj | 7579012 | 2019-08-26 12:18:42 +0000 | [diff] [blame] | 1350 | } |
Charles Chan | c91c878 | 2016-03-30 17:54:24 -0700 | [diff] [blame] | 1351 | return existingPorts.isEmpty(); |
| 1352 | } |
| 1353 | |
Charles Chan | 7277950 | 2016-04-23 17:36:10 -0700 | [diff] [blame] | 1354 | /** |
| 1355 | * Removes entire group on given device. |
| 1356 | * |
| 1357 | * @param deviceId device ID |
| 1358 | * @param mcastIp multicast group to be removed |
| 1359 | * @param assignedVlan assigned VLAN ID |
| 1360 | */ |
Charles Chan | ba59dd6 | 2018-05-10 22:19:49 +0000 | [diff] [blame] | 1361 | private void removeGroupFromDevice(DeviceId deviceId, IpAddress mcastIp, |
| 1362 | VlanId assignedVlan) { |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 1363 | // TODO trace |
| 1364 | log.info("Removing {} on {} and vlan {}", mcastIp, deviceId, assignedVlan); |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 1365 | McastStoreKey mcastStoreKey = new McastStoreKey(mcastIp, deviceId, assignedVlan); |
Charles Chan | 7277950 | 2016-04-23 17:36:10 -0700 | [diff] [blame] | 1366 | // This device is not serving this multicast group |
| 1367 | if (!mcastNextObjStore.containsKey(mcastStoreKey)) { |
pier | eaddb18 | 2020-02-03 13:50:53 +0100 | [diff] [blame] | 1368 | log.debug("{} is not serving {}. Abort.", deviceId, mcastIp); |
Charles Chan | 7277950 | 2016-04-23 17:36:10 -0700 | [diff] [blame] | 1369 | return; |
| 1370 | } |
| 1371 | NextObjective nextObj = mcastNextObjStore.get(mcastStoreKey).value(); |
Charles Chan | 7277950 | 2016-04-23 17:36:10 -0700 | [diff] [blame] | 1372 | ObjectiveContext context = new DefaultObjectiveContext( |
| 1373 | (objective) -> log.debug("Successfully remove {} on {}, vlan {}", |
| 1374 | mcastIp, deviceId, assignedVlan), |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 1375 | (objective, error) -> log.warn("Failed to remove {} on {}, vlan {}: {}", |
Charles Chan | 7277950 | 2016-04-23 17:36:10 -0700 | [diff] [blame] | 1376 | mcastIp, deviceId, assignedVlan, error)); |
Vignesh Ethiraj | 7579012 | 2019-08-26 12:18:42 +0000 | [diff] [blame] | 1377 | if (!srManager.deviceConfiguration().isConfigured(deviceId)) { |
| 1378 | log.debug("skip flow changes on unconfigured device: {}", deviceId); |
| 1379 | } else { |
| 1380 | ForwardingObjective fwdObj = mcastUtils.fwdObjBuilder(mcastIp, assignedVlan, nextObj.id()).remove(context); |
| 1381 | srManager.flowObjectiveService.forward(deviceId, fwdObj); |
| 1382 | } |
Charles Chan | 7277950 | 2016-04-23 17:36:10 -0700 | [diff] [blame] | 1383 | mcastNextObjStore.remove(mcastStoreKey); |
Charles Chan | 7277950 | 2016-04-23 17:36:10 -0700 | [diff] [blame] | 1384 | } |
| 1385 | |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 1386 | private void installPath(IpAddress mcastIp, ConnectPoint source, List<Link> links) { |
kezhiyong | 168fbba | 2018-12-03 16:14:29 +0800 | [diff] [blame] | 1387 | if (links.isEmpty()) { |
| 1388 | log.warn("There is no link that can be used. Stopping installation."); |
| 1389 | return; |
| 1390 | } |
Pier | 1a7e0c0 | 2018-03-12 15:00:54 -0700 | [diff] [blame] | 1391 | // Setup new ingress mcast role |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 1392 | mcastRoleStore.put(new McastRoleStoreKey(mcastIp, links.get(0).src().deviceId(), source), |
Pier | 1a7e0c0 | 2018-03-12 15:00:54 -0700 | [diff] [blame] | 1393 | INGRESS); |
Pier Luigi | 580fd8a | 2018-01-16 10:47:50 +0100 | [diff] [blame] | 1394 | // For each link, modify the next on the source device adding the src port |
| 1395 | // and a new filter objective on the destination port |
| 1396 | links.forEach(link -> { |
| 1397 | addPortToDevice(link.src().deviceId(), link.src().port(), mcastIp, |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 1398 | mcastUtils.assignedVlan(link.src().deviceId().equals(source.deviceId()) ? source : null)); |
Harshada Chaundkar | 9204f31 | 2019-07-02 16:01:24 +0000 | [diff] [blame] | 1399 | McastFilteringObjStoreKey mcastFilterObjStoreKey = new McastFilteringObjStoreKey(link.dst(), |
| 1400 | mcastUtils.assignedVlan(null), mcastIp.isIp4()); |
| 1401 | addFilterToDevice(mcastFilterObjStoreKey, mcastIp, null); |
Pier Luigi | 580fd8a | 2018-01-16 10:47:50 +0100 | [diff] [blame] | 1402 | }); |
Pier | 1a7e0c0 | 2018-03-12 15:00:54 -0700 | [diff] [blame] | 1403 | // Setup mcast role for the transit |
| 1404 | links.stream() |
| 1405 | .filter(link -> !link.src().deviceId().equals(source.deviceId())) |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 1406 | .forEach(link -> mcastRoleStore.put(new McastRoleStoreKey(mcastIp, link.src().deviceId(), source), |
Pier | 1a7e0c0 | 2018-03-12 15:00:54 -0700 | [diff] [blame] | 1407 | TRANSIT)); |
Charles Chan | 7277950 | 2016-04-23 17:36:10 -0700 | [diff] [blame] | 1408 | } |
| 1409 | |
Charles Chan | c91c878 | 2016-03-30 17:54:24 -0700 | [diff] [blame] | 1410 | /** |
Charles Chan | c91c878 | 2016-03-30 17:54:24 -0700 | [diff] [blame] | 1411 | * Gets a path from src to dst. |
| 1412 | * If a path was allocated before, returns the allocated path. |
| 1413 | * Otherwise, randomly pick one from available paths. |
| 1414 | * |
| 1415 | * @param src source device ID |
| 1416 | * @param dst destination device ID |
| 1417 | * @param mcastIp multicast group |
Pier | 1f87aca | 2018-03-14 16:47:32 -0700 | [diff] [blame] | 1418 | * @param allPaths paths list |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 1419 | * |
Charles Chan | c91c878 | 2016-03-30 17:54:24 -0700 | [diff] [blame] | 1420 | * @return an optional path from src to dst |
| 1421 | */ |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 1422 | private Optional<Path> getPath(DeviceId src, DeviceId dst, |
| 1423 | IpAddress mcastIp, List<Path> allPaths) { |
Pier | 1f87aca | 2018-03-14 16:47:32 -0700 | [diff] [blame] | 1424 | if (allPaths == null) { |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 1425 | allPaths = mcastUtils.getPaths(src, dst, Collections.emptySet()); |
Pier | 1f87aca | 2018-03-14 16:47:32 -0700 | [diff] [blame] | 1426 | } |
Charles Chan | c91c878 | 2016-03-30 17:54:24 -0700 | [diff] [blame] | 1427 | if (allPaths.isEmpty()) { |
Charles Chan | c91c878 | 2016-03-30 17:54:24 -0700 | [diff] [blame] | 1428 | return Optional.empty(); |
| 1429 | } |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 1430 | // Create a map index of suitability-to-list of paths. For example |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 1431 | // a path in the list associated to the index 1 shares only one link |
| 1432 | // and it is less suitable of a path belonging to the index 2 |
Pier Luigi | 91573e1 | 2018-01-23 16:06:38 +0100 | [diff] [blame] | 1433 | Map<Integer, List<Path>> eligiblePaths = Maps.newHashMap(); |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 1434 | int score; |
| 1435 | // Let's build the multicast tree |
| 1436 | Set<List<Link>> storedPaths = getStoredPaths(mcastIp); |
| 1437 | Set<Link> storedTree = storedPaths.stream() |
| 1438 | .flatMap(Collection::stream).collect(Collectors.toSet()); |
| 1439 | log.trace("Stored tree {}", storedTree); |
| 1440 | Set<Link> pathLinks; |
Pier Luigi | 91573e1 | 2018-01-23 16:06:38 +0100 | [diff] [blame] | 1441 | for (Path path : allPaths) { |
Pier Luigi | 91573e1 | 2018-01-23 16:06:38 +0100 | [diff] [blame] | 1442 | if (!src.equals(path.links().get(0).src().deviceId())) { |
| 1443 | continue; |
| 1444 | } |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 1445 | pathLinks = Sets.newHashSet(path.links()); |
| 1446 | score = Sets.intersection(pathLinks, storedTree).size(); |
| 1447 | // score defines the index |
| 1448 | if (score > 0) { |
| 1449 | eligiblePaths.compute(score, (index, paths) -> { |
Pier Luigi | 91573e1 | 2018-01-23 16:06:38 +0100 | [diff] [blame] | 1450 | paths = paths == null ? Lists.newArrayList() : paths; |
| 1451 | paths.add(path); |
| 1452 | return paths; |
| 1453 | }); |
Charles Chan | c91c878 | 2016-03-30 17:54:24 -0700 | [diff] [blame] | 1454 | } |
| 1455 | } |
Pier Luigi | 91573e1 | 2018-01-23 16:06:38 +0100 | [diff] [blame] | 1456 | if (eligiblePaths.isEmpty()) { |
pier | eaddb18 | 2020-02-03 13:50:53 +0100 | [diff] [blame] | 1457 | log.trace("No eligiblePath(s) found from {} to {}", src, dst); |
Pier Luigi | 91573e1 | 2018-01-23 16:06:38 +0100 | [diff] [blame] | 1458 | Collections.shuffle(allPaths); |
| 1459 | return allPaths.stream().findFirst(); |
| 1460 | } |
Pier Luigi | 91573e1 | 2018-01-23 16:06:38 +0100 | [diff] [blame] | 1461 | // Let's take the best ones |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 1462 | Integer bestIndex = eligiblePaths.keySet().stream() |
| 1463 | .sorted(Comparator.reverseOrder()).findFirst().orElse(null); |
Pier Luigi | 91573e1 | 2018-01-23 16:06:38 +0100 | [diff] [blame] | 1464 | List<Path> bestPaths = eligiblePaths.get(bestIndex); |
pier | eaddb18 | 2020-02-03 13:50:53 +0100 | [diff] [blame] | 1465 | log.trace("{} eligiblePath(s) found from {} to {}", |
Pier Luigi | 91573e1 | 2018-01-23 16:06:38 +0100 | [diff] [blame] | 1466 | bestPaths.size(), src, dst); |
Pier Luigi | 91573e1 | 2018-01-23 16:06:38 +0100 | [diff] [blame] | 1467 | Collections.shuffle(bestPaths); |
| 1468 | return bestPaths.stream().findFirst(); |
Charles Chan | c91c878 | 2016-03-30 17:54:24 -0700 | [diff] [blame] | 1469 | } |
| 1470 | |
| 1471 | /** |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 1472 | * Gets stored paths of the group. |
| 1473 | * |
| 1474 | * @param mcastIp group address |
| 1475 | * @return a collection of paths |
| 1476 | */ |
| 1477 | private Set<List<Link>> getStoredPaths(IpAddress mcastIp) { |
| 1478 | return mcastPathStore.stream() |
| 1479 | .filter(entry -> entry.getKey().mcastIp().equals(mcastIp)) |
| 1480 | .map(Entry::getValue) |
| 1481 | .collect(Collectors.toSet()); |
| 1482 | } |
| 1483 | |
| 1484 | /** |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 1485 | * Gets device(s) of given role and of given source in given multicast tree. |
| 1486 | * |
| 1487 | * @param mcastIp multicast IP |
| 1488 | * @param role multicast role |
| 1489 | * @param source source connect point |
| 1490 | * @return set of device ID or empty set if not found |
| 1491 | */ |
| 1492 | private Set<DeviceId> getDevice(IpAddress mcastIp, McastRole role, ConnectPoint source) { |
| 1493 | return mcastRoleStore.entrySet().stream() |
| 1494 | .filter(entry -> entry.getKey().mcastIp().equals(mcastIp) && |
Charles Chan | ba59dd6 | 2018-05-10 22:19:49 +0000 | [diff] [blame] | 1495 | entry.getKey().source().equals(source) && |
| 1496 | entry.getValue().value() == role) |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 1497 | .map(Entry::getKey).map(McastRoleStoreKey::deviceId).collect(Collectors.toSet()); |
| 1498 | } |
| 1499 | |
| 1500 | /** |
Charles Chan | 7277950 | 2016-04-23 17:36:10 -0700 | [diff] [blame] | 1501 | * Gets device(s) of given role in given multicast group. |
| 1502 | * |
| 1503 | * @param mcastIp multicast IP |
| 1504 | * @param role multicast role |
| 1505 | * @return set of device ID or empty set if not found |
| 1506 | */ |
| 1507 | private Set<DeviceId> getDevice(IpAddress mcastIp, McastRole role) { |
| 1508 | return mcastRoleStore.entrySet().stream() |
| 1509 | .filter(entry -> entry.getKey().mcastIp().equals(mcastIp) && |
| 1510 | entry.getValue().value() == role) |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 1511 | .map(Entry::getKey).map(McastRoleStoreKey::deviceId).collect(Collectors.toSet()); |
| 1512 | } |
| 1513 | |
| 1514 | /** |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 1515 | * Gets source(s) of given multicast group. |
| 1516 | * |
| 1517 | * @param mcastIp multicast IP |
| 1518 | * @return set of device ID or empty set if not found |
| 1519 | */ |
| 1520 | private Set<ConnectPoint> getSources(IpAddress mcastIp) { |
| 1521 | return mcastRoleStore.entrySet().stream() |
| 1522 | .filter(entry -> entry.getKey().mcastIp().equals(mcastIp)) |
| 1523 | .map(Entry::getKey).map(McastRoleStoreKey::source).collect(Collectors.toSet()); |
Charles Chan | 7277950 | 2016-04-23 17:36:10 -0700 | [diff] [blame] | 1524 | } |
| 1525 | |
    /**
     * Gets sink(s) of given multicast group on the given device,
     * relative to the given source.
     *
     * @param mcastIp multicast IP
     * @param device the device whose egress ports are inspected
     * @param source the source connect point of the multicast tree
     * @return set of connect point or empty set if not found
     */
    private Set<ConnectPoint> getSinks(IpAddress mcastIp, DeviceId device, ConnectPoint source) {
        McastPathStoreKey pathStoreKey = new McastPathStoreKey(mcastIp, source);
        // Paths currently stored for this group and source (empty if none)
        Collection<? extends List<Link>> storedPaths = Versioned.valueOrElse(
                mcastPathStore.get(pathStoreKey), Lists.newArrayList());
        // On the source device the vlan is derived from the source connect point
        VlanId assignedVlan = mcastUtils.assignedVlan(device.equals(source.deviceId()) ? source : null);
        McastStoreKey mcastStoreKey = new McastStoreKey(mcastIp, device, assignedVlan);
        NextObjective nextObjective = Versioned.valueOrNull(mcastNextObjStore.get(mcastStoreKey));
        ImmutableSet.Builder<ConnectPoint> cpBuilder = ImmutableSet.builder();
        if (nextObjective != null) {
            // Every output port of the next objective is a candidate egress point
            Set<PortNumber> outputPorts = mcastUtils.getPorts(nextObjective.next());
            outputPorts.forEach(portNumber -> cpBuilder.add(new ConnectPoint(device, portNumber)));
        }
        Set<ConnectPoint> egressCp = cpBuilder.build();
        // Ports that belong to the stored paths lead to other tree devices, not sinks
        return egressCp.stream()
                .filter(connectPoint -> !mcastUtils.isInfraPort(connectPoint, storedPaths))
                .collect(Collectors.toSet());
    }
| 1549 | |
| 1550 | |
| 1551 | |
| 1552 | /** |
Charles Chan | 7277950 | 2016-04-23 17:36:10 -0700 | [diff] [blame] | 1553 | * Gets groups which is affected by the link down event. |
| 1554 | * |
| 1555 | * @param link link going down |
| 1556 | * @return a set of multicast IpAddress |
| 1557 | */ |
| 1558 | private Set<IpAddress> getAffectedGroups(Link link) { |
| 1559 | DeviceId deviceId = link.src().deviceId(); |
| 1560 | PortNumber port = link.src().port(); |
| 1561 | return mcastNextObjStore.entrySet().stream() |
| 1562 | .filter(entry -> entry.getKey().deviceId().equals(deviceId) && |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 1563 | mcastUtils.getPorts(entry.getValue().value().next()).contains(port)) |
| 1564 | .map(Entry::getKey).map(McastStoreKey::mcastIp).collect(Collectors.toSet()); |
Charles Chan | 7277950 | 2016-04-23 17:36:10 -0700 | [diff] [blame] | 1565 | } |
| 1566 | |
| 1567 | /** |
Pier Luigi | 580fd8a | 2018-01-16 10:47:50 +0100 | [diff] [blame] | 1568 | * Gets groups which are affected by the device down event. |
| 1569 | * |
| 1570 | * @param deviceId device going down |
| 1571 | * @return a set of multicast IpAddress |
| 1572 | */ |
| 1573 | private Set<IpAddress> getAffectedGroups(DeviceId deviceId) { |
| 1574 | return mcastNextObjStore.entrySet().stream() |
| 1575 | .filter(entry -> entry.getKey().deviceId().equals(deviceId)) |
Pier | 1f87aca | 2018-03-14 16:47:32 -0700 | [diff] [blame] | 1576 | .map(Entry::getKey).map(McastStoreKey::mcastIp) |
Pier Luigi | 580fd8a | 2018-01-16 10:47:50 +0100 | [diff] [blame] | 1577 | .collect(Collectors.toSet()); |
| 1578 | } |
| 1579 | |
| 1580 | /** |
Pier | 2816468 | 2018-04-17 15:50:43 +0200 | [diff] [blame] | 1581 | * Verify if a given connect point is sink for this group. |
| 1582 | * |
| 1583 | * @param mcastIp group address |
| 1584 | * @param connectPoint connect point to be verified |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 1585 | * @param source source connect point |
Pier | 2816468 | 2018-04-17 15:50:43 +0200 | [diff] [blame] | 1586 | * @return true if the connect point is sink of the group |
| 1587 | */ |
Charles Chan | ba59dd6 | 2018-05-10 22:19:49 +0000 | [diff] [blame] | 1588 | private boolean isSinkForGroup(IpAddress mcastIp, ConnectPoint connectPoint, |
| 1589 | ConnectPoint source) { |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 1590 | VlanId assignedVlan = mcastUtils.assignedVlan(connectPoint.deviceId().equals(source.deviceId()) ? |
| 1591 | source : null); |
| 1592 | McastStoreKey mcastStoreKey = new McastStoreKey(mcastIp, connectPoint.deviceId(), assignedVlan); |
Pier | 2816468 | 2018-04-17 15:50:43 +0200 | [diff] [blame] | 1593 | if (!mcastNextObjStore.containsKey(mcastStoreKey)) { |
| 1594 | return false; |
| 1595 | } |
Pier | 2816468 | 2018-04-17 15:50:43 +0200 | [diff] [blame] | 1596 | NextObjective mcastNext = mcastNextObjStore.get(mcastStoreKey).value(); |
| 1597 | return mcastUtils.getPorts(mcastNext.next()).contains(connectPoint.port()); |
| 1598 | } |
| 1599 | |
| 1600 | /** |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 1601 | * Verify if a given connect point is sink for this group and for this source. |
| 1602 | * |
| 1603 | * @param mcastIp group address |
| 1604 | * @param connectPoint connect point to be verified |
| 1605 | * @param source source connect point |
| 1606 | * @return true if the connect point is sink of the group |
| 1607 | */ |
Charles Chan | ba59dd6 | 2018-05-10 22:19:49 +0000 | [diff] [blame] | 1608 | private boolean isSinkForSource(IpAddress mcastIp, ConnectPoint connectPoint, |
| 1609 | ConnectPoint source) { |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 1610 | boolean isSink = isSinkForGroup(mcastIp, connectPoint, source); |
| 1611 | DeviceId device; |
| 1612 | if (connectPoint.deviceId().equals(source.deviceId())) { |
| 1613 | device = getDevice(mcastIp, INGRESS, source).stream() |
| 1614 | .filter(deviceId -> deviceId.equals(connectPoint.deviceId())) |
| 1615 | .findFirst().orElse(null); |
| 1616 | } else { |
| 1617 | device = getDevice(mcastIp, EGRESS, source).stream() |
| 1618 | .filter(deviceId -> deviceId.equals(connectPoint.deviceId())) |
| 1619 | .findFirst().orElse(null); |
| 1620 | } |
| 1621 | return isSink && device != null; |
| 1622 | } |
| 1623 | |
| 1624 | /** |
| 1625 | * Verify if a sink is reachable from this source. |
| 1626 | * |
| 1627 | * @param mcastIp group address |
| 1628 | * @param sink connect point to be verified |
| 1629 | * @param source source connect point |
| 1630 | * @return true if the connect point is reachable from the source |
| 1631 | */ |
Charles Chan | ba59dd6 | 2018-05-10 22:19:49 +0000 | [diff] [blame] | 1632 | private boolean isSinkReachable(IpAddress mcastIp, ConnectPoint sink, |
| 1633 | ConnectPoint source) { |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 1634 | return sink.deviceId().equals(source.deviceId()) || |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 1635 | getPath(source.deviceId(), sink.deviceId(), mcastIp, null).isPresent(); |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 1636 | } |
| 1637 | |
    /**
     * Updates filtering objective for given device and port.
     * It is called in general when the mcast config has been
     * changed.
     *
     * @param deviceId device ID
     * @param portNum ingress port number
     * @param vlanId assigned VLAN ID
     * @param install true to add, false to remove
     */
    public void updateFilterToDevice(DeviceId deviceId, PortNumber portNum,
                                     VlanId vlanId, boolean install) {
        // Hand off to the mcast worker so the update runs asynchronously
        mcastWorker.execute(() -> updateFilterToDeviceInternal(deviceId, portNum, vlanId, install));
    }
| 1652 | |
| 1653 | private void updateFilterToDeviceInternal(DeviceId deviceId, PortNumber portNum, |
| 1654 | VlanId vlanId, boolean install) { |
| 1655 | lastMcastChange.set(Instant.now()); |
| 1656 | // Iterates over the route and updates properly the filtering objective on the source device. |
| 1657 | srManager.multicastRouteService.getRoutes().forEach(mcastRoute -> { |
| 1658 | log.debug("Update filter for {}", mcastRoute.group()); |
| 1659 | if (!mcastUtils.isLeader(mcastRoute.group())) { |
| 1660 | log.debug("Skip {} due to lack of leadership", mcastRoute.group()); |
| 1661 | return; |
| 1662 | } |
| 1663 | // Get the sources and for each one update properly the filtering objectives |
| 1664 | Set<ConnectPoint> sources = srManager.multicastRouteService.sources(mcastRoute); |
| 1665 | sources.forEach(source -> { |
| 1666 | if (source.deviceId().equals(deviceId) && source.port().equals(portNum)) { |
| 1667 | if (install) { |
| 1668 | McastFilteringObjStoreKey mcastFilterObjStoreKey = new McastFilteringObjStoreKey(source, |
| 1669 | vlanId, mcastRoute.group().isIp4()); |
| 1670 | addFilterToDevice(mcastFilterObjStoreKey, mcastRoute.group(), INGRESS); |
| 1671 | } else { |
| 1672 | mcastUtils.removeFilterToDevice(deviceId, portNum, vlanId, mcastRoute.group(), null); |
Pier Luigi | 35dab3f | 2018-01-25 16:16:02 +0100 | [diff] [blame] | 1673 | } |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 1674 | } |
Pier Luigi | 35dab3f | 2018-01-25 16:16:02 +0100 | [diff] [blame] | 1675 | }); |
pier | 62e0b07 | 2019-12-23 19:21:49 +0100 | [diff] [blame] | 1676 | }); |
Pier Luigi | 35dab3f | 2018-01-25 16:16:02 +0100 | [diff] [blame] | 1677 | } |
| 1678 | |
| 1679 | /** |
Harshada Chaundkar | 9204f31 | 2019-07-02 16:01:24 +0000 | [diff] [blame] | 1680 | * Add filtering to the device if needed. |
| 1681 | * |
| 1682 | * @param filterObjStoreKey the filtering obj key |
| 1683 | * @param mcastIp the multicast group |
| 1684 | * @param mcastRole the multicast role |
| 1685 | */ |
| 1686 | private void addFilterToDevice(McastFilteringObjStoreKey filterObjStoreKey, |
| 1687 | IpAddress mcastIp, |
| 1688 | McastRole mcastRole) { |
| 1689 | if (!containsFilterInTheDevice(filterObjStoreKey)) { |
| 1690 | // if this is the first sink for this group/device |
| 1691 | // match additionally on mac |
| 1692 | log.debug("Filtering not available for device {}, vlan {} and {}", |
| 1693 | filterObjStoreKey.ingressCP().deviceId(), filterObjStoreKey.vlanId(), |
| 1694 | filterObjStoreKey.isIpv4() ? "IPv4" : "IPv6"); |
| 1695 | mcastUtils.addFilterToDevice(filterObjStoreKey.ingressCP().deviceId(), |
| 1696 | filterObjStoreKey.ingressCP().port(), |
| 1697 | filterObjStoreKey.vlanId(), mcastIp, |
| 1698 | mcastRole, true); |
| 1699 | mcastFilteringObjStore.add(filterObjStoreKey); |
| 1700 | } else if (!mcastFilteringObjStore.contains(filterObjStoreKey)) { |
| 1701 | // match only vlan |
| 1702 | log.debug("Filtering not available for connect point {}, vlan {} and {}", |
| 1703 | filterObjStoreKey.ingressCP(), filterObjStoreKey.vlanId(), |
| 1704 | filterObjStoreKey.isIpv4() ? "IPv4" : "IPv6"); |
| 1705 | mcastUtils.addFilterToDevice(filterObjStoreKey.ingressCP().deviceId(), |
| 1706 | filterObjStoreKey.ingressCP().port(), |
| 1707 | filterObjStoreKey.vlanId(), mcastIp, |
| 1708 | mcastRole, false); |
| 1709 | mcastFilteringObjStore.add(filterObjStoreKey); |
| 1710 | } else { |
| 1711 | // do nothing |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 1712 | log.debug("Filtering already present for connect point {}, vlan {} and {}. Abort", |
| 1713 | filterObjStoreKey.ingressCP(), filterObjStoreKey.vlanId(), |
| 1714 | filterObjStoreKey.isIpv4() ? "IPv4" : "IPv6"); |
Harshada Chaundkar | 9204f31 | 2019-07-02 16:01:24 +0000 | [diff] [blame] | 1715 | } |
| 1716 | } |
| 1717 | |
| 1718 | /** |
| 1719 | * Verify if there are related filtering obj in the device. |
| 1720 | * |
| 1721 | * @param filteringKey the filtering obj key |
| 1722 | * @return true if related filtering obj are found |
| 1723 | */ |
| 1724 | private boolean containsFilterInTheDevice(McastFilteringObjStoreKey filteringKey) { |
| 1725 | // check if filters are already added on the device |
| 1726 | McastFilteringObjStoreKey key = mcastFilteringObjStore.stream() |
| 1727 | .filter(mcastFilteringKey -> |
| 1728 | mcastFilteringKey.ingressCP().deviceId().equals(filteringKey.ingressCP().deviceId()) |
| 1729 | && mcastFilteringKey.isIpv4() == filteringKey.isIpv4() |
| 1730 | && mcastFilteringKey.vlanId().equals(filteringKey.vlanId()) |
| 1731 | ).findFirst().orElse(null); |
| 1732 | // we are interested to filt obj on the same device, same vlan and same ip type |
| 1733 | return key != null; |
| 1734 | } |
| 1735 | |
| 1736 | /** |
| 1737 | * Update the filtering objective store upon device failure. |
| 1738 | * |
| 1739 | * @param affectedDevice the affected device |
| 1740 | */ |
| 1741 | private void updateFilterObjStoreByDevice(DeviceId affectedDevice) { |
| 1742 | // purge the related filter objective key |
| 1743 | Set<McastFilteringObjStoreKey> filterObjs = Sets.newHashSet(mcastFilteringObjStore); |
| 1744 | Iterator<McastFilteringObjStoreKey> filterIterator = filterObjs.iterator(); |
| 1745 | McastFilteringObjStoreKey filterKey; |
| 1746 | while (filterIterator.hasNext()) { |
| 1747 | filterKey = filterIterator.next(); |
| 1748 | if (filterKey.ingressCP().deviceId().equals(affectedDevice)) { |
| 1749 | mcastFilteringObjStore.remove(filterKey); |
| 1750 | } |
| 1751 | } |
| 1752 | } |
| 1753 | |
| 1754 | /** |
| 1755 | * Update the filtering objective store upon port failure. |
| 1756 | * |
| 1757 | * @param affectedPort the affected port |
| 1758 | */ |
| 1759 | private void updateFilterObjStoreByPort(ConnectPoint affectedPort) { |
| 1760 | // purge the related filter objective key |
| 1761 | Set<McastFilteringObjStoreKey> filterObjs = Sets.newHashSet(mcastFilteringObjStore); |
| 1762 | Iterator<McastFilteringObjStoreKey> filterIterator = filterObjs.iterator(); |
| 1763 | McastFilteringObjStoreKey filterKey; |
| 1764 | while (filterIterator.hasNext()) { |
| 1765 | filterKey = filterIterator.next(); |
| 1766 | if (filterKey.ingressCP().equals(affectedPort)) { |
| 1767 | mcastFilteringObjStore.remove(filterKey); |
| 1768 | } |
| 1769 | } |
| 1770 | } |
| 1771 | |
    /**
     * Performs bucket verification operation for all mcast groups in the devices.
     * Firstly, it verifies that mcast is stable before trying verification operation.
     * Verification consists in creating new nexts with VERIFY operation. Actually,
     * the operation is totally delegated to the driver.
     */
    private final class McastBucketCorrector implements Runnable {
        // Number of verify operations currently outstanding; bounded by
        // MAX_VERIFY_ON_FLIGHT through the wait/notify back-pressure below.
        // NOTE(review): the AtomicInteger doubles as the wait/notify monitor;
        // a dedicated lock object would be the more conventional choice - confirm
        // before changing, as the objective callbacks synchronize on it too.
        private final AtomicInteger verifyOnFlight = new AtomicInteger(0);
        // Define the context used for the back pressure mechanism
        private final ObjectiveContext context = new DefaultObjectiveContext(
                (objective) -> {
                    // Success callback: release one slot and wake the corrector
                    synchronized (verifyOnFlight) {
                        log.trace("Verify {} done", objective.id());
                        verifyOnFlight.updateAndGet(i -> i > 0 ? i - 1 : i);
                        verifyOnFlight.notify();
                    }
                },
                (objective, error) -> {
                    // Failure callback: still release the slot so we never deadlock
                    synchronized (verifyOnFlight) {
                        log.trace("Verify {} error {}", objective.id(), error);
                        verifyOnFlight.updateAndGet(i -> i > 0 ? i - 1 : i);
                        verifyOnFlight.notify();
                    }
                });

        @Override
        public void run() {
            try {
                // Iterates over the routes and verify the related next objectives
                for (McastRoute mcastRoute : srManager.multicastRouteService.getRoutes()) {
                    // Abort the whole pass if mcast state changed recently or a
                    // corrector pass already ran; rechecked before every group
                    if (!isMcastStable() || wasBktCorrRunning()) {
                        return;
                    }
                    IpAddress mcastIp = mcastRoute.group();
                    log.trace("Running mcast buckets corrector for mcast group: {}", mcastIp);
                    // Verify leadership on the operation
                    if (!mcastUtils.isLeader(mcastIp)) {
                        log.trace("Skip {} due to lack of leadership", mcastIp);
                        continue;
                    }
                    // Get sources and sinks from Mcast Route Service and warn about errors
                    Set<ConnectPoint> sources = mcastUtils.getSources(mcastIp);
                    Set<ConnectPoint> sinks = mcastUtils.getSinks(mcastIp).values().stream()
                            .flatMap(Collection::stream).collect(Collectors.toSet());
                    // Do not proceed if sources of this group are missing
                    if (sources.isEmpty()) {
                        if (!sinks.isEmpty()) {
                            log.warn("Unable to run buckets corrector. " +
                                             "Missing source {} for group {}", sources, mcastIp);
                        }
                        continue;
                    }
                    // For each group we get current information in the store
                    // and issue a check of the next objectives in place
                    Set<McastStoreKey> processedKeys = Sets.newHashSet();
                    for (ConnectPoint source : sources) {
                        Set<DeviceId> ingressDevices = getDevice(mcastIp, INGRESS, source);
                        Set<DeviceId> transitDevices = getDevice(mcastIp, TRANSIT, source);
                        Set<DeviceId> egressDevices = getDevice(mcastIp, EGRESS, source);
                        // Do not proceed if ingress devices are missing
                        if (ingressDevices.isEmpty()) {
                            if (!sinks.isEmpty()) {
                                log.warn("Unable to run buckets corrector. " +
                                                 "Missing ingress {} for source {} and for group {}",
                                         ingressDevices, source, mcastIp);
                            }
                            continue;
                        }
                        // Create the set of the devices to be processed
                        ImmutableSet.Builder<DeviceId> devicesBuilder = ImmutableSet.builder();
                        devicesBuilder.addAll(ingressDevices);
                        if (!transitDevices.isEmpty()) {
                            devicesBuilder.addAll(transitDevices);
                        }
                        if (!egressDevices.isEmpty()) {
                            devicesBuilder.addAll(egressDevices);
                        }
                        Set<DeviceId> devicesToProcess = devicesBuilder.build();
                        for (DeviceId deviceId : devicesToProcess) {
                            if (!srManager.deviceConfiguration().isConfigured(deviceId)) {
                                log.trace("Skipping Bucket corrector for unconfigured device {}", deviceId);
                                continue;
                            }
                            // Back pressure: block while the max number of
                            // verifies is outstanding; callbacks notify us
                            synchronized (verifyOnFlight) {
                                while (verifyOnFlight.get() == MAX_VERIFY_ON_FLIGHT) {
                                    verifyOnFlight.wait();
                                }
                            }
                            VlanId assignedVlan = mcastUtils.assignedVlan(deviceId.equals(source.deviceId()) ?
                                                                                  source : null);
                            McastStoreKey currentKey = new McastStoreKey(mcastIp, deviceId, assignedVlan);
                            // Check if we already processed this next - trees merge at some point
                            if (processedKeys.contains(currentKey)) {
                                continue;
                            }
                            // Verify the nextobjective or skip to next device
                            if (mcastNextObjStore.containsKey(currentKey)) {
                                NextObjective currentNext = mcastNextObjStore.get(currentKey).value();
                                // Rebuild the next objective using assigned vlan
                                currentNext = mcastUtils.nextObjBuilder(mcastIp, assignedVlan,
                                        mcastUtils.getPorts(currentNext.next()), currentNext.id()).verify(context);
                                // Send to the flowobjective service
                                srManager.flowObjectiveService.next(deviceId, currentNext);
                                verifyOnFlight.incrementAndGet();
                                log.trace("Verify on flight {}", verifyOnFlight);
                                processedKeys.add(currentKey);
                            } else {
                                log.warn("Unable to run buckets corrector. " +
                                                 "Missing next for {}, for source {} and for group {}",
                                         deviceId, source, mcastIp);
                            }
                        }
                    }
                    // Let's wait the group before start the next one
                    synchronized (verifyOnFlight) {
                        while (verifyOnFlight.get() > 0) {
                            verifyOnFlight.wait();
                        }
                    }
                }
            } catch (InterruptedException e) {
                // NOTE(review): interrupt status is not restored here
                // (Thread.currentThread().interrupt()) - confirm whether the
                // executor relies on it before changing
                log.warn("BktCorr has been interrupted");
            } finally {
                // Record the completion time so wasBktCorrRunning() can gate
                // the next pass
                lastBktCorrExecution.set(Instant.now());
            }
        }
    }
Pier Luigi | 0f9635b | 2018-01-15 18:06:43 +0100 | [diff] [blame] | 1899 | |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 1900 | /** |
| 1901 | * Returns the associated next ids to the mcast groups or to the single |
| 1902 | * group if mcastIp is present. |
| 1903 | * |
| 1904 | * @param mcastIp the group ip |
| 1905 | * @return the mapping mcastIp-device to next id |
| 1906 | */ |
Charles Chan | 0b1dd7e | 2018-08-19 19:21:46 -0700 | [diff] [blame] | 1907 | public Map<McastStoreKey, Integer> getNextIds(IpAddress mcastIp) { |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 1908 | log.info("mcastNexts {}", mcastNextObjStore.size()); |
Pier Luigi | 0f9635b | 2018-01-15 18:06:43 +0100 | [diff] [blame] | 1909 | if (mcastIp != null) { |
| 1910 | return mcastNextObjStore.entrySet().stream() |
| 1911 | .filter(mcastEntry -> mcastIp.equals(mcastEntry.getKey().mcastIp())) |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 1912 | .collect(Collectors.toMap(Entry::getKey, entry -> entry.getValue().value().id())); |
Pier Luigi | 0f9635b | 2018-01-15 18:06:43 +0100 | [diff] [blame] | 1913 | } |
Pier Luigi | 0f9635b | 2018-01-15 18:06:43 +0100 | [diff] [blame] | 1914 | return mcastNextObjStore.entrySet().stream() |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 1915 | .collect(Collectors.toMap(Entry::getKey, entry -> entry.getValue().value().id())); |
Pier Luigi | 0f9635b | 2018-01-15 18:06:43 +0100 | [diff] [blame] | 1916 | } |
| 1917 | |
Pier | 71c5577 | 2018-04-17 17:25:22 +0200 | [diff] [blame] | 1918 | /** |
Charles Chan | 0b1dd7e | 2018-08-19 19:21:46 -0700 | [diff] [blame] | 1919 | * Removes given next ID from mcast next id store. |
| 1920 | * |
| 1921 | * @param nextId next id |
| 1922 | */ |
| 1923 | public void removeNextId(int nextId) { |
| 1924 | mcastNextObjStore.entrySet().forEach(e -> { |
| 1925 | if (e.getValue().value().id() == nextId) { |
| 1926 | mcastNextObjStore.remove(e.getKey()); |
| 1927 | } |
| 1928 | }); |
| 1929 | } |
| 1930 | |
| 1931 | /** |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 1932 | * Build the mcast paths. |
| 1933 | * |
| 1934 | * @param storedPaths mcast tree |
| 1935 | * @param mcastIp the group ip |
| 1936 | * @param source the source |
| 1937 | */ |
| 1938 | private Map<ConnectPoint, List<ConnectPoint>> buildMcastPaths(Collection<? extends List<Link>> storedPaths, |
| 1939 | IpAddress mcastIp, ConnectPoint source) { |
| 1940 | Map<ConnectPoint, List<ConnectPoint>> mcastTree = Maps.newHashMap(); |
| 1941 | // Local sinks |
| 1942 | Set<ConnectPoint> localSinks = getSinks(mcastIp, source.deviceId(), source); |
| 1943 | localSinks.forEach(localSink -> mcastTree.put(localSink, Lists.newArrayList(localSink, source))); |
| 1944 | // Remote sinks |
| 1945 | storedPaths.forEach(path -> { |
| 1946 | List<Link> links = path; |
| 1947 | DeviceId egressDevice = links.get(links.size() - 1).dst().deviceId(); |
| 1948 | Set<ConnectPoint> remoteSinks = getSinks(mcastIp, egressDevice, source); |
| 1949 | List<ConnectPoint> connectPoints = Lists.newArrayList(source); |
| 1950 | links.forEach(link -> { |
| 1951 | connectPoints.add(link.src()); |
| 1952 | connectPoints.add(link.dst()); |
| 1953 | }); |
| 1954 | Collections.reverse(connectPoints); |
| 1955 | remoteSinks.forEach(remoteSink -> { |
| 1956 | List<ConnectPoint> finalPath = Lists.newArrayList(connectPoints); |
| 1957 | finalPath.add(0, remoteSink); |
| 1958 | mcastTree.put(remoteSink, finalPath); |
| 1959 | }); |
| 1960 | }); |
| 1961 | return mcastTree; |
| 1962 | } |
| 1963 | |
| 1964 | /** |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 1965 | * Returns the associated roles to the mcast groups. |
| 1966 | * |
| 1967 | * @param mcastIp the group ip |
| 1968 | * @param sourcecp the source connect point |
| 1969 | * @return the mapping mcastIp-device to mcast role |
| 1970 | */ |
Charles Chan | ba59dd6 | 2018-05-10 22:19:49 +0000 | [diff] [blame] | 1971 | public Map<McastRoleStoreKey, McastRole> getMcastRoles(IpAddress mcastIp, |
| 1972 | ConnectPoint sourcecp) { |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 1973 | log.info("mcastRoles {}", mcastRoleStore.size()); |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 1974 | if (mcastIp != null) { |
| 1975 | Map<McastRoleStoreKey, McastRole> roles = mcastRoleStore.entrySet().stream() |
| 1976 | .filter(mcastEntry -> mcastIp.equals(mcastEntry.getKey().mcastIp())) |
| 1977 | .collect(Collectors.toMap(entry -> new McastRoleStoreKey(entry.getKey().mcastIp(), |
| 1978 | entry.getKey().deviceId(), entry.getKey().source()), entry -> entry.getValue().value())); |
| 1979 | if (sourcecp != null) { |
| 1980 | roles = roles.entrySet().stream() |
| 1981 | .filter(mcastEntry -> sourcecp.equals(mcastEntry.getKey().source())) |
| 1982 | .collect(Collectors.toMap(entry -> new McastRoleStoreKey(entry.getKey().mcastIp(), |
| 1983 | entry.getKey().deviceId(), entry.getKey().source()), Entry::getValue)); |
| 1984 | } |
| 1985 | return roles; |
| 1986 | } |
| 1987 | return mcastRoleStore.entrySet().stream() |
| 1988 | .collect(Collectors.toMap(entry -> new McastRoleStoreKey(entry.getKey().mcastIp(), |
| 1989 | entry.getKey().deviceId(), entry.getKey().source()), entry -> entry.getValue().value())); |
| 1990 | } |
| 1991 | |
Pier | 71c5577 | 2018-04-17 17:25:22 +0200 | [diff] [blame] | 1992 | /** |
| 1993 | * Returns the associated trees to the mcast group. |
| 1994 | * |
| 1995 | * @param mcastIp the group ip |
| 1996 | * @param sourcecp the source connect point |
| 1997 | * @return the mapping egress point to mcast path |
| 1998 | */ |
Charles Chan | ba59dd6 | 2018-05-10 22:19:49 +0000 | [diff] [blame] | 1999 | public Multimap<ConnectPoint, List<ConnectPoint>> getMcastTrees(IpAddress mcastIp, |
| 2000 | ConnectPoint sourcecp) { |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 2001 | // TODO remove |
| 2002 | log.info("{}", getStoredPaths(mcastIp)); |
Pier | 71c5577 | 2018-04-17 17:25:22 +0200 | [diff] [blame] | 2003 | Multimap<ConnectPoint, List<ConnectPoint>> mcastTrees = HashMultimap.create(); |
Pier | 71c5577 | 2018-04-17 17:25:22 +0200 | [diff] [blame] | 2004 | Set<ConnectPoint> sources = mcastUtils.getSources(mcastIp); |
Pier | 71c5577 | 2018-04-17 17:25:22 +0200 | [diff] [blame] | 2005 | if (sourcecp != null) { |
| 2006 | sources = sources.stream() |
Pier | e99511d | 2018-04-19 16:47:06 +0200 | [diff] [blame] | 2007 | .filter(source -> source.equals(sourcecp)).collect(Collectors.toSet()); |
Pier | 71c5577 | 2018-04-17 17:25:22 +0200 | [diff] [blame] | 2008 | } |
Pier | 71c5577 | 2018-04-17 17:25:22 +0200 | [diff] [blame] | 2009 | if (!sources.isEmpty()) { |
| 2010 | sources.forEach(source -> { |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 2011 | McastPathStoreKey pathStoreKey = new McastPathStoreKey(mcastIp, source); |
| 2012 | Collection<? extends List<Link>> storedPaths = Versioned.valueOrElse( |
| 2013 | mcastPathStore.get(pathStoreKey), Lists.newArrayList()); |
| 2014 | // TODO remove |
| 2015 | log.info("Paths for group {} and source {} - {}", mcastIp, source, storedPaths.size()); |
| 2016 | Map<ConnectPoint, List<ConnectPoint>> mcastTree = buildMcastPaths(storedPaths, mcastIp, source); |
| 2017 | mcastTree.forEach(mcastTrees::put); |
Pier | 71c5577 | 2018-04-17 17:25:22 +0200 | [diff] [blame] | 2018 | }); |
| 2019 | } |
| 2020 | return mcastTrees; |
| 2021 | } |
| 2022 | |
    /**
     * Return the leaders of the mcast groups.
     *
     * @param mcastIp the group ip (presumably null means all groups, matching
     *                the other getters here — confirm in McastUtils)
     * @return the mapping from group ip to the node leading that group
     */
    public Map<IpAddress, NodeId> getMcastLeaders(IpAddress mcastIp) {
        // Leadership bookkeeping lives in McastUtils; this is a plain delegation.
        return mcastUtils.getMcastLeaders(mcastIp);
    }
Harshada Chaundkar | 9204f31 | 2019-07-02 16:01:24 +0000 | [diff] [blame] | 2032 | |
| 2033 | /** |
| 2034 | * Returns the mcast filtering obj. |
| 2035 | * |
| 2036 | * @return the mapping group-node |
| 2037 | */ |
| 2038 | public Map<DeviceId, List<McastFilteringObjStoreKey>> getMcastFilters() { |
pier | 9e02ab7 | 2020-02-12 20:40:55 +0100 | [diff] [blame] | 2039 | // TODO remove |
| 2040 | log.info("mcastFilters {}", mcastFilteringObjStore.size()); |
Harshada Chaundkar | 9204f31 | 2019-07-02 16:01:24 +0000 | [diff] [blame] | 2041 | Map<DeviceId, List<McastFilteringObjStoreKey>> mapping = Maps.newHashMap(); |
| 2042 | Set<McastFilteringObjStoreKey> currentKeys = Sets.newHashSet(mcastFilteringObjStore); |
| 2043 | currentKeys.forEach(filteringObjStoreKey -> |
| 2044 | mapping.compute(filteringObjStoreKey.ingressCP().deviceId(), (k, v) -> { |
| 2045 | List<McastFilteringObjStoreKey> values = v; |
| 2046 | if (values == null) { |
| 2047 | values = Lists.newArrayList(); |
| 2048 | } |
| 2049 | values.add(filteringObjStoreKey); |
| 2050 | return values; |
| 2051 | }) |
| 2052 | ); |
| 2053 | return mapping; |
| 2054 | } |
Charles Chan | c91c878 | 2016-03-30 17:54:24 -0700 | [diff] [blame] | 2055 | } |