/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/adj/adj_mcast.h>
#include <vnet/adj/adj_internal.h>
#include <vnet/fib/fib_walk.h>
#include <vnet/ip/ip.h>

/*
 * The 'DB' of all mcast adjs.
 * There is only one mcast adj per-interface per-protocol, so this is a
 * per-interface vector, indexed by sw_if_index.
 */
static adj_index_t *adj_mcasts[FIB_PROTOCOL_MAX];
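
/*
 * For example (grounded in the code below, names unchanged): a lookup is a
 * plain per-protocol vector index, i.e.
 *
 *   adj_mcasts[FIB_PROTOCOL_IP4][sw_if_index]
 *
 * is either ADJ_INDEX_INVALID or the index of that interface's IPv4 mcast
 * adjacency.
 */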

static u32
adj_get_mcast_node (fib_protocol_t proto)
{
    switch (proto) {
    case FIB_PROTOCOL_IP4:
        return (ip4_rewrite_mcast_node.index);
    case FIB_PROTOCOL_IP6:
        return (ip6_rewrite_mcast_node.index);
    case FIB_PROTOCOL_MPLS:
        break;
    }
    ASSERT(0);
    return (0);
}

/*
 * adj_mcast_add_or_lock
 *
 * The next_hop address here is used for source address selection in the DP.
 * The mcast adj is added to an interface's connected prefix; the next-hop
 * passed here is the local prefix on the same interface.
 */
adj_index_t
adj_mcast_add_or_lock (fib_protocol_t proto,
                       vnet_link_t link_type,
                       u32 sw_if_index)
{
    ip_adjacency_t * adj;

    vec_validate_init_empty(adj_mcasts[proto], sw_if_index, ADJ_INDEX_INVALID);

    if (ADJ_INDEX_INVALID == adj_mcasts[proto][sw_if_index])
    {
        vnet_main_t *vnm;

        vnm = vnet_get_main();
        adj = adj_alloc(proto);

        adj->lookup_next_index = IP_LOOKUP_NEXT_MCAST;
        adj->ia_nh_proto = proto;
        adj->ia_link = link_type;
        adj->ia_node_index = adj_get_mcast_node(proto);
        adj_mcasts[proto][sw_if_index] = adj_get_index(adj);
        adj_lock(adj_get_index(adj));

        vnet_rewrite_init(vnm, sw_if_index, link_type,
                          adj->ia_node_index,
                          vnet_tx_node_index_for_sw_interface(vnm, sw_if_index),
                          &adj->rewrite_header);

        /*
         * we need a rewrite where the destination IP address is converted
         * to the appropriate link-layer address. This is interface specific.
         * So ask the interface to do it.
         */
        vnet_update_adjacency_for_sw_interface(vnm, sw_if_index,
                                               adj_get_index(adj));

        adj_delegate_adj_created(adj);
    }
    else
    {
        adj = adj_get(adj_mcasts[proto][sw_if_index]);
        adj_lock(adj_get_index(adj));
    }

    return (adj_get_index(adj));
}
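
/*
 * Usage sketch (illustrative only, not called from this file): a consumer
 * such as a multicast FIB entry or a tunnel would typically take and release
 * the interface's mcast adjacency like this; 'sw_if_index' is assumed to be
 * an existing interface.
 *
 *   adj_index_t ai;
 *
 *   ai = adj_mcast_add_or_lock(FIB_PROTOCOL_IP4, VNET_LINK_IP4, sw_if_index);
 *
 *   ... stack a DPO on it, or store it in a forwarding object ...
 *
 *   adj_unlock(ai);   // drop the reference taken above when done
 */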

/**
 * adj_mcast_update_rewrite
 *
 * Update the adjacency's rewrite string. A NULL string implies the
 * rewrite is reset (i.e. when the ARP/ND entry is gone).
 * NB: the adj being updated may be handling traffic in the DP.
 */
void
adj_mcast_update_rewrite (adj_index_t adj_index,
                          u8 *rewrite,
                          u8 offset)
{
    ip_adjacency_t *adj;

    ASSERT(ADJ_INDEX_INVALID != adj_index);

    adj = adj_get(adj_index);

    /*
     * update the adj's rewrite string and build the arc
     * from the rewrite node to the interface's TX node
     */
    adj_nbr_update_rewrite_internal(adj, IP_LOOKUP_NEXT_MCAST,
                                    adj_get_mcast_node(adj->ia_nh_proto),
                                    vnet_tx_node_index_for_sw_interface(
                                        vnet_get_main(),
                                        adj->rewrite_header.sw_if_index),
                                    rewrite);
    /*
     * set the offset corresponding to the mcast IP address rewrite
     */
    adj->rewrite_header.dst_mcast_offset = offset;
}
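
/*
 * Illustrative note (the actual fixup is performed by the per-protocol
 * rewrite-mcast nodes, not here): 'dst_mcast_offset' tells the DP where,
 * within the rewrite, the link-layer group address must be patched from the
 * packet's IP group address. For Ethernet the standard mappings are:
 *
 *   IPv4: 01:00:5e:00:00:00 | (group & 0x007fffff)   - low 23 bits
 *   IPv6: 33:33:00:00:00:00 | (group & 0xffffffff)   - low 32 bits
 *
 * so the offset typically points at the destination MAC bytes that receive
 * those group-address bits.
 */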

/**
 * adj_mcast_midchain_update_rewrite
 *
 * Update the adjacency's rewrite string. A NULL string implies the
 * rewrite is reset (i.e. when the ARP/ND entry is gone).
 * NB: the adj being updated may be handling traffic in the DP.
 */
void
adj_mcast_midchain_update_rewrite (adj_index_t adj_index,
                                   adj_midchain_fixup_t fixup,
                                   const void *fixup_data,
                                   adj_flags_t flags,
                                   u8 *rewrite,
                                   u8 offset,
                                   u32 mask)
{
    ip_adjacency_t *adj;

    ASSERT(ADJ_INDEX_INVALID != adj_index);

    adj = adj_get(adj_index);

    /*
     * One-time-only update. Since we don't support changing the tunnel
     * src,dst, this is all we need.
     */
    ASSERT(adj->lookup_next_index == IP_LOOKUP_NEXT_MCAST);
    /*
     * tunnels can always provide a rewrite.
     */
    ASSERT(NULL != rewrite);

    adj_midchain_setup(adj_index, fixup, fixup_data, flags);

    /*
     * update the adj's rewrite string and build the arc
     * from the rewrite node to the interface's TX node
     */
    adj_nbr_update_rewrite_internal(adj, IP_LOOKUP_NEXT_MCAST_MIDCHAIN,
                                    adj_get_mcast_node(adj->ia_nh_proto),
                                    vnet_tx_node_index_for_sw_interface(
                                        vnet_get_main(),
                                        adj->rewrite_header.sw_if_index),
                                    rewrite);

    adj->rewrite_header.dst_mcast_offset = offset;
}
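
/*
 * Usage sketch (hypothetical, names are illustrative): a multicast tunnel,
 * e.g. one whose destination is a group address, converts the interface's
 * mcast adj into a midchain so its own encap is applied first:
 *
 *   ai = adj_mcast_add_or_lock(FIB_PROTOCOL_IP4, VNET_LINK_IP4, sw_if_index);
 *
 *   // 'rewrite' holds the tunnel encap built by the tunnel code;
 *   // 'my_fixup'/'my_fixup_data' are the tunnel's per-packet fixup hook
 *   // (e.g. to patch length/checksum fields once the encap is copied in).
 *   adj_mcast_midchain_update_rewrite(ai, my_fixup, my_fixup_data,
 *                                     ADJ_FLAG_NONE, rewrite,
 *                                     offset, mask);
 */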

void
adj_mcast_remove (fib_protocol_t proto,
                  u32 sw_if_index)
{
    ASSERT(sw_if_index < vec_len(adj_mcasts[proto]));

    adj_mcasts[proto][sw_if_index] = ADJ_INDEX_INVALID;
}

static clib_error_t *
adj_mcast_interface_state_change (vnet_main_t * vnm,
                                  u32 sw_if_index,
                                  u32 flags)
{
    /*
     * for each mcast on the interface trigger a walk back to the children
     */
    fib_protocol_t proto;
    ip_adjacency_t *adj;

    for (proto = FIB_PROTOCOL_IP4; proto <= FIB_PROTOCOL_IP6; proto++)
    {
        if (sw_if_index >= vec_len(adj_mcasts[proto]) ||
            ADJ_INDEX_INVALID == adj_mcasts[proto][sw_if_index])
            continue;

        adj = adj_get(adj_mcasts[proto][sw_if_index]);

        fib_node_back_walk_ctx_t bw_ctx = {
            .fnbw_reason = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP ?
                            FIB_NODE_BW_REASON_FLAG_INTERFACE_UP :
                            FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN),
        };

        fib_walk_sync(FIB_NODE_TYPE_ADJ, adj_get_index(adj), &bw_ctx);
    }

    return (NULL);
}

VNET_SW_INTERFACE_ADMIN_UP_DOWN_FUNCTION(adj_mcast_interface_state_change);

/**
 * @brief Invoked on each SW interface of a HW interface when the
 *  HW interface state changes
 */
static walk_rc_t
adj_mcast_hw_sw_interface_state_change (vnet_main_t * vnm,
                                        u32 sw_if_index,
                                        void *arg)
{
    adj_mcast_interface_state_change(vnm, sw_if_index, (uword) arg);

    return (WALK_CONTINUE);
}

/**
 * @brief Registered callback for HW interface state changes
 */
static clib_error_t *
adj_mcast_hw_interface_state_change (vnet_main_t * vnm,
                                     u32 hw_if_index,
                                     u32 flags)
{
    /*
     * walk SW interfaces on the HW
     */
    uword sw_flags;

    sw_flags = ((flags & VNET_HW_INTERFACE_FLAG_LINK_UP) ?
                VNET_SW_INTERFACE_FLAG_ADMIN_UP :
                0);

    vnet_hw_interface_walk_sw(vnm, hw_if_index,
                              adj_mcast_hw_sw_interface_state_change,
                              (void*) sw_flags);

    return (NULL);
}

VNET_HW_INTERFACE_LINK_UP_DOWN_FUNCTION(
    adj_mcast_hw_interface_state_change);

static clib_error_t *
adj_mcast_interface_delete (vnet_main_t * vnm,
                            u32 sw_if_index,
                            u32 is_add)
{
    /*
     * for each mcast on the interface trigger a walk back to the children
     */
    fib_protocol_t proto;
    ip_adjacency_t *adj;

    if (is_add)
    {
        /*
         * not interested in interface additions. we will not back walk
         * to resolve paths through newly added interfaces. Why? The control
         * plane should have the brains to add interfaces first, then routes.
         * So the case where there are paths with an interface that matches
         * one just created is the case where the path resolved through an
         * interface that was deleted, and still has not been removed. The
         * fact that an interface is being added now, even though it may have
         * the same sw_if_index, is NO GUARANTEE that it is the same interface
         * the path needs. So tough!
         * If the control plane wants these routes to resolve it needs to
         * remove and add them again.
         */
        return (NULL);
    }

    for (proto = FIB_PROTOCOL_IP4; proto <= FIB_PROTOCOL_IP6; proto++)
    {
        if (sw_if_index >= vec_len(adj_mcasts[proto]) ||
            ADJ_INDEX_INVALID == adj_mcasts[proto][sw_if_index])
            continue;

        adj = adj_get(adj_mcasts[proto][sw_if_index]);

        fib_node_back_walk_ctx_t bw_ctx = {
            .fnbw_reason = FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE,
        };

        fib_walk_sync(FIB_NODE_TYPE_ADJ, adj_get_index(adj), &bw_ctx);
    }

    return (NULL);
}

VNET_SW_INTERFACE_ADD_DEL_FUNCTION(adj_mcast_interface_delete);

/**
 * @brief Walk the multicast Adjacencies on a given interface
 */
void
adj_mcast_walk (u32 sw_if_index,
                fib_protocol_t proto,
                adj_walk_cb_t cb,
                void *ctx)
{
    if (vec_len(adj_mcasts[proto]) > sw_if_index)
    {
        if (ADJ_INDEX_INVALID != adj_mcasts[proto][sw_if_index])
        {
            cb(adj_mcasts[proto][sw_if_index], ctx);
        }
    }
}
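
/*
 * Usage sketch (hypothetical callback, illustrative only): since there is at
 * most one mcast adj per interface per protocol, a walk visits zero or one
 * adjacencies. A caller collecting it might look like:
 *
 *   static adj_walk_rc_t
 *   collect_mcast_adj (adj_index_t ai, void *ctx)
 *   {
 *       *(adj_index_t *) ctx = ai;
 *       return (ADJ_WALK_RC_CONTINUE);
 *   }
 *
 *   adj_index_t ai = ADJ_INDEX_INVALID;
 *   adj_mcast_walk(sw_if_index, FIB_PROTOCOL_IP4, collect_mcast_adj, &ai);
 */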

u8*
format_adj_mcast (u8* s, va_list *ap)
{
    index_t index = va_arg(*ap, index_t);
    CLIB_UNUSED(u32 indent) = va_arg(*ap, u32);
    ip_adjacency_t * adj;

    if (!adj_is_valid(index))
        return format(s, "<invalid adjacency>");

    adj = adj_get(index);

    s = format(s, "%U-mcast: ",
               format_fib_protocol, adj->ia_nh_proto);
    if (adj->rewrite_header.flags & VNET_REWRITE_HAS_FEATURES)
        s = format(s, "[features] ");
    s = format (s, "%U",
                format_vnet_rewrite,
                &adj->rewrite_header, sizeof (adj->rewrite_data), 0);

    return (s);
}

u8*
format_adj_mcast_midchain (u8* s, va_list *ap)
{
    index_t index = va_arg(*ap, index_t);
    CLIB_UNUSED(u32 indent) = va_arg(*ap, u32);
    ip_adjacency_t * adj = adj_get(index);

    s = format(s, "%U-mcast-midchain: ",
               format_fib_protocol, adj->ia_nh_proto);
    s = format (s, "%U",
                format_vnet_rewrite,
                &adj->rewrite_header,
                sizeof (adj->rewrite_data), 0);
    s = format (s, "\n%Ustacked-on:\n%U%U",
                format_white_space, indent,
                format_white_space, indent+2,
                format_dpo_id, &adj->sub_type.midchain.next_dpo, indent+2);

    return (s);
}

static void
adj_dpo_lock (dpo_id_t *dpo)
{
    adj_lock(dpo->dpoi_index);
}
static void
adj_dpo_unlock (dpo_id_t *dpo)
{
    adj_unlock(dpo->dpoi_index);
}

const static dpo_vft_t adj_mcast_dpo_vft = {
    .dv_lock = adj_dpo_lock,
    .dv_unlock = adj_dpo_unlock,
    .dv_format = format_adj_mcast,
    .dv_get_urpf = adj_dpo_get_urpf,
    .dv_get_mtu = adj_dpo_get_mtu,
};
const static dpo_vft_t adj_mcast_midchain_dpo_vft = {
    .dv_lock = adj_dpo_lock,
    .dv_unlock = adj_dpo_unlock,
    .dv_format = format_adj_mcast_midchain,
    .dv_get_urpf = adj_dpo_get_urpf,
    .dv_get_mtu = adj_dpo_get_mtu,
};

/**
 * @brief The per-protocol VLIB graph nodes that are assigned to a mcast
 *        object.
 *
 * This means that these graph nodes are ones from which a mcast is the
 * parent object in the DPO-graph.
 */
const static char* const adj_mcast_ip4_nodes[] =
{
    "ip4-rewrite-mcast",
    NULL,
};
const static char* const adj_mcast_ip6_nodes[] =
{
    "ip6-rewrite-mcast",
    NULL,
};

const static char* const * const adj_mcast_nodes[DPO_PROTO_NUM] =
{
    [DPO_PROTO_IP4]  = adj_mcast_ip4_nodes,
    [DPO_PROTO_IP6]  = adj_mcast_ip6_nodes,
    [DPO_PROTO_MPLS] = NULL,
};

/**
 * @brief The per-protocol VLIB graph nodes that are assigned to a mcast
 *        midchain object.
 *
 * This means that these graph nodes are ones from which a mcast midchain
 * is the parent object in the DPO-graph.
 */
const static char* const adj_mcast_midchain_ip4_nodes[] =
{
    "ip4-mcast-midchain",
    NULL,
};
const static char* const adj_mcast_midchain_ip6_nodes[] =
{
    "ip6-mcast-midchain",
    NULL,
};

const static char* const * const adj_mcast_midchain_nodes[DPO_PROTO_NUM] =
{
    [DPO_PROTO_IP4]  = adj_mcast_midchain_ip4_nodes,
    [DPO_PROTO_IP6]  = adj_mcast_midchain_ip6_nodes,
    [DPO_PROTO_MPLS] = NULL,
};

/**
 * @brief Return the size of the adj DB.
 * This is only for testing purposes, so an efficient implementation
 * is not needed.
 */
u32
adj_mcast_db_size (void)
{
    u32 n_adjs, sw_if_index;
    fib_protocol_t proto;

    n_adjs = 0;
    for (proto = FIB_PROTOCOL_IP4; proto <= FIB_PROTOCOL_IP6; proto++)
    {
        for (sw_if_index = 0;
             sw_if_index < vec_len(adj_mcasts[proto]);
             sw_if_index++)
        {
            if (ADJ_INDEX_INVALID != adj_mcasts[proto][sw_if_index])
            {
                n_adjs++;
            }
        }
    }

    return (n_adjs);
}

void
adj_mcast_module_init (void)
{
    dpo_register(DPO_ADJACENCY_MCAST,
                 &adj_mcast_dpo_vft,
                 adj_mcast_nodes);
    dpo_register(DPO_ADJACENCY_MCAST_MIDCHAIN,
                 &adj_mcast_midchain_dpo_vft,
                 adj_mcast_midchain_nodes);
}