/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/adj/adj.h>
#include <vnet/adj/adj_internal.h>
#include <vnet/fib/fib_walk.h>

/*
 * The 'DB' of all glean adjs.
 * There is one glean per-{interface, protocol, connected prefix}
 */
static uword **adj_gleans[FIB_PROTOCOL_IP_MAX];

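/*
 * Layout sketch (illustrative only; the addresses and indices below are
 * hypothetical): the outer vector is indexed by sw_if_index, and each
 * element is a hash table keyed by an ip46_address_t (the normalized
 * connected-prefix address, see adj_glean_add_or_lock() below) mapping
 * to an adj_index_t. With 10.0.0.1/24 and 2001:db8::1/64 configured on
 * sw_if_index 1, one would expect roughly:
 *
 *   adj_gleans[FIB_PROTOCOL_IP4][1] : {10.0.0.0   -> ai_v4}
 *   adj_gleans[FIB_PROTOCOL_IP6][1] : {2001:db8:: -> ai_v6}
 */
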
static inline u32
adj_get_glean_node (fib_protocol_t proto)
{
    switch (proto) {
    case FIB_PROTOCOL_IP4:
        return (ip4_glean_node.index);
    case FIB_PROTOCOL_IP6:
        return (ip6_glean_node.index);
    case FIB_PROTOCOL_MPLS:
        break;
    }
    ASSERT(0);
    return (~0);
}

static adj_index_t
adj_glean_db_lookup (fib_protocol_t proto,
                     u32 sw_if_index,
                     const ip46_address_t *nh_addr)
{
    uword *p;

    if (vec_len(adj_gleans[proto]) <= sw_if_index)
        return (ADJ_INDEX_INVALID);

    p = hash_get_mem (adj_gleans[proto][sw_if_index], nh_addr);

    if (p)
        return (p[0]);

    return (ADJ_INDEX_INVALID);
}

static void
adj_glean_db_insert (fib_protocol_t proto,
                     u32 sw_if_index,
                     const ip46_address_t *nh_addr,
                     adj_index_t ai)
{
    vlib_main_t *vm = vlib_get_main();

    vlib_worker_thread_barrier_sync(vm);

    vec_validate(adj_gleans[proto], sw_if_index);

    if (NULL == adj_gleans[proto][sw_if_index])
    {
        adj_gleans[proto][sw_if_index] =
            hash_create_mem (0, sizeof(ip46_address_t), sizeof(adj_index_t));
    }

    hash_set_mem_alloc (&adj_gleans[proto][sw_if_index],
                        nh_addr, ai);

    vlib_worker_thread_barrier_release(vm);
}

static void
adj_glean_db_remove (fib_protocol_t proto,
                     u32 sw_if_index,
                     const ip46_address_t *nh_addr)
{
    vlib_main_t *vm = vlib_get_main();

    vlib_worker_thread_barrier_sync(vm);

    ASSERT(ADJ_INDEX_INVALID != adj_glean_db_lookup(proto, sw_if_index, nh_addr));
    hash_unset_mem_free (&adj_gleans[proto][sw_if_index],
                         nh_addr);

    if (0 == hash_elts(adj_gleans[proto][sw_if_index]))
    {
        hash_free(adj_gleans[proto][sw_if_index]);
        adj_gleans[proto][sw_if_index] = NULL;
    }
    vlib_worker_thread_barrier_release(vm);
}

/*
 * adj_glean_add_or_lock
 *
 * The next-hop address here is used for source address selection in the DP.
 * The glean adj is added for an interface's connected prefix; the next-hop
 * passed here is the local prefix on that same interface.
 */
adj_index_t
adj_glean_add_or_lock (fib_protocol_t proto,
                       vnet_link_t linkt,
                       u32 sw_if_index,
                       const fib_prefix_t *conn)
{
    ip_adjacency_t * adj;
    adj_index_t ai;

    ai = adj_glean_db_lookup(proto, sw_if_index, &conn->fp_addr);

    if (ADJ_INDEX_INVALID == ai)
    {
        adj = adj_alloc(proto);

        adj->lookup_next_index = IP_LOOKUP_NEXT_GLEAN;
        adj->ia_nh_proto = proto;
        adj->ia_link = linkt;
        adj->ia_node_index = adj_get_glean_node(proto);
        ai = adj_get_index(adj);
        adj_lock(ai);

        ASSERT(conn);
        fib_prefix_normalize(conn, &adj->sub_type.glean.rx_pfx);
        adj->rewrite_header.sw_if_index = sw_if_index;
        adj->rewrite_header.data_bytes = 0;
        adj->rewrite_header.max_l3_packet_bytes =
            vnet_sw_interface_get_mtu(vnet_get_main(), sw_if_index,
                                      vnet_link_to_mtu(linkt));

        vnet_update_adjacency_for_sw_interface(vnet_get_main(),
                                               sw_if_index,
                                               ai);

        adj_glean_db_insert(proto, sw_if_index,
                            &adj->sub_type.glean.rx_pfx.fp_addr, ai);
    }
    else
    {
        adj = adj_get(ai);
        adj_lock(ai);
    }

    adj_delegate_adj_created(adj);

    return (ai);
}

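/*
 * Usage sketch (hypothetical caller, not part of this file): a FIB glean
 * source adding the connected prefix 10.0.0.1/24 on sw_if_index would do
 * something along these lines; the adj is released with adj_unlock() when
 * the prefix is withdrawn.
 *
 *   fib_prefix_t conn = {
 *       .fp_proto = FIB_PROTOCOL_IP4,
 *       .fp_len = 24,
 *       .fp_addr.ip4.as_u32 = clib_host_to_net_u32 (0x0a000001),
 *   };
 *   adj_index_t ai;
 *
 *   ai = adj_glean_add_or_lock (FIB_PROTOCOL_IP4, VNET_LINK_IP4,
 *                               sw_if_index, &conn);
 *   ...
 *   adj_unlock (ai);
 */
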
/**
 * adj_glean_update_rewrite
 */
void
adj_glean_update_rewrite (adj_index_t adj_index)
{
    ip_adjacency_t *adj;

    ASSERT(ADJ_INDEX_INVALID != adj_index);

    adj = adj_get(adj_index);

    vnet_rewrite_for_sw_interface(vnet_get_main(),
                                  adj_fib_proto_2_nd(adj->ia_nh_proto),
                                  adj->rewrite_header.sw_if_index,
                                  adj->ia_node_index,
                                  VNET_REWRITE_FOR_SW_INTERFACE_ADDRESS_BROADCAST,
                                  &adj->rewrite_header,
                                  sizeof (adj->rewrite_data));
}

static adj_walk_rc_t
adj_glean_update_rewrite_walk (adj_index_t ai,
                               void *data)
{
    adj_glean_update_rewrite(ai);

    return (ADJ_WALK_RC_CONTINUE);
}

void
adj_glean_update_rewrite_itf (u32 sw_if_index)
{
    adj_glean_walk (sw_if_index, adj_glean_update_rewrite_walk, NULL);
}

void
adj_glean_walk (u32 sw_if_index,
                adj_walk_cb_t cb,
                void *data)
{
    fib_protocol_t proto;

    FOR_EACH_FIB_IP_PROTOCOL(proto)
    {
        adj_index_t ai, *aip, *ais = NULL;
        ip46_address_t *conn;

        if (vec_len(adj_gleans[proto]) <= sw_if_index ||
            NULL == adj_gleans[proto][sw_if_index])
            continue;

        /*
         * Walk the hash first to collect the indices, then walk the
         * collection. This keeps the walk safe against modifications
         * of the hash table made by the callback.
         */
        hash_foreach_mem(conn, ai, adj_gleans[proto][sw_if_index],
        ({
            vec_add1(ais, ai);
        }));

        vec_foreach(aip, ais)
        {
            if (ADJ_WALK_RC_STOP == cb(*aip, data))
                break;
        }
        vec_free(ais);
    }
}

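/*
 * Example (sketch, not used in this file): counting the glean adjs on an
 * interface with the walk API; the callback name and counter are
 * hypothetical.
 *
 *   static adj_walk_rc_t
 *   glean_count_cb (adj_index_t ai, void *data)
 *   {
 *       (*(u32 *) data)++;
 *       return (ADJ_WALK_RC_CONTINUE);
 *   }
 *
 *   u32 n_gleans = 0;
 *   adj_glean_walk (sw_if_index, glean_count_cb, &n_gleans);
 */
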
adj_index_t
adj_glean_get (fib_protocol_t proto,
               u32 sw_if_index,
               const ip46_address_t *nh)
{
    if (NULL != nh)
    {
        return adj_glean_db_lookup(proto, sw_if_index, nh);
    }
    else
    {
        ip46_address_t *conn;
        adj_index_t ai;

        if (vec_len(adj_gleans[proto]) <= sw_if_index ||
            NULL == adj_gleans[proto][sw_if_index])
            return (ADJ_INDEX_INVALID);

        hash_foreach_mem(conn, ai, adj_gleans[proto][sw_if_index],
        ({
            return (ai);
        }));
    }
    return (ADJ_INDEX_INVALID);
}

const ip46_address_t *
adj_glean_get_src (fib_protocol_t proto,
                   u32 sw_if_index,
                   const ip46_address_t *nh)
{
    const ip46_address_t *conn, *source;
    const ip_adjacency_t *adj;
    adj_index_t ai;

    if (vec_len(adj_gleans[proto]) <= sw_if_index ||
        NULL == adj_gleans[proto][sw_if_index])
        return (NULL);

    fib_prefix_t pfx = {
        .fp_len = fib_prefix_get_host_length(proto),
        .fp_proto = proto,
    };

    if (nh)
        pfx.fp_addr = *nh;

    /*
     * An interface can have more than one glean address. Where
     * possible we want to return a source address from the same
     * subnet as the destination. If this is not possible then any address
     * will do.
     */
    source = NULL;

    hash_foreach_mem(conn, ai, adj_gleans[proto][sw_if_index],
    ({
        adj = adj_get(ai);

        if (adj->sub_type.glean.rx_pfx.fp_len > 0)
        {
            source = &adj->sub_type.glean.rx_pfx.fp_addr;

            /* if no destination is specified just use this glean */
            if (NULL == nh)
                return (source);

            /* check that the glean covers the destination */
            if (fib_prefix_is_cover(&adj->sub_type.glean.rx_pfx, &pfx))
                return (source);
        }
    }));

    return (source);
}

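/*
 * Example (sketch): selecting a source address for, say, an ARP request
 * to a peer reachable over sw_if_index; peer_addr is hypothetical.
 *
 *   const ip46_address_t *src;
 *
 *   src = adj_glean_get_src (FIB_PROTOCOL_IP4, sw_if_index, &peer_addr);
 *   if (NULL == src)
 *       ; // no connected prefix on the interface, nothing to source from
 */
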
void
adj_glean_remove (ip_adjacency_t *adj)
{
    fib_prefix_t norm;

    fib_prefix_normalize(&adj->sub_type.glean.rx_pfx,
                         &norm);
    adj_glean_db_remove(adj->ia_nh_proto,
                        adj->rewrite_header.sw_if_index,
                        &norm.fp_addr);
}

static adj_walk_rc_t
adj_glean_start_backwalk (adj_index_t ai,
                          void *data)
{
    fib_node_back_walk_ctx_t bw_ctx = *(fib_node_back_walk_ctx_t*) data;

    fib_walk_sync(FIB_NODE_TYPE_ADJ, ai, &bw_ctx);

    return (ADJ_WALK_RC_CONTINUE);
}

static clib_error_t *
adj_glean_interface_state_change (vnet_main_t * vnm,
                                  u32 sw_if_index,
                                  u32 flags)
{
    /*
     * for each glean on the interface trigger a walk back to the children
     */
    fib_node_back_walk_ctx_t bw_ctx = {
        .fnbw_reason = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP ?
                        FIB_NODE_BW_REASON_FLAG_INTERFACE_UP :
                        FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN),
    };

    adj_glean_walk (sw_if_index, adj_glean_start_backwalk, &bw_ctx);

    return (NULL);
}

VNET_SW_INTERFACE_ADMIN_UP_DOWN_FUNCTION(adj_glean_interface_state_change);

/**
 * @brief Invoked on each SW interface of a HW interface when the
 * HW interface state changes
 */
static walk_rc_t
adj_nbr_hw_sw_interface_state_change (vnet_main_t * vnm,
                                      u32 sw_if_index,
                                      void *arg)
{
    adj_glean_interface_state_change(vnm, sw_if_index, (uword) arg);

    return (WALK_CONTINUE);
}

/**
 * @brief Registered callback for HW interface state changes
 */
static clib_error_t *
adj_glean_hw_interface_state_change (vnet_main_t * vnm,
                                     u32 hw_if_index,
                                     u32 flags)
{
    /*
     * walk SW interfaces on the HW
     */
    uword sw_flags;

    sw_flags = ((flags & VNET_HW_INTERFACE_FLAG_LINK_UP) ?
                VNET_SW_INTERFACE_FLAG_ADMIN_UP :
                0);

    vnet_hw_interface_walk_sw(vnm, hw_if_index,
                              adj_nbr_hw_sw_interface_state_change,
                              (void*) sw_flags);

    return (NULL);
}

VNET_HW_INTERFACE_LINK_UP_DOWN_FUNCTION(
    adj_glean_hw_interface_state_change);

static clib_error_t *
adj_glean_interface_delete (vnet_main_t * vnm,
                            u32 sw_if_index,
                            u32 is_add)
{
    if (is_add)
    {
        /*
         * Not interested in interface additions; we will not back-walk
         * to resolve paths through newly added interfaces. Why? The control
         * plane should have the brains to add interfaces first, then routes.
         * So the case where a path references an interface that matches
         * one just created is the case where the path resolved through an
         * interface that was deleted and has still not been removed. There
         * is NO GUARANTEE that the interface being added now, even though
         * it may have the same sw_if_index, is the same interface that the
         * path needs. So tough!
         * If the control plane wants these routes to resolve it needs to
         * remove and add them again.
         */
        return (NULL);
    }

    /*
     * for each glean on the interface trigger a walk back to the children
     */
    fib_node_back_walk_ctx_t bw_ctx = {
        .fnbw_reason = FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE,
    };

    adj_glean_walk (sw_if_index, adj_glean_start_backwalk, &bw_ctx);

    return (NULL);
}

VNET_SW_INTERFACE_ADD_DEL_FUNCTION(adj_glean_interface_delete);

u8*
format_adj_glean (u8* s, va_list *ap)
{
    index_t index = va_arg(*ap, index_t);
    CLIB_UNUSED(u32 indent) = va_arg(*ap, u32);
    ip_adjacency_t * adj = adj_get(index);

    s = format(s, "%U-glean: [src:%U] %U",
               format_fib_protocol, adj->ia_nh_proto,
               format_fib_prefix, &adj->sub_type.glean.rx_pfx,
               format_vnet_rewrite,
               &adj->rewrite_header, sizeof (adj->rewrite_data), 0);

    return (s);
}

u32
adj_glean_db_size (void)
{
    fib_protocol_t proto;
    u32 sw_if_index = 0;
    u64 count = 0;

    FOR_EACH_FIB_IP_PROTOCOL(proto)
    {
        vec_foreach_index(sw_if_index, adj_gleans[proto])
        {
            if (NULL != adj_gleans[proto][sw_if_index])
            {
                count += hash_elts(adj_gleans[proto][sw_if_index]);
            }
        }
    }
    return (count);
}

static void
adj_dpo_lock (dpo_id_t *dpo)
{
    adj_lock(dpo->dpoi_index);
}
static void
adj_dpo_unlock (dpo_id_t *dpo)
{
    adj_unlock(dpo->dpoi_index);
}

const static dpo_vft_t adj_glean_dpo_vft = {
    .dv_lock = adj_dpo_lock,
    .dv_unlock = adj_dpo_unlock,
    .dv_format = format_adj_glean,
    .dv_get_urpf = adj_dpo_get_urpf,
    .dv_get_mtu = adj_dpo_get_mtu,
};

/**
 * @brief The per-protocol VLIB graph nodes that are assigned to a glean
 * object.
 *
 * this means that these graph nodes are ones from which a glean is the
 * parent object in the DPO-graph.
 */
const static char* const glean_ip4_nodes[] =
{
    "ip4-glean",
    NULL,
};
const static char* const glean_ip6_nodes[] =
{
    "ip6-glean",
    NULL,
};

const static char* const * const glean_nodes[DPO_PROTO_NUM] =
{
    [DPO_PROTO_IP4] = glean_ip4_nodes,
    [DPO_PROTO_IP6] = glean_ip6_nodes,
    [DPO_PROTO_MPLS] = NULL,
};

void
adj_glean_module_init (void)
{
    dpo_register(DPO_ADJACENCY_GLEAN, &adj_glean_dpo_vft, glean_nodes);
}