/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/adj/adj.h>
#include <vnet/adj/adj_internal.h>
#include <vnet/fib/fib_walk.h>

/*
 * The 'DB' of all glean adjs.
 * There is one glean per-{interface, protocol, connected prefix}
 */
static uword **adj_gleans[FIB_PROTOCOL_IP_MAX];

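/*
 * A sketch of the DB layout: a per-IP-protocol vector, indexed by
 * sw_if_index, of hash tables keyed by the connected prefix's
 * (normalized) address. A lookup therefore dereferences:
 *
 *   adj_gleans[proto]            - one vector per IP protocol
 *            [sw_if_index]       - vec_validate'd on insert
 *   hash_get_mem(..., nh_addr)   - ip46_address_t -> adj_index_t
 *
 * as done by adj_glean_db_lookup() below.
 */
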
static inline u32
adj_get_glean_node (fib_protocol_t proto)
{
    switch (proto) {
    case FIB_PROTOCOL_IP4:
        return (ip4_glean_node.index);
    case FIB_PROTOCOL_IP6:
        return (ip6_glean_node.index);
    case FIB_PROTOCOL_MPLS:
        break;
    }
    ASSERT(0);
    return (~0);
}

static adj_index_t
adj_glean_db_lookup (fib_protocol_t proto,
                     u32 sw_if_index,
                     const ip46_address_t *nh_addr)
{
    uword *p;

    if (vec_len(adj_gleans[proto]) <= sw_if_index)
        return (ADJ_INDEX_INVALID);

    p = hash_get_mem (adj_gleans[proto][sw_if_index], nh_addr);

    if (p)
        return (p[0]);

    return (ADJ_INDEX_INVALID);
}

static void
adj_glean_db_insert (fib_protocol_t proto,
                     u32 sw_if_index,
                     const ip46_address_t *nh_addr,
                     adj_index_t ai)
{
    vlib_main_t *vm = vlib_get_main();

    vlib_worker_thread_barrier_sync(vm);

    vec_validate(adj_gleans[proto], sw_if_index);

    if (NULL == adj_gleans[proto][sw_if_index])
    {
        adj_gleans[proto][sw_if_index] =
            hash_create_mem (0, sizeof(ip46_address_t), sizeof(adj_index_t));
    }

    hash_set_mem_alloc (&adj_gleans[proto][sw_if_index],
                        nh_addr, ai);

    vlib_worker_thread_barrier_release(vm);
}

static void
adj_glean_db_remove (fib_protocol_t proto,
                     u32 sw_if_index,
                     const ip46_address_t *nh_addr)
{
    vlib_main_t *vm = vlib_get_main();

    vlib_worker_thread_barrier_sync(vm);

    ASSERT(ADJ_INDEX_INVALID != adj_glean_db_lookup(proto, sw_if_index, nh_addr));
    hash_unset_mem_free (&adj_gleans[proto][sw_if_index],
                         nh_addr);

    if (0 == hash_elts(adj_gleans[proto][sw_if_index]))
    {
        hash_free(adj_gleans[proto][sw_if_index]);
        adj_gleans[proto][sw_if_index] = NULL;
    }
    vlib_worker_thread_barrier_release(vm);
}

/*
 * adj_glean_add_or_lock
 *
 * The next_hop address here is used for source address selection in the DP.
 * The glean adj is added to an interface's connected prefix; the next-hop
 * passed here is the local prefix on the same interface.
 */
adj_index_t
adj_glean_add_or_lock (fib_protocol_t proto,
                       vnet_link_t linkt,
                       u32 sw_if_index,
                       const fib_prefix_t *conn)
{
    ip_adjacency_t * adj;
    adj_index_t ai;

    ai = adj_glean_db_lookup(proto, sw_if_index, &conn->fp_addr);

    if (ADJ_INDEX_INVALID == ai)
    {
        adj = adj_alloc(proto);

        adj->lookup_next_index = IP_LOOKUP_NEXT_GLEAN;
        adj->ia_nh_proto = proto;
        adj->ia_link = linkt;
        adj->ia_node_index = adj_get_glean_node(proto);
        ai = adj_get_index(adj);
        adj_lock(ai);

        ASSERT(conn);
        fib_prefix_normalize(conn, &adj->sub_type.glean.rx_pfx);
        adj->rewrite_header.sw_if_index = sw_if_index;
        adj->rewrite_header.data_bytes = 0;
        adj->rewrite_header.max_l3_packet_bytes =
            vnet_sw_interface_get_mtu(vnet_get_main(), sw_if_index,
                                      vnet_link_to_mtu(linkt));

        vnet_update_adjacency_for_sw_interface(vnet_get_main(),
                                               sw_if_index,
                                               ai);

        adj_glean_db_insert(proto, sw_if_index,
                            &adj->sub_type.glean.rx_pfx.fp_addr, ai);
    }
    else
    {
        adj = adj_get(ai);
        adj_lock(ai);
    }

    adj_delegate_adj_created(adj);

    return (ai);
}

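/*
 * Example usage: a minimal sketch only. The prefix value, and the
 * assumption that sw_if_index names a valid interface, are hypothetical;
 * real callers (e.g. the FIB entry/path code) may differ. adj_unlock()
 * is the matching release for the lock taken by adj_glean_add_or_lock().
 *
 *   fib_prefix_t conn = {
 *       .fp_proto = FIB_PROTOCOL_IP4,
 *       .fp_len = 24,
 *       .fp_addr.ip4.as_u32 = clib_host_to_net_u32(0x0a000a01), // 10.0.10.1
 *   };
 *   adj_index_t ai;
 *
 *   ai = adj_glean_add_or_lock(FIB_PROTOCOL_IP4, VNET_LINK_IP4,
 *                              sw_if_index, &conn);
 *   ...
 *   adj_unlock(ai);
 */
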
/**
 * adj_glean_update_rewrite
 */
void
adj_glean_update_rewrite (adj_index_t adj_index)
{
    ip_adjacency_t *adj;

    ASSERT(ADJ_INDEX_INVALID != adj_index);

    adj = adj_get(adj_index);

    vnet_rewrite_for_sw_interface(vnet_get_main(),
                                  adj_fib_proto_2_nd(adj->ia_nh_proto),
                                  adj->rewrite_header.sw_if_index,
                                  adj->ia_node_index,
                                  VNET_REWRITE_FOR_SW_INTERFACE_ADDRESS_BROADCAST,
                                  &adj->rewrite_header,
                                  sizeof (adj->rewrite_data));
}

static adj_walk_rc_t
adj_glean_update_rewrite_walk (adj_index_t ai,
                               void *data)
{
    adj_glean_update_rewrite(ai);

    return (ADJ_WALK_RC_CONTINUE);
}

void
adj_glean_update_rewrite_itf (u32 sw_if_index)
{
    adj_glean_walk (sw_if_index, adj_glean_update_rewrite_walk, NULL);
}

void
adj_glean_walk (u32 sw_if_index,
                adj_walk_cb_t cb,
                void *data)
{
    fib_protocol_t proto;

    FOR_EACH_FIB_IP_PROTOCOL(proto)
    {
        adj_index_t ai, *aip, *ais = NULL;
        ip46_address_t *conn;

        if (vec_len(adj_gleans[proto]) <= sw_if_index ||
            NULL == adj_gleans[proto][sw_if_index])
            continue;

        /*
         * Walk first to collect the indices,
         * then walk the collection. This is safe
         * against modifications of the hash table.
         */
        hash_foreach_mem(conn, ai, adj_gleans[proto][sw_if_index],
        ({
            vec_add1(ais, ai);
        }));

        vec_foreach(aip, ais)
        {
            if (ADJ_WALK_RC_STOP == cb(*aip, data))
                break;
        }
        vec_free(ais);
    }
}

adj_index_t
adj_glean_get (fib_protocol_t proto,
               u32 sw_if_index,
               const ip46_address_t *nh)
{
    if (NULL != nh)
    {
        return adj_glean_db_lookup(proto, sw_if_index, nh);
    }
    else
    {
        ip46_address_t *conn;
        adj_index_t ai;

        if (vec_len(adj_gleans[proto]) <= sw_if_index ||
            NULL == adj_gleans[proto][sw_if_index])
            return (ADJ_INDEX_INVALID);

        hash_foreach_mem(conn, ai, adj_gleans[proto][sw_if_index],
        ({
            return (ai);
        }));
    }
    return (ADJ_INDEX_INVALID);
}

const ip46_address_t *
adj_glean_get_src (fib_protocol_t proto,
                   u32 sw_if_index,
                   const ip46_address_t *nh)
{
    const ip_adjacency_t *adj;
    ip46_address_t *conn;
    adj_index_t ai;

    if (vec_len(adj_gleans[proto]) <= sw_if_index ||
        NULL == adj_gleans[proto][sw_if_index])
        return (NULL);

    fib_prefix_t pfx = {
        .fp_len = fib_prefix_get_host_length(proto),
        .fp_proto = proto,
    };

    if (nh)
        pfx.fp_addr = *nh;

    hash_foreach_mem(conn, ai, adj_gleans[proto][sw_if_index],
    ({
        adj = adj_get(ai);

        if (adj->sub_type.glean.rx_pfx.fp_len > 0)
        {
            /* if no destination is specified just use the glean */
            if (NULL == nh)
                return (&adj->sub_type.glean.rx_pfx.fp_addr);

            /* check the glean covers the destination */
            if (fib_prefix_is_cover(&adj->sub_type.glean.rx_pfx, &pfx))
                return (&adj->sub_type.glean.rx_pfx.fp_addr);
        }
    }));

    return (NULL);
}

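/*
 * Example: source-address selection, as a minimal sketch. 'dst' is a
 * hypothetical destination address and sw_if_index an assumed valid
 * interface; callers on the ip4/ip6 source-selection paths may differ.
 * If no glean covers the destination, fall back to any glean on the
 * interface by passing a NULL next-hop.
 *
 *   const ip46_address_t *src;
 *
 *   src = adj_glean_get_src(FIB_PROTOCOL_IP4, sw_if_index, &dst);
 *   if (NULL == src)
 *       src = adj_glean_get_src(FIB_PROTOCOL_IP4, sw_if_index, NULL);
 */
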
void
adj_glean_remove (ip_adjacency_t *adj)
{
    fib_prefix_t norm;

    fib_prefix_normalize(&adj->sub_type.glean.rx_pfx,
                         &norm);
    adj_glean_db_remove(adj->ia_nh_proto,
                        adj->rewrite_header.sw_if_index,
                        &norm.fp_addr);
}

static adj_walk_rc_t
adj_glean_start_backwalk (adj_index_t ai,
                          void *data)
{
    fib_node_back_walk_ctx_t bw_ctx = *(fib_node_back_walk_ctx_t*) data;

    fib_walk_sync(FIB_NODE_TYPE_ADJ, ai, &bw_ctx);

    return (ADJ_WALK_RC_CONTINUE);
}

static clib_error_t *
adj_glean_interface_state_change (vnet_main_t * vnm,
                                  u32 sw_if_index,
                                  u32 flags)
{
    /*
     * for each glean on the interface trigger a walk back to the children
     */
    fib_node_back_walk_ctx_t bw_ctx = {
        .fnbw_reason = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP ?
                        FIB_NODE_BW_REASON_FLAG_INTERFACE_UP :
                        FIB_NODE_BW_REASON_FLAG_INTERFACE_DOWN),
    };

    adj_glean_walk (sw_if_index, adj_glean_start_backwalk, &bw_ctx);

    return (NULL);
}

VNET_SW_INTERFACE_ADMIN_UP_DOWN_FUNCTION(adj_glean_interface_state_change);

/**
 * @brief Invoked on each SW interface of a HW interface when the
 * HW interface state changes
 */
static walk_rc_t
adj_nbr_hw_sw_interface_state_change (vnet_main_t * vnm,
                                      u32 sw_if_index,
                                      void *arg)
{
    adj_glean_interface_state_change(vnm, sw_if_index, (uword) arg);

    return (WALK_CONTINUE);
}

/**
 * @brief Registered callback for HW interface state changes
 */
static clib_error_t *
adj_glean_hw_interface_state_change (vnet_main_t * vnm,
                                     u32 hw_if_index,
                                     u32 flags)
{
    /*
     * walk SW interfaces on the HW
     */
    uword sw_flags;

    sw_flags = ((flags & VNET_HW_INTERFACE_FLAG_LINK_UP) ?
                VNET_SW_INTERFACE_FLAG_ADMIN_UP :
                0);

    vnet_hw_interface_walk_sw(vnm, hw_if_index,
                              adj_nbr_hw_sw_interface_state_change,
                              (void*) sw_flags);

    return (NULL);
}

VNET_HW_INTERFACE_LINK_UP_DOWN_FUNCTION(
    adj_glean_hw_interface_state_change);

static clib_error_t *
adj_glean_interface_delete (vnet_main_t * vnm,
                            u32 sw_if_index,
                            u32 is_add)
{
    if (is_add)
    {
        /*
         * Not interested in interface additions. We will not back walk
         * to resolve paths through newly added interfaces. Why? The control
         * plane should have the brains to add interfaces first, then routes.
         * So the case where there are paths with an interface that matches
         * one just created is the case where the path resolved through an
         * interface that was deleted, and still has not been removed. The
         * fact that an interface has just been added is NO GUARANTEE that
         * the interface being added now, even though it may have the same
         * sw_if_index, is the same interface that the path needs. So tough!
         * If the control plane wants these routes to resolve it needs to
         * remove and add them again.
         */
        return (NULL);
    }

    /*
     * for each glean on the interface trigger a walk back to the children
     */
    fib_node_back_walk_ctx_t bw_ctx = {
        .fnbw_reason = FIB_NODE_BW_REASON_FLAG_INTERFACE_DELETE,
    };

    adj_glean_walk (sw_if_index, adj_glean_start_backwalk, &bw_ctx);

    return (NULL);
}

VNET_SW_INTERFACE_ADD_DEL_FUNCTION(adj_glean_interface_delete);

u8*
format_adj_glean (u8* s, va_list *ap)
{
    index_t index = va_arg(*ap, index_t);
    CLIB_UNUSED(u32 indent) = va_arg(*ap, u32);
    ip_adjacency_t * adj = adj_get(index);

    s = format(s, "%U-glean: [src:%U] %U",
               format_fib_protocol, adj->ia_nh_proto,
               format_fib_prefix, &adj->sub_type.glean.rx_pfx,
               format_vnet_rewrite,
               &adj->rewrite_header, sizeof (adj->rewrite_data), 0);

    return (s);
}

u32
adj_glean_db_size (void)
{
    fib_protocol_t proto;
    u32 sw_if_index = 0;
    u64 count = 0;

    FOR_EACH_FIB_IP_PROTOCOL(proto)
    {
        vec_foreach_index(sw_if_index, adj_gleans[proto])
        {
            if (NULL != adj_gleans[proto][sw_if_index])
            {
                count += hash_elts(adj_gleans[proto][sw_if_index]);
            }
        }
    }
    return (count);
}

static void
adj_dpo_lock (dpo_id_t *dpo)
{
    adj_lock(dpo->dpoi_index);
}
static void
adj_dpo_unlock (dpo_id_t *dpo)
{
    adj_unlock(dpo->dpoi_index);
}

const static dpo_vft_t adj_glean_dpo_vft = {
    .dv_lock = adj_dpo_lock,
    .dv_unlock = adj_dpo_unlock,
    .dv_format = format_adj_glean,
    .dv_get_urpf = adj_dpo_get_urpf,
};

/**
 * @brief The per-protocol VLIB graph nodes that are assigned to a glean
 *        object.
 *
 * this means that these graph nodes are ones from which a glean is the
 * parent object in the DPO-graph.
 */
const static char* const glean_ip4_nodes[] =
{
    "ip4-glean",
    NULL,
};
const static char* const glean_ip6_nodes[] =
{
    "ip6-glean",
    NULL,
};

const static char* const * const glean_nodes[DPO_PROTO_NUM] =
{
    [DPO_PROTO_IP4] = glean_ip4_nodes,
    [DPO_PROTO_IP6] = glean_ip6_nodes,
    [DPO_PROTO_MPLS] = NULL,
};

void
adj_glean_module_init (void)
{
    dpo_register(DPO_ADJACENCY_GLEAN, &adj_glean_dpo_vft, glean_nodes);
}