/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/dpo/load_balance.h>
#include <vnet/dpo/load_balance_map.h>
#include <vnet/dpo/drop_dpo.h>
#include <vppinfra/math.h>              /* for fabs */
#include <vnet/adj/adj.h>
#include <vnet/adj/adj_internal.h>
#include <vnet/fib/fib_urpf_list.h>
#include <vnet/bier/bier_fwd.h>
#include <vnet/fib/mpls_fib.h>
#include <vnet/ip/ip4_inlines.h>
#include <vnet/ip/ip6_inlines.h>

// clang-format off

/*
 * distribution error tolerance for load-balancing
 */
const f64 multipath_next_hop_error_tolerance = 0.1;

static const char *load_balance_attr_names[] = LOAD_BALANCE_ATTR_NAMES;

/**
 * the logger
 */
vlib_log_class_t load_balance_logger;

#define LB_DBG(_lb, _fmt, _args...)                                     \
{                                                                       \
    vlib_log_debug(load_balance_logger,                                 \
                   "lb:[%U]:" _fmt,                                     \
                   format_load_balance, load_balance_get_index(_lb),    \
                   LOAD_BALANCE_FORMAT_NONE,                            \
                   ##_args);                                            \
}

/**
 * Pool of all DPOs. It's not static so the DP can have fast access
 */
load_balance_t *load_balance_pool;

/**
 * The one instance of load-balance main
 */
load_balance_main_t load_balance_main = {
    .lbm_to_counters = {
        .name = "route-to",
        .stat_segment_name = "/net/route/to",
    },
    .lbm_via_counters = {
        .name = "route-via",
        .stat_segment_name = "/net/route/via",
    }
};
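
/*
 * The to/via pairs above are combined counters exported in the stats segment
 * under the names given. As a sketch (assuming the vpp_get_stats client is
 * built and the default stats socket is in use) they can be read externally
 * with something like:
 *
 *   vpp_get_stats dump /net/route/to
 *   vpp_get_stats dump /net/route/via
 */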

f64
load_balance_get_multipath_tolerance (void)
{
    return (multipath_next_hop_error_tolerance);
}

static inline index_t
load_balance_get_index (const load_balance_t *lb)
{
    return (lb - load_balance_pool);
}

static inline dpo_id_t*
load_balance_get_buckets (load_balance_t *lb)
{
    if (LB_HAS_INLINE_BUCKETS(lb))
    {
        return (lb->lb_buckets_inline);
    }
    else
    {
        return (lb->lb_buckets);
    }
}

static load_balance_t *
load_balance_alloc_i (void)
{
    load_balance_t *lb;
    u8 need_barrier_sync = 0;
    vlib_main_t *vm = vlib_get_main();
    ASSERT (vm->thread_index == 0);

    need_barrier_sync = pool_get_will_expand (load_balance_pool);

    if (need_barrier_sync)
        vlib_worker_thread_barrier_sync (vm);

    pool_get_aligned(load_balance_pool, lb, CLIB_CACHE_LINE_BYTES);
    clib_memset(lb, 0, sizeof(*lb));

    lb->lb_map = INDEX_INVALID;
    lb->lb_urpf = INDEX_INVALID;

    if (need_barrier_sync == 0)
    {
        need_barrier_sync += vlib_validate_combined_counter_will_expand
            (&(load_balance_main.lbm_to_counters),
             load_balance_get_index(lb));
        need_barrier_sync += vlib_validate_combined_counter_will_expand
            (&(load_balance_main.lbm_via_counters),
             load_balance_get_index(lb));
        if (need_barrier_sync)
            vlib_worker_thread_barrier_sync (vm);
    }

    vlib_validate_combined_counter(&(load_balance_main.lbm_to_counters),
                                   load_balance_get_index(lb));
    vlib_validate_combined_counter(&(load_balance_main.lbm_via_counters),
                                   load_balance_get_index(lb));
    vlib_zero_combined_counter(&(load_balance_main.lbm_to_counters),
                               load_balance_get_index(lb));
    vlib_zero_combined_counter(&(load_balance_main.lbm_via_counters),
                               load_balance_get_index(lb));

    if (need_barrier_sync)
        vlib_worker_thread_barrier_release (vm);

    return (lb);
}

static u8*
load_balance_format (index_t lbi,
                     load_balance_format_flags_t flags,
                     u32 indent,
                     u8 *s)
{
    vlib_counter_t to, via;
    load_balance_t *lb;
    dpo_id_t *buckets;
    u32 i;

    lb = load_balance_get(lbi);
    vlib_get_combined_counter(&(load_balance_main.lbm_to_counters), lbi, &to);
    vlib_get_combined_counter(&(load_balance_main.lbm_via_counters), lbi, &via);
    buckets = load_balance_get_buckets(lb);

    s = format(s, "%U: ", format_dpo_type, DPO_LOAD_BALANCE);
    s = format(s, "[proto:%U ", format_dpo_proto, lb->lb_proto);
    s = format(s, "index:%d buckets:%d ", lbi, lb->lb_n_buckets);
    s = format(s, "uRPF:%d ", lb->lb_urpf);
    if (lb->lb_flags)
    {
        load_balance_attr_t attr;

        s = format(s, "flags:[");

        FOR_EACH_LOAD_BALANCE_ATTR(attr)
        {
            if (lb->lb_flags & (1 << attr))
            {
                s = format (s, "%s", load_balance_attr_names[attr]);
            }
        }
        s = format(s, "] ");
    }
    s = format(s, "to:[%Ld:%Ld]", to.packets, to.bytes);
    if (0 != via.packets)
    {
        s = format(s, " via:[%Ld:%Ld]",
                   via.packets, via.bytes);
    }
    s = format(s, "]");

    if (INDEX_INVALID != lb->lb_map)
    {
        s = format(s, "\n%U%U",
                   format_white_space, indent+4,
                   format_load_balance_map, lb->lb_map, indent+4);
    }
    for (i = 0; i < lb->lb_n_buckets; i++)
    {
        s = format(s, "\n%U[%d] %U",
                   format_white_space, indent+2,
                   i,
                   format_dpo_id,
                   &buckets[i], indent+6);
    }
    return (s);
}

u8*
format_load_balance (u8 * s, va_list * args)
{
    index_t lbi = va_arg(*args, index_t);
    load_balance_format_flags_t flags = va_arg(*args, load_balance_format_flags_t);

    return (load_balance_format(lbi, flags, 0, s));
}

static u8*
format_load_balance_dpo (u8 * s, va_list * args)
{
    index_t lbi = va_arg(*args, index_t);
    u32 indent = va_arg(*args, u32);

    return (load_balance_format(lbi, LOAD_BALANCE_FORMAT_DETAIL, indent, s));
}

flow_hash_config_t
load_balance_get_default_flow_hash (dpo_proto_t lb_proto)
{
    switch (lb_proto)
    {
    case DPO_PROTO_IP4:
    case DPO_PROTO_IP6:
        return (IP_FLOW_HASH_DEFAULT);

    case DPO_PROTO_MPLS:
        return (MPLS_FLOW_HASH_DEFAULT);

    case DPO_PROTO_ETHERNET:
    case DPO_PROTO_BIER:
    case DPO_PROTO_NSH:
        break;
    }

    return (0);
}

static load_balance_t *
load_balance_create_i (u32 num_buckets,
                       dpo_proto_t lb_proto,
                       flow_hash_config_t fhc)
{
    load_balance_t *lb;

    ASSERT (num_buckets <= LB_MAX_BUCKETS);

    lb = load_balance_alloc_i();
    lb->lb_hash_config = fhc;
    lb->lb_n_buckets = num_buckets;
    lb->lb_n_buckets_minus_1 = num_buckets-1;
    lb->lb_proto = lb_proto;

    if (!LB_HAS_INLINE_BUCKETS(lb))
    {
        vec_validate_aligned(lb->lb_buckets,
                             lb->lb_n_buckets - 1,
                             CLIB_CACHE_LINE_BYTES);
    }

    LB_DBG(lb, "create");

    return (lb);
}

index_t
load_balance_create (u32 n_buckets,
                     dpo_proto_t lb_proto,
                     flow_hash_config_t fhc)
{
    return (load_balance_get_index(load_balance_create_i(n_buckets, lb_proto, fhc)));
}
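
/*
 * Usage sketch (not compiled): create a 2-bucket IPv4 load-balance with the
 * default flow-hash and point both buckets at a parent DPO. The parent
 * 'via_dpo' is assumed to have been resolved elsewhere (e.g. from an
 * adjacency) and is illustrative only.
 *
 *   index_t lbi;
 *
 *   lbi = load_balance_create(2, DPO_PROTO_IP4,
 *                             load_balance_get_default_flow_hash(DPO_PROTO_IP4));
 *   load_balance_set_bucket(lbi, 0, &via_dpo);
 *   load_balance_set_bucket(lbi, 1, &via_dpo);
 */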

static inline void
load_balance_set_bucket_i (load_balance_t *lb,
                           u32 bucket,
                           dpo_id_t *buckets,
                           const dpo_id_t *next)
{
    dpo_stack(DPO_LOAD_BALANCE, lb->lb_proto, &buckets[bucket], next);
}

void
load_balance_set_bucket (index_t lbi,
                         u32 bucket,
                         const dpo_id_t *next)
{
    load_balance_t *lb;
    dpo_id_t *buckets;

    lb = load_balance_get(lbi);
    buckets = load_balance_get_buckets(lb);

    ASSERT(bucket < lb->lb_n_buckets);

    load_balance_set_bucket_i(lb, bucket, buckets, next);
}

int
load_balance_is_drop (const dpo_id_t *dpo)
{
    load_balance_t *lb;

    if (DPO_LOAD_BALANCE != dpo->dpoi_type)
        return (0);

    lb = load_balance_get(dpo->dpoi_index);

    if (1 == lb->lb_n_buckets)
    {
        return (dpo_is_drop(load_balance_get_bucket_i(lb, 0)));
    }
    return (0);
}

u16
load_balance_n_buckets (index_t lbi)
{
    load_balance_t *lb;

    lb = load_balance_get(lbi);

    return (lb->lb_n_buckets);
}

void
load_balance_set_fib_entry_flags (index_t lbi,
                                  fib_entry_flag_t flags)
{
    load_balance_t *lb;

    lb = load_balance_get(lbi);
    lb->lb_fib_entry_flags = flags;
}


void
load_balance_set_urpf (index_t lbi,
                       index_t urpf)
{
    load_balance_t *lb;
    index_t old;

    lb = load_balance_get(lbi);

    /*
     * packets in flight will see this change, but it's atomic, so :P
     */
    old = lb->lb_urpf;
    lb->lb_urpf = urpf;

    fib_urpf_list_unlock(old);
    fib_urpf_list_lock(urpf);
}

index_t
load_balance_get_urpf (index_t lbi)
{
    load_balance_t *lb;

    lb = load_balance_get(lbi);

    return (lb->lb_urpf);
}

const dpo_id_t *
load_balance_get_bucket (index_t lbi,
                         u32 bucket)
{
    load_balance_t *lb;

    lb = load_balance_get(lbi);

    return (load_balance_get_bucket_i(lb, bucket));
}

static int
next_hop_sort_by_weight (const load_balance_path_t * n1,
                         const load_balance_path_t * n2)
{
    return ((int) n1->path_weight - (int) n2->path_weight);
}

/* Given next hop vector is over-written with normalized one with sorted weights and
   with weights corresponding to the number of adjacencies for each next hop.
   Returns number of adjacencies in block. */
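/* Worked example (a sketch of the arithmetic below, assuming the default 0.1
   tolerance): two paths with weights 1 and 2 (sum 3) are tried against
   power-of-2 block sizes; the rounding error is ~0.67 with 2 buckets (> 0.2)
   and with 4 buckets (> 0.4), but with 8 buckets the rounded shares of 5 and 3
   give an error of ~0.67 <= 0.8, so the paths normalize to 5:3 over 8 buckets.
   Three equal-weight paths normalize to 6:5:5 over 16 buckets the same way. */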
u32
ip_multipath_normalize_next_hops (const load_balance_path_t * raw_next_hops,
                                  load_balance_path_t ** normalized_next_hops,
                                  u32 *sum_weight_in,
                                  f64 multipath_next_hop_error_tolerance)
{
    load_balance_path_t * nhs;
    uword n_nhs, n_adj, n_adj_left, i, sum_weight;
    f64 norm, error;

    n_nhs = vec_len (raw_next_hops);
    ASSERT (n_nhs > 0);
    if (n_nhs == 0)
        return 0;

    /* Allocate enough space for 2 copies; we'll use second copy to save original weights. */
    nhs = *normalized_next_hops;
    vec_validate (nhs, 2*n_nhs - 1);

    /* Fast path: 1 next hop in block. */
    n_adj = n_nhs;
    if (n_nhs == 1)
    {
        nhs[0] = raw_next_hops[0];
        nhs[0].path_weight = 1;
        vec_set_len (nhs, 1);
        sum_weight = 1;
        goto done;
    }

    else if (n_nhs == 2)
    {
        int cmp = next_hop_sort_by_weight (&raw_next_hops[0], &raw_next_hops[1]) < 0;

        /* Fast sort. */
        nhs[0] = raw_next_hops[cmp];
        nhs[1] = raw_next_hops[cmp ^ 1];

        /* Fast path: equal cost multipath with 2 next hops. */
        if (nhs[0].path_weight == nhs[1].path_weight)
        {
            nhs[0].path_weight = nhs[1].path_weight = 1;
            vec_set_len (nhs, 2);
            sum_weight = 2;
            goto done;
        }
    }
    else
    {
        clib_memcpy_fast (nhs, raw_next_hops, n_nhs * sizeof (raw_next_hops[0]));
        qsort (nhs, n_nhs, sizeof (nhs[0]), (void *) next_hop_sort_by_weight);
    }

    /* Find total weight to normalize weights. */
    sum_weight = 0;
    for (i = 0; i < n_nhs; i++)
        sum_weight += nhs[i].path_weight;

    /* In the unlikely case that all weights are given as 0, set them all to 1. */
    if (sum_weight == 0)
    {
        for (i = 0; i < n_nhs; i++)
            nhs[i].path_weight = 1;
        sum_weight = n_nhs;
    }

    /* Save copies of all next hop weights to avoid being overwritten in loop below. */
    for (i = 0; i < n_nhs; i++)
        nhs[n_nhs + i].path_weight = nhs[i].path_weight;

    /* Try larger and larger power of 2 sized adjacency blocks until we
       find one where traffic flows to within the specified tolerance of the
       requested weights. */
    for (n_adj = clib_min(max_pow2 (n_nhs), LB_MAX_BUCKETS); ; n_adj *= 2)
    {
        ASSERT (n_adj <= LB_MAX_BUCKETS);
        error = 0;

        norm = n_adj / ((f64) sum_weight);
        n_adj_left = n_adj;
        for (i = 0; i < n_nhs; i++)
        {
            f64 nf = nhs[n_nhs + i].path_weight * norm; /* use saved weights */
            word n = flt_round_nearest (nf);

            n = n > n_adj_left ? n_adj_left : n;
            n_adj_left -= n;
            error += fabs (nf - n);
            nhs[i].path_weight = n;

            if (0 == nhs[i].path_weight)
            {
                /*
                 * when the weight skew is high (norm is small) and n == nf.
                 * without this correction the path with a low weight would have
                 * no representation in the load-balance - don't want that.
                 * If the weight skew is that high then the load-balance needs
                 * many buckets to allow it. pays ya money takes ya choice.
                 */
                error = n_adj;
                break;
            }
        }

        nhs[0].path_weight += n_adj_left;

        /* Is the average error per adjacency within tolerance for this size
         * of adjacency block, or have we reached the maximum number of
         * buckets we support? */
        if (error <= multipath_next_hop_error_tolerance*n_adj ||
            n_adj >= LB_MAX_BUCKETS)
        {
            if (i < n_nhs)
            {
                /* Truncate any next hops in excess */
                vlib_log_err(load_balance_logger,
                             "Too many paths for load-balance, truncating %d -> %d",
                             n_nhs, i);
                for (int j = i; j < n_nhs; j++)
                    dpo_reset (&vec_elt(nhs, j).path_dpo);
            }
            vec_set_len (nhs, i);
            break;
        }
    }

done:
    /* Save vector for next call. */
    *normalized_next_hops = nhs;
    *sum_weight_in = sum_weight;
    return n_adj;
}

static load_balance_path_t *
load_balance_multipath_next_hop_fixup (const load_balance_path_t *nhs,
                                       dpo_proto_t drop_proto)
{
    if (0 == vec_len(nhs))
    {
        load_balance_path_t *new_nhs = NULL, *nh;

        /*
         * we need something for the load-balance. so use the drop
         */
        vec_add2(new_nhs, nh, 1);

        nh->path_weight = 1;
        dpo_copy(&nh->path_dpo, drop_dpo_get(drop_proto));

        return (new_nhs);
    }

    return (NULL);
}

/*
 * Fill in adjacencies in block based on corresponding
 * next hop adjacencies.
 */
static void
load_balance_fill_buckets_norm (load_balance_t *lb,
                                load_balance_path_t *nhs,
                                dpo_id_t *buckets,
                                u32 n_buckets)
{
    load_balance_path_t *nh;
    u16 ii, bucket;

    bucket = 0;

    /*
     * the next-hops have normalised weights. that means their sum is the number
     * of buckets we need to fill.
     */
    vec_foreach (nh, nhs)
    {
        for (ii = 0; ii < nh->path_weight; ii++)
        {
            ASSERT(bucket < n_buckets);
            load_balance_set_bucket_i(lb, bucket++, buckets, &nh->path_dpo);
        }
    }
}
static void
load_balance_fill_buckets_sticky (load_balance_t *lb,
                                  load_balance_path_t *nhs,
                                  dpo_id_t *buckets,
                                  u32 n_buckets)
{
    load_balance_path_t *nh, *fwding_paths;
    u16 ii, bucket, fpath;

    fpath = bucket = 0;
    fwding_paths = NULL;

    vec_foreach (nh, nhs)
    {
        if (!dpo_is_drop(&nh->path_dpo))
        {
            vec_add1(fwding_paths, *nh);
        }
    }
    if (vec_len(fwding_paths) == 0)
        fwding_paths = vec_dup(nhs);

    /*
     * the next-hops have normalised weights. that means their sum is the number
     * of buckets we need to fill.
     */
    vec_foreach (nh, nhs)
    {
        for (ii = 0; ii < nh->path_weight; ii++)
        {
            ASSERT(bucket < n_buckets);
            if (!dpo_is_drop(&nh->path_dpo))
            {
                load_balance_set_bucket_i(lb, bucket++, buckets, &nh->path_dpo);
            }
            else
            {
                /* fill the buckets from the next up path */
                load_balance_set_bucket_i(lb, bucket++, buckets, &fwding_paths[fpath].path_dpo);
                ASSERT(vec_len(fwding_paths) > 0);
                fpath = (fpath + 1) % vec_len(fwding_paths);
            }
        }
    }

    vec_free(fwding_paths);
}

static void
load_balance_fill_buckets (load_balance_t *lb,
                           load_balance_path_t *nhs,
                           dpo_id_t *buckets,
                           u32 n_buckets,
                           load_balance_flags_t flags)
{
    if (flags & LOAD_BALANCE_FLAG_STICKY)
    {
        load_balance_fill_buckets_sticky(lb, nhs, buckets, n_buckets);
    }
    else
    {
        load_balance_fill_buckets_norm(lb, nhs, buckets, n_buckets);
    }
}
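
/*
 * Example (sketch): with two normalised paths A and B of weight 2 each, the
 * 'norm' fill writes [A, A, B, B]. If B's DPO is a drop and
 * LOAD_BALANCE_FLAG_STICKY is set, the sticky fill keeps A in its buckets and
 * borrows B's buckets from the remaining forwarding paths, giving
 * [A, A, A, A]; when B recovers it takes back the same bucket positions, so
 * flows hashed onto A's buckets are not disturbed.
 */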

static inline void
load_balance_set_n_buckets (load_balance_t *lb,
                            u32 n_buckets)
{
    ASSERT (n_buckets <= LB_MAX_BUCKETS);
    lb->lb_n_buckets = n_buckets;
    lb->lb_n_buckets_minus_1 = n_buckets-1;
}

void
load_balance_multipath_update (const dpo_id_t *dpo,
                               const load_balance_path_t * raw_nhs,
                               load_balance_flags_t flags)
{
    load_balance_path_t *nh, *nhs, *fixed_nhs;
    u32 sum_of_weights, n_buckets, ii;
    index_t lbmi, old_lbmi;
    load_balance_t *lb;
    dpo_id_t *tmp_dpo;

    nhs = NULL;

    ASSERT(DPO_LOAD_BALANCE == dpo->dpoi_type);
    lb = load_balance_get(dpo->dpoi_index);
    lb->lb_flags = flags;
    fixed_nhs = load_balance_multipath_next_hop_fixup(raw_nhs, lb->lb_proto);
    n_buckets =
        ip_multipath_normalize_next_hops((NULL == fixed_nhs ?
                                          raw_nhs :
                                          fixed_nhs),
                                         &nhs,
                                         &sum_of_weights,
                                         multipath_next_hop_error_tolerance);

    /*
     * Save the old load-balance map used, and get a new one if required.
     */
    old_lbmi = lb->lb_map;
    if (flags & LOAD_BALANCE_FLAG_USES_MAP)
    {
        lbmi = load_balance_map_add_or_lock(n_buckets, sum_of_weights, nhs);
    }
    else
    {
        lbmi = INDEX_INVALID;
    }

    if (0 == lb->lb_n_buckets)
    {
        /*
         * first time initialisation. no packets inflight, so we can write
         * at leisure.
         */
        load_balance_set_n_buckets(lb, n_buckets);

        if (!LB_HAS_INLINE_BUCKETS(lb))
            vec_validate_aligned(lb->lb_buckets,
                                 lb->lb_n_buckets - 1,
                                 CLIB_CACHE_LINE_BYTES);

        load_balance_fill_buckets(lb, nhs,
                                  load_balance_get_buckets(lb),
                                  n_buckets, flags);
        lb->lb_map = lbmi;
    }
    else
    {
        /*
         * This is a modification of an existing load-balance.
         * We need to ensure that packets inflight see a consistent state, that
         * is the number of reported buckets the LB has (read from
         * lb_n_buckets_minus_1) is not more than it actually has. So if the
         * number of buckets is increasing, we must update the bucket array first,
         * then the reported number. vice-versa if the number of buckets goes down.
         */
        if (n_buckets == lb->lb_n_buckets)
        {
            /*
             * no change in the number of buckets. we can simply fill what
             * is new over what is old.
             */
            load_balance_fill_buckets(lb, nhs,
                                      load_balance_get_buckets(lb),
                                      n_buckets, flags);
            lb->lb_map = lbmi;
        }
        else if (n_buckets > lb->lb_n_buckets)
        {
            /*
             * we have more buckets. the old load-balance map (if there is one)
             * will remain valid, i.e. mapping to indices within range, so we
             * update it last.
             */
            if (n_buckets > LB_NUM_INLINE_BUCKETS &&
                lb->lb_n_buckets <= LB_NUM_INLINE_BUCKETS)
            {
                /*
                 * the new increased number of buckets is crossing the threshold
                 * from the inline storage to out-line. Alloc the outline buckets
                 * first, then fixup the number. then reset the inlines.
                 */
                ASSERT(NULL == lb->lb_buckets);
                vec_validate_aligned(lb->lb_buckets,
                                     n_buckets - 1,
                                     CLIB_CACHE_LINE_BYTES);

                load_balance_fill_buckets(lb, nhs,
                                          lb->lb_buckets,
                                          n_buckets, flags);
                CLIB_MEMORY_BARRIER();
                load_balance_set_n_buckets(lb, n_buckets);

                CLIB_MEMORY_BARRIER();

                for (ii = 0; ii < LB_NUM_INLINE_BUCKETS; ii++)
                {
                    dpo_reset(&lb->lb_buckets_inline[ii]);
                }
            }
            else
            {
                if (n_buckets <= LB_NUM_INLINE_BUCKETS)
                {
                    /*
                     * we are not crossing the threshold and it's still inline buckets.
                     * we can write the new on the old.
                     */
                    load_balance_fill_buckets(lb, nhs,
                                              load_balance_get_buckets(lb),
                                              n_buckets, flags);
                    CLIB_MEMORY_BARRIER();
                    load_balance_set_n_buckets(lb, n_buckets);
                }
                else
                {
                    /*
                     * we are not crossing the threshold. We need a new bucket array to
                     * hold the increased number of choices.
                     */
                    dpo_id_t *new_buckets, *old_buckets, *tmp_dpo;

                    new_buckets = NULL;
                    old_buckets = load_balance_get_buckets(lb);

                    vec_validate_aligned(new_buckets,
                                         n_buckets - 1,
                                         CLIB_CACHE_LINE_BYTES);

                    load_balance_fill_buckets(lb, nhs, new_buckets,
                                              n_buckets, flags);
                    CLIB_MEMORY_BARRIER();
                    lb->lb_buckets = new_buckets;
                    CLIB_MEMORY_BARRIER();
                    load_balance_set_n_buckets(lb, n_buckets);

                    vec_foreach(tmp_dpo, old_buckets)
                    {
                        dpo_reset(tmp_dpo);
                    }
                    vec_free(old_buckets);
                }
            }

            /*
             * buckets fixed. ready for the MAP update.
             */
            lb->lb_map = lbmi;
        }
        else
        {
            /*
             * bucket size shrinkage.
             * Any map we have will be based on the old
             * larger number of buckets, so will be translating to indices
             * out of range. So the new MAP must be installed first.
             */
            lb->lb_map = lbmi;
            CLIB_MEMORY_BARRIER();


            if (n_buckets <= LB_NUM_INLINE_BUCKETS &&
                lb->lb_n_buckets > LB_NUM_INLINE_BUCKETS)
            {
                /*
                 * the new decreased number of buckets is crossing the threshold
                 * from out-line storage to inline:
                 *   1 - Fill the inline buckets,
                 *   2 - fixup the number (at this point the inline buckets are
                 *       used).
                 *   3 - free the outline buckets
                 */
                load_balance_fill_buckets(lb, nhs,
                                          lb->lb_buckets_inline,
                                          n_buckets, flags);
                CLIB_MEMORY_BARRIER();
                load_balance_set_n_buckets(lb, n_buckets);
                CLIB_MEMORY_BARRIER();

                vec_foreach(tmp_dpo, lb->lb_buckets)
                {
                    dpo_reset(tmp_dpo);
                }
                vec_free(lb->lb_buckets);
            }
            else
            {
                /*
                 * not crossing the threshold.
                 *  1 - update the number to the smaller size
                 *  2 - write the new buckets
                 *  3 - reset those no longer used.
                 */
                dpo_id_t *buckets;
                u32 old_n_buckets;

                old_n_buckets = lb->lb_n_buckets;
                buckets = load_balance_get_buckets(lb);

                load_balance_set_n_buckets(lb, n_buckets);
                CLIB_MEMORY_BARRIER();

                load_balance_fill_buckets(lb, nhs, buckets,
                                          n_buckets, flags);

                for (ii = n_buckets; ii < old_n_buckets; ii++)
                {
                    dpo_reset(&buckets[ii]);
                }
            }
        }
    }

    vec_foreach (nh, nhs)
    {
        dpo_reset(&nh->path_dpo);
    }
    vec_free(nhs);
    vec_free(fixed_nhs);

    load_balance_map_unlock(old_lbmi);
}

static void
load_balance_lock (dpo_id_t *dpo)
{
    load_balance_t *lb;

    lb = load_balance_get(dpo->dpoi_index);

    lb->lb_locks++;
}

static void
load_balance_destroy (load_balance_t *lb)
{
    dpo_id_t *buckets;
    int i;

    buckets = load_balance_get_buckets(lb);

    for (i = 0; i < lb->lb_n_buckets; i++)
    {
        dpo_reset(&buckets[i]);
    }

    LB_DBG(lb, "destroy");
    if (!LB_HAS_INLINE_BUCKETS(lb))
    {
        vec_free(lb->lb_buckets);
    }

    fib_urpf_list_unlock(lb->lb_urpf);
    load_balance_map_unlock(lb->lb_map);

    pool_put(load_balance_pool, lb);
}

static void
load_balance_unlock (dpo_id_t *dpo)
{
    load_balance_t *lb;

    lb = load_balance_get(dpo->dpoi_index);

    lb->lb_locks--;

    if (0 == lb->lb_locks)
    {
        load_balance_destroy(lb);
    }
}

static void
load_balance_mem_show (void)
{
    fib_show_memory_usage("load-balance",
                          pool_elts(load_balance_pool),
                          pool_len(load_balance_pool),
                          sizeof(load_balance_t));
    load_balance_map_show_mem();
}

static u16
load_balance_dpo_get_mtu (const dpo_id_t *dpo)
{
    const dpo_id_t *buckets;
    load_balance_t *lb;
    u16 i, mtu = 0xffff;

    lb = load_balance_get(dpo->dpoi_index);
    buckets = load_balance_get_buckets(lb);

    for (i = 0; i < lb->lb_n_buckets; i++)
    {
        mtu = clib_min (mtu, dpo_get_mtu (&buckets[i]));
    }

    return (mtu);
}

const static dpo_vft_t lb_vft = {
    .dv_lock = load_balance_lock,
    .dv_unlock = load_balance_unlock,
    .dv_format = format_load_balance_dpo,
    .dv_mem_show = load_balance_mem_show,
    .dv_get_mtu = load_balance_dpo_get_mtu,
};

/**
 * @brief The per-protocol VLIB graph nodes that are assigned to a load-balance
 *        object.
 *
 * this means that these graph nodes are ones from which a load-balance is the
 * parent object in the DPO-graph.
 *
 * We do not list all the load-balance nodes, such as the *-lookup. instead
 * we are relying on the correct use of the .sibling_of field when setting
 * up these sibling nodes.
 */
const static char* const load_balance_ip4_nodes[] =
{
    "ip4-load-balance",
    NULL,
};
const static char* const load_balance_ip6_nodes[] =
{
    "ip6-load-balance",
    NULL,
};
const static char* const load_balance_mpls_nodes[] =
{
    "mpls-load-balance",
    NULL,
};
const static char* const load_balance_l2_nodes[] =
{
    "l2-load-balance",
    NULL,
};
const static char* const load_balance_nsh_nodes[] =
{
    "nsh-load-balance",
    NULL
};
const static char* const load_balance_bier_nodes[] =
{
    "bier-load-balance",
    NULL,
};
const static char* const * const load_balance_nodes[DPO_PROTO_NUM] =
{
    [DPO_PROTO_IP4] = load_balance_ip4_nodes,
    [DPO_PROTO_IP6] = load_balance_ip6_nodes,
    [DPO_PROTO_MPLS] = load_balance_mpls_nodes,
    [DPO_PROTO_ETHERNET] = load_balance_l2_nodes,
    [DPO_PROTO_NSH] = load_balance_nsh_nodes,
    [DPO_PROTO_BIER] = load_balance_bier_nodes,
};

void
load_balance_module_init (void)
{
    index_t lbi;

    dpo_register(DPO_LOAD_BALANCE, &lb_vft, load_balance_nodes);

    /*
     * Special LB with index zero. we need to define this since the v4 mtrie
     * assumes an index of 0 implies the ply is empty. therefore all 'real'
     * adjs need a non-zero index.
     * This should never be used, but just in case, stack it on a drop.
     */
    lbi = load_balance_create(1, DPO_PROTO_IP4, 0);
    load_balance_set_bucket(lbi, 0, drop_dpo_get(DPO_PROTO_IP4));

    load_balance_logger =
        vlib_log_register_class("dpo", "load-balance");

    load_balance_map_module_init();
}

static clib_error_t *
load_balance_show (vlib_main_t * vm,
                   unformat_input_t * input,
                   vlib_cli_command_t * cmd)
{
    index_t lbi = INDEX_INVALID;

    while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
    {
        if (unformat (input, "%d", &lbi))
            ;
        else
            break;
    }

    if (INDEX_INVALID != lbi)
    {
        if (pool_is_free_index(load_balance_pool, lbi))
        {
            vlib_cli_output (vm, "no such load-balance:%d", lbi);
        }
        else
        {
            vlib_cli_output (vm, "%U", format_load_balance, lbi,
                             LOAD_BALANCE_FORMAT_DETAIL);
        }
    }
    else
    {
        load_balance_t *lb;

        pool_foreach (lb, load_balance_pool)
        {
            vlib_cli_output (vm, "%U", format_load_balance,
                             load_balance_get_index(lb),
                             LOAD_BALANCE_FORMAT_NONE);
        }
    }

    return 0;
}

VLIB_CLI_COMMAND (load_balance_show_command, static) = {
    .path = "show load-balance",
    .short_help = "show load-balance [<index>]",
    .function = load_balance_show,
};
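
/*
 * Illustrative use: "show load-balance 1" prints the detailed format for that
 * one object (flags, buckets, counters), while a bare "show load-balance"
 * walks the whole pool in the brief format.
 */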

always_inline u32
ip_flow_hash (void *data)
{
  ip4_header_t *iph = (ip4_header_t *) data;

  if ((iph->ip_version_and_header_length & 0xF0) == 0x40)
    return ip4_compute_flow_hash (iph, IP_FLOW_HASH_DEFAULT);
  else
    return ip6_compute_flow_hash ((ip6_header_t *) iph, IP_FLOW_HASH_DEFAULT);
}

always_inline u64
mac_to_u64 (u8 * m)
{
  return (*((u64 *) m) & 0xffffffffffff);
}

always_inline u32
l2_flow_hash (vlib_buffer_t * b0)
{
  ethernet_header_t *eh;
  u64 a, b, c;
  uword is_ip, eh_size;
  u16 eh_type;

  eh = vlib_buffer_get_current (b0);
  eh_type = clib_net_to_host_u16 (eh->type);
  eh_size = ethernet_buffer_header_size (b0);

  is_ip = (eh_type == ETHERNET_TYPE_IP4 || eh_type == ETHERNET_TYPE_IP6);

  /* since we have 2 cache lines, use them */
  if (is_ip)
    a = ip_flow_hash ((u8 *) vlib_buffer_get_current (b0) + eh_size);
  else
    a = eh->type;

  b = mac_to_u64 ((u8 *) eh->dst_address);
  c = mac_to_u64 ((u8 *) eh->src_address);
  hash_mix64 (a, b, c);

  return (u32) c;
}

typedef struct load_balance_trace_t_
{
  index_t lb_index;
} load_balance_trace_t;

always_inline uword
load_balance_inline (vlib_main_t * vm,
                     vlib_node_runtime_t * node,
                     vlib_frame_t * frame,
                     int is_l2)
{
  u32 n_left_from, next_index, *from, *to_next;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;

  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          vlib_buffer_t *b0;
          u32 bi0, lbi0, next0;
          const dpo_id_t *dpo0;
          const load_balance_t *lb0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          /* lookup dst + src mac */
          lbi0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
          lb0 = load_balance_get(lbi0);

          if (is_l2)
            {
              vnet_buffer(b0)->ip.flow_hash = l2_flow_hash(b0);
            }
          else
            {
              /* it's BIER */
              const bier_hdr_t *bh0 = vlib_buffer_get_current(b0);
              vnet_buffer(b0)->ip.flow_hash = bier_compute_flow_hash(bh0);
            }

          dpo0 = load_balance_get_bucket_i(lb0,
                                           vnet_buffer(b0)->ip.flow_hash &
                                           (lb0->lb_n_buckets_minus_1));

          next0 = dpo0->dpoi_next_node;
          vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;

          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              load_balance_trace_t *tr = vlib_add_trace (vm, node, b0,
                                                         sizeof (*tr));
              tr->lb_index = lbi0;
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}

static uword
l2_load_balance (vlib_main_t * vm,
                 vlib_node_runtime_t * node,
                 vlib_frame_t * frame)
{
  return (load_balance_inline(vm, node, frame, 1));
}

static u8 *
format_l2_load_balance_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  load_balance_trace_t *t = va_arg (*args, load_balance_trace_t *);

  s = format (s, "L2-load-balance: index %d", t->lb_index);
  return s;
}

/**
 * @brief
 */
VLIB_REGISTER_NODE (l2_load_balance_node) = {
  .function = l2_load_balance,
  .name = "l2-load-balance",
  .vector_size = sizeof (u32),

  .format_trace = format_l2_load_balance_trace,
  .n_next_nodes = 1,
  .next_nodes = {
      [0] = "error-drop",
  },
};

static uword
nsh_load_balance (vlib_main_t * vm,
                  vlib_node_runtime_t * node,
                  vlib_frame_t * frame)
{
  u32 n_left_from, next_index, *from, *to_next;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;

  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          vlib_buffer_t *b0;
          u32 bi0, lbi0, next0, *nsh0;
          const dpo_id_t *dpo0;
          const load_balance_t *lb0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          lbi0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
          lb0 = load_balance_get(lbi0);

          /* SPI + SI are the second word of the NSH header */
          nsh0 = vlib_buffer_get_current (b0);
          vnet_buffer(b0)->ip.flow_hash = nsh0[1] % lb0->lb_n_buckets;

          dpo0 = load_balance_get_bucket_i(lb0,
                                           vnet_buffer(b0)->ip.flow_hash &
                                           (lb0->lb_n_buckets_minus_1));

          next0 = dpo0->dpoi_next_node;
          vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;

          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              load_balance_trace_t *tr = vlib_add_trace (vm, node, b0,
                                                         sizeof (*tr));
              tr->lb_index = lbi0;
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}

static u8 *
format_nsh_load_balance_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  load_balance_trace_t *t = va_arg (*args, load_balance_trace_t *);

  s = format (s, "NSH-load-balance: index %d", t->lb_index);
  return s;
}

/**
 * @brief
 */
VLIB_REGISTER_NODE (nsh_load_balance_node) = {
  .function = nsh_load_balance,
  .name = "nsh-load-balance",
  .vector_size = sizeof (u32),

  .format_trace = format_nsh_load_balance_trace,
  .n_next_nodes = 1,
  .next_nodes = {
      [0] = "error-drop",
  },
};

static u8 *
format_bier_load_balance_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  load_balance_trace_t *t = va_arg (*args, load_balance_trace_t *);

  s = format (s, "BIER-load-balance: index %d", t->lb_index);
  return s;
}

static uword
bier_load_balance (vlib_main_t * vm,
                   vlib_node_runtime_t * node,
                   vlib_frame_t * frame)
{
  return (load_balance_inline(vm, node, frame, 0));
}

/**
 * @brief
 */
VLIB_REGISTER_NODE (bier_load_balance_node) = {
  .function = bier_load_balance,
  .name = "bier-load-balance",
  .vector_size = sizeof (u32),

  .format_trace = format_bier_load_balance_trace,
  .sibling_of = "mpls-load-balance",
};

// clang-format on