/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/ip/lookup.h>
#include <vnet/dpo/load_balance.h>
#include <vnet/dpo/load_balance_map.h>
#include <vnet/dpo/drop_dpo.h>
#include <vppinfra/math.h>              /* for fabs */
#include <vnet/adj/adj.h>
#include <vnet/adj/adj_internal.h>
#include <vnet/fib/fib_urpf_list.h>
#include <vnet/bier/bier_hdr_inlines.h>

/*
 * distribution error tolerance for load-balancing
 */
const f64 multipath_next_hop_error_tolerance = 0.1;
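/*
 * An illustrative note on the tolerance (not from the original source):
 * ip_multipath_normalize_next_hops() below accepts a bucket count n_adj
 * when the total rounding error is <= tolerance * n_adj, i.e. an average
 * error of 10% per bucket with the value above. For example, weights
 * {3, 1}: with n_adj = 2 the ideal shares are {1.5, 0.5}, which cannot be
 * represented without either exceeding the tolerance or starving the
 * weight-1 path, so the block is doubled; at n_adj = 4 the shares are
 * exactly {3, 1} and the block is accepted.
 */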

#undef LB_DEBUG

#ifdef LB_DEBUG
#define LB_DBG(_lb, _fmt, _args...)                                     \
{                                                                       \
    u8 *_tmp = NULL;                                                    \
    clib_warning("lb:[%s]:" _fmt,                                       \
                 load_balance_format(load_balance_get_index((_lb)),     \
                                     0, _tmp),                          \
                 ##_args);                                              \
    vec_free(_tmp);                                                     \
}
#else
#define LB_DBG(_p, _fmt, _args...)
#endif


/**
 * Pool of all load-balance DPOs. It's not static so the data-plane (DP)
 * can have fast access.
 */
load_balance_t *load_balance_pool;

/**
 * The one instance of load-balance main
 */
load_balance_main_t load_balance_main;

f64
load_balance_get_multipath_tolerance (void)
{
    return (multipath_next_hop_error_tolerance);
}

static inline index_t
load_balance_get_index (const load_balance_t *lb)
{
    return (lb - load_balance_pool);
}

static inline dpo_id_t*
load_balance_get_buckets (load_balance_t *lb)
{
    if (LB_HAS_INLINE_BUCKETS(lb))
    {
        return (lb->lb_buckets_inline);
    }
    else
    {
        return (lb->lb_buckets);
    }
}

static load_balance_t *
load_balance_alloc_i (void)
{
    load_balance_t *lb;

    pool_get_aligned(load_balance_pool, lb, CLIB_CACHE_LINE_BYTES);
    memset(lb, 0, sizeof(*lb));

    lb->lb_map = INDEX_INVALID;
    lb->lb_urpf = INDEX_INVALID;
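    /*
     * Note (added for clarity): the combined counters are indexed by LB
     * pool index. vlib_validate_combined_counter() ensures the counter
     * vectors are long enough for this index; the explicit zeroing clears
     * any counts left by a previous occupant of the same pool slot.
     */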
    vlib_validate_combined_counter(&(load_balance_main.lbm_to_counters),
                                   load_balance_get_index(lb));
    vlib_validate_combined_counter(&(load_balance_main.lbm_via_counters),
                                   load_balance_get_index(lb));
    vlib_zero_combined_counter(&(load_balance_main.lbm_to_counters),
                               load_balance_get_index(lb));
    vlib_zero_combined_counter(&(load_balance_main.lbm_via_counters),
                               load_balance_get_index(lb));

    return (lb);
}

static u8*
load_balance_format (index_t lbi,
                     load_balance_format_flags_t flags,
                     u32 indent,
                     u8 *s)
{
    vlib_counter_t to, via;
    load_balance_t *lb;
    dpo_id_t *buckets;
    u32 i;

    lb = load_balance_get(lbi);
    vlib_get_combined_counter(&(load_balance_main.lbm_to_counters), lbi, &to);
    vlib_get_combined_counter(&(load_balance_main.lbm_via_counters), lbi, &via);
    buckets = load_balance_get_buckets(lb);

    s = format(s, "%U: ", format_dpo_type, DPO_LOAD_BALANCE);
    s = format(s, "[proto:%U ", format_dpo_proto, lb->lb_proto);
    s = format(s, "index:%d buckets:%d ", lbi, lb->lb_n_buckets);
    s = format(s, "uRPF:%d ", lb->lb_urpf);
    s = format(s, "to:[%Ld:%Ld]", to.packets, to.bytes);
    if (0 != via.packets)
    {
        s = format(s, " via:[%Ld:%Ld]",
                   via.packets, via.bytes);
    }
    s = format(s, "]");

    if (INDEX_INVALID != lb->lb_map)
    {
        s = format(s, "\n%U%U",
                   format_white_space, indent+4,
                   format_load_balance_map, lb->lb_map, indent+4);
    }
    for (i = 0; i < lb->lb_n_buckets; i++)
    {
        s = format(s, "\n%U[%d] %U",
                   format_white_space, indent+2,
                   i,
                   format_dpo_id,
                   &buckets[i], indent+6);
    }
    return (s);
}

u8*
format_load_balance (u8 * s, va_list * args)
{
    index_t lbi = va_arg(*args, index_t);
    load_balance_format_flags_t flags = va_arg(*args, load_balance_format_flags_t);

    return (load_balance_format(lbi, flags, 0, s));
}

static u8*
format_load_balance_dpo (u8 * s, va_list * args)
{
    index_t lbi = va_arg(*args, index_t);
    u32 indent = va_arg(*args, u32);

    return (load_balance_format(lbi, LOAD_BALANCE_FORMAT_DETAIL, indent, s));
}


static load_balance_t *
load_balance_create_i (u32 num_buckets,
                       dpo_proto_t lb_proto,
                       flow_hash_config_t fhc)
{
    load_balance_t *lb;

    lb = load_balance_alloc_i();
    lb->lb_hash_config = fhc;
    lb->lb_n_buckets = num_buckets;
    lb->lb_n_buckets_minus_1 = num_buckets-1;
    lb->lb_proto = lb_proto;

    if (!LB_HAS_INLINE_BUCKETS(lb))
    {
        vec_validate_aligned(lb->lb_buckets,
                             lb->lb_n_buckets - 1,
                             CLIB_CACHE_LINE_BYTES);
    }

    LB_DBG(lb, "create");

    return (lb);
}

index_t
load_balance_create (u32 n_buckets,
                     dpo_proto_t lb_proto,
                     flow_hash_config_t fhc)
{
    return (load_balance_get_index(load_balance_create_i(n_buckets, lb_proto, fhc)));
}

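/*
 * Descriptive note (added for clarity): dpo_stack() makes the bucket a
 * child of 'next' in the DPO graph; it both locks the parent and ensures
 * the VLIB graph has an arc from the load-balance's node to the parent
 * DPO's node.
 */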
static inline void
load_balance_set_bucket_i (load_balance_t *lb,
                           u32 bucket,
                           dpo_id_t *buckets,
                           const dpo_id_t *next)
{
    dpo_stack(DPO_LOAD_BALANCE, lb->lb_proto, &buckets[bucket], next);
}

void
load_balance_set_bucket (index_t lbi,
                         u32 bucket,
                         const dpo_id_t *next)
{
    load_balance_t *lb;
    dpo_id_t *buckets;

    lb = load_balance_get(lbi);
    buckets = load_balance_get_buckets(lb);

    ASSERT(bucket < lb->lb_n_buckets);

    load_balance_set_bucket_i(lb, bucket, buckets, next);
}

int
load_balance_is_drop (const dpo_id_t *dpo)
{
    load_balance_t *lb;

    if (DPO_LOAD_BALANCE != dpo->dpoi_type)
        return (0);

    lb = load_balance_get(dpo->dpoi_index);

    if (1 == lb->lb_n_buckets)
    {
        return (dpo_is_drop(load_balance_get_bucket_i(lb, 0)));
    }
    return (0);
}

void
load_balance_set_fib_entry_flags (index_t lbi,
                                  fib_entry_flag_t flags)
{
    load_balance_t *lb;

    lb = load_balance_get(lbi);
    lb->lb_fib_entry_flags = flags;
}


void
load_balance_set_urpf (index_t lbi,
                       index_t urpf)
{
    load_balance_t *lb;
    index_t old;

    lb = load_balance_get(lbi);

    /*
     * Packets in flight will see this change. The swap is a single word
     * write, hence atomic; a packet reads either the old or the new uRPF
     * list, never a mixture.
     */
    old = lb->lb_urpf;
    lb->lb_urpf = urpf;

    fib_urpf_list_unlock(old);
    fib_urpf_list_lock(urpf);
}

index_t
load_balance_get_urpf (index_t lbi)
{
    load_balance_t *lb;

    lb = load_balance_get(lbi);

    return (lb->lb_urpf);
}

const dpo_id_t *
load_balance_get_bucket (index_t lbi,
                         u32 bucket)
{
    load_balance_t *lb;

    lb = load_balance_get(lbi);

    return (load_balance_get_bucket_i(lb, bucket));
}

static int
next_hop_sort_by_weight (const load_balance_path_t * n1,
                         const load_balance_path_t * n2)
{
    return ((int) n1->path_weight - (int) n2->path_weight);
}

/* The given next-hop vector is overwritten with a normalized copy: entries
   are sorted by weight and the weights are scaled to the number of
   adjacencies (buckets) each next hop occupies in the block.
   Returns the total number of adjacencies in the block. */
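/*
 * An illustrative example (added for clarity): three paths with weights
 * {1, 1, 2} give sum_weight = 4 and a first candidate block of
 * max_pow2(3) = 4 adjacencies. norm = 4/4 = 1, so the scaled weights are
 * exactly {1, 1, 2} and the 4-bucket block is accepted with zero error on
 * the first pass.
 */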
u32
ip_multipath_normalize_next_hops (const load_balance_path_t * raw_next_hops,
                                  load_balance_path_t ** normalized_next_hops,
                                  u32 *sum_weight_in,
                                  f64 multipath_next_hop_error_tolerance)
{
    load_balance_path_t * nhs;
    uword n_nhs, n_adj, n_adj_left, i, sum_weight;
    f64 norm, error;

    n_nhs = vec_len (raw_next_hops);
    ASSERT (n_nhs > 0);
    if (n_nhs == 0)
        return 0;

    /* Allocate enough space for 2 copies; we'll use second copy to save original weights. */
    nhs = *normalized_next_hops;
    vec_validate (nhs, 2*n_nhs - 1);

    /* Fast path: 1 next hop in block. */
    n_adj = n_nhs;
    if (n_nhs == 1)
    {
        nhs[0] = raw_next_hops[0];
        nhs[0].path_weight = 1;
        _vec_len (nhs) = 1;
        sum_weight = 1;
        goto done;
    }

    else if (n_nhs == 2)
    {
        int cmp = next_hop_sort_by_weight (&raw_next_hops[0], &raw_next_hops[1]) < 0;

        /* Fast sort. */
        nhs[0] = raw_next_hops[cmp];
        nhs[1] = raw_next_hops[cmp ^ 1];

        /* Fast path: equal cost multipath with 2 next hops. */
        if (nhs[0].path_weight == nhs[1].path_weight)
        {
            nhs[0].path_weight = nhs[1].path_weight = 1;
            _vec_len (nhs) = 2;
            sum_weight = 2;
            goto done;
        }
    }
    else
    {
        clib_memcpy (nhs, raw_next_hops, n_nhs * sizeof (raw_next_hops[0]));
        qsort (nhs, n_nhs, sizeof (nhs[0]), (void *) next_hop_sort_by_weight);
    }

    /* Find total weight to normalize weights. */
    sum_weight = 0;
    for (i = 0; i < n_nhs; i++)
        sum_weight += nhs[i].path_weight;

    /* In the unlikely case that all weights are given as 0, set them all to 1. */
    if (sum_weight == 0)
    {
        for (i = 0; i < n_nhs; i++)
            nhs[i].path_weight = 1;
        sum_weight = n_nhs;
    }

    /* Save copies of all next hop weights to avoid being overwritten in loop below. */
    for (i = 0; i < n_nhs; i++)
        nhs[n_nhs + i].path_weight = nhs[i].path_weight;

    /* Try larger and larger power of 2 sized adjacency blocks until we
       find one where the resulting distribution is within the specified
       error tolerance of the requested weights. */
    for (n_adj = max_pow2 (n_nhs); ; n_adj *= 2)
    {
        error = 0;

        norm = n_adj / ((f64) sum_weight);
        n_adj_left = n_adj;
        for (i = 0; i < n_nhs; i++)
        {
            f64 nf = nhs[n_nhs + i].path_weight * norm; /* use saved weights */
            word n = flt_round_nearest (nf);

            n = n > n_adj_left ? n_adj_left : n;
            n_adj_left -= n;
            error += fabs (nf - n);
            nhs[i].path_weight = n;

            if (0 == nhs[i].path_weight)
            {
                /*
                 * This happens when the weight skew is high (norm is
                 * small) and the rounded count is zero. Without this
                 * correction a low-weight path would have no
                 * representation in the load-balance, which we don't
                 * want. Forcing the error above the tolerance makes the
                 * block grow until every path gets at least one bucket.
                 */
                error = n_adj;
                break;
            }
        }

        nhs[0].path_weight += n_adj_left;

        /* Is the average distribution error within tolerance for this block size? */
        if (error <= multipath_next_hop_error_tolerance*n_adj)
        {
            /* Trim the vector back to the normalized entries, dropping
               the saved copies of the original weights. */
            _vec_len (nhs) = i;
            break;
        }
    }

done:
    /* Save vector for next call. */
    *normalized_next_hops = nhs;
    *sum_weight_in = sum_weight;
    return n_adj;
}
423
424static load_balance_path_t *
Neale Rannsc0790cf2017-01-05 01:01:47 -0800425load_balance_multipath_next_hop_fixup (const load_balance_path_t *nhs,
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100426 dpo_proto_t drop_proto)
427{
428 if (0 == vec_len(nhs))
429 {
Neale Rannsc0790cf2017-01-05 01:01:47 -0800430 load_balance_path_t *new_nhs = NULL, *nh;
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100431
432 /*
433 * we need something for the load-balance. so use the drop
434 */
Neale Rannsc0790cf2017-01-05 01:01:47 -0800435 vec_add2(new_nhs, nh, 1);
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100436
437 nh->path_weight = 1;
438 dpo_copy(&nh->path_dpo, drop_dpo_get(drop_proto));
Neale Rannsc0790cf2017-01-05 01:01:47 -0800439
440 return (new_nhs);
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100441 }
442
Neale Rannsc0790cf2017-01-05 01:01:47 -0800443 return (NULL);
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100444}

/*
 * Fill in the buckets in the block based on the normalized
 * next-hop weights.
 */
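/*
 * For example (illustrative): normalized weights {3, 1} over a 4-bucket
 * block yield buckets [A, A, A, B]. Buckets for the same path are
 * contiguous; any translation of bucket indices when paths fail is done
 * by the load-balance map, not here.
 */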
static void
load_balance_fill_buckets (load_balance_t *lb,
                           load_balance_path_t *nhs,
                           dpo_id_t *buckets,
                           u32 n_buckets)
{
    load_balance_path_t * nh;
    u16 ii, bucket;

    bucket = 0;

    /*
     * the next-hops have normalised weights. that means their sum is the number
     * of buckets we need to fill.
     */
    vec_foreach (nh, nhs)
    {
        for (ii = 0; ii < nh->path_weight; ii++)
        {
            ASSERT(bucket < n_buckets);
            load_balance_set_bucket_i(lb, bucket++, buckets, &nh->path_dpo);
        }
    }
}

static inline void
load_balance_set_n_buckets (load_balance_t *lb,
                            u32 n_buckets)
{
    lb->lb_n_buckets = n_buckets;
    lb->lb_n_buckets_minus_1 = n_buckets-1;
}
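/*
 * Note (added for clarity): the data-plane selects a bucket with
 * "flow_hash & lb_n_buckets_minus_1", which relies on the bucket count
 * being a power of 2, as produced by ip_multipath_normalize_next_hops().
 * lb_n_buckets_minus_1 is stored pre-computed so that bucket selection is
 * a single AND in the forwarding path.
 */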

void
load_balance_multipath_update (const dpo_id_t *dpo,
                               const load_balance_path_t * raw_nhs,
                               load_balance_flags_t flags)
{
    load_balance_path_t *nh, *nhs, *fixed_nhs;
    u32 sum_of_weights, n_buckets, ii;
    index_t lbmi, old_lbmi;
    load_balance_t *lb;
    dpo_id_t *tmp_dpo;

    nhs = NULL;

    ASSERT(DPO_LOAD_BALANCE == dpo->dpoi_type);
    lb = load_balance_get(dpo->dpoi_index);
    fixed_nhs = load_balance_multipath_next_hop_fixup(raw_nhs, lb->lb_proto);
    n_buckets =
        ip_multipath_normalize_next_hops((NULL == fixed_nhs ?
                                          raw_nhs :
                                          fixed_nhs),
                                         &nhs,
                                         &sum_of_weights,
                                         multipath_next_hop_error_tolerance);

    ASSERT (n_buckets >= vec_len (raw_nhs));

    /*
     * Save the old load-balance map used, and get a new one if required.
     */
    old_lbmi = lb->lb_map;
    if (flags & LOAD_BALANCE_FLAG_USES_MAP)
    {
        lbmi = load_balance_map_add_or_lock(n_buckets, sum_of_weights, nhs);
    }
    else
    {
        lbmi = INDEX_INVALID;
    }

    if (0 == lb->lb_n_buckets)
    {
        /*
         * first time initialisation. no packets inflight, so we can write
         * at leisure.
         */
        load_balance_set_n_buckets(lb, n_buckets);

        if (!LB_HAS_INLINE_BUCKETS(lb))
            vec_validate_aligned(lb->lb_buckets,
                                 lb->lb_n_buckets - 1,
                                 CLIB_CACHE_LINE_BYTES);

        load_balance_fill_buckets(lb, nhs,
                                  load_balance_get_buckets(lb),
                                  n_buckets);
        lb->lb_map = lbmi;
    }
    else
    {
        /*
         * This is a modification of an existing load-balance.
         * We need to ensure that packets inflight see a consistent state, that
         * is the number of reported buckets the LB has (read from
         * lb_n_buckets_minus_1) is not more than it actually has. So if the
         * number of buckets is increasing, we must update the bucket array first,
         * then the reported number. vice-versa if the number of buckets goes down.
         */
        if (n_buckets == lb->lb_n_buckets)
        {
            /*
             * no change in the number of buckets. we can simply fill what
             * is new over what is old.
             */
            load_balance_fill_buckets(lb, nhs,
                                      load_balance_get_buckets(lb),
                                      n_buckets);
            lb->lb_map = lbmi;
        }
        else if (n_buckets > lb->lb_n_buckets)
        {
            /*
             * we have more buckets. the old load-balance map (if there is one)
             * will remain valid, i.e. mapping to indices within range, so we
             * update it last.
             */
            if (n_buckets > LB_NUM_INLINE_BUCKETS &&
                lb->lb_n_buckets <= LB_NUM_INLINE_BUCKETS)
            {
                /*
                 * the new increased number of buckets is crossing the threshold
                 * from the inline storage to out-line. Alloc the outline buckets
                 * first, then fixup the number. then reset the inlines.
                 */
                ASSERT(NULL == lb->lb_buckets);
                vec_validate_aligned(lb->lb_buckets,
                                     n_buckets - 1,
                                     CLIB_CACHE_LINE_BYTES);

                load_balance_fill_buckets(lb, nhs,
                                          lb->lb_buckets,
                                          n_buckets);
                CLIB_MEMORY_BARRIER();
                load_balance_set_n_buckets(lb, n_buckets);

                CLIB_MEMORY_BARRIER();

                for (ii = 0; ii < LB_NUM_INLINE_BUCKETS; ii++)
                {
                    dpo_reset(&lb->lb_buckets_inline[ii]);
                }
            }
            else
            {
                if (n_buckets <= LB_NUM_INLINE_BUCKETS)
                {
                    /*
                     * not crossing the threshold; the buckets remain inline.
                     * we can write the new values over the old in place.
                     */
                    load_balance_fill_buckets(lb, nhs,
                                              load_balance_get_buckets(lb),
                                              n_buckets);
                    CLIB_MEMORY_BARRIER();
                    load_balance_set_n_buckets(lb, n_buckets);
                }
                else
                {
                    /*
                     * not crossing the threshold; the buckets remain
                     * out-of-line, but we need a larger bucket array to
                     * hold the increased number of choices.
                     */
                    dpo_id_t *new_buckets, *old_buckets, *tmp_dpo;

                    new_buckets = NULL;
                    old_buckets = load_balance_get_buckets(lb);

                    vec_validate_aligned(new_buckets,
                                         n_buckets - 1,
                                         CLIB_CACHE_LINE_BYTES);

                    load_balance_fill_buckets(lb, nhs, new_buckets, n_buckets);
                    CLIB_MEMORY_BARRIER();
                    lb->lb_buckets = new_buckets;
                    CLIB_MEMORY_BARRIER();
                    load_balance_set_n_buckets(lb, n_buckets);

                    vec_foreach(tmp_dpo, old_buckets)
                    {
                        dpo_reset(tmp_dpo);
                    }
                    vec_free(old_buckets);
                }
            }

            /*
             * buckets fixed. ready for the MAP update.
             */
            lb->lb_map = lbmi;
        }
        else
        {
            /*
             * bucket size shrinkage.
             * Any map we have will be based on the old
             * larger number of buckets, so will be translating to indices
             * out of range. So the new MAP must be installed first.
             */
            lb->lb_map = lbmi;
            CLIB_MEMORY_BARRIER();

            if (n_buckets <= LB_NUM_INLINE_BUCKETS &&
                lb->lb_n_buckets > LB_NUM_INLINE_BUCKETS)
            {
                /*
                 * the new decreased number of buckets is crossing the threshold
                 * from out-of-line storage to inline:
                 * 1 - fill the inline buckets,
                 * 2 - fixup the number (at this point the inline buckets are
                 *     in use),
                 * 3 - free the out-of-line buckets.
                 */
                load_balance_fill_buckets(lb, nhs,
                                          lb->lb_buckets_inline,
                                          n_buckets);
                CLIB_MEMORY_BARRIER();
                load_balance_set_n_buckets(lb, n_buckets);
                CLIB_MEMORY_BARRIER();

                vec_foreach(tmp_dpo, lb->lb_buckets)
                {
                    dpo_reset(tmp_dpo);
                }
                vec_free(lb->lb_buckets);
            }
            else
            {
                /*
                 * not crossing the threshold.
                 * 1 - update the number to the smaller size
                 * 2 - write the new buckets
                 * 3 - reset those no longer used.
                 */
                dpo_id_t *buckets;
                u32 old_n_buckets;

                old_n_buckets = lb->lb_n_buckets;
                buckets = load_balance_get_buckets(lb);

                load_balance_set_n_buckets(lb, n_buckets);
                CLIB_MEMORY_BARRIER();

                load_balance_fill_buckets(lb, nhs,
                                          buckets,
                                          n_buckets);

                for (ii = n_buckets; ii < old_n_buckets; ii++)
                {
                    dpo_reset(&buckets[ii]);
                }
            }
        }
    }

    vec_foreach (nh, nhs)
    {
        dpo_reset(&nh->path_dpo);
    }
    vec_free(nhs);
    vec_free(fixed_nhs);

    load_balance_map_unlock(old_lbmi);
}
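/*
 * A usage sketch (hypothetical; 'via_a'/'via_b' stand for previously
 * stacked DPOs and are not defined in this file):
 *
 *   load_balance_path_t *paths = NULL, *path;
 *
 *   vec_add2(paths, path, 1);
 *   path->path_weight = 3;
 *   dpo_copy(&path->path_dpo, &via_a);
 *
 *   vec_add2(paths, path, 1);
 *   path->path_weight = 1;
 *   dpo_copy(&path->path_dpo, &via_b);
 *
 *   load_balance_multipath_update(&lb_dpo, paths, LOAD_BALANCE_FLAG_NONE);
 *
 * With the 0.1 tolerance this yields a 4-bucket LB in a 3:1 split.
 */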

static void
load_balance_lock (dpo_id_t *dpo)
{
    load_balance_t *lb;

    lb = load_balance_get(dpo->dpoi_index);

    lb->lb_locks++;
}

static void
load_balance_destroy (load_balance_t *lb)
{
    dpo_id_t *buckets;
    int i;

    buckets = load_balance_get_buckets(lb);

    for (i = 0; i < lb->lb_n_buckets; i++)
    {
        dpo_reset(&buckets[i]);
    }

    LB_DBG(lb, "destroy");
    if (!LB_HAS_INLINE_BUCKETS(lb))
    {
        vec_free(lb->lb_buckets);
    }

    fib_urpf_list_unlock(lb->lb_urpf);
    load_balance_map_unlock(lb->lb_map);

    pool_put(load_balance_pool, lb);
}

static void
load_balance_unlock (dpo_id_t *dpo)
{
    load_balance_t *lb;

    lb = load_balance_get(dpo->dpoi_index);

    lb->lb_locks--;

    if (0 == lb->lb_locks)
    {
        load_balance_destroy(lb);
    }
}

static void
load_balance_mem_show (void)
{
    fib_show_memory_usage("load-balance",
                          pool_elts(load_balance_pool),
                          pool_len(load_balance_pool),
                          sizeof(load_balance_t));
    load_balance_map_show_mem();
}

const static dpo_vft_t lb_vft = {
    .dv_lock = load_balance_lock,
    .dv_unlock = load_balance_unlock,
    .dv_format = format_load_balance_dpo,
    .dv_mem_show = load_balance_mem_show,
};

/**
 * @brief The per-protocol VLIB graph nodes that are assigned to a load-balance
 *        object.
 *
 * this means that these graph nodes are ones from which a load-balance is the
 * parent object in the DPO-graph.
 *
 * We do not list all the load-balance nodes, such as the *-lookup. instead
 * we are relying on the correct use of the .sibling_of field when setting
 * up these sibling nodes.
 */
const static char* const load_balance_ip4_nodes[] =
{
    "ip4-load-balance",
    NULL,
};
const static char* const load_balance_ip6_nodes[] =
{
    "ip6-load-balance",
    NULL,
};
const static char* const load_balance_mpls_nodes[] =
{
    "mpls-load-balance",
    NULL,
};
const static char* const load_balance_l2_nodes[] =
{
    "l2-load-balance",
    NULL,
};
const static char* const load_balance_nsh_nodes[] =
{
    "nsh-load-balance",
    NULL,
};
const static char* const load_balance_bier_nodes[] =
{
    "bier-load-balance",
    NULL,
};
const static char* const * const load_balance_nodes[DPO_PROTO_NUM] =
{
    [DPO_PROTO_IP4] = load_balance_ip4_nodes,
    [DPO_PROTO_IP6] = load_balance_ip6_nodes,
    [DPO_PROTO_MPLS] = load_balance_mpls_nodes,
    [DPO_PROTO_ETHERNET] = load_balance_l2_nodes,
    [DPO_PROTO_NSH] = load_balance_nsh_nodes,
    [DPO_PROTO_BIER] = load_balance_bier_nodes,
};

void
load_balance_module_init (void)
{
    index_t lbi;

    dpo_register(DPO_LOAD_BALANCE, &lb_vft, load_balance_nodes);

    /*
     * Special LB with index zero. we need to define this since the v4 mtrie
     * assumes an index of 0 implies the ply is empty. therefore all 'real'
     * adjs need a non-zero index.
     * This should never be used, but just in case, stack it on a drop.
     */
    lbi = load_balance_create(1, DPO_PROTO_IP4, 0);
    load_balance_set_bucket(lbi, 0, drop_dpo_get(DPO_PROTO_IP4));

    load_balance_map_module_init();
}

static clib_error_t *
load_balance_show (vlib_main_t * vm,
                   unformat_input_t * input,
                   vlib_cli_command_t * cmd)
{
    index_t lbi = INDEX_INVALID;

    while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
    {
        if (unformat (input, "%d", &lbi))
            ;
        else
            break;
    }

    if (INDEX_INVALID != lbi)
    {
        vlib_cli_output (vm, "%U", format_load_balance, lbi,
                         LOAD_BALANCE_FORMAT_DETAIL);
    }
    else
    {
        load_balance_t *lb;

        pool_foreach(lb, load_balance_pool,
        ({
            vlib_cli_output (vm, "%U", format_load_balance,
                             load_balance_get_index(lb),
                             LOAD_BALANCE_FORMAT_NONE);
        }));
    }

    return 0;
}

VLIB_CLI_COMMAND (load_balance_show_command, static) = {
    .path = "show load-balance",
    .short_help = "show load-balance [<index>]",
    .function = load_balance_show,
};
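/*
 * Example session (output shape is illustrative, not captured from a
 * running system):
 *
 *   vpp# show load-balance 1
 *   load-balance: [proto:ip4 index:1 buckets:1 uRPF:0 to:[0:0]]
 *     [0] dpo-drop ip4
 */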


always_inline u32
ip_flow_hash (void *data)
{
    ip4_header_t *iph = (ip4_header_t *) data;

    if ((iph->ip_version_and_header_length & 0xF0) == 0x40)
        return ip4_compute_flow_hash (iph, IP_FLOW_HASH_DEFAULT);
    else
        return ip6_compute_flow_hash ((ip6_header_t *) iph, IP_FLOW_HASH_DEFAULT);
}

always_inline u64
mac_to_u64 (u8 * m)
{
    return (*((u64 *) m) & 0xffffffffffff);
}
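/*
 * Note (added for clarity): this reads a full u64 at the MAC address and
 * masks to the low 48 bits. It assumes the two bytes past the address are
 * readable (true for an ethernet header within a vlib buffer) and
 * little-endian byte order, so the six MAC bytes land in the low bits.
 */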

always_inline u32
l2_flow_hash (vlib_buffer_t * b0)
{
    ethernet_header_t *eh;
    u64 a, b, c;
    uword is_ip, eh_size;
    u16 eh_type;

    eh = vlib_buffer_get_current (b0);
    eh_type = clib_net_to_host_u16 (eh->type);
    eh_size = ethernet_buffer_header_size (b0);

    is_ip = (eh_type == ETHERNET_TYPE_IP4 || eh_type == ETHERNET_TYPE_IP6);

    /* since we have 2 cache lines, use them */
    if (is_ip)
        a = ip_flow_hash ((u8 *) vlib_buffer_get_current (b0) + eh_size);
    else
        a = eh->type;

    b = mac_to_u64 ((u8 *) eh->dst_address);
    c = mac_to_u64 ((u8 *) eh->src_address);
    hash_mix64 (a, b, c);

    return (u32) c;
}

typedef struct load_balance_trace_t_
{
    index_t lb_index;
} load_balance_trace_t;

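/*
 * Shared worker for the L2 and BIER load-balance nodes below. is_l2 is a
 * compile-time constant at each call site and the function is
 * always_inline, so the per-packet branch on it is resolved when the
 * wrappers are compiled.
 */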
always_inline uword
load_balance_inline (vlib_main_t * vm,
                     vlib_node_runtime_t * node,
                     vlib_frame_t * frame,
                     int is_l2)
{
    u32 n_left_from, next_index, *from, *to_next;

    from = vlib_frame_vector_args (frame);
    n_left_from = frame->n_vectors;

    next_index = node->cached_next_index;

    while (n_left_from > 0)
    {
        u32 n_left_to_next;

        vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

        while (n_left_from > 0 && n_left_to_next > 0)
        {
            vlib_buffer_t *b0;
            u32 bi0, lbi0, next0;
            const dpo_id_t *dpo0;
            const load_balance_t *lb0;

            bi0 = from[0];
            to_next[0] = bi0;
            from += 1;
            to_next += 1;
            n_left_from -= 1;
            n_left_to_next -= 1;

            b0 = vlib_get_buffer (vm, bi0);

            /* use the LB index written into the buffer by the previous node */
            lbi0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
            lb0 = load_balance_get(lbi0);

            if (is_l2)
            {
                vnet_buffer(b0)->ip.flow_hash = l2_flow_hash(b0);
            }
            else
            {
                /* it's BIER */
                const bier_hdr_t *bh0 = vlib_buffer_get_current(b0);
                vnet_buffer(b0)->ip.flow_hash = bier_hdr_get_entropy(bh0);
            }

            dpo0 = load_balance_get_bucket_i(lb0,
                                             vnet_buffer(b0)->ip.flow_hash &
                                             (lb0->lb_n_buckets_minus_1));

            next0 = dpo0->dpoi_next_node;
            vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;

            if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
                load_balance_trace_t *tr = vlib_add_trace (vm, node, b0,
                                                           sizeof (*tr));
                tr->lb_index = lbi0;
            }
            vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                             n_left_to_next, bi0, next0);
        }

        vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

    return frame->n_vectors;
}

static uword
l2_load_balance (vlib_main_t * vm,
                 vlib_node_runtime_t * node,
                 vlib_frame_t * frame)
{
    return (load_balance_inline(vm, node, frame, 1));
}

static u8 *
format_l2_load_balance_trace (u8 * s, va_list * args)
{
    CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
    CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
    load_balance_trace_t *t = va_arg (*args, load_balance_trace_t *);

    s = format (s, "L2-load-balance: index %d", t->lb_index);
    return s;
}

/**
 * @brief
 */
VLIB_REGISTER_NODE (l2_load_balance_node) = {
    .function = l2_load_balance,
    .name = "l2-load-balance",
    .vector_size = sizeof (u32),

    .format_trace = format_l2_load_balance_trace,
    .n_next_nodes = 1,
    .next_nodes = {
        [0] = "error-drop",
    },
};

static uword
nsh_load_balance (vlib_main_t * vm,
                  vlib_node_runtime_t * node,
                  vlib_frame_t * frame)
{
    u32 n_left_from, next_index, *from, *to_next;

    from = vlib_frame_vector_args (frame);
    n_left_from = frame->n_vectors;

    next_index = node->cached_next_index;

    while (n_left_from > 0)
    {
        u32 n_left_to_next;

        vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

        while (n_left_from > 0 && n_left_to_next > 0)
        {
            vlib_buffer_t *b0;
            u32 bi0, lbi0, next0, *nsh0;
            const dpo_id_t *dpo0;
            const load_balance_t *lb0;

            bi0 = from[0];
            to_next[0] = bi0;
            from += 1;
            to_next += 1;
            n_left_from -= 1;
            n_left_to_next -= 1;

            b0 = vlib_get_buffer (vm, bi0);

            lbi0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
            lb0 = load_balance_get(lbi0);

            /* SPI + SI are the second word of the NSH header */
            nsh0 = vlib_buffer_get_current (b0);
            vnet_buffer(b0)->ip.flow_hash = nsh0[1] % lb0->lb_n_buckets;

            dpo0 = load_balance_get_bucket_i(lb0,
                                             vnet_buffer(b0)->ip.flow_hash &
                                             (lb0->lb_n_buckets_minus_1));

            next0 = dpo0->dpoi_next_node;
            vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;

            if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
                load_balance_trace_t *tr = vlib_add_trace (vm, node, b0,
                                                           sizeof (*tr));
                tr->lb_index = lbi0;
            }
            vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                             n_left_to_next, bi0, next0);
        }

        vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

    return frame->n_vectors;
}

static u8 *
format_nsh_load_balance_trace (u8 * s, va_list * args)
{
    CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
    CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
    load_balance_trace_t *t = va_arg (*args, load_balance_trace_t *);

    s = format (s, "NSH-load-balance: index %d", t->lb_index);
    return s;
}

/**
 * @brief
 */
VLIB_REGISTER_NODE (nsh_load_balance_node) = {
    .function = nsh_load_balance,
    .name = "nsh-load-balance",
    .vector_size = sizeof (u32),

    .format_trace = format_nsh_load_balance_trace,
    .n_next_nodes = 1,
    .next_nodes = {
        [0] = "error-drop",
    },
};

static u8 *
format_bier_load_balance_trace (u8 * s, va_list * args)
{
    CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
    CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
    load_balance_trace_t *t = va_arg (*args, load_balance_trace_t *);

    s = format (s, "BIER-load-balance: index %d", t->lb_index);
    return s;
}

static uword
bier_load_balance (vlib_main_t * vm,
                   vlib_node_runtime_t * node,
                   vlib_frame_t * frame)
{
    return (load_balance_inline(vm, node, frame, 0));
}

/**
 * @brief
 */
VLIB_REGISTER_NODE (bier_load_balance_node) = {
    .function = bier_load_balance,
    .name = "bier-load-balance",
    .vector_size = sizeof (u32),

    .format_trace = format_bier_load_balance_trace,
    .sibling_of = "mpls-load-balance",
};