/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/ip/lookup.h>
#include <vnet/dpo/replicate_dpo.h>
#include <vnet/dpo/drop_dpo.h>
#include <vnet/dpo/receive_dpo.h>
#include <vnet/adj/adj.h>
#include <vnet/mpls/mpls_types.h>

/**
 * the logger
 */
vlib_log_class_t replicate_logger;

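/*
 * Debug logging helper: logs the formatted replicate (looked up by its pool
 * index) followed by the caller's message, e.g. REP_DBG(rep, "create").
 */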
#define REP_DBG(_rep, _fmt, _args...)                   \
{                                                       \
    vlib_log_debug(replicate_logger,                    \
                   "rep:[%U]:" _fmt,                    \
                   format_replicate,                    \
                   replicate_get_index(_rep),           \
                   REPLICATE_FORMAT_NONE,               \
                   ##_args);                            \
}

#define foreach_replicate_dpo_error             \
_(BUFFER_ALLOCATION_FAILURE, "Buffer Allocation Failure")

typedef enum {
#define _(sym,str) REPLICATE_DPO_ERROR_##sym,
    foreach_replicate_dpo_error
#undef _
    REPLICATE_DPO_N_ERROR,
} replicate_dpo_error_t;

static char * replicate_dpo_error_strings[] = {
#define _(sym,string) string,
    foreach_replicate_dpo_error
#undef _
};

/**
 * Pool of all replicate DPOs. It's not static so the data plane can have
 * fast access
 */
replicate_t *replicate_pool;

/**
 * The one instance of replicate main
 */
replicate_main_t replicate_main = {
    .repm_counters = {
        .name = "mroutes",
        .stat_segment_name = "/net/mroute",
    },
};

static inline index_t
replicate_get_index (const replicate_t *rep)
{
    return (rep - replicate_pool);
}

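/*
 * Return the bucket array: small replicates keep their buckets inline in
 * the replicate_t itself; larger ones use a separately allocated vector.
 */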
static inline dpo_id_t*
replicate_get_buckets (replicate_t *rep)
{
    if (REP_HAS_INLINE_BUCKETS(rep))
    {
        return (rep->rep_buckets_inline);
    }
    else
    {
        return (rep->rep_buckets);
    }
}

static replicate_t *
replicate_alloc_i (void)
{
    replicate_t *rep;

    pool_get_aligned(replicate_pool, rep, CLIB_CACHE_LINE_BYTES);
    clib_memset(rep, 0, sizeof(*rep));

    vlib_validate_combined_counter(&(replicate_main.repm_counters),
                                   replicate_get_index(rep));
    vlib_zero_combined_counter(&(replicate_main.repm_counters),
                               replicate_get_index(rep));

    return (rep);
}

static u8*
format_replicate_flags (u8 *s, va_list *args)
{
    int flags = va_arg (*args, int);

    if (flags == REPLICATE_FLAGS_NONE)
    {
        s = format (s, "none");
    }
    else if (flags & REPLICATE_FLAGS_HAS_LOCAL)
    {
        s = format (s, "has-local ");
    }

    return (s);
}

static u8*
replicate_format (index_t repi,
                  replicate_format_flags_t flags,
                  u32 indent,
                  u8 *s)
{
    vlib_counter_t to;
    replicate_t *rep;
    dpo_id_t *buckets;
    u32 i;

    repi &= ~MPLS_IS_REPLICATE;
    rep = replicate_get(repi);
    vlib_get_combined_counter(&(replicate_main.repm_counters), repi, &to);
    buckets = replicate_get_buckets(rep);

    s = format(s, "%U: ", format_dpo_type, DPO_REPLICATE);
    s = format(s, "[index:%d buckets:%d ", repi, rep->rep_n_buckets);
    s = format(s, "flags:[%U] ", format_replicate_flags, rep->rep_flags);
    s = format(s, "to:[%Ld:%Ld]]", to.packets, to.bytes);

    for (i = 0; i < rep->rep_n_buckets; i++)
    {
        s = format(s, "\n%U", format_white_space, indent+2);
        s = format(s, "[%d]", i);
        s = format(s, " %U", format_dpo_id, &buckets[i], indent+6);
    }
    return (s);
}

u8*
format_replicate (u8 * s, va_list * args)
{
    index_t repi = va_arg(*args, index_t);
    replicate_format_flags_t flags = va_arg(*args, replicate_format_flags_t);

    return (replicate_format(repi, flags, 0, s));
}

static u8*
format_replicate_dpo (u8 * s, va_list * args)
{
    index_t repi = va_arg(*args, index_t);
    u32 indent = va_arg(*args, u32);

    return (replicate_format(repi, REPLICATE_FORMAT_DETAIL, indent, s));
}

static replicate_t *
replicate_create_i (u32 num_buckets,
                    dpo_proto_t rep_proto)
{
    replicate_t *rep;

    rep = replicate_alloc_i();
    rep->rep_n_buckets = num_buckets;
    rep->rep_proto = rep_proto;

    if (!REP_HAS_INLINE_BUCKETS(rep))
    {
        vec_validate_aligned(rep->rep_buckets,
                             rep->rep_n_buckets - 1,
                             CLIB_CACHE_LINE_BYTES);
    }

    REP_DBG(rep, "create");

    return (rep);
}

index_t
replicate_create (u32 n_buckets,
                  dpo_proto_t rep_proto)
{
    return (replicate_get_index(replicate_create_i(n_buckets, rep_proto)));
}
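
/*
 * Usage sketch (hypothetical caller, not part of this file): build a
 * two-way IPv4 replicate and stack each bucket on an existing child DPO:
 *
 *     index_t repi = replicate_create(2, DPO_PROTO_IP4);
 *     replicate_set_bucket(repi, 0, &dpo_a);
 *     replicate_set_bucket(repi, 1, &dpo_b);
 *
 * dpo_a and dpo_b stand in for DPOs the caller has already resolved.
 */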

static inline void
replicate_set_bucket_i (replicate_t *rep,
                        u32 bucket,
                        dpo_id_t *buckets,
                        const dpo_id_t *next)
{
    if (dpo_is_receive(&buckets[bucket]))
    {
        rep->rep_flags &= ~REPLICATE_FLAGS_HAS_LOCAL;
    }
    if (dpo_is_receive(next))
    {
        rep->rep_flags |= REPLICATE_FLAGS_HAS_LOCAL;
    }
    dpo_stack(DPO_REPLICATE, rep->rep_proto, &buckets[bucket], next);
}

void
replicate_set_bucket (index_t repi,
                      u32 bucket,
                      const dpo_id_t *next)
{
    replicate_t *rep;
    dpo_id_t *buckets;

    repi &= ~MPLS_IS_REPLICATE;
    rep = replicate_get(repi);
    buckets = replicate_get_buckets(rep);

    ASSERT(bucket < rep->rep_n_buckets);

    replicate_set_bucket_i(rep, bucket, buckets, next);
}

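/*
 * A replicate counts as a drop only when it has a single bucket and that
 * bucket is stacked on the drop DPO.
 */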
int
replicate_is_drop (const dpo_id_t *dpo)
{
    replicate_t *rep;
    index_t repi;

    if (DPO_REPLICATE != dpo->dpoi_type)
        return (0);

    repi = dpo->dpoi_index & ~MPLS_IS_REPLICATE;
    rep = replicate_get(repi);

    if (1 == rep->rep_n_buckets)
    {
        return (dpo_is_drop(replicate_get_bucket_i(rep, 0)));
    }
    return (0);
}

const dpo_id_t *
replicate_get_bucket (index_t repi,
                      u32 bucket)
{
    replicate_t *rep;

    repi &= ~MPLS_IS_REPLICATE;
    rep = replicate_get(repi);

    return (replicate_get_bucket_i(rep, bucket));
}

static load_balance_path_t *
replicate_multipath_next_hop_fixup (load_balance_path_t *nhs,
                                    dpo_proto_t drop_proto)
{
    if (0 == vec_len(nhs))
    {
        load_balance_path_t *nh;

        /*
         * we need something for the replicate, so use the drop
         */
        vec_add2(nhs, nh, 1);

        nh->path_weight = 1;
        dpo_copy(&nh->path_dpo, drop_dpo_get(drop_proto));
    }

    return (nhs);
}

/*
 * Fill in adjacencies in block based on corresponding
 * next hop adjacencies.
 */
static void
replicate_fill_buckets (replicate_t *rep,
                        load_balance_path_t *nhs,
                        dpo_id_t *buckets,
                        u32 n_buckets)
{
    load_balance_path_t * nh;
    u16 bucket;

    bucket = 0;

    /*
     * the next-hops have normalised weights. that means their sum is the number
     * of buckets we need to fill.
     */
    vec_foreach (nh, nhs)
    {
        ASSERT(bucket < n_buckets);
        replicate_set_bucket_i(rep, bucket++, buckets, &nh->path_dpo);
    }
}

static inline void
replicate_set_n_buckets (replicate_t *rep,
                         u32 n_buckets)
{
    rep->rep_n_buckets = n_buckets;
}

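/*
 * Update an existing replicate to use the given set of next-hops. The
 * write ordering below keeps in-flight packets safe: the reported bucket
 * count is never allowed to exceed the number of valid buckets, so when
 * growing, the bucket array is written before the count is raised, and
 * when shrinking, the count is lowered first. CLIB_MEMORY_BARRIER()
 * enforces that ordering.
 */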
void
replicate_multipath_update (const dpo_id_t *dpo,
                            load_balance_path_t * next_hops)
{
    load_balance_path_t * nh, * nhs;
    dpo_id_t *tmp_dpo;
    u32 ii, n_buckets;
    replicate_t *rep;
    index_t repi;

    ASSERT(DPO_REPLICATE == dpo->dpoi_type);
    repi = dpo->dpoi_index & ~MPLS_IS_REPLICATE;
    rep = replicate_get(repi);
    nhs = replicate_multipath_next_hop_fixup(next_hops,
                                             rep->rep_proto);
    n_buckets = vec_len(nhs);

    if (0 == rep->rep_n_buckets)
    {
        /*
         * first time initialisation. no packets inflight, so we can write
         * at leisure.
         */
        replicate_set_n_buckets(rep, n_buckets);

        if (!REP_HAS_INLINE_BUCKETS(rep))
            vec_validate_aligned(rep->rep_buckets,
                                 rep->rep_n_buckets - 1,
                                 CLIB_CACHE_LINE_BYTES);

        replicate_fill_buckets(rep, nhs,
                               replicate_get_buckets(rep),
                               n_buckets);
    }
    else
    {
        /*
         * This is a modification of an existing replicate.
         * We need to ensure that packets in flight see a consistent state;
         * that is, the number of buckets the REP reports is never more than
         * it actually has. So if the number of buckets is increasing, we
         * must update the bucket array first, then the reported number, and
         * vice-versa if the number of buckets goes down.
         */
        if (n_buckets == rep->rep_n_buckets)
        {
            /*
             * no change in the number of buckets. we can simply fill what
             * is new over what is old.
             */
            replicate_fill_buckets(rep, nhs,
                                   replicate_get_buckets(rep),
                                   n_buckets);
        }
        else if (n_buckets > rep->rep_n_buckets)
        {
            /*
             * we have more buckets. the old replicate map (if there is one)
             * will remain valid, i.e. mapping to indices within range, so we
             * update it last.
             */
            if (n_buckets > REP_NUM_INLINE_BUCKETS &&
                rep->rep_n_buckets <= REP_NUM_INLINE_BUCKETS)
            {
                /*
                 * the new increased number of buckets is crossing the threshold
                 * from the inline storage to out-line. Alloc the outline buckets
                 * first, then fixup the number. then reset the inlines.
                 */
                ASSERT(NULL == rep->rep_buckets);
                vec_validate_aligned(rep->rep_buckets,
                                     n_buckets - 1,
                                     CLIB_CACHE_LINE_BYTES);

                replicate_fill_buckets(rep, nhs,
                                       rep->rep_buckets,
                                       n_buckets);
                CLIB_MEMORY_BARRIER();
                replicate_set_n_buckets(rep, n_buckets);

                CLIB_MEMORY_BARRIER();

                for (ii = 0; ii < REP_NUM_INLINE_BUCKETS; ii++)
                {
                    dpo_reset(&rep->rep_buckets_inline[ii]);
                }
            }
            else
            {
                if (n_buckets <= REP_NUM_INLINE_BUCKETS)
                {
                    /*
                     * we are not crossing the threshold and it's still inline
                     * buckets. we can write the new over the old.
                     */
                    replicate_fill_buckets(rep, nhs,
                                           replicate_get_buckets(rep),
                                           n_buckets);
                    CLIB_MEMORY_BARRIER();
                    replicate_set_n_buckets(rep, n_buckets);
                }
                else
                {
                    /*
                     * we are not crossing the threshold. We need a new bucket
                     * array to hold the increased number of choices.
                     */
                    dpo_id_t *new_buckets, *old_buckets, *tmp_dpo;

                    new_buckets = NULL;
                    old_buckets = replicate_get_buckets(rep);

                    vec_validate_aligned(new_buckets,
                                         n_buckets - 1,
                                         CLIB_CACHE_LINE_BYTES);

                    replicate_fill_buckets(rep, nhs, new_buckets, n_buckets);
                    CLIB_MEMORY_BARRIER();
                    rep->rep_buckets = new_buckets;
                    CLIB_MEMORY_BARRIER();
                    replicate_set_n_buckets(rep, n_buckets);

                    vec_foreach(tmp_dpo, old_buckets)
                    {
                        dpo_reset(tmp_dpo);
                    }
                    vec_free(old_buckets);
                }
            }
        }
        else
        {
            /*
             * bucket size shrinkage.
             */
            if (n_buckets <= REP_NUM_INLINE_BUCKETS &&
                rep->rep_n_buckets > REP_NUM_INLINE_BUCKETS)
            {
                /*
                 * the new decreased number of buckets is crossing the threshold
                 * from out-line storage to inline:
                 *   1 - Fill the inline buckets,
                 *   2 - fixup the number (at this point the inline buckets are
                 *       used).
                 *   3 - free the outline buckets
                 */
                replicate_fill_buckets(rep, nhs,
                                       rep->rep_buckets_inline,
                                       n_buckets);
                CLIB_MEMORY_BARRIER();
                replicate_set_n_buckets(rep, n_buckets);
                CLIB_MEMORY_BARRIER();

                vec_foreach(tmp_dpo, rep->rep_buckets)
                {
                    dpo_reset(tmp_dpo);
                }
                vec_free(rep->rep_buckets);
            }
            else
            {
                /*
                 * not crossing the threshold.
                 *   1 - update the number to the smaller size
                 *   2 - write the new buckets
                 *   3 - reset those no longer used.
                 */
                dpo_id_t *buckets;
                u32 old_n_buckets;

                old_n_buckets = rep->rep_n_buckets;
                buckets = replicate_get_buckets(rep);

                replicate_set_n_buckets(rep, n_buckets);
                CLIB_MEMORY_BARRIER();

                replicate_fill_buckets(rep, nhs,
                                       buckets,
                                       n_buckets);

                for (ii = n_buckets; ii < old_n_buckets; ii++)
                {
                    dpo_reset(&buckets[ii]);
                }
            }
        }
    }

    vec_foreach (nh, nhs)
    {
        dpo_reset(&nh->path_dpo);
    }
    vec_free(nhs);
}

static void
replicate_lock (dpo_id_t *dpo)
{
    replicate_t *rep;

    rep = replicate_get(dpo->dpoi_index);

    rep->rep_locks++;
}

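/*
 * Duplicate a replicate. If the requested flags match the original's, or
 * the caller still wants the local (receive) paths, the original can be
 * shared and its own index is returned; otherwise a copy is built with the
 * receive buckets stripped out.
 */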
index_t
replicate_dup (replicate_flags_t flags,
               index_t repi)
{
    replicate_t *rep, *copy;

    rep = replicate_get(repi);

    if (rep->rep_flags == flags ||
        flags & REPLICATE_FLAGS_HAS_LOCAL)
    {
        /*
         * we can include all the buckets from the original in the copy
         */
        return (repi);
    }
    else
    {
        /*
         * caller doesn't want the local paths that the original has
         */
        if (rep->rep_n_buckets == 1)
        {
            /*
             * original has only one bucket that is the local, so create
             * a new one with only the drop
             */
            copy = replicate_create_i (1, rep->rep_proto);

            replicate_set_bucket_i(copy, 0,
                                   replicate_get_buckets(copy),
                                   drop_dpo_get(rep->rep_proto));
        }
        else
        {
            dpo_id_t *old_buckets, *copy_buckets;
            u16 bucket, pos;

            copy = replicate_create_i(rep->rep_n_buckets - 1,
                                      rep->rep_proto);

            rep = replicate_get(repi);
            old_buckets = replicate_get_buckets(rep);
            copy_buckets = replicate_get_buckets(copy);
            pos = 0;

            for (bucket = 0; bucket < rep->rep_n_buckets; bucket++)
            {
                if (!dpo_is_receive(&old_buckets[bucket]))
                {
                    replicate_set_bucket_i(copy, pos, copy_buckets,
                                           (&old_buckets[bucket]));
                    pos++;
                }
            }
        }
    }

    return (replicate_get_index(copy));
}

static void
replicate_destroy (replicate_t *rep)
{
    dpo_id_t *buckets;
    int i;

    buckets = replicate_get_buckets(rep);

    for (i = 0; i < rep->rep_n_buckets; i++)
    {
        dpo_reset(&buckets[i]);
    }

    REP_DBG(rep, "destroy");
    if (!REP_HAS_INLINE_BUCKETS(rep))
    {
        vec_free(rep->rep_buckets);
    }

    pool_put(replicate_pool, rep);
}

static void
replicate_unlock (dpo_id_t *dpo)
{
    replicate_t *rep;

    rep = replicate_get(dpo->dpoi_index);

    rep->rep_locks--;

    if (0 == rep->rep_locks)
    {
        replicate_destroy(rep);
    }
}

static void
replicate_mem_show (void)
{
    fib_show_memory_usage("replicate",
                          pool_elts(replicate_pool),
                          pool_len(replicate_pool),
                          sizeof(replicate_t));
}

const static dpo_vft_t rep_vft = {
    .dv_lock = replicate_lock,
    .dv_unlock = replicate_unlock,
    .dv_format = format_replicate_dpo,
    .dv_mem_show = replicate_mem_show,
};

/**
 * @brief The per-protocol VLIB graph nodes that are assigned to a replicate
 *        object.
 *
 * this means that these graph nodes are ones from which a replicate is the
 * parent object in the DPO-graph.
 */
const static char* const replicate_ip4_nodes[] =
{
    "ip4-replicate",
    NULL,
};
const static char* const replicate_ip6_nodes[] =
{
    "ip6-replicate",
    NULL,
};
const static char* const replicate_mpls_nodes[] =
{
    "mpls-replicate",
    NULL,
};

const static char* const * const replicate_nodes[DPO_PROTO_NUM] =
{
    [DPO_PROTO_IP4]  = replicate_ip4_nodes,
    [DPO_PROTO_IP6]  = replicate_ip6_nodes,
    [DPO_PROTO_MPLS] = replicate_mpls_nodes,
};

void
replicate_module_init (void)
{
    dpo_register(DPO_REPLICATE, &rep_vft, replicate_nodes);
    replicate_logger = vlib_log_register_class("dpo", "replicate");
}

static clib_error_t *
replicate_show (vlib_main_t * vm,
                unformat_input_t * input,
                vlib_cli_command_t * cmd)
{
    index_t repi = INDEX_INVALID;

    while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
    {
        if (unformat (input, "%d", &repi))
            ;
        else
            break;
    }

    if (INDEX_INVALID != repi)
    {
        if (pool_is_free_index (replicate_pool, repi))
            vlib_cli_output (vm, "no such index %d", repi);
        else
            vlib_cli_output (vm, "%U", format_replicate, repi,
                             REPLICATE_FORMAT_DETAIL);
    }
    else
    {
        replicate_t *rep;

        pool_foreach (rep, replicate_pool)
        {
            vlib_cli_output (vm, "%U", format_replicate,
                             replicate_get_index(rep),
                             REPLICATE_FORMAT_NONE);
        }
    }

    return 0;
}

VLIB_CLI_COMMAND (replicate_show_command, static) = {
    .path = "show replicate",
    .short_help = "show replicate [<index>]",
    .function = replicate_show,
};
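
/*
 * e.g. "show replicate 0" prints the detailed format of replicate 0;
 * "show replicate" with no index lists every replicate in the pool.
 */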

typedef struct replicate_trace_t_
{
    index_t rep_index;
    dpo_id_t dpo;
} replicate_trace_t;

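/*
 * Data-plane replication: each packet is cloned once per bucket via
 * vlib_buffer_clone(); the clones reference the original packet data (only
 * a small head region is copied, so each copy can carry its own rewrite),
 * and each clone is enqueued to the graph node of its bucket's DPO. If
 * cloning yields fewer buffers than buckets, the allocation-failure
 * counter is bumped and only the successful clones are forwarded.
 */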
static uword
replicate_inline (vlib_main_t * vm,
                  vlib_node_runtime_t * node,
                  vlib_frame_t * frame)
{
    vlib_combined_counter_main_t * cm = &replicate_main.repm_counters;
    replicate_main_t * rm = &replicate_main;
    u32 n_left_from, * from, * to_next, next_index;
    u32 thread_index = vlib_get_thread_index();

    from = vlib_frame_vector_args (frame);
    n_left_from = frame->n_vectors;
    next_index = node->cached_next_index;

    while (n_left_from > 0)
    {
        u32 n_left_to_next;

        vlib_get_next_frame (vm, node, next_index,
                             to_next, n_left_to_next);

        while (n_left_from > 0 && n_left_to_next > 0)
        {
            u32 next0, ci0, bi0, bucket, repi0;
            const replicate_t *rep0;
            vlib_buffer_t * b0, *c0;
            const dpo_id_t *dpo0;
            u8 num_cloned;

            bi0 = from[0];
            from += 1;
            n_left_from -= 1;

            b0 = vlib_get_buffer (vm, bi0);
            repi0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
            rep0 = replicate_get(repi0);

            vlib_increment_combined_counter(
                cm, thread_index, repi0, 1,
                vlib_buffer_length_in_chain(vm, b0));

            vec_validate (rm->clones[thread_index], rep0->rep_n_buckets - 1);

            num_cloned = vlib_buffer_clone (vm, bi0, rm->clones[thread_index],
                                            rep0->rep_n_buckets,
                                            VLIB_BUFFER_CLONE_HEAD_SIZE);

            if (num_cloned != rep0->rep_n_buckets)
            {
                vlib_node_increment_counter
                    (vm, node->node_index,
                     REPLICATE_DPO_ERROR_BUFFER_ALLOCATION_FAILURE, 1);
            }

            for (bucket = 0; bucket < num_cloned; bucket++)
            {
                ci0 = rm->clones[thread_index][bucket];
                c0 = vlib_get_buffer(vm, ci0);

                to_next[0] = ci0;
                to_next += 1;
                n_left_to_next -= 1;

                dpo0 = replicate_get_bucket_i(rep0, bucket);
                next0 = dpo0->dpoi_next_node;
                vnet_buffer (c0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;

                if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
                {
                    replicate_trace_t *t;

                    t = vlib_add_trace (vm, node, c0, sizeof (*t));
                    t->rep_index = repi0;
                    t->dpo = *dpo0;
                }

                vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                                 to_next, n_left_to_next,
                                                 ci0, next0);
                if (PREDICT_FALSE (n_left_to_next == 0))
                {
                    vlib_put_next_frame (vm, node, next_index, n_left_to_next);
                    vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
                }
            }
            vec_reset_length (rm->clones[thread_index]);
        }

        vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

    return frame->n_vectors;
}

static u8 *
format_replicate_trace (u8 * s, va_list * args)
{
    CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
    CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
    replicate_trace_t *t = va_arg (*args, replicate_trace_t *);

    s = format (s, "replicate: %d via %U",
                t->rep_index,
                format_dpo_id, &t->dpo, 0);
    return s;
}

static uword
ip4_replicate (vlib_main_t * vm,
               vlib_node_runtime_t * node,
               vlib_frame_t * frame)
{
    return (replicate_inline (vm, node, frame));
}

/**
 * @brief IP4 replication node
 */
VLIB_REGISTER_NODE (ip4_replicate_node) = {
    .function = ip4_replicate,
    .name = "ip4-replicate",
    .vector_size = sizeof (u32),

    .n_errors = ARRAY_LEN(replicate_dpo_error_strings),
    .error_strings = replicate_dpo_error_strings,

    .format_trace = format_replicate_trace,
    .n_next_nodes = 1,
    .next_nodes = {
        [0] = "ip4-drop",
    },
};

static uword
ip6_replicate (vlib_main_t * vm,
               vlib_node_runtime_t * node,
               vlib_frame_t * frame)
{
    return (replicate_inline (vm, node, frame));
}

/**
 * @brief IPv6 replication node
 */
VLIB_REGISTER_NODE (ip6_replicate_node) = {
    .function = ip6_replicate,
    .name = "ip6-replicate",
    .vector_size = sizeof (u32),

    .n_errors = ARRAY_LEN(replicate_dpo_error_strings),
    .error_strings = replicate_dpo_error_strings,

    .format_trace = format_replicate_trace,
    .n_next_nodes = 1,
    .next_nodes = {
        [0] = "ip6-drop",
    },
};

static uword
mpls_replicate (vlib_main_t * vm,
                vlib_node_runtime_t * node,
                vlib_frame_t * frame)
{
    return (replicate_inline (vm, node, frame));
}

/**
 * @brief MPLS replication node
 */
VLIB_REGISTER_NODE (mpls_replicate_node) = {
    .function = mpls_replicate,
    .name = "mpls-replicate",
    .vector_size = sizeof (u32),

    .n_errors = ARRAY_LEN(replicate_dpo_error_strings),
    .error_strings = replicate_dpo_error_strings,

    .format_trace = format_replicate_trace,
    .n_next_nodes = 1,
    .next_nodes = {
        [0] = "mpls-drop",
    },
};

clib_error_t *
replicate_dpo_init (vlib_main_t * vm)
{
    replicate_main_t * rm = &replicate_main;

    vec_validate (rm->clones, vlib_num_workers());

    return 0;
}

VLIB_INIT_FUNCTION (replicate_dpo_init);