/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/ip/lookup.h>
#include <vnet/dpo/replicate_dpo.h>
#include <vnet/dpo/drop_dpo.h>
#include <vnet/adj/adj.h>
#include <vnet/mpls/mpls_types.h>

/**
 * the logger
 */
vlib_log_class_t replicate_logger;

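/**
 * Debug logging for replicate objects: prefix the caller's message with
 * the formatted state of the replicate.
 */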
#define REP_DBG(_rep, _fmt, _args...)           \
{                                               \
    vlib_log_debug(replicate_logger,            \
                   "rep:[%U]:" _fmt,            \
                   format_replicate,            \
                   replicate_get_index(_rep),   \
                   REPLICATE_FORMAT_NONE,       \
                   ##_args);                    \
}

#define foreach_replicate_dpo_error             \
_(BUFFER_ALLOCATION_FAILURE, "Buffer Allocation Failure")

typedef enum {
#define _(sym,str) REPLICATE_DPO_ERROR_##sym,
    foreach_replicate_dpo_error
#undef _
    REPLICATE_DPO_N_ERROR,
} replicate_dpo_error_t;

static char * replicate_dpo_error_strings[] = {
#define _(sym,string) string,
    foreach_replicate_dpo_error
#undef _
};

/**
 * Pool of all DPOs. It's not static so the DP can have fast access
 */
replicate_t *replicate_pool;

/**
 * The one instance of replicate main
 */
replicate_main_t replicate_main = {
    .repm_counters = {
        .name = "mroutes",
        .stat_segment_name = "/net/mroute",
    },
};

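/**
 * Get the pool index of a replicate object; the inverse of replicate_get().
 */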
static inline index_t
replicate_get_index (const replicate_t *rep)
{
    return (rep - replicate_pool);
}

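/**
 * Return the bucket array: small fan-outs use the storage inline in the
 * object, larger ones a separately allocated vector.
 */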
static inline dpo_id_t*
replicate_get_buckets (replicate_t *rep)
{
    if (REP_HAS_INLINE_BUCKETS(rep))
    {
        return (rep->rep_buckets_inline);
    }
    else
    {
        return (rep->rep_buckets);
    }
}

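/**
 * Allocate and zero a replicate from the pool and ensure its combined
 * counter exists and starts at zero.
 */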
static replicate_t *
replicate_alloc_i (void)
{
    replicate_t *rep;

    pool_get_aligned(replicate_pool, rep, CLIB_CACHE_LINE_BYTES);
    clib_memset(rep, 0, sizeof(*rep));

    vlib_validate_combined_counter(&(replicate_main.repm_counters),
                                   replicate_get_index(rep));
    vlib_zero_combined_counter(&(replicate_main.repm_counters),
                               replicate_get_index(rep));

    return (rep);
}

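/**
 * Format a replicate: index, bucket count and counters, then one line per
 * bucket.
 */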
static u8*
replicate_format (index_t repi,
                  replicate_format_flags_t flags,
                  u32 indent,
                  u8 *s)
{
    vlib_counter_t to;
    replicate_t *rep;
    dpo_id_t *buckets;
    u32 i;

    repi &= ~MPLS_IS_REPLICATE;
    rep = replicate_get(repi);
    vlib_get_combined_counter(&(replicate_main.repm_counters), repi, &to);
    buckets = replicate_get_buckets(rep);

    s = format(s, "%U: ", format_dpo_type, DPO_REPLICATE);
    s = format(s, "[index:%d buckets:%d ", repi, rep->rep_n_buckets);
    s = format(s, "to:[%Ld:%Ld]]", to.packets, to.bytes);

    for (i = 0; i < rep->rep_n_buckets; i++)
    {
        s = format(s, "\n%U", format_white_space, indent+2);
        s = format(s, "[%d]", i);
        s = format(s, " %U", format_dpo_id, &buckets[i], indent+6);
    }
    return (s);
}

u8*
format_replicate (u8 * s, va_list * args)
{
    index_t repi = va_arg(*args, index_t);
    replicate_format_flags_t flags = va_arg(*args, replicate_format_flags_t);

    return (replicate_format(repi, flags, 0, s));
}

static u8*
format_replicate_dpo (u8 * s, va_list * args)
{
    index_t repi = va_arg(*args, index_t);
    u32 indent = va_arg(*args, u32);

    return (replicate_format(repi, REPLICATE_FORMAT_DETAIL, indent, s));
}

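/**
 * Allocate a replicate with num_buckets buckets; counts above the inline
 * threshold get a cache-line aligned out-of-line vector.
 */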
static replicate_t *
replicate_create_i (u32 num_buckets,
                    dpo_proto_t rep_proto)
{
    replicate_t *rep;

    rep = replicate_alloc_i();
    rep->rep_n_buckets = num_buckets;
    rep->rep_proto = rep_proto;

    if (!REP_HAS_INLINE_BUCKETS(rep))
    {
        vec_validate_aligned(rep->rep_buckets,
                             rep->rep_n_buckets - 1,
                             CLIB_CACHE_LINE_BYTES);
    }

    REP_DBG(rep, "create");

    return (rep);
}

index_t
replicate_create (u32 n_buckets,
                  dpo_proto_t rep_proto)
{
    return (replicate_get_index(replicate_create_i(n_buckets, rep_proto)));
}

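/*
 * Typical client usage, as an illustrative sketch only (the DPOs named
 * here are hypothetical): create the replicate, then point each bucket
 * at a child DPO.
 *
 *   index_t repi = replicate_create(2, DPO_PROTO_IP4);
 *   replicate_set_bucket(repi, 0, &via_itf1_dpo);
 *   replicate_set_bucket(repi, 1, &via_itf2_dpo);
 */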
static inline void
replicate_set_bucket_i (replicate_t *rep,
                        u32 bucket,
                        dpo_id_t *buckets,
                        const dpo_id_t *next)
{
    dpo_stack(DPO_REPLICATE, rep->rep_proto, &buckets[bucket], next);
}

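/**
 * Point a bucket at 'next': the replicate is stacked on the next DPO,
 * which sets up the graph edge the data-path follows.
 */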
void
replicate_set_bucket (index_t repi,
                      u32 bucket,
                      const dpo_id_t *next)
{
    replicate_t *rep;
    dpo_id_t *buckets;

    repi &= ~MPLS_IS_REPLICATE;
    rep = replicate_get(repi);
    buckets = replicate_get_buckets(rep);

    ASSERT(bucket < rep->rep_n_buckets);

    replicate_set_bucket_i(rep, bucket, buckets, next);
}

int
replicate_is_drop (const dpo_id_t *dpo)
{
    replicate_t *rep;
    index_t repi;

    if (DPO_REPLICATE != dpo->dpoi_type)
        return (0);

    repi = dpo->dpoi_index & ~MPLS_IS_REPLICATE;
    rep = replicate_get(repi);

    if (1 == rep->rep_n_buckets)
    {
        return (dpo_is_drop(replicate_get_bucket_i(rep, 0)));
    }
    return (0);
}

const dpo_id_t *
replicate_get_bucket (index_t repi,
                      u32 bucket)
{
    replicate_t *rep;

    repi &= ~MPLS_IS_REPLICATE;
    rep = replicate_get(repi);

    return (replicate_get_bucket_i(rep, bucket));
}

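/**
 * Ensure there is at least one next-hop: an empty set is replaced with a
 * single path to the drop DPO of the given protocol.
 */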
static load_balance_path_t *
replicate_multipath_next_hop_fixup (load_balance_path_t *nhs,
                                    dpo_proto_t drop_proto)
{
    if (0 == vec_len(nhs))
    {
        load_balance_path_t *nh;

        /*
         * we need something for the replicate. so use the drop
         */
        vec_add2(nhs, nh, 1);

        nh->path_weight = 1;
        dpo_copy(&nh->path_dpo, drop_dpo_get(drop_proto));
    }

    return (nhs);
}

/*
 * Fill in the buckets in the block based on the corresponding
 * next-hop DPOs.
 */
static void
replicate_fill_buckets (replicate_t *rep,
                        load_balance_path_t *nhs,
                        dpo_id_t *buckets,
                        u32 n_buckets)
{
    load_balance_path_t * nh;
    u16 bucket;

    bucket = 0;

    /*
     * the next-hops have normalised weights. that means their sum is the
     * number of buckets we need to fill.
     */
    vec_foreach (nh, nhs)
    {
        ASSERT(bucket < n_buckets);
        replicate_set_bucket_i(rep, bucket++, buckets, &nh->path_dpo);
    }
}

static inline void
replicate_set_n_buckets (replicate_t *rep,
                         u32 n_buckets)
{
    rep->rep_n_buckets = n_buckets;
}

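/**
 * Replace the replicate's next-hop set. Updates are made safe for
 * in-flight packets by ordering the writes with memory barriers so the
 * advertised bucket count never exceeds the number of valid buckets.
 */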
void
replicate_multipath_update (const dpo_id_t *dpo,
                            load_balance_path_t * next_hops)
{
    load_balance_path_t * nh, * nhs;
    dpo_id_t *tmp_dpo;
    u32 ii, n_buckets;
    replicate_t *rep;
    index_t repi;

    ASSERT(DPO_REPLICATE == dpo->dpoi_type);
    repi = dpo->dpoi_index & ~MPLS_IS_REPLICATE;
    rep = replicate_get(repi);
    nhs = replicate_multipath_next_hop_fixup(next_hops,
                                             rep->rep_proto);
    n_buckets = vec_len(nhs);

    if (0 == rep->rep_n_buckets)
    {
        /*
         * first time initialisation. no packets in flight, so we can write
         * at leisure.
         */
        replicate_set_n_buckets(rep, n_buckets);

        if (!REP_HAS_INLINE_BUCKETS(rep))
            vec_validate_aligned(rep->rep_buckets,
                                 rep->rep_n_buckets - 1,
                                 CLIB_CACHE_LINE_BYTES);

        replicate_fill_buckets(rep, nhs,
                               replicate_get_buckets(rep),
                               n_buckets);
    }
    else
    {
        /*
         * This is a modification of an existing replicate.
         * We need to ensure that packets in flight see a consistent state:
         * the number of buckets the REP reports must never exceed the
         * number it actually has. So if the number of buckets is
         * increasing, we must update the bucket array first, then the
         * reported number; vice-versa if the number of buckets goes down.
         */
        if (n_buckets == rep->rep_n_buckets)
        {
            /*
             * no change in the number of buckets. we can simply fill what
             * is new over what is old.
             */
            replicate_fill_buckets(rep, nhs,
                                   replicate_get_buckets(rep),
                                   n_buckets);
        }
        else if (n_buckets > rep->rep_n_buckets)
        {
            /*
             * we have more buckets. fill the new buckets first and update
             * the reported number last, so in-flight packets only ever see
             * in-range indices.
             */
            if (n_buckets > REP_NUM_INLINE_BUCKETS &&
                rep->rep_n_buckets <= REP_NUM_INLINE_BUCKETS)
            {
                /*
                 * the new increased number of buckets is crossing the
                 * threshold from inline storage to out-of-line. Alloc the
                 * out-of-line buckets first, then fixup the number, then
                 * reset the inlines.
                 */
                ASSERT(NULL == rep->rep_buckets);
                vec_validate_aligned(rep->rep_buckets,
                                     n_buckets - 1,
                                     CLIB_CACHE_LINE_BYTES);

                replicate_fill_buckets(rep, nhs,
                                       rep->rep_buckets,
                                       n_buckets);
                CLIB_MEMORY_BARRIER();
                replicate_set_n_buckets(rep, n_buckets);

                CLIB_MEMORY_BARRIER();

                for (ii = 0; ii < REP_NUM_INLINE_BUCKETS; ii++)
                {
                    dpo_reset(&rep->rep_buckets_inline[ii]);
                }
            }
            else
            {
                if (n_buckets <= REP_NUM_INLINE_BUCKETS)
                {
                    /*
                     * we are not crossing the threshold and it's still
                     * inline buckets. we can write the new over the old.
                     */
                    replicate_fill_buckets(rep, nhs,
                                           replicate_get_buckets(rep),
                                           n_buckets);
                    CLIB_MEMORY_BARRIER();
                    replicate_set_n_buckets(rep, n_buckets);
                }
                else
                {
                    /*
                     * we are not crossing the threshold. We need a new
                     * bucket array to hold the increased number of choices.
                     */
                    dpo_id_t *new_buckets, *old_buckets, *tmp_dpo;

                    new_buckets = NULL;
                    old_buckets = replicate_get_buckets(rep);

                    vec_validate_aligned(new_buckets,
                                         n_buckets - 1,
                                         CLIB_CACHE_LINE_BYTES);

                    replicate_fill_buckets(rep, nhs, new_buckets, n_buckets);
                    CLIB_MEMORY_BARRIER();
                    rep->rep_buckets = new_buckets;
                    CLIB_MEMORY_BARRIER();
                    replicate_set_n_buckets(rep, n_buckets);

                    vec_foreach(tmp_dpo, old_buckets)
                    {
                        dpo_reset(tmp_dpo);
                    }
                    vec_free(old_buckets);
                }
            }
        }
        else
        {
            /*
             * bucket size shrinkage.
             */
            if (n_buckets <= REP_NUM_INLINE_BUCKETS &&
                rep->rep_n_buckets > REP_NUM_INLINE_BUCKETS)
            {
                /*
                 * the new decreased number of buckets is crossing the
                 * threshold from out-of-line storage to inline:
                 *   1 - Fill the inline buckets,
                 *   2 - fixup the number (at this point the inline buckets
                 *       are in use).
                 *   3 - free the out-of-line buckets
                 */
                replicate_fill_buckets(rep, nhs,
                                       rep->rep_buckets_inline,
                                       n_buckets);
                CLIB_MEMORY_BARRIER();
                replicate_set_n_buckets(rep, n_buckets);
                CLIB_MEMORY_BARRIER();

                vec_foreach(tmp_dpo, rep->rep_buckets)
                {
                    dpo_reset(tmp_dpo);
                }
                vec_free(rep->rep_buckets);
            }
            else
            {
                /*
                 * not crossing the threshold.
                 *   1 - update the number to the smaller size
                 *   2 - write the new buckets
                 *   3 - reset those no longer used.
                 */
                dpo_id_t *buckets;
                u32 old_n_buckets;

                old_n_buckets = rep->rep_n_buckets;
                buckets = replicate_get_buckets(rep);

                replicate_set_n_buckets(rep, n_buckets);
                CLIB_MEMORY_BARRIER();

                replicate_fill_buckets(rep, nhs,
                                       buckets,
                                       n_buckets);

                for (ii = n_buckets; ii < old_n_buckets; ii++)
                {
                    dpo_reset(&buckets[ii]);
                }
            }
        }
    }

    vec_foreach (nh, nhs)
    {
        dpo_reset(&nh->path_dpo);
    }
    vec_free(nhs);
}

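/**
 * Take a reference on a replicate object.
 */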
static void
replicate_lock (dpo_id_t *dpo)
{
    replicate_t *rep;

    rep = replicate_get(dpo->dpoi_index);

    rep->rep_locks++;
}

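/**
 * Last lock has gone: reset each bucket, free any out-of-line bucket
 * vector and return the object to the pool.
 */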
static void
replicate_destroy (replicate_t *rep)
{
    dpo_id_t *buckets;
    int i;

    buckets = replicate_get_buckets(rep);

    for (i = 0; i < rep->rep_n_buckets; i++)
    {
        dpo_reset(&buckets[i]);
    }

    REP_DBG(rep, "destroy");
    if (!REP_HAS_INLINE_BUCKETS(rep))
    {
        vec_free(rep->rep_buckets);
    }

    pool_put(replicate_pool, rep);
}

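/**
 * Release a reference; the object is destroyed when the lock count
 * reaches zero.
 */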
static void
replicate_unlock (dpo_id_t *dpo)
{
    replicate_t *rep;

    rep = replicate_get(dpo->dpoi_index);

    rep->rep_locks--;

    if (0 == rep->rep_locks)
    {
        replicate_destroy(rep);
    }
}

static void
replicate_mem_show (void)
{
    fib_show_memory_usage("replicate",
                          pool_elts(replicate_pool),
                          pool_len(replicate_pool),
                          sizeof(replicate_t));
}

const static dpo_vft_t rep_vft = {
    .dv_lock = replicate_lock,
    .dv_unlock = replicate_unlock,
    .dv_format = format_replicate_dpo,
    .dv_mem_show = replicate_mem_show,
};

/**
 * @brief The per-protocol VLIB graph nodes that are assigned to a replicate
 *        object.
 *
 * this means that these graph nodes are ones from which a replicate is the
 * parent object in the DPO-graph.
 */
const static char* const replicate_ip4_nodes[] =
{
    "ip4-replicate",
    NULL,
};
const static char* const replicate_ip6_nodes[] =
{
    "ip6-replicate",
    NULL,
};
const static char* const replicate_mpls_nodes[] =
{
    "mpls-replicate",
    NULL,
};

const static char* const * const replicate_nodes[DPO_PROTO_NUM] =
{
    [DPO_PROTO_IP4]  = replicate_ip4_nodes,
    [DPO_PROTO_IP6]  = replicate_ip6_nodes,
    [DPO_PROTO_MPLS] = replicate_mpls_nodes,
};

void
replicate_module_init (void)
{
    dpo_register(DPO_REPLICATE, &rep_vft, replicate_nodes);
    replicate_logger = vlib_log_register_class("dpo", "replicate");
}

static clib_error_t *
replicate_show (vlib_main_t * vm,
                unformat_input_t * input,
                vlib_cli_command_t * cmd)
{
    index_t repi = INDEX_INVALID;

    while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
    {
        if (unformat (input, "%d", &repi))
            ;
        else
            break;
    }

    if (INDEX_INVALID != repi)
    {
        vlib_cli_output (vm, "%U", format_replicate, repi,
                         REPLICATE_FORMAT_DETAIL);
    }
    else
    {
        replicate_t *rep;

        pool_foreach(rep, replicate_pool,
        ({
            vlib_cli_output (vm, "%U", format_replicate,
                             replicate_get_index(rep),
                             REPLICATE_FORMAT_NONE);
        }));
    }

    return 0;
}

VLIB_CLI_COMMAND (replicate_show_command, static) = {
    .path = "show replicate",
    .short_help = "show replicate [<index>]",
    .function = replicate_show,
};

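/**
 * Per-packet trace data captured by the replicate nodes.
 */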
typedef struct replicate_trace_t_
{
    index_t rep_index;
    dpo_id_t dpo;
} replicate_trace_t;

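/**
 * The replication data-path: clone each packet once per bucket and
 * enqueue every clone to the graph node of its bucket's DPO. If cloning
 * yields fewer buffers than buckets, the shortfall is counted as a buffer
 * allocation failure and the packet reaches fewer destinations.
 */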
static uword
replicate_inline (vlib_main_t * vm,
                  vlib_node_runtime_t * node,
                  vlib_frame_t * frame)
{
    vlib_combined_counter_main_t * cm = &replicate_main.repm_counters;
    replicate_main_t * rm = &replicate_main;
    u32 n_left_from, * from, * to_next, next_index;
    u32 thread_index = vlib_get_thread_index();

    from = vlib_frame_vector_args (frame);
    n_left_from = frame->n_vectors;
    next_index = node->cached_next_index;

    while (n_left_from > 0)
    {
        u32 n_left_to_next;

        vlib_get_next_frame (vm, node, next_index,
                             to_next, n_left_to_next);

        while (n_left_from > 0 && n_left_to_next > 0)
        {
            u32 next0, ci0, bi0, bucket, repi0;
            const replicate_t *rep0;
            vlib_buffer_t * b0, *c0;
            const dpo_id_t *dpo0;
            u8 num_cloned;

            bi0 = from[0];
            from += 1;
            n_left_from -= 1;

            b0 = vlib_get_buffer (vm, bi0);
            repi0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
            rep0 = replicate_get(repi0);

            vlib_increment_combined_counter(
                cm, thread_index, repi0, 1,
                vlib_buffer_length_in_chain(vm, b0));

            vec_validate (rm->clones[thread_index], rep0->rep_n_buckets - 1);

            num_cloned = vlib_buffer_clone (vm, bi0, rm->clones[thread_index],
                                            rep0->rep_n_buckets,
                                            VLIB_BUFFER_CLONE_HEAD_SIZE);

            if (num_cloned != rep0->rep_n_buckets)
            {
                vlib_node_increment_counter
                    (vm, node->node_index,
                     REPLICATE_DPO_ERROR_BUFFER_ALLOCATION_FAILURE, 1);
            }

            for (bucket = 0; bucket < num_cloned; bucket++)
            {
                ci0 = rm->clones[thread_index][bucket];
                c0 = vlib_get_buffer(vm, ci0);

                to_next[0] = ci0;
                to_next += 1;
                n_left_to_next -= 1;

                dpo0 = replicate_get_bucket_i(rep0, bucket);
                next0 = dpo0->dpoi_next_node;
                vnet_buffer (c0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;

                if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
                {
                    replicate_trace_t *t;

                    if (c0 != b0)
                    {
                        vlib_buffer_copy_trace_flag (vm, b0, ci0);
                        VLIB_BUFFER_TRACE_TRAJECTORY_INIT (c0);
                    }
                    t = vlib_add_trace (vm, node, c0, sizeof (*t));
                    t->rep_index = repi0;
                    t->dpo = *dpo0;
                }

                vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                                 to_next, n_left_to_next,
                                                 ci0, next0);
                if (PREDICT_FALSE (n_left_to_next == 0))
                {
                    vlib_put_next_frame (vm, node, next_index, n_left_to_next);
                    vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
                }
            }
            vec_reset_length (rm->clones[thread_index]);
        }

        vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

    return frame->n_vectors;
}

static u8 *
format_replicate_trace (u8 * s, va_list * args)
{
    CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
    CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
    replicate_trace_t *t = va_arg (*args, replicate_trace_t *);

    s = format (s, "replicate: %d via %U",
                t->rep_index,
                format_dpo_id, &t->dpo, 0);
    return s;
}

static uword
ip4_replicate (vlib_main_t * vm,
               vlib_node_runtime_t * node,
               vlib_frame_t * frame)
{
    return (replicate_inline (vm, node, frame));
}

/**
 * @brief IP4 replication node
 */
VLIB_REGISTER_NODE (ip4_replicate_node) = {
    .function = ip4_replicate,
    .name = "ip4-replicate",
    .vector_size = sizeof (u32),

    .n_errors = ARRAY_LEN(replicate_dpo_error_strings),
    .error_strings = replicate_dpo_error_strings,

    .format_trace = format_replicate_trace,
    .n_next_nodes = 1,
    .next_nodes = {
        [0] = "ip4-drop",
    },
};

static uword
ip6_replicate (vlib_main_t * vm,
               vlib_node_runtime_t * node,
               vlib_frame_t * frame)
{
    return (replicate_inline (vm, node, frame));
}

/**
 * @brief IPv6 replication node
 */
VLIB_REGISTER_NODE (ip6_replicate_node) = {
    .function = ip6_replicate,
    .name = "ip6-replicate",
    .vector_size = sizeof (u32),

    .n_errors = ARRAY_LEN(replicate_dpo_error_strings),
    .error_strings = replicate_dpo_error_strings,

    .format_trace = format_replicate_trace,
    .n_next_nodes = 1,
    .next_nodes = {
        [0] = "ip6-drop",
    },
};

static uword
mpls_replicate (vlib_main_t * vm,
                vlib_node_runtime_t * node,
                vlib_frame_t * frame)
{
    return (replicate_inline (vm, node, frame));
}

/**
 * @brief MPLS replication node
 */
VLIB_REGISTER_NODE (mpls_replicate_node) = {
    .function = mpls_replicate,
    .name = "mpls-replicate",
    .vector_size = sizeof (u32),

    .n_errors = ARRAY_LEN(replicate_dpo_error_strings),
    .error_strings = replicate_dpo_error_strings,

    .format_trace = format_replicate_trace,
    .n_next_nodes = 1,
    .next_nodes = {
        [0] = "mpls-drop",
    },
};

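/**
 * Size the per-thread clone vectors: one entry per worker plus the main
 * thread.
 */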
clib_error_t *
replicate_dpo_init (vlib_main_t * vm)
{
    replicate_main_t * rm = &replicate_main;

    vec_validate (rm->clones, vlib_num_workers());

    return 0;
}

VLIB_INIT_FUNCTION (replicate_dpo_init);