/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/**
 * @brief
 * A Data-Path Object is an object that represents actions that are
 * applied to packets as they are switched through VPP.
 *
 * The DPO is a base class that is specialised by other objects to provide
 * concrete actions.
 *
 * The VLIB graph nodes form a graph of types; the DPO graph is a graph of
 * instances.
 */
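
/*
 * A minimal usage sketch of the instance graph: a consumer records a parent
 * DPO in a dpo_id_t with dpo_set(), a child stacks on it with dpo_stack(),
 * and the reference is released with dpo_reset().  The adj_index and bucket
 * variables below are placeholders, not values defined in this file:
 *
 *   dpo_id_t via_adj = DPO_INVALID;
 *
 *   dpo_set(&via_adj, DPO_ADJACENCY, DPO_PROTO_IP4, adj_index);
 *   dpo_stack(DPO_LOAD_BALANCE, DPO_PROTO_IP4, &bucket, &via_adj);
 *   dpo_reset(&via_adj);
 */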

// clang-format off

#include <vnet/dpo/dpo.h>
#include <vnet/ip/lookup.h>
#include <vnet/ip/format.h>
#include <vnet/adj/adj.h>

#include <vnet/dpo/load_balance.h>
#include <vnet/dpo/mpls_label_dpo.h>
#include <vnet/dpo/lookup_dpo.h>
#include <vnet/dpo/drop_dpo.h>
#include <vnet/dpo/receive_dpo.h>
#include <vnet/dpo/punt_dpo.h>
#include <vnet/dpo/classify_dpo.h>
#include <vnet/dpo/ip_null_dpo.h>
#include <vnet/dpo/replicate_dpo.h>
#include <vnet/dpo/interface_rx_dpo.h>
#include <vnet/dpo/interface_tx_dpo.h>
#include <vnet/dpo/mpls_disposition.h>
#include <vnet/dpo/dvr_dpo.h>
#include <vnet/dpo/l3_proxy_dpo.h>
#include <vnet/dpo/ip6_ll_dpo.h>
#include <vnet/dpo/pw_cw.h>

/**
 * Array of char* names for the DPO types and protos
 */
static const char* dpo_type_names[] = DPO_TYPES;
static const char* dpo_proto_names[] = DPO_PROTOS;

/**
 * @brief Vector of virtual function tables for the DPO types
 *
 * This is a vector so we can dynamically register new DPO types in plugins.
 */
static dpo_vft_t *dpo_vfts;

/**
 * @brief vector of graph node names associated with each DPO type and protocol.
 *
 *  dpo_nodes[child_type][child_proto][node_X] = node_name;
 * i.e.
 *  dpo_node[DPO_LOAD_BALANCE][DPO_PROTO_IP4][0] = "ip4-lookup"
 *  dpo_node[DPO_LOAD_BALANCE][DPO_PROTO_IP4][1] = "ip4-load-balance"
 *
 * This is a vector so we can dynamically register new DPO types in plugins.
 */
static const char* const * const ** dpo_nodes;

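/*
 * A sketch of the shape a DPO type's node table takes when handed to
 * dpo_register(); the "example" node and table names are placeholders, not
 * real graph nodes. Each per-protocol list is NULL terminated and the outer
 * table is indexed by dpo_proto_t:
 *
 *   const static char* const example_ip4_nodes[] =
 *   {
 *       "example-ip4-node",
 *       NULL,
 *   };
 *   const static char* const * const example_nodes[DPO_PROTO_NUM] =
 *   {
 *       [DPO_PROTO_IP4] = example_ip4_nodes,
 *   };
 */
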
/**
 * @brief Vector of edge indices from parent DPO nodes to child
 *
 * dpo_edges[child_type][child_proto][parent_type][parent_proto] = edge_index
 *
 * This array is derived at init time from the dpo_nodes above. Note that
 * the third dimension in dpo_nodes is lost, hence, the edge index from each
 * node MUST be the same.
 * Including both the child and parent protocol is required to support the
 * case where it changes as the graph is traversed, most notably when an
 * MPLS label is popped.
 *
 * Note that this array is child type specific, not child instance specific.
 */
static u32 ****dpo_edges;

/**
 * @brief The DPO type value that can be assigned to the next dynamic
 * type registration.
 */
static dpo_type_t dpo_dynamic = DPO_LAST;

dpo_proto_t
vnet_link_to_dpo_proto (vnet_link_t linkt)
{
    switch (linkt)
    {
    case VNET_LINK_IP6:
        return (DPO_PROTO_IP6);
    case VNET_LINK_IP4:
        return (DPO_PROTO_IP4);
    case VNET_LINK_MPLS:
        return (DPO_PROTO_MPLS);
    case VNET_LINK_ETHERNET:
        return (DPO_PROTO_ETHERNET);
    case VNET_LINK_NSH:
        return (DPO_PROTO_NSH);
    case VNET_LINK_ARP:
        break;
    }
    ASSERT(0);
    return (0);
}

vnet_link_t
dpo_proto_to_link (dpo_proto_t dp)
{
    switch (dp)
    {
    case DPO_PROTO_IP6:
        return (VNET_LINK_IP6);
    case DPO_PROTO_IP4:
        return (VNET_LINK_IP4);
    case DPO_PROTO_MPLS:
    case DPO_PROTO_BIER:
        return (VNET_LINK_MPLS);
    case DPO_PROTO_ETHERNET:
        return (VNET_LINK_ETHERNET);
    case DPO_PROTO_NSH:
        return (VNET_LINK_NSH);
    }
    return (~0);
}

u8 *
format_dpo_type (u8 * s, va_list * args)
{
    dpo_type_t type = va_arg (*args, int);

    s = format(s, "%s", dpo_type_names[type]);

    return (s);
}

u8 *
format_dpo_id (u8 * s, va_list * args)
{
    dpo_id_t *dpo = va_arg (*args, dpo_id_t*);
    u32 indent = va_arg (*args, u32);

    s = format(s, "[@%d]: ", dpo->dpoi_next_node);

    if (NULL != dpo_vfts[dpo->dpoi_type].dv_format)
    {
        s = format(s, "%U",
                   dpo_vfts[dpo->dpoi_type].dv_format,
                   dpo->dpoi_index,
                   indent);
    }
    else
    {
        switch (dpo->dpoi_type)
        {
        case DPO_FIRST:
            s = format(s, "unset");
            break;
        default:
            s = format(s, "unknown");
            break;
        }
    }
    return (s);
}

u8 *
format_dpo_proto (u8 * s, va_list * args)
{
    dpo_proto_t proto = va_arg (*args, int);

    return (format(s, "%s", dpo_proto_names[proto]));
}

void
dpo_set (dpo_id_t *dpo,
         dpo_type_t type,
         dpo_proto_t proto,
         index_t index)
{
    dpo_id_t tmp = *dpo;

    dpo->dpoi_type = type;
    dpo->dpoi_proto = proto;
    dpo->dpoi_index = index;

    if (DPO_ADJACENCY == type)
    {
        /*
         * set the adj subtype
         */
        ip_adjacency_t *adj;

        adj = adj_get(index);

        switch (adj->lookup_next_index)
        {
        case IP_LOOKUP_NEXT_ARP:
            dpo->dpoi_type = DPO_ADJACENCY_INCOMPLETE;
            break;
        case IP_LOOKUP_NEXT_MIDCHAIN:
            dpo->dpoi_type = DPO_ADJACENCY_MIDCHAIN;
            break;
        case IP_LOOKUP_NEXT_MCAST_MIDCHAIN:
            dpo->dpoi_type = DPO_ADJACENCY_MCAST_MIDCHAIN;
            break;
        case IP_LOOKUP_NEXT_MCAST:
            dpo->dpoi_type = DPO_ADJACENCY_MCAST;
            break;
        case IP_LOOKUP_NEXT_GLEAN:
            dpo->dpoi_type = DPO_ADJACENCY_GLEAN;
            break;
        default:
            break;
        }
    }
    dpo_lock(dpo);
    dpo_unlock(&tmp);
}

void
dpo_reset (dpo_id_t *dpo)
{
    dpo_id_t tmp = DPO_INVALID;

    /*
     * use the atomic copy operation.
     */
    dpo_copy(dpo, &tmp);
}

/**
 * \brief
 * Compare two Data-path objects
 *
 * like memcmp, returns 0 if matching, !0 otherwise.
 */
int
dpo_cmp (const dpo_id_t *dpo1,
         const dpo_id_t *dpo2)
{
    int res;

    res = dpo1->dpoi_type - dpo2->dpoi_type;

    if (0 != res) return (res);

    return (dpo1->dpoi_index - dpo2->dpoi_index);
}

void
dpo_copy (dpo_id_t *dst,
          const dpo_id_t *src)
{
    dpo_id_t tmp = {
        .as_u64 = dst->as_u64
    };

    /*
     * the destination is written in a single u64 write - hence atomic w.r.t.
     * any packets in flight.
     */
    dst->as_u64 = src->as_u64;

    dpo_lock(dst);
    dpo_unlock(&tmp);
}

int
dpo_is_adj (const dpo_id_t *dpo)
{
    return ((dpo->dpoi_type == DPO_ADJACENCY) ||
            (dpo->dpoi_type == DPO_ADJACENCY_INCOMPLETE) ||
            (dpo->dpoi_type == DPO_ADJACENCY_GLEAN) ||
            (dpo->dpoi_type == DPO_ADJACENCY_MCAST) ||
            (dpo->dpoi_type == DPO_ADJACENCY_MCAST_MIDCHAIN) ||
            (dpo->dpoi_type == DPO_ADJACENCY_MIDCHAIN));
}

static u32 *
dpo_default_get_next_node (const dpo_id_t *dpo)
{
    u32 *node_indices = NULL;
    const char *node_name;
    u32 ii = 0;

    node_name = dpo_nodes[dpo->dpoi_type][dpo->dpoi_proto][ii];
    while (NULL != node_name)
    {
        vlib_node_t *node;

        node = vlib_get_node_by_name(vlib_get_main(), (u8*) node_name);
        ASSERT(NULL != node);
        vec_add1(node_indices, node->index);

        ++ii;
        node_name = dpo_nodes[dpo->dpoi_type][dpo->dpoi_proto][ii];
    }

    return (node_indices);
}

/**
 * A default variant of the make interpose function that just returns
 * the original
 */
static void
dpo_default_mk_interpose (const dpo_id_t *original,
                          const dpo_id_t *parent,
                          dpo_id_t *clone)
{
    dpo_copy(clone, original);
}

void
dpo_register (dpo_type_t type,
              const dpo_vft_t *vft,
              const char * const * const * nodes)
{
    vec_validate(dpo_vfts, type);
    dpo_vfts[type] = *vft;
    if (NULL == dpo_vfts[type].dv_get_next_node)
    {
        dpo_vfts[type].dv_get_next_node = dpo_default_get_next_node;
    }
    if (NULL == dpo_vfts[type].dv_mk_interpose)
    {
        dpo_vfts[type].dv_mk_interpose = dpo_default_mk_interpose;
    }

    vec_validate(dpo_nodes, type);
    dpo_nodes[type] = nodes;
}

dpo_type_t
dpo_register_new_type (const dpo_vft_t *vft,
                       const char * const * const * nodes)
{
    dpo_type_t type = dpo_dynamic++;

    dpo_register(type, vft, nodes);

    return (type);
}
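
/*
 * A sketch of a plugin registering a dynamic DPO type; the example_dpo_*
 * callbacks and the node table (shaped as in the dpo_nodes sketch above)
 * are hypothetical, only the dpo_vft_t fields shown are real:
 *
 *   const static dpo_vft_t example_vft = {
 *       .dv_lock = example_dpo_lock,
 *       .dv_unlock = example_dpo_unlock,
 *       .dv_format = format_example_dpo,
 *   };
 *
 *   example_dpo_type = dpo_register_new_type(&example_vft, example_nodes);
 */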

void
dpo_mk_interpose (const dpo_id_t *original,
                  const dpo_id_t *parent,
                  dpo_id_t *clone)
{
    if (!dpo_id_is_valid(original))
        return;

    dpo_vfts[original->dpoi_type].dv_mk_interpose(original, parent, clone);
}

void
dpo_lock (dpo_id_t *dpo)
{
    if (!dpo_id_is_valid(dpo))
        return;

    dpo_vfts[dpo->dpoi_type].dv_lock(dpo);
}

void
dpo_unlock (dpo_id_t *dpo)
{
    if (!dpo_id_is_valid(dpo))
        return;

    dpo_vfts[dpo->dpoi_type].dv_unlock(dpo);
}

u32
dpo_get_urpf(const dpo_id_t *dpo)
{
    if (dpo_id_is_valid(dpo) &&
        (NULL != dpo_vfts[dpo->dpoi_type].dv_get_urpf))
    {
        return (dpo_vfts[dpo->dpoi_type].dv_get_urpf(dpo));
    }

    return (~0);
}

u16
dpo_get_mtu(const dpo_id_t *dpo)
{
    if (dpo_id_is_valid(dpo) &&
        (NULL != dpo_vfts[dpo->dpoi_type].dv_get_mtu))
    {
        return (dpo_vfts[dpo->dpoi_type].dv_get_mtu(dpo));
    }

    return (0xffff);
}

static u32
dpo_get_next_node (dpo_type_t child_type,
                   dpo_proto_t child_proto,
                   const dpo_id_t *parent_dpo)
{
    dpo_proto_t parent_proto;
    dpo_type_t parent_type;

    parent_type = parent_dpo->dpoi_type;
    parent_proto = parent_dpo->dpoi_proto;

    vec_validate(dpo_edges, child_type);
    vec_validate(dpo_edges[child_type], child_proto);
    vec_validate(dpo_edges[child_type][child_proto], parent_type);
    vec_validate_init_empty(
        dpo_edges[child_type][child_proto][parent_type],
        parent_proto, ~0);

    /*
     * if the edge index has not yet been created for this node to node transition
     */
    if (~0 == dpo_edges[child_type][child_proto][parent_type][parent_proto])
    {
        vlib_node_t *child_node;
        u32 *parent_indices;
        vlib_main_t *vm;
        u32 edge, *pi, cc;

        vm = vlib_get_main();

        ASSERT(NULL != dpo_vfts[parent_type].dv_get_next_node);
        ASSERT(NULL != dpo_nodes[child_type]);
        ASSERT(NULL != dpo_nodes[child_type][child_proto]);

        cc = 0;
        parent_indices = dpo_vfts[parent_type].dv_get_next_node(parent_dpo);

        vlib_worker_thread_barrier_sync(vm);

        /*
         * create a graph arc from each of the child's registered node types,
         * to each of the parent's.
         */
        while (NULL != dpo_nodes[child_type][child_proto][cc])
        {
            child_node =
                vlib_get_node_by_name(vm,
                                      (u8*) dpo_nodes[child_type][child_proto][cc]);

            vec_foreach(pi, parent_indices)
            {
                edge = vlib_node_add_next(vm, child_node->index, *pi);

                if (~0 == dpo_edges[child_type][child_proto][parent_type][parent_proto])
                {
                    dpo_edges[child_type][child_proto][parent_type][parent_proto] = edge;
                }
                else
                {
                    ASSERT(dpo_edges[child_type][child_proto][parent_type][parent_proto] == edge);
                }
            }
            cc++;
        }

        vlib_worker_thread_barrier_release(vm);
        vec_free(parent_indices);
    }

    return (dpo_edges[child_type][child_proto][parent_type][parent_proto]);
}

/**
 * @brief return the already stacked next node index for a given
 * child_type/child_proto and parent_type/parent_proto.
 * The VLIB graph arc used is taken from the parent and child types
 * passed.
 */
u32
dpo_get_next_node_by_type_and_proto (dpo_type_t child_type,
                                     dpo_proto_t child_proto,
                                     dpo_type_t parent_type,
                                     dpo_proto_t parent_proto)
{
    return (dpo_edges[child_type][child_proto][parent_type][parent_proto]);
}

/**
 * @brief Stack one DPO object on another, and thus establish a child-parent
 * relationship. The VLIB graph arc used is taken from the parent and child types
 * passed.
 */
static void
dpo_stack_i (u32 edge,
             dpo_id_t *dpo,
             const dpo_id_t *parent)
{
    /*
     * in order to get an atomic update of the child's parent reference, we
     * build a temporary from a copy of the parent, add the next_node edge,
     * then copy the result to the child.
     */
    dpo_id_t tmp = DPO_INVALID;
    dpo_copy(&tmp, parent);

    /*
     * get the edge index for the parent to child VLIB graph transition
     */
    tmp.dpoi_next_node = edge;

    /*
     * this update is atomic.
     */
    dpo_copy(dpo, &tmp);

    dpo_reset(&tmp);
}

/**
 * @brief Stack one DPO object on another, and thus establish a child-parent
 * relationship. The VLIB graph arc used is taken from the parent and child types
 * passed.
 */
void
dpo_stack (dpo_type_t child_type,
           dpo_proto_t child_proto,
           dpo_id_t *dpo,
           const dpo_id_t *parent)
{
    dpo_stack_i(dpo_get_next_node(child_type, child_proto, parent), dpo, parent);
}

/**
 * @brief Stack one DPO object on another, and thus establish a child-parent
 * relationship. A new VLIB graph arc is created from the child node passed
 * to the nodes registered by the parent. The VLIB infra will ensure this arc
 * is added only once.
 */
void
dpo_stack_from_node (u32 child_node_index,
                     dpo_id_t *dpo,
                     const dpo_id_t *parent)
{
    dpo_type_t parent_type;
    u32 *parent_indices;
    vlib_main_t *vm;
    u32 edge, *pi;

    edge = 0;
    parent_type = parent->dpoi_type;
    vm = vlib_get_main();

    ASSERT(NULL != dpo_vfts[parent_type].dv_get_next_node);
    parent_indices = dpo_vfts[parent_type].dv_get_next_node(parent);
    ASSERT(parent_indices);

    /*
     * This loop is purposefully written with the worker thread lock in the
     * inner loop because:
     * 1) the likelihood that the edge does not exist is smaller
     * 2) the likelihood there is more than one node is even smaller
     * so we are optimising for not needing to take the lock
     */
    vec_foreach(pi, parent_indices)
    {
        edge = vlib_node_get_next(vm, child_node_index, *pi);

        if (~0 == edge)
        {
            vlib_worker_thread_barrier_sync(vm);

            edge = vlib_node_add_next(vm, child_node_index, *pi);

            vlib_worker_thread_barrier_release(vm);
        }
    }
    dpo_stack_i(edge, dpo, parent);

    /* free this local vector to avoid a memory leak */
    vec_free(parent_indices);
}
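
/*
 * A sketch of stacking from a graph node that is not itself a registered
 * DPO type (e.g. a plugin feature node); my_node and parent below are
 * hypothetical:
 *
 *   dpo_id_t child = DPO_INVALID;
 *
 *   dpo_stack_from_node(my_node.index, &child, &parent);
 *   ...
 *   dpo_reset(&child);
 */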

static clib_error_t *
dpo_module_init (vlib_main_t * vm)
{
    drop_dpo_module_init();
    punt_dpo_module_init();
    receive_dpo_module_init();
    load_balance_module_init();
    mpls_label_dpo_module_init();
    classify_dpo_module_init();
    lookup_dpo_module_init();
    ip_null_dpo_module_init();
    ip6_ll_dpo_module_init();
    replicate_module_init();
    interface_rx_dpo_module_init();
    interface_tx_dpo_module_init();
    mpls_disp_dpo_module_init();
    dvr_dpo_module_init();
    l3_proxy_dpo_module_init();
    pw_cw_dpo_module_init();

    return (NULL);
}

/* *INDENT-OFF* */
VLIB_INIT_FUNCTION(dpo_module_init) =
{
    .runs_before = VLIB_INITS ("ip_main_init"),
};
/* *INDENT-ON* */

static clib_error_t *
dpo_memory_show (vlib_main_t * vm,
                 unformat_input_t * input,
                 vlib_cli_command_t * cmd)
{
    dpo_vft_t *vft;

    vlib_cli_output (vm, "DPO memory");
    vlib_cli_output (vm, "%=30s %=5s %=8s/%=9s totals",
                     "Name","Size", "in-use", "allocated");

    vec_foreach(vft, dpo_vfts)
    {
        if (NULL != vft->dv_mem_show)
            vft->dv_mem_show();
    }

    return (NULL);
}

/* *INDENT-OFF* */
/*?
 * The '<em>sh dpo memory </em>' command displays the memory usage for each
 * data-plane object type.
 *
 * @cliexpar
 * @cliexstart{show dpo memory}
 * DPO memory
 *             Name               Size  in-use /allocated   totals
 *         load-balance            64     12   /    12      768/768
 *            Adjacency           256      1   /     1      256/256
 *              Receive            24      5   /     5      120/120
 *               Lookup            12      0   /     0        0/0
 *             Classify            12      0   /     0        0/0
 *           MPLS label            24      0   /     0        0/0
 * @cliexend
 ?*/
VLIB_CLI_COMMAND (show_fib_memory, static) = {
    .path = "show dpo memory",
    .function = dpo_memory_show,
    .short_help = "show dpo memory",
};
/* *INDENT-ON* */

// clang-format on