blob: 1331b5501bc35a394d366026529dd87793a81b25 [file] [log] [blame]
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001/*
2 * Copyright (c) 2016 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
/**
 * @brief
 * A Data-Path Object is an object that represents actions that are
 * applied to packets as they are switched through VPP.
 *
 * The DPO is a base class that is specialised by other objects to provide
 * concrete actions.
 *
 * The VLIB graph nodes are a graph of types; the DPO graph is a graph of
 * instances.
 */
25
26#include <vnet/dpo/dpo.h>
27#include <vnet/ip/lookup.h>
28#include <vnet/ip/format.h>
29#include <vnet/adj/adj.h>
30
31#include <vnet/dpo/load_balance.h>
32#include <vnet/dpo/mpls_label_dpo.h>
33#include <vnet/dpo/lookup_dpo.h>
34#include <vnet/dpo/drop_dpo.h>
35#include <vnet/dpo/receive_dpo.h>
36#include <vnet/dpo/punt_dpo.h>
37#include <vnet/dpo/classify_dpo.h>
Neale Ranns948e00f2016-10-20 13:39:34 +010038#include <vnet/dpo/ip_null_dpo.h>
Neale Ranns32e1c012016-11-22 17:07:28 +000039#include <vnet/dpo/replicate_dpo.h>
Neale Ranns43161a82017-08-12 02:12:00 -070040#include <vnet/dpo/interface_rx_dpo.h>
41#include <vnet/dpo/interface_tx_dpo.h>
Neale Ranns0f26c5a2017-03-01 15:12:11 -080042#include <vnet/dpo/mpls_disposition.h>
Neale Rannsf068c3e2018-01-03 04:18:48 -080043#include <vnet/dpo/dvr_dpo.h>
Andrew Yourtchenko5f3fcb92017-10-25 05:50:37 -070044#include <vnet/dpo/l3_proxy_dpo.h>
Neale Ranns53da2212018-02-24 02:11:19 -080045#include <vnet/dpo/ip6_ll_dpo.h>
Neale Ranns1dbcf302019-07-19 11:44:53 +000046#include <vnet/dpo/pw_cw.h>
Neale Ranns0bfe5d82016-08-25 15:29:12 +010047
/**
 * Array of char* names for the DPO types and protos
 */
static const char* dpo_type_names[] = DPO_TYPES;
static const char* dpo_proto_names[] = DPO_PROTOS;

/**
 * @brief Vector of virtual function tables for the DPO types
 *
 * This is a vector so we can dynamically register new DPO types in plugins.
 */
static dpo_vft_t *dpo_vfts;

/**
 * @brief vector of graph node names associated with each DPO type and protocol.
 *
 *  dpo_nodes[child_type][child_proto][node_X] = node_name;
 * i.e.
 *   dpo_node[DPO_LOAD_BALANCE][DPO_PROTO_IP4][0] = "ip4-lookup"
 *   dpo_node[DPO_LOAD_BALANCE][DPO_PROTO_IP4][1] = "ip4-load-balance"
 *
 * This is a vector so we can dynamically register new DPO types in plugins.
 */
static const char* const * const ** dpo_nodes;

/**
 * @brief Vector of edge indicies from parent DPO nodes to child
 *
 *  dpo_edges[child_type][child_proto][parent_type][parent_proto] = edge_index
 *
 * This array is derived at init time from the dpo_nodes above. Note that
 * the third dimension in dpo_nodes is lost, hence, the edge index from each
 * node MUST be the same.
 * Including both the child and parent protocol is required to support the
 * case where it changes as the graph is traversed, most notably when an
 * MPLS label is popped.
 *
 * Note that this array is child type specific, not child instance specific.
 */
static u32 ****dpo_edges;

/**
 * @brief The DPO type value that can be assigned to the next dynamic
 *        type registration.
 */
static dpo_type_t dpo_dynamic = DPO_LAST;
94
Neale Rannsad95b5d2016-11-10 20:35:14 +000095dpo_proto_t
96vnet_link_to_dpo_proto (vnet_link_t linkt)
97{
98 switch (linkt)
99 {
100 case VNET_LINK_IP6:
101 return (DPO_PROTO_IP6);
102 case VNET_LINK_IP4:
103 return (DPO_PROTO_IP4);
104 case VNET_LINK_MPLS:
105 return (DPO_PROTO_MPLS);
106 case VNET_LINK_ETHERNET:
107 return (DPO_PROTO_ETHERNET);
Florin Corasce1b4c72017-01-26 14:25:34 -0800108 case VNET_LINK_NSH:
109 return (DPO_PROTO_NSH);
Neale Rannsad95b5d2016-11-10 20:35:14 +0000110 case VNET_LINK_ARP:
111 break;
112 }
113 ASSERT(0);
114 return (0);
115}
116
Neale Rannsda78f952017-05-24 09:15:43 -0700117vnet_link_t
118dpo_proto_to_link (dpo_proto_t dp)
119{
120 switch (dp)
121 {
122 case DPO_PROTO_IP6:
123 return (VNET_LINK_IP6);
124 case DPO_PROTO_IP4:
125 return (VNET_LINK_IP4);
126 case DPO_PROTO_MPLS:
Neale Rannsd792d9c2017-10-21 10:53:20 -0700127 case DPO_PROTO_BIER:
Neale Rannsda78f952017-05-24 09:15:43 -0700128 return (VNET_LINK_MPLS);
129 case DPO_PROTO_ETHERNET:
130 return (VNET_LINK_ETHERNET);
131 case DPO_PROTO_NSH:
132 return (VNET_LINK_NSH);
133 }
134 return (~0);
135}
136
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100137u8 *
138format_dpo_type (u8 * s, va_list * args)
139{
140 dpo_type_t type = va_arg (*args, int);
141
142 s = format(s, "%s", dpo_type_names[type]);
143
144 return (s);
145}
146
147u8 *
148format_dpo_id (u8 * s, va_list * args)
149{
150 dpo_id_t *dpo = va_arg (*args, dpo_id_t*);
151 u32 indent = va_arg (*args, u32);
152
153 s = format(s, "[@%d]: ", dpo->dpoi_next_node);
154
155 if (NULL != dpo_vfts[dpo->dpoi_type].dv_format)
156 {
Neale Ranns2303cb12018-02-21 04:57:17 -0800157 s = format(s, "%U",
158 dpo_vfts[dpo->dpoi_type].dv_format,
159 dpo->dpoi_index,
160 indent);
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100161 }
Neale Ranns2303cb12018-02-21 04:57:17 -0800162 else
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100163 {
Neale Ranns2303cb12018-02-21 04:57:17 -0800164 switch (dpo->dpoi_type)
165 {
166 case DPO_FIRST:
167 s = format(s, "unset");
168 break;
169 default:
170 s = format(s, "unknown");
171 break;
172 }
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100173 }
174 return (s);
175}
176
177u8 *
178format_dpo_proto (u8 * s, va_list * args)
179{
180 dpo_proto_t proto = va_arg (*args, int);
181
182 return (format(s, "%s", dpo_proto_names[proto]));
183}
184
185void
186dpo_set (dpo_id_t *dpo,
187 dpo_type_t type,
188 dpo_proto_t proto,
189 index_t index)
190{
191 dpo_id_t tmp = *dpo;
192
193 dpo->dpoi_type = type;
194 dpo->dpoi_proto = proto,
195 dpo->dpoi_index = index;
196
197 if (DPO_ADJACENCY == type)
198 {
199 /*
200 * set the adj subtype
201 */
202 ip_adjacency_t *adj;
203
204 adj = adj_get(index);
205
206 switch (adj->lookup_next_index)
207 {
208 case IP_LOOKUP_NEXT_ARP:
209 dpo->dpoi_type = DPO_ADJACENCY_INCOMPLETE;
210 break;
211 case IP_LOOKUP_NEXT_MIDCHAIN:
212 dpo->dpoi_type = DPO_ADJACENCY_MIDCHAIN;
213 break;
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800214 case IP_LOOKUP_NEXT_MCAST_MIDCHAIN:
215 dpo->dpoi_type = DPO_ADJACENCY_MCAST_MIDCHAIN;
216 break;
217 case IP_LOOKUP_NEXT_MCAST:
218 dpo->dpoi_type = DPO_ADJACENCY_MCAST;
Neale Ranns8c4611b2017-05-23 03:43:47 -0700219 break;
220 case IP_LOOKUP_NEXT_GLEAN:
221 dpo->dpoi_type = DPO_ADJACENCY_GLEAN;
Neale Ranns0f26c5a2017-03-01 15:12:11 -0800222 break;
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100223 default:
224 break;
225 }
226 }
227 dpo_lock(dpo);
228 dpo_unlock(&tmp);
229}
230
231void
232dpo_reset (dpo_id_t *dpo)
233{
Neale Rannsad95b5d2016-11-10 20:35:14 +0000234 dpo_id_t tmp = DPO_INVALID;
235
236 /*
237 * use the atomic copy operation.
238 */
239 dpo_copy(dpo, &tmp);
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100240}
241
242/**
243 * \brief
244 * Compare two Data-path objects
245 *
246 * like memcmp, return 0 is matching, !0 otherwise.
247 */
248int
249dpo_cmp (const dpo_id_t *dpo1,
250 const dpo_id_t *dpo2)
251{
252 int res;
253
254 res = dpo1->dpoi_type - dpo2->dpoi_type;
255
256 if (0 != res) return (res);
257
258 return (dpo1->dpoi_index - dpo2->dpoi_index);
259}
260
/**
 * @brief Atomically (w.r.t. the data-plane) copy src into dst.
 *
 * The previous contents of dst are captured first, the new value is
 * written in a single u64 store, then the new value is locked before
 * the old one is unlocked - so neither referenced object can be freed
 * while a packet in flight may still be using it.
 */
void
dpo_copy (dpo_id_t *dst,
	  const dpo_id_t *src)
{
    /* snapshot of the old destination, to be unlocked last */
    dpo_id_t tmp = {
	.as_u64 = dst->as_u64
    };

    /*
     * the destination is written in a single u64 write - hence atomically w.r.t
     * any packets inflight.
     */
    dst->as_u64 = src->as_u64;

    dpo_lock(dst);
    dpo_unlock(&tmp);
}
278
279int
280dpo_is_adj (const dpo_id_t *dpo)
281{
282 return ((dpo->dpoi_type == DPO_ADJACENCY) ||
283 (dpo->dpoi_type == DPO_ADJACENCY_INCOMPLETE) ||
Neale Rannsab4fbed2020-11-26 09:41:01 +0000284 (dpo->dpoi_type == DPO_ADJACENCY_GLEAN) ||
285 (dpo->dpoi_type == DPO_ADJACENCY_MCAST) ||
286 (dpo->dpoi_type == DPO_ADJACENCY_MCAST_MIDCHAIN) ||
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100287 (dpo->dpoi_type == DPO_ADJACENCY_MIDCHAIN) ||
288 (dpo->dpoi_type == DPO_ADJACENCY_GLEAN));
289}
290
Neale Ranns43161a82017-08-12 02:12:00 -0700291static u32 *
292dpo_default_get_next_node (const dpo_id_t *dpo)
293{
294 u32 *node_indices = NULL;
295 const char *node_name;
296 u32 ii = 0;
297
298 node_name = dpo_nodes[dpo->dpoi_type][dpo->dpoi_proto][ii];
299 while (NULL != node_name)
300 {
301 vlib_node_t *node;
302
303 node = vlib_get_node_by_name(vlib_get_main(), (u8*) node_name);
304 ASSERT(NULL != node);
305 vec_add1(node_indices, node->index);
306
307 ++ii;
308 node_name = dpo_nodes[dpo->dpoi_type][dpo->dpoi_proto][ii];
309 }
310
311 return (node_indices);
312}
313
Neale Ranns2303cb12018-02-21 04:57:17 -0800314/**
315 * A default variant of the make interpose function that just returns
316 * the original
317 */
318static void
319dpo_default_mk_interpose (const dpo_id_t *original,
320 const dpo_id_t *parent,
321 dpo_id_t *clone)
322{
323 dpo_copy(clone, original);
324}
325
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100326void
327dpo_register (dpo_type_t type,
328 const dpo_vft_t *vft,
329 const char * const * const * nodes)
330{
331 vec_validate(dpo_vfts, type);
332 dpo_vfts[type] = *vft;
Neale Ranns43161a82017-08-12 02:12:00 -0700333 if (NULL == dpo_vfts[type].dv_get_next_node)
334 {
335 dpo_vfts[type].dv_get_next_node = dpo_default_get_next_node;
336 }
Neale Ranns2303cb12018-02-21 04:57:17 -0800337 if (NULL == dpo_vfts[type].dv_mk_interpose)
338 {
339 dpo_vfts[type].dv_mk_interpose = dpo_default_mk_interpose;
340 }
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100341
342 vec_validate(dpo_nodes, type);
343 dpo_nodes[type] = nodes;
344}
345
346dpo_type_t
347dpo_register_new_type (const dpo_vft_t *vft,
348 const char * const * const * nodes)
349{
350 dpo_type_t type = dpo_dynamic++;
351
352 dpo_register(type, vft, nodes);
353
354 return (type);
355}
356
357void
Neale Ranns2303cb12018-02-21 04:57:17 -0800358dpo_mk_interpose (const dpo_id_t *original,
359 const dpo_id_t *parent,
360 dpo_id_t *clone)
361{
362 if (!dpo_id_is_valid(original))
363 return;
364
365 dpo_vfts[original->dpoi_type].dv_mk_interpose(original, parent, clone);
366}
367
368void
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100369dpo_lock (dpo_id_t *dpo)
370{
371 if (!dpo_id_is_valid(dpo))
372 return;
373
374 dpo_vfts[dpo->dpoi_type].dv_lock(dpo);
375}
376
377void
378dpo_unlock (dpo_id_t *dpo)
379{
380 if (!dpo_id_is_valid(dpo))
381 return;
382
383 dpo_vfts[dpo->dpoi_type].dv_unlock(dpo);
384}
385
Andrew Yourtchenko5f3fcb92017-10-25 05:50:37 -0700386u32
387dpo_get_urpf(const dpo_id_t *dpo)
388{
389 if (dpo_id_is_valid(dpo) &&
390 (NULL != dpo_vfts[dpo->dpoi_type].dv_get_urpf))
391 {
392 return (dpo_vfts[dpo->dpoi_type].dv_get_urpf(dpo));
393 }
394
395 return (~0);
396}
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100397
/**
 * @brief Return (creating on first use) the VLIB edge index to use when
 * a child of {child_type, child_proto} stacks on this parent DPO.
 *
 * The result is memoised in the 4-D dpo_edges array; the arcs are
 * created under the worker-thread barrier since adding graph edges
 * must not race with packet processing.
 */
static u32
dpo_get_next_node (dpo_type_t child_type,
		   dpo_proto_t child_proto,
		   const dpo_id_t *parent_dpo)
{
    dpo_proto_t parent_proto;
    dpo_type_t parent_type;

    parent_type = parent_dpo->dpoi_type;
    parent_proto = parent_dpo->dpoi_proto;

    /* grow the memo array to cover this {child, parent} combination;
     * new slots are initialised to ~0 == "edge not yet created" */
    vec_validate(dpo_edges, child_type);
    vec_validate(dpo_edges[child_type], child_proto);
    vec_validate(dpo_edges[child_type][child_proto], parent_type);
    vec_validate_init_empty(
	dpo_edges[child_type][child_proto][parent_type],
	parent_proto, ~0);

    /*
     * if the edge index has not yet been created for this node to node transition
     */
    if (~0 == dpo_edges[child_type][child_proto][parent_type][parent_proto])
    {
	vlib_node_t *child_node;
	u32 *parent_indices;
	vlib_main_t *vm;
	u32 edge, *pi, cc;

	vm = vlib_get_main();

	ASSERT(NULL != dpo_vfts[parent_type].dv_get_next_node);
	ASSERT(NULL != dpo_nodes[child_type]);
	ASSERT(NULL != dpo_nodes[child_type][child_proto]);

	cc = 0;
	parent_indices = dpo_vfts[parent_type].dv_get_next_node(parent_dpo);

	vlib_worker_thread_barrier_sync(vm);

	/*
	 * create a graph arc from each of the child's registered node types,
	 * to each of the parent's.
	 */
	while (NULL != dpo_nodes[child_type][child_proto][cc])
	{
	    child_node =
		vlib_get_node_by_name(vm,
				      (u8*) dpo_nodes[child_type][child_proto][cc]);

	    vec_foreach(pi, parent_indices)
	    {
		edge = vlib_node_add_next(vm, child_node->index, *pi);

		if (~0 == dpo_edges[child_type][child_proto][parent_type][parent_proto])
		{
		    dpo_edges[child_type][child_proto][parent_type][parent_proto] = edge;
		}
		else
		{
		    /* every child node must be given the same edge index
		     * for this parent, since only one is memoised */
		    ASSERT(dpo_edges[child_type][child_proto][parent_type][parent_proto] == edge);
		}
	    }
	    cc++;
	}

	vlib_worker_thread_barrier_release(vm);
	vec_free(parent_indices);
    }

    return (dpo_edges[child_type][child_proto][parent_type][parent_proto]);
}
469
470/**
Vijayabhaskar Katamreddyb9ca61b2018-03-14 14:04:27 -0700471 * @brief return already stacked up next node index for a given
472 * child_type/child_proto and parent_type/patent_proto.
473 * The VLIB graph arc used is taken from the parent and child types
474 * passed.
475 */
476u32
477dpo_get_next_node_by_type_and_proto (dpo_type_t child_type,
478 dpo_proto_t child_proto,
479 dpo_type_t parent_type,
480 dpo_proto_t parent_proto)
481{
482 return (dpo_edges[child_type][child_proto][parent_type][parent_proto]);
483}
484
/**
 * @brief Stack one DPO object on another, and thus establish a child parent
 * relationship. The VLIB graph arc used is taken from the parent and child types
 * passed.
 */
static void
dpo_stack_i (u32 edge,
	     dpo_id_t *dpo,
	     const dpo_id_t *parent)
{
    /*
     * in order to get an atomic update of the parent we create a temporary,
     * from a copy of the child, and add the next_node. then we copy to the parent
     */
    dpo_id_t tmp = DPO_INVALID;
    dpo_copy(&tmp, parent);

    /*
     * get the edge index for the parent to child VLIB graph transition
     */
    tmp.dpoi_next_node = edge;

    /*
     * this update is atomic.
     */
    dpo_copy(dpo, &tmp);

    /* drop the temporary's reference taken by the first dpo_copy */
    dpo_reset(&tmp);
}
514
515/**
516 * @brief Stack one DPO object on another, and thus establish a child-parent
517 * relationship. The VLIB graph arc used is taken from the parent and child types
518 * passed.
519 */
520void
521dpo_stack (dpo_type_t child_type,
522 dpo_proto_t child_proto,
523 dpo_id_t *dpo,
524 const dpo_id_t *parent)
525{
526 dpo_stack_i(dpo_get_next_node(child_type, child_proto, parent), dpo, parent);
527}
528
/**
 * @brief Stack one DPO object on another, and thus establish a child parent
 * relationship. A new VLIB graph arc is created from the child node passed
 * to the nodes registered by the parent. The VLIB infra will ensure this arc
 * is added only once.
 */
void
dpo_stack_from_node (u32 child_node_index,
		     dpo_id_t *dpo,
		     const dpo_id_t *parent)
{
    dpo_type_t parent_type;
    u32 *parent_indices;
    vlib_main_t *vm;
    u32 edge, *pi;

    edge = 0;
    parent_type = parent->dpoi_type;
    vm = vlib_get_main();

    ASSERT(NULL != dpo_vfts[parent_type].dv_get_next_node);
    parent_indices = dpo_vfts[parent_type].dv_get_next_node(parent);
    ASSERT(parent_indices);

    /*
     * This loop is purposefully written with the worker thread lock in the
     * inner loop because;
     * 1) the likelihood that the edge does not exist is smaller
     * 2) the likelihood there is more than one node is even smaller
     * so we are optimising for not need to take the lock
     */
    vec_foreach(pi, parent_indices)
    {
	/* probe for an existing arc before taking the barrier */
	edge = vlib_node_get_next(vm, child_node_index, *pi);

	if (~0 == edge)
	{
	    vlib_worker_thread_barrier_sync(vm);

	    edge = vlib_node_add_next(vm, child_node_index, *pi);

	    vlib_worker_thread_barrier_release(vm);
	}
    }
    /* NOTE(review): only the edge from the last parent index is used
     * here - relies on all parent nodes yielding the same edge index,
     * as documented for dpo_edges above */
    dpo_stack_i(edge, dpo, parent);

    /* should free this local vector to avoid memory leak */
    vec_free(parent_indices);
}
578
/**
 * @brief Init-time registration of all the built-in DPO types.
 * Each *_module_init call registers one type's vft and node names
 * via dpo_register().
 */
static clib_error_t *
dpo_module_init (vlib_main_t * vm)
{
    drop_dpo_module_init();
    punt_dpo_module_init();
    receive_dpo_module_init();
    load_balance_module_init();
    mpls_label_dpo_module_init();
    classify_dpo_module_init();
    lookup_dpo_module_init();
    ip_null_dpo_module_init();
    ip6_ll_dpo_module_init();
    replicate_module_init();
    interface_rx_dpo_module_init();
    interface_tx_dpo_module_init();
    mpls_disp_dpo_module_init();
    dvr_dpo_module_init();
    l3_proxy_dpo_module_init();
    pw_cw_dpo_module_init();

    return (NULL);
}
601
/* *INDENT-OFF* */
/* DPO types must be registered before the FIB/IP layer initialises */
VLIB_INIT_FUNCTION(dpo_module_init) =
{
    .runs_before = VLIB_INITS ("ip_main_init"),
};
/* *INDENT-ON* */
Neale Ranns6c3ebcc2016-10-02 21:20:15 +0100608
609static clib_error_t *
610dpo_memory_show (vlib_main_t * vm,
611 unformat_input_t * input,
612 vlib_cli_command_t * cmd)
613{
614 dpo_vft_t *vft;
615
616 vlib_cli_output (vm, "DPO memory");
617 vlib_cli_output (vm, "%=30s %=5s %=8s/%=9s totals",
618 "Name","Size", "in-use", "allocated");
619
620 vec_foreach(vft, dpo_vfts)
621 {
622 if (NULL != vft->dv_mem_show)
623 vft->dv_mem_show();
624 }
625
626 return (NULL);
627}
628
/* *INDENT-OFF* */
/*?
 * The '<em>sh dpo memory </em>' command displays the memory usage for each
 * data-plane object type.
 *
 * @cliexpar
 * @cliexstart{show dpo memory}
 * DPO memory
 *             Name               Size  in-use /allocated   totals
 *         load-balance            64     12   /    12      768/768
 *           Adjacency            256      1   /     1      256/256
 *            Receive              24      5   /     5      120/120
 *             Lookup              12      0   /     0      0/0
 *            Classify             12      0   /     0      0/0
 *           MPLS label            24      0   /     0      0/0
 * @cliexend
?*/
/* CLI registration for the memory-usage report above */
VLIB_CLI_COMMAND (show_fib_memory, static) = {
    .path = "show dpo memory",
    .function = dpo_memory_show,
    .short_help = "show dpo memory",
};
/* *INDENT-ON* */