/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/**
 * @brief
 * A Data-Path Object is an object that represents actions that are
 * applied to packets as they are switched through VPP.
 *
 * The DPO is a base class that is specialised by other objects to provide
 * concrete actions.
 *
 * The VLIB node graph is a graph of types; the DPO graph is a graph of
 * instances.
 */
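
/*
 * Example (an illustrative sketch only, not part of the build; "adj_index"
 * is a hypothetical adjacency index): a client typically holds a DPO by
 * value, points it at a concrete instance with dpo_set() - which takes a
 * lock on that instance - and releases it again with dpo_reset():
 *
 *    dpo_id_t dpo = DPO_INVALID;
 *
 *    dpo_set(&dpo, DPO_ADJACENCY, DPO_PROTO_IP4, adj_index);
 *    ...
 *    dpo_reset(&dpo);
 */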

#include <vnet/dpo/dpo.h>
#include <vnet/ip/lookup.h>
#include <vnet/ip/format.h>
#include <vnet/adj/adj.h>

#include <vnet/dpo/load_balance.h>
#include <vnet/dpo/mpls_label_dpo.h>
#include <vnet/dpo/lookup_dpo.h>
#include <vnet/dpo/drop_dpo.h>
#include <vnet/dpo/receive_dpo.h>
#include <vnet/dpo/punt_dpo.h>
#include <vnet/dpo/classify_dpo.h>
#include <vnet/dpo/ip_null_dpo.h>
#include <vnet/dpo/replicate_dpo.h>
#include <vnet/dpo/interface_rx_dpo.h>
#include <vnet/dpo/interface_tx_dpo.h>
#include <vnet/dpo/mpls_disposition.h>
#include <vnet/dpo/l2_bridge_dpo.h>
#include <vnet/dpo/l3_proxy_dpo.h>

/**
 * Array of char* names for the DPO types and protos
 */
static const char* dpo_type_names[] = DPO_TYPES;
static const char* dpo_proto_names[] = DPO_PROTOS;

/**
 * @brief Vector of virtual function tables for the DPO types
 *
 * This is a vector so we can dynamically register new DPO types in plugins.
 */
static dpo_vft_t *dpo_vfts;

/**
 * @brief vector of graph node names associated with each DPO type and protocol.
 *
 *   dpo_nodes[child_type][child_proto][node_X] = node_name;
 * i.e.
 *   dpo_nodes[DPO_LOAD_BALANCE][DPO_PROTO_IP4][0] = "ip4-lookup"
 *   dpo_nodes[DPO_LOAD_BALANCE][DPO_PROTO_IP4][1] = "ip4-load-balance"
 *
 * This is a vector so we can dynamically register new DPO types in plugins.
 */
static const char* const * const ** dpo_nodes;

/**
 * @brief Vector of edge indices from parent DPO nodes to child
 *
 *   dpo_edges[child_type][child_proto][parent_type][parent_proto] = edge_index
 *
 * This array is derived at init time from the dpo_nodes above. Note that
 * the third dimension in dpo_nodes is lost; hence, the edge index from each
 * node MUST be the same.
 * Including both the child and parent protocol is required to support the
 * case where it changes as the graph is traversed, most notably when an
 * MPLS label is popped.
 *
 * Note that this array is child type specific, not child instance specific.
 */
static u32 ****dpo_edges;

/**
 * @brief The DPO type value that can be assigned to the next dynamic
 *        type registration.
 */
static dpo_type_t dpo_dynamic = DPO_LAST;

dpo_proto_t
vnet_link_to_dpo_proto (vnet_link_t linkt)
{
    switch (linkt)
    {
    case VNET_LINK_IP6:
        return (DPO_PROTO_IP6);
    case VNET_LINK_IP4:
        return (DPO_PROTO_IP4);
    case VNET_LINK_MPLS:
        return (DPO_PROTO_MPLS);
    case VNET_LINK_ETHERNET:
        return (DPO_PROTO_ETHERNET);
    case VNET_LINK_NSH:
        return (DPO_PROTO_NSH);
    case VNET_LINK_ARP:
        break;
    }
    ASSERT(0);
    return (0);
}

vnet_link_t
dpo_proto_to_link (dpo_proto_t dp)
{
    switch (dp)
    {
    case DPO_PROTO_IP6:
        return (VNET_LINK_IP6);
    case DPO_PROTO_IP4:
        return (VNET_LINK_IP4);
    case DPO_PROTO_MPLS:
        return (VNET_LINK_MPLS);
    case DPO_PROTO_ETHERNET:
        return (VNET_LINK_ETHERNET);
    case DPO_PROTO_NSH:
        return (VNET_LINK_NSH);
    }
    return (~0);
}

u8 *
format_dpo_type (u8 * s, va_list * args)
{
    dpo_type_t type = va_arg (*args, int);

    s = format(s, "%s", dpo_type_names[type]);

    return (s);
}

u8 *
format_dpo_id (u8 * s, va_list * args)
{
    dpo_id_t *dpo = va_arg (*args, dpo_id_t*);
    u32 indent = va_arg (*args, u32);

    s = format(s, "[@%d]: ", dpo->dpoi_next_node);

    if (NULL != dpo_vfts[dpo->dpoi_type].dv_format)
    {
        return (format(s, "%U",
                       dpo_vfts[dpo->dpoi_type].dv_format,
                       dpo->dpoi_index,
                       indent));
    }

    switch (dpo->dpoi_type)
    {
    case DPO_FIRST:
        s = format(s, "unset");
        break;
    default:
        s = format(s, "unknown");
        break;
    }
    return (s);
}

u8 *
format_dpo_proto (u8 * s, va_list * args)
{
    dpo_proto_t proto = va_arg (*args, int);

    return (format(s, "%s", dpo_proto_names[proto]));
}

void
dpo_set (dpo_id_t *dpo,
         dpo_type_t type,
         dpo_proto_t proto,
         index_t index)
{
    dpo_id_t tmp = *dpo;

    dpo->dpoi_type = type;
    dpo->dpoi_proto = proto;
    dpo->dpoi_index = index;

    if (DPO_ADJACENCY == type)
    {
        /*
         * set the adj subtype
         */
        ip_adjacency_t *adj;

        adj = adj_get(index);

        switch (adj->lookup_next_index)
        {
        case IP_LOOKUP_NEXT_ARP:
            dpo->dpoi_type = DPO_ADJACENCY_INCOMPLETE;
            break;
        case IP_LOOKUP_NEXT_MIDCHAIN:
            dpo->dpoi_type = DPO_ADJACENCY_MIDCHAIN;
            break;
        case IP_LOOKUP_NEXT_MCAST_MIDCHAIN:
            dpo->dpoi_type = DPO_ADJACENCY_MCAST_MIDCHAIN;
            break;
        case IP_LOOKUP_NEXT_MCAST:
            dpo->dpoi_type = DPO_ADJACENCY_MCAST;
            break;
        case IP_LOOKUP_NEXT_GLEAN:
            dpo->dpoi_type = DPO_ADJACENCY_GLEAN;
            break;
        default:
            break;
        }
    }
    dpo_lock(dpo);
    dpo_unlock(&tmp);
}

void
dpo_reset (dpo_id_t *dpo)
{
    dpo_id_t tmp = DPO_INVALID;

    /*
     * use the atomic copy operation.
     */
    dpo_copy(dpo, &tmp);
}

/**
 * \brief
 * Compare two Data-path objects
 *
 * like memcmp, returns 0 if matching, !0 otherwise.
 */
int
dpo_cmp (const dpo_id_t *dpo1,
         const dpo_id_t *dpo2)
{
    int res;

    res = dpo1->dpoi_type - dpo2->dpoi_type;

    if (0 != res) return (res);

    return (dpo1->dpoi_index - dpo2->dpoi_index);
}

void
dpo_copy (dpo_id_t *dst,
          const dpo_id_t *src)
{
    dpo_id_t tmp = *dst;

    /*
     * the destination is written in a single u64 write - hence atomically w.r.t
     * any packets inflight.
     */
    *((u64*)dst) = *(u64*)src;

    dpo_lock(dst);
    dpo_unlock(&tmp);
}

int
dpo_is_adj (const dpo_id_t *dpo)
{
    return ((dpo->dpoi_type == DPO_ADJACENCY) ||
            (dpo->dpoi_type == DPO_ADJACENCY_INCOMPLETE) ||
            (dpo->dpoi_type == DPO_ADJACENCY_MIDCHAIN) ||
            (dpo->dpoi_type == DPO_ADJACENCY_GLEAN));
}

static u32 *
dpo_default_get_next_node (const dpo_id_t *dpo)
{
    u32 *node_indices = NULL;
    const char *node_name;
    u32 ii = 0;

    node_name = dpo_nodes[dpo->dpoi_type][dpo->dpoi_proto][ii];
    while (NULL != node_name)
    {
        vlib_node_t *node;

        node = vlib_get_node_by_name(vlib_get_main(), (u8*) node_name);
        ASSERT(NULL != node);
        vec_add1(node_indices, node->index);

        ++ii;
        node_name = dpo_nodes[dpo->dpoi_type][dpo->dpoi_proto][ii];
    }

    return (node_indices);
}

void
dpo_register (dpo_type_t type,
              const dpo_vft_t *vft,
              const char * const * const * nodes)
{
    vec_validate(dpo_vfts, type);
    dpo_vfts[type] = *vft;
    if (NULL == dpo_vfts[type].dv_get_next_node)
    {
        dpo_vfts[type].dv_get_next_node = dpo_default_get_next_node;
    }

    vec_validate(dpo_nodes, type);
    dpo_nodes[type] = nodes;
}

dpo_type_t
dpo_register_new_type (const dpo_vft_t *vft,
                       const char * const * const * nodes)
{
    dpo_type_t type = dpo_dynamic++;

    dpo_register(type, vft, nodes);

    return (type);
}
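
/*
 * Example (an illustrative sketch only, not part of the build; the
 * "my_dpo_*" names are hypothetical): a plugin registering a new DPO type.
 * The node table is indexed by DPO protocol, each entry being a
 * NULL-terminated list of the graph node names that implement the new
 * type's action for that protocol:
 *
 *    const static char* const my_dpo_ip4_nodes[] = { "my-dpo-ip4", NULL };
 *    const static char* const my_dpo_ip6_nodes[] = { "my-dpo-ip6", NULL };
 *    const static char* const * const my_dpo_nodes[] = {
 *        [DPO_PROTO_IP4] = my_dpo_ip4_nodes,
 *        [DPO_PROTO_IP6] = my_dpo_ip6_nodes,
 *    };
 *    const static dpo_vft_t my_dpo_vft = {
 *        .dv_lock = my_dpo_lock,
 *        .dv_unlock = my_dpo_unlock,
 *        .dv_format = format_my_dpo,
 *    };
 *
 *    my_dpo_type = dpo_register_new_type(&my_dpo_vft, my_dpo_nodes);
 */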

void
dpo_lock (dpo_id_t *dpo)
{
    if (!dpo_id_is_valid(dpo))
        return;

    dpo_vfts[dpo->dpoi_type].dv_lock(dpo);
}

void
dpo_unlock (dpo_id_t *dpo)
{
    if (!dpo_id_is_valid(dpo))
        return;

    dpo_vfts[dpo->dpoi_type].dv_unlock(dpo);
}

u32
dpo_get_urpf(const dpo_id_t *dpo)
{
    if (dpo_id_is_valid(dpo) &&
        (NULL != dpo_vfts[dpo->dpoi_type].dv_get_urpf))
    {
        return (dpo_vfts[dpo->dpoi_type].dv_get_urpf(dpo));
    }

    return (~0);
}

static u32
dpo_get_next_node (dpo_type_t child_type,
                   dpo_proto_t child_proto,
                   const dpo_id_t *parent_dpo)
{
    dpo_proto_t parent_proto;
    dpo_type_t parent_type;

    parent_type = parent_dpo->dpoi_type;
    parent_proto = parent_dpo->dpoi_proto;

    vec_validate(dpo_edges, child_type);
    vec_validate(dpo_edges[child_type], child_proto);
    vec_validate(dpo_edges[child_type][child_proto], parent_type);
    vec_validate_init_empty(
        dpo_edges[child_type][child_proto][parent_type],
        parent_proto, ~0);

    /*
     * if the edge index has not yet been created for this node to node transition
     */
    if (~0 == dpo_edges[child_type][child_proto][parent_type][parent_proto])
    {
        vlib_node_t *child_node;
        u32 *parent_indices;
        vlib_main_t *vm;
        u32 edge, *pi, cc;

        vm = vlib_get_main();

        ASSERT(NULL != dpo_vfts[parent_type].dv_get_next_node);
        ASSERT(NULL != dpo_nodes[child_type]);
        ASSERT(NULL != dpo_nodes[child_type][child_proto]);

        cc = 0;
        parent_indices = dpo_vfts[parent_type].dv_get_next_node(parent_dpo);

        vlib_worker_thread_barrier_sync(vm);

        /*
         * create a graph arc from each of the child's registered node types
         * to each of the parent's.
         */
        while (NULL != dpo_nodes[child_type][child_proto][cc])
        {
            child_node =
                vlib_get_node_by_name(vm,
                                      (u8*) dpo_nodes[child_type][child_proto][cc]);

            vec_foreach(pi, parent_indices)
            {
                edge = vlib_node_add_next(vm, child_node->index, *pi);

                if (~0 == dpo_edges[child_type][child_proto][parent_type][parent_proto])
                {
                    dpo_edges[child_type][child_proto][parent_type][parent_proto] = edge;
                }
                else
                {
                    ASSERT(dpo_edges[child_type][child_proto][parent_type][parent_proto] == edge);
                }
            }
            cc++;
        }

        vlib_worker_thread_barrier_release(vm);
        vec_free(parent_indices);
    }

    return (dpo_edges[child_type][child_proto][parent_type][parent_proto]);
}

/**
 * @brief Stack one DPO object on another, and thus establish a child-parent
 * relationship. The VLIB graph arc used is taken from the parent and child types
 * passed.
 */
static void
dpo_stack_i (u32 edge,
             dpo_id_t *dpo,
             const dpo_id_t *parent)
{
    /*
     * in order to get an atomic update of the child we create a temporary
     * from a copy of the parent and add the next_node. then we copy the
     * temporary to the child.
     */
    dpo_id_t tmp = DPO_INVALID;
    dpo_copy(&tmp, parent);

    /*
     * set the edge index for the child to parent VLIB graph transition
     */
    tmp.dpoi_next_node = edge;

    /*
     * this update is atomic.
     */
    dpo_copy(dpo, &tmp);

    dpo_reset(&tmp);
}

/**
 * @brief Stack one DPO object on another, and thus establish a child-parent
 * relationship. The VLIB graph arc used is taken from the parent and child types
 * passed.
 */
void
dpo_stack (dpo_type_t child_type,
           dpo_proto_t child_proto,
           dpo_id_t *dpo,
           const dpo_id_t *parent)
{
    dpo_stack_i(dpo_get_next_node(child_type, child_proto, parent), dpo, parent);
}
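
/*
 * Example (an illustrative sketch only; "adj_index" and "bucket_dpo" are
 * hypothetical): stacking a load-balance bucket on an IPv4 adjacency.
 * After the call, bucket_dpo refers to the adjacency and carries the VLIB
 * edge from the load-balance's nodes to the adjacency's node:
 *
 *    dpo_id_t via_adj = DPO_INVALID;
 *
 *    dpo_set(&via_adj, DPO_ADJACENCY, DPO_PROTO_IP4, adj_index);
 *    dpo_stack(DPO_LOAD_BALANCE, DPO_PROTO_IP4, &bucket_dpo, &via_adj);
 *    dpo_reset(&via_adj);
 */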

/**
 * @brief Stack one DPO object on another, and thus establish a child-parent
 * relationship. A new VLIB graph arc is created from the child node passed
 * to the nodes registered by the parent. The VLIB infra will ensure this arc
 * is added only once.
 */
void
dpo_stack_from_node (u32 child_node_index,
                     dpo_id_t *dpo,
                     const dpo_id_t *parent)
{
    dpo_type_t parent_type;
    u32 *parent_indices;
    vlib_main_t *vm;
    u32 edge, *pi;

    edge = 0;
    parent_type = parent->dpoi_type;
    vm = vlib_get_main();

    ASSERT(NULL != dpo_vfts[parent_type].dv_get_next_node);
    parent_indices = dpo_vfts[parent_type].dv_get_next_node(parent);
    ASSERT(parent_indices);

    /*
     * This loop is purposefully written with the worker thread lock in the
     * inner loop because:
     *  1) the likelihood that the edge does not exist is small
     *  2) the likelihood there is more than one node is even smaller
     * so we are optimising for not needing to take the lock
     */
    vec_foreach(pi, parent_indices)
    {
        edge = vlib_node_get_next(vm, child_node_index, *pi);

        if (~0 == edge)
        {
            vlib_worker_thread_barrier_sync(vm);

            edge = vlib_node_add_next(vm, child_node_index, *pi);

            vlib_worker_thread_barrier_release(vm);
        }
    }
    dpo_stack_i(edge, dpo, parent);
}
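
/*
 * Example (an illustrative sketch only; "my-feature" and the variables are
 * hypothetical): a feature node that is not a registered DPO type can stack
 * directly on a parent DPO using its own node index:
 *
 *    vlib_node_t *n = vlib_get_node_by_name(vm, (u8*) "my-feature");
 *
 *    dpo_stack_from_node(n->index, &my_cached_dpo, parent);
 */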

static clib_error_t *
dpo_module_init (vlib_main_t * vm)
{
    drop_dpo_module_init();
    punt_dpo_module_init();
    receive_dpo_module_init();
    load_balance_module_init();
    mpls_label_dpo_module_init();
    classify_dpo_module_init();
    lookup_dpo_module_init();
    ip_null_dpo_module_init();
    replicate_module_init();
    interface_rx_dpo_module_init();
    interface_tx_dpo_module_init();
    mpls_disp_dpo_module_init();
    l2_bridge_dpo_module_init();
    l3_proxy_dpo_module_init();

    return (NULL);
}

VLIB_INIT_FUNCTION(dpo_module_init);

static clib_error_t *
dpo_memory_show (vlib_main_t * vm,
                 unformat_input_t * input,
                 vlib_cli_command_t * cmd)
{
    dpo_vft_t *vft;

    vlib_cli_output (vm, "DPO memory");
    vlib_cli_output (vm, "%=30s %=5s %=8s/%=9s totals",
                     "Name", "Size", "in-use", "allocated");

    vec_foreach(vft, dpo_vfts)
    {
        if (NULL != vft->dv_mem_show)
            vft->dv_mem_show();
    }

    return (NULL);
}

/* *INDENT-OFF* */
/*?
 * The '<em>sh dpo memory</em>' command displays the memory usage for each
 * data-plane object type.
 *
 * @cliexpar
 * @cliexstart{show dpo memory}
 * DPO memory
 *             Name      Size  in-use / allocated   totals
 *     load-balance        64      12 /        12   768/768
 *        Adjacency       256       1 /         1   256/256
 *          Receive        24       5 /         5   120/120
 *           Lookup        12       0 /         0     0/0
 *         Classify        12       0 /         0     0/0
 *       MPLS label        24       0 /         0     0/0
 * @cliexend
?*/
VLIB_CLI_COMMAND (show_fib_memory, static) = {
    .path = "show dpo memory",
    .function = dpo_memory_show,
    .short_help = "show dpo memory",
};
/* *INDENT-ON* */