/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/dpo/interface_rx_dpo.h>
#include <vnet/fib/fib_node.h>

/*
 * The 'DB' of interface DPOs.
 * There is only one per-interface per-protocol, so this is a per-protocol
 * vector indexed by sw_if_index.
 */
static index_t *interface_rx_dpo_db[DPO_PROTO_NUM];

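/**
 * @brief Allocate a new interface-rx DPO from the pool
 */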
static interface_rx_dpo_t *
interface_rx_dpo_alloc (void)
{
    interface_rx_dpo_t *ido;

    pool_get(interface_rx_dpo_pool, ido);

    return (ido);
}

static inline interface_rx_dpo_t *
interface_rx_dpo_get_from_dpo (const dpo_id_t *dpo)
{
    ASSERT(DPO_INTERFACE_RX == dpo->dpoi_type);

    return (interface_rx_dpo_get(dpo->dpoi_index));
}

static inline index_t
interface_rx_dpo_get_index (interface_rx_dpo_t *ido)
{
    return (ido - interface_rx_dpo_pool);
}

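/**
 * @brief Take a lock (reference) on an interface-rx DPO
 */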
static void
interface_rx_dpo_lock (dpo_id_t *dpo)
{
    interface_rx_dpo_t *ido;

    ido = interface_rx_dpo_get_from_dpo(dpo);
    ido->ido_locks++;
}

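/**
 * @brief Release a lock on an interface-rx DPO. When the last lock is
 *        released the DB entry is invalidated and the DPO returned to the pool.
 */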
static void
interface_rx_dpo_unlock (dpo_id_t *dpo)
{
    interface_rx_dpo_t *ido;

    ido = interface_rx_dpo_get_from_dpo(dpo);
    ido->ido_locks--;

    if (0 == ido->ido_locks)
    {
        interface_rx_dpo_db[ido->ido_proto][ido->ido_sw_if_index] =
            INDEX_INVALID;
        pool_put(interface_rx_dpo_pool, ido);
    }
}

/*
 * interface_rx_dpo_add_or_lock
 *
 * Create and lock a new interface DPO, or lock the existing one, for the
 * interface and protocol given.
 */
void
interface_rx_dpo_add_or_lock (dpo_proto_t proto,
                              u32 sw_if_index,
                              dpo_id_t *dpo)
{
    interface_rx_dpo_t *ido;

    vec_validate_init_empty(interface_rx_dpo_db[proto],
                            sw_if_index,
                            INDEX_INVALID);

    if (INDEX_INVALID == interface_rx_dpo_db[proto][sw_if_index])
    {
        ido = interface_rx_dpo_alloc();

        ido->ido_sw_if_index = sw_if_index;
        ido->ido_proto = proto;

        interface_rx_dpo_db[proto][sw_if_index] =
            interface_rx_dpo_get_index(ido);
    }
    else
    {
        ido = interface_rx_dpo_get(interface_rx_dpo_db[proto][sw_if_index]);
    }

    dpo_set(dpo, DPO_INTERFACE_RX, proto, interface_rx_dpo_get_index(ido));
}

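/*
 * Illustrative usage sketch (an assumption, not code from this file): a
 * consumer, such as a tunnel termination, that wants packets to appear as
 * if received on a given interface could do:
 *
 *     dpo_id_t dpo = DPO_INVALID;
 *
 *     interface_rx_dpo_add_or_lock(DPO_PROTO_IP4, sw_if_index, &dpo);
 *     ... stack 'dpo' as the parent in the consumer's DPO graph ...
 *     dpo_reset(&dpo);
 */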
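/**
 * @brief Registered callback for SW interface admin up/down state changes
 */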
static clib_error_t *
interface_rx_dpo_interface_state_change (vnet_main_t * vnm,
                                         u32 sw_if_index,
                                         u32 flags)
{
    /*
     */
    return (NULL);
}

VNET_SW_INTERFACE_ADMIN_UP_DOWN_FUNCTION(
    interface_rx_dpo_interface_state_change);

/**
 * @brief Registered callback for HW interface state changes
 */
static clib_error_t *
interface_rx_dpo_hw_interface_state_change (vnet_main_t * vnm,
                                            u32 hw_if_index,
                                            u32 flags)
{
    return (NULL);
}

VNET_HW_INTERFACE_LINK_UP_DOWN_FUNCTION(
    interface_rx_dpo_hw_interface_state_change);

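/**
 * @brief Registered callback for SW interface add/del
 */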
static clib_error_t *
interface_rx_dpo_interface_delete (vnet_main_t * vnm,
                                   u32 sw_if_index,
                                   u32 is_add)
{
    return (NULL);
}

VNET_SW_INTERFACE_ADD_DEL_FUNCTION(
    interface_rx_dpo_interface_delete);

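/**
 * @brief Format an interface-rx DPO as "<interface>-rx-dpo: <protocol>"
 */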
u8*
format_interface_rx_dpo (u8* s, va_list *ap)
{
    index_t index = va_arg(*ap, index_t);
    CLIB_UNUSED(u32 indent) = va_arg(*ap, u32);
    vnet_main_t * vnm = vnet_get_main();
    interface_rx_dpo_t *ido = interface_rx_dpo_get(index);

    return (format(s, "%U-rx-dpo: %U",
                   format_vnet_sw_interface_name,
                   vnm,
                   vnet_get_sw_interface(vnm, ido->ido_sw_if_index),
                   format_dpo_proto, ido->ido_proto));
}

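/**
 * @brief Report the memory usage of the interface-rx DPO pool
 */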
static void
interface_rx_dpo_mem_show (void)
{
    fib_show_memory_usage("Interface",
                          pool_elts(interface_rx_dpo_pool),
                          pool_len(interface_rx_dpo_pool),
                          sizeof(interface_rx_dpo_t));
}

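/**
 * @brief The virtual function table registered for the DPO_INTERFACE_RX type
 */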
const static dpo_vft_t interface_rx_dpo_vft = {
    .dv_lock = interface_rx_dpo_lock,
    .dv_unlock = interface_rx_dpo_unlock,
    .dv_format = format_interface_rx_dpo,
    .dv_mem_show = interface_rx_dpo_mem_show,
};

/**
 * @brief The per-protocol VLIB graph nodes that are assigned to an
 *        interface-rx DPO object.
 *
 * This means that these graph nodes are ones from which an interface-rx DPO
 * is the parent object in the DPO-graph.
 */
const static char* const interface_rx_dpo_ip4_nodes[] =
{
    "interface-rx-dpo-ip4",
    NULL,
};
const static char* const interface_rx_dpo_ip6_nodes[] =
{
    "interface-rx-dpo-ip6",
    NULL,
};
const static char* const interface_rx_dpo_l2_nodes[] =
{
    "interface-rx-dpo-l2",
    NULL,
};

const static char* const * const interface_rx_dpo_nodes[DPO_PROTO_NUM] =
{
    [DPO_PROTO_IP4] = interface_rx_dpo_ip4_nodes,
    [DPO_PROTO_IP6] = interface_rx_dpo_ip6_nodes,
    [DPO_PROTO_ETHERNET] = interface_rx_dpo_l2_nodes,
    [DPO_PROTO_MPLS] = NULL,
};

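/**
 * @brief Register the DPO_INTERFACE_RX type, its VFT and the per-protocol
 *        graph nodes with the DPO infrastructure
 */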
void
interface_rx_dpo_module_init (void)
{
    dpo_register(DPO_INTERFACE_RX,
                 &interface_rx_dpo_vft,
                 interface_rx_dpo_nodes);
}

/**
 * @brief Interface DPO trace data
 */
typedef struct interface_rx_dpo_trace_t_
{
    u32 sw_if_index;
} interface_rx_dpo_trace_t;

typedef enum interface_rx_dpo_next_t_
{
    INTERFACE_RX_DPO_DROP = 0,
    INTERFACE_RX_DPO_INPUT = 1,
} interface_rx_dpo_next_t;

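/**
 * @brief The interface-rx DPO dispatch function.
 *
 * For each packet: fetch the DPO index from the buffer's TX adjacency slot,
 * rewrite the buffer's RX sw_if_index to the interface the DPO represents,
 * bump that interface's RX counters and send the packet to the protocol's
 * input node.
 */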
always_inline uword
interface_rx_dpo_inline (vlib_main_t * vm,
                         vlib_node_runtime_t * node,
                         vlib_frame_t * from_frame)
{
    u32 n_left_from, next_index, * from, * to_next;
    u32 thread_index = vlib_get_thread_index ();
    vnet_interface_main_t *im;

    im = &vnet_get_main ()->interface_main;
    from = vlib_frame_vector_args (from_frame);
    n_left_from = from_frame->n_vectors;

    next_index = INTERFACE_RX_DPO_INPUT;

    while (n_left_from > 0)
    {
        u32 n_left_to_next;

        vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);

        while (n_left_from >= 4 && n_left_to_next > 2)
        {
            const interface_rx_dpo_t *ido0, *ido1;
            u32 bi0, idoi0, bi1, idoi1;
            vlib_buffer_t *b0, *b1;

            bi0 = from[0];
            to_next[0] = bi0;
            bi1 = from[1];
            to_next[1] = bi1;
            from += 2;
            to_next += 2;
            n_left_from -= 2;
            n_left_to_next -= 2;

            b0 = vlib_get_buffer (vm, bi0);
            b1 = vlib_get_buffer (vm, bi1);

            idoi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
            idoi1 = vnet_buffer(b1)->ip.adj_index[VLIB_TX];
            ido0 = interface_rx_dpo_get(idoi0);
            ido1 = interface_rx_dpo_get(idoi1);

            vnet_buffer(b0)->sw_if_index[VLIB_RX] = ido0->ido_sw_if_index;
            vnet_buffer(b1)->sw_if_index[VLIB_RX] = ido1->ido_sw_if_index;

            vlib_increment_combined_counter (im->combined_sw_if_counters
                                             + VNET_INTERFACE_COUNTER_RX,
                                             thread_index,
                                             ido0->ido_sw_if_index,
                                             1,
                                             vlib_buffer_length_in_chain (vm, b0));
            vlib_increment_combined_counter (im->combined_sw_if_counters
                                             + VNET_INTERFACE_COUNTER_RX,
                                             thread_index,
                                             ido1->ido_sw_if_index,
                                             1,
                                             vlib_buffer_length_in_chain (vm, b1));

            if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
                interface_rx_dpo_trace_t *tr0;

                tr0 = vlib_add_trace (vm, node, b0, sizeof (*tr0));
                tr0->sw_if_index = ido0->ido_sw_if_index;
            }
            if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
            {
                interface_rx_dpo_trace_t *tr1;

                tr1 = vlib_add_trace (vm, node, b1, sizeof (*tr1));
                tr1->sw_if_index = ido1->ido_sw_if_index;
            }
        }

        while (n_left_from > 0 && n_left_to_next > 0)
        {
            const interface_rx_dpo_t * ido0;
            vlib_buffer_t * b0;
            u32 bi0, idoi0;

            bi0 = from[0];
            to_next[0] = bi0;
            from += 1;
            to_next += 1;
            n_left_from -= 1;
            n_left_to_next -= 1;

            b0 = vlib_get_buffer (vm, bi0);

            idoi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
            ido0 = interface_rx_dpo_get(idoi0);

            /* Swap the RX interface of the packet to the one the
             * interface DPO represents */
            vnet_buffer(b0)->sw_if_index[VLIB_RX] = ido0->ido_sw_if_index;

            /* Bump the interface's RX counters */
            vlib_increment_combined_counter (im->combined_sw_if_counters
                                             + VNET_INTERFACE_COUNTER_RX,
                                             thread_index,
                                             ido0->ido_sw_if_index,
                                             1,
                                             vlib_buffer_length_in_chain (vm, b0));

            if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
                interface_rx_dpo_trace_t *tr;

                tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
                tr->sw_if_index = ido0->ido_sw_if_index;
            }
        }
        vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
    return from_frame->n_vectors;
}

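/**
 * @brief Format the per-packet trace: the sw_if_index the packet was
 *        rewritten to have been received on
 */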
static u8 *
format_interface_rx_dpo_trace (u8 * s, va_list * args)
{
    CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
    CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
    interface_rx_dpo_trace_t * t = va_arg (*args, interface_rx_dpo_trace_t *);
    u32 indent = format_get_indent (s);
    s = format (s, "%U sw_if_index:%d",
                format_white_space, indent,
                t->sw_if_index);
    return s;
}

static uword
interface_rx_dpo_ip4 (vlib_main_t * vm,
                      vlib_node_runtime_t * node,
                      vlib_frame_t * from_frame)
{
    return (interface_rx_dpo_inline(vm, node, from_frame));
}

static uword
interface_rx_dpo_ip6 (vlib_main_t * vm,
                      vlib_node_runtime_t * node,
                      vlib_frame_t * from_frame)
{
    return (interface_rx_dpo_inline(vm, node, from_frame));
}

static uword
interface_rx_dpo_l2 (vlib_main_t * vm,
                     vlib_node_runtime_t * node,
                     vlib_frame_t * from_frame)
{
    return (interface_rx_dpo_inline(vm, node, from_frame));
}

VLIB_REGISTER_NODE (interface_rx_dpo_ip4_node) = {
    .function = interface_rx_dpo_ip4,
    .name = "interface-rx-dpo-ip4",
    .vector_size = sizeof (u32),
    .format_trace = format_interface_rx_dpo_trace,

    .n_next_nodes = 2,
    .next_nodes = {
        [INTERFACE_RX_DPO_DROP] = "ip4-drop",
        [INTERFACE_RX_DPO_INPUT] = "ip4-input",
    },
};

VLIB_NODE_FUNCTION_MULTIARCH (interface_rx_dpo_ip4_node,
                              interface_rx_dpo_ip4)

VLIB_REGISTER_NODE (interface_rx_dpo_ip6_node) = {
    .function = interface_rx_dpo_ip6,
    .name = "interface-rx-dpo-ip6",
    .vector_size = sizeof (u32),
    .format_trace = format_interface_rx_dpo_trace,

    .n_next_nodes = 2,
    .next_nodes = {
        [INTERFACE_RX_DPO_DROP] = "ip6-drop",
        [INTERFACE_RX_DPO_INPUT] = "ip6-input",
    },
};

VLIB_NODE_FUNCTION_MULTIARCH (interface_rx_dpo_ip6_node,
                              interface_rx_dpo_ip6)

VLIB_REGISTER_NODE (interface_rx_dpo_l2_node) = {
    .function = interface_rx_dpo_l2,
    .name = "interface-rx-dpo-l2",
    .vector_size = sizeof (u32),
    .format_trace = format_interface_rx_dpo_trace,

    .n_next_nodes = 2,
    .next_nodes = {
        [INTERFACE_RX_DPO_DROP] = "error-drop",
        [INTERFACE_RX_DPO_INPUT] = "l2-input",
    },
};

VLIB_NODE_FUNCTION_MULTIARCH (interface_rx_dpo_l2_node,
                              interface_rx_dpo_l2)