/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
15
16#include <vnet/dpo/interface_rx_dpo.h>
17#include <vnet/fib/fib_node.h>
18
/*
 * The 'DB' of interface DPOs.
 * There is only one per-interface per-protocol, so this is a per-protocol
 * array of per-interface vectors (indexed by sw_if_index) holding pool
 * indices; INDEX_INVALID marks a slot with no DPO.
 */
static index_t *interface_rx_dpo_db[DPO_PROTO_NUM];
25
26static interface_rx_dpo_t *
27interface_rx_dpo_alloc (void)
28{
29 interface_rx_dpo_t *ido;
30
31 pool_get(interface_rx_dpo_pool, ido);
32
33 return (ido);
34}
35
36static inline interface_rx_dpo_t *
37interface_rx_dpo_get_from_dpo (const dpo_id_t *dpo)
38{
39 ASSERT(DPO_INTERFACE_RX == dpo->dpoi_type);
40
41 return (interface_rx_dpo_get(dpo->dpoi_index));
42}
43
44static inline index_t
45interface_rx_dpo_get_index (interface_rx_dpo_t *ido)
46{
47 return (ido - interface_rx_dpo_pool);
48}
49
50static void
51interface_rx_dpo_lock (dpo_id_t *dpo)
52{
53 interface_rx_dpo_t *ido;
54
55 ido = interface_rx_dpo_get_from_dpo(dpo);
56 ido->ido_locks++;
57}
58
59static void
60interface_rx_dpo_unlock (dpo_id_t *dpo)
61{
62 interface_rx_dpo_t *ido;
63
64 ido = interface_rx_dpo_get_from_dpo(dpo);
65 ido->ido_locks--;
66
67 if (0 == ido->ido_locks)
68 {
69 interface_rx_dpo_db[ido->ido_proto][ido->ido_sw_if_index] =
70 INDEX_INVALID;
71 pool_put(interface_rx_dpo_pool, ido);
72 }
73}
74
75/*
76 * interface_rx_dpo_add_or_lock
77 *
78 * Add/create and lock a new or lock an existing for the interface DPO
79 * on the interface and protocol given
80 */
81void
82interface_rx_dpo_add_or_lock (dpo_proto_t proto,
83 u32 sw_if_index,
84 dpo_id_t *dpo)
85{
86 interface_rx_dpo_t *ido;
87
88 vec_validate_init_empty(interface_rx_dpo_db[proto],
89 sw_if_index,
90 INDEX_INVALID);
91
92 if (INDEX_INVALID == interface_rx_dpo_db[proto][sw_if_index])
93 {
94 ido = interface_rx_dpo_alloc();
95
96 ido->ido_sw_if_index = sw_if_index;
97 ido->ido_proto = proto;
98
99 interface_rx_dpo_db[proto][sw_if_index] =
100 interface_rx_dpo_get_index(ido);
101 }
102 else
103 {
104 ido = interface_rx_dpo_get(interface_rx_dpo_db[proto][sw_if_index]);
105 }
106
107 dpo_set(dpo, DPO_INTERFACE_RX, proto, interface_rx_dpo_get_index(ido));
108}
109
110
111static clib_error_t *
112interface_rx_dpo_interface_state_change (vnet_main_t * vnm,
113 u32 sw_if_index,
114 u32 flags)
115{
116 /*
117 */
118 return (NULL);
119}
120
121VNET_SW_INTERFACE_ADMIN_UP_DOWN_FUNCTION(
122 interface_rx_dpo_interface_state_change);
123
124/**
125 * @brief Registered callback for HW interface state changes
126 */
127static clib_error_t *
128interface_rx_dpo_hw_interface_state_change (vnet_main_t * vnm,
129 u32 hw_if_index,
130 u32 flags)
131{
132 return (NULL);
133}
134
135VNET_HW_INTERFACE_LINK_UP_DOWN_FUNCTION(
136 interface_rx_dpo_hw_interface_state_change);
137
138static clib_error_t *
139interface_rx_dpo_interface_delete (vnet_main_t * vnm,
140 u32 sw_if_index,
141 u32 is_add)
142{
143 return (NULL);
144}
145
146VNET_SW_INTERFACE_ADD_DEL_FUNCTION(
147 interface_rx_dpo_interface_delete);
148
149u8*
150format_interface_rx_dpo (u8* s, va_list *ap)
151{
152 index_t index = va_arg(*ap, index_t);
153 CLIB_UNUSED(u32 indent) = va_arg(*ap, u32);
154 vnet_main_t * vnm = vnet_get_main();
155 interface_rx_dpo_t *ido = interface_rx_dpo_get(index);
156
157 return (format(s, "%U-dpo: %U",
158 format_vnet_sw_interface_name,
159 vnm,
160 vnet_get_sw_interface(vnm, ido->ido_sw_if_index),
161 format_dpo_proto, ido->ido_proto));
162}
163
/**
 * Report pool memory usage for interface-rx DPOs via the FIB show-memory
 * infrastructure (element count, pool length, element size).
 */
static void
interface_rx_dpo_mem_show (void)
{
    fib_show_memory_usage("Interface",
                          pool_elts(interface_rx_dpo_pool),
                          pool_len(interface_rx_dpo_pool),
                          sizeof(interface_rx_dpo_t));
}
172
173
/**
 * Virtual function table registered with the DPO infrastructure for
 * the DPO_INTERFACE_RX type: reference counting, formatting and
 * memory-usage reporting.
 */
const static dpo_vft_t interface_rx_dpo_vft = {
    .dv_lock = interface_rx_dpo_lock,
    .dv_unlock = interface_rx_dpo_unlock,
    .dv_format = format_interface_rx_dpo,
    .dv_mem_show = interface_rx_dpo_mem_show,
};
180
/**
 * @brief The per-protocol VLIB graph nodes that are assigned to an
 * interface-rx DPO.
 *
 * this means that these graph nodes are ones from which an interface-rx
 * DPO is the parent object in the DPO-graph.
 * (The original comment said "glean" — copy-paste from the glean DPO.)
 */
const static char* const interface_rx_dpo_ip4_nodes[] =
{
    "interface-rx-dpo-ip4",
    NULL,
};
const static char* const interface_rx_dpo_ip6_nodes[] =
{
    "interface-rx-dpo-ip6",
    NULL,
};
const static char* const interface_rx_dpo_l2_nodes[] =
{
    "interface-rx-dpo-l2",
    NULL,
};

/* per-payload-protocol node lists; MPLS has no interface-rx node */
const static char* const * const interface_rx_dpo_nodes[DPO_PROTO_NUM] =
{
    [DPO_PROTO_IP4] = interface_rx_dpo_ip4_nodes,
    [DPO_PROTO_IP6] = interface_rx_dpo_ip6_nodes,
    [DPO_PROTO_ETHERNET] = interface_rx_dpo_l2_nodes,
    [DPO_PROTO_MPLS] = NULL,
};
211
212void
213interface_rx_dpo_module_init (void)
214{
215 dpo_register(DPO_INTERFACE_RX,
216 &interface_rx_dpo_vft,
217 interface_rx_dpo_nodes);
218}
219
/**
 * @brief Interface DPO trace data
 */
typedef struct interface_rx_dpo_trace_t_
{
    /* the RX interface the packet was rewritten to */
    u32 sw_if_index;
} interface_rx_dpo_trace_t;

/* next-node indices for the interface-rx-dpo-* graph nodes */
typedef enum interface_rx_dpo_next_t_
{
    INTERFACE_RX_DPO_DROP = 0,
    INTERFACE_RX_DPO_INPUT = 1,
} interface_rx_dpo_next_t;
233
/**
 * Per-frame dispatch shared by the ip4/ip6/l2 interface-rx nodes.
 *
 * For each buffer: look up the interface-rx DPO from the adjacency index
 * stashed in vnet_buffer()->ip.adj_index[VLIB_TX], rewrite the buffer's
 * RX sw_if_index to the DPO's interface, bump that interface's RX
 * counters, and enqueue to the protocol's input node.
 */
always_inline uword
interface_rx_dpo_inline (vlib_main_t * vm,
                         vlib_node_runtime_t * node,
                         vlib_frame_t * from_frame)
{
    u32 n_left_from, next_index, * from, * to_next;
    u32 thread_index = vlib_get_thread_index ();
    vnet_interface_main_t *im;

    im = &vnet_get_main ()->interface_main;
    from = vlib_frame_vector_args (from_frame);
    n_left_from = from_frame->n_vectors;

    next_index = node->cached_next_index;

    while (n_left_from > 0)
    {
        u32 n_left_to_next;

        vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);

        /* dual-loop: process buffers two at a time.
         * NOTE(review): the usual VPP dual-loop bound is
         * n_left_to_next >= 2; '> 2' leaves a 2-slot remainder to the
         * single loop below — correct, but slightly less efficient.
         * Confirm intent. */
        while (n_left_from >= 4 && n_left_to_next > 2)
        {
            const interface_rx_dpo_t *ido0, *ido1;
            u32 bi0, idoi0, bi1, idoi1;
            vlib_buffer_t *b0, *b1;

            bi0 = from[0];
            to_next[0] = bi0;
            bi1 = from[1];
            to_next[1] = bi1;
            from += 2;
            to_next += 2;
            n_left_from -= 2;
            n_left_to_next -= 2;

            b0 = vlib_get_buffer (vm, bi0);
            b1 = vlib_get_buffer (vm, bi1);

            /* the DPO index was stored in the TX adjacency slot by the
             * parent in the DPO graph */
            idoi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
            idoi1 = vnet_buffer(b1)->ip.adj_index[VLIB_TX];
            ido0 = interface_rx_dpo_get(idoi0);
            ido1 = interface_rx_dpo_get(idoi1);

            /* swap the packets' RX interface to the one each DPO represents */
            vnet_buffer(b0)->sw_if_index[VLIB_RX] = ido0->ido_sw_if_index;
            vnet_buffer(b1)->sw_if_index[VLIB_RX] = ido1->ido_sw_if_index;

            /* bump the new RX interfaces' counters */
            vlib_increment_combined_counter (im->combined_sw_if_counters
                                             + VNET_INTERFACE_COUNTER_RX,
                                             thread_index,
                                             ido0->ido_sw_if_index,
                                             1,
                                             vlib_buffer_length_in_chain (vm, b0));
            vlib_increment_combined_counter (im->combined_sw_if_counters
                                             + VNET_INTERFACE_COUNTER_RX,
                                             thread_index,
                                             ido1->ido_sw_if_index,
                                             1,
                                             vlib_buffer_length_in_chain (vm, b1));

            if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
                interface_rx_dpo_trace_t *tr0;

                tr0 = vlib_add_trace (vm, node, b0, sizeof (*tr0));
                tr0->sw_if_index = ido0->ido_sw_if_index;
            }
            if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
            {
                interface_rx_dpo_trace_t *tr1;

                tr1 = vlib_add_trace (vm, node, b1, sizeof (*tr1));
                tr1->sw_if_index = ido1->ido_sw_if_index;
            }

            vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next,
                                            n_left_to_next, bi0, bi1,
                                            INTERFACE_RX_DPO_INPUT,
                                            INTERFACE_RX_DPO_INPUT);
        }

        /* single-loop: remaining buffers one at a time */
        while (n_left_from > 0 && n_left_to_next > 0)
        {
            const interface_rx_dpo_t * ido0;
            vlib_buffer_t * b0;
            u32 bi0, idoi0;

            bi0 = from[0];
            to_next[0] = bi0;
            from += 1;
            to_next += 1;
            n_left_from -= 1;
            n_left_to_next -= 1;

            b0 = vlib_get_buffer (vm, bi0);

            idoi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
            ido0 = interface_rx_dpo_get(idoi0);

            /* Swap the RX interface of the packet to the one the
             * interface DPO represents */
            vnet_buffer(b0)->sw_if_index[VLIB_RX] = ido0->ido_sw_if_index;

            /* Bump the interface's RX counters */
            vlib_increment_combined_counter (im->combined_sw_if_counters
                                             + VNET_INTERFACE_COUNTER_RX,
                                             thread_index,
                                             ido0->ido_sw_if_index,
                                             1,
                                             vlib_buffer_length_in_chain (vm, b0));

            if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
                interface_rx_dpo_trace_t *tr;

                tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
                tr->sw_if_index = ido0->ido_sw_if_index;
            }

            vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
                                            n_left_to_next, bi0,
                                            INTERFACE_RX_DPO_INPUT);
        }
        vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
    return from_frame->n_vectors;
}
361
362static u8 *
363format_interface_rx_dpo_trace (u8 * s, va_list * args)
364{
365 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
366 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
367 interface_rx_dpo_trace_t * t = va_arg (*args, interface_rx_dpo_trace_t *);
368 uword indent = format_get_indent (s);
369 s = format (s, "%U sw_if_index:%d",
370 format_white_space, indent,
371 t->sw_if_index);
372 return s;
373}
374
375static uword
376interface_rx_dpo_ip4 (vlib_main_t * vm,
377 vlib_node_runtime_t * node,
378 vlib_frame_t * from_frame)
379{
380 return (interface_rx_dpo_inline(vm, node, from_frame));
381}
382
383static uword
384interface_rx_dpo_ip6 (vlib_main_t * vm,
385 vlib_node_runtime_t * node,
386 vlib_frame_t * from_frame)
387{
388 return (interface_rx_dpo_inline(vm, node, from_frame));
389}
390
391static uword
392interface_rx_dpo_l2 (vlib_main_t * vm,
393 vlib_node_runtime_t * node,
394 vlib_frame_t * from_frame)
395{
396 return (interface_rx_dpo_inline(vm, node, from_frame));
397}
398
/* graph node registration: ip4 interface-rx; drops go to ip4-drop,
 * accepted packets re-enter the ip4 input path */
VLIB_REGISTER_NODE (interface_rx_dpo_ip4_node) = {
    .function = interface_rx_dpo_ip4,
    .name = "interface-rx-dpo-ip4",
    .vector_size = sizeof (u32),
    .format_trace = format_interface_rx_dpo_trace,

    .n_next_nodes = 2,
    .next_nodes = {
        [INTERFACE_RX_DPO_DROP] = "ip4-drop",
        [INTERFACE_RX_DPO_INPUT] = "ip4-input",
    },
};

VLIB_NODE_FUNCTION_MULTIARCH (interface_rx_dpo_ip4_node,
                              interface_rx_dpo_ip4)
414
/* graph node registration: ip6 interface-rx; drops go to ip6-drop,
 * accepted packets re-enter the ip6 input path */
VLIB_REGISTER_NODE (interface_rx_dpo_ip6_node) = {
    .function = interface_rx_dpo_ip6,
    .name = "interface-rx-dpo-ip6",
    .vector_size = sizeof (u32),
    .format_trace = format_interface_rx_dpo_trace,

    .n_next_nodes = 2,
    .next_nodes = {
        [INTERFACE_RX_DPO_DROP] = "ip6-drop",
        [INTERFACE_RX_DPO_INPUT] = "ip6-input",
    },
};

VLIB_NODE_FUNCTION_MULTIARCH (interface_rx_dpo_ip6_node,
                              interface_rx_dpo_ip6)
430
/* graph node registration: l2 interface-rx; drops go to error-drop,
 * accepted packets re-enter the l2 input path */
VLIB_REGISTER_NODE (interface_rx_dpo_l2_node) = {
    .function = interface_rx_dpo_l2,
    .name = "interface-rx-dpo-l2",
    .vector_size = sizeof (u32),
    .format_trace = format_interface_rx_dpo_trace,

    .n_next_nodes = 2,
    .next_nodes = {
        [INTERFACE_RX_DPO_DROP] = "error-drop",
        [INTERFACE_RX_DPO_INPUT] = "l2-input",
    },
};

VLIB_NODE_FUNCTION_MULTIARCH (interface_rx_dpo_l2_node,
                              interface_rx_dpo_l2)