/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/dpo/l2_bridge_dpo.h>
#include <vnet/fib/fib_node.h>
#include <vnet/ethernet/ethernet.h>

/*
 * The 'DB' of L2 bridge DPOs.
 * There is only one per interface, so this is a per-interface vector.
 */
static index_t *l2_bridge_dpo_db;
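
/*
 * Hedged note (not in the original file): the DB is indexed by sw_if_index
 * and stores indices into l2_bridge_dpo_pool; INDEX_INVALID marks an
 * interface that has no L2 bridge DPO yet. For example:
 *
 *   if (sw_if_index < vec_len(l2_bridge_dpo_db) &&
 *       INDEX_INVALID != l2_bridge_dpo_db[sw_if_index])
 *   {
 *       l2b = l2_bridge_dpo_get(l2_bridge_dpo_db[sw_if_index]);
 *   }
 */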

static l2_bridge_dpo_t *
l2_bridge_dpo_alloc (void)
{
    l2_bridge_dpo_t *l2b;

    pool_get(l2_bridge_dpo_pool, l2b);

    return (l2b);
}
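
/*
 * Hedged note (assumption, not visible in this file): l2_bridge_dpo_pool is
 * expected to be the global pool declared in l2_bridge_dpo.h, e.g.:
 *
 *   extern l2_bridge_dpo_t *l2_bridge_dpo_pool;
 *
 * pool_get() reuses a free slot or grows the pool, so the element's index
 * (l2b - l2_bridge_dpo_pool) identifies it until pool_put() is called.
 */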

static inline l2_bridge_dpo_t *
l2_bridge_dpo_get_from_dpo (const dpo_id_t *dpo)
{
    ASSERT(DPO_L2_BRIDGE == dpo->dpoi_type);

    return (l2_bridge_dpo_get(dpo->dpoi_index));
}

static inline index_t
l2_bridge_dpo_get_index (l2_bridge_dpo_t *l2b)
{
    return (l2b - l2_bridge_dpo_pool);
}

static void
l2_bridge_dpo_lock (dpo_id_t *dpo)
{
    l2_bridge_dpo_t *l2b;

    l2b = l2_bridge_dpo_get_from_dpo(dpo);
    l2b->l2b_locks++;
}

static void
l2_bridge_dpo_unlock (dpo_id_t *dpo)
{
    l2_bridge_dpo_t *l2b;

    l2b = l2_bridge_dpo_get_from_dpo(dpo);
    l2b->l2b_locks--;

    if (0 == l2b->l2b_locks)
    {
        l2_bridge_dpo_db[l2b->l2b_sw_if_index] = INDEX_INVALID;
        pool_put(l2_bridge_dpo_pool, l2b);
    }
}
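
/*
 * Hedged note (not part of the original file): the lock/unlock pair above is
 * not called directly; it backs the generic dpo_lock()/dpo_unlock() dispatch
 * for dpo_id_t objects of type DPO_L2_BRIDGE. The last unlock returns the
 * object to the pool and clears its slot in the per-interface DB.
 */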

/*
 * l2_bridge_dpo_add_or_lock
 *
 * Add/create and lock a new DPO, or lock an existing one, for the L2 bridge
 * on the given interface.
 */
void
l2_bridge_dpo_add_or_lock (u32 sw_if_index,
                           dpo_id_t *dpo)
{
    l2_bridge_dpo_t *l2b;

    vec_validate_init_empty(l2_bridge_dpo_db,
                            sw_if_index,
                            INDEX_INVALID);

    if (INDEX_INVALID == l2_bridge_dpo_db[sw_if_index])
    {
        l2b = l2_bridge_dpo_alloc();

        l2b->l2b_sw_if_index = sw_if_index;

        l2_bridge_dpo_db[sw_if_index] =
            l2_bridge_dpo_get_index(l2b);
    }
    else
    {
        l2b = l2_bridge_dpo_get(l2_bridge_dpo_db[sw_if_index]);
    }

    dpo_set(dpo, DPO_L2_BRIDGE, DPO_PROTO_ETHERNET, l2_bridge_dpo_get_index(l2b));
}
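
/*
 * Hedged usage sketch (the child_type/child_dpo identifiers below are
 * illustrative, not from this file): a caller that wants its packets bridged
 * out of an interface obtains a locked DPO and stacks its own object on it:
 *
 *   dpo_id_t l2b_dpo = DPO_INVALID;
 *
 *   l2_bridge_dpo_add_or_lock (sw_if_index, &l2b_dpo);
 *   dpo_stack (child_type, DPO_PROTO_ETHERNET, &child_dpo, &l2b_dpo);
 *   dpo_reset (&l2b_dpo);   // the child's stack holds its own lock
 */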


/**
 * @brief Registered callback for SW interface admin state changes
 */
static clib_error_t *
l2_bridge_dpo_interface_state_change (vnet_main_t * vnm,
                                      u32 sw_if_index,
                                      u32 flags)
{
    /*
     * nothing to do on an admin state change
     */
    return (NULL);
}

VNET_SW_INTERFACE_ADMIN_UP_DOWN_FUNCTION(
    l2_bridge_dpo_interface_state_change);

/**
 * @brief Registered callback for HW interface state changes
 */
static clib_error_t *
l2_bridge_dpo_hw_interface_state_change (vnet_main_t * vnm,
                                         u32 hw_if_index,
                                         u32 flags)
{
    return (NULL);
}

VNET_HW_INTERFACE_LINK_UP_DOWN_FUNCTION(
    l2_bridge_dpo_hw_interface_state_change);

static clib_error_t *
l2_bridge_dpo_interface_delete (vnet_main_t * vnm,
                                u32 sw_if_index,
                                u32 is_add)
{
    return (NULL);
}

VNET_SW_INTERFACE_ADD_DEL_FUNCTION(
    l2_bridge_dpo_interface_delete);

u8*
format_l2_bridge_dpo (u8* s, va_list *ap)
{
    index_t index = va_arg(*ap, index_t);
    CLIB_UNUSED(u32 indent) = va_arg(*ap, u32);
    vnet_main_t * vnm = vnet_get_main();
    l2_bridge_dpo_t *l2b = l2_bridge_dpo_get(index);

    return (format(s, "l2-bridge-%U-dpo",
                   format_vnet_sw_interface_name,
                   vnm,
                   vnet_get_sw_interface(vnm, l2b->l2b_sw_if_index)));
}
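
/*
 * Hedged example (the interface name is illustrative): for a DPO on an
 * interface named "GigabitEthernet0/8/0", the formatter above renders
 *
 *   l2-bridge-GigabitEthernet0/8/0-dpo
 */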

static void
l2_bridge_dpo_mem_show (void)
{
    fib_show_memory_usage("L2-bridge",
                          pool_elts(l2_bridge_dpo_pool),
                          pool_len(l2_bridge_dpo_pool),
                          sizeof(l2_bridge_dpo_t));
}


const static dpo_vft_t l2_bridge_dpo_vft = {
    .dv_lock = l2_bridge_dpo_lock,
    .dv_unlock = l2_bridge_dpo_unlock,
    .dv_format = format_l2_bridge_dpo,
    .dv_mem_show = l2_bridge_dpo_mem_show,
};

/**
 * @brief The per-protocol VLIB graph nodes that are assigned to an L2 bridge
 *        object.
 *
 * These are the graph nodes for which an L2 bridge DPO is the parent object
 * in the DPO graph.
 */
const static char* const l2_bridge_dpo_l2_nodes[] =
{
    "l2-bridge-dpo",
    NULL,
};

const static char* const * const l2_bridge_dpo_nodes[DPO_PROTO_NUM] =
{
    [DPO_PROTO_ETHERNET] = l2_bridge_dpo_l2_nodes,
};

void
l2_bridge_dpo_module_init (void)
{
    dpo_register(DPO_L2_BRIDGE,
                 &l2_bridge_dpo_vft,
                 l2_bridge_dpo_nodes);
}
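
/*
 * Hedged note (not in the original file): dpo_register() associates the
 * DPO_L2_BRIDGE type with its virtual function table and with the graph
 * nodes named above, so that when a child object is stacked on an L2 bridge
 * DPO (via dpo_stack()) the child's node gains "l2-bridge-dpo" as a next
 * node. This init function is expected to run once at start-up, presumably
 * from the DPO module's own initialisation.
 */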

/**
 * @brief L2 bridge DPO trace data
 */
typedef struct l2_bridge_dpo_trace_t_
{
    u32 sw_if_index;
} l2_bridge_dpo_trace_t;

typedef enum l2_bridge_dpo_next_t_
{
    L2_BRIDGE_DPO_DROP = 0,
    L2_BRIDGE_DPO_OUTPUT = 1,
} l2_bridge_dpo_next_t;

always_inline uword
l2_bridge_dpo_inline (vlib_main_t * vm,
                      vlib_node_runtime_t * node,
                      vlib_frame_t * from_frame)
{
    u32 n_left_from, next_index, * from, * to_next;

    from = vlib_frame_vector_args (from_frame);
    n_left_from = from_frame->n_vectors;

    next_index = node->cached_next_index;

    while (n_left_from > 0)
    {
        u32 n_left_to_next;

        vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);

        while (n_left_from >= 4 && n_left_to_next > 2)
        {
            const l2_bridge_dpo_t *l2b0, *l2b1;
            u32 bi0, l2bi0, bi1, l2bi1;
            vlib_buffer_t *b0, *b1;
            u8 len0, len1;

            bi0 = from[0];
            to_next[0] = bi0;
            bi1 = from[1];
            to_next[1] = bi1;
            from += 2;
            to_next += 2;
            n_left_from -= 2;
            n_left_to_next -= 2;

            b0 = vlib_get_buffer (vm, bi0);
            b1 = vlib_get_buffer (vm, bi1);

            l2bi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
            l2bi1 = vnet_buffer(b1)->ip.adj_index[VLIB_TX];
            l2b0 = l2_bridge_dpo_get(l2bi0);
            l2b1 = l2_bridge_dpo_get(l2bi1);

            vnet_buffer(b0)->sw_if_index[VLIB_TX] = l2b0->l2b_sw_if_index;
            vnet_buffer(b1)->sw_if_index[VLIB_TX] = l2b1->l2b_sw_if_index;

            /*
             * the distance from the current data back to the ethernet
             * header is the L2 header length
             */
            len0 = ((u8*)vlib_buffer_get_current(b0) -
                    (u8*)ethernet_buffer_get_header(b0));
            len1 = ((u8*)vlib_buffer_get_current(b1) -
                    (u8*)ethernet_buffer_get_header(b1));
            vnet_buffer(b0)->l2.l2_len = len0;
            vnet_buffer(b1)->l2.l2_len = len1;

            vlib_buffer_advance(b0, -len0);
            vlib_buffer_advance(b1, -len1);

            if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
                l2_bridge_dpo_trace_t *tr0;

                tr0 = vlib_add_trace (vm, node, b0, sizeof (*tr0));
                tr0->sw_if_index = l2b0->l2b_sw_if_index;
            }
            if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
            {
                l2_bridge_dpo_trace_t *tr1;

                tr1 = vlib_add_trace (vm, node, b1, sizeof (*tr1));
                tr1->sw_if_index = l2b1->l2b_sw_if_index;
            }

            vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next,
                                            n_left_to_next, bi0, bi1,
                                            L2_BRIDGE_DPO_OUTPUT,
                                            L2_BRIDGE_DPO_OUTPUT);
        }

        while (n_left_from > 0 && n_left_to_next > 0)
        {
            const l2_bridge_dpo_t * l2b0;
            vlib_buffer_t * b0;
            u32 bi0, l2bi0;
            u8 len0;

            bi0 = from[0];
            to_next[0] = bi0;
            from += 1;
            to_next += 1;
            n_left_from -= 1;
            n_left_to_next -= 1;

            b0 = vlib_get_buffer (vm, bi0);

            l2bi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
            l2b0 = l2_bridge_dpo_get(l2bi0);

            vnet_buffer(b0)->sw_if_index[VLIB_TX] = l2b0->l2b_sw_if_index;

            /*
             * rewind the buffer back to the start of the ethernet header
             * so the l2-output node sees the full L2 frame
             */
            len0 = ((u8*)vlib_buffer_get_current(b0) -
                    (u8*)ethernet_buffer_get_header(b0));
            vnet_buffer(b0)->l2.l2_len = len0;
            vlib_buffer_advance(b0, -len0);

            if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
                l2_bridge_dpo_trace_t *tr;

                tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
                tr->sw_if_index = l2b0->l2b_sw_if_index;
            }

            vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
                                            n_left_to_next, bi0,
                                            L2_BRIDGE_DPO_OUTPUT);
        }
        vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
    return from_frame->n_vectors;
}
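
/*
 * Hedged worked example (numbers are illustrative): if the buffer's current
 * data pointer sits at the start of the IP header and the ethernet header
 * began 14 bytes earlier (no VLAN tags), then
 *
 *   len0 = 14;
 *   vnet_buffer(b0)->l2.l2_len = 14;
 *   vlib_buffer_advance(b0, -14);   // current data now points at the
 *                                   // ethernet header for l2-output
 */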

static u8 *
format_l2_bridge_dpo_trace (u8 * s, va_list * args)
{
    CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
    CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
    l2_bridge_dpo_trace_t * t = va_arg (*args, l2_bridge_dpo_trace_t *);
    u32 indent = format_get_indent (s);
    s = format (s, "%U sw_if_index:%d",
                format_white_space, indent,
                t->sw_if_index);
    return s;
}

static uword
l2_bridge_dpo_l2 (vlib_main_t * vm,
                  vlib_node_runtime_t * node,
                  vlib_frame_t * from_frame)
{
    return (l2_bridge_dpo_inline(vm, node, from_frame));
}


VLIB_REGISTER_NODE (l2_bridge_dpo_l2_node) = {
    .function = l2_bridge_dpo_l2,
    .name = "l2-bridge-dpo",
    .vector_size = sizeof (u32),
    .format_trace = format_l2_bridge_dpo_trace,

    .n_next_nodes = 2,
    .next_nodes = {
        [L2_BRIDGE_DPO_DROP] = "error-drop",
        [L2_BRIDGE_DPO_OUTPUT] = "l2-output",
    },
};
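
/*
 * Hedged example (the index value is illustrative): with the trace formatter
 * above, a packet-trace entry for this node reads roughly as
 *
 *   l2-bridge-dpo: sw_if_index:2
 */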

VLIB_NODE_FUNCTION_MULTIARCH (l2_bridge_dpo_l2_node,
                              l2_bridge_dpo_l2)