/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/ip/ip4_input.h>
#include <vnet/ip/ip6_input.h>
#include <vnet/dpo/mpls_disposition.h>
#include <vnet/mpls/mpls.h>

/*
 * pool of all MPLS Label DPOs
 */
mpls_disp_dpo_t *mpls_disp_dpo_pool;

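/**
 * @brief Allocate and zero-initialise a disposition DPO from the pool
 */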
static mpls_disp_dpo_t *
mpls_disp_dpo_alloc (void)
{
    mpls_disp_dpo_t *mdd;

    pool_get_aligned(mpls_disp_dpo_pool, mdd, CLIB_CACHE_LINE_BYTES);
    memset(mdd, 0, sizeof(*mdd));

    dpo_reset(&mdd->mdd_dpo);

    return (mdd);
}

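/**
 * @brief Convert a pool element back to its pool index
 */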
static index_t
mpls_disp_dpo_get_index (mpls_disp_dpo_t *mdd)
{
    return (mdd - mpls_disp_dpo_pool);
}

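/**
 * @brief Create an MPLS disposition DPO for the given payload protocol and
 *        RPF-ID, stacked on the parent DPO that will forward the exposed
 *        payload.
 */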
index_t
mpls_disp_dpo_create (dpo_proto_t payload_proto,
                      fib_rpf_id_t rpf_id,
                      const dpo_id_t *dpo)
{
    mpls_disp_dpo_t *mdd;

    mdd = mpls_disp_dpo_alloc();

    mdd->mdd_payload_proto = payload_proto;
    mdd->mdd_rpf_id = rpf_id;

    dpo_stack(DPO_MPLS_DISPOSITION,
              mdd->mdd_payload_proto,
              &mdd->mdd_dpo,
              dpo);

    return (mpls_disp_dpo_get_index(mdd));
}

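/*
 * Illustrative usage sketch (not a call site in this file): a caller that
 * already holds a parent DPO ('parent'), an RPF-ID ('rpf_id') and its own
 * dpo_id_t ('dpo') could create a disposition DPO stacked on that parent
 * and plumb it in roughly as:
 *
 *   index_t mddi = mpls_disp_dpo_create(DPO_PROTO_IP4, rpf_id, &parent);
 *   dpo_set(&dpo, DPO_MPLS_DISPOSITION, DPO_PROTO_IP4, mddi);
 */

/**
 * @brief Format/print an MPLS disposition DPO
 */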
u8*
format_mpls_disp_dpo (u8 *s, va_list *args)
{
    index_t index = va_arg (*args, index_t);
    u32 indent = va_arg (*args, u32);
    mpls_disp_dpo_t *mdd;

    mdd = mpls_disp_dpo_get(index);

    s = format(s, "mpls-disposition:[%d]:[%U]",
               index,
               format_dpo_proto, mdd->mdd_payload_proto);

    s = format(s, "\n%U", format_white_space, indent);
    s = format(s, "%U", format_dpo_id, &mdd->mdd_dpo, indent+2);

    return (s);
}

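/**
 * @brief Take a reference on a disposition DPO
 */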
static void
mpls_disp_dpo_lock (dpo_id_t *dpo)
{
    mpls_disp_dpo_t *mdd;

    mdd = mpls_disp_dpo_get(dpo->dpoi_index);

    mdd->mdd_locks++;
}

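/**
 * @brief Release a reference; on the last release, unstack from the parent
 *        and return the object to the pool
 */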
static void
mpls_disp_dpo_unlock (dpo_id_t *dpo)
{
    mpls_disp_dpo_t *mdd;

    mdd = mpls_disp_dpo_get(dpo->dpoi_index);

    mdd->mdd_locks--;

    if (0 == mdd->mdd_locks)
    {
        dpo_reset(&mdd->mdd_dpo);
        pool_put(mpls_disp_dpo_pool, mdd);
    }
}

/**
 * @brief A struct to hold tracing information for the MPLS label disposition
 * node.
 */
typedef struct mpls_label_disposition_trace_t_
{
    index_t mdd;
} mpls_label_disposition_trace_t;

extern vlib_node_registration_t ip4_mpls_label_disposition_node;
extern vlib_node_registration_t ip6_mpls_label_disposition_node;

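/*
 * Disposition is the tail-end of an MPLS LSP: by the time a packet reaches
 * these nodes the label has been removed and the IP payload is exposed.
 * The DPO index left in the buffer's TX adjacency slot identifies the
 * disposition object, from which the node recovers the RPF-ID and the
 * parent DPO, runs the IPv4/IPv6 input checks on the exposed header, and
 * then dispatches to the parent's next node.
 */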
always_inline uword
mpls_label_disposition_inline (vlib_main_t * vm,
                               vlib_node_runtime_t * node,
                               vlib_frame_t * from_frame,
                               u8 payload_is_ip4,
                               u8 payload_is_ip6)
{
    u32 n_left_from, next_index, * from, * to_next;
    vlib_node_runtime_t *error_node;

    if (payload_is_ip4)
        error_node = vlib_node_get_runtime (vm, ip4_mpls_label_disposition_node.index);
    else
        error_node = vlib_node_get_runtime (vm, ip6_mpls_label_disposition_node.index);

    from = vlib_frame_vector_args (from_frame);
    n_left_from = from_frame->n_vectors;

    next_index = node->cached_next_index;

    while (n_left_from > 0)
    {
        u32 n_left_to_next;

        vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);

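        /* Dual loop: handle two packets per iteration, prefetching the next pair */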
        while (n_left_from >= 4 && n_left_to_next >= 2)
        {
            mpls_disp_dpo_t *mdd0, *mdd1;
            u32 bi0, mddi0, bi1, mddi1;
            vlib_buffer_t * b0, *b1;
            u32 next0, next1;

            bi0 = to_next[0] = from[0];
            bi1 = to_next[1] = from[1];

            /* Prefetch next iteration. */
            {
                vlib_buffer_t * p2, * p3;

                p2 = vlib_get_buffer (vm, from[2]);
                p3 = vlib_get_buffer (vm, from[3]);

                vlib_prefetch_buffer_header (p2, STORE);
                vlib_prefetch_buffer_header (p3, STORE);

                CLIB_PREFETCH (p2->data, sizeof (ip6_header_t), STORE);
                CLIB_PREFETCH (p3->data, sizeof (ip6_header_t), STORE);
            }

            from += 2;
            to_next += 2;
            n_left_from -= 2;
            n_left_to_next -= 2;

            b0 = vlib_get_buffer (vm, bi0);
            b1 = vlib_get_buffer (vm, bi1);

            /* dst lookup was done by ip4 lookup */
            mddi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
            mddi1 = vnet_buffer(b1)->ip.adj_index[VLIB_TX];
            mdd0 = mpls_disp_dpo_get(mddi0);
            mdd1 = mpls_disp_dpo_get(mddi1);

            next0 = mdd0->mdd_dpo.dpoi_next_node;
            next1 = mdd1->mdd_dpo.dpoi_next_node;

            if (payload_is_ip4)
            {
                ip4_header_t *ip0, *ip1;

                ip0 = vlib_buffer_get_current (b0);
                ip1 = vlib_buffer_get_current (b1);

                /*
                 * IPv4 input checks on the exposed IP header
                 * including checksum
                 */
                ip4_input_check_x2 (vm, error_node,
                                    b0, b1, ip0, ip1,
                                    &next0, &next1, 1);
            }
            else if (payload_is_ip6)
            {
                ip6_header_t *ip0, *ip1;

                ip0 = vlib_buffer_get_current (b0);
                ip1 = vlib_buffer_get_current (b1);

                /*
                 * IPv6 input checks on the exposed IP header
                 */
                ip6_input_check_x2 (vm, error_node,
                                    b0, b1, ip0, ip1,
                                    &next0, &next1);
            }

            vnet_buffer(b0)->ip.adj_index[VLIB_TX] = mdd0->mdd_dpo.dpoi_index;
            vnet_buffer(b1)->ip.adj_index[VLIB_TX] = mdd1->mdd_dpo.dpoi_index;
            vnet_buffer(b0)->ip.rpf_id = mdd0->mdd_rpf_id;
            vnet_buffer(b1)->ip.rpf_id = mdd1->mdd_rpf_id;

            if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
                mpls_label_disposition_trace_t *tr =
                    vlib_add_trace (vm, node, b0, sizeof (*tr));

                tr->mdd = mddi0;
            }
            if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
            {
                mpls_label_disposition_trace_t *tr =
                    vlib_add_trace (vm, node, b1, sizeof (*tr));
                tr->mdd = mddi1;
            }

            vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next,
                                            n_left_to_next,
                                            bi0, bi1, next0, next1);
        }

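        /* Single loop: handle any remaining packets one at a time */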
        while (n_left_from > 0 && n_left_to_next > 0)
        {
            mpls_disp_dpo_t *mdd0;
            vlib_buffer_t * b0;
            u32 bi0, mddi0;
            u32 next0;

            bi0 = from[0];
            to_next[0] = bi0;
            from += 1;
            to_next += 1;
            n_left_from -= 1;
            n_left_to_next -= 1;

            b0 = vlib_get_buffer (vm, bi0);

            /* dst lookup was done by ip4 lookup */
            mddi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
            mdd0 = mpls_disp_dpo_get(mddi0);
            next0 = mdd0->mdd_dpo.dpoi_next_node;

            if (payload_is_ip4)
            {
                ip4_header_t *ip0;

                ip0 = vlib_buffer_get_current (b0);

                /*
                 * IPv4 input checks on the exposed IP header
                 * including checksum
                 */
                ip4_input_check_x1 (vm, error_node, b0, ip0, &next0, 1);
            }
            else if (payload_is_ip6)
            {
                ip6_header_t *ip0;

                ip0 = vlib_buffer_get_current (b0);

                /*
                 * IPv6 input checks on the exposed IP header
                 */
                ip6_input_check_x1 (vm, error_node, b0, ip0, &next0);
            }

            vnet_buffer(b0)->ip.adj_index[VLIB_TX] = mdd0->mdd_dpo.dpoi_index;
            vnet_buffer(b0)->ip.rpf_id = mdd0->mdd_rpf_id;

            if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
                mpls_label_disposition_trace_t *tr =
                    vlib_add_trace (vm, node, b0, sizeof (*tr));
                tr->mdd = mddi0;
            }

            vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
                                            n_left_to_next, bi0, next0);
        }
        vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
    return from_frame->n_vectors;
}

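/**
 * @brief Format the per-packet trace of the disposition nodes
 */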
static u8 *
format_mpls_label_disposition_trace (u8 * s, va_list * args)
{
    CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
    CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
    CLIB_UNUSED (mpls_label_disposition_trace_t * t);

    t = va_arg (*args, mpls_label_disposition_trace_t *);

    s = format(s, "disp:%d", t->mdd);
    return (s);
}

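/*
 * The IPv4 and IPv6 nodes below are thin wrappers that select the payload
 * protocol for the shared inline above.
 */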
static uword
ip4_mpls_label_disposition (vlib_main_t * vm,
                            vlib_node_runtime_t * node,
                            vlib_frame_t * frame)
{
    return (mpls_label_disposition_inline(vm, node, frame, 1, 0));
}

VLIB_REGISTER_NODE (ip4_mpls_label_disposition_node) = {
    .function = ip4_mpls_label_disposition,
    .name = "ip4-mpls-label-disposition",
    .vector_size = sizeof (u32),

    .format_trace = format_mpls_label_disposition_trace,
    .sibling_of = "ip4-input",
    .n_errors = IP4_N_ERROR,
    .error_strings = ip4_error_strings,
};
VLIB_NODE_FUNCTION_MULTIARCH (ip4_mpls_label_disposition_node,
                              ip4_mpls_label_disposition)

static uword
ip6_mpls_label_disposition (vlib_main_t * vm,
                            vlib_node_runtime_t * node,
                            vlib_frame_t * frame)
{
    return (mpls_label_disposition_inline(vm, node, frame, 0, 1));
}

VLIB_REGISTER_NODE (ip6_mpls_label_disposition_node) = {
    .function = ip6_mpls_label_disposition,
    .name = "ip6-mpls-label-disposition",
    .vector_size = sizeof (u32),

    .format_trace = format_mpls_label_disposition_trace,
    .sibling_of = "ip6-input",
    .n_errors = IP6_N_ERROR,
    .error_strings = ip6_error_strings,
};
VLIB_NODE_FUNCTION_MULTIARCH (ip6_mpls_label_disposition_node,
                              ip6_mpls_label_disposition)

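/**
 * @brief Report pool memory usage to the FIB memory-show machinery
 */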
static void
mpls_disp_dpo_mem_show (void)
{
    fib_show_memory_usage("MPLS label",
                          pool_elts(mpls_disp_dpo_pool),
                          pool_len(mpls_disp_dpo_pool),
                          sizeof(mpls_disp_dpo_t));
}

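/*
 * Virtual function table through which the generic DPO module manipulates
 * MPLS disposition DPOs, and the graph nodes onto which the DPO is plumbed
 * for each payload protocol.
 */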
const static dpo_vft_t mdd_vft = {
    .dv_lock = mpls_disp_dpo_lock,
    .dv_unlock = mpls_disp_dpo_unlock,
    .dv_format = format_mpls_disp_dpo,
    .dv_mem_show = mpls_disp_dpo_mem_show,
};

const static char* const mpls_label_disp_ip4_nodes[] =
{
    "ip4-mpls-label-disposition",
    NULL,
};
const static char* const mpls_label_disp_ip6_nodes[] =
{
    "ip6-mpls-label-disposition",
    NULL,
};
const static char* const * const mpls_label_disp_nodes[DPO_PROTO_NUM] =
{
    [DPO_PROTO_IP4] = mpls_label_disp_ip4_nodes,
    [DPO_PROTO_IP6] = mpls_label_disp_ip6_nodes,
};

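/**
 * @brief Register the MPLS disposition DPO type with its VFT and per-protocol
 *        node lists
 */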
void
mpls_disp_dpo_module_init (void)
{
    dpo_register(DPO_MPLS_DISPOSITION, &mdd_vft, mpls_label_disp_nodes);
}