/*
 * Copyright (c) 2017 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/udp/udp_encap.h>
17
18typedef struct udp4_encap_trace_t_
19{
20 udp_header_t udp;
21 ip4_header_t ip;
22} udp4_encap_trace_t;
23
24typedef struct udp6_encap_trace_t_
25{
26 udp_header_t udp;
27 ip6_header_t ip;
28} udp6_encap_trace_t;
29
extern vlib_combined_counter_main_t udp_encap_counters;
31
Neale Ranns810086d2017-11-05 16:26:46 -080032static u8 *
33format_udp4_encap_trace (u8 * s, va_list * args)
34{
35 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
36 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
37 udp4_encap_trace_t *t;
38
39 t = va_arg (*args, udp4_encap_trace_t *);
40
41 s = format (s, "%U\n %U",
42 format_ip4_header, &t->ip, sizeof (t->ip),
43 format_udp_header, &t->udp, sizeof (t->udp));
44 return (s);
45}
46
47static u8 *
48format_udp6_encap_trace (u8 * s, va_list * args)
49{
50 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
51 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
52 udp6_encap_trace_t *t;
53
54 t = va_arg (*args, udp6_encap_trace_t *);
55
56 s = format (s, "%U\n %U",
57 format_ip6_header, &t->ip, sizeof (t->ip),
58 format_udp_header, &t->udp, sizeof (t->udp));
59 return (s);
60}
61
62always_inline uword
63udp_encap_inline (vlib_main_t * vm,
64 vlib_node_runtime_t * node,
65 vlib_frame_t * frame, int is_encap_v6)
66{
Neale Ranns43b1f442018-03-20 01:47:35 -070067 vlib_combined_counter_main_t *cm = &udp_encap_counters;
Neale Ranns810086d2017-11-05 16:26:46 -080068 u32 *from = vlib_frame_vector_args (frame);
69 u32 n_left_from, n_left_to_next, *to_next, next_index;
Neale Ranns43b1f442018-03-20 01:47:35 -070070 u32 thread_index = vlib_get_thread_index ();
Neale Ranns810086d2017-11-05 16:26:46 -080071
72 n_left_from = frame->n_vectors;
73 next_index = node->cached_next_index;
74
75 while (n_left_from > 0)
76 {
77 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
78
79 while (n_left_from >= 4 && n_left_to_next >= 2)
80 {
81 vlib_buffer_t *b0, *b1;
82 udp_encap_t *ue0, *ue1;
83 u32 bi0, next0, uei0;
84 u32 bi1, next1, uei1;
85
86 /* Prefetch next iteration. */
87 {
88 vlib_buffer_t *p2, *p3;
89
90 p2 = vlib_get_buffer (vm, from[2]);
91 p3 = vlib_get_buffer (vm, from[3]);
92
93 vlib_prefetch_buffer_header (p2, STORE);
94 vlib_prefetch_buffer_header (p3, STORE);
95 }
96
97 bi0 = to_next[0] = from[0];
98 bi1 = to_next[1] = from[1];
99
100 from += 2;
101 n_left_from -= 2;
102 to_next += 2;
103 n_left_to_next -= 2;
104
105 b0 = vlib_get_buffer (vm, bi0);
106 b1 = vlib_get_buffer (vm, bi1);
107
108 uei0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
109 uei1 = vnet_buffer (b1)->ip.adj_index[VLIB_TX];
110
Neale Ranns43b1f442018-03-20 01:47:35 -0700111 vlib_increment_combined_counter (cm, thread_index, uei0, 1,
112 vlib_buffer_length_in_chain (vm,
113 b0));
114 vlib_increment_combined_counter (cm, thread_index, uei1, 1,
115 vlib_buffer_length_in_chain (vm,
116 b1));
117
Neale Ranns810086d2017-11-05 16:26:46 -0800118 /* Rewrite packet header and updates lengths. */
119 ue0 = udp_encap_get (uei0);
120 ue1 = udp_encap_get (uei1);
121
122 /* Paint */
123 if (is_encap_v6)
124 {
125 const u8 n_bytes =
126 sizeof (udp_header_t) + sizeof (ip6_header_t);
127 ip_udp_encap_two (vm, b0, b1, (u8 *) & ue0->ue_hdrs,
128 (u8 *) & ue1->ue_hdrs, n_bytes, 0);
129 if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
130 {
131 udp6_encap_trace_t *tr =
132 vlib_add_trace (vm, node, b0, sizeof (*tr));
133 tr->udp = ue0->ue_hdrs.ip6.ue_udp;
134 tr->ip = ue0->ue_hdrs.ip6.ue_ip6;
135 }
136 if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
137 {
138 udp6_encap_trace_t *tr =
139 vlib_add_trace (vm, node, b1, sizeof (*tr));
140 tr->udp = ue1->ue_hdrs.ip6.ue_udp;
141 tr->ip = ue1->ue_hdrs.ip6.ue_ip6;
142 }
143 }
144 else
145 {
146 const u8 n_bytes =
147 sizeof (udp_header_t) + sizeof (ip4_header_t);
148
149 ip_udp_encap_two (vm, b0, b1,
150 (u8 *) & ue0->ue_hdrs,
151 (u8 *) & ue1->ue_hdrs, n_bytes, 1);
152
153 if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
154 {
155 udp4_encap_trace_t *tr =
156 vlib_add_trace (vm, node, b0, sizeof (*tr));
157 tr->udp = ue0->ue_hdrs.ip4.ue_udp;
158 tr->ip = ue0->ue_hdrs.ip4.ue_ip4;
159 }
160 if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
161 {
162 udp4_encap_trace_t *tr =
163 vlib_add_trace (vm, node, b1, sizeof (*tr));
164 tr->udp = ue1->ue_hdrs.ip4.ue_udp;
165 tr->ip = ue1->ue_hdrs.ip4.ue_ip4;
166 }
167 }
168
169 next0 = ue0->ue_dpo.dpoi_next_node;
170 next1 = ue1->ue_dpo.dpoi_next_node;
171 vnet_buffer (b0)->ip.adj_index[VLIB_TX] = ue0->ue_dpo.dpoi_index;
172 vnet_buffer (b1)->ip.adj_index[VLIB_TX] = ue1->ue_dpo.dpoi_index;
173
174 vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
175 to_next, n_left_to_next,
176 bi0, bi1, next0, next1);
177 }
178
179 while (n_left_from > 0 && n_left_to_next > 0)
180 {
181 u32 bi0, next0, uei0;
182 vlib_buffer_t *b0;
183 udp_encap_t *ue0;
184
185 bi0 = to_next[0] = from[0];
186
187 from += 1;
188 n_left_from -= 1;
189 to_next += 1;
190 n_left_to_next -= 1;
191
192 b0 = vlib_get_buffer (vm, bi0);
193
194 uei0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
195
196 /* Rewrite packet header and updates lengths. */
197 ue0 = udp_encap_get (uei0);
198
Neale Ranns43b1f442018-03-20 01:47:35 -0700199 vlib_increment_combined_counter (cm, thread_index, uei0, 1,
200 vlib_buffer_length_in_chain (vm,
201 b0));
202
Neale Ranns810086d2017-11-05 16:26:46 -0800203 /* Paint */
204 if (is_encap_v6)
205 {
206 const u8 n_bytes =
207 sizeof (udp_header_t) + sizeof (ip6_header_t);
208 ip_udp_encap_one (vm, b0, (u8 *) & ue0->ue_hdrs.ip6, n_bytes,
209 0);
210
211 if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
212 {
213 udp6_encap_trace_t *tr =
214 vlib_add_trace (vm, node, b0, sizeof (*tr));
215 tr->udp = ue0->ue_hdrs.ip6.ue_udp;
216 tr->ip = ue0->ue_hdrs.ip6.ue_ip6;
217 }
218 }
219 else
220 {
221 const u8 n_bytes =
222 sizeof (udp_header_t) + sizeof (ip4_header_t);
223
224 ip_udp_encap_one (vm, b0, (u8 *) & ue0->ue_hdrs.ip4, n_bytes,
225 1);
226
227 if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
228 {
229 udp4_encap_trace_t *tr =
230 vlib_add_trace (vm, node, b0, sizeof (*tr));
231 tr->udp = ue0->ue_hdrs.ip4.ue_udp;
232 tr->ip = ue0->ue_hdrs.ip4.ue_ip4;
233 }
234 }
235
236 next0 = ue0->ue_dpo.dpoi_next_node;
237 vnet_buffer (b0)->ip.adj_index[VLIB_TX] = ue0->ue_dpo.dpoi_index;
238
239 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
240 to_next, n_left_to_next,
241 bi0, next0);
242 }
243
244 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
245 }
246
247 return frame->n_vectors;
248}
249
250static uword
251udp4_encap (vlib_main_t * vm,
252 vlib_node_runtime_t * node, vlib_frame_t * frame)
253{
254 return udp_encap_inline (vm, node, frame, 0);
255}
256
257static uword
258udp6_encap (vlib_main_t * vm,
259 vlib_node_runtime_t * node, vlib_frame_t * frame)
260{
261 return udp_encap_inline (vm, node, frame, 1);
262}
263
264/* *INDENT-OFF* */
265VLIB_REGISTER_NODE (udp4_encap_node) = {
266 .function = udp4_encap,
267 .name = "udp4-encap",
268 .vector_size = sizeof (u32),
269
270 .format_trace = format_udp4_encap_trace,
271
272 .n_next_nodes = 0,
273};
274VLIB_NODE_FUNCTION_MULTIARCH (udp4_encap_node, udp4_encap);
275
276VLIB_REGISTER_NODE (udp6_encap_node) = {
277 .function = udp6_encap,
278 .name = "udp6-encap",
279 .vector_size = sizeof (u32),
280
281 .format_trace = format_udp6_encap_trace,
282
283 .n_next_nodes = 0,
284};
285VLIB_NODE_FUNCTION_MULTIARCH (udp6_encap_node, udp6_encap);
286/* *INDENT-ON* */
287
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */