blob: 5b9fc0bf34ba156fbf458ccb794f1e6ce822be80 [file] [log] [blame]
/*
 * Copyright (c) 2017-2019 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
15
16#include <vnet/udp/udp_encap.h>
Florin Corasb040f982020-10-20 14:59:43 -070017#include <vnet/udp/udp.h>
Neale Ranns810086d2017-11-05 16:26:46 -080018
/**
 * Per-packet trace record for the udp4-encap node: copies of the UDP and
 * IPv4 headers that were painted onto the front of the packet.
 */
typedef struct udp4_encap_trace_t_
{
  udp_header_t udp;		/**< the rewritten UDP header */
  ip4_header_t ip;		/**< the rewritten IPv4 header */
} udp4_encap_trace_t;
24
/**
 * Per-packet trace record for the udp6-encap node: copies of the UDP and
 * IPv6 headers that were painted onto the front of the packet.
 */
typedef struct udp6_encap_trace_t_
{
  udp_header_t udp;		/**< the rewritten UDP header */
  ip6_header_t ip;		/**< the rewritten IPv6 header */
} udp6_encap_trace_t;
30
Neale Ranns43b1f442018-03-20 01:47:35 -070031extern vlib_combined_counter_main_t udp_encap_counters;
32
Neale Ranns810086d2017-11-05 16:26:46 -080033static u8 *
34format_udp4_encap_trace (u8 * s, va_list * args)
35{
36 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
37 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
38 udp4_encap_trace_t *t;
39
40 t = va_arg (*args, udp4_encap_trace_t *);
41
42 s = format (s, "%U\n %U",
43 format_ip4_header, &t->ip, sizeof (t->ip),
44 format_udp_header, &t->udp, sizeof (t->udp));
45 return (s);
46}
47
48static u8 *
49format_udp6_encap_trace (u8 * s, va_list * args)
50{
51 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
52 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
53 udp6_encap_trace_t *t;
54
55 t = va_arg (*args, udp6_encap_trace_t *);
56
57 s = format (s, "%U\n %U",
58 format_ip6_header, &t->ip, sizeof (t->ip),
59 format_udp_header, &t->udp, sizeof (t->udp));
60 return (s);
61}
62
/**
 * udp-encap worker, shared by the v4 and v6 nodes.
 *
 * For each buffer, the udp_encap pool index left in adj_index[VLIB_TX] by
 * the previous node is resolved to its encap object; that object's
 * pre-computed [IP+UDP] header is painted onto the packet, per-encap
 * packet/byte counters are bumped, and the buffer is enqueued to the next
 * node stacked on the encap's DPO (adj_index[VLIB_TX] is overwritten with
 * the DPO index for that next node).
 *
 * @param vm           vlib main
 * @param node         this node's runtime
 * @param frame        frame of buffer indices to process
 * @param is_encap_v6  1 to prepend an IPv6+UDP header, 0 for IPv4+UDP
 *                     (compile-time constant in the specialized callers)
 * @return             number of packets processed
 */
always_inline uword
udp_encap_inline (vlib_main_t * vm,
		  vlib_node_runtime_t * node,
		  vlib_frame_t * frame, int is_encap_v6)
{
  vlib_combined_counter_main_t *cm = &udp_encap_counters;
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left_from, n_left_to_next, *to_next, next_index;
  u32 thread_index = vm->thread_index;

  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      /* Dual-loop: handle two packets per iteration while at least four
       * remain, so the pair after this one can be prefetched. */
      while (n_left_from >= 4 && n_left_to_next >= 2)
	{
	  vlib_buffer_t *b0, *b1;
	  udp_encap_t *ue0, *ue1;
	  u32 bi0, next0, uei0;
	  u32 bi1, next1, uei1;

	  /* Prefetch next iteration. */
	  {
	    vlib_buffer_t *p2, *p3;

	    p2 = vlib_get_buffer (vm, from[2]);
	    p3 = vlib_get_buffer (vm, from[3]);

	    /* STORE: the headers will be rewritten, not just read */
	    vlib_prefetch_buffer_header (p2, STORE);
	    vlib_prefetch_buffer_header (p3, STORE);
	  }

	  bi0 = to_next[0] = from[0];
	  bi1 = to_next[1] = from[1];

	  from += 2;
	  n_left_from -= 2;
	  to_next += 2;
	  n_left_to_next -= 2;

	  b0 = vlib_get_buffer (vm, bi0);
	  b1 = vlib_get_buffer (vm, bi1);

	  /* udp_encap pool index stashed by the previous node */
	  uei0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
	  uei1 = vnet_buffer (b1)->ip.adj_index[VLIB_TX];

	  /* Count the packet before the rewrite grows its length */
	  vlib_increment_combined_counter (cm, thread_index, uei0, 1,
					   vlib_buffer_length_in_chain (vm,
									b0));
	  vlib_increment_combined_counter (cm, thread_index, uei1, 1,
					   vlib_buffer_length_in_chain (vm,
									b1));

	  /* Rewrite packet header and updates lengths. */
	  ue0 = udp_encap_get (uei0);
	  ue1 = udp_encap_get (uei1);

	  /* Paint */
	  if (is_encap_v6)
	    {
	      const u8 n_bytes =
		sizeof (udp_header_t) + sizeof (ip6_header_t);
	      ip_udp_encap_two (vm, b0, b1, (u8 *) & ue0->ue_hdrs,
				(u8 *) & ue1->ue_hdrs, n_bytes, 0);
	      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
		{
		  udp6_encap_trace_t *tr =
		    vlib_add_trace (vm, node, b0, sizeof (*tr));
		  tr->udp = ue0->ue_hdrs.ip6.ue_udp;
		  tr->ip = ue0->ue_hdrs.ip6.ue_ip6;
		}
	      if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
		{
		  udp6_encap_trace_t *tr =
		    vlib_add_trace (vm, node, b1, sizeof (*tr));
		  tr->udp = ue1->ue_hdrs.ip6.ue_udp;
		  tr->ip = ue1->ue_hdrs.ip6.ue_ip6;
		}
	    }
	  else
	    {
	      const u8 n_bytes =
		sizeof (udp_header_t) + sizeof (ip4_header_t);

	      ip_udp_encap_two (vm, b0, b1,
				(u8 *) & ue0->ue_hdrs,
				(u8 *) & ue1->ue_hdrs, n_bytes, 1);

	      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
		{
		  udp4_encap_trace_t *tr =
		    vlib_add_trace (vm, node, b0, sizeof (*tr));
		  tr->udp = ue0->ue_hdrs.ip4.ue_udp;
		  tr->ip = ue0->ue_hdrs.ip4.ue_ip4;
		}
	      if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
		{
		  udp4_encap_trace_t *tr =
		    vlib_add_trace (vm, node, b1, sizeof (*tr));
		  tr->udp = ue1->ue_hdrs.ip4.ue_udp;
		  tr->ip = ue1->ue_hdrs.ip4.ue_ip4;
		}
	    }

	  /* Hand off to the node the encap's DPO is stacked on, and
	   * replace the encap index with the DPO's index for it. */
	  next0 = ue0->ue_dpo.dpoi_next_node;
	  next1 = ue1->ue_dpo.dpoi_next_node;
	  vnet_buffer (b0)->ip.adj_index[VLIB_TX] = ue0->ue_dpo.dpoi_index;
	  vnet_buffer (b1)->ip.adj_index[VLIB_TX] = ue1->ue_dpo.dpoi_index;

	  vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
					   to_next, n_left_to_next,
					   bi0, bi1, next0, next1);
	}

      /* Single-loop: mop up the remaining packets one at a time */
      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  u32 bi0, next0, uei0;
	  vlib_buffer_t *b0;
	  udp_encap_t *ue0;

	  bi0 = to_next[0] = from[0];

	  from += 1;
	  n_left_from -= 1;
	  to_next += 1;
	  n_left_to_next -= 1;

	  b0 = vlib_get_buffer (vm, bi0);

	  /* udp_encap pool index stashed by the previous node */
	  uei0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];

	  /* Rewrite packet header and updates lengths. */
	  ue0 = udp_encap_get (uei0);

	  /* Count the packet before the rewrite grows its length */
	  vlib_increment_combined_counter (cm, thread_index, uei0, 1,
					   vlib_buffer_length_in_chain (vm,
									b0));

	  /* Paint */
	  if (is_encap_v6)
	    {
	      const u8 n_bytes =
		sizeof (udp_header_t) + sizeof (ip6_header_t);
	      ip_udp_encap_one (vm, b0, (u8 *) & ue0->ue_hdrs.ip6, n_bytes,
				0);

	      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
		{
		  udp6_encap_trace_t *tr =
		    vlib_add_trace (vm, node, b0, sizeof (*tr));
		  tr->udp = ue0->ue_hdrs.ip6.ue_udp;
		  tr->ip = ue0->ue_hdrs.ip6.ue_ip6;
		}
	    }
	  else
	    {
	      const u8 n_bytes =
		sizeof (udp_header_t) + sizeof (ip4_header_t);

	      ip_udp_encap_one (vm, b0, (u8 *) & ue0->ue_hdrs.ip4, n_bytes,
				1);

	      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
		{
		  udp4_encap_trace_t *tr =
		    vlib_add_trace (vm, node, b0, sizeof (*tr));
		  tr->udp = ue0->ue_hdrs.ip4.ue_udp;
		  tr->ip = ue0->ue_hdrs.ip4.ue_ip4;
		}
	    }

	  /* Hand off to the node the encap's DPO is stacked on */
	  next0 = ue0->ue_dpo.dpoi_next_node;
	  vnet_buffer (b0)->ip.adj_index[VLIB_TX] = ue0->ue_dpo.dpoi_index;

	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
					   to_next, n_left_to_next,
					   bi0, next0);
	}

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}
250
Filip Tehlar2c49ffe2019-03-06 07:16:08 -0800251VLIB_NODE_FN (udp4_encap_node) (vlib_main_t * vm,
252 vlib_node_runtime_t * node,
253 vlib_frame_t * frame)
Neale Ranns810086d2017-11-05 16:26:46 -0800254{
255 return udp_encap_inline (vm, node, frame, 0);
256}
257
Filip Tehlar2c49ffe2019-03-06 07:16:08 -0800258VLIB_NODE_FN (udp6_encap_node) (vlib_main_t * vm,
259 vlib_node_runtime_t * node,
260 vlib_frame_t * frame)
Neale Ranns810086d2017-11-05 16:26:46 -0800261{
262 return udp_encap_inline (vm, node, frame, 1);
263}
264
/* *INDENT-OFF* */
/** Graph-node registration for udp4-encap. */
VLIB_REGISTER_NODE (udp4_encap_node) = {
  .name = "udp4-encap",
  .vector_size = sizeof (u32),

  .format_trace = format_udp4_encap_trace,

  /* no static next nodes: successors are stacked at runtime via each
   * encap object's DPO */
  .n_next_nodes = 0,
};
Neale Ranns810086d2017-11-05 16:26:46 -0800274
/** Graph-node registration for udp6-encap. */
VLIB_REGISTER_NODE (udp6_encap_node) = {
  .name = "udp6-encap",
  .vector_size = sizeof (u32),

  .format_trace = format_udp6_encap_trace,

  /* no static next nodes: successors are stacked at runtime via each
   * encap object's DPO */
  .n_next_nodes = 0,
};
/* *INDENT-ON* */
284
285
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */