/*---------------------------------------------------------------------------
 * Copyright (c) 2009-2014 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *---------------------------------------------------------------------------
 */
/*
 * IPv4 and IPv6 Fragmentation Nodes
 */

#include "ip_frag.h"

#include <vnet/ip/ip.h>

typedef struct
{
  u8 ipv6;
  u16 header_offset;
  u16 mtu;
  u8 next;
  u16 n_fragments;
} ip_frag_trace_t;
static u8 *
format_ip_frag_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  ip_frag_trace_t *t = va_arg (*args, ip_frag_trace_t *);
  s = format (s, "IPv%s offset: %u mtu: %u fragments: %u",
	      t->ipv6 ? "6" : "4", t->header_offset, t->mtu, t->n_fragments);
  return s;
}
static u32 running_fragment_id;

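/*
 * Fragment the IPv4 packet in buffer 'pi' so that every fragment fits the
 * MTU recorded in its vnet_buffer ip_frag opaque.  The original buffer
 * index and the indices of all newly allocated fragment buffers are
 * appended to '*buffer'; on failure '*error' is set and the vector may
 * hold a partial result.
 */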
static void
ip4_frag_do_fragment (vlib_main_t * vm, u32 pi, u32 ** buffer,
		      ip_frag_error_t * error)
{
  vlib_buffer_t *p;
  ip4_header_t *ip4;
  u16 mtu, ptr, len, max, rem, offset, ip_frag_id, ip_frag_offset;
  u8 *packet, more;

  vec_add1 (*buffer, pi);
  p = vlib_get_buffer (vm, pi);
  offset = vnet_buffer (p)->ip_frag.header_offset;
  mtu = vnet_buffer (p)->ip_frag.mtu;
  packet = (u8 *) vlib_buffer_get_current (p);
  ip4 = (ip4_header_t *) (packet + offset);

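  /* 'rem' is the number of payload bytes left to fragment; 'max' is the
   * payload capacity of a single fragment, rounded down to a multiple of
   * 8 bytes because fragment offsets are expressed in 8-byte units. */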
  rem = clib_net_to_host_u16 (ip4->length) - sizeof (*ip4);
  ptr = 0;
  max = (mtu - sizeof (*ip4) - vnet_buffer (p)->ip_frag.header_offset) & ~0x7;

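  /* The IPv4 total length must account for all of the payload held in the
   * buffer, otherwise the packet is malformed. */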
  if (rem < (p->current_length - offset - sizeof (*ip4)))
    {
      *error = IP_FRAG_ERROR_MALFORMED;
      return;
    }

  if (mtu < sizeof (*ip4))
    {
      *error = IP_FRAG_ERROR_CANT_FRAGMENT_HEADER;
      return;
    }

  if (ip4->flags_and_fragment_offset &
      clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT))
    {
      *error = IP_FRAG_ERROR_DONT_FRAGMENT_SET;
      return;
    }

  if (p->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      *error = IP_FRAG_ERROR_MALFORMED;
      return;
    }

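  /* When re-fragmenting a packet that is already a fragment, reuse its
   * fragment id and offset and preserve its more-fragments flag. */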
  if (ip4_is_fragment (ip4))
    {
      ip_frag_id = ip4->fragment_id;
      ip_frag_offset = ip4_get_fragment_offset (ip4);
      more =
	!!(ip4->flags_and_fragment_offset &
	   clib_host_to_net_u16 (IP4_HEADER_FLAG_MORE_FRAGMENTS));
    }
  else
    {
      ip_frag_id = (++running_fragment_id);
      ip_frag_offset = 0;
      more = 0;
    }

  //Do the actual fragmentation
  while (rem)
    {
      u32 bi;
      vlib_buffer_t *b;
      ip4_header_t *fip4;

      len =
	(rem >
	 (mtu - sizeof (*ip4) -
	  vnet_buffer (p)->ip_frag.header_offset)) ? max : rem;

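      /* The first fragment is carved in place out of the original buffer;
         every subsequent fragment gets a freshly allocated buffer. */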
      if (ptr == 0)
	{
	  bi = pi;
	  b = p;
	  fip4 = (ip4_header_t *) (vlib_buffer_get_current (b) + offset);
	}
      else
	{
	  if (!vlib_buffer_alloc (vm, &bi, 1))
	    {
	      *error = IP_FRAG_ERROR_MEMORY;
	      return;
	    }
	  vec_add1 (*buffer, bi);
	  b = vlib_get_buffer (vm, bi);
	  vnet_buffer (b)->sw_if_index[VLIB_RX] =
	    vnet_buffer (p)->sw_if_index[VLIB_RX];
	  vnet_buffer (b)->sw_if_index[VLIB_TX] =
	    vnet_buffer (p)->sw_if_index[VLIB_TX];
	  /* Copy the adjacency indices so that, when a DPO-based node
	     requested the fragmentation, the fragments are sent back to
	     the proper DPO next node and index. */
	  vnet_buffer (b)->ip.adj_index[VLIB_RX] =
	    vnet_buffer (p)->ip.adj_index[VLIB_RX];
	  vnet_buffer (b)->ip.adj_index[VLIB_TX] =
	    vnet_buffer (p)->ip.adj_index[VLIB_TX];
	  fip4 = (ip4_header_t *) (vlib_buffer_get_current (b) + offset);

	  //Copy offset and ip4 header
	  clib_memcpy (b->data, packet, offset + sizeof (*ip4));
	  //Copy data
	  clib_memcpy (((u8 *) (fip4)) + sizeof (*fip4),
		       packet + offset + sizeof (*fip4) + ptr, len);
	}
      b->current_length = offset + len + sizeof (*fip4);

      fip4->fragment_id = ip_frag_id;
      fip4->flags_and_fragment_offset =
	clib_host_to_net_u16 ((ptr >> 3) + ip_frag_offset);
      fip4->flags_and_fragment_offset |=
	clib_host_to_net_u16 (((len != rem) || more) << 13);
      // ((len != rem) || more) << 13 is an optimization for
      // ((len != rem) || more) ? IP4_HEADER_FLAG_MORE_FRAGMENTS : 0
      fip4->length = clib_host_to_net_u16 (len + sizeof (*fip4));
      fip4->checksum = ip4_header_checksum (fip4);

      if (vnet_buffer (p)->ip_frag.flags & IP_FRAG_FLAG_IP4_HEADER)
	{
	  //Encapsulating ipv4 header
	  ip4_header_t *encap_header4 =
	    (ip4_header_t *) vlib_buffer_get_current (b);
	  encap_header4->length = clib_host_to_net_u16 (b->current_length);
	  encap_header4->checksum = ip4_header_checksum (encap_header4);
	}
      else if (vnet_buffer (p)->ip_frag.flags & IP_FRAG_FLAG_IP6_HEADER)
	{
	  //Encapsulating ipv6 header
	  ip6_header_t *encap_header6 =
	    (ip6_header_t *) vlib_buffer_get_current (b);
	  encap_header6->payload_length =
	    clib_host_to_net_u16 (b->current_length -
				  sizeof (*encap_header6));
	}

      rem -= len;
      ptr += len;
    }
}

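/*
 * Helper used by callers to prime the per-buffer fragmentation state
 * before handing a buffer to one of the fragmentation nodes.  As a
 * hypothetical example, a feature fragmenting a plain (unencapsulated)
 * IPv4 packet to a 1500-byte MTU and forwarding the fragments to
 * ip4-lookup might call:
 *
 *   ip_frag_set_vnet_buffer (b, 0, 1500, IP4_FRAG_NEXT_IP4_LOOKUP, 0);
 */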
void
ip_frag_set_vnet_buffer (vlib_buffer_t * b, u16 offset, u16 mtu,
			 u8 next_index, u8 flags)
{
  vnet_buffer (b)->ip_frag.header_offset = offset;
  vnet_buffer (b)->ip_frag.mtu = mtu;
  vnet_buffer (b)->ip_frag.next_index = next_index;
  vnet_buffer (b)->ip_frag.flags = flags;
}

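/*
 * ip4-frag node dispatch function.  Each input packet is fragmented into
 * a vector of buffers which are then enqueued to the per-buffer next
 * index; packets with DF set are redirected to the ICMP error node.
 */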
static uword
ip4_frag (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, ip4_frag_node.index);
  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  u32 frag_sent = 0, small_packets = 0;
  u32 *buffer = 0;

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  u32 pi0, *frag_from, frag_left;
	  vlib_buffer_t *p0;
	  ip_frag_error_t error0;
	  ip4_frag_next_t next0;

	  //Note: The packet is not enqueued now.
	  //It is instead added to a vector together with the
	  //fragments created from it.
	  pi0 = from[0];
	  from += 1;
	  n_left_from -= 1;
	  error0 = IP_FRAG_ERROR_NONE;

	  p0 = vlib_get_buffer (vm, pi0);
	  ip4_frag_do_fragment (vm, pi0, &buffer, &error0);

	  if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
	    {
	      ip_frag_trace_t *tr =
		vlib_add_trace (vm, node, p0, sizeof (*tr));
	      tr->header_offset = vnet_buffer (p0)->ip_frag.header_offset;
	      tr->mtu = vnet_buffer (p0)->ip_frag.mtu;
	      tr->ipv6 = 0;
	      tr->n_fragments = vec_len (buffer);
	      tr->next = vnet_buffer (p0)->ip_frag.next_index;
	    }

	  if (error0 == IP_FRAG_ERROR_DONT_FRAGMENT_SET)
	    {
	      icmp4_error_set_vnet_buffer (p0, ICMP4_destination_unreachable,
					   ICMP4_destination_unreachable_fragmentation_needed_and_dont_fragment_set,
					   vnet_buffer (p0)->ip_frag.mtu);
	      vlib_buffer_advance (p0,
				   vnet_buffer (p0)->ip_frag.header_offset);
	      next0 = IP4_FRAG_NEXT_ICMP_ERROR;
	    }
	  else
	    {
	      /* *INDENT-OFF* */
	      next0 = (error0 == IP_FRAG_ERROR_NONE) ? vnet_buffer (p0)->
		ip_frag.next_index : IP4_FRAG_NEXT_DROP;
	      /* *INDENT-ON* */
	    }

	  if (error0 == IP_FRAG_ERROR_NONE)
	    {
	      frag_sent += vec_len (buffer);
	      small_packets += (vec_len (buffer) == 1);
	    }
	  else
	    vlib_error_count (vm, ip4_frag_node.index, error0, 1);

	  //Send fragments that were added in the frame
	  frag_from = buffer;
	  frag_left = vec_len (buffer);

	  while (frag_left > 0)
	    {
	      while (frag_left > 0 && n_left_to_next > 0)
		{
		  u32 i;
		  i = to_next[0] = frag_from[0];
		  frag_from += 1;
		  frag_left -= 1;
		  to_next += 1;
		  n_left_to_next -= 1;

		  vlib_get_buffer (vm, i)->error = error_node->errors[error0];
		  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
						   to_next, n_left_to_next, i,
						   next0);
		}
	      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
	      vlib_get_next_frame (vm, node, next_index, to_next,
				   n_left_to_next);
	    }
	  vec_reset_length (buffer);
	}
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  vec_free (buffer);

  vlib_node_increment_counter (vm, ip4_frag_node.index,
			       IP_FRAG_ERROR_FRAGMENT_SENT, frag_sent);
  vlib_node_increment_counter (vm, ip4_frag_node.index,
			       IP_FRAG_ERROR_SMALL_PACKET, small_packets);

  return frame->n_vectors;
}

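/*
 * Fragment the IPv6 packet in buffer 'pi', reusing an existing fragment
 * header when one is present and inserting one otherwise.  All resulting
 * buffer indices are appended to '*buffer'; on failure '*error' is set.
 */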
static void
ip6_frag_do_fragment (vlib_main_t * vm, u32 pi, u32 ** buffer,
		      ip_frag_error_t * error)
{
  vlib_buffer_t *p;
  ip6_header_t *ip6_hdr;
  ip6_frag_hdr_t *frag_hdr;
  u8 *payload, *next_header;

  p = vlib_get_buffer (vm, pi);

  //Parsing the IPv6 headers
  ip6_hdr =
    vlib_buffer_get_current (p) + vnet_buffer (p)->ip_frag.header_offset;
  payload = (u8 *) (ip6_hdr + 1);
  next_header = &ip6_hdr->protocol;
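  /* Skip the unfragmentable extension headers (hop-by-hop, destination
   * options, routing).  Their Hdr Ext Len field counts 8-octet units,
   * excluding the first 8 octets (RFC 8200), hence the '+ 1' below. */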
  if (*next_header == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS)
    {
      next_header = payload;
      payload += (payload[1] + 1) * 8;
    }

  if (*next_header == IP_PROTOCOL_IP6_DESTINATION_OPTIONS)
    {
      next_header = payload;
      payload += (payload[1] + 1) * 8;
    }

  if (*next_header == IP_PROTOCOL_IPV6_ROUTE)
    {
      next_header = payload;
      payload += (payload[1] + 1) * 8;
    }

  if (PREDICT_FALSE
      (payload >= (u8 *) vlib_buffer_get_current (p) + p->current_length))
    {
      //A malicious packet could carry an extension header with an
      //oversized length and trick us into touching another vlib_buffer
      *error = IP_FRAG_ERROR_MALFORMED;
      return;
    }

  if (p->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      *error = IP_FRAG_ERROR_MALFORMED;
      return;
    }

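  /* If the packet already carries a fragment header (we are
   * re-fragmenting), reuse its identification, offset and more-flag;
   * otherwise shift the unfragmentable headers back by 8 bytes to open
   * room for a new fragment header. */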
  u8 has_more;
  u16 initial_offset;
  if (*next_header == IP_PROTOCOL_IPV6_FRAGMENTATION)
    {
      //The fragmentation header is already there
      frag_hdr = (ip6_frag_hdr_t *) payload;
      has_more = ip6_frag_hdr_more (frag_hdr);
      initial_offset = ip6_frag_hdr_offset (frag_hdr);
    }
  else
    {
      //Insert a fragmentation header in the packet
      u8 nh = *next_header;
      *next_header = IP_PROTOCOL_IPV6_FRAGMENTATION;
      vlib_buffer_advance (p, -sizeof (*frag_hdr));
      u8 *start = vlib_buffer_get_current (p);
      memmove (start, start + sizeof (*frag_hdr),
	       payload - (start + sizeof (*frag_hdr)));
      frag_hdr = (ip6_frag_hdr_t *) (payload - sizeof (*frag_hdr));
      frag_hdr->identification = ++running_fragment_id;
      frag_hdr->next_hdr = nh;
      frag_hdr->rsv = 0;
      has_more = 0;
      initial_offset = 0;
    }
  payload = (u8 *) (frag_hdr + 1);

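  /* Everything up to and including the fragment header is replicated in
   * each fragment; the remaining payload is split into chunks of at most
   * 'max_payload' bytes, kept 8-byte aligned except for the last one. */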
  u16 headers_len = payload - (u8 *) vlib_buffer_get_current (p);
  u16 max_payload = vnet_buffer (p)->ip_frag.mtu - headers_len;
  u16 rem = p->current_length - headers_len;
  u16 ptr = 0;

  if (max_payload < 8)
    {
      *error = IP_FRAG_ERROR_CANT_FRAGMENT_HEADER;
      return;
    }

  while (rem)
    {
      u32 bi;
      vlib_buffer_t *b;
      u16 len = (rem > max_payload) ? (max_payload & ~0x7) : rem;
      rem -= len;

      if (ptr != 0)
	{
	  if (!vlib_buffer_alloc (vm, &bi, 1))
	    {
	      *error = IP_FRAG_ERROR_MEMORY;
	      return;
	    }
	  b = vlib_get_buffer (vm, bi);
	  vnet_buffer (b)->sw_if_index[VLIB_RX] =
	    vnet_buffer (p)->sw_if_index[VLIB_RX];
	  vnet_buffer (b)->sw_if_index[VLIB_TX] =
	    vnet_buffer (p)->sw_if_index[VLIB_TX];

	  /* Copy the adjacency indices so that, when a DPO-based node
	     requested the fragmentation, the fragments are sent back to
	     the proper DPO next node and index. */
	  vnet_buffer (b)->ip.adj_index[VLIB_RX] =
	    vnet_buffer (p)->ip.adj_index[VLIB_RX];
	  vnet_buffer (b)->ip.adj_index[VLIB_TX] =
	    vnet_buffer (p)->ip.adj_index[VLIB_TX];

	  clib_memcpy (vlib_buffer_get_current (b),
		       vlib_buffer_get_current (p), headers_len);
	  clib_memcpy (vlib_buffer_get_current (b) + headers_len,
		       payload + ptr, len);
	  frag_hdr =
	    vlib_buffer_get_current (b) + headers_len - sizeof (*frag_hdr);
	}
      else
	{
	  bi = pi;
	  b = vlib_get_buffer (vm, bi);
	  //frag_hdr already set here
	}

      ip6_hdr =
	vlib_buffer_get_current (b) + vnet_buffer (p)->ip_frag.header_offset;
      frag_hdr->fragment_offset_and_more =
	ip6_frag_hdr_offset_and_more (initial_offset + (ptr >> 3),
				      (rem || has_more));
      b->current_length = headers_len + len;
      ip6_hdr->payload_length =
	clib_host_to_net_u16 (b->current_length -
			      vnet_buffer (p)->ip_frag.header_offset -
			      sizeof (*ip6_hdr));

      if (vnet_buffer (p)->ip_frag.flags & IP_FRAG_FLAG_IP4_HEADER)
	{
	  //Encapsulating ipv4 header
	  ip4_header_t *encap_header4 =
	    (ip4_header_t *) vlib_buffer_get_current (b);
	  encap_header4->length = clib_host_to_net_u16 (b->current_length);
	  encap_header4->checksum = ip4_header_checksum (encap_header4);
	}
      else if (vnet_buffer (p)->ip_frag.flags & IP_FRAG_FLAG_IP6_HEADER)
	{
	  //Encapsulating ipv6 header
	  ip6_header_t *encap_header6 =
	    (ip6_header_t *) vlib_buffer_get_current (b);
	  encap_header6->payload_length =
	    clib_host_to_net_u16 (b->current_length -
				  sizeof (*encap_header6));
	}

      vec_add1 (*buffer, bi);

      ptr += len;
    }
}

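/*
 * ip6-frag node dispatch function.  Unlike the IPv4 node, there is no
 * ICMP path here: packets that cannot be fragmented are simply dropped.
 */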
static uword
ip6_frag (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, ip6_frag_node.index);
  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  u32 frag_sent = 0, small_packets = 0;
  u32 *buffer = 0;

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  u32 pi0, *frag_from, frag_left;
	  vlib_buffer_t *p0;
	  ip_frag_error_t error0;
	  ip6_frag_next_t next0;

	  pi0 = from[0];
	  from += 1;
	  n_left_from -= 1;
	  error0 = IP_FRAG_ERROR_NONE;

	  p0 = vlib_get_buffer (vm, pi0);
	  ip6_frag_do_fragment (vm, pi0, &buffer, &error0);

	  if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
	    {
	      ip_frag_trace_t *tr =
		vlib_add_trace (vm, node, p0, sizeof (*tr));
	      tr->header_offset = vnet_buffer (p0)->ip_frag.header_offset;
	      tr->mtu = vnet_buffer (p0)->ip_frag.mtu;
	      tr->ipv6 = 1;
	      tr->n_fragments = vec_len (buffer);
	      tr->next = vnet_buffer (p0)->ip_frag.next_index;
	    }

	  /* *INDENT-OFF* */
	  next0 = (error0 == IP_FRAG_ERROR_NONE) ? vnet_buffer (p0)->
	    ip_frag.next_index : IP6_FRAG_NEXT_DROP;
	  /* *INDENT-ON* */

	  frag_sent += vec_len (buffer);
	  small_packets += (vec_len (buffer) == 1);

	  //Send fragments that were added in the frame
	  frag_from = buffer;
	  frag_left = vec_len (buffer);
	  while (frag_left > 0)
	    {
	      while (frag_left > 0 && n_left_to_next > 0)
		{
		  u32 i;
		  i = to_next[0] = frag_from[0];
		  frag_from += 1;
		  frag_left -= 1;
		  to_next += 1;
		  n_left_to_next -= 1;

		  vlib_get_buffer (vm, i)->error = error_node->errors[error0];
		  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
						   to_next, n_left_to_next, i,
						   next0);
		}
	      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
	      vlib_get_next_frame (vm, node, next_index, to_next,
				   n_left_to_next);
	    }
	  vec_reset_length (buffer);
	}
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  vec_free (buffer);
  vlib_node_increment_counter (vm, ip6_frag_node.index,
			       IP_FRAG_ERROR_FRAGMENT_SENT, frag_sent);
  vlib_node_increment_counter (vm, ip6_frag_node.index,
			       IP_FRAG_ERROR_SMALL_PACKET, small_packets);

  return frame->n_vectors;
}

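/* Error strings are shared by the ip4-frag and ip6-frag nodes. */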
static char *ip4_frag_error_strings[] = {
#define _(sym,string) string,
  foreach_ip_frag_error
#undef _
};

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ip4_frag_node) = {
  .function = ip4_frag,
  .name = IP4_FRAG_NODE_NAME,
  .vector_size = sizeof (u32),
  .format_trace = format_ip_frag_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = IP_FRAG_N_ERROR,
  .error_strings = ip4_frag_error_strings,

  .n_next_nodes = IP4_FRAG_N_NEXT,
  .next_nodes = {
    [IP4_FRAG_NEXT_IP4_LOOKUP] = "ip4-lookup",
    [IP4_FRAG_NEXT_IP6_LOOKUP] = "ip6-lookup",
    [IP4_FRAG_NEXT_ICMP_ERROR] = "ip4-icmp-error",
    [IP4_FRAG_NEXT_DROP] = "ip4-drop"
  },
};
/* *INDENT-ON* */

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ip6_frag_node) = {
  .function = ip6_frag,
  .name = IP6_FRAG_NODE_NAME,
  .vector_size = sizeof (u32),
  .format_trace = format_ip_frag_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = IP_FRAG_N_ERROR,
  .error_strings = ip4_frag_error_strings,

  .n_next_nodes = IP6_FRAG_N_NEXT,
  .next_nodes = {
    [IP6_FRAG_NEXT_IP4_LOOKUP] = "ip4-lookup",
    [IP6_FRAG_NEXT_IP6_LOOKUP] = "ip6-lookup",
    [IP6_FRAG_NEXT_DROP] = "ip6-drop"
  },
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */