/*---------------------------------------------------------------------------
 * Copyright (c) 2009-2014 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *---------------------------------------------------------------------------
 */
/*
 * IPv4 and IPv6 Fragmentation Nodes
 */

#include "ip_frag.h"

#include <vnet/ip/ip.h>

typedef struct
{
  u8 ipv6;
  u16 header_offset;
  u16 mtu;
  u8 next;
  u16 n_fragments;
} ip_frag_trace_t;

static u8 *
format_ip_frag_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  ip_frag_trace_t *t = va_arg (*args, ip_frag_trace_t *);
  s = format (s, "IPv%s offset: %u mtu: %u fragments: %u",
              t->ipv6 ? "6" : "4", t->header_offset, t->mtu, t->n_fragments);
  return s;
}

static u32 running_fragment_id;

/*
 * Limitation: This node follows buffer chains in the packet to be
 * fragmented, but it does not generate buffer chains. I.e. a fragment
 * is always contained within a single buffer and limited to the max
 * buffer size.
 */
void
ip4_frag_do_fragment (vlib_main_t * vm, u32 from_bi, u32 ** buffer,
                      ip_frag_error_t * error)
{
  vlib_buffer_t *from_b;
  ip4_header_t *ip4;
  u16 mtu, len, max, rem, offset, ip_frag_id, ip_frag_offset;
  u8 *org_from_packet, more;

  from_b = vlib_get_buffer (vm, from_bi);
  offset = vnet_buffer (from_b)->ip_frag.header_offset;
  mtu = vnet_buffer (from_b)->ip_frag.mtu;
  org_from_packet = vlib_buffer_get_current (from_b);
  ip4 = vlib_buffer_get_current (from_b) + offset;

  rem = clib_net_to_host_u16 (ip4->length) - sizeof (ip4_header_t);
  max =
    (mtu - sizeof (ip4_header_t) -
     vnet_buffer (from_b)->ip_frag.header_offset) & ~0x7;
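  /* IPv4 fragment offsets are expressed in units of 8 octets, so every
   * fragment except the last must carry a payload that is a multiple of
   * 8 bytes; the ~0x7 mask rounds the per-fragment payload budget (max)
   * down accordingly. */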

  if (rem >
      (vlib_buffer_length_in_chain (vm, from_b) - offset -
       sizeof (ip4_header_t)))
    {
      *error = IP_FRAG_ERROR_MALFORMED;
      return;
    }

  if (mtu < sizeof (ip4_header_t))
    {
      *error = IP_FRAG_ERROR_CANT_FRAGMENT_HEADER;
      return;
    }

  if (ip4->flags_and_fragment_offset &
      clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT))
    {
      *error = IP_FRAG_ERROR_DONT_FRAGMENT_SET;
      return;
    }

  if (ip4_is_fragment (ip4))
    {
      ip_frag_id = ip4->fragment_id;
      ip_frag_offset = ip4_get_fragment_offset (ip4);
      more =
        !(!(ip4->flags_and_fragment_offset &
            clib_host_to_net_u16 (IP4_HEADER_FLAG_MORE_FRAGMENTS)));
    }
  else
    {
      ip_frag_id = (++running_fragment_id);
      ip_frag_offset = 0;
      more = 0;
    }
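  /* If the input packet is itself already a fragment, its identification,
   * fragment offset and MF flag are inherited above so that the pieces
   * produced here still reassemble into the original datagram; otherwise a
   * fresh identification value is drawn from running_fragment_id. */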

  u8 *from_data = (void *) (ip4 + 1);
  vlib_buffer_t *org_from_b = from_b;
  u16 ptr = 0, fo = 0;
  u16 left_in_from_buffer =
    from_b->current_length - offset - sizeof (ip4_header_t);

  /* Do the actual fragmentation */
  while (rem)
    {
      u32 to_bi;
      vlib_buffer_t *to_b;
      ip4_header_t *to_ip4;
      u8 *to_data;

      len = (rem > (mtu - sizeof (ip4_header_t) - offset) ? max : rem);
      if (len != rem)	/* Last fragment does not need to be divisible by 8 */
        len &= ~0x7;
      if (!vlib_buffer_alloc (vm, &to_bi, 1))
        {
          *error = IP_FRAG_ERROR_MEMORY;
          return;
        }
      vec_add1 (*buffer, to_bi);
      to_b = vlib_get_buffer (vm, to_bi);
      vnet_buffer (to_b)->sw_if_index[VLIB_RX] =
        vnet_buffer (org_from_b)->sw_if_index[VLIB_RX];
      vnet_buffer (to_b)->sw_if_index[VLIB_TX] =
        vnet_buffer (org_from_b)->sw_if_index[VLIB_TX];
      /* Copy adj_index in case a DPO-based node is requesting the
       * fragmentation, so the fragments are sent back to the proper
       * DPO next node and index.
       */
      vnet_buffer (to_b)->ip.adj_index[VLIB_RX] =
        vnet_buffer (org_from_b)->ip.adj_index[VLIB_RX];
      vnet_buffer (to_b)->ip.adj_index[VLIB_TX] =
        vnet_buffer (org_from_b)->ip.adj_index[VLIB_TX];

      /* Copy offset and ip4 header */
      clib_memcpy (to_b->data, org_from_packet,
                   offset + sizeof (ip4_header_t));
      to_ip4 = vlib_buffer_get_current (to_b) + offset;
      to_data = (void *) (to_ip4 + 1);

      /* Spin through from buffers filling up the to buffer */
      u16 to_ptr = 0;
      u16 bytes_to_copy, left_in_to_buffer = len;
      while (1)
        {
          /* Figure out how many bytes we can safely copy */
          bytes_to_copy = left_in_to_buffer <= left_in_from_buffer ?
            left_in_to_buffer : left_in_from_buffer;
          clib_memcpy (to_data + to_ptr, from_data + ptr, bytes_to_copy);
          left_in_to_buffer -= bytes_to_copy;
          ptr += bytes_to_copy;
          left_in_from_buffer -= bytes_to_copy;
          if (left_in_to_buffer == 0)
            break;

          ASSERT (left_in_from_buffer == 0);
          /* Move to the next buffer in the source chain */
          if (!(from_b->flags & VLIB_BUFFER_NEXT_PRESENT))
            {
              *error = IP_FRAG_ERROR_MALFORMED;
              return;
            }
          from_b = vlib_get_buffer (vm, from_b->next_buffer);
          from_data = (u8 *) vlib_buffer_get_current (from_b);
          ptr = 0;
          left_in_from_buffer = from_b->current_length;
          to_ptr += bytes_to_copy;
        }

      to_b->current_length = offset + len + sizeof (ip4_header_t);

      to_ip4->fragment_id = ip_frag_id;
      to_ip4->flags_and_fragment_offset =
        clib_host_to_net_u16 ((fo >> 3) + ip_frag_offset);
      to_ip4->flags_and_fragment_offset |=
        clib_host_to_net_u16 (((len != rem) || more) << 13);
      to_ip4->length = clib_host_to_net_u16 (len + sizeof (ip4_header_t));
      to_ip4->checksum = ip4_header_checksum (to_ip4);
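      /* fo is a byte offset into the original payload, so fo >> 3 converts
       * it to the 8-octet units the fragment offset field expects; bit 13
       * of flags_and_fragment_offset is the More Fragments flag, set on
       * every fragment except the last (or unconditionally when the input
       * packet was itself a non-final fragment). */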

      if (vnet_buffer (org_from_b)->ip_frag.flags & IP_FRAG_FLAG_IP4_HEADER)
        {
          /* Encapsulating ipv4 header */
          ip4_header_t *encap_header4 =
            (ip4_header_t *) vlib_buffer_get_current (to_b);
          encap_header4->length = clib_host_to_net_u16 (to_b->current_length);
          encap_header4->checksum = ip4_header_checksum (encap_header4);
        }
      else if (vnet_buffer (org_from_b)->
               ip_frag.flags & IP_FRAG_FLAG_IP6_HEADER)
        {
          /* Encapsulating ipv6 header */
          ip6_header_t *encap_header6 =
            (ip6_header_t *) vlib_buffer_get_current (to_b);
          encap_header6->payload_length =
            clib_host_to_net_u16 (to_b->current_length -
                                  sizeof (*encap_header6));
        }
      rem -= len;
      fo += len;
    }
}

void
ip_frag_set_vnet_buffer (vlib_buffer_t * b, u16 offset, u16 mtu,
                         u8 next_index, u8 flags)
{
  vnet_buffer (b)->ip_frag.header_offset = offset;
  vnet_buffer (b)->ip_frag.mtu = mtu;
  vnet_buffer (b)->ip_frag.next_index = next_index;
  vnet_buffer (b)->ip_frag.flags = flags;
}
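
/* Usage sketch (illustrative only, not taken from this file): a feature
 * node that wants a packet fragmented fills in the per-buffer metadata and
 * then enqueues the buffer to the IP4_FRAG_NODE_NAME / IP6_FRAG_NODE_NAME
 * node, e.g.
 *
 *   ip_frag_set_vnet_buffer (b0, 0, mtu0, IP4_FRAG_NEXT_IP4_LOOKUP, 0);
 *
 * where b0 and mtu0 stand for the caller's buffer and path MTU. A non-zero
 * offset together with IP_FRAG_FLAG_IP4_HEADER or IP_FRAG_FLAG_IP6_HEADER
 * is used when the inner packet sits behind an encapsulating header whose
 * length (and checksum) must be fixed up on each fragment. */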

static uword
ip4_frag (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, ip4_frag_node.index);
  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  u32 frag_sent = 0, small_packets = 0;
  u32 *buffer = 0;

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 pi0, *frag_from, frag_left;
          vlib_buffer_t *p0;
          ip_frag_error_t error0;
          ip4_frag_next_t next0;

          //Note: The packet is not enqueued now.
          //It is instead put in a vector where other fragments
          //will be put as well.
          pi0 = from[0];
          from += 1;
          n_left_from -= 1;
          error0 = IP_FRAG_ERROR_NONE;

          p0 = vlib_get_buffer (vm, pi0);
          ip4_frag_do_fragment (vm, pi0, &buffer, &error0);

          if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
            {
              ip_frag_trace_t *tr =
                vlib_add_trace (vm, node, p0, sizeof (*tr));
              tr->header_offset = vnet_buffer (p0)->ip_frag.header_offset;
              tr->mtu = vnet_buffer (p0)->ip_frag.mtu;
              tr->ipv6 = 0;
              tr->n_fragments = vec_len (buffer);
              tr->next = vnet_buffer (p0)->ip_frag.next_index;
            }

          if (error0 == IP_FRAG_ERROR_DONT_FRAGMENT_SET)
            {
              icmp4_error_set_vnet_buffer (p0, ICMP4_destination_unreachable,
                                           ICMP4_destination_unreachable_fragmentation_needed_and_dont_fragment_set,
                                           vnet_buffer (p0)->ip_frag.mtu);
              vlib_buffer_advance (p0,
                                   vnet_buffer (p0)->ip_frag.header_offset);
              next0 = IP4_FRAG_NEXT_ICMP_ERROR;
            }
          else
            {
              /* *INDENT-OFF* */
              next0 = (error0 == IP_FRAG_ERROR_NONE) ? vnet_buffer (p0)->
                ip_frag.next_index : IP4_FRAG_NEXT_DROP;
              /* *INDENT-ON* */
            }
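          /* When DF is set the packet is not fragmented; it is steered to
           * the ip4-icmp-error node instead, which generates an ICMP
           * "fragmentation needed and DF set" message (type 3, code 4)
           * carrying the MTU, so path MTU discovery at the sender can
           * react. */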

          if (error0 == IP_FRAG_ERROR_NONE)
            {
              /* Free original buffer chain; the fragments are copies */
              vlib_buffer_free_one (vm, pi0);
              frag_sent += vec_len (buffer);
              small_packets += (vec_len (buffer) == 1);
            }
          else
            vlib_error_count (vm, ip4_frag_node.index, error0, 1);

          //Send fragments that were added in the frame
          frag_from = buffer;
          frag_left = vec_len (buffer);

          while (frag_left > 0)
            {
              while (frag_left > 0 && n_left_to_next > 0)
                {
                  u32 i;
                  i = to_next[0] = frag_from[0];
                  frag_from += 1;
                  frag_left -= 1;
                  to_next += 1;
                  n_left_to_next -= 1;

                  vlib_get_buffer (vm, i)->error = error_node->errors[error0];
                  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                                   to_next, n_left_to_next, i,
                                                   next0);
                }
              vlib_put_next_frame (vm, node, next_index, n_left_to_next);
              vlib_get_next_frame (vm, node, next_index, to_next,
                                   n_left_to_next);
            }
          vec_reset_length (buffer);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  vec_free (buffer);

  vlib_node_increment_counter (vm, ip4_frag_node.index,
                               IP_FRAG_ERROR_FRAGMENT_SENT, frag_sent);
  vlib_node_increment_counter (vm, ip4_frag_node.index,
                               IP_FRAG_ERROR_SMALL_PACKET, small_packets);

  return frame->n_vectors;
}


void
ip6_frag_do_fragment (vlib_main_t * vm, u32 pi, u32 ** buffer,
                      ip_frag_error_t * error)
{
  vlib_buffer_t *p;
  ip6_header_t *ip6_hdr;
  ip6_frag_hdr_t *frag_hdr;
  u8 *payload, *next_header;

  p = vlib_get_buffer (vm, pi);

  //Parsing the IPv6 headers
  ip6_hdr =
    vlib_buffer_get_current (p) + vnet_buffer (p)->ip_frag.header_offset;
  payload = (u8 *) (ip6_hdr + 1);
  next_header = &ip6_hdr->protocol;
  if (*next_header == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS)
    {
      next_header = payload;
      payload += (payload[1] + 1) * 8;
    }

  if (*next_header == IP_PROTOCOL_IP6_DESTINATION_OPTIONS)
    {
      next_header = payload;
      payload += (payload[1] + 1) * 8;
    }

  if (*next_header == IP_PROTOCOL_IPV6_ROUTE)
    {
      next_header = payload;
      payload += (payload[1] + 1) * 8;
    }
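  /* Per RFC 8200 the Fragment header sits after any hop-by-hop, destination
   * options and routing headers, which form the unfragmentable part of the
   * packet, so they are skipped above to find the insertion point. The
   * Hdr Ext Len byte counts 8-octet units excluding the first 8 octets of
   * each extension header. */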

  if (PREDICT_FALSE
      (payload >= (u8 *) vlib_buffer_get_current (p) + p->current_length))
    {
      //A malicious packet could set an extension header with an overly
      //large size and make us modify another vlib_buffer
      *error = IP_FRAG_ERROR_MALFORMED;
      return;
    }

  if (p->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      //Buffer chains are not supported on the IPv6 fragmentation path
      *error = IP_FRAG_ERROR_MALFORMED;
      return;
    }

  u8 has_more;
  u16 initial_offset;
  if (*next_header == IP_PROTOCOL_IPV6_FRAGMENTATION)
    {
      //The fragmentation header is already there
      frag_hdr = (ip6_frag_hdr_t *) payload;
      has_more = ip6_frag_hdr_more (frag_hdr);
      initial_offset = ip6_frag_hdr_offset (frag_hdr);
    }
  else
    {
      //Insert a fragmentation header in the packet
      u8 nh = *next_header;
      *next_header = IP_PROTOCOL_IPV6_FRAGMENTATION;
      vlib_buffer_advance (p, -sizeof (*frag_hdr));
      u8 *start = vlib_buffer_get_current (p);
      memmove (start, start + sizeof (*frag_hdr),
               payload - (start + sizeof (*frag_hdr)));
      frag_hdr = (ip6_frag_hdr_t *) (payload - sizeof (*frag_hdr));
      frag_hdr->identification = ++running_fragment_id;
      frag_hdr->next_hdr = nh;
      frag_hdr->rsv = 0;
      has_more = 0;
      initial_offset = 0;
    }
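  /* Room for the 8-byte Fragment header is made by rewinding the buffer's
   * current-data pointer by sizeof (*frag_hdr) and copying the IPv6 header
   * plus the preceding extension headers 8 bytes earlier with memmove; the
   * Fragment header is then written into the gap that opens just before
   * the payload. */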
  payload = (u8 *) (frag_hdr + 1);

  u16 headers_len = payload - (u8 *) vlib_buffer_get_current (p);
  u16 max_payload = vnet_buffer (p)->ip_frag.mtu - headers_len;
  u16 rem = p->current_length - headers_len;
  u16 ptr = 0;

  if (max_payload < 8)
    {
      *error = IP_FRAG_ERROR_CANT_FRAGMENT_HEADER;
      return;
    }
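  /* Every fragment except the last must carry a payload that is a multiple
   * of 8 octets, so an MTU that leaves fewer than 8 usable payload bytes
   * after the headers cannot make forward progress. */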

  while (rem)
    {
      u32 bi;
      vlib_buffer_t *b;
      u16 len = (rem > max_payload) ? (max_payload & ~0x7) : rem;
      rem -= len;

      if (ptr != 0)
        {
          if (!vlib_buffer_alloc (vm, &bi, 1))
            {
              *error = IP_FRAG_ERROR_MEMORY;
              return;
            }
          b = vlib_get_buffer (vm, bi);
          vnet_buffer (b)->sw_if_index[VLIB_RX] =
            vnet_buffer (p)->sw_if_index[VLIB_RX];
          vnet_buffer (b)->sw_if_index[VLIB_TX] =
            vnet_buffer (p)->sw_if_index[VLIB_TX];

          /* Copy adj_index in case a DPO-based node is requesting the
             fragmentation, so the fragments are sent back to the proper
             DPO next node and index */
          vnet_buffer (b)->ip.adj_index[VLIB_RX] =
            vnet_buffer (p)->ip.adj_index[VLIB_RX];
          vnet_buffer (b)->ip.adj_index[VLIB_TX] =
            vnet_buffer (p)->ip.adj_index[VLIB_TX];

          clib_memcpy (vlib_buffer_get_current (b),
                       vlib_buffer_get_current (p), headers_len);
          clib_memcpy (vlib_buffer_get_current (b) + headers_len,
                       payload + ptr, len);
          frag_hdr =
            vlib_buffer_get_current (b) + headers_len - sizeof (*frag_hdr);
        }
      else
        {
          bi = pi;
          b = vlib_get_buffer (vm, bi);
          //frag_hdr already set here
        }
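      /* For the first fragment (ptr == 0) the original buffer is reused in
       * place, which is why ip6_frag never frees the input buffer; only the
       * subsequent fragments are freshly allocated copies. */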

      ip6_hdr =
        vlib_buffer_get_current (b) + vnet_buffer (p)->ip_frag.header_offset;
      frag_hdr->fragment_offset_and_more =
        ip6_frag_hdr_offset_and_more (initial_offset + (ptr >> 3),
                                      (rem || has_more));
      b->current_length = headers_len + len;
      ip6_hdr->payload_length =
        clib_host_to_net_u16 (b->current_length -
                              vnet_buffer (p)->ip_frag.header_offset -
                              sizeof (*ip6_hdr));

      if (vnet_buffer (p)->ip_frag.flags & IP_FRAG_FLAG_IP4_HEADER)
        {
          //Encapsulating ipv4 header
          ip4_header_t *encap_header4 =
            (ip4_header_t *) vlib_buffer_get_current (b);
          encap_header4->length = clib_host_to_net_u16 (b->current_length);
          encap_header4->checksum = ip4_header_checksum (encap_header4);
        }
      else if (vnet_buffer (p)->ip_frag.flags & IP_FRAG_FLAG_IP6_HEADER)
        {
          //Encapsulating ipv6 header
          ip6_header_t *encap_header6 =
            (ip6_header_t *) vlib_buffer_get_current (b);
          encap_header6->payload_length =
            clib_host_to_net_u16 (b->current_length -
                                  sizeof (*encap_header6));
        }

      vec_add1 (*buffer, bi);

      ptr += len;
    }
}

static uword
ip6_frag (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, ip6_frag_node.index);
  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  u32 frag_sent = 0, small_packets = 0;
  u32 *buffer = 0;

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 pi0, *frag_from, frag_left;
          vlib_buffer_t *p0;
          ip_frag_error_t error0;
          ip6_frag_next_t next0;

          pi0 = from[0];
          from += 1;
          n_left_from -= 1;
          error0 = IP_FRAG_ERROR_NONE;

          p0 = vlib_get_buffer (vm, pi0);
          ip6_frag_do_fragment (vm, pi0, &buffer, &error0);

          if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
            {
              ip_frag_trace_t *tr =
                vlib_add_trace (vm, node, p0, sizeof (*tr));
              tr->header_offset = vnet_buffer (p0)->ip_frag.header_offset;
              tr->mtu = vnet_buffer (p0)->ip_frag.mtu;
              tr->ipv6 = 1;
              tr->n_fragments = vec_len (buffer);
              tr->next = vnet_buffer (p0)->ip_frag.next_index;
            }

          /* *INDENT-OFF* */
          next0 = (error0 == IP_FRAG_ERROR_NONE) ? vnet_buffer (p0)->
            ip_frag.next_index : IP6_FRAG_NEXT_DROP;
          /* *INDENT-ON* */

          frag_sent += vec_len (buffer);
          small_packets += (vec_len (buffer) == 1);

          //Send fragments that were added in the frame
          frag_from = buffer;
          frag_left = vec_len (buffer);
          while (frag_left > 0)
            {
              while (frag_left > 0 && n_left_to_next > 0)
                {
                  u32 i;
                  i = to_next[0] = frag_from[0];
                  frag_from += 1;
                  frag_left -= 1;
                  to_next += 1;
                  n_left_to_next -= 1;

                  vlib_get_buffer (vm, i)->error = error_node->errors[error0];
                  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                                   to_next, n_left_to_next, i,
                                                   next0);
                }
              vlib_put_next_frame (vm, node, next_index, n_left_to_next);
              vlib_get_next_frame (vm, node, next_index, to_next,
                                   n_left_to_next);
            }
          vec_reset_length (buffer);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  vec_free (buffer);
  vlib_node_increment_counter (vm, ip6_frag_node.index,
                               IP_FRAG_ERROR_FRAGMENT_SENT, frag_sent);
  vlib_node_increment_counter (vm, ip6_frag_node.index,
                               IP_FRAG_ERROR_SMALL_PACKET, small_packets);

  return frame->n_vectors;
}

static char *ip4_frag_error_strings[] = {
#define _(sym,string) string,
  foreach_ip_frag_error
#undef _
};

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ip4_frag_node) = {
  .function = ip4_frag,
  .name = IP4_FRAG_NODE_NAME,
  .vector_size = sizeof (u32),
  .format_trace = format_ip_frag_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = IP_FRAG_N_ERROR,
  .error_strings = ip4_frag_error_strings,

  .n_next_nodes = IP4_FRAG_N_NEXT,
  .next_nodes = {
    [IP4_FRAG_NEXT_IP4_REWRITE] = "ip4-rewrite",
    [IP4_FRAG_NEXT_IP4_LOOKUP] = "ip4-lookup",
    [IP4_FRAG_NEXT_IP6_LOOKUP] = "ip6-lookup",
    [IP4_FRAG_NEXT_ICMP_ERROR] = "ip4-icmp-error",
    [IP4_FRAG_NEXT_DROP] = "ip4-drop"
  },
};
/* *INDENT-ON* */

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ip6_frag_node) = {
  .function = ip6_frag,
  .name = IP6_FRAG_NODE_NAME,
  .vector_size = sizeof (u32),
  .format_trace = format_ip_frag_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = IP_FRAG_N_ERROR,
  .error_strings = ip4_frag_error_strings,

  .n_next_nodes = IP6_FRAG_N_NEXT,
  .next_nodes = {
    [IP6_FRAG_NEXT_IP6_REWRITE] = "ip6-rewrite",
    [IP6_FRAG_NEXT_IP4_LOOKUP] = "ip4-lookup",
    [IP6_FRAG_NEXT_IP6_LOOKUP] = "ip6-lookup",
    [IP6_FRAG_NEXT_DROP] = "ip6-drop"
  },
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */