blob: 69d418e9d516a44bc4a63c403104f9d260946643 [file] [log] [blame]
Klement Sekera75e7d132017-09-20 08:26:30 +02001/*
2 * Copyright (c) 2017 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16/**
17 * @file
Klement Sekera896c8962019-06-24 11:52:49 +000018 * @brief IPv4 Full Reassembly.
Klement Sekera75e7d132017-09-20 08:26:30 +020019 *
Klement Sekera896c8962019-06-24 11:52:49 +000020 * This file contains the source code for IPv4 full reassembly.
Klement Sekera75e7d132017-09-20 08:26:30 +020021 */
22
23#include <vppinfra/vec.h>
24#include <vnet/vnet.h>
25#include <vnet/ip/ip.h>
Klement Sekera896c8962019-06-24 11:52:49 +000026#include <vppinfra/fifo.h>
Klement Sekera8dcfed52018-06-28 11:16:15 +020027#include <vppinfra/bihash_16_8.h>
Klement Sekera896c8962019-06-24 11:52:49 +000028#include <vnet/ip/reass/ip4_full_reass.h>
Klement Sekera630ab582019-07-19 09:14:19 +000029#include <stddef.h>
Klement Sekera75e7d132017-09-20 08:26:30 +020030
/* Tunables and their defaults; runtime values live in ip4_full_reass_main_t. */
#define MSEC_PER_SEC 1000
#define IP4_REASS_TIMEOUT_DEFAULT_MS 100
#define IP4_REASS_EXPIRE_WALK_INTERVAL_DEFAULT_MS 10000 // 10 seconds default
/* Default cap on concurrent reassembly contexts per thread pool. */
#define IP4_REASS_MAX_REASSEMBLIES_DEFAULT 1024
/* Default cap on fragments accepted into a single reassembly. */
#define IP4_REASS_MAX_REASSEMBLY_LENGTH_DEFAULT 3
/* Load factor used when sizing the bihash from max_reass_n. */
#define IP4_REASS_HT_LOAD_FACTOR (0.75)

/* Compile-time debug switch: when non-zero, IP4_REASS_DEBUG_BUFFER() prints
 * the buffer index and all chained buffer indices of `bi` to stdout; when
 * zero it expands to nothing.  Requires a `vm` (vlib_main_t *) in scope. */
#define IP4_REASS_DEBUG_BUFFERS 0
#if IP4_REASS_DEBUG_BUFFERS
#define IP4_REASS_DEBUG_BUFFER(bi, what) \
  do \
    { \
      u32 _bi = bi; \
      printf (#what "buffer %u", _bi); \
      vlib_buffer_t *_b = vlib_get_buffer (vm, _bi); \
      while (_b->flags & VLIB_BUFFER_NEXT_PRESENT) \
	{ \
	  _bi = _b->next_buffer; \
	  printf ("[%u]", _bi); \
	  _b = vlib_get_buffer (vm, _bi); \
	} \
      printf ("\n"); \
      fflush (stdout); \
    } \
  while (0)
#else
#define IP4_REASS_DEBUG_BUFFER(...)
#endif
59
Klement Sekerad0f70a32018-12-14 17:24:13 +010060typedef enum
61{
62 IP4_REASS_RC_OK,
Klement Sekera3a343d42019-05-16 14:35:46 +020063 IP4_REASS_RC_TOO_MANY_FRAGMENTS,
Klement Sekerad0f70a32018-12-14 17:24:13 +010064 IP4_REASS_RC_INTERNAL_ERROR,
Klement Sekeraf883f6a2019-02-13 11:01:32 +010065 IP4_REASS_RC_NO_BUF,
Klement Sekera630ab582019-07-19 09:14:19 +000066 IP4_REASS_RC_HANDOFF,
Klement Sekera896c8962019-06-24 11:52:49 +000067} ip4_full_reass_rc_t;
Klement Sekera75e7d132017-09-20 08:26:30 +020068
/** Bihash key identifying one reassembly: 5-tuple-ish fragment identity
 * (fib/interface id, addresses, fragment id, protocol) packed into 16 bytes
 * so it can be used directly as a clib_bihash_16_8 key. */
typedef struct
{
  union
  {
    struct
    {
      u32 xx_id;
      ip4_address_t src;
      ip4_address_t dst;
      u16 frag_id;
      u8 proto;
      u8 unused;
    };
    u64 as_u64[2];
  };
} ip4_full_reass_key_t;

/** Bihash value: where the reassembly context lives - pool index plus the
 * thread whose per-thread pool owns it. */
typedef union
{
  struct
  {
    u32 reass_index;
    u32 memory_owner_thread_index;
  };
  u64 as_u64;
} ip4_full_reass_val_t;

/** Convenience overlay of key+value onto a clib_bihash_kv_16_8_t. */
typedef union
{
  struct
  {
    ip4_full_reass_key_t k;
    ip4_full_reass_val_t v;
  };
  clib_bihash_kv_16_8_t kv;
} ip4_full_reass_kv_t;
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -0800105
Klement Sekera75e7d132017-09-20 08:26:30 +0200106always_inline u32
Klement Sekera896c8962019-06-24 11:52:49 +0000107ip4_full_reass_buffer_get_data_offset (vlib_buffer_t * b)
Klement Sekera75e7d132017-09-20 08:26:30 +0200108{
109 vnet_buffer_opaque_t *vnb = vnet_buffer (b);
Klement Sekerad0f70a32018-12-14 17:24:13 +0100110 return vnb->ip.reass.range_first - vnb->ip.reass.fragment_first;
Klement Sekera75e7d132017-09-20 08:26:30 +0200111}
112
113always_inline u16
Klement Sekera896c8962019-06-24 11:52:49 +0000114ip4_full_reass_buffer_get_data_len (vlib_buffer_t * b)
Klement Sekera75e7d132017-09-20 08:26:30 +0200115{
116 vnet_buffer_opaque_t *vnb = vnet_buffer (b);
Klement Sekerad0f70a32018-12-14 17:24:13 +0100117 return clib_min (vnb->ip.reass.range_last, vnb->ip.reass.fragment_last) -
Klement Sekera896c8962019-06-24 11:52:49 +0000118 (vnb->ip.reass.fragment_first +
119 ip4_full_reass_buffer_get_data_offset (b)) + 1;
Klement Sekera75e7d132017-09-20 08:26:30 +0200120}
121
/** One in-progress reassembly context, pool-allocated per thread. */
typedef struct
{
  // hash table key
  ip4_full_reass_key_t key;
  // time when last packet was received
  f64 last_heard;
  // internal id of this reassembly
  u64 id;
  // buffer index of first buffer in this reassembly context
  u32 first_bi;
  // last octet of packet, ~0 until fragment without more_fragments arrives
  u32 last_packet_octet;
  // length of data collected so far
  u32 data_len;
  // trace operation counter
  u32 trace_op_counter;
  // next index - used by non-feature node
  u32 next_index;
  // error next index - used by custom apps (~0 if not used)
  u32 error_next_index;
  // minimum fragment length for this reassembly - used to estimate MTU
  u16 min_fragment_length;
  // number of fragments in this reassembly
  u32 fragments_n;
  // thread owning memory for this context (whose pool contains this ctx)
  u32 memory_owner_thread_index;
  // thread which received fragment with offset 0 and which sends out the
  // completed reassembly
  u32 sendout_thread_index;
} ip4_full_reass_t;
Klement Sekera75e7d132017-09-20 08:26:30 +0200152
/** Per-worker reassembly state; the lock guards pool/bihash manipulation
 * when another thread touches this worker's contexts. */
typedef struct
{
  // pool of reassembly contexts owned by this thread
  ip4_full_reass_t *pool;
  // number of contexts currently allocated from the pool
  u32 reass_n;
  // monotonically increasing per-thread id source for reass->id
  u32 id_counter;
  clib_spinlock_t lock;
} ip4_full_reass_per_thread_t;
Klement Sekera4c533132018-02-22 11:41:12 +0100160
/** Global state of the IPv4 full reassembly feature. */
typedef struct
{
  // IPv4 config
  u32 timeout_ms;
  // timeout_ms converted to seconds (f64) for comparisons with vlib time
  f64 timeout;
  u32 expire_walk_interval_ms;
  // maximum number of fragments in one reassembly
  u32 max_reass_len;
  // maximum number of reassemblies
  u32 max_reass_n;

  // IPv4 runtime
  // fragment-identity -> (thread, pool index) lookup
  clib_bihash_16_8_t hash;
  // per-thread data
  ip4_full_reass_per_thread_t *per_thread_data;

  // convenience
  vlib_main_t *vlib_main;

  // node index of ip4-drop node
  u32 ip4_drop_idx;
  u32 ip4_full_reass_expire_node_idx;

  /** Worker handoff */
  u32 fq_index;
  u32 fq_feature_index;
  u32 fq_custom_index;

  // reference count for enabling/disabling feature - per interface
  u32 *feature_use_refcount_per_intf;
} ip4_full_reass_main_t;

extern ip4_full_reass_main_t ip4_full_reass_main;

#ifndef CLIB_MARCH_VARIANT
/* single definition; march variants only see the extern declaration */
ip4_full_reass_main_t ip4_full_reass_main;
#endif /* CLIB_MARCH_VARIANT */
Klement Sekera75e7d132017-09-20 08:26:30 +0200198
/** Next-node indices used by the reassembly graph nodes. */
typedef enum
{
  IP4_FULL_REASS_NEXT_INPUT,
  IP4_FULL_REASS_NEXT_DROP,
  IP4_FULL_REASS_NEXT_HANDOFF,
  IP4_FULL_REASS_N_NEXT,
} ip4_full_reass_next_t;

/** Flavor of the node doing the work: plain input node, feature-arc node,
 * or custom-next-index node used by applications. */
typedef enum
{
  NORMAL,
  FEATURE,
  CUSTOM
} ip4_full_reass_node_type_t;

/** Operations recorded in packet traces. */
typedef enum
{
  RANGE_NEW,
  RANGE_SHRINK,
  RANGE_DISCARD,
  RANGE_OVERLAP,
  FINALIZE,
  HANDOFF,
} ip4_full_reass_trace_operation_e;
Klement Sekera75e7d132017-09-20 08:26:30 +0200223
/** Snapshot of one fragment range for tracing purposes. */
typedef struct
{
  u16 range_first;
  u16 range_last;
  u32 range_bi;
  i32 data_offset;
  u32 data_len;
  u32 first_bi;
} ip4_full_reass_range_trace_t;

/** Per-packet trace record added by the reassembly nodes. */
typedef struct
{
  ip4_full_reass_trace_operation_e action;
  // reassembly id or ~0 when no reassembly context applies
  u32 reass_id;
  ip4_full_reass_range_trace_t trace_range;
  u32 size_diff;
  u32 op_id;
  u32 thread_id;
  u32 thread_id_to;
  u32 fragment_first;
  u32 fragment_last;
  u32 total_data_len;
  // true when the buffer was traced on a different thread (post-handoff);
  // in that case a copy of the ip4 header is kept for printing
  bool is_after_handoff;
  ip4_header_t ip4_header;
} ip4_full_reass_trace_t;

extern vlib_node_registration_t ip4_full_reass_node;
extern vlib_node_registration_t ip4_full_reass_node_feature;
extern vlib_node_registration_t ip4_full_reass_node_custom;
Filip Tehlar26ea14e2019-03-11 05:30:21 -0700253
Klement Sekera4c533132018-02-22 11:41:12 +0100254static void
Klement Sekera896c8962019-06-24 11:52:49 +0000255ip4_full_reass_trace_details (vlib_main_t * vm, u32 bi,
256 ip4_full_reass_range_trace_t * trace)
Klement Sekera75e7d132017-09-20 08:26:30 +0200257{
258 vlib_buffer_t *b = vlib_get_buffer (vm, bi);
259 vnet_buffer_opaque_t *vnb = vnet_buffer (b);
260 trace->range_first = vnb->ip.reass.range_first;
261 trace->range_last = vnb->ip.reass.range_last;
Klement Sekera896c8962019-06-24 11:52:49 +0000262 trace->data_offset = ip4_full_reass_buffer_get_data_offset (b);
263 trace->data_len = ip4_full_reass_buffer_get_data_len (b);
Klement Sekera75e7d132017-09-20 08:26:30 +0200264 trace->range_bi = bi;
265}
266
Klement Sekera4c533132018-02-22 11:41:12 +0100267static u8 *
Klement Sekera896c8962019-06-24 11:52:49 +0000268format_ip4_full_reass_range_trace (u8 * s, va_list * args)
Klement Sekera75e7d132017-09-20 08:26:30 +0200269{
Klement Sekera896c8962019-06-24 11:52:49 +0000270 ip4_full_reass_range_trace_t *trace =
271 va_arg (*args, ip4_full_reass_range_trace_t *);
272 s =
273 format (s, "range: [%u, %u], off %d, len %u, bi %u", trace->range_first,
274 trace->range_last, trace->data_offset, trace->data_len,
275 trace->range_bi);
Klement Sekera75e7d132017-09-20 08:26:30 +0200276 return s;
277}
278
Filip Tehlar26ea14e2019-03-11 05:30:21 -0700279static u8 *
Klement Sekera896c8962019-06-24 11:52:49 +0000280format_ip4_full_reass_trace (u8 * s, va_list * args)
Klement Sekera75e7d132017-09-20 08:26:30 +0200281{
282 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
283 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
Klement Sekera896c8962019-06-24 11:52:49 +0000284 ip4_full_reass_trace_t *t = va_arg (*args, ip4_full_reass_trace_t *);
Klement Sekera630ab582019-07-19 09:14:19 +0000285 u32 indent = 0;
286 if (~0 != t->reass_id)
287 {
Klement Sekera8563cb32019-10-10 17:03:57 +0000288 if (t->is_after_handoff)
289 {
290 s =
291 format (s, "%U\n", format_ip4_header, &t->ip4_header,
292 sizeof (t->ip4_header));
293 indent = 2;
294 }
295 s =
296 format (s, "%Ureass id: %u, op id: %u, ", format_white_space, indent,
297 t->reass_id, t->op_id);
Klement Sekera630ab582019-07-19 09:14:19 +0000298 indent = format_get_indent (s);
299 s =
300 format (s,
301 "first bi: %u, data len: %u, ip/fragment[%u, %u]",
302 t->trace_range.first_bi, t->total_data_len, t->fragment_first,
303 t->fragment_last);
304 }
Klement Sekera75e7d132017-09-20 08:26:30 +0200305 switch (t->action)
306 {
307 case RANGE_SHRINK:
308 s = format (s, "\n%Ushrink %U by %u", format_white_space, indent,
Klement Sekera896c8962019-06-24 11:52:49 +0000309 format_ip4_full_reass_range_trace, &t->trace_range,
Klement Sekera75e7d132017-09-20 08:26:30 +0200310 t->size_diff);
311 break;
312 case RANGE_DISCARD:
313 s = format (s, "\n%Udiscard %U", format_white_space, indent,
Klement Sekera896c8962019-06-24 11:52:49 +0000314 format_ip4_full_reass_range_trace, &t->trace_range);
Klement Sekera75e7d132017-09-20 08:26:30 +0200315 break;
316 case RANGE_NEW:
317 s = format (s, "\n%Unew %U", format_white_space, indent,
Klement Sekera896c8962019-06-24 11:52:49 +0000318 format_ip4_full_reass_range_trace, &t->trace_range);
Klement Sekera75e7d132017-09-20 08:26:30 +0200319 break;
320 case RANGE_OVERLAP:
321 s = format (s, "\n%Uoverlapping/ignored %U", format_white_space, indent,
Klement Sekera896c8962019-06-24 11:52:49 +0000322 format_ip4_full_reass_range_trace, &t->trace_range);
Klement Sekera75e7d132017-09-20 08:26:30 +0200323 break;
324 case FINALIZE:
325 s = format (s, "\n%Ufinalize reassembly", format_white_space, indent);
326 break;
Klement Sekera630ab582019-07-19 09:14:19 +0000327 case HANDOFF:
328 s =
329 format (s, "handoff from thread #%u to thread #%u", t->thread_id,
330 t->thread_id_to);
331 break;
Klement Sekera75e7d132017-09-20 08:26:30 +0200332 }
333 return s;
334}
335
Klement Sekera4c533132018-02-22 11:41:12 +0100336static void
Klement Sekera896c8962019-06-24 11:52:49 +0000337ip4_full_reass_add_trace (vlib_main_t * vm, vlib_node_runtime_t * node,
338 ip4_full_reass_main_t * rm,
339 ip4_full_reass_t * reass, u32 bi,
340 ip4_full_reass_trace_operation_e action,
341 u32 size_diff, u32 thread_id_to)
Klement Sekera75e7d132017-09-20 08:26:30 +0200342{
343 vlib_buffer_t *b = vlib_get_buffer (vm, bi);
344 vnet_buffer_opaque_t *vnb = vnet_buffer (b);
Klement Sekera53be16d2020-12-15 21:47:36 +0100345 if (pool_is_free_index
346 (vm->trace_main.trace_buffer_pool, vlib_buffer_get_trace_index (b)))
347 {
348 // this buffer's trace is gone
349 b->flags &= ~VLIB_BUFFER_IS_TRACED;
350 return;
351 }
Klement Sekera8563cb32019-10-10 17:03:57 +0000352 bool is_after_handoff = false;
353 if (vlib_buffer_get_trace_thread (b) != vm->thread_index)
354 {
355 is_after_handoff = true;
356 }
Klement Sekera896c8962019-06-24 11:52:49 +0000357 ip4_full_reass_trace_t *t = vlib_add_trace (vm, node, b, sizeof (t[0]));
Klement Sekera8563cb32019-10-10 17:03:57 +0000358 t->is_after_handoff = is_after_handoff;
359 if (t->is_after_handoff)
360 {
361 clib_memcpy (&t->ip4_header, vlib_buffer_get_current (b),
362 clib_min (sizeof (t->ip4_header), b->current_length));
363 }
Klement Sekera896c8962019-06-24 11:52:49 +0000364 if (reass)
365 {
366 t->reass_id = reass->id;
367 t->op_id = reass->trace_op_counter;
368 t->trace_range.first_bi = reass->first_bi;
369 t->total_data_len = reass->data_len;
370 ++reass->trace_op_counter;
371 }
372 else
373 {
374 t->reass_id = ~0;
375 t->op_id = 0;
376 t->trace_range.first_bi = 0;
377 t->total_data_len = 0;
378 }
Klement Sekera75e7d132017-09-20 08:26:30 +0200379 t->action = action;
Klement Sekera896c8962019-06-24 11:52:49 +0000380 ip4_full_reass_trace_details (vm, bi, &t->trace_range);
Klement Sekera75e7d132017-09-20 08:26:30 +0200381 t->size_diff = size_diff;
Klement Sekera630ab582019-07-19 09:14:19 +0000382 t->thread_id = vm->thread_index;
383 t->thread_id_to = thread_id_to;
Klement Sekera75e7d132017-09-20 08:26:30 +0200384 t->fragment_first = vnb->ip.reass.fragment_first;
385 t->fragment_last = vnb->ip.reass.fragment_last;
Klement Sekera75e7d132017-09-20 08:26:30 +0200386#if 0
387 static u8 *s = NULL;
Klement Sekera896c8962019-06-24 11:52:49 +0000388 s = format (s, "%U", format_ip4_full_reass_trace, NULL, NULL, t);
Klement Sekera75e7d132017-09-20 08:26:30 +0200389 printf ("%.*s\n", vec_len (s), s);
390 fflush (stdout);
391 vec_reset_length (s);
392#endif
393}
394
Klement Sekera630ab582019-07-19 09:14:19 +0000395always_inline void
Klement Sekera896c8962019-06-24 11:52:49 +0000396ip4_full_reass_free_ctx (ip4_full_reass_per_thread_t * rt,
397 ip4_full_reass_t * reass)
Klement Sekera630ab582019-07-19 09:14:19 +0000398{
399 pool_put (rt->pool, reass);
400 --rt->reass_n;
401}
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -0800402
Klement Sekera4c533132018-02-22 11:41:12 +0100403always_inline void
Klement Sekera896c8962019-06-24 11:52:49 +0000404ip4_full_reass_free (ip4_full_reass_main_t * rm,
405 ip4_full_reass_per_thread_t * rt,
406 ip4_full_reass_t * reass)
Klement Sekera75e7d132017-09-20 08:26:30 +0200407{
Klement Sekera8dcfed52018-06-28 11:16:15 +0200408 clib_bihash_kv_16_8_t kv;
Klement Sekera75e7d132017-09-20 08:26:30 +0200409 kv.key[0] = reass->key.as_u64[0];
410 kv.key[1] = reass->key.as_u64[1];
Klement Sekera8dcfed52018-06-28 11:16:15 +0200411 clib_bihash_add_del_16_8 (&rm->hash, &kv, 0);
Klement Sekera896c8962019-06-24 11:52:49 +0000412 return ip4_full_reass_free_ctx (rt, reass);
Klement Sekera75e7d132017-09-20 08:26:30 +0200413}
414
Klement Sekera4c533132018-02-22 11:41:12 +0100415always_inline void
Klement Sekera896c8962019-06-24 11:52:49 +0000416ip4_full_reass_drop_all (vlib_main_t * vm, vlib_node_runtime_t * node,
417 ip4_full_reass_main_t * rm, ip4_full_reass_t * reass)
Klement Sekera75e7d132017-09-20 08:26:30 +0200418{
419 u32 range_bi = reass->first_bi;
420 vlib_buffer_t *range_b;
421 vnet_buffer_opaque_t *range_vnb;
Klement Sekeraf883f6a2019-02-13 11:01:32 +0100422 u32 *to_free = NULL;
Klement Sekera75e7d132017-09-20 08:26:30 +0200423 while (~0 != range_bi)
424 {
425 range_b = vlib_get_buffer (vm, range_bi);
426 range_vnb = vnet_buffer (range_b);
427 u32 bi = range_bi;
428 while (~0 != bi)
429 {
Klement Sekeraf883f6a2019-02-13 11:01:32 +0100430 vec_add1 (to_free, bi);
Klement Sekera75e7d132017-09-20 08:26:30 +0200431 vlib_buffer_t *b = vlib_get_buffer (vm, bi);
432 if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
433 {
434 bi = b->next_buffer;
435 b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
436 }
437 else
438 {
439 bi = ~0;
440 }
441 }
442 range_bi = range_vnb->ip.reass.next_range_bi;
443 }
Klement Sekera21aa8f12019-05-20 12:27:33 +0200444 /* send to next_error_index */
Klement Sekerae8498652019-06-17 12:23:15 +0000445 if (~0 != reass->error_next_index)
Klement Sekera21aa8f12019-05-20 12:27:33 +0200446 {
447 u32 n_left_to_next, *to_next, next_index;
448
449 next_index = reass->error_next_index;
450 u32 bi = ~0;
451
452 while (vec_len (to_free) > 0)
453 {
454 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
455
456 while (vec_len (to_free) > 0 && n_left_to_next > 0)
457 {
458 bi = vec_pop (to_free);
459
460 if (~0 != bi)
461 {
462 to_next[0] = bi;
463 to_next += 1;
464 n_left_to_next -= 1;
Klement Sekera21aa8f12019-05-20 12:27:33 +0200465 }
466 }
467 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
468 }
469 }
470 else
471 {
472 vlib_buffer_free (vm, to_free, vec_len (to_free));
473 }
barryxie01e94db2020-12-04 19:21:23 +0800474 vec_free (to_free);
Klement Sekera75e7d132017-09-20 08:26:30 +0200475}
476
Klement Sekera896c8962019-06-24 11:52:49 +0000477always_inline void
478ip4_full_reass_init (ip4_full_reass_t * reass)
Klement Sekera75e7d132017-09-20 08:26:30 +0200479{
Klement Sekera896c8962019-06-24 11:52:49 +0000480 reass->first_bi = ~0;
481 reass->last_packet_octet = ~0;
482 reass->data_len = 0;
483 reass->next_index = ~0;
484 reass->error_next_index = ~0;
485}
486
/** Look up the reassembly context for @a kv, creating one if needed.
 *
 * @param do_handoff  set to 1 when the context is owned by another thread;
 *                    caller must hand the packet off and gets NULL back
 * @return context pointer, or NULL (limit reached, handoff, or lost race)
 *
 * A found-but-expired context is dropped and freed, then a new one is
 * created.  Insertion uses bihash mode 2 (add-but-don't-overwrite); if
 * another worker won the insertion race the code retries the lookup. */
always_inline ip4_full_reass_t *
ip4_full_reass_find_or_create (vlib_main_t * vm, vlib_node_runtime_t * node,
			       ip4_full_reass_main_t * rm,
			       ip4_full_reass_per_thread_t * rt,
			       ip4_full_reass_kv_t * kv, u8 * do_handoff)
{
  ip4_full_reass_t *reass;
  f64 now;

again:

  reass = NULL;
  now = vlib_time_now (vm);
  if (!clib_bihash_search_16_8 (&rm->hash, &kv->kv, &kv->kv))
    {
      /* context exists but lives on another worker -> handoff */
      if (vm->thread_index != kv->v.memory_owner_thread_index)
	{
	  *do_handoff = 1;
	  return NULL;
	}
      reass =
	pool_elt_at_index (rm->per_thread_data
			   [kv->v.memory_owner_thread_index].pool,
			   kv->v.reass_index);

      /* stale context - drop its fragments and fall through to create anew */
      if (now > reass->last_heard + rm->timeout)
	{
	  ip4_full_reass_drop_all (vm, node, rm, reass);
	  ip4_full_reass_free (rm, rt, reass);
	  reass = NULL;
	}
    }

  if (reass)
    {
      reass->last_heard = now;
      return reass;
    }

  /* refuse to create new contexts beyond the configured limit */
  if (rt->reass_n >= rm->max_reass_n)
    {
      reass = NULL;
      return reass;
    }
  else
    {
      pool_get (rt->pool, reass);
      clib_memset (reass, 0, sizeof (*reass));
      /* globally unique id: thread index scaled out of the counter range */
      reass->id = ((u64) vm->thread_index * 1000000000) + rt->id_counter;
      reass->memory_owner_thread_index = vm->thread_index;
      ++rt->id_counter;
      ip4_full_reass_init (reass);
      ++rt->reass_n;
    }

  reass->key.as_u64[0] = kv->kv.key[0];
  reass->key.as_u64[1] = kv->kv.key[1];
  kv->v.reass_index = (reass - rt->pool);
  kv->v.memory_owner_thread_index = vm->thread_index;
  reass->last_heard = now;

  /* mode 2 = add only if not present, so a concurrent insert is detected */
  int rv = clib_bihash_add_del_16_8 (&rm->hash, &kv->kv, 2);
  if (rv)
    {
      ip4_full_reass_free_ctx (rt, reass);
      reass = NULL;
      // if other worker created a context already work with the other copy
      if (-2 == rv)
	goto again;
    }

  return reass;
}
560
Klement Sekera896c8962019-06-24 11:52:49 +0000561always_inline ip4_full_reass_rc_t
562ip4_full_reass_finalize (vlib_main_t * vm, vlib_node_runtime_t * node,
563 ip4_full_reass_main_t * rm,
564 ip4_full_reass_per_thread_t * rt,
565 ip4_full_reass_t * reass, u32 * bi0,
Klement Sekerafe8371f2020-09-10 12:03:54 +0000566 u32 * next0, u32 * error0, bool is_custom)
Klement Sekera75e7d132017-09-20 08:26:30 +0200567{
Klement Sekera75e7d132017-09-20 08:26:30 +0200568 vlib_buffer_t *first_b = vlib_get_buffer (vm, reass->first_bi);
569 vlib_buffer_t *last_b = NULL;
570 u32 sub_chain_bi = reass->first_bi;
571 u32 total_length = 0;
572 u32 buf_cnt = 0;
Klement Sekera75e7d132017-09-20 08:26:30 +0200573 do
574 {
575 u32 tmp_bi = sub_chain_bi;
576 vlib_buffer_t *tmp = vlib_get_buffer (vm, tmp_bi);
577 ip4_header_t *ip = vlib_buffer_get_current (tmp);
Klement Sekerad0f70a32018-12-14 17:24:13 +0100578 vnet_buffer_opaque_t *vnb = vnet_buffer (tmp);
579 if (!(vnb->ip.reass.range_first >= vnb->ip.reass.fragment_first) &&
580 !(vnb->ip.reass.range_last > vnb->ip.reass.fragment_first))
581 {
582 return IP4_REASS_RC_INTERNAL_ERROR;
583 }
584
Klement Sekera896c8962019-06-24 11:52:49 +0000585 u32 data_len = ip4_full_reass_buffer_get_data_len (tmp);
Klement Sekera75e7d132017-09-20 08:26:30 +0200586 u32 trim_front =
Klement Sekera896c8962019-06-24 11:52:49 +0000587 ip4_header_bytes (ip) + ip4_full_reass_buffer_get_data_offset (tmp);
Klement Sekera75e7d132017-09-20 08:26:30 +0200588 u32 trim_end =
589 vlib_buffer_length_in_chain (vm, tmp) - trim_front - data_len;
590 if (tmp_bi == reass->first_bi)
591 {
592 /* first buffer - keep ip4 header */
Klement Sekera896c8962019-06-24 11:52:49 +0000593 if (0 != ip4_full_reass_buffer_get_data_offset (tmp))
Klement Sekerad0f70a32018-12-14 17:24:13 +0100594 {
595 return IP4_REASS_RC_INTERNAL_ERROR;
596 }
Klement Sekera75e7d132017-09-20 08:26:30 +0200597 trim_front = 0;
598 trim_end = vlib_buffer_length_in_chain (vm, tmp) - data_len -
599 ip4_header_bytes (ip);
Klement Sekerad0f70a32018-12-14 17:24:13 +0100600 if (!(vlib_buffer_length_in_chain (vm, tmp) - trim_end > 0))
601 {
602 return IP4_REASS_RC_INTERNAL_ERROR;
603 }
Klement Sekera75e7d132017-09-20 08:26:30 +0200604 }
605 u32 keep_data =
606 vlib_buffer_length_in_chain (vm, tmp) - trim_front - trim_end;
607 while (1)
608 {
609 ++buf_cnt;
610 if (trim_front)
611 {
612 if (trim_front > tmp->current_length)
613 {
614 /* drop whole buffer */
Klement Sekeraf369e3a2019-04-30 13:01:08 +0200615 u32 to_be_freed_bi = tmp_bi;
Klement Sekera75e7d132017-09-20 08:26:30 +0200616 trim_front -= tmp->current_length;
Klement Sekerad0f70a32018-12-14 17:24:13 +0100617 if (!(tmp->flags & VLIB_BUFFER_NEXT_PRESENT))
618 {
619 return IP4_REASS_RC_INTERNAL_ERROR;
620 }
Klement Sekera75e7d132017-09-20 08:26:30 +0200621 tmp->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
622 tmp_bi = tmp->next_buffer;
Vijayabhaskar Katamreddy90556d62019-05-23 13:02:28 -0700623 tmp->next_buffer = 0;
Klement Sekera75e7d132017-09-20 08:26:30 +0200624 tmp = vlib_get_buffer (vm, tmp_bi);
Klement Sekeraf369e3a2019-04-30 13:01:08 +0200625 vlib_buffer_free_one (vm, to_be_freed_bi);
Klement Sekera75e7d132017-09-20 08:26:30 +0200626 continue;
627 }
628 else
629 {
630 vlib_buffer_advance (tmp, trim_front);
631 trim_front = 0;
632 }
633 }
634 if (keep_data)
635 {
636 if (last_b)
637 {
638 last_b->flags |= VLIB_BUFFER_NEXT_PRESENT;
639 last_b->next_buffer = tmp_bi;
640 }
641 last_b = tmp;
642 if (keep_data <= tmp->current_length)
643 {
644 tmp->current_length = keep_data;
645 keep_data = 0;
646 }
647 else
648 {
649 keep_data -= tmp->current_length;
Klement Sekerad0f70a32018-12-14 17:24:13 +0100650 if (!(tmp->flags & VLIB_BUFFER_NEXT_PRESENT))
651 {
652 return IP4_REASS_RC_INTERNAL_ERROR;
653 }
Klement Sekera75e7d132017-09-20 08:26:30 +0200654 }
655 total_length += tmp->current_length;
Klement Sekeraf369e3a2019-04-30 13:01:08 +0200656 if (tmp->flags & VLIB_BUFFER_NEXT_PRESENT)
657 {
658 tmp_bi = tmp->next_buffer;
659 tmp = vlib_get_buffer (vm, tmp->next_buffer);
660 }
661 else
662 {
663 break;
664 }
Klement Sekera75e7d132017-09-20 08:26:30 +0200665 }
666 else
667 {
Klement Sekeraf369e3a2019-04-30 13:01:08 +0200668 u32 to_be_freed_bi = tmp_bi;
Klement Sekerad0f70a32018-12-14 17:24:13 +0100669 if (reass->first_bi == tmp_bi)
670 {
671 return IP4_REASS_RC_INTERNAL_ERROR;
672 }
Klement Sekeraf369e3a2019-04-30 13:01:08 +0200673 if (tmp->flags & VLIB_BUFFER_NEXT_PRESENT)
674 {
Vijayabhaskar Katamreddy90556d62019-05-23 13:02:28 -0700675 tmp->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
Klement Sekeraf369e3a2019-04-30 13:01:08 +0200676 tmp_bi = tmp->next_buffer;
Vijayabhaskar Katamreddy90556d62019-05-23 13:02:28 -0700677 tmp->next_buffer = 0;
678 tmp = vlib_get_buffer (vm, tmp_bi);
Klement Sekeraf369e3a2019-04-30 13:01:08 +0200679 vlib_buffer_free_one (vm, to_be_freed_bi);
680 }
681 else
682 {
Vijayabhaskar Katamreddy90556d62019-05-23 13:02:28 -0700683 tmp->next_buffer = 0;
Klement Sekeraf369e3a2019-04-30 13:01:08 +0200684 vlib_buffer_free_one (vm, to_be_freed_bi);
685 break;
686 }
Klement Sekera75e7d132017-09-20 08:26:30 +0200687 }
688 }
689 sub_chain_bi =
690 vnet_buffer (vlib_get_buffer (vm, sub_chain_bi))->ip.
691 reass.next_range_bi;
692 }
693 while (~0 != sub_chain_bi);
Chris Luke30684ac2018-03-29 12:56:58 -0700694
Klement Sekerad0f70a32018-12-14 17:24:13 +0100695 if (!last_b)
696 {
697 return IP4_REASS_RC_INTERNAL_ERROR;
698 }
Klement Sekera75e7d132017-09-20 08:26:30 +0200699 last_b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
Vijayabhaskar Katamreddy90556d62019-05-23 13:02:28 -0700700
Klement Sekerad0f70a32018-12-14 17:24:13 +0100701 if (total_length < first_b->current_length)
702 {
703 return IP4_REASS_RC_INTERNAL_ERROR;
704 }
Klement Sekera75e7d132017-09-20 08:26:30 +0200705 total_length -= first_b->current_length;
706 first_b->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
707 first_b->total_length_not_including_first_buffer = total_length;
708 ip4_header_t *ip = vlib_buffer_get_current (first_b);
709 ip->flags_and_fragment_offset = 0;
710 ip->length = clib_host_to_net_u16 (first_b->current_length + total_length);
711 ip->checksum = ip4_header_checksum (ip);
Klement Sekeraf883f6a2019-02-13 11:01:32 +0100712 if (!vlib_buffer_chain_linearize (vm, first_b))
713 {
714 return IP4_REASS_RC_NO_BUF;
715 }
Vijayabhaskar Katamreddy90556d62019-05-23 13:02:28 -0700716 // reset to reconstruct the mbuf linking
717 first_b->flags &= ~VLIB_BUFFER_EXT_HDR_VALID;
Klement Sekera75e7d132017-09-20 08:26:30 +0200718 if (PREDICT_FALSE (first_b->flags & VLIB_BUFFER_IS_TRACED))
719 {
Klement Sekera896c8962019-06-24 11:52:49 +0000720 ip4_full_reass_add_trace (vm, node, rm, reass, reass->first_bi,
721 FINALIZE, 0, ~0);
Klement Sekera75e7d132017-09-20 08:26:30 +0200722#if 0
723 // following code does a hexdump of packet fragments to stdout ...
724 do
725 {
726 u32 bi = reass->first_bi;
727 u8 *s = NULL;
728 while (~0 != bi)
729 {
730 vlib_buffer_t *b = vlib_get_buffer (vm, bi);
731 s = format (s, "%u: %U\n", bi, format_hexdump,
732 vlib_buffer_get_current (b), b->current_length);
733 if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
734 {
735 bi = b->next_buffer;
736 }
737 else
738 {
739 break;
740 }
741 }
742 printf ("%.*s\n", vec_len (s), s);
743 fflush (stdout);
744 vec_free (s);
745 }
746 while (0);
747#endif
748 }
749 *bi0 = reass->first_bi;
Klement Sekerafe8371f2020-09-10 12:03:54 +0000750 if (!is_custom)
Klement Sekera4c533132018-02-22 11:41:12 +0100751 {
Klement Sekera896c8962019-06-24 11:52:49 +0000752 *next0 = IP4_FULL_REASS_NEXT_INPUT;
Klement Sekera4c533132018-02-22 11:41:12 +0100753 }
754 else
755 {
756 *next0 = reass->next_index;
757 }
758 vnet_buffer (first_b)->ip.reass.estimated_mtu = reass->min_fragment_length;
Klement Sekera75e7d132017-09-20 08:26:30 +0200759 *error0 = IP4_ERROR_NONE;
Klement Sekera896c8962019-06-24 11:52:49 +0000760 ip4_full_reass_free (rm, rt, reass);
Klement Sekera75e7d132017-09-20 08:26:30 +0200761 reass = NULL;
Klement Sekerad0f70a32018-12-14 17:24:13 +0100762 return IP4_REASS_RC_OK;
Klement Sekera75e7d132017-09-20 08:26:30 +0200763}
764
/**
 * @brief Link a new fragment range into a reassembly's sorted range chain.
 *
 * The chain is a singly linked list of buffer indices threaded through the
 * buffers' vnet opaque data (ip.reass.next_range_bi), headed by
 * reass->first_bi.  The new range is inserted after prev_range_bi, or at
 * the head when prev_range_bi is ~0.  On success the reassembly's running
 * data_len is increased by the range's payload length.
 *
 * @param prev_range_bi buffer index of the range to insert after, or ~0
 *        to insert at the head of the chain.
 * @param new_next_bi buffer index of the fragment being inserted.
 * @return IP4_REASS_RC_OK, or IP4_REASS_RC_INTERNAL_ERROR if the range
 *         metadata fails the sanity check below.
 */
always_inline ip4_full_reass_rc_t
ip4_full_reass_insert_range_in_chain (vlib_main_t * vm,
				      ip4_full_reass_main_t * rm,
				      ip4_full_reass_per_thread_t * rt,
				      ip4_full_reass_t * reass,
				      u32 prev_range_bi, u32 new_next_bi)
{
  vlib_buffer_t *new_next_b = vlib_get_buffer (vm, new_next_bi);
  vnet_buffer_opaque_t *new_next_vnb = vnet_buffer (new_next_b);
  if (~0 != prev_range_bi)
    {
      /* splice into the middle/end of the chain, after prev_range_bi */
      vlib_buffer_t *prev_b = vlib_get_buffer (vm, prev_range_bi);
      vnet_buffer_opaque_t *prev_vnb = vnet_buffer (prev_b);
      new_next_vnb->ip.reass.next_range_bi = prev_vnb->ip.reass.next_range_bi;
      prev_vnb->ip.reass.next_range_bi = new_next_bi;
    }
  else
    {
      /* insert at the head; old head (if any) becomes our successor */
      if (~0 != reass->first_bi)
	{
	  new_next_vnb->ip.reass.next_range_bi = reass->first_bi;
	}
      reass->first_bi = new_next_bi;
    }
  vnet_buffer_opaque_t *vnb = vnet_buffer (new_next_b);
  /* sanity check of range vs. fragment metadata.
   * NOTE(review): with &&, the error only fires when BOTH invariants are
   * violated; an || (either violation is fatal) may have been intended —
   * confirm before changing, as this matches the sibling check in
   * ip4_full_reass_remove_range_from_chain. */
  if (!(vnb->ip.reass.range_first >= vnb->ip.reass.fragment_first) &&
      !(vnb->ip.reass.range_last > vnb->ip.reass.fragment_first))
    {
      return IP4_REASS_RC_INTERNAL_ERROR;
    }
  reass->data_len += ip4_full_reass_buffer_get_data_len (new_next_b);
  return IP4_REASS_RC_OK;
}
798
/**
 * @brief Unlink a fragment range from the reassembly chain and free its
 * buffers.
 *
 * The range is removed from the singly linked list (headed by
 * reass->first_bi when prev_range_bi is ~0), the reassembly's data_len is
 * decreased by the range's payload length, and every buffer in the range's
 * own vlib buffer chain (next_buffer links) is freed one by one.
 *
 * @param prev_range_bi buffer index of the range preceding discard_bi in
 *        the chain, or ~0 when discard_bi is the head.
 * @param discard_bi buffer index of the range to discard.
 * @return IP4_REASS_RC_OK, or IP4_REASS_RC_INTERNAL_ERROR when the chain
 *         linkage or range metadata is inconsistent.
 */
always_inline ip4_full_reass_rc_t
ip4_full_reass_remove_range_from_chain (vlib_main_t * vm,
					vlib_node_runtime_t * node,
					ip4_full_reass_main_t * rm,
					ip4_full_reass_t * reass,
					u32 prev_range_bi, u32 discard_bi)
{
  vlib_buffer_t *discard_b = vlib_get_buffer (vm, discard_bi);
  vnet_buffer_opaque_t *discard_vnb = vnet_buffer (discard_b);
  if (~0 != prev_range_bi)
    {
      vlib_buffer_t *prev_b = vlib_get_buffer (vm, prev_range_bi);
      vnet_buffer_opaque_t *prev_vnb = vnet_buffer (prev_b);
      /* caller must pass the true predecessor of discard_bi */
      if (!(prev_vnb->ip.reass.next_range_bi == discard_bi))
	{
	  return IP4_REASS_RC_INTERNAL_ERROR;
	}
      prev_vnb->ip.reass.next_range_bi = discard_vnb->ip.reass.next_range_bi;
    }
  else
    {
      /* discarding the head of the chain */
      reass->first_bi = discard_vnb->ip.reass.next_range_bi;
    }
  vnet_buffer_opaque_t *vnb = vnet_buffer (discard_b);
  /* NOTE(review): same &&-based sanity check as in
   * ip4_full_reass_insert_range_in_chain — fires only when both
   * invariants fail; confirm whether || was intended. */
  if (!(vnb->ip.reass.range_first >= vnb->ip.reass.fragment_first) &&
      !(vnb->ip.reass.range_last > vnb->ip.reass.fragment_first))
    {
      return IP4_REASS_RC_INTERNAL_ERROR;
    }
  reass->data_len -= ip4_full_reass_buffer_get_data_len (discard_b);
  /* walk the range's own buffer chain, freeing each buffer */
  while (1)
    {
      u32 to_be_freed_bi = discard_bi;
      if (PREDICT_FALSE (discard_b->flags & VLIB_BUFFER_IS_TRACED))
	{
	  ip4_full_reass_add_trace (vm, node, rm, reass, discard_bi,
				    RANGE_DISCARD, 0, ~0);
	}
      if (discard_b->flags & VLIB_BUFFER_NEXT_PRESENT)
	{
	  /* advance to the next buffer before freeing the current one */
	  discard_b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
	  discard_bi = discard_b->next_buffer;
	  discard_b->next_buffer = 0;
	  discard_b = vlib_get_buffer (vm, discard_bi);
	  vlib_buffer_free_one (vm, to_be_freed_bi);
	}
      else
	{
	  /* last buffer in the chain */
	  discard_b->next_buffer = 0;
	  vlib_buffer_free_one (vm, to_be_freed_bi);
	  break;
	}
    }
  return IP4_REASS_RC_OK;
}
854
/**
 * @brief Feed one fragment buffer (*bi0) into an existing reassembly
 * context.
 *
 * Walks the sorted list of already-received ranges and either links the
 * fragment in, trims overlap (shrinking either the fragment or an
 * existing candidate range), or discards fully-overlapped candidate
 * ranges.  When the accumulated data_len covers last_packet_octet the
 * datagram is complete: ip4_full_reass_finalize() is invoked and, if the
 * memory owner thread differs from the sendout thread,
 * IP4_REASS_RC_HANDOFF is returned with *handoff_thread_idx set.
 *
 * @param bi0 in/out: fragment buffer index; set to ~0 when the buffer is
 *        consumed into the chain, or replaced by the reassembled buffer
 *        after finalize.
 * @param next0/error0 out: next node index / error for *bi0 when it is
 *        not consumed.
 * @param is_custom true when called from the custom node — the
 *        (error_)next_index values are captured from the buffer's opaque
 *        data before they get overwritten.
 * @param handoff_thread_idx out: target thread index, valid when
 *        IP4_REASS_RC_HANDOFF is returned.
 */
always_inline ip4_full_reass_rc_t
ip4_full_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
		       ip4_full_reass_main_t * rm,
		       ip4_full_reass_per_thread_t * rt,
		       ip4_full_reass_t * reass, u32 * bi0, u32 * next0,
		       u32 * error0, bool is_custom, u32 * handoff_thread_idx)
{
  vlib_buffer_t *fb = vlib_get_buffer (vm, *bi0);
  vnet_buffer_opaque_t *fvnb = vnet_buffer (fb);
  if (is_custom)
    {
      // store (error_)next_index before it's overwritten
      reass->next_index = fvnb->ip.reass.next_index;
      reass->error_next_index = fvnb->ip.reass.error_next_index;
    }
  ip4_full_reass_rc_t rc = IP4_REASS_RC_OK;
  int consumed = 0;
  ip4_header_t *fip = vlib_buffer_get_current (fb);
  /* byte range [fragment_first, fragment_last] covered by this fragment */
  const u32 fragment_first = ip4_get_fragment_offset_bytes (fip);
  const u32 fragment_length =
    clib_net_to_host_u16 (fip->length) - ip4_header_bytes (fip);
  const u32 fragment_last = fragment_first + fragment_length - 1;
  fvnb->ip.reass.fragment_first = fragment_first;
  fvnb->ip.reass.fragment_last = fragment_last;
  int more_fragments = ip4_get_fragment_more (fip);
  u32 candidate_range_bi = reass->first_bi;
  u32 prev_range_bi = ~0;
  /* the fragment initially contributes its full extent as a range */
  fvnb->ip.reass.range_first = fragment_first;
  fvnb->ip.reass.range_last = fragment_last;
  fvnb->ip.reass.next_range_bi = ~0;
  if (!more_fragments)
    {
      /* last fragment tells us the total payload size */
      reass->last_packet_octet = fragment_last;
    }
  if (~0 == reass->first_bi)
    {
      // starting a new reassembly
      rc =
	ip4_full_reass_insert_range_in_chain (vm, rm, rt, reass,
					      prev_range_bi, *bi0);
      if (IP4_REASS_RC_OK != rc)
	{
	  return rc;
	}
      if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
	{
	  ip4_full_reass_add_trace (vm, node, rm, reass, *bi0, RANGE_NEW, 0,
				    ~0);
	}
      *bi0 = ~0;
      reass->min_fragment_length = clib_net_to_host_u16 (fip->length);
      reass->fragments_n = 1;
      return IP4_REASS_RC_OK;
    }
  /* track the smallest fragment seen — used as an MTU estimate */
  reass->min_fragment_length =
    clib_min (clib_net_to_host_u16 (fip->length),
	      fvnb->ip.reass.estimated_mtu);
  /* find where this fragment belongs in the sorted range chain */
  while (~0 != candidate_range_bi)
    {
      vlib_buffer_t *candidate_b = vlib_get_buffer (vm, candidate_range_bi);
      vnet_buffer_opaque_t *candidate_vnb = vnet_buffer (candidate_b);
      if (fragment_first > candidate_vnb->ip.reass.range_last)
	{
	  // this fragments starts after candidate range
	  prev_range_bi = candidate_range_bi;
	  candidate_range_bi = candidate_vnb->ip.reass.next_range_bi;
	  if (candidate_vnb->ip.reass.range_last < fragment_last &&
	      ~0 == candidate_range_bi)
	    {
	      // special case - this fragment falls beyond all known ranges
	      rc =
		ip4_full_reass_insert_range_in_chain (vm, rm, rt, reass,
						      prev_range_bi, *bi0);
	      if (IP4_REASS_RC_OK != rc)
		{
		  return rc;
		}
	      consumed = 1;
	      break;
	    }
	  continue;
	}
      if (fragment_last < candidate_vnb->ip.reass.range_first)
	{
	  // this fragment ends before candidate range without any overlap
	  rc =
	    ip4_full_reass_insert_range_in_chain (vm, rm, rt, reass,
						  prev_range_bi, *bi0);
	  if (IP4_REASS_RC_OK != rc)
	    {
	      return rc;
	    }
	  consumed = 1;
	}
      else
	{
	  /* overlap with the candidate range — resolve it */
	  if (fragment_first >= candidate_vnb->ip.reass.range_first &&
	      fragment_last <= candidate_vnb->ip.reass.range_last)
	    {
	      // this fragment is a (sub)part of existing range, ignore it
	      if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
		{
		  ip4_full_reass_add_trace (vm, node, rm, reass, *bi0,
					    RANGE_OVERLAP, 0, ~0);
		}
	      break;
	    }
	  int discard_candidate = 0;
	  if (fragment_first < candidate_vnb->ip.reass.range_first)
	    {
	      /* fragment overlaps the front of the candidate range */
	      u32 overlap =
		fragment_last - candidate_vnb->ip.reass.range_first + 1;
	      if (overlap < ip4_full_reass_buffer_get_data_len (candidate_b))
		{
		  /* shrink the candidate range, then insert the fragment
		   * in front of it */
		  candidate_vnb->ip.reass.range_first += overlap;
		  if (reass->data_len < overlap)
		    {
		      return IP4_REASS_RC_INTERNAL_ERROR;
		    }
		  reass->data_len -= overlap;
		  if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
		    {
		      ip4_full_reass_add_trace (vm, node, rm, reass,
						candidate_range_bi,
						RANGE_SHRINK, 0, ~0);
		    }
		  rc =
		    ip4_full_reass_insert_range_in_chain (vm, rm, rt, reass,
							  prev_range_bi,
							  *bi0);
		  if (IP4_REASS_RC_OK != rc)
		    {
		      return rc;
		    }
		  consumed = 1;
		}
	      else
		{
		  discard_candidate = 1;
		}
	    }
	  else if (fragment_last > candidate_vnb->ip.reass.range_last)
	    {
	      /* fragment overlaps the tail of the candidate range —
	       * trim the fragment's own range start */
	      u32 overlap =
		candidate_vnb->ip.reass.range_last - fragment_first + 1;
	      if (overlap < ip4_full_reass_buffer_get_data_len (candidate_b))
		{
		  fvnb->ip.reass.range_first += overlap;
		  if (~0 != candidate_vnb->ip.reass.next_range_bi)
		    {
		      prev_range_bi = candidate_range_bi;
		      candidate_range_bi =
			candidate_vnb->ip.reass.next_range_bi;
		      continue;
		    }
		  else
		    {
		      // special case - last range discarded
		      rc =
			ip4_full_reass_insert_range_in_chain (vm, rm, rt,
							      reass,
							      candidate_range_bi,
							      *bi0);
		      if (IP4_REASS_RC_OK != rc)
			{
			  return rc;
			}
		      consumed = 1;
		    }
		}
	      else
		{
		  discard_candidate = 1;
		}
	    }
	  else
	    {
	      discard_candidate = 1;
	    }
	  if (discard_candidate)
	    {
	      u32 next_range_bi = candidate_vnb->ip.reass.next_range_bi;
	      // discard candidate range, probe next range
	      rc =
		ip4_full_reass_remove_range_from_chain (vm, node, rm, reass,
							prev_range_bi,
							candidate_range_bi);
	      if (IP4_REASS_RC_OK != rc)
		{
		  return rc;
		}
	      if (~0 != next_range_bi)
		{
		  candidate_range_bi = next_range_bi;
		  continue;
		}
	      else
		{
		  // special case - last range discarded
		  rc =
		    ip4_full_reass_insert_range_in_chain (vm, rm, rt, reass,
							  prev_range_bi,
							  *bi0);
		  if (IP4_REASS_RC_OK != rc)
		    {
		      return rc;
		    }
		  consumed = 1;
		}
	    }
	}
      break;
    }
  ++reass->fragments_n;
  if (consumed)
    {
      if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
	{
	  ip4_full_reass_add_trace (vm, node, rm, reass, *bi0, RANGE_NEW, 0,
				    ~0);
	}
    }
  if (~0 != reass->last_packet_octet &&
      reass->data_len == reass->last_packet_octet + 1)
    {
      /* datagram complete — finalize; hand off if the memory owner is not
       * the thread designated to send the result out */
      *handoff_thread_idx = reass->sendout_thread_index;
      int handoff =
	reass->memory_owner_thread_index != reass->sendout_thread_index;
      rc =
	ip4_full_reass_finalize (vm, node, rm, rt, reass, bi0, next0, error0,
				 is_custom);
      if (IP4_REASS_RC_OK == rc && handoff)
	{
	  rc = IP4_REASS_RC_HANDOFF;
	}
    }
  else
    {
      if (consumed)
	{
	  *bi0 = ~0;
	  if (reass->fragments_n > rm->max_reass_len)
	    {
	      rc = IP4_REASS_RC_TOO_MANY_FRAGMENTS;
	    }
	}
      else
	{
	  /* duplicate fragment — drop it */
	  *next0 = IP4_FULL_REASS_NEXT_DROP;
	  *error0 = IP4_ERROR_REASS_DUPLICATE_FRAGMENT;
	}
    }
  return rc;
}
1109
/**
 * @brief Common dispatch loop shared by the normal, feature and custom
 * full-reassembly graph nodes.
 *
 * For every buffer in the frame: non-fragmented packets pass straight
 * through; malformed fragments are dropped; valid fragments are matched
 * to a reassembly context keyed on (fib index, src, dst, fragment_id,
 * protocol) and fed to ip4_full_reass_update().  Buffers are redirected
 * to the handoff next node when another thread owns the context.
 * The per-thread state is protected by rt->lock for the whole frame.
 */
always_inline uword
ip4_full_reass_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
		       vlib_frame_t * frame, ip4_full_reass_node_type_t type)
{
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left_from, n_left_to_next, *to_next, next_index;
  ip4_full_reass_main_t *rm = &ip4_full_reass_main;
  ip4_full_reass_per_thread_t *rt = &rm->per_thread_data[vm->thread_index];
  clib_spinlock_lock (&rt->lock);

  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  u32 bi0;
	  vlib_buffer_t *b0;
	  u32 next0;
	  u32 error0 = IP4_ERROR_NONE;

	  bi0 = from[0];
	  b0 = vlib_get_buffer (vm, bi0);

	  ip4_header_t *ip0 = vlib_buffer_get_current (b0);
	  if (!ip4_get_fragment_more (ip0) && !ip4_get_fragment_offset (ip0))
	    {
	      // this is a whole packet - no fragmentation
	      if (CUSTOM != type)
		{
		  next0 = IP4_FULL_REASS_NEXT_INPUT;
		}
	      else
		{
		  /* custom node: caller supplied the next index in the
		   * buffer's opaque data */
		  next0 = vnet_buffer (b0)->ip.reass.next_index;
		}
	      goto packet_enqueue;
	    }
	  /* basic sanity checks on the fragment's byte range */
	  const u32 fragment_first = ip4_get_fragment_offset_bytes (ip0);
	  const u32 fragment_length =
	    clib_net_to_host_u16 (ip0->length) - ip4_header_bytes (ip0);
	  const u32 fragment_last = fragment_first + fragment_length - 1;
	  if (fragment_first > fragment_last || fragment_first + fragment_length > UINT16_MAX - 20 || (fragment_length < 8 && ip4_get_fragment_more (ip0)))	// 8 is minimum frag length per RFC 791
	    {
	      next0 = IP4_FULL_REASS_NEXT_DROP;
	      error0 = IP4_ERROR_REASS_MALFORMED_PACKET;
	      goto packet_enqueue;
	    }
	  /* 16-byte reassembly key: fib index + src/dst addresses +
	   * fragment id + protocol */
	  ip4_full_reass_kv_t kv;
	  u8 do_handoff = 0;

	  kv.k.as_u64[0] =
	    (u64) vec_elt (ip4_main.fib_index_by_sw_if_index,
			   vnet_buffer (b0)->sw_if_index[VLIB_RX]) |
	    (u64) ip0->src_address.as_u32 << 32;
	  kv.k.as_u64[1] =
	    (u64) ip0->dst_address.
	    as_u32 | (u64) ip0->fragment_id << 32 | (u64) ip0->protocol << 48;

	  ip4_full_reass_t *reass =
	    ip4_full_reass_find_or_create (vm, node, rm, rt, &kv,
					   &do_handoff);

	  if (reass)
	    {
	      /* the thread that sees the first fragment (offset 0) is the
	       * one that will send the reassembled packet out */
	      const u32 fragment_first = ip4_get_fragment_offset_bytes (ip0);
	      if (0 == fragment_first)
		{
		  reass->sendout_thread_index = vm->thread_index;
		}
	    }

	  if (PREDICT_FALSE (do_handoff))
	    {
	      /* context is owned by another thread - hand the buffer off */
	      next0 = IP4_FULL_REASS_NEXT_HANDOFF;
	      vnet_buffer (b0)->ip.reass.owner_thread_index =
		kv.v.memory_owner_thread_index;
	    }
	  else if (reass)
	    {
	      u32 handoff_thread_idx;
	      switch (ip4_full_reass_update
		      (vm, node, rm, rt, reass, &bi0, &next0,
		       &error0, CUSTOM == type, &handoff_thread_idx))
		{
		case IP4_REASS_RC_OK:
		  /* nothing to do here */
		  break;
		case IP4_REASS_RC_HANDOFF:
		  next0 = IP4_FULL_REASS_NEXT_HANDOFF;
		  b0 = vlib_get_buffer (vm, bi0);
		  vnet_buffer (b0)->ip.reass.owner_thread_index =
		    handoff_thread_idx;
		  break;
		case IP4_REASS_RC_TOO_MANY_FRAGMENTS:
		  vlib_node_increment_counter (vm, node->node_index,
					       IP4_ERROR_REASS_FRAGMENT_CHAIN_TOO_LONG,
					       1);
		  ip4_full_reass_drop_all (vm, node, rm, reass);
		  ip4_full_reass_free (rm, rt, reass);
		  goto next_packet;
		  break;
		case IP4_REASS_RC_NO_BUF:
		  vlib_node_increment_counter (vm, node->node_index,
					       IP4_ERROR_REASS_NO_BUF, 1);
		  ip4_full_reass_drop_all (vm, node, rm, reass);
		  ip4_full_reass_free (rm, rt, reass);
		  goto next_packet;
		  break;
		case IP4_REASS_RC_INTERNAL_ERROR:
		  /* drop everything and start with a clean slate */
		  vlib_node_increment_counter (vm, node->node_index,
					       IP4_ERROR_REASS_INTERNAL_ERROR,
					       1);
		  ip4_full_reass_drop_all (vm, node, rm, reass);
		  ip4_full_reass_free (rm, rt, reass);
		  goto next_packet;
		  break;
		}
	    }
	  else
	    {
	      /* no context could be found or created */
	      next0 = IP4_FULL_REASS_NEXT_DROP;
	      error0 = IP4_ERROR_REASS_LIMIT_REACHED;
	    }


	packet_enqueue:

	  if (bi0 != ~0)
	    {
	      to_next[0] = bi0;
	      to_next += 1;
	      n_left_to_next -= 1;

	      /* bi0 might have been updated by reass_finalize, reload */
	      b0 = vlib_get_buffer (vm, bi0);
	      if (IP4_ERROR_NONE != error0)
		{
		  b0->error = node->errors[error0];
		}

	      if (next0 == IP4_FULL_REASS_NEXT_HANDOFF)
		{
		  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
		    {
		      ip4_full_reass_add_trace (vm, node, rm, NULL, bi0,
						HANDOFF, 0,
						vnet_buffer (b0)->ip.
						reass.owner_thread_index);
		    }
		}
	      else if (FEATURE == type && IP4_ERROR_NONE == error0)
		{
		  /* feature node: advance along the feature arc */
		  vnet_feature_next (&next0, b0);
		}
	      vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
					       to_next, n_left_to_next,
					       bi0, next0);
	      IP4_REASS_DEBUG_BUFFER (bi0, enqueue_next);
	    }

	next_packet:
	  from += 1;
	  n_left_from -= 1;
	}

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  clib_spinlock_unlock (&rt->lock);
  return frame->n_vectors;
}
1285
/** Error counter strings, expanded from the ip4 error list. */
static char *ip4_full_reass_error_strings[] = {
#define _(sym, string) string,
  foreach_ip4_error
#undef _
};
1291
/** Node function for "ip4-full-reassembly": dispatches to the shared
 * inline with the NORMAL node type. */
VLIB_NODE_FN (ip4_full_reass_node) (vlib_main_t * vm,
				    vlib_node_runtime_t * node,
				    vlib_frame_t * frame)
{
  return ip4_full_reass_inline (vm, node, frame, NORMAL);
}
1298
/* *INDENT-OFF* */
/* Registration of the standalone "ip4-full-reassembly" graph node. */
VLIB_REGISTER_NODE (ip4_full_reass_node) = {
    .name = "ip4-full-reassembly",
    .vector_size = sizeof (u32),
    .format_trace = format_ip4_full_reass_trace,
    .n_errors = ARRAY_LEN (ip4_full_reass_error_strings),
    .error_strings = ip4_full_reass_error_strings,
    .n_next_nodes = IP4_FULL_REASS_N_NEXT,
    .next_nodes =
        {
                [IP4_FULL_REASS_NEXT_INPUT] = "ip4-input",
                [IP4_FULL_REASS_NEXT_DROP] = "ip4-drop",
                [IP4_FULL_REASS_NEXT_HANDOFF] = "ip4-full-reassembly-handoff",

        },
};
/* *INDENT-ON* */
1316
/** Node function for "ip4-full-reassembly-feature": dispatches to the
 * shared inline with the FEATURE node type. */
VLIB_NODE_FN (ip4_full_reass_node_feature) (vlib_main_t * vm,
					    vlib_node_runtime_t * node,
					    vlib_frame_t * frame)
{
  return ip4_full_reass_inline (vm, node, frame, FEATURE);
}
1323
/* *INDENT-OFF* */
/* Registration of the feature-arc variant of the reassembly node. */
VLIB_REGISTER_NODE (ip4_full_reass_node_feature) = {
    .name = "ip4-full-reassembly-feature",
    .vector_size = sizeof (u32),
    .format_trace = format_ip4_full_reass_trace,
    .n_errors = ARRAY_LEN (ip4_full_reass_error_strings),
    .error_strings = ip4_full_reass_error_strings,
    .n_next_nodes = IP4_FULL_REASS_N_NEXT,
    .next_nodes =
        {
                [IP4_FULL_REASS_NEXT_INPUT] = "ip4-input",
                [IP4_FULL_REASS_NEXT_DROP] = "ip4-drop",
                [IP4_FULL_REASS_NEXT_HANDOFF] = "ip4-full-reass-feature-hoff",
        },
};
/* *INDENT-ON* */
1340
/* *INDENT-OFF* */
/* Hook the feature node into the "ip4-unicast" arc, before lookup. */
VNET_FEATURE_INIT (ip4_full_reass_feature, static) = {
    .arc_name = "ip4-unicast",
    .node_name = "ip4-full-reassembly-feature",
    .runs_before = VNET_FEATURES ("ip4-lookup",
                                  "ipsec4-input-feature"),
    .runs_after = 0,
};
/* *INDENT-ON* */
1350
/** Node function for "ip4-full-reassembly-custom": dispatches to the
 * shared inline with the CUSTOM node type (caller-supplied next
 * indices). */
VLIB_NODE_FN (ip4_full_reass_node_custom) (vlib_main_t * vm,
					   vlib_node_runtime_t * node,
					   vlib_frame_t * frame)
{
  return ip4_full_reass_inline (vm, node, frame, CUSTOM);
}
1357
/* *INDENT-OFF* */
/* Registration of the custom variant of the reassembly node. */
VLIB_REGISTER_NODE (ip4_full_reass_node_custom) = {
    .name = "ip4-full-reassembly-custom",
    .vector_size = sizeof (u32),
    .format_trace = format_ip4_full_reass_trace,
    .n_errors = ARRAY_LEN (ip4_full_reass_error_strings),
    .error_strings = ip4_full_reass_error_strings,
    .n_next_nodes = IP4_FULL_REASS_N_NEXT,
    .next_nodes =
        {
                [IP4_FULL_REASS_NEXT_INPUT] = "ip4-input",
                [IP4_FULL_REASS_NEXT_DROP] = "ip4-drop",
                [IP4_FULL_REASS_NEXT_HANDOFF] = "ip4-full-reass-custom-hoff",
        },
};
/* *INDENT-ON* */
1374
/* *INDENT-OFF* */
/* NOTE(review): despite being named for the custom node, this registers
 * node "ip4-full-reassembly-feature" on the "ip4-unicast" arc a second
 * time (same node_name/arc as ip4_full_reass_feature) - looks like a
 * copy-paste leftover; confirm whether this duplicate feature
 * registration is intentional before removing it. */
VNET_FEATURE_INIT (ip4_full_reass_custom, static) = {
    .arc_name = "ip4-unicast",
    .node_name = "ip4-full-reassembly-feature",
    .runs_before = VNET_FEATURES ("ip4-lookup",
                                  "ipsec4-input-feature"),
    .runs_after = 0,
};

/* *INDENT-ON* */
1385
Filip Tehlar26ea14e2019-03-11 05:30:21 -07001386#ifndef CLIB_MARCH_VARIANT
Klement Sekerafe8371f2020-09-10 12:03:54 +00001387uword
1388ip4_full_reass_custom_register_next_node (uword node_index)
1389{
1390 return vlib_node_add_next (vlib_get_main (),
1391 ip4_full_reass_node_custom.index, node_index);
1392}
1393
Klement Sekera4c533132018-02-22 11:41:12 +01001394always_inline u32
Klement Sekera896c8962019-06-24 11:52:49 +00001395ip4_full_reass_get_nbuckets ()
Klement Sekera75e7d132017-09-20 08:26:30 +02001396{
Klement Sekera896c8962019-06-24 11:52:49 +00001397 ip4_full_reass_main_t *rm = &ip4_full_reass_main;
Klement Sekera75e7d132017-09-20 08:26:30 +02001398 u32 nbuckets;
1399 u8 i;
1400
1401 nbuckets = (u32) (rm->max_reass_n / IP4_REASS_HT_LOAD_FACTOR);
1402
1403 for (i = 0; i < 31; i++)
1404 if ((1 << i) >= nbuckets)
1405 break;
1406 nbuckets = 1 << i;
1407
1408 return nbuckets;
1409}
Filip Tehlar26ea14e2019-03-11 05:30:21 -07001410#endif /* CLIB_MARCH_VARIANT */
Klement Sekera75e7d132017-09-20 08:26:30 +02001411
/** Events signalled to the expire-walk process node (see
 * ip4_full_reass_set). */
typedef enum
{
  IP4_EVENT_CONFIG_CHANGED = 1,
} ip4_full_reass_event_t;
Klement Sekera75e7d132017-09-20 08:26:30 +02001416
/** Context threaded through the bihash walk during rehashing. */
typedef struct
{
  int failure;			/* set to 1 when inserting into new_hash fails */
  clib_bihash_16_8_t *new_hash;	/* destination (larger) hash table */
} ip4_rehash_cb_ctx;
1422
Filip Tehlar26ea14e2019-03-11 05:30:21 -07001423#ifndef CLIB_MARCH_VARIANT
Neale Rannsf50bac12019-12-06 05:53:17 +00001424static int
Klement Sekera8dcfed52018-06-28 11:16:15 +02001425ip4_rehash_cb (clib_bihash_kv_16_8_t * kv, void *_ctx)
Klement Sekera75e7d132017-09-20 08:26:30 +02001426{
1427 ip4_rehash_cb_ctx *ctx = _ctx;
Klement Sekera8dcfed52018-06-28 11:16:15 +02001428 if (clib_bihash_add_del_16_8 (ctx->new_hash, kv, 1))
Klement Sekera75e7d132017-09-20 08:26:30 +02001429 {
1430 ctx->failure = 1;
1431 }
Neale Rannsf50bac12019-12-06 05:53:17 +00001432 return (BIHASH_WALK_CONTINUE);
Klement Sekera75e7d132017-09-20 08:26:30 +02001433}
1434
Klement Sekera4c533132018-02-22 11:41:12 +01001435static void
Klement Sekera896c8962019-06-24 11:52:49 +00001436ip4_full_reass_set_params (u32 timeout_ms, u32 max_reassemblies,
1437 u32 max_reassembly_length,
1438 u32 expire_walk_interval_ms)
Klement Sekera4c533132018-02-22 11:41:12 +01001439{
Klement Sekera896c8962019-06-24 11:52:49 +00001440 ip4_full_reass_main.timeout_ms = timeout_ms;
1441 ip4_full_reass_main.timeout = (f64) timeout_ms / (f64) MSEC_PER_SEC;
1442 ip4_full_reass_main.max_reass_n = max_reassemblies;
1443 ip4_full_reass_main.max_reass_len = max_reassembly_length;
1444 ip4_full_reass_main.expire_walk_interval_ms = expire_walk_interval_ms;
Klement Sekera4c533132018-02-22 11:41:12 +01001445}
1446
/**
 * @brief Apply new reassembly parameters, growing the hash table if
 * needed.
 *
 * Signals IP4_EVENT_CONFIG_CHANGED to the expire-walk process so it picks
 * up the new values.  When the required bucket count grows, all entries
 * are copied into a freshly initialized bihash; on copy failure the new
 * table is freed, -1 is returned and the old table stays in place.
 *
 * @return 0 on success, -1 when rehashing fails.
 */
vnet_api_error_t
ip4_full_reass_set (u32 timeout_ms, u32 max_reassemblies,
		    u32 max_reassembly_length, u32 expire_walk_interval_ms)
{
  u32 old_nbuckets = ip4_full_reass_get_nbuckets ();
  ip4_full_reass_set_params (timeout_ms, max_reassemblies,
			     max_reassembly_length, expire_walk_interval_ms);
  vlib_process_signal_event (ip4_full_reass_main.vlib_main,
			     ip4_full_reass_main.ip4_full_reass_expire_node_idx,
			     IP4_EVENT_CONFIG_CHANGED, 0);
  u32 new_nbuckets = ip4_full_reass_get_nbuckets ();
  /* grow-only: rehash when the bucket count would increase */
  if (ip4_full_reass_main.max_reass_n > 0 && new_nbuckets > old_nbuckets)
    {
      clib_bihash_16_8_t new_hash;
      clib_memset (&new_hash, 0, sizeof (new_hash));
      ip4_rehash_cb_ctx ctx;
      ctx.failure = 0;
      ctx.new_hash = &new_hash;
      clib_bihash_init_16_8 (&new_hash, "ip4-dr", new_nbuckets,
			     new_nbuckets * 1024);
      /* copy every existing entry into the new, larger table */
      clib_bihash_foreach_key_value_pair_16_8 (&ip4_full_reass_main.hash,
					       ip4_rehash_cb, &ctx);
      if (ctx.failure)
	{
	  clib_bihash_free_16_8 (&new_hash);
	  return -1;
	}
      else
	{
	  /* success - replace the old table with the new one */
	  clib_bihash_free_16_8 (&ip4_full_reass_main.hash);
	  clib_memcpy_fast (&ip4_full_reass_main.hash, &new_hash,
			    sizeof (ip4_full_reass_main.hash));
	  clib_bihash_copied (&ip4_full_reass_main.hash, &new_hash);
	}
    }
  return 0;
}
1484
1485vnet_api_error_t
Klement Sekera896c8962019-06-24 11:52:49 +00001486ip4_full_reass_get (u32 * timeout_ms, u32 * max_reassemblies,
1487 u32 * max_reassembly_length,
1488 u32 * expire_walk_interval_ms)
Klement Sekera75e7d132017-09-20 08:26:30 +02001489{
Klement Sekera896c8962019-06-24 11:52:49 +00001490 *timeout_ms = ip4_full_reass_main.timeout_ms;
1491 *max_reassemblies = ip4_full_reass_main.max_reass_n;
1492 *max_reassembly_length = ip4_full_reass_main.max_reass_len;
1493 *expire_walk_interval_ms = ip4_full_reass_main.expire_walk_interval_ms;
Klement Sekera75e7d132017-09-20 08:26:30 +02001494 return 0;
1495}
1496
Klement Sekera4c533132018-02-22 11:41:12 +01001497static clib_error_t *
Klement Sekera896c8962019-06-24 11:52:49 +00001498ip4_full_reass_init_function (vlib_main_t * vm)
Klement Sekera75e7d132017-09-20 08:26:30 +02001499{
Klement Sekera896c8962019-06-24 11:52:49 +00001500 ip4_full_reass_main_t *rm = &ip4_full_reass_main;
Klement Sekera75e7d132017-09-20 08:26:30 +02001501 clib_error_t *error = 0;
1502 u32 nbuckets;
Dave Barach1403fcd2018-02-05 09:45:43 -05001503 vlib_node_t *node;
Klement Sekera75e7d132017-09-20 08:26:30 +02001504
1505 rm->vlib_main = vm;
Klement Sekera75e7d132017-09-20 08:26:30 +02001506
Juraj Slobodacd806922018-10-10 10:15:54 +02001507 vec_validate (rm->per_thread_data, vlib_num_workers ());
Klement Sekera896c8962019-06-24 11:52:49 +00001508 ip4_full_reass_per_thread_t *rt;
Klement Sekera4c533132018-02-22 11:41:12 +01001509 vec_foreach (rt, rm->per_thread_data)
1510 {
1511 clib_spinlock_init (&rt->lock);
1512 pool_alloc (rt->pool, rm->max_reass_n);
1513 }
Dave Barach1403fcd2018-02-05 09:45:43 -05001514
Klement Sekera896c8962019-06-24 11:52:49 +00001515 node = vlib_get_node_by_name (vm, (u8 *) "ip4-full-reassembly-expire-walk");
Dave Barach1403fcd2018-02-05 09:45:43 -05001516 ASSERT (node);
Klement Sekera896c8962019-06-24 11:52:49 +00001517 rm->ip4_full_reass_expire_node_idx = node->index;
Dave Barach1403fcd2018-02-05 09:45:43 -05001518
Klement Sekera896c8962019-06-24 11:52:49 +00001519 ip4_full_reass_set_params (IP4_REASS_TIMEOUT_DEFAULT_MS,
1520 IP4_REASS_MAX_REASSEMBLIES_DEFAULT,
1521 IP4_REASS_MAX_REASSEMBLY_LENGTH_DEFAULT,
1522 IP4_REASS_EXPIRE_WALK_INTERVAL_DEFAULT_MS);
Klement Sekera3ecc2212018-03-27 10:34:43 +02001523
Klement Sekera896c8962019-06-24 11:52:49 +00001524 nbuckets = ip4_full_reass_get_nbuckets ();
1525 clib_bihash_init_16_8 (&rm->hash, "ip4-dr", nbuckets, nbuckets * 1024);
Klement Sekera75e7d132017-09-20 08:26:30 +02001526
Dave Barach1403fcd2018-02-05 09:45:43 -05001527 node = vlib_get_node_by_name (vm, (u8 *) "ip4-drop");
Klement Sekera75e7d132017-09-20 08:26:30 +02001528 ASSERT (node);
1529 rm->ip4_drop_idx = node->index;
Klement Sekera4c533132018-02-22 11:41:12 +01001530
Klement Sekera896c8962019-06-24 11:52:49 +00001531 rm->fq_index = vlib_frame_queue_main_init (ip4_full_reass_node.index, 0);
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001532 rm->fq_feature_index =
Klement Sekera896c8962019-06-24 11:52:49 +00001533 vlib_frame_queue_main_init (ip4_full_reass_node_feature.index, 0);
Klement Sekerafe8371f2020-09-10 12:03:54 +00001534 rm->fq_custom_index =
1535 vlib_frame_queue_main_init (ip4_full_reass_node_custom.index, 0);
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001536
Klement Sekera7b2e9fb2019-10-01 13:00:22 +00001537 rm->feature_use_refcount_per_intf = NULL;
Klement Sekera75e7d132017-09-20 08:26:30 +02001538 return error;
1539}
1540
Klement Sekera896c8962019-06-24 11:52:49 +00001541VLIB_INIT_FUNCTION (ip4_full_reass_init_function);
Filip Tehlar26ea14e2019-03-11 05:30:21 -07001542#endif /* CLIB_MARCH_VARIANT */
Klement Sekera75e7d132017-09-20 08:26:30 +02001543
/**
 * @brief Process node: periodically reap timed-out reassemblies.
 *
 * Sleeps for expire_walk_interval_ms (or until IP4_EVENT_CONFIG_CHANGED is
 * signalled, e.g. by ip4_full_reass_set), then walks every thread's
 * reassembly pool under that thread's spinlock.  Indices of expired entries
 * are collected first and freed afterwards, so the pool is never mutated
 * while pool_foreach_index iterates it.
 *
 * @return never returns under normal operation (infinite process loop)
 */
static uword
ip4_full_reass_walk_expired (vlib_main_t * vm,
			     vlib_node_runtime_t * node, vlib_frame_t * f)
{
  ip4_full_reass_main_t *rm = &ip4_full_reass_main;
  uword event_type, *event_data = 0;

  while (true)
    {
      vlib_process_wait_for_event_or_clock (vm,
					    (f64)
					    rm->expire_walk_interval_ms /
					    (f64) MSEC_PER_SEC);
      event_type = vlib_process_get_events (vm, &event_data);

      switch (event_type)
	{
	case ~0:		/* no events => timeout */
	  /* nothing to do here */
	  break;
	case IP4_EVENT_CONFIG_CHANGED:
	  /* wake-up only; new interval is picked up on the next wait above */
	  break;
	default:
	  clib_warning ("BUG: event type 0x%wx", event_type);
	  break;
	}
      f64 now = vlib_time_now (vm);

      ip4_full_reass_t *reass;
      int *pool_indexes_to_free = NULL;

      uword thread_index = 0;
      int index;
      /* workers plus the main thread (slot 0) */
      const uword nthreads = vlib_num_workers () + 1;
      for (thread_index = 0; thread_index < nthreads; ++thread_index)
	{
	  ip4_full_reass_per_thread_t *rt =
	    &rm->per_thread_data[thread_index];
	  clib_spinlock_lock (&rt->lock);

	  /* collect first, free after — don't mutate the pool mid-iteration */
	  vec_reset_length (pool_indexes_to_free);
          /* *INDENT-OFF* */
          pool_foreach_index (index, rt->pool) {
                reass = pool_elt_at_index (rt->pool, index);
                if (now > reass->last_heard + rm->timeout)
                  {
                    vec_add1 (pool_indexes_to_free, index);
                  }
          }
          /* *INDENT-ON* */
	  int *i;
          /* *INDENT-OFF* */
          vec_foreach (i, pool_indexes_to_free)
          {
            ip4_full_reass_t *reass = pool_elt_at_index (rt->pool, i[0]);
            /* drop all buffered fragments, then release the reassembly */
            ip4_full_reass_drop_all (vm, node, rm, reass);
            ip4_full_reass_free (rm, rt, reass);
          }
          /* *INDENT-ON* */

	  clib_spinlock_unlock (&rt->lock);
	}

      vec_free (pool_indexes_to_free);
      if (event_data)
	{
	  /* keep the allocation, just empty it for the next iteration */
	  _vec_len (event_data) = 0;
	}
    }

  return 0;
}
1616
Klement Sekera75e7d132017-09-20 08:26:30 +02001617/* *INDENT-OFF* */
Klement Sekera896c8962019-06-24 11:52:49 +00001618VLIB_REGISTER_NODE (ip4_full_reass_expire_node) = {
1619 .function = ip4_full_reass_walk_expired,
Klement Sekera75e7d132017-09-20 08:26:30 +02001620 .type = VLIB_NODE_TYPE_PROCESS,
Klement Sekera896c8962019-06-24 11:52:49 +00001621 .name = "ip4-full-reassembly-expire-walk",
1622 .format_trace = format_ip4_full_reass_trace,
1623 .n_errors = ARRAY_LEN (ip4_full_reass_error_strings),
1624 .error_strings = ip4_full_reass_error_strings,
Klement Sekera75e7d132017-09-20 08:26:30 +02001625
1626};
1627/* *INDENT-ON* */
1628
1629static u8 *
Klement Sekera896c8962019-06-24 11:52:49 +00001630format_ip4_full_reass_key (u8 * s, va_list * args)
Klement Sekera75e7d132017-09-20 08:26:30 +02001631{
Klement Sekera896c8962019-06-24 11:52:49 +00001632 ip4_full_reass_key_t *key = va_arg (*args, ip4_full_reass_key_t *);
1633 s =
1634 format (s,
1635 "xx_id: %u, src: %U, dst: %U, frag_id: %u, proto: %u",
1636 key->xx_id, format_ip4_address, &key->src, format_ip4_address,
1637 &key->dst, clib_net_to_host_u16 (key->frag_id), key->proto);
Klement Sekera75e7d132017-09-20 08:26:30 +02001638 return s;
1639}
1640
1641static u8 *
1642format_ip4_reass (u8 * s, va_list * args)
1643{
1644 vlib_main_t *vm = va_arg (*args, vlib_main_t *);
Klement Sekera896c8962019-06-24 11:52:49 +00001645 ip4_full_reass_t *reass = va_arg (*args, ip4_full_reass_t *);
Klement Sekera75e7d132017-09-20 08:26:30 +02001646
Klement Sekera4c533132018-02-22 11:41:12 +01001647 s = format (s, "ID: %lu, key: %U\n first_bi: %u, data_len: %u, "
Klement Sekera75e7d132017-09-20 08:26:30 +02001648 "last_packet_octet: %u, trace_op_counter: %u\n",
Klement Sekera896c8962019-06-24 11:52:49 +00001649 reass->id, format_ip4_full_reass_key, &reass->key,
1650 reass->first_bi, reass->data_len,
1651 reass->last_packet_octet, reass->trace_op_counter);
1652
Klement Sekera75e7d132017-09-20 08:26:30 +02001653 u32 bi = reass->first_bi;
1654 u32 counter = 0;
1655 while (~0 != bi)
1656 {
1657 vlib_buffer_t *b = vlib_get_buffer (vm, bi);
1658 vnet_buffer_opaque_t *vnb = vnet_buffer (b);
Klement Sekera896c8962019-06-24 11:52:49 +00001659 s =
1660 format (s,
1661 " #%03u: range: [%u, %u], bi: %u, off: %d, len: %u, "
1662 "fragment[%u, %u]\n", counter, vnb->ip.reass.range_first,
1663 vnb->ip.reass.range_last, bi,
1664 ip4_full_reass_buffer_get_data_offset (b),
1665 ip4_full_reass_buffer_get_data_len (b),
1666 vnb->ip.reass.fragment_first, vnb->ip.reass.fragment_last);
Klement Sekera75e7d132017-09-20 08:26:30 +02001667 if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
1668 {
1669 bi = b->next_buffer;
1670 }
1671 else
1672 {
1673 bi = ~0;
1674 }
1675 }
1676 return s;
1677}
1678
1679static clib_error_t *
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001680show_ip4_reass (vlib_main_t * vm,
1681 unformat_input_t * input,
Klement Sekera75e7d132017-09-20 08:26:30 +02001682 CLIB_UNUSED (vlib_cli_command_t * lmd))
1683{
Klement Sekera896c8962019-06-24 11:52:49 +00001684 ip4_full_reass_main_t *rm = &ip4_full_reass_main;
Klement Sekera75e7d132017-09-20 08:26:30 +02001685
1686 vlib_cli_output (vm, "---------------------");
1687 vlib_cli_output (vm, "IP4 reassembly status");
1688 vlib_cli_output (vm, "---------------------");
Klement Sekera4c533132018-02-22 11:41:12 +01001689 bool details = false;
Klement Sekera75e7d132017-09-20 08:26:30 +02001690 if (unformat (input, "details"))
1691 {
Klement Sekera4c533132018-02-22 11:41:12 +01001692 details = true;
1693 }
1694
1695 u32 sum_reass_n = 0;
Klement Sekera896c8962019-06-24 11:52:49 +00001696 ip4_full_reass_t *reass;
Klement Sekera4c533132018-02-22 11:41:12 +01001697 uword thread_index;
Juraj Slobodacd806922018-10-10 10:15:54 +02001698 const uword nthreads = vlib_num_workers () + 1;
Klement Sekera4c533132018-02-22 11:41:12 +01001699 for (thread_index = 0; thread_index < nthreads; ++thread_index)
1700 {
Klement Sekera896c8962019-06-24 11:52:49 +00001701 ip4_full_reass_per_thread_t *rt = &rm->per_thread_data[thread_index];
Klement Sekera4c533132018-02-22 11:41:12 +01001702 clib_spinlock_lock (&rt->lock);
1703 if (details)
1704 {
1705 /* *INDENT-OFF* */
Damjan Marionb2c31b62020-12-13 21:47:40 +01001706 pool_foreach (reass, rt->pool) {
Klement Sekera4c533132018-02-22 11:41:12 +01001707 vlib_cli_output (vm, "%U", format_ip4_reass, vm, reass);
Damjan Marionb2c31b62020-12-13 21:47:40 +01001708 }
Klement Sekera4c533132018-02-22 11:41:12 +01001709 /* *INDENT-ON* */
1710 }
1711 sum_reass_n += rt->reass_n;
Klement Sekera4c533132018-02-22 11:41:12 +01001712 clib_spinlock_unlock (&rt->lock);
Klement Sekera75e7d132017-09-20 08:26:30 +02001713 }
1714 vlib_cli_output (vm, "---------------------");
Vladimir Ratnikova877cf92019-12-21 06:27:52 -05001715 vlib_cli_output (vm, "Current full IP4 reassemblies count: %lu\n",
Klement Sekera4c533132018-02-22 11:41:12 +01001716 (long unsigned) sum_reass_n);
Klement Sekera75e7d132017-09-20 08:26:30 +02001717 vlib_cli_output (vm,
Vladimir Ratnikova877cf92019-12-21 06:27:52 -05001718 "Maximum configured concurrent full IP4 reassemblies per worker-thread: %lu\n",
Klement Sekera75e7d132017-09-20 08:26:30 +02001719 (long unsigned) rm->max_reass_n);
Vladimir Ratnikova877cf92019-12-21 06:27:52 -05001720 vlib_cli_output (vm,
1721 "Maximum configured full IP4 reassembly timeout: %lums\n",
1722 (long unsigned) rm->timeout_ms);
1723 vlib_cli_output (vm,
1724 "Maximum configured full IP4 reassembly expire walk interval: %lums\n",
1725 (long unsigned) rm->expire_walk_interval_ms);
Klement Sekera75e7d132017-09-20 08:26:30 +02001726 return 0;
1727}
1728
/* *INDENT-OFF* */
/* CLI: "show ip4-full-reassembly [details]" — handled by show_ip4_reass. */
VLIB_CLI_COMMAND (show_ip4_full_reass_cmd, static) = {
    .path = "show ip4-full-reassembly",
    .short_help = "show ip4-full-reassembly [details]",
    .function = show_ip4_reass,
};
/* *INDENT-ON* */
1736
Filip Tehlar26ea14e2019-03-11 05:30:21 -07001737#ifndef CLIB_MARCH_VARIANT
Klement Sekera4c533132018-02-22 11:41:12 +01001738vnet_api_error_t
Klement Sekera896c8962019-06-24 11:52:49 +00001739ip4_full_reass_enable_disable (u32 sw_if_index, u8 enable_disable)
Klement Sekera4c533132018-02-22 11:41:12 +01001740{
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001741 return vnet_feature_enable_disable ("ip4-unicast",
Klement Sekera896c8962019-06-24 11:52:49 +00001742 "ip4-full-reassembly-feature",
1743 sw_if_index, enable_disable, 0, 0);
Klement Sekera4c533132018-02-22 11:41:12 +01001744}
Filip Tehlar26ea14e2019-03-11 05:30:21 -07001745#endif /* CLIB_MARCH_VARIANT */
Klement Sekera4c533132018-02-22 11:41:12 +01001746

/* Error counters for the handoff nodes; the X-macro keeps the enum and the
 * string table below in sync. */
#define foreach_ip4_full_reass_handoff_error                       \
_(CONGESTION_DROP, "congestion drop")


typedef enum
{
#define _(sym,str) IP4_FULL_REASS_HANDOFF_ERROR_##sym,
  foreach_ip4_full_reass_handoff_error
#undef _
    IP4_FULL_REASS_HANDOFF_N_ERROR,
} ip4_full_reass_handoff_error_t;

static char *ip4_full_reass_handoff_error_strings[] = {
#define _(sym,string) string,
  foreach_ip4_full_reass_handoff_error
#undef _
};
1765
/* Trace record for the handoff nodes. */
typedef struct
{
  /* worker thread index the buffer was handed off to */
  u32 next_worker_index;
} ip4_full_reass_handoff_trace_t;
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001770
1771static u8 *
Klement Sekera896c8962019-06-24 11:52:49 +00001772format_ip4_full_reass_handoff_trace (u8 * s, va_list * args)
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001773{
1774 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
1775 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
Klement Sekera896c8962019-06-24 11:52:49 +00001776 ip4_full_reass_handoff_trace_t *t =
1777 va_arg (*args, ip4_full_reass_handoff_trace_t *);
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001778
1779 s =
Klement Sekera896c8962019-06-24 11:52:49 +00001780 format (s, "ip4-full-reassembly-handoff: next-worker %d",
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001781 t->next_worker_index);
1782
1783 return s;
1784}
1785
1786always_inline uword
Klement Sekera896c8962019-06-24 11:52:49 +00001787ip4_full_reass_handoff_node_inline (vlib_main_t * vm,
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001788 vlib_node_runtime_t * node,
Klement Sekerafe8371f2020-09-10 12:03:54 +00001789 vlib_frame_t * frame,
1790 ip4_full_reass_node_type_t type)
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001791{
Klement Sekera896c8962019-06-24 11:52:49 +00001792 ip4_full_reass_main_t *rm = &ip4_full_reass_main;
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001793
1794 vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
1795 u32 n_enq, n_left_from, *from;
1796 u16 thread_indices[VLIB_FRAME_SIZE], *ti;
1797 u32 fq_index;
1798
1799 from = vlib_frame_vector_args (frame);
1800 n_left_from = frame->n_vectors;
1801 vlib_get_buffers (vm, from, bufs, n_left_from);
1802
1803 b = bufs;
1804 ti = thread_indices;
1805
Klement Sekerafe8371f2020-09-10 12:03:54 +00001806 switch (type)
1807 {
1808 case NORMAL:
1809 fq_index = rm->fq_index;
1810 break;
1811 case FEATURE:
1812 fq_index = rm->fq_feature_index;
1813 break;
1814 case CUSTOM:
1815 fq_index = rm->fq_custom_index;
1816 break;
1817 default:
1818 clib_warning ("Unexpected `type' (%d)!", type);
1819 ASSERT (0);
1820 }
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001821
1822 while (n_left_from > 0)
1823 {
Klement Sekerade34c352019-06-25 11:19:22 +00001824 ti[0] = vnet_buffer (b[0])->ip.reass.owner_thread_index;
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001825
1826 if (PREDICT_FALSE
1827 ((node->flags & VLIB_NODE_FLAG_TRACE)
1828 && (b[0]->flags & VLIB_BUFFER_IS_TRACED)))
1829 {
Klement Sekera896c8962019-06-24 11:52:49 +00001830 ip4_full_reass_handoff_trace_t *t =
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001831 vlib_add_trace (vm, node, b[0], sizeof (*t));
1832 t->next_worker_index = ti[0];
1833 }
1834
1835 n_left_from -= 1;
1836 ti += 1;
1837 b += 1;
1838 }
1839 n_enq =
1840 vlib_buffer_enqueue_to_thread (vm, fq_index, from, thread_indices,
1841 frame->n_vectors, 1);
1842
1843 if (n_enq < frame->n_vectors)
1844 vlib_node_increment_counter (vm, node->node_index,
Klement Sekera896c8962019-06-24 11:52:49 +00001845 IP4_FULL_REASS_HANDOFF_ERROR_CONGESTION_DROP,
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001846 frame->n_vectors - n_enq);
1847 return frame->n_vectors;
1848}
1849
Klement Sekera896c8962019-06-24 11:52:49 +00001850VLIB_NODE_FN (ip4_full_reass_handoff_node) (vlib_main_t * vm,
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001851 vlib_node_runtime_t * node,
1852 vlib_frame_t * frame)
1853{
Klement Sekerafe8371f2020-09-10 12:03:54 +00001854 return ip4_full_reass_handoff_node_inline (vm, node, frame, NORMAL);
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001855}
1856
1857
1858/* *INDENT-OFF* */
Klement Sekera896c8962019-06-24 11:52:49 +00001859VLIB_REGISTER_NODE (ip4_full_reass_handoff_node) = {
1860 .name = "ip4-full-reassembly-handoff",
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001861 .vector_size = sizeof (u32),
Klement Sekera896c8962019-06-24 11:52:49 +00001862 .n_errors = ARRAY_LEN(ip4_full_reass_handoff_error_strings),
1863 .error_strings = ip4_full_reass_handoff_error_strings,
1864 .format_trace = format_ip4_full_reass_handoff_trace,
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001865
1866 .n_next_nodes = 1,
1867
1868 .next_nodes = {
1869 [0] = "error-drop",
1870 },
1871};
1872/* *INDENT-ON* */
1873
1874
/* *INDENT-OFF* */
/* Handoff node for the feature-arc reassembly path. */
VLIB_NODE_FN (ip4_full_reass_feature_handoff_node) (vlib_main_t * vm,
						    vlib_node_runtime_t *
						    node,
						    vlib_frame_t * frame)
{
  return ip4_full_reass_handoff_node_inline (vm, node, frame, FEATURE);
}
/* *INDENT-ON* */
1884
1885
/* *INDENT-OFF* */
/* Registration for the feature-path handoff node. */
VLIB_REGISTER_NODE (ip4_full_reass_feature_handoff_node) = {
  .name = "ip4-full-reass-feature-hoff",
  .vector_size = sizeof (u32),
  .n_errors = ARRAY_LEN(ip4_full_reass_handoff_error_strings),
  .error_strings = ip4_full_reass_handoff_error_strings,
  .format_trace = format_ip4_full_reass_handoff_trace,

  .n_next_nodes = 1,

  .next_nodes = {
    [0] = "error-drop",
  },
};
/* *INDENT-ON* */
1901
Klement Sekerafe8371f2020-09-10 12:03:54 +00001902/* *INDENT-OFF* */
1903VLIB_NODE_FN (ip4_full_reass_custom_handoff_node) (vlib_main_t * vm,
1904 vlib_node_runtime_t *
1905 node,
1906 vlib_frame_t * frame)
1907{
1908 return ip4_full_reass_handoff_node_inline (vm, node, frame, CUSTOM);
1909}
1910/* *INDENT-ON* */
1911
1912
/* *INDENT-OFF* */
/* Registration for the custom-path handoff node. */
VLIB_REGISTER_NODE (ip4_full_reass_custom_handoff_node) = {
  .name = "ip4-full-reass-custom-hoff",
  .vector_size = sizeof (u32),
  .n_errors = ARRAY_LEN(ip4_full_reass_handoff_error_strings),
  .error_strings = ip4_full_reass_handoff_error_strings,
  .format_trace = format_ip4_full_reass_handoff_trace,

  .n_next_nodes = 1,

  .next_nodes = {
    [0] = "error-drop",
  },
};
/* *INDENT-ON* */
1928
Klement Sekera7b2e9fb2019-10-01 13:00:22 +00001929#ifndef CLIB_MARCH_VARIANT
1930int
1931ip4_full_reass_enable_disable_with_refcnt (u32 sw_if_index, int is_enable)
1932{
1933 ip4_full_reass_main_t *rm = &ip4_full_reass_main;
1934 vec_validate (rm->feature_use_refcount_per_intf, sw_if_index);
1935 if (is_enable)
1936 {
1937 if (!rm->feature_use_refcount_per_intf[sw_if_index])
1938 {
1939 ++rm->feature_use_refcount_per_intf[sw_if_index];
1940 return vnet_feature_enable_disable ("ip4-unicast",
1941 "ip4-full-reassembly-feature",
1942 sw_if_index, 1, 0, 0);
1943 }
1944 ++rm->feature_use_refcount_per_intf[sw_if_index];
1945 }
1946 else
1947 {
1948 --rm->feature_use_refcount_per_intf[sw_if_index];
1949 if (!rm->feature_use_refcount_per_intf[sw_if_index])
1950 return vnet_feature_enable_disable ("ip4-unicast",
1951 "ip4-full-reassembly-feature",
1952 sw_if_index, 0, 0, 0);
1953 }
1954 return -1;
1955}
1956#endif
1957
Klement Sekera75e7d132017-09-20 08:26:30 +02001958/*
1959 * fd.io coding-style-patch-verification: ON
1960 *
1961 * Local Variables:
1962 * eval: (c-set-style "gnu")
1963 * End:
1964 */