/*
 * Copyright (c) 2017 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * @file
 * @brief IPv4 Full Reassembly.
 *
 * This file contains the source code for IPv4 full reassembly.
 */

#include <vppinfra/vec.h>
#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
#include <vnet/ip/ip.api_enum.h>
#include <vppinfra/fifo.h>
#include <vppinfra/bihash_16_8.h>
#include <vnet/ip/reass/ip4_full_reass.h>
#include <stddef.h>

#define MSEC_PER_SEC 1000
#define IP4_REASS_TIMEOUT_DEFAULT_MS 200

/* As there are only 1024 reassembly contexts per thread, either a DDoS
 * attack or even a fraction of real timeouts would quickly consume these
 * contexts, running out of context space and leaving us unable to perform
 * reassembly */
#define IP4_REASS_EXPIRE_WALK_INTERVAL_DEFAULT_MS 50 // 50 ms default
#define IP4_REASS_MAX_REASSEMBLIES_DEFAULT 1024
#define IP4_REASS_MAX_REASSEMBLY_LENGTH_DEFAULT 3
#define IP4_REASS_HT_LOAD_FACTOR (0.75)

#define IP4_REASS_DEBUG_BUFFERS 0
#if IP4_REASS_DEBUG_BUFFERS
#define IP4_REASS_DEBUG_BUFFER(bi, what)             \
  do                                                 \
    {                                                \
      u32 _bi = bi;                                  \
      printf (#what "buffer %u", _bi);               \
      vlib_buffer_t *_b = vlib_get_buffer (vm, _bi); \
      while (_b->flags & VLIB_BUFFER_NEXT_PRESENT)   \
        {                                            \
          _bi = _b->next_buffer;                     \
          printf ("[%u]", _bi);                      \
          _b = vlib_get_buffer (vm, _bi);            \
        }                                            \
      printf ("\n");                                 \
      fflush (stdout);                               \
    }                                                \
  while (0)
#else
#define IP4_REASS_DEBUG_BUFFER(...)
#endif

typedef enum
{
  IP4_REASS_RC_OK,
  IP4_REASS_RC_TOO_MANY_FRAGMENTS,
  IP4_REASS_RC_INTERNAL_ERROR,
  IP4_REASS_RC_NO_BUF,
  IP4_REASS_RC_HANDOFF,
} ip4_full_reass_rc_t;

typedef struct
{
  struct
  {
    u16 frag_id;
    u8 proto;
    u8 unused;
    u32 fib_index;
    ip4_address_t src;
    ip4_address_t dst;
  };
} ip4_full_reass_key_t;

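/* the key is stored verbatim in a 16-byte bihash key (see
 * ip4_full_reass_kv_t below), hence the exact size requirement */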
STATIC_ASSERT_SIZEOF (ip4_full_reass_key_t, 16);

typedef union
{
  struct
  {
    u32 reass_index;
    u32 memory_owner_thread_index;
  };
  u64 as_u64;
} ip4_full_reass_val_t;

typedef union
{
  struct
  {
    ip4_full_reass_key_t k;
    ip4_full_reass_val_t v;
  };
  clib_bihash_kv_16_8_t kv;
} ip4_full_reass_kv_t;

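/*
 * Each buffer tracks both the fragment it carries
 * (ip.reass.fragment_first/fragment_last) and the sub-range of that fragment
 * actually used in the reassembled packet (ip.reass.range_first/range_last).
 * Overlap resolution shrinks the range while the fragment bounds stay fixed.
 * E.g. a fragment covering octets [1200, 1999] whose first 400 octets overlap
 * an earlier range ends up with range [1600, 1999], data offset 400 and data
 * length 400.
 */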
always_inline u32
ip4_full_reass_buffer_get_data_offset (vlib_buffer_t * b)
{
  vnet_buffer_opaque_t *vnb = vnet_buffer (b);
  return vnb->ip.reass.range_first - vnb->ip.reass.fragment_first;
}

always_inline u16
ip4_full_reass_buffer_get_data_len (vlib_buffer_t * b)
{
  vnet_buffer_opaque_t *vnb = vnet_buffer (b);
  return clib_min (vnb->ip.reass.range_last, vnb->ip.reass.fragment_last) -
    (vnb->ip.reass.fragment_first +
     ip4_full_reass_buffer_get_data_offset (b)) + 1;
}

typedef struct
{
  // hash table key
  ip4_full_reass_key_t key;
  // time when last packet was received
  f64 last_heard;
  // internal id of this reassembly
  u64 id;
  // buffer index of first buffer in this reassembly context
  u32 first_bi;
  // last octet of packet, ~0 until fragment without more_fragments arrives
  u32 last_packet_octet;
  // length of data collected so far
  u32 data_len;
  // trace operation counter
  u32 trace_op_counter;
  // next index - used by non-feature node
  u32 next_index;
  // error next index - used by custom apps (~0 if not used)
  u32 error_next_index;
  // minimum fragment length for this reassembly - used to estimate MTU
  u16 min_fragment_length;
  // number of fragments in this reassembly
  u32 fragments_n;
  // thread owning memory for this context (whose pool contains this ctx)
  u32 memory_owner_thread_index;
  // thread which received fragment with offset 0 and which sends out the
  // completed reassembly
  u32 sendout_thread_index;
} ip4_full_reass_t;

typedef struct
{
  ip4_full_reass_t *pool;
  u32 reass_n;
  u32 id_counter;
  // for pacing the main thread timeouts
  u32 last_id;
  clib_spinlock_t lock;
} ip4_full_reass_per_thread_t;

typedef struct
{
  // IPv4 config
  u32 timeout_ms;
  f64 timeout;
  u32 expire_walk_interval_ms;
  // maximum number of fragments in one reassembly
  u32 max_reass_len;
  // maximum number of reassemblies
  u32 max_reass_n;

  // IPv4 runtime
  clib_bihash_16_8_t hash;
  // per-thread data
  ip4_full_reass_per_thread_t *per_thread_data;

  // convenience
  vlib_main_t *vlib_main;

  u32 ip4_full_reass_expire_node_idx;

  /** Worker handoff */
  u32 fq_index;
  u32 fq_local_index;
  u32 fq_feature_index;
  u32 fq_custom_index;

  // reference count for enabling/disabling feature - per interface
  u32 *feature_use_refcount_per_intf;

  // whether local fragmented packets are reassembled or not
  int is_local_reass_enabled;
} ip4_full_reass_main_t;

extern ip4_full_reass_main_t ip4_full_reass_main;

#ifndef CLIB_MARCH_VARIANT
ip4_full_reass_main_t ip4_full_reass_main;
#endif /* CLIB_MARCH_VARIANT */

typedef enum
{
  IP4_FULL_REASS_NEXT_INPUT,
  IP4_FULL_REASS_NEXT_DROP,
  IP4_FULL_REASS_NEXT_HANDOFF,
  IP4_FULL_REASS_N_NEXT,
} ip4_full_reass_next_t;

typedef enum
{
  NORMAL,
  FEATURE,
  CUSTOM
} ip4_full_reass_node_type_t;

typedef enum
{
  RANGE_NEW,
  RANGE_SHRINK,
  RANGE_DISCARD,
  RANGE_OVERLAP,
  FINALIZE,
  HANDOFF,
  PASSTHROUGH,
} ip4_full_reass_trace_operation_e;

typedef struct
{
  u16 range_first;
  u16 range_last;
  u32 range_bi;
  i32 data_offset;
  u32 data_len;
  u32 first_bi;
} ip4_full_reass_range_trace_t;

typedef struct
{
  ip4_full_reass_trace_operation_e action;
  u32 reass_id;
  ip4_full_reass_range_trace_t trace_range;
  u32 size_diff;
  u32 op_id;
  u32 thread_id;
  u32 thread_id_to;
  u32 fragment_first;
  u32 fragment_last;
  u32 total_data_len;
  bool is_after_handoff;
  ip4_header_t ip4_header;
} ip4_full_reass_trace_t;

extern vlib_node_registration_t ip4_full_reass_node;
extern vlib_node_registration_t ip4_full_reass_node_feature;
extern vlib_node_registration_t ip4_full_reass_node_custom;

static void
ip4_full_reass_trace_details (vlib_main_t * vm, u32 bi,
                              ip4_full_reass_range_trace_t * trace)
{
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
  vnet_buffer_opaque_t *vnb = vnet_buffer (b);
  trace->range_first = vnb->ip.reass.range_first;
  trace->range_last = vnb->ip.reass.range_last;
  trace->data_offset = ip4_full_reass_buffer_get_data_offset (b);
  trace->data_len = ip4_full_reass_buffer_get_data_len (b);
  trace->range_bi = bi;
}

static u8 *
format_ip4_full_reass_range_trace (u8 * s, va_list * args)
{
  ip4_full_reass_range_trace_t *trace =
    va_arg (*args, ip4_full_reass_range_trace_t *);
  s =
    format (s, "range: [%u, %u], off %d, len %u, bi %u", trace->range_first,
            trace->range_last, trace->data_offset, trace->data_len,
            trace->range_bi);
  return s;
}

static u8 *
format_ip4_full_reass_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  ip4_full_reass_trace_t *t = va_arg (*args, ip4_full_reass_trace_t *);
  u32 indent = 0;
  if (~0 != t->reass_id)
    {
      if (t->is_after_handoff)
        {
          s =
            format (s, "%U\n", format_ip4_header, &t->ip4_header,
                    sizeof (t->ip4_header));
          indent = 2;
        }
      s =
        format (s, "%Ureass id: %u, op id: %u, ", format_white_space, indent,
                t->reass_id, t->op_id);
      indent = format_get_indent (s);
      s =
        format (s,
                "first bi: %u, data len: %u, ip/fragment[%u, %u]",
                t->trace_range.first_bi, t->total_data_len, t->fragment_first,
                t->fragment_last);
    }
  switch (t->action)
    {
    case RANGE_SHRINK:
      s = format (s, "\n%Ushrink %U by %u", format_white_space, indent,
                  format_ip4_full_reass_range_trace, &t->trace_range,
                  t->size_diff);
      break;
    case RANGE_DISCARD:
      s = format (s, "\n%Udiscard %U", format_white_space, indent,
                  format_ip4_full_reass_range_trace, &t->trace_range);
      break;
    case RANGE_NEW:
      s = format (s, "\n%Unew %U", format_white_space, indent,
                  format_ip4_full_reass_range_trace, &t->trace_range);
      break;
    case RANGE_OVERLAP:
      s = format (s, "\n%Uoverlapping/ignored %U", format_white_space, indent,
                  format_ip4_full_reass_range_trace, &t->trace_range);
      break;
    case FINALIZE:
      s = format (s, "\n%Ufinalize reassembly", format_white_space, indent);
      break;
    case HANDOFF:
      s =
        format (s, "handoff from thread #%u to thread #%u", t->thread_id,
                t->thread_id_to);
      break;
    case PASSTHROUGH:
      s = format (s, "passthrough - not a fragment");
      break;
    }
  return s;
}

static void
ip4_full_reass_add_trace (vlib_main_t * vm, vlib_node_runtime_t * node,
                          ip4_full_reass_t * reass, u32 bi,
                          ip4_full_reass_trace_operation_e action,
                          u32 size_diff, u32 thread_id_to)
{
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
  vnet_buffer_opaque_t *vnb = vnet_buffer (b);
  if (pool_is_free_index
      (vm->trace_main.trace_buffer_pool, vlib_buffer_get_trace_index (b)))
    {
      // this buffer's trace is gone
      b->flags &= ~VLIB_BUFFER_IS_TRACED;
      return;
    }
  bool is_after_handoff = false;
  if (vlib_buffer_get_trace_thread (b) != vm->thread_index)
    {
      is_after_handoff = true;
    }
  ip4_full_reass_trace_t *t = vlib_add_trace (vm, node, b, sizeof (t[0]));
  t->is_after_handoff = is_after_handoff;
  if (t->is_after_handoff)
    {
      clib_memcpy (&t->ip4_header, vlib_buffer_get_current (b),
                   clib_min (sizeof (t->ip4_header), b->current_length));
    }
  if (reass)
    {
      t->reass_id = reass->id;
      t->op_id = reass->trace_op_counter;
      t->trace_range.first_bi = reass->first_bi;
      t->total_data_len = reass->data_len;
      ++reass->trace_op_counter;
    }
  else
    {
      t->reass_id = ~0;
      t->op_id = 0;
      t->trace_range.first_bi = 0;
      t->total_data_len = 0;
    }
  t->action = action;
  ip4_full_reass_trace_details (vm, bi, &t->trace_range);
  t->size_diff = size_diff;
  t->thread_id = vm->thread_index;
  t->thread_id_to = thread_id_to;
  t->fragment_first = vnb->ip.reass.fragment_first;
  t->fragment_last = vnb->ip.reass.fragment_last;
#if 0
  static u8 *s = NULL;
  s = format (s, "%U", format_ip4_full_reass_trace, NULL, NULL, t);
  printf ("%.*s\n", vec_len (s), s);
  fflush (stdout);
  vec_reset_length (s);
#endif
}

always_inline void
ip4_full_reass_free_ctx (ip4_full_reass_per_thread_t * rt,
                         ip4_full_reass_t * reass)
{
  pool_put (rt->pool, reass);
  --rt->reass_n;
}

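/* free a reassembly context together with its hash table entry; the plain
 * ip4_full_reass_free_ctx above is used when no hash entry exists (yet) */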
always_inline void
ip4_full_reass_free (ip4_full_reass_main_t * rm,
                     ip4_full_reass_per_thread_t * rt,
                     ip4_full_reass_t * reass)
{
  clib_bihash_kv_16_8_t kv = {};
  clib_memcpy_fast (&kv, &reass->key, sizeof (kv.key));
  clib_bihash_add_del_16_8 (&rm->hash, &kv, 0);
  return ip4_full_reass_free_ctx (rt, reass);
}

/* n_left_to_next and to_next are taken as input params, as this function
 * could be called from a graph node which is managing a local copy of these
 * variables; ignoring those and still trying to enqueue the buffers with
 * local variables would cause either a buffer leak or corruption */
always_inline void
ip4_full_reass_drop_all (vlib_main_t *vm, vlib_node_runtime_t *node,
                         ip4_full_reass_t *reass)
{
  u32 range_bi = reass->first_bi;
  vlib_buffer_t *range_b;
  vnet_buffer_opaque_t *range_vnb;
  u32 *to_free = NULL;

  while (~0 != range_bi)
    {
      range_b = vlib_get_buffer (vm, range_bi);
      range_vnb = vnet_buffer (range_b);

      if (~0 != range_bi)
        {
          vec_add1 (to_free, range_bi);
        }

      range_bi = range_vnb->ip.reass.next_range_bi;
    }

  /* send to error_next_index */
  if (~0 != reass->error_next_index &&
      reass->error_next_index < node->n_next_nodes)
    {
      u32 n_free = vec_len (to_free);

      /* record number of packets sent to custom app */
      vlib_node_increment_counter (vm, node->node_index,
                                   IP4_ERROR_REASS_TO_CUSTOM_APP, n_free);

      if (node->flags & VLIB_NODE_FLAG_TRACE)
        for (u32 i = 0; i < n_free; i++)
          {
            vlib_buffer_t *b = vlib_get_buffer (vm, to_free[i]);
            if (PREDICT_FALSE (b->flags & VLIB_BUFFER_IS_TRACED))
              ip4_full_reass_add_trace (vm, node, reass, to_free[i],
                                        RANGE_DISCARD, 0, ~0);
          }

      vlib_buffer_enqueue_to_single_next (vm, node, to_free,
                                          reass->error_next_index, n_free);
    }
  else
    {
      vlib_buffer_free (vm, to_free, vec_len (to_free));
    }
  vec_free (to_free);
}

always_inline void
sanitize_reass_buffers_add_missing (vlib_main_t *vm, ip4_full_reass_t *reass,
                                    u32 *bi0)
{
  u32 range_bi = reass->first_bi;
  vlib_buffer_t *range_b;
  vnet_buffer_opaque_t *range_vnb;

  while (~0 != range_bi)
    {
      range_b = vlib_get_buffer (vm, range_bi);
      range_vnb = vnet_buffer (range_b);
      u32 bi = range_bi;
      if (~0 != bi)
        {
          if (bi == *bi0)
            *bi0 = ~0;
          if (range_b->flags & VLIB_BUFFER_NEXT_PRESENT)
            {
              u32 _bi = bi;
              vlib_buffer_t *_b = vlib_get_buffer (vm, _bi);
              while (_b->flags & VLIB_BUFFER_NEXT_PRESENT)
                {
                  if (_b->next_buffer != range_vnb->ip.reass.next_range_bi)
                    {
                      _bi = _b->next_buffer;
                      _b = vlib_get_buffer (vm, _bi);
                    }
                  else
                    {
                      _b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
                      break;
                    }
                }
            }
          range_bi = range_vnb->ip.reass.next_range_bi;
        }
    }
  if (*bi0 != ~0)
    {
      vlib_buffer_t *fb = vlib_get_buffer (vm, *bi0);
      vnet_buffer_opaque_t *fvnb = vnet_buffer (fb);
      if (~0 != reass->first_bi)
        {
          fvnb->ip.reass.next_range_bi = reass->first_bi;
          reass->first_bi = *bi0;
        }
      else
        {
          reass->first_bi = *bi0;
          fvnb->ip.reass.next_range_bi = ~0;
        }
      *bi0 = ~0;
    }
}

always_inline void
ip4_full_reass_init (ip4_full_reass_t * reass)
{
  reass->first_bi = ~0;
  reass->last_packet_octet = ~0;
  reass->data_len = 0;
  reass->next_index = ~0;
  reass->error_next_index = ~0;
}

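/* find an existing reassembly context or create a new one; sets *do_handoff
 * when the context lives in another thread's pool, in which case the caller
 * must hand the buffer off to that thread; a context which timed out is
 * dropped and a fresh one is created */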
always_inline ip4_full_reass_t *
ip4_full_reass_find_or_create (vlib_main_t *vm, vlib_node_runtime_t *node,
                               ip4_full_reass_main_t *rm,
                               ip4_full_reass_per_thread_t *rt,
                               ip4_full_reass_kv_t *kv, u8 *do_handoff)
{
  ip4_full_reass_t *reass;
  f64 now;

again:

  reass = NULL;
  now = vlib_time_now (vm);
  if (!clib_bihash_search_16_8 (&rm->hash, &kv->kv, &kv->kv))
    {
      if (vm->thread_index != kv->v.memory_owner_thread_index)
        {
          *do_handoff = 1;
          return NULL;
        }
      reass =
        pool_elt_at_index (rm->per_thread_data
                           [kv->v.memory_owner_thread_index].pool,
                           kv->v.reass_index);

      if (now > reass->last_heard + rm->timeout)
        {
          vlib_node_increment_counter (vm, node->node_index,
                                       IP4_ERROR_REASS_TIMEOUT, 1);
          ip4_full_reass_drop_all (vm, node, reass);
          ip4_full_reass_free (rm, rt, reass);
          reass = NULL;
        }
    }

  if (reass)
    {
      reass->last_heard = now;
      return reass;
    }

  if (rt->reass_n >= rm->max_reass_n)
    {
      reass = NULL;
      return reass;
    }
  else
    {
      pool_get (rt->pool, reass);
      clib_memset (reass, 0, sizeof (*reass));
      reass->id = ((u64) vm->thread_index * 1000000000) + rt->id_counter;
      reass->memory_owner_thread_index = vm->thread_index;
      ++rt->id_counter;
      ip4_full_reass_init (reass);
      ++rt->reass_n;
    }

  clib_memcpy_fast (&reass->key, &kv->kv.key, sizeof (reass->key));
  kv->v.reass_index = (reass - rt->pool);
  kv->v.memory_owner_thread_index = vm->thread_index;
  reass->last_heard = now;

  int rv = clib_bihash_add_del_16_8 (&rm->hash, &kv->kv, 2);
  if (rv)
    {
      ip4_full_reass_free_ctx (rt, reass);
      reass = NULL;
      // if another worker already created a context, work with that copy
      if (-2 == rv)
        goto again;
    }

  return reass;
}

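/* glue all ranges into one buffer chain, trimming per-range overlaps and the
 * per-fragment ip4 headers (the first one is kept), then fix up the ip4
 * header (flags/offset, length, checksum) of the reassembled packet */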
always_inline ip4_full_reass_rc_t
ip4_full_reass_finalize (vlib_main_t * vm, vlib_node_runtime_t * node,
                         ip4_full_reass_main_t * rm,
                         ip4_full_reass_per_thread_t * rt,
                         ip4_full_reass_t * reass, u32 * bi0,
                         u32 * next0, u32 * error0, bool is_custom)
{
  vlib_buffer_t *first_b = vlib_get_buffer (vm, reass->first_bi);
  vlib_buffer_t *last_b = NULL;
  u32 sub_chain_bi = reass->first_bi;
  u32 total_length = 0;
  do
    {
      u32 tmp_bi = sub_chain_bi;
      vlib_buffer_t *tmp = vlib_get_buffer (vm, tmp_bi);
      ip4_header_t *ip = vlib_buffer_get_current (tmp);
      vnet_buffer_opaque_t *vnb = vnet_buffer (tmp);
      if (!(vnb->ip.reass.range_first >= vnb->ip.reass.fragment_first) &&
          !(vnb->ip.reass.range_last > vnb->ip.reass.fragment_first))
        {
          return IP4_REASS_RC_INTERNAL_ERROR;
        }

      u32 data_len = ip4_full_reass_buffer_get_data_len (tmp);
      u32 trim_front =
        ip4_header_bytes (ip) + ip4_full_reass_buffer_get_data_offset (tmp);
      u32 trim_end =
        vlib_buffer_length_in_chain (vm, tmp) - trim_front - data_len;
      if (tmp_bi == reass->first_bi)
        {
          /* first buffer - keep ip4 header */
          if (0 != ip4_full_reass_buffer_get_data_offset (tmp))
            {
              return IP4_REASS_RC_INTERNAL_ERROR;
            }
          trim_front = 0;
          trim_end = vlib_buffer_length_in_chain (vm, tmp) - data_len -
            ip4_header_bytes (ip);
          if (!(vlib_buffer_length_in_chain (vm, tmp) - trim_end > 0))
            {
              return IP4_REASS_RC_INTERNAL_ERROR;
            }
        }
      u32 keep_data =
        vlib_buffer_length_in_chain (vm, tmp) - trim_front - trim_end;
      while (1)
        {
          if (trim_front)
            {
              if (trim_front > tmp->current_length)
                {
                  /* drop whole buffer */
                  u32 to_be_freed_bi = tmp_bi;
                  trim_front -= tmp->current_length;
                  if (!(tmp->flags & VLIB_BUFFER_NEXT_PRESENT))
                    {
                      return IP4_REASS_RC_INTERNAL_ERROR;
                    }
                  tmp->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
                  tmp_bi = tmp->next_buffer;
                  tmp->next_buffer = 0;
                  tmp = vlib_get_buffer (vm, tmp_bi);
                  vlib_buffer_free_one (vm, to_be_freed_bi);
                  continue;
                }
              else
                {
                  vlib_buffer_advance (tmp, trim_front);
                  trim_front = 0;
                }
            }
          if (keep_data)
            {
              if (last_b)
                {
                  last_b->flags |= VLIB_BUFFER_NEXT_PRESENT;
                  last_b->next_buffer = tmp_bi;
                }
              last_b = tmp;
              if (keep_data <= tmp->current_length)
                {
                  tmp->current_length = keep_data;
                  keep_data = 0;
                }
              else
                {
                  keep_data -= tmp->current_length;
                  if (!(tmp->flags & VLIB_BUFFER_NEXT_PRESENT))
                    {
                      return IP4_REASS_RC_INTERNAL_ERROR;
                    }
                }
              total_length += tmp->current_length;
              if (tmp->flags & VLIB_BUFFER_NEXT_PRESENT)
                {
                  tmp_bi = tmp->next_buffer;
                  tmp = vlib_get_buffer (vm, tmp->next_buffer);
                }
              else
                {
                  break;
                }
            }
          else
            {
              u32 to_be_freed_bi = tmp_bi;
              if (reass->first_bi == tmp_bi)
                {
                  return IP4_REASS_RC_INTERNAL_ERROR;
                }
              if (tmp->flags & VLIB_BUFFER_NEXT_PRESENT)
                {
                  tmp->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
                  tmp_bi = tmp->next_buffer;
                  tmp->next_buffer = 0;
                  tmp = vlib_get_buffer (vm, tmp_bi);
                  vlib_buffer_free_one (vm, to_be_freed_bi);
                }
              else
                {
                  tmp->next_buffer = 0;
                  vlib_buffer_free_one (vm, to_be_freed_bi);
                  break;
                }
            }
        }
      sub_chain_bi =
        vnet_buffer (vlib_get_buffer (vm, sub_chain_bi))->ip.
        reass.next_range_bi;
    }
  while (~0 != sub_chain_bi);

  if (!last_b)
    {
      return IP4_REASS_RC_INTERNAL_ERROR;
    }
  last_b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;

  if (total_length < first_b->current_length)
    {
      return IP4_REASS_RC_INTERNAL_ERROR;
    }
  total_length -= first_b->current_length;
  first_b->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
  first_b->total_length_not_including_first_buffer = total_length;
  ip4_header_t *ip = vlib_buffer_get_current (first_b);
  ip->flags_and_fragment_offset = 0;
  ip->length = clib_host_to_net_u16 (first_b->current_length + total_length);
  ip->checksum = ip4_header_checksum (ip);
  if (!vlib_buffer_chain_linearize (vm, first_b))
    {
      return IP4_REASS_RC_NO_BUF;
    }
  // reset to reconstruct the mbuf linking
  first_b->flags &= ~VLIB_BUFFER_EXT_HDR_VALID;
  if (PREDICT_FALSE (first_b->flags & VLIB_BUFFER_IS_TRACED))
    {
      ip4_full_reass_add_trace (vm, node, reass, reass->first_bi, FINALIZE, 0,
                                ~0);
#if 0
      // following code does a hexdump of packet fragments to stdout ...
      do
        {
          u32 bi = reass->first_bi;
          u8 *s = NULL;
          while (~0 != bi)
            {
              vlib_buffer_t *b = vlib_get_buffer (vm, bi);
              s = format (s, "%u: %U\n", bi, format_hexdump,
                          vlib_buffer_get_current (b), b->current_length);
              if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
                {
                  bi = b->next_buffer;
                }
              else
                {
                  break;
                }
            }
          printf ("%.*s\n", vec_len (s), s);
          fflush (stdout);
          vec_free (s);
        }
      while (0);
#endif
    }
  *bi0 = reass->first_bi;
  if (!is_custom)
    {
      *next0 = IP4_FULL_REASS_NEXT_INPUT;
    }
  else
    {
      *next0 = reass->next_index;
    }
  vnet_buffer (first_b)->ip.reass.estimated_mtu = reass->min_fragment_length;

  /* Keep track of number of successfully reassembled packets and number of
   * fragments reassembled */
  vlib_node_increment_counter (vm, node->node_index, IP4_ERROR_REASS_SUCCESS,
                               1);

  vlib_node_increment_counter (vm, node->node_index,
                               IP4_ERROR_REASS_FRAGMENTS_REASSEMBLED,
                               reass->fragments_n);

  *error0 = IP4_ERROR_NONE;
  ip4_full_reass_free (rm, rt, reass);
  reass = NULL;
  return IP4_REASS_RC_OK;
}

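/* link a new range into the sorted range chain (either after prev_range_bi
 * or at the head of the chain) and account for its data length */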
always_inline ip4_full_reass_rc_t
ip4_full_reass_insert_range_in_chain (vlib_main_t * vm,
                                      ip4_full_reass_t * reass,
                                      u32 prev_range_bi, u32 new_next_bi)
{
  vlib_buffer_t *new_next_b = vlib_get_buffer (vm, new_next_bi);
  vnet_buffer_opaque_t *new_next_vnb = vnet_buffer (new_next_b);
  if (~0 != prev_range_bi)
    {
      vlib_buffer_t *prev_b = vlib_get_buffer (vm, prev_range_bi);
      vnet_buffer_opaque_t *prev_vnb = vnet_buffer (prev_b);
      new_next_vnb->ip.reass.next_range_bi = prev_vnb->ip.reass.next_range_bi;
      prev_vnb->ip.reass.next_range_bi = new_next_bi;
    }
  else
    {
      if (~0 != reass->first_bi)
        {
          new_next_vnb->ip.reass.next_range_bi = reass->first_bi;
        }
      reass->first_bi = new_next_bi;
    }
  vnet_buffer_opaque_t *vnb = vnet_buffer (new_next_b);
  if (!(vnb->ip.reass.range_first >= vnb->ip.reass.fragment_first) &&
      !(vnb->ip.reass.range_last > vnb->ip.reass.fragment_first))
    {
      return IP4_REASS_RC_INTERNAL_ERROR;
    }
  reass->data_len += ip4_full_reass_buffer_get_data_len (new_next_b);
  return IP4_REASS_RC_OK;
}

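/* unlink a range from the range chain, subtract its data length from the
 * reassembly and free all buffers belonging to the range */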
always_inline ip4_full_reass_rc_t
ip4_full_reass_remove_range_from_chain (vlib_main_t * vm,
                                        vlib_node_runtime_t * node,
                                        ip4_full_reass_t * reass,
                                        u32 prev_range_bi, u32 discard_bi)
{
  vlib_buffer_t *discard_b = vlib_get_buffer (vm, discard_bi);
  vnet_buffer_opaque_t *discard_vnb = vnet_buffer (discard_b);
  if (~0 != prev_range_bi)
    {
      vlib_buffer_t *prev_b = vlib_get_buffer (vm, prev_range_bi);
      vnet_buffer_opaque_t *prev_vnb = vnet_buffer (prev_b);
      if (!(prev_vnb->ip.reass.next_range_bi == discard_bi))
        {
          return IP4_REASS_RC_INTERNAL_ERROR;
        }
      prev_vnb->ip.reass.next_range_bi = discard_vnb->ip.reass.next_range_bi;
    }
  else
    {
      reass->first_bi = discard_vnb->ip.reass.next_range_bi;
    }
  vnet_buffer_opaque_t *vnb = vnet_buffer (discard_b);
  if (!(vnb->ip.reass.range_first >= vnb->ip.reass.fragment_first) &&
      !(vnb->ip.reass.range_last > vnb->ip.reass.fragment_first))
    {
      return IP4_REASS_RC_INTERNAL_ERROR;
    }
  reass->data_len -= ip4_full_reass_buffer_get_data_len (discard_b);
  while (1)
    {
      u32 to_be_freed_bi = discard_bi;
      if (PREDICT_FALSE (discard_b->flags & VLIB_BUFFER_IS_TRACED))
        {
          ip4_full_reass_add_trace (vm, node, reass, discard_bi, RANGE_DISCARD,
                                    0, ~0);
        }
      if (discard_b->flags & VLIB_BUFFER_NEXT_PRESENT)
        {
          discard_b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
          discard_bi = discard_b->next_buffer;
          discard_b->next_buffer = 0;
          discard_b = vlib_get_buffer (vm, discard_bi);
          vlib_buffer_free_one (vm, to_be_freed_bi);
        }
      else
        {
          discard_b->next_buffer = 0;
          vlib_buffer_free_one (vm, to_be_freed_bi);
          break;
        }
    }
  return IP4_REASS_RC_OK;
}

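/* process one fragment: insert it into the range chain of its reassembly,
 * resolving overlaps by shrinking or discarding existing ranges; once the
 * last octet is known and all data has been collected, finalize the
 * reassembly; returns IP4_REASS_RC_HANDOFF when the finished packet must be
 * sent out by a different thread than the one owning the context memory */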
always_inline ip4_full_reass_rc_t
ip4_full_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
                       ip4_full_reass_main_t * rm,
                       ip4_full_reass_per_thread_t * rt,
                       ip4_full_reass_t * reass, u32 * bi0, u32 * next0,
                       u32 * error0, bool is_custom, u32 * handoff_thread_idx)
{
  vlib_buffer_t *fb = vlib_get_buffer (vm, *bi0);
  vnet_buffer_opaque_t *fvnb = vnet_buffer (fb);
  if (is_custom)
    {
      // store (error_)next_index before it's overwritten
      reass->next_index = fvnb->ip.reass.next_index;
      reass->error_next_index = fvnb->ip.reass.error_next_index;
    }
  ip4_full_reass_rc_t rc = IP4_REASS_RC_OK;
  int consumed = 0;
  ip4_header_t *fip = vlib_buffer_get_current (fb);
  const u32 fragment_first = ip4_get_fragment_offset_bytes (fip);
  const u32 fragment_length =
    clib_net_to_host_u16 (fip->length) - ip4_header_bytes (fip);
  const u32 fragment_last = fragment_first + fragment_length - 1;
  fvnb->ip.reass.fragment_first = fragment_first;
  fvnb->ip.reass.fragment_last = fragment_last;
  int more_fragments = ip4_get_fragment_more (fip);
  u32 candidate_range_bi = reass->first_bi;
  u32 prev_range_bi = ~0;
  fvnb->ip.reass.range_first = fragment_first;
  fvnb->ip.reass.range_last = fragment_last;
  fvnb->ip.reass.next_range_bi = ~0;
  if (!more_fragments)
    {
      reass->last_packet_octet = fragment_last;
    }
  if (~0 == reass->first_bi)
    {
      // starting a new reassembly
      rc =
        ip4_full_reass_insert_range_in_chain (vm, reass, prev_range_bi, *bi0);
      if (IP4_REASS_RC_OK != rc)
        {
          return rc;
        }
      if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
        {
          ip4_full_reass_add_trace (vm, node, reass, *bi0, RANGE_NEW, 0, ~0);
        }
      *bi0 = ~0;
      reass->min_fragment_length = clib_net_to_host_u16 (fip->length);
      reass->fragments_n = 1;
      return IP4_REASS_RC_OK;
    }
  reass->min_fragment_length =
    clib_min (clib_net_to_host_u16 (fip->length),
              fvnb->ip.reass.estimated_mtu);
  while (~0 != candidate_range_bi)
    {
      vlib_buffer_t *candidate_b = vlib_get_buffer (vm, candidate_range_bi);
      vnet_buffer_opaque_t *candidate_vnb = vnet_buffer (candidate_b);
      if (fragment_first > candidate_vnb->ip.reass.range_last)
        {
          // this fragment starts after the candidate range
          prev_range_bi = candidate_range_bi;
          candidate_range_bi = candidate_vnb->ip.reass.next_range_bi;
          if (candidate_vnb->ip.reass.range_last < fragment_last &&
              ~0 == candidate_range_bi)
            {
              // special case - this fragment falls beyond all known ranges
              rc = ip4_full_reass_insert_range_in_chain (vm, reass,
                                                         prev_range_bi, *bi0);
              if (IP4_REASS_RC_OK != rc)
                {
                  return rc;
                }
              consumed = 1;
              break;
            }
          continue;
        }
      if (fragment_last < candidate_vnb->ip.reass.range_first)
        {
          // this fragment ends before candidate range without any overlap
          rc = ip4_full_reass_insert_range_in_chain (vm, reass, prev_range_bi,
                                                     *bi0);
          if (IP4_REASS_RC_OK != rc)
            {
              return rc;
            }
          consumed = 1;
        }
      else
        {
          if (fragment_first >= candidate_vnb->ip.reass.range_first &&
              fragment_last <= candidate_vnb->ip.reass.range_last)
            {
              // this fragment is a (sub)part of existing range, ignore it
              if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
                {
                  ip4_full_reass_add_trace (vm, node, reass, *bi0,
                                            RANGE_OVERLAP, 0, ~0);
                }
              break;
            }
          int discard_candidate = 0;
          if (fragment_first < candidate_vnb->ip.reass.range_first)
            {
              u32 overlap =
                fragment_last - candidate_vnb->ip.reass.range_first + 1;
              if (overlap < ip4_full_reass_buffer_get_data_len (candidate_b))
                {
                  candidate_vnb->ip.reass.range_first += overlap;
                  if (reass->data_len < overlap)
                    {
                      return IP4_REASS_RC_INTERNAL_ERROR;
                    }
                  reass->data_len -= overlap;
                  if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
                    {
                      ip4_full_reass_add_trace (vm, node, reass,
                                                candidate_range_bi,
                                                RANGE_SHRINK, 0, ~0);
                    }
                  rc = ip4_full_reass_insert_range_in_chain (
                    vm, reass, prev_range_bi, *bi0);
                  if (IP4_REASS_RC_OK != rc)
                    {
                      return rc;
                    }
                  consumed = 1;
                }
              else
                {
                  discard_candidate = 1;
                }
            }
          else if (fragment_last > candidate_vnb->ip.reass.range_last)
            {
              u32 overlap =
                candidate_vnb->ip.reass.range_last - fragment_first + 1;
              if (overlap < ip4_full_reass_buffer_get_data_len (candidate_b))
                {
                  fvnb->ip.reass.range_first += overlap;
                  if (~0 != candidate_vnb->ip.reass.next_range_bi)
                    {
                      prev_range_bi = candidate_range_bi;
                      candidate_range_bi =
                        candidate_vnb->ip.reass.next_range_bi;
                      continue;
                    }
                  else
                    {
                      // special case - last range discarded
                      rc = ip4_full_reass_insert_range_in_chain (
                        vm, reass, candidate_range_bi, *bi0);
                      if (IP4_REASS_RC_OK != rc)
                        {
                          return rc;
                        }
                      consumed = 1;
                    }
                }
              else
                {
                  discard_candidate = 1;
                }
            }
          else
            {
              discard_candidate = 1;
            }
          if (discard_candidate)
            {
              u32 next_range_bi = candidate_vnb->ip.reass.next_range_bi;
              // discard candidate range, probe next range
              rc = ip4_full_reass_remove_range_from_chain (
                vm, node, reass, prev_range_bi, candidate_range_bi);
              if (IP4_REASS_RC_OK != rc)
                {
                  return rc;
                }
              if (~0 != next_range_bi)
                {
                  candidate_range_bi = next_range_bi;
                  continue;
                }
              else
                {
                  // special case - last range discarded
                  rc = ip4_full_reass_insert_range_in_chain (
                    vm, reass, prev_range_bi, *bi0);
                  if (IP4_REASS_RC_OK != rc)
                    {
                      return rc;
                    }
                  consumed = 1;
                }
            }
        }
      break;
    }
  ++reass->fragments_n;
  if (consumed)
    {
      if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
        {
          ip4_full_reass_add_trace (vm, node, reass, *bi0, RANGE_NEW, 0, ~0);
        }
    }
  if (~0 != reass->last_packet_octet &&
      reass->data_len == reass->last_packet_octet + 1)
    {
      *handoff_thread_idx = reass->sendout_thread_index;
      int handoff =
        reass->memory_owner_thread_index != reass->sendout_thread_index;
      rc =
        ip4_full_reass_finalize (vm, node, rm, rt, reass, bi0, next0, error0,
                                 is_custom);
      if (IP4_REASS_RC_OK == rc && handoff)
        {
          rc = IP4_REASS_RC_HANDOFF;
        }
    }
  else
    {
      if (consumed)
        {
          *bi0 = ~0;
          if (reass->fragments_n > rm->max_reass_len)
            {
              rc = IP4_REASS_RC_TOO_MANY_FRAGMENTS;
            }
        }
      else
        {
          *next0 = IP4_FULL_REASS_NEXT_DROP;
          *error0 = IP4_ERROR_REASS_DUPLICATE_FRAGMENT;
        }
    }
  return rc;
}

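/* packet processing loop shared by the normal, local, feature and custom
 * nodes: whole (unfragmented) packets pass straight through, fragments are
 * matched to a reassembly context and either consumed, handed off to the
 * owning thread, or enqueued as part of a finished packet */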
1160always_inline uword
Klement Sekera01c1fa42021-12-14 18:25:11 +00001161ip4_full_reass_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
1162 vlib_frame_t *frame, ip4_full_reass_node_type_t type,
1163 bool is_local)
Klement Sekera75e7d132017-09-20 08:26:30 +02001164{
1165 u32 *from = vlib_frame_vector_args (frame);
Damjan Marion65d25b42022-09-14 18:59:47 +02001166 u32 n_left, n_next = 0, to_next[VLIB_FRAME_SIZE];
Klement Sekera896c8962019-06-24 11:52:49 +00001167 ip4_full_reass_main_t *rm = &ip4_full_reass_main;
1168 ip4_full_reass_per_thread_t *rt = &rm->per_thread_data[vm->thread_index];
Damjan Marion65d25b42022-09-14 18:59:47 +02001169 u16 nexts[VLIB_FRAME_SIZE];
1170
Klement Sekera4c533132018-02-22 11:41:12 +01001171 clib_spinlock_lock (&rt->lock);
Klement Sekera75e7d132017-09-20 08:26:30 +02001172
Damjan Marion65d25b42022-09-14 18:59:47 +02001173 n_left = frame->n_vectors;
1174 while (n_left > 0)
Klement Sekera75e7d132017-09-20 08:26:30 +02001175 {
Damjan Marion65d25b42022-09-14 18:59:47 +02001176 u32 bi0;
1177 vlib_buffer_t *b0;
1178 u32 next0;
1179 u32 error0 = IP4_ERROR_NONE;
Klement Sekera75e7d132017-09-20 08:26:30 +02001180
Damjan Marion65d25b42022-09-14 18:59:47 +02001181 bi0 = from[0];
1182 b0 = vlib_get_buffer (vm, bi0);
1183
1184 ip4_header_t *ip0 = vlib_buffer_get_current (b0);
1185 if (!ip4_get_fragment_more (ip0) && !ip4_get_fragment_offset (ip0))
Klement Sekera75e7d132017-09-20 08:26:30 +02001186 {
Damjan Marion65d25b42022-09-14 18:59:47 +02001187 // this is a whole packet - no fragmentation
1188 if (CUSTOM != type)
Klement Sekera75e7d132017-09-20 08:26:30 +02001189 {
Damjan Marion65d25b42022-09-14 18:59:47 +02001190 next0 = IP4_FULL_REASS_NEXT_INPUT;
Klement Sekera75e7d132017-09-20 08:26:30 +02001191 }
1192 else
1193 {
Damjan Marion65d25b42022-09-14 18:59:47 +02001194 next0 = vnet_buffer (b0)->ip.reass.next_index;
Klement Sekera4c533132018-02-22 11:41:12 +01001195 }
Damjan Marion65d25b42022-09-14 18:59:47 +02001196 ip4_full_reass_add_trace (vm, node, NULL, bi0, PASSTHROUGH, 0, ~0);
1197 goto packet_enqueue;
Klement Sekera75e7d132017-09-20 08:26:30 +02001198 }
1199
Damjan Marion65d25b42022-09-14 18:59:47 +02001200 if (is_local && !rm->is_local_reass_enabled)
1201 {
1202 next0 = IP4_FULL_REASS_NEXT_DROP;
1203 goto packet_enqueue;
1204 }
1205
1206 const u32 fragment_first = ip4_get_fragment_offset_bytes (ip0);
1207 const u32 fragment_length =
1208 clib_net_to_host_u16 (ip0->length) - ip4_header_bytes (ip0);
1209 const u32 fragment_last = fragment_first + fragment_length - 1;
1210
1211 /* Keep track of received fragments */
1212 vlib_node_increment_counter (vm, node->node_index,
1213 IP4_ERROR_REASS_FRAGMENTS_RCVD, 1);
1214
1215 if (fragment_first > fragment_last ||
1216 fragment_first + fragment_length > UINT16_MAX - 20 ||
1217 (fragment_length < 8 && // 8 is minimum frag length per RFC 791
1218 ip4_get_fragment_more (ip0)))
1219 {
1220 next0 = IP4_FULL_REASS_NEXT_DROP;
1221 error0 = IP4_ERROR_REASS_MALFORMED_PACKET;
1222 goto packet_enqueue;
1223 }
1224
Florin Coras639beb92023-05-12 16:58:44 -07001225 u32 fib_index = (vnet_buffer (b0)->sw_if_index[VLIB_TX] == (u32) ~0) ?
1226 vec_elt (ip4_main.fib_index_by_sw_if_index,
1227 vnet_buffer (b0)->sw_if_index[VLIB_RX]) :
1228 vnet_buffer (b0)->sw_if_index[VLIB_TX];
Damjan Marion65d25b42022-09-14 18:59:47 +02001229
1230 ip4_full_reass_kv_t kv = { .k.fib_index = fib_index,
1231 .k.src.as_u32 = ip0->src_address.as_u32,
1232 .k.dst.as_u32 = ip0->dst_address.as_u32,
1233 .k.frag_id = ip0->fragment_id,
1234 .k.proto = ip0->protocol
1235
1236 };
1237 u8 do_handoff = 0;
1238
1239 ip4_full_reass_t *reass =
1240 ip4_full_reass_find_or_create (vm, node, rm, rt, &kv, &do_handoff);
1241
1242 if (reass)
1243 {
1244 const u32 fragment_first = ip4_get_fragment_offset_bytes (ip0);
1245 if (0 == fragment_first)
1246 {
1247 reass->sendout_thread_index = vm->thread_index;
1248 }
1249 }
1250
1251 if (PREDICT_FALSE (do_handoff))
1252 {
1253 next0 = IP4_FULL_REASS_NEXT_HANDOFF;
1254 vnet_buffer (b0)->ip.reass.owner_thread_index =
1255 kv.v.memory_owner_thread_index;
1256 }
1257 else if (reass)
1258 {
1259 u32 handoff_thread_idx;
1260 u32 counter = ~0;
1261 switch (ip4_full_reass_update (vm, node, rm, rt, reass, &bi0, &next0,
1262 &error0, CUSTOM == type,
1263 &handoff_thread_idx))
1264 {
1265 case IP4_REASS_RC_OK:
1266 /* nothing to do here */
1267 break;
1268 case IP4_REASS_RC_HANDOFF:
1269 next0 = IP4_FULL_REASS_NEXT_HANDOFF;
1270 b0 = vlib_get_buffer (vm, bi0);
1271 vnet_buffer (b0)->ip.reass.owner_thread_index =
1272 handoff_thread_idx;
1273 break;
1274 case IP4_REASS_RC_TOO_MANY_FRAGMENTS:
1275 counter = IP4_ERROR_REASS_FRAGMENT_CHAIN_TOO_LONG;
1276 break;
1277 case IP4_REASS_RC_NO_BUF:
1278 counter = IP4_ERROR_REASS_NO_BUF;
1279 break;
1280 case IP4_REASS_RC_INTERNAL_ERROR:
1281 counter = IP4_ERROR_REASS_INTERNAL_ERROR;
1282 /* Sanitization is needed in internal error cases only, as
1283 * the incoming packet is already dropped in other cases,
1284 * also adding bi0 back to the reassembly list, fixes the
1285 * leaking of buffers during internal errors.
1286 *
1287 * Also it doesnt make sense to send these buffers custom
1288 * app, these fragments are with internal errors */
1289 sanitize_reass_buffers_add_missing (vm, reass, &bi0);
1290 reass->error_next_index = ~0;
1291 break;
1292 }
1293
1294 if (~0 != counter)
1295 {
1296 vlib_node_increment_counter (vm, node->node_index, counter, 1);
1297 ip4_full_reass_drop_all (vm, node, reass);
1298 ip4_full_reass_free (rm, rt, reass);
1299 goto next_packet;
1300 }
1301 }
1302 else
1303 {
1304 next0 = IP4_FULL_REASS_NEXT_DROP;
1305 error0 = IP4_ERROR_REASS_LIMIT_REACHED;
1306 }
1307
1308 packet_enqueue:
1309
      if (bi0 != ~0)
	{
	  /* bi0 might have been updated by reass_finalize, reload */
	  b0 = vlib_get_buffer (vm, bi0);
	  if (IP4_ERROR_NONE != error0)
	    {
	      b0->error = node->errors[error0];
	    }

	  if (next0 == IP4_FULL_REASS_NEXT_HANDOFF)
	    {
	      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
		{
		  ip4_full_reass_add_trace (
		    vm, node, NULL, bi0, HANDOFF, 0,
		    vnet_buffer (b0)->ip.reass.owner_thread_index);
		}
	    }
	  else if (FEATURE == type && IP4_ERROR_NONE == error0)
	    {
	      vnet_feature_next (&next0, b0);
	    }

	  /* Also increment the to-custom-app counter, since this fragment
	   * is being delivered to the application */
	  if (CUSTOM == type)
	    {
	      vlib_node_increment_counter (vm, node->node_index,
					   IP4_ERROR_REASS_TO_CUSTOM_APP, 1);
	    }

	  to_next[n_next] = bi0;
	  nexts[n_next] = next0;
	  n_next++;
	  IP4_REASS_DEBUG_BUFFER (bi0, enqueue_next);
	}

    next_packet:
      from += 1;
      n_left -= 1;
    }

  clib_spinlock_unlock (&rt->lock);

  vlib_buffer_enqueue_to_next (vm, node, to_next, nexts, n_next);
  return frame->n_vectors;
}

VLIB_NODE_FN (ip4_full_reass_node) (vlib_main_t * vm,
				    vlib_node_runtime_t * node,
				    vlib_frame_t * frame)
{
  return ip4_full_reass_inline (vm, node, frame, NORMAL, false /* is_local */);
}

VLIB_REGISTER_NODE (ip4_full_reass_node) = {
  .name = "ip4-full-reassembly",
  .vector_size = sizeof (u32),
  .format_trace = format_ip4_full_reass_trace,
  .n_errors = IP4_N_ERROR,
  .error_counters = ip4_error_counters,
  .n_next_nodes = IP4_FULL_REASS_N_NEXT,
  .next_nodes =
    {
      [IP4_FULL_REASS_NEXT_INPUT] = "ip4-input",
      [IP4_FULL_REASS_NEXT_DROP] = "ip4-drop",
      [IP4_FULL_REASS_NEXT_HANDOFF] = "ip4-full-reassembly-handoff",
    },
};

VLIB_NODE_FN (ip4_local_full_reass_node)
(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
{
  return ip4_full_reass_inline (vm, node, frame, NORMAL, true /* is_local */);
}

VLIB_REGISTER_NODE (ip4_local_full_reass_node) = {
  .name = "ip4-local-full-reassembly",
  .vector_size = sizeof (u32),
  .format_trace = format_ip4_full_reass_trace,
  .n_errors = IP4_N_ERROR,
  .error_counters = ip4_error_counters,
  .n_next_nodes = IP4_FULL_REASS_N_NEXT,
  .next_nodes =
    {
      [IP4_FULL_REASS_NEXT_INPUT] = "ip4-input",
      [IP4_FULL_REASS_NEXT_DROP] = "ip4-drop",
      [IP4_FULL_REASS_NEXT_HANDOFF] = "ip4-local-full-reassembly-handoff",
    },
};

VLIB_NODE_FN (ip4_full_reass_node_feature) (vlib_main_t * vm,
					    vlib_node_runtime_t * node,
					    vlib_frame_t * frame)
{
  return ip4_full_reass_inline (vm, node, frame, FEATURE,
				false /* is_local */);
}

VLIB_REGISTER_NODE (ip4_full_reass_node_feature) = {
  .name = "ip4-full-reassembly-feature",
  .vector_size = sizeof (u32),
  .format_trace = format_ip4_full_reass_trace,
  .n_errors = IP4_N_ERROR,
  .error_counters = ip4_error_counters,
  .n_next_nodes = IP4_FULL_REASS_N_NEXT,
  .next_nodes =
    {
      [IP4_FULL_REASS_NEXT_INPUT] = "ip4-input",
      [IP4_FULL_REASS_NEXT_DROP] = "ip4-drop",
      [IP4_FULL_REASS_NEXT_HANDOFF] = "ip4-full-reass-feature-hoff",
    },
};

VNET_FEATURE_INIT (ip4_full_reass_feature, static) = {
  .arc_name = "ip4-unicast",
  .node_name = "ip4-full-reassembly-feature",
  .runs_before = VNET_FEATURES ("ip4-lookup", "ipsec4-input-feature"),
  .runs_after = 0,
};

VLIB_NODE_FN (ip4_full_reass_node_custom) (vlib_main_t * vm,
					   vlib_node_runtime_t * node,
					   vlib_frame_t * frame)
{
  return ip4_full_reass_inline (vm, node, frame, CUSTOM, false /* is_local */);
}

VLIB_REGISTER_NODE (ip4_full_reass_node_custom) = {
  .name = "ip4-full-reassembly-custom",
  .vector_size = sizeof (u32),
  .format_trace = format_ip4_full_reass_trace,
  .n_errors = IP4_N_ERROR,
  .error_counters = ip4_error_counters,
  .n_next_nodes = IP4_FULL_REASS_N_NEXT,
  .next_nodes =
    {
      [IP4_FULL_REASS_NEXT_INPUT] = "ip4-input",
      [IP4_FULL_REASS_NEXT_DROP] = "ip4-drop",
      [IP4_FULL_REASS_NEXT_HANDOFF] = "ip4-full-reass-custom-hoff",
    },
};
#ifndef CLIB_MARCH_VARIANT
uword
ip4_full_reass_custom_register_next_node (uword node_index)
{
  return vlib_node_add_next (vlib_get_main (),
			     ip4_full_reass_node_custom.index, node_index);
}
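
/*
 * A minimal usage sketch (the plugin names here are hypothetical, not part
 * of this file): a consumer that wants fully reassembled datagrams
 * delivered to its own node registers that node once at init time and
 * remembers the returned next index:
 *
 *   static u32 my_reass_next_index;
 *
 *   static clib_error_t *
 *   my_plugin_init (vlib_main_t *vm)
 *   {
 *     my_reass_next_index =
 *       ip4_full_reass_custom_register_next_node (my_node.index);
 *     return 0;
 *   }
 *
 * Fragments are then fed to the "ip4-full-reassembly-custom" node with the
 * buffer reassembly opaque set up as the custom path expects.
 */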

always_inline u32
ip4_full_reass_get_nbuckets ()
{
  ip4_full_reass_main_t *rm = &ip4_full_reass_main;
  u32 nbuckets;
  u8 i;

  /* need more mem with more workers */
  nbuckets = (u32) (rm->max_reass_n * (vlib_num_workers () + 1) /
		    IP4_REASS_HT_LOAD_FACTOR);

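  /* round nbuckets up to the next power of two, matching how the bihash
   * sizes its bucket array */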
  for (i = 0; i < 31; i++)
    if ((1 << i) >= nbuckets)
      break;
  nbuckets = 1 << i;

  return nbuckets;
}
#endif /* CLIB_MARCH_VARIANT */

typedef enum
{
  IP4_EVENT_CONFIG_CHANGED = 1,
} ip4_full_reass_event_t;

typedef struct
{
  int failure;
  clib_bihash_16_8_t *new_hash;
} ip4_rehash_cb_ctx;

#ifndef CLIB_MARCH_VARIANT
static int
ip4_rehash_cb (clib_bihash_kv_16_8_t * kv, void *_ctx)
{
  ip4_rehash_cb_ctx *ctx = _ctx;
  if (clib_bihash_add_del_16_8 (ctx->new_hash, kv, 1))
    {
      ctx->failure = 1;
    }
  return (BIHASH_WALK_CONTINUE);
}

static void
ip4_full_reass_set_params (u32 timeout_ms, u32 max_reassemblies,
			   u32 max_reassembly_length,
			   u32 expire_walk_interval_ms)
{
  ip4_full_reass_main.timeout_ms = timeout_ms;
  ip4_full_reass_main.timeout = (f64) timeout_ms / (f64) MSEC_PER_SEC;
  ip4_full_reass_main.max_reass_n = max_reassemblies;
  ip4_full_reass_main.max_reass_len = max_reassembly_length;
  ip4_full_reass_main.expire_walk_interval_ms = expire_walk_interval_ms;
}

vnet_api_error_t
ip4_full_reass_set (u32 timeout_ms, u32 max_reassemblies,
		    u32 max_reassembly_length, u32 expire_walk_interval_ms)
{
  u32 old_nbuckets = ip4_full_reass_get_nbuckets ();
  ip4_full_reass_set_params (timeout_ms, max_reassemblies,
			     max_reassembly_length, expire_walk_interval_ms);
  vlib_process_signal_event (ip4_full_reass_main.vlib_main,
			     ip4_full_reass_main.ip4_full_reass_expire_node_idx,
			     IP4_EVENT_CONFIG_CHANGED, 0);
  u32 new_nbuckets = ip4_full_reass_get_nbuckets ();
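  /* Growing the limits may require a bigger hash table: build a new one,
   * copy all existing entries over and swap it in. The table is never
   * shrunk. */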
  if (ip4_full_reass_main.max_reass_n > 0 && new_nbuckets > old_nbuckets)
    {
      clib_bihash_16_8_t new_hash;
      clib_memset (&new_hash, 0, sizeof (new_hash));
      ip4_rehash_cb_ctx ctx;
      ctx.failure = 0;
      ctx.new_hash = &new_hash;
      clib_bihash_init_16_8 (&new_hash, "ip4-dr", new_nbuckets,
			     new_nbuckets * 1024);
      clib_bihash_foreach_key_value_pair_16_8 (&ip4_full_reass_main.hash,
					       ip4_rehash_cb, &ctx);
      if (ctx.failure)
	{
	  clib_bihash_free_16_8 (&new_hash);
	  return -1;
	}
      else
	{
	  clib_bihash_free_16_8 (&ip4_full_reass_main.hash);
	  clib_memcpy_fast (&ip4_full_reass_main.hash, &new_hash,
			    sizeof (ip4_full_reass_main.hash));
	  clib_bihash_copied (&ip4_full_reass_main.hash, &new_hash);
	}
    }
  return 0;
}

vnet_api_error_t
ip4_full_reass_get (u32 * timeout_ms, u32 * max_reassemblies,
		    u32 * max_reassembly_length,
		    u32 * expire_walk_interval_ms)
{
  *timeout_ms = ip4_full_reass_main.timeout_ms;
  *max_reassemblies = ip4_full_reass_main.max_reass_n;
  *max_reassembly_length = ip4_full_reass_main.max_reass_len;
  *expire_walk_interval_ms = ip4_full_reass_main.expire_walk_interval_ms;
  return 0;
}

static clib_error_t *
ip4_full_reass_init_function (vlib_main_t * vm)
{
  ip4_full_reass_main_t *rm = &ip4_full_reass_main;
  clib_error_t *error = 0;
  u32 nbuckets;
  vlib_node_t *node;

  rm->vlib_main = vm;

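  /* One reassembly context pool and lock per worker; the lock serializes
   * the datapath against the expire-walk process, which scans every
   * per-thread pool. */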
  vec_validate (rm->per_thread_data, vlib_num_workers ());
  ip4_full_reass_per_thread_t *rt;
  vec_foreach (rt, rm->per_thread_data)
    {
      clib_spinlock_init (&rt->lock);
      pool_alloc (rt->pool, rm->max_reass_n);
    }

  node = vlib_get_node_by_name (vm, (u8 *) "ip4-full-reassembly-expire-walk");
  ASSERT (node);
  rm->ip4_full_reass_expire_node_idx = node->index;

  ip4_full_reass_set_params (IP4_REASS_TIMEOUT_DEFAULT_MS,
			     IP4_REASS_MAX_REASSEMBLIES_DEFAULT,
			     IP4_REASS_MAX_REASSEMBLY_LENGTH_DEFAULT,
			     IP4_REASS_EXPIRE_WALK_INTERVAL_DEFAULT_MS);

  nbuckets = ip4_full_reass_get_nbuckets ();
  clib_bihash_init_16_8 (&rm->hash, "ip4-dr", nbuckets, nbuckets * 1024);

  rm->fq_index = vlib_frame_queue_main_init (ip4_full_reass_node.index, 0);
  rm->fq_local_index =
    vlib_frame_queue_main_init (ip4_local_full_reass_node.index, 0);
  rm->fq_feature_index =
    vlib_frame_queue_main_init (ip4_full_reass_node_feature.index, 0);
  rm->fq_custom_index =
    vlib_frame_queue_main_init (ip4_full_reass_node_custom.index, 0);

  rm->feature_use_refcount_per_intf = NULL;
  rm->is_local_reass_enabled = 1;

  return error;
}

VLIB_INIT_FUNCTION (ip4_full_reass_init_function);
#endif /* CLIB_MARCH_VARIANT */

static uword
ip4_full_reass_walk_expired (vlib_main_t *vm, vlib_node_runtime_t *node,
			     CLIB_UNUSED (vlib_frame_t *f))
{
  ip4_full_reass_main_t *rm = &ip4_full_reass_main;
  uword event_type, *event_data = 0;

  while (true)
    {
      vlib_process_wait_for_event_or_clock (vm,
					    (f64)
					    rm->expire_walk_interval_ms /
					    (f64) MSEC_PER_SEC);
      event_type = vlib_process_get_events (vm, &event_data);

      switch (event_type)
	{
	case ~0:
	  /* no events => timeout */
	  /* fallthrough */
	case IP4_EVENT_CONFIG_CHANGED:
	  /* nothing to do here */
	  break;
	default:
	  clib_warning ("BUG: event type 0x%wx", event_type);
	  break;
	}
      f64 now = vlib_time_now (vm);

      ip4_full_reass_t *reass;
      int *pool_indexes_to_free = NULL;

      uword thread_index = 0;
      int index;
      const uword nthreads = vlib_num_workers () + 1;

      for (thread_index = 0; thread_index < nthreads; ++thread_index)
	{
	  ip4_full_reass_per_thread_t *rt =
	    &rm->per_thread_data[thread_index];
	  clib_spinlock_lock (&rt->lock);

	  vec_reset_length (pool_indexes_to_free);

	  /* Pace the number of timeouts handled per thread, to avoid
	   * barrier sync issues in real-world scenarios */

	  u32 beg = rt->last_id;
	  /* to ensure we walk at least once per sec per context */
	  u32 end =
	    beg + (IP4_REASS_MAX_REASSEMBLIES_DEFAULT *
		   IP4_REASS_EXPIRE_WALK_INTERVAL_DEFAULT_MS / MSEC_PER_SEC +
		   1);
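	  /* With the defaults (1024 contexts, 50 ms walk interval) this
	   * visits ~52 entries per walk, i.e. the whole pool roughly once
	   * per second. */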
	  if (end > vec_len (rt->pool))
	    {
	      end = vec_len (rt->pool);
	      rt->last_id = 0;
	    }
	  else
	    {
	      rt->last_id = end;
	    }

	  pool_foreach_stepping_index (index, beg, end, rt->pool)
	    {
	      reass = pool_elt_at_index (rt->pool, index);
	      if (now > reass->last_heard + rm->timeout)
		{
		  vec_add1 (pool_indexes_to_free, index);
		}
	    }

	  if (vec_len (pool_indexes_to_free))
	    vlib_node_increment_counter (vm, node->node_index,
					 IP4_ERROR_REASS_TIMEOUT,
					 vec_len (pool_indexes_to_free));
	  int *i;
	  vec_foreach (i, pool_indexes_to_free)
	    {
	      ip4_full_reass_t *reass = pool_elt_at_index (rt->pool, i[0]);
	      ip4_full_reass_drop_all (vm, node, reass);
	      ip4_full_reass_free (rm, rt, reass);
	    }

	  clib_spinlock_unlock (&rt->lock);
	}

      vec_free (pool_indexes_to_free);
      if (event_data)
	{
	  vec_set_len (event_data, 0);
	}
    }

  return 0;
}

VLIB_REGISTER_NODE (ip4_full_reass_expire_node) = {
  .function = ip4_full_reass_walk_expired,
  .type = VLIB_NODE_TYPE_PROCESS,
  .name = "ip4-full-reassembly-expire-walk",
  .format_trace = format_ip4_full_reass_trace,
  .n_errors = IP4_N_ERROR,
  .error_counters = ip4_error_counters,
};

static u8 *
format_ip4_full_reass_key (u8 * s, va_list * args)
{
  ip4_full_reass_key_t *key = va_arg (*args, ip4_full_reass_key_t *);
  s =
    format (s, "fib_index: %u, src: %U, dst: %U, frag_id: %u, proto: %u",
	    key->fib_index, format_ip4_address, &key->src, format_ip4_address,
	    &key->dst, clib_net_to_host_u16 (key->frag_id), key->proto);
  return s;
}

static u8 *
format_ip4_reass (u8 * s, va_list * args)
{
  vlib_main_t *vm = va_arg (*args, vlib_main_t *);
  ip4_full_reass_t *reass = va_arg (*args, ip4_full_reass_t *);

  s = format (s, "ID: %lu, key: %U\n first_bi: %u, data_len: %u, "
	      "last_packet_octet: %u, trace_op_counter: %u\n",
	      reass->id, format_ip4_full_reass_key, &reass->key,
	      reass->first_bi, reass->data_len,
	      reass->last_packet_octet, reass->trace_op_counter);

  u32 bi = reass->first_bi;
  u32 counter = 0;
  while (~0 != bi)
    {
      vlib_buffer_t *b = vlib_get_buffer (vm, bi);
      vnet_buffer_opaque_t *vnb = vnet_buffer (b);
      s =
	format (s,
		" #%03u: range: [%u, %u], bi: %u, off: %d, len: %u, "
		"fragment[%u, %u]\n", counter, vnb->ip.reass.range_first,
		vnb->ip.reass.range_last, bi,
		ip4_full_reass_buffer_get_data_offset (b),
		ip4_full_reass_buffer_get_data_len (b),
		vnb->ip.reass.fragment_first, vnb->ip.reass.fragment_last);
      /* counter was previously never incremented, so every range printed
       * as #000; bump it per fragment range */
      counter++;
      if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
	{
	  bi = b->next_buffer;
	}
      else
	{
	  bi = ~0;
	}
    }
  return s;
}

static clib_error_t *
show_ip4_reass (vlib_main_t * vm,
		unformat_input_t * input,
		CLIB_UNUSED (vlib_cli_command_t * lmd))
{
  ip4_full_reass_main_t *rm = &ip4_full_reass_main;

  vlib_cli_output (vm, "---------------------");
  vlib_cli_output (vm, "IP4 reassembly status");
  vlib_cli_output (vm, "---------------------");
  bool details = false;
  if (unformat (input, "details"))
    {
      details = true;
    }

  u32 sum_reass_n = 0;
  ip4_full_reass_t *reass;
  uword thread_index;
  const uword nthreads = vlib_num_workers () + 1;
  for (thread_index = 0; thread_index < nthreads; ++thread_index)
    {
      ip4_full_reass_per_thread_t *rt = &rm->per_thread_data[thread_index];
      clib_spinlock_lock (&rt->lock);
      if (details)
	{
	  pool_foreach (reass, rt->pool)
	    {
	      vlib_cli_output (vm, "%U", format_ip4_reass, vm, reass);
	    }
	}
      sum_reass_n += rt->reass_n;
      clib_spinlock_unlock (&rt->lock);
    }
  vlib_cli_output (vm, "---------------------");
  vlib_cli_output (vm, "Current full IP4 reassemblies count: %lu\n",
		   (long unsigned) sum_reass_n);
  vlib_cli_output (vm,
		   "Maximum configured concurrent full IP4 reassemblies per worker-thread: %lu\n",
		   (long unsigned) rm->max_reass_n);
  vlib_cli_output (vm,
		   "Maximum configured amount of fragments "
		   "per full IP4 reassembly: %lu\n",
		   (long unsigned) rm->max_reass_len);
  vlib_cli_output (vm,
		   "Maximum configured full IP4 reassembly timeout: %lums\n",
		   (long unsigned) rm->timeout_ms);
  vlib_cli_output (vm,
		   "Maximum configured full IP4 reassembly expire walk interval: %lums\n",
		   (long unsigned) rm->expire_walk_interval_ms);
  return 0;
}

VLIB_CLI_COMMAND (show_ip4_full_reass_cmd, static) = {
  .path = "show ip4-full-reassembly",
  .short_help = "show ip4-full-reassembly [details]",
  .function = show_ip4_reass,
};

#ifndef CLIB_MARCH_VARIANT
vnet_api_error_t
ip4_full_reass_enable_disable (u32 sw_if_index, u8 enable_disable)
{
  return vnet_feature_enable_disable ("ip4-unicast",
				      "ip4-full-reassembly-feature",
				      sw_if_index, enable_disable, 0, 0);
}
#endif /* CLIB_MARCH_VARIANT */

#define foreach_ip4_full_reass_handoff_error                                 \
_(CONGESTION_DROP, "congestion drop")

typedef enum
{
#define _(sym,str) IP4_FULL_REASS_HANDOFF_ERROR_##sym,
  foreach_ip4_full_reass_handoff_error
#undef _
    IP4_FULL_REASS_HANDOFF_N_ERROR,
} ip4_full_reass_handoff_error_t;

static char *ip4_full_reass_handoff_error_strings[] = {
#define _(sym,string) string,
  foreach_ip4_full_reass_handoff_error
#undef _
};

typedef struct
{
  u32 next_worker_index;
} ip4_full_reass_handoff_trace_t;

static u8 *
format_ip4_full_reass_handoff_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  ip4_full_reass_handoff_trace_t *t =
    va_arg (*args, ip4_full_reass_handoff_trace_t *);

  s =
    format (s, "ip4-full-reassembly-handoff: next-worker %d",
	    t->next_worker_index);

  return s;
}

always_inline uword
ip4_full_reass_handoff_node_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
				    vlib_frame_t *frame,
				    ip4_full_reass_node_type_t type,
				    bool is_local)
{
  ip4_full_reass_main_t *rm = &ip4_full_reass_main;

  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
  u32 n_enq, n_left_from, *from;
  u16 thread_indices[VLIB_FRAME_SIZE], *ti;
  u32 fq_index;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  vlib_get_buffers (vm, from, bufs, n_left_from);

  b = bufs;
  ti = thread_indices;

  switch (type)
    {
    case NORMAL:
      if (is_local)
	{
	  fq_index = rm->fq_local_index;
	}
      else
	{
	  fq_index = rm->fq_index;
	}
      break;
    case FEATURE:
      fq_index = rm->fq_feature_index;
      break;
    case CUSTOM:
      fq_index = rm->fq_custom_index;
      break;
    default:
      clib_warning ("Unexpected `type' (%d)!", type);
    }
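
  /* The owner thread was stamped into the buffer metadata when the bihash
   * entry was looked up; copy it out per buffer so the whole frame can be
   * enqueued to the per-thread frame queues in one call below. */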

  while (n_left_from > 0)
    {
      ti[0] = vnet_buffer (b[0])->ip.reass.owner_thread_index;

      if (PREDICT_FALSE
	  ((node->flags & VLIB_NODE_FLAG_TRACE)
	   && (b[0]->flags & VLIB_BUFFER_IS_TRACED)))
	{
	  ip4_full_reass_handoff_trace_t *t =
	    vlib_add_trace (vm, node, b[0], sizeof (*t));
	  t->next_worker_index = ti[0];
	}

      n_left_from -= 1;
      ti += 1;
      b += 1;
    }
  n_enq = vlib_buffer_enqueue_to_thread (vm, node, fq_index, from,
					 thread_indices, frame->n_vectors, 1);

  if (n_enq < frame->n_vectors)
    vlib_node_increment_counter (vm, node->node_index,
				 IP4_FULL_REASS_HANDOFF_ERROR_CONGESTION_DROP,
				 frame->n_vectors - n_enq);
  return frame->n_vectors;
}

VLIB_NODE_FN (ip4_full_reass_handoff_node) (vlib_main_t * vm,
					    vlib_node_runtime_t * node,
					    vlib_frame_t * frame)
{
  return ip4_full_reass_handoff_node_inline (vm, node, frame, NORMAL,
					     false /* is_local */);
}

VLIB_REGISTER_NODE (ip4_full_reass_handoff_node) = {
  .name = "ip4-full-reassembly-handoff",
  .vector_size = sizeof (u32),
  .n_errors = ARRAY_LEN(ip4_full_reass_handoff_error_strings),
  .error_strings = ip4_full_reass_handoff_error_strings,
  .format_trace = format_ip4_full_reass_handoff_trace,

  .n_next_nodes = 1,

  .next_nodes = {
    [0] = "error-drop",
  },
};

VLIB_NODE_FN (ip4_local_full_reass_handoff_node)
(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
{
  return ip4_full_reass_handoff_node_inline (vm, node, frame, NORMAL,
					     true /* is_local */);
}

VLIB_REGISTER_NODE (ip4_local_full_reass_handoff_node) = {
  .name = "ip4-local-full-reassembly-handoff",
  .vector_size = sizeof (u32),
  .n_errors = ARRAY_LEN(ip4_full_reass_handoff_error_strings),
  .error_strings = ip4_full_reass_handoff_error_strings,
  .format_trace = format_ip4_full_reass_handoff_trace,

  .n_next_nodes = 1,

  .next_nodes = {
    [0] = "error-drop",
  },
};

VLIB_NODE_FN (ip4_full_reass_feature_handoff_node) (vlib_main_t * vm,
						    vlib_node_runtime_t * node,
						    vlib_frame_t * frame)
{
  return ip4_full_reass_handoff_node_inline (vm, node, frame, FEATURE,
					     false /* is_local */);
}

VLIB_REGISTER_NODE (ip4_full_reass_feature_handoff_node) = {
  .name = "ip4-full-reass-feature-hoff",
  .vector_size = sizeof (u32),
  .n_errors = ARRAY_LEN(ip4_full_reass_handoff_error_strings),
  .error_strings = ip4_full_reass_handoff_error_strings,
  .format_trace = format_ip4_full_reass_handoff_trace,

  .n_next_nodes = 1,

  .next_nodes = {
    [0] = "error-drop",
  },
};

VLIB_NODE_FN (ip4_full_reass_custom_handoff_node) (vlib_main_t * vm,
						   vlib_node_runtime_t * node,
						   vlib_frame_t * frame)
{
  return ip4_full_reass_handoff_node_inline (vm, node, frame, CUSTOM,
					     false /* is_local */);
}

VLIB_REGISTER_NODE (ip4_full_reass_custom_handoff_node) = {
  .name = "ip4-full-reass-custom-hoff",
  .vector_size = sizeof (u32),
  .n_errors = ARRAY_LEN(ip4_full_reass_handoff_error_strings),
  .error_strings = ip4_full_reass_handoff_error_strings,
  .format_trace = format_ip4_full_reass_handoff_trace,

  .n_next_nodes = 1,

  .next_nodes = {
    [0] = "error-drop",
  },
};

#ifndef CLIB_MARCH_VARIANT
int
ip4_full_reass_enable_disable_with_refcnt (u32 sw_if_index, int is_enable)
{
  ip4_full_reass_main_t *rm = &ip4_full_reass_main;
  vec_validate (rm->feature_use_refcount_per_intf, sw_if_index);
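  /* Per-interface use count lets several subsystems enable reassembly on
   * the same interface independently; the vnet feature itself is toggled
   * only on the 0 -> 1 and 1 -> 0 transitions, so callers must keep their
   * enable/disable calls balanced. */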
  if (is_enable)
    {
      if (!rm->feature_use_refcount_per_intf[sw_if_index])
	{
	  ++rm->feature_use_refcount_per_intf[sw_if_index];
	  return vnet_feature_enable_disable ("ip4-unicast",
					      "ip4-full-reassembly-feature",
					      sw_if_index, 1, 0, 0);
	}
      ++rm->feature_use_refcount_per_intf[sw_if_index];
    }
  else
    {
      --rm->feature_use_refcount_per_intf[sw_if_index];
      if (!rm->feature_use_refcount_per_intf[sw_if_index])
	return vnet_feature_enable_disable ("ip4-unicast",
					    "ip4-full-reassembly-feature",
					    sw_if_index, 0, 0, 0);
    }
  return -1;
}

void
ip4_local_full_reass_enable_disable (int enable)
{
  if (enable)
    {
      ip4_full_reass_main.is_local_reass_enabled = 1;
    }
  else
    {
      ip4_full_reass_main.is_local_reass_enabled = 0;
    }
}

int
ip4_local_full_reass_enabled ()
{
  return ip4_full_reass_main.is_local_reass_enabled;
}

#endif

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */