/*
 * Copyright (c) 2017 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * @file
 * @brief IPv4 Full Reassembly.
 *
 * This file contains the source code for IPv4 full reassembly.
 */

#include <vppinfra/vec.h>
#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
#include <vppinfra/fifo.h>
#include <vppinfra/bihash_16_8.h>
#include <vnet/ip/reass/ip4_full_reass.h>
#include <stddef.h>

#define MSEC_PER_SEC 1000
#define IP4_REASS_TIMEOUT_DEFAULT_MS 100
#define IP4_REASS_EXPIRE_WALK_INTERVAL_DEFAULT_MS 10000 // 10 seconds default
#define IP4_REASS_MAX_REASSEMBLIES_DEFAULT 1024
#define IP4_REASS_MAX_REASSEMBLY_LENGTH_DEFAULT 3
#define IP4_REASS_HT_LOAD_FACTOR (0.75)

#define IP4_REASS_DEBUG_BUFFERS 0
#if IP4_REASS_DEBUG_BUFFERS
#define IP4_REASS_DEBUG_BUFFER(bi, what)             \
  do                                                 \
    {                                                \
      u32 _bi = bi;                                  \
      printf (#what "buffer %u", _bi);               \
      vlib_buffer_t *_b = vlib_get_buffer (vm, _bi); \
      while (_b->flags & VLIB_BUFFER_NEXT_PRESENT)   \
        {                                            \
          _bi = _b->next_buffer;                     \
          printf ("[%u]", _bi);                      \
          _b = vlib_get_buffer (vm, _bi);            \
        }                                            \
      printf ("\n");                                 \
      fflush (stdout);                               \
    }                                                \
  while (0)
#else
#define IP4_REASS_DEBUG_BUFFER(...)
#endif

typedef enum
{
  IP4_REASS_RC_OK,
  IP4_REASS_RC_TOO_MANY_FRAGMENTS,
  IP4_REASS_RC_INTERNAL_ERROR,
  IP4_REASS_RC_NO_BUF,
  IP4_REASS_RC_HANDOFF,
} ip4_full_reass_rc_t;

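/*
 * 16-byte flow key for the reassembly lookup: fib index (stored in xx_id),
 * source/destination address, fragment id and protocol, overlaid on two u64
 * words so it can be used directly as a clib_bihash_16_8_t key.
 */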
typedef struct
{
  union
  {
    struct
    {
      u32 xx_id;
      ip4_address_t src;
      ip4_address_t dst;
      u16 frag_id;
      u8 proto;
      u8 unused;
    };
    u64 as_u64[2];
  };
} ip4_full_reass_key_t;

typedef union
{
  struct
  {
    u32 reass_index;
    u32 memory_owner_thread_index;
  };
  u64 as_u64;
} ip4_full_reass_val_t;

typedef union
{
  struct
  {
    ip4_full_reass_key_t k;
    ip4_full_reass_val_t v;
  };
  clib_bihash_kv_16_8_t kv;
} ip4_full_reass_kv_t;

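/*
 * Per-fragment metadata lives in the vnet buffer opaque: fragment_first/last
 * are the octets covered by the fragment as received, while range_first/last
 * describe the (possibly shrunk) part of it actually used in the reassembly.
 * The helpers below translate between the two views.
 */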
always_inline u32
ip4_full_reass_buffer_get_data_offset (vlib_buffer_t * b)
{
  vnet_buffer_opaque_t *vnb = vnet_buffer (b);
  return vnb->ip.reass.range_first - vnb->ip.reass.fragment_first;
}

always_inline u16
ip4_full_reass_buffer_get_data_len (vlib_buffer_t * b)
{
  vnet_buffer_opaque_t *vnb = vnet_buffer (b);
  return clib_min (vnb->ip.reass.range_last, vnb->ip.reass.fragment_last) -
    (vnb->ip.reass.fragment_first +
     ip4_full_reass_buffer_get_data_offset (b)) + 1;
}

typedef struct
{
  // hash table key
  ip4_full_reass_key_t key;
  // time when last packet was received
  f64 last_heard;
  // internal id of this reassembly
  u64 id;
  // buffer index of first buffer in this reassembly context
  u32 first_bi;
  // last octet of packet, ~0 until fragment without more_fragments arrives
  u32 last_packet_octet;
  // length of data collected so far
  u32 data_len;
  // trace operation counter
  u32 trace_op_counter;
  // next index - used by non-feature node
  u32 next_index;
  // error next index - used by custom apps (~0 if not used)
  u32 error_next_index;
  // minimum fragment length for this reassembly - used to estimate MTU
  u16 min_fragment_length;
  // number of fragments in this reassembly
  u32 fragments_n;
  // thread owning memory for this context (whose pool contains this ctx)
  u32 memory_owner_thread_index;
  // thread which received fragment with offset 0 and which sends out the
  // completed reassembly
  u32 sendout_thread_index;
} ip4_full_reass_t;

typedef struct
{
  ip4_full_reass_t *pool;
  u32 reass_n;
  u32 id_counter;
  clib_spinlock_t lock;
} ip4_full_reass_per_thread_t;

typedef struct
{
  // IPv4 config
  u32 timeout_ms;
  f64 timeout;
  u32 expire_walk_interval_ms;
  // maximum number of fragments in one reassembly
  u32 max_reass_len;
  // maximum number of reassemblies
  u32 max_reass_n;

  // IPv4 runtime
  clib_bihash_16_8_t hash;
  // per-thread data
  ip4_full_reass_per_thread_t *per_thread_data;

  // convenience
  vlib_main_t *vlib_main;

  // node index of ip4-drop node
  u32 ip4_drop_idx;
  u32 ip4_full_reass_expire_node_idx;

  /** Worker handoff */
  u32 fq_index;
  u32 fq_feature_index;
  u32 fq_custom_index;

  // reference count for enabling/disabling feature - per interface
  u32 *feature_use_refcount_per_intf;
} ip4_full_reass_main_t;

extern ip4_full_reass_main_t ip4_full_reass_main;

#ifndef CLIB_MARCH_VARIANT
ip4_full_reass_main_t ip4_full_reass_main;
#endif /* CLIB_MARCH_VARIANT */

typedef enum
{
  IP4_FULL_REASS_NEXT_INPUT,
  IP4_FULL_REASS_NEXT_DROP,
  IP4_FULL_REASS_NEXT_HANDOFF,
  IP4_FULL_REASS_N_NEXT,
} ip4_full_reass_next_t;

typedef enum
{
  NORMAL,
  FEATURE,
  CUSTOM
} ip4_full_reass_node_type_t;

typedef enum
{
  RANGE_NEW,
  RANGE_SHRINK,
  RANGE_DISCARD,
  RANGE_OVERLAP,
  FINALIZE,
  HANDOFF,
} ip4_full_reass_trace_operation_e;

typedef struct
{
  u16 range_first;
  u16 range_last;
  u32 range_bi;
  i32 data_offset;
  u32 data_len;
  u32 first_bi;
} ip4_full_reass_range_trace_t;

typedef struct
{
  ip4_full_reass_trace_operation_e action;
  u32 reass_id;
  ip4_full_reass_range_trace_t trace_range;
  u32 size_diff;
  u32 op_id;
  u32 thread_id;
  u32 thread_id_to;
  u32 fragment_first;
  u32 fragment_last;
  u32 total_data_len;
  bool is_after_handoff;
  ip4_header_t ip4_header;
} ip4_full_reass_trace_t;

extern vlib_node_registration_t ip4_full_reass_node;
extern vlib_node_registration_t ip4_full_reass_node_feature;
extern vlib_node_registration_t ip4_full_reass_node_custom;

static void
ip4_full_reass_trace_details (vlib_main_t * vm, u32 bi,
                              ip4_full_reass_range_trace_t * trace)
{
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
  vnet_buffer_opaque_t *vnb = vnet_buffer (b);
  trace->range_first = vnb->ip.reass.range_first;
  trace->range_last = vnb->ip.reass.range_last;
  trace->data_offset = ip4_full_reass_buffer_get_data_offset (b);
  trace->data_len = ip4_full_reass_buffer_get_data_len (b);
  trace->range_bi = bi;
}

static u8 *
format_ip4_full_reass_range_trace (u8 * s, va_list * args)
{
  ip4_full_reass_range_trace_t *trace =
    va_arg (*args, ip4_full_reass_range_trace_t *);
  s =
    format (s, "range: [%u, %u], off %d, len %u, bi %u", trace->range_first,
            trace->range_last, trace->data_offset, trace->data_len,
            trace->range_bi);
  return s;
}

static u8 *
format_ip4_full_reass_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  ip4_full_reass_trace_t *t = va_arg (*args, ip4_full_reass_trace_t *);
  u32 indent = 0;
  if (~0 != t->reass_id)
    {
      if (t->is_after_handoff)
        {
          s =
            format (s, "%U\n", format_ip4_header, &t->ip4_header,
                    sizeof (t->ip4_header));
          indent = 2;
        }
      s =
        format (s, "%Ureass id: %u, op id: %u, ", format_white_space, indent,
                t->reass_id, t->op_id);
      indent = format_get_indent (s);
      s =
        format (s,
                "first bi: %u, data len: %u, ip/fragment[%u, %u]",
                t->trace_range.first_bi, t->total_data_len, t->fragment_first,
                t->fragment_last);
    }
  switch (t->action)
    {
    case RANGE_SHRINK:
      s = format (s, "\n%Ushrink %U by %u", format_white_space, indent,
                  format_ip4_full_reass_range_trace, &t->trace_range,
                  t->size_diff);
      break;
    case RANGE_DISCARD:
      s = format (s, "\n%Udiscard %U", format_white_space, indent,
                  format_ip4_full_reass_range_trace, &t->trace_range);
      break;
    case RANGE_NEW:
      s = format (s, "\n%Unew %U", format_white_space, indent,
                  format_ip4_full_reass_range_trace, &t->trace_range);
      break;
    case RANGE_OVERLAP:
      s = format (s, "\n%Uoverlapping/ignored %U", format_white_space, indent,
                  format_ip4_full_reass_range_trace, &t->trace_range);
      break;
    case FINALIZE:
      s = format (s, "\n%Ufinalize reassembly", format_white_space, indent);
      break;
    case HANDOFF:
      s =
        format (s, "handoff from thread #%u to thread #%u", t->thread_id,
                t->thread_id_to);
      break;
    }
  return s;
}

static void
ip4_full_reass_add_trace (vlib_main_t * vm, vlib_node_runtime_t * node,
                          ip4_full_reass_main_t * rm,
                          ip4_full_reass_t * reass, u32 bi,
                          ip4_full_reass_trace_operation_e action,
                          u32 size_diff, u32 thread_id_to)
{
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
  vnet_buffer_opaque_t *vnb = vnet_buffer (b);
  bool is_after_handoff = false;
  if (vlib_buffer_get_trace_thread (b) != vm->thread_index)
    {
      is_after_handoff = true;
    }
  ip4_full_reass_trace_t *t = vlib_add_trace (vm, node, b, sizeof (t[0]));
  t->is_after_handoff = is_after_handoff;
  if (t->is_after_handoff)
    {
      clib_memcpy (&t->ip4_header, vlib_buffer_get_current (b),
                   clib_min (sizeof (t->ip4_header), b->current_length));
    }
  if (reass)
    {
      t->reass_id = reass->id;
      t->op_id = reass->trace_op_counter;
      t->trace_range.first_bi = reass->first_bi;
      t->total_data_len = reass->data_len;
      ++reass->trace_op_counter;
    }
  else
    {
      t->reass_id = ~0;
      t->op_id = 0;
      t->trace_range.first_bi = 0;
      t->total_data_len = 0;
    }
  t->action = action;
  ip4_full_reass_trace_details (vm, bi, &t->trace_range);
  t->size_diff = size_diff;
  t->thread_id = vm->thread_index;
  t->thread_id_to = thread_id_to;
  t->fragment_first = vnb->ip.reass.fragment_first;
  t->fragment_last = vnb->ip.reass.fragment_last;
#if 0
  static u8 *s = NULL;
  s = format (s, "%U", format_ip4_full_reass_trace, NULL, NULL, t);
  printf ("%.*s\n", vec_len (s), s);
  fflush (stdout);
  vec_reset_length (s);
#endif
}

always_inline void
ip4_full_reass_free_ctx (ip4_full_reass_per_thread_t * rt,
                         ip4_full_reass_t * reass)
{
  pool_put (rt->pool, reass);
  --rt->reass_n;
}

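/* Free a reassembly context and remove its entry from the lookup hash. */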
always_inline void
ip4_full_reass_free (ip4_full_reass_main_t * rm,
                     ip4_full_reass_per_thread_t * rt,
                     ip4_full_reass_t * reass)
{
  clib_bihash_kv_16_8_t kv;
  kv.key[0] = reass->key.as_u64[0];
  kv.key[1] = reass->key.as_u64[1];
  clib_bihash_add_del_16_8 (&rm->hash, &kv, 0);
  return ip4_full_reass_free_ctx (rt, reass);
}

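/*
 * Walk all fragment chains held by the reassembly and either free the
 * buffers or, when a custom error_next_index is set, enqueue them there.
 */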
always_inline void
ip4_full_reass_drop_all (vlib_main_t * vm, vlib_node_runtime_t * node,
                         ip4_full_reass_main_t * rm, ip4_full_reass_t * reass)
{
  u32 range_bi = reass->first_bi;
  vlib_buffer_t *range_b;
  vnet_buffer_opaque_t *range_vnb;
  u32 *to_free = NULL;
  while (~0 != range_bi)
    {
      range_b = vlib_get_buffer (vm, range_bi);
      range_vnb = vnet_buffer (range_b);
      u32 bi = range_bi;
      while (~0 != bi)
        {
          vec_add1 (to_free, bi);
          vlib_buffer_t *b = vlib_get_buffer (vm, bi);
          if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
            {
              bi = b->next_buffer;
              b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
            }
          else
            {
              bi = ~0;
            }
        }
      range_bi = range_vnb->ip.reass.next_range_bi;
    }
  /* send to next_error_index */
  if (~0 != reass->error_next_index)
    {
      u32 n_left_to_next, *to_next, next_index;

      next_index = reass->error_next_index;
      u32 bi = ~0;

      while (vec_len (to_free) > 0)
        {
          vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

          while (vec_len (to_free) > 0 && n_left_to_next > 0)
            {
              bi = vec_pop (to_free);

              if (~0 != bi)
                {
                  to_next[0] = bi;
                  to_next += 1;
                  n_left_to_next -= 1;
                }
            }
          vlib_put_next_frame (vm, node, next_index, n_left_to_next);
        }
    }
  else
    {
      vlib_buffer_free (vm, to_free, vec_len (to_free));
    }
  vec_free (to_free);
}

always_inline void
ip4_full_reass_init (ip4_full_reass_t * reass)
{
  reass->first_bi = ~0;
  reass->last_packet_octet = ~0;
  reass->data_len = 0;
  reass->next_index = ~0;
  reass->error_next_index = ~0;
}

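/*
 * Look up the reassembly context for the given key. If the context's memory
 * is owned by another thread, signal a handoff instead of touching it.
 * Expired contexts are dropped and a fresh one is allocated; an insert that
 * fails with -2 means another worker added the key first, so the lookup is
 * retried (see the "goto again").
 */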
always_inline ip4_full_reass_t *
ip4_full_reass_find_or_create (vlib_main_t * vm, vlib_node_runtime_t * node,
                               ip4_full_reass_main_t * rm,
                               ip4_full_reass_per_thread_t * rt,
                               ip4_full_reass_kv_t * kv, u8 * do_handoff)
{
  ip4_full_reass_t *reass;
  f64 now;

again:

  reass = NULL;
  now = vlib_time_now (vm);
  if (!clib_bihash_search_16_8 (&rm->hash, &kv->kv, &kv->kv))
    {
      if (vm->thread_index != kv->v.memory_owner_thread_index)
        {
          *do_handoff = 1;
          return NULL;
        }
      reass =
        pool_elt_at_index (rm->per_thread_data
                           [kv->v.memory_owner_thread_index].pool,
                           kv->v.reass_index);

      if (now > reass->last_heard + rm->timeout)
        {
          ip4_full_reass_drop_all (vm, node, rm, reass);
          ip4_full_reass_free (rm, rt, reass);
          reass = NULL;
        }
    }

  if (reass)
    {
      reass->last_heard = now;
      return reass;
    }

  if (rt->reass_n >= rm->max_reass_n)
    {
      reass = NULL;
      return reass;
    }
  else
    {
      pool_get (rt->pool, reass);
      clib_memset (reass, 0, sizeof (*reass));
      reass->id = ((u64) vm->thread_index * 1000000000) + rt->id_counter;
      reass->memory_owner_thread_index = vm->thread_index;
      ++rt->id_counter;
      ip4_full_reass_init (reass);
      ++rt->reass_n;
    }

  reass->key.as_u64[0] = kv->kv.key[0];
  reass->key.as_u64[1] = kv->kv.key[1];
  kv->v.reass_index = (reass - rt->pool);
  kv->v.memory_owner_thread_index = vm->thread_index;
  reass->last_heard = now;

  int rv = clib_bihash_add_del_16_8 (&rm->hash, &kv->kv, 2);
  if (rv)
    {
      ip4_full_reass_free_ctx (rt, reass);
      reass = NULL;
      // if other worker created a context already work with the other copy
      if (-2 == rv)
        goto again;
    }

  return reass;
}

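/*
 * Stitch the collected ranges into a single buffer chain: trim the IP header
 * and any overlap from every fragment, link the kept data, rewrite the IP
 * header (length, fragment bits, checksum) and select the next node for the
 * reassembled packet.
 */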
always_inline ip4_full_reass_rc_t
ip4_full_reass_finalize (vlib_main_t * vm, vlib_node_runtime_t * node,
                         ip4_full_reass_main_t * rm,
                         ip4_full_reass_per_thread_t * rt,
                         ip4_full_reass_t * reass, u32 * bi0,
                         u32 * next0, u32 * error0, bool is_custom)
{
  vlib_buffer_t *first_b = vlib_get_buffer (vm, reass->first_bi);
  vlib_buffer_t *last_b = NULL;
  u32 sub_chain_bi = reass->first_bi;
  u32 total_length = 0;
  u32 buf_cnt = 0;
  do
    {
      u32 tmp_bi = sub_chain_bi;
      vlib_buffer_t *tmp = vlib_get_buffer (vm, tmp_bi);
      ip4_header_t *ip = vlib_buffer_get_current (tmp);
      vnet_buffer_opaque_t *vnb = vnet_buffer (tmp);
      if (!(vnb->ip.reass.range_first >= vnb->ip.reass.fragment_first) &&
          !(vnb->ip.reass.range_last > vnb->ip.reass.fragment_first))
        {
          return IP4_REASS_RC_INTERNAL_ERROR;
        }

      u32 data_len = ip4_full_reass_buffer_get_data_len (tmp);
      u32 trim_front =
        ip4_header_bytes (ip) + ip4_full_reass_buffer_get_data_offset (tmp);
      u32 trim_end =
        vlib_buffer_length_in_chain (vm, tmp) - trim_front - data_len;
      if (tmp_bi == reass->first_bi)
        {
          /* first buffer - keep ip4 header */
          if (0 != ip4_full_reass_buffer_get_data_offset (tmp))
            {
              return IP4_REASS_RC_INTERNAL_ERROR;
            }
          trim_front = 0;
          trim_end = vlib_buffer_length_in_chain (vm, tmp) - data_len -
            ip4_header_bytes (ip);
          if (!(vlib_buffer_length_in_chain (vm, tmp) - trim_end > 0))
            {
              return IP4_REASS_RC_INTERNAL_ERROR;
            }
        }
      u32 keep_data =
        vlib_buffer_length_in_chain (vm, tmp) - trim_front - trim_end;
      while (1)
        {
          ++buf_cnt;
          if (trim_front)
            {
              if (trim_front > tmp->current_length)
                {
                  /* drop whole buffer */
                  u32 to_be_freed_bi = tmp_bi;
                  trim_front -= tmp->current_length;
                  if (!(tmp->flags & VLIB_BUFFER_NEXT_PRESENT))
                    {
                      return IP4_REASS_RC_INTERNAL_ERROR;
                    }
                  tmp->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
                  tmp_bi = tmp->next_buffer;
                  tmp->next_buffer = 0;
                  tmp = vlib_get_buffer (vm, tmp_bi);
                  vlib_buffer_free_one (vm, to_be_freed_bi);
                  continue;
                }
              else
                {
                  vlib_buffer_advance (tmp, trim_front);
                  trim_front = 0;
                }
            }
          if (keep_data)
            {
              if (last_b)
                {
                  last_b->flags |= VLIB_BUFFER_NEXT_PRESENT;
                  last_b->next_buffer = tmp_bi;
                }
              last_b = tmp;
              if (keep_data <= tmp->current_length)
                {
                  tmp->current_length = keep_data;
                  keep_data = 0;
                }
              else
                {
                  keep_data -= tmp->current_length;
                  if (!(tmp->flags & VLIB_BUFFER_NEXT_PRESENT))
                    {
                      return IP4_REASS_RC_INTERNAL_ERROR;
                    }
                }
              total_length += tmp->current_length;
              if (tmp->flags & VLIB_BUFFER_NEXT_PRESENT)
                {
                  tmp_bi = tmp->next_buffer;
                  tmp = vlib_get_buffer (vm, tmp->next_buffer);
                }
              else
                {
                  break;
                }
            }
          else
            {
              u32 to_be_freed_bi = tmp_bi;
              if (reass->first_bi == tmp_bi)
                {
                  return IP4_REASS_RC_INTERNAL_ERROR;
                }
              if (tmp->flags & VLIB_BUFFER_NEXT_PRESENT)
                {
                  tmp->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
                  tmp_bi = tmp->next_buffer;
                  tmp->next_buffer = 0;
                  tmp = vlib_get_buffer (vm, tmp_bi);
                  vlib_buffer_free_one (vm, to_be_freed_bi);
                }
              else
                {
                  tmp->next_buffer = 0;
                  vlib_buffer_free_one (vm, to_be_freed_bi);
                  break;
                }
            }
        }
      sub_chain_bi =
        vnet_buffer (vlib_get_buffer (vm, sub_chain_bi))->ip.
        reass.next_range_bi;
    }
  while (~0 != sub_chain_bi);

  if (!last_b)
    {
      return IP4_REASS_RC_INTERNAL_ERROR;
    }
  last_b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;

  if (total_length < first_b->current_length)
    {
      return IP4_REASS_RC_INTERNAL_ERROR;
    }
  total_length -= first_b->current_length;
  first_b->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
  first_b->total_length_not_including_first_buffer = total_length;
  ip4_header_t *ip = vlib_buffer_get_current (first_b);
  ip->flags_and_fragment_offset = 0;
  ip->length = clib_host_to_net_u16 (first_b->current_length + total_length);
  ip->checksum = ip4_header_checksum (ip);
  if (!vlib_buffer_chain_linearize (vm, first_b))
    {
      return IP4_REASS_RC_NO_BUF;
    }
  // reset to reconstruct the mbuf linking
  first_b->flags &= ~VLIB_BUFFER_EXT_HDR_VALID;
  if (PREDICT_FALSE (first_b->flags & VLIB_BUFFER_IS_TRACED))
    {
      ip4_full_reass_add_trace (vm, node, rm, reass, reass->first_bi,
                                FINALIZE, 0, ~0);
#if 0
      // following code does a hexdump of packet fragments to stdout ...
      do
        {
          u32 bi = reass->first_bi;
          u8 *s = NULL;
          while (~0 != bi)
            {
              vlib_buffer_t *b = vlib_get_buffer (vm, bi);
              s = format (s, "%u: %U\n", bi, format_hexdump,
                          vlib_buffer_get_current (b), b->current_length);
              if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
                {
                  bi = b->next_buffer;
                }
              else
                {
                  break;
                }
            }
          printf ("%.*s\n", vec_len (s), s);
          fflush (stdout);
          vec_free (s);
        }
      while (0);
#endif
    }
  *bi0 = reass->first_bi;
  if (!is_custom)
    {
      *next0 = IP4_FULL_REASS_NEXT_INPUT;
    }
  else
    {
      *next0 = reass->next_index;
    }
  vnet_buffer (first_b)->ip.reass.estimated_mtu = reass->min_fragment_length;
  *error0 = IP4_ERROR_NONE;
  ip4_full_reass_free (rm, rt, reass);
  reass = NULL;
  return IP4_REASS_RC_OK;
}

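/*
 * Link a new fragment range into the singly linked, offset-ordered list of
 * ranges and account for its data length.
 */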
always_inline ip4_full_reass_rc_t
ip4_full_reass_insert_range_in_chain (vlib_main_t * vm,
                                      ip4_full_reass_main_t * rm,
                                      ip4_full_reass_per_thread_t * rt,
                                      ip4_full_reass_t * reass,
                                      u32 prev_range_bi, u32 new_next_bi)
{
  vlib_buffer_t *new_next_b = vlib_get_buffer (vm, new_next_bi);
  vnet_buffer_opaque_t *new_next_vnb = vnet_buffer (new_next_b);
  if (~0 != prev_range_bi)
    {
      vlib_buffer_t *prev_b = vlib_get_buffer (vm, prev_range_bi);
      vnet_buffer_opaque_t *prev_vnb = vnet_buffer (prev_b);
      new_next_vnb->ip.reass.next_range_bi = prev_vnb->ip.reass.next_range_bi;
      prev_vnb->ip.reass.next_range_bi = new_next_bi;
    }
  else
    {
      if (~0 != reass->first_bi)
        {
          new_next_vnb->ip.reass.next_range_bi = reass->first_bi;
        }
      reass->first_bi = new_next_bi;
    }
  vnet_buffer_opaque_t *vnb = vnet_buffer (new_next_b);
  if (!(vnb->ip.reass.range_first >= vnb->ip.reass.fragment_first) &&
      !(vnb->ip.reass.range_last > vnb->ip.reass.fragment_first))
    {
      return IP4_REASS_RC_INTERNAL_ERROR;
    }
  reass->data_len += ip4_full_reass_buffer_get_data_len (new_next_b);
  return IP4_REASS_RC_OK;
}

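/*
 * Unlink a range from the list, subtract its data length from the
 * reassembly, and free its buffer chain.
 */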
always_inline ip4_full_reass_rc_t
ip4_full_reass_remove_range_from_chain (vlib_main_t * vm,
                                        vlib_node_runtime_t * node,
                                        ip4_full_reass_main_t * rm,
                                        ip4_full_reass_t * reass,
                                        u32 prev_range_bi, u32 discard_bi)
{
  vlib_buffer_t *discard_b = vlib_get_buffer (vm, discard_bi);
  vnet_buffer_opaque_t *discard_vnb = vnet_buffer (discard_b);
  if (~0 != prev_range_bi)
    {
      vlib_buffer_t *prev_b = vlib_get_buffer (vm, prev_range_bi);
      vnet_buffer_opaque_t *prev_vnb = vnet_buffer (prev_b);
      if (!(prev_vnb->ip.reass.next_range_bi == discard_bi))
        {
          return IP4_REASS_RC_INTERNAL_ERROR;
        }
      prev_vnb->ip.reass.next_range_bi = discard_vnb->ip.reass.next_range_bi;
    }
  else
    {
      reass->first_bi = discard_vnb->ip.reass.next_range_bi;
    }
  vnet_buffer_opaque_t *vnb = vnet_buffer (discard_b);
  if (!(vnb->ip.reass.range_first >= vnb->ip.reass.fragment_first) &&
      !(vnb->ip.reass.range_last > vnb->ip.reass.fragment_first))
    {
      return IP4_REASS_RC_INTERNAL_ERROR;
    }
  reass->data_len -= ip4_full_reass_buffer_get_data_len (discard_b);
  while (1)
    {
      u32 to_be_freed_bi = discard_bi;
      if (PREDICT_FALSE (discard_b->flags & VLIB_BUFFER_IS_TRACED))
        {
          ip4_full_reass_add_trace (vm, node, rm, reass, discard_bi,
                                    RANGE_DISCARD, 0, ~0);
        }
      if (discard_b->flags & VLIB_BUFFER_NEXT_PRESENT)
        {
          discard_b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
          discard_bi = discard_b->next_buffer;
          discard_b->next_buffer = 0;
          discard_b = vlib_get_buffer (vm, discard_bi);
          vlib_buffer_free_one (vm, to_be_freed_bi);
        }
      else
        {
          discard_b->next_buffer = 0;
          vlib_buffer_free_one (vm, to_be_freed_bi);
          break;
        }
    }
  return IP4_REASS_RC_OK;
}

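/*
 * Core per-fragment state machine: insert the fragment into the range list,
 * resolving overlaps by shrinking or discarding ranges, and finalize the
 * reassembly once the collected data length matches the last packet octet.
 */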
always_inline ip4_full_reass_rc_t
ip4_full_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
                       ip4_full_reass_main_t * rm,
                       ip4_full_reass_per_thread_t * rt,
                       ip4_full_reass_t * reass, u32 * bi0, u32 * next0,
                       u32 * error0, bool is_custom, u32 * handoff_thread_idx)
{
  vlib_buffer_t *fb = vlib_get_buffer (vm, *bi0);
  vnet_buffer_opaque_t *fvnb = vnet_buffer (fb);
  if (is_custom)
    {
      // store (error_)next_index before it's overwritten
      reass->next_index = fvnb->ip.reass.next_index;
      reass->error_next_index = fvnb->ip.reass.error_next_index;
    }
  ip4_full_reass_rc_t rc = IP4_REASS_RC_OK;
  int consumed = 0;
  ip4_header_t *fip = vlib_buffer_get_current (fb);
  const u32 fragment_first = ip4_get_fragment_offset_bytes (fip);
  const u32 fragment_length =
    clib_net_to_host_u16 (fip->length) - ip4_header_bytes (fip);
  const u32 fragment_last = fragment_first + fragment_length - 1;
  fvnb->ip.reass.fragment_first = fragment_first;
  fvnb->ip.reass.fragment_last = fragment_last;
  int more_fragments = ip4_get_fragment_more (fip);
  u32 candidate_range_bi = reass->first_bi;
  u32 prev_range_bi = ~0;
  fvnb->ip.reass.range_first = fragment_first;
  fvnb->ip.reass.range_last = fragment_last;
  fvnb->ip.reass.next_range_bi = ~0;
  if (!more_fragments)
    {
      reass->last_packet_octet = fragment_last;
    }
  if (~0 == reass->first_bi)
    {
      // starting a new reassembly
      rc =
        ip4_full_reass_insert_range_in_chain (vm, rm, rt, reass,
                                              prev_range_bi, *bi0);
      if (IP4_REASS_RC_OK != rc)
        {
          return rc;
        }
      if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
        {
          ip4_full_reass_add_trace (vm, node, rm, reass, *bi0, RANGE_NEW, 0,
                                    ~0);
        }
      *bi0 = ~0;
      reass->min_fragment_length = clib_net_to_host_u16 (fip->length);
      reass->fragments_n = 1;
      return IP4_REASS_RC_OK;
    }
  reass->min_fragment_length =
    clib_min (clib_net_to_host_u16 (fip->length),
              fvnb->ip.reass.estimated_mtu);
  while (~0 != candidate_range_bi)
    {
      vlib_buffer_t *candidate_b = vlib_get_buffer (vm, candidate_range_bi);
      vnet_buffer_opaque_t *candidate_vnb = vnet_buffer (candidate_b);
      if (fragment_first > candidate_vnb->ip.reass.range_last)
        {
          // this fragment starts after candidate range
          prev_range_bi = candidate_range_bi;
          candidate_range_bi = candidate_vnb->ip.reass.next_range_bi;
          if (candidate_vnb->ip.reass.range_last < fragment_last &&
              ~0 == candidate_range_bi)
            {
              // special case - this fragment falls beyond all known ranges
              rc =
                ip4_full_reass_insert_range_in_chain (vm, rm, rt, reass,
                                                      prev_range_bi, *bi0);
              if (IP4_REASS_RC_OK != rc)
                {
                  return rc;
                }
              consumed = 1;
              break;
            }
          continue;
        }
      if (fragment_last < candidate_vnb->ip.reass.range_first)
        {
          // this fragment ends before candidate range without any overlap
          rc =
            ip4_full_reass_insert_range_in_chain (vm, rm, rt, reass,
                                                  prev_range_bi, *bi0);
          if (IP4_REASS_RC_OK != rc)
            {
              return rc;
            }
          consumed = 1;
        }
      else
        {
          if (fragment_first >= candidate_vnb->ip.reass.range_first &&
              fragment_last <= candidate_vnb->ip.reass.range_last)
            {
              // this fragment is a (sub)part of existing range, ignore it
              if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
                {
                  ip4_full_reass_add_trace (vm, node, rm, reass, *bi0,
                                            RANGE_OVERLAP, 0, ~0);
                }
              break;
            }
          int discard_candidate = 0;
          if (fragment_first < candidate_vnb->ip.reass.range_first)
            {
              u32 overlap =
                fragment_last - candidate_vnb->ip.reass.range_first + 1;
              if (overlap < ip4_full_reass_buffer_get_data_len (candidate_b))
                {
                  candidate_vnb->ip.reass.range_first += overlap;
                  if (reass->data_len < overlap)
                    {
                      return IP4_REASS_RC_INTERNAL_ERROR;
                    }
                  reass->data_len -= overlap;
                  if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
                    {
                      ip4_full_reass_add_trace (vm, node, rm, reass,
                                                candidate_range_bi,
                                                RANGE_SHRINK, 0, ~0);
                    }
                  rc =
                    ip4_full_reass_insert_range_in_chain (vm, rm, rt, reass,
                                                          prev_range_bi,
                                                          *bi0);
                  if (IP4_REASS_RC_OK != rc)
                    {
                      return rc;
                    }
                  consumed = 1;
                }
              else
                {
                  discard_candidate = 1;
                }
            }
          else if (fragment_last > candidate_vnb->ip.reass.range_last)
            {
              u32 overlap =
                candidate_vnb->ip.reass.range_last - fragment_first + 1;
              if (overlap < ip4_full_reass_buffer_get_data_len (candidate_b))
                {
                  fvnb->ip.reass.range_first += overlap;
                  if (~0 != candidate_vnb->ip.reass.next_range_bi)
                    {
                      prev_range_bi = candidate_range_bi;
                      candidate_range_bi =
                        candidate_vnb->ip.reass.next_range_bi;
                      continue;
                    }
                  else
                    {
                      // special case - last range discarded
                      rc =
                        ip4_full_reass_insert_range_in_chain (vm, rm, rt,
                                                              reass,
                                                              candidate_range_bi,
                                                              *bi0);
                      if (IP4_REASS_RC_OK != rc)
                        {
                          return rc;
                        }
                      consumed = 1;
                    }
                }
              else
                {
                  discard_candidate = 1;
                }
            }
          else
            {
              discard_candidate = 1;
            }
          if (discard_candidate)
            {
              u32 next_range_bi = candidate_vnb->ip.reass.next_range_bi;
              // discard candidate range, probe next range
              rc =
                ip4_full_reass_remove_range_from_chain (vm, node, rm, reass,
                                                        prev_range_bi,
                                                        candidate_range_bi);
              if (IP4_REASS_RC_OK != rc)
                {
                  return rc;
                }
              if (~0 != next_range_bi)
                {
                  candidate_range_bi = next_range_bi;
                  continue;
                }
              else
                {
                  // special case - last range discarded
                  rc =
                    ip4_full_reass_insert_range_in_chain (vm, rm, rt, reass,
                                                          prev_range_bi,
                                                          *bi0);
                  if (IP4_REASS_RC_OK != rc)
                    {
                      return rc;
                    }
                  consumed = 1;
                }
            }
        }
      break;
    }
  ++reass->fragments_n;
  if (consumed)
    {
      if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
        {
          ip4_full_reass_add_trace (vm, node, rm, reass, *bi0, RANGE_NEW, 0,
                                    ~0);
        }
    }
  if (~0 != reass->last_packet_octet &&
      reass->data_len == reass->last_packet_octet + 1)
    {
      *handoff_thread_idx = reass->sendout_thread_index;
      int handoff =
        reass->memory_owner_thread_index != reass->sendout_thread_index;
      rc =
        ip4_full_reass_finalize (vm, node, rm, rt, reass, bi0, next0, error0,
                                 is_custom);
      if (IP4_REASS_RC_OK == rc && handoff)
        {
          rc = IP4_REASS_RC_HANDOFF;
        }
    }
  else
    {
      if (consumed)
        {
          *bi0 = ~0;
          if (reass->fragments_n > rm->max_reass_len)
            {
              rc = IP4_REASS_RC_TOO_MANY_FRAGMENTS;
            }
        }
      else
        {
          *next0 = IP4_FULL_REASS_NEXT_DROP;
          *error0 = IP4_ERROR_REASS_DUPLICATE_FRAGMENT;
        }
    }
  return rc;
}

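/*
 * Common dispatch function shared by the normal, feature and custom nodes.
 * Whole packets bypass reassembly, malformed fragments are dropped, and
 * fragments belonging to a context owned by another thread are handed off.
 */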
1103always_inline uword
Klement Sekera896c8962019-06-24 11:52:49 +00001104ip4_full_reass_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
Klement Sekerafe8371f2020-09-10 12:03:54 +00001105 vlib_frame_t * frame, ip4_full_reass_node_type_t type)
Klement Sekera75e7d132017-09-20 08:26:30 +02001106{
1107 u32 *from = vlib_frame_vector_args (frame);
1108 u32 n_left_from, n_left_to_next, *to_next, next_index;
Klement Sekera896c8962019-06-24 11:52:49 +00001109 ip4_full_reass_main_t *rm = &ip4_full_reass_main;
1110 ip4_full_reass_per_thread_t *rt = &rm->per_thread_data[vm->thread_index];
Klement Sekera4c533132018-02-22 11:41:12 +01001111 clib_spinlock_lock (&rt->lock);
Klement Sekera75e7d132017-09-20 08:26:30 +02001112
1113 n_left_from = frame->n_vectors;
1114 next_index = node->cached_next_index;
Klement Sekeraf883f6a2019-02-13 11:01:32 +01001115 while (n_left_from > 0)
Klement Sekera75e7d132017-09-20 08:26:30 +02001116 {
1117 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
1118
Klement Sekera75e7d132017-09-20 08:26:30 +02001119 while (n_left_from > 0 && n_left_to_next > 0)
1120 {
1121 u32 bi0;
1122 vlib_buffer_t *b0;
Klement Sekera4c533132018-02-22 11:41:12 +01001123 u32 next0;
1124 u32 error0 = IP4_ERROR_NONE;
Klement Sekera75e7d132017-09-20 08:26:30 +02001125
1126 bi0 = from[0];
1127 b0 = vlib_get_buffer (vm, bi0);
1128
1129 ip4_header_t *ip0 = vlib_buffer_get_current (b0);
Klement Sekera4c533132018-02-22 11:41:12 +01001130 if (!ip4_get_fragment_more (ip0) && !ip4_get_fragment_offset (ip0))
Klement Sekera75e7d132017-09-20 08:26:30 +02001131 {
Klement Sekera4c533132018-02-22 11:41:12 +01001132 // this is a whole packet - no fragmentation
Klement Sekerafe8371f2020-09-10 12:03:54 +00001133 if (CUSTOM != type)
Klement Sekera4c533132018-02-22 11:41:12 +01001134 {
Klement Sekera896c8962019-06-24 11:52:49 +00001135 next0 = IP4_FULL_REASS_NEXT_INPUT;
Klement Sekera4c533132018-02-22 11:41:12 +01001136 }
1137 else
1138 {
1139 next0 = vnet_buffer (b0)->ip.reass.next_index;
1140 }
Klement Sekera896c8962019-06-24 11:52:49 +00001141 goto packet_enqueue;
1142 }
1143 const u32 fragment_first = ip4_get_fragment_offset_bytes (ip0);
1144 const u32 fragment_length =
1145 clib_net_to_host_u16 (ip0->length) - ip4_header_bytes (ip0);
1146 const u32 fragment_last = fragment_first + fragment_length - 1;
1147 if (fragment_first > fragment_last || fragment_first + fragment_length > UINT16_MAX - 20 || (fragment_length < 8 && ip4_get_fragment_more (ip0))) // 8 is minimum frag length per RFC 791
1148 {
1149 next0 = IP4_FULL_REASS_NEXT_DROP;
1150 error0 = IP4_ERROR_REASS_MALFORMED_PACKET;
1151 goto packet_enqueue;
1152 }
1153 ip4_full_reass_kv_t kv;
1154 u8 do_handoff = 0;
1155
1156 kv.k.as_u64[0] =
1157 (u64) vec_elt (ip4_main.fib_index_by_sw_if_index,
1158 vnet_buffer (b0)->sw_if_index[VLIB_RX]) |
1159 (u64) ip0->src_address.as_u32 << 32;
1160 kv.k.as_u64[1] =
1161 (u64) ip0->dst_address.
1162 as_u32 | (u64) ip0->fragment_id << 32 | (u64) ip0->protocol << 48;
1163
1164 ip4_full_reass_t *reass =
1165 ip4_full_reass_find_or_create (vm, node, rm, rt, &kv,
1166 &do_handoff);
1167
1168 if (reass)
1169 {
1170 const u32 fragment_first = ip4_get_fragment_offset_bytes (ip0);
1171 if (0 == fragment_first)
1172 {
1173 reass->sendout_thread_index = vm->thread_index;
1174 }
1175 }
1176
1177 if (PREDICT_FALSE (do_handoff))
1178 {
1179 next0 = IP4_FULL_REASS_NEXT_HANDOFF;
Klement Sekerade34c352019-06-25 11:19:22 +00001180 vnet_buffer (b0)->ip.reass.owner_thread_index =
1181 kv.v.memory_owner_thread_index;
Klement Sekera896c8962019-06-24 11:52:49 +00001182 }
1183 else if (reass)
1184 {
1185 u32 handoff_thread_idx;
1186 switch (ip4_full_reass_update
1187 (vm, node, rm, rt, reass, &bi0, &next0,
Klement Sekerafe8371f2020-09-10 12:03:54 +00001188 &error0, CUSTOM == type, &handoff_thread_idx))
Klement Sekera896c8962019-06-24 11:52:49 +00001189 {
1190 case IP4_REASS_RC_OK:
1191 /* nothing to do here */
1192 break;
1193 case IP4_REASS_RC_HANDOFF:
1194 next0 = IP4_FULL_REASS_NEXT_HANDOFF;
1195 b0 = vlib_get_buffer (vm, bi0);
Klement Sekerade34c352019-06-25 11:19:22 +00001196 vnet_buffer (b0)->ip.reass.owner_thread_index =
1197 handoff_thread_idx;
Klement Sekera896c8962019-06-24 11:52:49 +00001198 break;
1199 case IP4_REASS_RC_TOO_MANY_FRAGMENTS:
1200 vlib_node_increment_counter (vm, node->node_index,
1201 IP4_ERROR_REASS_FRAGMENT_CHAIN_TOO_LONG,
1202 1);
1203 ip4_full_reass_drop_all (vm, node, rm, reass);
1204 ip4_full_reass_free (rm, rt, reass);
1205 goto next_packet;
1206 break;
1207 case IP4_REASS_RC_NO_BUF:
1208 vlib_node_increment_counter (vm, node->node_index,
1209 IP4_ERROR_REASS_NO_BUF, 1);
1210 ip4_full_reass_drop_all (vm, node, rm, reass);
1211 ip4_full_reass_free (rm, rt, reass);
1212 goto next_packet;
1213 break;
1214 case IP4_REASS_RC_INTERNAL_ERROR:
1215 /* drop everything and start with a clean slate */
1216 vlib_node_increment_counter (vm, node->node_index,
1217 IP4_ERROR_REASS_INTERNAL_ERROR,
1218 1);
1219 ip4_full_reass_drop_all (vm, node, rm, reass);
1220 ip4_full_reass_free (rm, rt, reass);
1221 goto next_packet;
1222 break;
1223 }
Klement Sekera75e7d132017-09-20 08:26:30 +02001224 }
1225 else
1226 {
Klement Sekera896c8962019-06-24 11:52:49 +00001227 next0 = IP4_FULL_REASS_NEXT_DROP;
1228 error0 = IP4_ERROR_REASS_LIMIT_REACHED;
Klement Sekera4c533132018-02-22 11:41:12 +01001229 }
Klement Sekera75e7d132017-09-20 08:26:30 +02001230
Klement Sekera896c8962019-06-24 11:52:49 +00001231
1232 packet_enqueue:
Klement Sekera896c8962019-06-24 11:52:49 +00001233
Klement Sekera75e7d132017-09-20 08:26:30 +02001234 if (bi0 != ~0)
1235 {
1236 to_next[0] = bi0;
1237 to_next += 1;
1238 n_left_to_next -= 1;
Benoît Gannecf7803d2019-10-23 13:53:49 +02001239
1240 /* bi0 might have been updated by reass_finalize, reload */
1241 b0 = vlib_get_buffer (vm, bi0);
Klement Sekera1766ddc2020-03-30 16:59:38 +02001242 if (IP4_ERROR_NONE != error0)
1243 {
1244 b0->error = node->errors[error0];
1245 }
Benoît Gannecf7803d2019-10-23 13:53:49 +02001246
Klement Sekera896c8962019-06-24 11:52:49 +00001247 if (next0 == IP4_FULL_REASS_NEXT_HANDOFF)
Klement Sekera630ab582019-07-19 09:14:19 +00001248 {
1249 if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
1250 {
Klement Sekerade34c352019-06-25 11:19:22 +00001251 ip4_full_reass_add_trace (vm, node, rm, NULL, bi0,
1252 HANDOFF, 0,
1253 vnet_buffer (b0)->ip.
1254 reass.owner_thread_index);
Klement Sekera630ab582019-07-19 09:14:19 +00001255 }
1256 }
Klement Sekerafe8371f2020-09-10 12:03:54 +00001257 else if (FEATURE == type && IP4_ERROR_NONE == error0)
Klement Sekera4c533132018-02-22 11:41:12 +01001258 {
Damjan Marion7d98a122018-07-19 20:42:08 +02001259 vnet_feature_next (&next0, b0);
Klement Sekera4c533132018-02-22 11:41:12 +01001260 }
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001261 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1262 to_next, n_left_to_next,
1263 bi0, next0);
Klement Sekera75e7d132017-09-20 08:26:30 +02001264 IP4_REASS_DEBUG_BUFFER (bi0, enqueue_next);
1265 }
1266
Klement Sekerad0f70a32018-12-14 17:24:13 +01001267 next_packet:
Klement Sekera75e7d132017-09-20 08:26:30 +02001268 from += 1;
1269 n_left_from -= 1;
1270 }
1271
1272 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1273 }
1274
Klement Sekera4c533132018-02-22 11:41:12 +01001275 clib_spinlock_unlock (&rt->lock);
Klement Sekera75e7d132017-09-20 08:26:30 +02001276 return frame->n_vectors;
1277}
1278
Klement Sekera896c8962019-06-24 11:52:49 +00001279static char *ip4_full_reass_error_strings[] = {
Klement Sekera75e7d132017-09-20 08:26:30 +02001280#define _(sym, string) string,
1281 foreach_ip4_error
1282#undef _
1283};
1284
Klement Sekera896c8962019-06-24 11:52:49 +00001285VLIB_NODE_FN (ip4_full_reass_node) (vlib_main_t * vm,
1286 vlib_node_runtime_t * node,
1287 vlib_frame_t * frame)
Klement Sekera4c533132018-02-22 11:41:12 +01001288{
Klement Sekerafe8371f2020-09-10 12:03:54 +00001289 return ip4_full_reass_inline (vm, node, frame, NORMAL);
Klement Sekera4c533132018-02-22 11:41:12 +01001290}
1291
Klement Sekera75e7d132017-09-20 08:26:30 +02001292/* *INDENT-OFF* */
Klement Sekera896c8962019-06-24 11:52:49 +00001293VLIB_REGISTER_NODE (ip4_full_reass_node) = {
1294 .name = "ip4-full-reassembly",
Klement Sekera75e7d132017-09-20 08:26:30 +02001295 .vector_size = sizeof (u32),
Klement Sekera896c8962019-06-24 11:52:49 +00001296 .format_trace = format_ip4_full_reass_trace,
1297 .n_errors = ARRAY_LEN (ip4_full_reass_error_strings),
1298 .error_strings = ip4_full_reass_error_strings,
1299 .n_next_nodes = IP4_FULL_REASS_N_NEXT,
Klement Sekera75e7d132017-09-20 08:26:30 +02001300 .next_nodes =
1301 {
Klement Sekera896c8962019-06-24 11:52:49 +00001302 [IP4_FULL_REASS_NEXT_INPUT] = "ip4-input",
1303 [IP4_FULL_REASS_NEXT_DROP] = "ip4-drop",
1304 [IP4_FULL_REASS_NEXT_HANDOFF] = "ip4-full-reassembly-handoff",
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001305
Klement Sekera75e7d132017-09-20 08:26:30 +02001306 },
1307};
1308/* *INDENT-ON* */
1309
Klement Sekera896c8962019-06-24 11:52:49 +00001310VLIB_NODE_FN (ip4_full_reass_node_feature) (vlib_main_t * vm,
1311 vlib_node_runtime_t * node,
1312 vlib_frame_t * frame)
Klement Sekera4c533132018-02-22 11:41:12 +01001313{
Klement Sekerafe8371f2020-09-10 12:03:54 +00001314 return ip4_full_reass_inline (vm, node, frame, FEATURE);
Klement Sekera4c533132018-02-22 11:41:12 +01001315}
1316
1317/* *INDENT-OFF* */
Klement Sekera896c8962019-06-24 11:52:49 +00001318VLIB_REGISTER_NODE (ip4_full_reass_node_feature) = {
1319 .name = "ip4-full-reassembly-feature",
Klement Sekera4c533132018-02-22 11:41:12 +01001320 .vector_size = sizeof (u32),
Klement Sekera896c8962019-06-24 11:52:49 +00001321 .format_trace = format_ip4_full_reass_trace,
1322 .n_errors = ARRAY_LEN (ip4_full_reass_error_strings),
1323 .error_strings = ip4_full_reass_error_strings,
1324 .n_next_nodes = IP4_FULL_REASS_N_NEXT,
Klement Sekera4c533132018-02-22 11:41:12 +01001325 .next_nodes =
1326 {
Klement Sekera896c8962019-06-24 11:52:49 +00001327 [IP4_FULL_REASS_NEXT_INPUT] = "ip4-input",
1328 [IP4_FULL_REASS_NEXT_DROP] = "ip4-drop",
1329 [IP4_FULL_REASS_NEXT_HANDOFF] = "ip4-full-reass-feature-hoff",
Klement Sekera4c533132018-02-22 11:41:12 +01001330 },
1331};
1332/* *INDENT-ON* */
1333
Klement Sekera4c533132018-02-22 11:41:12 +01001334/* *INDENT-OFF* */
Klement Sekera896c8962019-06-24 11:52:49 +00001335VNET_FEATURE_INIT (ip4_full_reass_feature, static) = {
Klement Sekera4c533132018-02-22 11:41:12 +01001336 .arc_name = "ip4-unicast",
Klement Sekera896c8962019-06-24 11:52:49 +00001337 .node_name = "ip4-full-reassembly-feature",
Neale Ranns14046982019-07-29 14:49:52 +00001338 .runs_before = VNET_FEATURES ("ip4-lookup",
Neale Ranns2be3eb62019-08-02 01:17:13 -07001339 "ipsec4-input-feature"),
Klement Sekera4c533132018-02-22 11:41:12 +01001340 .runs_after = 0,
1341};
1342/* *INDENT-ON* */
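
/*
 * Note: the feature is placed on the ip4-unicast arc ahead of ip4-lookup and
 * ipsec4-input-feature, so downstream features on the arc see reassembled
 * datagrams rather than individual fragments.
 */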
1343
Klement Sekerafe8371f2020-09-10 12:03:54 +00001344VLIB_NODE_FN (ip4_full_reass_node_custom) (vlib_main_t * vm,
1345 vlib_node_runtime_t * node,
1346 vlib_frame_t * frame)
1347{
1348 return ip4_full_reass_inline (vm, node, frame, CUSTOM);
1349}
1350
1351/* *INDENT-OFF* */
1352VLIB_REGISTER_NODE (ip4_full_reass_node_custom) = {
1353 .name = "ip4-full-reassembly-custom",
1354 .vector_size = sizeof (u32),
1355 .format_trace = format_ip4_full_reass_trace,
1356 .n_errors = ARRAY_LEN (ip4_full_reass_error_strings),
1357 .error_strings = ip4_full_reass_error_strings,
1358 .n_next_nodes = IP4_FULL_REASS_N_NEXT,
1359 .next_nodes =
1360 {
1361 [IP4_FULL_REASS_NEXT_INPUT] = "ip4-input",
1362 [IP4_FULL_REASS_NEXT_DROP] = "ip4-drop",
1363 [IP4_FULL_REASS_NEXT_HANDOFF] = "ip4-full-reass-custom-hoff",
1364 },
1365};
1366/* *INDENT-ON* */
1367
1368/* *INDENT-OFF* */
1369VNET_FEATURE_INIT (ip4_full_reass_custom, static) = {
1370 .arc_name = "ip4-unicast",
1371 .node_name = "ip4-full-reassembly-feature",
1372 .runs_before = VNET_FEATURES ("ip4-lookup",
1373 "ipsec4-input-feature"),
1374 .runs_after = 0,
1375};
1376
1377/* *INDENT-ON* */
1378
Filip Tehlar26ea14e2019-03-11 05:30:21 -07001379#ifndef CLIB_MARCH_VARIANT
Klement Sekerafe8371f2020-09-10 12:03:54 +00001380uword
1381ip4_full_reass_custom_register_next_node (uword node_index)
1382{
1383 return vlib_node_add_next (vlib_get_main (),
1384 ip4_full_reass_node_custom.index, node_index);
1385}
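
/*
 * Illustrative sketch (not from the original source) of how a consumer might
 * use the registration call above; "my-next-node" and my_reass_next_index
 * are assumed names for the example only:
 *
 *   static u32 my_reass_next_index;
 *
 *   static clib_error_t *
 *   my_plugin_init (vlib_main_t * vm)
 *   {
 *     vlib_node_t *n = vlib_get_node_by_name (vm, (u8 *) "my-next-node");
 *     my_reass_next_index =
 *       ip4_full_reass_custom_register_next_node (n->index);
 *     return 0;
 *   }
 *
 * The returned next-node index can then be stored for later use as the
 * reassembly next index when fragments are handed to the
 * ip4-full-reassembly-custom node.
 */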
1386
Klement Sekera4c533132018-02-22 11:41:12 +01001387always_inline u32
Klement Sekera896c8962019-06-24 11:52:49 +00001388ip4_full_reass_get_nbuckets ()
Klement Sekera75e7d132017-09-20 08:26:30 +02001389{
Klement Sekera896c8962019-06-24 11:52:49 +00001390 ip4_full_reass_main_t *rm = &ip4_full_reass_main;
Klement Sekera75e7d132017-09-20 08:26:30 +02001391 u32 nbuckets;
1392 u8 i;
1393
1394 nbuckets = (u32) (rm->max_reass_n / IP4_REASS_HT_LOAD_FACTOR);
1395
1396 for (i = 0; i < 31; i++)
1397 if ((1 << i) >= nbuckets)
1398 break;
1399 nbuckets = 1 << i;
1400
1401 return nbuckets;
1402}
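
/*
 * Worked example of the sizing above: with the default max_reass_n of 1024
 * and IP4_REASS_HT_LOAD_FACTOR of 0.75, 1024 / 0.75 is roughly 1365, which
 * the loop rounds up to the next power of two, i.e. 2048 buckets.
 */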
Filip Tehlar26ea14e2019-03-11 05:30:21 -07001403#endif /* CLIB_MARCH_VARIANT */
Klement Sekera75e7d132017-09-20 08:26:30 +02001404
1405typedef enum
1406{
1407 IP4_EVENT_CONFIG_CHANGED = 1,
Klement Sekera896c8962019-06-24 11:52:49 +00001408} ip4_full_reass_event_t;
Klement Sekera75e7d132017-09-20 08:26:30 +02001409
1410typedef struct
1411{
1412 int failure;
Klement Sekera8dcfed52018-06-28 11:16:15 +02001413 clib_bihash_16_8_t *new_hash;
Klement Sekera75e7d132017-09-20 08:26:30 +02001414} ip4_rehash_cb_ctx;
1415
Filip Tehlar26ea14e2019-03-11 05:30:21 -07001416#ifndef CLIB_MARCH_VARIANT
Neale Rannsf50bac12019-12-06 05:53:17 +00001417static int
Klement Sekera8dcfed52018-06-28 11:16:15 +02001418ip4_rehash_cb (clib_bihash_kv_16_8_t * kv, void *_ctx)
Klement Sekera75e7d132017-09-20 08:26:30 +02001419{
1420 ip4_rehash_cb_ctx *ctx = _ctx;
Klement Sekera8dcfed52018-06-28 11:16:15 +02001421 if (clib_bihash_add_del_16_8 (ctx->new_hash, kv, 1))
Klement Sekera75e7d132017-09-20 08:26:30 +02001422 {
1423 ctx->failure = 1;
1424 }
Neale Rannsf50bac12019-12-06 05:53:17 +00001425 return (BIHASH_WALK_CONTINUE);
Klement Sekera75e7d132017-09-20 08:26:30 +02001426}
1427
Klement Sekera4c533132018-02-22 11:41:12 +01001428static void
Klement Sekera896c8962019-06-24 11:52:49 +00001429ip4_full_reass_set_params (u32 timeout_ms, u32 max_reassemblies,
1430 u32 max_reassembly_length,
1431 u32 expire_walk_interval_ms)
Klement Sekera4c533132018-02-22 11:41:12 +01001432{
Klement Sekera896c8962019-06-24 11:52:49 +00001433 ip4_full_reass_main.timeout_ms = timeout_ms;
1434 ip4_full_reass_main.timeout = (f64) timeout_ms / (f64) MSEC_PER_SEC;
1435 ip4_full_reass_main.max_reass_n = max_reassemblies;
1436 ip4_full_reass_main.max_reass_len = max_reassembly_length;
1437 ip4_full_reass_main.expire_walk_interval_ms = expire_walk_interval_ms;
Klement Sekera4c533132018-02-22 11:41:12 +01001438}
1439
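/*
 * Apply new reassembly parameters. The expire-walk process is signalled so
 * the new walk interval takes effect promptly, and the bihash is rehashed
 * into a larger table when the required bucket count grows; it is never
 * shrunk here.
 */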
Klement Sekera75e7d132017-09-20 08:26:30 +02001440vnet_api_error_t
Klement Sekera896c8962019-06-24 11:52:49 +00001441ip4_full_reass_set (u32 timeout_ms, u32 max_reassemblies,
1442 u32 max_reassembly_length, u32 expire_walk_interval_ms)
Klement Sekera75e7d132017-09-20 08:26:30 +02001443{
Klement Sekera896c8962019-06-24 11:52:49 +00001444 u32 old_nbuckets = ip4_full_reass_get_nbuckets ();
1445 ip4_full_reass_set_params (timeout_ms, max_reassemblies,
1446 max_reassembly_length, expire_walk_interval_ms);
1447 vlib_process_signal_event (ip4_full_reass_main.vlib_main,
1448 ip4_full_reass_main.ip4_full_reass_expire_node_idx,
Klement Sekera75e7d132017-09-20 08:26:30 +02001449 IP4_EVENT_CONFIG_CHANGED, 0);
Klement Sekera896c8962019-06-24 11:52:49 +00001450 u32 new_nbuckets = ip4_full_reass_get_nbuckets ();
1451 if (ip4_full_reass_main.max_reass_n > 0 && new_nbuckets > old_nbuckets)
Klement Sekera75e7d132017-09-20 08:26:30 +02001452 {
Klement Sekera8dcfed52018-06-28 11:16:15 +02001453 clib_bihash_16_8_t new_hash;
Dave Barachb7b92992018-10-17 10:38:51 -04001454 clib_memset (&new_hash, 0, sizeof (new_hash));
Klement Sekera75e7d132017-09-20 08:26:30 +02001455 ip4_rehash_cb_ctx ctx;
1456 ctx.failure = 0;
1457 ctx.new_hash = &new_hash;
Klement Sekera896c8962019-06-24 11:52:49 +00001458 clib_bihash_init_16_8 (&new_hash, "ip4-dr", new_nbuckets,
Klement Sekera75e7d132017-09-20 08:26:30 +02001459 new_nbuckets * 1024);
Klement Sekera896c8962019-06-24 11:52:49 +00001460 clib_bihash_foreach_key_value_pair_16_8 (&ip4_full_reass_main.hash,
Klement Sekera75e7d132017-09-20 08:26:30 +02001461 ip4_rehash_cb, &ctx);
1462 if (ctx.failure)
1463 {
Klement Sekera8dcfed52018-06-28 11:16:15 +02001464 clib_bihash_free_16_8 (&new_hash);
Klement Sekera75e7d132017-09-20 08:26:30 +02001465 return -1;
1466 }
1467 else
1468 {
Klement Sekera896c8962019-06-24 11:52:49 +00001469 clib_bihash_free_16_8 (&ip4_full_reass_main.hash);
1470 clib_memcpy_fast (&ip4_full_reass_main.hash, &new_hash,
1471 sizeof (ip4_full_reass_main.hash));
1472 clib_bihash_copied (&ip4_full_reass_main.hash, &new_hash);
Klement Sekera75e7d132017-09-20 08:26:30 +02001473 }
1474 }
1475 return 0;
1476}
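
/*
 * Illustrative call (the values are made up for the example), setting a
 * 100 ms reassembly timeout, up to 2048 concurrent reassemblies, at most 6
 * fragments per reassembly and a 10 s expire walk interval:
 *
 *   ip4_full_reass_set (100, 2048, 6, 10000);
 *
 * The arguments are timeout_ms, max_reassemblies, max_reassembly_length and
 * expire_walk_interval_ms, in that order.
 */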
1477
1478vnet_api_error_t
Klement Sekera896c8962019-06-24 11:52:49 +00001479ip4_full_reass_get (u32 * timeout_ms, u32 * max_reassemblies,
1480 u32 * max_reassembly_length,
1481 u32 * expire_walk_interval_ms)
Klement Sekera75e7d132017-09-20 08:26:30 +02001482{
Klement Sekera896c8962019-06-24 11:52:49 +00001483 *timeout_ms = ip4_full_reass_main.timeout_ms;
1484 *max_reassemblies = ip4_full_reass_main.max_reass_n;
1485 *max_reassembly_length = ip4_full_reass_main.max_reass_len;
1486 *expire_walk_interval_ms = ip4_full_reass_main.expire_walk_interval_ms;
Klement Sekera75e7d132017-09-20 08:26:30 +02001487 return 0;
1488}
1489
Klement Sekera4c533132018-02-22 11:41:12 +01001490static clib_error_t *
Klement Sekera896c8962019-06-24 11:52:49 +00001491ip4_full_reass_init_function (vlib_main_t * vm)
Klement Sekera75e7d132017-09-20 08:26:30 +02001492{
Klement Sekera896c8962019-06-24 11:52:49 +00001493 ip4_full_reass_main_t *rm = &ip4_full_reass_main;
Klement Sekera75e7d132017-09-20 08:26:30 +02001494 clib_error_t *error = 0;
1495 u32 nbuckets;
Dave Barach1403fcd2018-02-05 09:45:43 -05001496 vlib_node_t *node;
Klement Sekera75e7d132017-09-20 08:26:30 +02001497
1498 rm->vlib_main = vm;
Klement Sekera75e7d132017-09-20 08:26:30 +02001499
Juraj Slobodacd806922018-10-10 10:15:54 +02001500 vec_validate (rm->per_thread_data, vlib_num_workers ());
Klement Sekera896c8962019-06-24 11:52:49 +00001501 ip4_full_reass_per_thread_t *rt;
Klement Sekera4c533132018-02-22 11:41:12 +01001502 vec_foreach (rt, rm->per_thread_data)
1503 {
1504 clib_spinlock_init (&rt->lock);
1505 pool_alloc (rt->pool, rm->max_reass_n);
1506 }
Dave Barach1403fcd2018-02-05 09:45:43 -05001507
Klement Sekera896c8962019-06-24 11:52:49 +00001508 node = vlib_get_node_by_name (vm, (u8 *) "ip4-full-reassembly-expire-walk");
Dave Barach1403fcd2018-02-05 09:45:43 -05001509 ASSERT (node);
Klement Sekera896c8962019-06-24 11:52:49 +00001510 rm->ip4_full_reass_expire_node_idx = node->index;
Dave Barach1403fcd2018-02-05 09:45:43 -05001511
Klement Sekera896c8962019-06-24 11:52:49 +00001512 ip4_full_reass_set_params (IP4_REASS_TIMEOUT_DEFAULT_MS,
1513 IP4_REASS_MAX_REASSEMBLIES_DEFAULT,
1514 IP4_REASS_MAX_REASSEMBLY_LENGTH_DEFAULT,
1515 IP4_REASS_EXPIRE_WALK_INTERVAL_DEFAULT_MS);
Klement Sekera3ecc2212018-03-27 10:34:43 +02001516
Klement Sekera896c8962019-06-24 11:52:49 +00001517 nbuckets = ip4_full_reass_get_nbuckets ();
1518 clib_bihash_init_16_8 (&rm->hash, "ip4-dr", nbuckets, nbuckets * 1024);
Klement Sekera75e7d132017-09-20 08:26:30 +02001519
Dave Barach1403fcd2018-02-05 09:45:43 -05001520 node = vlib_get_node_by_name (vm, (u8 *) "ip4-drop");
Klement Sekera75e7d132017-09-20 08:26:30 +02001521 ASSERT (node);
1522 rm->ip4_drop_idx = node->index;
Klement Sekera4c533132018-02-22 11:41:12 +01001523
Klement Sekera896c8962019-06-24 11:52:49 +00001524 rm->fq_index = vlib_frame_queue_main_init (ip4_full_reass_node.index, 0);
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001525 rm->fq_feature_index =
Klement Sekera896c8962019-06-24 11:52:49 +00001526 vlib_frame_queue_main_init (ip4_full_reass_node_feature.index, 0);
Klement Sekerafe8371f2020-09-10 12:03:54 +00001527 rm->fq_custom_index =
1528 vlib_frame_queue_main_init (ip4_full_reass_node_custom.index, 0);
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001529
Klement Sekera7b2e9fb2019-10-01 13:00:22 +00001530 rm->feature_use_refcount_per_intf = NULL;
Klement Sekera75e7d132017-09-20 08:26:30 +02001531 return error;
1532}
1533
Klement Sekera896c8962019-06-24 11:52:49 +00001534VLIB_INIT_FUNCTION (ip4_full_reass_init_function);
Filip Tehlar26ea14e2019-03-11 05:30:21 -07001535#endif /* CLIB_MARCH_VARIANT */
Klement Sekera75e7d132017-09-20 08:26:30 +02001536
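/*
 * Process node: periodically walks every per-thread reassembly pool and
 * drops any reassembly whose last fragment was heard more than the
 * configured timeout ago. The wait time is recomputed on each iteration, so
 * an IP4_EVENT_CONFIG_CHANGED signal makes a new walk interval take effect
 * on the next wakeup.
 */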
1537static uword
Klement Sekera896c8962019-06-24 11:52:49 +00001538ip4_full_reass_walk_expired (vlib_main_t * vm,
1539 vlib_node_runtime_t * node, vlib_frame_t * f)
Klement Sekera75e7d132017-09-20 08:26:30 +02001540{
Klement Sekera896c8962019-06-24 11:52:49 +00001541 ip4_full_reass_main_t *rm = &ip4_full_reass_main;
Klement Sekera75e7d132017-09-20 08:26:30 +02001542 uword event_type, *event_data = 0;
1543
1544 while (true)
1545 {
1546 vlib_process_wait_for_event_or_clock (vm,
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001547 (f64)
1548 rm->expire_walk_interval_ms /
1549 (f64) MSEC_PER_SEC);
Klement Sekera75e7d132017-09-20 08:26:30 +02001550 event_type = vlib_process_get_events (vm, &event_data);
1551
1552 switch (event_type)
1553 {
1554 case ~0: /* no events => timeout */
1555 /* nothing to do here */
1556 break;
1557 case IP4_EVENT_CONFIG_CHANGED:
1558 break;
1559 default:
1560 clib_warning ("BUG: event type 0x%wx", event_type);
1561 break;
1562 }
1563 f64 now = vlib_time_now (vm);
1564
Klement Sekera896c8962019-06-24 11:52:49 +00001565 ip4_full_reass_t *reass;
Klement Sekera75e7d132017-09-20 08:26:30 +02001566 int *pool_indexes_to_free = NULL;
1567
Klement Sekera4c533132018-02-22 11:41:12 +01001568 uword thread_index = 0;
Klement Sekera75e7d132017-09-20 08:26:30 +02001569 int index;
Juraj Slobodacd806922018-10-10 10:15:54 +02001570 const uword nthreads = vlib_num_workers () + 1;
Klement Sekera4c533132018-02-22 11:41:12 +01001571 for (thread_index = 0; thread_index < nthreads; ++thread_index)
1572 {
Klement Sekera896c8962019-06-24 11:52:49 +00001573 ip4_full_reass_per_thread_t *rt =
1574 &rm->per_thread_data[thread_index];
Klement Sekera4c533132018-02-22 11:41:12 +01001575 clib_spinlock_lock (&rt->lock);
1576
1577 vec_reset_length (pool_indexes_to_free);
1578 /* *INDENT-OFF* */
Damjan Marionb2c31b62020-12-13 21:47:40 +01001579 pool_foreach_index (index, rt->pool) {
Klement Sekera4c533132018-02-22 11:41:12 +01001580 reass = pool_elt_at_index (rt->pool, index);
1581 if (now > reass->last_heard + rm->timeout)
1582 {
1583 vec_add1 (pool_indexes_to_free, index);
1584 }
Damjan Marionb2c31b62020-12-13 21:47:40 +01001585 }
Klement Sekera4c533132018-02-22 11:41:12 +01001586 /* *INDENT-ON* */
1587 int *i;
1588 /* *INDENT-OFF* */
1589 vec_foreach (i, pool_indexes_to_free)
1590 {
Klement Sekera896c8962019-06-24 11:52:49 +00001591 ip4_full_reass_t *reass = pool_elt_at_index (rt->pool, i[0]);
1592 ip4_full_reass_drop_all (vm, node, rm, reass);
1593 ip4_full_reass_free (rm, rt, reass);
Klement Sekera4c533132018-02-22 11:41:12 +01001594 }
1595 /* *INDENT-ON* */
1596
1597 clib_spinlock_unlock (&rt->lock);
1598 }
Klement Sekera75e7d132017-09-20 08:26:30 +02001599
Klement Sekera75e7d132017-09-20 08:26:30 +02001600 vec_free (pool_indexes_to_free);
Klement Sekera75e7d132017-09-20 08:26:30 +02001601 if (event_data)
1602 {
1603 _vec_len (event_data) = 0;
1604 }
1605 }
1606
1607 return 0;
1608}
1609
Klement Sekera75e7d132017-09-20 08:26:30 +02001610/* *INDENT-OFF* */
Klement Sekera896c8962019-06-24 11:52:49 +00001611VLIB_REGISTER_NODE (ip4_full_reass_expire_node) = {
1612 .function = ip4_full_reass_walk_expired,
Klement Sekera75e7d132017-09-20 08:26:30 +02001613 .type = VLIB_NODE_TYPE_PROCESS,
Klement Sekera896c8962019-06-24 11:52:49 +00001614 .name = "ip4-full-reassembly-expire-walk",
1615 .format_trace = format_ip4_full_reass_trace,
1616 .n_errors = ARRAY_LEN (ip4_full_reass_error_strings),
1617 .error_strings = ip4_full_reass_error_strings,
Klement Sekera75e7d132017-09-20 08:26:30 +02001618
1619};
1620/* *INDENT-ON* */
1621
1622static u8 *
Klement Sekera896c8962019-06-24 11:52:49 +00001623format_ip4_full_reass_key (u8 * s, va_list * args)
Klement Sekera75e7d132017-09-20 08:26:30 +02001624{
Klement Sekera896c8962019-06-24 11:52:49 +00001625 ip4_full_reass_key_t *key = va_arg (*args, ip4_full_reass_key_t *);
1626 s =
1627 format (s,
1628 "xx_id: %u, src: %U, dst: %U, frag_id: %u, proto: %u",
1629 key->xx_id, format_ip4_address, &key->src, format_ip4_address,
1630 &key->dst, clib_net_to_host_u16 (key->frag_id), key->proto);
Klement Sekera75e7d132017-09-20 08:26:30 +02001631 return s;
1632}
1633
1634static u8 *
1635format_ip4_reass (u8 * s, va_list * args)
1636{
1637 vlib_main_t *vm = va_arg (*args, vlib_main_t *);
Klement Sekera896c8962019-06-24 11:52:49 +00001638 ip4_full_reass_t *reass = va_arg (*args, ip4_full_reass_t *);
Klement Sekera75e7d132017-09-20 08:26:30 +02001639
Klement Sekera4c533132018-02-22 11:41:12 +01001640 s = format (s, "ID: %lu, key: %U\n first_bi: %u, data_len: %u, "
Klement Sekera75e7d132017-09-20 08:26:30 +02001641 "last_packet_octet: %u, trace_op_counter: %u\n",
Klement Sekera896c8962019-06-24 11:52:49 +00001642 reass->id, format_ip4_full_reass_key, &reass->key,
1643 reass->first_bi, reass->data_len,
1644 reass->last_packet_octet, reass->trace_op_counter);
1645
Klement Sekera75e7d132017-09-20 08:26:30 +02001646 u32 bi = reass->first_bi;
1647 u32 counter = 0;
1648 while (~0 != bi)
1649 {
1650 vlib_buffer_t *b = vlib_get_buffer (vm, bi);
1651 vnet_buffer_opaque_t *vnb = vnet_buffer (b);
Klement Sekera896c8962019-06-24 11:52:49 +00001652 s =
1653 format (s,
1654 " #%03u: range: [%u, %u], bi: %u, off: %d, len: %u, "
1655 "fragment[%u, %u]\n", counter, vnb->ip.reass.range_first,
1656 vnb->ip.reass.range_last, bi,
1657 ip4_full_reass_buffer_get_data_offset (b),
1658 ip4_full_reass_buffer_get_data_len (b),
1659 vnb->ip.reass.fragment_first, vnb->ip.reass.fragment_last);
Klement Sekera75e7d132017-09-20 08:26:30 +02001660 if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
1661 {
1662 bi = b->next_buffer;
1663 }
1664 else
1665 {
1666 bi = ~0;
1667 }
1668 }
1669 return s;
1670}
1671
1672static clib_error_t *
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001673show_ip4_reass (vlib_main_t * vm,
1674 unformat_input_t * input,
Klement Sekera75e7d132017-09-20 08:26:30 +02001675 CLIB_UNUSED (vlib_cli_command_t * lmd))
1676{
Klement Sekera896c8962019-06-24 11:52:49 +00001677 ip4_full_reass_main_t *rm = &ip4_full_reass_main;
Klement Sekera75e7d132017-09-20 08:26:30 +02001678
1679 vlib_cli_output (vm, "---------------------");
1680 vlib_cli_output (vm, "IP4 reassembly status");
1681 vlib_cli_output (vm, "---------------------");
Klement Sekera4c533132018-02-22 11:41:12 +01001682 bool details = false;
Klement Sekera75e7d132017-09-20 08:26:30 +02001683 if (unformat (input, "details"))
1684 {
Klement Sekera4c533132018-02-22 11:41:12 +01001685 details = true;
1686 }
1687
1688 u32 sum_reass_n = 0;
Klement Sekera896c8962019-06-24 11:52:49 +00001689 ip4_full_reass_t *reass;
Klement Sekera4c533132018-02-22 11:41:12 +01001690 uword thread_index;
Juraj Slobodacd806922018-10-10 10:15:54 +02001691 const uword nthreads = vlib_num_workers () + 1;
Klement Sekera4c533132018-02-22 11:41:12 +01001692 for (thread_index = 0; thread_index < nthreads; ++thread_index)
1693 {
Klement Sekera896c8962019-06-24 11:52:49 +00001694 ip4_full_reass_per_thread_t *rt = &rm->per_thread_data[thread_index];
Klement Sekera4c533132018-02-22 11:41:12 +01001695 clib_spinlock_lock (&rt->lock);
1696 if (details)
1697 {
1698 /* *INDENT-OFF* */
Damjan Marionb2c31b62020-12-13 21:47:40 +01001699 pool_foreach (reass, rt->pool) {
Klement Sekera4c533132018-02-22 11:41:12 +01001700 vlib_cli_output (vm, "%U", format_ip4_reass, vm, reass);
Damjan Marionb2c31b62020-12-13 21:47:40 +01001701 }
Klement Sekera4c533132018-02-22 11:41:12 +01001702 /* *INDENT-ON* */
1703 }
1704 sum_reass_n += rt->reass_n;
Klement Sekera4c533132018-02-22 11:41:12 +01001705 clib_spinlock_unlock (&rt->lock);
Klement Sekera75e7d132017-09-20 08:26:30 +02001706 }
1707 vlib_cli_output (vm, "---------------------");
Vladimir Ratnikova877cf92019-12-21 06:27:52 -05001708 vlib_cli_output (vm, "Current full IP4 reassemblies count: %lu\n",
Klement Sekera4c533132018-02-22 11:41:12 +01001709 (long unsigned) sum_reass_n);
Klement Sekera75e7d132017-09-20 08:26:30 +02001710 vlib_cli_output (vm,
Vladimir Ratnikova877cf92019-12-21 06:27:52 -05001711 "Maximum configured concurrent full IP4 reassemblies per worker-thread: %lu\n",
Klement Sekera75e7d132017-09-20 08:26:30 +02001712 (long unsigned) rm->max_reass_n);
Vladimir Ratnikova877cf92019-12-21 06:27:52 -05001713 vlib_cli_output (vm,
1714 "Maximum configured full IP4 reassembly timeout: %lums\n",
1715 (long unsigned) rm->timeout_ms);
1716 vlib_cli_output (vm,
1717 "Maximum configured full IP4 reassembly expire walk interval: %lums\n",
1718 (long unsigned) rm->expire_walk_interval_ms);
Klement Sekera75e7d132017-09-20 08:26:30 +02001719 return 0;
1720}
1721
1722/* *INDENT-OFF* */
Klement Sekera896c8962019-06-24 11:52:49 +00001723VLIB_CLI_COMMAND (show_ip4_full_reass_cmd, static) = {
1724 .path = "show ip4-full-reassembly",
1725 .short_help = "show ip4-full-reassembly [details]",
Klement Sekera75e7d132017-09-20 08:26:30 +02001726 .function = show_ip4_reass,
1727};
1728/* *INDENT-ON* */
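
/*
 * Example debug CLI invocations for the command above:
 *
 *   vpp# show ip4-full-reassembly
 *   vpp# show ip4-full-reassembly details
 *
 * The "details" variant additionally formats every in-progress reassembly on
 * every worker thread.
 */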
1729
Filip Tehlar26ea14e2019-03-11 05:30:21 -07001730#ifndef CLIB_MARCH_VARIANT
Klement Sekera4c533132018-02-22 11:41:12 +01001731vnet_api_error_t
Klement Sekera896c8962019-06-24 11:52:49 +00001732ip4_full_reass_enable_disable (u32 sw_if_index, u8 enable_disable)
Klement Sekera4c533132018-02-22 11:41:12 +01001733{
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001734 return vnet_feature_enable_disable ("ip4-unicast",
Klement Sekera896c8962019-06-24 11:52:49 +00001735 "ip4-full-reassembly-feature",
1736 sw_if_index, enable_disable, 0, 0);
Klement Sekera4c533132018-02-22 11:41:12 +01001737}
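
/*
 * Minimal usage sketch, assuming a valid sw_if_index (1 is an arbitrary
 * example value); the second argument is 1 to enable, 0 to disable:
 *
 *   vnet_api_error_t rv = ip4_full_reass_enable_disable (1, 1);
 */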
Filip Tehlar26ea14e2019-03-11 05:30:21 -07001738#endif /* CLIB_MARCH_VARIANT */
Klement Sekera4c533132018-02-22 11:41:12 +01001739
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001740
Klement Sekera896c8962019-06-24 11:52:49 +00001741#define foreach_ip4_full_reass_handoff_error \
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001742_(CONGESTION_DROP, "congestion drop")
1743
1744
1745typedef enum
1746{
Klement Sekera896c8962019-06-24 11:52:49 +00001747#define _(sym,str) IP4_FULL_REASS_HANDOFF_ERROR_##sym,
1748 foreach_ip4_full_reass_handoff_error
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001749#undef _
Klement Sekera896c8962019-06-24 11:52:49 +00001750 IP4_FULL_REASS_HANDOFF_N_ERROR,
1751} ip4_full_reass_handoff_error_t;
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001752
Klement Sekera896c8962019-06-24 11:52:49 +00001753static char *ip4_full_reass_handoff_error_strings[] = {
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001754#define _(sym,string) string,
Klement Sekera896c8962019-06-24 11:52:49 +00001755 foreach_ip4_full_reass_handoff_error
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001756#undef _
1757};
1758
1759typedef struct
1760{
1761 u32 next_worker_index;
Klement Sekera896c8962019-06-24 11:52:49 +00001762} ip4_full_reass_handoff_trace_t;
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001763
1764static u8 *
Klement Sekera896c8962019-06-24 11:52:49 +00001765format_ip4_full_reass_handoff_trace (u8 * s, va_list * args)
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001766{
1767 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
1768 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
Klement Sekera896c8962019-06-24 11:52:49 +00001769 ip4_full_reass_handoff_trace_t *t =
1770 va_arg (*args, ip4_full_reass_handoff_trace_t *);
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001771
1772 s =
Klement Sekera896c8962019-06-24 11:52:49 +00001773 format (s, "ip4-full-reassembly-handoff: next-worker %d",
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001774 t->next_worker_index);
1775
1776 return s;
1777}
1778
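/*
 * Shared worker-handoff logic for all three node flavours. Each buffer
 * carries the thread that owns its reassembly context in the reassembly
 * opaque; buffers are enqueued to that thread's frame queue and any that
 * cannot be enqueued are counted as congestion drops.
 */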
1779always_inline uword
Klement Sekera896c8962019-06-24 11:52:49 +00001780ip4_full_reass_handoff_node_inline (vlib_main_t * vm,
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001781 vlib_node_runtime_t * node,
Klement Sekerafe8371f2020-09-10 12:03:54 +00001782 vlib_frame_t * frame,
1783 ip4_full_reass_node_type_t type)
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001784{
Klement Sekera896c8962019-06-24 11:52:49 +00001785 ip4_full_reass_main_t *rm = &ip4_full_reass_main;
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001786
1787 vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
1788 u32 n_enq, n_left_from, *from;
1789 u16 thread_indices[VLIB_FRAME_SIZE], *ti;
1790 u32 fq_index;
1791
1792 from = vlib_frame_vector_args (frame);
1793 n_left_from = frame->n_vectors;
1794 vlib_get_buffers (vm, from, bufs, n_left_from);
1795
1796 b = bufs;
1797 ti = thread_indices;
1798
Klement Sekerafe8371f2020-09-10 12:03:54 +00001799 switch (type)
1800 {
1801 case NORMAL:
1802 fq_index = rm->fq_index;
1803 break;
1804 case FEATURE:
1805 fq_index = rm->fq_feature_index;
1806 break;
1807 case CUSTOM:
1808 fq_index = rm->fq_custom_index;
1809 break;
1810 default:
1811 clib_warning ("Unexpected `type' (%d)!", type);
1812 ASSERT (0);
1813 }
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001814
1815 while (n_left_from > 0)
1816 {
Klement Sekerade34c352019-06-25 11:19:22 +00001817 ti[0] = vnet_buffer (b[0])->ip.reass.owner_thread_index;
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001818
1819 if (PREDICT_FALSE
1820 ((node->flags & VLIB_NODE_FLAG_TRACE)
1821 && (b[0]->flags & VLIB_BUFFER_IS_TRACED)))
1822 {
Klement Sekera896c8962019-06-24 11:52:49 +00001823 ip4_full_reass_handoff_trace_t *t =
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001824 vlib_add_trace (vm, node, b[0], sizeof (*t));
1825 t->next_worker_index = ti[0];
1826 }
1827
1828 n_left_from -= 1;
1829 ti += 1;
1830 b += 1;
1831 }
1832 n_enq =
1833 vlib_buffer_enqueue_to_thread (vm, fq_index, from, thread_indices,
1834 frame->n_vectors, 1);
1835
1836 if (n_enq < frame->n_vectors)
1837 vlib_node_increment_counter (vm, node->node_index,
Klement Sekera896c8962019-06-24 11:52:49 +00001838 IP4_FULL_REASS_HANDOFF_ERROR_CONGESTION_DROP,
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001839 frame->n_vectors - n_enq);
1840 return frame->n_vectors;
1841}
1842
Klement Sekera896c8962019-06-24 11:52:49 +00001843VLIB_NODE_FN (ip4_full_reass_handoff_node) (vlib_main_t * vm,
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001844 vlib_node_runtime_t * node,
1845 vlib_frame_t * frame)
1846{
Klement Sekerafe8371f2020-09-10 12:03:54 +00001847 return ip4_full_reass_handoff_node_inline (vm, node, frame, NORMAL);
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001848}
1849
1850
1851/* *INDENT-OFF* */
Klement Sekera896c8962019-06-24 11:52:49 +00001852VLIB_REGISTER_NODE (ip4_full_reass_handoff_node) = {
1853 .name = "ip4-full-reassembly-handoff",
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001854 .vector_size = sizeof (u32),
Klement Sekera896c8962019-06-24 11:52:49 +00001855 .n_errors = ARRAY_LEN(ip4_full_reass_handoff_error_strings),
1856 .error_strings = ip4_full_reass_handoff_error_strings,
1857 .format_trace = format_ip4_full_reass_handoff_trace,
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001858
1859 .n_next_nodes = 1,
1860
1861 .next_nodes = {
1862 [0] = "error-drop",
1863 },
1864};
1865/* *INDENT-ON* */
1866
1867
1868/* *INDENT-OFF* */
Klement Sekera896c8962019-06-24 11:52:49 +00001869VLIB_NODE_FN (ip4_full_reass_feature_handoff_node) (vlib_main_t * vm,
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001870 vlib_node_runtime_t *
1871 node,
1872 vlib_frame_t * frame)
1873{
Klement Sekerafe8371f2020-09-10 12:03:54 +00001874 return ip4_full_reass_handoff_node_inline (vm, node, frame, FEATURE);
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001875}
1876/* *INDENT-ON* */
1877
1878
1879/* *INDENT-OFF* */
Klement Sekera896c8962019-06-24 11:52:49 +00001880VLIB_REGISTER_NODE (ip4_full_reass_feature_handoff_node) = {
1881 .name = "ip4-full-reass-feature-hoff",
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001882 .vector_size = sizeof (u32),
Klement Sekera896c8962019-06-24 11:52:49 +00001883 .n_errors = ARRAY_LEN(ip4_full_reass_handoff_error_strings),
1884 .error_strings = ip4_full_reass_handoff_error_strings,
1885 .format_trace = format_ip4_full_reass_handoff_trace,
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001886
1887 .n_next_nodes = 1,
1888
1889 .next_nodes = {
1890 [0] = "error-drop",
1891 },
1892};
1893/* *INDENT-ON* */
1894
Klement Sekerafe8371f2020-09-10 12:03:54 +00001895/* *INDENT-OFF* */
1896VLIB_NODE_FN (ip4_full_reass_custom_handoff_node) (vlib_main_t * vm,
1897 vlib_node_runtime_t *
1898 node,
1899 vlib_frame_t * frame)
1900{
1901 return ip4_full_reass_handoff_node_inline (vm, node, frame, CUSTOM);
1902}
1903/* *INDENT-ON* */
1904
1905
1906/* *INDENT-OFF* */
1907VLIB_REGISTER_NODE (ip4_full_reass_custom_handoff_node) = {
1908 .name = "ip4-full-reass-custom-hoff",
1909 .vector_size = sizeof (u32),
1910 .n_errors = ARRAY_LEN(ip4_full_reass_handoff_error_strings),
1911 .error_strings = ip4_full_reass_handoff_error_strings,
1912 .format_trace = format_ip4_full_reass_handoff_trace,
1913
1914 .n_next_nodes = 1,
1915
1916 .next_nodes = {
1917 [0] = "error-drop",
1918 },
1919};
1920/* *INDENT-ON* */
1921
Klement Sekera7b2e9fb2019-10-01 13:00:22 +00001922#ifndef CLIB_MARCH_VARIANT
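/*
 * Reference-counted enable/disable of the "ip4-full-reassembly-feature"
 * feature: the feature arc is only touched when the per-interface refcount
 * transitions between zero and non-zero; intermediate transitions return -1
 * without changing the arc.
 */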
1923int
1924ip4_full_reass_enable_disable_with_refcnt (u32 sw_if_index, int is_enable)
1925{
1926 ip4_full_reass_main_t *rm = &ip4_full_reass_main;
1927 vec_validate (rm->feature_use_refcount_per_intf, sw_if_index);
1928 if (is_enable)
1929 {
1930 if (!rm->feature_use_refcount_per_intf[sw_if_index])
1931 {
1932 ++rm->feature_use_refcount_per_intf[sw_if_index];
1933 return vnet_feature_enable_disable ("ip4-unicast",
1934 "ip4-full-reassembly-feature",
1935 sw_if_index, 1, 0, 0);
1936 }
1937 ++rm->feature_use_refcount_per_intf[sw_if_index];
1938 }
1939 else
1940 {
1941 --rm->feature_use_refcount_per_intf[sw_if_index];
1942 if (!rm->feature_use_refcount_per_intf[sw_if_index])
1943 return vnet_feature_enable_disable ("ip4-unicast",
1944 "ip4-full-reassembly-feature",
1945 sw_if_index, 0, 0, 0);
1946 }
1947 return -1;
1948}
1949#endif
1950
Klement Sekera75e7d132017-09-20 08:26:30 +02001951/*
1952 * fd.io coding-style-patch-verification: ON
1953 *
1954 * Local Variables:
1955 * eval: (c-set-style "gnu")
1956 * End:
1957 */