/*
 * Copyright (c) 2017 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * @file
 * @brief IPv4 Full Reassembly.
 *
 * This file contains the source code for IPv4 full reassembly.
 */

#include <vppinfra/vec.h>
#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
#include <vppinfra/fifo.h>
#include <vppinfra/bihash_16_8.h>
#include <vnet/ip/reass/ip4_full_reass.h>
#include <stddef.h>

#define MSEC_PER_SEC 1000
#define IP4_REASS_TIMEOUT_DEFAULT_MS 100
#define IP4_REASS_EXPIRE_WALK_INTERVAL_DEFAULT_MS 10000	// 10 seconds default
#define IP4_REASS_MAX_REASSEMBLIES_DEFAULT 1024
#define IP4_REASS_MAX_REASSEMBLY_LENGTH_DEFAULT 3
#define IP4_REASS_HT_LOAD_FACTOR (0.75)

#define IP4_REASS_DEBUG_BUFFERS 0
#if IP4_REASS_DEBUG_BUFFERS
#define IP4_REASS_DEBUG_BUFFER(bi, what)             \
  do                                                 \
    {                                                \
      u32 _bi = bi;                                  \
      printf (#what "buffer %u", _bi);               \
      vlib_buffer_t *_b = vlib_get_buffer (vm, _bi); \
      while (_b->flags & VLIB_BUFFER_NEXT_PRESENT)   \
        {                                            \
          _bi = _b->next_buffer;                     \
          printf ("[%u]", _bi);                      \
          _b = vlib_get_buffer (vm, _bi);            \
        }                                            \
      printf ("\n");                                 \
      fflush (stdout);                               \
    }                                                \
  while (0)
#else
#define IP4_REASS_DEBUG_BUFFER(...)
#endif

typedef enum
{
  IP4_REASS_RC_OK,
  IP4_REASS_RC_TOO_MANY_FRAGMENTS,
  IP4_REASS_RC_INTERNAL_ERROR,
  IP4_REASS_RC_NO_BUF,
  IP4_REASS_RC_HANDOFF,
} ip4_full_reass_rc_t;

typedef struct
{
  union
  {
    struct
    {
      u32 xx_id;
      ip4_address_t src;
      ip4_address_t dst;
      u16 frag_id;
      u8 proto;
      u8 unused;
    };
    u64 as_u64[2];
  };
} ip4_full_reass_key_t;

typedef union
{
  struct
  {
    u32 reass_index;
    u32 memory_owner_thread_index;
  };
  u64 as_u64;
} ip4_full_reass_val_t;

typedef union
{
  struct
  {
    ip4_full_reass_key_t k;
    ip4_full_reass_val_t v;
  };
  clib_bihash_kv_16_8_t kv;
} ip4_full_reass_kv_t;
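
/*
 * Per-fragment bookkeeping lives in the vnet_buffer opaque: fragment_first/
 * fragment_last hold the byte range carried by the fragment, while
 * range_first/range_last describe the (possibly trimmed) slice of it that is
 * currently linked into the reassembly chain. The helpers below derive the
 * usable data offset and length from those fields.
 */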
always_inline u32
ip4_full_reass_buffer_get_data_offset (vlib_buffer_t * b)
{
  vnet_buffer_opaque_t *vnb = vnet_buffer (b);
  return vnb->ip.reass.range_first - vnb->ip.reass.fragment_first;
}

always_inline u16
ip4_full_reass_buffer_get_data_len (vlib_buffer_t * b)
{
  vnet_buffer_opaque_t *vnb = vnet_buffer (b);
  return clib_min (vnb->ip.reass.range_last, vnb->ip.reass.fragment_last) -
    (vnb->ip.reass.fragment_first +
     ip4_full_reass_buffer_get_data_offset (b)) + 1;
}

typedef struct
{
  // hash table key
  ip4_full_reass_key_t key;
  // time when last packet was received
  f64 last_heard;
  // internal id of this reassembly
  u64 id;
  // buffer index of first buffer in this reassembly context
  u32 first_bi;
  // last octet of packet, ~0 until fragment without more_fragments arrives
  u32 last_packet_octet;
  // length of data collected so far
  u32 data_len;
  // trace operation counter
  u32 trace_op_counter;
  // next index - used by non-feature node
  u32 next_index;
  // error next index - used by custom apps (~0 if not used)
  u32 error_next_index;
  // minimum fragment length for this reassembly - used to estimate MTU
  u16 min_fragment_length;
  // number of fragments in this reassembly
  u32 fragments_n;
  // thread owning memory for this context (whose pool contains this ctx)
  u32 memory_owner_thread_index;
  // thread which received fragment with offset 0 and which sends out the
  // completed reassembly
  u32 sendout_thread_index;
} ip4_full_reass_t;

typedef struct
{
  ip4_full_reass_t *pool;
  u32 reass_n;
  u32 id_counter;
  clib_spinlock_t lock;
} ip4_full_reass_per_thread_t;

typedef struct
{
  // IPv4 config
  u32 timeout_ms;
  f64 timeout;
  u32 expire_walk_interval_ms;
  // maximum number of fragments in one reassembly
  u32 max_reass_len;
  // maximum number of reassemblies
  u32 max_reass_n;

  // IPv4 runtime
  clib_bihash_16_8_t hash;
  // per-thread data
  ip4_full_reass_per_thread_t *per_thread_data;

  // convenience
  vlib_main_t *vlib_main;

  // node index of ip4-drop node
  u32 ip4_drop_idx;
  u32 ip4_full_reass_expire_node_idx;

  /** Worker handoff */
  u32 fq_index;
  u32 fq_feature_index;

  // reference count for enabling/disabling feature - per interface
  u32 *feature_use_refcount_per_intf;
} ip4_full_reass_main_t;

extern ip4_full_reass_main_t ip4_full_reass_main;

#ifndef CLIB_MARCH_VARIANT
ip4_full_reass_main_t ip4_full_reass_main;
#endif /* CLIB_MARCH_VARIANT */

typedef enum
{
  IP4_FULL_REASS_NEXT_INPUT,
  IP4_FULL_REASS_NEXT_DROP,
  IP4_FULL_REASS_NEXT_HANDOFF,
  IP4_FULL_REASS_N_NEXT,
} ip4_full_reass_next_t;

typedef enum
{
  RANGE_NEW,
  RANGE_SHRINK,
  RANGE_DISCARD,
  RANGE_OVERLAP,
  FINALIZE,
  HANDOFF,
} ip4_full_reass_trace_operation_e;

typedef struct
{
  u16 range_first;
  u16 range_last;
  u32 range_bi;
  i32 data_offset;
  u32 data_len;
  u32 first_bi;
} ip4_full_reass_range_trace_t;

typedef struct
{
  ip4_full_reass_trace_operation_e action;
  u32 reass_id;
  ip4_full_reass_range_trace_t trace_range;
  u32 size_diff;
  u32 op_id;
  u32 thread_id;
  u32 thread_id_to;
  u32 fragment_first;
  u32 fragment_last;
  u32 total_data_len;
  bool is_after_handoff;
  ip4_header_t ip4_header;
} ip4_full_reass_trace_t;

extern vlib_node_registration_t ip4_full_reass_node;
extern vlib_node_registration_t ip4_full_reass_node_feature;

static void
ip4_full_reass_trace_details (vlib_main_t * vm, u32 bi,
                              ip4_full_reass_range_trace_t * trace)
{
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
  vnet_buffer_opaque_t *vnb = vnet_buffer (b);
  trace->range_first = vnb->ip.reass.range_first;
  trace->range_last = vnb->ip.reass.range_last;
  trace->data_offset = ip4_full_reass_buffer_get_data_offset (b);
  trace->data_len = ip4_full_reass_buffer_get_data_len (b);
  trace->range_bi = bi;
}

static u8 *
format_ip4_full_reass_range_trace (u8 * s, va_list * args)
{
  ip4_full_reass_range_trace_t *trace =
    va_arg (*args, ip4_full_reass_range_trace_t *);
  s =
    format (s, "range: [%u, %u], off %d, len %u, bi %u", trace->range_first,
            trace->range_last, trace->data_offset, trace->data_len,
            trace->range_bi);
  return s;
}

static u8 *
format_ip4_full_reass_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  ip4_full_reass_trace_t *t = va_arg (*args, ip4_full_reass_trace_t *);
  u32 indent = 0;
  if (~0 != t->reass_id)
    {
      if (t->is_after_handoff)
        {
          s =
            format (s, "%U\n", format_ip4_header, &t->ip4_header,
                    sizeof (t->ip4_header));
          indent = 2;
        }
      s =
        format (s, "%Ureass id: %u, op id: %u, ", format_white_space, indent,
                t->reass_id, t->op_id);
      indent = format_get_indent (s);
      s =
        format (s,
                "first bi: %u, data len: %u, ip/fragment[%u, %u]",
                t->trace_range.first_bi, t->total_data_len, t->fragment_first,
                t->fragment_last);
    }
  switch (t->action)
    {
    case RANGE_SHRINK:
      s = format (s, "\n%Ushrink %U by %u", format_white_space, indent,
                  format_ip4_full_reass_range_trace, &t->trace_range,
                  t->size_diff);
      break;
    case RANGE_DISCARD:
      s = format (s, "\n%Udiscard %U", format_white_space, indent,
                  format_ip4_full_reass_range_trace, &t->trace_range);
      break;
    case RANGE_NEW:
      s = format (s, "\n%Unew %U", format_white_space, indent,
                  format_ip4_full_reass_range_trace, &t->trace_range);
      break;
    case RANGE_OVERLAP:
      s = format (s, "\n%Uoverlapping/ignored %U", format_white_space, indent,
                  format_ip4_full_reass_range_trace, &t->trace_range);
      break;
    case FINALIZE:
      s = format (s, "\n%Ufinalize reassembly", format_white_space, indent);
      break;
    case HANDOFF:
      s =
        format (s, "handoff from thread #%u to thread #%u", t->thread_id,
                t->thread_id_to);
      break;
    }
  return s;
}

static void
ip4_full_reass_add_trace (vlib_main_t * vm, vlib_node_runtime_t * node,
                          ip4_full_reass_main_t * rm,
                          ip4_full_reass_t * reass, u32 bi,
                          ip4_full_reass_trace_operation_e action,
                          u32 size_diff, u32 thread_id_to)
{
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
  vnet_buffer_opaque_t *vnb = vnet_buffer (b);
  bool is_after_handoff = false;
  if (vlib_buffer_get_trace_thread (b) != vm->thread_index)
    {
      is_after_handoff = true;
    }
  ip4_full_reass_trace_t *t = vlib_add_trace (vm, node, b, sizeof (t[0]));
  t->is_after_handoff = is_after_handoff;
  if (t->is_after_handoff)
    {
      clib_memcpy (&t->ip4_header, vlib_buffer_get_current (b),
                   clib_min (sizeof (t->ip4_header), b->current_length));
    }
  if (reass)
    {
      t->reass_id = reass->id;
      t->op_id = reass->trace_op_counter;
      t->trace_range.first_bi = reass->first_bi;
      t->total_data_len = reass->data_len;
      ++reass->trace_op_counter;
    }
  else
    {
      t->reass_id = ~0;
      t->op_id = 0;
      t->trace_range.first_bi = 0;
      t->total_data_len = 0;
    }
  t->action = action;
  ip4_full_reass_trace_details (vm, bi, &t->trace_range);
  t->size_diff = size_diff;
  t->thread_id = vm->thread_index;
  t->thread_id_to = thread_id_to;
  t->fragment_first = vnb->ip.reass.fragment_first;
  t->fragment_last = vnb->ip.reass.fragment_last;
#if 0
  static u8 *s = NULL;
  s = format (s, "%U", format_ip4_full_reass_trace, NULL, NULL, t);
  printf ("%.*s\n", vec_len (s), s);
  fflush (stdout);
  vec_reset_length (s);
#endif
}

always_inline void
ip4_full_reass_free_ctx (ip4_full_reass_per_thread_t * rt,
                         ip4_full_reass_t * reass)
{
  pool_put (rt->pool, reass);
  --rt->reass_n;
}

always_inline void
ip4_full_reass_free (ip4_full_reass_main_t * rm,
                     ip4_full_reass_per_thread_t * rt,
                     ip4_full_reass_t * reass)
{
  clib_bihash_kv_16_8_t kv;
  kv.key[0] = reass->key.as_u64[0];
  kv.key[1] = reass->key.as_u64[1];
  clib_bihash_add_del_16_8 (&rm->hash, &kv, 0);
  return ip4_full_reass_free_ctx (rt, reass);
}

always_inline void
ip4_full_reass_drop_all (vlib_main_t * vm, vlib_node_runtime_t * node,
                         ip4_full_reass_main_t * rm, ip4_full_reass_t * reass)
{
  u32 range_bi = reass->first_bi;
  vlib_buffer_t *range_b;
  vnet_buffer_opaque_t *range_vnb;
  u32 *to_free = NULL;
  while (~0 != range_bi)
    {
      range_b = vlib_get_buffer (vm, range_bi);
      range_vnb = vnet_buffer (range_b);
      u32 bi = range_bi;
      while (~0 != bi)
        {
          vec_add1 (to_free, bi);
          vlib_buffer_t *b = vlib_get_buffer (vm, bi);
          if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
            {
              bi = b->next_buffer;
              b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
            }
          else
            {
              bi = ~0;
            }
        }
      range_bi = range_vnb->ip.reass.next_range_bi;
    }
  /* send to next_error_index */
  if (~0 != reass->error_next_index)
    {
      u32 n_left_to_next, *to_next, next_index;

      next_index = reass->error_next_index;
      u32 bi = ~0;

      while (vec_len (to_free) > 0)
        {
          vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

          while (vec_len (to_free) > 0 && n_left_to_next > 0)
            {
              bi = vec_pop (to_free);

              if (~0 != bi)
                {
                  to_next[0] = bi;
                  to_next += 1;
                  n_left_to_next -= 1;
                }
            }
          vlib_put_next_frame (vm, node, next_index, n_left_to_next);
        }
    }
  else
    {
      vlib_buffer_free (vm, to_free, vec_len (to_free));
    }
}

always_inline void
ip4_full_reass_init (ip4_full_reass_t * reass)
{
  reass->first_bi = ~0;
  reass->last_packet_octet = ~0;
  reass->data_len = 0;
  reass->next_index = ~0;
  reass->error_next_index = ~0;
}
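
/*
 * Look up the reassembly context for this fragment's key in the bihash. If a
 * context exists but is owned by another thread, signal a handoff instead of
 * touching it; if it has timed out, drop its fragments and recreate it. A
 * failed bihash insert with rv == -2 means another worker raced us and added
 * the same key first, so the lookup is retried from the top.
 */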
always_inline ip4_full_reass_t *
ip4_full_reass_find_or_create (vlib_main_t * vm, vlib_node_runtime_t * node,
                               ip4_full_reass_main_t * rm,
                               ip4_full_reass_per_thread_t * rt,
                               ip4_full_reass_kv_t * kv, u8 * do_handoff)
{
  ip4_full_reass_t *reass;
  f64 now;

again:

  reass = NULL;
  now = vlib_time_now (vm);
  if (!clib_bihash_search_16_8
      (&rm->hash, (clib_bihash_kv_16_8_t *) kv, (clib_bihash_kv_16_8_t *) kv))
    {
      reass =
        pool_elt_at_index (rm->per_thread_data
                           [kv->v.memory_owner_thread_index].pool,
                           kv->v.reass_index);
      if (vm->thread_index != reass->memory_owner_thread_index)
        {
          *do_handoff = 1;
          return reass;
        }

      if (now > reass->last_heard + rm->timeout)
        {
          ip4_full_reass_drop_all (vm, node, rm, reass);
          ip4_full_reass_free (rm, rt, reass);
          reass = NULL;
        }
    }

  if (reass)
    {
      reass->last_heard = now;
      return reass;
    }

  if (rt->reass_n >= rm->max_reass_n)
    {
      reass = NULL;
      return reass;
    }
  else
    {
      pool_get (rt->pool, reass);
      clib_memset (reass, 0, sizeof (*reass));
      reass->id = ((u64) vm->thread_index * 1000000000) + rt->id_counter;
      reass->memory_owner_thread_index = vm->thread_index;
      ++rt->id_counter;
      ip4_full_reass_init (reass);
      ++rt->reass_n;
    }

  reass->key.as_u64[0] = ((clib_bihash_kv_16_8_t *) kv)->key[0];
  reass->key.as_u64[1] = ((clib_bihash_kv_16_8_t *) kv)->key[1];
  kv->v.reass_index = (reass - rt->pool);
  kv->v.memory_owner_thread_index = vm->thread_index;
  reass->last_heard = now;

  int rv =
    clib_bihash_add_del_16_8 (&rm->hash, (clib_bihash_kv_16_8_t *) kv, 2);
  if (rv)
    {
      ip4_full_reass_free_ctx (rt, reass);
      reass = NULL;
      // if another worker already created a context for this key, work with
      // the other copy
      if (-2 == rv)
        goto again;
    }

  return reass;
}
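
/*
 * Walk the chain of collected ranges, trimming the IP header and any
 * overlapping bytes from each fragment buffer, and splice the remaining data
 * into a single buffer chain. The IP header of the first fragment is kept,
 * its length/fragment fields and checksum are rewritten, and the finished
 * packet is handed to the next node.
 */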
always_inline ip4_full_reass_rc_t
ip4_full_reass_finalize (vlib_main_t * vm, vlib_node_runtime_t * node,
                         ip4_full_reass_main_t * rm,
                         ip4_full_reass_per_thread_t * rt,
                         ip4_full_reass_t * reass, u32 * bi0,
                         u32 * next0, u32 * error0, bool is_custom_app)
{
  vlib_buffer_t *first_b = vlib_get_buffer (vm, reass->first_bi);
  vlib_buffer_t *last_b = NULL;
  u32 sub_chain_bi = reass->first_bi;
  u32 total_length = 0;
  u32 buf_cnt = 0;
  do
    {
      u32 tmp_bi = sub_chain_bi;
      vlib_buffer_t *tmp = vlib_get_buffer (vm, tmp_bi);
      ip4_header_t *ip = vlib_buffer_get_current (tmp);
      vnet_buffer_opaque_t *vnb = vnet_buffer (tmp);
      if (!(vnb->ip.reass.range_first >= vnb->ip.reass.fragment_first) &&
          !(vnb->ip.reass.range_last > vnb->ip.reass.fragment_first))
        {
          return IP4_REASS_RC_INTERNAL_ERROR;
        }

      u32 data_len = ip4_full_reass_buffer_get_data_len (tmp);
      u32 trim_front =
        ip4_header_bytes (ip) + ip4_full_reass_buffer_get_data_offset (tmp);
      u32 trim_end =
        vlib_buffer_length_in_chain (vm, tmp) - trim_front - data_len;
      if (tmp_bi == reass->first_bi)
        {
          /* first buffer - keep ip4 header */
          if (0 != ip4_full_reass_buffer_get_data_offset (tmp))
            {
              return IP4_REASS_RC_INTERNAL_ERROR;
            }
          trim_front = 0;
          trim_end = vlib_buffer_length_in_chain (vm, tmp) - data_len -
            ip4_header_bytes (ip);
          if (!(vlib_buffer_length_in_chain (vm, tmp) - trim_end > 0))
            {
              return IP4_REASS_RC_INTERNAL_ERROR;
            }
        }
      u32 keep_data =
        vlib_buffer_length_in_chain (vm, tmp) - trim_front - trim_end;
      while (1)
        {
          ++buf_cnt;
          if (trim_front)
            {
              if (trim_front > tmp->current_length)
                {
                  /* drop whole buffer */
                  u32 to_be_freed_bi = tmp_bi;
                  trim_front -= tmp->current_length;
                  if (!(tmp->flags & VLIB_BUFFER_NEXT_PRESENT))
                    {
                      return IP4_REASS_RC_INTERNAL_ERROR;
                    }
                  tmp->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
                  tmp_bi = tmp->next_buffer;
                  tmp->next_buffer = 0;
                  tmp = vlib_get_buffer (vm, tmp_bi);
                  vlib_buffer_free_one (vm, to_be_freed_bi);
                  continue;
                }
              else
                {
                  vlib_buffer_advance (tmp, trim_front);
                  trim_front = 0;
                }
            }
          if (keep_data)
            {
              if (last_b)
                {
                  last_b->flags |= VLIB_BUFFER_NEXT_PRESENT;
                  last_b->next_buffer = tmp_bi;
                }
              last_b = tmp;
              if (keep_data <= tmp->current_length)
                {
                  tmp->current_length = keep_data;
                  keep_data = 0;
                }
              else
                {
                  keep_data -= tmp->current_length;
                  if (!(tmp->flags & VLIB_BUFFER_NEXT_PRESENT))
                    {
                      return IP4_REASS_RC_INTERNAL_ERROR;
                    }
                }
              total_length += tmp->current_length;
              if (tmp->flags & VLIB_BUFFER_NEXT_PRESENT)
                {
                  tmp_bi = tmp->next_buffer;
                  tmp = vlib_get_buffer (vm, tmp->next_buffer);
                }
              else
                {
                  break;
                }
            }
          else
            {
              u32 to_be_freed_bi = tmp_bi;
              if (reass->first_bi == tmp_bi)
                {
                  return IP4_REASS_RC_INTERNAL_ERROR;
                }
              if (tmp->flags & VLIB_BUFFER_NEXT_PRESENT)
                {
                  tmp->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
                  tmp_bi = tmp->next_buffer;
                  tmp->next_buffer = 0;
                  tmp = vlib_get_buffer (vm, tmp_bi);
                  vlib_buffer_free_one (vm, to_be_freed_bi);
                }
              else
                {
                  tmp->next_buffer = 0;
                  vlib_buffer_free_one (vm, to_be_freed_bi);
                  break;
                }
            }
        }
      sub_chain_bi =
        vnet_buffer (vlib_get_buffer (vm, sub_chain_bi))->ip.
        reass.next_range_bi;
    }
  while (~0 != sub_chain_bi);

  if (!last_b)
    {
      return IP4_REASS_RC_INTERNAL_ERROR;
    }
  last_b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;

  if (total_length < first_b->current_length)
    {
      return IP4_REASS_RC_INTERNAL_ERROR;
    }
  total_length -= first_b->current_length;
  first_b->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
  first_b->total_length_not_including_first_buffer = total_length;
  ip4_header_t *ip = vlib_buffer_get_current (first_b);
  ip->flags_and_fragment_offset = 0;
  ip->length = clib_host_to_net_u16 (first_b->current_length + total_length);
  ip->checksum = ip4_header_checksum (ip);
  if (!vlib_buffer_chain_linearize (vm, first_b))
    {
      return IP4_REASS_RC_NO_BUF;
    }
  // reset to reconstruct the mbuf linking
  first_b->flags &= ~VLIB_BUFFER_EXT_HDR_VALID;
  if (PREDICT_FALSE (first_b->flags & VLIB_BUFFER_IS_TRACED))
    {
      ip4_full_reass_add_trace (vm, node, rm, reass, reass->first_bi,
                                FINALIZE, 0, ~0);
#if 0
      // following code does a hexdump of packet fragments to stdout ...
      do
        {
          u32 bi = reass->first_bi;
          u8 *s = NULL;
          while (~0 != bi)
            {
              vlib_buffer_t *b = vlib_get_buffer (vm, bi);
              s = format (s, "%u: %U\n", bi, format_hexdump,
                          vlib_buffer_get_current (b), b->current_length);
              if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
                {
                  bi = b->next_buffer;
                }
              else
                {
                  break;
                }
            }
          printf ("%.*s\n", vec_len (s), s);
          fflush (stdout);
          vec_free (s);
        }
      while (0);
#endif
    }
  *bi0 = reass->first_bi;
  if (!is_custom_app)
    {
      *next0 = IP4_FULL_REASS_NEXT_INPUT;
    }
  else
    {
      *next0 = reass->next_index;
    }
  vnet_buffer (first_b)->ip.reass.estimated_mtu = reass->min_fragment_length;
  *error0 = IP4_ERROR_NONE;
  ip4_full_reass_free (rm, rt, reass);
  reass = NULL;
  return IP4_REASS_RC_OK;
}

always_inline ip4_full_reass_rc_t
ip4_full_reass_insert_range_in_chain (vlib_main_t * vm,
                                      ip4_full_reass_main_t * rm,
                                      ip4_full_reass_per_thread_t * rt,
                                      ip4_full_reass_t * reass,
                                      u32 prev_range_bi, u32 new_next_bi)
{
  vlib_buffer_t *new_next_b = vlib_get_buffer (vm, new_next_bi);
  vnet_buffer_opaque_t *new_next_vnb = vnet_buffer (new_next_b);
  if (~0 != prev_range_bi)
    {
      vlib_buffer_t *prev_b = vlib_get_buffer (vm, prev_range_bi);
      vnet_buffer_opaque_t *prev_vnb = vnet_buffer (prev_b);
      new_next_vnb->ip.reass.next_range_bi = prev_vnb->ip.reass.next_range_bi;
      prev_vnb->ip.reass.next_range_bi = new_next_bi;
    }
  else
    {
      if (~0 != reass->first_bi)
        {
          new_next_vnb->ip.reass.next_range_bi = reass->first_bi;
        }
      reass->first_bi = new_next_bi;
    }
  vnet_buffer_opaque_t *vnb = vnet_buffer (new_next_b);
  if (!(vnb->ip.reass.range_first >= vnb->ip.reass.fragment_first) &&
      !(vnb->ip.reass.range_last > vnb->ip.reass.fragment_first))
    {
      return IP4_REASS_RC_INTERNAL_ERROR;
    }
  reass->data_len += ip4_full_reass_buffer_get_data_len (new_next_b);
  return IP4_REASS_RC_OK;
}

always_inline ip4_full_reass_rc_t
ip4_full_reass_remove_range_from_chain (vlib_main_t * vm,
                                        vlib_node_runtime_t * node,
                                        ip4_full_reass_main_t * rm,
                                        ip4_full_reass_t * reass,
                                        u32 prev_range_bi, u32 discard_bi)
{
  vlib_buffer_t *discard_b = vlib_get_buffer (vm, discard_bi);
  vnet_buffer_opaque_t *discard_vnb = vnet_buffer (discard_b);
  if (~0 != prev_range_bi)
    {
      vlib_buffer_t *prev_b = vlib_get_buffer (vm, prev_range_bi);
      vnet_buffer_opaque_t *prev_vnb = vnet_buffer (prev_b);
      if (!(prev_vnb->ip.reass.next_range_bi == discard_bi))
        {
          return IP4_REASS_RC_INTERNAL_ERROR;
        }
      prev_vnb->ip.reass.next_range_bi = discard_vnb->ip.reass.next_range_bi;
    }
  else
    {
      reass->first_bi = discard_vnb->ip.reass.next_range_bi;
    }
  vnet_buffer_opaque_t *vnb = vnet_buffer (discard_b);
  if (!(vnb->ip.reass.range_first >= vnb->ip.reass.fragment_first) &&
      !(vnb->ip.reass.range_last > vnb->ip.reass.fragment_first))
    {
      return IP4_REASS_RC_INTERNAL_ERROR;
    }
  reass->data_len -= ip4_full_reass_buffer_get_data_len (discard_b);
  while (1)
    {
      u32 to_be_freed_bi = discard_bi;
      if (PREDICT_FALSE (discard_b->flags & VLIB_BUFFER_IS_TRACED))
        {
          ip4_full_reass_add_trace (vm, node, rm, reass, discard_bi,
                                    RANGE_DISCARD, 0, ~0);
        }
      if (discard_b->flags & VLIB_BUFFER_NEXT_PRESENT)
        {
          discard_b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
          discard_bi = discard_b->next_buffer;
          discard_b->next_buffer = 0;
          discard_b = vlib_get_buffer (vm, discard_bi);
          vlib_buffer_free_one (vm, to_be_freed_bi);
        }
      else
        {
          discard_b->next_buffer = 0;
          vlib_buffer_free_one (vm, to_be_freed_bi);
          break;
        }
    }
  return IP4_REASS_RC_OK;
}
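
/*
 * Insert one fragment into the reassembly. Ranges are kept sorted by offset
 * in a singly linked list threaded through the buffer opaques; overlapping
 * ranges are trimmed or discarded so the list stays non-overlapping. Once
 * data_len covers everything up to the last fragment (the one without the
 * more-fragments bit), the reassembly is finalized, possibly after a handoff
 * to the thread that received fragment offset 0.
 */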
always_inline ip4_full_reass_rc_t
ip4_full_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
                       ip4_full_reass_main_t * rm,
                       ip4_full_reass_per_thread_t * rt,
                       ip4_full_reass_t * reass, u32 * bi0, u32 * next0,
                       u32 * error0, bool is_custom_app,
                       u32 * handoff_thread_idx)
{
  vlib_buffer_t *fb = vlib_get_buffer (vm, *bi0);
  vnet_buffer_opaque_t *fvnb = vnet_buffer (fb);
  if (is_custom_app)
    {
      // store (error_)next_index before it's overwritten
      reass->next_index = fvnb->ip.reass.next_index;
      reass->error_next_index = fvnb->ip.reass.error_next_index;
    }
  ip4_full_reass_rc_t rc = IP4_REASS_RC_OK;
  int consumed = 0;
  ip4_header_t *fip = vlib_buffer_get_current (fb);
  const u32 fragment_first = ip4_get_fragment_offset_bytes (fip);
  const u32 fragment_length =
    clib_net_to_host_u16 (fip->length) - ip4_header_bytes (fip);
  const u32 fragment_last = fragment_first + fragment_length - 1;
  fvnb->ip.reass.fragment_first = fragment_first;
  fvnb->ip.reass.fragment_last = fragment_last;
  int more_fragments = ip4_get_fragment_more (fip);
  u32 candidate_range_bi = reass->first_bi;
  u32 prev_range_bi = ~0;
  fvnb->ip.reass.range_first = fragment_first;
  fvnb->ip.reass.range_last = fragment_last;
  fvnb->ip.reass.next_range_bi = ~0;
  if (!more_fragments)
    {
      reass->last_packet_octet = fragment_last;
    }
  if (~0 == reass->first_bi)
    {
      // starting a new reassembly
      rc =
        ip4_full_reass_insert_range_in_chain (vm, rm, rt, reass,
                                              prev_range_bi, *bi0);
      if (IP4_REASS_RC_OK != rc)
        {
          return rc;
        }
      if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
        {
          ip4_full_reass_add_trace (vm, node, rm, reass, *bi0, RANGE_NEW, 0,
                                    ~0);
        }
      *bi0 = ~0;
      reass->min_fragment_length = clib_net_to_host_u16 (fip->length);
      reass->fragments_n = 1;
      return IP4_REASS_RC_OK;
    }
  reass->min_fragment_length =
    clib_min (clib_net_to_host_u16 (fip->length),
              fvnb->ip.reass.estimated_mtu);
  while (~0 != candidate_range_bi)
    {
      vlib_buffer_t *candidate_b = vlib_get_buffer (vm, candidate_range_bi);
      vnet_buffer_opaque_t *candidate_vnb = vnet_buffer (candidate_b);
      if (fragment_first > candidate_vnb->ip.reass.range_last)
        {
          // this fragment starts after candidate range
          prev_range_bi = candidate_range_bi;
          candidate_range_bi = candidate_vnb->ip.reass.next_range_bi;
          if (candidate_vnb->ip.reass.range_last < fragment_last &&
              ~0 == candidate_range_bi)
            {
              // special case - this fragment falls beyond all known ranges
              rc =
                ip4_full_reass_insert_range_in_chain (vm, rm, rt, reass,
                                                      prev_range_bi, *bi0);
              if (IP4_REASS_RC_OK != rc)
                {
                  return rc;
                }
              consumed = 1;
              break;
            }
          continue;
        }
      if (fragment_last < candidate_vnb->ip.reass.range_first)
        {
          // this fragment ends before candidate range without any overlap
          rc =
            ip4_full_reass_insert_range_in_chain (vm, rm, rt, reass,
                                                  prev_range_bi, *bi0);
          if (IP4_REASS_RC_OK != rc)
            {
              return rc;
            }
          consumed = 1;
        }
      else
        {
          if (fragment_first >= candidate_vnb->ip.reass.range_first &&
              fragment_last <= candidate_vnb->ip.reass.range_last)
            {
              // this fragment is a (sub)part of existing range, ignore it
              if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
                {
                  ip4_full_reass_add_trace (vm, node, rm, reass, *bi0,
                                            RANGE_OVERLAP, 0, ~0);
                }
              break;
            }
          int discard_candidate = 0;
          if (fragment_first < candidate_vnb->ip.reass.range_first)
            {
              u32 overlap =
                fragment_last - candidate_vnb->ip.reass.range_first + 1;
              if (overlap < ip4_full_reass_buffer_get_data_len (candidate_b))
                {
                  candidate_vnb->ip.reass.range_first += overlap;
                  if (reass->data_len < overlap)
                    {
                      return IP4_REASS_RC_INTERNAL_ERROR;
                    }
                  reass->data_len -= overlap;
                  if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
                    {
                      ip4_full_reass_add_trace (vm, node, rm, reass,
                                                candidate_range_bi,
                                                RANGE_SHRINK, 0, ~0);
                    }
                  rc =
                    ip4_full_reass_insert_range_in_chain (vm, rm, rt, reass,
                                                          prev_range_bi,
                                                          *bi0);
                  if (IP4_REASS_RC_OK != rc)
                    {
                      return rc;
                    }
                  consumed = 1;
                }
              else
                {
                  discard_candidate = 1;
                }
            }
          else if (fragment_last > candidate_vnb->ip.reass.range_last)
            {
              u32 overlap =
                candidate_vnb->ip.reass.range_last - fragment_first + 1;
              if (overlap < ip4_full_reass_buffer_get_data_len (candidate_b))
                {
                  fvnb->ip.reass.range_first += overlap;
                  if (~0 != candidate_vnb->ip.reass.next_range_bi)
                    {
                      prev_range_bi = candidate_range_bi;
                      candidate_range_bi =
                        candidate_vnb->ip.reass.next_range_bi;
                      continue;
                    }
                  else
                    {
                      // special case - last range discarded
                      rc =
                        ip4_full_reass_insert_range_in_chain (vm, rm, rt,
                                                              reass,
                                                              candidate_range_bi,
                                                              *bi0);
                      if (IP4_REASS_RC_OK != rc)
                        {
                          return rc;
                        }
                      consumed = 1;
                    }
                }
              else
                {
                  discard_candidate = 1;
                }
            }
          else
            {
              discard_candidate = 1;
            }
          if (discard_candidate)
            {
              u32 next_range_bi = candidate_vnb->ip.reass.next_range_bi;
              // discard candidate range, probe next range
              rc =
                ip4_full_reass_remove_range_from_chain (vm, node, rm, reass,
                                                        prev_range_bi,
                                                        candidate_range_bi);
              if (IP4_REASS_RC_OK != rc)
                {
                  return rc;
                }
              if (~0 != next_range_bi)
                {
                  candidate_range_bi = next_range_bi;
                  continue;
                }
              else
                {
                  // special case - last range discarded
                  rc =
                    ip4_full_reass_insert_range_in_chain (vm, rm, rt, reass,
                                                          prev_range_bi,
                                                          *bi0);
                  if (IP4_REASS_RC_OK != rc)
                    {
                      return rc;
                    }
                  consumed = 1;
                }
            }
        }
      break;
    }
  ++reass->fragments_n;
  if (consumed)
    {
      if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
        {
          ip4_full_reass_add_trace (vm, node, rm, reass, *bi0, RANGE_NEW, 0,
                                    ~0);
        }
    }
  if (~0 != reass->last_packet_octet &&
      reass->data_len == reass->last_packet_octet + 1)
    {
      *handoff_thread_idx = reass->sendout_thread_index;
      int handoff =
        reass->memory_owner_thread_index != reass->sendout_thread_index;
      rc =
        ip4_full_reass_finalize (vm, node, rm, rt, reass, bi0, next0, error0,
                                 is_custom_app);
      if (IP4_REASS_RC_OK == rc && handoff)
        {
          rc = IP4_REASS_RC_HANDOFF;
        }
    }
  else
    {
      if (consumed)
        {
          *bi0 = ~0;
          if (reass->fragments_n > rm->max_reass_len)
            {
              rc = IP4_REASS_RC_TOO_MANY_FRAGMENTS;
            }
        }
      else
        {
          *next0 = IP4_FULL_REASS_NEXT_DROP;
          *error0 = IP4_ERROR_REASS_DUPLICATE_FRAGMENT;
        }
    }
  return rc;
}
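
/*
 * Common dispatch loop for the reassembly graph nodes; is_feature and
 * is_custom_app select the variant-specific behavior. Unfragmented packets
 * are passed straight through. Fragments are keyed by fib index, source and
 * destination address, fragment id and protocol, then either handed off to
 * the worker owning the context or processed by ip4_full_reass_update()
 * under the per-thread lock.
 */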
1096always_inline uword
Klement Sekera896c8962019-06-24 11:52:49 +00001097ip4_full_reass_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
Klement Sekerae8498652019-06-17 12:23:15 +00001098 vlib_frame_t * frame, bool is_feature,
1099 bool is_custom_app)
Klement Sekera75e7d132017-09-20 08:26:30 +02001100{
1101 u32 *from = vlib_frame_vector_args (frame);
1102 u32 n_left_from, n_left_to_next, *to_next, next_index;
Klement Sekera896c8962019-06-24 11:52:49 +00001103 ip4_full_reass_main_t *rm = &ip4_full_reass_main;
1104 ip4_full_reass_per_thread_t *rt = &rm->per_thread_data[vm->thread_index];
Klement Sekera4c533132018-02-22 11:41:12 +01001105 clib_spinlock_lock (&rt->lock);
Klement Sekera75e7d132017-09-20 08:26:30 +02001106
1107 n_left_from = frame->n_vectors;
1108 next_index = node->cached_next_index;
Klement Sekeraf883f6a2019-02-13 11:01:32 +01001109 while (n_left_from > 0)
Klement Sekera75e7d132017-09-20 08:26:30 +02001110 {
1111 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
1112
Klement Sekera75e7d132017-09-20 08:26:30 +02001113 while (n_left_from > 0 && n_left_to_next > 0)
1114 {
1115 u32 bi0;
1116 vlib_buffer_t *b0;
Klement Sekera4c533132018-02-22 11:41:12 +01001117 u32 next0;
1118 u32 error0 = IP4_ERROR_NONE;
Klement Sekera75e7d132017-09-20 08:26:30 +02001119
1120 bi0 = from[0];
1121 b0 = vlib_get_buffer (vm, bi0);
1122
1123 ip4_header_t *ip0 = vlib_buffer_get_current (b0);
Klement Sekera4c533132018-02-22 11:41:12 +01001124 if (!ip4_get_fragment_more (ip0) && !ip4_get_fragment_offset (ip0))
Klement Sekera75e7d132017-09-20 08:26:30 +02001125 {
Klement Sekera4c533132018-02-22 11:41:12 +01001126 // this is a whole packet - no fragmentation
Klement Sekerae8498652019-06-17 12:23:15 +00001127 if (!is_custom_app)
Klement Sekera4c533132018-02-22 11:41:12 +01001128 {
Klement Sekera896c8962019-06-24 11:52:49 +00001129 next0 = IP4_FULL_REASS_NEXT_INPUT;
Klement Sekera4c533132018-02-22 11:41:12 +01001130 }
1131 else
1132 {
1133 next0 = vnet_buffer (b0)->ip.reass.next_index;
1134 }
Klement Sekera896c8962019-06-24 11:52:49 +00001135 goto packet_enqueue;
1136 }
1137 const u32 fragment_first = ip4_get_fragment_offset_bytes (ip0);
1138 const u32 fragment_length =
1139 clib_net_to_host_u16 (ip0->length) - ip4_header_bytes (ip0);
1140 const u32 fragment_last = fragment_first + fragment_length - 1;
1141 if (fragment_first > fragment_last || fragment_first + fragment_length > UINT16_MAX - 20 || (fragment_length < 8 && ip4_get_fragment_more (ip0))) // 8 is minimum frag length per RFC 791
1142 {
1143 next0 = IP4_FULL_REASS_NEXT_DROP;
1144 error0 = IP4_ERROR_REASS_MALFORMED_PACKET;
1145 goto packet_enqueue;
1146 }
1147 ip4_full_reass_kv_t kv;
1148 u8 do_handoff = 0;
1149
1150 kv.k.as_u64[0] =
1151 (u64) vec_elt (ip4_main.fib_index_by_sw_if_index,
1152 vnet_buffer (b0)->sw_if_index[VLIB_RX]) |
1153 (u64) ip0->src_address.as_u32 << 32;
1154 kv.k.as_u64[1] =
1155 (u64) ip0->dst_address.
1156 as_u32 | (u64) ip0->fragment_id << 32 | (u64) ip0->protocol << 48;
1157
1158 ip4_full_reass_t *reass =
1159 ip4_full_reass_find_or_create (vm, node, rm, rt, &kv,
1160 &do_handoff);
1161
1162 if (reass)
1163 {
1164 const u32 fragment_first = ip4_get_fragment_offset_bytes (ip0);
1165 if (0 == fragment_first)
1166 {
1167 reass->sendout_thread_index = vm->thread_index;
1168 }
1169 }
1170
1171 if (PREDICT_FALSE (do_handoff))
1172 {
1173 next0 = IP4_FULL_REASS_NEXT_HANDOFF;
Klement Sekerade34c352019-06-25 11:19:22 +00001174 vnet_buffer (b0)->ip.reass.owner_thread_index =
1175 kv.v.memory_owner_thread_index;
Klement Sekera896c8962019-06-24 11:52:49 +00001176 }
1177 else if (reass)
1178 {
1179 u32 handoff_thread_idx;
1180 switch (ip4_full_reass_update
1181 (vm, node, rm, rt, reass, &bi0, &next0,
1182 &error0, is_custom_app, &handoff_thread_idx))
1183 {
1184 case IP4_REASS_RC_OK:
1185 /* nothing to do here */
1186 break;
1187 case IP4_REASS_RC_HANDOFF:
1188 next0 = IP4_FULL_REASS_NEXT_HANDOFF;
1189 b0 = vlib_get_buffer (vm, bi0);
Klement Sekerade34c352019-06-25 11:19:22 +00001190 vnet_buffer (b0)->ip.reass.owner_thread_index =
1191 handoff_thread_idx;
Klement Sekera896c8962019-06-24 11:52:49 +00001192 break;
1193 case IP4_REASS_RC_TOO_MANY_FRAGMENTS:
1194 vlib_node_increment_counter (vm, node->node_index,
1195 IP4_ERROR_REASS_FRAGMENT_CHAIN_TOO_LONG,
1196 1);
1197 ip4_full_reass_drop_all (vm, node, rm, reass);
1198 ip4_full_reass_free (rm, rt, reass);
1199 goto next_packet;
1200 break;
1201 case IP4_REASS_RC_NO_BUF:
1202 vlib_node_increment_counter (vm, node->node_index,
1203 IP4_ERROR_REASS_NO_BUF, 1);
1204 ip4_full_reass_drop_all (vm, node, rm, reass);
1205 ip4_full_reass_free (rm, rt, reass);
1206 goto next_packet;
1207 break;
1208 case IP4_REASS_RC_INTERNAL_ERROR:
1209 /* drop everything and start with a clean slate */
1210 vlib_node_increment_counter (vm, node->node_index,
1211 IP4_ERROR_REASS_INTERNAL_ERROR,
1212 1);
1213 ip4_full_reass_drop_all (vm, node, rm, reass);
1214 ip4_full_reass_free (rm, rt, reass);
1215 goto next_packet;
1216 break;
1217 }
Klement Sekera75e7d132017-09-20 08:26:30 +02001218 }
1219 else
1220 {
Klement Sekera896c8962019-06-24 11:52:49 +00001221 next0 = IP4_FULL_REASS_NEXT_DROP;
1222 error0 = IP4_ERROR_REASS_LIMIT_REACHED;
Klement Sekera4c533132018-02-22 11:41:12 +01001223 }
Klement Sekera75e7d132017-09-20 08:26:30 +02001224
Klement Sekera896c8962019-06-24 11:52:49 +00001225
1226 packet_enqueue:
Klement Sekera896c8962019-06-24 11:52:49 +00001227
Klement Sekera75e7d132017-09-20 08:26:30 +02001228 if (bi0 != ~0)
1229 {
1230 to_next[0] = bi0;
1231 to_next += 1;
1232 n_left_to_next -= 1;
Benoît Gannecf7803d2019-10-23 13:53:49 +02001233
1234 /* bi0 might have been updated by reass_finalize, reload */
1235 b0 = vlib_get_buffer (vm, bi0);
Klement Sekera1766ddc2020-03-30 16:59:38 +02001236 if (IP4_ERROR_NONE != error0)
1237 {
1238 b0->error = node->errors[error0];
1239 }
Benoît Gannecf7803d2019-10-23 13:53:49 +02001240
Klement Sekera896c8962019-06-24 11:52:49 +00001241 if (next0 == IP4_FULL_REASS_NEXT_HANDOFF)
Klement Sekera630ab582019-07-19 09:14:19 +00001242 {
1243 if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
1244 {
Klement Sekerade34c352019-06-25 11:19:22 +00001245 ip4_full_reass_add_trace (vm, node, rm, NULL, bi0,
1246 HANDOFF, 0,
1247 vnet_buffer (b0)->ip.
1248 reass.owner_thread_index);
Klement Sekera630ab582019-07-19 09:14:19 +00001249 }
1250 }
1251 else if (is_feature && IP4_ERROR_NONE == error0)
Klement Sekera4c533132018-02-22 11:41:12 +01001252 {
Damjan Marion7d98a122018-07-19 20:42:08 +02001253 vnet_feature_next (&next0, b0);
Klement Sekera4c533132018-02-22 11:41:12 +01001254 }
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001255 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1256 to_next, n_left_to_next,
1257 bi0, next0);
Klement Sekera75e7d132017-09-20 08:26:30 +02001258 IP4_REASS_DEBUG_BUFFER (bi0, enqueue_next);
1259 }
1260
Klement Sekerad0f70a32018-12-14 17:24:13 +01001261 next_packet:
Klement Sekera75e7d132017-09-20 08:26:30 +02001262 from += 1;
1263 n_left_from -= 1;
1264 }
1265
1266 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1267 }
1268
Klement Sekera4c533132018-02-22 11:41:12 +01001269 clib_spinlock_unlock (&rt->lock);
Klement Sekera75e7d132017-09-20 08:26:30 +02001270 return frame->n_vectors;
1271}
1272
Klement Sekera896c8962019-06-24 11:52:49 +00001273static char *ip4_full_reass_error_strings[] = {
Klement Sekera75e7d132017-09-20 08:26:30 +02001274#define _(sym, string) string,
1275 foreach_ip4_error
1276#undef _
1277};
1278
Klement Sekera896c8962019-06-24 11:52:49 +00001279VLIB_NODE_FN (ip4_full_reass_node) (vlib_main_t * vm,
1280 vlib_node_runtime_t * node,
1281 vlib_frame_t * frame)
Klement Sekera4c533132018-02-22 11:41:12 +01001282{
Klement Sekera896c8962019-06-24 11:52:49 +00001283 return ip4_full_reass_inline (vm, node, frame, false /* is_feature */ ,
Klement Sekerae8498652019-06-17 12:23:15 +00001284 false /* is_custom_app */ );
Klement Sekera4c533132018-02-22 11:41:12 +01001285}
1286
Klement Sekera75e7d132017-09-20 08:26:30 +02001287/* *INDENT-OFF* */
Klement Sekera896c8962019-06-24 11:52:49 +00001288VLIB_REGISTER_NODE (ip4_full_reass_node) = {
1289 .name = "ip4-full-reassembly",
Klement Sekera75e7d132017-09-20 08:26:30 +02001290 .vector_size = sizeof (u32),
Klement Sekera896c8962019-06-24 11:52:49 +00001291 .format_trace = format_ip4_full_reass_trace,
1292 .n_errors = ARRAY_LEN (ip4_full_reass_error_strings),
1293 .error_strings = ip4_full_reass_error_strings,
1294 .n_next_nodes = IP4_FULL_REASS_N_NEXT,
Klement Sekera75e7d132017-09-20 08:26:30 +02001295 .next_nodes =
1296 {
Klement Sekera896c8962019-06-24 11:52:49 +00001297 [IP4_FULL_REASS_NEXT_INPUT] = "ip4-input",
1298 [IP4_FULL_REASS_NEXT_DROP] = "ip4-drop",
1299 [IP4_FULL_REASS_NEXT_HANDOFF] = "ip4-full-reassembly-handoff",
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001300
Klement Sekera75e7d132017-09-20 08:26:30 +02001301 },
1302};
1303/* *INDENT-ON* */
1304
Klement Sekera896c8962019-06-24 11:52:49 +00001305VLIB_NODE_FN (ip4_full_reass_node_feature) (vlib_main_t * vm,
1306 vlib_node_runtime_t * node,
1307 vlib_frame_t * frame)
Klement Sekera4c533132018-02-22 11:41:12 +01001308{
Klement Sekera896c8962019-06-24 11:52:49 +00001309 return ip4_full_reass_inline (vm, node, frame, true /* is_feature */ ,
Klement Sekerae8498652019-06-17 12:23:15 +00001310 false /* is_custom_app */ );
Klement Sekera4c533132018-02-22 11:41:12 +01001311}
1312
1313/* *INDENT-OFF* */
Klement Sekera896c8962019-06-24 11:52:49 +00001314VLIB_REGISTER_NODE (ip4_full_reass_node_feature) = {
1315 .name = "ip4-full-reassembly-feature",
Klement Sekera4c533132018-02-22 11:41:12 +01001316 .vector_size = sizeof (u32),
Klement Sekera896c8962019-06-24 11:52:49 +00001317 .format_trace = format_ip4_full_reass_trace,
1318 .n_errors = ARRAY_LEN (ip4_full_reass_error_strings),
1319 .error_strings = ip4_full_reass_error_strings,
1320 .n_next_nodes = IP4_FULL_REASS_N_NEXT,
Klement Sekera4c533132018-02-22 11:41:12 +01001321 .next_nodes =
1322 {
Klement Sekera896c8962019-06-24 11:52:49 +00001323 [IP4_FULL_REASS_NEXT_INPUT] = "ip4-input",
1324 [IP4_FULL_REASS_NEXT_DROP] = "ip4-drop",
1325 [IP4_FULL_REASS_NEXT_HANDOFF] = "ip4-full-reass-feature-hoff",
Klement Sekera4c533132018-02-22 11:41:12 +01001326 },
1327};
1328/* *INDENT-ON* */
1329
Klement Sekera4c533132018-02-22 11:41:12 +01001330/* *INDENT-OFF* */
Klement Sekera896c8962019-06-24 11:52:49 +00001331VNET_FEATURE_INIT (ip4_full_reass_feature, static) = {
Klement Sekera4c533132018-02-22 11:41:12 +01001332 .arc_name = "ip4-unicast",
Klement Sekera896c8962019-06-24 11:52:49 +00001333 .node_name = "ip4-full-reassembly-feature",
Neale Ranns14046982019-07-29 14:49:52 +00001334 .runs_before = VNET_FEATURES ("ip4-lookup",
Neale Ranns2be3eb62019-08-02 01:17:13 -07001335 "ipsec4-input-feature"),
Klement Sekera4c533132018-02-22 11:41:12 +01001336 .runs_after = 0,
1337};
1338/* *INDENT-ON* */
1339
Filip Tehlar26ea14e2019-03-11 05:30:21 -07001340#ifndef CLIB_MARCH_VARIANT
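/*
 * Size the bihash so that it stays at or below IP4_REASS_HT_LOAD_FACTOR
 * when max_reass_n reassemblies are in flight, rounded up to the next power
 * of two.  Worked example with illustrative numbers: max_reass_n = 1024 and
 * a 0.75 load factor give 1024 / 0.75 = 1365 (truncated), which rounds up
 * to 2048 buckets.
 */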
Klement Sekera4c533132018-02-22 11:41:12 +01001341always_inline u32
Klement Sekera896c8962019-06-24 11:52:49 +00001342ip4_full_reass_get_nbuckets ()
Klement Sekera75e7d132017-09-20 08:26:30 +02001343{
Klement Sekera896c8962019-06-24 11:52:49 +00001344 ip4_full_reass_main_t *rm = &ip4_full_reass_main;
Klement Sekera75e7d132017-09-20 08:26:30 +02001345 u32 nbuckets;
1346 u8 i;
1347
1348 nbuckets = (u32) (rm->max_reass_n / IP4_REASS_HT_LOAD_FACTOR);
1349
1350 for (i = 0; i < 31; i++)
1351 if ((1 << i) >= nbuckets)
1352 break;
1353 nbuckets = 1 << i;
1354
1355 return nbuckets;
1356}
Filip Tehlar26ea14e2019-03-11 05:30:21 -07001357#endif /* CLIB_MARCH_VARIANT */
Klement Sekera75e7d132017-09-20 08:26:30 +02001358
1359typedef enum
1360{
1361 IP4_EVENT_CONFIG_CHANGED = 1,
Klement Sekera896c8962019-06-24 11:52:49 +00001362} ip4_full_reass_event_t;
Klement Sekera75e7d132017-09-20 08:26:30 +02001363
1364typedef struct
1365{
1366 int failure;
Klement Sekera8dcfed52018-06-28 11:16:15 +02001367 clib_bihash_16_8_t *new_hash;
Klement Sekera75e7d132017-09-20 08:26:30 +02001368} ip4_rehash_cb_ctx;
1369
Filip Tehlar26ea14e2019-03-11 05:30:21 -07001370#ifndef CLIB_MARCH_VARIANT
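/*
 * Bihash walk callback used while resizing the reassembly hash table: each
 * key/value pair of the old table is re-added into ctx->new_hash.  A failed
 * add is only recorded in ctx->failure; the walk itself always continues,
 * leaving the old table intact so the caller can back out cleanly.
 */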
Neale Rannsf50bac12019-12-06 05:53:17 +00001371static int
Klement Sekera8dcfed52018-06-28 11:16:15 +02001372ip4_rehash_cb (clib_bihash_kv_16_8_t * kv, void *_ctx)
Klement Sekera75e7d132017-09-20 08:26:30 +02001373{
1374 ip4_rehash_cb_ctx *ctx = _ctx;
Klement Sekera8dcfed52018-06-28 11:16:15 +02001375 if (clib_bihash_add_del_16_8 (ctx->new_hash, kv, 1))
Klement Sekera75e7d132017-09-20 08:26:30 +02001376 {
1377 ctx->failure = 1;
1378 }
Neale Rannsf50bac12019-12-06 05:53:17 +00001379 return (BIHASH_WALK_CONTINUE);
Klement Sekera75e7d132017-09-20 08:26:30 +02001380}
1381
Klement Sekera4c533132018-02-22 11:41:12 +01001382static void
Klement Sekera896c8962019-06-24 11:52:49 +00001383ip4_full_reass_set_params (u32 timeout_ms, u32 max_reassemblies,
1384 u32 max_reassembly_length,
1385 u32 expire_walk_interval_ms)
Klement Sekera4c533132018-02-22 11:41:12 +01001386{
Klement Sekera896c8962019-06-24 11:52:49 +00001387 ip4_full_reass_main.timeout_ms = timeout_ms;
1388 ip4_full_reass_main.timeout = (f64) timeout_ms / (f64) MSEC_PER_SEC;
1389 ip4_full_reass_main.max_reass_n = max_reassemblies;
1390 ip4_full_reass_main.max_reass_len = max_reassembly_length;
1391 ip4_full_reass_main.expire_walk_interval_ms = expire_walk_interval_ms;
Klement Sekera4c533132018-02-22 11:41:12 +01001392}
1393
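/*
 * Runtime (re)configuration entry point.  It stores the new parameters,
 * signals the expire-walk process so the walk re-reads its interval and
 * timeout promptly, and grows the hash table when the larger reassembly
 * limit calls for more buckets (the table is never shrunk).  Illustrative
 * call, example values only: timeout 200 ms, 2048 reassemblies, 5 fragments
 * per reassembly, 10 s expire walk interval:
 *
 *   ip4_full_reass_set (200, 2048, 5, 10000);
 */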
Klement Sekera75e7d132017-09-20 08:26:30 +02001394vnet_api_error_t
Klement Sekera896c8962019-06-24 11:52:49 +00001395ip4_full_reass_set (u32 timeout_ms, u32 max_reassemblies,
1396 u32 max_reassembly_length, u32 expire_walk_interval_ms)
Klement Sekera75e7d132017-09-20 08:26:30 +02001397{
Klement Sekera896c8962019-06-24 11:52:49 +00001398 u32 old_nbuckets = ip4_full_reass_get_nbuckets ();
1399 ip4_full_reass_set_params (timeout_ms, max_reassemblies,
1400 max_reassembly_length, expire_walk_interval_ms);
1401 vlib_process_signal_event (ip4_full_reass_main.vlib_main,
1402 ip4_full_reass_main.ip4_full_reass_expire_node_idx,
Klement Sekera75e7d132017-09-20 08:26:30 +02001403 IP4_EVENT_CONFIG_CHANGED, 0);
Klement Sekera896c8962019-06-24 11:52:49 +00001404 u32 new_nbuckets = ip4_full_reass_get_nbuckets ();
1405 if (ip4_full_reass_main.max_reass_n > 0 && new_nbuckets > old_nbuckets)
Klement Sekera75e7d132017-09-20 08:26:30 +02001406 {
Klement Sekera8dcfed52018-06-28 11:16:15 +02001407 clib_bihash_16_8_t new_hash;
Dave Barachb7b92992018-10-17 10:38:51 -04001408 clib_memset (&new_hash, 0, sizeof (new_hash));
Klement Sekera75e7d132017-09-20 08:26:30 +02001409 ip4_rehash_cb_ctx ctx;
1410 ctx.failure = 0;
1411 ctx.new_hash = &new_hash;
Klement Sekera896c8962019-06-24 11:52:49 +00001412 clib_bihash_init_16_8 (&new_hash, "ip4-dr", new_nbuckets,
Klement Sekera75e7d132017-09-20 08:26:30 +02001413 new_nbuckets * 1024);
Klement Sekera896c8962019-06-24 11:52:49 +00001414 clib_bihash_foreach_key_value_pair_16_8 (&ip4_full_reass_main.hash,
Klement Sekera75e7d132017-09-20 08:26:30 +02001415 ip4_rehash_cb, &ctx);
1416 if (ctx.failure)
1417 {
Klement Sekera8dcfed52018-06-28 11:16:15 +02001418 clib_bihash_free_16_8 (&new_hash);
Klement Sekera75e7d132017-09-20 08:26:30 +02001419 return -1;
1420 }
1421 else
1422 {
Klement Sekera896c8962019-06-24 11:52:49 +00001423 clib_bihash_free_16_8 (&ip4_full_reass_main.hash);
1424 clib_memcpy_fast (&ip4_full_reass_main.hash, &new_hash,
1425 sizeof (ip4_full_reass_main.hash));
1426 clib_bihash_copied (&ip4_full_reass_main.hash, &new_hash);
Klement Sekera75e7d132017-09-20 08:26:30 +02001427 }
1428 }
1429 return 0;
1430}
1431
1432vnet_api_error_t
Klement Sekera896c8962019-06-24 11:52:49 +00001433ip4_full_reass_get (u32 * timeout_ms, u32 * max_reassemblies,
1434 u32 * max_reassembly_length,
1435 u32 * expire_walk_interval_ms)
Klement Sekera75e7d132017-09-20 08:26:30 +02001436{
Klement Sekera896c8962019-06-24 11:52:49 +00001437 *timeout_ms = ip4_full_reass_main.timeout_ms;
1438 *max_reassemblies = ip4_full_reass_main.max_reass_n;
1439 *max_reassembly_length = ip4_full_reass_main.max_reass_len;
1440 *expire_walk_interval_ms = ip4_full_reass_main.expire_walk_interval_ms;
Klement Sekera75e7d132017-09-20 08:26:30 +02001441 return 0;
1442}
1443
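/*
 * One-time init: allocate a reassembly pool and spinlock per thread
 * (workers plus main), cache the expire-walk and ip4-drop node indices,
 * apply the compile-time defaults, create the bihash, and set up the two
 * handoff frame queues (one per node variant).
 */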
Klement Sekera4c533132018-02-22 11:41:12 +01001444static clib_error_t *
Klement Sekera896c8962019-06-24 11:52:49 +00001445ip4_full_reass_init_function (vlib_main_t * vm)
Klement Sekera75e7d132017-09-20 08:26:30 +02001446{
Klement Sekera896c8962019-06-24 11:52:49 +00001447 ip4_full_reass_main_t *rm = &ip4_full_reass_main;
Klement Sekera75e7d132017-09-20 08:26:30 +02001448 clib_error_t *error = 0;
1449 u32 nbuckets;
Dave Barach1403fcd2018-02-05 09:45:43 -05001450 vlib_node_t *node;
Klement Sekera75e7d132017-09-20 08:26:30 +02001451
1452 rm->vlib_main = vm;
Klement Sekera75e7d132017-09-20 08:26:30 +02001453
Juraj Slobodacd806922018-10-10 10:15:54 +02001454 vec_validate (rm->per_thread_data, vlib_num_workers ());
Klement Sekera896c8962019-06-24 11:52:49 +00001455 ip4_full_reass_per_thread_t *rt;
Klement Sekera4c533132018-02-22 11:41:12 +01001456 vec_foreach (rt, rm->per_thread_data)
1457 {
1458 clib_spinlock_init (&rt->lock);
1459 pool_alloc (rt->pool, rm->max_reass_n);
1460 }
Dave Barach1403fcd2018-02-05 09:45:43 -05001461
Klement Sekera896c8962019-06-24 11:52:49 +00001462 node = vlib_get_node_by_name (vm, (u8 *) "ip4-full-reassembly-expire-walk");
Dave Barach1403fcd2018-02-05 09:45:43 -05001463 ASSERT (node);
Klement Sekera896c8962019-06-24 11:52:49 +00001464 rm->ip4_full_reass_expire_node_idx = node->index;
Dave Barach1403fcd2018-02-05 09:45:43 -05001465
Klement Sekera896c8962019-06-24 11:52:49 +00001466 ip4_full_reass_set_params (IP4_REASS_TIMEOUT_DEFAULT_MS,
1467 IP4_REASS_MAX_REASSEMBLIES_DEFAULT,
1468 IP4_REASS_MAX_REASSEMBLY_LENGTH_DEFAULT,
1469 IP4_REASS_EXPIRE_WALK_INTERVAL_DEFAULT_MS);
Klement Sekera3ecc2212018-03-27 10:34:43 +02001470
Klement Sekera896c8962019-06-24 11:52:49 +00001471 nbuckets = ip4_full_reass_get_nbuckets ();
1472 clib_bihash_init_16_8 (&rm->hash, "ip4-dr", nbuckets, nbuckets * 1024);
Klement Sekera75e7d132017-09-20 08:26:30 +02001473
Dave Barach1403fcd2018-02-05 09:45:43 -05001474 node = vlib_get_node_by_name (vm, (u8 *) "ip4-drop");
Klement Sekera75e7d132017-09-20 08:26:30 +02001475 ASSERT (node);
1476 rm->ip4_drop_idx = node->index;
Klement Sekera4c533132018-02-22 11:41:12 +01001477
Klement Sekera896c8962019-06-24 11:52:49 +00001478 rm->fq_index = vlib_frame_queue_main_init (ip4_full_reass_node.index, 0);
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001479 rm->fq_feature_index =
Klement Sekera896c8962019-06-24 11:52:49 +00001480 vlib_frame_queue_main_init (ip4_full_reass_node_feature.index, 0);
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001481
Klement Sekera7b2e9fb2019-10-01 13:00:22 +00001482 rm->feature_use_refcount_per_intf = NULL;
Klement Sekera75e7d132017-09-20 08:26:30 +02001483 return error;
1484}
1485
Klement Sekera896c8962019-06-24 11:52:49 +00001486VLIB_INIT_FUNCTION (ip4_full_reass_init_function);
Filip Tehlar26ea14e2019-03-11 05:30:21 -07001487#endif /* CLIB_MARCH_VARIANT */
Klement Sekera75e7d132017-09-20 08:26:30 +02001488
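/*
 * Expiration is handled by a VLIB process rather than by per-packet checks:
 * the loop below sleeps for expire_walk_interval_ms (or until a config
 * change event arrives), then walks every per-thread pool under that
 * thread's lock and drops and frees any reassembly that has been idle for
 * longer than the configured timeout.
 */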
1489static uword
Klement Sekera896c8962019-06-24 11:52:49 +00001490ip4_full_reass_walk_expired (vlib_main_t * vm,
1491 vlib_node_runtime_t * node, vlib_frame_t * f)
Klement Sekera75e7d132017-09-20 08:26:30 +02001492{
Klement Sekera896c8962019-06-24 11:52:49 +00001493 ip4_full_reass_main_t *rm = &ip4_full_reass_main;
Klement Sekera75e7d132017-09-20 08:26:30 +02001494 uword event_type, *event_data = 0;
1495
1496 while (true)
1497 {
1498 vlib_process_wait_for_event_or_clock (vm,
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001499 (f64)
1500 rm->expire_walk_interval_ms /
1501 (f64) MSEC_PER_SEC);
Klement Sekera75e7d132017-09-20 08:26:30 +02001502 event_type = vlib_process_get_events (vm, &event_data);
1503
1504 switch (event_type)
1505 {
1506 case ~0: /* no events => timeout */
1507 /* nothing to do here */
1508 break;
1509 case IP4_EVENT_CONFIG_CHANGED:
1510 break;
1511 default:
1512 clib_warning ("BUG: event type 0x%wx", event_type);
1513 break;
1514 }
1515 f64 now = vlib_time_now (vm);
1516
Klement Sekera896c8962019-06-24 11:52:49 +00001517 ip4_full_reass_t *reass;
Klement Sekera75e7d132017-09-20 08:26:30 +02001518 int *pool_indexes_to_free = NULL;
1519
Klement Sekera4c533132018-02-22 11:41:12 +01001520 uword thread_index = 0;
Klement Sekera75e7d132017-09-20 08:26:30 +02001521 int index;
Juraj Slobodacd806922018-10-10 10:15:54 +02001522 const uword nthreads = vlib_num_workers () + 1;
Klement Sekera4c533132018-02-22 11:41:12 +01001523 for (thread_index = 0; thread_index < nthreads; ++thread_index)
1524 {
Klement Sekera896c8962019-06-24 11:52:49 +00001525 ip4_full_reass_per_thread_t *rt =
1526 &rm->per_thread_data[thread_index];
Klement Sekera4c533132018-02-22 11:41:12 +01001527 clib_spinlock_lock (&rt->lock);
1528
1529 vec_reset_length (pool_indexes_to_free);
1530 /* *INDENT-OFF* */
1531 pool_foreach_index (index, rt->pool, ({
1532 reass = pool_elt_at_index (rt->pool, index);
1533 if (now > reass->last_heard + rm->timeout)
1534 {
1535 vec_add1 (pool_indexes_to_free, index);
1536 }
1537 }));
1538 /* *INDENT-ON* */
1539 int *i;
1540 /* *INDENT-OFF* */
1541 vec_foreach (i, pool_indexes_to_free)
1542 {
Klement Sekera896c8962019-06-24 11:52:49 +00001543 ip4_full_reass_t *reass = pool_elt_at_index (rt->pool, i[0]);
1544 ip4_full_reass_drop_all (vm, node, rm, reass);
1545 ip4_full_reass_free (rm, rt, reass);
Klement Sekera4c533132018-02-22 11:41:12 +01001546 }
1547 /* *INDENT-ON* */
1548
1549 clib_spinlock_unlock (&rt->lock);
1550 }
Klement Sekera75e7d132017-09-20 08:26:30 +02001551
Klement Sekera75e7d132017-09-20 08:26:30 +02001552 vec_free (pool_indexes_to_free);
Klement Sekera75e7d132017-09-20 08:26:30 +02001553 if (event_data)
1554 {
1555 _vec_len (event_data) = 0;
1556 }
1557 }
1558
1559 return 0;
1560}
1561
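/*
 * Registered as VLIB_NODE_TYPE_PROCESS, so ip4_full_reass_walk_expired()
 * runs in the main-thread process scheduler and never sits in the
 * per-packet forwarding path.
 */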
Klement Sekera75e7d132017-09-20 08:26:30 +02001562/* *INDENT-OFF* */
Klement Sekera896c8962019-06-24 11:52:49 +00001563VLIB_REGISTER_NODE (ip4_full_reass_expire_node) = {
1564 .function = ip4_full_reass_walk_expired,
Klement Sekera75e7d132017-09-20 08:26:30 +02001565 .type = VLIB_NODE_TYPE_PROCESS,
Klement Sekera896c8962019-06-24 11:52:49 +00001566 .name = "ip4-full-reassembly-expire-walk",
1567 .format_trace = format_ip4_full_reass_trace,
1568 .n_errors = ARRAY_LEN (ip4_full_reass_error_strings),
1569 .error_strings = ip4_full_reass_error_strings,
Klement Sekera75e7d132017-09-20 08:26:30 +02001570
1571};
1572/* *INDENT-ON* */
1573
1574static u8 *
Klement Sekera896c8962019-06-24 11:52:49 +00001575format_ip4_full_reass_key (u8 * s, va_list * args)
Klement Sekera75e7d132017-09-20 08:26:30 +02001576{
Klement Sekera896c8962019-06-24 11:52:49 +00001577 ip4_full_reass_key_t *key = va_arg (*args, ip4_full_reass_key_t *);
1578 s =
1579 format (s,
1580 "xx_id: %u, src: %U, dst: %U, frag_id: %u, proto: %u",
1581 key->xx_id, format_ip4_address, &key->src, format_ip4_address,
1582 &key->dst, clib_net_to_host_u16 (key->frag_id), key->proto);
Klement Sekera75e7d132017-09-20 08:26:30 +02001583 return s;
1584}
1585
1586static u8 *
1587format_ip4_reass (u8 * s, va_list * args)
1588{
1589 vlib_main_t *vm = va_arg (*args, vlib_main_t *);
Klement Sekera896c8962019-06-24 11:52:49 +00001590 ip4_full_reass_t *reass = va_arg (*args, ip4_full_reass_t *);
Klement Sekera75e7d132017-09-20 08:26:30 +02001591
Klement Sekera4c533132018-02-22 11:41:12 +01001592 s = format (s, "ID: %lu, key: %U\n first_bi: %u, data_len: %u, "
Klement Sekera75e7d132017-09-20 08:26:30 +02001593 "last_packet_octet: %u, trace_op_counter: %u\n",
Klement Sekera896c8962019-06-24 11:52:49 +00001594 reass->id, format_ip4_full_reass_key, &reass->key,
1595 reass->first_bi, reass->data_len,
1596 reass->last_packet_octet, reass->trace_op_counter);
1597
Klement Sekera75e7d132017-09-20 08:26:30 +02001598 u32 bi = reass->first_bi;
1599 u32 counter = 0;
1600 while (~0 != bi)
1601 {
1602 vlib_buffer_t *b = vlib_get_buffer (vm, bi);
1603 vnet_buffer_opaque_t *vnb = vnet_buffer (b);
Klement Sekera896c8962019-06-24 11:52:49 +00001604 s =
1605 format (s,
1606 " #%03u: range: [%u, %u], bi: %u, off: %d, len: %u, "
1607 "fragment[%u, %u]\n", counter, vnb->ip.reass.range_first,
1608 vnb->ip.reass.range_last, bi,
1609 ip4_full_reass_buffer_get_data_offset (b),
1610 ip4_full_reass_buffer_get_data_len (b),
1611 vnb->ip.reass.fragment_first, vnb->ip.reass.fragment_last);
Klement Sekera75e7d132017-09-20 08:26:30 +02001612 if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
1613 {
1614 bi = b->next_buffer;
1615 }
1616 else
1617 {
1618 bi = ~0;
1619 }
1620 }
1621 return s;
1622}
1623
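/*
 * CLI handler behind "show ip4-full-reassembly".  Without arguments it
 * prints the aggregate counters and the configured limits; with the
 * optional "details" keyword it also dumps every in-progress reassembly on
 * every worker via format_ip4_reass.  Example invocation:
 *
 *   vpp# show ip4-full-reassembly details
 */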
1624static clib_error_t *
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001625show_ip4_reass (vlib_main_t * vm,
1626 unformat_input_t * input,
Klement Sekera75e7d132017-09-20 08:26:30 +02001627 CLIB_UNUSED (vlib_cli_command_t * lmd))
1628{
Klement Sekera896c8962019-06-24 11:52:49 +00001629 ip4_full_reass_main_t *rm = &ip4_full_reass_main;
Klement Sekera75e7d132017-09-20 08:26:30 +02001630
1631 vlib_cli_output (vm, "---------------------");
1632 vlib_cli_output (vm, "IP4 reassembly status");
1633 vlib_cli_output (vm, "---------------------");
Klement Sekera4c533132018-02-22 11:41:12 +01001634 bool details = false;
Klement Sekera75e7d132017-09-20 08:26:30 +02001635 if (unformat (input, "details"))
1636 {
Klement Sekera4c533132018-02-22 11:41:12 +01001637 details = true;
1638 }
1639
1640 u32 sum_reass_n = 0;
Klement Sekera896c8962019-06-24 11:52:49 +00001641 ip4_full_reass_t *reass;
Klement Sekera4c533132018-02-22 11:41:12 +01001642 uword thread_index;
Juraj Slobodacd806922018-10-10 10:15:54 +02001643 const uword nthreads = vlib_num_workers () + 1;
Klement Sekera4c533132018-02-22 11:41:12 +01001644 for (thread_index = 0; thread_index < nthreads; ++thread_index)
1645 {
Klement Sekera896c8962019-06-24 11:52:49 +00001646 ip4_full_reass_per_thread_t *rt = &rm->per_thread_data[thread_index];
Klement Sekera4c533132018-02-22 11:41:12 +01001647 clib_spinlock_lock (&rt->lock);
1648 if (details)
1649 {
1650 /* *INDENT-OFF* */
1651 pool_foreach (reass, rt->pool, {
1652 vlib_cli_output (vm, "%U", format_ip4_reass, vm, reass);
1653 });
1654 /* *INDENT-ON* */
1655 }
1656 sum_reass_n += rt->reass_n;
Klement Sekera4c533132018-02-22 11:41:12 +01001657 clib_spinlock_unlock (&rt->lock);
Klement Sekera75e7d132017-09-20 08:26:30 +02001658 }
1659 vlib_cli_output (vm, "---------------------");
Vladimir Ratnikova877cf92019-12-21 06:27:52 -05001660 vlib_cli_output (vm, "Current full IP4 reassemblies count: %lu\n",
Klement Sekera4c533132018-02-22 11:41:12 +01001661 (long unsigned) sum_reass_n);
Klement Sekera75e7d132017-09-20 08:26:30 +02001662 vlib_cli_output (vm,
Vladimir Ratnikova877cf92019-12-21 06:27:52 -05001663 "Maximum configured concurrent full IP4 reassemblies per worker-thread: %lu\n",
Klement Sekera75e7d132017-09-20 08:26:30 +02001664 (long unsigned) rm->max_reass_n);
Vladimir Ratnikova877cf92019-12-21 06:27:52 -05001665 vlib_cli_output (vm,
1666 "Maximum configured full IP4 reassembly timeout: %lums\n",
1667 (long unsigned) rm->timeout_ms);
1668 vlib_cli_output (vm,
1669 "Maximum configured full IP4 reassembly expire walk interval: %lums\n",
1670 (long unsigned) rm->expire_walk_interval_ms);
Klement Sekera75e7d132017-09-20 08:26:30 +02001671 return 0;
1672}
1673
1674/* *INDENT-OFF* */
Klement Sekera896c8962019-06-24 11:52:49 +00001675VLIB_CLI_COMMAND (show_ip4_full_reass_cmd, static) = {
1676 .path = "show ip4-full-reassembly",
1677 .short_help = "show ip4-full-reassembly [details]",
Klement Sekera75e7d132017-09-20 08:26:30 +02001678 .function = show_ip4_reass,
1679};
1680/* *INDENT-ON* */
1681
Filip Tehlar26ea14e2019-03-11 05:30:21 -07001682#ifndef CLIB_MARCH_VARIANT
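/*
 * Plain on/off control for the reassembly feature on one interface: it
 * simply enables or disables the "ip4-full-reassembly-feature" node on the
 * ip4-unicast arc for sw_if_index.  The reference-counted variant further
 * below is intended for callers that may stack several users on the same
 * interface.
 */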
Klement Sekera4c533132018-02-22 11:41:12 +01001683vnet_api_error_t
Klement Sekera896c8962019-06-24 11:52:49 +00001684ip4_full_reass_enable_disable (u32 sw_if_index, u8 enable_disable)
Klement Sekera4c533132018-02-22 11:41:12 +01001685{
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001686 return vnet_feature_enable_disable ("ip4-unicast",
Klement Sekera896c8962019-06-24 11:52:49 +00001687 "ip4-full-reassembly-feature",
1688 sw_if_index, enable_disable, 0, 0);
Klement Sekera4c533132018-02-22 11:41:12 +01001689}
Filip Tehlar26ea14e2019-03-11 05:30:21 -07001690#endif /* CLIB_MARCH_VARIANT */
Klement Sekera4c533132018-02-22 11:41:12 +01001691
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001692
Klement Sekera896c8962019-06-24 11:52:49 +00001693#define foreach_ip4_full_reass_handoff_error \
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001694_(CONGESTION_DROP, "congestion drop")
1695
1696
1697typedef enum
1698{
Klement Sekera896c8962019-06-24 11:52:49 +00001699#define _(sym,str) IP4_FULL_REASS_HANDOFF_ERROR_##sym,
1700 foreach_ip4_full_reass_handoff_error
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001701#undef _
Klement Sekera896c8962019-06-24 11:52:49 +00001702 IP4_FULL_REASS_HANDOFF_N_ERROR,
1703} ip4_full_reass_handoff_error_t;
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001704
Klement Sekera896c8962019-06-24 11:52:49 +00001705static char *ip4_full_reass_handoff_error_strings[] = {
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001706#define _(sym,string) string,
Klement Sekera896c8962019-06-24 11:52:49 +00001707 foreach_ip4_full_reass_handoff_error
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001708#undef _
1709};
1710
1711typedef struct
1712{
1713 u32 next_worker_index;
Klement Sekera896c8962019-06-24 11:52:49 +00001714} ip4_full_reass_handoff_trace_t;
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001715
1716static u8 *
Klement Sekera896c8962019-06-24 11:52:49 +00001717format_ip4_full_reass_handoff_trace (u8 * s, va_list * args)
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001718{
1719 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
1720 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
Klement Sekera896c8962019-06-24 11:52:49 +00001721 ip4_full_reass_handoff_trace_t *t =
1722 va_arg (*args, ip4_full_reass_handoff_trace_t *);
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001723
1724 s =
Klement Sekera896c8962019-06-24 11:52:49 +00001725 format (s, "ip4-full-reassembly-handoff: next-worker %d",
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001726 t->next_worker_index);
1727
1728 return s;
1729}
1730
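/*
 * Worker handoff: all fragments of one datagram must be reassembled by a
 * single thread, so each buffer carries its owner in
 * vnet_buffer (b)->ip.reass.owner_thread_index and is enqueued to that
 * worker's frame queue here.  Buffers that cannot be enqueued because the
 * queue is congested are dropped and counted as congestion drops.
 */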
1731always_inline uword
Klement Sekera896c8962019-06-24 11:52:49 +00001732ip4_full_reass_handoff_node_inline (vlib_main_t * vm,
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001733 vlib_node_runtime_t * node,
1734 vlib_frame_t * frame, bool is_feature)
1735{
Klement Sekera896c8962019-06-24 11:52:49 +00001736 ip4_full_reass_main_t *rm = &ip4_full_reass_main;
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001737
1738 vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
1739 u32 n_enq, n_left_from, *from;
1740 u16 thread_indices[VLIB_FRAME_SIZE], *ti;
1741 u32 fq_index;
1742
1743 from = vlib_frame_vector_args (frame);
1744 n_left_from = frame->n_vectors;
1745 vlib_get_buffers (vm, from, bufs, n_left_from);
1746
1747 b = bufs;
1748 ti = thread_indices;
1749
1750 fq_index = (is_feature) ? rm->fq_feature_index : rm->fq_index;
1751
1752 while (n_left_from > 0)
1753 {
Klement Sekerade34c352019-06-25 11:19:22 +00001754 ti[0] = vnet_buffer (b[0])->ip.reass.owner_thread_index;
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001755
1756 if (PREDICT_FALSE
1757 ((node->flags & VLIB_NODE_FLAG_TRACE)
1758 && (b[0]->flags & VLIB_BUFFER_IS_TRACED)))
1759 {
Klement Sekera896c8962019-06-24 11:52:49 +00001760 ip4_full_reass_handoff_trace_t *t =
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001761 vlib_add_trace (vm, node, b[0], sizeof (*t));
1762 t->next_worker_index = ti[0];
1763 }
1764
1765 n_left_from -= 1;
1766 ti += 1;
1767 b += 1;
1768 }
1769 n_enq =
1770 vlib_buffer_enqueue_to_thread (vm, fq_index, from, thread_indices,
1771 frame->n_vectors, 1);
1772
1773 if (n_enq < frame->n_vectors)
1774 vlib_node_increment_counter (vm, node->node_index,
Klement Sekera896c8962019-06-24 11:52:49 +00001775 IP4_FULL_REASS_HANDOFF_ERROR_CONGESTION_DROP,
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001776 frame->n_vectors - n_enq);
1777 return frame->n_vectors;
1778}
1779
Klement Sekera896c8962019-06-24 11:52:49 +00001780VLIB_NODE_FN (ip4_full_reass_handoff_node) (vlib_main_t * vm,
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001781 vlib_node_runtime_t * node,
1782 vlib_frame_t * frame)
1783{
Klement Sekera896c8962019-06-24 11:52:49 +00001784 return ip4_full_reass_handoff_node_inline (vm, node, frame,
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001785 false /* is_feature */ );
1786}
1787
1788
1789/* *INDENT-OFF* */
Klement Sekera896c8962019-06-24 11:52:49 +00001790VLIB_REGISTER_NODE (ip4_full_reass_handoff_node) = {
1791 .name = "ip4-full-reassembly-handoff",
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001792 .vector_size = sizeof (u32),
Klement Sekera896c8962019-06-24 11:52:49 +00001793 .n_errors = ARRAY_LEN(ip4_full_reass_handoff_error_strings),
1794 .error_strings = ip4_full_reass_handoff_error_strings,
1795 .format_trace = format_ip4_full_reass_handoff_trace,
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001796
1797 .n_next_nodes = 1,
1798
1799 .next_nodes = {
1800 [0] = "error-drop",
1801 },
1802};
1803/* *INDENT-ON* */
1804
1805
1806/* *INDENT-OFF* */
Klement Sekera896c8962019-06-24 11:52:49 +00001807VLIB_NODE_FN (ip4_full_reass_feature_handoff_node) (vlib_main_t * vm,
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001808 vlib_node_runtime_t *
1809 node,
1810 vlib_frame_t * frame)
1811{
Klement Sekera896c8962019-06-24 11:52:49 +00001812 return ip4_full_reass_handoff_node_inline (vm, node, frame,
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001813 true /* is_feature */ );
1814}
1815/* *INDENT-ON* */
1816
1817
1818/* *INDENT-OFF* */
Klement Sekera896c8962019-06-24 11:52:49 +00001819VLIB_REGISTER_NODE (ip4_full_reass_feature_handoff_node) = {
1820 .name = "ip4-full-reass-feature-hoff",
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001821 .vector_size = sizeof (u32),
Klement Sekera896c8962019-06-24 11:52:49 +00001822 .n_errors = ARRAY_LEN(ip4_full_reass_handoff_error_strings),
1823 .error_strings = ip4_full_reass_handoff_error_strings,
1824 .format_trace = format_ip4_full_reass_handoff_trace,
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001825
1826 .n_next_nodes = 1,
1827
1828 .next_nodes = {
1829 [0] = "error-drop",
1830 },
1831};
1832/* *INDENT-ON* */
1833
Klement Sekera7b2e9fb2019-10-01 13:00:22 +00001834#ifndef CLIB_MARCH_VARIANT
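/*
 * Reference-counted variant of the enable/disable helper for callers that
 * may independently turn reassembly on for the same interface: the vnet
 * feature is only enabled on the 0 -> 1 transition and only disabled again
 * on the 1 -> 0 transition.  When no feature change is needed the function
 * returns -1; otherwise it returns the vnet_feature_enable_disable() result.
 */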
1835int
1836ip4_full_reass_enable_disable_with_refcnt (u32 sw_if_index, int is_enable)
1837{
1838 ip4_full_reass_main_t *rm = &ip4_full_reass_main;
1839 vec_validate (rm->feature_use_refcount_per_intf, sw_if_index);
1840 if (is_enable)
1841 {
1842 if (!rm->feature_use_refcount_per_intf[sw_if_index])
1843 {
1844 ++rm->feature_use_refcount_per_intf[sw_if_index];
1845 return vnet_feature_enable_disable ("ip4-unicast",
1846 "ip4-full-reassembly-feature",
1847 sw_if_index, 1, 0, 0);
1848 }
1849 ++rm->feature_use_refcount_per_intf[sw_if_index];
1850 }
1851 else
1852 {
1853 --rm->feature_use_refcount_per_intf[sw_if_index];
1854 if (!rm->feature_use_refcount_per_intf[sw_if_index])
1855 return vnet_feature_enable_disable ("ip4-unicast",
1856 "ip4-full-reassembly-feature",
1857 sw_if_index, 0, 0, 0);
1858 }
1859 return -1;
1860}
1861#endif
1862
Klement Sekera75e7d132017-09-20 08:26:30 +02001863/*
1864 * fd.io coding-style-patch-verification: ON
1865 *
1866 * Local Variables:
1867 * eval: (c-set-style "gnu")
1868 * End:
1869 */