blob: f6c05466e19903598afa01ec378d2757e1cbe5fb [file] [log] [blame]
Klement Sekera75e7d132017-09-20 08:26:30 +02001/*
2 * Copyright (c) 2017 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16/**
17 * @file
Klement Sekera896c8962019-06-24 11:52:49 +000018 * @brief IPv4 Full Reassembly.
Klement Sekera75e7d132017-09-20 08:26:30 +020019 *
Klement Sekera896c8962019-06-24 11:52:49 +000020 * This file contains the source code for IPv4 full reassembly.
Klement Sekera75e7d132017-09-20 08:26:30 +020021 */
22
23#include <vppinfra/vec.h>
24#include <vnet/vnet.h>
25#include <vnet/ip/ip.h>
Klement Sekera896c8962019-06-24 11:52:49 +000026#include <vppinfra/fifo.h>
Klement Sekera8dcfed52018-06-28 11:16:15 +020027#include <vppinfra/bihash_16_8.h>
Klement Sekera896c8962019-06-24 11:52:49 +000028#include <vnet/ip/reass/ip4_full_reass.h>
Klement Sekera630ab582019-07-19 09:14:19 +000029#include <stddef.h>
Klement Sekera75e7d132017-09-20 08:26:30 +020030
/* tunables and their defaults for IPv4 full reassembly */
#define MSEC_PER_SEC 1000
#define IP4_REASS_TIMEOUT_DEFAULT_MS 100
#define IP4_REASS_EXPIRE_WALK_INTERVAL_DEFAULT_MS 10000 // 10 seconds default
#define IP4_REASS_MAX_REASSEMBLIES_DEFAULT 1024
#define IP4_REASS_MAX_REASSEMBLY_LENGTH_DEFAULT 3
/* load factor used when sizing the reassembly bihash */
#define IP4_REASS_HT_LOAD_FACTOR (0.75)

/* compile-time switch - when non-zero, IP4_REASS_DEBUG_BUFFER dumps the
 * whole buffer chain (following VLIB_BUFFER_NEXT_PRESENT) to stdout */
#define IP4_REASS_DEBUG_BUFFERS 0
#if IP4_REASS_DEBUG_BUFFERS
#define IP4_REASS_DEBUG_BUFFER(bi, what)             \
  do                                                 \
    {                                                \
      u32 _bi = bi;                                  \
      printf (#what "buffer %u", _bi);               \
      vlib_buffer_t *_b = vlib_get_buffer (vm, _bi); \
      while (_b->flags & VLIB_BUFFER_NEXT_PRESENT)   \
        {                                            \
          _bi = _b->next_buffer;                     \
          printf ("[%u]", _bi);                      \
          _b = vlib_get_buffer (vm, _bi);            \
        }                                            \
      printf ("\n");                                 \
      fflush (stdout);                               \
    }                                                \
  while (0)
#else
#define IP4_REASS_DEBUG_BUFFER(...)
#endif
59
/* internal return codes used between the reassembly helper functions */
typedef enum
{
  IP4_REASS_RC_OK,
  IP4_REASS_RC_TOO_MANY_FRAGMENTS,
  IP4_REASS_RC_INTERNAL_ERROR,
  IP4_REASS_RC_NO_BUF,
  IP4_REASS_RC_HANDOFF,
} ip4_full_reass_rc_t;

/* 16-byte hash key identifying one reassembly - overlays two u64s so it can
 * be used directly as a clib_bihash_16_8_t key */
typedef struct
{
  union
  {
    struct
    {
      u32 xx_id;
      ip4_address_t src;
      ip4_address_t dst;
      u16 frag_id;
      u8 proto;
      u8 unused;
    };
    u64 as_u64[2];
  };
} ip4_full_reass_key_t;

/* 8-byte hash value: index of the reassembly context in the owning thread's
 * pool plus the index of that owning thread */
typedef union
{
  struct
  {
    u32 reass_index;
    u32 memory_owner_thread_index;
  };
  u64 as_u64;
} ip4_full_reass_val_t;

/* convenience overlay of key+value onto a bihash key-value pair */
typedef union
{
  struct
  {
    ip4_full_reass_key_t k;
    ip4_full_reass_val_t v;
  };
  clib_bihash_kv_16_8_t kv;
} ip4_full_reass_kv_t;
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -0800105
Klement Sekera75e7d132017-09-20 08:26:30 +0200106always_inline u32
Klement Sekera896c8962019-06-24 11:52:49 +0000107ip4_full_reass_buffer_get_data_offset (vlib_buffer_t * b)
Klement Sekera75e7d132017-09-20 08:26:30 +0200108{
109 vnet_buffer_opaque_t *vnb = vnet_buffer (b);
Klement Sekerad0f70a32018-12-14 17:24:13 +0100110 return vnb->ip.reass.range_first - vnb->ip.reass.fragment_first;
Klement Sekera75e7d132017-09-20 08:26:30 +0200111}
112
113always_inline u16
Klement Sekera896c8962019-06-24 11:52:49 +0000114ip4_full_reass_buffer_get_data_len (vlib_buffer_t * b)
Klement Sekera75e7d132017-09-20 08:26:30 +0200115{
116 vnet_buffer_opaque_t *vnb = vnet_buffer (b);
Klement Sekerad0f70a32018-12-14 17:24:13 +0100117 return clib_min (vnb->ip.reass.range_last, vnb->ip.reass.fragment_last) -
Klement Sekera896c8962019-06-24 11:52:49 +0000118 (vnb->ip.reass.fragment_first +
119 ip4_full_reass_buffer_get_data_offset (b)) + 1;
Klement Sekera75e7d132017-09-20 08:26:30 +0200120}
121
/* one in-progress reassembly context */
typedef struct
{
  // hash table key
  ip4_full_reass_key_t key;
  // time when last packet was received
  f64 last_heard;
  // internal id of this reassembly
  u64 id;
  // buffer index of first buffer in this reassembly context
  u32 first_bi;
  // last octet of packet, ~0 until fragment without more_fragments arrives
  u32 last_packet_octet;
  // length of data collected so far
  u32 data_len;
  // trace operation counter
  u32 trace_op_counter;
  // next index - used by non-feature node
  u32 next_index;
  // error next index - used by custom apps (~0 if not used)
  u32 error_next_index;
  // minimum fragment length for this reassembly - used to estimate MTU
  u16 min_fragment_length;
  // number of fragments in this reassembly
  u32 fragments_n;
  // thread owning memory for this context (whose pool contains this ctx)
  u32 memory_owner_thread_index;
  // thread which received fragment with offset 0 and which sends out the
  // completed reassembly
  u32 sendout_thread_index;
} ip4_full_reass_t;

/* per-worker-thread state: a pool of reassembly contexts plus a lock
 * guarding it */
typedef struct
{
  ip4_full_reass_t *pool;
  // number of reassemblies currently allocated from this pool
  u32 reass_n;
  // monotonically increasing counter used to build reassembly ids
  u32 id_counter;
  clib_spinlock_t lock;
} ip4_full_reass_per_thread_t;
Klement Sekera4c533132018-02-22 11:41:12 +0100160
/* global state of the IPv4 full reassembly feature */
typedef struct
{
  // IPv4 config
  u32 timeout_ms;
  f64 timeout;
  u32 expire_walk_interval_ms;
  // maximum number of fragments in one reassembly
  u32 max_reass_len;
  // maximum number of reassemblies
  u32 max_reass_n;

  // IPv4 runtime
  clib_bihash_16_8_t hash;
  // per-thread data
  ip4_full_reass_per_thread_t *per_thread_data;

  // convenience
  vlib_main_t *vlib_main;

  // node index of ip4-drop node
  u32 ip4_drop_idx;
  // node index of the periodic expiry walk process node
  u32 ip4_full_reass_expire_node_idx;

  /** Worker handoff */
  u32 fq_index;
  u32 fq_feature_index;

  // reference count for enabling/disabling feature - per interface
  u32 *feature_use_refcount_per_intf;
} ip4_full_reass_main_t;

extern ip4_full_reass_main_t ip4_full_reass_main;

/* single definition of the global, emitted only in the default march
 * variant translation unit */
#ifndef CLIB_MARCH_VARIANT
ip4_full_reass_main_t ip4_full_reass_main;
#endif /* CLIB_MARCH_VARIANT */
Klement Sekera75e7d132017-09-20 08:26:30 +0200197
/* next-node indices used by the reassembly graph nodes */
typedef enum
{
  IP4_FULL_REASS_NEXT_INPUT,
  IP4_FULL_REASS_NEXT_DROP,
  IP4_FULL_REASS_NEXT_HANDOFF,
  IP4_FULL_REASS_N_NEXT,
} ip4_full_reass_next_t;

/* operations recorded in packet traces */
typedef enum
{
  RANGE_NEW,
  RANGE_SHRINK,
  RANGE_DISCARD,
  RANGE_OVERLAP,
  FINALIZE,
  HANDOFF,
} ip4_full_reass_trace_operation_e;

/* snapshot of one fragment range for tracing purposes */
typedef struct
{
  u16 range_first;
  u16 range_last;
  u32 range_bi;
  i32 data_offset;
  u32 data_len;
  u32 first_bi;
} ip4_full_reass_range_trace_t;

/* full trace record added to a buffer by ip4_full_reass_add_trace */
typedef struct
{
  ip4_full_reass_trace_operation_e action;
  u32 reass_id;
  ip4_full_reass_range_trace_t trace_range;
  u32 size_diff;
  u32 op_id;
  u32 thread_id;
  u32 thread_id_to;
  u32 fragment_first;
  u32 fragment_last;
  u32 total_data_len;
  // true when the buffer was traced on a different thread than the one
  // adding this record (i.e. after a worker handoff)
  bool is_after_handoff;
  // copy of the packet's ip4 header, captured after handoff for correlation
  ip4_header_t ip4_header;
} ip4_full_reass_trace_t;

extern vlib_node_registration_t ip4_full_reass_node;
extern vlib_node_registration_t ip4_full_reass_node_feature;
Filip Tehlar26ea14e2019-03-11 05:30:21 -0700244
Klement Sekera4c533132018-02-22 11:41:12 +0100245static void
Klement Sekera896c8962019-06-24 11:52:49 +0000246ip4_full_reass_trace_details (vlib_main_t * vm, u32 bi,
247 ip4_full_reass_range_trace_t * trace)
Klement Sekera75e7d132017-09-20 08:26:30 +0200248{
249 vlib_buffer_t *b = vlib_get_buffer (vm, bi);
250 vnet_buffer_opaque_t *vnb = vnet_buffer (b);
251 trace->range_first = vnb->ip.reass.range_first;
252 trace->range_last = vnb->ip.reass.range_last;
Klement Sekera896c8962019-06-24 11:52:49 +0000253 trace->data_offset = ip4_full_reass_buffer_get_data_offset (b);
254 trace->data_len = ip4_full_reass_buffer_get_data_len (b);
Klement Sekera75e7d132017-09-20 08:26:30 +0200255 trace->range_bi = bi;
256}
257
Klement Sekera4c533132018-02-22 11:41:12 +0100258static u8 *
Klement Sekera896c8962019-06-24 11:52:49 +0000259format_ip4_full_reass_range_trace (u8 * s, va_list * args)
Klement Sekera75e7d132017-09-20 08:26:30 +0200260{
Klement Sekera896c8962019-06-24 11:52:49 +0000261 ip4_full_reass_range_trace_t *trace =
262 va_arg (*args, ip4_full_reass_range_trace_t *);
263 s =
264 format (s, "range: [%u, %u], off %d, len %u, bi %u", trace->range_first,
265 trace->range_last, trace->data_offset, trace->data_len,
266 trace->range_bi);
Klement Sekera75e7d132017-09-20 08:26:30 +0200267 return s;
268}
269
Filip Tehlar26ea14e2019-03-11 05:30:21 -0700270static u8 *
Klement Sekera896c8962019-06-24 11:52:49 +0000271format_ip4_full_reass_trace (u8 * s, va_list * args)
Klement Sekera75e7d132017-09-20 08:26:30 +0200272{
273 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
274 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
Klement Sekera896c8962019-06-24 11:52:49 +0000275 ip4_full_reass_trace_t *t = va_arg (*args, ip4_full_reass_trace_t *);
Klement Sekera630ab582019-07-19 09:14:19 +0000276 u32 indent = 0;
277 if (~0 != t->reass_id)
278 {
Klement Sekera8563cb32019-10-10 17:03:57 +0000279 if (t->is_after_handoff)
280 {
281 s =
282 format (s, "%U\n", format_ip4_header, &t->ip4_header,
283 sizeof (t->ip4_header));
284 indent = 2;
285 }
286 s =
287 format (s, "%Ureass id: %u, op id: %u, ", format_white_space, indent,
288 t->reass_id, t->op_id);
Klement Sekera630ab582019-07-19 09:14:19 +0000289 indent = format_get_indent (s);
290 s =
291 format (s,
292 "first bi: %u, data len: %u, ip/fragment[%u, %u]",
293 t->trace_range.first_bi, t->total_data_len, t->fragment_first,
294 t->fragment_last);
295 }
Klement Sekera75e7d132017-09-20 08:26:30 +0200296 switch (t->action)
297 {
298 case RANGE_SHRINK:
299 s = format (s, "\n%Ushrink %U by %u", format_white_space, indent,
Klement Sekera896c8962019-06-24 11:52:49 +0000300 format_ip4_full_reass_range_trace, &t->trace_range,
Klement Sekera75e7d132017-09-20 08:26:30 +0200301 t->size_diff);
302 break;
303 case RANGE_DISCARD:
304 s = format (s, "\n%Udiscard %U", format_white_space, indent,
Klement Sekera896c8962019-06-24 11:52:49 +0000305 format_ip4_full_reass_range_trace, &t->trace_range);
Klement Sekera75e7d132017-09-20 08:26:30 +0200306 break;
307 case RANGE_NEW:
308 s = format (s, "\n%Unew %U", format_white_space, indent,
Klement Sekera896c8962019-06-24 11:52:49 +0000309 format_ip4_full_reass_range_trace, &t->trace_range);
Klement Sekera75e7d132017-09-20 08:26:30 +0200310 break;
311 case RANGE_OVERLAP:
312 s = format (s, "\n%Uoverlapping/ignored %U", format_white_space, indent,
Klement Sekera896c8962019-06-24 11:52:49 +0000313 format_ip4_full_reass_range_trace, &t->trace_range);
Klement Sekera75e7d132017-09-20 08:26:30 +0200314 break;
315 case FINALIZE:
316 s = format (s, "\n%Ufinalize reassembly", format_white_space, indent);
317 break;
Klement Sekera630ab582019-07-19 09:14:19 +0000318 case HANDOFF:
319 s =
320 format (s, "handoff from thread #%u to thread #%u", t->thread_id,
321 t->thread_id_to);
322 break;
Klement Sekera75e7d132017-09-20 08:26:30 +0200323 }
324 return s;
325}
326
Klement Sekera4c533132018-02-22 11:41:12 +0100327static void
Klement Sekera896c8962019-06-24 11:52:49 +0000328ip4_full_reass_add_trace (vlib_main_t * vm, vlib_node_runtime_t * node,
329 ip4_full_reass_main_t * rm,
330 ip4_full_reass_t * reass, u32 bi,
331 ip4_full_reass_trace_operation_e action,
332 u32 size_diff, u32 thread_id_to)
Klement Sekera75e7d132017-09-20 08:26:30 +0200333{
334 vlib_buffer_t *b = vlib_get_buffer (vm, bi);
335 vnet_buffer_opaque_t *vnb = vnet_buffer (b);
Klement Sekera8563cb32019-10-10 17:03:57 +0000336 bool is_after_handoff = false;
337 if (vlib_buffer_get_trace_thread (b) != vm->thread_index)
338 {
339 is_after_handoff = true;
340 }
Klement Sekera896c8962019-06-24 11:52:49 +0000341 ip4_full_reass_trace_t *t = vlib_add_trace (vm, node, b, sizeof (t[0]));
Klement Sekera8563cb32019-10-10 17:03:57 +0000342 t->is_after_handoff = is_after_handoff;
343 if (t->is_after_handoff)
344 {
345 clib_memcpy (&t->ip4_header, vlib_buffer_get_current (b),
346 clib_min (sizeof (t->ip4_header), b->current_length));
347 }
Klement Sekera896c8962019-06-24 11:52:49 +0000348 if (reass)
349 {
350 t->reass_id = reass->id;
351 t->op_id = reass->trace_op_counter;
352 t->trace_range.first_bi = reass->first_bi;
353 t->total_data_len = reass->data_len;
354 ++reass->trace_op_counter;
355 }
356 else
357 {
358 t->reass_id = ~0;
359 t->op_id = 0;
360 t->trace_range.first_bi = 0;
361 t->total_data_len = 0;
362 }
Klement Sekera75e7d132017-09-20 08:26:30 +0200363 t->action = action;
Klement Sekera896c8962019-06-24 11:52:49 +0000364 ip4_full_reass_trace_details (vm, bi, &t->trace_range);
Klement Sekera75e7d132017-09-20 08:26:30 +0200365 t->size_diff = size_diff;
Klement Sekera630ab582019-07-19 09:14:19 +0000366 t->thread_id = vm->thread_index;
367 t->thread_id_to = thread_id_to;
Klement Sekera75e7d132017-09-20 08:26:30 +0200368 t->fragment_first = vnb->ip.reass.fragment_first;
369 t->fragment_last = vnb->ip.reass.fragment_last;
Klement Sekera75e7d132017-09-20 08:26:30 +0200370#if 0
371 static u8 *s = NULL;
Klement Sekera896c8962019-06-24 11:52:49 +0000372 s = format (s, "%U", format_ip4_full_reass_trace, NULL, NULL, t);
Klement Sekera75e7d132017-09-20 08:26:30 +0200373 printf ("%.*s\n", vec_len (s), s);
374 fflush (stdout);
375 vec_reset_length (s);
376#endif
377}
378
Klement Sekera630ab582019-07-19 09:14:19 +0000379always_inline void
Klement Sekera896c8962019-06-24 11:52:49 +0000380ip4_full_reass_free_ctx (ip4_full_reass_per_thread_t * rt,
381 ip4_full_reass_t * reass)
Klement Sekera630ab582019-07-19 09:14:19 +0000382{
383 pool_put (rt->pool, reass);
384 --rt->reass_n;
385}
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -0800386
Klement Sekera4c533132018-02-22 11:41:12 +0100387always_inline void
Klement Sekera896c8962019-06-24 11:52:49 +0000388ip4_full_reass_free (ip4_full_reass_main_t * rm,
389 ip4_full_reass_per_thread_t * rt,
390 ip4_full_reass_t * reass)
Klement Sekera75e7d132017-09-20 08:26:30 +0200391{
Klement Sekera8dcfed52018-06-28 11:16:15 +0200392 clib_bihash_kv_16_8_t kv;
Klement Sekera75e7d132017-09-20 08:26:30 +0200393 kv.key[0] = reass->key.as_u64[0];
394 kv.key[1] = reass->key.as_u64[1];
Klement Sekera8dcfed52018-06-28 11:16:15 +0200395 clib_bihash_add_del_16_8 (&rm->hash, &kv, 0);
Klement Sekera896c8962019-06-24 11:52:49 +0000396 return ip4_full_reass_free_ctx (rt, reass);
Klement Sekera75e7d132017-09-20 08:26:30 +0200397}
398
Klement Sekera4c533132018-02-22 11:41:12 +0100399always_inline void
Klement Sekera896c8962019-06-24 11:52:49 +0000400ip4_full_reass_drop_all (vlib_main_t * vm, vlib_node_runtime_t * node,
401 ip4_full_reass_main_t * rm, ip4_full_reass_t * reass)
Klement Sekera75e7d132017-09-20 08:26:30 +0200402{
403 u32 range_bi = reass->first_bi;
404 vlib_buffer_t *range_b;
405 vnet_buffer_opaque_t *range_vnb;
Klement Sekeraf883f6a2019-02-13 11:01:32 +0100406 u32 *to_free = NULL;
Klement Sekera75e7d132017-09-20 08:26:30 +0200407 while (~0 != range_bi)
408 {
409 range_b = vlib_get_buffer (vm, range_bi);
410 range_vnb = vnet_buffer (range_b);
411 u32 bi = range_bi;
412 while (~0 != bi)
413 {
Klement Sekeraf883f6a2019-02-13 11:01:32 +0100414 vec_add1 (to_free, bi);
Klement Sekera75e7d132017-09-20 08:26:30 +0200415 vlib_buffer_t *b = vlib_get_buffer (vm, bi);
416 if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
417 {
418 bi = b->next_buffer;
419 b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
420 }
421 else
422 {
423 bi = ~0;
424 }
425 }
426 range_bi = range_vnb->ip.reass.next_range_bi;
427 }
Klement Sekera21aa8f12019-05-20 12:27:33 +0200428 /* send to next_error_index */
Klement Sekerae8498652019-06-17 12:23:15 +0000429 if (~0 != reass->error_next_index)
Klement Sekera21aa8f12019-05-20 12:27:33 +0200430 {
431 u32 n_left_to_next, *to_next, next_index;
432
433 next_index = reass->error_next_index;
434 u32 bi = ~0;
435
436 while (vec_len (to_free) > 0)
437 {
438 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
439
440 while (vec_len (to_free) > 0 && n_left_to_next > 0)
441 {
442 bi = vec_pop (to_free);
443
444 if (~0 != bi)
445 {
446 to_next[0] = bi;
447 to_next += 1;
448 n_left_to_next -= 1;
Klement Sekera21aa8f12019-05-20 12:27:33 +0200449 }
450 }
451 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
452 }
453 }
454 else
455 {
456 vlib_buffer_free (vm, to_free, vec_len (to_free));
457 }
Klement Sekera75e7d132017-09-20 08:26:30 +0200458}
459
Klement Sekera896c8962019-06-24 11:52:49 +0000460always_inline void
461ip4_full_reass_init (ip4_full_reass_t * reass)
Klement Sekera75e7d132017-09-20 08:26:30 +0200462{
Klement Sekera896c8962019-06-24 11:52:49 +0000463 reass->first_bi = ~0;
464 reass->last_packet_octet = ~0;
465 reass->data_len = 0;
466 reass->next_index = ~0;
467 reass->error_next_index = ~0;
468}
469
/* Look up the reassembly context for kv's key, or create a new one.
 * Returns NULL when the lookup finds nothing and the per-thread limit is
 * reached, or when bihash insertion fails. Sets *do_handoff and returns the
 * context unchanged when the context is owned by a different thread.
 * A context whose timeout expired is dropped and replaced. */
always_inline ip4_full_reass_t *
ip4_full_reass_find_or_create (vlib_main_t * vm, vlib_node_runtime_t * node,
			       ip4_full_reass_main_t * rm,
			       ip4_full_reass_per_thread_t * rt,
			       ip4_full_reass_kv_t * kv, u8 * do_handoff)
{
  ip4_full_reass_t *reass;
  f64 now;

again:

  reass = NULL;
  now = vlib_time_now (vm);
  /* search overwrites kv in place with the stored value on hit */
  if (!clib_bihash_search_16_8
      (&rm->hash, (clib_bihash_kv_16_8_t *) kv, (clib_bihash_kv_16_8_t *) kv))
    {
      reass =
	pool_elt_at_index (rm->per_thread_data
			   [kv->v.memory_owner_thread_index].pool,
			   kv->v.reass_index);
      /* context lives on another thread's pool - caller must hand off */
      if (vm->thread_index != reass->memory_owner_thread_index)
	{
	  *do_handoff = 1;
	  return reass;
	}

      /* stale context - drop its fragments and start over */
      if (now > reass->last_heard + rm->timeout)
	{
	  ip4_full_reass_drop_all (vm, node, rm, reass);
	  ip4_full_reass_free (rm, rt, reass);
	  reass = NULL;
	}
    }

  if (reass)
    {
      reass->last_heard = now;
      return reass;
    }

  /* enforce per-thread cap on concurrent reassemblies */
  if (rt->reass_n >= rm->max_reass_n)
    {
      reass = NULL;
      return reass;
    }
  else
    {
      pool_get (rt->pool, reass);
      clib_memset (reass, 0, sizeof (*reass));
      /* id encodes the creating thread so ids are globally unique */
      reass->id = ((u64) vm->thread_index * 1000000000) + rt->id_counter;
      reass->memory_owner_thread_index = vm->thread_index;
      ++rt->id_counter;
      ip4_full_reass_init (reass);
      ++rt->reass_n;
    }

  reass->key.as_u64[0] = ((clib_bihash_kv_16_8_t *) kv)->key[0];
  reass->key.as_u64[1] = ((clib_bihash_kv_16_8_t *) kv)->key[1];
  kv->v.reass_index = (reass - rt->pool);
  kv->v.memory_owner_thread_index = vm->thread_index;
  reass->last_heard = now;

  /* is_add == 2: add only if not already present (detects races) */
  int rv =
    clib_bihash_add_del_16_8 (&rm->hash, (clib_bihash_kv_16_8_t *) kv, 2);
  if (rv)
    {
      ip4_full_reass_free_ctx (rt, reass);
      reass = NULL;
      // if other worker created a context already work with the other copy
      if (-2 == rv)
	goto again;
    }

  return reass;
}
545
Klement Sekera896c8962019-06-24 11:52:49 +0000546always_inline ip4_full_reass_rc_t
547ip4_full_reass_finalize (vlib_main_t * vm, vlib_node_runtime_t * node,
548 ip4_full_reass_main_t * rm,
549 ip4_full_reass_per_thread_t * rt,
550 ip4_full_reass_t * reass, u32 * bi0,
551 u32 * next0, u32 * error0, bool is_custom_app)
Klement Sekera75e7d132017-09-20 08:26:30 +0200552{
Klement Sekera75e7d132017-09-20 08:26:30 +0200553 vlib_buffer_t *first_b = vlib_get_buffer (vm, reass->first_bi);
554 vlib_buffer_t *last_b = NULL;
555 u32 sub_chain_bi = reass->first_bi;
556 u32 total_length = 0;
557 u32 buf_cnt = 0;
Klement Sekera75e7d132017-09-20 08:26:30 +0200558 do
559 {
560 u32 tmp_bi = sub_chain_bi;
561 vlib_buffer_t *tmp = vlib_get_buffer (vm, tmp_bi);
562 ip4_header_t *ip = vlib_buffer_get_current (tmp);
Klement Sekerad0f70a32018-12-14 17:24:13 +0100563 vnet_buffer_opaque_t *vnb = vnet_buffer (tmp);
564 if (!(vnb->ip.reass.range_first >= vnb->ip.reass.fragment_first) &&
565 !(vnb->ip.reass.range_last > vnb->ip.reass.fragment_first))
566 {
567 return IP4_REASS_RC_INTERNAL_ERROR;
568 }
569
Klement Sekera896c8962019-06-24 11:52:49 +0000570 u32 data_len = ip4_full_reass_buffer_get_data_len (tmp);
Klement Sekera75e7d132017-09-20 08:26:30 +0200571 u32 trim_front =
Klement Sekera896c8962019-06-24 11:52:49 +0000572 ip4_header_bytes (ip) + ip4_full_reass_buffer_get_data_offset (tmp);
Klement Sekera75e7d132017-09-20 08:26:30 +0200573 u32 trim_end =
574 vlib_buffer_length_in_chain (vm, tmp) - trim_front - data_len;
575 if (tmp_bi == reass->first_bi)
576 {
577 /* first buffer - keep ip4 header */
Klement Sekera896c8962019-06-24 11:52:49 +0000578 if (0 != ip4_full_reass_buffer_get_data_offset (tmp))
Klement Sekerad0f70a32018-12-14 17:24:13 +0100579 {
580 return IP4_REASS_RC_INTERNAL_ERROR;
581 }
Klement Sekera75e7d132017-09-20 08:26:30 +0200582 trim_front = 0;
583 trim_end = vlib_buffer_length_in_chain (vm, tmp) - data_len -
584 ip4_header_bytes (ip);
Klement Sekerad0f70a32018-12-14 17:24:13 +0100585 if (!(vlib_buffer_length_in_chain (vm, tmp) - trim_end > 0))
586 {
587 return IP4_REASS_RC_INTERNAL_ERROR;
588 }
Klement Sekera75e7d132017-09-20 08:26:30 +0200589 }
590 u32 keep_data =
591 vlib_buffer_length_in_chain (vm, tmp) - trim_front - trim_end;
592 while (1)
593 {
594 ++buf_cnt;
595 if (trim_front)
596 {
597 if (trim_front > tmp->current_length)
598 {
599 /* drop whole buffer */
Klement Sekeraf369e3a2019-04-30 13:01:08 +0200600 u32 to_be_freed_bi = tmp_bi;
Klement Sekera75e7d132017-09-20 08:26:30 +0200601 trim_front -= tmp->current_length;
Klement Sekerad0f70a32018-12-14 17:24:13 +0100602 if (!(tmp->flags & VLIB_BUFFER_NEXT_PRESENT))
603 {
604 return IP4_REASS_RC_INTERNAL_ERROR;
605 }
Klement Sekera75e7d132017-09-20 08:26:30 +0200606 tmp->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
607 tmp_bi = tmp->next_buffer;
Vijayabhaskar Katamreddy90556d62019-05-23 13:02:28 -0700608 tmp->next_buffer = 0;
Klement Sekera75e7d132017-09-20 08:26:30 +0200609 tmp = vlib_get_buffer (vm, tmp_bi);
Klement Sekeraf369e3a2019-04-30 13:01:08 +0200610 vlib_buffer_free_one (vm, to_be_freed_bi);
Klement Sekera75e7d132017-09-20 08:26:30 +0200611 continue;
612 }
613 else
614 {
615 vlib_buffer_advance (tmp, trim_front);
616 trim_front = 0;
617 }
618 }
619 if (keep_data)
620 {
621 if (last_b)
622 {
623 last_b->flags |= VLIB_BUFFER_NEXT_PRESENT;
624 last_b->next_buffer = tmp_bi;
625 }
626 last_b = tmp;
627 if (keep_data <= tmp->current_length)
628 {
629 tmp->current_length = keep_data;
630 keep_data = 0;
631 }
632 else
633 {
634 keep_data -= tmp->current_length;
Klement Sekerad0f70a32018-12-14 17:24:13 +0100635 if (!(tmp->flags & VLIB_BUFFER_NEXT_PRESENT))
636 {
637 return IP4_REASS_RC_INTERNAL_ERROR;
638 }
Klement Sekera75e7d132017-09-20 08:26:30 +0200639 }
640 total_length += tmp->current_length;
Klement Sekeraf369e3a2019-04-30 13:01:08 +0200641 if (tmp->flags & VLIB_BUFFER_NEXT_PRESENT)
642 {
643 tmp_bi = tmp->next_buffer;
644 tmp = vlib_get_buffer (vm, tmp->next_buffer);
645 }
646 else
647 {
648 break;
649 }
Klement Sekera75e7d132017-09-20 08:26:30 +0200650 }
651 else
652 {
Klement Sekeraf369e3a2019-04-30 13:01:08 +0200653 u32 to_be_freed_bi = tmp_bi;
Klement Sekerad0f70a32018-12-14 17:24:13 +0100654 if (reass->first_bi == tmp_bi)
655 {
656 return IP4_REASS_RC_INTERNAL_ERROR;
657 }
Klement Sekeraf369e3a2019-04-30 13:01:08 +0200658 if (tmp->flags & VLIB_BUFFER_NEXT_PRESENT)
659 {
Vijayabhaskar Katamreddy90556d62019-05-23 13:02:28 -0700660 tmp->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
Klement Sekeraf369e3a2019-04-30 13:01:08 +0200661 tmp_bi = tmp->next_buffer;
Vijayabhaskar Katamreddy90556d62019-05-23 13:02:28 -0700662 tmp->next_buffer = 0;
663 tmp = vlib_get_buffer (vm, tmp_bi);
Klement Sekeraf369e3a2019-04-30 13:01:08 +0200664 vlib_buffer_free_one (vm, to_be_freed_bi);
665 }
666 else
667 {
Vijayabhaskar Katamreddy90556d62019-05-23 13:02:28 -0700668 tmp->next_buffer = 0;
Klement Sekeraf369e3a2019-04-30 13:01:08 +0200669 vlib_buffer_free_one (vm, to_be_freed_bi);
670 break;
671 }
Klement Sekera75e7d132017-09-20 08:26:30 +0200672 }
673 }
674 sub_chain_bi =
675 vnet_buffer (vlib_get_buffer (vm, sub_chain_bi))->ip.
676 reass.next_range_bi;
677 }
678 while (~0 != sub_chain_bi);
Chris Luke30684ac2018-03-29 12:56:58 -0700679
Klement Sekerad0f70a32018-12-14 17:24:13 +0100680 if (!last_b)
681 {
682 return IP4_REASS_RC_INTERNAL_ERROR;
683 }
Klement Sekera75e7d132017-09-20 08:26:30 +0200684 last_b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
Vijayabhaskar Katamreddy90556d62019-05-23 13:02:28 -0700685
Klement Sekerad0f70a32018-12-14 17:24:13 +0100686 if (total_length < first_b->current_length)
687 {
688 return IP4_REASS_RC_INTERNAL_ERROR;
689 }
Klement Sekera75e7d132017-09-20 08:26:30 +0200690 total_length -= first_b->current_length;
691 first_b->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
692 first_b->total_length_not_including_first_buffer = total_length;
693 ip4_header_t *ip = vlib_buffer_get_current (first_b);
694 ip->flags_and_fragment_offset = 0;
695 ip->length = clib_host_to_net_u16 (first_b->current_length + total_length);
696 ip->checksum = ip4_header_checksum (ip);
Klement Sekeraf883f6a2019-02-13 11:01:32 +0100697 if (!vlib_buffer_chain_linearize (vm, first_b))
698 {
699 return IP4_REASS_RC_NO_BUF;
700 }
Vijayabhaskar Katamreddy90556d62019-05-23 13:02:28 -0700701 // reset to reconstruct the mbuf linking
702 first_b->flags &= ~VLIB_BUFFER_EXT_HDR_VALID;
Klement Sekera75e7d132017-09-20 08:26:30 +0200703 if (PREDICT_FALSE (first_b->flags & VLIB_BUFFER_IS_TRACED))
704 {
Klement Sekera896c8962019-06-24 11:52:49 +0000705 ip4_full_reass_add_trace (vm, node, rm, reass, reass->first_bi,
706 FINALIZE, 0, ~0);
Klement Sekera75e7d132017-09-20 08:26:30 +0200707#if 0
708 // following code does a hexdump of packet fragments to stdout ...
709 do
710 {
711 u32 bi = reass->first_bi;
712 u8 *s = NULL;
713 while (~0 != bi)
714 {
715 vlib_buffer_t *b = vlib_get_buffer (vm, bi);
716 s = format (s, "%u: %U\n", bi, format_hexdump,
717 vlib_buffer_get_current (b), b->current_length);
718 if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
719 {
720 bi = b->next_buffer;
721 }
722 else
723 {
724 break;
725 }
726 }
727 printf ("%.*s\n", vec_len (s), s);
728 fflush (stdout);
729 vec_free (s);
730 }
731 while (0);
732#endif
733 }
734 *bi0 = reass->first_bi;
Klement Sekerae8498652019-06-17 12:23:15 +0000735 if (!is_custom_app)
Klement Sekera4c533132018-02-22 11:41:12 +0100736 {
Klement Sekera896c8962019-06-24 11:52:49 +0000737 *next0 = IP4_FULL_REASS_NEXT_INPUT;
Klement Sekera4c533132018-02-22 11:41:12 +0100738 }
739 else
740 {
741 *next0 = reass->next_index;
742 }
743 vnet_buffer (first_b)->ip.reass.estimated_mtu = reass->min_fragment_length;
Klement Sekera75e7d132017-09-20 08:26:30 +0200744 *error0 = IP4_ERROR_NONE;
Klement Sekera896c8962019-06-24 11:52:49 +0000745 ip4_full_reass_free (rm, rt, reass);
Klement Sekera75e7d132017-09-20 08:26:30 +0200746 reass = NULL;
Klement Sekerad0f70a32018-12-14 17:24:13 +0100747 return IP4_REASS_RC_OK;
Klement Sekera75e7d132017-09-20 08:26:30 +0200748}
749
Klement Sekera896c8962019-06-24 11:52:49 +0000750always_inline ip4_full_reass_rc_t
751ip4_full_reass_insert_range_in_chain (vlib_main_t * vm,
752 ip4_full_reass_main_t * rm,
753 ip4_full_reass_per_thread_t * rt,
754 ip4_full_reass_t * reass,
755 u32 prev_range_bi, u32 new_next_bi)
Klement Sekera75e7d132017-09-20 08:26:30 +0200756{
Klement Sekera75e7d132017-09-20 08:26:30 +0200757 vlib_buffer_t *new_next_b = vlib_get_buffer (vm, new_next_bi);
758 vnet_buffer_opaque_t *new_next_vnb = vnet_buffer (new_next_b);
759 if (~0 != prev_range_bi)
760 {
761 vlib_buffer_t *prev_b = vlib_get_buffer (vm, prev_range_bi);
762 vnet_buffer_opaque_t *prev_vnb = vnet_buffer (prev_b);
763 new_next_vnb->ip.reass.next_range_bi = prev_vnb->ip.reass.next_range_bi;
764 prev_vnb->ip.reass.next_range_bi = new_next_bi;
765 }
766 else
767 {
768 if (~0 != reass->first_bi)
769 {
770 new_next_vnb->ip.reass.next_range_bi = reass->first_bi;
771 }
772 reass->first_bi = new_next_bi;
773 }
Klement Sekerad0f70a32018-12-14 17:24:13 +0100774 vnet_buffer_opaque_t *vnb = vnet_buffer (new_next_b);
775 if (!(vnb->ip.reass.range_first >= vnb->ip.reass.fragment_first) &&
776 !(vnb->ip.reass.range_last > vnb->ip.reass.fragment_first))
777 {
778 return IP4_REASS_RC_INTERNAL_ERROR;
779 }
Klement Sekera896c8962019-06-24 11:52:49 +0000780 reass->data_len += ip4_full_reass_buffer_get_data_len (new_next_b);
Klement Sekerad0f70a32018-12-14 17:24:13 +0100781 return IP4_REASS_RC_OK;
Klement Sekera75e7d132017-09-20 08:26:30 +0200782}
783
/**
 * @brief Unlink a fragment range from a reassembly's chain and free it.
 *
 * Removes the range identified by @p discard_bi from the linked list of
 * ranges, subtracts its payload from the accumulated data length, then
 * frees the whole vlib buffer chain backing the range one buffer at a
 * time (clearing next pointers to keep buffer metadata consistent).
 *
 * @param vm            vlib main
 * @param node          graph node (for tracing)
 * @param rm            reassembly main (for tracing)
 * @param reass         reassembly context to remove from
 * @param prev_range_bi buffer index of the predecessor range, or ~0 if
 *                      @p discard_bi is the head of the chain
 * @param discard_bi    buffer index of the range to remove and free
 * @return IP4_REASS_RC_OK on success, IP4_REASS_RC_INTERNAL_ERROR if the
 *         chain is inconsistent
 */
always_inline ip4_full_reass_rc_t
ip4_full_reass_remove_range_from_chain (vlib_main_t * vm,
					vlib_node_runtime_t * node,
					ip4_full_reass_main_t * rm,
					ip4_full_reass_t * reass,
					u32 prev_range_bi, u32 discard_bi)
{
  vlib_buffer_t *discard_b = vlib_get_buffer (vm, discard_bi);
  vnet_buffer_opaque_t *discard_vnb = vnet_buffer (discard_b);
  if (~0 != prev_range_bi)
    {
      vlib_buffer_t *prev_b = vlib_get_buffer (vm, prev_range_bi);
      vnet_buffer_opaque_t *prev_vnb = vnet_buffer (prev_b);
      // the predecessor must actually point at the range being removed
      if (!(prev_vnb->ip.reass.next_range_bi == discard_bi))
	{
	  return IP4_REASS_RC_INTERNAL_ERROR;
	}
      prev_vnb->ip.reass.next_range_bi = discard_vnb->ip.reass.next_range_bi;
    }
  else
    {
      // removing the head range
      reass->first_bi = discard_vnb->ip.reass.next_range_bi;
    }
  vnet_buffer_opaque_t *vnb = vnet_buffer (discard_b);
  // sanity check on range vs fragment bounds
  // NOTE(review): same && vs || question as in insert_range_in_chain —
  // this only errors when both invariants are violated; confirm intent
  if (!(vnb->ip.reass.range_first >= vnb->ip.reass.fragment_first) &&
      !(vnb->ip.reass.range_last > vnb->ip.reass.fragment_first))
    {
      return IP4_REASS_RC_INTERNAL_ERROR;
    }
  reass->data_len -= ip4_full_reass_buffer_get_data_len (discard_b);
  // free the buffer chain backing this range, one buffer at a time;
  // next pointers are cleared before the free to reset mbuf linking
  while (1)
    {
      u32 to_be_freed_bi = discard_bi;
      if (PREDICT_FALSE (discard_b->flags & VLIB_BUFFER_IS_TRACED))
	{
	  ip4_full_reass_add_trace (vm, node, rm, reass, discard_bi,
				    RANGE_DISCARD, 0, ~0);
	}
      if (discard_b->flags & VLIB_BUFFER_NEXT_PRESENT)
	{
	  // advance to the next buffer before freeing the current one
	  discard_b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
	  discard_bi = discard_b->next_buffer;
	  discard_b->next_buffer = 0;
	  discard_b = vlib_get_buffer (vm, discard_bi);
	  vlib_buffer_free_one (vm, to_be_freed_bi);
	}
      else
	{
	  // last buffer in the chain
	  discard_b->next_buffer = 0;
	  vlib_buffer_free_one (vm, to_be_freed_bi);
	  break;
	}
    }
  return IP4_REASS_RC_OK;
}
839
/**
 * @brief Merge one incoming fragment into a reassembly context.
 *
 * Walks the ordered chain of already-received ranges and inserts, trims,
 * or discards ranges as needed to resolve overlaps with the new fragment.
 * If the fragment completes the datagram (data_len matches the recorded
 * last-packet octet), the reassembly is finalized; if the finalized packet
 * must be sent out on a different thread, IP4_REASS_RC_HANDOFF is
 * returned and *handoff_thread_idx is set.
 *
 * @param vm                 vlib main
 * @param node               graph node (for tracing / counters)
 * @param rm                 reassembly main
 * @param rt                 per-thread reassembly data
 * @param reass              reassembly context for this fragment's key
 * @param bi0                in/out buffer index; set to ~0 if the buffer
 *                           was consumed into the reassembly
 * @param next0              out next-node index (set on finalize/drop)
 * @param error0             out error code (set on finalize/drop)
 * @param is_custom_app      true when a custom app supplied next/error
 *                           indices in the buffer opaque data
 * @param handoff_thread_idx out thread index for IP4_REASS_RC_HANDOFF
 * @return IP4_REASS_RC_* result code
 */
always_inline ip4_full_reass_rc_t
ip4_full_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
		       ip4_full_reass_main_t * rm,
		       ip4_full_reass_per_thread_t * rt,
		       ip4_full_reass_t * reass, u32 * bi0, u32 * next0,
		       u32 * error0, bool is_custom_app,
		       u32 * handoff_thread_idx)
{
  vlib_buffer_t *fb = vlib_get_buffer (vm, *bi0);
  vnet_buffer_opaque_t *fvnb = vnet_buffer (fb);
  if (is_custom_app)
    {
      // store (error_)next_index before it's overwritten
      reass->next_index = fvnb->ip.reass.next_index;
      reass->error_next_index = fvnb->ip.reass.error_next_index;
    }
  ip4_full_reass_rc_t rc = IP4_REASS_RC_OK;
  int consumed = 0;
  ip4_header_t *fip = vlib_buffer_get_current (fb);
  // fragment extent in payload-octet coordinates (inclusive last)
  const u32 fragment_first = ip4_get_fragment_offset_bytes (fip);
  const u32 fragment_length =
    clib_net_to_host_u16 (fip->length) - ip4_header_bytes (fip);
  const u32 fragment_last = fragment_first + fragment_length - 1;
  fvnb->ip.reass.fragment_first = fragment_first;
  fvnb->ip.reass.fragment_last = fragment_last;
  int more_fragments = ip4_get_fragment_more (fip);
  u32 candidate_range_bi = reass->first_bi;
  u32 prev_range_bi = ~0;
  // the new fragment starts life as a range covering exactly itself
  fvnb->ip.reass.range_first = fragment_first;
  fvnb->ip.reass.range_last = fragment_last;
  fvnb->ip.reass.next_range_bi = ~0;
  if (!more_fragments)
    {
      // last fragment pins the total datagram payload length
      reass->last_packet_octet = fragment_last;
    }
  if (~0 == reass->first_bi)
    {
      // starting a new reassembly
      rc =
	ip4_full_reass_insert_range_in_chain (vm, rm, rt, reass,
					      prev_range_bi, *bi0);
      if (IP4_REASS_RC_OK != rc)
	{
	  return rc;
	}
      if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
	{
	  ip4_full_reass_add_trace (vm, node, rm, reass, *bi0, RANGE_NEW, 0,
				    ~0);
	}
      *bi0 = ~0;
      reass->min_fragment_length = clib_net_to_host_u16 (fip->length);
      reass->fragments_n = 1;
      return IP4_REASS_RC_OK;
    }
  // track the smallest fragment seen so far as an MTU estimate
  reass->min_fragment_length =
    clib_min (clib_net_to_host_u16 (fip->length),
	      fvnb->ip.reass.estimated_mtu);
  // walk the ordered range chain looking for the insertion point;
  // every path out of this loop is a continue, a break, or a return
  while (~0 != candidate_range_bi)
    {
      vlib_buffer_t *candidate_b = vlib_get_buffer (vm, candidate_range_bi);
      vnet_buffer_opaque_t *candidate_vnb = vnet_buffer (candidate_b);
      if (fragment_first > candidate_vnb->ip.reass.range_last)
	{
	  // this fragments starts after candidate range
	  prev_range_bi = candidate_range_bi;
	  candidate_range_bi = candidate_vnb->ip.reass.next_range_bi;
	  if (candidate_vnb->ip.reass.range_last < fragment_last &&
	      ~0 == candidate_range_bi)
	    {
	      // special case - this fragment falls beyond all known ranges
	      rc =
		ip4_full_reass_insert_range_in_chain (vm, rm, rt, reass,
						      prev_range_bi, *bi0);
	      if (IP4_REASS_RC_OK != rc)
		{
		  return rc;
		}
	      consumed = 1;
	      break;
	    }
	  continue;
	}
      if (fragment_last < candidate_vnb->ip.reass.range_first)
	{
	  // this fragment ends before candidate range without any overlap
	  rc =
	    ip4_full_reass_insert_range_in_chain (vm, rm, rt, reass,
						  prev_range_bi, *bi0);
	  if (IP4_REASS_RC_OK != rc)
	    {
	      return rc;
	    }
	  consumed = 1;
	}
      else
	{
	  // overlap with the candidate range - resolve it
	  if (fragment_first >= candidate_vnb->ip.reass.range_first &&
	      fragment_last <= candidate_vnb->ip.reass.range_last)
	    {
	      // this fragment is a (sub)part of existing range, ignore it
	      if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
		{
		  ip4_full_reass_add_trace (vm, node, rm, reass, *bi0,
					    RANGE_OVERLAP, 0, ~0);
		}
	      break;
	    }
	  int discard_candidate = 0;
	  if (fragment_first < candidate_vnb->ip.reass.range_first)
	    {
	      // new fragment overlaps the front of the candidate
	      u32 overlap =
		fragment_last - candidate_vnb->ip.reass.range_first + 1;
	      if (overlap < ip4_full_reass_buffer_get_data_len (candidate_b))
		{
		  // shrink the candidate from the front, insert before it
		  candidate_vnb->ip.reass.range_first += overlap;
		  if (reass->data_len < overlap)
		    {
		      return IP4_REASS_RC_INTERNAL_ERROR;
		    }
		  reass->data_len -= overlap;
		  if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
		    {
		      ip4_full_reass_add_trace (vm, node, rm, reass,
						candidate_range_bi,
						RANGE_SHRINK, 0, ~0);
		    }
		  rc =
		    ip4_full_reass_insert_range_in_chain (vm, rm, rt, reass,
							  prev_range_bi,
							  *bi0);
		  if (IP4_REASS_RC_OK != rc)
		    {
		      return rc;
		    }
		  consumed = 1;
		}
	      else
		{
		  // new fragment fully covers the candidate
		  discard_candidate = 1;
		}
	    }
	  else if (fragment_last > candidate_vnb->ip.reass.range_last)
	    {
	      // new fragment overlaps the tail of the candidate
	      u32 overlap =
		candidate_vnb->ip.reass.range_last - fragment_first + 1;
	      if (overlap < ip4_full_reass_buffer_get_data_len (candidate_b))
		{
		  // trim the front of the new fragment and keep probing
		  fvnb->ip.reass.range_first += overlap;
		  if (~0 != candidate_vnb->ip.reass.next_range_bi)
		    {
		      prev_range_bi = candidate_range_bi;
		      candidate_range_bi =
			candidate_vnb->ip.reass.next_range_bi;
		      continue;
		    }
		  else
		    {
		      // special case - last range discarded
		      rc =
			ip4_full_reass_insert_range_in_chain (vm, rm, rt,
							      reass,
							      candidate_range_bi,
							      *bi0);
		      if (IP4_REASS_RC_OK != rc)
			{
			  return rc;
			}
		      consumed = 1;
		    }
		}
	      else
		{
		  discard_candidate = 1;
		}
	    }
	  else
	    {
	      discard_candidate = 1;
	    }
	  if (discard_candidate)
	    {
	      u32 next_range_bi = candidate_vnb->ip.reass.next_range_bi;
	      // discard candidate range, probe next range
	      rc =
		ip4_full_reass_remove_range_from_chain (vm, node, rm, reass,
							prev_range_bi,
							candidate_range_bi);
	      if (IP4_REASS_RC_OK != rc)
		{
		  return rc;
		}
	      if (~0 != next_range_bi)
		{
		  candidate_range_bi = next_range_bi;
		  continue;
		}
	      else
		{
		  // special case - last range discarded
		  rc =
		    ip4_full_reass_insert_range_in_chain (vm, rm, rt, reass,
							  prev_range_bi,
							  *bi0);
		  if (IP4_REASS_RC_OK != rc)
		    {
		      return rc;
		    }
		  consumed = 1;
		}
	    }
	}
      break;
    }
  ++reass->fragments_n;
  if (consumed)
    {
      if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
	{
	  ip4_full_reass_add_trace (vm, node, rm, reass, *bi0, RANGE_NEW, 0,
				    ~0);
	}
    }
  // complete when accumulated payload equals the recorded datagram size
  if (~0 != reass->last_packet_octet &&
      reass->data_len == reass->last_packet_octet + 1)
    {
      *handoff_thread_idx = reass->sendout_thread_index;
      int handoff =
	reass->memory_owner_thread_index != reass->sendout_thread_index;
      rc =
	ip4_full_reass_finalize (vm, node, rm, rt, reass, bi0, next0, error0,
				 is_custom_app);
      // a successful finalize on the wrong thread becomes a handoff
      if (IP4_REASS_RC_OK == rc && handoff)
	{
	  rc = IP4_REASS_RC_HANDOFF;
	}
    }
  else
    {
      if (consumed)
	{
	  *bi0 = ~0;
	  if (reass->fragments_n > rm->max_reass_len)
	    {
	      rc = IP4_REASS_RC_TOO_MANY_FRAGMENTS;
	    }
	}
      else
	{
	  // fragment was neither consumed nor completed - duplicate
	  *next0 = IP4_FULL_REASS_NEXT_DROP;
	  *error0 = IP4_ERROR_REASS_DUPLICATE_FRAGMENT;
	}
    }
  return rc;
}
1095
/**
 * @brief Shared node function body for the IPv4 full reassembly nodes.
 *
 * For each buffer in the frame: whole (unfragmented) packets pass
 * straight through; malformed fragments are dropped; valid fragments are
 * looked up / inserted into the reassembly hash and merged via
 * ip4_full_reass_update(), which may consume the buffer, emit a
 * completed datagram, or request a handoff to another thread.
 *
 * @param vm            vlib main
 * @param node          this graph node
 * @param frame         frame of buffer indices to process
 * @param is_feature    true when running as an ip4-unicast feature
 *                      (next node chosen via vnet_feature_next)
 * @param is_custom_app true when next/error indices come from the
 *                      buffer opaque data instead of this node's arcs
 * @return number of vectors processed (frame->n_vectors)
 */
always_inline uword
ip4_full_reass_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
		       vlib_frame_t * frame, bool is_feature,
		       bool is_custom_app)
{
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left_from, n_left_to_next, *to_next, next_index;
  ip4_full_reass_main_t *rm = &ip4_full_reass_main;
  ip4_full_reass_per_thread_t *rt = &rm->per_thread_data[vm->thread_index];
  // per-thread state is protected for the duration of the whole frame
  clib_spinlock_lock (&rt->lock);

  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  u32 bi0;
	  vlib_buffer_t *b0;
	  u32 next0;
	  u32 error0 = IP4_ERROR_NONE;

	  bi0 = from[0];
	  b0 = vlib_get_buffer (vm, bi0);

	  ip4_header_t *ip0 = vlib_buffer_get_current (b0);
	  if (!ip4_get_fragment_more (ip0) && !ip4_get_fragment_offset (ip0))
	    {
	      // this is a whole packet - no fragmentation
	      if (!is_custom_app)
		{
		  next0 = IP4_FULL_REASS_NEXT_INPUT;
		}
	      else
		{
		  next0 = vnet_buffer (b0)->ip.reass.next_index;
		}
	      goto packet_enqueue;
	    }
	  const u32 fragment_first = ip4_get_fragment_offset_bytes (ip0);
	  const u32 fragment_length =
	    clib_net_to_host_u16 (ip0->length) - ip4_header_bytes (ip0);
	  const u32 fragment_last = fragment_first + fragment_length - 1;
	  // reject fragments that wrap, exceed max datagram size, or are
	  // shorter than the minimum non-final fragment size
	  if (fragment_first > fragment_last || fragment_first + fragment_length > UINT16_MAX - 20 || (fragment_length < 8 && ip4_get_fragment_more (ip0)))	// 8 is minimum frag length per RFC 791
	    {
	      next0 = IP4_FULL_REASS_NEXT_DROP;
	      error0 = IP4_ERROR_REASS_MALFORMED_PACKET;
	      goto packet_enqueue;
	    }
	  ip4_full_reass_kv_t kv;
	  u8 do_handoff = 0;

	  // reassembly key: fib index + src, dst + fragment id + protocol
	  kv.k.as_u64[0] =
	    (u64) vec_elt (ip4_main.fib_index_by_sw_if_index,
			   vnet_buffer (b0)->sw_if_index[VLIB_RX]) |
	    (u64) ip0->src_address.as_u32 << 32;
	  kv.k.as_u64[1] =
	    (u64) ip0->dst_address.
	    as_u32 | (u64) ip0->fragment_id << 32 | (u64) ip0->protocol << 48;

	  ip4_full_reass_t *reass =
	    ip4_full_reass_find_or_create (vm, node, rm, rt, &kv,
					   &do_handoff);

	  if (reass)
	    {
	      // the thread that sees the first fragment sends the result
	      const u32 fragment_first = ip4_get_fragment_offset_bytes (ip0);
	      if (0 == fragment_first)
		{
		  reass->sendout_thread_index = vm->thread_index;
		}
	    }

	  if (PREDICT_FALSE (do_handoff))
	    {
	      // reassembly memory lives on another thread - hand off
	      next0 = IP4_FULL_REASS_NEXT_HANDOFF;
	      vnet_buffer (b0)->ip.reass.owner_thread_index =
		kv.v.memory_owner_thread_index;
	    }
	  else if (reass)
	    {
	      u32 handoff_thread_idx;
	      switch (ip4_full_reass_update
		      (vm, node, rm, rt, reass, &bi0, &next0,
		       &error0, is_custom_app, &handoff_thread_idx))
		{
		case IP4_REASS_RC_OK:
		  /* nothing to do here */
		  break;
		case IP4_REASS_RC_HANDOFF:
		  next0 = IP4_FULL_REASS_NEXT_HANDOFF;
		  b0 = vlib_get_buffer (vm, bi0);
		  vnet_buffer (b0)->ip.reass.owner_thread_index =
		    handoff_thread_idx;
		  break;
		case IP4_REASS_RC_TOO_MANY_FRAGMENTS:
		  vlib_node_increment_counter (vm, node->node_index,
					       IP4_ERROR_REASS_FRAGMENT_CHAIN_TOO_LONG,
					       1);
		  ip4_full_reass_drop_all (vm, node, rm, reass);
		  ip4_full_reass_free (rm, rt, reass);
		  goto next_packet;
		  break;
		case IP4_REASS_RC_NO_BUF:
		  vlib_node_increment_counter (vm, node->node_index,
					       IP4_ERROR_REASS_NO_BUF, 1);
		  ip4_full_reass_drop_all (vm, node, rm, reass);
		  ip4_full_reass_free (rm, rt, reass);
		  goto next_packet;
		  break;
		case IP4_REASS_RC_INTERNAL_ERROR:
		  /* drop everything and start with a clean slate */
		  vlib_node_increment_counter (vm, node->node_index,
					       IP4_ERROR_REASS_INTERNAL_ERROR,
					       1);
		  ip4_full_reass_drop_all (vm, node, rm, reass);
		  ip4_full_reass_free (rm, rt, reass);
		  goto next_packet;
		  break;
		}
	    }
	  else
	    {
	      // no reassembly context could be allocated
	      next0 = IP4_FULL_REASS_NEXT_DROP;
	      error0 = IP4_ERROR_REASS_LIMIT_REACHED;
	    }


	packet_enqueue:

	  // bi0 == ~0 means the fragment was consumed into a reassembly
	  if (bi0 != ~0)
	    {
	      to_next[0] = bi0;
	      to_next += 1;
	      n_left_to_next -= 1;

	      /* bi0 might have been updated by reass_finalize, reload */
	      b0 = vlib_get_buffer (vm, bi0);
	      b0->error = node->errors[error0];

	      if (next0 == IP4_FULL_REASS_NEXT_HANDOFF)
		{
		  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
		    {
		      ip4_full_reass_add_trace (vm, node, rm, NULL, bi0,
						HANDOFF, 0,
						vnet_buffer (b0)->ip.
						reass.owner_thread_index);
		    }
		}
	      else if (is_feature && IP4_ERROR_NONE == error0)
		{
		  // continue along the feature arc
		  vnet_feature_next (&next0, b0);
		}
	      vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
					       to_next, n_left_to_next,
					       bi0, next0);
	      IP4_REASS_DEBUG_BUFFER (bi0, enqueue_next);
	    }

	next_packet:
	  from += 1;
	  n_left_from -= 1;
	}

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  clib_spinlock_unlock (&rt->lock);
  return frame->n_vectors;
}
1269
/* human-readable error counter strings, generated from foreach_ip4_error */
static char *ip4_full_reass_error_strings[] = {
#define _(sym, string) string,
  foreach_ip4_error
#undef _
};
1275
/* node function for the standalone (non-feature) reassembly node -
 * thin wrapper dispatching into the shared inline implementation */
VLIB_NODE_FN (ip4_full_reass_node) (vlib_main_t * vm,
				    vlib_node_runtime_t * node,
				    vlib_frame_t * frame)
{
  return ip4_full_reass_inline (vm, node, frame, false /* is_feature */ ,
				false /* is_custom_app */ );
}
1283
/* *INDENT-OFF* */
/* registration of the standalone full reassembly graph node */
VLIB_REGISTER_NODE (ip4_full_reass_node) = {
    .name = "ip4-full-reassembly",
    .vector_size = sizeof (u32),
    .format_trace = format_ip4_full_reass_trace,
    .n_errors = ARRAY_LEN (ip4_full_reass_error_strings),
    .error_strings = ip4_full_reass_error_strings,
    .n_next_nodes = IP4_FULL_REASS_N_NEXT,
    .next_nodes =
        {
                [IP4_FULL_REASS_NEXT_INPUT] = "ip4-input",
                [IP4_FULL_REASS_NEXT_DROP] = "ip4-drop",
                [IP4_FULL_REASS_NEXT_HANDOFF] = "ip4-full-reassembly-handoff",

        },
};
/* *INDENT-ON* */
1301
/* node function for the feature-arc variant of the reassembly node -
 * same shared implementation, run with is_feature set */
VLIB_NODE_FN (ip4_full_reass_node_feature) (vlib_main_t * vm,
					    vlib_node_runtime_t * node,
					    vlib_frame_t * frame)
{
  return ip4_full_reass_inline (vm, node, frame, true /* is_feature */ ,
				false /* is_custom_app */ );
}
1309
/* *INDENT-OFF* */
/* registration of the feature-arc full reassembly graph node */
VLIB_REGISTER_NODE (ip4_full_reass_node_feature) = {
    .name = "ip4-full-reassembly-feature",
    .vector_size = sizeof (u32),
    .format_trace = format_ip4_full_reass_trace,
    .n_errors = ARRAY_LEN (ip4_full_reass_error_strings),
    .error_strings = ip4_full_reass_error_strings,
    .n_next_nodes = IP4_FULL_REASS_N_NEXT,
    .next_nodes =
        {
                [IP4_FULL_REASS_NEXT_INPUT] = "ip4-input",
                [IP4_FULL_REASS_NEXT_DROP] = "ip4-drop",
                [IP4_FULL_REASS_NEXT_HANDOFF] = "ip4-full-reass-feature-hoff",
        },
};
/* *INDENT-ON* */
1326
/* *INDENT-OFF* */
/* hook the feature node into the ip4-unicast arc, ahead of lookup and
 * IPsec input so those features see only whole packets */
VNET_FEATURE_INIT (ip4_full_reass_feature, static) = {
    .arc_name = "ip4-unicast",
    .node_name = "ip4-full-reassembly-feature",
    .runs_before = VNET_FEATURES ("ip4-lookup",
                                  "ipsec4-input-feature"),
    .runs_after = 0,
};
/* *INDENT-ON* */
1336
Filip Tehlar26ea14e2019-03-11 05:30:21 -07001337#ifndef CLIB_MARCH_VARIANT
Klement Sekera4c533132018-02-22 11:41:12 +01001338always_inline u32
Klement Sekera896c8962019-06-24 11:52:49 +00001339ip4_full_reass_get_nbuckets ()
Klement Sekera75e7d132017-09-20 08:26:30 +02001340{
Klement Sekera896c8962019-06-24 11:52:49 +00001341 ip4_full_reass_main_t *rm = &ip4_full_reass_main;
Klement Sekera75e7d132017-09-20 08:26:30 +02001342 u32 nbuckets;
1343 u8 i;
1344
1345 nbuckets = (u32) (rm->max_reass_n / IP4_REASS_HT_LOAD_FACTOR);
1346
1347 for (i = 0; i < 31; i++)
1348 if ((1 << i) >= nbuckets)
1349 break;
1350 nbuckets = 1 << i;
1351
1352 return nbuckets;
1353}
Filip Tehlar26ea14e2019-03-11 05:30:21 -07001354#endif /* CLIB_MARCH_VARIANT */
Klement Sekera75e7d132017-09-20 08:26:30 +02001355
/* events signalled to the expire-walk process */
typedef enum
{
  IP4_EVENT_CONFIG_CHANGED = 1,	/* runtime parameters were updated */
} ip4_full_reass_event_t;
Klement Sekera75e7d132017-09-20 08:26:30 +02001360
/* context passed to ip4_rehash_cb while copying entries into a new hash */
typedef struct
{
  int failure;			/* set to 1 if any insertion fails */
  clib_bihash_16_8_t *new_hash;	/* destination hash table */
} ip4_rehash_cb_ctx;
1366
Filip Tehlar26ea14e2019-03-11 05:30:21 -07001367#ifndef CLIB_MARCH_VARIANT
Klement Sekera4c533132018-02-22 11:41:12 +01001368static void
Klement Sekera8dcfed52018-06-28 11:16:15 +02001369ip4_rehash_cb (clib_bihash_kv_16_8_t * kv, void *_ctx)
Klement Sekera75e7d132017-09-20 08:26:30 +02001370{
1371 ip4_rehash_cb_ctx *ctx = _ctx;
Klement Sekera8dcfed52018-06-28 11:16:15 +02001372 if (clib_bihash_add_del_16_8 (ctx->new_hash, kv, 1))
Klement Sekera75e7d132017-09-20 08:26:30 +02001373 {
1374 ctx->failure = 1;
1375 }
1376}
1377
Klement Sekera4c533132018-02-22 11:41:12 +01001378static void
Klement Sekera896c8962019-06-24 11:52:49 +00001379ip4_full_reass_set_params (u32 timeout_ms, u32 max_reassemblies,
1380 u32 max_reassembly_length,
1381 u32 expire_walk_interval_ms)
Klement Sekera4c533132018-02-22 11:41:12 +01001382{
Klement Sekera896c8962019-06-24 11:52:49 +00001383 ip4_full_reass_main.timeout_ms = timeout_ms;
1384 ip4_full_reass_main.timeout = (f64) timeout_ms / (f64) MSEC_PER_SEC;
1385 ip4_full_reass_main.max_reass_n = max_reassemblies;
1386 ip4_full_reass_main.max_reass_len = max_reassembly_length;
1387 ip4_full_reass_main.expire_walk_interval_ms = expire_walk_interval_ms;
Klement Sekera4c533132018-02-22 11:41:12 +01001388}
1389
/**
 * @brief API handler: apply new reassembly parameters.
 *
 * Stores the parameters, signals the expire-walk process about the
 * change, and - if the new maximum implies a larger hash table - grows
 * the reassembly hash by building a new bihash and copying all existing
 * entries into it.
 *
 * @return 0 on success, -1 if rehashing failed (old table is kept)
 */
vnet_api_error_t
ip4_full_reass_set (u32 timeout_ms, u32 max_reassemblies,
		    u32 max_reassembly_length, u32 expire_walk_interval_ms)
{
  u32 old_nbuckets = ip4_full_reass_get_nbuckets ();
  ip4_full_reass_set_params (timeout_ms, max_reassemblies,
			     max_reassembly_length, expire_walk_interval_ms);
  // wake the expire-walk process so it picks up the new interval
  vlib_process_signal_event (ip4_full_reass_main.vlib_main,
			     ip4_full_reass_main.ip4_full_reass_expire_node_idx,
			     IP4_EVENT_CONFIG_CHANGED, 0);
  u32 new_nbuckets = ip4_full_reass_get_nbuckets ();
  // only grow - shrinking would require evicting entries
  if (ip4_full_reass_main.max_reass_n > 0 && new_nbuckets > old_nbuckets)
    {
      clib_bihash_16_8_t new_hash;
      clib_memset (&new_hash, 0, sizeof (new_hash));
      ip4_rehash_cb_ctx ctx;
      ctx.failure = 0;
      ctx.new_hash = &new_hash;
      clib_bihash_init_16_8 (&new_hash, "ip4-dr", new_nbuckets,
			     new_nbuckets * 1024);
      // copy every existing entry into the new, larger table
      clib_bihash_foreach_key_value_pair_16_8 (&ip4_full_reass_main.hash,
					       ip4_rehash_cb, &ctx);
      if (ctx.failure)
	{
	  // keep the old table intact on failure
	  clib_bihash_free_16_8 (&new_hash);
	  return -1;
	}
      else
	{
	  clib_bihash_free_16_8 (&ip4_full_reass_main.hash);
	  clib_memcpy_fast (&ip4_full_reass_main.hash, &new_hash,
			    sizeof (ip4_full_reass_main.hash));
	  clib_bihash_copied (&ip4_full_reass_main.hash, &new_hash);
	}
    }
  return 0;
}
1427
1428vnet_api_error_t
Klement Sekera896c8962019-06-24 11:52:49 +00001429ip4_full_reass_get (u32 * timeout_ms, u32 * max_reassemblies,
1430 u32 * max_reassembly_length,
1431 u32 * expire_walk_interval_ms)
Klement Sekera75e7d132017-09-20 08:26:30 +02001432{
Klement Sekera896c8962019-06-24 11:52:49 +00001433 *timeout_ms = ip4_full_reass_main.timeout_ms;
1434 *max_reassemblies = ip4_full_reass_main.max_reass_n;
1435 *max_reassembly_length = ip4_full_reass_main.max_reass_len;
1436 *expire_walk_interval_ms = ip4_full_reass_main.expire_walk_interval_ms;
Klement Sekera75e7d132017-09-20 08:26:30 +02001437 return 0;
1438}
1439
/**
 * @brief Plugin init: set up IPv4 full reassembly state.
 *
 * Allocates per-thread pools and locks, resolves graph node indices
 * (expire-walk, ip4-drop), applies default parameters, creates the
 * reassembly bihash, and initializes the handoff frame queues.
 *
 * @return NULL on success (no failing path allocates an error here)
 */
static clib_error_t *
ip4_full_reass_init_function (vlib_main_t * vm)
{
  ip4_full_reass_main_t *rm = &ip4_full_reass_main;
  clib_error_t *error = 0;
  u32 nbuckets;
  vlib_node_t *node;

  rm->vlib_main = vm;

  // one entry per worker plus the main thread
  vec_validate (rm->per_thread_data, vlib_num_workers ());
  ip4_full_reass_per_thread_t *rt;
  vec_foreach (rt, rm->per_thread_data)
  {
    clib_spinlock_init (&rt->lock);
    pool_alloc (rt->pool, rm->max_reass_n);
  }

  node = vlib_get_node_by_name (vm, (u8 *) "ip4-full-reassembly-expire-walk");
  ASSERT (node);
  rm->ip4_full_reass_expire_node_idx = node->index;

  // defaults must be set before sizing the hash below
  ip4_full_reass_set_params (IP4_REASS_TIMEOUT_DEFAULT_MS,
			     IP4_REASS_MAX_REASSEMBLIES_DEFAULT,
			     IP4_REASS_MAX_REASSEMBLY_LENGTH_DEFAULT,
			     IP4_REASS_EXPIRE_WALK_INTERVAL_DEFAULT_MS);

  nbuckets = ip4_full_reass_get_nbuckets ();
  clib_bihash_init_16_8 (&rm->hash, "ip4-dr", nbuckets, nbuckets * 1024);

  node = vlib_get_node_by_name (vm, (u8 *) "ip4-drop");
  ASSERT (node);
  rm->ip4_drop_idx = node->index;

  // frame queues used for thread handoff by both node variants
  rm->fq_index = vlib_frame_queue_main_init (ip4_full_reass_node.index, 0);
  rm->fq_feature_index =
    vlib_frame_queue_main_init (ip4_full_reass_node_feature.index, 0);

  rm->feature_use_refcount_per_intf = NULL;
  return error;
}
1481
Klement Sekera896c8962019-06-24 11:52:49 +00001482VLIB_INIT_FUNCTION (ip4_full_reass_init_function);
Filip Tehlar26ea14e2019-03-11 05:30:21 -07001483#endif /* CLIB_MARCH_VARIANT */
Klement Sekera75e7d132017-09-20 08:26:30 +02001484
/**
 * @brief Process node that periodically expires stale reassemblies.
 *
 * Wakes up every expire_walk_interval_ms (or on an event), then, for each
 * thread's reassembly pool, collects the indices of contexts whose
 * last_heard timestamp is older than the configured timeout and frees them
 * (dropping all buffered fragments).  Runs forever; the return value is
 * never reached.
 */
static uword
ip4_full_reass_walk_expired (vlib_main_t * vm,
			     vlib_node_runtime_t * node, vlib_frame_t * f)
{
  ip4_full_reass_main_t *rm = &ip4_full_reass_main;
  uword event_type, *event_data = 0;

  while (true)
    {
      /* Sleep until either the walk interval elapses or an event arrives. */
      vlib_process_wait_for_event_or_clock (vm,
					    (f64)
					    rm->expire_walk_interval_ms /
					    (f64) MSEC_PER_SEC);
      event_type = vlib_process_get_events (vm, &event_data);

      switch (event_type)
	{
	case ~0:		/* no events => timeout */
	  /* nothing to do here */
	  break;
	case IP4_EVENT_CONFIG_CHANGED:
	  /* tunables changed; fall through to a regular walk */
	  break;
	default:
	  clib_warning ("BUG: event type 0x%wx", event_type);
	  break;
	}
      f64 now = vlib_time_now (vm);

      ip4_full_reass_t *reass;
      int *pool_indexes_to_free = NULL;

      uword thread_index = 0;
      int index;
      /* workers plus the main thread */
      const uword nthreads = vlib_num_workers () + 1;
      for (thread_index = 0; thread_index < nthreads; ++thread_index)
	{
	  ip4_full_reass_per_thread_t *rt =
	    &rm->per_thread_data[thread_index];
	  /* Serialize against the datapath touching this thread's pool. */
	  clib_spinlock_lock (&rt->lock);

	  vec_reset_length (pool_indexes_to_free);
	  /* First pass: collect indices only - freeing while iterating a
	   * pool with pool_foreach_index would invalidate the walk. */
	  /* *INDENT-OFF* */
	  pool_foreach_index (index, rt->pool, ({
				reass = pool_elt_at_index (rt->pool, index);
				if (now > reass->last_heard + rm->timeout)
				  {
				    vec_add1 (pool_indexes_to_free, index);
				  }
			      }));
	  /* *INDENT-ON* */
	  int *i;
	  /* Second pass: drop the buffered fragments and free the contexts. */
	  /* *INDENT-OFF* */
	  vec_foreach (i, pool_indexes_to_free)
	  {
	    ip4_full_reass_t *reass = pool_elt_at_index (rt->pool, i[0]);
	    ip4_full_reass_drop_all (vm, node, rm, reass);
	    ip4_full_reass_free (rm, rt, reass);
	  }
	  /* *INDENT-ON* */

	  clib_spinlock_unlock (&rt->lock);
	}

      vec_free (pool_indexes_to_free);
      if (event_data)
	{
	  /* Keep the allocation, just reset the length for reuse. */
	  _vec_len (event_data) = 0;
	}
    }

  return 0;
}
1557
/* Registration of the expiration walker as a VPP process node; it reuses
 * the reassembly node's trace formatter and error-string table. */
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ip4_full_reass_expire_node) = {
    .function = ip4_full_reass_walk_expired,
    .type = VLIB_NODE_TYPE_PROCESS,
    .name = "ip4-full-reassembly-expire-walk",
    .format_trace = format_ip4_full_reass_trace,
    .n_errors = ARRAY_LEN (ip4_full_reass_error_strings),
    .error_strings = ip4_full_reass_error_strings,

};
/* *INDENT-ON* */
1569
1570static u8 *
Klement Sekera896c8962019-06-24 11:52:49 +00001571format_ip4_full_reass_key (u8 * s, va_list * args)
Klement Sekera75e7d132017-09-20 08:26:30 +02001572{
Klement Sekera896c8962019-06-24 11:52:49 +00001573 ip4_full_reass_key_t *key = va_arg (*args, ip4_full_reass_key_t *);
1574 s =
1575 format (s,
1576 "xx_id: %u, src: %U, dst: %U, frag_id: %u, proto: %u",
1577 key->xx_id, format_ip4_address, &key->src, format_ip4_address,
1578 &key->dst, clib_net_to_host_u16 (key->frag_id), key->proto);
Klement Sekera75e7d132017-09-20 08:26:30 +02001579 return s;
1580}
1581
1582static u8 *
1583format_ip4_reass (u8 * s, va_list * args)
1584{
1585 vlib_main_t *vm = va_arg (*args, vlib_main_t *);
Klement Sekera896c8962019-06-24 11:52:49 +00001586 ip4_full_reass_t *reass = va_arg (*args, ip4_full_reass_t *);
Klement Sekera75e7d132017-09-20 08:26:30 +02001587
Klement Sekera4c533132018-02-22 11:41:12 +01001588 s = format (s, "ID: %lu, key: %U\n first_bi: %u, data_len: %u, "
Klement Sekera75e7d132017-09-20 08:26:30 +02001589 "last_packet_octet: %u, trace_op_counter: %u\n",
Klement Sekera896c8962019-06-24 11:52:49 +00001590 reass->id, format_ip4_full_reass_key, &reass->key,
1591 reass->first_bi, reass->data_len,
1592 reass->last_packet_octet, reass->trace_op_counter);
1593
Klement Sekera75e7d132017-09-20 08:26:30 +02001594 u32 bi = reass->first_bi;
1595 u32 counter = 0;
1596 while (~0 != bi)
1597 {
1598 vlib_buffer_t *b = vlib_get_buffer (vm, bi);
1599 vnet_buffer_opaque_t *vnb = vnet_buffer (b);
Klement Sekera896c8962019-06-24 11:52:49 +00001600 s =
1601 format (s,
1602 " #%03u: range: [%u, %u], bi: %u, off: %d, len: %u, "
1603 "fragment[%u, %u]\n", counter, vnb->ip.reass.range_first,
1604 vnb->ip.reass.range_last, bi,
1605 ip4_full_reass_buffer_get_data_offset (b),
1606 ip4_full_reass_buffer_get_data_len (b),
1607 vnb->ip.reass.fragment_first, vnb->ip.reass.fragment_last);
Klement Sekera75e7d132017-09-20 08:26:30 +02001608 if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
1609 {
1610 bi = b->next_buffer;
1611 }
1612 else
1613 {
1614 bi = ~0;
1615 }
1616 }
1617 return s;
1618}
1619
1620static clib_error_t *
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001621show_ip4_reass (vlib_main_t * vm,
1622 unformat_input_t * input,
Klement Sekera75e7d132017-09-20 08:26:30 +02001623 CLIB_UNUSED (vlib_cli_command_t * lmd))
1624{
Klement Sekera896c8962019-06-24 11:52:49 +00001625 ip4_full_reass_main_t *rm = &ip4_full_reass_main;
Klement Sekera75e7d132017-09-20 08:26:30 +02001626
1627 vlib_cli_output (vm, "---------------------");
1628 vlib_cli_output (vm, "IP4 reassembly status");
1629 vlib_cli_output (vm, "---------------------");
Klement Sekera4c533132018-02-22 11:41:12 +01001630 bool details = false;
Klement Sekera75e7d132017-09-20 08:26:30 +02001631 if (unformat (input, "details"))
1632 {
Klement Sekera4c533132018-02-22 11:41:12 +01001633 details = true;
1634 }
1635
1636 u32 sum_reass_n = 0;
Klement Sekera896c8962019-06-24 11:52:49 +00001637 ip4_full_reass_t *reass;
Klement Sekera4c533132018-02-22 11:41:12 +01001638 uword thread_index;
Juraj Slobodacd806922018-10-10 10:15:54 +02001639 const uword nthreads = vlib_num_workers () + 1;
Klement Sekera4c533132018-02-22 11:41:12 +01001640 for (thread_index = 0; thread_index < nthreads; ++thread_index)
1641 {
Klement Sekera896c8962019-06-24 11:52:49 +00001642 ip4_full_reass_per_thread_t *rt = &rm->per_thread_data[thread_index];
Klement Sekera4c533132018-02-22 11:41:12 +01001643 clib_spinlock_lock (&rt->lock);
1644 if (details)
1645 {
1646 /* *INDENT-OFF* */
1647 pool_foreach (reass, rt->pool, {
1648 vlib_cli_output (vm, "%U", format_ip4_reass, vm, reass);
1649 });
1650 /* *INDENT-ON* */
1651 }
1652 sum_reass_n += rt->reass_n;
Klement Sekera4c533132018-02-22 11:41:12 +01001653 clib_spinlock_unlock (&rt->lock);
Klement Sekera75e7d132017-09-20 08:26:30 +02001654 }
1655 vlib_cli_output (vm, "---------------------");
Klement Sekera4c533132018-02-22 11:41:12 +01001656 vlib_cli_output (vm, "Current IP4 reassemblies count: %lu\n",
1657 (long unsigned) sum_reass_n);
Klement Sekera75e7d132017-09-20 08:26:30 +02001658 vlib_cli_output (vm,
Klement Sekera4c533132018-02-22 11:41:12 +01001659 "Maximum configured concurrent IP4 reassemblies per worker-thread: %lu\n",
Klement Sekera75e7d132017-09-20 08:26:30 +02001660 (long unsigned) rm->max_reass_n);
Klement Sekera75e7d132017-09-20 08:26:30 +02001661 return 0;
1662}
1663
/* CLI registration for the status command above. */
/* *INDENT-OFF* */
VLIB_CLI_COMMAND (show_ip4_full_reass_cmd, static) = {
    .path = "show ip4-full-reassembly",
    .short_help = "show ip4-full-reassembly [details]",
    .function = show_ip4_reass,
};
/* *INDENT-ON* */
1671
Filip Tehlar26ea14e2019-03-11 05:30:21 -07001672#ifndef CLIB_MARCH_VARIANT
Klement Sekera4c533132018-02-22 11:41:12 +01001673vnet_api_error_t
Klement Sekera896c8962019-06-24 11:52:49 +00001674ip4_full_reass_enable_disable (u32 sw_if_index, u8 enable_disable)
Klement Sekera4c533132018-02-22 11:41:12 +01001675{
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001676 return vnet_feature_enable_disable ("ip4-unicast",
Klement Sekera896c8962019-06-24 11:52:49 +00001677 "ip4-full-reassembly-feature",
1678 sw_if_index, enable_disable, 0, 0);
Klement Sekera4c533132018-02-22 11:41:12 +01001679}
Filip Tehlar26ea14e2019-03-11 05:30:21 -07001680#endif /* CLIB_MARCH_VARIANT */
Klement Sekera4c533132018-02-22 11:41:12 +01001681
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001682
/* X-macro list of handoff-node errors; expanded twice below to build the
 * error enum and the matching counter-name strings in lockstep. */
#define foreach_ip4_full_reass_handoff_error \
_(CONGESTION_DROP, "congestion drop")


/* Error codes for the handoff nodes (one per list entry, plus a count). */
typedef enum
{
#define _(sym,str) IP4_FULL_REASS_HANDOFF_ERROR_##sym,
  foreach_ip4_full_reass_handoff_error
#undef _
    IP4_FULL_REASS_HANDOFF_N_ERROR,
} ip4_full_reass_handoff_error_t;

/* Human-readable strings, index-aligned with the enum above. */
static char *ip4_full_reass_handoff_error_strings[] = {
#define _(sym,string) string,
  foreach_ip4_full_reass_handoff_error
#undef _
};
1700
/* Per-packet trace record for the handoff nodes. */
typedef struct
{
  u32 next_worker_index;	/* thread the buffer was handed off to */
} ip4_full_reass_handoff_trace_t;
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001705
1706static u8 *
Klement Sekera896c8962019-06-24 11:52:49 +00001707format_ip4_full_reass_handoff_trace (u8 * s, va_list * args)
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001708{
1709 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
1710 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
Klement Sekera896c8962019-06-24 11:52:49 +00001711 ip4_full_reass_handoff_trace_t *t =
1712 va_arg (*args, ip4_full_reass_handoff_trace_t *);
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001713
1714 s =
Klement Sekera896c8962019-06-24 11:52:49 +00001715 format (s, "ip4-full-reassembly-handoff: next-worker %d",
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001716 t->next_worker_index);
1717
1718 return s;
1719}
1720
/**
 * @brief Shared body of the two handoff nodes.
 *
 * For every buffer in the frame, reads the owning thread chosen earlier
 * (vnet_buffer ip.reass.owner_thread_index) and enqueues the whole frame
 * to the corresponding per-thread frame queue.  @a is_feature selects the
 * feature-arc queue vs. the standalone-node queue.  Buffers that cannot be
 * enqueued (queue congestion) are counted as CONGESTION_DROP.
 *
 * @return number of vectors in the incoming frame.
 */
always_inline uword
ip4_full_reass_handoff_node_inline (vlib_main_t * vm,
				    vlib_node_runtime_t * node,
				    vlib_frame_t * frame, bool is_feature)
{
  ip4_full_reass_main_t *rm = &ip4_full_reass_main;

  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
  u32 n_enq, n_left_from, *from;
  u16 thread_indices[VLIB_FRAME_SIZE], *ti;
  u32 fq_index;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  vlib_get_buffers (vm, from, bufs, n_left_from);

  /* b and ti walk the buffer and destination-thread arrays in parallel. */
  b = bufs;
  ti = thread_indices;

  fq_index = (is_feature) ? rm->fq_feature_index : rm->fq_index;

  while (n_left_from > 0)
    {
      /* Destination worker was decided when the fragment was hashed. */
      ti[0] = vnet_buffer (b[0])->ip.reass.owner_thread_index;

      if (PREDICT_FALSE
	  ((node->flags & VLIB_NODE_FLAG_TRACE)
	   && (b[0]->flags & VLIB_BUFFER_IS_TRACED)))
	{
	  ip4_full_reass_handoff_trace_t *t =
	    vlib_add_trace (vm, node, b[0], sizeof (*t));
	  t->next_worker_index = ti[0];
	}

      n_left_from -= 1;
      ti += 1;
      b += 1;
    }
  /* Hand the whole frame off; drop-on-congestion flag is set (last arg). */
  n_enq =
    vlib_buffer_enqueue_to_thread (vm, fq_index, from, thread_indices,
				   frame->n_vectors, 1);

  if (n_enq < frame->n_vectors)
    vlib_node_increment_counter (vm, node->node_index,
				 IP4_FULL_REASS_HANDOFF_ERROR_CONGESTION_DROP,
				 frame->n_vectors - n_enq);
  return frame->n_vectors;
}
1769
/* Standalone (non-feature-arc) handoff node entry point. */
VLIB_NODE_FN (ip4_full_reass_handoff_node) (vlib_main_t * vm,
					    vlib_node_runtime_t * node,
					    vlib_frame_t * frame)
{
  return ip4_full_reass_handoff_node_inline (vm, node, frame,
					     false /* is_feature */ );
}
1777
1778
/* Node registration for the standalone handoff node; its only next node
 * is error-drop (successful packets leave via the frame queue instead). */
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ip4_full_reass_handoff_node) = {
  .name = "ip4-full-reassembly-handoff",
  .vector_size = sizeof (u32),
  .n_errors = ARRAY_LEN(ip4_full_reass_handoff_error_strings),
  .error_strings = ip4_full_reass_handoff_error_strings,
  .format_trace = format_ip4_full_reass_handoff_trace,

  .n_next_nodes = 1,

  .next_nodes = {
    [0] = "error-drop",
  },
};
/* *INDENT-ON* */
1794
1795
/* Feature-arc variant of the handoff node entry point. */
/* *INDENT-OFF* */
VLIB_NODE_FN (ip4_full_reass_feature_handoff_node) (vlib_main_t * vm,
						    vlib_node_runtime_t *
						    node,
						    vlib_frame_t * frame)
{
  return ip4_full_reass_handoff_node_inline (vm, node, frame,
					     true /* is_feature */ );
}
/* *INDENT-ON* */
1806
1807
/* Node registration for the feature-arc handoff node; mirrors the
 * standalone registration above. */
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ip4_full_reass_feature_handoff_node) = {
  .name = "ip4-full-reass-feature-hoff",
  .vector_size = sizeof (u32),
  .n_errors = ARRAY_LEN(ip4_full_reass_handoff_error_strings),
  .error_strings = ip4_full_reass_handoff_error_strings,
  .format_trace = format_ip4_full_reass_handoff_trace,

  .n_next_nodes = 1,

  .next_nodes = {
    [0] = "error-drop",
  },
};
/* *INDENT-ON* */
1823
Klement Sekera7b2e9fb2019-10-01 13:00:22 +00001824#ifndef CLIB_MARCH_VARIANT
1825int
1826ip4_full_reass_enable_disable_with_refcnt (u32 sw_if_index, int is_enable)
1827{
1828 ip4_full_reass_main_t *rm = &ip4_full_reass_main;
1829 vec_validate (rm->feature_use_refcount_per_intf, sw_if_index);
1830 if (is_enable)
1831 {
1832 if (!rm->feature_use_refcount_per_intf[sw_if_index])
1833 {
1834 ++rm->feature_use_refcount_per_intf[sw_if_index];
1835 return vnet_feature_enable_disable ("ip4-unicast",
1836 "ip4-full-reassembly-feature",
1837 sw_if_index, 1, 0, 0);
1838 }
1839 ++rm->feature_use_refcount_per_intf[sw_if_index];
1840 }
1841 else
1842 {
1843 --rm->feature_use_refcount_per_intf[sw_if_index];
1844 if (!rm->feature_use_refcount_per_intf[sw_if_index])
1845 return vnet_feature_enable_disable ("ip4-unicast",
1846 "ip4-full-reassembly-feature",
1847 sw_if_index, 0, 0, 0);
1848 }
1849 return -1;
1850}
1851#endif
1852
Klement Sekera75e7d132017-09-20 08:26:30 +02001853/*
1854 * fd.io coding-style-patch-verification: ON
1855 *
1856 * Local Variables:
1857 * eval: (c-set-style "gnu")
1858 * End:
1859 */