/*
 * Copyright (c) 2017 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * @file
 * @brief IPv4 Full Reassembly.
 *
 * This file contains the source code for IPv4 full reassembly.
 */

#include <vppinfra/vec.h>
#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
#include <vppinfra/fifo.h>
#include <vppinfra/bihash_16_8.h>
#include <vnet/ip/reass/ip4_full_reass.h>
#include <stddef.h>

#define MSEC_PER_SEC 1000
#define IP4_REASS_TIMEOUT_DEFAULT_MS 100
#define IP4_REASS_EXPIRE_WALK_INTERVAL_DEFAULT_MS 10000	// 10 seconds default
#define IP4_REASS_MAX_REASSEMBLIES_DEFAULT 1024
#define IP4_REASS_MAX_REASSEMBLY_LENGTH_DEFAULT 3
#define IP4_REASS_HT_LOAD_FACTOR (0.75)

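/*
 * Illustrative use of the configuration API defined near the end of this
 * file - e.g. restoring the defaults above:
 *
 *   ip4_full_reass_set (IP4_REASS_TIMEOUT_DEFAULT_MS,
 *                       IP4_REASS_MAX_REASSEMBLIES_DEFAULT,
 *                       IP4_REASS_MAX_REASSEMBLY_LENGTH_DEFAULT,
 *                       IP4_REASS_EXPIRE_WALK_INTERVAL_DEFAULT_MS);
 */
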
#define IP4_REASS_DEBUG_BUFFERS 0
#if IP4_REASS_DEBUG_BUFFERS
#define IP4_REASS_DEBUG_BUFFER(bi, what)             \
  do                                                 \
    {                                                \
      u32 _bi = bi;                                  \
      printf (#what "buffer %u", _bi);               \
      vlib_buffer_t *_b = vlib_get_buffer (vm, _bi); \
      while (_b->flags & VLIB_BUFFER_NEXT_PRESENT)   \
        {                                            \
          _bi = _b->next_buffer;                     \
          printf ("[%u]", _bi);                      \
          _b = vlib_get_buffer (vm, _bi);            \
        }                                            \
      printf ("\n");                                 \
      fflush (stdout);                               \
    }                                                \
  while (0)
#else
#define IP4_REASS_DEBUG_BUFFER(...)
#endif

typedef enum
{
  IP4_REASS_RC_OK,
  IP4_REASS_RC_TOO_MANY_FRAGMENTS,
  IP4_REASS_RC_INTERNAL_ERROR,
  IP4_REASS_RC_NO_BUF,
  IP4_REASS_RC_HANDOFF,
} ip4_full_reass_rc_t;

typedef struct
{
  union
  {
    struct
    {
      u32 xx_id;
      ip4_address_t src;
      ip4_address_t dst;
      u16 frag_id;
      u8 proto;
      u8 unused;
    };
    u64 as_u64[2];
  };
} ip4_full_reass_key_t;
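
/*
 * Note: the key is exactly 16 bytes so it can be used directly as a
 * clib_bihash_16_8_t key; xx_id carries the RX fib index when the key is
 * built in ip4_full_reass_inline () below.
 */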

typedef union
{
  struct
  {
    u32 reass_index;
    u32 memory_owner_thread_index;
  };
  u64 as_u64;
} ip4_full_reass_val_t;

typedef union
{
  struct
  {
    ip4_full_reass_key_t k;
    ip4_full_reass_val_t v;
  };
  clib_bihash_kv_16_8_t kv;
} ip4_full_reass_kv_t;

always_inline u32
ip4_full_reass_buffer_get_data_offset (vlib_buffer_t * b)
{
  vnet_buffer_opaque_t *vnb = vnet_buffer (b);
  return vnb->ip.reass.range_first - vnb->ip.reass.fragment_first;
}

always_inline u16
ip4_full_reass_buffer_get_data_len (vlib_buffer_t * b)
{
  vnet_buffer_opaque_t *vnb = vnet_buffer (b);
  return clib_min (vnb->ip.reass.range_last, vnb->ip.reass.fragment_last) -
    (vnb->ip.reass.fragment_first +
     ip4_full_reass_buffer_get_data_offset (b)) + 1;
}
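
/*
 * Illustrative example: a fragment carrying octets 1400-2799 whose first
 * 200 octets are already covered by an earlier range has
 * fragment_first = 1400, range_first = 1600 and range_last = fragment_last
 * = 2799, so data_offset = 1600 - 1400 = 200 and
 * data_len = 2799 - (1400 + 200) + 1 = 1200.
 */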

typedef struct
{
  // hash table key
  ip4_full_reass_key_t key;
  // time when last packet was received
  f64 last_heard;
  // internal id of this reassembly
  u64 id;
  // buffer index of first buffer in this reassembly context
  u32 first_bi;
  // last octet of packet, ~0 until fragment without more_fragments arrives
  u32 last_packet_octet;
  // length of data collected so far
  u32 data_len;
  // trace operation counter
  u32 trace_op_counter;
  // next index - used by non-feature node
  u32 next_index;
  // error next index - used by custom apps (~0 if not used)
  u32 error_next_index;
  // minimum fragment length for this reassembly - used to estimate MTU
  u16 min_fragment_length;
  // number of fragments in this reassembly
  u32 fragments_n;
  // thread owning memory for this context (whose pool contains this ctx)
  u32 memory_owner_thread_index;
  // thread which received fragment with offset 0 and which sends out the
  // completed reassembly
  u32 sendout_thread_index;
} ip4_full_reass_t;
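
/*
 * Sketch of the in-memory layout: collected fragments form a list of ranges
 * rooted at first_bi; each range head stores the next range's buffer index
 * in vnet_buffer ()->ip.reass.next_range_bi and may itself be a chain of
 * buffers linked via next_buffer, e.g.
 *
 *   first_bi -> [0..1399] -> [1400..2799] -> [2800..4177] -> ~0
 */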

typedef struct
{
  ip4_full_reass_t *pool;
  u32 reass_n;
  u32 id_counter;
  clib_spinlock_t lock;
} ip4_full_reass_per_thread_t;

typedef struct
{
  // IPv4 config
  u32 timeout_ms;
  f64 timeout;
  u32 expire_walk_interval_ms;
  // maximum number of fragments in one reassembly
  u32 max_reass_len;
  // maximum number of reassemblies
  u32 max_reass_n;

  // IPv4 runtime
  clib_bihash_16_8_t hash;
  // per-thread data
  ip4_full_reass_per_thread_t *per_thread_data;

  // convenience
  vlib_main_t *vlib_main;

  // node index of ip4-drop node
  u32 ip4_drop_idx;
  u32 ip4_full_reass_expire_node_idx;

  /** Worker handoff */
  u32 fq_index;
  u32 fq_feature_index;

  // reference count for enabling/disabling feature - per interface
  u32 *feature_use_refcount_per_intf;
} ip4_full_reass_main_t;

extern ip4_full_reass_main_t ip4_full_reass_main;

#ifndef CLIB_MARCH_VARIANT
ip4_full_reass_main_t ip4_full_reass_main;
#endif /* CLIB_MARCH_VARIANT */

typedef enum
{
  IP4_FULL_REASS_NEXT_INPUT,
  IP4_FULL_REASS_NEXT_DROP,
  IP4_FULL_REASS_NEXT_HANDOFF,
  IP4_FULL_REASS_N_NEXT,
} ip4_full_reass_next_t;

typedef enum
{
  RANGE_NEW,
  RANGE_SHRINK,
  RANGE_DISCARD,
  RANGE_OVERLAP,
  FINALIZE,
  HANDOFF,
} ip4_full_reass_trace_operation_e;

typedef struct
{
  u16 range_first;
  u16 range_last;
  u32 range_bi;
  i32 data_offset;
  u32 data_len;
  u32 first_bi;
} ip4_full_reass_range_trace_t;

typedef struct
{
  ip4_full_reass_trace_operation_e action;
  u32 reass_id;
  ip4_full_reass_range_trace_t trace_range;
  u32 size_diff;
  u32 op_id;
  u32 thread_id;
  u32 thread_id_to;
  u32 fragment_first;
  u32 fragment_last;
  u32 total_data_len;
  bool is_after_handoff;
  ip4_header_t ip4_header;
} ip4_full_reass_trace_t;

extern vlib_node_registration_t ip4_full_reass_node;
extern vlib_node_registration_t ip4_full_reass_node_feature;

static void
ip4_full_reass_trace_details (vlib_main_t * vm, u32 bi,
			      ip4_full_reass_range_trace_t * trace)
{
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
  vnet_buffer_opaque_t *vnb = vnet_buffer (b);
  trace->range_first = vnb->ip.reass.range_first;
  trace->range_last = vnb->ip.reass.range_last;
  trace->data_offset = ip4_full_reass_buffer_get_data_offset (b);
  trace->data_len = ip4_full_reass_buffer_get_data_len (b);
  trace->range_bi = bi;
}

static u8 *
format_ip4_full_reass_range_trace (u8 * s, va_list * args)
{
  ip4_full_reass_range_trace_t *trace =
    va_arg (*args, ip4_full_reass_range_trace_t *);
  s =
    format (s, "range: [%u, %u], off %d, len %u, bi %u", trace->range_first,
	    trace->range_last, trace->data_offset, trace->data_len,
	    trace->range_bi);
  return s;
}

static u8 *
format_ip4_full_reass_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  ip4_full_reass_trace_t *t = va_arg (*args, ip4_full_reass_trace_t *);
  u32 indent = 0;
  if (~0 != t->reass_id)
    {
      if (t->is_after_handoff)
	{
	  s =
	    format (s, "%U\n", format_ip4_header, &t->ip4_header,
		    sizeof (t->ip4_header));
	  indent = 2;
	}
      s =
	format (s, "%Ureass id: %u, op id: %u, ", format_white_space, indent,
		t->reass_id, t->op_id);
      indent = format_get_indent (s);
      s =
	format (s,
		"first bi: %u, data len: %u, ip/fragment[%u, %u]",
		t->trace_range.first_bi, t->total_data_len, t->fragment_first,
		t->fragment_last);
    }
  switch (t->action)
    {
    case RANGE_SHRINK:
      s = format (s, "\n%Ushrink %U by %u", format_white_space, indent,
		  format_ip4_full_reass_range_trace, &t->trace_range,
		  t->size_diff);
      break;
    case RANGE_DISCARD:
      s = format (s, "\n%Udiscard %U", format_white_space, indent,
		  format_ip4_full_reass_range_trace, &t->trace_range);
      break;
    case RANGE_NEW:
      s = format (s, "\n%Unew %U", format_white_space, indent,
		  format_ip4_full_reass_range_trace, &t->trace_range);
      break;
    case RANGE_OVERLAP:
      s = format (s, "\n%Uoverlapping/ignored %U", format_white_space, indent,
		  format_ip4_full_reass_range_trace, &t->trace_range);
      break;
    case FINALIZE:
      s = format (s, "\n%Ufinalize reassembly", format_white_space, indent);
      break;
    case HANDOFF:
      s =
	format (s, "handoff from thread #%u to thread #%u", t->thread_id,
		t->thread_id_to);
      break;
    }
  return s;
}

static void
ip4_full_reass_add_trace (vlib_main_t * vm, vlib_node_runtime_t * node,
			  ip4_full_reass_main_t * rm,
			  ip4_full_reass_t * reass, u32 bi,
			  ip4_full_reass_trace_operation_e action,
			  u32 size_diff, u32 thread_id_to)
{
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
  vnet_buffer_opaque_t *vnb = vnet_buffer (b);
  bool is_after_handoff = false;
  if (vlib_buffer_get_trace_thread (b) != vm->thread_index)
    {
      is_after_handoff = true;
    }
  ip4_full_reass_trace_t *t = vlib_add_trace (vm, node, b, sizeof (t[0]));
  t->is_after_handoff = is_after_handoff;
  if (t->is_after_handoff)
    {
      clib_memcpy (&t->ip4_header, vlib_buffer_get_current (b),
		   clib_min (sizeof (t->ip4_header), b->current_length));
    }
  if (reass)
    {
      t->reass_id = reass->id;
      t->op_id = reass->trace_op_counter;
      t->trace_range.first_bi = reass->first_bi;
      t->total_data_len = reass->data_len;
      ++reass->trace_op_counter;
    }
  else
    {
      t->reass_id = ~0;
      t->op_id = 0;
      t->trace_range.first_bi = 0;
      t->total_data_len = 0;
    }
  t->action = action;
  ip4_full_reass_trace_details (vm, bi, &t->trace_range);
  t->size_diff = size_diff;
  t->thread_id = vm->thread_index;
  t->thread_id_to = thread_id_to;
  t->fragment_first = vnb->ip.reass.fragment_first;
  t->fragment_last = vnb->ip.reass.fragment_last;
#if 0
  static u8 *s = NULL;
  s = format (s, "%U", format_ip4_full_reass_trace, NULL, NULL, t);
  printf ("%.*s\n", vec_len (s), s);
  fflush (stdout);
  vec_reset_length (s);
#endif
}

always_inline void
ip4_full_reass_free_ctx (ip4_full_reass_per_thread_t * rt,
			 ip4_full_reass_t * reass)
{
  pool_put (rt->pool, reass);
  --rt->reass_n;
}

always_inline void
ip4_full_reass_free (ip4_full_reass_main_t * rm,
		     ip4_full_reass_per_thread_t * rt,
		     ip4_full_reass_t * reass)
{
  clib_bihash_kv_16_8_t kv;
  kv.key[0] = reass->key.as_u64[0];
  kv.key[1] = reass->key.as_u64[1];
  clib_bihash_add_del_16_8 (&rm->hash, &kv, 0);
  return ip4_full_reass_free_ctx (rt, reass);
}

always_inline void
ip4_full_reass_drop_all (vlib_main_t * vm, vlib_node_runtime_t * node,
			 ip4_full_reass_main_t * rm, ip4_full_reass_t * reass)
{
  u32 range_bi = reass->first_bi;
  vlib_buffer_t *range_b;
  vnet_buffer_opaque_t *range_vnb;
  u32 *to_free = NULL;
  while (~0 != range_bi)
    {
      range_b = vlib_get_buffer (vm, range_bi);
      range_vnb = vnet_buffer (range_b);
      u32 bi = range_bi;
      while (~0 != bi)
	{
	  vec_add1 (to_free, bi);
	  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
	  if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
	    {
	      bi = b->next_buffer;
	      b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
	    }
	  else
	    {
	      bi = ~0;
	    }
	}
      range_bi = range_vnb->ip.reass.next_range_bi;
    }
  /* send to next_error_index */
  if (~0 != reass->error_next_index)
    {
      u32 n_left_to_next, *to_next, next_index;

      next_index = reass->error_next_index;
      u32 bi = ~0;

      while (vec_len (to_free) > 0)
	{
	  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

	  while (vec_len (to_free) > 0 && n_left_to_next > 0)
	    {
	      bi = vec_pop (to_free);

	      if (~0 != bi)
		{
		  to_next[0] = bi;
		  to_next += 1;
		  n_left_to_next -= 1;
		}
	    }
	  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
	}
    }
  else
    {
      vlib_buffer_free (vm, to_free, vec_len (to_free));
    }
}

always_inline void
ip4_full_reass_init (ip4_full_reass_t * reass)
{
  reass->first_bi = ~0;
  reass->last_packet_octet = ~0;
  reass->data_len = 0;
  reass->next_index = ~0;
  reass->error_next_index = ~0;
}

always_inline ip4_full_reass_t *
ip4_full_reass_find_or_create (vlib_main_t * vm, vlib_node_runtime_t * node,
			       ip4_full_reass_main_t * rm,
			       ip4_full_reass_per_thread_t * rt,
			       ip4_full_reass_kv_t * kv, u8 * do_handoff)
{
  ip4_full_reass_t *reass;
  f64 now;

again:

  reass = NULL;
  now = vlib_time_now (vm);
  if (!clib_bihash_search_16_8 (&rm->hash, &kv->kv, &kv->kv))
    {
      if (vm->thread_index != kv->v.memory_owner_thread_index)
	{
	  *do_handoff = 1;
	  return NULL;
	}
      reass =
	pool_elt_at_index (rm->per_thread_data
			   [kv->v.memory_owner_thread_index].pool,
			   kv->v.reass_index);

      if (now > reass->last_heard + rm->timeout)
	{
	  ip4_full_reass_drop_all (vm, node, rm, reass);
	  ip4_full_reass_free (rm, rt, reass);
	  reass = NULL;
	}
    }

  if (reass)
    {
      reass->last_heard = now;
      return reass;
    }

  if (rt->reass_n >= rm->max_reass_n)
    {
      reass = NULL;
      return reass;
    }
  else
    {
      pool_get (rt->pool, reass);
      clib_memset (reass, 0, sizeof (*reass));
      reass->id = ((u64) vm->thread_index * 1000000000) + rt->id_counter;
      reass->memory_owner_thread_index = vm->thread_index;
      ++rt->id_counter;
      ip4_full_reass_init (reass);
      ++rt->reass_n;
    }

  reass->key.as_u64[0] = kv->kv.key[0];
  reass->key.as_u64[1] = kv->kv.key[1];
  kv->v.reass_index = (reass - rt->pool);
  kv->v.memory_owner_thread_index = vm->thread_index;
  reass->last_heard = now;

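  /* is_add == 2 - add-but-do-not-overwrite (bihash convention, assumed here);
   * a return of -2 below therefore means another worker inserted this key
   * first */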
  int rv = clib_bihash_add_del_16_8 (&rm->hash, &kv->kv, 2);
  if (rv)
    {
      ip4_full_reass_free_ctx (rt, reass);
      reass = NULL;
      // if other worker created a context already work with the other copy
      if (-2 == rv)
	goto again;
    }

  return reass;
}

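/*
 * Glue all collected ranges into a single buffer chain, trimming
 * per-fragment IP headers and overlaps, rewrite the first fragment's header
 * (fragment offset/flags cleared, length and checksum recomputed) and pick
 * the next node via *next0.
 */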
always_inline ip4_full_reass_rc_t
ip4_full_reass_finalize (vlib_main_t * vm, vlib_node_runtime_t * node,
			 ip4_full_reass_main_t * rm,
			 ip4_full_reass_per_thread_t * rt,
			 ip4_full_reass_t * reass, u32 * bi0,
			 u32 * next0, u32 * error0, bool is_custom_app)
{
  vlib_buffer_t *first_b = vlib_get_buffer (vm, reass->first_bi);
  vlib_buffer_t *last_b = NULL;
  u32 sub_chain_bi = reass->first_bi;
  u32 total_length = 0;
  u32 buf_cnt = 0;
  do
    {
      u32 tmp_bi = sub_chain_bi;
      vlib_buffer_t *tmp = vlib_get_buffer (vm, tmp_bi);
      ip4_header_t *ip = vlib_buffer_get_current (tmp);
      vnet_buffer_opaque_t *vnb = vnet_buffer (tmp);
      if (!(vnb->ip.reass.range_first >= vnb->ip.reass.fragment_first) &&
	  !(vnb->ip.reass.range_last > vnb->ip.reass.fragment_first))
	{
	  return IP4_REASS_RC_INTERNAL_ERROR;
	}

      u32 data_len = ip4_full_reass_buffer_get_data_len (tmp);
      u32 trim_front =
	ip4_header_bytes (ip) + ip4_full_reass_buffer_get_data_offset (tmp);
      u32 trim_end =
	vlib_buffer_length_in_chain (vm, tmp) - trim_front - data_len;
      if (tmp_bi == reass->first_bi)
	{
	  /* first buffer - keep ip4 header */
	  if (0 != ip4_full_reass_buffer_get_data_offset (tmp))
	    {
	      return IP4_REASS_RC_INTERNAL_ERROR;
	    }
	  trim_front = 0;
	  trim_end = vlib_buffer_length_in_chain (vm, tmp) - data_len -
	    ip4_header_bytes (ip);
	  if (!(vlib_buffer_length_in_chain (vm, tmp) - trim_end > 0))
	    {
	      return IP4_REASS_RC_INTERNAL_ERROR;
	    }
	}
      u32 keep_data =
	vlib_buffer_length_in_chain (vm, tmp) - trim_front - trim_end;
      while (1)
	{
	  ++buf_cnt;
	  if (trim_front)
	    {
	      if (trim_front > tmp->current_length)
		{
		  /* drop whole buffer */
		  u32 to_be_freed_bi = tmp_bi;
		  trim_front -= tmp->current_length;
		  if (!(tmp->flags & VLIB_BUFFER_NEXT_PRESENT))
		    {
		      return IP4_REASS_RC_INTERNAL_ERROR;
		    }
		  tmp->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
		  tmp_bi = tmp->next_buffer;
		  tmp->next_buffer = 0;
		  tmp = vlib_get_buffer (vm, tmp_bi);
		  vlib_buffer_free_one (vm, to_be_freed_bi);
		  continue;
		}
	      else
		{
		  vlib_buffer_advance (tmp, trim_front);
		  trim_front = 0;
		}
	    }
	  if (keep_data)
	    {
	      if (last_b)
		{
		  last_b->flags |= VLIB_BUFFER_NEXT_PRESENT;
		  last_b->next_buffer = tmp_bi;
		}
	      last_b = tmp;
	      if (keep_data <= tmp->current_length)
		{
		  tmp->current_length = keep_data;
		  keep_data = 0;
		}
	      else
		{
		  keep_data -= tmp->current_length;
		  if (!(tmp->flags & VLIB_BUFFER_NEXT_PRESENT))
		    {
		      return IP4_REASS_RC_INTERNAL_ERROR;
		    }
		}
	      total_length += tmp->current_length;
	      if (tmp->flags & VLIB_BUFFER_NEXT_PRESENT)
		{
		  tmp_bi = tmp->next_buffer;
		  tmp = vlib_get_buffer (vm, tmp->next_buffer);
		}
	      else
		{
		  break;
		}
	    }
	  else
	    {
	      u32 to_be_freed_bi = tmp_bi;
	      if (reass->first_bi == tmp_bi)
		{
		  return IP4_REASS_RC_INTERNAL_ERROR;
		}
	      if (tmp->flags & VLIB_BUFFER_NEXT_PRESENT)
		{
		  tmp->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
		  tmp_bi = tmp->next_buffer;
		  tmp->next_buffer = 0;
		  tmp = vlib_get_buffer (vm, tmp_bi);
		  vlib_buffer_free_one (vm, to_be_freed_bi);
		}
	      else
		{
		  tmp->next_buffer = 0;
		  vlib_buffer_free_one (vm, to_be_freed_bi);
		  break;
		}
	    }
	}
      sub_chain_bi =
	vnet_buffer (vlib_get_buffer (vm, sub_chain_bi))->ip.
	reass.next_range_bi;
    }
  while (~0 != sub_chain_bi);

  if (!last_b)
    {
      return IP4_REASS_RC_INTERNAL_ERROR;
    }
  last_b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;

  if (total_length < first_b->current_length)
    {
      return IP4_REASS_RC_INTERNAL_ERROR;
    }
  total_length -= first_b->current_length;
  first_b->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
  first_b->total_length_not_including_first_buffer = total_length;
  ip4_header_t *ip = vlib_buffer_get_current (first_b);
  ip->flags_and_fragment_offset = 0;
  ip->length = clib_host_to_net_u16 (first_b->current_length + total_length);
  ip->checksum = ip4_header_checksum (ip);
  if (!vlib_buffer_chain_linearize (vm, first_b))
    {
      return IP4_REASS_RC_NO_BUF;
    }
  // reset to reconstruct the mbuf linking
  first_b->flags &= ~VLIB_BUFFER_EXT_HDR_VALID;
  if (PREDICT_FALSE (first_b->flags & VLIB_BUFFER_IS_TRACED))
    {
      ip4_full_reass_add_trace (vm, node, rm, reass, reass->first_bi,
				FINALIZE, 0, ~0);
#if 0
      // following code does a hexdump of packet fragments to stdout ...
      do
	{
	  u32 bi = reass->first_bi;
	  u8 *s = NULL;
	  while (~0 != bi)
	    {
	      vlib_buffer_t *b = vlib_get_buffer (vm, bi);
	      s = format (s, "%u: %U\n", bi, format_hexdump,
			  vlib_buffer_get_current (b), b->current_length);
	      if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
		{
		  bi = b->next_buffer;
		}
	      else
		{
		  break;
		}
	    }
	  printf ("%.*s\n", vec_len (s), s);
	  fflush (stdout);
	  vec_free (s);
	}
      while (0);
#endif
    }
  *bi0 = reass->first_bi;
  if (!is_custom_app)
    {
      *next0 = IP4_FULL_REASS_NEXT_INPUT;
    }
  else
    {
      *next0 = reass->next_index;
    }
  vnet_buffer (first_b)->ip.reass.estimated_mtu = reass->min_fragment_length;
  *error0 = IP4_ERROR_NONE;
  ip4_full_reass_free (rm, rt, reass);
  reass = NULL;
  return IP4_REASS_RC_OK;
}

always_inline ip4_full_reass_rc_t
ip4_full_reass_insert_range_in_chain (vlib_main_t * vm,
				      ip4_full_reass_main_t * rm,
				      ip4_full_reass_per_thread_t * rt,
				      ip4_full_reass_t * reass,
				      u32 prev_range_bi, u32 new_next_bi)
{
  vlib_buffer_t *new_next_b = vlib_get_buffer (vm, new_next_bi);
  vnet_buffer_opaque_t *new_next_vnb = vnet_buffer (new_next_b);
  if (~0 != prev_range_bi)
    {
      vlib_buffer_t *prev_b = vlib_get_buffer (vm, prev_range_bi);
      vnet_buffer_opaque_t *prev_vnb = vnet_buffer (prev_b);
      new_next_vnb->ip.reass.next_range_bi = prev_vnb->ip.reass.next_range_bi;
      prev_vnb->ip.reass.next_range_bi = new_next_bi;
    }
  else
    {
      if (~0 != reass->first_bi)
	{
	  new_next_vnb->ip.reass.next_range_bi = reass->first_bi;
	}
      reass->first_bi = new_next_bi;
    }
  vnet_buffer_opaque_t *vnb = vnet_buffer (new_next_b);
  if (!(vnb->ip.reass.range_first >= vnb->ip.reass.fragment_first) &&
      !(vnb->ip.reass.range_last > vnb->ip.reass.fragment_first))
    {
      return IP4_REASS_RC_INTERNAL_ERROR;
    }
  reass->data_len += ip4_full_reass_buffer_get_data_len (new_next_b);
  return IP4_REASS_RC_OK;
}

always_inline ip4_full_reass_rc_t
ip4_full_reass_remove_range_from_chain (vlib_main_t * vm,
					vlib_node_runtime_t * node,
					ip4_full_reass_main_t * rm,
					ip4_full_reass_t * reass,
					u32 prev_range_bi, u32 discard_bi)
{
  vlib_buffer_t *discard_b = vlib_get_buffer (vm, discard_bi);
  vnet_buffer_opaque_t *discard_vnb = vnet_buffer (discard_b);
  if (~0 != prev_range_bi)
    {
      vlib_buffer_t *prev_b = vlib_get_buffer (vm, prev_range_bi);
      vnet_buffer_opaque_t *prev_vnb = vnet_buffer (prev_b);
      if (!(prev_vnb->ip.reass.next_range_bi == discard_bi))
	{
	  return IP4_REASS_RC_INTERNAL_ERROR;
	}
      prev_vnb->ip.reass.next_range_bi = discard_vnb->ip.reass.next_range_bi;
    }
  else
    {
      reass->first_bi = discard_vnb->ip.reass.next_range_bi;
    }
  vnet_buffer_opaque_t *vnb = vnet_buffer (discard_b);
  if (!(vnb->ip.reass.range_first >= vnb->ip.reass.fragment_first) &&
      !(vnb->ip.reass.range_last > vnb->ip.reass.fragment_first))
    {
      return IP4_REASS_RC_INTERNAL_ERROR;
    }
  reass->data_len -= ip4_full_reass_buffer_get_data_len (discard_b);
  while (1)
    {
      u32 to_be_freed_bi = discard_bi;
      if (PREDICT_FALSE (discard_b->flags & VLIB_BUFFER_IS_TRACED))
	{
	  ip4_full_reass_add_trace (vm, node, rm, reass, discard_bi,
				    RANGE_DISCARD, 0, ~0);
	}
      if (discard_b->flags & VLIB_BUFFER_NEXT_PRESENT)
	{
	  discard_b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
	  discard_bi = discard_b->next_buffer;
	  discard_b->next_buffer = 0;
	  discard_b = vlib_get_buffer (vm, discard_bi);
	  vlib_buffer_free_one (vm, to_be_freed_bi);
	}
      else
	{
	  discard_b->next_buffer = 0;
	  vlib_buffer_free_one (vm, to_be_freed_bi);
	  break;
	}
    }
  return IP4_REASS_RC_OK;
}

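/*
 * Returns IP4_REASS_RC_OK when the fragment was consumed (or the finished
 * packet was placed into *bi0/*next0), IP4_REASS_RC_HANDOFF when the
 * finished reassembly must be sent out by thread *handoff_thread_idx, or an
 * error code, in which case the caller drops the whole reassembly context.
 */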
Klement Sekera896c8962019-06-24 11:52:49 +0000838always_inline ip4_full_reass_rc_t
839ip4_full_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
840 ip4_full_reass_main_t * rm,
841 ip4_full_reass_per_thread_t * rt,
842 ip4_full_reass_t * reass, u32 * bi0, u32 * next0,
843 u32 * error0, bool is_custom_app,
844 u32 * handoff_thread_idx)
Klement Sekera75e7d132017-09-20 08:26:30 +0200845{
Klement Sekera75e7d132017-09-20 08:26:30 +0200846 vlib_buffer_t *fb = vlib_get_buffer (vm, *bi0);
Klement Sekera75e7d132017-09-20 08:26:30 +0200847 vnet_buffer_opaque_t *fvnb = vnet_buffer (fb);
Klement Sekerae8498652019-06-17 12:23:15 +0000848 if (is_custom_app)
849 {
850 // store (error_)next_index before it's overwritten
851 reass->next_index = fvnb->ip.reass.next_index;
852 reass->error_next_index = fvnb->ip.reass.error_next_index;
853 }
Klement Sekera896c8962019-06-24 11:52:49 +0000854 ip4_full_reass_rc_t rc = IP4_REASS_RC_OK;
855 int consumed = 0;
856 ip4_header_t *fip = vlib_buffer_get_current (fb);
Klement Sekera14d7e902018-12-10 13:46:09 +0100857 const u32 fragment_first = ip4_get_fragment_offset_bytes (fip);
858 const u32 fragment_length =
Klement Sekera75e7d132017-09-20 08:26:30 +0200859 clib_net_to_host_u16 (fip->length) - ip4_header_bytes (fip);
Klement Sekera14d7e902018-12-10 13:46:09 +0100860 const u32 fragment_last = fragment_first + fragment_length - 1;
861 fvnb->ip.reass.fragment_first = fragment_first;
862 fvnb->ip.reass.fragment_last = fragment_last;
Klement Sekera75e7d132017-09-20 08:26:30 +0200863 int more_fragments = ip4_get_fragment_more (fip);
864 u32 candidate_range_bi = reass->first_bi;
865 u32 prev_range_bi = ~0;
866 fvnb->ip.reass.range_first = fragment_first;
867 fvnb->ip.reass.range_last = fragment_last;
868 fvnb->ip.reass.next_range_bi = ~0;
869 if (!more_fragments)
870 {
871 reass->last_packet_octet = fragment_last;
872 }
873 if (~0 == reass->first_bi)
874 {
875 // starting a new reassembly
Klement Sekerad0f70a32018-12-14 17:24:13 +0100876 rc =
Klement Sekera896c8962019-06-24 11:52:49 +0000877 ip4_full_reass_insert_range_in_chain (vm, rm, rt, reass,
878 prev_range_bi, *bi0);
Klement Sekerad0f70a32018-12-14 17:24:13 +0100879 if (IP4_REASS_RC_OK != rc)
880 {
881 return rc;
882 }
Klement Sekera75e7d132017-09-20 08:26:30 +0200883 if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
884 {
Klement Sekera896c8962019-06-24 11:52:49 +0000885 ip4_full_reass_add_trace (vm, node, rm, reass, *bi0, RANGE_NEW, 0,
886 ~0);
Klement Sekera75e7d132017-09-20 08:26:30 +0200887 }
888 *bi0 = ~0;
Klement Sekera4c533132018-02-22 11:41:12 +0100889 reass->min_fragment_length = clib_net_to_host_u16 (fip->length);
Klement Sekera3a343d42019-05-16 14:35:46 +0200890 reass->fragments_n = 1;
Klement Sekerad0f70a32018-12-14 17:24:13 +0100891 return IP4_REASS_RC_OK;
Klement Sekera75e7d132017-09-20 08:26:30 +0200892 }
Klement Sekera896c8962019-06-24 11:52:49 +0000893 reass->min_fragment_length =
894 clib_min (clib_net_to_host_u16 (fip->length),
895 fvnb->ip.reass.estimated_mtu);
Klement Sekera75e7d132017-09-20 08:26:30 +0200896 while (~0 != candidate_range_bi)
897 {
898 vlib_buffer_t *candidate_b = vlib_get_buffer (vm, candidate_range_bi);
899 vnet_buffer_opaque_t *candidate_vnb = vnet_buffer (candidate_b);
900 if (fragment_first > candidate_vnb->ip.reass.range_last)
901 {
902 // this fragments starts after candidate range
903 prev_range_bi = candidate_range_bi;
904 candidate_range_bi = candidate_vnb->ip.reass.next_range_bi;
905 if (candidate_vnb->ip.reass.range_last < fragment_last &&
906 ~0 == candidate_range_bi)
907 {
908 // special case - this fragment falls beyond all known ranges
Klement Sekerad0f70a32018-12-14 17:24:13 +0100909 rc =
Klement Sekera896c8962019-06-24 11:52:49 +0000910 ip4_full_reass_insert_range_in_chain (vm, rm, rt, reass,
911 prev_range_bi, *bi0);
Klement Sekerad0f70a32018-12-14 17:24:13 +0100912 if (IP4_REASS_RC_OK != rc)
913 {
914 return rc;
915 }
Klement Sekera75e7d132017-09-20 08:26:30 +0200916 consumed = 1;
917 break;
918 }
919 continue;
920 }
921 if (fragment_last < candidate_vnb->ip.reass.range_first)
922 {
923 // this fragment ends before candidate range without any overlap
Klement Sekerad0f70a32018-12-14 17:24:13 +0100924 rc =
Klement Sekera896c8962019-06-24 11:52:49 +0000925 ip4_full_reass_insert_range_in_chain (vm, rm, rt, reass,
926 prev_range_bi, *bi0);
Klement Sekerad0f70a32018-12-14 17:24:13 +0100927 if (IP4_REASS_RC_OK != rc)
928 {
929 return rc;
930 }
Klement Sekera75e7d132017-09-20 08:26:30 +0200931 consumed = 1;
932 }
933 else
934 {
935 if (fragment_first >= candidate_vnb->ip.reass.range_first &&
936 fragment_last <= candidate_vnb->ip.reass.range_last)
937 {
938 // this fragment is a (sub)part of existing range, ignore it
939 if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
940 {
Klement Sekera896c8962019-06-24 11:52:49 +0000941 ip4_full_reass_add_trace (vm, node, rm, reass, *bi0,
942 RANGE_OVERLAP, 0, ~0);
Klement Sekera75e7d132017-09-20 08:26:30 +0200943 }
944 break;
945 }
946 int discard_candidate = 0;
947 if (fragment_first < candidate_vnb->ip.reass.range_first)
948 {
949 u32 overlap =
950 fragment_last - candidate_vnb->ip.reass.range_first + 1;
Klement Sekera896c8962019-06-24 11:52:49 +0000951 if (overlap < ip4_full_reass_buffer_get_data_len (candidate_b))
Klement Sekera75e7d132017-09-20 08:26:30 +0200952 {
953 candidate_vnb->ip.reass.range_first += overlap;
Klement Sekerad0f70a32018-12-14 17:24:13 +0100954 if (reass->data_len < overlap)
955 {
956 return IP4_REASS_RC_INTERNAL_ERROR;
957 }
Klement Sekera75e7d132017-09-20 08:26:30 +0200958 reass->data_len -= overlap;
959 if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
960 {
Klement Sekera896c8962019-06-24 11:52:49 +0000961 ip4_full_reass_add_trace (vm, node, rm, reass,
962 candidate_range_bi,
963 RANGE_SHRINK, 0, ~0);
Klement Sekera75e7d132017-09-20 08:26:30 +0200964 }
Klement Sekerad0f70a32018-12-14 17:24:13 +0100965 rc =
Klement Sekera896c8962019-06-24 11:52:49 +0000966 ip4_full_reass_insert_range_in_chain (vm, rm, rt, reass,
967 prev_range_bi,
968 *bi0);
Klement Sekerad0f70a32018-12-14 17:24:13 +0100969 if (IP4_REASS_RC_OK != rc)
970 {
971 return rc;
972 }
Klement Sekera75e7d132017-09-20 08:26:30 +0200973 consumed = 1;
974 }
975 else
976 {
977 discard_candidate = 1;
978 }
979 }
980 else if (fragment_last > candidate_vnb->ip.reass.range_last)
981 {
982 u32 overlap =
983 candidate_vnb->ip.reass.range_last - fragment_first + 1;
Klement Sekera896c8962019-06-24 11:52:49 +0000984 if (overlap < ip4_full_reass_buffer_get_data_len (candidate_b))
Klement Sekera75e7d132017-09-20 08:26:30 +0200985 {
986 fvnb->ip.reass.range_first += overlap;
987 if (~0 != candidate_vnb->ip.reass.next_range_bi)
988 {
989 prev_range_bi = candidate_range_bi;
990 candidate_range_bi =
991 candidate_vnb->ip.reass.next_range_bi;
992 continue;
993 }
994 else
995 {
996 // special case - last range discarded
Klement Sekerad0f70a32018-12-14 17:24:13 +0100997 rc =
Klement Sekera896c8962019-06-24 11:52:49 +0000998 ip4_full_reass_insert_range_in_chain (vm, rm, rt,
999 reass,
1000 candidate_range_bi,
1001 *bi0);
Klement Sekerad0f70a32018-12-14 17:24:13 +01001002 if (IP4_REASS_RC_OK != rc)
1003 {
1004 return rc;
1005 }
Klement Sekera75e7d132017-09-20 08:26:30 +02001006 consumed = 1;
1007 }
1008 }
1009 else
1010 {
1011 discard_candidate = 1;
1012 }
1013 }
1014 else
1015 {
1016 discard_candidate = 1;
1017 }
1018 if (discard_candidate)
1019 {
1020 u32 next_range_bi = candidate_vnb->ip.reass.next_range_bi;
1021 // discard candidate range, probe next range
Klement Sekerad0f70a32018-12-14 17:24:13 +01001022 rc =
Klement Sekera896c8962019-06-24 11:52:49 +00001023 ip4_full_reass_remove_range_from_chain (vm, node, rm, reass,
1024 prev_range_bi,
1025 candidate_range_bi);
Klement Sekerad0f70a32018-12-14 17:24:13 +01001026 if (IP4_REASS_RC_OK != rc)
1027 {
1028 return rc;
1029 }
Klement Sekera75e7d132017-09-20 08:26:30 +02001030 if (~0 != next_range_bi)
1031 {
1032 candidate_range_bi = next_range_bi;
1033 continue;
1034 }
1035 else
1036 {
1037 // special case - last range discarded
Klement Sekerad0f70a32018-12-14 17:24:13 +01001038 rc =
Klement Sekera896c8962019-06-24 11:52:49 +00001039 ip4_full_reass_insert_range_in_chain (vm, rm, rt, reass,
1040 prev_range_bi,
1041 *bi0);
Klement Sekerad0f70a32018-12-14 17:24:13 +01001042 if (IP4_REASS_RC_OK != rc)
1043 {
1044 return rc;
1045 }
Klement Sekera75e7d132017-09-20 08:26:30 +02001046 consumed = 1;
1047 }
1048 }
1049 }
1050 break;
1051 }
Klement Sekera3a343d42019-05-16 14:35:46 +02001052 ++reass->fragments_n;
Klement Sekera75e7d132017-09-20 08:26:30 +02001053 if (consumed)
1054 {
1055 if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
1056 {
Klement Sekera896c8962019-06-24 11:52:49 +00001057 ip4_full_reass_add_trace (vm, node, rm, reass, *bi0, RANGE_NEW, 0,
1058 ~0);
Klement Sekera75e7d132017-09-20 08:26:30 +02001059 }
1060 }
1061 if (~0 != reass->last_packet_octet &&
1062 reass->data_len == reass->last_packet_octet + 1)
1063 {
Klement Sekera630ab582019-07-19 09:14:19 +00001064 *handoff_thread_idx = reass->sendout_thread_index;
Benoît Ganne2d0ebd72019-07-19 13:42:12 +02001065 int handoff =
1066 reass->memory_owner_thread_index != reass->sendout_thread_index;
Klement Sekera630ab582019-07-19 09:14:19 +00001067 rc =
Klement Sekera896c8962019-06-24 11:52:49 +00001068 ip4_full_reass_finalize (vm, node, rm, rt, reass, bi0, next0, error0,
1069 is_custom_app);
Benoît Ganne2d0ebd72019-07-19 13:42:12 +02001070 if (IP4_REASS_RC_OK == rc && handoff)
Klement Sekera630ab582019-07-19 09:14:19 +00001071 {
1072 rc = IP4_REASS_RC_HANDOFF;
1073 }
Klement Sekera75e7d132017-09-20 08:26:30 +02001074 }
1075 else
1076 {
1077 if (consumed)
1078 {
1079 *bi0 = ~0;
Klement Sekera3a343d42019-05-16 14:35:46 +02001080 if (reass->fragments_n > rm->max_reass_len)
1081 {
1082 rc = IP4_REASS_RC_TOO_MANY_FRAGMENTS;
1083 }
Klement Sekera75e7d132017-09-20 08:26:30 +02001084 }
1085 else
1086 {
Klement Sekera896c8962019-06-24 11:52:49 +00001087 *next0 = IP4_FULL_REASS_NEXT_DROP;
Klement Sekera75e7d132017-09-20 08:26:30 +02001088 *error0 = IP4_ERROR_REASS_DUPLICATE_FRAGMENT;
1089 }
1090 }
Klement Sekerad0f70a32018-12-14 17:24:13 +01001091 return rc;
Klement Sekera75e7d132017-09-20 08:26:30 +02001092}
1093
1094always_inline uword
Klement Sekera896c8962019-06-24 11:52:49 +00001095ip4_full_reass_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
Klement Sekerae8498652019-06-17 12:23:15 +00001096 vlib_frame_t * frame, bool is_feature,
1097 bool is_custom_app)
Klement Sekera75e7d132017-09-20 08:26:30 +02001098{
1099 u32 *from = vlib_frame_vector_args (frame);
1100 u32 n_left_from, n_left_to_next, *to_next, next_index;
Klement Sekera896c8962019-06-24 11:52:49 +00001101 ip4_full_reass_main_t *rm = &ip4_full_reass_main;
1102 ip4_full_reass_per_thread_t *rt = &rm->per_thread_data[vm->thread_index];
Klement Sekera4c533132018-02-22 11:41:12 +01001103 clib_spinlock_lock (&rt->lock);
Klement Sekera75e7d132017-09-20 08:26:30 +02001104
1105 n_left_from = frame->n_vectors;
1106 next_index = node->cached_next_index;
Klement Sekeraf883f6a2019-02-13 11:01:32 +01001107 while (n_left_from > 0)
Klement Sekera75e7d132017-09-20 08:26:30 +02001108 {
1109 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
1110
Klement Sekera75e7d132017-09-20 08:26:30 +02001111 while (n_left_from > 0 && n_left_to_next > 0)
1112 {
1113 u32 bi0;
1114 vlib_buffer_t *b0;
Klement Sekera4c533132018-02-22 11:41:12 +01001115 u32 next0;
1116 u32 error0 = IP4_ERROR_NONE;
Klement Sekera75e7d132017-09-20 08:26:30 +02001117
1118 bi0 = from[0];
1119 b0 = vlib_get_buffer (vm, bi0);
1120
1121 ip4_header_t *ip0 = vlib_buffer_get_current (b0);
Klement Sekera4c533132018-02-22 11:41:12 +01001122 if (!ip4_get_fragment_more (ip0) && !ip4_get_fragment_offset (ip0))
Klement Sekera75e7d132017-09-20 08:26:30 +02001123 {
Klement Sekera4c533132018-02-22 11:41:12 +01001124 // this is a whole packet - no fragmentation
Klement Sekerae8498652019-06-17 12:23:15 +00001125 if (!is_custom_app)
Klement Sekera4c533132018-02-22 11:41:12 +01001126 {
Klement Sekera896c8962019-06-24 11:52:49 +00001127 next0 = IP4_FULL_REASS_NEXT_INPUT;
Klement Sekera4c533132018-02-22 11:41:12 +01001128 }
1129 else
1130 {
1131 next0 = vnet_buffer (b0)->ip.reass.next_index;
1132 }
Klement Sekera896c8962019-06-24 11:52:49 +00001133 goto packet_enqueue;
1134 }
1135 const u32 fragment_first = ip4_get_fragment_offset_bytes (ip0);
1136 const u32 fragment_length =
1137 clib_net_to_host_u16 (ip0->length) - ip4_header_bytes (ip0);
1138 const u32 fragment_last = fragment_first + fragment_length - 1;
1139 if (fragment_first > fragment_last || fragment_first + fragment_length > UINT16_MAX - 20 || (fragment_length < 8 && ip4_get_fragment_more (ip0))) // 8 is minimum frag length per RFC 791
1140 {
1141 next0 = IP4_FULL_REASS_NEXT_DROP;
1142 error0 = IP4_ERROR_REASS_MALFORMED_PACKET;
1143 goto packet_enqueue;
1144 }
1145 ip4_full_reass_kv_t kv;
1146 u8 do_handoff = 0;
1147
1148 kv.k.as_u64[0] =
1149 (u64) vec_elt (ip4_main.fib_index_by_sw_if_index,
1150 vnet_buffer (b0)->sw_if_index[VLIB_RX]) |
1151 (u64) ip0->src_address.as_u32 << 32;
1152 kv.k.as_u64[1] =
1153 (u64) ip0->dst_address.
1154 as_u32 | (u64) ip0->fragment_id << 32 | (u64) ip0->protocol << 48;
1155
1156 ip4_full_reass_t *reass =
1157 ip4_full_reass_find_or_create (vm, node, rm, rt, &kv,
1158 &do_handoff);
1159
1160 if (reass)
1161 {
1162 const u32 fragment_first = ip4_get_fragment_offset_bytes (ip0);
1163 if (0 == fragment_first)
1164 {
1165 reass->sendout_thread_index = vm->thread_index;
1166 }
1167 }
1168
1169 if (PREDICT_FALSE (do_handoff))
1170 {
1171 next0 = IP4_FULL_REASS_NEXT_HANDOFF;
Klement Sekerade34c352019-06-25 11:19:22 +00001172 vnet_buffer (b0)->ip.reass.owner_thread_index =
1173 kv.v.memory_owner_thread_index;
Klement Sekera896c8962019-06-24 11:52:49 +00001174 }
1175 else if (reass)
1176 {
1177 u32 handoff_thread_idx;
1178 switch (ip4_full_reass_update
1179 (vm, node, rm, rt, reass, &bi0, &next0,
1180 &error0, is_custom_app, &handoff_thread_idx))
1181 {
1182 case IP4_REASS_RC_OK:
1183 /* nothing to do here */
1184 break;
1185 case IP4_REASS_RC_HANDOFF:
1186 next0 = IP4_FULL_REASS_NEXT_HANDOFF;
1187 b0 = vlib_get_buffer (vm, bi0);
Klement Sekerade34c352019-06-25 11:19:22 +00001188 vnet_buffer (b0)->ip.reass.owner_thread_index =
1189 handoff_thread_idx;
Klement Sekera896c8962019-06-24 11:52:49 +00001190 break;
1191 case IP4_REASS_RC_TOO_MANY_FRAGMENTS:
1192 vlib_node_increment_counter (vm, node->node_index,
1193 IP4_ERROR_REASS_FRAGMENT_CHAIN_TOO_LONG,
1194 1);
1195 ip4_full_reass_drop_all (vm, node, rm, reass);
1196 ip4_full_reass_free (rm, rt, reass);
1197 goto next_packet;
1198 break;
1199 case IP4_REASS_RC_NO_BUF:
1200 vlib_node_increment_counter (vm, node->node_index,
1201 IP4_ERROR_REASS_NO_BUF, 1);
1202 ip4_full_reass_drop_all (vm, node, rm, reass);
1203 ip4_full_reass_free (rm, rt, reass);
1204 goto next_packet;
1205 break;
1206 case IP4_REASS_RC_INTERNAL_ERROR:
1207 /* drop everything and start with a clean slate */
1208 vlib_node_increment_counter (vm, node->node_index,
1209 IP4_ERROR_REASS_INTERNAL_ERROR,
1210 1);
1211 ip4_full_reass_drop_all (vm, node, rm, reass);
1212 ip4_full_reass_free (rm, rt, reass);
1213 goto next_packet;
1214 break;
1215 }
Klement Sekera75e7d132017-09-20 08:26:30 +02001216 }
1217 else
1218 {
Klement Sekera896c8962019-06-24 11:52:49 +00001219 next0 = IP4_FULL_REASS_NEXT_DROP;
1220 error0 = IP4_ERROR_REASS_LIMIT_REACHED;
Klement Sekera4c533132018-02-22 11:41:12 +01001221 }
Klement Sekera75e7d132017-09-20 08:26:30 +02001222
Klement Sekera896c8962019-06-24 11:52:49 +00001223
1224 packet_enqueue:
Klement Sekera896c8962019-06-24 11:52:49 +00001225
Klement Sekera75e7d132017-09-20 08:26:30 +02001226 if (bi0 != ~0)
1227 {
1228 to_next[0] = bi0;
1229 to_next += 1;
1230 n_left_to_next -= 1;
Benoît Gannecf7803d2019-10-23 13:53:49 +02001231
1232 /* bi0 might have been updated by reass_finalize, reload */
1233 b0 = vlib_get_buffer (vm, bi0);
Klement Sekera1766ddc2020-03-30 16:59:38 +02001234 if (IP4_ERROR_NONE != error0)
1235 {
1236 b0->error = node->errors[error0];
1237 }
Benoît Gannecf7803d2019-10-23 13:53:49 +02001238
Klement Sekera896c8962019-06-24 11:52:49 +00001239 if (next0 == IP4_FULL_REASS_NEXT_HANDOFF)
Klement Sekera630ab582019-07-19 09:14:19 +00001240 {
1241 if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
1242 {
Klement Sekerade34c352019-06-25 11:19:22 +00001243 ip4_full_reass_add_trace (vm, node, rm, NULL, bi0,
1244 HANDOFF, 0,
1245 vnet_buffer (b0)->ip.
1246 reass.owner_thread_index);
Klement Sekera630ab582019-07-19 09:14:19 +00001247 }
1248 }
1249 else if (is_feature && IP4_ERROR_NONE == error0)
Klement Sekera4c533132018-02-22 11:41:12 +01001250 {
Damjan Marion7d98a122018-07-19 20:42:08 +02001251 vnet_feature_next (&next0, b0);
Klement Sekera4c533132018-02-22 11:41:12 +01001252 }
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001253 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1254 to_next, n_left_to_next,
1255 bi0, next0);
Klement Sekera75e7d132017-09-20 08:26:30 +02001256 IP4_REASS_DEBUG_BUFFER (bi0, enqueue_next);
1257 }
1258
Klement Sekerad0f70a32018-12-14 17:24:13 +01001259 next_packet:
Klement Sekera75e7d132017-09-20 08:26:30 +02001260 from += 1;
1261 n_left_from -= 1;
1262 }
1263
1264 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1265 }
1266
Klement Sekera4c533132018-02-22 11:41:12 +01001267 clib_spinlock_unlock (&rt->lock);
Klement Sekera75e7d132017-09-20 08:26:30 +02001268 return frame->n_vectors;
1269}
1270
Klement Sekera896c8962019-06-24 11:52:49 +00001271static char *ip4_full_reass_error_strings[] = {
Klement Sekera75e7d132017-09-20 08:26:30 +02001272#define _(sym, string) string,
1273 foreach_ip4_error
1274#undef _
1275};
1276
Klement Sekera896c8962019-06-24 11:52:49 +00001277VLIB_NODE_FN (ip4_full_reass_node) (vlib_main_t * vm,
1278 vlib_node_runtime_t * node,
1279 vlib_frame_t * frame)
Klement Sekera4c533132018-02-22 11:41:12 +01001280{
Klement Sekera896c8962019-06-24 11:52:49 +00001281 return ip4_full_reass_inline (vm, node, frame, false /* is_feature */ ,
Klement Sekerae8498652019-06-17 12:23:15 +00001282 false /* is_custom_app */ );
Klement Sekera4c533132018-02-22 11:41:12 +01001283}
1284
Klement Sekera75e7d132017-09-20 08:26:30 +02001285/* *INDENT-OFF* */
Klement Sekera896c8962019-06-24 11:52:49 +00001286VLIB_REGISTER_NODE (ip4_full_reass_node) = {
1287 .name = "ip4-full-reassembly",
Klement Sekera75e7d132017-09-20 08:26:30 +02001288 .vector_size = sizeof (u32),
Klement Sekera896c8962019-06-24 11:52:49 +00001289 .format_trace = format_ip4_full_reass_trace,
1290 .n_errors = ARRAY_LEN (ip4_full_reass_error_strings),
1291 .error_strings = ip4_full_reass_error_strings,
1292 .n_next_nodes = IP4_FULL_REASS_N_NEXT,
Klement Sekera75e7d132017-09-20 08:26:30 +02001293 .next_nodes =
1294 {
Klement Sekera896c8962019-06-24 11:52:49 +00001295 [IP4_FULL_REASS_NEXT_INPUT] = "ip4-input",
1296 [IP4_FULL_REASS_NEXT_DROP] = "ip4-drop",
1297 [IP4_FULL_REASS_NEXT_HANDOFF] = "ip4-full-reassembly-handoff",
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001298
Klement Sekera75e7d132017-09-20 08:26:30 +02001299 },
1300};
1301/* *INDENT-ON* */
1302
Klement Sekera896c8962019-06-24 11:52:49 +00001303VLIB_NODE_FN (ip4_full_reass_node_feature) (vlib_main_t * vm,
1304 vlib_node_runtime_t * node,
1305 vlib_frame_t * frame)
Klement Sekera4c533132018-02-22 11:41:12 +01001306{
Klement Sekera896c8962019-06-24 11:52:49 +00001307 return ip4_full_reass_inline (vm, node, frame, true /* is_feature */ ,
Klement Sekerae8498652019-06-17 12:23:15 +00001308 false /* is_custom_app */ );
Klement Sekera4c533132018-02-22 11:41:12 +01001309}
1310
1311/* *INDENT-OFF* */
Klement Sekera896c8962019-06-24 11:52:49 +00001312VLIB_REGISTER_NODE (ip4_full_reass_node_feature) = {
1313 .name = "ip4-full-reassembly-feature",
Klement Sekera4c533132018-02-22 11:41:12 +01001314 .vector_size = sizeof (u32),
Klement Sekera896c8962019-06-24 11:52:49 +00001315 .format_trace = format_ip4_full_reass_trace,
1316 .n_errors = ARRAY_LEN (ip4_full_reass_error_strings),
1317 .error_strings = ip4_full_reass_error_strings,
1318 .n_next_nodes = IP4_FULL_REASS_N_NEXT,
Klement Sekera4c533132018-02-22 11:41:12 +01001319 .next_nodes =
1320 {
Klement Sekera896c8962019-06-24 11:52:49 +00001321 [IP4_FULL_REASS_NEXT_INPUT] = "ip4-input",
1322 [IP4_FULL_REASS_NEXT_DROP] = "ip4-drop",
1323 [IP4_FULL_REASS_NEXT_HANDOFF] = "ip4-full-reass-feature-hoff",
Klement Sekera4c533132018-02-22 11:41:12 +01001324 },
1325};
1326/* *INDENT-ON* */
1327
Klement Sekera4c533132018-02-22 11:41:12 +01001328/* *INDENT-OFF* */
Klement Sekera896c8962019-06-24 11:52:49 +00001329VNET_FEATURE_INIT (ip4_full_reass_feature, static) = {
Klement Sekera4c533132018-02-22 11:41:12 +01001330 .arc_name = "ip4-unicast",
Klement Sekera896c8962019-06-24 11:52:49 +00001331 .node_name = "ip4-full-reassembly-feature",
Neale Ranns14046982019-07-29 14:49:52 +00001332 .runs_before = VNET_FEATURES ("ip4-lookup",
Neale Ranns2be3eb62019-08-02 01:17:13 -07001333 "ipsec4-input-feature"),
Klement Sekera4c533132018-02-22 11:41:12 +01001334 .runs_after = 0,
1335};
1336/* *INDENT-ON* */
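/*
 * The VNET_FEATURE_INIT block above attaches the feature-path node to
 * the ip4-unicast arc and pins its ordering: it must run before
 * ip4-lookup and ipsec4-input-feature, so those nodes only ever see
 * complete datagrams. Turning the feature on for an interface is a
 * runtime call; a minimal sketch, assuming sw_if_index names an
 * existing interface (this is exactly what
 * ip4_full_reass_enable_disable () later in this file does):
 *
 *   vnet_feature_enable_disable ("ip4-unicast",
 *                                "ip4-full-reassembly-feature",
 *                                sw_if_index, 1, 0, 0);   // 1 = enable
 */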
1337
Filip Tehlar26ea14e2019-03-11 05:30:21 -07001338#ifndef CLIB_MARCH_VARIANT
Klement Sekera4c533132018-02-22 11:41:12 +01001339always_inline u32
Klement Sekera896c8962019-06-24 11:52:49 +00001340ip4_full_reass_get_nbuckets ()
Klement Sekera75e7d132017-09-20 08:26:30 +02001341{
Klement Sekera896c8962019-06-24 11:52:49 +00001342 ip4_full_reass_main_t *rm = &ip4_full_reass_main;
Klement Sekera75e7d132017-09-20 08:26:30 +02001343 u32 nbuckets;
1344 u8 i;
1345
1346 nbuckets = (u32) (rm->max_reass_n / IP4_REASS_HT_LOAD_FACTOR);
1347
1348 for (i = 0; i < 31; i++)
1349 if ((1 << i) >= nbuckets)
1350 break;
1351 nbuckets = 1 << i;
1352
1353 return nbuckets;
1354}
Filip Tehlar26ea14e2019-03-11 05:30:21 -07001355#endif /* CLIB_MARCH_VARIANT */
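/*
 * Sizing note: ip4_full_reass_get_nbuckets () divides the configured
 * reassembly limit by the 0.75 load factor and rounds the result up to
 * the next power of two. Worked example with the compile-time default:
 *
 *   max_reass_n = 1024
 *   1024 / 0.75 = 1365 (after truncation to u32)
 *   smallest power of two >= 1365  ->  nbuckets = 2048
 */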
Klement Sekera75e7d132017-09-20 08:26:30 +02001356
1357typedef enum
1358{
1359 IP4_EVENT_CONFIG_CHANGED = 1,
Klement Sekera896c8962019-06-24 11:52:49 +00001360} ip4_full_reass_event_t;
Klement Sekera75e7d132017-09-20 08:26:30 +02001361
1362typedef struct
1363{
1364 int failure;
Klement Sekera8dcfed52018-06-28 11:16:15 +02001365 clib_bihash_16_8_t *new_hash;
Klement Sekera75e7d132017-09-20 08:26:30 +02001366} ip4_rehash_cb_ctx;
1367
Filip Tehlar26ea14e2019-03-11 05:30:21 -07001368#ifndef CLIB_MARCH_VARIANT
Neale Rannsf50bac12019-12-06 05:53:17 +00001369static int
Klement Sekera8dcfed52018-06-28 11:16:15 +02001370ip4_rehash_cb (clib_bihash_kv_16_8_t * kv, void *_ctx)
Klement Sekera75e7d132017-09-20 08:26:30 +02001371{
1372 ip4_rehash_cb_ctx *ctx = _ctx;
Klement Sekera8dcfed52018-06-28 11:16:15 +02001373 if (clib_bihash_add_del_16_8 (ctx->new_hash, kv, 1))
Klement Sekera75e7d132017-09-20 08:26:30 +02001374 {
1375 ctx->failure = 1;
1376 }
Neale Rannsf50bac12019-12-06 05:53:17 +00001377 return (BIHASH_WALK_CONTINUE);
Klement Sekera75e7d132017-09-20 08:26:30 +02001378}
1379
Klement Sekera4c533132018-02-22 11:41:12 +01001380static void
Klement Sekera896c8962019-06-24 11:52:49 +00001381ip4_full_reass_set_params (u32 timeout_ms, u32 max_reassemblies,
1382 u32 max_reassembly_length,
1383 u32 expire_walk_interval_ms)
Klement Sekera4c533132018-02-22 11:41:12 +01001384{
Klement Sekera896c8962019-06-24 11:52:49 +00001385 ip4_full_reass_main.timeout_ms = timeout_ms;
1386 ip4_full_reass_main.timeout = (f64) timeout_ms / (f64) MSEC_PER_SEC;
1387 ip4_full_reass_main.max_reass_n = max_reassemblies;
1388 ip4_full_reass_main.max_reass_len = max_reassembly_length;
1389 ip4_full_reass_main.expire_walk_interval_ms = expire_walk_interval_ms;
Klement Sekera4c533132018-02-22 11:41:12 +01001390}
1391
Klement Sekera75e7d132017-09-20 08:26:30 +02001392vnet_api_error_t
Klement Sekera896c8962019-06-24 11:52:49 +00001393ip4_full_reass_set (u32 timeout_ms, u32 max_reassemblies,
1394 u32 max_reassembly_length, u32 expire_walk_interval_ms)
Klement Sekera75e7d132017-09-20 08:26:30 +02001395{
Klement Sekera896c8962019-06-24 11:52:49 +00001396 u32 old_nbuckets = ip4_full_reass_get_nbuckets ();
1397 ip4_full_reass_set_params (timeout_ms, max_reassemblies,
1398 max_reassembly_length, expire_walk_interval_ms);
1399 vlib_process_signal_event (ip4_full_reass_main.vlib_main,
1400 ip4_full_reass_main.ip4_full_reass_expire_node_idx,
Klement Sekera75e7d132017-09-20 08:26:30 +02001401 IP4_EVENT_CONFIG_CHANGED, 0);
Klement Sekera896c8962019-06-24 11:52:49 +00001402 u32 new_nbuckets = ip4_full_reass_get_nbuckets ();
1403 if (ip4_full_reass_main.max_reass_n > 0 && new_nbuckets > old_nbuckets)
Klement Sekera75e7d132017-09-20 08:26:30 +02001404 {
Klement Sekera8dcfed52018-06-28 11:16:15 +02001405 clib_bihash_16_8_t new_hash;
Dave Barachb7b92992018-10-17 10:38:51 -04001406 clib_memset (&new_hash, 0, sizeof (new_hash));
Klement Sekera75e7d132017-09-20 08:26:30 +02001407 ip4_rehash_cb_ctx ctx;
1408 ctx.failure = 0;
1409 ctx.new_hash = &new_hash;
Klement Sekera896c8962019-06-24 11:52:49 +00001410 clib_bihash_init_16_8 (&new_hash, "ip4-dr", new_nbuckets,
Klement Sekera75e7d132017-09-20 08:26:30 +02001411 new_nbuckets * 1024);
Klement Sekera896c8962019-06-24 11:52:49 +00001412 clib_bihash_foreach_key_value_pair_16_8 (&ip4_full_reass_main.hash,
Klement Sekera75e7d132017-09-20 08:26:30 +02001413 ip4_rehash_cb, &ctx);
1414 if (ctx.failure)
1415 {
Klement Sekera8dcfed52018-06-28 11:16:15 +02001416 clib_bihash_free_16_8 (&new_hash);
Klement Sekera75e7d132017-09-20 08:26:30 +02001417 return -1;
1418 }
1419 else
1420 {
Klement Sekera896c8962019-06-24 11:52:49 +00001421 clib_bihash_free_16_8 (&ip4_full_reass_main.hash);
1422 clib_memcpy_fast (&ip4_full_reass_main.hash, &new_hash,
1423 sizeof (ip4_full_reass_main.hash));
1424 clib_bihash_copied (&ip4_full_reass_main.hash, &new_hash);
Klement Sekera75e7d132017-09-20 08:26:30 +02001425 }
1426 }
1427 return 0;
1428}
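/*
 * ip4_full_reass_set () is the programmatic configuration entry point.
 * It stores the new parameters, wakes the expire-walk process so a
 * changed walk interval takes effect on its next wait, and - only when
 * the required bucket count grows - rebuilds the bihash by copying
 * every key/value pair into a freshly sized table. If any copy fails,
 * the new table is freed and -1 is returned while the original table
 * stays in use. A minimal usage sketch with illustrative (non-default)
 * values:
 *
 *   // 200 ms timeout, 2048 concurrent reassemblies, at most 5
 *   // fragments per datagram, expire walk every 10000 ms
 *   vnet_api_error_t rv = ip4_full_reass_set (200, 2048, 5, 10000);
 *   if (rv != 0)
 *     ; // only the hash rebuild failed; parameters were already applied
 */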
1429
1430vnet_api_error_t
Klement Sekera896c8962019-06-24 11:52:49 +00001431ip4_full_reass_get (u32 * timeout_ms, u32 * max_reassemblies,
1432 u32 * max_reassembly_length,
1433 u32 * expire_walk_interval_ms)
Klement Sekera75e7d132017-09-20 08:26:30 +02001434{
Klement Sekera896c8962019-06-24 11:52:49 +00001435 *timeout_ms = ip4_full_reass_main.timeout_ms;
1436 *max_reassemblies = ip4_full_reass_main.max_reass_n;
1437 *max_reassembly_length = ip4_full_reass_main.max_reass_len;
1438 *expire_walk_interval_ms = ip4_full_reass_main.expire_walk_interval_ms;
Klement Sekera75e7d132017-09-20 08:26:30 +02001439 return 0;
1440}
1441
Klement Sekera4c533132018-02-22 11:41:12 +01001442static clib_error_t *
Klement Sekera896c8962019-06-24 11:52:49 +00001443ip4_full_reass_init_function (vlib_main_t * vm)
Klement Sekera75e7d132017-09-20 08:26:30 +02001444{
Klement Sekera896c8962019-06-24 11:52:49 +00001445 ip4_full_reass_main_t *rm = &ip4_full_reass_main;
Klement Sekera75e7d132017-09-20 08:26:30 +02001446 clib_error_t *error = 0;
1447 u32 nbuckets;
Dave Barach1403fcd2018-02-05 09:45:43 -05001448 vlib_node_t *node;
Klement Sekera75e7d132017-09-20 08:26:30 +02001449
1450 rm->vlib_main = vm;
Klement Sekera75e7d132017-09-20 08:26:30 +02001451
Juraj Slobodacd806922018-10-10 10:15:54 +02001452 vec_validate (rm->per_thread_data, vlib_num_workers ());
Klement Sekera896c8962019-06-24 11:52:49 +00001453 ip4_full_reass_per_thread_t *rt;
Klement Sekera4c533132018-02-22 11:41:12 +01001454 vec_foreach (rt, rm->per_thread_data)
1455 {
1456 clib_spinlock_init (&rt->lock);
1457 pool_alloc (rt->pool, rm->max_reass_n);
1458 }
Dave Barach1403fcd2018-02-05 09:45:43 -05001459
Klement Sekera896c8962019-06-24 11:52:49 +00001460 node = vlib_get_node_by_name (vm, (u8 *) "ip4-full-reassembly-expire-walk");
Dave Barach1403fcd2018-02-05 09:45:43 -05001461 ASSERT (node);
Klement Sekera896c8962019-06-24 11:52:49 +00001462 rm->ip4_full_reass_expire_node_idx = node->index;
Dave Barach1403fcd2018-02-05 09:45:43 -05001463
Klement Sekera896c8962019-06-24 11:52:49 +00001464 ip4_full_reass_set_params (IP4_REASS_TIMEOUT_DEFAULT_MS,
1465 IP4_REASS_MAX_REASSEMBLIES_DEFAULT,
1466 IP4_REASS_MAX_REASSEMBLY_LENGTH_DEFAULT,
1467 IP4_REASS_EXPIRE_WALK_INTERVAL_DEFAULT_MS);
Klement Sekera3ecc2212018-03-27 10:34:43 +02001468
Klement Sekera896c8962019-06-24 11:52:49 +00001469 nbuckets = ip4_full_reass_get_nbuckets ();
1470 clib_bihash_init_16_8 (&rm->hash, "ip4-dr", nbuckets, nbuckets * 1024);
Klement Sekera75e7d132017-09-20 08:26:30 +02001471
Dave Barach1403fcd2018-02-05 09:45:43 -05001472 node = vlib_get_node_by_name (vm, (u8 *) "ip4-drop");
Klement Sekera75e7d132017-09-20 08:26:30 +02001473 ASSERT (node);
1474 rm->ip4_drop_idx = node->index;
Klement Sekera4c533132018-02-22 11:41:12 +01001475
Klement Sekera896c8962019-06-24 11:52:49 +00001476 rm->fq_index = vlib_frame_queue_main_init (ip4_full_reass_node.index, 0);
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001477 rm->fq_feature_index =
Klement Sekera896c8962019-06-24 11:52:49 +00001478 vlib_frame_queue_main_init (ip4_full_reass_node_feature.index, 0);
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001479
Klement Sekera7b2e9fb2019-10-01 13:00:22 +00001480 rm->feature_use_refcount_per_intf = NULL;
Klement Sekera75e7d132017-09-20 08:26:30 +02001481 return error;
1482}
1483
Klement Sekera896c8962019-06-24 11:52:49 +00001484VLIB_INIT_FUNCTION (ip4_full_reass_init_function);
Filip Tehlar26ea14e2019-03-11 05:30:21 -07001485#endif /* CLIB_MARCH_VARIANT */
Klement Sekera75e7d132017-09-20 08:26:30 +02001486
1487static uword
Klement Sekera896c8962019-06-24 11:52:49 +00001488ip4_full_reass_walk_expired (vlib_main_t * vm,
1489 vlib_node_runtime_t * node, vlib_frame_t * f)
Klement Sekera75e7d132017-09-20 08:26:30 +02001490{
Klement Sekera896c8962019-06-24 11:52:49 +00001491 ip4_full_reass_main_t *rm = &ip4_full_reass_main;
Klement Sekera75e7d132017-09-20 08:26:30 +02001492 uword event_type, *event_data = 0;
1493
1494 while (true)
1495 {
1496 vlib_process_wait_for_event_or_clock (vm,
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001497 (f64)
1498 rm->expire_walk_interval_ms /
1499 (f64) MSEC_PER_SEC);
Klement Sekera75e7d132017-09-20 08:26:30 +02001500 event_type = vlib_process_get_events (vm, &event_data);
1501
1502 switch (event_type)
1503 {
1504 case ~0: /* no events => timeout */
1505 /* nothing to do here */
1506 break;
1507 case IP4_EVENT_CONFIG_CHANGED:
1508 break;
1509 default:
1510 clib_warning ("BUG: event type 0x%wx", event_type);
1511 break;
1512 }
1513 f64 now = vlib_time_now (vm);
1514
Klement Sekera896c8962019-06-24 11:52:49 +00001515 ip4_full_reass_t *reass;
Klement Sekera75e7d132017-09-20 08:26:30 +02001516 int *pool_indexes_to_free = NULL;
1517
Klement Sekera4c533132018-02-22 11:41:12 +01001518 uword thread_index = 0;
Klement Sekera75e7d132017-09-20 08:26:30 +02001519 int index;
Juraj Slobodacd806922018-10-10 10:15:54 +02001520 const uword nthreads = vlib_num_workers () + 1;
Klement Sekera4c533132018-02-22 11:41:12 +01001521 for (thread_index = 0; thread_index < nthreads; ++thread_index)
1522 {
Klement Sekera896c8962019-06-24 11:52:49 +00001523 ip4_full_reass_per_thread_t *rt =
1524 &rm->per_thread_data[thread_index];
Klement Sekera4c533132018-02-22 11:41:12 +01001525 clib_spinlock_lock (&rt->lock);
1526
1527 vec_reset_length (pool_indexes_to_free);
1528 /* *INDENT-OFF* */
1529 pool_foreach_index (index, rt->pool, ({
1530 reass = pool_elt_at_index (rt->pool, index);
1531 if (now > reass->last_heard + rm->timeout)
1532 {
1533 vec_add1 (pool_indexes_to_free, index);
1534 }
1535 }));
1536 /* *INDENT-ON* */
1537 int *i;
1538 /* *INDENT-OFF* */
1539 vec_foreach (i, pool_indexes_to_free)
1540 {
Klement Sekera896c8962019-06-24 11:52:49 +00001541 ip4_full_reass_t *reass = pool_elt_at_index (rt->pool, i[0]);
1542 ip4_full_reass_drop_all (vm, node, rm, reass);
1543 ip4_full_reass_free (rm, rt, reass);
Klement Sekera4c533132018-02-22 11:41:12 +01001544 }
1545 /* *INDENT-ON* */
1546
1547 clib_spinlock_unlock (&rt->lock);
1548 }
Klement Sekera75e7d132017-09-20 08:26:30 +02001549
Klement Sekera75e7d132017-09-20 08:26:30 +02001550 vec_free (pool_indexes_to_free);
Klement Sekera75e7d132017-09-20 08:26:30 +02001551 if (event_data)
1552 {
1553 _vec_len (event_data) = 0;
1554 }
1555 }
1556
1557 return 0;
1558}
1559
Klement Sekera75e7d132017-09-20 08:26:30 +02001560/* *INDENT-OFF* */
Klement Sekera896c8962019-06-24 11:52:49 +00001561VLIB_REGISTER_NODE (ip4_full_reass_expire_node) = {
1562 .function = ip4_full_reass_walk_expired,
Klement Sekera75e7d132017-09-20 08:26:30 +02001563 .type = VLIB_NODE_TYPE_PROCESS,
Klement Sekera896c8962019-06-24 11:52:49 +00001564 .name = "ip4-full-reassembly-expire-walk",
1565 .format_trace = format_ip4_full_reass_trace,
1566 .n_errors = ARRAY_LEN (ip4_full_reass_error_strings),
1567 .error_strings = ip4_full_reass_error_strings,
Klement Sekera75e7d132017-09-20 08:26:30 +02001568
1569};
1570/* *INDENT-ON* */
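/*
 * ip4_full_reass_walk_expired () above runs as a VLIB process node: it
 * sleeps for expire_walk_interval_ms (or until a config-changed event
 * arrives), then, for every worker thread, takes that thread's
 * spinlock, collects the pool indices of reassemblies whose last_heard
 * timestamp is older than the configured timeout, and drops and frees
 * them. The two-pass shape (collect indices first, free afterwards)
 * avoids mutating the pool while pool_foreach_index is walking it.
 */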
1571
1572static u8 *
Klement Sekera896c8962019-06-24 11:52:49 +00001573format_ip4_full_reass_key (u8 * s, va_list * args)
Klement Sekera75e7d132017-09-20 08:26:30 +02001574{
Klement Sekera896c8962019-06-24 11:52:49 +00001575 ip4_full_reass_key_t *key = va_arg (*args, ip4_full_reass_key_t *);
1576 s =
1577 format (s,
1578 "xx_id: %u, src: %U, dst: %U, frag_id: %u, proto: %u",
1579 key->xx_id, format_ip4_address, &key->src, format_ip4_address,
1580 &key->dst, clib_net_to_host_u16 (key->frag_id), key->proto);
Klement Sekera75e7d132017-09-20 08:26:30 +02001581 return s;
1582}
1583
1584static u8 *
1585format_ip4_reass (u8 * s, va_list * args)
1586{
1587 vlib_main_t *vm = va_arg (*args, vlib_main_t *);
Klement Sekera896c8962019-06-24 11:52:49 +00001588 ip4_full_reass_t *reass = va_arg (*args, ip4_full_reass_t *);
Klement Sekera75e7d132017-09-20 08:26:30 +02001589
Klement Sekera4c533132018-02-22 11:41:12 +01001590 s = format (s, "ID: %lu, key: %U\n first_bi: %u, data_len: %u, "
Klement Sekera75e7d132017-09-20 08:26:30 +02001591 "last_packet_octet: %u, trace_op_counter: %u\n",
Klement Sekera896c8962019-06-24 11:52:49 +00001592 reass->id, format_ip4_full_reass_key, &reass->key,
1593 reass->first_bi, reass->data_len,
1594 reass->last_packet_octet, reass->trace_op_counter);
1595
Klement Sekera75e7d132017-09-20 08:26:30 +02001596 u32 bi = reass->first_bi;
1597 u32 counter = 0;
1598 while (~0 != bi)
1599 {
1600 vlib_buffer_t *b = vlib_get_buffer (vm, bi);
1601 vnet_buffer_opaque_t *vnb = vnet_buffer (b);
Klement Sekera896c8962019-06-24 11:52:49 +00001602 s =
1603 format (s,
1604 " #%03u: range: [%u, %u], bi: %u, off: %d, len: %u, "
1605 "fragment[%u, %u]\n", counter, vnb->ip.reass.range_first,
1606 vnb->ip.reass.range_last, bi,
1607 ip4_full_reass_buffer_get_data_offset (b),
1608 ip4_full_reass_buffer_get_data_len (b),
1609 vnb->ip.reass.fragment_first, vnb->ip.reass.fragment_last);
Klement Sekera75e7d132017-09-20 08:26:30 +02001610 if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
1611 {
1612 bi = b->next_buffer;
1613 }
1614 else
1615 {
1616 bi = ~0;
1617 }
1618 }
1619 return s;
1620}
1621
1622static clib_error_t *
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001623show_ip4_reass (vlib_main_t * vm,
1624 unformat_input_t * input,
Klement Sekera75e7d132017-09-20 08:26:30 +02001625 CLIB_UNUSED (vlib_cli_command_t * lmd))
1626{
Klement Sekera896c8962019-06-24 11:52:49 +00001627 ip4_full_reass_main_t *rm = &ip4_full_reass_main;
Klement Sekera75e7d132017-09-20 08:26:30 +02001628
1629 vlib_cli_output (vm, "---------------------");
1630 vlib_cli_output (vm, "IP4 reassembly status");
1631 vlib_cli_output (vm, "---------------------");
Klement Sekera4c533132018-02-22 11:41:12 +01001632 bool details = false;
Klement Sekera75e7d132017-09-20 08:26:30 +02001633 if (unformat (input, "details"))
1634 {
Klement Sekera4c533132018-02-22 11:41:12 +01001635 details = true;
1636 }
1637
1638 u32 sum_reass_n = 0;
Klement Sekera896c8962019-06-24 11:52:49 +00001639 ip4_full_reass_t *reass;
Klement Sekera4c533132018-02-22 11:41:12 +01001640 uword thread_index;
Juraj Slobodacd806922018-10-10 10:15:54 +02001641 const uword nthreads = vlib_num_workers () + 1;
Klement Sekera4c533132018-02-22 11:41:12 +01001642 for (thread_index = 0; thread_index < nthreads; ++thread_index)
1643 {
Klement Sekera896c8962019-06-24 11:52:49 +00001644 ip4_full_reass_per_thread_t *rt = &rm->per_thread_data[thread_index];
Klement Sekera4c533132018-02-22 11:41:12 +01001645 clib_spinlock_lock (&rt->lock);
1646 if (details)
1647 {
1648 /* *INDENT-OFF* */
1649 pool_foreach (reass, rt->pool, {
1650 vlib_cli_output (vm, "%U", format_ip4_reass, vm, reass);
1651 });
1652 /* *INDENT-ON* */
1653 }
1654 sum_reass_n += rt->reass_n;
Klement Sekera4c533132018-02-22 11:41:12 +01001655 clib_spinlock_unlock (&rt->lock);
Klement Sekera75e7d132017-09-20 08:26:30 +02001656 }
1657 vlib_cli_output (vm, "---------------------");
Vladimir Ratnikova877cf92019-12-21 06:27:52 -05001658 vlib_cli_output (vm, "Current full IP4 reassemblies count: %lu\n",
Klement Sekera4c533132018-02-22 11:41:12 +01001659 (long unsigned) sum_reass_n);
Klement Sekera75e7d132017-09-20 08:26:30 +02001660 vlib_cli_output (vm,
Vladimir Ratnikova877cf92019-12-21 06:27:52 -05001661 "Maximum configured concurrent full IP4 reassemblies per worker-thread: %lu\n",
Klement Sekera75e7d132017-09-20 08:26:30 +02001662 (long unsigned) rm->max_reass_n);
Vladimir Ratnikova877cf92019-12-21 06:27:52 -05001663 vlib_cli_output (vm,
1664 "Maximum configured full IP4 reassembly timeout: %lums\n",
1665 (long unsigned) rm->timeout_ms);
1666 vlib_cli_output (vm,
1667 "Maximum configured full IP4 reassembly expire walk interval: %lums\n",
1668 (long unsigned) rm->expire_walk_interval_ms);
Klement Sekera75e7d132017-09-20 08:26:30 +02001669 return 0;
1670}
1671
1672/* *INDENT-OFF* */
Klement Sekera896c8962019-06-24 11:52:49 +00001673VLIB_CLI_COMMAND (show_ip4_full_reass_cmd, static) = {
1674 .path = "show ip4-full-reassembly",
1675 .short_help = "show ip4-full-reassembly [details]",
Klement Sekera75e7d132017-09-20 08:26:30 +02001676 .function = show_ip4_reass,
1677};
1678/* *INDENT-ON* */
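/*
 * CLI usage sketch for the command registered above; the "details"
 * keyword additionally dumps every in-progress reassembly via
 * format_ip4_reass:
 *
 *   vpp# show ip4-full-reassembly
 *   vpp# show ip4-full-reassembly details
 */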
1679
Filip Tehlar26ea14e2019-03-11 05:30:21 -07001680#ifndef CLIB_MARCH_VARIANT
Klement Sekera4c533132018-02-22 11:41:12 +01001681vnet_api_error_t
Klement Sekera896c8962019-06-24 11:52:49 +00001682ip4_full_reass_enable_disable (u32 sw_if_index, u8 enable_disable)
Klement Sekera4c533132018-02-22 11:41:12 +01001683{
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001684 return vnet_feature_enable_disable ("ip4-unicast",
Klement Sekera896c8962019-06-24 11:52:49 +00001685 "ip4-full-reassembly-feature",
1686 sw_if_index, enable_disable, 0, 0);
Klement Sekera4c533132018-02-22 11:41:12 +01001687}
Filip Tehlar26ea14e2019-03-11 05:30:21 -07001688#endif /* CLIB_MARCH_VARIANT */
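/*
 * Usage sketch for the helper above, assuming sw_if_index identifies an
 * existing interface; a non-zero return is a vnet_api_error_t from the
 * feature framework:
 *
 *   vnet_api_error_t rv =
 *     ip4_full_reass_enable_disable (sw_if_index, 1);   // 1 = enable
 */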
Klement Sekera4c533132018-02-22 11:41:12 +01001689
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001690
Klement Sekera896c8962019-06-24 11:52:49 +00001691#define foreach_ip4_full_reass_handoff_error \
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001692_(CONGESTION_DROP, "congestion drop")
1693
1694
1695typedef enum
1696{
Klement Sekera896c8962019-06-24 11:52:49 +00001697#define _(sym,str) IP4_FULL_REASS_HANDOFF_ERROR_##sym,
1698 foreach_ip4_full_reass_handoff_error
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001699#undef _
Klement Sekera896c8962019-06-24 11:52:49 +00001700 IP4_FULL_REASS_HANDOFF_N_ERROR,
1701} ip4_full_reass_handoff_error_t;
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001702
Klement Sekera896c8962019-06-24 11:52:49 +00001703static char *ip4_full_reass_handoff_error_strings[] = {
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001704#define _(sym,string) string,
Klement Sekera896c8962019-06-24 11:52:49 +00001705 foreach_ip4_full_reass_handoff_error
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001706#undef _
1707};
1708
1709typedef struct
1710{
1711 u32 next_worker_index;
Klement Sekera896c8962019-06-24 11:52:49 +00001712} ip4_full_reass_handoff_trace_t;
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001713
1714static u8 *
Klement Sekera896c8962019-06-24 11:52:49 +00001715format_ip4_full_reass_handoff_trace (u8 * s, va_list * args)
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001716{
1717 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
1718 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
Klement Sekera896c8962019-06-24 11:52:49 +00001719 ip4_full_reass_handoff_trace_t *t =
1720 va_arg (*args, ip4_full_reass_handoff_trace_t *);
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001721
1722 s =
Klement Sekera896c8962019-06-24 11:52:49 +00001723 format (s, "ip4-full-reassembly-handoff: next-worker %d",
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001724 t->next_worker_index);
1725
1726 return s;
1727}
1728
1729always_inline uword
Klement Sekera896c8962019-06-24 11:52:49 +00001730ip4_full_reass_handoff_node_inline (vlib_main_t * vm,
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001731 vlib_node_runtime_t * node,
1732 vlib_frame_t * frame, bool is_feature)
1733{
Klement Sekera896c8962019-06-24 11:52:49 +00001734 ip4_full_reass_main_t *rm = &ip4_full_reass_main;
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001735
1736 vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
1737 u32 n_enq, n_left_from, *from;
1738 u16 thread_indices[VLIB_FRAME_SIZE], *ti;
1739 u32 fq_index;
1740
1741 from = vlib_frame_vector_args (frame);
1742 n_left_from = frame->n_vectors;
1743 vlib_get_buffers (vm, from, bufs, n_left_from);
1744
1745 b = bufs;
1746 ti = thread_indices;
1747
1748 fq_index = (is_feature) ? rm->fq_feature_index : rm->fq_index;
1749
1750 while (n_left_from > 0)
1751 {
Klement Sekerade34c352019-06-25 11:19:22 +00001752 ti[0] = vnet_buffer (b[0])->ip.reass.owner_thread_index;
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001753
1754 if (PREDICT_FALSE
1755 ((node->flags & VLIB_NODE_FLAG_TRACE)
1756 && (b[0]->flags & VLIB_BUFFER_IS_TRACED)))
1757 {
Klement Sekera896c8962019-06-24 11:52:49 +00001758 ip4_full_reass_handoff_trace_t *t =
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001759 vlib_add_trace (vm, node, b[0], sizeof (*t));
1760 t->next_worker_index = ti[0];
1761 }
1762
1763 n_left_from -= 1;
1764 ti += 1;
1765 b += 1;
1766 }
1767 n_enq =
1768 vlib_buffer_enqueue_to_thread (vm, fq_index, from, thread_indices,
1769 frame->n_vectors, 1);
1770
1771 if (n_enq < frame->n_vectors)
1772 vlib_node_increment_counter (vm, node->node_index,
Klement Sekera896c8962019-06-24 11:52:49 +00001773 IP4_FULL_REASS_HANDOFF_ERROR_CONGESTION_DROP,
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001774 frame->n_vectors - n_enq);
1775 return frame->n_vectors;
1776}
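/*
 * Handoff path: ip4_full_reass_handoff_node_inline () reads the target
 * worker from vnet_buffer (b)->ip.reass.owner_thread_index (stamped
 * earlier by the reassembly node when the flow's context is owned by a
 * different thread), optionally records a trace entry, and enqueues the
 * frame with vlib_buffer_enqueue_to_thread () on either the regular or
 * the feature frame queue. Buffers that cannot be enqueued are counted
 * as congestion drops.
 */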
1777
Klement Sekera896c8962019-06-24 11:52:49 +00001778VLIB_NODE_FN (ip4_full_reass_handoff_node) (vlib_main_t * vm,
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001779 vlib_node_runtime_t * node,
1780 vlib_frame_t * frame)
1781{
Klement Sekera896c8962019-06-24 11:52:49 +00001782 return ip4_full_reass_handoff_node_inline (vm, node, frame,
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001783 false /* is_feature */ );
1784}
1785
1786
1787/* *INDENT-OFF* */
Klement Sekera896c8962019-06-24 11:52:49 +00001788VLIB_REGISTER_NODE (ip4_full_reass_handoff_node) = {
1789 .name = "ip4-full-reassembly-handoff",
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001790 .vector_size = sizeof (u32),
Klement Sekera896c8962019-06-24 11:52:49 +00001791 .n_errors = ARRAY_LEN(ip4_full_reass_handoff_error_strings),
1792 .error_strings = ip4_full_reass_handoff_error_strings,
1793 .format_trace = format_ip4_full_reass_handoff_trace,
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001794
1795 .n_next_nodes = 1,
1796
1797 .next_nodes = {
1798 [0] = "error-drop",
1799 },
1800};
1801/* *INDENT-ON* */
1802
1803
1804/* *INDENT-OFF* */
Klement Sekera896c8962019-06-24 11:52:49 +00001805VLIB_NODE_FN (ip4_full_reass_feature_handoff_node) (vlib_main_t * vm,
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001806 vlib_node_runtime_t *
1807 node,
1808 vlib_frame_t * frame)
1809{
Klement Sekera896c8962019-06-24 11:52:49 +00001810 return ip4_full_reass_handoff_node_inline (vm, node, frame,
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001811 true /* is_feature */ );
1812}
1813/* *INDENT-ON* */
1814
1815
1816/* *INDENT-OFF* */
Klement Sekera896c8962019-06-24 11:52:49 +00001817VLIB_REGISTER_NODE (ip4_full_reass_feature_handoff_node) = {
1818 .name = "ip4-full-reass-feature-hoff",
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001819 .vector_size = sizeof (u32),
Klement Sekera896c8962019-06-24 11:52:49 +00001820 .n_errors = ARRAY_LEN(ip4_full_reass_handoff_error_strings),
1821 .error_strings = ip4_full_reass_handoff_error_strings,
1822 .format_trace = format_ip4_full_reass_handoff_trace,
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001823
1824 .n_next_nodes = 1,
1825
1826 .next_nodes = {
1827 [0] = "error-drop",
1828 },
1829};
1830/* *INDENT-ON* */
1831
Klement Sekera7b2e9fb2019-10-01 13:00:22 +00001832#ifndef CLIB_MARCH_VARIANT
1833int
1834ip4_full_reass_enable_disable_with_refcnt (u32 sw_if_index, int is_enable)
1835{
1836 ip4_full_reass_main_t *rm = &ip4_full_reass_main;
1837 vec_validate (rm->feature_use_refcount_per_intf, sw_if_index);
1838 if (is_enable)
1839 {
1840 if (!rm->feature_use_refcount_per_intf[sw_if_index])
1841 {
1842 ++rm->feature_use_refcount_per_intf[sw_if_index];
1843 return vnet_feature_enable_disable ("ip4-unicast",
1844 "ip4-full-reassembly-feature",
1845 sw_if_index, 1, 0, 0);
1846 }
1847 ++rm->feature_use_refcount_per_intf[sw_if_index];
1848 }
1849 else
1850 {
1851 --rm->feature_use_refcount_per_intf[sw_if_index];
1852 if (!rm->feature_use_refcount_per_intf[sw_if_index])
1853 return vnet_feature_enable_disable ("ip4-unicast",
1854 "ip4-full-reassembly-feature",
1855 sw_if_index, 0, 0, 0);
1856 }
1857 return -1;
1858}
1859#endif
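/*
 * Reference-counted variant: components that depend on reassembly use
 * ip4_full_reass_enable_disable_with_refcnt () so the ip4-unicast
 * feature is only toggled on the 0 -> 1 and 1 -> 0 transitions of the
 * per-interface counter; intermediate calls just adjust the counter and
 * return -1. A usage sketch, assuming two independent users of the same
 * sw_if_index:
 *
 *   ip4_full_reass_enable_disable_with_refcnt (sw_if_index, 1); // feature enabled
 *   ip4_full_reass_enable_disable_with_refcnt (sw_if_index, 1); // refcount only
 *   ip4_full_reass_enable_disable_with_refcnt (sw_if_index, 0); // refcount only
 *   ip4_full_reass_enable_disable_with_refcnt (sw_if_index, 0); // feature disabled
 */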
1860
Klement Sekera75e7d132017-09-20 08:26:30 +02001861/*
1862 * fd.io coding-style-patch-verification: ON
1863 *
1864 * Local Variables:
1865 * eval: (c-set-style "gnu")
1866 * End:
1867 */