blob: 682cad965afcd672876524345e6c90712502126c [file] [log] [blame]
/*
 * Copyright (c) 2017 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * @file
 * @brief IPv4 Reassembly.
 *
 * This file contains the source code for IPv4 reassembly.
 */
22
23#include <vppinfra/vec.h>
24#include <vnet/vnet.h>
25#include <vnet/ip/ip.h>
Klement Sekera8dcfed52018-06-28 11:16:15 +020026#include <vppinfra/bihash_16_8.h>
Klement Sekera75e7d132017-09-20 08:26:30 +020027#include <vnet/ip/ip4_reassembly.h>
Klement Sekera630ab582019-07-19 09:14:19 +000028#include <stddef.h>
Klement Sekera75e7d132017-09-20 08:26:30 +020029
// milliseconds per second - converts configured ms values into seconds
#define MSEC_PER_SEC 1000
// default reassembly timeout; contexts idle longer than this are reaped
#define IP4_REASS_TIMEOUT_DEFAULT_MS 100
#define IP4_REASS_EXPIRE_WALK_INTERVAL_DEFAULT_MS 10000	// 10 seconds default
// default cap on concurrent reassembly contexts (checked per-thread in
// ip4_reass_find_or_create)
#define IP4_REASS_MAX_REASSEMBLIES_DEFAULT 1024
// default cap on the number of fragments in a single reassembly
#define IP4_REASS_MAX_REASSEMBLY_LENGTH_DEFAULT 3
// load factor used when sizing the reassembly hash table
#define IP4_REASS_HT_LOAD_FACTOR (0.75)

// set to 1 to enable verbose buffer-chain debug printouts on stdout
#define IP4_REASS_DEBUG_BUFFERS 0
#if IP4_REASS_DEBUG_BUFFERS
// print the chain of buffer indices starting at bi
// NOTE: expands code that relies on a 'vm' variable in scope at the call site
#define IP4_REASS_DEBUG_BUFFER(bi, what)             \
  do                                                 \
    {                                                \
      u32 _bi = bi;                                  \
      printf (#what "buffer %u", _bi);               \
      vlib_buffer_t *_b = vlib_get_buffer (vm, _bi); \
      while (_b->flags & VLIB_BUFFER_NEXT_PRESENT)   \
	{                                            \
	  _bi = _b->next_buffer;                     \
	  printf ("[%u]", _bi);                      \
	  _b = vlib_get_buffer (vm, _bi);            \
	}                                            \
      printf ("\n");                                 \
      fflush (stdout);                               \
    }                                                \
  while (0)
#else
#define IP4_REASS_DEBUG_BUFFER(...)
#endif
58
// return codes of the internal reassembly helpers
typedef enum
{
  IP4_REASS_RC_OK,		  // operation succeeded
  IP4_REASS_RC_TOO_MANY_FRAGMENTS,  // fragment count exceeded the configured cap
  IP4_REASS_RC_INTERNAL_ERROR,	  // internal consistency check failed
  IP4_REASS_RC_NO_BUF,		  // ran out of buffers (e.g. during linearize)
  IP4_REASS_RC_HANDOFF,		  // fragment must be handed to another worker
} ip4_reass_rc_t;
Klement Sekera75e7d132017-09-20 08:26:30 +020067
// 16-byte key identifying one reassembly; the anonymous union lets the key
// be used directly as the two 64-bit key words of a clib_bihash_kv_16_8_t
typedef struct
{
  union
  {
    struct
    {
      u32 xx_id;		// context discriminator (presumably fib/interface scope - TODO confirm against callers)
      ip4_address_t src;	// source address of the fragments
      ip4_address_t dst;	// destination address of the fragments
      u16 frag_id;		// IPv4 fragment identification field
      u8 proto;			// IPv4 protocol field
      u8 unused;		// padding, keeps the key at 16 bytes
    };
    u64 as_u64[2];		// raw view used for bihash operations
  };
} ip4_reass_key_t;
84
// value stored in the hash table for a reassembly: where the context lives
typedef union
{
  struct
  {
    u32 reass_index;		    // index into the owner thread's context pool
    u32 memory_owner_thread_index;  // thread whose pool holds the context
  };
  u64 as_u64;			    // raw view used for bihash operations
} ip4_reass_val_t;
94
// convenience overlay of key + value onto a bihash 16_8 key-value pair
typedef union
{
  struct
  {
    ip4_reass_key_t k;		// lookup key
    ip4_reass_val_t v;		// stored value
  };
  clib_bihash_kv_16_8_t kv;	// raw bihash view of the same bytes
} ip4_reass_kv_t;
104
Klement Sekera75e7d132017-09-20 08:26:30 +0200105always_inline u32
Klement Sekera75e7d132017-09-20 08:26:30 +0200106ip4_reass_buffer_get_data_offset (vlib_buffer_t * b)
107{
108 vnet_buffer_opaque_t *vnb = vnet_buffer (b);
Klement Sekerad0f70a32018-12-14 17:24:13 +0100109 return vnb->ip.reass.range_first - vnb->ip.reass.fragment_first;
Klement Sekera75e7d132017-09-20 08:26:30 +0200110}
111
112always_inline u16
113ip4_reass_buffer_get_data_len (vlib_buffer_t * b)
114{
115 vnet_buffer_opaque_t *vnb = vnet_buffer (b);
Klement Sekerad0f70a32018-12-14 17:24:13 +0100116 return clib_min (vnb->ip.reass.range_last, vnb->ip.reass.fragment_last) -
117 (vnb->ip.reass.fragment_first + ip4_reass_buffer_get_data_offset (b)) + 1;
Klement Sekera75e7d132017-09-20 08:26:30 +0200118}
119
// state of one in-progress reassembly
typedef struct
{
  // hash table key
  ip4_reass_key_t key;
  // time when last packet was received
  f64 last_heard;
  // internal id of this reassembly
  u64 id;
  // buffer index of first buffer in this reassembly context
  u32 first_bi;
  // last octet of packet, ~0 until fragment without more_fragments arrives
  u32 last_packet_octet;
  // length of data collected so far
  u32 data_len;
  // trace operation counter
  u32 trace_op_counter;
  // next index - used by non-feature node
  u32 next_index;
  // error next index - used by custom apps (~0 if not used)
  u32 error_next_index;
  // minimum fragment length for this reassembly - used to estimate MTU
  u16 min_fragment_length;
  // number of fragments in this reassembly
  u32 fragments_n;
  // thread owning memory for this context (whose pool contains this ctx)
  u32 memory_owner_thread_index;
  // thread which received fragment with offset 0 and which sends out the
  // completed reassembly
  u32 sendout_thread_index;
} ip4_reass_t;
150
// per-worker reassembly state
typedef struct
{
  // pool of reassembly contexts
  ip4_reass_t *pool;
  // number of contexts currently in use in this pool
  u32 reass_n;
  // monotonically increasing counter used to derive context ids
  u32 id_counter;
  // protects this thread's pool against cross-thread access
  clib_spinlock_t lock;
} ip4_reass_per_thread_t;
159
// global state of the IPv4 reassembly feature
typedef struct
{
  // IPv4 config
  u32 timeout_ms;		// configured timeout in milliseconds
  f64 timeout;			// same timeout converted to seconds
  u32 expire_walk_interval_ms;	// period of the expiry walk process
  // maximum number of fragments in one reassembly
  u32 max_reass_len;
  // maximum number of reassemblies
  u32 max_reass_n;

  // IPv4 runtime
  clib_bihash_16_8_t hash;	// key -> (thread, pool index) lookup table
  // per-thread data
  ip4_reass_per_thread_t *per_thread_data;

  // convenience
  vlib_main_t *vlib_main;

  // node index of ip4-drop node
  u32 ip4_drop_idx;
  u32 ip4_reass_expire_node_idx;

  /** Worker handoff */
  u32 fq_index;			// frame queue for the non-feature node
  u32 fq_feature_index;		// frame queue for the feature node
} ip4_reass_main_t;
187
extern ip4_reass_main_t ip4_reass_main;

// single global instance; defined only in the default march variant so that
// multiarch builds link exactly one copy
#ifndef CLIB_MARCH_VARIANT
ip4_reass_main_t ip4_reass_main;
#endif /* CLIB_MARCH_VARIANT */
Klement Sekera75e7d132017-09-20 08:26:30 +0200193
// next-node indices of the reassembly nodes
typedef enum
{
  IP4_REASSEMBLY_NEXT_INPUT,	// feed reassembled packet back to ip4-input
  IP4_REASSEMBLY_NEXT_DROP,	// drop the buffer
  IP4_REASSEMBLY_NEXT_HANDOFF,	// hand buffer to the owning worker thread
  IP4_REASSEMBLY_N_NEXT,
} ip4_reass_next_t;
201
// kinds of operations recorded in packet traces
typedef enum
{
  RANGE_NEW,			// a new range was inserted into the chain
  RANGE_SHRINK,			// an existing range was trimmed
  RANGE_DISCARD,		// a range was discarded entirely
  RANGE_OVERLAP,		// an overlapping fragment was ignored
  FINALIZE,			// the reassembly was completed
  HANDOFF,			// the fragment was handed to another thread
} ip4_reass_trace_operation_e;
211
// snapshot of one fragment range, embedded in ip4_reass_trace_t
typedef struct
{
  u16 range_first;		// first byte offset covered by the range
  u16 range_last;		// last byte offset covered by the range
  u32 range_bi;			// buffer index heading the range
  i32 data_offset;		// offset of kept data within the fragment
  u32 data_len;			// number of bytes the range contributes
  u32 first_bi;			// first buffer of the whole reassembly
} ip4_reass_range_trace_t;
221
// per-packet trace record produced by ip4_reass_add_trace
typedef struct
{
  ip4_reass_trace_operation_e action;	// what happened to this fragment
  u32 reass_id;			// id of the reassembly context (~0 if none)
  ip4_reass_range_trace_t trace_range;	// details of the affected range
  u32 size_diff;		// bytes trimmed (RANGE_SHRINK only)
  u32 op_id;			// sequence number within the reassembly
  u32 thread_id;		// thread that recorded the trace
  u32 thread_id_to;		// destination thread (HANDOFF only)
  u32 fragment_first;		// first byte offset of the fragment
  u32 fragment_last;		// last byte offset of the fragment
  u32 total_data_len;		// bytes collected by the reassembly so far
} ip4_reass_trace_t;
235
Filip Tehlar26ea14e2019-03-11 05:30:21 -0700236extern vlib_node_registration_t ip4_reass_node;
237extern vlib_node_registration_t ip4_reass_node_feature;
238
Klement Sekera4c533132018-02-22 11:41:12 +0100239static void
Klement Sekera75e7d132017-09-20 08:26:30 +0200240ip4_reass_trace_details (vlib_main_t * vm, u32 bi,
241 ip4_reass_range_trace_t * trace)
242{
243 vlib_buffer_t *b = vlib_get_buffer (vm, bi);
244 vnet_buffer_opaque_t *vnb = vnet_buffer (b);
245 trace->range_first = vnb->ip.reass.range_first;
246 trace->range_last = vnb->ip.reass.range_last;
Klement Sekerad0f70a32018-12-14 17:24:13 +0100247 trace->data_offset = ip4_reass_buffer_get_data_offset (b);
248 trace->data_len = ip4_reass_buffer_get_data_len (b);
Klement Sekera75e7d132017-09-20 08:26:30 +0200249 trace->range_bi = bi;
250}
251
Klement Sekera4c533132018-02-22 11:41:12 +0100252static u8 *
Klement Sekera75e7d132017-09-20 08:26:30 +0200253format_ip4_reass_range_trace (u8 * s, va_list * args)
254{
255 ip4_reass_range_trace_t *trace = va_arg (*args, ip4_reass_range_trace_t *);
256 s = format (s, "range: [%u, %u], off %d, len %u, bi %u", trace->range_first,
257 trace->range_last, trace->data_offset, trace->data_len,
258 trace->range_bi);
259 return s;
260}
261
Filip Tehlar26ea14e2019-03-11 05:30:21 -0700262static u8 *
Klement Sekera75e7d132017-09-20 08:26:30 +0200263format_ip4_reass_trace (u8 * s, va_list * args)
264{
265 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
266 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
267 ip4_reass_trace_t *t = va_arg (*args, ip4_reass_trace_t *);
Klement Sekera630ab582019-07-19 09:14:19 +0000268 u32 indent = 0;
269 if (~0 != t->reass_id)
270 {
271 s = format (s, "reass id: %u, op id: %u, ", t->reass_id, t->op_id);
272 indent = format_get_indent (s);
273 s =
274 format (s,
275 "first bi: %u, data len: %u, ip/fragment[%u, %u]",
276 t->trace_range.first_bi, t->total_data_len, t->fragment_first,
277 t->fragment_last);
278 }
Klement Sekera75e7d132017-09-20 08:26:30 +0200279 switch (t->action)
280 {
281 case RANGE_SHRINK:
282 s = format (s, "\n%Ushrink %U by %u", format_white_space, indent,
283 format_ip4_reass_range_trace, &t->trace_range,
284 t->size_diff);
285 break;
286 case RANGE_DISCARD:
287 s = format (s, "\n%Udiscard %U", format_white_space, indent,
288 format_ip4_reass_range_trace, &t->trace_range);
289 break;
290 case RANGE_NEW:
291 s = format (s, "\n%Unew %U", format_white_space, indent,
292 format_ip4_reass_range_trace, &t->trace_range);
293 break;
294 case RANGE_OVERLAP:
295 s = format (s, "\n%Uoverlapping/ignored %U", format_white_space, indent,
296 format_ip4_reass_range_trace, &t->trace_range);
297 break;
298 case FINALIZE:
299 s = format (s, "\n%Ufinalize reassembly", format_white_space, indent);
300 break;
Klement Sekera630ab582019-07-19 09:14:19 +0000301 case HANDOFF:
302 s =
303 format (s, "handoff from thread #%u to thread #%u", t->thread_id,
304 t->thread_id_to);
305 break;
Klement Sekera75e7d132017-09-20 08:26:30 +0200306 }
307 return s;
308}
309
Klement Sekera4c533132018-02-22 11:41:12 +0100310static void
Klement Sekera75e7d132017-09-20 08:26:30 +0200311ip4_reass_add_trace (vlib_main_t * vm, vlib_node_runtime_t * node,
Klement Sekera630ab582019-07-19 09:14:19 +0000312 ip4_reass_main_t * rm, u32 reass_id, u32 op_id,
313 u32 bi, u32 first_bi, u32 data_len,
314 ip4_reass_trace_operation_e action, u32 size_diff,
315 u32 thread_id_to)
Klement Sekera75e7d132017-09-20 08:26:30 +0200316{
317 vlib_buffer_t *b = vlib_get_buffer (vm, bi);
318 vnet_buffer_opaque_t *vnb = vnet_buffer (b);
319 ip4_reass_trace_t *t = vlib_add_trace (vm, node, b, sizeof (t[0]));
Klement Sekera630ab582019-07-19 09:14:19 +0000320 t->reass_id = reass_id;
Klement Sekera75e7d132017-09-20 08:26:30 +0200321 t->action = action;
322 ip4_reass_trace_details (vm, bi, &t->trace_range);
323 t->size_diff = size_diff;
Klement Sekera630ab582019-07-19 09:14:19 +0000324 t->op_id = op_id;
325 t->thread_id = vm->thread_index;
326 t->thread_id_to = thread_id_to;
Klement Sekera75e7d132017-09-20 08:26:30 +0200327 t->fragment_first = vnb->ip.reass.fragment_first;
328 t->fragment_last = vnb->ip.reass.fragment_last;
Klement Sekera630ab582019-07-19 09:14:19 +0000329 t->trace_range.first_bi = first_bi;
330 t->total_data_len = data_len;
Klement Sekera75e7d132017-09-20 08:26:30 +0200331#if 0
332 static u8 *s = NULL;
333 s = format (s, "%U", format_ip4_reass_trace, NULL, NULL, t);
334 printf ("%.*s\n", vec_len (s), s);
335 fflush (stdout);
336 vec_reset_length (s);
337#endif
338}
339
Klement Sekera630ab582019-07-19 09:14:19 +0000340always_inline void
341ip4_reass_free_ctx (ip4_reass_per_thread_t * rt, ip4_reass_t * reass)
342{
343 pool_put (rt->pool, reass);
344 --rt->reass_n;
345}
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -0800346
Klement Sekera4c533132018-02-22 11:41:12 +0100347always_inline void
Klement Sekera630ab582019-07-19 09:14:19 +0000348ip4_reass_free (vlib_main_t * vm, ip4_reass_main_t * rm,
349 ip4_reass_per_thread_t * rt, ip4_reass_t * reass)
Klement Sekera75e7d132017-09-20 08:26:30 +0200350{
Klement Sekera8dcfed52018-06-28 11:16:15 +0200351 clib_bihash_kv_16_8_t kv;
Klement Sekera75e7d132017-09-20 08:26:30 +0200352 kv.key[0] = reass->key.as_u64[0];
353 kv.key[1] = reass->key.as_u64[1];
Klement Sekera8dcfed52018-06-28 11:16:15 +0200354 clib_bihash_add_del_16_8 (&rm->hash, &kv, 0);
Klement Sekera630ab582019-07-19 09:14:19 +0000355 return ip4_reass_free_ctx (rt, reass);
Klement Sekera75e7d132017-09-20 08:26:30 +0200356}
357
Klement Sekera4c533132018-02-22 11:41:12 +0100358always_inline void
Klement Sekera21aa8f12019-05-20 12:27:33 +0200359ip4_reass_drop_all (vlib_main_t * vm, vlib_node_runtime_t * node,
Klement Sekerae8498652019-06-17 12:23:15 +0000360 ip4_reass_main_t * rm, ip4_reass_t * reass)
Klement Sekera75e7d132017-09-20 08:26:30 +0200361{
362 u32 range_bi = reass->first_bi;
363 vlib_buffer_t *range_b;
364 vnet_buffer_opaque_t *range_vnb;
Klement Sekeraf883f6a2019-02-13 11:01:32 +0100365 u32 *to_free = NULL;
Klement Sekera75e7d132017-09-20 08:26:30 +0200366 while (~0 != range_bi)
367 {
368 range_b = vlib_get_buffer (vm, range_bi);
369 range_vnb = vnet_buffer (range_b);
370 u32 bi = range_bi;
371 while (~0 != bi)
372 {
Klement Sekeraf883f6a2019-02-13 11:01:32 +0100373 vec_add1 (to_free, bi);
Klement Sekera75e7d132017-09-20 08:26:30 +0200374 vlib_buffer_t *b = vlib_get_buffer (vm, bi);
375 if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
376 {
377 bi = b->next_buffer;
378 b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
379 }
380 else
381 {
382 bi = ~0;
383 }
384 }
385 range_bi = range_vnb->ip.reass.next_range_bi;
386 }
Klement Sekera21aa8f12019-05-20 12:27:33 +0200387 /* send to next_error_index */
Klement Sekerae8498652019-06-17 12:23:15 +0000388 if (~0 != reass->error_next_index)
Klement Sekera21aa8f12019-05-20 12:27:33 +0200389 {
390 u32 n_left_to_next, *to_next, next_index;
391
392 next_index = reass->error_next_index;
393 u32 bi = ~0;
394
395 while (vec_len (to_free) > 0)
396 {
397 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
398
399 while (vec_len (to_free) > 0 && n_left_to_next > 0)
400 {
401 bi = vec_pop (to_free);
402
403 if (~0 != bi)
404 {
405 to_next[0] = bi;
406 to_next += 1;
407 n_left_to_next -= 1;
Klement Sekera21aa8f12019-05-20 12:27:33 +0200408 }
409 }
410 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
411 }
412 }
413 else
414 {
415 vlib_buffer_free (vm, to_free, vec_len (to_free));
416 }
Klement Sekera75e7d132017-09-20 08:26:30 +0200417}
418
/**
 * @brief Look up the reassembly context for the key in @a kv, creating a
 * new one if none exists.
 *
 * @param kv          in: lookup key; out: value (context index + owner thread)
 * @param do_handoff  set to 1 when the context is owned by another thread
 *                    and the caller must hand the fragment off
 * @return the context, or NULL if the per-thread context limit is hit or
 *         the hash insert fails
 */
static ip4_reass_t *
ip4_reass_find_or_create (vlib_main_t * vm, vlib_node_runtime_t * node,
			  ip4_reass_main_t * rm, ip4_reass_per_thread_t * rt,
			  ip4_reass_kv_t * kv, u8 * do_handoff)
{
  ip4_reass_t *reass;
  f64 now;

again:

  reass = NULL;
  now = vlib_time_now (vm);
  /* search overwrites kv with the stored value on hit */
  if (!clib_bihash_search_16_8
      (&rm->hash, (clib_bihash_kv_16_8_t *) kv, (clib_bihash_kv_16_8_t *) kv))
    {
      reass =
	pool_elt_at_index (rm->per_thread_data
			   [kv->v.memory_owner_thread_index].pool,
			   kv->v.reass_index);
      /* context lives on another worker - caller must hand off */
      if (vm->thread_index != reass->memory_owner_thread_index)
	{
	  *do_handoff = 1;
	  return reass;
	}

      /* stale context - drop its buffers and start from scratch */
      if (now > reass->last_heard + rm->timeout)
	{
	  ip4_reass_drop_all (vm, node, rm, reass);
	  ip4_reass_free (vm, rm, rt, reass);
	  reass = NULL;
	}
    }

  if (reass)
    {
      reass->last_heard = now;
      return reass;
    }

  /* enforce the configured cap on concurrent reassemblies */
  if (rt->reass_n >= rm->max_reass_n)
    {
      reass = NULL;
      return reass;
    }
  else
    {
      pool_get (rt->pool, reass);
      clib_memset (reass, 0, sizeof (*reass));
      /* id encodes the owning thread so it is globally unique */
      reass->id = ((u64) vm->thread_index * 1000000000) + rt->id_counter;
      reass->memory_owner_thread_index = vm->thread_index;
      ++rt->id_counter;
      reass->first_bi = ~0;
      reass->last_packet_octet = ~0;
      reass->data_len = 0;
      reass->next_index = ~0;
      reass->error_next_index = ~0;
      ++rt->reass_n;
    }

  reass->key.as_u64[0] = ((clib_bihash_kv_16_8_t *) kv)->key[0];
  reass->key.as_u64[1] = ((clib_bihash_kv_16_8_t *) kv)->key[1];
  kv->v.reass_index = (reass - rt->pool);
  kv->v.memory_owner_thread_index = vm->thread_index;
  reass->last_heard = now;

  /* is_add == 2: add but fail with -2 if the key is already present */
  int rv =
    clib_bihash_add_del_16_8 (&rm->hash, (clib_bihash_kv_16_8_t *) kv, 2);
  if (rv)
    {
      ip4_reass_free_ctx (rt, reass);
      reass = NULL;
      // if other worker created a context already work with the other copy
      if (-2 == rv)
	goto again;
    }

  return reass;
}
497
/**
 * @brief Glue the collected fragment ranges into one reassembled packet.
 *
 * Walks the range chain, trimming each buffer chain to exactly the bytes
 * its range contributes (keeping the IPv4 header only on the first
 * buffer), links the kept buffers into a single chain, rewrites the IPv4
 * header (length, cleared fragment fields, checksum) and linearizes the
 * result. On success the context is freed and *bi0/*next0/*error0
 * describe the finished packet.
 *
 * @return IP4_REASS_RC_OK on success, IP4_REASS_RC_NO_BUF if linearize
 *         runs out of buffers, IP4_REASS_RC_INTERNAL_ERROR on any
 *         consistency-check failure
 */
always_inline ip4_reass_rc_t
ip4_reass_finalize (vlib_main_t * vm, vlib_node_runtime_t * node,
		    ip4_reass_main_t * rm, ip4_reass_per_thread_t * rt,
		    ip4_reass_t * reass, u32 * bi0, u32 * next0, u32 * error0,
		    bool is_custom_app)
{
  vlib_buffer_t *first_b = vlib_get_buffer (vm, reass->first_bi);
  vlib_buffer_t *last_b = NULL;
  u32 sub_chain_bi = reass->first_bi;
  u32 total_length = 0;
  u32 buf_cnt = 0;
  /* one iteration per range; each range may span several chained buffers */
  do
    {
      u32 tmp_bi = sub_chain_bi;
      vlib_buffer_t *tmp = vlib_get_buffer (vm, tmp_bi);
      ip4_header_t *ip = vlib_buffer_get_current (tmp);
      vnet_buffer_opaque_t *vnb = vnet_buffer (tmp);
      /* sanity check on range vs fragment bookkeeping
       * NOTE(review): '&&' only errors when BOTH invariants fail - looks
       * like '||' was intended; confirm against upstream before changing */
      if (!(vnb->ip.reass.range_first >= vnb->ip.reass.fragment_first) &&
	  !(vnb->ip.reass.range_last > vnb->ip.reass.fragment_first))
	{
	  return IP4_REASS_RC_INTERNAL_ERROR;
	}

      u32 data_len = ip4_reass_buffer_get_data_len (tmp);
      u32 trim_front =
	ip4_header_bytes (ip) + ip4_reass_buffer_get_data_offset (tmp);
      u32 trim_end =
	vlib_buffer_length_in_chain (vm, tmp) - trim_front - data_len;
      if (tmp_bi == reass->first_bi)
	{
	  /* first buffer - keep ip4 header */
	  if (0 != ip4_reass_buffer_get_data_offset (tmp))
	    {
	      return IP4_REASS_RC_INTERNAL_ERROR;
	    }
	  trim_front = 0;
	  trim_end = vlib_buffer_length_in_chain (vm, tmp) - data_len -
	    ip4_header_bytes (ip);
	  if (!(vlib_buffer_length_in_chain (vm, tmp) - trim_end > 0))
	    {
	      return IP4_REASS_RC_INTERNAL_ERROR;
	    }
	}
      u32 keep_data =
	vlib_buffer_length_in_chain (vm, tmp) - trim_front - trim_end;
      /* walk this range's buffer chain, trimming front, keeping
       * keep_data bytes and freeing everything past them */
      while (1)
	{
	  ++buf_cnt;
	  if (trim_front)
	    {
	      if (trim_front > tmp->current_length)
		{
		  /* drop whole buffer */
		  u32 to_be_freed_bi = tmp_bi;
		  trim_front -= tmp->current_length;
		  if (!(tmp->flags & VLIB_BUFFER_NEXT_PRESENT))
		    {
		      return IP4_REASS_RC_INTERNAL_ERROR;
		    }
		  tmp->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
		  tmp_bi = tmp->next_buffer;
		  tmp->next_buffer = 0;
		  tmp = vlib_get_buffer (vm, tmp_bi);
		  vlib_buffer_free_one (vm, to_be_freed_bi);
		  continue;
		}
	      else
		{
		  vlib_buffer_advance (tmp, trim_front);
		  trim_front = 0;
		}
	    }
	  if (keep_data)
	    {
	      /* append this buffer to the reassembled chain */
	      if (last_b)
		{
		  last_b->flags |= VLIB_BUFFER_NEXT_PRESENT;
		  last_b->next_buffer = tmp_bi;
		}
	      last_b = tmp;
	      if (keep_data <= tmp->current_length)
		{
		  tmp->current_length = keep_data;
		  keep_data = 0;
		}
	      else
		{
		  keep_data -= tmp->current_length;
		  if (!(tmp->flags & VLIB_BUFFER_NEXT_PRESENT))
		    {
		      return IP4_REASS_RC_INTERNAL_ERROR;
		    }
		}
	      total_length += tmp->current_length;
	      if (tmp->flags & VLIB_BUFFER_NEXT_PRESENT)
		{
		  tmp_bi = tmp->next_buffer;
		  tmp = vlib_get_buffer (vm, tmp->next_buffer);
		}
	      else
		{
		  break;
		}
	    }
	  else
	    {
	      /* all kept bytes consumed - free the remainder of the chain */
	      u32 to_be_freed_bi = tmp_bi;
	      if (reass->first_bi == tmp_bi)
		{
		  return IP4_REASS_RC_INTERNAL_ERROR;
		}
	      if (tmp->flags & VLIB_BUFFER_NEXT_PRESENT)
		{
		  tmp->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
		  tmp_bi = tmp->next_buffer;
		  tmp->next_buffer = 0;
		  tmp = vlib_get_buffer (vm, tmp_bi);
		  vlib_buffer_free_one (vm, to_be_freed_bi);
		}
	      else
		{
		  tmp->next_buffer = 0;
		  vlib_buffer_free_one (vm, to_be_freed_bi);
		  break;
		}
	    }
	}
      sub_chain_bi =
	vnet_buffer (vlib_get_buffer (vm, sub_chain_bi))->ip.
	reass.next_range_bi;
    }
  while (~0 != sub_chain_bi);

  if (!last_b)
    {
      return IP4_REASS_RC_INTERNAL_ERROR;
    }
  last_b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;

  if (total_length < first_b->current_length)
    {
      return IP4_REASS_RC_INTERNAL_ERROR;
    }
  total_length -= first_b->current_length;
  first_b->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
  first_b->total_length_not_including_first_buffer = total_length;
  /* rewrite the IPv4 header to describe the whole reassembled packet */
  ip4_header_t *ip = vlib_buffer_get_current (first_b);
  ip->flags_and_fragment_offset = 0;
  ip->length = clib_host_to_net_u16 (first_b->current_length + total_length);
  ip->checksum = ip4_header_checksum (ip);
  if (!vlib_buffer_chain_linearize (vm, first_b))
    {
      return IP4_REASS_RC_NO_BUF;
    }
  // reset to reconstruct the mbuf linking
  first_b->flags &= ~VLIB_BUFFER_EXT_HDR_VALID;
  if (PREDICT_FALSE (first_b->flags & VLIB_BUFFER_IS_TRACED))
    {
      ip4_reass_add_trace (vm, node, rm, reass->id, reass->trace_op_counter,
			   reass->first_bi, reass->first_bi, reass->data_len,
			   FINALIZE, 0, ~0);
      ++reass->trace_op_counter;
#if 0
      // following code does a hexdump of packet fragments to stdout ...
      do
	{
	  u32 bi = reass->first_bi;
	  u8 *s = NULL;
	  while (~0 != bi)
	    {
	      vlib_buffer_t *b = vlib_get_buffer (vm, bi);
	      s = format (s, "%u: %U\n", bi, format_hexdump,
			  vlib_buffer_get_current (b), b->current_length);
	      if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
		{
		  bi = b->next_buffer;
		}
	      else
		{
		  break;
		}
	    }
	  printf ("%.*s\n", vec_len (s), s);
	  fflush (stdout);
	  vec_free (s);
	}
      while (0);
#endif
    }
  *bi0 = reass->first_bi;
  if (!is_custom_app)
    {
      *next0 = IP4_REASSEMBLY_NEXT_INPUT;
    }
  else
    {
      *next0 = reass->next_index;
    }
  vnet_buffer (first_b)->ip.reass.estimated_mtu = reass->min_fragment_length;
  *error0 = IP4_ERROR_NONE;
  ip4_reass_free (vm, rm, rt, reass);
  reass = NULL;
  return IP4_REASS_RC_OK;
}
702
/**
 * @brief Insert the range headed by @a new_next_bi into the context's
 * sorted range chain after @a prev_range_bi (~0 means insert at head),
 * and account its data bytes in reass->data_len.
 */
always_inline ip4_reass_rc_t
ip4_reass_insert_range_in_chain (vlib_main_t * vm,
				 ip4_reass_main_t * rm,
				 ip4_reass_per_thread_t * rt,
				 ip4_reass_t * reass,
				 u32 prev_range_bi, u32 new_next_bi)
{
  vlib_buffer_t *new_next_b = vlib_get_buffer (vm, new_next_bi);
  vnet_buffer_opaque_t *new_next_vnb = vnet_buffer (new_next_b);
  if (~0 != prev_range_bi)
    {
      /* splice after the predecessor */
      vlib_buffer_t *prev_b = vlib_get_buffer (vm, prev_range_bi);
      vnet_buffer_opaque_t *prev_vnb = vnet_buffer (prev_b);
      new_next_vnb->ip.reass.next_range_bi = prev_vnb->ip.reass.next_range_bi;
      prev_vnb->ip.reass.next_range_bi = new_next_bi;
    }
  else
    {
      /* new head of the chain */
      if (~0 != reass->first_bi)
	{
	  new_next_vnb->ip.reass.next_range_bi = reass->first_bi;
	}
      reass->first_bi = new_next_bi;
    }
  /* sanity check on range vs fragment bookkeeping
   * NOTE(review): '&&' only errors when BOTH invariants fail - '||' looks
   * intended; confirm against upstream before changing */
  vnet_buffer_opaque_t *vnb = vnet_buffer (new_next_b);
  if (!(vnb->ip.reass.range_first >= vnb->ip.reass.fragment_first) &&
      !(vnb->ip.reass.range_last > vnb->ip.reass.fragment_first))
    {
      return IP4_REASS_RC_INTERNAL_ERROR;
    }
  reass->data_len += ip4_reass_buffer_get_data_len (new_next_b);
  return IP4_REASS_RC_OK;
}
736
/**
 * @brief Unlink the range headed by @a discard_bi from the context's
 * range chain (@a prev_range_bi ~0 means it is the head), subtract its
 * bytes from reass->data_len and free all of its buffers.
 */
always_inline ip4_reass_rc_t
ip4_reass_remove_range_from_chain (vlib_main_t * vm,
				   vlib_node_runtime_t * node,
				   ip4_reass_main_t * rm,
				   ip4_reass_t * reass, u32 prev_range_bi,
				   u32 discard_bi)
{
  vlib_buffer_t *discard_b = vlib_get_buffer (vm, discard_bi);
  vnet_buffer_opaque_t *discard_vnb = vnet_buffer (discard_b);
  if (~0 != prev_range_bi)
    {
      vlib_buffer_t *prev_b = vlib_get_buffer (vm, prev_range_bi);
      vnet_buffer_opaque_t *prev_vnb = vnet_buffer (prev_b);
      /* predecessor must actually point at the range being removed */
      if (!(prev_vnb->ip.reass.next_range_bi == discard_bi))
	{
	  return IP4_REASS_RC_INTERNAL_ERROR;
	}
      prev_vnb->ip.reass.next_range_bi = discard_vnb->ip.reass.next_range_bi;
    }
  else
    {
      reass->first_bi = discard_vnb->ip.reass.next_range_bi;
    }
  /* sanity check on range vs fragment bookkeeping
   * NOTE(review): '&&' only errors when BOTH invariants fail - '||' looks
   * intended; confirm against upstream before changing */
  vnet_buffer_opaque_t *vnb = vnet_buffer (discard_b);
  if (!(vnb->ip.reass.range_first >= vnb->ip.reass.fragment_first) &&
      !(vnb->ip.reass.range_last > vnb->ip.reass.fragment_first))
    {
      return IP4_REASS_RC_INTERNAL_ERROR;
    }
  reass->data_len -= ip4_reass_buffer_get_data_len (discard_b);
  /* free every buffer chained under this range, tracing each discard */
  while (1)
    {
      u32 to_be_freed_bi = discard_bi;
      if (PREDICT_FALSE (discard_b->flags & VLIB_BUFFER_IS_TRACED))
	{
	  ip4_reass_add_trace (vm, node, rm, reass->id,
			       reass->trace_op_counter, discard_bi,
			       reass->first_bi, reass->data_len,
			       RANGE_DISCARD, 0, ~0);
	  ++reass->trace_op_counter;
	}
      if (discard_b->flags & VLIB_BUFFER_NEXT_PRESENT)
	{
	  discard_b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
	  discard_bi = discard_b->next_buffer;
	  discard_b->next_buffer = 0;
	  discard_b = vlib_get_buffer (vm, discard_bi);
	  vlib_buffer_free_one (vm, to_be_freed_bi);
	}
      else
	{
	  discard_b->next_buffer = 0;
	  vlib_buffer_free_one (vm, to_be_freed_bi);
	  break;
	}
    }
  return IP4_REASS_RC_OK;
}
795
/**
 * @brief Insert one fragment (*bi0) into an existing reassembly context.
 *
 * Parses the fragment's IPv4 header, records its byte range in the buffer
 * metadata, then walks the chain of already-buffered ranges (linked via
 * vnet_buffer()->ip.reass.next_range_bi, starting at reass->first_bi) to
 * find where the fragment fits.  Partially overlapping ranges are trimmed
 * or discarded; a fragment fully contained in an existing range is dropped
 * as a duplicate.
 *
 * Called with the owning per-thread lock held (see ip4_reassembly_inline).
 *
 * @param bi0 in/out - fragment buffer index; set to ~0 when the buffer has
 *        been consumed (stored in the chain or turned into the final packet
 *        chain head elsewhere)
 * @param next0/error0 out - set on the drop/duplicate path
 * @param is_custom_app if true, (error_)next_index come from buffer metadata
 * @param handoff_thread_idx out - written on the finalize path with the
 *        thread that should send the reassembled packet
 * @return IP4_REASS_RC_OK on success; IP4_REASS_RC_HANDOFF when finalized
 *         on a thread other than the memory owner; other RC_* codes
 *         propagated from the chain helpers / finalize
 */
always_inline ip4_reass_rc_t
ip4_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
		  ip4_reass_main_t * rm, ip4_reass_per_thread_t * rt,
		  ip4_reass_t * reass, u32 * bi0, u32 * next0, u32 * error0,
		  bool is_custom_app, u32 * handoff_thread_idx)
{
  ip4_reass_rc_t rc = IP4_REASS_RC_OK;
  int consumed = 0;
  vlib_buffer_t *fb = vlib_get_buffer (vm, *bi0);
  ip4_header_t *fip = vlib_buffer_get_current (fb);
  vnet_buffer_opaque_t *fvnb = vnet_buffer (fb);
  if (is_custom_app)
    {
      // store (error_)next_index before it's overwritten
      reass->next_index = fvnb->ip.reass.next_index;
      reass->error_next_index = fvnb->ip.reass.error_next_index;
    }
  /* fragment geometry in payload-octet terms, inclusive [first, last] */
  const u32 fragment_first = ip4_get_fragment_offset_bytes (fip);
  const u32 fragment_length =
    clib_net_to_host_u16 (fip->length) - ip4_header_bytes (fip);
  const u32 fragment_last = fragment_first + fragment_length - 1;
  fvnb->ip.reass.fragment_first = fragment_first;
  fvnb->ip.reass.fragment_last = fragment_last;
  int more_fragments = ip4_get_fragment_more (fip);
  u32 candidate_range_bi = reass->first_bi;
  u32 prev_range_bi = ~0;
  fvnb->ip.reass.range_first = fragment_first;
  fvnb->ip.reass.range_last = fragment_last;
  fvnb->ip.reass.next_range_bi = ~0;
  if (!more_fragments)
    {
      /* MF not set => this fragment carries the final octet of the packet */
      reass->last_packet_octet = fragment_last;
    }
  if (~0 == reass->first_bi)
    {
      // starting a new reassembly
      rc =
	ip4_reass_insert_range_in_chain (vm, rm, rt, reass, prev_range_bi,
					 *bi0);
      if (IP4_REASS_RC_OK != rc)
	{
	  return rc;
	}
      if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
	{
	  ip4_reass_add_trace (vm, node, rm, reass->id,
			       reass->trace_op_counter, *bi0, reass->first_bi,
			       reass->data_len, RANGE_NEW, 0, ~0);
	  ++reass->trace_op_counter;
	}
      *bi0 = ~0;
      reass->min_fragment_length = clib_net_to_host_u16 (fip->length);
      reass->fragments_n = 1;
      return IP4_REASS_RC_OK;
    }
  /* track the smallest fragment seen so far, capped by the estimated MTU */
  reass->min_fragment_length = clib_min (clib_net_to_host_u16 (fip->length),
					 fvnb->ip.reass.estimated_mtu);
  /* walk existing ranges until the fragment is placed, ignored or the
   * chain is exhausted */
  while (~0 != candidate_range_bi)
    {
      vlib_buffer_t *candidate_b = vlib_get_buffer (vm, candidate_range_bi);
      vnet_buffer_opaque_t *candidate_vnb = vnet_buffer (candidate_b);
      if (fragment_first > candidate_vnb->ip.reass.range_last)
	{
	  // this fragments starts after candidate range
	  prev_range_bi = candidate_range_bi;
	  candidate_range_bi = candidate_vnb->ip.reass.next_range_bi;
	  if (candidate_vnb->ip.reass.range_last < fragment_last &&
	      ~0 == candidate_range_bi)
	    {
	      // special case - this fragment falls beyond all known ranges
	      rc =
		ip4_reass_insert_range_in_chain (vm, rm, rt, reass,
						 prev_range_bi, *bi0);
	      if (IP4_REASS_RC_OK != rc)
		{
		  return rc;
		}
	      consumed = 1;
	      break;
	    }
	  continue;
	}
      if (fragment_last < candidate_vnb->ip.reass.range_first)
	{
	  // this fragment ends before candidate range without any overlap
	  rc =
	    ip4_reass_insert_range_in_chain (vm, rm, rt, reass, prev_range_bi,
					     *bi0);
	  if (IP4_REASS_RC_OK != rc)
	    {
	      return rc;
	    }
	  consumed = 1;
	}
      else
	{
	  if (fragment_first >= candidate_vnb->ip.reass.range_first &&
	      fragment_last <= candidate_vnb->ip.reass.range_last)
	    {
	      // this fragment is a (sub)part of existing range, ignore it
	      if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
		{
		  ip4_reass_add_trace (vm, node, rm, reass->id,
				       reass->trace_op_counter, *bi0,
				       reass->first_bi, reass->data_len,
				       RANGE_OVERLAP, 0, ~0);
		  ++reass->trace_op_counter;
		}
	      break;
	    }
	  int discard_candidate = 0;
	  if (fragment_first < candidate_vnb->ip.reass.range_first)
	    {
	      /* new fragment overlaps the front of the candidate range */
	      u32 overlap =
		fragment_last - candidate_vnb->ip.reass.range_first + 1;
	      if (overlap < ip4_reass_buffer_get_data_len (candidate_b))
		{
		  /* shrink candidate from the front and insert new range
		   * before it */
		  candidate_vnb->ip.reass.range_first += overlap;
		  if (reass->data_len < overlap)
		    {
		      return IP4_REASS_RC_INTERNAL_ERROR;
		    }
		  reass->data_len -= overlap;
		  if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
		    {
		      ip4_reass_add_trace (vm, node, rm, reass->id,
					   reass->trace_op_counter,
					   candidate_range_bi,
					   reass->first_bi, reass->data_len,
					   RANGE_SHRINK, 0, ~0);
		      ++reass->trace_op_counter;
		    }
		  rc =
		    ip4_reass_insert_range_in_chain (vm, rm, rt, reass,
						     prev_range_bi, *bi0);
		  if (IP4_REASS_RC_OK != rc)
		    {
		      return rc;
		    }
		  consumed = 1;
		}
	      else
		{
		  discard_candidate = 1;
		}
	    }
	  else if (fragment_last > candidate_vnb->ip.reass.range_last)
	    {
	      /* new fragment overlaps the tail of the candidate range */
	      u32 overlap =
		candidate_vnb->ip.reass.range_last - fragment_first + 1;
	      if (overlap < ip4_reass_buffer_get_data_len (candidate_b))
		{
		  /* trim the new fragment's front instead and keep probing */
		  fvnb->ip.reass.range_first += overlap;
		  if (~0 != candidate_vnb->ip.reass.next_range_bi)
		    {
		      prev_range_bi = candidate_range_bi;
		      candidate_range_bi =
			candidate_vnb->ip.reass.next_range_bi;
		      continue;
		    }
		  else
		    {
		      // special case - last range discarded
		      rc =
			ip4_reass_insert_range_in_chain (vm, rm, rt, reass,
							 candidate_range_bi,
							 *bi0);
		      if (IP4_REASS_RC_OK != rc)
			{
			  return rc;
			}
		      consumed = 1;
		    }
		}
	      else
		{
		  discard_candidate = 1;
		}
	    }
	  else
	    {
	      /* new fragment fully covers the candidate range */
	      discard_candidate = 1;
	    }
	  if (discard_candidate)
	    {
	      u32 next_range_bi = candidate_vnb->ip.reass.next_range_bi;
	      // discard candidate range, probe next range
	      rc =
		ip4_reass_remove_range_from_chain (vm, node, rm, reass,
						   prev_range_bi,
						   candidate_range_bi);
	      if (IP4_REASS_RC_OK != rc)
		{
		  return rc;
		}
	      if (~0 != next_range_bi)
		{
		  candidate_range_bi = next_range_bi;
		  continue;
		}
	      else
		{
		  // special case - last range discarded
		  rc =
		    ip4_reass_insert_range_in_chain (vm, rm, rt, reass,
						     prev_range_bi, *bi0);
		  if (IP4_REASS_RC_OK != rc)
		    {
		      return rc;
		    }
		  consumed = 1;
		}
	    }
	}
      break;
    }
  ++reass->fragments_n;
  if (consumed)
    {
      if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
	{
	  ip4_reass_add_trace (vm, node, rm, reass->id,
			       reass->trace_op_counter, *bi0, reass->first_bi,
			       reass->data_len, RANGE_NEW, 0, ~0);
	  ++reass->trace_op_counter;
	}
    }
  if (~0 != reass->last_packet_octet &&
      reass->data_len == reass->last_packet_octet + 1)
    {
      /* all octets up to and including the final fragment's last octet are
       * buffered - the packet is complete */
      *handoff_thread_idx = reass->sendout_thread_index;
      rc =
	ip4_reass_finalize (vm, node, rm, rt, reass, bi0, next0, error0,
			    is_custom_app);
      if (IP4_REASS_RC_OK == rc
	  && reass->memory_owner_thread_index != reass->sendout_thread_index)
	{
	  /* finalized on the memory owner, but the first fragment arrived on
	   * a different thread - hand the result off for sending */
	  rc = IP4_REASS_RC_HANDOFF;
	}
    }
  else
    {
      if (consumed)
	{
	  *bi0 = ~0;
	  if (reass->fragments_n > rm->max_reass_len)
	    {
	      rc = IP4_REASS_RC_TOO_MANY_FRAGMENTS;
	    }
	}
      else
	{
	  /* fragment neither stored nor finalized => duplicate, drop it */
	  *next0 = IP4_REASSEMBLY_NEXT_DROP;
	  *error0 = IP4_ERROR_REASS_DUPLICATE_FRAGMENT;
	}
    }
  return rc;
}
1054
/**
 * @brief Common dispatch function behind all ip4 reassembly graph nodes.
 *
 * Processes one frame of buffers under the calling thread's per-thread
 * reassembly lock.  Non-fragmented packets pass straight through; fragments
 * are validated, matched to (or allocated) a reassembly context keyed by
 * (fib index, src, dst, fragment id, protocol), and fed to
 * ip4_reass_update.  Buffers may be redirected to the handoff node when a
 * different thread owns the reassembly context or should send the result.
 *
 * @param is_feature running on the ip4-unicast feature arc - on success the
 *        next node comes from vnet_feature_next
 * @param is_custom_app next/error node indices come from buffer metadata
 * @return number of vectors in the frame
 */
always_inline uword
ip4_reassembly_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
		       vlib_frame_t * frame, bool is_feature,
		       bool is_custom_app)
{
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left_from, n_left_to_next, *to_next, next_index;
  ip4_reass_main_t *rm = &ip4_reass_main;
  ip4_reass_per_thread_t *rt = &rm->per_thread_data[vm->thread_index];
  /* serialize against the expiration walker touching this thread's pool;
   * held across the whole frame */
  clib_spinlock_lock (&rt->lock);

  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  u32 bi0;
	  vlib_buffer_t *b0;
	  u32 next0;
	  u32 error0 = IP4_ERROR_NONE;

	  bi0 = from[0];
	  b0 = vlib_get_buffer (vm, bi0);

	  ip4_header_t *ip0 = vlib_buffer_get_current (b0);
	  if (!ip4_get_fragment_more (ip0) && !ip4_get_fragment_offset (ip0))
	    {
	      // this is a whole packet - no fragmentation
	      if (!is_custom_app)
		{
		  next0 = IP4_REASSEMBLY_NEXT_INPUT;
		}
	      else
		{
		  next0 = vnet_buffer (b0)->ip.reass.next_index;
		}
	    }
	  else
	    {
	      const u32 fragment_first = ip4_get_fragment_offset_bytes (ip0);
	      const u32 fragment_length =
		clib_net_to_host_u16 (ip0->length) - ip4_header_bytes (ip0);
	      const u32 fragment_last = fragment_first + fragment_length - 1;
	      /* sanity-check fragment geometry before touching any state */
	      if (fragment_first > fragment_last || fragment_first + fragment_length > UINT16_MAX - 20 || (fragment_length < 8 && ip4_get_fragment_more (ip0)))	// 8 is minimum frag length per RFC 791
		{
		  next0 = IP4_REASSEMBLY_NEXT_DROP;
		  error0 = IP4_ERROR_REASS_MALFORMED_PACKET;
		}
	      else
		{
		  /* 16-byte lookup key: fib index + src address in the first
		   * u64, dst address + fragment id + protocol in the second */
		  ip4_reass_kv_t kv;
		  u8 do_handoff = 0;

		  kv.k.as_u64[0] =
		    (u64) vec_elt (ip4_main.fib_index_by_sw_if_index,
				   vnet_buffer (b0)->sw_if_index[VLIB_RX]) |
		    (u64) ip0->src_address.as_u32 << 32;
		  kv.k.as_u64[1] =
		    (u64) ip0->dst_address.as_u32 |
		    (u64) ip0->fragment_id << 32 | (u64) ip0->protocol << 48;

		  ip4_reass_t *reass =
		    ip4_reass_find_or_create (vm, node, rm, rt, &kv,
					      &do_handoff);
		  if (reass)
		    {
		      const u32 fragment_first =
			ip4_get_fragment_offset_bytes (ip0);
		      if (0 == fragment_first)
			{
			  /* the thread seeing the first fragment is the one
			   * that will send the reassembled packet */
			  reass->sendout_thread_index = vm->thread_index;
			}
		    }
		  if (PREDICT_FALSE (do_handoff))
		    {
		      /* context is owned by another thread - hand off */
		      next0 = IP4_REASSEMBLY_NEXT_HANDOFF;
		      if (is_feature)
			vnet_buffer (b0)->ip.
			  reass.owner_feature_thread_index =
			  kv.v.memory_owner_thread_index;
		      else
			vnet_buffer (b0)->ip.reass.owner_thread_index =
			  kv.v.memory_owner_thread_index;
		    }
		  else if (reass)
		    {
		      u32 handoff_thread_idx;
		      switch (ip4_reass_update
			      (vm, node, rm, rt, reass, &bi0, &next0,
			       &error0, is_custom_app, &handoff_thread_idx))
			{
			case IP4_REASS_RC_OK:
			  /* nothing to do here */
			  break;
			case IP4_REASS_RC_HANDOFF:
			  /* finalized, but another thread sends it out */
			  next0 = IP4_REASSEMBLY_NEXT_HANDOFF;
			  b0 = vlib_get_buffer (vm, bi0);
			  if (is_feature)
			    vnet_buffer (b0)->ip.
			      reass.owner_feature_thread_index =
			      handoff_thread_idx;
			  else
			    vnet_buffer (b0)->ip.reass.owner_thread_index =
			      handoff_thread_idx;
			  break;
			case IP4_REASS_RC_TOO_MANY_FRAGMENTS:
			  vlib_node_increment_counter (vm, node->node_index,
						       IP4_ERROR_REASS_FRAGMENT_CHAIN_TOO_LONG,
						       1);
			  ip4_reass_drop_all (vm, node, rm, reass);
			  ip4_reass_free (vm, rm, rt, reass);
			  goto next_packet;
			  break;
			case IP4_REASS_RC_NO_BUF:
			  vlib_node_increment_counter (vm, node->node_index,
						       IP4_ERROR_REASS_NO_BUF,
						       1);
			  ip4_reass_drop_all (vm, node, rm, reass);
			  ip4_reass_free (vm, rm, rt, reass);
			  goto next_packet;
			  break;
			case IP4_REASS_RC_INTERNAL_ERROR:
			  /* drop everything and start with a clean slate */
			  vlib_node_increment_counter (vm, node->node_index,
						       IP4_ERROR_REASS_INTERNAL_ERROR,
						       1);
			  ip4_reass_drop_all (vm, node, rm, reass);
			  ip4_reass_free (vm, rm, rt, reass);
			  goto next_packet;
			  break;
			}
		    }
		  else
		    {
		      /* no context and no handoff => reassembly limit hit */
		      next0 = IP4_REASSEMBLY_NEXT_DROP;
		      error0 = IP4_ERROR_REASS_LIMIT_REACHED;
		    }
		}

	      b0->error = node->errors[error0];
	    }

	  /* bi0 == ~0 means the buffer was consumed into a chain above */
	  if (bi0 != ~0)
	    {
	      to_next[0] = bi0;
	      to_next += 1;
	      n_left_to_next -= 1;
	      if (next0 == IP4_REASSEMBLY_NEXT_HANDOFF)
		{
		  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
		    {
		      if (is_feature)
			ip4_reass_add_trace (vm, node, rm, ~0,
					     ~0,
					     bi0, ~0, ~0, HANDOFF, 0,
					     vnet_buffer (b0)->ip.
					     reass.owner_feature_thread_index);
		      else
			ip4_reass_add_trace (vm, node, rm, ~0, ~0, bi0,
					     ~0, ~0, HANDOFF, 0,
					     vnet_buffer (b0)->ip.
					     reass.owner_thread_index);
		    }
		}
	      else if (is_feature && IP4_ERROR_NONE == error0)
		{
		  b0 = vlib_get_buffer (vm, bi0);
		  vnet_feature_next (&next0, b0);
		}
	      vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
					       to_next, n_left_to_next,
					       bi0, next0);
	      IP4_REASS_DEBUG_BUFFER (bi0, enqueue_next);
	    }

	next_packet:
	  from += 1;
	  n_left_from -= 1;
	}

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  clib_spinlock_unlock (&rt->lock);
  return frame->n_vectors;
}
1244
/* Human-readable counter strings, expanded from the foreach_ip4_error
 * macro list; indexed by the IP4_ERROR_* enum values. */
static char *ip4_reassembly_error_strings[] = {
#define _(sym, string) string,
  foreach_ip4_error
#undef _
};
1250
/* Entry point for the standalone "ip4-reassembly" node - neither the
 * feature-arc nor the custom-app variant. */
VLIB_NODE_FN (ip4_reass_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
			       vlib_frame_t * frame)
{
  return ip4_reassembly_inline (vm, node, frame, false /* is_feature */ ,
				false /* is_custom_app */ );
}
1257
/* Graph-node registration for the standalone reassembly node. */
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ip4_reass_node) = {
    .name = "ip4-reassembly",
    .vector_size = sizeof (u32),
    .format_trace = format_ip4_reass_trace,
    .n_errors = ARRAY_LEN (ip4_reassembly_error_strings),
    .error_strings = ip4_reassembly_error_strings,
    .n_next_nodes = IP4_REASSEMBLY_N_NEXT,
    .next_nodes =
        {
                [IP4_REASSEMBLY_NEXT_INPUT] = "ip4-input",
                [IP4_REASSEMBLY_NEXT_DROP] = "ip4-drop",
                [IP4_REASSEMBLY_NEXT_HANDOFF] = "ip4-reassembly-handoff",

        },
};
/* *INDENT-ON* */
1275
/* Entry point for the "ip4-reassembly-feature" node running on the
 * ip4-unicast feature arc. */
VLIB_NODE_FN (ip4_reass_node_feature) (vlib_main_t * vm,
				       vlib_node_runtime_t * node,
				       vlib_frame_t * frame)
{
  return ip4_reassembly_inline (vm, node, frame, true /* is_feature */ ,
				false /* is_custom_app */ );
}
1283
/* Graph-node registration for the feature-arc reassembly variant; note the
 * handoff next node differs from the standalone node's. */
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ip4_reass_node_feature) = {
    .name = "ip4-reassembly-feature",
    .vector_size = sizeof (u32),
    .format_trace = format_ip4_reass_trace,
    .n_errors = ARRAY_LEN (ip4_reassembly_error_strings),
    .error_strings = ip4_reassembly_error_strings,
    .n_next_nodes = IP4_REASSEMBLY_N_NEXT,
    .next_nodes =
        {
                [IP4_REASSEMBLY_NEXT_INPUT] = "ip4-input",
                [IP4_REASSEMBLY_NEXT_DROP] = "ip4-drop",
                [IP4_REASSEMBLY_NEXT_HANDOFF] = "ip4-reass-feature-hoff",
        },
};
/* *INDENT-ON* */
1300
/* Register the feature on the ip4-unicast arc so reassembly runs before
 * lookup and IPsec input. */
/* *INDENT-OFF* */
VNET_FEATURE_INIT (ip4_reassembly_feature, static) = {
    .arc_name = "ip4-unicast",
    .node_name = "ip4-reassembly-feature",
    .runs_before = VNET_FEATURES ("ip4-lookup",
                                  "ipsec4-input-feature"),
    .runs_after = 0,
};
/* *INDENT-ON* */
1310
Filip Tehlar26ea14e2019-03-11 05:30:21 -07001311#ifndef CLIB_MARCH_VARIANT
Klement Sekera4c533132018-02-22 11:41:12 +01001312always_inline u32
1313ip4_reass_get_nbuckets ()
Klement Sekera75e7d132017-09-20 08:26:30 +02001314{
1315 ip4_reass_main_t *rm = &ip4_reass_main;
1316 u32 nbuckets;
1317 u8 i;
1318
1319 nbuckets = (u32) (rm->max_reass_n / IP4_REASS_HT_LOAD_FACTOR);
1320
1321 for (i = 0; i < 31; i++)
1322 if ((1 << i) >= nbuckets)
1323 break;
1324 nbuckets = 1 << i;
1325
1326 return nbuckets;
1327}
Filip Tehlar26ea14e2019-03-11 05:30:21 -07001328#endif /* CLIB_MARCH_VARIANT */
Klement Sekera75e7d132017-09-20 08:26:30 +02001329
/* Events delivered to the ip4-reassembly-expire-walk process node. */
typedef enum
{
  IP4_EVENT_CONFIG_CHANGED = 1,	/* parameters changed via ip4_reass_set */
} ip4_reass_event_t;
1334
/* Context passed to ip4_rehash_cb while copying entries into a resized
 * hash table. */
typedef struct
{
  int failure;			/* set to 1 if any insertion fails */
  clib_bihash_16_8_t *new_hash;	/* destination table being populated */
} ip4_rehash_cb_ctx;
1340
Filip Tehlar26ea14e2019-03-11 05:30:21 -07001341#ifndef CLIB_MARCH_VARIANT
Klement Sekera4c533132018-02-22 11:41:12 +01001342static void
Klement Sekera8dcfed52018-06-28 11:16:15 +02001343ip4_rehash_cb (clib_bihash_kv_16_8_t * kv, void *_ctx)
Klement Sekera75e7d132017-09-20 08:26:30 +02001344{
1345 ip4_rehash_cb_ctx *ctx = _ctx;
Klement Sekera8dcfed52018-06-28 11:16:15 +02001346 if (clib_bihash_add_del_16_8 (ctx->new_hash, kv, 1))
Klement Sekera75e7d132017-09-20 08:26:30 +02001347 {
1348 ctx->failure = 1;
1349 }
1350}
1351
Klement Sekera4c533132018-02-22 11:41:12 +01001352static void
1353ip4_reass_set_params (u32 timeout_ms, u32 max_reassemblies,
Klement Sekera3a343d42019-05-16 14:35:46 +02001354 u32 max_reassembly_length, u32 expire_walk_interval_ms)
Klement Sekera4c533132018-02-22 11:41:12 +01001355{
1356 ip4_reass_main.timeout_ms = timeout_ms;
1357 ip4_reass_main.timeout = (f64) timeout_ms / (f64) MSEC_PER_SEC;
1358 ip4_reass_main.max_reass_n = max_reassemblies;
Klement Sekera3a343d42019-05-16 14:35:46 +02001359 ip4_reass_main.max_reass_len = max_reassembly_length;
Klement Sekera4c533132018-02-22 11:41:12 +01001360 ip4_reass_main.expire_walk_interval_ms = expire_walk_interval_ms;
1361}
1362
/**
 * @brief API handler - apply new reassembly parameters.
 *
 * Stores the new limits, wakes the expiration walker so it picks up the
 * new interval, and - if the required bucket count grew - rebuilds the
 * hash table by copying every entry into a larger one.
 *
 * @return 0 on success, -1 if an entry could not be copied into the new
 *         hash table (original table is left untouched in that case)
 */
vnet_api_error_t
ip4_reass_set (u32 timeout_ms, u32 max_reassemblies,
	       u32 max_reassembly_length, u32 expire_walk_interval_ms)
{
  u32 old_nbuckets = ip4_reass_get_nbuckets ();
  ip4_reass_set_params (timeout_ms, max_reassemblies, max_reassembly_length,
			expire_walk_interval_ms);
  /* nudge the expire-walk process so the new interval takes effect now */
  vlib_process_signal_event (ip4_reass_main.vlib_main,
			     ip4_reass_main.ip4_reass_expire_node_idx,
			     IP4_EVENT_CONFIG_CHANGED, 0);
  u32 new_nbuckets = ip4_reass_get_nbuckets ();
  /* only grow the table, never shrink */
  if (ip4_reass_main.max_reass_n > 0 && new_nbuckets > old_nbuckets)
    {
      clib_bihash_16_8_t new_hash;
      clib_memset (&new_hash, 0, sizeof (new_hash));
      ip4_rehash_cb_ctx ctx;
      ctx.failure = 0;
      ctx.new_hash = &new_hash;
      clib_bihash_init_16_8 (&new_hash, "ip4-reass", new_nbuckets,
			     new_nbuckets * 1024);
      /* copy all live entries into the bigger table */
      clib_bihash_foreach_key_value_pair_16_8 (&ip4_reass_main.hash,
					       ip4_rehash_cb, &ctx);
      if (ctx.failure)
	{
	  clib_bihash_free_16_8 (&new_hash);
	  return -1;
	}
      else
	{
	  /* swap the new table in place of the old one */
	  clib_bihash_free_16_8 (&ip4_reass_main.hash);
	  clib_memcpy_fast (&ip4_reass_main.hash, &new_hash,
			    sizeof (ip4_reass_main.hash));
	  clib_bihash_copied (&ip4_reass_main.hash, &new_hash);
	}
    }
  return 0;
}
1400
1401vnet_api_error_t
1402ip4_reass_get (u32 * timeout_ms, u32 * max_reassemblies,
Klement Sekera3a343d42019-05-16 14:35:46 +02001403 u32 * max_reassembly_length, u32 * expire_walk_interval_ms)
Klement Sekera75e7d132017-09-20 08:26:30 +02001404{
1405 *timeout_ms = ip4_reass_main.timeout_ms;
1406 *max_reassemblies = ip4_reass_main.max_reass_n;
Klement Sekera3a343d42019-05-16 14:35:46 +02001407 *max_reassembly_length = ip4_reass_main.max_reass_len;
Klement Sekera75e7d132017-09-20 08:26:30 +02001408 *expire_walk_interval_ms = ip4_reass_main.expire_walk_interval_ms;
1409 return 0;
1410}
1411
Klement Sekera4c533132018-02-22 11:41:12 +01001412static clib_error_t *
Klement Sekera75e7d132017-09-20 08:26:30 +02001413ip4_reass_init_function (vlib_main_t * vm)
1414{
1415 ip4_reass_main_t *rm = &ip4_reass_main;
1416 clib_error_t *error = 0;
1417 u32 nbuckets;
Dave Barach1403fcd2018-02-05 09:45:43 -05001418 vlib_node_t *node;
Klement Sekera75e7d132017-09-20 08:26:30 +02001419
1420 rm->vlib_main = vm;
Klement Sekera75e7d132017-09-20 08:26:30 +02001421
Juraj Slobodacd806922018-10-10 10:15:54 +02001422 vec_validate (rm->per_thread_data, vlib_num_workers ());
Klement Sekera4c533132018-02-22 11:41:12 +01001423 ip4_reass_per_thread_t *rt;
1424 vec_foreach (rt, rm->per_thread_data)
1425 {
1426 clib_spinlock_init (&rt->lock);
1427 pool_alloc (rt->pool, rm->max_reass_n);
1428 }
Dave Barach1403fcd2018-02-05 09:45:43 -05001429
1430 node = vlib_get_node_by_name (vm, (u8 *) "ip4-reassembly-expire-walk");
1431 ASSERT (node);
1432 rm->ip4_reass_expire_node_idx = node->index;
1433
Klement Sekera3ecc2212018-03-27 10:34:43 +02001434 ip4_reass_set_params (IP4_REASS_TIMEOUT_DEFAULT_MS,
1435 IP4_REASS_MAX_REASSEMBLIES_DEFAULT,
Klement Sekera3a343d42019-05-16 14:35:46 +02001436 IP4_REASS_MAX_REASSEMBLY_LENGTH_DEFAULT,
Klement Sekera3ecc2212018-03-27 10:34:43 +02001437 IP4_REASS_EXPIRE_WALK_INTERVAL_DEFAULT_MS);
1438
Klement Sekera75e7d132017-09-20 08:26:30 +02001439 nbuckets = ip4_reass_get_nbuckets ();
Klement Sekera8dcfed52018-06-28 11:16:15 +02001440 clib_bihash_init_16_8 (&rm->hash, "ip4-reass", nbuckets, nbuckets * 1024);
Klement Sekera75e7d132017-09-20 08:26:30 +02001441
Dave Barach1403fcd2018-02-05 09:45:43 -05001442 node = vlib_get_node_by_name (vm, (u8 *) "ip4-drop");
Klement Sekera75e7d132017-09-20 08:26:30 +02001443 ASSERT (node);
1444 rm->ip4_drop_idx = node->index;
Klement Sekera4c533132018-02-22 11:41:12 +01001445
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001446 rm->fq_index = vlib_frame_queue_main_init (ip4_reass_node.index, 0);
1447 rm->fq_feature_index =
1448 vlib_frame_queue_main_init (ip4_reass_node_feature.index, 0);
1449
Klement Sekera75e7d132017-09-20 08:26:30 +02001450 return error;
1451}
1452
1453VLIB_INIT_FUNCTION (ip4_reass_init_function);
Filip Tehlar26ea14e2019-03-11 05:30:21 -07001454#endif /* CLIB_MARCH_VARIANT */
Klement Sekera75e7d132017-09-20 08:26:30 +02001455
/**
 * @brief Process node - periodically drop timed-out reassemblies.
 *
 * Wakes up every expire_walk_interval_ms (or immediately on a
 * IP4_EVENT_CONFIG_CHANGED signal), then scans every thread's context
 * pool under that thread's lock and frees any reassembly not heard from
 * within rm->timeout seconds.
 */
static uword
ip4_reass_walk_expired (vlib_main_t * vm,
			vlib_node_runtime_t * node, vlib_frame_t * f)
{
  ip4_reass_main_t *rm = &ip4_reass_main;
  uword event_type, *event_data = 0;

  while (true)
    {
      vlib_process_wait_for_event_or_clock (vm,
					    (f64)
					    rm->expire_walk_interval_ms /
					    (f64) MSEC_PER_SEC);
      event_type = vlib_process_get_events (vm, &event_data);

      switch (event_type)
	{
	case ~0:		/* no events => timeout */
	  /* nothing to do here */
	  break;
	case IP4_EVENT_CONFIG_CHANGED:
	  /* just fall through to a walk with the new parameters */
	  break;
	default:
	  clib_warning ("BUG: event type 0x%wx", event_type);
	  break;
	}
      f64 now = vlib_time_now (vm);

      ip4_reass_t *reass;
      int *pool_indexes_to_free = NULL;

      uword thread_index = 0;
      int index;
      const uword nthreads = vlib_num_workers () + 1;
      /* walk every thread's pool; freeing happens under that thread's lock */
      for (thread_index = 0; thread_index < nthreads; ++thread_index)
	{
	  ip4_reass_per_thread_t *rt = &rm->per_thread_data[thread_index];
	  clib_spinlock_lock (&rt->lock);

	  /* collect first, free second - pool_foreach_index must not see
	   * concurrent deletions */
	  vec_reset_length (pool_indexes_to_free);
	  /* *INDENT-OFF* */
	  pool_foreach_index (index, rt->pool, ({
	    reass = pool_elt_at_index (rt->pool, index);
	    if (now > reass->last_heard + rm->timeout)
	      {
		vec_add1 (pool_indexes_to_free, index);
	      }
	  }));
	  /* *INDENT-ON* */
	  int *i;
	  /* *INDENT-OFF* */
	  vec_foreach (i, pool_indexes_to_free)
	  {
	    ip4_reass_t *reass = pool_elt_at_index (rt->pool, i[0]);
	    ip4_reass_drop_all (vm, node, rm, reass);
	    ip4_reass_free (vm, rm, rt, reass);
	  }
	  /* *INDENT-ON* */

	  clib_spinlock_unlock (&rt->lock);
	}

      vec_free (pool_indexes_to_free);
      if (event_data)
	{
	  _vec_len (event_data) = 0;
	}
    }

  return 0;
}
1527
/* Registration of the expiration walker as a VLIB process node.
 * NOTE(review): .format_trace on a process node looks unused - confirm
 * before relying on it. */
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ip4_reass_expire_node) = {
    .function = ip4_reass_walk_expired,
    .type = VLIB_NODE_TYPE_PROCESS,
    .name = "ip4-reassembly-expire-walk",
    .format_trace = format_ip4_reass_trace,
    .n_errors = ARRAY_LEN (ip4_reassembly_error_strings),
    .error_strings = ip4_reassembly_error_strings,

};
/* *INDENT-ON* */
1539
1540static u8 *
1541format_ip4_reass_key (u8 * s, va_list * args)
1542{
1543 ip4_reass_key_t *key = va_arg (*args, ip4_reass_key_t *);
1544 s = format (s, "xx_id: %u, src: %U, dst: %U, frag_id: %u, proto: %u",
1545 key->xx_id, format_ip4_address, &key->src, format_ip4_address,
1546 &key->dst, clib_net_to_host_u16 (key->frag_id), key->proto);
1547 return s;
1548}
1549
1550static u8 *
1551format_ip4_reass (u8 * s, va_list * args)
1552{
1553 vlib_main_t *vm = va_arg (*args, vlib_main_t *);
1554 ip4_reass_t *reass = va_arg (*args, ip4_reass_t *);
1555
Klement Sekera4c533132018-02-22 11:41:12 +01001556 s = format (s, "ID: %lu, key: %U\n first_bi: %u, data_len: %u, "
Klement Sekera75e7d132017-09-20 08:26:30 +02001557 "last_packet_octet: %u, trace_op_counter: %u\n",
1558 reass->id, format_ip4_reass_key, &reass->key, reass->first_bi,
1559 reass->data_len, reass->last_packet_octet,
1560 reass->trace_op_counter);
1561 u32 bi = reass->first_bi;
1562 u32 counter = 0;
1563 while (~0 != bi)
1564 {
1565 vlib_buffer_t *b = vlib_get_buffer (vm, bi);
1566 vnet_buffer_opaque_t *vnb = vnet_buffer (b);
1567 s = format (s, " #%03u: range: [%u, %u], bi: %u, off: %d, len: %u, "
1568 "fragment[%u, %u]\n",
1569 counter, vnb->ip.reass.range_first,
1570 vnb->ip.reass.range_last, bi,
Klement Sekerad0f70a32018-12-14 17:24:13 +01001571 ip4_reass_buffer_get_data_offset (b),
1572 ip4_reass_buffer_get_data_len (b),
Klement Sekera75e7d132017-09-20 08:26:30 +02001573 vnb->ip.reass.fragment_first, vnb->ip.reass.fragment_last);
1574 if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
1575 {
1576 bi = b->next_buffer;
1577 }
1578 else
1579 {
1580 bi = ~0;
1581 }
1582 }
1583 return s;
1584}
1585
1586static clib_error_t *
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001587show_ip4_reass (vlib_main_t * vm,
1588 unformat_input_t * input,
Klement Sekera75e7d132017-09-20 08:26:30 +02001589 CLIB_UNUSED (vlib_cli_command_t * lmd))
1590{
1591 ip4_reass_main_t *rm = &ip4_reass_main;
1592
1593 vlib_cli_output (vm, "---------------------");
1594 vlib_cli_output (vm, "IP4 reassembly status");
1595 vlib_cli_output (vm, "---------------------");
Klement Sekera4c533132018-02-22 11:41:12 +01001596 bool details = false;
Klement Sekera75e7d132017-09-20 08:26:30 +02001597 if (unformat (input, "details"))
1598 {
Klement Sekera4c533132018-02-22 11:41:12 +01001599 details = true;
1600 }
1601
1602 u32 sum_reass_n = 0;
Klement Sekera4c533132018-02-22 11:41:12 +01001603 ip4_reass_t *reass;
1604 uword thread_index;
Juraj Slobodacd806922018-10-10 10:15:54 +02001605 const uword nthreads = vlib_num_workers () + 1;
Klement Sekera4c533132018-02-22 11:41:12 +01001606 for (thread_index = 0; thread_index < nthreads; ++thread_index)
1607 {
1608 ip4_reass_per_thread_t *rt = &rm->per_thread_data[thread_index];
1609 clib_spinlock_lock (&rt->lock);
1610 if (details)
1611 {
1612 /* *INDENT-OFF* */
1613 pool_foreach (reass, rt->pool, {
1614 vlib_cli_output (vm, "%U", format_ip4_reass, vm, reass);
1615 });
1616 /* *INDENT-ON* */
1617 }
1618 sum_reass_n += rt->reass_n;
Klement Sekera4c533132018-02-22 11:41:12 +01001619 clib_spinlock_unlock (&rt->lock);
Klement Sekera75e7d132017-09-20 08:26:30 +02001620 }
1621 vlib_cli_output (vm, "---------------------");
Klement Sekera4c533132018-02-22 11:41:12 +01001622 vlib_cli_output (vm, "Current IP4 reassemblies count: %lu\n",
1623 (long unsigned) sum_reass_n);
Klement Sekera75e7d132017-09-20 08:26:30 +02001624 vlib_cli_output (vm,
Klement Sekera4c533132018-02-22 11:41:12 +01001625 "Maximum configured concurrent IP4 reassemblies per worker-thread: %lu\n",
Klement Sekera75e7d132017-09-20 08:26:30 +02001626 (long unsigned) rm->max_reass_n);
Klement Sekera75e7d132017-09-20 08:26:30 +02001627 return 0;
1628}
1629
/* *INDENT-OFF* */
/* CLI registration for "show ip4-reassembly [details]". */
VLIB_CLI_COMMAND (show_ip4_reassembly_cmd, static) = {
    .path = "show ip4-reassembly",
    .short_help = "show ip4-reassembly [details]",
    .function = show_ip4_reass,
};
/* *INDENT-ON* */
1637
Filip Tehlar26ea14e2019-03-11 05:30:21 -07001638#ifndef CLIB_MARCH_VARIANT
Klement Sekera4c533132018-02-22 11:41:12 +01001639vnet_api_error_t
1640ip4_reass_enable_disable (u32 sw_if_index, u8 enable_disable)
1641{
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001642 return vnet_feature_enable_disable ("ip4-unicast",
1643 "ip4-reassembly-feature", sw_if_index,
1644 enable_disable, 0, 0);
Klement Sekera4c533132018-02-22 11:41:12 +01001645}
Filip Tehlar26ea14e2019-03-11 05:30:21 -07001646#endif /* CLIB_MARCH_VARIANT */
Klement Sekera4c533132018-02-22 11:41:12 +01001647
Vijayabhaskar Katamreddy470a3702019-03-01 19:57:06 -08001648
/* Error counters for the reassembly handoff nodes; currently only
 * congestion drops (frame queue to the target worker was full). */
#define foreach_ip4_reassembly_handoff_error \
_(CONGESTION_DROP, "congestion drop")


/* Enum of handoff error indices generated from the list above. */
typedef enum
{
#define _(sym,str) IP4_REASSEMBLY_HANDOFF_ERROR_##sym,
  foreach_ip4_reassembly_handoff_error
#undef _
    IP4_REASSEMBLY_HANDOFF_N_ERROR,
} ip4_reassembly_handoff_error_t;

/* Human-readable strings matching the enum, for node registration. */
static char *ip4_reassembly_handoff_error_strings[] = {
#define _(sym,string) string,
  foreach_ip4_reassembly_handoff_error
#undef _
};

/* Per-packet trace record for the handoff nodes. */
typedef struct
{
  u32 next_worker_index;	/* thread the buffer was handed off to */
} ip4_reassembly_handoff_trace_t;
1671
1672static u8 *
1673format_ip4_reassembly_handoff_trace (u8 * s, va_list * args)
1674{
1675 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
1676 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
1677 ip4_reassembly_handoff_trace_t *t =
1678 va_arg (*args, ip4_reassembly_handoff_trace_t *);
1679
1680 s =
1681 format (s, "ip4-reassembly-handoff: next-worker %d",
1682 t->next_worker_index);
1683
1684 return s;
1685}
1686
1687always_inline uword
1688ip4_reassembly_handoff_node_inline (vlib_main_t * vm,
1689 vlib_node_runtime_t * node,
1690 vlib_frame_t * frame, bool is_feature)
1691{
1692 ip4_reass_main_t *rm = &ip4_reass_main;
1693
1694 vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
1695 u32 n_enq, n_left_from, *from;
1696 u16 thread_indices[VLIB_FRAME_SIZE], *ti;
1697 u32 fq_index;
1698
1699 from = vlib_frame_vector_args (frame);
1700 n_left_from = frame->n_vectors;
1701 vlib_get_buffers (vm, from, bufs, n_left_from);
1702
1703 b = bufs;
1704 ti = thread_indices;
1705
1706 fq_index = (is_feature) ? rm->fq_feature_index : rm->fq_index;
1707
1708 while (n_left_from > 0)
1709 {
1710 ti[0] =
1711 (is_feature) ? vnet_buffer (b[0])->ip.
1712 reass.owner_feature_thread_index : vnet_buffer (b[0])->ip.
1713 reass.owner_thread_index;
1714
1715 if (PREDICT_FALSE
1716 ((node->flags & VLIB_NODE_FLAG_TRACE)
1717 && (b[0]->flags & VLIB_BUFFER_IS_TRACED)))
1718 {
1719 ip4_reassembly_handoff_trace_t *t =
1720 vlib_add_trace (vm, node, b[0], sizeof (*t));
1721 t->next_worker_index = ti[0];
1722 }
1723
1724 n_left_from -= 1;
1725 ti += 1;
1726 b += 1;
1727 }
1728 n_enq =
1729 vlib_buffer_enqueue_to_thread (vm, fq_index, from, thread_indices,
1730 frame->n_vectors, 1);
1731
1732 if (n_enq < frame->n_vectors)
1733 vlib_node_increment_counter (vm, node->node_index,
1734 IP4_REASSEMBLY_HANDOFF_ERROR_CONGESTION_DROP,
1735 frame->n_vectors - n_enq);
1736 return frame->n_vectors;
1737}
1738
/* Node function for the non-feature (custom next) handoff path; thin
 * wrapper that delegates to the shared inline with is_feature = false. */
VLIB_NODE_FN (ip4_reassembly_handoff_node) (vlib_main_t * vm,
					    vlib_node_runtime_t * node,
					    vlib_frame_t * frame)
{
  return ip4_reassembly_handoff_node_inline (vm, node, frame,
					     false /* is_feature */ );
}
1746
1747
/* *INDENT-OFF* */
/* Registration for the non-feature handoff node; only next is error-drop,
 * successfully handed-off buffers leave via the frame queue instead. */
VLIB_REGISTER_NODE (ip4_reassembly_handoff_node) = {
  .name = "ip4-reassembly-handoff",
  .vector_size = sizeof (u32),
  .n_errors = ARRAY_LEN(ip4_reassembly_handoff_error_strings),
  .error_strings = ip4_reassembly_handoff_error_strings,
  .format_trace = format_ip4_reassembly_handoff_trace,

  .n_next_nodes = 1,

  .next_nodes = {
    [0] = "error-drop",
  },
};
/* *INDENT-ON* */
1763
1764
/* *INDENT-OFF* */
/* Node function for the feature-arc handoff path; thin wrapper that
 * delegates to the shared inline with is_feature = true. */
VLIB_NODE_FN (ip4_reassembly_feature_handoff_node) (vlib_main_t * vm,
						    vlib_node_runtime_t *
						    node,
						    vlib_frame_t * frame)
{
  return ip4_reassembly_handoff_node_inline (vm, node, frame,
					     true /* is_feature */ );
}
/* *INDENT-ON* */
1775
1776
/* *INDENT-OFF* */
/* Registration for the feature-arc handoff node; same error/trace setup
 * as the non-feature node, distinct name for the feature arc. */
VLIB_REGISTER_NODE (ip4_reassembly_feature_handoff_node) = {
  .name = "ip4-reass-feature-hoff",
  .vector_size = sizeof (u32),
  .n_errors = ARRAY_LEN(ip4_reassembly_handoff_error_strings),
  .error_strings = ip4_reassembly_handoff_error_strings,
  .format_trace = format_ip4_reassembly_handoff_trace,

  .n_next_nodes = 1,

  .next_nodes = {
    [0] = "error-drop",
  },
};
/* *INDENT-ON* */
1792
Klement Sekera75e7d132017-09-20 08:26:30 +02001793/*
1794 * fd.io coding-style-patch-verification: ON
1795 *
1796 * Local Variables:
1797 * eval: (c-set-style "gnu")
1798 * End:
1799 */