/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * vnet/buffer.h: vnet buffer flags
 *
 * Copyright (c) 2008 Eliot Dresselhaus
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef included_vnet_buffer_h
#define included_vnet_buffer_h

#include <vlib/vlib.h>

/**
 * Flags that are set in the high order bits of ((vlib_buffer_t *) b)->flags
 *
 */
#define foreach_vnet_buffer_flag                              \
  _ (1, L4_CHECKSUM_COMPUTED, "l4-cksum-computed", 1)         \
  _ (2, L4_CHECKSUM_CORRECT, "l4-cksum-correct", 1)           \
  _ (3, VLAN_2_DEEP, "vlan-2-deep", 1)                        \
  _ (4, VLAN_1_DEEP, "vlan-1-deep", 1)                        \
  _ (5, SPAN_CLONE, "span-clone", 1)                          \
  _ (6, LOOP_COUNTER_VALID, "loop-counter-valid", 0)          \
  _ (7, LOCALLY_ORIGINATED, "local", 1)                       \
  _ (8, IS_IP4, "ip4", 1)                                     \
  _ (9, IS_IP6, "ip6", 1)                                     \
  _ (10, OFFLOAD, "offload", 0)                               \
  _ (11, IS_NATED, "natted", 1)                               \
  _ (12, L2_HDR_OFFSET_VALID, "l2_hdr_offset_valid", 0)       \
  _ (13, L3_HDR_OFFSET_VALID, "l3_hdr_offset_valid", 0)       \
  _ (14, L4_HDR_OFFSET_VALID, "l4_hdr_offset_valid", 0)       \
  _ (15, FLOW_REPORT, "flow-report", 1)                       \
  _ (16, IS_DVR, "dvr", 1)                                    \
  _ (17, QOS_DATA_VALID, "qos-data-valid", 0)                 \
  _ (18, GSO, "gso", 0)                                       \
  _ (19, AVAIL1, "avail1", 1)                                 \
  _ (20, AVAIL2, "avail2", 1)                                 \
  _ (21, AVAIL3, "avail3", 1)                                 \
  _ (22, AVAIL4, "avail4", 1)                                 \
  _ (23, AVAIL5, "avail5", 1)                                 \
  _ (24, AVAIL6, "avail6", 1)                                 \
  _ (25, AVAIL7, "avail7", 1)                                 \
  _ (26, AVAIL8, "avail8", 1)                                 \
  _ (27, AVAIL9, "avail9", 1)

/*
 * Please allocate the FIRST available bit, redefine
 * AVAIL1 ... AVAILn-1, and remove AVAILn. Please maintain the
 * VNET_BUFFER_FLAGS_ALL_AVAIL definition.
 */
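
/*
 * Illustrative sketch of the procedure above, for a hypothetical
 * NEW_FEATURE flag: the first available bit (currently AVAIL1's) is
 * claimed, AVAIL2 ... AVAIL9 are renamed AVAIL1 ... AVAIL8, and the old
 * AVAIL9 entry disappears from both foreach_vnet_buffer_flag and
 * VNET_BUFFER_FLAGS_ALL_AVAIL:
 *
 *   _ (19, NEW_FEATURE, "new-feature", 1)  \
 *   _ (20, AVAIL1, "avail1", 1)            \
 *   ...
 *   _ (27, AVAIL8, "avail8", 1)
 */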

#define VNET_BUFFER_FLAGS_ALL_AVAIL                                     \
  (VNET_BUFFER_F_AVAIL1 | VNET_BUFFER_F_AVAIL2 | VNET_BUFFER_F_AVAIL3 | \
   VNET_BUFFER_F_AVAIL4 | VNET_BUFFER_F_AVAIL5 | VNET_BUFFER_F_AVAIL6 | \
   VNET_BUFFER_F_AVAIL7 | VNET_BUFFER_F_AVAIL8 | VNET_BUFFER_F_AVAIL9)

#define VNET_BUFFER_FLAGS_VLAN_BITS \
  (VNET_BUFFER_F_VLAN_1_DEEP | VNET_BUFFER_F_VLAN_2_DEEP)

enum
{
#define _(bit, name, s, v) VNET_BUFFER_F_##name = (1 << LOG2_VLIB_BUFFER_FLAG_USER(bit)),
  foreach_vnet_buffer_flag
#undef _
};

enum
{
#define _(bit, name, s, v) VNET_BUFFER_F_LOG2_##name = LOG2_VLIB_BUFFER_FLAG_USER(bit),
  foreach_vnet_buffer_flag
#undef _
};
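
/*
 * For reference, each foreach_vnet_buffer_flag entry expands in the two
 * enums above as follows (shown for the IS_IP4 entry, bit 8):
 *
 *   VNET_BUFFER_F_IS_IP4      = 1 << LOG2_VLIB_BUFFER_FLAG_USER (8)
 *   VNET_BUFFER_F_LOG2_IS_IP4 = LOG2_VLIB_BUFFER_FLAG_USER (8)
 */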

/* Make sure that the vnet and vlib bits are disjoint */
STATIC_ASSERT (((VNET_BUFFER_FLAGS_ALL_AVAIL & VLIB_BUFFER_FLAGS_ALL) == 0),
               "VLIB / VNET buffer flags overlap");

#define foreach_buffer_opaque_union_subtype \
_(ip) \
_(l2) \
_(l2t) \
_(l2_classify) \
_(policer) \
_(ipsec) \
_(map) \
_(map_t) \
_(ip_frag) \
_(mpls) \
_(tcp)

/*
 * vnet stack buffer opaque array overlay structure.
 * The vnet_buffer_opaque_t *must* be the same size as the
 * vlib_buffer_t "opaque" structure member, 40 bytes.
 *
 * When adding a union type, please add a stanza to
 * foreach_buffer_opaque_union_subtype (directly above).
 * Code in vnet_interface_init(...) verifies the size
 * of the union, and will announce any deviations in an
 * impossible-to-miss manner.
 */
typedef struct
{
  u32 sw_if_index[VLIB_N_RX_TX];
  i16 l2_hdr_offset;
  i16 l3_hdr_offset;
  i16 l4_hdr_offset;
  u8 feature_arc_index;
  u8 dont_waste_me;

  union
  {
    /* IP4/6 buffer opaque. */
    struct
    {
      /* Adjacency from destination IP address lookup [VLIB_TX].
         Adjacency from source IP address lookup [VLIB_RX].
         This gets set to ~0 until source lookup is performed. */
      u32 adj_index[VLIB_N_RX_TX];

      union
      {
        struct
        {
          /* Flow hash value for this packet, computed from the IP src/dst
             address, protocol and ports. */
          u32 flow_hash;

          union
          {
            /* next protocol */
            u32 save_protocol;

            /* Hint for transport protocols */
            u32 fib_index;
          };

          /* Rewrite length */
          u8 save_rewrite_length;

          /* MFIB RPF ID */
          u32 rpf_id;
        };

        /* ICMP */
        struct
        {
          u8 type;
          u8 code;
          u32 data;
        } icmp;

        /* reassembly */
        union
        {
          /* group input/output to simplify the code, this way
           * we can handoff while keeping input variables intact */
          struct
          {
            /* input variables */
            struct
            {
              u32 next_index;       /* index of next node - used by custom apps */
              u32 error_next_index; /* index of next node if error - used by custom apps */
            };
            /* handoff variables */
            struct
            {
              u16 owner_thread_index;
            };
          };
          /* output variables */
          struct
          {
            union
            {
              /* shallow virtual reassembly output variables */
              struct
              {
                u16 l4_src_port; /* tcp/udp/icmp src port */
                u16 l4_dst_port; /* tcp/udp/icmp dst port */
                u32 tcp_ack_number;
                u8 save_rewrite_length;
                u8 ip_proto; /* protocol in ip header */
                u8 icmp_type_or_tcp_flags;
                u8 is_non_first_fragment;
                u32 tcp_seq_number;
              };
              /* full reassembly output variables */
              struct
              {
                u16 estimated_mtu; /* estimated MTU calculated during reassembly */
              };
            };
          };
          /* internal variables used during reassembly */
          struct
          {
            u16 fragment_first;
            u16 fragment_last;
            u16 range_first;
            u16 range_last;
            u32 next_range_bi;
            u16 ip6_frag_hdr_offset;
          };
        } reass;
      };
    } ip;

    /*
     * MPLS:
     * data copied from the MPLS header that was popped from the packet
     * during the look-up.
     */
    struct
    {
      /* do not overlay w/ ip.adj_index[0,1] nor flow hash */
      u32 pad[VLIB_N_RX_TX + 1];
      u8 ttl;
      u8 exp;
      u8 first;
      u8 pyld_proto:3; /* dpo_proto_t */
      u8 rsvd:5;
      /* Rewrite length */
      u8 save_rewrite_length;
      /* Save the mpls header length including all label stack */
      u8 mpls_hdr_length;
      /*
       * BIER - the number of bytes in the header.
       * The len field in the header is not authoritative; it's the
       * value in the table that counts.
       */
      struct
      {
        u8 n_bytes;
      } bier;
    } mpls;

    /* l2 bridging path, only valid there */
    struct opaque_l2
    {
      u32 feature_bitmap;
      u16 bd_index;  /* bridge-domain index */
      u16 l2fib_sn;  /* l2fib bd/int seq_num */
      u8 l2_len;     /* ethernet header length */
      u8 shg;        /* split-horizon group */
      u8 bd_age;     /* aging enabled */
    } l2;

    /* l2tpv3 softwire encap, only valid there */
    struct
    {
      u32 pad[4]; /* do not overlay w/ ip.adj_index[0,1] */
      u8 next_index;
      u32 session_index;
    } l2t;

    /* L2 classify */
    struct
    {
      struct opaque_l2 pad;
      union
      {
        u32 table_index;
        u32 opaque_index;
      };
      u64 hash;
    } l2_classify;

    /* vnet policer */
    struct
    {
      u32 pad[8 - VLIB_N_RX_TX - 1]; /* to end of opaque */
      u32 index;
    } policer;

    /* interface output features */
    struct
    {
      /* don't overlap the adjacencies nor flow-hash */
      u32 __pad[3];
      u32 sad_index;
      u32 protect_index;
      u16 thread_index;
    } ipsec;

    /* MAP */
    struct
    {
      u16 mtu;
    } map;

    /* MAP-T */
    struct
    {
      u32 map_domain_index;
      struct
      {
        u32 saddr, daddr;
        u16 frag_offset; //Fragmentation header offset
        u16 l4_offset;   //L4 header overall offset
        u8 l4_protocol;  //The final protocol number
      } v6;              //Used by ip6_map_t only
      u16 checksum_offset; //L4 checksum overall offset
      u16 mtu;             //Exit MTU
    } map_t;

    /* IP Fragmentation */
    struct
    {
      u32 pad[2]; /* do not overlay w/ ip.adj_index[0,1] */
      u16 mtu;
      u8 next_index;
      u8 flags; //See ip_frag.h
    } ip_frag;

    /* COP - configurable junk filter(s) */
    struct
    {
      /* Current configuration index. */
      u32 current_config_index;
    } cop;

    /* LISP */
    struct
    {
      /* overlay address family */
      u16 overlay_afi;
    } lisp;

    /* TCP */
    struct
    {
      u32 connection_index;
      union
      {
        u32 seq_number;
        u32 next_node_opaque;
      };
      u32 seq_end;
      u32 ack_number;
      u16 hdr_offset;  /**< offset relative to ip hdr */
      u16 data_offset; /**< offset relative to ip hdr */
      u16 data_len;    /**< data len */
      u8 flags;
    } tcp;

    /* SNAT */
    struct
    {
      u32 flags;
      u32 required_thread_index;
    } snat;

    u32 unused[6];
  };
} vnet_buffer_opaque_t;

#define VNET_REWRITE_TOTAL_BYTES (VLIB_BUFFER_PRE_DATA_SIZE)

STATIC_ASSERT (STRUCT_SIZE_OF (vnet_buffer_opaque_t, ip.save_rewrite_length)
               == STRUCT_SIZE_OF (vnet_buffer_opaque_t,
                                  ip.reass.save_rewrite_length)
               && STRUCT_SIZE_OF (vnet_buffer_opaque_t,
                                  ip.reass.save_rewrite_length) ==
               STRUCT_SIZE_OF (vnet_buffer_opaque_t, mpls.save_rewrite_length)
               && STRUCT_SIZE_OF (vnet_buffer_opaque_t,
                                  mpls.save_rewrite_length) == 1
               && VNET_REWRITE_TOTAL_BYTES < UINT8_MAX,
               "save_rewrite_length member must be able to hold the max value of rewrite length");

STATIC_ASSERT (STRUCT_OFFSET_OF (vnet_buffer_opaque_t, ip.save_rewrite_length)
               == STRUCT_OFFSET_OF (vnet_buffer_opaque_t,
                                    ip.reass.save_rewrite_length)
               && STRUCT_OFFSET_OF (vnet_buffer_opaque_t,
                                    mpls.save_rewrite_length) ==
               STRUCT_OFFSET_OF (vnet_buffer_opaque_t,
                                 ip.reass.save_rewrite_length),
               "save_rewrite_length must be aligned so that reass doesn't overwrite it");

/*
 * The opaque field of the vlib_buffer_t is interpreted as a
 * vnet_buffer_opaque_t. Hence it should be big enough to accommodate one.
 */
STATIC_ASSERT (sizeof (vnet_buffer_opaque_t) <=
               STRUCT_SIZE_OF (vlib_buffer_t, opaque),
               "VNET buffer meta-data too large for vlib_buffer");

#define vnet_buffer(b) ((vnet_buffer_opaque_t *) (b)->opaque)

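/*
 * Illustrative usage (not specific to any one node): the opaque area is
 * always accessed through this macro, for example:
 *
 *   vlib_buffer_t *b = vlib_get_buffer (vm, bi);
 *   u32 sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_RX];
 *   vnet_buffer (b)->ip.adj_index[VLIB_TX] = adj_index;
 */
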
#define foreach_vnet_buffer_offload_flag                \
  _ (0, IP_CKSUM, "offload-ip-cksum", 1)                \
  _ (1, TCP_CKSUM, "offload-tcp-cksum", 1)              \
  _ (2, UDP_CKSUM, "offload-udp-cksum", 1)              \
  _ (3, OUTER_IP_CKSUM, "offload-outer-ip-cksum", 1)    \
  _ (4, OUTER_TCP_CKSUM, "offload-outer-tcp-cksum", 1)  \
  _ (5, OUTER_UDP_CKSUM, "offload-outer-udp-cksum", 1)

enum
{
#define _(bit, name, s, v) VNET_BUFFER_OFFLOAD_F_##name = (1 << bit),
  foreach_vnet_buffer_offload_flag
#undef _
};

/* Full cache line (64 bytes) of additional space */
typedef struct
{
  /**
   * QoS marking data that needs to persist from the recording nodes
   * (nominally in the ingress path) to the marking node (in the
   * egress path)
   */
  struct
  {
    u8 bits;
    u8 source;
  } qos;

  u8 loop_counter;
  u8 __unused[1];

  /* Group Based Policy */
  struct
  {
    u8 __unused;
    u8 flags;
    u16 sclass;
  } gbp;

  /**
   * The L4 payload size, set on input on GSO-enabled interfaces when we
   * receive a GSO packet (a chain of buffers with the first one having
   * the GSO bit set). It needs to persist all the way to interface-output:
   * if the egress interface is not GSO-enabled, we must perform the
   * segmentation ourselves and use this value to cut the payload
   * appropriately.
   */
  struct
  {
    u16 gso_size;
    /* size of L4 protocol header */
    u16 gso_l4_hdr_sz;

    /* offload flags */
    u32 oflags;
  };

  struct
  {
    u32 arc_next;
    union
    {
      u32 cached_session_index;
      u32 cached_dst_nat_session_index;
    };
  } nat;

  union
  {
    struct
    {
#if VLIB_BUFFER_TRACE_TRAJECTORY > 0
      /* buffer trajectory tracing */
      u16 *trajectory_trace;
#endif
    };
    struct
    {
      u64 pad[1];
      u64 pg_replay_timestamp;
    };
    u32 unused[8];
  };
} vnet_buffer_opaque2_t;

#define vnet_buffer2(b) ((vnet_buffer_opaque2_t *) (b)->opaque2)

/*
 * The opaque2 field of the vlib_buffer_t is interpreted as a
 * vnet_buffer_opaque2_t. Hence it should be big enough to accommodate one.
 */
STATIC_ASSERT (sizeof (vnet_buffer_opaque2_t) <=
               STRUCT_SIZE_OF (vlib_buffer_t, opaque2),
               "VNET buffer opaque2 meta-data too large for vlib_buffer");

#define gso_mtu_sz(b) (vnet_buffer2(b)->gso_size + \
                       vnet_buffer2(b)->gso_l4_hdr_sz + \
                       vnet_buffer(b)->l4_hdr_offset - \
                       vnet_buffer(b)->l3_hdr_offset)
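
/*
 * gso_mtu_sz reconstructs the per-segment L3 packet size: L4 payload
 * (gso_size) + L4 header (gso_l4_hdr_sz) + L3 header (l4_hdr_offset -
 * l3_hdr_offset). For example, assuming TCP over IPv4 with no options:
 * 1460 + 20 + 20 = 1500 bytes.
 */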

format_function_t format_vnet_buffer;
format_function_t format_vnet_buffer_offload;
format_function_t format_vnet_buffer_flags;
format_function_t format_vnet_buffer_opaque;
format_function_t format_vnet_buffer_opaque2;

static_always_inline void
vnet_buffer_offload_flags_set (vlib_buffer_t *b, u32 oflags)
{
  if (b->flags & VNET_BUFFER_F_OFFLOAD)
    {
      /* add a flag to existing offload */
      vnet_buffer2 (b)->oflags |= oflags;
    }
  else
    {
      /* no offload yet: reset offload flags to new value */
      vnet_buffer2 (b)->oflags = oflags;
      b->flags |= VNET_BUFFER_F_OFFLOAD;
    }
}

static_always_inline void
vnet_buffer_offload_flags_clear (vlib_buffer_t *b, u32 oflags)
{
  vnet_buffer2 (b)->oflags &= ~oflags;
  if (0 == vnet_buffer2 (b)->oflags)
    b->flags &= ~VNET_BUFFER_F_OFFLOAD;
}
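
/*
 * Illustrative usage: a node that wants the IP and TCP checksums computed
 * on output would request them with
 *
 *   vnet_buffer_offload_flags_set (b, VNET_BUFFER_OFFLOAD_F_IP_CKSUM |
 *                                       VNET_BUFFER_OFFLOAD_F_TCP_CKSUM);
 *
 * and a node that has already filled in the TCP checksum can drop that
 * request again with
 *
 *   vnet_buffer_offload_flags_clear (b, VNET_BUFFER_OFFLOAD_F_TCP_CKSUM);
 */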

#endif /* included_vnet_buffer_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */