/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * vnet/buffer.h: vnet buffer flags
 *
 * Copyright (c) 2008 Eliot Dresselhaus
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef included_vnet_buffer_h
#define included_vnet_buffer_h

#include <vlib/vlib.h>

/**
 * Flags that are set in the high order bits of ((vlib_buffer_t *) b)->flags
 */
#define foreach_vnet_buffer_flag \
  _ (1, L4_CHECKSUM_COMPUTED, "l4-cksum-computed", 1) \
  _ (2, L4_CHECKSUM_CORRECT, "l4-cksum-correct", 1) \
  _ (3, VLAN_2_DEEP, "vlan-2-deep", 1) \
  _ (4, VLAN_1_DEEP, "vlan-1-deep", 1) \
  _ (5, SPAN_CLONE, "span-clone", 1) \
  _ (6, LOOP_COUNTER_VALID, "loop-counter-valid", 0) \
  _ (7, LOCALLY_ORIGINATED, "local", 1) \
  _ (8, IS_IP4, "ip4", 1) \
  _ (9, IS_IP6, "ip6", 1) \
  _ (10, OFFLOAD, "offload", 0) \
  _ (11, IS_NATED, "natted", 1) \
  _ (12, L2_HDR_OFFSET_VALID, "l2_hdr_offset_valid", 0) \
  _ (13, L3_HDR_OFFSET_VALID, "l3_hdr_offset_valid", 0) \
  _ (14, L4_HDR_OFFSET_VALID, "l4_hdr_offset_valid", 0) \
  _ (15, FLOW_REPORT, "flow-report", 1) \
  _ (16, IS_DVR, "dvr", 1) \
  _ (17, QOS_DATA_VALID, "qos-data-valid", 0) \
  _ (18, GSO, "gso", 0) \
  _ (19, AVAIL1, "avail1", 1) \
  _ (20, AVAIL2, "avail2", 1) \
  _ (21, AVAIL3, "avail3", 1) \
  _ (22, AVAIL4, "avail4", 1) \
  _ (23, AVAIL5, "avail5", 1) \
  _ (24, AVAIL6, "avail6", 1) \
  _ (25, AVAIL7, "avail7", 1) \
  _ (26, AVAIL8, "avail8", 1) \
  _ (27, AVAIL9, "avail9", 1)

/*
 * To add a new flag: take the FIRST available bit (rename AVAIL1 to the
 * new flag), renumber the remaining AVAIL2 ... AVAILn entries down to
 * AVAIL1 ... AVAILn-1, and keep the VNET_BUFFER_FLAGS_ALL_AVAIL
 * definition below in sync.  See the illustrative sketch after that
 * definition.
 */

#define VNET_BUFFER_FLAGS_ALL_AVAIL \
  (VNET_BUFFER_F_AVAIL1 | VNET_BUFFER_F_AVAIL2 | VNET_BUFFER_F_AVAIL3 | \
   VNET_BUFFER_F_AVAIL4 | VNET_BUFFER_F_AVAIL5 | VNET_BUFFER_F_AVAIL6 | \
   VNET_BUFFER_F_AVAIL7 | VNET_BUFFER_F_AVAIL8 | VNET_BUFFER_F_AVAIL9)
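
/*
 * Illustrative sketch of the procedure above -- not part of this header,
 * and MY_FEATURE is a hypothetical flag name.  The new flag would take
 * bit 19 (the first available bit), the remaining spare bits would be
 * renamed AVAIL1 .. AVAIL8, the old AVAIL9 entry would be dropped, and
 * VNET_BUFFER_FLAGS_ALL_AVAIL would be trimmed to end at
 * VNET_BUFFER_F_AVAIL8:
 *
 *   _ (19, MY_FEATURE, "my-feature", 1) \
 *   _ (20, AVAIL1, "avail1", 1) \
 *   ...
 *   _ (27, AVAIL8, "avail8", 1)
 */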

#define VNET_BUFFER_FLAGS_VLAN_BITS \
  (VNET_BUFFER_F_VLAN_1_DEEP | VNET_BUFFER_F_VLAN_2_DEEP)

enum
{
#define _(bit, name, s, v) VNET_BUFFER_F_##name = (1 << LOG2_VLIB_BUFFER_FLAG_USER(bit)),
  foreach_vnet_buffer_flag
#undef _
};

enum
{
#define _(bit, name, s, v) VNET_BUFFER_F_LOG2_##name = LOG2_VLIB_BUFFER_FLAG_USER(bit),
  foreach_vnet_buffer_flag
#undef _
};
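
/*
 * Minimal usage sketch (hypothetical node code, not part of this header):
 * VNET_BUFFER_F_* are masks tested and set in b->flags, while
 * VNET_BUFFER_F_LOG2_* are the corresponding bit positions.  The helper
 * name and the decision to mark the packet here are illustrative only.
 */
#if 0
static_always_inline void
example_mark_ip4 (vlib_buffer_t * b)
{
  if (b->flags & VNET_BUFFER_F_IS_IP4)
    return;			/* already classified by an earlier node */

  /* record that this is an IPv4 packet whose L3 header starts at the
     current data offset */
  vnet_buffer (b)->l3_hdr_offset = b->current_data;
  b->flags |= VNET_BUFFER_F_IS_IP4 | VNET_BUFFER_F_L3_HDR_OFFSET_VALID;
}
#endif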

/* Make sure that the vnet and vlib bits are disjoint */
STATIC_ASSERT (((VNET_BUFFER_FLAGS_ALL_AVAIL & VLIB_BUFFER_FLAGS_ALL) == 0),
	       "VLIB / VNET buffer flags overlap");

#define foreach_vnet_buffer_offload_flag \
  _ (0, IP_CKSUM, "offload-ip-cksum", 1) \
  _ (1, TCP_CKSUM, "offload-tcp-cksum", 1) \
  _ (2, UDP_CKSUM, "offload-udp-cksum", 1) \
  _ (3, OUTER_IP_CKSUM, "offload-outer-ip-cksum", 1) \
  _ (4, OUTER_TCP_CKSUM, "offload-outer-tcp-cksum", 1) \
  _ (5, OUTER_UDP_CKSUM, "offload-outer-udp-cksum", 1)

typedef enum
{
#define _(bit, name, s, v) VNET_BUFFER_OFFLOAD_F_##name = (1 << bit),
  foreach_vnet_buffer_offload_flag
#undef _
} vnet_buffer_oflags_t;

#define foreach_buffer_opaque_union_subtype \
_(ip) \
_(l2) \
_(l2t) \
_(l2_classify) \
_(policer) \
_(ipsec) \
_(map) \
_(map_t) \
_(ip_frag) \
_(mpls) \
_(tcp)

/*
 * vnet stack buffer opaque array overlay structure.
 * The vnet_buffer_opaque_t *must* be the same size as the
 * vlib_buffer_t "opaque" structure member, 32 bytes.
 *
 * When adding a union type, please add a stanza to
 * foreach_buffer_opaque_union_subtype (directly above).
 * Code in vnet_interface_init(...) verifies the size
 * of the union, and will announce any deviations in an
 * impossible-to-miss manner.
 */
typedef struct
{
  u32 sw_if_index[VLIB_N_RX_TX];
  i16 l2_hdr_offset;
  i16 l3_hdr_offset;
  i16 l4_hdr_offset;
  u8 feature_arc_index;
  /* offload flags */
  vnet_buffer_oflags_t oflags : 8;

  union
  {
    /* IP4/6 buffer opaque. */
    struct
    {
      /* Adjacency from destination IP address lookup [VLIB_TX].
         Adjacency from source IP address lookup [VLIB_RX].
         This gets set to ~0 until source lookup is performed. */
      u32 adj_index[VLIB_N_RX_TX];

      union
      {
        struct
        {
          /* Flow hash value for this packet computed from IP src/dst address
             protocol and ports. */
          u32 flow_hash;

          union
          {
            /* next protocol */
            u32 save_protocol;

            /* Hint for transport protocols */
            u32 fib_index;
          };

          /* Rewrite length */
          u8 save_rewrite_length;

          /* MFIB RPF ID */
          u32 rpf_id;
        };

        /* ICMP */
        struct
        {
          u8 type;
          u8 code;
          u32 data;
        } icmp;

        /* reassembly */
        union
        {
          /* group input/output to simplify the code, this way
           * we can handoff while keeping input variables intact */
          struct
          {
            /* input variables */
            struct
            {
              u32 next_index;       /* index of next node - used by custom apps */
              u32 error_next_index; /* index of next node if error - used by custom apps */
            };
            /* handoff variables */
            struct
            {
              u16 owner_thread_index;
            };
          };
          /* output variables */
          struct
          {
            union
            {
              /* shallow virtual reassembly output variables */
              struct
              {
                u16 l4_src_port;  /* tcp/udp/icmp src port */
                u16 l4_dst_port;  /* tcp/udp/icmp dst port */
                u32 tcp_ack_number;
                u8 save_rewrite_length;
                u8 ip_proto;      /* protocol in ip header */
                u8 icmp_type_or_tcp_flags;
                u8 is_non_first_fragment;
                u32 tcp_seq_number;
              };
              /* full reassembly output variables */
              struct
              {
                u16 estimated_mtu;  /* estimated MTU calculated during reassembly */
              };
            };
          };
          /* internal variables used during reassembly */
          struct
          {
            u16 fragment_first;
            u16 fragment_last;
            u16 range_first;
            u16 range_last;
            u32 next_range_bi;
            u16 ip6_frag_hdr_offset;
          };
        } reass;
      };
    } ip;

    /*
     * MPLS:
     * data copied from the MPLS header that was popped from the packet
     * during the look-up.
     */
    struct
    {
      /* do not overlay w/ ip.adj_index[0,1] nor flow hash */
      u32 pad[VLIB_N_RX_TX + 1];
      u8 ttl;
      u8 exp;
      u8 first;
      u8 pyld_proto : 3;  /* dpo_proto_t */
      u8 rsvd : 5;
      /* Rewrite length */
      u8 save_rewrite_length;
      /* Save the mpls header length including all label stack */
      u8 mpls_hdr_length;
      /*
       * BIER - the number of bytes in the header.
       * The len field in the header is not authoritative; it's the
       * value in the table that counts.
       */
      struct
      {
        u8 n_bytes;
      } bier;
    } mpls;

    /* l2 bridging path, only valid there */
    struct opaque_l2
    {
      u32 feature_bitmap;
      u16 bd_index;  /* bridge-domain index */
      u16 l2fib_sn;  /* l2fib bd/int seq_num */
      u8 l2_len;     /* ethernet header length */
      u8 shg;        /* split-horizon group */
      u8 bd_age;     /* aging enabled */
    } l2;

    /* l2tpv3 softwire encap, only valid there */
    struct
    {
      u32 pad[4];  /* do not overlay w/ ip.adj_index[0,1] */
      u8 next_index;
      u32 session_index;
    } l2t;

    /* L2 classify */
    struct
    {
      struct opaque_l2 pad;
      union
      {
        u32 table_index;
        u32 opaque_index;
      };
      u64 hash;
    } l2_classify;

    /* vnet policer */
    struct
    {
      u32 pad[8 - VLIB_N_RX_TX - 1];  /* to end of opaque */
      u32 index;
    } policer;

    /* interface output features */
    struct
    {
      /* don't overlap the adjacencies nor flow-hash */
      u32 __pad[3];
      u32 sad_index;
      u32 protect_index;
      u16 thread_index;
    } ipsec;

    /* MAP */
    struct
    {
      u16 mtu;
    } map;

    /* MAP-T */
    struct
    {
      u32 map_domain_index;
      struct
      {
        u32 saddr, daddr;
        u16 frag_offset;  // Fragmentation header offset
        u16 l4_offset;    // L4 header overall offset
        u8 l4_protocol;   // The final protocol number
      } v6;               // Used by ip6_map_t only
      u16 checksum_offset;  // L4 checksum overall offset
      u16 mtu;              // Exit MTU
    } map_t;

    /* IP Fragmentation */
    struct
    {
      u32 pad[2];  /* do not overlay w/ ip.adj_index[0,1] */
      u16 mtu;
      u8 next_index;
      u8 flags;    // See ip_frag.h
    } ip_frag;

    /* COP - configurable junk filter(s) */
    struct
    {
      /* Current configuration index. */
      u32 current_config_index;
    } cop;

    /* LISP */
    struct
    {
      /* overlay address family */
      u16 overlay_afi;
    } lisp;

    /* TCP */
    struct
    {
      u32 connection_index;
      union
      {
        u32 seq_number;
        u32 next_node_opaque;
      };
      u32 seq_end;
      u32 ack_number;
      u16 hdr_offset;   /**< offset relative to ip hdr */
      u16 data_offset;  /**< offset relative to ip hdr */
      u16 data_len;     /**< data len */
      u8 flags;
    } tcp;

    /* SNAT */
    struct
    {
      u32 flags;
      u32 required_thread_index;
    } snat;

    u32 unused[6];
  };
} vnet_buffer_opaque_t;

#define VNET_REWRITE_TOTAL_BYTES (VLIB_BUFFER_PRE_DATA_SIZE)

STATIC_ASSERT (STRUCT_SIZE_OF (vnet_buffer_opaque_t, ip.save_rewrite_length)
	       == STRUCT_SIZE_OF (vnet_buffer_opaque_t,
				  ip.reass.save_rewrite_length)
	       && STRUCT_SIZE_OF (vnet_buffer_opaque_t,
				  ip.reass.save_rewrite_length) ==
	       STRUCT_SIZE_OF (vnet_buffer_opaque_t, mpls.save_rewrite_length)
	       && STRUCT_SIZE_OF (vnet_buffer_opaque_t,
				  mpls.save_rewrite_length) == 1
	       && VNET_REWRITE_TOTAL_BYTES < UINT8_MAX,
	       "save_rewrite_length member must be able to hold the max value of rewrite length");

STATIC_ASSERT (STRUCT_OFFSET_OF (vnet_buffer_opaque_t, ip.save_rewrite_length)
	       == STRUCT_OFFSET_OF (vnet_buffer_opaque_t,
				    ip.reass.save_rewrite_length)
	       && STRUCT_OFFSET_OF (vnet_buffer_opaque_t,
				    mpls.save_rewrite_length) ==
	       STRUCT_OFFSET_OF (vnet_buffer_opaque_t,
				 ip.reass.save_rewrite_length),
	       "save_rewrite_length must be aligned so that reass doesn't overwrite it");

/*
 * The opaque field of the vlib_buffer_t is interpreted as a
 * vnet_buffer_opaque_t. Hence it should be big enough to accommodate one.
 */
STATIC_ASSERT (sizeof (vnet_buffer_opaque_t) <=
	       STRUCT_SIZE_OF (vlib_buffer_t, opaque),
	       "VNET buffer meta-data too large for vlib_buffer");

#define vnet_buffer(b) ((vnet_buffer_opaque_t *) (b)->opaque)
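
/*
 * Minimal usage sketch (hypothetical, not part of this header): graph node
 * code reaches the per-protocol metadata above through vnet_buffer ().
 * Which union member is meaningful depends on the arc the buffer is on;
 * the helper name below is illustrative only.
 */
#if 0
static_always_inline u32
example_rx_sw_if_index (vlib_buffer_t * b)
{
  /* software interface the packet arrived on */
  return vnet_buffer (b)->sw_if_index[VLIB_RX];
}
#endif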

/* Full cache line (64 bytes) of additional space */
typedef struct
{
  /**
   * QoS marking data that needs to persist from the recording nodes
   * (nominally in the ingress path) to the marking node (in the
   * egress path)
   */
  struct
  {
    u8 bits;
    u8 source;
  } qos;

  u8 loop_counter;
  u8 __unused[1];

  /* Group Based Policy */
  struct
  {
    u8 __unused;
    u8 flags;
    u16 sclass;
  } gbp;

  /**
   * The L4 payload size, set on input on GSO-enabled interfaces when we
   * receive a GSO packet (a chain of buffers with the GSO bit set on the
   * first one).  It must persist all the way to interface-output: if the
   * egress interface is not GSO-enabled, we perform the segmentation
   * there and use this value to cut the payload appropriately.
   */
  struct
  {
    u16 gso_size;
    /* size of L4 protocol header */
    u16 gso_l4_hdr_sz;
  };

  struct
  {
    u32 arc_next;
    union
    {
      u32 cached_session_index;
      u32 cached_dst_nat_session_index;
    };
  } nat;

  union
  {
    struct
    {
      u64 pad[1];
      u64 pg_replay_timestamp;
    };
    u32 unused[8];
  };
} vnet_buffer_opaque2_t;

#define vnet_buffer2(b) ((vnet_buffer_opaque2_t *) (b)->opaque2)

/*
 * The opaque2 field of the vlib_buffer_t is interpreted as a
 * vnet_buffer_opaque2_t. Hence it should be big enough to accommodate one.
 */
STATIC_ASSERT (sizeof (vnet_buffer_opaque2_t) <=
	       STRUCT_SIZE_OF (vlib_buffer_t, opaque2),
	       "VNET buffer opaque2 meta-data too large for vlib_buffer");

#define gso_mtu_sz(b) (vnet_buffer2 (b)->gso_size + \
		       vnet_buffer2 (b)->gso_l4_hdr_sz + \
		       vnet_buffer (b)->l4_hdr_offset - \
		       vnet_buffer (b)->l3_hdr_offset)
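
/*
 * Usage sketch (hypothetical, not part of this header): gso_mtu_sz() is
 * roughly the size of one IP packet that segmenting this GSO buffer would
 * produce (L3 header + L4 header + per-segment payload), so a node might
 * compare it against the egress link MTU.  The real GSO path also checks
 * interface offload capabilities; "hw_mtu" is an assumed variable, not a
 * field defined here.
 */
#if 0
static_always_inline int
example_needs_sw_segmentation (vlib_buffer_t * b, u32 hw_mtu)
{
  return (b->flags & VNET_BUFFER_F_GSO) && gso_mtu_sz (b) > hw_mtu;
}
#endif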
Andrew Yourtchenko6a7cff72018-10-12 16:09:22 +0200513
514
Damjan Marionbd846cd2017-11-21 13:12:41 +0100515format_function_t format_vnet_buffer;
Mohsin Kazmi68095382021-02-10 11:26:24 +0100516format_function_t format_vnet_buffer_offload;
Damjan Marion25ab6c52021-03-05 14:41:25 +0100517format_function_t format_vnet_buffer_flags;
518format_function_t format_vnet_buffer_opaque;
519format_function_t format_vnet_buffer_opaque2;
Mohsin Kazmi68095382021-02-10 11:26:24 +0100520
521static_always_inline void
Mohsin Kazmi36f7a6a2021-05-05 14:26:38 +0200522vnet_buffer_offload_flags_set (vlib_buffer_t *b, vnet_buffer_oflags_t oflags)
Mohsin Kazmi68095382021-02-10 11:26:24 +0100523{
Benoît Ganneaa80f072021-02-18 10:34:33 +0100524 if (b->flags & VNET_BUFFER_F_OFFLOAD)
525 {
526 /* add a flag to existing offload */
Mohsin Kazmia7e830e2021-04-23 15:16:50 +0200527 vnet_buffer (b)->oflags |= oflags;
Benoît Ganneaa80f072021-02-18 10:34:33 +0100528 }
529 else
530 {
531 /* no offload yet: reset offload flags to new value */
Mohsin Kazmia7e830e2021-04-23 15:16:50 +0200532 vnet_buffer (b)->oflags = oflags;
Benoît Ganneaa80f072021-02-18 10:34:33 +0100533 b->flags |= VNET_BUFFER_F_OFFLOAD;
534 }
Mohsin Kazmi68095382021-02-10 11:26:24 +0100535}
536
537static_always_inline void
Mohsin Kazmi36f7a6a2021-05-05 14:26:38 +0200538vnet_buffer_offload_flags_clear (vlib_buffer_t *b, vnet_buffer_oflags_t oflags)
Mohsin Kazmi68095382021-02-10 11:26:24 +0100539{
Mohsin Kazmia7e830e2021-04-23 15:16:50 +0200540 vnet_buffer (b)->oflags &= ~oflags;
541 if (0 == vnet_buffer (b)->oflags)
Mohsin Kazmi68095382021-02-10 11:26:24 +0100542 b->flags &= ~VNET_BUFFER_F_OFFLOAD;
543}
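
/*
 * Usage sketch (hypothetical, not part of this header): a driver or an
 * output-feature node requests checksum offload with the helpers above.
 * vnet_buffer_offload_flags_set() keeps VNET_BUFFER_F_OFFLOAD in b->flags
 * in sync with the per-buffer oflags; _clear() drops the summary flag once
 * the last offload request is gone.  The helper name is illustrative only.
 */
#if 0
static_always_inline void
example_request_tcp_csum_offload (vlib_buffer_t * b)
{
  /* ask downstream hardware to fill in the IP and TCP checksums */
  vnet_buffer_offload_flags_set (b, VNET_BUFFER_OFFLOAD_F_IP_CKSUM |
				      VNET_BUFFER_OFFLOAD_F_TCP_CKSUM);

  /* ... later, if the TCP checksum was computed in software after all ... */
  vnet_buffer_offload_flags_clear (b, VNET_BUFFER_OFFLOAD_F_TCP_CKSUM);
}
#endif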

#endif /* included_vnet_buffer_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */