/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * vnet/buffer.h: vnet buffer flags
 *
 * Copyright (c) 2008 Eliot Dresselhaus
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef included_vnet_buffer_h
#define included_vnet_buffer_h

#include <vlib/vlib.h>

/**
 * Flags that are set in the high order bits of ((vlib_buffer_t *) b)->flags
 */
#define foreach_vnet_buffer_flag                          \
  _ (1, L4_CHECKSUM_COMPUTED, "l4-cksum-computed", 1)     \
  _ (2, L4_CHECKSUM_CORRECT, "l4-cksum-correct", 1)       \
  _ (3, VLAN_2_DEEP, "vlan-2-deep", 1)                    \
  _ (4, VLAN_1_DEEP, "vlan-1-deep", 1)                    \
  _ (5, SPAN_CLONE, "span-clone", 1)                      \
  _ (6, LOOP_COUNTER_VALID, "loop-counter-valid", 0)      \
  _ (7, LOCALLY_ORIGINATED, "local", 1)                   \
  _ (8, IS_IP4, "ip4", 1)                                 \
  _ (9, IS_IP6, "ip6", 1)                                 \
  _ (10, OFFLOAD, "offload", 0)                           \
  _ (11, IS_NATED, "natted", 1)                           \
  _ (12, L2_HDR_OFFSET_VALID, "l2_hdr_offset_valid", 0)   \
  _ (13, L3_HDR_OFFSET_VALID, "l3_hdr_offset_valid", 0)   \
  _ (14, L4_HDR_OFFSET_VALID, "l4_hdr_offset_valid", 0)   \
  _ (15, FLOW_REPORT, "flow-report", 1)                   \
  _ (16, IS_DVR, "dvr", 1)                                \
  _ (17, QOS_DATA_VALID, "qos-data-valid", 0)             \
  _ (18, GSO, "gso", 0)                                   \
  _ (19, AVAIL1, "avail1", 1)                             \
  _ (20, AVAIL2, "avail2", 1)                             \
  _ (21, AVAIL3, "avail3", 1)                             \
  _ (22, AVAIL4, "avail4", 1)                             \
  _ (23, AVAIL5, "avail5", 1)                             \
  _ (24, AVAIL6, "avail6", 1)                             \
  _ (25, AVAIL7, "avail7", 1)                             \
  _ (26, AVAIL8, "avail8", 1)                             \
  _ (27, AVAIL9, "avail9", 1)

/*
 * When adding a flag, please allocate the FIRST available bit, redefine
 * AVAIL1 ... AVAILn-1, and remove AVAILn. Please keep the
 * VNET_BUFFER_FLAGS_ALL_AVAIL definition in sync.
 */

#define VNET_BUFFER_FLAGS_ALL_AVAIL                                     \
  (VNET_BUFFER_F_AVAIL1 | VNET_BUFFER_F_AVAIL2 | VNET_BUFFER_F_AVAIL3 | \
   VNET_BUFFER_F_AVAIL4 | VNET_BUFFER_F_AVAIL5 | VNET_BUFFER_F_AVAIL6 | \
   VNET_BUFFER_F_AVAIL7 | VNET_BUFFER_F_AVAIL8 | VNET_BUFFER_F_AVAIL9)

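/*
 * Illustrative sketch only (the IS_FOO name below is hypothetical): to add
 * a new flag, take over the first available bit by replacing the AVAIL1
 * entry and renumbering the remaining AVAIL entries, e.g.
 *
 *   _ (19, IS_FOO, "foo", 1)       \
 *   _ (20, AVAIL1, "avail1", 1)    \
 *   ...
 *   _ (27, AVAIL8, "avail8", 1)
 *
 * and then drop VNET_BUFFER_F_AVAIL9 from VNET_BUFFER_FLAGS_ALL_AVAIL.
 */
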
#define VNET_BUFFER_FLAGS_VLAN_BITS \
  (VNET_BUFFER_F_VLAN_1_DEEP | VNET_BUFFER_F_VLAN_2_DEEP)

enum
{
#define _(bit, name, s, v) VNET_BUFFER_F_##name = (1 << LOG2_VLIB_BUFFER_FLAG_USER(bit)),
  foreach_vnet_buffer_flag
#undef _
};

enum
{
#define _(bit, name, s, v) VNET_BUFFER_F_LOG2_##name = LOG2_VLIB_BUFFER_FLAG_USER(bit),
  foreach_vnet_buffer_flag
#undef _
};

/* Make sure that the vnet and vlib bits are disjoint */
STATIC_ASSERT (((VNET_BUFFER_FLAGS_ALL_AVAIL & VLIB_BUFFER_FLAGS_ALL) == 0),
               "VLIB / VNET buffer flags overlap");

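/*
 * Usage sketch (relies only on names generated above): the X-macro expands
 * to one enumerator per flag, e.g. VNET_BUFFER_F_IS_IP4 or VNET_BUFFER_F_GSO,
 * each occupying a user bit above the vlib-owned low bits. A graph node can
 * test or set them directly on the buffer's flags word:
 *
 *   if (b->flags & VNET_BUFFER_F_IS_IP4)
 *     ;                          // treat the payload as IPv4
 *   b->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
 *
 * The matching VNET_BUFFER_F_LOG2_* enumerators hold the bit positions
 * rather than the masks.
 */
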
#define foreach_buffer_opaque_union_subtype \
_(ip)                                       \
_(l2)                                       \
_(l2t)                                      \
_(l2_classify)                              \
_(policer)                                  \
_(ipsec)                                    \
_(map)                                      \
_(map_t)                                    \
_(ip_frag)                                  \
_(mpls)                                     \
_(tcp)

/*
 * vnet stack buffer opaque array overlay structure.
 * The vnet_buffer_opaque_t *must* be the same size as the
 * vlib_buffer_t "opaque" structure member, 32 bytes.
 *
 * When adding a union type, please add a stanza to
 * foreach_buffer_opaque_union_subtype (directly above).
 * Code in vnet_interface_init(...) verifies the size
 * of the union, and will announce any deviations in an
 * impossible-to-miss manner.
 */
typedef struct
{
  u32 sw_if_index[VLIB_N_RX_TX];
  i16 l2_hdr_offset;
  i16 l3_hdr_offset;
  i16 l4_hdr_offset;
  u8 feature_arc_index;
  u8 dont_waste_me;

  union
  {
    /* IP4/6 buffer opaque. */
    struct
    {
      /* Adjacency from destination IP address lookup [VLIB_TX].
         Adjacency from source IP address lookup [VLIB_RX].
         This gets set to ~0 until source lookup is performed. */
      u32 adj_index[VLIB_N_RX_TX];

      union
      {
        struct
        {
          /* Flow hash value for this packet, computed from the IP
             src/dst addresses, protocol and ports. */
          u32 flow_hash;

          union
          {
            /* next protocol */
            u32 save_protocol;

            /* Hint for transport protocols */
            u32 fib_index;
          };

          /* Rewrite length */
          u8 save_rewrite_length;

          /* MFIB RPF ID */
          u32 rpf_id;
        };

        /* ICMP */
        struct
        {
          u8 type;
          u8 code;
          u32 data;
        } icmp;

        /* reassembly */
        union
        {
          /* group the input/output variables to simplify the code; this way
           * we can hand off while keeping the input variables intact */
          struct
          {
            /* input variables */
            struct
            {
              u32 next_index;       /* index of next node - used by custom apps */
              u32 error_next_index; /* index of next node if error - used by custom apps */
            };
            /* handoff variables */
            struct
            {
              u16 owner_thread_index;
            };
          };
          /* output variables */
          struct
          {
            union
            {
              /* shallow virtual reassembly output variables */
              struct
              {
                u16 l4_src_port;    /* tcp/udp/icmp src port */
                u16 l4_dst_port;    /* tcp/udp/icmp dst port */
                u32 tcp_ack_number;
                u8 save_rewrite_length;
                u8 ip_proto;        /* protocol in ip header */
                u8 icmp_type_or_tcp_flags;
                u8 is_non_first_fragment;
                u32 tcp_seq_number;
              };
              /* full reassembly output variables */
              struct
              {
                u16 estimated_mtu;  /* estimated MTU calculated during reassembly */
              };
            };
          };
          /* internal variables used during reassembly */
          struct
          {
            u16 fragment_first;
            u16 fragment_last;
            u16 range_first;
            u16 range_last;
            u32 next_range_bi;
            u16 ip6_frag_hdr_offset;
          };
        } reass;
      };
    } ip;

    /*
     * MPLS:
     * data copied from the MPLS header that was popped from the packet
     * during the look-up.
     */
    struct
    {
      /* do not overlay w/ ip.adj_index[0,1] nor flow hash */
      u32 pad[VLIB_N_RX_TX + 1];
      u8 ttl;
      u8 exp;
      u8 first;
      u8 pyld_proto:3;          /* dpo_proto_t */
      u8 rsvd:5;
      /* Rewrite length */
      u8 save_rewrite_length;
      /* Save the MPLS header length, including the whole label stack */
      u8 mpls_hdr_length;
      /*
       * BIER - the number of bytes in the header.
       * The len field in the header is not authoritative; it's the
       * value in the table that counts.
       */
      struct
      {
        u8 n_bytes;
      } bier;
    } mpls;

    /* l2 bridging path, only valid there */
    struct opaque_l2
    {
      u32 feature_bitmap;
      u16 bd_index;             /* bridge-domain index */
      u16 l2fib_sn;             /* l2fib bd/int seq_num */
      u8 l2_len;                /* ethernet header length */
      u8 shg;                   /* split-horizon group */
      u8 bd_age;                /* aging enabled */
    } l2;

    /* l2tpv3 softwire encap, only valid there */
    struct
    {
      u32 pad[4];               /* do not overlay w/ ip.adj_index[0,1] */
      u8 next_index;
      u32 session_index;
    } l2t;

    /* L2 classify */
    struct
    {
      struct opaque_l2 pad;
      union
      {
        u32 table_index;
        u32 opaque_index;
      };
      u64 hash;
    } l2_classify;

    /* vnet policer */
    struct
    {
      u32 pad[8 - VLIB_N_RX_TX - 1];    /* to end of opaque */
      u32 index;
    } policer;

    /* interface output features */
    struct
    {
      /* don't overlap the adjacencies nor the flow-hash */
      u32 __pad[3];
      u32 sad_index;
      u32 protect_index;
      u16 thread_index;
    } ipsec;

    /* MAP */
    struct
    {
      u16 mtu;
    } map;

    /* MAP-T */
    struct
    {
      u32 map_domain_index;
      struct
      {
        u32 saddr, daddr;
        u16 frag_offset;        //Fragmentation header offset
        u16 l4_offset;          //L4 header overall offset
        u8 l4_protocol;         //The final protocol number
      } v6;                     //Used by ip6_map_t only
      u16 checksum_offset;      //L4 checksum overall offset
      u16 mtu;                  //Exit MTU
    } map_t;

    /* IP Fragmentation */
    struct
    {
      u32 pad[2];               /* do not overlay w/ ip.adj_index[0,1] */
      u16 mtu;
      u8 next_index;
      u8 flags;                 //See ip_frag.h
    } ip_frag;

    /* COP - configurable junk filter(s) */
    struct
    {
      /* Current configuration index. */
      u32 current_config_index;
    } cop;

    /* LISP */
    struct
    {
      /* overlay address family */
      u16 overlay_afi;
    } lisp;

    /* TCP */
    struct
    {
      u32 connection_index;
      union
      {
        u32 seq_number;
        u32 next_node_opaque;
      };
      u32 seq_end;
      u32 ack_number;
      u16 hdr_offset;           /**< offset relative to ip hdr */
      u16 data_offset;          /**< offset relative to ip hdr */
      u16 data_len;             /**< data len */
      u8 flags;
    } tcp;

    /* SNAT */
    struct
    {
      u32 flags;
      u32 required_thread_index;
    } snat;

    u32 unused[6];
  };
} vnet_buffer_opaque_t;

#define VNET_REWRITE_TOTAL_BYTES (VLIB_BUFFER_PRE_DATA_SIZE)

STATIC_ASSERT (STRUCT_SIZE_OF (vnet_buffer_opaque_t, ip.save_rewrite_length)
               == STRUCT_SIZE_OF (vnet_buffer_opaque_t,
                                  ip.reass.save_rewrite_length)
               && STRUCT_SIZE_OF (vnet_buffer_opaque_t,
                                  ip.reass.save_rewrite_length) ==
               STRUCT_SIZE_OF (vnet_buffer_opaque_t, mpls.save_rewrite_length)
               && STRUCT_SIZE_OF (vnet_buffer_opaque_t,
                                  mpls.save_rewrite_length) == 1
               && VNET_REWRITE_TOTAL_BYTES < UINT8_MAX,
               "save_rewrite_length member must be able to hold the max value of rewrite length");

STATIC_ASSERT (STRUCT_OFFSET_OF (vnet_buffer_opaque_t, ip.save_rewrite_length)
               == STRUCT_OFFSET_OF (vnet_buffer_opaque_t,
                                    ip.reass.save_rewrite_length)
               && STRUCT_OFFSET_OF (vnet_buffer_opaque_t,
                                    mpls.save_rewrite_length) ==
               STRUCT_OFFSET_OF (vnet_buffer_opaque_t,
                                 ip.reass.save_rewrite_length),
               "save_rewrite_length must be aligned so that reass doesn't overwrite it");

/*
 * The opaque field of the vlib_buffer_t is interpreted as a
 * vnet_buffer_opaque_t. Hence it should be big enough to accommodate one.
 */
STATIC_ASSERT (sizeof (vnet_buffer_opaque_t) <=
               STRUCT_SIZE_OF (vlib_buffer_t, opaque),
               "VNET buffer meta-data too large for vlib_buffer");

#define vnet_buffer(b) ((vnet_buffer_opaque_t *) (b)->opaque)

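/*
 * Usage sketch (illustrative, using only members defined above): a node
 * reads and writes per-packet metadata through the vnet_buffer() accessor:
 *
 *   u32 sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_RX];
 *   vnet_buffer (b)->ip.adj_index[VLIB_TX] = adj_index0;
 *
 * Which union member is meaningful depends on where the buffer currently
 * sits in the graph; nothing tags the active interpretation at run time.
 */
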
#define foreach_vnet_buffer_offload_flag                \
  _ (0, IP_CKSUM, "offload-ip-cksum", 1)                \
  _ (1, TCP_CKSUM, "offload-tcp-cksum", 1)              \
  _ (2, UDP_CKSUM, "offload-udp-cksum", 1)              \
  _ (3, OUTER_IP_CKSUM, "offload-outer-ip-cksum", 1)    \
  _ (4, OUTER_TCP_CKSUM, "offload-outer-tcp-cksum", 1)  \
  _ (5, OUTER_UDP_CKSUM, "offload-outer-udp-cksum", 1)

enum
{
#define _(bit, name, s, v) VNET_BUFFER_OFFLOAD_F_##name = (1 << bit),
  foreach_vnet_buffer_offload_flag
#undef _
};

/* Full cache line (64 bytes) of additional space */
typedef struct
{
  /**
   * QoS marking data that needs to persist from the recording nodes
   * (nominally in the ingress path) to the marking node (in the
   * egress path)
   */
  struct
  {
    u8 bits;
    u8 source;
  } qos;

  u8 loop_counter;
  u8 __unused[1];

  /* Group Based Policy */
  struct
  {
    u8 __unused;
    u8 flags;
    u16 sclass;
  } gbp;

  /**
   * The L4 payload size, set on input on GSO-enabled interfaces when we
   * receive a GSO packet (a chain of buffers with the first one having the
   * GSO bit set). It needs to persist all the way to interface-output: if
   * the egress interface is not GSO-enabled, we must perform the
   * segmentation and use this value to cut the payload appropriately.
   */
  struct
  {
    u16 gso_size;
    /* size of L4 protocol header */
    u16 gso_l4_hdr_sz;

    /* offload flags */
    u32 oflags;
  };

  struct
  {
    u32 arc_next;
    /* cached session index from previous node */
    u32 cached_session_index;
  } nat;

  union
  {
    struct
    {
#if VLIB_BUFFER_TRACE_TRAJECTORY > 0
      /* buffer trajectory tracing */
      u16 *trajectory_trace;
#endif
    };
    struct
    {
      u64 pad[1];
      u64 pg_replay_timestamp;
    };
    u32 unused[8];
  };
} vnet_buffer_opaque2_t;

#define vnet_buffer2(b) ((vnet_buffer_opaque2_t *) (b)->opaque2)

/*
 * The opaque2 field of the vlib_buffer_t is interpreted as a
 * vnet_buffer_opaque2_t. Hence it should be big enough to accommodate one.
 */
STATIC_ASSERT (sizeof (vnet_buffer_opaque2_t) <=
               STRUCT_SIZE_OF (vlib_buffer_t, opaque2),
               "VNET buffer opaque2 meta-data too large for vlib_buffer");

#define gso_mtu_sz(b) (vnet_buffer2 (b)->gso_size +      \
                       vnet_buffer2 (b)->gso_l4_hdr_sz + \
                       vnet_buffer (b)->l4_hdr_offset -  \
                       vnet_buffer (b)->l3_hdr_offset)

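/*
 * Informal reading of the macro above: gso_mtu_sz(b) is the L3 size a single
 * segment of this GSO chain would occupy on the wire - the L4 payload budget
 * (gso_size), plus the L4 header, plus the L3 header length recovered as
 * l4_hdr_offset - l3_hdr_offset. A software-segmentation path might compare
 * it against the egress MTU (the mtu variable here is hypothetical, obtained
 * elsewhere):
 *
 *   if (gso_mtu_sz (b) > mtu)
 *     ;                          // segment in software before transmit
 */
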
format_function_t format_vnet_buffer;
format_function_t format_vnet_buffer_offload;

static_always_inline void
vnet_buffer_offload_flags_set (vlib_buffer_t *b, u32 oflags)
{
  vnet_buffer2 (b)->oflags |= oflags;
  b->flags |= VNET_BUFFER_F_OFFLOAD;
}

static_always_inline void
vnet_buffer_offload_flags_clear (vlib_buffer_t *b, u32 oflags)
{
  vnet_buffer2 (b)->oflags &= ~oflags;
  if (0 == vnet_buffer2 (b)->oflags)
    b->flags &= ~VNET_BUFFER_F_OFFLOAD;
}
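
/*
 * Usage sketch (the calling context is assumed; the helpers are the ones
 * defined just above): a driver or feature node requests checksum offload
 * per buffer, and a consumer checks the summary flag before looking at the
 * detailed bits:
 *
 *   vnet_buffer_offload_flags_set (b, VNET_BUFFER_OFFLOAD_F_IP_CKSUM |
 *                                       VNET_BUFFER_OFFLOAD_F_TCP_CKSUM);
 *   ...
 *   if (b->flags & VNET_BUFFER_F_OFFLOAD)
 *     {
 *       u32 oflags = vnet_buffer2 (b)->oflags;
 *       if (oflags & VNET_BUFFER_OFFLOAD_F_TCP_CKSUM)
 *         ;                      // compute or verify the TCP checksum here
 *     }
 *
 * vnet_buffer_offload_flags_clear() drops VNET_BUFFER_F_OFFLOAD
 * automatically once no offload bits remain set.
 */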

#endif /* included_vnet_buffer_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */