/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * vnet/buffer.h: vnet buffer flags
 *
 * Copyright (c) 2008 Eliot Dresselhaus
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef included_vnet_buffer_h
#define included_vnet_buffer_h

#include <vlib/vlib.h>

/**
 * Flags that are set in the high-order bits of ((vlib_buffer_t *) b)->flags
 */
#define foreach_vnet_buffer_flag                        \
  _( 1, L4_CHECKSUM_COMPUTED, "l4-cksum-computed", 1)   \
  _( 2, L4_CHECKSUM_CORRECT, "l4-cksum-correct", 1)     \
  _( 3, VLAN_2_DEEP, "vlan-2-deep", 1)                  \
  _( 4, VLAN_1_DEEP, "vlan-1-deep", 1)                  \
  _( 5, SPAN_CLONE, "span-clone", 1)                    \
  _( 6, LOOP_COUNTER_VALID, "loop-counter-valid", 0)    \
  _( 7, LOCALLY_ORIGINATED, "local", 1)                 \
  _( 8, IS_IP4, "ip4", 1)                               \
  _( 9, IS_IP6, "ip6", 1)                               \
  _(10, OFFLOAD_IP_CKSUM, "offload-ip-cksum", 1)        \
  _(11, OFFLOAD_TCP_CKSUM, "offload-tcp-cksum", 1)      \
  _(12, OFFLOAD_UDP_CKSUM, "offload-udp-cksum", 1)      \
  _(13, IS_NATED, "natted", 1)                          \
  _(14, L2_HDR_OFFSET_VALID, "l2_hdr_offset_valid", 0)  \
  _(15, L3_HDR_OFFSET_VALID, "l3_hdr_offset_valid", 0)  \
  _(16, L4_HDR_OFFSET_VALID, "l4_hdr_offset_valid", 0)  \
  _(17, FLOW_REPORT, "flow-report", 1)                  \
  _(18, IS_DVR, "dvr", 1)                               \
  _(19, QOS_DATA_VALID, "qos-data-valid", 0)            \
  _(20, GSO, "gso", 0)                                  \
  _(21, AVAIL1, "avail1", 1)                            \
  _(22, AVAIL2, "avail2", 1)                            \
  _(23, AVAIL3, "avail3", 1)                            \
  _(24, AVAIL4, "avail4", 1)                            \
  _(25, AVAIL5, "avail5", 1)                            \
  _(26, AVAIL6, "avail6", 1)                            \
  _(27, AVAIL7, "avail7", 1)

/*
 * Please allocate the FIRST available bit, redefine
 * AVAIL1 ... AVAILn-1, and remove AVAILn. Please maintain the
 * VNET_BUFFER_FLAGS_ALL_AVAIL definition.
 */

#define VNET_BUFFER_FLAGS_ALL_AVAIL                                     \
  (VNET_BUFFER_F_AVAIL1 | VNET_BUFFER_F_AVAIL2 | VNET_BUFFER_F_AVAIL3 | \
   VNET_BUFFER_F_AVAIL4 | VNET_BUFFER_F_AVAIL5 | VNET_BUFFER_F_AVAIL6 | \
   VNET_BUFFER_F_AVAIL7)
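
/*
 * Illustrative sketch (hypothetical, not part of the original header):
 * following the convention above, a new flag named MY_FEATURE would take
 * the first available bit and shrink the AVAIL pool by one entry, e.g.
 *
 *   _(21, MY_FEATURE, "my-feature", 1) \
 *   _(22, AVAIL1, "avail1", 1)         \
 *   ...
 *   _(27, AVAIL6, "avail6", 1)
 *
 * with VNET_BUFFER_FLAGS_ALL_AVAIL trimmed to end at VNET_BUFFER_F_AVAIL6.
 */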

#define VNET_BUFFER_FLAGS_VLAN_BITS \
  (VNET_BUFFER_F_VLAN_1_DEEP | VNET_BUFFER_F_VLAN_2_DEEP)

enum
{
#define _(bit, name, s, v) VNET_BUFFER_F_##name = (1 << LOG2_VLIB_BUFFER_FLAG_USER(bit)),
  foreach_vnet_buffer_flag
#undef _
};

enum
{
#define _(bit, name, s, v) VNET_BUFFER_F_LOG2_##name = LOG2_VLIB_BUFFER_FLAG_USER(bit),
  foreach_vnet_buffer_flag
#undef _
};

/* Make sure that the vnet and vlib bits are disjoint */
STATIC_ASSERT (((VNET_BUFFER_FLAGS_ALL_AVAIL & VLIB_BUFFER_FLAGS_ALL) == 0),
               "VLIB / VNET buffer flags overlap");
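
/*
 * Illustrative sketch (not part of the original header): each
 * VNET_BUFFER_F_* value above expands to a single bit placed above the
 * vlib flags, so nodes test and set them with plain bit operations on
 * b->flags, e.g. (assuming a vlib_buffer_t *b):
 *
 *   if ((b->flags & VNET_BUFFER_F_IS_IP4)
 *       && (b->flags & VNET_BUFFER_F_L4_CHECKSUM_CORRECT))
 *     take_verified_ip4_fast_path (b);   (hypothetical helper)
 *
 *   b->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
 */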

#define foreach_buffer_opaque_union_subtype     \
_(ip)                                           \
_(l2)                                           \
_(l2t)                                          \
_(l2_classify)                                  \
_(policer)                                      \
_(ipsec)                                        \
_(map)                                          \
_(map_t)                                        \
_(ip_frag)                                      \
_(mpls)                                         \
_(tcp)

/*
 * vnet stack buffer opaque array overlay structure.
 * The vnet_buffer_opaque_t *must* be the same size as the
 * vlib_buffer_t "opaque" structure member, 32 bytes.
 *
 * When adding a union type, please add a stanza to
 * foreach_buffer_opaque_union_subtype (directly above).
 * Code in vnet_interface_init(...) verifies the size
 * of the union, and will announce any deviations in an
 * impossible-to-miss manner.
 */
typedef struct
{
  u32 sw_if_index[VLIB_N_RX_TX];
  i16 l2_hdr_offset;
  i16 l3_hdr_offset;
  i16 l4_hdr_offset;
  u8 feature_arc_index;
  u8 dont_waste_me;

  union
  {
    /* IP4/6 buffer opaque. */
    struct
    {
      /* Adjacency from destination IP address lookup [VLIB_TX].
         Adjacency from source IP address lookup [VLIB_RX].
         This gets set to ~0 until source lookup is performed. */
      u32 adj_index[VLIB_N_RX_TX];

      union
      {
        struct
        {
          /* Flow hash value for this packet, computed from the IP
             src/dst addresses, protocol and ports. */
          u32 flow_hash;

          union
          {
            /* next protocol */
            u32 save_protocol;

            /* Hint for transport protocols */
            u32 fib_index;
          };

          /* Rewrite length */
          u8 save_rewrite_length;

          /* MFIB RPF ID */
          u32 rpf_id;
        };

        /* ICMP */
        struct
        {
          u8 type;
          u8 code;
          u32 data;
        } icmp;

        /* reassembly */
        union
        {
          /* group input/output to simplify the code; this way we can
           * hand off while keeping the input variables intact */
          struct
          {
            /* input variables */
            struct
            {
              u32 next_index;       /* index of next node - used by custom apps */
              u32 error_next_index; /* index of next node if error - used by custom apps */
            };
            /* handoff variables */
            struct
            {
              u16 owner_thread_index;
            };
          };
          /* output variables */
          struct
          {
            union
            {
              /* shallow virtual reassembly output variables */
              struct
              {
                u16 l4_src_port;    /* tcp/udp/icmp src port */
                u16 l4_dst_port;    /* tcp/udp/icmp dst port */
                u32 tcp_ack_number;
                u8 save_rewrite_length;
                u8 ip_proto;        /* protocol in ip header */
                u8 icmp_type_or_tcp_flags;
                u8 is_non_first_fragment;
                u32 tcp_seq_number;
              };
              /* full reassembly output variables */
              struct
              {
                u16 estimated_mtu;  /* estimated MTU calculated during reassembly */
              };
            };
          };
          /* internal variables used during reassembly */
          struct
          {
            u16 fragment_first;
            u16 fragment_last;
            u16 range_first;
            u16 range_last;
            u32 next_range_bi;
            u16 ip6_frag_hdr_offset;
          };
        } reass;
      };
    } ip;

    /*
     * MPLS:
     * data copied from the MPLS header that was popped from the packet
     * during the look-up.
     */
    struct
    {
      /* do not overlay w/ ip.adj_index[0,1] nor flow hash */
      u32 pad[VLIB_N_RX_TX + 1];
      u8 ttl;
      u8 exp;
      u8 first;
      u8 pyld_proto:3;          /* dpo_proto_t */
      u8 rsvd:5;
      /* Rewrite length */
      u8 save_rewrite_length;
      /* Save the mpls header length including all label stack */
      u8 mpls_hdr_length;
      /*
       * BIER - the number of bytes in the header.
       * The len field in the header is not authoritative; it's the
       * value in the table that counts.
       */
      struct
      {
        u8 n_bytes;
      } bier;
    } mpls;

    /* l2 bridging path, only valid there */
    struct opaque_l2
    {
      u32 feature_bitmap;
      u16 bd_index;             /* bridge-domain index */
      u16 l2fib_sn;             /* l2fib bd/int seq_num */
      u8 l2_len;                /* ethernet header length */
      u8 shg;                   /* split-horizon group */
      u8 bd_age;                /* aging enabled */
    } l2;

    /* l2tpv3 softwire encap, only valid there */
    struct
    {
      u32 pad[4];               /* do not overlay w/ ip.adj_index[0,1] */
      u8 next_index;
      u32 session_index;
    } l2t;

    /* L2 classify */
    struct
    {
      struct opaque_l2 pad;
      union
      {
        u32 table_index;
        u32 opaque_index;
      };
      u64 hash;
    } l2_classify;

    /* vnet policer */
    struct
    {
      u32 pad[8 - VLIB_N_RX_TX - 1];    /* to end of opaque */
      u32 index;
    } policer;

    /* interface output features */
    struct
    {
      u32 sad_index;
      u32 protect_index;
    } ipsec;

    /* MAP */
    struct
    {
      u16 mtu;
    } map;

    /* MAP-T */
    struct
    {
      u32 map_domain_index;
      struct
      {
        u32 saddr, daddr;
        u16 frag_offset;        // Fragmentation header offset
        u16 l4_offset;          // L4 header overall offset
        u8 l4_protocol;         // The final protocol number
      } v6;                     // Used by ip6_map_t only
      u16 checksum_offset;      // L4 checksum overall offset
      u16 mtu;                  // Exit MTU
    } map_t;

    /* IP Fragmentation */
    struct
    {
      u32 pad[2];               /* do not overlay w/ ip.adj_index[0,1] */
      u16 mtu;
      u8 next_index;
      u8 flags;                 // See ip_frag.h
    } ip_frag;

    /* COP - configurable junk filter(s) */
    struct
    {
      /* Current configuration index. */
      u32 current_config_index;
    } cop;

    /* LISP */
    struct
    {
      /* overlay address family */
      u16 overlay_afi;
    } lisp;

    /* TCP */
    struct
    {
      u32 connection_index;
      union
      {
        u32 seq_number;
        u32 next_node_opaque;
      };
      u32 seq_end;
      u32 ack_number;
      u16 hdr_offset;           /**< offset relative to ip hdr */
      u16 data_offset;          /**< offset relative to ip hdr */
      u16 data_len;             /**< data len */
      u8 flags;
    } tcp;

    /* SNAT */
    struct
    {
      u32 flags;
    } snat;

    u32 unused[6];
  };
} vnet_buffer_opaque_t;

#define VNET_REWRITE_TOTAL_BYTES (VLIB_BUFFER_PRE_DATA_SIZE)

STATIC_ASSERT (STRUCT_SIZE_OF (vnet_buffer_opaque_t, ip.save_rewrite_length)
               == STRUCT_SIZE_OF (vnet_buffer_opaque_t,
                                  ip.reass.save_rewrite_length)
               && STRUCT_SIZE_OF (vnet_buffer_opaque_t,
                                  ip.reass.save_rewrite_length) ==
               STRUCT_SIZE_OF (vnet_buffer_opaque_t, mpls.save_rewrite_length)
               && STRUCT_SIZE_OF (vnet_buffer_opaque_t,
                                  mpls.save_rewrite_length) == 1
               && VNET_REWRITE_TOTAL_BYTES < UINT8_MAX,
               "save_rewrite_length member must be able to hold the max value of rewrite length");

STATIC_ASSERT (STRUCT_OFFSET_OF (vnet_buffer_opaque_t, ip.save_rewrite_length)
               == STRUCT_OFFSET_OF (vnet_buffer_opaque_t,
                                    ip.reass.save_rewrite_length)
               && STRUCT_OFFSET_OF (vnet_buffer_opaque_t,
                                    mpls.save_rewrite_length) ==
               STRUCT_OFFSET_OF (vnet_buffer_opaque_t,
                                 ip.reass.save_rewrite_length),
               "save_rewrite_length must be aligned so that reass doesn't overwrite it");

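/*
 * Illustrative sketch (not part of the original header), showing why the
 * aliasing asserted above matters: once a rewrite of save_rewrite_length
 * bytes has been prepended, a later node (e.g. an output feature) can
 * still locate the IP header relative to the current data pointer.
 * Assuming a vlib_buffer_t *b that has been through ip4-rewrite:
 *
 *   ip4_header_t *ip =
 *     (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b) +
 *                       vnet_buffer (b)->ip.save_rewrite_length);
 */
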
/*
 * The opaque field of the vlib_buffer_t is interpreted as a
 * vnet_buffer_opaque_t. Hence it should be big enough to accommodate one.
 */
STATIC_ASSERT (sizeof (vnet_buffer_opaque_t) <=
               STRUCT_SIZE_OF (vlib_buffer_t, opaque),
               "VNET buffer meta-data too large for vlib_buffer");

#define vnet_buffer(b) ((vnet_buffer_opaque_t *) (b)->opaque)
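
/*
 * Illustrative sketch (not part of the original header): graph nodes
 * reach the opaque data through this macro, e.g. (assuming a
 * vlib_buffer_t *b):
 *
 *   u32 rx_sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_RX];
 *   u32 tx_adj_index = vnet_buffer (b)->ip.adj_index[VLIB_TX];
 *   vnet_buffer (b)->sw_if_index[VLIB_TX] = new_tx_sw_if_index;
 *
 * where new_tx_sw_if_index is a hypothetical interface index chosen by
 * the node.
 */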

/* Full cache line (64 bytes) of additional space */
typedef struct
{
  /**
   * QoS marking data that needs to persist from the recording nodes
   * (nominally in the ingress path) to the marking node (in the
   * egress path)
   */
  struct
  {
    u8 bits;
    u8 source;
  } qos;

  u8 loop_counter;
  u8 __unused[1];

  /* Group Based Policy */
  struct
  {
    u8 __unused;
    u8 flags;
    u16 sclass;
  } gbp;

  /**
   * The L4 payload size, set on input on GSO-enabled interfaces when we
   * receive a GSO packet (a chain of buffers with the first one having
   * the GSO bit set). It needs to persist all the way to interface-output:
   * if the egress interface is not GSO-enabled, we must perform the
   * segmentation ourselves and use this value to cut the payload
   * appropriately.
   */
  u16 gso_size;
  /* size of the L4 protocol header */
  u16 gso_l4_hdr_sz;

  struct
  {
    u16 unused;
    u16 thread_next;
    u32 arc_next;
    u32 ed_out2in_nat_session_index;
  } nat;

  union
  {
    struct
    {
#if VLIB_BUFFER_TRACE_TRAJECTORY > 0
      /* buffer trajectory tracing */
      u16 *trajectory_trace;
#endif
    };
    struct
    {
      u64 pad[1];
      u64 pg_replay_timestamp;
    };
    u32 unused[8];
  };
} vnet_buffer_opaque2_t;

#define vnet_buffer2(b) ((vnet_buffer_opaque2_t *) (b)->opaque2)

/*
 * The opaque2 field of the vlib_buffer_t is interpreted as a
 * vnet_buffer_opaque2_t. Hence it should be big enough to accommodate one.
 */
STATIC_ASSERT (sizeof (vnet_buffer_opaque2_t) <=
               STRUCT_SIZE_OF (vlib_buffer_t, opaque2),
               "VNET buffer opaque2 meta-data too large for vlib_buffer");
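
/*
 * Illustrative sketch (not part of the original header): the second
 * opaque area is reached the same way as the first, e.g. a marking node
 * might read back the recorded QoS data (assuming a vlib_buffer_t *b):
 *
 *   if (b->flags & VNET_BUFFER_F_QOS_DATA_VALID)
 *     apply_marking (b, vnet_buffer2 (b)->qos.bits,
 *                    vnet_buffer2 (b)->qos.source);
 *
 * where apply_marking is a hypothetical helper.
 */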

#define gso_mtu_sz(b) (vnet_buffer2(b)->gso_size +      \
                       vnet_buffer2(b)->gso_l4_hdr_sz + \
                       vnet_buffer(b)->l4_hdr_offset -  \
                       vnet_buffer(b)->l3_hdr_offset)
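
/*
 * Illustrative sketch (not part of the original header): gso_mtu_sz()
 * reconstructs the size of a single segment measured from the L3 header
 * (IP header + L4 header + gso_size), so an egress path can decide
 * whether software segmentation is needed, e.g. (assuming a
 * vlib_buffer_t *b and a hypothetical link_mtu):
 *
 *   if ((b->flags & VNET_BUFFER_F_GSO) && gso_mtu_sz (b) > link_mtu)
 *     segment_in_software (b);   (hypothetical helper)
 */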

format_function_t format_vnet_buffer;

#endif /* included_vnet_buffer_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */