/*
 * decap.c: vxlan gbp tunnel decap packet processing
 *
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vlib/vlib.h>
#include <vnet/pg/pg.h>
#include <vnet/vxlan-gbp/vxlan_gbp.h>

vlib_node_registration_t vxlan4_gbp_input_node;
vlib_node_registration_t vxlan6_gbp_input_node;

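/*
 * Per-packet trace record for the vxlan{4,6}-gbp-input nodes.  Filled in
 * only when packet tracing is enabled; rendered by
 * format_vxlan_gbp_rx_trace below.
 */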
typedef struct
{
  u32 next_index;
  u32 tunnel_index;
  u32 error;
  u32 vni;
  u16 sclass;
} vxlan_gbp_rx_trace_t;

static u8 *
format_vxlan_gbp_rx_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  vxlan_gbp_rx_trace_t *t = va_arg (*args, vxlan_gbp_rx_trace_t *);

  if (t->tunnel_index == ~0)
    return format (s,
		   "VXLAN_GBP decap error - tunnel for vni %d does not exist",
		   t->vni);
  return format (s,
		 "VXLAN_GBP decap from vxlan_gbp_tunnel%d vni %d sclass %d"
		 " next %d error %d",
		 t->tunnel_index, t->vni, t->sclass, t->next_index, t->error);
}

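/*
 * Resolve the FIB index in which to look up the tunnel key.  An earlier
 * node may have stored a FIB index directly in sw_if_index[VLIB_TX] (a
 * common VPP convention for pre-resolved lookups); otherwise fall back
 * to the FIB index configured on the RX interface.
 */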
always_inline u32
buf_fib_index (vlib_buffer_t * b, u32 is_ip4)
{
  u32 sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_TX];
  if (sw_if_index != (u32) ~ 0)
    return sw_if_index;

  u32 *fib_index_by_sw_if_index = is_ip4 ?
    ip4_main.fib_index_by_sw_if_index : ip6_main.fib_index_by_sw_if_index;
  sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_RX];

  return vec_elt (fib_index_by_sw_if_index, sw_if_index);
}

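/*
 * One-entry cache of the last IPv4 tunnel key looked up.  The caller
 * seeds it with 0xff bytes (an impossible key) before the dispatch loop,
 * so the first packet always takes the bihash path.  Key layout:
 *
 *   key[0] = tunnel source address (the packet SIP on decap)
 *   key[1] = ((u64) encap FIB index << 32) | vni_reserved, with the VNI
 *            field kept in network byte order as read from the header
 */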
typedef vxlan4_gbp_tunnel_key_t last_tunnel_cache4;

always_inline vxlan_gbp_tunnel_t *
vxlan4_gbp_find_tunnel (vxlan_gbp_main_t * vxm, last_tunnel_cache4 * cache,
			u32 fib_index, ip4_header_t * ip4_0,
			vxlan_gbp_header_t * vxlan_gbp0,
			vxlan_gbp_tunnel_t ** stats_t0)
{
  /* Make sure a VXLAN_GBP tunnel exists for the packet SIP and VNI */
  vxlan4_gbp_tunnel_key_t key4;
  key4.key[1] = ((u64) fib_index << 32) | vxlan_gbp0->vni_reserved;

  if (PREDICT_FALSE (key4.key[1] != cache->key[1] ||
		     ip4_0->src_address.as_u32 != (u32) cache->key[0]))
    {
      key4.key[0] = ip4_0->src_address.as_u32;
      int rv = clib_bihash_search_inline_16_8 (&vxm->vxlan4_gbp_tunnel_by_key,
					       &key4);
      if (PREDICT_FALSE (rv != 0))
	return 0;

      *cache = key4;
    }
  vxlan_gbp_tunnel_t *t0 = pool_elt_at_index (vxm->tunnels, cache->value);

  /* Validate VXLAN_GBP tunnel SIP against packet DIP */
  if (PREDICT_TRUE (ip4_0->dst_address.as_u32 == t0->src.ip4.as_u32))
    *stats_t0 = t0;
  else
    {
      /* try multicast */
      if (PREDICT_TRUE (!ip4_address_is_multicast (&ip4_0->dst_address)))
	return 0;

      key4.key[0] = ip4_0->dst_address.as_u32;
      /* Make sure a mcast VXLAN_GBP tunnel exists for the packet DIP and VNI */
      int rv = clib_bihash_search_inline_16_8 (&vxm->vxlan4_gbp_tunnel_by_key,
					       &key4);
      if (PREDICT_FALSE (rv != 0))
	return 0;

      *stats_t0 = pool_elt_at_index (vxm->tunnels, key4.value);
    }

  return t0;
}

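/*
 * IPv6 variant of the one-entry cache; same idea as above with a 24-byte
 * key: two words of source address plus (FIB index << 32 | vni_reserved).
 */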
typedef vxlan6_gbp_tunnel_key_t last_tunnel_cache6;

always_inline vxlan_gbp_tunnel_t *
vxlan6_gbp_find_tunnel (vxlan_gbp_main_t * vxm, last_tunnel_cache6 * cache,
			u32 fib_index, ip6_header_t * ip6_0,
			vxlan_gbp_header_t * vxlan_gbp0,
			vxlan_gbp_tunnel_t ** stats_t0)
{
  /* Make sure a VXLAN_GBP tunnel exists for the packet SIP and VNI */
  vxlan6_gbp_tunnel_key_t key6 = {
    .key = {
      [0] = ip6_0->src_address.as_u64[0],
      [1] = ip6_0->src_address.as_u64[1],
      [2] = (((u64) fib_index) << 32) | vxlan_gbp0->vni_reserved,
    }
  };

  if (PREDICT_FALSE
      (clib_bihash_key_compare_24_8 (key6.key, cache->key) == 0))
    {
      int rv = clib_bihash_search_inline_24_8 (&vxm->vxlan6_gbp_tunnel_by_key,
					       &key6);
      if (PREDICT_FALSE (rv != 0))
	return 0;

      *cache = key6;
    }
  vxlan_gbp_tunnel_t *t0 = pool_elt_at_index (vxm->tunnels, cache->value);

  /* Validate VXLAN_GBP tunnel SIP against packet DIP */
  if (PREDICT_TRUE (ip6_address_is_equal (&ip6_0->dst_address, &t0->src.ip6)))
    *stats_t0 = t0;
  else
    {
      /* try multicast */
      if (PREDICT_TRUE (!ip6_address_is_multicast (&ip6_0->dst_address)))
	return 0;

      /* Make sure a mcast VXLAN_GBP tunnel exists for the packet DIP and VNI */
      key6.key[0] = ip6_0->dst_address.as_u64[0];
      key6.key[1] = ip6_0->dst_address.as_u64[1];
      int rv = clib_bihash_search_inline_24_8 (&vxm->vxlan6_gbp_tunnel_by_key,
					       &key6);
      if (PREDICT_FALSE (rv != 0))
	return 0;

      *stats_t0 = pool_elt_at_index (vxm->tunnels, key6.value);
    }

  return t0;
}

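/*
 * Decap worker shared by the vxlan4-gbp-input and vxlan6-gbp-input nodes
 * (is_ip4 selects the address family at compile time).  udp-local has
 * already consumed the IP and UDP headers, so current_data points at the
 * VXLAN_GBP header.  The node follows the usual VPP pattern: a dual-loop
 * handling two packets per iteration with prefetch of the next pair,
 * then a single-loop for the remainder.  For each packet it finds the
 * tunnel, validates the header flags, pops the VXLAN_GBP header, stashes
 * the group-policy flags and sclass in the buffer metadata, and forwards
 * to the tunnel's decap next node (normally l2-input).
 */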
always_inline uword
vxlan_gbp_input (vlib_main_t * vm,
		 vlib_node_runtime_t * node,
		 vlib_frame_t * from_frame, u32 is_ip4)
{
  vxlan_gbp_main_t *vxm = &vxlan_gbp_main;
  vnet_main_t *vnm = vxm->vnet_main;
  vnet_interface_main_t *im = &vnm->interface_main;
  vlib_combined_counter_main_t *rx_counter =
    im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX;
  vlib_combined_counter_main_t *drop_counter =
    im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_DROP;
  last_tunnel_cache4 last4;
  last_tunnel_cache6 last6;
  u32 pkts_decapsulated = 0;
  u32 thread_index = vlib_get_thread_index ();

  if (is_ip4)
    memset (&last4, 0xff, sizeof last4);
  else
    memset (&last6, 0xff, sizeof last6);

  u32 next_index = node->cached_next_index;

  u32 *from = vlib_frame_vector_args (from_frame);
  u32 n_left_from = from_frame->n_vectors;

  while (n_left_from > 0)
    {
      u32 *to_next, n_left_to_next;
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
	{
	  /* Prefetch next iteration. */
	  {
	    vlib_buffer_t *p2, *p3;

	    p2 = vlib_get_buffer (vm, from[2]);
	    p3 = vlib_get_buffer (vm, from[3]);

	    vlib_prefetch_buffer_header (p2, LOAD);
	    vlib_prefetch_buffer_header (p3, LOAD);

	    CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
	    CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
	  }

	  u32 bi0 = to_next[0] = from[0];
	  u32 bi1 = to_next[1] = from[1];
	  from += 2;
	  to_next += 2;
	  n_left_to_next -= 2;
	  n_left_from -= 2;

	  vlib_buffer_t *b0, *b1;
	  b0 = vlib_get_buffer (vm, bi0);
	  b1 = vlib_get_buffer (vm, bi1);

	  /* udp leaves current_data pointing at the vxlan_gbp header */
	  void *cur0 = vlib_buffer_get_current (b0);
	  void *cur1 = vlib_buffer_get_current (b1);
	  vxlan_gbp_header_t *vxlan_gbp0 = cur0;
	  vxlan_gbp_header_t *vxlan_gbp1 = cur1;

	  ip4_header_t *ip4_0, *ip4_1;
	  ip6_header_t *ip6_0, *ip6_1;
	  if (is_ip4)
	    {
	      ip4_0 = cur0 - sizeof (udp_header_t) - sizeof (ip4_header_t);
	      ip4_1 = cur1 - sizeof (udp_header_t) - sizeof (ip4_header_t);
	    }
	  else
	    {
	      ip6_0 = cur0 - sizeof (udp_header_t) - sizeof (ip6_header_t);
	      ip6_1 = cur1 - sizeof (udp_header_t) - sizeof (ip6_header_t);
	    }

	  /* pop vxlan_gbp */
	  vlib_buffer_advance (b0, sizeof *vxlan_gbp0);
	  vlib_buffer_advance (b1, sizeof *vxlan_gbp1);

	  u32 fi0 = buf_fib_index (b0, is_ip4);
	  u32 fi1 = buf_fib_index (b1, is_ip4);

	  vxlan_gbp_tunnel_t *t0, *stats_t0 = 0;
	  vxlan_gbp_tunnel_t *t1, *stats_t1 = 0;
	  if (is_ip4)
	    {
	      t0 =
		vxlan4_gbp_find_tunnel (vxm, &last4, fi0, ip4_0, vxlan_gbp0,
					&stats_t0);
	      t1 =
		vxlan4_gbp_find_tunnel (vxm, &last4, fi1, ip4_1, vxlan_gbp1,
					&stats_t1);
	    }
	  else
	    {
	      t0 =
		vxlan6_gbp_find_tunnel (vxm, &last6, fi0, ip6_0, vxlan_gbp0,
					&stats_t0);
	      t1 =
		vxlan6_gbp_find_tunnel (vxm, &last6, fi1, ip6_1, vxlan_gbp1,
					&stats_t1);
	    }

	  u32 len0 = vlib_buffer_length_in_chain (vm, b0);
	  u32 len1 = vlib_buffer_length_in_chain (vm, b1);

	  u32 next0, next1;
	  u8 error0 = 0, error1 = 0;
	  u8 flags0 = vxlan_gbp_get_flags (vxlan_gbp0);
	  u8 flags1 = vxlan_gbp_get_flags (vxlan_gbp1);
	  /* Drop unless a tunnel matched (SIP, VNI, encap-fib) and the
	     VXLAN_GBP I and G flags are both set */
	  if (PREDICT_FALSE
	      (t0 == 0 || flags0 != (VXLAN_GBP_FLAGS_I | VXLAN_GBP_FLAGS_G)))
	    {
	      next0 = VXLAN_GBP_INPUT_NEXT_DROP;

	      if (t0 != 0
		  && flags0 != (VXLAN_GBP_FLAGS_I | VXLAN_GBP_FLAGS_G))
		{
		  error0 = VXLAN_GBP_ERROR_BAD_FLAGS;
		  vlib_increment_combined_counter
		    (drop_counter, thread_index, stats_t0->sw_if_index, 1,
		     len0);
		}
	      else
		error0 = VXLAN_GBP_ERROR_NO_SUCH_TUNNEL;
	      b0->error = node->errors[error0];
	    }
	  else
	    {
	      next0 = t0->decap_next_index;
	      vnet_buffer2 (b0)->gbp.flags =
		vxlan_gbp_get_gpflags (vxlan_gbp0);
	      vnet_buffer2 (b0)->gbp.src_epg =
		vxlan_gbp_get_sclass (vxlan_gbp0);

	      /* Required to make the l2 tag push / pop code work on l2 subifs */
	      if (PREDICT_TRUE (next0 == VXLAN_GBP_INPUT_NEXT_L2_INPUT))
		vnet_update_l2_len (b0);

	      /* Set packet input sw_if_index to unicast VXLAN_GBP tunnel for learning */
	      vnet_buffer (b0)->sw_if_index[VLIB_RX] = t0->sw_if_index;
	      vlib_increment_combined_counter
		(rx_counter, thread_index, stats_t0->sw_if_index, 1, len0);
	      pkts_decapsulated++;
	    }

	  /* Drop unless a tunnel matched (SIP, VNI, encap-fib) and the
	     VXLAN_GBP I and G flags are both set */
	  if (PREDICT_FALSE
	      (t1 == 0 || flags1 != (VXLAN_GBP_FLAGS_I | VXLAN_GBP_FLAGS_G)))
	    {
	      next1 = VXLAN_GBP_INPUT_NEXT_DROP;

	      if (t1 != 0
		  && flags1 != (VXLAN_GBP_FLAGS_I | VXLAN_GBP_FLAGS_G))
		{
		  error1 = VXLAN_GBP_ERROR_BAD_FLAGS;
		  vlib_increment_combined_counter
		    (drop_counter, thread_index, stats_t1->sw_if_index, 1,
		     len1);
		}
	      else
		error1 = VXLAN_GBP_ERROR_NO_SUCH_TUNNEL;
	      b1->error = node->errors[error1];
	    }
	  else
	    {
	      next1 = t1->decap_next_index;
	      vnet_buffer2 (b1)->gbp.flags =
		vxlan_gbp_get_gpflags (vxlan_gbp1);
	      vnet_buffer2 (b1)->gbp.src_epg =
		vxlan_gbp_get_sclass (vxlan_gbp1);

	      /* Required to make the l2 tag push / pop code work on l2 subifs */
	      if (PREDICT_TRUE (next1 == VXLAN_GBP_INPUT_NEXT_L2_INPUT))
		vnet_update_l2_len (b1);

	      /* Set packet input sw_if_index to unicast VXLAN_GBP tunnel for learning */
	      vnet_buffer (b1)->sw_if_index[VLIB_RX] = t1->sw_if_index;
	      pkts_decapsulated++;

	      vlib_increment_combined_counter
		(rx_counter, thread_index, stats_t1->sw_if_index, 1, len1);
	    }

	  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
	    {
	      vxlan_gbp_rx_trace_t *tr =
		vlib_add_trace (vm, node, b0, sizeof (*tr));
	      tr->next_index = next0;
	      tr->error = error0;
	      tr->tunnel_index = t0 == 0 ? ~0 : t0 - vxm->tunnels;
	      tr->vni = vxlan_gbp_get_vni (vxlan_gbp0);
	      tr->sclass = vxlan_gbp_get_sclass (vxlan_gbp0);
	    }
	  if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
	    {
	      vxlan_gbp_rx_trace_t *tr =
		vlib_add_trace (vm, node, b1, sizeof (*tr));
	      tr->next_index = next1;
	      tr->error = error1;
	      tr->tunnel_index = t1 == 0 ? ~0 : t1 - vxm->tunnels;
	      tr->vni = vxlan_gbp_get_vni (vxlan_gbp1);
	      tr->sclass = vxlan_gbp_get_sclass (vxlan_gbp1);
	    }

	  vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
					   to_next, n_left_to_next,
					   bi0, bi1, next0, next1);
	}

      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  u32 bi0 = to_next[0] = from[0];
	  from += 1;
	  to_next += 1;
	  n_left_from -= 1;
	  n_left_to_next -= 1;

	  vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);

	  /* udp leaves current_data pointing at the vxlan_gbp header */
	  void *cur0 = vlib_buffer_get_current (b0);
	  vxlan_gbp_header_t *vxlan_gbp0 = cur0;
	  ip4_header_t *ip4_0;
	  ip6_header_t *ip6_0;
	  if (is_ip4)
	    ip4_0 = cur0 - sizeof (udp_header_t) - sizeof (ip4_header_t);
	  else
	    ip6_0 = cur0 - sizeof (udp_header_t) - sizeof (ip6_header_t);

	  /* pop vxlan_gbp (ip and udp were already consumed by udp-local) */
	  vlib_buffer_advance (b0, sizeof (*vxlan_gbp0));

	  u32 fi0 = buf_fib_index (b0, is_ip4);

	  vxlan_gbp_tunnel_t *t0, *stats_t0 = 0;
	  if (is_ip4)
	    t0 =
	      vxlan4_gbp_find_tunnel (vxm, &last4, fi0, ip4_0, vxlan_gbp0,
				      &stats_t0);
	  else
	    t0 =
	      vxlan6_gbp_find_tunnel (vxm, &last6, fi0, ip6_0, vxlan_gbp0,
				      &stats_t0);

	  uword len0 = vlib_buffer_length_in_chain (vm, b0);

	  u32 next0;
	  u8 error0 = 0;
	  u8 flags0 = vxlan_gbp_get_flags (vxlan_gbp0);
	  /* Drop unless a tunnel matched (SIP, VNI, encap-fib) and the
	     VXLAN_GBP I and G flags are both set */
	  if (PREDICT_FALSE
	      (t0 == 0 || flags0 != (VXLAN_GBP_FLAGS_I | VXLAN_GBP_FLAGS_G)))
	    {
	      next0 = VXLAN_GBP_INPUT_NEXT_DROP;

	      if (t0 != 0
		  && flags0 != (VXLAN_GBP_FLAGS_I | VXLAN_GBP_FLAGS_G))
		{
		  error0 = VXLAN_GBP_ERROR_BAD_FLAGS;
		  vlib_increment_combined_counter
		    (drop_counter, thread_index, stats_t0->sw_if_index, 1,
		     len0);
		}
	      else
		error0 = VXLAN_GBP_ERROR_NO_SUCH_TUNNEL;
	      b0->error = node->errors[error0];
	    }
	  else
	    {
	      next0 = t0->decap_next_index;
	      vnet_buffer2 (b0)->gbp.flags =
		vxlan_gbp_get_gpflags (vxlan_gbp0);
	      vnet_buffer2 (b0)->gbp.src_epg =
		vxlan_gbp_get_sclass (vxlan_gbp0);

	      /* Required to make the l2 tag push / pop code work on l2 subifs */
	      if (PREDICT_TRUE (next0 == VXLAN_GBP_INPUT_NEXT_L2_INPUT))
		vnet_update_l2_len (b0);

	      /* Set packet input sw_if_index to unicast VXLAN_GBP tunnel for learning */
	      vnet_buffer (b0)->sw_if_index[VLIB_RX] = t0->sw_if_index;
	      pkts_decapsulated++;

	      vlib_increment_combined_counter
		(rx_counter, thread_index, stats_t0->sw_if_index, 1, len0);
	    }

	  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
	    {
	      vxlan_gbp_rx_trace_t *tr
		= vlib_add_trace (vm, node, b0, sizeof (*tr));
	      tr->next_index = next0;
	      tr->error = error0;
	      tr->tunnel_index = t0 == 0 ? ~0 : t0 - vxm->tunnels;
	      tr->vni = vxlan_gbp_get_vni (vxlan_gbp0);
	      tr->sclass = vxlan_gbp_get_sclass (vxlan_gbp0);
	    }
	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
					   to_next, n_left_to_next,
					   bi0, next0);
	}


      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  /* Do we still need this now that per-tunnel rx stats are kept? */
  u32 node_idx =
    is_ip4 ? vxlan4_gbp_input_node.index : vxlan6_gbp_input_node.index;
  vlib_node_increment_counter (vm, node_idx, VXLAN_GBP_ERROR_DECAPSULATED,
			       pkts_decapsulated);

  return from_frame->n_vectors;
}

static uword
vxlan4_gbp_input (vlib_main_t * vm,
		  vlib_node_runtime_t * node, vlib_frame_t * from_frame)
{
  return vxlan_gbp_input (vm, node, from_frame, /* is_ip4 */ 1);
}

static uword
vxlan6_gbp_input (vlib_main_t * vm,
		  vlib_node_runtime_t * node, vlib_frame_t * from_frame)
{
  return vxlan_gbp_input (vm, node, from_frame, /* is_ip4 */ 0);
}

static char *vxlan_gbp_error_strings[] = {
#define vxlan_gbp_error(n,s) s,
#include <vnet/vxlan-gbp/vxlan_gbp_error.def>
#undef vxlan_gbp_error
};

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (vxlan4_gbp_input_node) =
{
  .function = vxlan4_gbp_input,
  .name = "vxlan4-gbp-input",
  .vector_size = sizeof (u32),
  .n_errors = VXLAN_GBP_N_ERROR,
  .error_strings = vxlan_gbp_error_strings,
  .n_next_nodes = VXLAN_GBP_INPUT_N_NEXT,
  .format_trace = format_vxlan_gbp_rx_trace,
  .next_nodes = {
#define _(s,n) [VXLAN_GBP_INPUT_NEXT_##s] = n,
    foreach_vxlan_gbp_input_next
#undef _
  },
};
VLIB_NODE_FUNCTION_MULTIARCH (vxlan4_gbp_input_node, vxlan4_gbp_input)

VLIB_REGISTER_NODE (vxlan6_gbp_input_node) =
{
  .function = vxlan6_gbp_input,
  .name = "vxlan6-gbp-input",
  .vector_size = sizeof (u32),
  .n_errors = VXLAN_GBP_N_ERROR,
  .error_strings = vxlan_gbp_error_strings,
  .n_next_nodes = VXLAN_GBP_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [VXLAN_GBP_INPUT_NEXT_##s] = n,
    foreach_vxlan_gbp_input_next
#undef _
  },
  .format_trace = format_vxlan_gbp_rx_trace,
};
VLIB_NODE_FUNCTION_MULTIARCH (vxlan6_gbp_input_node, vxlan6_gbp_input)
/* *INDENT-ON* */

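/*
 * ip{4,6}-vxlan-gbp-bypass: optional features on the IP unicast arc
 * (registered elsewhere) that let VXLAN_GBP packets skip the full IP
 * local/FIB path.  Each packet is inspected; if it is a well-formed UDP
 * packet to UDP_DST_PORT_vxlan_gbp whose destination matches a local
 * VTEP address, the UDP length and checksum are validated and the packet
 * is handed straight to vxlan{4,6}-gbp-input.  Everything else continues
 * to the next feature unchanged.
 */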
typedef enum
{
  IP_VXLAN_GBP_BYPASS_NEXT_DROP,
  IP_VXLAN_GBP_BYPASS_NEXT_VXLAN_GBP,
  IP_VXLAN_GBP_BYPASS_N_NEXT,
} ip_vxlan_gbp_bypass_next_t;

always_inline uword
ip_vxlan_gbp_bypass_inline (vlib_main_t * vm,
			    vlib_node_runtime_t * node,
			    vlib_frame_t * frame, u32 is_ip4)
{
  vxlan_gbp_main_t *vxm = &vxlan_gbp_main;
  u32 *from, *to_next, n_left_from, n_left_to_next, next_index;
  /* use the error node matching the address family so IPv6 errors are
     attributed to the right strings */
  vlib_node_runtime_t *error_node = is_ip4 ?
    vlib_node_get_runtime (vm, ip4_input_node.index) :
    vlib_node_get_runtime (vm, ip6_input_node.index);
  ip4_address_t addr4;		/* last IPv4 address matching a local VTEP address */
  ip6_address_t addr6;		/* last IPv6 address matching a local VTEP address */

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  if (node->flags & VLIB_NODE_FLAG_TRACE)
    ip4_forward_next_trace (vm, node, frame, VLIB_TX);

  if (is_ip4)
    addr4.data_u32 = ~0;
  else
    ip6_address_set_zero (&addr6);

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
	{
	  vlib_buffer_t *b0, *b1;
	  ip4_header_t *ip40, *ip41;
	  ip6_header_t *ip60, *ip61;
	  udp_header_t *udp0, *udp1;
	  u32 bi0, ip_len0, udp_len0, flags0, next0;
	  u32 bi1, ip_len1, udp_len1, flags1, next1;
	  i32 len_diff0, len_diff1;
	  u8 error0, good_udp0, proto0;
	  u8 error1, good_udp1, proto1;

	  /* Prefetch next iteration. */
	  {
	    vlib_buffer_t *p2, *p3;

	    p2 = vlib_get_buffer (vm, from[2]);
	    p3 = vlib_get_buffer (vm, from[3]);

	    vlib_prefetch_buffer_header (p2, LOAD);
	    vlib_prefetch_buffer_header (p3, LOAD);

	    CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
	    CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
	  }

	  bi0 = to_next[0] = from[0];
	  bi1 = to_next[1] = from[1];
	  from += 2;
	  n_left_from -= 2;
	  to_next += 2;
	  n_left_to_next -= 2;

	  b0 = vlib_get_buffer (vm, bi0);
	  b1 = vlib_get_buffer (vm, bi1);
	  if (is_ip4)
	    {
	      ip40 = vlib_buffer_get_current (b0);
	      ip41 = vlib_buffer_get_current (b1);
	    }
	  else
	    {
	      ip60 = vlib_buffer_get_current (b0);
	      ip61 = vlib_buffer_get_current (b1);
	    }

	  /* Setup packet for next IP feature */
	  vnet_feature_next (&next0, b0);
	  vnet_feature_next (&next1, b1);

	  if (is_ip4)
	    {
	      /* Treat IP frag packets as "experimental" protocol for now
	         until support of IP frag reassembly is implemented */
	      proto0 = ip4_is_fragment (ip40) ? 0xfe : ip40->protocol;
	      proto1 = ip4_is_fragment (ip41) ? 0xfe : ip41->protocol;
	    }
	  else
	    {
	      proto0 = ip60->protocol;
	      proto1 = ip61->protocol;
	    }

	  /* Process packet 0 */
	  if (proto0 != IP_PROTOCOL_UDP)
	    goto exit0;		/* not UDP packet */

	  if (is_ip4)
	    udp0 = ip4_next_header (ip40);
	  else
	    udp0 = ip6_next_header (ip60);

	  if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_vxlan_gbp))
	    goto exit0;		/* not VXLAN_GBP packet */

	  /* Validate DIP against VTEPs */
	  if (is_ip4)
	    {
	      if (addr4.as_u32 != ip40->dst_address.as_u32)
		{
		  if (!hash_get (vxm->vtep4, ip40->dst_address.as_u32))
		    goto exit0;	/* no local VTEP for VXLAN_GBP packet */
		  addr4 = ip40->dst_address;
		}
	    }
	  else
	    {
	      if (!ip6_address_is_equal (&addr6, &ip60->dst_address))
		{
		  if (!hash_get_mem (vxm->vtep6, &ip60->dst_address))
		    goto exit0;	/* no local VTEP for VXLAN_GBP packet */
		  addr6 = ip60->dst_address;
		}
	    }

	  flags0 = b0->flags;
	  good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;

	  /* Don't verify UDP checksum for packets with explicit zero checksum. */
	  good_udp0 |= udp0->checksum == 0;

	  /* Verify UDP length */
	  if (is_ip4)
	    ip_len0 = clib_net_to_host_u16 (ip40->length);
	  else
	    ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
	  udp_len0 = clib_net_to_host_u16 (udp0->length);
	  len_diff0 = ip_len0 - udp_len0;

	  /* Verify UDP checksum */
	  if (PREDICT_FALSE (!good_udp0))
	    {
	      if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
		{
		  if (is_ip4)
		    flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
		  else
		    flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
		  good_udp0 =
		    (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
		}
	    }

	  if (is_ip4)
	    {
	      error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
	      error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
	    }
	  else
	    {
	      error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
	      error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
	    }

	  next0 = error0 ?
	    IP_VXLAN_GBP_BYPASS_NEXT_DROP :
	    IP_VXLAN_GBP_BYPASS_NEXT_VXLAN_GBP;
	  b0->error = error0 ? error_node->errors[error0] : 0;

	  /* vxlan-gbp-input node expects current at VXLAN_GBP header */
	  if (is_ip4)
	    vlib_buffer_advance (b0,
				 sizeof (ip4_header_t) +
				 sizeof (udp_header_t));
	  else
	    vlib_buffer_advance (b0,
				 sizeof (ip6_header_t) +
				 sizeof (udp_header_t));

	exit0:
	  /* Process packet 1 */
	  if (proto1 != IP_PROTOCOL_UDP)
	    goto exit1;		/* not UDP packet */

	  if (is_ip4)
	    udp1 = ip4_next_header (ip41);
	  else
	    udp1 = ip6_next_header (ip61);

	  if (udp1->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_vxlan_gbp))
	    goto exit1;		/* not VXLAN_GBP packet */

	  /* Validate DIP against VTEPs */
	  if (is_ip4)
	    {
	      if (addr4.as_u32 != ip41->dst_address.as_u32)
		{
		  if (!hash_get (vxm->vtep4, ip41->dst_address.as_u32))
		    goto exit1;	/* no local VTEP for VXLAN_GBP packet */
		  addr4 = ip41->dst_address;
		}
	    }
	  else
	    {
	      if (!ip6_address_is_equal (&addr6, &ip61->dst_address))
		{
		  if (!hash_get_mem (vxm->vtep6, &ip61->dst_address))
		    goto exit1;	/* no local VTEP for VXLAN_GBP packet */
		  addr6 = ip61->dst_address;
		}
	    }

	  flags1 = b1->flags;
	  good_udp1 = (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;

	  /* Don't verify UDP checksum for packets with explicit zero checksum. */
	  good_udp1 |= udp1->checksum == 0;

	  /* Verify UDP length */
	  if (is_ip4)
	    ip_len1 = clib_net_to_host_u16 (ip41->length);
	  else
	    ip_len1 = clib_net_to_host_u16 (ip61->payload_length);
	  udp_len1 = clib_net_to_host_u16 (udp1->length);
	  len_diff1 = ip_len1 - udp_len1;

	  /* Verify UDP checksum */
	  if (PREDICT_FALSE (!good_udp1))
	    {
	      if ((flags1 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
		{
		  if (is_ip4)
		    flags1 = ip4_tcp_udp_validate_checksum (vm, b1);
		  else
		    flags1 = ip6_tcp_udp_icmp_validate_checksum (vm, b1);
		  good_udp1 =
		    (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
		}
	    }

	  if (is_ip4)
	    {
	      error1 = good_udp1 ? 0 : IP4_ERROR_UDP_CHECKSUM;
	      error1 = (len_diff1 >= 0) ? error1 : IP4_ERROR_UDP_LENGTH;
	    }
	  else
	    {
	      error1 = good_udp1 ? 0 : IP6_ERROR_UDP_CHECKSUM;
	      error1 = (len_diff1 >= 0) ? error1 : IP6_ERROR_UDP_LENGTH;
	    }

	  next1 = error1 ?
	    IP_VXLAN_GBP_BYPASS_NEXT_DROP :
	    IP_VXLAN_GBP_BYPASS_NEXT_VXLAN_GBP;
	  b1->error = error1 ? error_node->errors[error1] : 0;

	  /* vxlan-gbp-input node expects current at VXLAN_GBP header */
	  if (is_ip4)
	    vlib_buffer_advance (b1,
				 sizeof (ip4_header_t) +
				 sizeof (udp_header_t));
	  else
	    vlib_buffer_advance (b1,
				 sizeof (ip6_header_t) +
				 sizeof (udp_header_t));

	exit1:
	  vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
					   to_next, n_left_to_next,
					   bi0, bi1, next0, next1);
	}

      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  vlib_buffer_t *b0;
	  ip4_header_t *ip40;
	  ip6_header_t *ip60;
	  udp_header_t *udp0;
	  u32 bi0, ip_len0, udp_len0, flags0, next0;
	  i32 len_diff0;
	  u8 error0, good_udp0, proto0;

	  bi0 = to_next[0] = from[0];
	  from += 1;
	  n_left_from -= 1;
	  to_next += 1;
	  n_left_to_next -= 1;

	  b0 = vlib_get_buffer (vm, bi0);
	  if (is_ip4)
	    ip40 = vlib_buffer_get_current (b0);
	  else
	    ip60 = vlib_buffer_get_current (b0);

	  /* Setup packet for next IP feature */
	  vnet_feature_next (&next0, b0);

	  if (is_ip4)
	    /* Treat IP4 frag packets as "experimental" protocol for now
	       until support of IP frag reassembly is implemented */
	    proto0 = ip4_is_fragment (ip40) ? 0xfe : ip40->protocol;
	  else
	    proto0 = ip60->protocol;

	  if (proto0 != IP_PROTOCOL_UDP)
	    goto exit;		/* not UDP packet */

	  if (is_ip4)
	    udp0 = ip4_next_header (ip40);
	  else
	    udp0 = ip6_next_header (ip60);

	  if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_vxlan_gbp))
	    goto exit;		/* not VXLAN_GBP packet */

	  /* Validate DIP against VTEPs */
	  if (is_ip4)
	    {
	      if (addr4.as_u32 != ip40->dst_address.as_u32)
		{
		  if (!hash_get (vxm->vtep4, ip40->dst_address.as_u32))
		    goto exit;	/* no local VTEP for VXLAN_GBP packet */
		  addr4 = ip40->dst_address;
		}
	    }
	  else
	    {
	      if (!ip6_address_is_equal (&addr6, &ip60->dst_address))
		{
		  if (!hash_get_mem (vxm->vtep6, &ip60->dst_address))
		    goto exit;	/* no local VTEP for VXLAN_GBP packet */
		  addr6 = ip60->dst_address;
		}
	    }

	  flags0 = b0->flags;
	  good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;

	  /* Don't verify UDP checksum for packets with explicit zero checksum. */
	  good_udp0 |= udp0->checksum == 0;

	  /* Verify UDP length */
	  if (is_ip4)
	    ip_len0 = clib_net_to_host_u16 (ip40->length);
	  else
	    ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
	  udp_len0 = clib_net_to_host_u16 (udp0->length);
	  len_diff0 = ip_len0 - udp_len0;

	  /* Verify UDP checksum */
	  if (PREDICT_FALSE (!good_udp0))
	    {
	      if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
		{
		  if (is_ip4)
		    flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
		  else
		    flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
		  good_udp0 =
		    (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
		}
	    }

	  if (is_ip4)
	    {
	      error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
	      error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
	    }
	  else
	    {
	      error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
	      error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
	    }

	  next0 = error0 ?
	    IP_VXLAN_GBP_BYPASS_NEXT_DROP :
	    IP_VXLAN_GBP_BYPASS_NEXT_VXLAN_GBP;
	  b0->error = error0 ? error_node->errors[error0] : 0;

	  /* vxlan-gbp-input node expects current at VXLAN_GBP header */
	  if (is_ip4)
	    vlib_buffer_advance (b0,
				 sizeof (ip4_header_t) +
				 sizeof (udp_header_t));
	  else
	    vlib_buffer_advance (b0,
				 sizeof (ip6_header_t) +
				 sizeof (udp_header_t));

	exit:
	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
					   to_next, n_left_to_next,
					   bi0, next0);
	}

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}
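
/*
 * A minimal sketch of how these bypass features are typically switched
 * on for an interface.  This is not part of this file's control path;
 * the real enable/disable handler lives in the vxlan-gbp API code, and
 * sw_if_index / is_enable here are assumed to come from the caller:
 *
 *   static void
 *   vxlan_gbp_bypass_enable_example (u32 sw_if_index, int is_enable)
 *   {
 *     vnet_feature_enable_disable ("ip4-unicast", "ip4-vxlan-gbp-bypass",
 *                                  sw_if_index, is_enable, 0, 0);
 *     vnet_feature_enable_disable ("ip6-unicast", "ip6-vxlan-gbp-bypass",
 *                                  sw_if_index, is_enable, 0, 0);
 *   }
 */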

static uword
ip4_vxlan_gbp_bypass (vlib_main_t * vm,
		      vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  return ip_vxlan_gbp_bypass_inline (vm, node, frame, /* is_ip4 */ 1);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ip4_vxlan_gbp_bypass_node) =
{
  .function = ip4_vxlan_gbp_bypass,
  .name = "ip4-vxlan-gbp-bypass",
  .vector_size = sizeof (u32),
  .n_next_nodes = IP_VXLAN_GBP_BYPASS_N_NEXT,
  .next_nodes = {
    [IP_VXLAN_GBP_BYPASS_NEXT_DROP] = "error-drop",
    [IP_VXLAN_GBP_BYPASS_NEXT_VXLAN_GBP] = "vxlan4-gbp-input",
  },
  .format_buffer = format_ip4_header,
  .format_trace = format_ip4_forward_next_trace,
};

VLIB_NODE_FUNCTION_MULTIARCH (ip4_vxlan_gbp_bypass_node, ip4_vxlan_gbp_bypass)
/* *INDENT-ON* */

/* Dummy init function to get us linked in. */
clib_error_t *
ip4_vxlan_gbp_bypass_init (vlib_main_t * vm)
{
  return 0;
}

VLIB_INIT_FUNCTION (ip4_vxlan_gbp_bypass_init);

static uword
ip6_vxlan_gbp_bypass (vlib_main_t * vm,
		      vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  return ip_vxlan_gbp_bypass_inline (vm, node, frame, /* is_ip4 */ 0);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ip6_vxlan_gbp_bypass_node) =
{
  .function = ip6_vxlan_gbp_bypass,
  .name = "ip6-vxlan-gbp-bypass",
  .vector_size = sizeof (u32),
  .n_next_nodes = IP_VXLAN_GBP_BYPASS_N_NEXT,
  .next_nodes = {
    [IP_VXLAN_GBP_BYPASS_NEXT_DROP] = "error-drop",
    [IP_VXLAN_GBP_BYPASS_NEXT_VXLAN_GBP] = "vxlan6-gbp-input",
  },
  .format_buffer = format_ip6_header,
  .format_trace = format_ip6_forward_next_trace,
};

VLIB_NODE_FUNCTION_MULTIARCH (ip6_vxlan_gbp_bypass_node, ip6_vxlan_gbp_bypass)
/* *INDENT-ON* */

/* Dummy init function to get us linked in. */
clib_error_t *
ip6_vxlan_gbp_bypass_init (vlib_main_t * vm)
{
  return 0;
}

VLIB_INIT_FUNCTION (ip6_vxlan_gbp_bypass_init);

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */