Ed Warnicke | cb9cada | 2015-12-08 15:45:58 -0700 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (c) 2015 Cisco and/or its affiliates. |
| 3 | * Licensed under the Apache License, Version 2.0 (the "License"); |
| 4 | * you may not use this file except in compliance with the License. |
| 5 | * You may obtain a copy of the License at: |
| 6 | * |
| 7 | * http://www.apache.org/licenses/LICENSE-2.0 |
| 8 | * |
| 9 | * Unless required by applicable law or agreed to in writing, software |
| 10 | * distributed under the License is distributed on an "AS IS" BASIS, |
| 11 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 12 | * See the License for the specific language governing permissions and |
| 13 | * limitations under the License. |
| 14 | */ |
| 15 | #include <stdbool.h> |
| 16 | #include <vppinfra/error.h> |
| 17 | #include <vnet/vnet.h> |
| 18 | #include <vnet/ip/ip.h> |
| 19 | #include <vlib/vlib.h> |
Neale Ranns | 0bfe5d8 | 2016-08-25 15:29:12 +0100 | [diff] [blame] | 20 | #include <vnet/fib/fib_types.h> |
| 21 | #include <vnet/fib/ip4_fib.h> |
| 22 | #include <vnet/adj/adj.h> |
| 23 | #include <vnet/map/map_dpo.h> |
| 24 | #include <vnet/dpo/load_balance.h> |
Ed Warnicke | cb9cada | 2015-12-08 15:45:58 -0700 | [diff] [blame] | 25 | |
| 26 | #define MAP_SKIP_IP6_LOOKUP 1 |
| 27 | |
Keith Burns (alagalah) | 06e3d07 | 2016-08-07 08:43:18 -0700 | [diff] [blame] | 28 | typedef enum |
| 29 | { |
Ed Warnicke | cb9cada | 2015-12-08 15:45:58 -0700 | [diff] [blame] | 30 | MAP_SENDER, |
| 31 | MAP_RECEIVER |
| 32 | } map_dir_e; |
| 33 | |
Keith Burns (alagalah) | 06e3d07 | 2016-08-07 08:43:18 -0700 | [diff] [blame] | 34 | int map_create_domain (ip4_address_t * ip4_prefix, u8 ip4_prefix_len, |
| 35 | ip6_address_t * ip6_prefix, u8 ip6_prefix_len, |
| 36 | ip6_address_t * ip6_src, u8 ip6_src_len, |
| 37 | u8 ea_bits_len, u8 psid_offset, u8 psid_length, |
| 38 | u32 * map_domain_index, u16 mtu, u8 flags); |
| 39 | int map_delete_domain (u32 map_domain_index); |
| 40 | int map_add_del_psid (u32 map_domain_index, u16 psid, ip6_address_t * tep, |
| 41 | u8 is_add); |
| 42 | u8 *format_map_trace (u8 * s, va_list * args); |
| 43 | i32 ip4_get_port (ip4_header_t * ip, map_dir_e dir, u16 buffer_len); |
| 44 | i32 ip6_get_port (ip6_header_t * ip6, map_dir_e dir, u16 buffer_len); |
| 45 | u16 ip4_map_get_port (ip4_header_t * ip, map_dir_e dir); |
Ed Warnicke | cb9cada | 2015-12-08 15:45:58 -0700 | [diff] [blame] | 46 | |
Keith Burns (alagalah) | 06e3d07 | 2016-08-07 08:43:18 -0700 | [diff] [blame] | 47 | typedef enum __attribute__ ((__packed__)) |
| 48 | { |
| 49 | MAP_DOMAIN_PREFIX = 1 << 0, MAP_DOMAIN_TRANSLATION = 1 << 1, // The domain uses MAP-T |
Ed Warnicke | cb9cada | 2015-12-08 15:45:58 -0700 | [diff] [blame] | 50 | } map_domain_flags_e; |
| 51 | |
| 52 | /** |
| 53 | * IP4 reassembly logic: |
| 54 | * One virtually reassembled flow requires a map_ip4_reass_t structure in order |
| 55 | * to keep the first-fragment port number and, optionally, cache out of sequence |
| 56 | * packets. |
| 57 | * There are up to MAP_IP4_REASS_MAX_REASSEMBLY such structures. |
| 58 | * When in use, those structures are stored in a hash table of MAP_IP4_REASS_BUCKETS buckets. |
| 59 | * When a new structure needs to be used, it is allocated from available ones. |
| 60 | * If there is no structure available, the oldest in use is selected and used if and |
| 61 | * only if it was first allocated more than MAP_IP4_REASS_LIFETIME seconds ago. |
| 62 | * In case no structure can be allocated, the fragment is dropped. |
| 63 | */ |
| 64 | |
Keith Burns (alagalah) | 06e3d07 | 2016-08-07 08:43:18 -0700 | [diff] [blame] | 65 | #define MAP_IP4_REASS_LIFETIME_DEFAULT (100) /* ms */ |
Ed Warnicke | cb9cada | 2015-12-08 15:45:58 -0700 | [diff] [blame] | 66 | #define MAP_IP4_REASS_HT_RATIO_DEFAULT (1.0) |
Keith Burns (alagalah) | 06e3d07 | 2016-08-07 08:43:18 -0700 | [diff] [blame] | 67 | #define MAP_IP4_REASS_POOL_SIZE_DEFAULT 1024 // Number of reassembly structures |
Ed Warnicke | cb9cada | 2015-12-08 15:45:58 -0700 | [diff] [blame] | 68 | #define MAP_IP4_REASS_BUFFERS_DEFAULT 2048 |
| 69 | |
Keith Burns (alagalah) | 06e3d07 | 2016-08-07 08:43:18 -0700 | [diff] [blame] | 70 | #define MAP_IP4_REASS_MAX_FRAGMENTS_PER_REASSEMBLY 5 // Number of fragment per reassembly |
Ed Warnicke | cb9cada | 2015-12-08 15:45:58 -0700 | [diff] [blame] | 71 | |
Keith Burns (alagalah) | 06e3d07 | 2016-08-07 08:43:18 -0700 | [diff] [blame] | 72 | #define MAP_IP6_REASS_LIFETIME_DEFAULT (100) /* ms */ |
Ed Warnicke | cb9cada | 2015-12-08 15:45:58 -0700 | [diff] [blame] | 73 | #define MAP_IP6_REASS_HT_RATIO_DEFAULT (1.0) |
Keith Burns (alagalah) | 06e3d07 | 2016-08-07 08:43:18 -0700 | [diff] [blame] | 74 | #define MAP_IP6_REASS_POOL_SIZE_DEFAULT 1024 // Number of reassembly structures |
Ed Warnicke | cb9cada | 2015-12-08 15:45:58 -0700 | [diff] [blame] | 75 | #define MAP_IP6_REASS_BUFFERS_DEFAULT 2048 |
| 76 | |
| 77 | #define MAP_IP6_REASS_MAX_FRAGMENTS_PER_REASSEMBLY 5 |
| 78 | |
| 79 | #define MAP_IP6_REASS_COUNT_BYTES |
| 80 | #define MAP_IP4_REASS_COUNT_BYTES |
| 81 | |
| 82 | //#define IP6_MAP_T_OVERRIDE_TOS 0 |
| 83 | |
| 84 | /* |
| 85 | * This structure _MUST_ be no larger than a single cache line (64 bytes). |
| 86 | * If more space is needed make a union of ip6_prefix and *rules, those are mutually exclusive. |
| 87 | */ |
Keith Burns (alagalah) | 06e3d07 | 2016-08-07 08:43:18 -0700 | [diff] [blame] | 88 | typedef struct |
| 89 | { |
Ed Warnicke | cb9cada | 2015-12-08 15:45:58 -0700 | [diff] [blame] | 90 | ip6_address_t ip6_src; |
| 91 | ip6_address_t ip6_prefix; |
| 92 | ip6_address_t *rules; |
| 93 | u32 suffix_mask; |
| 94 | ip4_address_t ip4_prefix; |
| 95 | u16 psid_mask; |
| 96 | u16 mtu; |
| 97 | map_domain_flags_e flags; |
| 98 | u8 ip6_prefix_len; |
| 99 | u8 ip6_src_len; |
| 100 | u8 ea_bits_len; |
| 101 | u8 psid_offset; |
| 102 | u8 psid_length; |
| 103 | |
| 104 | /* helpers */ |
| 105 | u8 psid_shift; |
| 106 | u8 suffix_shift; |
| 107 | u8 ea_shift; |
| 108 | |
| 109 | /* not used by forwarding */ |
| 110 | u8 ip4_prefix_len; |
| 111 | } map_domain_t; |
| 112 | |
Damjan Marion | cf47894 | 2016-11-07 14:57:50 +0100 | [diff] [blame] | 113 | STATIC_ASSERT ((sizeof (map_domain_t) <= CLIB_CACHE_LINE_BYTES), |
| 114 | "MAP domain fits in one cacheline"); |
Neale Ranns | 0bfe5d8 | 2016-08-25 15:29:12 +0100 | [diff] [blame] | 115 | |
Ed Warnicke | cb9cada | 2015-12-08 15:45:58 -0700 | [diff] [blame] | 116 | #define MAP_REASS_INDEX_NONE ((u16)0xffff) |
| 117 | |
| 118 | /* |
| 119 | * Hash key, padded out to 16 bytes for fast compare |
| 120 | */ |
Keith Burns (alagalah) | 06e3d07 | 2016-08-07 08:43:18 -0700 | [diff] [blame] | 121 | /* *INDENT-OFF* */ |
Ed Warnicke | cb9cada | 2015-12-08 15:45:58 -0700 | [diff] [blame] | 122 | typedef union { |
| 123 | CLIB_PACKED (struct { |
| 124 | ip4_address_t src; |
| 125 | ip4_address_t dst; |
| 126 | u16 fragment_id; |
| 127 | u8 protocol; |
| 128 | }); |
| 129 | u64 as_u64[2]; |
| 130 | u32 as_u32[4]; |
| 131 | } map_ip4_reass_key_t; |
Keith Burns (alagalah) | 06e3d07 | 2016-08-07 08:43:18 -0700 | [diff] [blame] | 132 | /* *INDENT-ON* */ |
Ed Warnicke | cb9cada | 2015-12-08 15:45:58 -0700 | [diff] [blame] | 133 | |
Keith Burns (alagalah) | 06e3d07 | 2016-08-07 08:43:18 -0700 | [diff] [blame] | 134 | typedef struct |
| 135 | { |
Ed Warnicke | cb9cada | 2015-12-08 15:45:58 -0700 | [diff] [blame] | 136 | map_ip4_reass_key_t key; |
| 137 | f64 ts; |
| 138 | #ifdef MAP_IP4_REASS_COUNT_BYTES |
| 139 | u16 expected_total; |
| 140 | u16 forwarded; |
| 141 | #endif |
| 142 | i32 port; |
| 143 | u16 bucket; |
| 144 | u16 bucket_next; |
| 145 | u16 fifo_prev; |
| 146 | u16 fifo_next; |
| 147 | u32 fragments[MAP_IP4_REASS_MAX_FRAGMENTS_PER_REASSEMBLY]; |
| 148 | } map_ip4_reass_t; |
| 149 | |
| 150 | /* |
| 151 | * MAP domain counters |
| 152 | */ |
Keith Burns (alagalah) | 06e3d07 | 2016-08-07 08:43:18 -0700 | [diff] [blame] | 153 | typedef enum |
| 154 | { |
Ed Warnicke | cb9cada | 2015-12-08 15:45:58 -0700 | [diff] [blame] | 155 | /* Simple counters */ |
| 156 | MAP_DOMAIN_IPV4_FRAGMENT = 0, |
| 157 | /* Combined counters */ |
| 158 | MAP_DOMAIN_COUNTER_RX = 0, |
| 159 | MAP_DOMAIN_COUNTER_TX, |
| 160 | MAP_N_DOMAIN_COUNTER |
| 161 | } map_domain_counter_t; |
| 162 | |
| 163 | /* |
| 164 | * main_main_t |
| 165 | */ |
Keith Burns (alagalah) | 06e3d07 | 2016-08-07 08:43:18 -0700 | [diff] [blame] | 166 | /* *INDENT-OFF* */ |
Ed Warnicke | cb9cada | 2015-12-08 15:45:58 -0700 | [diff] [blame] | 167 | typedef union { |
| 168 | CLIB_PACKED (struct { |
| 169 | ip6_address_t src; |
| 170 | ip6_address_t dst; |
| 171 | u32 fragment_id; |
| 172 | u8 protocol; |
| 173 | }); |
| 174 | u64 as_u64[5]; |
| 175 | u32 as_u32[10]; |
| 176 | } map_ip6_reass_key_t; |
Keith Burns (alagalah) | 06e3d07 | 2016-08-07 08:43:18 -0700 | [diff] [blame] | 177 | /* *INDENT-OFF* */ |
Ed Warnicke | cb9cada | 2015-12-08 15:45:58 -0700 | [diff] [blame] | 178 | |
/* Cache entry for one out-of-sequence IPv6 fragment. */
typedef struct {
  u32 pi; //Cached packet or ~0
  u16 next_data_offset; //The data offset of the additional 20 bytes or ~0
  u8 next_data_len; //Number of bytes ready to be copied (20 if not last fragment)
  u8 next_data[20]; //The 20 additional bytes
} map_ip6_fragment_t;
| 185 | |
/* One in-progress IPv6 virtual reassembly. */
typedef struct {
  map_ip6_reass_key_t key;
  f64 ts;			/* allocation timestamp, used to age out / recycle entries */
#ifdef MAP_IP6_REASS_COUNT_BYTES
  u16 expected_total;		/* expected total byte count (maintained in map.c) */
  u16 forwarded;		/* bytes forwarded so far */
#endif
  u16 bucket; //What hash bucket this element is linked in
  u16 bucket_next;
  u16 fifo_prev;		/* age-ordered FIFO links, used to pick the oldest entry for reuse */
  u16 fifo_next;
  ip4_header_t ip4_header;	/* presumably the translated IPv4 header applied to the fragments — confirm in the decap path */
  map_ip6_fragment_t fragments[MAP_IP6_REASS_MAX_FRAGMENTS_PER_REASSEMBLY];
} map_ip6_reass_t;
| 200 | |
/* Global MAP plugin state. */
typedef struct {
  /* pool of MAP domains */
  map_domain_t *domains;

  /* MAP Domain packet/byte counters indexed by map domain index */
  vlib_simple_counter_main_t *simple_domain_counters;
  vlib_combined_counter_main_t *domain_counters;
  volatile u32 *counter_lock;	/* spinlock protecting counter updates (see map_domain_counter_lock) */

#ifdef MAP_SKIP_IP6_LOOKUP
  /* pre-resolved next hops: cached adjacency indices and the addresses
   * they were resolved for (presumably to skip the FIB lookup — confirm
   * usage in map.c) */
  u32 adj6_index, adj4_index;
  ip4_address_t preresolve_ip4;
  ip6_address_t preresolve_ip6;
#endif

  /* Traffic class: zero, copy (~0) or fixed value */
  u8 tc;
  bool tc_copy;

  bool sec_check;		/* Inbound security check */
  bool sec_check_frag;		/* Inbound security check for (subsequent) fragments */
  bool icmp6_enabled;		/* Send destination unreachable for security check failure */

  /* ICMPv6 -> ICMPv4 relay parameters */
  ip4_address_t icmp4_src_address;
  vlib_simple_counter_main_t icmp_relayed;

  /* convenience */
  vlib_main_t *vlib_main;
  vnet_main_t *vnet_main;

  /*
   * IPv4 encap and decap reassembly
   */
  /* Configuration */
  f32 ip4_reass_conf_ht_ratio; //Size of ht is 2^ceil(log2(ratio*pool_size))
  u16 ip4_reass_conf_pool_size; //Max number of allocated reass structures
  u16 ip4_reass_conf_lifetime_ms; //Time a reassembly struct is considered valid in ms
  u32 ip4_reass_conf_buffers; //Maximum number of buffers used by ip4 reassembly

  /* Runtime */
  map_ip4_reass_t *ip4_reass_pool;
  u8 ip4_reass_ht_log2len; //Hash table size is 2^log2len
  u16 ip4_reass_allocated;
  u16 *ip4_reass_hash_table;
  u16 ip4_reass_fifo_last;
  volatile u32 *ip4_reass_lock;	/* spinlock, taken via map_ip4_reass_lock() */

  /* Counters */
  u32 ip4_reass_buffered_counter;	/* buffers currently cached by ip4 reassembly */

  bool frag_inner;		/* Inner or outer fragmentation */
  bool frag_ignore_df;		/* Fragment (outer) packet even if DF is set */

  /*
   * IPv6 decap reassembly
   */
  /* Configuration */
  f32 ip6_reass_conf_ht_ratio; //Size of ht is 2^ceil(log2(ratio*pool_size))
  u16 ip6_reass_conf_pool_size; //Max number of allocated reass structures
  u16 ip6_reass_conf_lifetime_ms; //Time a reassembly struct is considered valid in ms
  u32 ip6_reass_conf_buffers; //Maximum number of buffers used by ip6 reassembly

  /* Runtime */
  map_ip6_reass_t *ip6_reass_pool;
  u8 ip6_reass_ht_log2len; //Hash table size is 2^log2len
  u16 ip6_reass_allocated;
  u16 *ip6_reass_hash_table;
  u16 ip6_reass_fifo_last;
  volatile u32 *ip6_reass_lock;	/* spinlock, taken via map_ip6_reass_lock() */

  /* Counters */
  u32 ip6_reass_buffered_counter;	/* buffers currently cached by ip6 reassembly */

} map_main_t;
| 277 | |
| 278 | /* |
Ole Troan | 9fb8755 | 2016-01-13 22:30:43 +0100 | [diff] [blame] | 279 | * MAP Error counters/messages |
Ed Warnicke | cb9cada | 2015-12-08 15:45:58 -0700 | [diff] [blame] | 280 | */ |
/*
 * X-macro list of MAP errors: _(symbol, description).
 * Expanded with an _() definition below to generate map_error_t.
 */
#define foreach_map_error \
 /* Must be first. */ \
 _(NONE, "valid MAP packets") \
 _(BAD_PROTOCOL, "bad protocol") \
 _(SEC_CHECK, "security check failed") \
 _(ENCAP_SEC_CHECK, "encap security check failed") \
 _(DECAP_SEC_CHECK, "decap security check failed") \
 _(ICMP, "unable to translate ICMP") \
 _(ICMP_RELAY, "unable to relay ICMP") \
 _(UNKNOWN, "unknown") \
 _(NO_BINDING, "no binding") \
 _(NO_DOMAIN, "no domain") \
 _(FRAGMENTED, "packet is a fragment") \
 _(FRAGMENT_MEMORY, "could not cache fragment") \
 _(FRAGMENT_MALFORMED, "fragment has unexpected format")\
 _(FRAGMENT_DROPPED, "dropped cached fragment") \
 _(MALFORMED, "malformed packet") \
 _(DF_SET, "can't fragment, DF set")
Ed Warnicke | cb9cada | 2015-12-08 15:45:58 -0700 | [diff] [blame] | 299 | |
/* Error codes generated from foreach_map_error above. */
typedef enum {
#define _(sym,str) MAP_ERROR_##sym,
  foreach_map_error
#undef _
  MAP_N_ERROR,
} map_error_t;

/* Fetch the current value of a node's error counter (see map.c). */
u64 map_error_counter_get(u32 node_index, map_error_t map_error);
| 308 | |
/* Per-packet trace record; rendered by format_map_trace(). */
typedef struct {
  u32 map_domain_index;
  u16 port;
} map_trace_t;

/* Global MAP state.
 * NOTE(review): declared without 'extern' in a header, so every
 * translation unit that includes this file emits a tentative (common)
 * definition and relies on the linker merging them. Consider 'extern'
 * here plus a single definition in map.c. */
map_main_t map_main;
| 315 | |
Jean-Mickael Guerin | 8941ec2 | 2016-03-04 14:14:21 +0100 | [diff] [blame] | 316 | extern vlib_node_registration_t ip4_map_node; |
| 317 | extern vlib_node_registration_t ip6_map_node; |
Ed Warnicke | cb9cada | 2015-12-08 15:45:58 -0700 | [diff] [blame] | 318 | |
Jean-Mickael Guerin | 8941ec2 | 2016-03-04 14:14:21 +0100 | [diff] [blame] | 319 | extern vlib_node_registration_t ip4_map_t_node; |
| 320 | extern vlib_node_registration_t ip4_map_t_fragmented_node; |
| 321 | extern vlib_node_registration_t ip4_map_t_tcp_udp_node; |
| 322 | extern vlib_node_registration_t ip4_map_t_icmp_node; |
Ed Warnicke | cb9cada | 2015-12-08 15:45:58 -0700 | [diff] [blame] | 323 | |
Jean-Mickael Guerin | 8941ec2 | 2016-03-04 14:14:21 +0100 | [diff] [blame] | 324 | extern vlib_node_registration_t ip6_map_t_node; |
| 325 | extern vlib_node_registration_t ip6_map_t_fragmented_node; |
| 326 | extern vlib_node_registration_t ip6_map_t_tcp_udp_node; |
| 327 | extern vlib_node_registration_t ip6_map_t_icmp_node; |
Ed Warnicke | cb9cada | 2015-12-08 15:45:58 -0700 | [diff] [blame] | 328 | |
| 329 | /* |
| 330 | * map_get_pfx |
| 331 | */ |
/*
 * Compute the high 64 bits (host byte order) of the MAP IPv6 address
 * for a given IPv4 address and port. addr and port are in HOST byte
 * order — map_get_pfx_net() performs the conversions.
 */
static_always_inline u64
map_get_pfx (map_domain_t *d, u32 addr, u16 port)
{
  /* PSID: the significant port bits, selected by precomputed shift/mask. */
  u16 psid = (port >> d->psid_shift) & d->psid_mask;

  /* Shared 1:1 mode: the per-PSID rule supplies the prefix directly. */
  if (d->ea_bits_len == 0 && d->rules)
    return clib_net_to_host_u64(d->rules[psid].as_u64[0]);

  /* EA bits = IPv4 address suffix followed by the PSID. */
  u32 suffix = (addr >> d->suffix_shift) & d->suffix_mask;
  u64 ea = d->ea_bits_len == 0 ? 0 : (((u64) suffix << d->psid_length)) | psid;

  return clib_net_to_host_u64(d->ip6_prefix.as_u64[0]) | ea << d->ea_shift;
}
| 345 | |
| 346 | static_always_inline u64 |
| 347 | map_get_pfx_net (map_domain_t *d, u32 addr, u16 port) |
| 348 | { |
| 349 | return clib_host_to_net_u64(map_get_pfx(d, clib_net_to_host_u32(addr), |
| 350 | clib_net_to_host_u16(port))); |
| 351 | } |
| 352 | |
| 353 | /* |
| 354 | * map_get_sfx |
| 355 | */ |
/*
 * Compute the low 64 bits (host byte order) of the MAP IPv6 address —
 * the interface identifier, which embeds the IPv4 address and PSID.
 * addr and port are in HOST byte order (see map_get_sfx_net()).
 */
static_always_inline u64
map_get_sfx (map_domain_t *d, u32 addr, u16 port)
{
  u16 psid = (port >> d->psid_shift) & d->psid_mask;

  /* Shared 1:1 mode. */
  if (d->ea_bits_len == 0 && d->rules)
    return clib_net_to_host_u64(d->rules[psid].as_u64[1]);
  /* Full /128 address: the suffix comes verbatim from the domain prefix. */
  if (d->ip6_prefix_len == 128)
    return clib_net_to_host_u64(d->ip6_prefix.as_u64[1]);

  /* IPv4 prefix */
  if (d->flags & MAP_DOMAIN_PREFIX)
    return (u64) (addr & (0xFFFFFFFF << d->suffix_shift)) << 16;

  /* Shared or full IPv4 address */
  return ((u64) addr << 16) | psid;
}
| 374 | |
| 375 | static_always_inline u64 |
| 376 | map_get_sfx_net (map_domain_t *d, u32 addr, u16 port) |
| 377 | { |
| 378 | return clib_host_to_net_u64(map_get_sfx(d, clib_net_to_host_u32(addr), |
| 379 | clib_net_to_host_u16(port))); |
| 380 | } |
| 381 | |
| 382 | static_always_inline u32 |
| 383 | map_get_ip4 (ip6_address_t *addr) |
| 384 | { |
| 385 | return clib_host_to_net_u32(clib_net_to_host_u64(addr->as_u64[1]) >> 16); |
| 386 | } |
| 387 | |
| 388 | /* |
| 389 | * Get the MAP domain from an IPv4 lookup adjacency. |
| 390 | */ |
| 391 | static_always_inline map_domain_t * |
Neale Ranns | 0bfe5d8 | 2016-08-25 15:29:12 +0100 | [diff] [blame] | 392 | ip4_map_get_domain (u32 mdi, |
| 393 | u32 *map_domain_index) |
Ed Warnicke | cb9cada | 2015-12-08 15:45:58 -0700 | [diff] [blame] | 394 | { |
| 395 | map_main_t *mm = &map_main; |
Neale Ranns | 0bfe5d8 | 2016-08-25 15:29:12 +0100 | [diff] [blame] | 396 | map_dpo_t *md; |
| 397 | |
| 398 | md = map_dpo_get(mdi); |
| 399 | |
| 400 | ASSERT(md); |
| 401 | *map_domain_index = md->md_domain; |
| 402 | return pool_elt_at_index(mm->domains, *map_domain_index); |
Ed Warnicke | cb9cada | 2015-12-08 15:45:58 -0700 | [diff] [blame] | 403 | } |
| 404 | |
| 405 | /* |
| 406 | * Get the MAP domain from an IPv6 lookup adjacency. |
| 407 | * If the IPv6 address or prefix is not shared, no lookup is required. |
| 408 | * The IPv4 address is used otherwise. |
| 409 | */ |
| 410 | static_always_inline map_domain_t * |
Neale Ranns | 0bfe5d8 | 2016-08-25 15:29:12 +0100 | [diff] [blame] | 411 | ip6_map_get_domain (u32 mdi, ip4_address_t *addr, |
Ed Warnicke | cb9cada | 2015-12-08 15:45:58 -0700 | [diff] [blame] | 412 | u32 *map_domain_index, u8 *error) |
| 413 | { |
| 414 | map_main_t *mm = &map_main; |
Neale Ranns | 0bfe5d8 | 2016-08-25 15:29:12 +0100 | [diff] [blame] | 415 | map_dpo_t *md; |
Ole Troan | 366ac6e | 2016-01-06 12:40:28 +0100 | [diff] [blame] | 416 | |
| 417 | /* |
| 418 | * Disable direct MAP domain lookup on decap, until the security check is updated to verify IPv4 SA. |
| 419 | * (That's done implicitly when MAP domain is looked up in the IPv4 FIB) |
| 420 | */ |
| 421 | #ifdef MAP_NONSHARED_DOMAIN_ENABLED |
Neale Ranns | 0bfe5d8 | 2016-08-25 15:29:12 +0100 | [diff] [blame] | 422 | md = map_dpo_get(mdi); |
| 423 | |
| 424 | ASSERT(md); |
| 425 | *map_domain_index = md->md_domain; |
| 426 | if (*map_domain_index != ~0) |
| 427 | return pool_elt_at_index(mm->domains, *map_domain_index); |
Ole Troan | 366ac6e | 2016-01-06 12:40:28 +0100 | [diff] [blame] | 428 | #endif |
Ed Warnicke | cb9cada | 2015-12-08 15:45:58 -0700 | [diff] [blame] | 429 | |
Neale Ranns | 0bfe5d8 | 2016-08-25 15:29:12 +0100 | [diff] [blame] | 430 | u32 lbi = ip4_fib_forwarding_lookup(0, addr); |
| 431 | const dpo_id_t *dpo = load_balance_get_bucket(lbi, 0); |
| 432 | if (PREDICT_TRUE(dpo->dpoi_type == map_dpo_type || |
| 433 | dpo->dpoi_type == map_t_dpo_type)) |
| 434 | { |
| 435 | md = map_dpo_get(dpo->dpoi_index); |
| 436 | *map_domain_index = md->md_domain; |
| 437 | return pool_elt_at_index(mm->domains, *map_domain_index); |
| 438 | } |
Ed Warnicke | cb9cada | 2015-12-08 15:45:58 -0700 | [diff] [blame] | 439 | *error = MAP_ERROR_NO_DOMAIN; |
| 440 | return NULL; |
| 441 | } |
| 442 | |
/* Find or allocate the reassembly structure for the given flow key.
 * Per the IP4 reassembly comment above, the oldest entry may be
 * recycled; its cached buffer indices are returned via *pi_to_drop. */
map_ip4_reass_t *
map_ip4_reass_get(u32 src, u32 dst, u16 fragment_id,
		  u8 protocol, u32 **pi_to_drop);
void
map_ip4_reass_free(map_ip4_reass_t *r, u32 **pi_to_drop);

/* Spinlock protecting the ip4 reassembly state (release includes a
 * memory barrier so cached state is published before unlock). */
#define map_ip4_reass_lock() while (__sync_lock_test_and_set(map_main.ip4_reass_lock, 1)) {}
#define map_ip4_reass_unlock() do {CLIB_MEMORY_BARRIER(); *map_main.ip4_reass_lock = 0;} while(0)
| 451 | |
| 452 | static_always_inline void |
| 453 | map_ip4_reass_get_fragments(map_ip4_reass_t *r, u32 **pi) |
| 454 | { |
| 455 | int i; |
| 456 | for (i=0; i<MAP_IP4_REASS_MAX_FRAGMENTS_PER_REASSEMBLY; i++) |
| 457 | if(r->fragments[i] != ~0) { |
| 458 | vec_add1(*pi, r->fragments[i]); |
| 459 | r->fragments[i] = ~0; |
| 460 | map_main.ip4_reass_buffered_counter--; |
| 461 | } |
| 462 | } |
| 463 | |
int map_ip4_reass_add_fragment(map_ip4_reass_t *r, u32 pi);

/* IPv6 counterparts of the ip4 reassembly accessors above. */
map_ip6_reass_t *
map_ip6_reass_get(ip6_address_t *src, ip6_address_t *dst, u32 fragment_id,
		  u8 protocol, u32 **pi_to_drop);
void
map_ip6_reass_free(map_ip6_reass_t *r, u32 **pi_to_drop);

/* Spinlock protecting the ip6 reassembly state (release includes a
 * memory barrier so cached state is published before unlock). */
#define map_ip6_reass_lock() while (__sync_lock_test_and_set(map_main.ip6_reass_lock, 1)) {}
#define map_ip6_reass_unlock() do {CLIB_MEMORY_BARRIER(); *map_main.ip6_reass_lock = 0;} while(0)

int
map_ip6_reass_add_fragment(map_ip6_reass_t *r, u32 pi,
			   u16 data_offset, u16 next_data_offset,
			   u8 *data_start, u16 data_len);

void map_ip4_drop_pi(u32 pi);

/* Runtime reconfiguration of the ip4 reassembly tunables; counts of
 * trashed reassemblies and dropped packets caused by the resize are
 * reported via the out parameters. */
int map_ip4_reass_conf_ht_ratio(f32 ht_ratio, u32 *trashed_reass, u32 *dropped_packets);
#define MAP_IP4_REASS_CONF_HT_RATIO_MAX 100
int map_ip4_reass_conf_pool_size(u16 pool_size, u32 *trashed_reass, u32 *dropped_packets);
#define MAP_IP4_REASS_CONF_POOL_SIZE_MAX (0xfeff)
int map_ip4_reass_conf_lifetime(u16 lifetime_ms);
#define MAP_IP4_REASS_CONF_LIFETIME_MAX 0xffff
int map_ip4_reass_conf_buffers(u32 buffers);
#define MAP_IP4_REASS_CONF_BUFFERS_MAX (0xffffffff)

void map_ip6_drop_pi(u32 pi);


/* Runtime reconfiguration of the ip6 reassembly tunables (same
 * semantics as the ip4 set above). */
int map_ip6_reass_conf_ht_ratio(f32 ht_ratio, u32 *trashed_reass, u32 *dropped_packets);
#define MAP_IP6_REASS_CONF_HT_RATIO_MAX 100
int map_ip6_reass_conf_pool_size(u16 pool_size, u32 *trashed_reass, u32 *dropped_packets);
#define MAP_IP6_REASS_CONF_POOL_SIZE_MAX (0xfeff)
int map_ip6_reass_conf_lifetime(u16 lifetime_ms);
#define MAP_IP6_REASS_CONF_LIFETIME_MAX 0xffff
int map_ip6_reass_conf_buffers(u32 buffers);
#define MAP_IP6_REASS_CONF_BUFFERS_MAX (0xffffffff)
| 502 | |
/*
 * Locate the L4 header of an IPv6 packet.
 * Only a single, leading fragment extension header is understood; any
 * other extension header will be reported in *l4_protocol as if it
 * were the L4 protocol. *frag_hdr_offset is 0 when no fragment header
 * is present.
 * Returns non-zero when fewer than 4 bytes of L4 header are available,
 * judged against both buff_len and the IPv6 payload_length field.
 * NOTE(review): the fragment header's next_hdr is read before any
 * length check — assumes the caller guarantees at least
 * sizeof(ip6_header_t) + sizeof(ip6_frag_hdr_t) contiguous bytes.
 */
static_always_inline
int ip6_parse(const ip6_header_t *ip6, u32 buff_len,
	      u8 *l4_protocol, u16 *l4_offset, u16 *frag_hdr_offset)
{
  if (ip6->protocol == IP_PROTOCOL_IPV6_FRAGMENTATION) {
    *l4_protocol = ((ip6_frag_hdr_t *)(ip6 + 1))->next_hdr;
    *frag_hdr_offset = sizeof(*ip6);
    *l4_offset = sizeof(*ip6) + sizeof(ip6_frag_hdr_t);
  } else {
    *l4_protocol = ip6->protocol;
    *frag_hdr_offset = 0;
    *l4_offset = sizeof(*ip6);
  }

  return (buff_len < (*l4_offset + 4)) ||
      (clib_net_to_host_u16(ip6->payload_length) < (*l4_offset + 4 - sizeof(*ip6)));
}
| 520 | |
| 521 | |
/*
 * Pointer/byte arithmetic helpers. Both arguments are parenthesized so
 * expression arguments (e.g. "base + off" or "1 << n") expand with the
 * intended precedence; the unparenthesized originals mis-expanded for
 * any argument containing an operator binding looser than '+'.
 */
#define u8_ptr_add(ptr, index) (((u8 *) (ptr)) + (index))
#define u16_net_add(u, val) clib_host_to_net_u16(clib_net_to_host_u16(u) + (val))

/* Fold a 32-bit IPv6 fragment id into a 16-bit IPv4 id. */
#define frag_id_6to4(id) ((id) ^ ((id) >> 16))
| 526 | |
| 527 | static_always_inline void |
| 528 | ip4_map_t_embedded_address (map_domain_t *d, |
| 529 | ip6_address_t *ip6, const ip4_address_t *ip4) |
| 530 | { |
| 531 | ASSERT(d->ip6_src_len == 96); //No support for other lengths for now |
| 532 | ip6->as_u64[0] = d->ip6_src.as_u64[0]; |
| 533 | ip6->as_u32[2] = d->ip6_src.as_u32[2]; |
| 534 | ip6->as_u32[3] = ip4->as_u32; |
| 535 | } |
| 536 | |
| 537 | static_always_inline u32 |
| 538 | ip6_map_t_embedded_address (map_domain_t *d, ip6_address_t *addr) |
| 539 | { |
| 540 | ASSERT(d->ip6_src_len == 96); //No support for other lengths for now |
| 541 | return addr->as_u32[3]; |
| 542 | } |
| 543 | |
| 544 | static inline void |
| 545 | map_domain_counter_lock (map_main_t *mm) |
| 546 | { |
| 547 | if (mm->counter_lock) |
| 548 | while (__sync_lock_test_and_set(mm->counter_lock, 1)) |
| 549 | /* zzzz */ ; |
| 550 | } |
| 551 | static inline void |
| 552 | map_domain_counter_unlock (map_main_t *mm) |
| 553 | { |
| 554 | if (mm->counter_lock) |
| 555 | *mm->counter_lock = 0; |
| 556 | } |
| 557 | |
| 558 | |
/*
 * Enqueue every buffer index in pi_vector to the single next node
 * 'next', stamping each buffer's error field with *error first.
 * Standard vlib pattern: fill frames for next_index until the vector
 * is drained.
 */
static_always_inline void
map_send_all_to_node(vlib_main_t *vm, u32 *pi_vector,
                     vlib_node_runtime_t *node, vlib_error_t *error,
                     u32 next)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  //Deal with fragments that are ready
  from = pi_vector;
  n_left_from = vec_len(pi_vector);
  next_index = node->cached_next_index;
  while (n_left_from > 0) {
    /* Get (more) room in a frame headed to next_index. */
    vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
    while (n_left_from > 0 && n_left_to_next > 0) {
      u32 pi0 = to_next[0] = from[0];
      from += 1;
      n_left_from -= 1;
      to_next += 1;
      n_left_to_next -= 1;
      vlib_buffer_t *p0 = vlib_get_buffer(vm, pi0);
      p0->error = *error;
      /* Validates the speculative enqueue; fixes up the frame if the
       * buffer actually needs a different next. */
      vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, pi0, next);
    }
    vlib_put_next_frame(vm, node, next_index, n_left_to_next);
  }
}
Keith Burns (alagalah) | 06e3d07 | 2016-08-07 08:43:18 -0700 | [diff] [blame] | 584 | |
| 585 | /* |
| 586 | * fd.io coding-style-patch-verification: ON |
| 587 | * |
| 588 | * Local Variables: |
| 589 | * eval: (c-set-style "gnu") |
| 590 | * End: |
| 591 | */ |