/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/tcp/tcp.h>
#include <vnet/lisp-cp/packets.h>

vlib_node_registration_t tcp4_output_node;
vlib_node_registration_t tcp6_output_node;

typedef enum _tcp_output_next
{
  TCP_OUTPUT_NEXT_DROP,
  TCP_OUTPUT_NEXT_IP_LOOKUP,
  TCP_OUTPUT_N_NEXT
} tcp_output_next_t;

#define foreach_tcp4_output_next        \
  _ (DROP, "error-drop")                \
  _ (IP_LOOKUP, "ip4-lookup")

#define foreach_tcp6_output_next        \
  _ (DROP, "error-drop")                \
  _ (IP_LOOKUP, "ip6-lookup")

static char *tcp_error_strings[] = {
#define tcp_error(n,s) s,
#include <vnet/tcp/tcp_error.def>
#undef tcp_error
};

typedef struct
{
  u16 src_port;
  u16 dst_port;
  u8 state;
} tcp_tx_trace_t;

u16 dummy_mtu = 400;

u8 *
format_tcp_tx_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  tcp_tx_trace_t *t = va_arg (*args, tcp_tx_trace_t *);

  /* The output nodes do not record trace data yet; print the raw fields */
  s = format (s, "tcp tx: src_port %u dst_port %u state %u\n",
              t->src_port, t->dst_port, t->state);

  return s;
}

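/**
 * Compute and set the connection's send MSS: the smaller of the
 * peer-advertised MSS and our (for now hard-coded) MTU stand-in.
 */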
void
tcp_set_snd_mss (tcp_connection_t * tc)
{
  u16 snd_mss;

  /* TODO find our iface MTU */
  snd_mss = dummy_mtu;

  /* TODO cache mss and consider PMTU discovery */
  snd_mss = tc->opt.mss < snd_mss ? tc->opt.mss : snd_mss;

  tc->snd_mss = snd_mss;

  if (tc->snd_mss == 0)
    {
      clib_warning ("snd mss is 0");
      tc->snd_mss = dummy_mtu;
    }
}

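/**
 * Find the smallest window scale factor that lets the available space be
 * advertised in the 16-bit window field. As a worked example, assuming
 * TCP_WND_MAX is 65535: a 1MB fifo yields a scale of 5, since 1MB >> 4 still
 * exceeds 65535 while 1MB >> 5 = 32768 fits.
 */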
static u8
tcp_window_compute_scale (u32 available_space)
{
  u8 wnd_scale = 0;
  while (wnd_scale < TCP_MAX_WND_SCALE
         && (available_space >> wnd_scale) > TCP_WND_MAX)
    wnd_scale++;
  return wnd_scale;
}

/**
 * Compute initial window and scale factor. As per RFC1323, window field in
 * SYN and SYN-ACK segments is never scaled.
 */
u32
tcp_initial_window_to_advertise (tcp_connection_t * tc)
{
  u32 available_space;

  /* Initial wnd for SYN. Fifos are not allocated yet.
   * Use some predefined value */
  if (tc->state != TCP_STATE_SYN_RCVD)
    {
      return TCP_DEFAULT_RX_FIFO_SIZE;
    }

  available_space = stream_session_max_enqueue (&tc->connection);
  tc->rcv_wscale = tcp_window_compute_scale (available_space);
  tc->rcv_wnd = clib_min (available_space, TCP_WND_MAX << tc->rcv_wscale);

  return clib_min (tc->rcv_wnd, TCP_WND_MAX);
}

/**
 * Compute and return window to advertise, scaled as per RFC1323
 */
u32
tcp_window_to_advertise (tcp_connection_t * tc, tcp_state_t state)
{
  u32 available_space, wnd, scaled_space;

  if (state != TCP_STATE_ESTABLISHED)
    return tcp_initial_window_to_advertise (tc);

  available_space = stream_session_max_enqueue (&tc->connection);
  scaled_space = available_space >> tc->rcv_wscale;

  /* Need to update scale */
  if (PREDICT_FALSE ((scaled_space == 0 && available_space != 0))
      || (scaled_space >= TCP_WND_MAX))
    tc->rcv_wscale = tcp_window_compute_scale (available_space);

  wnd = clib_min (available_space, TCP_WND_MAX << tc->rcv_wscale);
  tc->rcv_wnd = wnd;

  return wnd >> tc->rcv_wscale;
}

/**
 * Write TCP options to segment.
 */
u32
tcp_options_write (u8 * data, tcp_options_t * opts)
{
  u32 opts_len = 0;
  u32 buf, seq_len = 4;

  if (tcp_opts_mss (opts))
    {
      *data++ = TCP_OPTION_MSS;
      *data++ = TCP_OPTION_LEN_MSS;
      buf = clib_host_to_net_u16 (opts->mss);
      clib_memcpy (data, &buf, sizeof (opts->mss));
      data += sizeof (opts->mss);
      opts_len += TCP_OPTION_LEN_MSS;
    }

  if (tcp_opts_wscale (opts))
    {
      *data++ = TCP_OPTION_WINDOW_SCALE;
      *data++ = TCP_OPTION_LEN_WINDOW_SCALE;
      *data++ = opts->wscale;
      opts_len += TCP_OPTION_LEN_WINDOW_SCALE;
    }

  if (tcp_opts_sack_permitted (opts))
    {
      *data++ = TCP_OPTION_SACK_PERMITTED;
      *data++ = TCP_OPTION_LEN_SACK_PERMITTED;
      opts_len += TCP_OPTION_LEN_SACK_PERMITTED;
    }

  if (tcp_opts_tstamp (opts))
    {
      *data++ = TCP_OPTION_TIMESTAMP;
      *data++ = TCP_OPTION_LEN_TIMESTAMP;
      buf = clib_host_to_net_u32 (opts->tsval);
      clib_memcpy (data, &buf, sizeof (opts->tsval));
      data += sizeof (opts->tsval);
      buf = clib_host_to_net_u32 (opts->tsecr);
      clib_memcpy (data, &buf, sizeof (opts->tsecr));
      data += sizeof (opts->tsecr);
      opts_len += TCP_OPTION_LEN_TIMESTAMP;
    }

  if (tcp_opts_sack (opts))
    {
      int i;
      u32 n_sack_blocks = clib_min (vec_len (opts->sacks),
                                    TCP_OPTS_MAX_SACK_BLOCKS);

      if (n_sack_blocks != 0)
        {
          *data++ = TCP_OPTION_SACK_BLOCK;
          *data++ = 2 + n_sack_blocks * TCP_OPTION_LEN_SACK_BLOCK;
          for (i = 0; i < n_sack_blocks; i++)
            {
              buf = clib_host_to_net_u32 (opts->sacks[i].start);
              clib_memcpy (data, &buf, seq_len);
              data += seq_len;
              buf = clib_host_to_net_u32 (opts->sacks[i].end);
              clib_memcpy (data, &buf, seq_len);
              data += seq_len;
            }
          opts_len += 2 + n_sack_blocks * TCP_OPTION_LEN_SACK_BLOCK;
        }
    }

  /* Terminate TCP options */
  if (opts_len % 4)
    {
      *data++ = TCP_OPTION_EOL;
      opts_len += TCP_OPTION_LEN_EOL;
    }

  /* Pad with zeroes to a u32 boundary */
  while (opts_len % 4)
    {
      *data++ = TCP_OPTION_NOOP;
      opts_len += TCP_OPTION_LEN_NOOP;
    }
  return opts_len;
}

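/**
 * Build options for an active-open SYN: MSS, window scale, timestamp and
 * SACK-permitted are always offered. Returns the option block length,
 * rounded up to a 4-byte boundary.
 */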
always_inline int
tcp_make_syn_options (tcp_options_t * opts, u32 initial_wnd)
{
  u8 len = 0;

  opts->flags |= TCP_OPTS_FLAG_MSS;
  opts->mss = dummy_mtu;	/* XXX discover that */
  len += TCP_OPTION_LEN_MSS;

  opts->flags |= TCP_OPTS_FLAG_WSCALE;
  opts->wscale = tcp_window_compute_scale (initial_wnd);
  len += TCP_OPTION_LEN_WINDOW_SCALE;

  opts->flags |= TCP_OPTS_FLAG_TSTAMP;
  opts->tsval = tcp_time_now ();
  opts->tsecr = 0;
  len += TCP_OPTION_LEN_TIMESTAMP;

  opts->flags |= TCP_OPTS_FLAG_SACK_PERMITTED;
  len += TCP_OPTION_LEN_SACK_PERMITTED;

  /* Align to needed boundary */
  len += (TCP_OPTS_ALIGN - len % TCP_OPTS_ALIGN) % TCP_OPTS_ALIGN;
  return len;
}

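/**
 * Build options for a SYN-ACK: MSS is always included, while window scale,
 * timestamp and SACK-permitted are echoed only if the peer offered them in
 * its SYN.
 */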
always_inline int
tcp_make_synack_options (tcp_connection_t * tc, tcp_options_t * opts)
{
  u8 len = 0;

  opts->flags |= TCP_OPTS_FLAG_MSS;
  opts->mss = dummy_mtu;	/* XXX discover that */
  len += TCP_OPTION_LEN_MSS;

  if (tcp_opts_wscale (&tc->opt))
    {
      opts->flags |= TCP_OPTS_FLAG_WSCALE;
      opts->wscale = tc->rcv_wscale;
      len += TCP_OPTION_LEN_WINDOW_SCALE;
    }

  if (tcp_opts_tstamp (&tc->opt))
    {
      opts->flags |= TCP_OPTS_FLAG_TSTAMP;
      opts->tsval = tcp_time_now ();
      opts->tsecr = tc->tsval_recent;
      len += TCP_OPTION_LEN_TIMESTAMP;
    }

  if (tcp_opts_sack_permitted (&tc->opt))
    {
      opts->flags |= TCP_OPTS_FLAG_SACK_PERMITTED;
      len += TCP_OPTION_LEN_SACK_PERMITTED;
    }

  /* Align to needed boundary */
  len += (TCP_OPTS_ALIGN - len % TCP_OPTS_ALIGN) % TCP_OPTS_ALIGN;
  return len;
}

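/**
 * Build options for established-state segments: timestamp if negotiated and,
 * when there are pending SACK blocks to report, the SACK option.
 */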
always_inline int
tcp_make_established_options (tcp_connection_t * tc, tcp_options_t * opts)
{
  u8 len = 0;

  opts->flags = 0;

  if (tcp_opts_tstamp (&tc->opt))
    {
      opts->flags |= TCP_OPTS_FLAG_TSTAMP;
      opts->tsval = tcp_time_now ();
      opts->tsecr = tc->tsval_recent;
      len += TCP_OPTION_LEN_TIMESTAMP;
    }
  if (tcp_opts_sack_permitted (&tc->opt))
    {
      if (vec_len (tc->snd_sacks))
        {
          opts->flags |= TCP_OPTS_FLAG_SACK;
          opts->sacks = tc->snd_sacks;
          opts->n_sack_blocks = vec_len (tc->snd_sacks);
          len += 2 + TCP_OPTION_LEN_SACK_BLOCK * opts->n_sack_blocks;
        }
    }

  /* Align to needed boundary */
  len += (TCP_OPTS_ALIGN - len % TCP_OPTS_ALIGN) % TCP_OPTS_ALIGN;
  return len;
}

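/**
 * Dispatch option building based on the state the segment is sent in.
 */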
always_inline int
tcp_make_options (tcp_connection_t * tc, tcp_options_t * opts,
                  tcp_state_t state)
{
  switch (state)
    {
    case TCP_STATE_ESTABLISHED:
    case TCP_STATE_FIN_WAIT_1:
      return tcp_make_established_options (tc, opts);
    case TCP_STATE_SYN_RCVD:
      return tcp_make_synack_options (tc, opts);
    case TCP_STATE_SYN_SENT:
      return tcp_make_syn_options (opts,
                                   tcp_initial_window_to_advertise (tc));
    default:
      clib_warning ("Not handled!");
      return 0;
    }
}

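/**
 * Get a free buffer index from the per-thread tx buffer cache, allocating a
 * small batch on demand. This is a macro rather than a function so that, on
 * buffer shortage, it can return directly from the (void) caller.
 */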
#define tcp_get_free_buffer_index(tm, bidx)                             \
do {                                                                    \
  u32 *my_tx_buffers, n_free_buffers;                                   \
  u32 cpu_index = tm->vlib_main->cpu_index;                             \
  my_tx_buffers = tm->tx_buffers[cpu_index];                            \
  if (PREDICT_FALSE(vec_len (my_tx_buffers) == 0))                      \
    {                                                                   \
      n_free_buffers = 32;      /* TODO config or macro */              \
      vec_validate (my_tx_buffers, n_free_buffers - 1);                 \
      _vec_len(my_tx_buffers) = vlib_buffer_alloc_from_free_list (      \
          tm->vlib_main, my_tx_buffers, n_free_buffers,                 \
          VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);                         \
      tm->tx_buffers[cpu_index] = my_tx_buffers;                        \
    }                                                                   \
  /* buffer shortage */                                                 \
  if (PREDICT_FALSE (vec_len (my_tx_buffers) == 0))                     \
    return;                                                             \
  *bidx = my_tx_buffers[_vec_len (my_tx_buffers)-1];                    \
  _vec_len (my_tx_buffers) -= 1;                                        \
} while (0)

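/**
 * Reset an existing buffer chain so it can be rewritten as a new segment and
 * reserve headroom for the TCP/IP headers.
 */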
always_inline void
tcp_reuse_buffer (vlib_main_t * vm, vlib_buffer_t * b)
{
  vlib_buffer_t *it = b;
  do
    {
      it->current_data = 0;
      it->current_length = 0;
      it->total_length_not_including_first_buffer = 0;
    }
  while ((it->flags & VLIB_BUFFER_NEXT_PRESENT)
         && (it = vlib_get_buffer (vm, it->next_buffer)));

  /* Leave enough space for headers */
  vlib_buffer_make_headroom (b, MAX_HDRS_LEN);
}

/**
 * Prepare ACK
 */
void
tcp_make_ack_i (tcp_connection_t * tc, vlib_buffer_t * b, tcp_state_t state,
                u8 flags)
{
  tcp_options_t _snd_opts, *snd_opts = &_snd_opts;
  u8 tcp_opts_len, tcp_hdr_opts_len;
  tcp_header_t *th;
  u16 wnd;

  wnd = tcp_window_to_advertise (tc, state);

  /* Make and write options */
  tcp_opts_len = tcp_make_established_options (tc, snd_opts);
  tcp_hdr_opts_len = tcp_opts_len + sizeof (tcp_header_t);

  th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port, tc->snd_nxt,
                             tc->rcv_nxt, tcp_hdr_opts_len, flags, wnd);

  tcp_options_write ((u8 *) (th + 1), snd_opts);

  /* Mark as ACK */
  vnet_buffer (b)->tcp.connection_index = tc->c_c_index;
}

/**
 * Convert buffer to ACK
 */
void
tcp_make_ack (tcp_connection_t * tc, vlib_buffer_t * b)
{
  tcp_main_t *tm = vnet_get_tcp_main ();
  vlib_main_t *vm = tm->vlib_main;

  tcp_reuse_buffer (vm, b);
  tcp_make_ack_i (tc, b, TCP_STATE_ESTABLISHED, TCP_FLAG_ACK);
  vnet_buffer (b)->tcp.flags = TCP_BUF_FLAG_ACK;
}

/**
 * Convert buffer to FIN-ACK
 */
void
tcp_make_finack (tcp_connection_t * tc, vlib_buffer_t * b)
{
  tcp_main_t *tm = vnet_get_tcp_main ();
  vlib_main_t *vm = tm->vlib_main;

  tcp_reuse_buffer (vm, b);
  tcp_make_ack_i (tc, b, TCP_STATE_ESTABLISHED, TCP_FLAG_ACK | TCP_FLAG_FIN);

  /* Reset flags, make sure ack is sent */
  tc->flags = TCP_CONN_SNDACK;
  vnet_buffer (b)->tcp.flags &= ~TCP_BUF_FLAG_DUPACK;

  tc->snd_nxt += 1;
}

/**
 * Convert buffer to SYN-ACK
 */
void
tcp_make_synack (tcp_connection_t * tc, vlib_buffer_t * b)
{
  tcp_main_t *tm = vnet_get_tcp_main ();
  vlib_main_t *vm = tm->vlib_main;
  tcp_options_t _snd_opts, *snd_opts = &_snd_opts;
  u8 tcp_opts_len, tcp_hdr_opts_len;
  tcp_header_t *th;
  u16 initial_wnd;
  u32 time_now;

  memset (snd_opts, 0, sizeof (*snd_opts));

  tcp_reuse_buffer (vm, b);

  /* Set random initial sequence */
  time_now = tcp_time_now ();

  tc->iss = random_u32 (&time_now);
  tc->snd_una = tc->iss;
  tc->snd_nxt = tc->iss + 1;
  tc->snd_una_max = tc->snd_nxt;

  initial_wnd = tcp_initial_window_to_advertise (tc);

  /* Make and write options */
  tcp_opts_len = tcp_make_synack_options (tc, snd_opts);
  tcp_hdr_opts_len = tcp_opts_len + sizeof (tcp_header_t);

  th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port, tc->iss,
                             tc->rcv_nxt, tcp_hdr_opts_len,
                             TCP_FLAG_SYN | TCP_FLAG_ACK, initial_wnd);

  tcp_options_write ((u8 *) (th + 1), snd_opts);

  vnet_buffer (b)->tcp.connection_index = tc->c_c_index;
  vnet_buffer (b)->tcp.flags = TCP_BUF_FLAG_ACK;

  /* Init retransmit timer */
  tcp_retransmit_timer_set (tm, tc);
}

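/**
 * Hand a fully built packet straight to ip4-lookup/ip6-lookup, bypassing the
 * tcp output nodes.
 */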
always_inline void
tcp_enqueue_to_ip_lookup (vlib_main_t * vm, vlib_buffer_t * b, u32 bi,
                          u8 is_ip4)
{
  u32 *to_next, next_index;
  vlib_frame_t *f;

  b->flags |= VNET_BUFFER_LOCALLY_ORIGINATED;
  b->error = 0;

  /* Default FIB for now */
  vnet_buffer (b)->sw_if_index[VLIB_TX] = 0;

  /* Send to IP lookup */
  next_index = is_ip4 ? ip4_lookup_node.index : ip6_lookup_node.index;
  f = vlib_get_frame_to_node (vm, next_index);

  /* Enqueue the packet */
  to_next = vlib_frame_vector_args (f);
  to_next[0] = bi;
  f->n_vectors = 1;
  vlib_put_frame_to_node (vm, next_index, f);
}

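/**
 * Rewrite the received packet in place into a RST: swap addresses and ports
 * and fill in sequence/ack numbers depending on the connection state, as per
 * RFC793 reset generation. Returns -1 if no reset should be sent.
 */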
int
tcp_make_reset_in_place (vlib_main_t * vm, vlib_buffer_t * b0,
                         tcp_state_t state, u32 my_thread_index, u8 is_ip4)
{
  u8 tcp_hdr_len = sizeof (tcp_header_t);
  ip4_header_t *ih4;
  ip6_header_t *ih6;
  tcp_header_t *th0;
  ip4_address_t src_ip40;
  ip6_address_t src_ip60;
  u16 src_port0;
  u32 tmp;

  /* Find IP and TCP headers */
  if (is_ip4)
    {
      ih4 = vlib_buffer_get_current (b0);
      th0 = ip4_next_header (ih4);
    }
  else
    {
      ih6 = vlib_buffer_get_current (b0);
      th0 = ip6_next_header (ih6);
    }

  /* Swap src and dst ip */
  if (is_ip4)
    {
      ASSERT ((ih4->ip_version_and_header_length & 0xF0) == 0x40);
      src_ip40.as_u32 = ih4->src_address.as_u32;
      ih4->src_address.as_u32 = ih4->dst_address.as_u32;
      ih4->dst_address.as_u32 = src_ip40.as_u32;

      /* Chop the end of the pkt */
      b0->current_length += ip4_header_bytes (ih4) + tcp_hdr_len;
    }
  else
    {
      ASSERT ((ih6->ip_version_traffic_class_and_flow_label & 0xF0) == 0x60);
      clib_memcpy (&src_ip60, &ih6->src_address, sizeof (ip6_address_t));
      clib_memcpy (&ih6->src_address, &ih6->dst_address,
                   sizeof (ip6_address_t));
      clib_memcpy (&ih6->dst_address, &src_ip60, sizeof (ip6_address_t));

      /* Chop the end of the pkt */
      b0->current_length += sizeof (ip6_header_t) + tcp_hdr_len;
    }

  /* Try to determine what/why we're actually resetting and swap
   * src and dst ports */
  if (state == TCP_STATE_CLOSED)
    {
      if (!tcp_syn (th0))
        return -1;

      tmp = clib_net_to_host_u32 (th0->seq_number);

      /* Got a SYN for no listener. */
      th0->flags = TCP_FLAG_RST | TCP_FLAG_ACK;
      th0->ack_number = clib_host_to_net_u32 (tmp + 1);
      th0->seq_number = 0;
    }
  else if (state >= TCP_STATE_SYN_SENT)
    {
      th0->flags = TCP_FLAG_RST | TCP_FLAG_ACK;
      th0->seq_number = th0->ack_number;
      th0->ack_number = 0;
    }

  src_port0 = th0->src_port;
  th0->src_port = th0->dst_port;
  th0->dst_port = src_port0;
  th0->window = 0;
  th0->data_offset_and_reserved = (tcp_hdr_len >> 2) << 4;
  th0->urgent_pointer = 0;

  /* Compute checksum */
  if (is_ip4)
    {
      th0->checksum = ip4_tcp_udp_compute_checksum (vm, b0, ih4);
    }
  else
    {
      int bogus = ~0;
      th0->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b0, ih6, &bogus);
      ASSERT (!bogus);
    }

  return 0;
}

/**
 * Send reset without reusing existing buffer
 */
void
tcp_send_reset (vlib_buffer_t * pkt, u8 is_ip4)
{
  vlib_buffer_t *b;
  u32 bi;
  tcp_main_t *tm = vnet_get_tcp_main ();
  vlib_main_t *vm = tm->vlib_main;
  u8 tcp_hdr_len, flags = 0;
  tcp_header_t *th, *pkt_th;
  u32 seq, ack;
  ip4_header_t *ih4, *pkt_ih4;
  ip6_header_t *ih6, *pkt_ih6;

  tcp_get_free_buffer_index (tm, &bi);
  b = vlib_get_buffer (vm, bi);

  /* Leave enough space for headers */
  vlib_buffer_make_headroom (b, MAX_HDRS_LEN);

  /* Make and write options */
  tcp_hdr_len = sizeof (tcp_header_t);

  if (is_ip4)
    {
      pkt_ih4 = vlib_buffer_get_current (pkt);
      pkt_th = ip4_next_header (pkt_ih4);
    }
  else
    {
      pkt_ih6 = vlib_buffer_get_current (pkt);
      pkt_th = ip6_next_header (pkt_ih6);
    }

  if (tcp_ack (pkt_th))
    {
      flags = TCP_FLAG_RST;
      seq = pkt_th->ack_number;
      ack = 0;
    }
  else
    {
      flags = TCP_FLAG_RST | TCP_FLAG_ACK;
      seq = 0;
      ack = clib_host_to_net_u32 (vnet_buffer (pkt)->tcp.seq_end);
    }

  th = vlib_buffer_push_tcp_net_order (b, pkt_th->dst_port, pkt_th->src_port,
                                       seq, ack, tcp_hdr_len, flags, 0);

  /* Swap src and dst ip */
  if (is_ip4)
    {
      ASSERT ((pkt_ih4->ip_version_and_header_length & 0xF0) == 0x40);
      ih4 = vlib_buffer_push_ip4 (vm, b, &pkt_ih4->dst_address,
                                  &pkt_ih4->src_address, IP_PROTOCOL_TCP);
      th->checksum = ip4_tcp_udp_compute_checksum (vm, b, ih4);
    }
  else
    {
      int bogus = ~0;
      pkt_ih6 = (ip6_header_t *) (pkt_th - 1);
      ASSERT ((pkt_ih6->ip_version_traffic_class_and_flow_label & 0xF0) ==
              0x60);
      ih6 = vlib_buffer_push_ip6 (vm, b, &pkt_ih6->dst_address,
                                  &pkt_ih6->src_address, IP_PROTOCOL_TCP);
      th->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b, ih6, &bogus);
      ASSERT (!bogus);
    }

  tcp_enqueue_to_ip_lookup (vm, b, bi, is_ip4);
}

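/**
 * Push the connection's IP header onto the buffer and compute the TCP
 * checksum.
 */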
void
tcp_push_ip_hdr (tcp_main_t * tm, tcp_connection_t * tc, vlib_buffer_t * b)
{
  tcp_header_t *th = vlib_buffer_get_current (b);

  if (tc->c_is_ip4)
    {
      ip4_header_t *ih;
      ih = vlib_buffer_push_ip4 (tm->vlib_main, b, &tc->c_lcl_ip4,
                                 &tc->c_rmt_ip4, IP_PROTOCOL_TCP);
      th->checksum = ip4_tcp_udp_compute_checksum (tm->vlib_main, b, ih);
    }
  else
    {
      ip6_header_t *ih;
      int bogus = ~0;

      ih = vlib_buffer_push_ip6 (tm->vlib_main, b, &tc->c_lcl_ip6,
                                 &tc->c_rmt_ip6, IP_PROTOCOL_TCP);
      th->checksum = ip6_tcp_udp_icmp_compute_checksum (tm->vlib_main, b, ih,
                                                        &bogus);
      ASSERT (!bogus);
    }
}

/**
 * Send SYN
 *
 * Builds a SYN packet for a half-open connection and sends it to ipx_lookup.
 * The packet is not forwarded through tcpx_output to avoid doing lookups
 * in the half_open pool.
 */
void
tcp_send_syn (tcp_connection_t * tc)
{
  vlib_buffer_t *b;
  u32 bi;
  tcp_main_t *tm = vnet_get_tcp_main ();
  vlib_main_t *vm = tm->vlib_main;
  u8 tcp_hdr_opts_len, tcp_opts_len;
  tcp_header_t *th;
  u32 time_now;
  u16 initial_wnd;
  tcp_options_t snd_opts;

  tcp_get_free_buffer_index (tm, &bi);
  b = vlib_get_buffer (vm, bi);

  /* Leave enough space for headers */
  vlib_buffer_make_headroom (b, MAX_HDRS_LEN);

  /* Set random initial sequence */
  time_now = tcp_time_now ();

  tc->iss = random_u32 (&time_now);
  tc->snd_una = tc->iss;
  tc->snd_una_max = tc->snd_nxt = tc->iss + 1;

  initial_wnd = tcp_initial_window_to_advertise (tc);

  /* Make and write options */
  memset (&snd_opts, 0, sizeof (snd_opts));
  tcp_opts_len = tcp_make_syn_options (&snd_opts, initial_wnd);
  tcp_hdr_opts_len = tcp_opts_len + sizeof (tcp_header_t);

  th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port, tc->iss,
                             tc->rcv_nxt, tcp_hdr_opts_len, TCP_FLAG_SYN,
                             initial_wnd);

  tcp_options_write ((u8 *) (th + 1), &snd_opts);

  /* Measure RTT with this */
  tc->rtt_ts = tcp_time_now ();
  tc->rtt_seq = tc->snd_nxt;

  /* Start retransmit timer */
  tcp_timer_set (tc, TCP_TIMER_RETRANSMIT_SYN, tc->rto * TCP_TO_TIMER_TICK);
  tc->rto_boff = 0;

  /* Set the connection establishment timer */
  tcp_timer_set (tc, TCP_TIMER_ESTABLISH, TCP_ESTABLISH_TIME);

  tcp_push_ip_hdr (tm, tc, b);
  tcp_enqueue_to_ip_lookup (vm, b, bi, tc->c_is_ip4);
}

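/**
 * Enqueue the buffer to the tcp4-output or tcp6-output node.
 */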
always_inline void
tcp_enqueue_to_output (vlib_main_t * vm, vlib_buffer_t * b, u32 bi, u8 is_ip4)
{
  u32 *to_next, next_index;
  vlib_frame_t *f;

  b->flags |= VNET_BUFFER_LOCALLY_ORIGINATED;
  b->error = 0;

  /* Decide where to send the packet */
  next_index = is_ip4 ? tcp4_output_node.index : tcp6_output_node.index;
  f = vlib_get_frame_to_node (vm, next_index);

  /* Enqueue the packet */
  to_next = vlib_frame_vector_args (f);
  to_next[0] = bi;
  f->n_vectors = 1;
  vlib_put_frame_to_node (vm, next_index, f);
}

/**
 * Send FIN
 */
void
tcp_send_fin (tcp_connection_t * tc)
{
  vlib_buffer_t *b;
  u32 bi;
  tcp_main_t *tm = vnet_get_tcp_main ();
  vlib_main_t *vm = tm->vlib_main;

  tcp_get_free_buffer_index (tm, &bi);
  b = vlib_get_buffer (vm, bi);

  /* Leave enough space for headers */
  vlib_buffer_make_headroom (b, MAX_HDRS_LEN);

  tcp_make_finack (tc, b);

  tcp_enqueue_to_output (vm, b, bi, tc->c_is_ip4);
}

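/**
 * Map the state a segment is sent in to the TCP flags it should carry.
 */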
always_inline u8
tcp_make_state_flags (tcp_state_t next_state)
{
  switch (next_state)
    {
    case TCP_STATE_ESTABLISHED:
      return TCP_FLAG_ACK;
    case TCP_STATE_SYN_RCVD:
      return TCP_FLAG_SYN | TCP_FLAG_ACK;
    case TCP_STATE_SYN_SENT:
      return TCP_FLAG_SYN;
    case TCP_STATE_LAST_ACK:
    case TCP_STATE_FIN_WAIT_1:
      return TCP_FLAG_FIN;
    default:
      clib_warning ("Shouldn't be here!");
    }
  return 0;
}

/**
 * Push TCP header and update connection variables
 */
static void
tcp_push_hdr_i (tcp_connection_t * tc, vlib_buffer_t * b,
                tcp_state_t next_state)
{
  u32 advertise_wnd, data_len;
  u8 tcp_opts_len, tcp_hdr_opts_len, opts_write_len, flags;
  tcp_options_t _snd_opts, *snd_opts = &_snd_opts;
  tcp_header_t *th;

  data_len = b->current_length;
  vnet_buffer (b)->tcp.flags = 0;

  /* Make and write options */
  memset (snd_opts, 0, sizeof (*snd_opts));
  tcp_opts_len = tcp_make_options (tc, snd_opts, next_state);
  tcp_hdr_opts_len = tcp_opts_len + sizeof (tcp_header_t);

  /* Get rcv window to advertise */
  advertise_wnd = tcp_window_to_advertise (tc, next_state);
  flags = tcp_make_state_flags (next_state);

  /* Push header and options */
  th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port, tc->snd_nxt,
                             tc->rcv_nxt, tcp_hdr_opts_len, flags,
                             advertise_wnd);

  opts_write_len = tcp_options_write ((u8 *) (th + 1), snd_opts);

  ASSERT (opts_write_len == tcp_opts_len);

  /* Tag the buffer with the connection index */
  vnet_buffer (b)->tcp.connection_index = tc->c_c_index;

  tc->snd_nxt += data_len;
}

/* Send delayed ACK when timer expires */
void
tcp_timer_delack_handler (u32 index)
{
  tcp_main_t *tm = vnet_get_tcp_main ();
  vlib_main_t *vm = tm->vlib_main;
  u32 thread_index = os_get_cpu_number ();
  tcp_connection_t *tc;
  vlib_buffer_t *b;
  u32 bi;

  tc = tcp_connection_get (index, thread_index);

  /* Get buffer */
  tcp_get_free_buffer_index (tm, &bi);
  b = vlib_get_buffer (vm, bi);

  /* Fill in the ACK */
  tcp_make_ack (tc, b);

  tc->timers[TCP_TIMER_DELACK] = TCP_TIMER_HANDLE_INVALID;
  tc->flags &= ~TCP_CONN_DELACK;

  tcp_enqueue_to_output (vm, b, bi, tc->c_is_ip4);
}

/**
 * Build a retransmit segment
 *
 * @return the number of bytes in the segment or 0 if there's nothing to
 * retransmit
 */
u32
tcp_prepare_retransmit_segment (tcp_connection_t * tc, vlib_buffer_t * b,
                                u32 max_bytes)
{
  tcp_main_t *tm = vnet_get_tcp_main ();
  vlib_main_t *vm = tm->vlib_main;
  u32 n_bytes, offset = 0;
  sack_scoreboard_hole_t *hole;
  u32 hole_size;

  tcp_reuse_buffer (vm, b);

  ASSERT (tc->state == TCP_STATE_ESTABLISHED);
  ASSERT (max_bytes != 0);

  if (tcp_opts_sack_permitted (&tc->opt))
    {
      /* XXX get first hole not retransmitted yet */
      hole = scoreboard_first_hole (&tc->sack_sb);
      if (!hole)
        return 0;

      offset = hole->start - tc->snd_una;
      hole_size = hole->end - hole->start;

      ASSERT (hole_size);

      if (hole_size < max_bytes)
        max_bytes = hole_size;
    }
  else
    {
      if (seq_geq (tc->snd_nxt, tc->snd_una_max))
        return 0;
    }

  n_bytes = stream_session_peek_bytes (&tc->connection,
                                       vlib_buffer_get_current (b), offset,
                                       max_bytes);
  ASSERT (n_bytes != 0);

  tc->snd_nxt += n_bytes;
  tcp_push_hdr_i (tc, b, tc->state);

  return n_bytes;
}

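/**
 * Retransmit timeout handler, shared by established connections and
 * SYN/SYN-ACK retransmits. Bumps the backoff counter, grows the RTO where
 * appropriate and resends starting from the first unacked byte.
 */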
static void
tcp_timer_retransmit_handler_i (u32 index, u8 is_syn)
{
  tcp_main_t *tm = vnet_get_tcp_main ();
  vlib_main_t *vm = tm->vlib_main;
  u32 thread_index = os_get_cpu_number ();
  tcp_connection_t *tc;
  vlib_buffer_t *b;
  u32 bi, max_bytes, snd_space;

  if (is_syn)
    {
      tc = tcp_half_open_connection_get (index);
    }
  else
    {
      tc = tcp_connection_get (index, thread_index);
    }

  /* Make sure timer handle is set to invalid */
  tc->timers[TCP_TIMER_RETRANSMIT] = TCP_TIMER_HANDLE_INVALID;

  /* Increment RTO backoff (also equal to number of retries) */
  tc->rto_boff += 1;

  /* Go back to first un-acked byte */
  tc->snd_nxt = tc->snd_una;

  /* Get buffer */
  tcp_get_free_buffer_index (tm, &bi);
  b = vlib_get_buffer (vm, bi);

  if (tc->state == TCP_STATE_ESTABLISHED)
    {
      tcp_fastrecovery_off (tc);

      /* Exponential backoff */
      tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);

      /* Figure out what and how many bytes we can send */
      snd_space = tcp_available_snd_space (tc);
      max_bytes = clib_min (tc->snd_mss, snd_space);
      tcp_prepare_retransmit_segment (tc, b, max_bytes);

      tc->rtx_bytes += max_bytes;

      /* No fancy recovery for now! */
      scoreboard_clear (&tc->sack_sb);
    }
  else
    {
      /* Retransmit for SYN/SYNACK */
      ASSERT (tc->state == TCP_STATE_SYN_RCVD
              || tc->state == TCP_STATE_SYN_SENT);

      /* Try without increasing RTO a number of times. If this fails,
       * start growing RTO exponentially */
      if (tc->rto_boff > TCP_RTO_SYN_RETRIES)
        tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);

      vlib_buffer_make_headroom (b, MAX_HDRS_LEN);
      tcp_push_hdr_i (tc, b, tc->state);
    }

  if (!is_syn)
    {
      tcp_enqueue_to_output (vm, b, bi, tc->c_is_ip4);

      /* Re-enable retransmit timer */
      tcp_retransmit_timer_set (tm, tc);
    }
  else
    {
      ASSERT (tc->state == TCP_STATE_SYN_SENT);

      /* This goes straight to ipx_lookup */
      tcp_push_ip_hdr (tm, tc, b);
      tcp_enqueue_to_ip_lookup (vm, b, bi, tc->c_is_ip4);

      /* Re-enable retransmit timer */
      tcp_timer_set (tc, TCP_TIMER_RETRANSMIT_SYN,
                     tc->rto * TCP_TO_TIMER_TICK);
    }
}

void
tcp_timer_retransmit_handler (u32 index)
{
  tcp_timer_retransmit_handler_i (index, 0);
}

void
tcp_timer_retransmit_syn_handler (u32 index)
{
  tcp_timer_retransmit_handler_i (index, 1);
}

/**
 * Retransmit first unacked segment
 */
void
tcp_retransmit_first_unacked (tcp_connection_t * tc)
{
  tcp_main_t *tm = vnet_get_tcp_main ();
  u32 snd_nxt = tc->snd_nxt;
  vlib_buffer_t *b;
  u32 bi;

  tc->snd_nxt = tc->snd_una;

  /* Get buffer */
  tcp_get_free_buffer_index (tm, &bi);
  b = vlib_get_buffer (tm->vlib_main, bi);

  tcp_prepare_retransmit_segment (tc, b, tc->snd_mss);
  tcp_enqueue_to_output (tm->vlib_main, b, bi, tc->c_is_ip4);

  tc->snd_nxt = snd_nxt;
  tc->rtx_bytes += tc->snd_mss;
}

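/**
 * Fast retransmit: resend as much unacked data as the send window allows,
 * one MSS-sized segment at a time, starting from snd_una.
 */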
void
tcp_fast_retransmit (tcp_connection_t * tc)
{
  tcp_main_t *tm = vnet_get_tcp_main ();
  u32 snd_space, max_bytes, n_bytes, bi;
  vlib_buffer_t *b;

  ASSERT (tcp_in_fastrecovery (tc));

  clib_warning ("fast retransmit!");

  /* Start resending from first un-acked segment */
  tc->snd_nxt = tc->snd_una;

  snd_space = tcp_available_snd_space (tc);

  while (snd_space)
    {
      tcp_get_free_buffer_index (tm, &bi);
      b = vlib_get_buffer (tm->vlib_main, bi);

      max_bytes = clib_min (tc->snd_mss, snd_space);
      n_bytes = tcp_prepare_retransmit_segment (tc, b, max_bytes);

      /* Nothing left to retransmit */
      if (n_bytes == 0)
        return;

      tcp_enqueue_to_output (tm->vlib_main, b, bi, tc->c_is_ip4);

      snd_space -= n_bytes;
    }

  /* If window allows, send new data */
  tc->snd_nxt = tc->snd_una_max;
}

always_inline u32
tcp_session_has_ooo_data (tcp_connection_t * tc)
{
  stream_session_t *s =
    stream_session_get (tc->c_s_index, tc->c_thread_index);
  return svm_fifo_has_ooo_data (s->server_rx_fifo);
}

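/**
 * Shared IPv4/IPv6 output path: pushes the IP header, computes the TCP
 * checksum, filters duplicate ACKs that are no longer needed, and manages
 * the retransmit timer before handing packets to ip-lookup.
 */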
always_inline uword
tcp46_output_inline (vlib_main_t * vm,
                     vlib_node_runtime_t * node,
                     vlib_frame_t * from_frame, int is_ip4)
{
  tcp_main_t *tm = vnet_get_tcp_main ();
  u32 n_left_from, next_index, *from, *to_next;
  u32 my_thread_index = vm->cpu_index;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t *b0;
          tcp_connection_t *tc0;
          tcp_header_t *th0;
          u32 error0 = TCP_ERROR_PKTS_SENT, next0 = TCP_OUTPUT_NEXT_IP_LOOKUP;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          tc0 = tcp_connection_get (vnet_buffer (b0)->tcp.connection_index,
                                    my_thread_index);
          th0 = vlib_buffer_get_current (b0);

          if (is_ip4)
            {
              ip4_header_t *ih0;
              ih0 = vlib_buffer_push_ip4 (vm, b0, &tc0->c_lcl_ip4,
                                          &tc0->c_rmt_ip4, IP_PROTOCOL_TCP);
              th0->checksum = ip4_tcp_udp_compute_checksum (vm, b0, ih0);
            }
          else
            {
              ip6_header_t *ih0;
              int bogus = ~0;

              ih0 = vlib_buffer_push_ip6 (vm, b0, &tc0->c_lcl_ip6,
                                          &tc0->c_rmt_ip6, IP_PROTOCOL_TCP);
              th0->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b0, ih0,
                                                                 &bogus);
              ASSERT (!bogus);
            }

          /* Filter out DUPACKs if there are no OOO segments left */
          if (PREDICT_FALSE
              (vnet_buffer (b0)->tcp.flags & TCP_BUF_FLAG_DUPACK))
            {
              tc0->snt_dupacks--;
              ASSERT (tc0->snt_dupacks >= 0);
              if (!tcp_session_has_ooo_data (tc0))
                {
                  error0 = TCP_ERROR_FILTERED_DUPACKS;
                  next0 = TCP_OUTPUT_NEXT_DROP;
                  goto done;
                }
            }

          /* Retransmitted SYNs do reach this but it should be harmless */
          tc0->rcv_las = tc0->rcv_nxt;

          /* Stop DELACK timer and fix flags */
          tc0->flags &=
            ~(TCP_CONN_SNDACK | TCP_CONN_DELACK | TCP_CONN_BURSTACK);
          if (tcp_timer_is_active (tc0, TCP_TIMER_DELACK))
            {
              tcp_timer_reset (tc0, TCP_TIMER_DELACK);
            }

          /* If not retransmitting
           * 1) update snd_una_max (SYN, SYNACK, new data, FIN)
           * 2) If we're not tracking an ACK, start tracking */
          if (seq_lt (tc0->snd_una_max, tc0->snd_nxt))
            {
              tc0->snd_una_max = tc0->snd_nxt;
              if (tc0->rtt_ts == 0)
                {
                  tc0->rtt_ts = tcp_time_now ();
                  tc0->rtt_seq = tc0->snd_nxt;
                }
            }

          /* Set the retransmit timer if not set already and not
           * doing a pure ACK */
          if (!tcp_timer_is_active (tc0, TCP_TIMER_RETRANSMIT)
              && tc0->snd_nxt != tc0->snd_una)
            {
              tcp_retransmit_timer_set (tm, tc0);
              tc0->rto_boff = 0;
            }

          /* set fib index to default and lookup node */
          /* XXX network virtualization (vrf/vni) */
          vnet_buffer (b0)->sw_if_index[VLIB_RX] = 0;
          vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;

          b0->flags |= VNET_BUFFER_LOCALLY_ORIGINATED;

        done:
          b0->error = error0 != 0 ? node->errors[error0] : 0;
          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {

            }

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return from_frame->n_vectors;
}

static uword
tcp4_output (vlib_main_t * vm, vlib_node_runtime_t * node,
             vlib_frame_t * from_frame)
{
  return tcp46_output_inline (vm, node, from_frame, 1 /* is_ip4 */ );
}

static uword
tcp6_output (vlib_main_t * vm, vlib_node_runtime_t * node,
             vlib_frame_t * from_frame)
{
  return tcp46_output_inline (vm, node, from_frame, 0 /* is_ip4 */ );
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (tcp4_output_node) = {
  .function = tcp4_output,
  .name = "tcp4-output",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_OUTPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [TCP_OUTPUT_NEXT_##s] = n,
    foreach_tcp4_output_next
#undef _
  },
  .format_buffer = format_tcp_header,
  .format_trace = format_tcp_tx_trace,
};
/* *INDENT-ON* */

VLIB_NODE_FUNCTION_MULTIARCH (tcp4_output_node, tcp4_output)

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (tcp6_output_node) = {
  .function = tcp6_output,
  .name = "tcp6-output",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_OUTPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [TCP_OUTPUT_NEXT_##s] = n,
    foreach_tcp6_output_next
#undef _
  },
  .format_buffer = format_tcp_header,
  .format_trace = format_tcp_tx_trace,
};
/* *INDENT-ON* */

VLIB_NODE_FUNCTION_MULTIARCH (tcp6_output_node, tcp6_output)

u32
tcp_push_header (transport_connection_t * tconn, vlib_buffer_t * b)
{
  tcp_connection_t *tc;

  tc = (tcp_connection_t *) tconn;
  tcp_push_hdr_i (tc, b, TCP_STATE_ESTABLISHED);
  return 0;
}

typedef enum _tcp_reset_next
{
  TCP_RESET_NEXT_DROP,
  TCP_RESET_NEXT_IP_LOOKUP,
  TCP_RESET_N_NEXT
} tcp_reset_next_t;

#define foreach_tcp4_reset_next         \
  _(DROP, "error-drop")                 \
  _(IP_LOOKUP, "ip4-lookup")

#define foreach_tcp6_reset_next         \
  _(DROP, "error-drop")                 \
  _(IP_LOOKUP, "ip6-lookup")

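/**
 * Shared IPv4/IPv6 reset path: rewrites received packets into RSTs in place
 * and forwards them to ip-lookup.
 */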
static uword
tcp46_send_reset_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                         vlib_frame_t * from_frame, u8 is_ip4)
{
  u32 n_left_from, next_index, *from, *to_next;
  u32 my_thread_index = vm->cpu_index;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t *b0;
          u32 error0 = TCP_ERROR_RST_SENT, next0 = TCP_RESET_NEXT_IP_LOOKUP;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          if (tcp_make_reset_in_place (vm, b0, vnet_buffer (b0)->tcp.flags,
                                       my_thread_index, is_ip4))
            {
              error0 = TCP_ERROR_LOOKUP_DROPS;
              next0 = TCP_RESET_NEXT_DROP;
              goto done;
            }

          /* Prepare to send to IP lookup */
          vnet_buffer (b0)->sw_if_index[VLIB_TX] = 0;
          next0 = TCP_RESET_NEXT_IP_LOOKUP;

        done:
          b0->error = error0 != 0 ? node->errors[error0] : 0;
          b0->flags |= VNET_BUFFER_LOCALLY_ORIGINATED;
          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {

            }

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  return from_frame->n_vectors;
}

static uword
tcp4_send_reset (vlib_main_t * vm, vlib_node_runtime_t * node,
                 vlib_frame_t * from_frame)
{
  return tcp46_send_reset_inline (vm, node, from_frame, 1);
}

static uword
tcp6_send_reset (vlib_main_t * vm, vlib_node_runtime_t * node,
                 vlib_frame_t * from_frame)
{
  return tcp46_send_reset_inline (vm, node, from_frame, 0);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (tcp4_reset_node) = {
  .function = tcp4_send_reset,
  .name = "tcp4-reset",
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_RESET_N_NEXT,
  .next_nodes = {
#define _(s,n) [TCP_RESET_NEXT_##s] = n,
    foreach_tcp4_reset_next
#undef _
  },
};
/* *INDENT-ON* */

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (tcp6_reset_node) = {
  .function = tcp6_send_reset,
  .name = "tcp6-reset",
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_RESET_N_NEXT,
  .next_nodes = {
#define _(s,n) [TCP_RESET_NEXT_##s] = n,
    foreach_tcp6_reset_next
#undef _
  },
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */