Implement SACK-based TCP loss recovery (RFC 6675)

- refactor existing congestion control code (RFC 6582/5681). Handling of ACK
  feedback now consists of four stages: ACK parsing, CC event detection,
  event handling, and congestion control update
- extend the SACK scoreboard to support SACK-based retransmissions
- add a basic implementation of the Eifel detection algorithm (RFC 3522)
  for detecting spurious retransmissions (see the sketch below)
- actually initialize the per-thread frame freelist hash tables
- increase worker stack size to 2MB
- fix session queue node out-of-buffer handling
  - ensure that the local buffer cache vec_len matches reality
  - avoid 2x spurious event requeues when short of buffers
  - count out-of-buffer events
- make the builtin server thread-safe
- fix bihash template threading issue: need to paint -1 across uninitialized
  working_copy_length vector elements (via rebase from master)
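
For reference, a minimal standalone sketch of the spurious-retransmit
check this patch adds as tcp_cc_is_spurious_retransmit() (plain C for
illustration only; the names and types below are not VPP's):

  #include <stdint.h>

  /* RFC 3522 (Eifel): a retransmit was spurious if the first ACK that
   * arrives after it echoes a timestamp older than the timestamp of
   * the retransmit itself, i.e. the peer had received the original. */
  static inline int
  timestamp_lt (uint32_t t1, uint32_t t2)
  {
    return (int32_t) (t1 - t2) < 0;
  }

  static inline int
  retransmit_is_spurious (uint32_t snd_rxt_ts, uint32_t tsecr,
                          int tstamp_ok)
  {
    return snd_rxt_ts && tstamp_ok && timestamp_lt (tsecr, snd_rxt_ts);
  }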

Change-Id: I646cb9f1add9a67d08f4a87badbcb117980ebfc4
Signed-off-by: Florin Coras <fcoras@cisco.com>
Signed-off-by: Dave Barach <dbarach@cisco.com>
diff --git a/src/svm/svm_fifo.c b/src/svm/svm_fifo.c
index f13f6fe..5c8f244 100644
--- a/src/svm/svm_fifo.c
+++ b/src/svm/svm_fifo.c
@@ -540,7 +540,7 @@
 
   /* read cursize, which can only increase while we're working */
   cursize = svm_fifo_max_dequeue (f);
-  if (PREDICT_FALSE (cursize == 0))
+  if (PREDICT_FALSE (cursize < relative_offset))
    return -2;			/* nothing in the fifo at this offset */
 
   nitems = f->nitems;
@@ -548,7 +548,8 @@
   real_head = real_head >= nitems ? real_head - nitems : real_head;
 
   /* Number of bytes we're going to copy */
-  total_copy_bytes = (cursize < max_bytes) ? cursize : max_bytes;
+  total_copy_bytes = (cursize - relative_offset < max_bytes) ?
+    cursize - relative_offset : max_bytes;
 
   if (PREDICT_TRUE (copy_here != 0))
     {
diff --git a/src/vlib/node.c b/src/vlib/node.c
index bbd3a42..eecad27 100644
--- a/src/vlib/node.c
+++ b/src/vlib/node.c
@@ -502,6 +502,7 @@
   vlib_node_t *n;
   uword ni;
 
+  nm->frame_size_hash = hash_create (0, sizeof (uword));
   nm->flags |= VLIB_NODE_MAIN_RUNTIME_STARTED;
 
   /* Generate sibling relationships */
diff --git a/src/vlib/threads.c b/src/vlib/threads.c
index b7bc9e2..0c775e2 100644
--- a/src/vlib/threads.c
+++ b/src/vlib/threads.c
@@ -670,7 +670,7 @@
 
 	      /* zap the (per worker) frame freelists, etc */
 	      nm_clone->frame_sizes = 0;
-	      nm_clone->frame_size_hash = 0;
+	      nm_clone->frame_size_hash = hash_create (0, sizeof (uword));
 
 	      /* Packet trace buffers are guaranteed to be empty, nothing to do here */
 
diff --git a/src/vlib/threads.h b/src/vlib/threads.h
index 17d35a2..572ce77 100644
--- a/src/vlib/threads.h
+++ b/src/vlib/threads.h
@@ -62,7 +62,7 @@
 #define VLIB_CPU_MASK (VLIB_MAX_CPUS - 1)	/* 0x3f, max */
 #define VLIB_OFFSET_MASK (~VLIB_CPU_MASK)
 
-#define VLIB_LOG2_THREAD_STACK_SIZE (20)
+#define VLIB_LOG2_THREAD_STACK_SIZE (21)
 #define VLIB_THREAD_STACK_SIZE (1<<VLIB_LOG2_THREAD_STACK_SIZE)
 
 typedef enum
diff --git a/src/vnet/session/node.c b/src/vnet/session/node.c
index 3053ccc..07eeae8 100644
--- a/src/vnet/session/node.c
+++ b/src/vnet/session/node.c
@@ -47,7 +47,8 @@
 
 #define foreach_session_queue_error		\
 _(TX, "Packets transmitted")                  	\
-_(TIMER, "Timer events")
+_(TIMER, "Timer events")			\
+_(NO_BUFFER, "Out of buffers")
 
 typedef enum
 {
@@ -141,6 +142,7 @@
   u8 *data0;
   int i, n_bytes_read;
   u32 n_bytes_per_buf, deq_per_buf;
+  u32 buffers_allocated, buffers_allocated_this_call;
 
   next_index = next0 = session_type_to_next[s0->session_type];
 
@@ -167,9 +169,6 @@
   /* Check how much we can pull. If buffering, subtract the offset */
   max_dequeue0 = svm_fifo_max_dequeue (s0->server_tx_fifo) - rx_offset;
 
-  /* Allow enqueuing of a new event */
-  svm_fifo_unset_event (s0->server_tx_fifo);
-
   /* Nothing to read return */
   if (max_dequeue0 == 0)
     return 0;
@@ -187,8 +186,8 @@
       max_len_to_snd0 = snd_space0;
     }
 
-  n_bytes_per_buf = vlib_buffer_free_list_buffer_size (vm,
-						       VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
+  n_bytes_per_buf = vlib_buffer_free_list_buffer_size
+    (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
   n_bytes_per_seg = MAX_HDRS_LEN + snd_mss0;
   n_bufs_per_seg = ceil ((double) n_bytes_per_seg / n_bytes_per_buf);
   n_bufs_per_evt = (ceil ((double) max_len_to_snd0 / n_bytes_per_seg))
@@ -205,24 +204,33 @@
       if (PREDICT_FALSE (n_bufs < VLIB_FRAME_SIZE))
 	{
 	  vec_validate (smm->tx_buffers[thread_index],
-			n_bufs + VLIB_FRAME_SIZE - 1);
-	  n_bufs += vlib_buffer_alloc (vm,
-				       &smm->tx_buffers[thread_index][n_bufs],
-				       VLIB_FRAME_SIZE);
+			n_bufs + 2 * VLIB_FRAME_SIZE - 1);
 
-	  /* buffer shortage
-	   * XXX 0.9 because when debugging we might not get a full frame */
-	  if (PREDICT_FALSE (n_bufs < 0.9 * VLIB_FRAME_SIZE))
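+	  /* Ask for up to two frames' worth of buffers; the allocator may
+	   * return fewer than requested, so retry until we hold at least a
+	   * full frame or allocation stops making progress */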
+	  buffers_allocated = 0;
+	  do
 	    {
-	      if (svm_fifo_set_event (s0->server_tx_fifo))
-		{
-		  vec_add1 (smm->pending_event_vector[thread_index], *e0);
-		}
-	      return -1;
+	      buffers_allocated_this_call =
+		vlib_buffer_alloc
+		(vm,
+		 &smm->tx_buffers[thread_index][n_bufs + buffers_allocated],
+		 2 * VLIB_FRAME_SIZE - buffers_allocated);
+	      buffers_allocated += buffers_allocated_this_call;
 	    }
+	  while (buffers_allocated_this_call > 0
+		 && ((buffers_allocated + n_bufs < VLIB_FRAME_SIZE)));
+
+	  n_bufs += buffers_allocated;
 
 	  _vec_len (smm->tx_buffers[thread_index]) = n_bufs;
+
+	  if (PREDICT_FALSE (n_bufs < VLIB_FRAME_SIZE))
+	    {
+	      vec_add1 (smm->pending_event_vector[thread_index], *e0);
+	      return -1;
+	    }
 	}
+      /* Allow enqueuing of a new event */
+      svm_fifo_unset_event (s0->server_tx_fifo);
 
       vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
       while (left_to_snd0 && n_left_to_next >= n_bufs_per_seg)
@@ -232,7 +240,9 @@
 	   */
 
 	  /* Get free buffer */
+	  ASSERT (n_bufs >= 1);
 	  bi0 = smm->tx_buffers[thread_index][--n_bufs];
+	  ASSERT (bi0);
 	  _vec_len (smm->tx_buffers[thread_index]) = n_bufs;
 
 	  b0 = vlib_get_buffer (vm, bi0);
@@ -545,9 +555,10 @@
 							my_thread_index,
 							&n_tx_packets);
 	  /* Out of buffers */
-	  if (rv < 0)
+	  if (PREDICT_FALSE (rv < 0))
 	    {
-	      vec_add1 (smm->pending_event_vector[my_thread_index], *e0);
+	      vlib_node_increment_counter (vm, node->node_index,
+					   SESSION_QUEUE_ERROR_NO_BUFFER, 1);
 	      continue;
 	    }
 	  break;
diff --git a/src/vnet/session/session.c b/src/vnet/session/session.c
index 02b0cce..534598d 100644
--- a/src/vnet/session/session.c
+++ b/src/vnet/session/session.c
@@ -551,7 +551,7 @@
 stream_session_no_space (transport_connection_t * tc, u32 thread_index,
 			 u16 data_len)
 {
-  stream_session_t *s = stream_session_get (tc->c_index, thread_index);
+  stream_session_t *s = stream_session_get (tc->s_index, thread_index);
 
   if (PREDICT_FALSE (s->session_state != SESSION_STATE_READY))
     return 1;
@@ -563,6 +563,15 @@
 }
 
 u32
+stream_session_tx_fifo_max_dequeue (transport_connection_t * tc)
+{
+  stream_session_t *s = stream_session_get (tc->s_index, tc->thread_index);
+  if (s->session_state != SESSION_STATE_READY)
+    return 0;
+  return svm_fifo_max_dequeue (s->server_tx_fifo);
+}
+
+int
 stream_session_peek_bytes (transport_connection_t * tc, u8 * buffer,
 			   u32 offset, u32 max_bytes)
 {
diff --git a/src/vnet/session/session.h b/src/vnet/session/session.h
index a872864..d9c38bd 100644
--- a/src/vnet/session/session.h
+++ b/src/vnet/session/session.h
@@ -352,16 +352,18 @@
 }
 
 always_inline u32
-stream_session_fifo_size (transport_connection_t * tc)
+stream_session_rx_fifo_size (transport_connection_t * tc)
 {
   stream_session_t *s = stream_session_get (tc->s_index, tc->thread_index);
   return s->server_rx_fifo->nitems;
 }
 
+u32 stream_session_tx_fifo_max_dequeue (transport_connection_t * tc);
+
 int
 stream_session_enqueue_data (transport_connection_t * tc, vlib_buffer_t * b,
 			     u32 offset, u8 queue_event, u8 is_in_order);
-u32
+int
 stream_session_peek_bytes (transport_connection_t * tc, u8 * buffer,
 			   u32 offset, u32 max_bytes);
 u32 stream_session_dequeue_drop (transport_connection_t * tc, u32 max_bytes);
diff --git a/src/vnet/session/session_cli.c b/src/vnet/session/session_cli.c
index 509eedb..6b8341a 100755
--- a/src/vnet/session/session_cli.c
+++ b/src/vnet/session/session_cli.c
@@ -15,6 +15,15 @@
 #include <vnet/session/application.h>
 #include <vnet/session/session.h>
 
+u8 *
+format_stream_session_fifos (u8 * s, va_list * args)
+{
+  stream_session_t *ss = va_arg (*args, stream_session_t *);
+  s = format (s, " Rx fifo: %U", format_svm_fifo, ss->server_rx_fifo, 1);
+  s = format (s, " Tx fifo: %U", format_svm_fifo, ss->server_tx_fifo, 1);
+  return s;
+}
+
 /**
  * Format stream session as per the following format
  *
@@ -44,6 +53,8 @@
 		  ss->thread_index, verbose);
       if (verbose == 1)
 	s = format (s, "%v", str);
+      if (verbose > 1)
+	s = format (s, "%U", format_stream_session_fifos, ss);
     }
   else if (ss->session_state == SESSION_STATE_LISTENING)
     {
@@ -57,8 +68,12 @@
     }
   else if (ss->session_state == SESSION_STATE_CLOSED)
     {
-      s = format (s, "[CL] %-40U%v", tp_vft->format_connection,
-		  ss->connection_index, ss->thread_index, verbose, str);
+      s = format (s, "[CL] %-40U", tp_vft->format_connection,
+		  ss->connection_index, ss->thread_index, verbose);
+      if (verbose == 1)
+	s = format (s, "%v", str);
+      if (verbose > 1)
+	s = format (s, "%U", format_stream_session_fifos, ss);
     }
   else
     {
@@ -124,13 +139,6 @@
               ({
         	vec_reset_length (str);
                 str = format (str, "%U", format_stream_session, s, verbose);
-                if (verbose > 1)
-                  {
-                    str = format (str, " Rx fifo: %U", format_svm_fifo,
-				  s->server_rx_fifo, 1);
-                    str = format (str, " Tx fifo: %U", format_svm_fifo,
-				  s->server_tx_fifo, 1);
-                  }
                 vlib_cli_output (vm, "%v", str);
               }));
               /* *INDENT-ON* */
diff --git a/src/vnet/tcp/builtin_client.c b/src/vnet/tcp/builtin_client.c
index 768f0c3..7238cda 100644
--- a/src/vnet/tcp/builtin_client.c
+++ b/src/vnet/tcp/builtin_client.c
@@ -115,8 +115,17 @@
   /* Allow enqueuing of new event */
   // svm_fifo_unset_event (rx_fifo);
 
-  n_read = svm_fifo_dequeue_nowait (rx_fifo, vec_len (tm->rx_buf),
-				    tm->rx_buf);
+  if (test_bytes)
+    {
+      n_read = svm_fifo_dequeue_nowait (rx_fifo, vec_len (tm->rx_buf),
+					tm->rx_buf);
+    }
+  else
+    {
+      n_read = svm_fifo_max_dequeue (rx_fifo);
+      svm_fifo_dequeue_drop (rx_fifo, n_read);
+    }
+
   if (n_read > 0)
     {
       if (TCP_BUILTIN_CLIENT_DBG)
@@ -165,6 +174,8 @@
   int i;
   int delete_session;
   u32 *connection_indices;
+  u32 tx_quota = 0;
+  u32 delta, prev_bytes_received_this_session;
 
   connection_indices = tm->connection_index_by_thread[my_thread_index];
 
@@ -177,14 +188,19 @@
 
       sp = pool_elt_at_index (tm->sessions, connection_indices[i]);
 
-      if (sp->bytes_to_send > 0)
+      if (tx_quota < 60 && sp->bytes_to_send > 0)
 	{
 	  send_test_chunk (tm, sp);
 	  delete_session = 0;
+	  tx_quota++;
 	}
       if (sp->bytes_to_receive > 0)
 	{
+	  prev_bytes_received_this_session = sp->bytes_received;
 	  receive_test_chunk (tm, sp);
+	  delta = sp->bytes_received - prev_bytes_received_this_session;
+	  if (delta > 0)
+	    tx_quota--;
 	  delete_session = 0;
 	}
       if (PREDICT_FALSE (delete_session == 1))
@@ -195,11 +211,19 @@
 	  dmp->_vl_msg_id = ntohs (VL_API_DISCONNECT_SESSION);
 	  dmp->client_index = tm->my_client_index;
 	  dmp->handle = sp->vpp_session_handle;
-	  vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & dmp);
-	  vec_delete (connection_indices, 1, i);
-	  tm->connection_index_by_thread[my_thread_index] =
-	    connection_indices;
-	  __sync_fetch_and_add (&tm->ready_connections, -1);
+//        vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & dmp);
+	  if (!unix_shared_memory_queue_add (tm->vl_input_queue, (u8 *) & dmp,
+					     1))
+	    {
+	      vec_delete (connection_indices, 1, i);
+	      tm->connection_index_by_thread[my_thread_index] =
+		connection_indices;
+	      __sync_fetch_and_add (&tm->ready_connections, -1);
+	    }
+	  else
+	    {
+	      vl_msg_api_free (dmp);
+	    }
 
 	  /* Kick the debug CLI process */
 	  if (tm->ready_connections == 0)
diff --git a/src/vnet/tcp/builtin_server.c b/src/vnet/tcp/builtin_server.c
index 4f0e211..8bd2f36 100644
--- a/src/vnet/tcp/builtin_server.c
+++ b/src/vnet/tcp/builtin_server.c
@@ -39,7 +39,8 @@
 
 typedef struct
 {
-  u8 *rx_buf;
+  /* Per-thread RX buffer */
+  u8 **rx_buf;
   unix_shared_memory_queue_t **vpp_queue;
   u64 byte_index;
 
@@ -117,13 +118,15 @@
 test_bytes (builtin_server_main_t * bsm, int actual_transfer)
 {
   int i;
+  u32 my_thread_id = vlib_get_thread_index ();
 
   for (i = 0; i < actual_transfer; i++)
     {
-      if (bsm->rx_buf[i] != ((bsm->byte_index + i) & 0xff))
+      if (bsm->rx_buf[my_thread_id][i] != ((bsm->byte_index + i) & 0xff))
 	{
 	  clib_warning ("at %lld expected %d got %d", bsm->byte_index + i,
-			(bsm->byte_index + i) & 0xff, bsm->rx_buf[i]);
+			(bsm->byte_index + i) & 0xff,
+			bsm->rx_buf[my_thread_id][i]);
 	}
     }
   bsm->byte_index += actual_transfer;
@@ -138,6 +141,7 @@
   builtin_server_main_t *bsm = &builtin_server_main;
   session_fifo_event_t evt;
   static int serial_number = 0;
+  u32 my_thread_id = vlib_get_thread_index ();
 
   tx_fifo = s->server_tx_fifo;
   rx_fifo = s->server_rx_fifo;
@@ -171,11 +175,12 @@
       return 0;
     }
 
-  vec_validate (bsm->rx_buf, max_transfer - 1);
-  _vec_len (bsm->rx_buf) = max_transfer;
+  vec_validate (bsm->rx_buf, my_thread_id);
+  vec_validate (bsm->rx_buf[my_thread_id], max_transfer - 1);
+  _vec_len (bsm->rx_buf[my_thread_id]) = max_transfer;
 
   actual_transfer = svm_fifo_dequeue_nowait (rx_fifo, max_transfer,
-					     bsm->rx_buf);
+					     bsm->rx_buf[my_thread_id]);
   ASSERT (actual_transfer == max_transfer);
 
 //  test_bytes (bsm, actual_transfer);
@@ -184,7 +189,8 @@
    * Echo back
    */
 
-  n_written = svm_fifo_enqueue_nowait (tx_fifo, actual_transfer, bsm->rx_buf);
+  n_written = svm_fifo_enqueue_nowait (tx_fifo, actual_transfer,
+				       bsm->rx_buf[my_thread_id]);
 
   if (n_written != max_transfer)
     clib_warning ("short trout!");
diff --git a/src/vnet/tcp/tcp.c b/src/vnet/tcp/tcp.c
index 9b7b2f6..e0b67a8 100644
--- a/src/vnet/tcp/tcp.c
+++ b/src/vnet/tcp/tcp.c
@@ -195,8 +195,8 @@
   TCP_EVT_DBG (TCP_EVT_CLOSE, tc);
 
   /* Send FIN if needed */
-  if (tc->state == TCP_STATE_ESTABLISHED || tc->state == TCP_STATE_SYN_RCVD
-      || tc->state == TCP_STATE_CLOSE_WAIT)
+  if (tc->state == TCP_STATE_ESTABLISHED
+      || tc->state == TCP_STATE_SYN_RCVD || tc->state == TCP_STATE_CLOSE_WAIT)
     tcp_send_fin (tc);
 
   /* Switch state */
@@ -480,7 +480,7 @@
 format_tcp_timers (u8 * s, va_list * args)
 {
   tcp_connection_t *tc = va_arg (*args, tcp_connection_t *);
-  int i, last = 0;
+  int i, last = -1;
 
   for (i = 0; i < TCP_N_TIMERS; i++)
     if (tc->timers[i] != TCP_TIMER_HANDLE_INVALID)
@@ -493,7 +493,7 @@
 	s = format (s, "%s,", tcp_conn_timers[i]);
     }
 
-  if (last > 0)
+  if (last >= 0)
     s = format (s, "%s]", tcp_conn_timers[i]);
   else
     s = format (s, "]");
@@ -526,19 +526,19 @@
   s = format (s, " snd_wnd %u rcv_wnd %u snd_wl1 %u snd_wl2 %u\n",
 	      tc->snd_wnd, tc->rcv_wnd, tc->snd_wl1 - tc->irs,
 	      tc->snd_wl2 - tc->iss);
-  s = format (s, " flight size %u send space %u rcv_wnd available %d\n",
-	      tcp_flight_size (tc), tcp_snd_space (tc),
-	      tc->rcv_wnd - (tc->rcv_nxt - tc->rcv_las));
+  s = format (s, " flight size %u send space %u rcv_wnd_av %d\n",
+	      tcp_flight_size (tc), tcp_available_snd_space (tc),
+	      tcp_rcv_wnd_available (tc));
   s = format (s, " cong %U ", format_tcp_congestion_status, tc);
   s = format (s, "cwnd %u ssthresh %u rtx_bytes %u bytes_acked %u\n",
-	      tc->cwnd, tc->ssthresh, tc->rtx_bytes, tc->bytes_acked);
-  s = format (s, " prev_ssthresh %u snd_congestion %u\n", tc->prev_ssthresh,
-	      tc->snd_congestion - tc->iss);
+	      tc->cwnd, tc->ssthresh, tc->snd_rxt_bytes, tc->bytes_acked);
+  s = format (s, " prev_ssthresh %u snd_congestion %u dupack %u\n",
+	      tc->prev_ssthresh, tc->snd_congestion - tc->iss,
+	      tc->rcv_dupacks);
   s = format (s, " rto %u rto_boff %u srtt %u rttvar %u rtt_ts %u ", tc->rto,
 	      tc->rto_boff, tc->srtt, tc->rttvar, tc->rtt_ts);
   s = format (s, "rtt_seq %u\n", tc->rtt_seq);
-  if (scoreboard_first_hole (&tc->sack_sb))
-    s = format (s, " scoreboard: %U\n", format_tcp_scoreboard, &tc->sack_sb);
+  s = format (s, " scoreboard: %U\n", format_tcp_scoreboard, &tc->sack_sb);
   if (vec_len (tc->snd_sacks))
     s = format (s, " sacks tx: %U\n", format_tcp_sacks, tc);
 
@@ -595,9 +595,10 @@
 
   tc = tcp_connection_get (tci, thread_index);
   if (tc)
-    return format (s, "%U", format_tcp_connection, tc, verbose);
+    s = format (s, "%U", format_tcp_connection, tc, verbose);
   else
-    return format (s, "empty");
+    s = format (s, "empty");
+  return s;
 }
 
 u8 *
@@ -643,13 +644,17 @@
 {
   sack_scoreboard_t *sb = va_arg (*args, sack_scoreboard_t *);
   sack_scoreboard_hole_t *hole;
-  s = format (s, "head %u tail %u snd_una_adv %u\n", sb->head, sb->tail,
-	      sb->snd_una_adv);
-  s = format (s, "sacked_bytes %u last_sacked_bytes %u", sb->sacked_bytes,
-	      sb->last_sacked_bytes);
-  s = format (s, " max_byte_sacked %u\n", sb->max_byte_sacked);
-  s = format (s, "holes:\n");
+  s = format (s, "sacked_bytes %u last_sacked_bytes %u lost_bytes %u\n",
+	      sb->sacked_bytes, sb->last_sacked_bytes, sb->lost_bytes);
+  s = format (s, " last_bytes_delivered %u high_sacked %u snd_una_adv %u\n",
+	      sb->last_bytes_delivered, sb->high_sacked, sb->snd_una_adv);
+  s = format (s, " cur_rxt_hole %u high_rxt %u rescue_rxt %u",
+	      sb->cur_rxt_hole, sb->high_rxt, sb->rescue_rxt);
+
   hole = scoreboard_first_hole (sb);
+  if (hole)
+    s = format (s, "\n head %u tail %u holes:\n", sb->head, sb->tail);
+
   while (hole)
     {
       s = format (s, "%U", format_tcp_sack_hole, hole);
@@ -736,7 +741,7 @@
   if (tcp_in_recovery (tc))
     {
       tc->snd_nxt = tc->snd_una_max;
-      snd_space = tcp_available_wnd (tc) - tc->rtx_bytes
+      snd_space = tcp_available_wnd (tc) - tc->snd_rxt_bytes
 	- (tc->snd_una_max - tc->snd_congestion);
       if (snd_space <= 0 || (tc->snd_una_max - tc->snd_una) >= tc->snd_wnd)
 	return 0;
@@ -744,8 +749,8 @@
     }
 
   /* If in fast recovery, send 1 SMSS if wnd allows */
-  if (tcp_in_fastrecovery (tc) && tcp_available_snd_space (tc)
-      && tcp_fastrecovery_sent_1_smss (tc))
+  if (tcp_in_fastrecovery (tc)
+      && tcp_available_snd_space (tc) && !tcp_fastrecovery_sent_1_smss (tc))
     {
       tcp_fastrecovery_1_smss_on (tc);
       return tc->snd_mss;
@@ -761,6 +766,12 @@
   return tcp_snd_space (tc);
 }
 
+i32
+tcp_rcv_wnd_available (tcp_connection_t * tc)
+{
+  return (i32) tc->rcv_wnd - (tc->rcv_nxt - tc->rcv_las);
+}
+
 u32
 tcp_session_tx_fifo_offset (transport_connection_t * trans_conn)
 {
diff --git a/src/vnet/tcp/tcp.h b/src/vnet/tcp/tcp.h
index c3ebe22..071f1ab 100644
--- a/src/vnet/tcp/tcp.h
+++ b/src/vnet/tcp/tcp.h
@@ -34,6 +34,7 @@
 #define TCP_MAX_RX_FIFO_SIZE 	2 << 20
 #define TCP_IW_N_SEGMENTS 	10
 #define TCP_ALWAYS_ACK		0	/**< If on, we always ack */
+#define TCP_USE_SACKS		1	/**< Disable only for testing */
 
 /** TCP FSM state definitions as per RFC793. */
 #define foreach_tcp_fsm_state   \
@@ -94,7 +95,7 @@
 #define TCP_DELACK_TIME         1	/* 0.1s */
 #define TCP_ESTABLISH_TIME      750	/* 75s */
 #define TCP_2MSL_TIME           300	/* 30s */
-#define TCP_CLOSEWAIT_TIME	1	/* 0.1s */
+#define TCP_CLOSEWAIT_TIME	20	/* 2s */
 #define TCP_CLEANUP_TIME	5	/* 0.5s Time to wait before cleanup */
 #define TCP_TIMER_PERSIST_MIN	2	/* 0.2s */
 
@@ -157,6 +158,7 @@
   u32 prev;		/**< Index for previous entry in linked list */
   u32 start;		/**< Start sequence number */
   u32 end;		/**< End sequence number */
+  u8 is_lost;		/**< Mark hole as lost */
 } sack_scoreboard_hole_t;
 
 typedef struct _sack_scoreboard
@@ -166,8 +168,13 @@
   u32 tail;				/**< Index of last entry */
   u32 sacked_bytes;			/**< Number of bytes sacked in sb */
   u32 last_sacked_bytes;		/**< Number of bytes last sacked */
+  u32 last_bytes_delivered;		/**< Number of sack bytes delivered */
   u32 snd_una_adv;			/**< Bytes to add to snd_una */
-  u32 max_byte_sacked;			/**< Highest byte acked */
+  u32 high_sacked;			/**< Highest byte sacked (fack) */
+  u32 high_rxt;				/**< Highest retransmitted sequence */
+  u32 rescue_rxt;			/**< Rescue sequence number */
+  u32 lost_bytes;			/**< Bytes lost as per RFC6675 */
+  u32 cur_rxt_hole;			/**< Retransmitting from this hole */
 } sack_scoreboard_t;
 
 typedef enum _tcp_cc_algorithm_type
@@ -211,7 +218,7 @@
   u32 irs;		/**< initial remote sequence */
 
   /* Options */
-  tcp_options_t opt;		/**< TCP connection options parsed */
+  tcp_options_t rcv_opts;	/**< Rx options for connection */
   tcp_options_t snd_opts;	/**< Tx options for connection */
   u8 snd_opts_len;		/**< Tx options len */
   u8 rcv_wscale;	/**< Window scale to advertise to peer */
@@ -229,8 +236,10 @@
   u32 cwnd;		/**< Congestion window */
   u32 ssthresh;		/**< Slow-start threshold */
   u32 prev_ssthresh;	/**< ssthresh before congestion */
+  u32 prev_cwnd;	/**< cwnd before congestion */
   u32 bytes_acked;	/**< Bytes acknowledged by current segment */
-  u32 rtx_bytes;	/**< Retransmitted bytes */
+  u32 snd_rxt_bytes;	/**< Retransmitted bytes */
+  u32 snd_rxt_ts;	/**< Timestamp when first packet is retransmitted */
   u32 tsecr_last_ack;	/**< Timestamp echoed to us in last healthy ACK */
   u32 snd_congestion;	/**< snd_una_max when congestion is detected */
   tcp_cc_algorithm_t *cc_algo;	/**< Congestion control algorithm */
@@ -411,6 +420,7 @@
 void tcp_send_fin (tcp_connection_t * tc);
 void tcp_init_mss (tcp_connection_t * tc);
 void tcp_update_snd_mss (tcp_connection_t * tc);
+void tcp_update_rto (tcp_connection_t * tc);
 
 always_inline u32
 tcp_end_seq (tcp_header_t * th, u32 len)
@@ -428,17 +438,39 @@
 #define timestamp_lt(_t1, _t2) ((i32)((_t1)-(_t2)) < 0)
 #define timestamp_leq(_t1, _t2) ((i32)((_t1)-(_t2)) <= 0)
 
+/**
+ * Our estimate of the number of bytes that have left the network
+ */
+always_inline u32
+tcp_bytes_out (const tcp_connection_t * tc)
+{
+  if (tcp_opts_sack_permitted (&tc->rcv_opts))
+    return tc->sack_sb.sacked_bytes + tc->sack_sb.lost_bytes;
+  else
+    return tc->rcv_dupacks * tc->snd_mss;
+}
+
+/**
+ * Our estimate of the number of bytes in flight (pipe size)
+ */
 always_inline u32
 tcp_flight_size (const tcp_connection_t * tc)
 {
   int flight_size;
 
-  flight_size = (int) ((tc->snd_una_max - tc->snd_una) + tc->rtx_bytes)
-    - (tc->rcv_dupacks * tc->snd_mss) /* - tc->sack_sb.sacked_bytes */ ;
+  flight_size = (int) (tc->snd_una_max - tc->snd_una) - tcp_bytes_out (tc)
+    + tc->snd_rxt_bytes;
 
-  /* Happens if we don't clear sacked bytes */
   if (flight_size < 0)
-    return 0;
+    {
+      if (0)
+	clib_warning
+	  ("Negative: %u %u %u dupacks %u sacked bytes %u flags %d",
+	   tc->snd_una_max - tc->snd_una, tcp_bytes_out (tc),
+	   tc->snd_rxt_bytes, tc->rcv_dupacks, tc->sack_sb.sacked_bytes,
+	   tc->rcv_opts.flags);
+      return 0;
+    }
 
   return flight_size;
 }
@@ -481,14 +513,17 @@
   return available_wnd - flight_size;
 }
 
-u32 tcp_rcv_wnd_available (tcp_connection_t * tc);
+i32 tcp_rcv_wnd_available (tcp_connection_t * tc);
 u32 tcp_snd_space (tcp_connection_t * tc);
 void tcp_update_rcv_wnd (tcp_connection_t * tc);
 
 void tcp_retransmit_first_unacked (tcp_connection_t * tc);
+void tcp_fast_retransmit_no_sack (tcp_connection_t * tc);
+void tcp_fast_retransmit_sack (tcp_connection_t * tc);
 void tcp_fast_retransmit (tcp_connection_t * tc);
-void tcp_cc_congestion (tcp_connection_t * tc);
-void tcp_cc_recover (tcp_connection_t * tc);
+void tcp_cc_init_congestion (tcp_connection_t * tc);
+int tcp_cc_recover (tcp_connection_t * tc);
+void tcp_cc_fastrecovery_exit (tcp_connection_t * tc);
 
 /* Made public for unit testing only */
 void tcp_update_sack_list (tcp_connection_t * tc, u32 start, u32 end);
@@ -563,19 +598,19 @@
 }
 
 always_inline void
-tcp_retransmit_timer_update (tcp_connection_t * tc)
-{
-  tcp_timer_update (tc, TCP_TIMER_RETRANSMIT,
-		    clib_max (tc->rto * TCP_TO_TIMER_TICK, 1));
-}
-
-always_inline void
 tcp_retransmit_timer_reset (tcp_connection_t * tc)
 {
   tcp_timer_reset (tc, TCP_TIMER_RETRANSMIT);
 }
 
 always_inline void
+tcp_retransmit_timer_force_update (tcp_connection_t * tc)
+{
+  tcp_timer_update (tc, TCP_TIMER_RETRANSMIT,
+		    clib_max (tc->rto * TCP_TO_TIMER_TICK, 1));
+}
+
+always_inline void
 tcp_persist_timer_set (tcp_connection_t * tc)
 {
   /* Reuse RTO. It's backed off in handler */
@@ -598,15 +633,43 @@
   tcp_timer_reset (tc, TCP_TIMER_PERSIST);
 }
 
+always_inline void
+tcp_retransmit_timer_update (tcp_connection_t * tc)
+{
+  if (tc->snd_una == tc->snd_una_max)
+    {
+      tcp_retransmit_timer_reset (tc);
+      if (tc->snd_wnd < tc->snd_mss)
+	tcp_persist_timer_set (tc);
+    }
+  else
+    tcp_timer_update (tc, TCP_TIMER_RETRANSMIT,
+		      clib_max (tc->rto * TCP_TO_TIMER_TICK, 1));
+}
+
 always_inline u8
 tcp_timer_is_active (tcp_connection_t * tc, tcp_timers_e timer)
 {
   return tc->timers[timer] != TCP_TIMER_HANDLE_INVALID;
 }
 
+#define tcp_validate_txf_size(_tc, _a) 					\
+  ASSERT(_tc->state != TCP_STATE_ESTABLISHED 				\
+	 || stream_session_tx_fifo_max_dequeue (&_tc->connection) >= _a)
+
 void
 scoreboard_remove_hole (sack_scoreboard_t * sb,
 			sack_scoreboard_hole_t * hole);
+void scoreboard_update_lost (tcp_connection_t * tc, sack_scoreboard_t * sb);
+sack_scoreboard_hole_t *scoreboard_insert_hole (sack_scoreboard_t * sb,
+						u32 prev_index, u32 start,
+						u32 end);
+sack_scoreboard_hole_t *scoreboard_next_rxt_hole (sack_scoreboard_t * sb,
+						  sack_scoreboard_hole_t *
+						  start, u8 have_sent_1_smss,
+						  u8 * can_rescue,
+						  u8 * snd_limited);
+void scoreboard_init_high_rxt (sack_scoreboard_t * sb);
 
 always_inline sack_scoreboard_hole_t *
 scoreboard_get_hole (sack_scoreboard_t * sb, u32 index)
@@ -625,6 +688,14 @@
 }
 
 always_inline sack_scoreboard_hole_t *
+scoreboard_prev_hole (sack_scoreboard_t * sb, sack_scoreboard_hole_t * hole)
+{
+  if (hole->prev != TCP_INVALID_SACK_HOLE_INDEX)
+    return pool_elt_at_index (sb->holes, hole->prev);
+  return 0;
+}
+
+always_inline sack_scoreboard_hole_t *
 scoreboard_first_hole (sack_scoreboard_t * sb)
 {
   if (sb->head != TCP_INVALID_SACK_HOLE_INDEX)
@@ -643,15 +714,19 @@
 always_inline void
 scoreboard_clear (sack_scoreboard_t * sb)
 {
-  sack_scoreboard_hole_t *hole = scoreboard_first_hole (sb);
+  sack_scoreboard_hole_t *hole;
   while ((hole = scoreboard_first_hole (sb)))
     {
       scoreboard_remove_hole (sb, hole);
     }
   sb->sacked_bytes = 0;
   sb->last_sacked_bytes = 0;
+  sb->last_bytes_delivered = 0;
   sb->snd_una_adv = 0;
-  sb->max_byte_sacked = 0;
+  sb->high_sacked = 0;
+  sb->high_rxt = 0;
+  sb->lost_bytes = 0;
+  sb->cur_rxt_hole = TCP_INVALID_SACK_HOLE_INDEX;
 }
 
 always_inline u32
@@ -671,6 +746,7 @@
 {
   sb->head = TCP_INVALID_SACK_HOLE_INDEX;
   sb->tail = TCP_INVALID_SACK_HOLE_INDEX;
+  sb->cur_rxt_hole = TCP_INVALID_SACK_HOLE_INDEX;
 }
 
 void tcp_rcv_sacks (tcp_connection_t * tc, u32 ack);
diff --git a/src/vnet/tcp/tcp_debug.h b/src/vnet/tcp/tcp_debug.h
index b4497a3..3a16cf6 100755
--- a/src/vnet/tcp/tcp_debug.h
+++ b/src/vnet/tcp/tcp_debug.h
@@ -393,7 +393,7 @@
   DECLARE_ETD(_tc, _e, 4);						\
   ed->data[0] = _seq - _tc->irs;					\
   ed->data[1] = _end - _tc->irs;					\
-  ed->data[2] = _tc->opt.tsval;						\
+  ed->data[2] = _tc->rcv_opts.tsval;					\
   ed->data[3] = _tc->tsval_recent;					\
 }
 
@@ -427,27 +427,27 @@
 {									\
   ELOG_TYPE_DECLARE (_e) =						\
   {									\
-    .format = "rtx: snd_nxt %u offset %u snd %u rtx %u",		\
+    .format = "rxt: snd_nxt %u offset %u snd %u rxt %u",		\
     .format_args = "i4i4i4i4",						\
   };									\
   DECLARE_ETD(_tc, _e, 4);						\
   ed->data[0] = _tc->snd_nxt - _tc->iss;				\
   ed->data[1] = offset;							\
   ed->data[2] = n_bytes;						\
-  ed->data[3] = _tc->rtx_bytes;						\
+  ed->data[3] = _tc->snd_rxt_bytes;					\
 }
 
 #define TCP_EVT_CC_EVT_HANDLER(_tc, _sub_evt, ...)			\
 {									\
   ELOG_TYPE_DECLARE (_e) =						\
   {									\
-    .format = "cc: %s wnd %u snd_cong %u rtx_bytes %u",			\
+    .format = "cc: %s wnd %u snd_cong %u rxt_bytes %u",			\
     .format_args = "t4i4i4i4",						\
     .n_enum_strings = 5,						\
     .enum_strings = {                                           	\
-      "fast-rtx",	                                             	\
-      "rtx-timeout",                                                 	\
-      "first-rtx",                                                 	\
+      "fast-rxt",	                                             	\
+      "rxt-timeout",                                                 	\
+      "first-rxt",                                                 	\
       "recovered",							\
       "congestion",							\
     },  								\
@@ -456,7 +456,7 @@
   ed->data[0] = _sub_evt;						\
   ed->data[1] = tcp_available_snd_space (_tc);				\
   ed->data[2] = _tc->snd_congestion - _tc->iss;				\
-  ed->data[3] = _tc->rtx_bytes;						\
+  ed->data[3] = _tc->snd_rxt_bytes;					\
 }
 
 #define TCP_EVT_CC_PACK_HANDLER(_tc, ...)				\
diff --git a/src/vnet/tcp/tcp_input.c b/src/vnet/tcp/tcp_input.c
index 35bc909..ff2229b 100644
--- a/src/vnet/tcp/tcp_input.c
+++ b/src/vnet/tcp/tcp_input.c
@@ -231,8 +231,8 @@
 always_inline int
 tcp_segment_check_paws (tcp_connection_t * tc)
 {
-  return tcp_opts_tstamp (&tc->opt) && tc->tsval_recent
-    && timestamp_lt (tc->opt.tsval, tc->tsval_recent);
+  return tcp_opts_tstamp (&tc->rcv_opts) && tc->tsval_recent
+    && timestamp_lt (tc->rcv_opts.tsval, tc->tsval_recent);
 }
 
 /**
@@ -248,10 +248,10 @@
    * then the TSval from the segment is copied to TS.Recent;
    * otherwise, the TSval is ignored.
    */
-  if (tcp_opts_tstamp (&tc->opt) && tc->tsval_recent
+  if (tcp_opts_tstamp (&tc->rcv_opts) && tc->tsval_recent
       && seq_leq (seq, tc->rcv_las) && seq_leq (tc->rcv_las, seq_end))
     {
-      tc->tsval_recent = tc->opt.tsval;
+      tc->tsval_recent = tc->rcv_opts.tsval;
       tc->tsval_recent_age = tcp_time_now ();
     }
 }
@@ -272,14 +272,21 @@
   if (PREDICT_FALSE (!tcp_ack (th0) && !tcp_rst (th0) && !tcp_syn (th0)))
     return -1;
 
-  if (PREDICT_FALSE (tcp_options_parse (th0, &tc0->opt)))
+  if (PREDICT_FALSE (tcp_options_parse (th0, &tc0->rcv_opts)))
     {
       return -1;
     }
 
   if (tcp_segment_check_paws (tc0))
     {
-      clib_warning ("paws failed");
+      if (CLIB_DEBUG > 2)
+	{
+	  clib_warning ("paws failed\n%U", format_tcp_connection, tc0, 2);
+	  clib_warning ("seq %u seq_end %u ack %u",
+			vnet_buffer (b0)->tcp.seq_number - tc0->irs,
+			vnet_buffer (b0)->tcp.seq_end - tc0->irs,
+			vnet_buffer (b0)->tcp.ack_number - tc0->iss);
+	}
       TCP_EVT_DBG (TCP_EVT_PAWS_FAIL, tc0, vnet_buffer (b0)->tcp.seq_number,
 		   vnet_buffer (b0)->tcp.seq_end);
 
@@ -348,7 +355,6 @@
   /* If segment in window, save timestamp */
   tcp_update_timestamp (tc0, vnet_buffer (b0)->tcp.seq_number,
 			vnet_buffer (b0)->tcp.seq_end);
-
   return 0;
 }
 
@@ -391,6 +397,12 @@
     }
 }
 
+void
+tcp_update_rto (tcp_connection_t * tc)
+{
+  tc->rto = clib_min (tc->srtt + (tc->rttvar << 2), TCP_RTO_MAX);
+}
+
 /** Update RTT estimate and RTO timer
  *
  * Measure RTT: We have two sources of RTT measurements: TSOPT and ACK
@@ -405,7 +417,7 @@
   u32 mrtt = 0;
   u8 rtx_acked;
 
-  /* Determine if only rtx bytes are acked. TODO fast retransmit */
+  /* Determine if only rtx bytes are acked. TODO XXX fast retransmit */
   rtx_acked = tc->rto_boff && (tc->bytes_acked <= tc->snd_mss);
 
   /* Karn's rule, part 1. Don't use retransmitted segments to estimate
@@ -418,9 +430,10 @@
    * snd_una, i.e., the left side of the send window:
    * seq_lt (tc->snd_una, ack). Note: last condition could be dropped, we don't
    * try to update rtt for dupacks */
-  else if (tcp_opts_tstamp (&tc->opt) && tc->opt.tsecr && tc->bytes_acked)
+  else if (tcp_opts_tstamp (&tc->rcv_opts) && tc->rcv_opts.tsecr
+	   && tc->bytes_acked)
     {
-      mrtt = tcp_time_now () - tc->opt.tsecr;
+      mrtt = tcp_time_now () - tc->rcv_opts.tsecr;
     }
 
   /* Allow measuring of a new RTT */
@@ -436,7 +449,7 @@
     return 0;
 
   tcp_estimate_rtt (tc, mrtt);
-  tc->rto = clib_min (tc->srtt + (tc->rttvar << 2), TCP_RTO_MAX);
+  tcp_update_rto (tc);
 
   return 0;
 }
@@ -447,25 +460,46 @@
 static void
 tcp_dequeue_acked (tcp_connection_t * tc, u32 ack)
 {
-  /* Dequeue the newly ACKed bytes */
-  stream_session_dequeue_drop (&tc->connection, tc->bytes_acked);
+  /* Dequeue the newly ACKed and SACKed bytes */
+  stream_session_dequeue_drop (&tc->connection,
+			       tc->bytes_acked + tc->sack_sb.snd_una_adv);
+
+  tcp_validate_txf_size (tc, tc->snd_una_max - tc->snd_una);
 
   /* Update rtt and rto */
   tcp_update_rtt (tc, ack);
+
+  /* If everything has been acked, stop retransmit timer
+   * otherwise update. */
+  tcp_retransmit_timer_update (tc);
 }
 
 /**
- * Check if dupack as per RFC5681 Sec. 2
- *
- * This works only if called before updating snd_wnd.
- * */
-always_inline u8
-tcp_ack_is_dupack (tcp_connection_t * tc, vlib_buffer_t * b, u32 new_snd_wnd)
+ * Check if duplicate ack as per RFC5681 Sec. 2
+ */
+static u8
+tcp_ack_is_dupack (tcp_connection_t * tc, vlib_buffer_t * b, u32 prev_snd_wnd,
+		   u32 prev_snd_una)
 {
-  return ((vnet_buffer (b)->tcp.ack_number == tc->snd_una)
+  return ((vnet_buffer (b)->tcp.ack_number == prev_snd_una)
 	  && seq_gt (tc->snd_una_max, tc->snd_una)
 	  && (vnet_buffer (b)->tcp.seq_end == vnet_buffer (b)->tcp.seq_number)
-	  && (new_snd_wnd == tc->snd_wnd));
+	  && (prev_snd_wnd == tc->snd_wnd));
+}
+
+/**
+ * Checks if ack is a congestion control event.
+ */
+static u8
+tcp_ack_is_cc_event (tcp_connection_t * tc, vlib_buffer_t * b,
+		     u32 prev_snd_wnd, u32 prev_snd_una, u8 * is_dack)
+{
+  /* Check if ack is duplicate. Per RFC 6675, ACKs that SACK new data are
+   * defined to be 'duplicate' */
+  *is_dack = tc->sack_sb.last_sacked_bytes
+    || tcp_ack_is_dupack (tc, b, prev_snd_wnd, prev_snd_una);
+
+  return (*is_dack || tcp_in_cong_recovery (tc));
 }
 
 void
@@ -478,6 +512,10 @@
       next = pool_elt_at_index (sb->holes, hole->next);
       next->prev = hole->prev;
     }
+  else
+    {
+      sb->tail = hole->prev;
+    }
 
   if (hole->prev != TCP_INVALID_SACK_HOLE_INDEX)
     {
@@ -489,6 +527,9 @@
       sb->head = hole->next;
     }
 
+  if (scoreboard_hole_index (sb, hole) == sb->cur_rxt_hole)
+    sb->cur_rxt_hole = TCP_INVALID_SACK_HOLE_INDEX;
+
   pool_put (sb->holes, hole);
 }
 
@@ -528,25 +569,130 @@
 }
 
 void
+scoreboard_update_lost (tcp_connection_t * tc, sack_scoreboard_t * sb)
+{
+  sack_scoreboard_hole_t *hole, *prev;
+  u32 bytes = 0, blks = 0;
+
+  sb->lost_bytes = 0;
+  hole = scoreboard_last_hole (sb);
+  if (!hole)
+    return;
+
+  if (seq_gt (sb->high_sacked, hole->end))
+    {
+      bytes = sb->high_sacked - hole->end;
+      blks = 1;
+    }
+
+  while ((prev = scoreboard_prev_hole (sb, hole))
+	 && (bytes < (TCP_DUPACK_THRESHOLD - 1) * tc->snd_mss
+	     && blks < TCP_DUPACK_THRESHOLD))
+    {
+      bytes += hole->start - prev->end;
+      blks++;
+      hole = prev;
+    }
+
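+  /* Holes below this point have at least (TCP_DUPACK_THRESHOLD - 1) * smss
+   * sacked bytes or TCP_DUPACK_THRESHOLD sacked blocks above them, so they
+   * satisfy the RFC 6675 IsLost () heuristic */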
+  hole = prev;
+  while (hole)
+    {
+      sb->lost_bytes += scoreboard_hole_bytes (hole);
+      hole->is_lost = 1;
+      hole = scoreboard_prev_hole (sb, hole);
+    }
+}
+
+/**
+ * Figure out the next hole to retransmit
+ *
+ * Follows logic proposed in RFC6675 Sec. 4, NextSeg()
+ */
+sack_scoreboard_hole_t *
+scoreboard_next_rxt_hole (sack_scoreboard_t * sb,
+			  sack_scoreboard_hole_t * start,
+			  u8 have_sent_1_smss,
+			  u8 * can_rescue, u8 * snd_limited)
+{
+  sack_scoreboard_hole_t *hole = 0;
+
+  hole = start ? start : scoreboard_first_hole (sb);
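+  /* Skip holes that are marked lost and were already fully retransmitted
+   * (covered by high_rxt) */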
+  while (hole && seq_leq (hole->end, sb->high_rxt) && hole->is_lost)
+    hole = scoreboard_next_hole (sb, hole);
+
+  /* Nothing, return */
+  if (!hole)
+    {
+      sb->cur_rxt_hole = TCP_INVALID_SACK_HOLE_INDEX;
+      return 0;
+    }
+
+  /* Rule (1): if higher than rxt, less than high_sacked and lost */
+  if (hole->is_lost && seq_lt (hole->start, sb->high_sacked))
+    {
+      sb->cur_rxt_hole = scoreboard_hole_index (sb, hole);
+    }
+  else
+    {
+      /* Rule (2): output takes care of transmitting new data */
+      if (!have_sent_1_smss)
+	{
+	  hole = 0;
+	  sb->cur_rxt_hole = TCP_INVALID_SACK_HOLE_INDEX;
+	}
+      /* Rule (3): if hole not lost */
+      else if (seq_lt (hole->start, sb->high_sacked))
+	{
+	  *snd_limited = 1;
+	  sb->cur_rxt_hole = scoreboard_hole_index (sb, hole);
+	}
+      /* Rule (4): if hole beyond high_sacked */
+      else
+	{
+	  ASSERT (seq_geq (hole->start, sb->high_sacked));
+	  *snd_limited = 1;
+	  *can_rescue = 1;
+	  /* HighRxt MUST NOT be updated */
+	  return 0;
+	}
+    }
+
+  if (hole && seq_lt (sb->high_rxt, hole->start))
+    sb->high_rxt = hole->start;
+
+  return hole;
+}
+
+void
+scoreboard_init_high_rxt (sack_scoreboard_t * sb)
+{
+  sack_scoreboard_hole_t *hole;
+  hole = scoreboard_first_hole (sb);
+  sb->high_rxt = hole->start;
+  sb->cur_rxt_hole = sb->head;
+}
+
+void
 tcp_rcv_sacks (tcp_connection_t * tc, u32 ack)
 {
   sack_scoreboard_t *sb = &tc->sack_sb;
   sack_block_t *blk, tmp;
   sack_scoreboard_hole_t *hole, *next_hole, *last_hole, *new_hole;
-  u32 blk_index = 0, old_sacked_bytes, delivered_bytes, hole_index;
+  u32 blk_index = 0, old_sacked_bytes, hole_index;
   int i, j;
 
   sb->last_sacked_bytes = 0;
   sb->snd_una_adv = 0;
   old_sacked_bytes = sb->sacked_bytes;
-  delivered_bytes = 0;
+  sb->last_bytes_delivered = 0;
 
-  if (!tcp_opts_sack (&tc->opt) && sb->head == TCP_INVALID_SACK_HOLE_INDEX)
+  if (!tcp_opts_sack (&tc->rcv_opts)
+      && sb->head == TCP_INVALID_SACK_HOLE_INDEX)
     return;
 
   /* Remove invalid blocks */
-  blk = tc->opt.sacks;
-  while (blk < vec_end (tc->opt.sacks))
+  blk = tc->rcv_opts.sacks;
+  while (blk < vec_end (tc->rcv_opts.sacks))
     {
       if (seq_lt (blk->start, blk->end)
 	  && seq_gt (blk->start, tc->snd_una)
@@ -555,7 +701,7 @@
 	  blk++;
 	  continue;
 	}
-      vec_del1 (tc->opt.sacks, blk - tc->opt.sacks);
+      vec_del1 (tc->rcv_opts.sacks, blk - tc->rcv_opts.sacks);
     }
 
   /* Add block for cumulative ack */
@@ -563,20 +709,20 @@
     {
       tmp.start = tc->snd_una;
       tmp.end = ack;
-      vec_add1 (tc->opt.sacks, tmp);
+      vec_add1 (tc->rcv_opts.sacks, tmp);
     }
 
-  if (vec_len (tc->opt.sacks) == 0)
+  if (vec_len (tc->rcv_opts.sacks) == 0)
     return;
 
   /* Make sure blocks are ordered */
-  for (i = 0; i < vec_len (tc->opt.sacks); i++)
-    for (j = i + 1; j < vec_len (tc->opt.sacks); j++)
-      if (seq_lt (tc->opt.sacks[j].start, tc->opt.sacks[i].start))
+  for (i = 0; i < vec_len (tc->rcv_opts.sacks); i++)
+    for (j = i + 1; j < vec_len (tc->rcv_opts.sacks); j++)
+      if (seq_lt (tc->rcv_opts.sacks[j].start, tc->rcv_opts.sacks[i].start))
 	{
-	  tmp = tc->opt.sacks[i];
-	  tc->opt.sacks[i] = tc->opt.sacks[j];
-	  tc->opt.sacks[j] = tmp;
+	  tmp = tc->rcv_opts.sacks[i];
+	  tc->rcv_opts.sacks[i] = tc->rcv_opts.sacks[j];
+	  tc->rcv_opts.sacks[j] = tmp;
 	}
 
   if (sb->head == TCP_INVALID_SACK_HOLE_INDEX)
@@ -585,25 +731,25 @@
       last_hole = scoreboard_insert_hole (sb, TCP_INVALID_SACK_HOLE_INDEX,
 					  tc->snd_una, tc->snd_una_max);
       sb->tail = scoreboard_hole_index (sb, last_hole);
-      tmp = tc->opt.sacks[vec_len (tc->opt.sacks) - 1];
-      sb->max_byte_sacked = tmp.end;
+      tmp = tc->rcv_opts.sacks[vec_len (tc->rcv_opts.sacks) - 1];
+      sb->high_sacked = tmp.end;
     }
   else
     {
       /* If we have holes but snd_una_max is beyond the last hole, update
        * last hole end */
-      tmp = tc->opt.sacks[vec_len (tc->opt.sacks) - 1];
+      tmp = tc->rcv_opts.sacks[vec_len (tc->rcv_opts.sacks) - 1];
       last_hole = scoreboard_last_hole (sb);
-      if (seq_gt (tc->snd_una_max, sb->max_byte_sacked)
+      if (seq_gt (tc->snd_una_max, sb->high_sacked)
 	  && seq_gt (tc->snd_una_max, last_hole->end))
 	last_hole->end = tc->snd_una_max;
     }
 
   /* Walk the holes with the SACK blocks */
   hole = pool_elt_at_index (sb->holes, sb->head);
-  while (hole && blk_index < vec_len (tc->opt.sacks))
+  while (hole && blk_index < vec_len (tc->rcv_opts.sacks))
     {
-      blk = &tc->opt.sacks[blk_index];
+      blk = &tc->rcv_opts.sacks[blk_index];
 
       if (seq_leq (blk->start, hole->start))
 	{
@@ -617,9 +763,9 @@
 		{
 		  /* Bytes lost because snd_wnd left edge advances */
 		  if (next_hole && seq_leq (next_hole->start, ack))
-		    delivered_bytes += next_hole->start - hole->end;
+		    sb->last_bytes_delivered += next_hole->start - hole->end;
 		  else
-		    delivered_bytes += ack - hole->end;
+		    sb->last_bytes_delivered += ack - hole->end;
 		}
 	      else
 		{
@@ -633,8 +779,8 @@
 		  last_hole = scoreboard_last_hole (sb);
 		  /* keep track of max byte sacked for when the last hole
 		   * is acked */
-		  if (seq_gt (hole->end, sb->max_byte_sacked))
-		    sb->max_byte_sacked = hole->end;
+		  if (seq_gt (hole->end, sb->high_sacked))
+		    sb->high_sacked = hole->end;
 		}
 
 	      /* snd_una needs to be advanced */
@@ -645,12 +791,12 @@
 		      sb->snd_una_adv = next_hole->start - ack;
 
 		      /* all these can be delivered */
-		      delivered_bytes += sb->snd_una_adv;
+		      sb->last_bytes_delivered += sb->snd_una_adv;
 		    }
 		  else if (!next_hole)
 		    {
-		      sb->snd_una_adv = sb->max_byte_sacked - ack;
-		      delivered_bytes += sb->snd_una_adv;
+		      sb->snd_una_adv = sb->high_sacked - ack;
+		      sb->last_bytes_delivered += sb->snd_una_adv;
 		    }
 		}
 
@@ -691,28 +837,33 @@
 		}
 
 	      blk_index++;
-	      hole = scoreboard_next_hole (sb, hole);
 	    }
-	  else
+	  else if (seq_leq (blk->start, hole->end))
 	    {
 	      sb->sacked_bytes += hole->end - blk->start;
 	      hole->end = blk->start;
-	      hole = scoreboard_next_hole (sb, hole);
 	    }
+
+	  hole = scoreboard_next_hole (sb, hole);
 	}
     }
 
   sb->last_sacked_bytes = sb->sacked_bytes - old_sacked_bytes;
-  sb->sacked_bytes -= delivered_bytes;
+  sb->sacked_bytes -= sb->last_bytes_delivered;
+  scoreboard_update_lost (tc, sb);
 }
 
-/** Update snd_wnd
+/**
+ * Try to update snd_wnd based on feedback received from peer.
  *
- * If (SND.WL1 < SEG.SEQ or (SND.WL1 = SEG.SEQ and SND.WL2 =< SEG.ACK)), set
- * SND.WND <- SEG.WND, set SND.WL1 <- SEG.SEQ, and set SND.WL2 <- SEG.ACK */
+ * If successful, and new window is 'effectively' 0, activate persist
+ * timer.
+ */
 static void
 tcp_update_snd_wnd (tcp_connection_t * tc, u32 seq, u32 ack, u32 snd_wnd)
 {
+  /* If (SND.WL1 < SEG.SEQ or (SND.WL1 = SEG.SEQ and SND.WL2 =< SEG.ACK)), set
+   * SND.WND <- SEG.WND, set SND.WL1 <- SEG.SEQ, and set SND.WL2 <- SEG.ACK */
   if (seq_lt (tc->snd_wl1, seq)
       || (tc->snd_wl1 == seq && seq_leq (tc->snd_wl2, ack)))
     {
@@ -721,138 +872,269 @@
       tc->snd_wl2 = ack;
       TCP_EVT_DBG (TCP_EVT_SND_WND, tc);
 
-      /* Set probe timer if we just got 0 wnd */
       if (tc->snd_wnd < tc->snd_mss)
 	{
-	  if (!tcp_timer_is_active (tc, TCP_TIMER_PERSIST))
+	  /* Set persist timer if not set and we just got 0 wnd */
+	  if (!tcp_timer_is_active (tc, TCP_TIMER_PERSIST)
+	      && !tcp_timer_is_active (tc, TCP_TIMER_RETRANSMIT))
 	    tcp_persist_timer_set (tc);
 	}
       else
-	tcp_persist_timer_reset (tc);
+	{
+	  tcp_persist_timer_reset (tc);
+	  if (!tcp_in_recovery (tc) && tc->rto_boff > 0)
+	    {
+	      tc->rto_boff = 0;
+	      tcp_update_rto (tc);
+	    }
+	}
     }
 }
 
 void
-tcp_cc_congestion (tcp_connection_t * tc)
+tcp_cc_init_congestion (tcp_connection_t * tc)
 {
-  tc->snd_congestion = tc->snd_nxt;
+  tcp_fastrecovery_on (tc);
+  tc->snd_congestion = tc->snd_una_max;
   tc->cc_algo->congestion (tc);
   TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 4);
 }
 
-void
-tcp_cc_recover (tcp_connection_t * tc)
+static void
+tcp_cc_recovery_exit (tcp_connection_t * tc)
 {
-  /* TODO: check if time to recover was small. It might be that RTO popped
-   * too soon.
-   */
+  /* Deflate rto */
+  tcp_update_rto (tc);
+  tc->rto_boff = 0;
+  tc->snd_rxt_ts = 0;
+  tcp_recovery_off (tc);
+}
 
+void
+tcp_cc_fastrecovery_exit (tcp_connection_t * tc)
+{
   tc->cc_algo->recovered (tc);
-
-  tc->rtx_bytes = 0;
+  tc->snd_rxt_bytes = 0;
   tc->rcv_dupacks = 0;
-  tc->snd_nxt = tc->snd_una;
-
-  tc->cc_algo->rcv_ack (tc);
-  tc->tsecr_last_ack = tc->opt.tsecr;
-
-  tcp_cong_recovery_off (tc);
-
-  TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 3);
+  tcp_fastrecovery_off (tc);
+  tcp_fastrecovery_1_smss_off (tc);
 }
 
 static void
-tcp_cc_rcv_ack (tcp_connection_t * tc, vlib_buffer_t * b)
+tcp_cc_congestion_undo (tcp_connection_t * tc)
 {
-  u8 partial_ack;
-  u32 bytes_advanced;
+  tc->cwnd = tc->prev_cwnd;
+  tc->ssthresh = tc->prev_ssthresh;
+  tc->snd_nxt = tc->snd_una_max;
+  tc->rcv_dupacks = 0;
+  if (tcp_in_recovery (tc))
+    tcp_cc_recovery_exit (tc);
+  ASSERT (tc->rto_boff == 0);
+  /* TODO extend for fastrecovery */
+}
 
-  if (tcp_in_fastrecovery (tc))
+static u8
+tcp_cc_is_spurious_retransmit (tcp_connection_t * tc)
+{
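+  /* Eifel detection (RFC 3522): the ACK echoes a timestamp older than our
+   * first retransmit, so the original segment must have been received */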
+  return (tc->snd_rxt_ts
+	  && tcp_opts_tstamp (&tc->rcv_opts)
+	  && timestamp_lt (tc->rcv_opts.tsecr, tc->snd_rxt_ts));
+}
+
+int
+tcp_cc_recover (tcp_connection_t * tc)
+{
+  ASSERT (tcp_in_cong_recovery (tc));
+  if (tcp_cc_is_spurious_retransmit (tc))
     {
-      partial_ack = seq_lt (tc->snd_una, tc->snd_congestion);
-      if (!partial_ack)
+      tcp_cc_congestion_undo (tc);
+      return 1;
+    }
+
+  if (tcp_in_recovery (tc))
+    tcp_cc_recovery_exit (tc);
+  else if (tcp_in_fastrecovery (tc))
+    tcp_cc_fastrecovery_exit (tc);
+
+  ASSERT (tc->rto_boff == 0);
+  ASSERT (!tcp_in_cong_recovery (tc));
+
+  TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 3);
+  return 0;
+}
+
+static void
+tcp_cc_update (tcp_connection_t * tc, vlib_buffer_t * b)
+{
+  ASSERT (!tcp_in_cong_recovery (tc));
+
+  /* Congestion avoidance */
+  tc->cc_algo->rcv_ack (tc);
+  tc->tsecr_last_ack = tc->rcv_opts.tsecr;
+
+  /* If a cumulative ack, make sure dupacks is 0 */
+  tc->rcv_dupacks = 0;
+
+  /* When dupacks hits the threshold we only enter fast retransmit if
+   * cumulative ack covers more than snd_congestion. Should snd_una
+   * wrap this test may fail under otherwise valid circumstances.
+   * Therefore, proactively update snd_congestion when wrap detected. */
+  if (PREDICT_FALSE
+      (seq_leq (tc->snd_congestion, tc->snd_una - tc->bytes_acked)
+       && seq_gt (tc->snd_congestion, tc->snd_una)))
+    tc->snd_congestion = tc->snd_una - 1;
+}
+
+static u8
+tcp_should_fastrecover_sack (tcp_connection_t * tc)
+{
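+  /* RFC 6675: more than (DupThresh - 1) * smss bytes sacked suggests loss */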
+  return (TCP_DUPACK_THRESHOLD - 1) * tc->snd_mss < tc->sack_sb.sacked_bytes;
+}
+
+static u8
+tcp_should_fastrecover (tcp_connection_t * tc)
+{
+  return (tc->rcv_dupacks == TCP_DUPACK_THRESHOLD
+	  || tcp_should_fastrecover_sack (tc));
+}
+
+static void
+tcp_cc_handle_event (tcp_connection_t * tc, u32 is_dack)
+{
+  /*
+   * Duplicate ACK. Check if we should enter fast recovery, or if already in
+   * it account for the bytes that left the network.
+   */
+  if (is_dack)
+    {
+      ASSERT (tc->snd_una != tc->snd_una_max
+	      || tc->sack_sb.last_sacked_bytes);
+      tc->rcv_dupacks++;
+
+      if (tc->rcv_dupacks > TCP_DUPACK_THRESHOLD && !tc->bytes_acked)
 	{
-	  /* Clear retransmitted bytes. */
-	  tcp_cc_recover (tc);
+	  ASSERT (tcp_in_fastrecovery (tc));
+	  /* Pure duplicate ack. If some data got acked, it's handled below */
+	  tc->cc_algo->rcv_cong_ack (tc, TCP_CC_DUPACK);
+	  return;
 	}
-      else
+      else if (tcp_should_fastrecover (tc))
 	{
-	  TCP_EVT_DBG (TCP_EVT_CC_PACK, tc);
-
-	  /* Clear retransmitted bytes. XXX should we clear all? */
-	  tc->rtx_bytes = 0;
-
-	  tc->cc_algo->rcv_cong_ack (tc, TCP_CC_PARTIALACK);
-
-	  /* In case snd_nxt is still in the past and output tries to
-	   * shove some new bytes */
-	  tc->snd_nxt = tc->snd_una_max;
-
-	  /* XXX need proper RFC6675 support */
-	  if (tc->sack_sb.last_sacked_bytes && !tcp_in_recovery (tc))
+	  /* Things are already bad */
+	  if (tcp_in_cong_recovery (tc))
 	    {
-	      tcp_fast_retransmit (tc);
+	      tc->rcv_dupacks = 0;
+	      goto partial_ack_test;
+	    }
+
+	  /* If either of the two conditions below holds, reset dupacks
+	   * 1) Cumulative ack does not cover more than congestion threshold
+	   * 2) RFC6582 heuristic to avoid multiple fast retransmits
+	   */
+	  if (seq_leq (tc->snd_una, tc->snd_congestion)
+	      || tc->rcv_opts.tsecr != tc->tsecr_last_ack)
+	    {
+	      tc->rcv_dupacks = 0;
+	      return;
+	    }
+
+	  tcp_cc_init_congestion (tc);
+	  tc->cc_algo->rcv_cong_ack (tc, TCP_CC_DUPACK);
+
+	  /* The first segment MUST be retransmitted */
+	  tcp_retransmit_first_unacked (tc);
+
+	  /* Post retransmit update cwnd to ssthresh and account for the
+	   * three segments that have left the network and should've been
+	   * buffered at the receiver XXX */
+	  tc->cwnd = tc->ssthresh + tc->rcv_dupacks * tc->snd_mss;
+
+	  /* If cwnd allows, send more data */
+	  if (tcp_opts_sack_permitted (&tc->rcv_opts)
+	      && scoreboard_first_hole (&tc->sack_sb))
+	    {
+	      scoreboard_init_high_rxt (&tc->sack_sb);
+	      tcp_fast_retransmit_sack (tc);
 	    }
 	  else
 	    {
-	      /* Retransmit first unacked segment */
-	      tcp_retransmit_first_unacked (tc);
+	      tcp_fast_retransmit_no_sack (tc);
 	    }
+
+	  return;
 	}
+      else if (!tc->bytes_acked
+	       || (tc->bytes_acked && !tcp_in_cong_recovery (tc)))
+	{
+	  tc->cc_algo->rcv_cong_ack (tc, TCP_CC_DUPACK);
+	  return;
+	}
+      else
+	goto partial_ack;
+    }
+
+partial_ack_test:
+
+  if (!tc->bytes_acked)
+    return;
+
+partial_ack:
+  /*
+   * Legitimate ACK. 1) See if we can exit recovery
+   */
+  /* XXX limit this only to first partial ack? */
+  tcp_retransmit_timer_update (tc);
+
+  if (seq_geq (tc->snd_una, tc->snd_congestion))
+    {
+      /* If spurious return, we've already updated everything */
+      if (tcp_cc_recover (tc))
+	return;
+
+      tc->snd_nxt = tc->snd_una_max;
+
+      /* Treat as congestion avoidance ack */
+      tc->cc_algo->rcv_ack (tc);
+      tc->tsecr_last_ack = tc->rcv_opts.tsecr;
+      return;
+    }
+
+  /*
+   * Legitimate ACK. 2) If PARTIAL ACK try to retransmit
+   */
+  TCP_EVT_DBG (TCP_EVT_CC_PACK, tc);
+
+  /* RFC6675: If the incoming ACK is a cumulative acknowledgment,
+   * reset dupacks to 0 */
+  tc->rcv_dupacks = 0;
+
+  tcp_retransmit_first_unacked (tc);
+
+  /* Post RTO timeout don't try anything fancy */
+  if (tcp_in_recovery (tc))
+    return;
+
+  /* Remove retransmitted bytes that have been delivered */
+  if (tc->sack_sb.last_bytes_delivered
+      && seq_gt (tc->sack_sb.high_rxt, tc->snd_una))
+    {
+      /* If we have sacks and we haven't gotten an ack beyond high_rxt,
+       * remove sacked bytes delivered */
+      tc->snd_rxt_bytes -= tc->sack_sb.last_bytes_delivered;
     }
   else
     {
-      tc->cc_algo->rcv_ack (tc);
-      tc->tsecr_last_ack = tc->opt.tsecr;
-      tc->rcv_dupacks = 0;
-      if (tcp_in_recovery (tc))
-	{
-	  bytes_advanced = tc->bytes_acked + tc->sack_sb.snd_una_adv;
-	  tc->rtx_bytes -= clib_min (bytes_advanced, tc->rtx_bytes);
-	  tc->rto = clib_min (tc->srtt + (tc->rttvar << 2), TCP_RTO_MAX);
-	  if (seq_geq (tc->snd_una, tc->snd_congestion))
-	    {
-	      tc->rtx_bytes = 0;
-	      tcp_recovery_off (tc);
-	    }
-	}
+      /* Either all retransmitted holes have been acked, or we're
+       * "in the blind" and retransmitting segment by segment */
+      tc->snd_rxt_bytes = 0;
     }
-}
 
-static void
-tcp_cc_rcv_dupack (tcp_connection_t * tc, u32 ack)
-{
-//  ASSERT (seq_geq(tc->snd_una, ack));
+  tc->cc_algo->rcv_cong_ack (tc, TCP_CC_PARTIALACK);
 
-  tc->rcv_dupacks++;
-  if (tc->rcv_dupacks == TCP_DUPACK_THRESHOLD)
-    {
-      /* RFC6582 NewReno heuristic to avoid multiple fast retransmits */
-      if (tc->opt.tsecr != tc->tsecr_last_ack)
-	{
-	  tc->rcv_dupacks = 0;
-	  return;
-	}
-
-      tcp_fastrecovery_on (tc);
-
-      /* Handle congestion and dupack */
-      tcp_cc_congestion (tc);
-      tc->cc_algo->rcv_cong_ack (tc, TCP_CC_DUPACK);
-
-      tcp_fast_retransmit (tc);
-
-      /* Post retransmit update cwnd to ssthresh and account for the
-       * three segments that have left the network and should've been
-       * buffered at the receiver */
-      tc->cwnd = tc->ssthresh + TCP_DUPACK_THRESHOLD * tc->snd_mss;
-    }
-  else if (tc->rcv_dupacks > TCP_DUPACK_THRESHOLD)
-    {
-      ASSERT (tcp_in_fastrecovery (tc));
-
-      tc->cc_algo->rcv_cong_ack (tc, TCP_CC_DUPACK);
-    }
+  /*
+   * Since this was a partial ack, try to retransmit some more data
+   */
+  tcp_fast_retransmit (tc);
 }
 
 void
@@ -862,14 +1144,18 @@
   tc->cc_algo->init (tc);
 }
 
+/**
+ * Process incoming ACK
+ */
 static int
 tcp_rcv_ack (tcp_connection_t * tc, vlib_buffer_t * b,
 	     tcp_header_t * th, u32 * next, u32 * error)
 {
-  u32 new_snd_wnd;
+  u32 prev_snd_wnd, prev_snd_una;
+  u8 is_dack;
 
   /* If the ACK acks something not yet sent (SEG.ACK > SND.NXT) */
-  if (seq_gt (vnet_buffer (b)->tcp.ack_number, tc->snd_nxt))
+  if (PREDICT_FALSE (seq_gt (vnet_buffer (b)->tcp.ack_number, tc->snd_nxt)))
     {
       /* If we have outstanding data and this is within the window, accept it,
        * probably retransmit has timed out. Otherwise ACK segment and then
@@ -892,7 +1178,7 @@
     }
 
   /* If old ACK, probably it's an old dupack */
-  if (seq_lt (vnet_buffer (b)->tcp.ack_number, tc->snd_una))
+  if (PREDICT_FALSE (seq_lt (vnet_buffer (b)->tcp.ack_number, tc->snd_una)))
     {
       *error = TCP_ERROR_ACK_OLD;
       TCP_EVT_DBG (TCP_EVT_ACK_RCV_ERR, tc, 1,
@@ -900,53 +1186,49 @@
       if (tcp_in_fastrecovery (tc) && tc->rcv_dupacks == TCP_DUPACK_THRESHOLD)
 	{
 	  TCP_EVT_DBG (TCP_EVT_DUPACK_RCVD, tc);
-	  tcp_cc_rcv_dupack (tc, vnet_buffer (b)->tcp.ack_number);
+	  tcp_cc_handle_event (tc, 1);
 	}
       /* Don't drop yet */
       return 0;
     }
 
-  if (tcp_opts_sack_permitted (&tc->opt))
+  /*
+   * Looks okay, process feedback
+   */
+
+  TCP_EVT_DBG (TCP_EVT_ACK_RCVD, tc);
+
+  if (tcp_opts_sack_permitted (&tc->rcv_opts))
     tcp_rcv_sacks (tc, vnet_buffer (b)->tcp.ack_number);
 
-  new_snd_wnd = clib_net_to_host_u16 (th->window) << tc->snd_wscale;
+  prev_snd_wnd = tc->snd_wnd;
+  prev_snd_una = tc->snd_una;
+  tcp_update_snd_wnd (tc, vnet_buffer (b)->tcp.seq_number,
+		      vnet_buffer (b)->tcp.ack_number,
+		      clib_net_to_host_u16 (th->window) << tc->snd_wscale);
+  tc->bytes_acked = vnet_buffer (b)->tcp.ack_number - tc->snd_una;
+  tc->snd_una = vnet_buffer (b)->tcp.ack_number + tc->sack_sb.snd_una_adv;
+  tcp_validate_txf_size (tc, tc->bytes_acked);
 
-  if (tcp_ack_is_dupack (tc, b, new_snd_wnd))
+  if (tc->bytes_acked)
+    tcp_dequeue_acked (tc, vnet_buffer (b)->tcp.ack_number);
+
+  /*
+   * Check if we have congestion event
+   */
+
+  if (tcp_ack_is_cc_event (tc, b, prev_snd_wnd, prev_snd_una, &is_dack))
     {
-      TCP_EVT_DBG (TCP_EVT_DUPACK_RCVD, tc, 1);
-      tcp_cc_rcv_dupack (tc, vnet_buffer (b)->tcp.ack_number);
+      tcp_cc_handle_event (tc, is_dack);
       *error = TCP_ERROR_ACK_DUP;
-      return -1;
+      TCP_EVT_DBG (TCP_EVT_DUPACK_RCVD, tc, 1);
+      return vnet_buffer (b)->tcp.data_len ? 0 : -1;
     }
 
   /*
-   * Valid ACK
+   * Update congestion control (slow start/congestion avoidance)
    */
-
-  tc->bytes_acked = vnet_buffer (b)->tcp.ack_number - tc->snd_una;
-  tc->snd_una = vnet_buffer (b)->tcp.ack_number + tc->sack_sb.snd_una_adv;
-
-  /* Dequeue ACKed data and update RTT */
-  tcp_dequeue_acked (tc, vnet_buffer (b)->tcp.ack_number);
-  tcp_update_snd_wnd (tc, vnet_buffer (b)->tcp.seq_number,
-		      vnet_buffer (b)->tcp.ack_number, new_snd_wnd);
-
-  /* If some of our sent bytes have been acked, update cc and retransmit
-   * timer. */
-  if (tc->bytes_acked)
-    {
-      TCP_EVT_DBG (TCP_EVT_ACK_RCVD, tc);
-
-      /* Updates congestion control (slow start/congestion avoidance) */
-      tcp_cc_rcv_ack (tc, b);
-
-      /* If everything has been acked, stop retransmit timer
-       * otherwise update. */
-      if (tc->snd_una == tc->snd_una_max)
-	tcp_retransmit_timer_reset (tc);
-      else
-	tcp_retransmit_timer_update (tc);
-    }
+  tcp_cc_update (tc, b);
 
   return 0;
 }
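
For reference, the restructured tcp_rcv_ack above separates ACK handling into
fixed phases: update the send window and snd_una, dequeue acked data, detect a
congestion event, and only then run the regular congestion-control update. A
minimal standalone sketch of that ordering follows; all names are illustrative
stand-ins, not the VPP symbols.

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical, simplified connection state */
    typedef struct { uint32_t snd_una, snd_wnd, bytes_acked; } conn_t;

    /* Stubs standing in for the detection/handling logic in the patch */
    static bool ack_is_cc_event (conn_t *c, uint32_t prev_wnd,
                                 uint32_t prev_una, bool *is_dack)
    { (void) c; (void) prev_wnd; (void) prev_una; *is_dack = false; return false; }
    static void cc_handle_event (conn_t *c, bool is_dack) { (void) c; (void) is_dack; }
    static void cc_update (conn_t *c) { (void) c; }
    static void dequeue_acked (conn_t *c, uint32_t ack) { (void) c; (void) ack; }

    static int rcv_ack (conn_t *c, uint32_t ack, uint32_t wnd)
    {
      uint32_t prev_snd_wnd = c->snd_wnd, prev_snd_una = c->snd_una;
      bool is_dack;

      c->snd_wnd = wnd;                   /* 1. window update */
      c->bytes_acked = ack - prev_snd_una;
      c->snd_una = ack;
      if (c->bytes_acked)
        dequeue_acked (c, ack);           /* 2. drop acked data, sample RTT */

      if (ack_is_cc_event (c, prev_snd_wnd, prev_snd_una, &is_dack))
        {
          cc_handle_event (c, is_dack);   /* 3. dupack/partial-ack handling */
          return -1;
        }
      cc_update (c);                      /* 4. slow start/congestion avoidance */
      return 0;
    }
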
@@ -1059,7 +1341,7 @@
     }
 
   /* Update SACK list if need be */
-  if (tcp_opts_sack_permitted (&tc->opt))
+  if (tcp_opts_sack_permitted (&tc->rcv_opts))
     {
       /* Remove SACK blocks that have been delivered */
       tcp_update_sack_list (tc, tc->rcv_nxt, tc->rcv_nxt);
@@ -1097,7 +1379,7 @@
   TCP_EVT_DBG (TCP_EVT_INPUT, tc, 1, data_len, data_len);
 
   /* Update SACK list if in use */
-  if (tcp_opts_sack_permitted (&tc->opt))
+  if (tcp_opts_sack_permitted (&tc->rcv_opts))
     {
       ooo_segment_t *newest;
       u32 start, end;
@@ -1294,7 +1576,6 @@
       u32 n_left_to_next;
 
       vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
-
       while (n_left_from > 0 && n_left_to_next > 0)
 	{
 	  u32 bi0;
@@ -1321,7 +1602,6 @@
 	    }
 
 	  th0 = tcp_buffer_hdr (b0);
-
 	  is_fin = (th0->flags & TCP_FLAG_FIN) != 0;
 
 	  /* SYNs, FINs and data consume sequence numbers */
@@ -1387,7 +1667,6 @@
 
   errors = session_manager_flush_enqueue_events (my_thread_index);
   tcp_established_inc_counter (vm, is_ip4, TCP_ERROR_EVENT_FIFO_FULL, errors);
-
   return from_frame->n_vectors;
 }
 
@@ -1582,17 +1861,17 @@
 	  new_tc0->irs = seq0;
 
 	  /* Parse options */
-	  if (tcp_options_parse (tcp0, &new_tc0->opt))
+	  if (tcp_options_parse (tcp0, &new_tc0->rcv_opts))
 	    goto drop;
 
-	  if (tcp_opts_tstamp (&new_tc0->opt))
+	  if (tcp_opts_tstamp (&new_tc0->rcv_opts))
 	    {
-	      new_tc0->tsval_recent = new_tc0->opt.tsval;
+	      new_tc0->tsval_recent = new_tc0->rcv_opts.tsval;
 	      new_tc0->tsval_recent_age = tcp_time_now ();
 	    }
 
-	  if (tcp_opts_wscale (&new_tc0->opt))
-	    new_tc0->snd_wscale = new_tc0->opt.wscale;
+	  if (tcp_opts_wscale (&new_tc0->rcv_opts))
+	    new_tc0->snd_wscale = new_tc0->rcv_opts.wscale;
 
 	  /* No scaling */
 	  new_tc0->snd_wnd = clib_net_to_host_u16 (tcp0->window);
@@ -1845,7 +2124,7 @@
 	      /* Initialize session variables */
 	      tc0->snd_una = vnet_buffer (b0)->tcp.ack_number;
 	      tc0->snd_wnd = clib_net_to_host_u16 (tcp0->window)
-		<< tc0->opt.wscale;
+		<< tc0->rcv_opts.wscale;
 	      tc0->snd_wl1 = vnet_buffer (b0)->tcp.seq_number;
 	      tc0->snd_wl2 = vnet_buffer (b0)->tcp.ack_number;
 
@@ -1903,13 +2182,21 @@
 
 	      break;
 	    case TCP_STATE_LAST_ACK:
-	      /* The only thing that can arrive in this state is an
+	      /* The only thing that [should] arrive in this state is an
 	       * acknowledgment of our FIN. If our FIN is now acknowledged,
 	       * delete the TCB, enter the CLOSED state, and return. */
 
 	      if (!tcp_rcv_ack_is_acceptable (tc0, b0))
 		goto drop;
 
+	      /* Apparently our FIN was lost */
+	      if (tcp_fin (tcp0))
+		{
+		  /* Don't "make" fin since that increments snd_nxt */
+		  tcp_send_fin (tc0);
+		  goto drop;
+		}
+
 	      tc0->state = TCP_STATE_CLOSED;
 
 	      /* Don't delete the connection/session yet. Instead, wait a
@@ -1929,8 +2216,15 @@
 	       * retransmission of the remote FIN. Acknowledge it, and restart
 	       * the 2 MSL timeout. */
 
-	      /* TODO */
+	      if (tcp_rcv_ack (tc0, b0, tcp0, &next0, &error0))
+		goto drop;
+
+	      tcp_make_ack (tc0, b0);
+	      tcp_timer_reset (tc0, TCP_TIMER_WAITCLOSE);
+	      tcp_timer_set (tc0, TCP_TIMER_WAITCLOSE, TCP_2MSL_TIME);
+
 	      goto drop;
+
 	      break;
 	    default:
 	      ASSERT (0);
@@ -2194,7 +2488,7 @@
 	      goto drop;
 	    }
 
-	  if (tcp_options_parse (th0, &child0->opt))
+	  if (tcp_options_parse (th0, &child0->rcv_opts))
 	    {
 	      goto drop;
 	    }
@@ -2205,14 +2499,14 @@
 
 	  /* RFC1323: TSval timestamps sent on {SYN} and {SYN,ACK}
 	   * segments are used to initialize PAWS. */
-	  if (tcp_opts_tstamp (&child0->opt))
+	  if (tcp_opts_tstamp (&child0->rcv_opts))
 	    {
-	      child0->tsval_recent = child0->opt.tsval;
+	      child0->tsval_recent = child0->rcv_opts.tsval;
 	      child0->tsval_recent_age = tcp_time_now ();
 	    }
 
-	  if (tcp_opts_wscale (&child0->opt))
-	    child0->snd_wscale = child0->opt.wscale;
+	  if (tcp_opts_wscale (&child0->rcv_opts))
+	    child0->snd_wscale = child0->rcv_opts.wscale;
 
 	  /* No scaling */
 	  child0->snd_wnd = clib_net_to_host_u16 (th0->window);
@@ -2477,7 +2771,6 @@
 		vlib_add_trace (vm, node, b0, sizeof (*t0));
 	      tcp_set_rx_trace_data (t0, tc0, tcp0, b0, is_ip4);
 	    }
-
 	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
 					   n_left_to_next, bi0, next0);
 	}
@@ -2600,7 +2893,13 @@
   _(FIN_WAIT_2, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
     TCP_ERROR_NONE);
   _(LAST_ACK, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
+  _(LAST_ACK, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
+  _(LAST_ACK, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
+    TCP_ERROR_NONE);
   _(LAST_ACK, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
+  _(TIME_WAIT, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
+  _(TIME_WAIT, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
+    TCP_ERROR_NONE);
   _(CLOSED, TCP_FLAG_ACK, TCP_INPUT_NEXT_RESET, TCP_ERROR_CONNECTION_CLOSED);
   _(CLOSED, TCP_FLAG_RST, TCP_INPUT_NEXT_DROP, TCP_ERROR_CONNECTION_CLOSED);
 #undef _
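
The `_()` entries above whitelist new (state, flags) pairs so that a
retransmitted FIN arriving in LAST_ACK or TIME_WAIT reaches the rcv-process
node instead of being dropped by the input filter. A toy model of the
whitelisting idea, with made-up names:

    #include <stdint.h>

    enum { ST_LAST_ACK, ST_TIME_WAIT, N_STATES };   /* illustrative */
    enum { NEXT_DROP = 0, NEXT_RCV_PROCESS };
    #define FLAG_FIN 0x01
    #define FLAG_ACK 0x10

    /* Everything defaults to drop (0); valid combos are whitelisted. */
    static uint8_t dispatch[N_STATES][64];          /* 6 TCP flag bits */

    static void init_dispatch (void)
    {
      /* The peer may retransmit a lost FIN in these states */
      dispatch[ST_LAST_ACK][FLAG_FIN] = NEXT_RCV_PROCESS;
      dispatch[ST_LAST_ACK][FLAG_FIN | FLAG_ACK] = NEXT_RCV_PROCESS;
      dispatch[ST_TIME_WAIT][FLAG_FIN] = NEXT_RCV_PROCESS;
      dispatch[ST_TIME_WAIT][FLAG_FIN | FLAG_ACK] = NEXT_RCV_PROCESS;
    }

    static uint8_t next_node (int state, uint8_t flags)
    {
      return dispatch[state][flags & 0x3f];
    }
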
diff --git a/src/vnet/tcp/tcp_newreno.c b/src/vnet/tcp/tcp_newreno.c
index 3525f4e..c66250e 100644
--- a/src/vnet/tcp/tcp_newreno.c
+++ b/src/vnet/tcp/tcp_newreno.c
@@ -51,9 +51,23 @@
     }
   else if (ack_type == TCP_CC_PARTIALACK)
     {
-      tc->cwnd -= tc->bytes_acked;
-      if (tc->bytes_acked > tc->snd_mss)
-	tc->bytes_acked += tc->snd_mss;
+      /* RFC 6582 Sec. 3.2 */
+      if (!tcp_opts_sack_permitted (&tc->rcv_opts))
+	{
+	  /* Deflate the congestion window by the amount of new data
+	   * acknowledged by the Cumulative Acknowledgment field.
+	   * If the partial ACK acknowledges at least one SMSS of new data,
+	   * then add back SMSS bytes to the congestion window. This
+	   * artificially inflates the congestion window in order to reflect
+	   * the additional segment that has left the network. This "partial
+	   * window deflation" attempts to ensure that, when fast recovery
+	   * eventually ends, approximately ssthresh amount of data will be
+	   * outstanding in the network. */
+	  tc->cwnd = (tc->cwnd > tc->bytes_acked) ?
+	    tc->cwnd - tc->bytes_acked : 0;
+	  if (tc->bytes_acked >= tc->snd_mss)
+	    tc->cwnd += tc->snd_mss;
+	}
     }
 }
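
As a sanity check of the partial window deflation above, a self-contained
numeric example (values are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* RFC 6582 Sec. 3.2: deflate cwnd by the newly acked bytes; if at
     * least one SMSS was acked, add back one SMSS for the segment that
     * left the network. */
    static uint32_t deflate_cwnd (uint32_t cwnd, uint32_t bytes_acked,
                                  uint32_t snd_mss)
    {
      cwnd = (cwnd > bytes_acked) ? cwnd - bytes_acked : 0;
      if (bytes_acked >= snd_mss)
        cwnd += snd_mss;
      return cwnd;
    }

    int main (void)
    {
      /* cwnd of 10 x 1448B segments; a partial ACK covers 3 SMSS */
      printf ("%u\n", (unsigned) deflate_cwnd (10 * 1448, 3 * 1448, 1448));
      /* prints 11584, i.e. 8 SMSS remain outstanding-capacity */
      return 0;
    }
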
 
diff --git a/src/vnet/tcp/tcp_output.c b/src/vnet/tcp/tcp_output.c
index 49fd6be..47c94e6 100644
--- a/src/vnet/tcp/tcp_output.c
+++ b/src/vnet/tcp/tcp_output.c
@@ -136,10 +136,10 @@
    * Figure out how much space we have available
    */
   available_space = stream_session_max_rx_enqueue (&tc->connection);
-  max_fifo = stream_session_fifo_size (&tc->connection);
+  max_fifo = stream_session_rx_fifo_size (&tc->connection);
 
-  ASSERT (tc->opt.mss < max_fifo);
-  if (available_space < tc->opt.mss && available_space < max_fifo >> 3)
+  ASSERT (tc->rcv_opts.mss < max_fifo);
+  if (available_space < tc->rcv_opts.mss && available_space < max_fifo >> 3)
     available_space = 0;
 
   /*
@@ -276,8 +276,11 @@
   opts->tsecr = 0;
   len += TCP_OPTION_LEN_TIMESTAMP;
 
-  opts->flags |= TCP_OPTS_FLAG_SACK_PERMITTED;
-  len += TCP_OPTION_LEN_SACK_PERMITTED;
+  if (TCP_USE_SACKS)
+    {
+      opts->flags |= TCP_OPTS_FLAG_SACK_PERMITTED;
+      len += TCP_OPTION_LEN_SACK_PERMITTED;
+    }
 
   /* Align to needed boundary */
   len += (TCP_OPTS_ALIGN - len % TCP_OPTS_ALIGN) % TCP_OPTS_ALIGN;
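
The final step above pads the option length to a 4-byte boundary (assuming
TCP_OPTS_ALIGN is 4). A quick standalone check of the double-modulo idiom,
which leaves already-aligned lengths untouched:

    #include <assert.h>

    #define TCP_OPTS_ALIGN 4   /* assumed value */

    static unsigned opts_pad (unsigned len)
    {
      return len + (TCP_OPTS_ALIGN - len % TCP_OPTS_ALIGN) % TCP_OPTS_ALIGN;
    }

    int main (void)
    {
      assert (opts_pad (10) == 12);   /* 2 bytes of padding added */
      assert (opts_pad (12) == 12);   /* aligned: double modulo adds 0 */
      return 0;
    }
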
@@ -293,14 +296,14 @@
   opts->mss = tc->mss;
   len += TCP_OPTION_LEN_MSS;
 
-  if (tcp_opts_wscale (&tc->opt))
+  if (tcp_opts_wscale (&tc->rcv_opts))
     {
       opts->flags |= TCP_OPTS_FLAG_WSCALE;
       opts->wscale = tc->rcv_wscale;
       len += TCP_OPTION_LEN_WINDOW_SCALE;
     }
 
-  if (tcp_opts_tstamp (&tc->opt))
+  if (tcp_opts_tstamp (&tc->rcv_opts))
     {
       opts->flags |= TCP_OPTS_FLAG_TSTAMP;
       opts->tsval = tcp_time_now ();
@@ -308,7 +311,7 @@
       len += TCP_OPTION_LEN_TIMESTAMP;
     }
 
-  if (tcp_opts_sack_permitted (&tc->opt))
+  if (tcp_opts_sack_permitted (&tc->rcv_opts))
     {
       opts->flags |= TCP_OPTS_FLAG_SACK_PERMITTED;
       len += TCP_OPTION_LEN_SACK_PERMITTED;
@@ -326,14 +329,14 @@
 
   opts->flags = 0;
 
-  if (tcp_opts_tstamp (&tc->opt))
+  if (tcp_opts_tstamp (&tc->rcv_opts))
     {
       opts->flags |= TCP_OPTS_FLAG_TSTAMP;
       opts->tsval = tcp_time_now ();
       opts->tsecr = tc->tsval_recent;
       len += TCP_OPTION_LEN_TIMESTAMP;
     }
-  if (tcp_opts_sack_permitted (&tc->opt))
+  if (tcp_opts_sack_permitted (&tc->rcv_opts))
     {
       if (vec_len (tc->snd_sacks))
 	{
@@ -395,7 +398,7 @@
     tcp_make_options (tc, &tc->snd_opts, TCP_STATE_ESTABLISHED);
 
   /* XXX check if MTU has been updated */
-  tc->snd_mss = clib_min (tc->mss, tc->opt.mss) - tc->snd_opts_len;
+  tc->snd_mss = clib_min (tc->mss, tc->rcv_opts.mss) - tc->snd_opts_len;
   ASSERT (tc->snd_mss > 0);
 }
 
@@ -406,21 +409,21 @@
   tcp_update_rcv_mss (tc);
 
   /* TODO cache mss and consider PMTU discovery */
-  tc->snd_mss = clib_min (tc->opt.mss, tc->mss);
+  tc->snd_mss = clib_min (tc->rcv_opts.mss, tc->mss);
 
   if (tc->snd_mss < 45)
     {
       clib_warning ("snd mss is 0");
       /* Assume that at least the min default mss works */
       tc->snd_mss = default_min_mss;
-      tc->opt.mss = default_min_mss;
+      tc->rcv_opts.mss = default_min_mss;
     }
 
   /* We should have enough space for 40 bytes of options */
   ASSERT (tc->snd_mss > 45);
 
   /* If we use timestamp option, account for it */
-  if (tcp_opts_tstamp (&tc->opt))
+  if (tcp_opts_tstamp (&tc->rcv_opts))
     tc->snd_mss -= TCP_OPTION_LEN_TIMESTAMP;
 }
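
Both MSS hunks above compute the effective segment size the same way: the
minimum of the local MSS and the peer-advertised MSS, minus per-segment option
overhead. A hedged standalone sketch (the 10-byte timestamp option length is
per RFC 7323; the helper name is made up):

    #include <stdint.h>

    #define OPT_LEN_TIMESTAMP 10   /* kind(1) + len(1) + tsval(4) + tsecr(4) */

    static uint32_t effective_snd_mss (uint32_t our_mss, uint32_t peer_mss,
                                       int use_tstamp)
    {
      uint32_t mss = our_mss < peer_mss ? our_mss : peer_mss;
      if (use_tstamp)
        mss -= OPT_LEN_TIMESTAMP;   /* consumed on every segment */
      return mss;
    }
    /* e.g. effective_snd_mss (1460, 1460, 1) == 1450 */
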
 
@@ -879,6 +882,7 @@
   tcp_make_fin (tc, b);
   tcp_enqueue_to_output (vm, b, bi, tc->c_is_ip4);
   tc->flags |= TCP_CONN_FINSNT;
+  tcp_retransmit_timer_force_update (tc);
   TCP_EVT_DBG (TCP_EVT_FIN_SENT, tc);
 }
 
@@ -919,10 +923,7 @@
   if (compute_opts)
     tc->snd_opts_len = tcp_make_options (tc, &tc->snd_opts, tc->state);
 
-  /* Write pre-computed options */
   tcp_hdr_opts_len = tc->snd_opts_len + sizeof (tcp_header_t);
-
-  /* Get rcv window to advertise */
   advertise_wnd = tcp_window_to_advertise (tc, next_state);
   flags = tcp_make_state_flags (next_state);
 
@@ -930,26 +931,25 @@
   th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port, tc->snd_nxt,
 			     tc->rcv_nxt, tcp_hdr_opts_len, flags,
 			     advertise_wnd);
-
   opts_write_len = tcp_options_write ((u8 *) (th + 1), &tc->snd_opts);
 
   ASSERT (opts_write_len == tc->snd_opts_len);
-
-  /* Tag the buffer with the connection index  */
   vnet_buffer (b)->tcp.connection_index = tc->c_c_index;
 
+  /*
+   * Update connection variables
+   */
+
   tc->snd_nxt += data_len;
   tc->rcv_las = tc->rcv_nxt;
 
   /* TODO this is updated in output as well ... */
-  if (tc->snd_nxt > tc->snd_una_max)
-    tc->snd_una_max = tc->snd_nxt;
-
-  if (tc->rtt_ts == 0)
+  if (seq_gt (tc->snd_nxt, tc->snd_una_max))
     {
-      tc->rtt_ts = tcp_time_now ();
-      tc->rtt_seq = tc->snd_nxt;
+      tc->snd_una_max = tc->snd_nxt;
+      tcp_validate_txf_size (tc, tc->snd_una_max - tc->snd_una);
     }
+
   TCP_EVT_DBG (TCP_EVT_PKTIZE, tc);
 }
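
Note the switch above from a plain `>` to seq_gt when advancing snd_una_max:
TCP sequence numbers wrap, so comparisons must use serial-number arithmetic. A
sketch of what such a comparison does:

    #include <assert.h>
    #include <stdint.h>

    /* Interpret the 32-bit difference as signed, so values just past
     * the wrap point still compare as "greater". */
    static inline int seq_gt_sketch (uint32_t a, uint32_t b)
    {
      return (int32_t) (a - b) > 0;
    }

    int main (void)
    {
      assert (seq_gt_sketch (0x00000002, 0xfffffffe)); /* crossed the wrap */
      assert (!(0x00000002u > 0xfffffffeu));           /* plain '>' fails here */
      return 0;
    }
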
 
@@ -987,13 +987,14 @@
  *
  * @return the number of bytes in the segment or 0 if there's nothing to
  *         retransmit
- * */
+ */
 u32
 tcp_prepare_retransmit_segment (tcp_connection_t * tc, vlib_buffer_t * b,
 				u32 offset, u32 max_bytes)
 {
   vlib_main_t *vm = vlib_get_main ();
-  u32 n_bytes = 0;
+  int n_bytes = 0;
+  u32 start;
 
   tcp_reuse_buffer (vm, b);
 
@@ -1001,15 +1002,16 @@
   ASSERT (max_bytes != 0);
 
   max_bytes = clib_min (tc->snd_mss, max_bytes);
+  start = tc->snd_una + offset;
 
   /* Start is beyond snd_congestion */
-  if (seq_geq (tc->snd_una + offset, tc->snd_congestion))
+  if (seq_geq (start, tc->snd_congestion))
     goto done;
 
   /* Don't overshoot snd_congestion */
-  if (seq_gt (tc->snd_nxt + max_bytes, tc->snd_congestion))
+  if (seq_gt (start + max_bytes, tc->snd_congestion))
     {
-      max_bytes = tc->snd_congestion - tc->snd_nxt;
+      max_bytes = tc->snd_congestion - start;
       if (max_bytes == 0)
 	goto done;
     }
@@ -1021,15 +1023,12 @@
   n_bytes = stream_session_peek_bytes (&tc->connection,
 				       vlib_buffer_get_current (b), offset,
 				       max_bytes);
-  ASSERT (n_bytes != 0);
+  ASSERT (n_bytes > 0);
   b->current_length = n_bytes;
   tcp_push_hdr_i (tc, b, tc->state, 0);
 
-  /* Don't count multiple retransmits of the same segment */
-  if (tc->rto_boff > 1)
-    goto done;
-
-  tc->rtx_bytes += n_bytes;
+  if (tcp_in_fastrecovery (tc))
+    tc->snd_rxt_bytes += n_bytes;
 
 done:
   TCP_EVT_DBG (TCP_EVT_CC_RTX, tc, offset, n_bytes);
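
The clamping fix above keys off the segment's own start, snd_una + offset,
instead of snd_nxt, so retransmits at a non-zero offset no longer over- or
under-shoot snd_congestion. The bounds logic in isolation, using
wraparound-safe comparisons as sketched earlier:

    #include <stdint.h>

    static inline int seq_geq (uint32_t a, uint32_t b)
    { return (int32_t) (a - b) >= 0; }
    static inline int seq_gt (uint32_t a, uint32_t b)
    { return (int32_t) (a - b) > 0; }

    /* Returns how many bytes may be retransmitted from snd_una + offset
     * without passing snd_congestion; 0 means nothing to send. */
    static uint32_t clamp_rxt (uint32_t snd_una, uint32_t snd_congestion,
                               uint32_t offset, uint32_t max_bytes)
    {
      uint32_t start = snd_una + offset;
      if (seq_geq (start, snd_congestion))
        return 0;
      if (seq_gt (start + max_bytes, snd_congestion))
        max_bytes = snd_congestion - start;
      return max_bytes;
    }
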
@@ -1042,18 +1041,15 @@
 static void
 tcp_rtx_timeout_cc (tcp_connection_t * tc)
 {
+  tc->prev_ssthresh = tc->ssthresh;
+  tc->prev_cwnd = tc->cwnd;
+
   /* Cleanly recover cc (also clears up fast retransmit) */
   if (tcp_in_fastrecovery (tc))
-    {
-      tcp_cc_recover (tc);
-    }
-  else
-    {
-      tc->ssthresh = clib_max (tcp_flight_size (tc) / 2, 2 * tc->snd_mss);
-    }
+    tcp_cc_fastrecovery_exit (tc);
 
   /* Start again from the beginning */
-
+  tc->ssthresh = clib_max (tcp_flight_size (tc) / 2, 2 * tc->snd_mss);
   tc->cwnd = tcp_loss_wnd (tc);
   tc->snd_congestion = tc->snd_una_max;
   tcp_recovery_on (tc);
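
tcp_rtx_timeout_cc now snapshots ssthresh and cwnd before backing off. That is
what makes Eifel detection (RFC 3522) possible: if a later ACK echoes a
timestamp older than the first retransmit's timestamp, the original
transmission was acked, the timeout was spurious, and the snapshot can be
restored. A minimal sketch of the test, with illustrative field names:

    #include <stdint.h>

    typedef struct {
      uint32_t cwnd, ssthresh;
      uint32_t prev_cwnd, prev_ssthresh;  /* saved before the RTO backoff */
      uint32_t snd_rxt_ts;                /* timestamp of first retransmit */
    } conn_t;

    /* Returns 1 and undoes the congestion response if the echoed
     * timestamp predates the first retransmit (spurious timeout). */
    static int rxt_was_spurious (conn_t *c, uint32_t tsecr)
    {
      if ((int32_t) (tsecr - c->snd_rxt_ts) < 0)
        {
          c->cwnd = c->prev_cwnd;
          c->ssthresh = c->prev_ssthresh;
          return 1;
        }
      return 0;
    }
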
@@ -1081,18 +1077,31 @@
   /* Make sure timer handle is set to invalid */
   tc->timers[TCP_TIMER_RETRANSMIT] = TCP_TIMER_HANDLE_INVALID;
 
+  if (!tcp_in_recovery (tc) && tc->rto_boff > 0
+      && tc->state >= TCP_STATE_ESTABLISHED)
+    {
+      tc->rto_boff = 0;
+      tcp_update_rto (tc);
+    }
+
   /* Increment RTO backoff (also equal to number of retries) */
   tc->rto_boff += 1;
 
   /* Go back to first un-acked byte */
   tc->snd_nxt = tc->snd_una;
 
-  /* Get buffer */
   tcp_get_free_buffer_index (tm, &bi);
   b = vlib_get_buffer (vm, bi);
 
   if (tc->state >= TCP_STATE_ESTABLISHED)
     {
+      /* Lost FIN, retransmit and return */
+      if (tc->flags & TCP_CONN_FINSNT)
+	{
+	  tcp_send_fin (tc);
+	  return;
+	}
+
       /* First retransmit timeout */
       if (tc->rto_boff == 1)
 	tcp_rtx_timeout_cc (tc);
@@ -1102,24 +1111,30 @@
 
       TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 1);
 
-      /* Send one segment. No fancy recovery for now! */
+      /* Send one segment */
       n_bytes = tcp_prepare_retransmit_segment (tc, b, 0, tc->snd_mss);
+      /* TODO be less aggressive about this */
       scoreboard_clear (&tc->sack_sb);
 
       if (n_bytes == 0)
 	{
 	  clib_warning ("could not retransmit anything");
+	  clib_warning ("%U", format_tcp_connection, tc, 2);
+
 	  /* Try again eventually */
 	  tcp_retransmit_timer_set (tc);
+	  ASSERT (tc->rto_boff > 1 && tc->snd_una == tc->snd_congestion);
 	  return;
 	}
-    }
-  else
-    {
-      /* Retransmit for SYN/SYNACK */
-      ASSERT (tc->state == TCP_STATE_SYN_RCVD
-	      || tc->state == TCP_STATE_SYN_SENT);
 
+      /* For first retransmit, record timestamp (Eifel detection RFC 3522) */
+      if (tc->rto_boff == 1)
+	tc->snd_rxt_ts = tcp_time_now ();
+    }
+  /* Retransmit for SYN/SYNACK */
+  else if (tc->state == TCP_STATE_SYN_RCVD || tc->state == TCP_STATE_SYN_SENT)
+    {
       /* Try without increasing RTO a number of times. If this fails,
        * start growing RTO exponentially */
       if (tc->rto_boff > TCP_RTO_SYN_RETRIES)
@@ -1132,6 +1147,12 @@
       /* Account for the SYN */
       tc->snd_nxt += 1;
     }
+  else
+    {
+      ASSERT (tc->state == TCP_STATE_CLOSED);
+      clib_warning ("connection closed ...");
+      return;
+    }
 
   if (!is_syn)
     {
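
For context on rto_boff in the timeout handler above: it doubles as the retry
counter, and each unsuccessful retry doubles the effective RTO up to a cap. A
generic sketch of that backoff (constants are illustrative, not the VPP
values):

    #include <stdint.h>

    #define RTO_MAX_MS 60000   /* illustrative cap */

    static uint32_t backed_off_rto (uint32_t base_rto_ms, uint32_t rto_boff)
    {
      /* cap the shift so the multiplication cannot overflow */
      uint64_t rto = (uint64_t) base_rto_ms << (rto_boff > 6 ? 6 : rto_boff);
      return rto > RTO_MAX_MS ? RTO_MAX_MS : (uint32_t) rto;
    }
    /* e.g. base 200 ms: 200, 400, 800, 1600, ... clamped at 60 s */
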
@@ -1180,7 +1201,8 @@
   u32 thread_index = vlib_get_thread_index ();
   tcp_connection_t *tc;
   vlib_buffer_t *b;
-  u32 bi, n_bytes;
+  u32 bi, old_snd_nxt;
+  int n_bytes = 0;
 
   tc = tcp_connection_get_if_valid (index, thread_index);
 
@@ -1202,13 +1224,15 @@
   /* Try to force the first unsent segment  */
   tcp_get_free_buffer_index (tm, &bi);
   b = vlib_get_buffer (vm, bi);
+
+  tcp_validate_txf_size (tc, tc->snd_una_max - tc->snd_una);
   tc->snd_opts_len = tcp_make_options (tc, &tc->snd_opts, tc->state);
   n_bytes = stream_session_peek_bytes (&tc->connection,
 				       vlib_buffer_get_current (b),
 				       tc->snd_una_max - tc->snd_una,
 				       tc->snd_mss);
   /* Nothing to send */
-  if (n_bytes == 0)
+  if (n_bytes <= 0)
     {
       clib_warning ("persist found nothing to send");
       tcp_return_buffer (tm);
@@ -1216,7 +1240,13 @@
     }
 
   b->current_length = n_bytes;
+  ASSERT (tc->snd_nxt == tc->snd_una_max || tc->rto_boff > 1
+	  || tcp_timer_is_active (tc, TCP_TIMER_RETRANSMIT));
+
+  /* Allow updating of snd_una_max but don't update snd_nxt */
+  old_snd_nxt = tc->snd_nxt;
   tcp_push_hdr_i (tc, b, tc->state, 0);
+  tc->snd_nxt = old_snd_nxt;
   tcp_enqueue_to_output (vm, b, bi, tc->c_is_ip4);
 
   /* Re-enable persist timer */
@@ -1232,8 +1262,9 @@
   tcp_main_t *tm = vnet_get_tcp_main ();
   vlib_main_t *vm = vlib_get_main ();
   vlib_buffer_t *b;
-  u32 bi, n_bytes;
+  u32 bi, n_bytes, old_snd_nxt;
 
+  old_snd_nxt = tc->snd_nxt;
   tc->snd_nxt = tc->snd_una;
 
   /* Get buffer */
@@ -1244,75 +1275,117 @@
 
   n_bytes = tcp_prepare_retransmit_segment (tc, b, 0, tc->snd_mss);
   if (n_bytes == 0)
-    goto done;
+    {
+      tcp_return_buffer (tm);
+      goto done;
+    }
 
   tcp_enqueue_to_output (vm, b, bi, tc->c_is_ip4);
 
 done:
-  tc->snd_nxt = tc->snd_una_max;
-}
-
-sack_scoreboard_hole_t *
-scoreboard_first_rtx_hole (sack_scoreboard_t * sb)
-{
-  sack_scoreboard_hole_t *hole = 0;
-
-//  hole = scoreboard_first_hole (&tc->sack_sb);
-//  if (hole)
-//    {
-//
-//      offset = hole->start - tc->snd_una;
-//      hole_size = hole->end - hole->start;
-//
-//      ASSERT(hole_size);
-//
-//      if (hole_size < max_bytes)
-//      max_bytes = hole_size;
-//    }
-  return hole;
+  tc->snd_nxt = old_snd_nxt;
 }
 
 /**
- * Do fast retransmit.
+ * Do fast retransmit with SACKs
  */
 void
-tcp_fast_retransmit (tcp_connection_t * tc)
+tcp_fast_retransmit_sack (tcp_connection_t * tc)
 {
   tcp_main_t *tm = vnet_get_tcp_main ();
   vlib_main_t *vm = vlib_get_main ();
-  u32 bi;
-  int snd_space;
-  u32 n_written = 0, offset = 0;
+  u32 n_written = 0, offset = 0, max_bytes;
   vlib_buffer_t *b;
-  u8 use_sacks = 0;
+  sack_scoreboard_hole_t *hole;
+  sack_scoreboard_t *sb;
+  u32 bi, old_snd_nxt;
+  int snd_space;
+  u8 snd_limited = 0, can_rescue = 0;
 
   ASSERT (tcp_in_fastrecovery (tc));
-
-  /* Start resending from first un-acked segment */
-  tc->snd_nxt = tc->snd_una;
-
-  snd_space = tcp_available_snd_space (tc);
   TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 0);
 
-  /* If we have SACKs use them */
-  if (tcp_opts_sack_permitted (&tc->opt)
-      && scoreboard_first_hole (&tc->sack_sb))
-    use_sacks = 0;
+  old_snd_nxt = tc->snd_nxt;
+  sb = &tc->sack_sb;
+  snd_space = tcp_available_snd_space (tc);
+
+  hole = scoreboard_get_hole (sb, sb->cur_rxt_hole);
+  while (hole && snd_space > 0)
+    {
+      tcp_get_free_buffer_index (tm, &bi);
+      b = vlib_get_buffer (vm, bi);
+
+      hole = scoreboard_next_rxt_hole (sb, hole,
+				       tcp_fastrecovery_sent_1_smss (tc),
+				       &can_rescue, &snd_limited);
+      if (!hole)
+	{
+	  if (!can_rescue || !(seq_lt (sb->rescue_rxt, tc->snd_una)
+			       || seq_gt (sb->rescue_rxt,
+					  tc->snd_congestion)))
+	    break;
+
+	  /* If rescue_rxt is undefined (less than snd_una or beyond
+	   * snd_congestion), one segment of up to SMSS octets that MUST
+	   * include the highest outstanding unSACKed sequence number SHOULD
+	   * be returned, and RescueRxt set to RecoveryPoint. HighRxt MUST
+	   * NOT be updated (RFC 6675, NextSeg () rule (4)).
+	   */
+	  max_bytes = clib_min (tc->snd_mss, snd_space);
+	  offset = tc->snd_congestion - tc->snd_una - max_bytes;
+	  sb->rescue_rxt = tc->snd_congestion;
+	  tc->snd_nxt = tc->snd_una + offset;
+	  tcp_prepare_retransmit_segment (tc, b, offset, max_bytes);
+	  tcp_enqueue_to_output (vm, b, bi, tc->c_is_ip4);
+	  break;
+	}
+
+      max_bytes = snd_limited ? tc->snd_mss : hole->end - sb->high_rxt;
+      offset = sb->high_rxt - tc->snd_una;
+      tc->snd_nxt = tc->snd_una + offset;
+      n_written = tcp_prepare_retransmit_segment (tc, b, offset, max_bytes);
+
+      /* Nothing left to retransmit */
+      if (n_written == 0)
+	{
+	  tcp_return_buffer (tm);
+	  break;
+	}
+
+      sb->high_rxt += n_written;
+      tcp_enqueue_to_output (vm, b, bi, tc->c_is_ip4);
+      snd_space -= n_written;
+    }
+
+  /* Restore snd_nxt. If window allows, send 1 SMSS of new data */
+  tc->snd_nxt = old_snd_nxt;
+}
+
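The loop above is the core of RFC 6675 recovery: walk scoreboard holes beyond
high_rxt, retransmit up to one SMSS from each, and fall back to a rescue
retransmission when no hole qualifies. A simplified standalone model of the
hole selection (list-based and ignoring sequence wraparound; the real
scoreboard is a pool):

    #include <stdint.h>

    typedef struct hole { uint32_t start, end; struct hole *next; } hole_t;

    /* Pick the next chunk to retransmit: the first un-retransmitted
     * byte range of a hole, capped at one SMSS. Returns the byte count
     * and sets *offset relative to snd_una; 0 means no hole qualifies
     * (the caller may then attempt the rescue retransmit). */
    static uint32_t next_rxt_chunk (hole_t *h, uint32_t snd_una,
                                    uint32_t high_rxt, uint32_t snd_mss,
                                    uint32_t *offset)
    {
      while (h && h->end <= high_rxt)     /* hole already covered */
        h = h->next;
      if (!h)
        return 0;
      uint32_t start = high_rxt > h->start ? high_rxt : h->start;
      uint32_t len = h->end - start;
      *offset = start - snd_una;
      return len > snd_mss ? snd_mss : len;
    }
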
+/**
+ * Fast retransmit without SACK info
+ */
+void
+tcp_fast_retransmit_no_sack (tcp_connection_t * tc)
+{
+  tcp_main_t *tm = vnet_get_tcp_main ();
+  vlib_main_t *vm = vlib_get_main ();
+  u32 n_written = 0, offset = 0, bi, old_snd_nxt;
+  int snd_space;
+  vlib_buffer_t *b;
+
+  ASSERT (tcp_in_fastrecovery (tc));
+  TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 0);
+
+  /* Start resending from first un-acked segment */
+  old_snd_nxt = tc->snd_nxt;
+  tc->snd_nxt = tc->snd_una;
+  snd_space = tcp_available_snd_space (tc);
 
   while (snd_space > 0)
     {
       tcp_get_free_buffer_index (tm, &bi);
       b = vlib_get_buffer (vm, bi);
 
-      if (use_sacks)
-	{
-	  scoreboard_first_rtx_hole (&tc->sack_sb);
-	}
-      else
-	{
-	  offset += n_written;
-	}
-
+      offset += n_written;
       n_written = tcp_prepare_retransmit_segment (tc, b, offset, snd_space);
 
       /* Nothing left to retransmit */
@@ -1326,9 +1399,21 @@
       snd_space -= n_written;
     }
 
-  /* If window allows, send 1 SMSS of new data */
-  if (seq_lt (tc->snd_nxt, tc->snd_congestion))
-    tc->snd_nxt = tc->snd_congestion;
+  /* Restore snd_nxt. If window allows, send 1 SMSS of new data */
+  tc->snd_nxt = old_snd_nxt;
+}
+
+/**
+ * Do fast retransmit
+ */
+void
+tcp_fast_retransmit (tcp_connection_t * tc)
+{
+  if (tcp_opts_sack_permitted (&tc->rcv_opts)
+      && scoreboard_first_hole (&tc->sack_sb))
+    tcp_fast_retransmit_sack (tc);
+  else
+    tcp_fast_retransmit_no_sack (tc);
 }
 
 always_inline u32
@@ -1544,6 +1629,12 @@
 
   tc = (tcp_connection_t *) tconn;
   tcp_push_hdr_i (tc, b, TCP_STATE_ESTABLISHED, 0);
+
+  if (tc->rtt_ts == 0)
+    {
+      tc->rtt_ts = tcp_time_now ();
+      tc->rtt_seq = tc->snd_nxt;
+    }
   return 0;
 }
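
The hunk above starts an RTT measurement only when none is outstanding
(rtt_ts == 0), i.e., one sample per window of data, consistent with Karn's
rule of never timing retransmitted segments. A minimal sketch with
illustrative names:

    #include <stdint.h>

    typedef struct { uint32_t rtt_ts, rtt_seq; } rtt_state_t;

    /* Arm the RTT sample on fresh data only; the sample completes (and
     * rtt_ts returns to 0) when rtt_seq is acked. */
    static void maybe_start_rtt_sample (rtt_state_t *r, uint32_t now,
                                        uint32_t snd_nxt)
    {
      if (r->rtt_ts == 0)
        {
          r->rtt_ts = now;
          r->rtt_seq = snd_nxt;
        }
    }
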
 
diff --git a/src/vnet/tcp/tcp_test.c b/src/vnet/tcp/tcp_test.c
index 2af3848..3f8afa4 100644
--- a/src/vnet/tcp/tcp_test.c
+++ b/src/vnet/tcp/tcp_test.c
@@ -54,7 +54,7 @@
   tc->snd_una = 0;
   tc->snd_una_max = 1000;
   tc->snd_nxt = 1000;
-  tc->opt.flags |= TCP_OPTS_FLAG_SACK;
+  tc->rcv_opts.flags |= TCP_OPTS_FLAG_SACK;
   scoreboard_init (&tc->sack_sb);
 
   for (i = 0; i < 1000 / 100; i++)
@@ -70,9 +70,9 @@
 
   for (i = 0; i < 1000 / 200; i++)
     {
-      vec_add1 (tc->opt.sacks, sacks[i * 2]);
+      vec_add1 (tc->rcv_opts.sacks, sacks[i * 2]);
     }
-  tc->opt.n_sack_blocks = vec_len (tc->opt.sacks);
+  tc->rcv_opts.n_sack_blocks = vec_len (tc->rcv_opts.sacks);
   tcp_rcv_sacks (tc, 0);
 
   if (verbose)
@@ -93,18 +93,17 @@
   TCP_TEST ((sb->snd_una_adv == 0), "snd_una_adv %u", sb->snd_una_adv);
   TCP_TEST ((sb->last_sacked_bytes == 400),
 	    "last sacked bytes %d", sb->last_sacked_bytes);
-  TCP_TEST ((sb->max_byte_sacked == 900),
-	    "max byte sacked %u", sb->max_byte_sacked);
+  TCP_TEST ((sb->high_sacked == 900), "max byte sacked %u", sb->high_sacked);
   /*
    * Inject odd blocks
    */
 
-  vec_reset_length (tc->opt.sacks);
+  vec_reset_length (tc->rcv_opts.sacks);
   for (i = 0; i < 1000 / 200; i++)
     {
-      vec_add1 (tc->opt.sacks, sacks[i * 2 + 1]);
+      vec_add1 (tc->rcv_opts.sacks, sacks[i * 2 + 1]);
     }
-  tc->opt.n_sack_blocks = vec_len (tc->opt.sacks);
+  tc->rcv_opts.n_sack_blocks = vec_len (tc->rcv_opts.sacks);
   tcp_rcv_sacks (tc, 0);
 
   if (verbose)
@@ -118,8 +117,7 @@
 	    "first hole start %u end %u", hole->start, hole->end);
   TCP_TEST ((sb->sacked_bytes == 900), "sacked bytes %d", sb->sacked_bytes);
   TCP_TEST ((sb->snd_una_adv == 0), "snd_una_adv %u", sb->snd_una_adv);
-  TCP_TEST ((sb->max_byte_sacked == 1000),
-	    "max sacked byte %u", sb->max_byte_sacked);
+  TCP_TEST ((sb->high_sacked == 1000), "max sacked byte %u", sb->high_sacked);
   TCP_TEST ((sb->last_sacked_bytes == 500),
 	    "last sacked bytes %d", sb->last_sacked_bytes);
 
@@ -135,8 +133,7 @@
 	    "scoreboard has %d elements", pool_elts (sb->holes));
   TCP_TEST ((sb->snd_una_adv == 900),
 	    "snd_una_adv after ack %u", sb->snd_una_adv);
-  TCP_TEST ((sb->max_byte_sacked == 1000),
-	    "max sacked byte %u", sb->max_byte_sacked);
+  TCP_TEST ((sb->high_sacked == 1000), "max sacked byte %u", sb->high_sacked);
   TCP_TEST ((sb->sacked_bytes == 0), "sacked bytes %d", sb->sacked_bytes);
   TCP_TEST ((sb->last_sacked_bytes == 0),
 	    "last sacked bytes %d", sb->last_sacked_bytes);
@@ -145,11 +142,11 @@
    * Add new block
    */
 
-  vec_reset_length (tc->opt.sacks);
+  vec_reset_length (tc->rcv_opts.sacks);
 
   block.start = 1200;
   block.end = 1300;
-  vec_add1 (tc->opt.sacks, block);
+  vec_add1 (tc->rcv_opts.sacks, block);
 
   if (verbose)
     vlib_cli_output (vm, "add [1200, 1300]:\n%U", format_tcp_scoreboard, sb);
@@ -171,8 +168,7 @@
 	    "first hole start %u end %u", hole->start, hole->end);
   TCP_TEST ((sb->snd_una_adv == 0),
 	    "snd_una_adv after ack %u", sb->snd_una_adv);
-  TCP_TEST ((sb->max_byte_sacked == 1300),
-	    "max sacked byte %u", sb->max_byte_sacked);
+  TCP_TEST ((sb->high_sacked == 1300), "max sacked byte %u", sb->high_sacked);
   hole = scoreboard_last_hole (sb);
   TCP_TEST ((hole->start == 1300 && hole->end == 1500),
 	    "last hole start %u end %u", hole->start, hole->end);
@@ -182,7 +178,7 @@
    * Ack first hole
    */
 
-  vec_reset_length (tc->opt.sacks);
+  vec_reset_length (tc->rcv_opts.sacks);
   tcp_rcv_sacks (tc, 1200);
 
   if (verbose)
@@ -196,8 +192,16 @@
 	    "scoreboard has %d elements", pool_elts (sb->holes));
 
   /*
-   * Remove all
+   * Add some more blocks and then remove all
    */
+  vec_reset_length (tc->rcv_opts.sacks);
+  for (i = 0; i < 5; i++)
+    {
+      block.start = i * 100 + 1200;
+      block.end = (i + 1) * 100 + 1200;
+      vec_add1 (tc->rcv_opts.sacks, block);
+    }
+  tcp_rcv_sacks (tc, 1900);
 
   scoreboard_clear (sb);
   if (verbose)
@@ -205,6 +209,9 @@
 
   TCP_TEST ((pool_elts (sb->holes) == 0),
 	    "number of holes %d", pool_elts (sb->holes));
+  TCP_TEST ((sb->head == TCP_INVALID_SACK_HOLE_INDEX), "head %u", sb->head);
+  TCP_TEST ((sb->tail == TCP_INVALID_SACK_HOLE_INDEX), "tail %u", sb->tail);
+
   /*
    * Re-inject odd blocks and ack them all
    */
@@ -214,9 +221,9 @@
   tc->snd_nxt = 1000;
   for (i = 0; i < 5; i++)
     {
-      vec_add1 (tc->opt.sacks, sacks[i * 2 + 1]);
+      vec_add1 (tc->rcv_opts.sacks, sacks[i * 2 + 1]);
     }
-  tc->opt.n_sack_blocks = vec_len (tc->opt.sacks);
+  tc->rcv_opts.n_sack_blocks = vec_len (tc->rcv_opts.sacks);
   tcp_rcv_sacks (tc, 0);
   if (verbose)
     vlib_cli_output (vm, "sb added odd blocks and ack [0, 950]:\n%U",
@@ -740,6 +747,10 @@
       TCP_TEST (0, "[%d] peeked %u expected %u", j, data_buf[j], data[j]);
     }
 
+  /* Try to peek beyond the data */
+  rv = svm_fifo_peek (f, svm_fifo_max_dequeue (f), vec_len (data), data_buf);
+  TCP_TEST ((rv == 0), "peeked %u expected 0", rv);
+
   vec_free (data_buf);
   svm_fifo_free (f);
   vec_free (test_data);
@@ -1239,7 +1250,7 @@
       tc0->c_thread_index = 0;
       tc0->c_lcl_ip4.as_u32 = local.as_u32;
       tc0->c_rmt_ip4.as_u32 = remote.as_u32;
-      tc0->opt.mss = 1450;
+      tc0->rcv_opts.mss = 1450;
       tcp_connection_init_vars (tc0);
 
       TCP_EVT_DBG (TCP_EVT_OPEN, tc0);