/*
 * Copyright (c) 2017 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/tcp/tcp.h>

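/* Congestion signal: set ssthresh to half the data currently in flight,
 * but never below two segments (RFC 5681). */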
void
newreno_congestion (tcp_connection_t * tc)
{
  tc->ssthresh = clib_max (tcp_flight_size (tc) / 2, 2 * tc->snd_mss);
}

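/* Fast recovery completed: deflate cwnd back to the slow start threshold. */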
void
newreno_recovered (tcp_connection_t * tc)
{
  tc->cwnd = tc->ssthresh;
}

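/* In-order ack. In slow start, grow cwnd by at most one segment per ack.
 * In congestion avoidance, accumulate acked bytes and grow cwnd by one
 * segment per cwnd's worth of acked data, i.e., roughly one segment per
 * RTT; this byte counting avoids the integer truncation of the classic
 * snd_mss * snd_mss / cwnd increment. cwnd is also capped to what the
 * connection's tx fifo can hold. */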
void
newreno_rcv_ack (tcp_connection_t * tc)
{
  if (tcp_in_slowstart (tc))
    {
      tc->cwnd += clib_min (tc->snd_mss, tc->bytes_acked);
    }
  else
    {
      /* tc->cwnd += clib_max ((tc->snd_mss * tc->snd_mss) / tc->cwnd, 1); */
      tc->cwnd_acc_bytes += tc->bytes_acked;
      if (tc->cwnd_acc_bytes >= tc->cwnd)
        {
          u32 inc = tc->cwnd_acc_bytes / tc->cwnd;
          tc->cwnd_acc_bytes -= inc * tc->cwnd;
          tc->cwnd += inc * tc->snd_mss;
        }
      tc->cwnd = clib_min (tc->cwnd,
                           transport_tx_fifo_size (&tc->connection));
    }
}

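/* Ack received while in fast recovery. Without SACK, a duplicate ack
 * inflates cwnd by one segment (RFC 5681) and a partial ack performs
 * NewReno's partial window deflation (RFC 6582). */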
void
newreno_rcv_cong_ack (tcp_connection_t * tc, tcp_cc_ack_t ack_type)
{
  if (ack_type == TCP_CC_DUPACK)
    {
      if (!tcp_opts_sack_permitted (&tc->rcv_opts))
        tc->cwnd += tc->snd_mss;
    }
  else if (ack_type == TCP_CC_PARTIALACK)
    {
      /* RFC 6582 Sec. 3.2 */
      if (!tcp_opts_sack_permitted (&tc->rcv_opts))
        {
          /* Deflate the congestion window by the amount of new data
           * acknowledged by the Cumulative Acknowledgment field.
           * If the partial ACK acknowledges at least one SMSS of new data,
           * then add back SMSS bytes to the congestion window. This
           * artificially inflates the congestion window in order to reflect
           * the additional segment that has left the network. This "partial
           * window deflation" attempts to ensure that, when fast recovery
           * eventually ends, approximately ssthresh amount of data will be
           * outstanding in the network. */
          tc->cwnd = (tc->cwnd > tc->bytes_acked + tc->snd_mss) ?
            tc->cwnd - tc->bytes_acked : tc->snd_mss;
          if (tc->bytes_acked > tc->snd_mss)
            tc->cwnd += tc->snd_mss;
        }
    }
}

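/* New connection: start with ssthresh set to the peer's advertised window
 * and cwnd set to the initial congestion window. */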
void
newreno_conn_init (tcp_connection_t * tc)
{
  tc->ssthresh = tc->snd_wnd;
  tc->cwnd = tcp_initial_cwnd (tc);
}

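/* Callback table hooking NewReno into the TCP stack's congestion control
 * framework. */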
const static tcp_cc_algorithm_t tcp_newreno = {
  .congestion = newreno_congestion,
  .recovered = newreno_recovered,
  .rcv_ack = newreno_rcv_ack,
  .rcv_cong_ack = newreno_rcv_cong_ack,
  .init = newreno_conn_init
};

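/* Register NewReno as a congestion control algorithm at vlib init time. */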
clib_error_t *
newreno_init (vlib_main_t * vm)
{
  clib_error_t *error = 0;

  tcp_cc_algo_register (TCP_CC_NEWRENO, &tcp_newreno);

  return error;
}

VLIB_INIT_FUNCTION (newreno_init);

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */