/*
 * Copyright (c) 2017-2019 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/tcp/tcp.h>
#include <vnet/tcp/tcp_inlines.h>
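
/* Runtime tunables for New Reno; the default ssthresh (0x7FFFFFFF) is
 * effectively unlimited, so slow start runs until loss or reconfiguration. */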
typedef struct newreno_cfg_
{
  u32 ssthresh;
} newreno_cfg_t;

static newreno_cfg_t newreno_cfg = {
  .ssthresh = 0x7FFFFFFFU,
};
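
/* Congestion event while not in recovery: per RFC 5681, halve the effective
 * window, i.e. ssthresh = max (FlightSize / 2, 2 * SMSS), and continue from
 * the reduced window. */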
static void
newreno_congestion (tcp_connection_t * tc)
{
  tc->ssthresh = clib_max (tcp_flight_size (tc) / 2, 2 * tc->snd_mss);
  tc->cwnd = tc->ssthresh;
}
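
/* Retransmission timeout: collapse cwnd to the loss window. */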
static void
newreno_loss (tcp_connection_t * tc)
{
  tc->cwnd = tcp_loss_wnd (tc);
}
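
/* Fast recovery completed: resume congestion avoidance from ssthresh. */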
static void
newreno_recovered (tcp_connection_t * tc)
{
  tc->cwnd = tc->ssthresh;
}
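
/* Normal ACK outside of recovery: in slow start, grow cwnd by
 * min (SMSS, bytes_acked) per ACK; in congestion avoidance, accumulate acked
 * bytes so cwnd grows by roughly one SMSS per window of data acknowledged
 * (RFC 5681). */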
static void
newreno_rcv_ack (tcp_connection_t * tc, tcp_rate_sample_t * rs)
{
  if (tcp_in_slowstart (tc))
    {
      tc->cwnd += clib_min (tc->snd_mss, tc->bytes_acked);
    }
  else
    {
      /* tc->cwnd += clib_max ((tc->snd_mss * tc->snd_mss) / tc->cwnd, 1); */
      tcp_cwnd_accumulate (tc, tc->cwnd, tc->bytes_acked);
    }
}
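
/* ACK processing while in fast recovery. Window inflation on dupacks and
 * partial window deflation (RFC 6582) only apply when the peer did not
 * negotiate SACK. */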
void
newreno_rcv_cong_ack (tcp_connection_t * tc, tcp_cc_ack_t ack_type,
                      tcp_rate_sample_t * rs)
{
  if (ack_type == TCP_CC_DUPACK)
    {
      if (!tcp_opts_sack_permitted (&tc->rcv_opts))
        tc->cwnd += tc->snd_mss;
    }
  else if (ack_type == TCP_CC_PARTIALACK)
    {
      /* RFC 6582 Sec. 3.2 */
      if (!tcp_opts_sack_permitted (&tc->rcv_opts))
        {
          /* Deflate the congestion window by the amount of new data
           * acknowledged by the Cumulative Acknowledgment field.
           * If the partial ACK acknowledges at least one SMSS of new data,
           * then add back SMSS bytes to the congestion window. This
           * artificially inflates the congestion window in order to reflect
           * the additional segment that has left the network. This "partial
           * window deflation" attempts to ensure that, when fast recovery
           * eventually ends, approximately ssthresh amount of data will be
           * outstanding in the network. */
          tc->cwnd = (tc->cwnd > tc->bytes_acked + tc->snd_mss) ?
            tc->cwnd - tc->bytes_acked : tc->snd_mss;
          if (tc->bytes_acked > tc->snd_mss)
            tc->cwnd += tc->snd_mss;
        }
    }
}
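
/* New connection: seed ssthresh from the configured value and cwnd from the
 * initial window. */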
static void
newreno_conn_init (tcp_connection_t * tc)
{
  tc->ssthresh = newreno_cfg.ssthresh;
  tc->cwnd = tcp_initial_cwnd (tc);
}
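
/* Parse the algorithm's config stanza, e.g. "ssthresh <n>". Returns 1 on
 * success, 0 on unrecognized input. */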
static uword
newreno_unformat_config (unformat_input_t * input)
{
  u32 ssthresh = 0x7FFFFFFFU;

  if (!input)
    return 0;

  unformat_skip_white_space (input);

  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (input, "ssthresh %u", &ssthresh))
        newreno_cfg.ssthresh = ssthresh;
      else
        return 0;
    }
  return 1;
}
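
/* Callback table for New Reno, registered as TCP_CC_NEWRENO below. */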
static const tcp_cc_algorithm_t tcp_newreno = {
  .name = "newreno",
  .unformat_cfg = newreno_unformat_config,
  .congestion = newreno_congestion,
  .loss = newreno_loss,
  .recovered = newreno_recovered,
  .rcv_ack = newreno_rcv_ack,
  .rcv_cong_ack = newreno_rcv_cong_ack,
  .init = newreno_conn_init
};
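
/* Register the algorithm with the TCP stack at init time. */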
clib_error_t *
newreno_init (vlib_main_t * vm)
{
  clib_error_t *error = 0;

  tcp_cc_algo_register (TCP_CC_NEWRENO, &tcp_newreno);

  return error;
}

VLIB_INIT_FUNCTION (newreno_init);

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */