/*
 * net/sched/sch_red.c	Random Early Detection queue.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 * J Hadi Salim 980914:	computation fixes
 * Alexey Makarenko <makar@phoenix.kharkov.ua> 990814: qave on idle link was calculated incorrectly.
 * J Hadi Salim 980816:	ECN support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
#include <net/red.h>


/*	Parameters, settable by user:
	-----------------------------

	limit		- bytes (must be > qth_max + burst)

	Hard limit on queue length: it should be chosen greater than
	qth_max to allow packet bursts. This parameter does not affect
	the algorithm's behaviour and can be chosen arbitrarily high
	(well, less than RAM size). In fact, this limit is never reached
	if RED works correctly.
 */
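
/* Illustrative sizing (hypothetical numbers, not from this file): with
 * qth_max = 30000 bytes and an expected burst of 20 full-size packets
 * (~30000 bytes), limit should comfortably exceed 60000 bytes. Roughly,
 * from userspace (tc syntax may vary by iproute2 version):
 *
 *	tc qdisc add dev eth0 root red limit 100000 min 10000 max 30000 \
 *		avpkt 1000 burst 20 probability 0.02 bandwidth 10mbit
 */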

struct red_sched_data {
	u32			limit;		/* HARD maximal queue length */
	unsigned char		flags;
	struct timer_list	adapt_timer;
	struct red_parms	parms;
	struct red_vars		vars;
	struct red_stats	stats;
	struct Qdisc		*qdisc;
};

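/* Flag accessors: TC_RED_ECN and TC_RED_HARDDROP are set by the user via
 * netlink (see red_change() below). They select ECN marking instead of
 * dropping, and a forced drop in the hard-mark region even when ECN is
 * enabled, respectively.
 */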
static inline int red_use_ecn(struct red_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

static inline int red_use_harddrop(struct red_sched_data *q)
{
	return q->flags & TC_RED_HARDDROP;
}

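/* Enqueue: recompute the exponentially weighted moving average of the
 * queue length from the child's backlog, conceptually
 * qavg <- qavg + (backlog - qavg) * 2^-Wlog (the kernel stores qavg
 * pre-scaled by Wlog), then let red_action() decide: below qth_min the
 * packet passes untouched, between qth_min and qth_max it is marked or
 * dropped with a probability growing towards max_P, above qth_max it is
 * always marked or dropped. With ECN enabled, packets are CE-marked
 * instead of dropped where possible.
 */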
static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	int ret;

	q->vars.qavg = red_calc_qavg(&q->parms,
				     &q->vars,
				     child->qstats.backlog);

	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	switch (red_action(&q->parms, &q->vars, q->vars.qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		qdisc_qstats_overlimit(sch);
		if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		q->stats.prob_mark++;
		break;

	case RED_HARD_MARK:
		qdisc_qstats_overlimit(sch);
		if (red_use_harddrop(q) || !red_use_ecn(q) ||
		    !INET_ECN_set_ce(skb)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}

		q->stats.forced_mark++;
		break;
	}

	ret = qdisc_enqueue(skb, child);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->q.qlen++;
	} else if (net_xmit_drop_count(ret)) {
		q->stats.pdrop++;
		qdisc_qstats_drop(sch);
	}
	return ret;

congestion_drop:
	qdisc_drop(skb, sch);
	return NET_XMIT_CN;
}

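/* Dequeue: pull from the child qdisc; when the child runs empty, record
 * the start of an idle period so that qavg decays correctly while the
 * link is idle (see the 990814 fix in the changelog above).
 */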
static struct sk_buff *red_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	skb = child->dequeue(child);
	if (skb) {
		qdisc_bstats_update(sch, skb);
		sch->q.qlen--;
	} else {
		if (!red_is_idling(&q->vars))
			red_start_of_idle_period(&q->vars);
	}
	return skb;
}

static struct sk_buff *red_peek(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	return child->ops->peek(child);
}

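/* ->drop(): ask the child to drop one packet (counted as "other" in the
 * xstats); if nothing can be dropped, the queue is empty and an idle
 * period begins.
 */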
static unsigned int red_drop(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	unsigned int len;

	if (child->ops->drop && (len = child->ops->drop(child)) > 0) {
		q->stats.other++;
		qdisc_qstats_drop(sch);
		sch->q.qlen--;
		return len;
	}

	if (!red_is_idling(&q->vars))
		red_start_of_idle_period(&q->vars);

	return 0;
}

static void red_reset(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	red_restart(&q->vars);
}

static void red_destroy(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	del_timer_sync(&q->adapt_timer);
	qdisc_destroy(q->qdisc);
}

static const struct nla_policy red_policy[TCA_RED_MAX + 1] = {
	[TCA_RED_PARMS]	= { .len = sizeof(struct tc_red_qopt) },
	[TCA_RED_STAB]	= { .len = RED_STAB_SIZE },
	[TCA_RED_MAX_P] = { .type = NLA_U32 },
};

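/* Configure (or reconfigure) the qdisc from netlink attributes: validate
 * TCA_RED_PARMS/TCA_RED_STAB, build a bfifo child sized to ctl->limit,
 * swap it in under the tree lock, install the RED parameters, and arm
 * the 500 ms adaptation timer when TC_RED_ADAPTATIVE is requested.
 */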
static int red_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_RED_MAX + 1];
	struct tc_red_qopt *ctl;
	struct Qdisc *child = NULL;
	int err;
	u32 max_P;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_RED_MAX, opt, red_policy);
	if (err < 0)
		return err;

	if (tb[TCA_RED_PARMS] == NULL ||
	    tb[TCA_RED_STAB] == NULL)
		return -EINVAL;

	max_P = tb[TCA_RED_MAX_P] ? nla_get_u32(tb[TCA_RED_MAX_P]) : 0;

	ctl = nla_data(tb[TCA_RED_PARMS]);

	if (ctl->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit);
		if (IS_ERR(child))
			return PTR_ERR(child);
	}

	sch_tree_lock(sch);
	q->flags = ctl->flags;
	q->limit = ctl->limit;
	if (child) {
		qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
					  q->qdisc->qstats.backlog);
		qdisc_destroy(q->qdisc);
		q->qdisc = child;
	}

	red_set_parms(&q->parms,
		      ctl->qth_min, ctl->qth_max, ctl->Wlog,
		      ctl->Plog, ctl->Scell_log,
		      nla_data(tb[TCA_RED_STAB]),
		      max_P);
	red_set_vars(&q->vars);

	del_timer(&q->adapt_timer);
	if (ctl->flags & TC_RED_ADAPTATIVE)
		mod_timer(&q->adapt_timer, jiffies + HZ/2);

	if (!q->qdisc->q.qlen)
		red_start_of_idle_period(&q->vars);

	sch_tree_unlock(sch);
	return 0;
}

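/* Adaptive RED: every 500 ms, red_adaptative_algo() re-tunes max_P so
 * that the average queue length stays within a target band inside
 * [qth_min, qth_max]. Runs under the root qdisc lock because it touches
 * live parameter state.
 */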
static inline void red_adaptative_timer(unsigned long arg)
{
	struct Qdisc *sch = (struct Qdisc *)arg;
	struct red_sched_data *q = qdisc_priv(sch);
	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));

	spin_lock(root_lock);
	red_adaptative_algo(&q->parms, &q->vars);
	mod_timer(&q->adapt_timer, jiffies + HZ/2);
	spin_unlock(root_lock);
}

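/* Initialisation: start with the no-op child so dequeue is safe before
 * the first configuration, then delegate parameter setup to red_change().
 */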
static int red_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct red_sched_data *q = qdisc_priv(sch);

	q->qdisc = &noop_qdisc;
	setup_timer(&q->adapt_timer, red_adaptative_timer, (unsigned long)sch);
	return red_change(sch, opt);
}

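/* Dump the current configuration. qth_min/qth_max are stored pre-scaled
 * by Wlog (see red_set_parms()), hence the shift back when reporting.
 */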
static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit		= q->limit,
		.flags		= q->flags,
		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
		.Wlog		= q->parms.Wlog,
		.Plog		= q->parms.Plog,
		.Scell_log	= q->parms.Scell_log,
	};

	sch->qstats.backlog = q->qdisc->qstats.backlog;
	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_RED_PARMS, sizeof(opt), &opt) ||
	    nla_put_u32(skb, TCA_RED_MAX_P, q->parms.max_P))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

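/* Export RED-specific counters: "early" aggregates probabilistic and
 * forced drops, "marked" the corresponding ECN marks.
 */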
static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct tc_red_xstats st = {
		.early	= q->stats.prob_drop + q->stats.forced_drop,
		.pdrop	= q->stats.pdrop,
		.other	= q->stats.other,
		.marked	= q->stats.prob_mark + q->stats.forced_mark,
	};

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static int red_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct red_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;
	return 0;
}

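/* Class operations: RED exposes exactly one pseudo-class (minor 1),
 * bound to the child qdisc. Grafting NULL reinstalls the no-op qdisc.
 */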
static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct red_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *red_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct red_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long red_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void red_put(struct Qdisc *sch, unsigned long arg)
{
}

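/* Walk the single pseudo-class, honouring the walker's skip/stop
 * protocol.
 */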
static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops red_class_ops = {
	.graft		= red_graft,
	.leaf		= red_leaf,
	.get		= red_get,
	.put		= red_put,
	.walk		= red_walk,
	.dump		= red_dump_class,
};

static struct Qdisc_ops red_qdisc_ops __read_mostly = {
	.id		= "red",
	.priv_size	= sizeof(struct red_sched_data),
	.cl_ops		= &red_class_ops,
	.enqueue	= red_enqueue,
	.dequeue	= red_dequeue,
	.peek		= red_peek,
	.drop		= red_drop,
	.init		= red_init,
	.reset		= red_reset,
	.destroy	= red_destroy,
	.change		= red_change,
	.dump		= red_dump,
	.dump_stats	= red_dump_stats,
	.owner		= THIS_MODULE,
};

static int __init red_module_init(void)
{
	return register_qdisc(&red_qdisc_ops);
}

static void __exit red_module_exit(void)
{
	unregister_qdisc(&red_qdisc_ops);
}

module_init(red_module_init)
module_exit(red_module_exit)

MODULE_LICENSE("GPL");