/*
 * net/sched/sch_mq.c		Classful multiqueue dummy scheduler
 *
 * Copyright (c) 2009 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>

struct mq_sched {
        struct Qdisc **qdiscs;
};

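/* Release any per-queue child qdiscs still owned by mq, i.e. those
 * allocated in mq_init() but not yet handed over to the device in
 * mq_attach(), then free the array itself.
 */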
static void mq_destroy(struct Qdisc *sch)
{
        struct net_device *dev = qdisc_dev(sch);
        struct mq_sched *priv = qdisc_priv(sch);
        unsigned int ntx;

        if (!priv->qdiscs)
                return;
        for (ntx = 0; ntx < dev->num_tx_queues && priv->qdiscs[ntx]; ntx++)
                qdisc_destroy(priv->qdiscs[ntx]);
        kfree(priv->qdiscs);
}

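/* mq is only valid as the root qdisc of a multiqueue device: allocate one
 * default child qdisc per hardware TX queue here; they are grafted onto the
 * queues later in mq_attach(), which cannot fail.
 */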
static int mq_init(struct Qdisc *sch, struct nlattr *opt)
{
        struct net_device *dev = qdisc_dev(sch);
        struct mq_sched *priv = qdisc_priv(sch);
        struct netdev_queue *dev_queue;
        struct Qdisc *qdisc;
        unsigned int ntx;

        if (sch->parent != TC_H_ROOT)
                return -EOPNOTSUPP;

        if (!netif_is_multiqueue(dev))
                return -EOPNOTSUPP;

        /* pre-allocate qdiscs, attachment can't fail */
        priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
                               GFP_KERNEL);
        if (!priv->qdiscs)
                return -ENOMEM;

        for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
                dev_queue = netdev_get_tx_queue(dev, ntx);
                qdisc = qdisc_create_dflt(dev_queue, &fq_codel_qdisc_ops,
                                          TC_H_MAKE(TC_H_MAJ(sch->handle),
                                                    TC_H_MIN(ntx + 1)));
                if (!qdisc)
                        return -ENOMEM;
                priv->qdiscs[ntx] = qdisc;
                qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
        }

        sch->flags |= TCQ_F_MQROOT;
        return 0;
}

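/* Graft the pre-allocated child qdiscs onto their TX queues, destroying any
 * previous per-queue qdisc, then drop the temporary array.
 */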
static void mq_attach(struct Qdisc *sch)
{
        struct net_device *dev = qdisc_dev(sch);
        struct mq_sched *priv = qdisc_priv(sch);
        struct Qdisc *qdisc, *old;
        unsigned int ntx;

        for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
                qdisc = priv->qdiscs[ntx];
                old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
                if (old)
                        qdisc_destroy(old);
#ifdef CONFIG_NET_SCHED
                if (ntx < dev->real_num_tx_queues)
                        qdisc_list_add(qdisc);
#endif
        }
        kfree(priv->qdiscs);
        priv->qdiscs = NULL;
}

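/* Aggregate queue length and statistics from all per-queue child qdiscs so
 * the root mq qdisc reports device-wide totals.
 */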
static int mq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct net_device *dev = qdisc_dev(sch);
        struct Qdisc *qdisc;
        unsigned int ntx;

        sch->q.qlen = 0;
        memset(&sch->bstats, 0, sizeof(sch->bstats));
        memset(&sch->qstats, 0, sizeof(sch->qstats));

        for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
                qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
                spin_lock_bh(qdisc_lock(qdisc));
                sch->q.qlen += qdisc->q.qlen;
                sch->bstats.bytes += qdisc->bstats.bytes;
                sch->bstats.packets += qdisc->bstats.packets;
                sch->qstats.backlog += qdisc->qstats.backlog;
                sch->qstats.drops += qdisc->qstats.drops;
                sch->qstats.requeues += qdisc->qstats.requeues;
                sch->qstats.overlimits += qdisc->qstats.overlimits;
                spin_unlock_bh(qdisc_lock(qdisc));
        }
        return 0;
}

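/* Map a class ID (1-based TX queue index) to its netdev_queue, or return
 * NULL if the index is out of range.
 */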
static struct netdev_queue *mq_queue_get(struct Qdisc *sch, unsigned long cl)
{
        struct net_device *dev = qdisc_dev(sch);
        unsigned long ntx = cl - 1;

        if (ntx >= dev->num_tx_queues)
                return NULL;
        return netdev_get_tx_queue(dev, ntx);
}

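/* Select the TX queue named by the minor number of tcm_parent, falling back
 * to queue 0 for an invalid index.
 */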
static struct netdev_queue *mq_select_queue(struct Qdisc *sch,
                                            struct tcmsg *tcm)
{
        unsigned int ntx = TC_H_MIN(tcm->tcm_parent);
        struct netdev_queue *dev_queue = mq_queue_get(sch, ntx);

        if (!dev_queue) {
                struct net_device *dev = qdisc_dev(sch);

                return netdev_get_tx_queue(dev, 0);
        }
        return dev_queue;
}

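/* Replace the child qdisc of a single TX queue, deactivating the device
 * around the swap if it is up.
 */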
static int mq_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
                    struct Qdisc **old)
{
        struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
        struct net_device *dev = qdisc_dev(sch);

        if (dev->flags & IFF_UP)
                dev_deactivate(dev);

        *old = dev_graft_qdisc(dev_queue, new);
        if (new)
                new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
        if (dev->flags & IFF_UP)
                dev_activate(dev);
        return 0;
}

static struct Qdisc *mq_leaf(struct Qdisc *sch, unsigned long cl)
{
        struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

        return dev_queue->qdisc_sleeping;
}

static unsigned long mq_get(struct Qdisc *sch, u32 classid)
{
        unsigned int ntx = TC_H_MIN(classid);

        if (!mq_queue_get(sch, ntx))
                return 0;
        return ntx;
}

static void mq_put(struct Qdisc *sch, unsigned long cl)
{
}

static int mq_dump_class(struct Qdisc *sch, unsigned long cl,
                         struct sk_buff *skb, struct tcmsg *tcm)
{
        struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

        tcm->tcm_parent = TC_H_ROOT;
        tcm->tcm_handle |= TC_H_MIN(cl);
        tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
        return 0;
}

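/* Report the basic and queue statistics of one TX queue's child qdisc. */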
static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
                               struct gnet_dump *d)
{
        struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

        sch = dev_queue->qdisc_sleeping;
        if (gnet_stats_copy_basic(d, NULL, &sch->bstats) < 0 ||
            gnet_stats_copy_queue(d, NULL, &sch->qstats, sch->q.qlen) < 0)
                return -1;
        return 0;
}

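/* Walk all classes (one per TX queue), honouring the walker's skip/stop
 * protocol.
 */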
static void mq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
        struct net_device *dev = qdisc_dev(sch);
        unsigned int ntx;

        if (arg->stop)
                return;

        arg->count = arg->skip;
        for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) {
                if (arg->fn(sch, ntx + 1, arg) < 0) {
                        arg->stop = 1;
                        break;
                }
                arg->count++;
        }
}

static const struct Qdisc_class_ops mq_class_ops = {
        .select_queue   = mq_select_queue,
        .graft          = mq_graft,
        .leaf           = mq_leaf,
        .get            = mq_get,
        .put            = mq_put,
        .walk           = mq_walk,
        .dump           = mq_dump_class,
        .dump_stats     = mq_dump_class_stats,
};

struct Qdisc_ops mq_qdisc_ops __read_mostly = {
        .cl_ops         = &mq_class_ops,
        .id             = "mq",
        .priv_size      = sizeof(struct mq_sched),
        .init           = mq_init,
        .destroy        = mq_destroy,
        .attach         = mq_attach,
        .dump           = mq_dump,
        .owner          = THIS_MODULE,
};