/*
 **************************************************************************
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all copies.
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 **************************************************************************
 */

/*
 * Note: This file will be moved into the nss-qdisc directory once the driver
 * is re-organized.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
#include <net/netfilter/nf_conntrack.h>
#include <linux/if_bridge.h>
#include <linux/list.h>
#include <linux/version.h>
#include <br_private.h>
#include <nss_api_if.h>

/*
 * NSS QDisc debug macros
 */
#if (NSSQDISC_DEBUG_LEVEL < 1)
#define nssqdisc_assert(fmt, args...)
#else
#define nssqdisc_assert(c) if (!(c)) { BUG_ON(!(c)); }
#endif

#if (NSSQDISC_DEBUG_LEVEL < 2)
#define nssqdisc_error(fmt, args...)
#else
#define nssqdisc_error(fmt, args...) printk(KERN_ERR "%d:ERROR:"fmt, __LINE__, ##args)
#endif

#if (NSSQDISC_DEBUG_LEVEL < 3)
#define nssqdisc_warning(fmt, args...)
#else
#define nssqdisc_warning(fmt, args...) printk(KERN_WARNING "%d:WARN:"fmt, __LINE__, ##args)
#endif

#if (NSSQDISC_DEBUG_LEVEL < 4)
#define nssqdisc_info(fmt, args...)
#else
#define nssqdisc_info(fmt, args...) printk(KERN_INFO "%d:INFO:"fmt, __LINE__, ##args)
#endif

#if (NSSQDISC_DEBUG_LEVEL < 5)
#define nssqdisc_trace(fmt, args...)
#else
#define nssqdisc_trace(fmt, args...) printk(KERN_DEBUG "%d:TRACE:"fmt, __LINE__, ##args)
#endif
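
/*
 * Illustrative note (not part of the original driver logic): the macros above
 * take a printf-style format string, so call sites throughout this file look
 * like the sketch below, with the level gated at compile time by
 * NSSQDISC_DEBUG_LEVEL:
 *
 *	nssqdisc_info("%s: Qdisc %p (type %d) resetting\n", __func__, sch, nq->type);
 *
 * Below the configured level a call expands to nothing and compiles away.
 */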

/*
 * State values
 */
#define NSSQDISC_STATE_IDLE 0
#define NSSQDISC_STATE_READY 1
#define NSSQDISC_STATE_BUSY 2

#define NSSQDISC_STATE_INIT_FAILED -1
#define NSSQDISC_STATE_ASSIGN_SHAPER_SEND_FAIL -2
#define NSSQDISC_STATE_SHAPER_ASSIGN_FAILED -3
#define NSSQDISC_STATE_NODE_ALLOC_SEND_FAIL -4
#define NSSQDISC_STATE_NODE_ALLOC_FAIL -5
#define NSSQDISC_STATE_ROOT_SET_SEND_FAIL -6
#define NSSQDISC_STATE_ROOT_SET_FAIL -7
#define NSSQDISC_STATE_DEFAULT_SET_SEND_FAIL -8
#define NSSQDISC_STATE_DEFAULT_SET_FAIL -9
#define NSSQDISC_STATE_CHILD_ALLOC_SEND_FAIL -10
#define NSSQDISC_STATE_NODE_ALLOC_FAIL_CHILD -11
#define NSSQDISC_STATE_FAILED_RESPONSE -12
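
/*
 * Illustrative note (a sketch drawn from the functions below, not new logic):
 * these state values drive a simple polling handshake. A caller typically does:
 *
 *	atomic_set(&nq->state, NSSQDISC_STATE_IDLE);
 *	... build an nss_if_msg and send it with nss_if_tx_msg() ...
 *	while (NSSQDISC_STATE_IDLE == (state = atomic_read(&nq->state))) {
 *		yield();
 *	}
 *
 * The NSS callback then sets NSSQDISC_STATE_READY on success or one of the
 * negative failure codes above on error.
 */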

#define NSSQDISC_BRIDGE_PORT_MAX 100

void *nssqdisc_ctx;				/* Shaping context for nssqdisc */

struct nssqdisc_qdisc {
	struct Qdisc *qdisc;			/* Handy pointer back to containing qdisc */
	void *nss_shaping_ctx;			/* NSS context for general operations */
	int32_t nss_interface_number;		/* NSS Interface number we are shaping on */
	nss_shaper_node_type_t type;		/* Type of shaper node */
	bool is_class;				/* True if this represents a class and not a qdisc */
	bool is_root;				/* True if root qdisc on a net device */
	bool is_bridge;				/* True when qdisc is a bridge */
	bool is_virtual;			/* True when the device is represented as a virtual in
						 * the NSS e.g. perhaps operating on a wifi interface
						 * or bridge.
						 */
	bool destroy_virtual_interface;		/* Set if the interface is first registered in NSS by
						 * us. This means it needs to be un-registered when the
						 * module goes down.
						 */
	atomic_t state;				/* < 0: Signal that qdisc has 'failed'. 0
						 * indicates 'pending' setup. > 0 is READY.
						 * NOTE: volatile AND atomic - this is polled
						 * AND is used for synchronisation.
						 */
	uint32_t shaper_id;			/* Used when is_root. Child qdiscs use this
						 * information to know what shaper under
						 * which to create shaper nodes
						 */
	uint32_t qos_tag;			/* QoS tag of this node */
	volatile int32_t pending_final_state;	/* Used to let the callback cycle know what
						 * state to set the qdisc in on successful
						 * completion.
						 */
	void *virtual_interface_context;	/* Context provided by the NSS driver for
						 * new interfaces that are registered.
						 */
	void *bounce_context;			/* Context for bounce registration. Bounce
						 * enables packets to be sent to NSS for
						 * shaping purposes, and is returned to
						 * Linux for transmit.
						 */
	spinlock_t bounce_protection_lock;	/* Lock to protect the enqueue and dequeue
						 * operations on skb lists triggered by bounce
						 * callbacks.
						 */
	void (*stats_update_callback)(void *, struct nss_shaper_configure *);
						/* Stats update callback function for qdisc specific
						 * stats update. Currently unused.
						 */
	struct gnet_stats_basic_packed bstats;	/* Basic class statistics */
	struct gnet_stats_queue qstats;		/* Qstats for use by classes */
	atomic_t refcnt;			/* Reference count for class use */
	struct timer_list stats_get_timer;	/* Timer used to poll for stats */
	atomic_t pending_stat_requests;		/* Number of pending stats responses */
	struct nss_shaper_shaper_node_basic_stats_get basic_stats_latest;
						/* Latest stats obtained */
};

/*
 * nssqdisc bridge update structure
 */
struct nssqdisc_bridge_update {
	int port_list[NSSQDISC_BRIDGE_PORT_MAX];
	int port_list_count;
	int unassign_count;
};

/*
 * Task types for bridge scanner.
 */
enum nssqdisc_bshaper_tasks {
	NSSQDISC_SCAN_AND_ASSIGN_BSHAPER,
	NSSQDISC_SCAN_AND_UNASSIGN_BSHAPER,
};

/*
 * Types of messages sent down to NSS interfaces
 */
enum nssqdisc_interface_msgs {
	NSSQDISC_IF_SHAPER_ASSIGN,
	NSSQDISC_IF_SHAPER_UNASSIGN,
	NSSQDISC_IF_SHAPER_CONFIG,
};

/*
 * nssqdisc_get_interface_msg()
 *	Returns the correct message that needs to be sent down to the NSS interface.
 */
static inline int nssqdisc_get_interface_msg(bool is_bridge, uint32_t msg_type)
{
	/*
	 * We re-assign the message based on whether this is for the I shaper
	 * or the B shaper. The is_bridge flag tells if we are on a bridge interface.
	 */
	if (is_bridge) {
		switch (msg_type) {
		case NSSQDISC_IF_SHAPER_ASSIGN:
			return NSS_IF_BSHAPER_ASSIGN;
		case NSSQDISC_IF_SHAPER_UNASSIGN:
			return NSS_IF_BSHAPER_UNASSIGN;
		case NSSQDISC_IF_SHAPER_CONFIG:
			return NSS_IF_BSHAPER_CONFIG;
		default:
			nssqdisc_info("%s: Unknown message type for a bridge - type %d", __func__, msg_type);
			return -1;
		}
	} else {
		switch (msg_type) {
		case NSSQDISC_IF_SHAPER_ASSIGN:
			return NSS_IF_ISHAPER_ASSIGN;
		case NSSQDISC_IF_SHAPER_UNASSIGN:
			return NSS_IF_ISHAPER_UNASSIGN;
		case NSSQDISC_IF_SHAPER_CONFIG:
			return NSS_IF_ISHAPER_CONFIG;
		default:
			nssqdisc_info("%s: Unknown message type for an interface - type %d", __func__, msg_type);
			return -1;
		}
	}
}
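
/*
 * Illustrative usage sketch (mirrors the senders later in this file; 'nim' and
 * 'nq' stand for a local struct nss_if_msg and the qdisc private data):
 *
 *	msg_type = nssqdisc_get_interface_msg(nq->is_bridge, NSSQDISC_IF_SHAPER_CONFIG);
 *	nss_cmn_msg_init(&nim.cm, nq->nss_interface_number, msg_type,
 *			sizeof(struct nss_if_msg), callback, nq);
 *	rc = nss_if_tx_msg(nq->nss_shaping_ctx, &nim);
 *
 * Keeping the I-shaper/B-shaper selection in one place avoids duplicating it
 * at every send site.
 */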

/*
 * nssqdisc_get_br_port()
 *	Returns the bridge port structure of the bridge to which the device is attached.
 */
static inline struct net_bridge_port *nssqdisc_get_br_port(const struct net_device *dev)
{
	struct net_bridge_port *br_port;

	if (!dev) {
		return NULL;
	}

	rcu_read_lock();
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0))
	br_port = br_port_get_rcu(dev);
#else
	br_port = rcu_dereference(dev->br_port);
#endif
	rcu_read_unlock();

	return br_port;
}

/*
 * nssqdisc_attach_bshaper_callback()
 *	Callback function for bridge shaper attach to an interface.
 */
static void nssqdisc_attach_bshaper_callback(void *app_data, struct nss_if_msg *nim)
{
	struct Qdisc *sch = (struct Qdisc *)app_data;
	struct nssqdisc_qdisc *nq = qdisc_priv(sch);

	if (nim->cm.response != NSS_CMN_RESPONSE_ACK) {
		nssqdisc_warning("%s: B-shaper attach FAILED - response: %d\n", __func__,
				nim->cm.error);
		atomic_set(&nq->state, NSSQDISC_STATE_FAILED_RESPONSE);
		return;
	}

	nssqdisc_info("%s: B-shaper attach SUCCESS\n", __func__);
	atomic_set(&nq->state, NSSQDISC_STATE_READY);
}

/*
 * nssqdisc_attach_bshaper()
 *	Attaches a given bridge shaper to a given interface (different from shaper_assign).
 */
static int nssqdisc_attach_bshaper(struct Qdisc *sch, uint32_t if_num)
{
	struct nss_if_msg nim;
	struct nssqdisc_qdisc *nq = (struct nssqdisc_qdisc *)qdisc_priv(sch);
	int32_t state, rc;

	nssqdisc_info("%s: Attaching B-shaper %u to interface %u\n", __func__,
			nq->shaper_id, if_num);

	state = atomic_read(&nq->state);
	if (state != NSSQDISC_STATE_READY) {
		nssqdisc_error("%s: qdisc %p (type %d) is not ready: State - %d\n",
				__func__, sch, nq->type, state);
		BUG();
	}

	/*
	 * Set shaper node state to IDLE
	 */
	atomic_set(&nq->state, NSSQDISC_STATE_IDLE);

	/*
	 * Populate the message and send it down
	 */
	nss_cmn_msg_init(&nim.cm, if_num, NSS_IF_BSHAPER_ASSIGN,
			sizeof(struct nss_if_msg), nssqdisc_attach_bshaper_callback, sch);

	/*
	 * Assign the ID of the B-shaper that needs to be assigned to the interface recognized
	 * by if_num.
	 */
	nim.msg.shaper_assign.shaper_id = nq->shaper_id;
	rc = nss_if_tx_msg(nq->nss_shaping_ctx, &nim);

	if (rc != NSS_TX_SUCCESS) {
		nssqdisc_warning("%s: Failed to send bshaper (id: %u) attach for "
				"interface(if_num: %u)\n", __func__, nq->shaper_id, if_num);
		atomic_set(&nq->state, NSSQDISC_STATE_READY);
		return -1;
	}

	while (NSSQDISC_STATE_IDLE == (state = atomic_read(&nq->state))) {
		yield();
	}

	if (state == NSSQDISC_STATE_FAILED_RESPONSE) {
		nssqdisc_error("%s: Failed to attach B-shaper %u to interface %u\n",
				__func__, nq->shaper_id, if_num);
		atomic_set(&nq->state, NSSQDISC_STATE_READY);
		return -1;
	}

	nssqdisc_info("%s: Attach of B-shaper %u to interface %u is complete\n",
			__func__, nq->shaper_id, if_num);
	atomic_set(&nq->state, NSSQDISC_STATE_READY);
	return 0;
}

/*
 * nssqdisc_detach_bshaper_callback()
 *	Callback function for bridge shaper detach
 */
static void nssqdisc_detach_bshaper_callback(void *app_data, struct nss_if_msg *nim)
{
	struct Qdisc *sch = (struct Qdisc *)app_data;
	struct nssqdisc_qdisc *nq = qdisc_priv(sch);

	if (nim->cm.response != NSS_CMN_RESPONSE_ACK) {
		nssqdisc_error("%s: B-shaper detach FAILED - response: %d\n",
				__func__, nim->cm.error);
		atomic_set(&nq->state, NSSQDISC_STATE_FAILED_RESPONSE);
		return;
	}

	nssqdisc_info("%s: B-shaper detach SUCCESS\n", __func__);
	atomic_set(&nq->state, NSSQDISC_STATE_READY);
}

/*
 * nssqdisc_detach_bshaper()
 *	Detaches a given bridge shaper from a given interface (different from shaper unassign).
 */
static int nssqdisc_detach_bshaper(struct Qdisc *sch, uint32_t if_num)
{
	struct nss_if_msg nim;
	struct nssqdisc_qdisc *nq = (struct nssqdisc_qdisc *)qdisc_priv(sch);
	int32_t state, rc;

	nssqdisc_info("%s: Detaching B-shaper %u from interface %u\n",
			__func__, nq->shaper_id, if_num);

	state = atomic_read(&nq->state);
	if (state != NSSQDISC_STATE_READY) {
		nssqdisc_error("%s: qdisc %p (type %d) is not ready: %d\n",
				__func__, sch, nq->type, state);
		BUG();
	}

	/*
	 * Set shaper node state to IDLE
	 */
	atomic_set(&nq->state, NSSQDISC_STATE_IDLE);

	/*
	 * Create and send shaper unassign message to the NSS interface
	 */
	nss_cmn_msg_init(&nim.cm, if_num, NSS_IF_BSHAPER_UNASSIGN,
			sizeof(struct nss_if_msg), nssqdisc_detach_bshaper_callback, sch);
	nim.msg.shaper_unassign.shaper_id = nq->shaper_id;
	rc = nss_if_tx_msg(nq->nss_shaping_ctx, &nim);

	if (rc != NSS_TX_SUCCESS) {
		nssqdisc_warning("%s: Failed to send B-shaper (id: %u) detach "
				"for interface(if_num: %u)\n", __func__, nq->shaper_id, if_num);
		atomic_set(&nq->state, NSSQDISC_STATE_READY);
		return -1;
	}

	nssqdisc_info("%s: Detach of B-shaper %u from interface %u is complete.",
			__func__, nq->shaper_id, if_num);
	atomic_set(&nq->state, NSSQDISC_STATE_READY);
	return 0;
}

/*
 * nssqdisc_refresh_bshaper_assignment()
 *	Performs assign or unassign of bshapers for interfaces on the bridge.
 */
static int nssqdisc_refresh_bshaper_assignment(struct Qdisc *br_qdisc,
					enum nssqdisc_bshaper_tasks task)
{
	struct net_device *dev;
	struct net_device *br_dev = qdisc_dev(br_qdisc);
	struct nssqdisc_qdisc *nq;
	struct nssqdisc_bridge_update br_update;
	int i;

	if ((br_qdisc->parent != TC_H_ROOT) && (br_qdisc->parent != TC_H_UNSPEC)) {
		nssqdisc_error("%s: Qdisc not root qdisc for the bridge interface: "
				"Handle - %x", __func__, br_qdisc->parent);
		return -1;
	}

	nq = qdisc_priv(br_qdisc);

	/*
	 * Initialize the bridge update structure.
	 */
	br_update.port_list_count = 0;
	br_update.unassign_count = 0;

	read_lock(&dev_base_lock);
	dev = first_net_device(&init_net);
	while (dev) {
		struct net_bridge_port *br_port = nssqdisc_get_br_port(dev);
		int nss_if_num;

		nssqdisc_info("%s: Scanning device %s", __func__, dev->name);
		if (!br_port || !br_port->br) {
			goto nextdev;
		}

		/*
		 * Don't care if this device is not on the
		 * bridge that is of concern.
		 */
		if (br_port->br->dev != br_dev) {
			goto nextdev;
		}

		/*
		 * If the interface is known to NSS then we will have to shape it,
		 * irrespective of whether it has an interface qdisc or not.
		 */
		nss_if_num = nss_cmn_get_interface_number(nq->nss_shaping_ctx, dev);
		if (nss_if_num < 0) {
			goto nextdev;
		}

		nssqdisc_info("%s: Will be linking/unlinking %s to/from bridge %s\n", __func__,
				dev->name, br_dev->name);
		br_update.port_list[br_update.port_list_count++] = nss_if_num;
nextdev:
		dev = next_net_device(dev);
	}
	read_unlock(&dev_base_lock);

	nssqdisc_info("%s: List count %d\n", __func__, br_update.port_list_count);

	if (task == NSSQDISC_SCAN_AND_ASSIGN_BSHAPER) {
		/*
		 * Loop through the ports and assign them with B-shapers.
		 */
		for (i = 0; i < br_update.port_list_count; i++) {
			if (nssqdisc_attach_bshaper(br_qdisc, br_update.port_list[i]) >= 0) {
				nssqdisc_info("%s: Interface %u added to bridge %s\n",
						__func__, br_update.port_list[i], br_dev->name);
				continue;
			}
			nssqdisc_error("%s: Unable to attach bshaper with shaper-id: %u, "
					"to interface if_num: %d\n", __func__, nq->shaper_id,
					br_update.port_list[i]);
			br_update.unassign_count = i;
			break;
		}
		nssqdisc_info("%s: Unassign count %d\n", __func__, br_update.unassign_count);
		if (br_update.unassign_count == 0) {
			return 0;
		}

		/*
		 * In case of a failure, unassign the B-shapers that were assigned above.
		 */
		for (i = 0; i < br_update.unassign_count; i++) {
			if (nssqdisc_detach_bshaper(br_qdisc, br_update.port_list[i]) >= 0) {
				continue;
			}
			nssqdisc_error("%s: Unable to detach bshaper with shaper-id: %u, "
					"from interface if_num: %d\n", __func__, nq->shaper_id,
					br_update.port_list[i]);
			BUG();
		}

		nssqdisc_info("%s: Failed to link interfaces to bridge\n", __func__);
		return -1;
	} else if (task == NSSQDISC_SCAN_AND_UNASSIGN_BSHAPER) {
		/*
		 * Loop through the ports and unassign their B-shapers.
		 */
		for (i = 0; i < br_update.port_list_count; i++) {
			if (nssqdisc_detach_bshaper(br_qdisc, br_update.port_list[i]) >= 0) {
				nssqdisc_info("%s: Interface %u removed from bridge %s\n",
						__func__, br_update.port_list[i], br_dev->name);
				continue;
			}
			nssqdisc_error("%s: Unable to detach bshaper with shaper-id: %u, "
					"from interface if_num: %d\n", __func__, nq->shaper_id,
					br_update.port_list[i]);
			BUG();
		}
	}

	return 0;
}

/*
 * nssqdisc_root_cleanup_final()
 *	Performs final cleanup of a root shaper node after all other
 *	shaper node cleanup is complete.
 */
static void nssqdisc_root_cleanup_final(struct nssqdisc_qdisc *nq)
{
	nssqdisc_info("%s: Root qdisc %p (type %d) final cleanup\n", __func__,
			nq->qdisc, nq->type);

	/*
	 * If we are a bridge then we have to unregister for bridge bouncing
	 * AND destroy the virtual interface that provides bridge shaping.
	 */
	if (nq->is_bridge) {
		/*
		 * Unregister for bouncing to the NSS for bridge shaping
		 */
		nssqdisc_info("%s: Unregister for bridge bouncing: %p\n", __func__,
				nq->bounce_context);
		nss_shaper_unregister_shaper_bounce_bridge(nq->nss_interface_number);

		/*
		 * Unregister the virtual interface we use to act as shaper
		 * for bridge shaping.
		 */
		nssqdisc_info("%s: Release root bridge virtual interface: %p\n",
				__func__, nq->virtual_interface_context);
		nss_destroy_virt_if(nq->virtual_interface_context);
	}

	/*
	 * If we are a virtual interface other than a bridge then we simply
	 * unregister for interface bouncing and do not care about deleting the
	 * interface.
	 */
	if (nq->is_virtual && !nq->is_bridge) {
		/*
		 * Unregister for interface bouncing of packets
		 */
		nssqdisc_info("%s: Unregister for interface bouncing: %p\n",
				__func__, nq->bounce_context);
		nss_shaper_unregister_shaper_bounce_interface(nq->nss_interface_number);
	}

	/*
	 * Finally unregister for shaping
	 */
	nssqdisc_info("%s: Unregister for shaping\n", __func__);
	nss_shaper_unregister_shaping(nq->nss_shaping_ctx);

	/*
	 * Now set our final state
	 */
	atomic_set(&nq->state, nq->pending_final_state);
}

/*
 * nssqdisc_root_cleanup_shaper_unassign_callback()
 *	Invoked on the response to a shaper unassign config command issued
 */
static void nssqdisc_root_cleanup_shaper_unassign_callback(void *app_data,
					struct nss_if_msg *nim)
{
	struct nssqdisc_qdisc *nq = (struct nssqdisc_qdisc *)app_data;

	if (nim->cm.response != NSS_CMN_RESPONSE_ACK) {
		nssqdisc_error("%s: Root qdisc %p (type %d) shaper unassign FAILED\n", __func__, nq->qdisc, nq->type);
		BUG();
	}
	nssqdisc_root_cleanup_final(nq);
}

/*
 * nssqdisc_root_cleanup_shaper_unassign()
 *	Issue command to unassign the shaper
 */
static void nssqdisc_root_cleanup_shaper_unassign(struct nssqdisc_qdisc *nq)
{
	struct nss_if_msg nim;
	nss_tx_status_t rc;
	int msg_type;

	nssqdisc_info("%s: Root qdisc %p (type %d): shaper unassign: %d\n",
			__func__, nq->qdisc, nq->type, nq->shaper_id);

	msg_type = nssqdisc_get_interface_msg(nq->is_bridge, NSSQDISC_IF_SHAPER_UNASSIGN);
	nss_cmn_msg_init(&nim.cm, nq->nss_interface_number, msg_type,
			sizeof(struct nss_if_msg), nssqdisc_root_cleanup_shaper_unassign_callback, nq);
	nim.msg.shaper_unassign.shaper_id = nq->shaper_id;
	rc = nss_if_tx_msg(nq->nss_shaping_ctx, &nim);

	if (rc == NSS_TX_SUCCESS) {
		return;
	}

	nssqdisc_error("%s: Root qdisc %p (type %d): unassign command send failed: "
			"%d, shaper id: %d\n", __func__, nq->qdisc, nq->type, rc, nq->shaper_id);

	nssqdisc_root_cleanup_final(nq);
}

/*
 * nssqdisc_root_cleanup_free_node_callback()
 *	Invoked on the response to freeing a shaper node
 */
static void nssqdisc_root_cleanup_free_node_callback(void *app_data,
					struct nss_if_msg *nim)
{
	struct nssqdisc_qdisc *nq = (struct nssqdisc_qdisc *)app_data;

	if (nim->cm.response != NSS_CMN_RESPONSE_ACK) {
		nssqdisc_error("%s: Root qdisc %p (type %d) free FAILED response "
				"type: %d\n", __func__, nq->qdisc, nq->type,
				nim->msg.shaper_configure.config.response_type);
		BUG();
	}

	nssqdisc_info("%s: Root qdisc %p (type %d) free SUCCESS - response "
			"type: %d\n", __func__, nq->qdisc, nq->type,
			nim->msg.shaper_configure.config.response_type);

	nssqdisc_root_cleanup_shaper_unassign(nq);
}

/*
 * nssqdisc_root_cleanup_free_node()
 *	Free the shaper node, issue command to do so.
 */
static void nssqdisc_root_cleanup_free_node(struct nssqdisc_qdisc *nq)
{
	struct nss_if_msg nim;
	nss_tx_status_t rc;
	int msg_type;

	nssqdisc_info("%s: Root qdisc %p (type %d): freeing shaper node\n",
			__func__, nq->qdisc, nq->type);

	/*
	 * Construct and send the shaper configure message down to the NSS interface
	 */
	msg_type = nssqdisc_get_interface_msg(nq->is_bridge, NSSQDISC_IF_SHAPER_CONFIG);
	nss_cmn_msg_init(&nim.cm, nq->nss_interface_number, msg_type,
			sizeof(struct nss_if_msg), nssqdisc_root_cleanup_free_node_callback, nq);
	nim.msg.shaper_configure.config.request_type = NSS_SHAPER_CONFIG_TYPE_FREE_SHAPER_NODE;
	nim.msg.shaper_configure.config.msg.free_shaper_node.qos_tag = nq->qos_tag;
	rc = nss_if_tx_msg(nq->nss_shaping_ctx, &nim);

	if (rc == NSS_TX_SUCCESS) {
		return;
	}

	nssqdisc_error("%s: Qdisc %p (type %d): free command send "
			"failed: %d, qos tag: %x\n", __func__, nq->qdisc, nq->type,
			rc, nq->qos_tag);

	/*
	 * Move onto unassigning the shaper instead
	 */
	nssqdisc_root_cleanup_shaper_unassign(nq);
}

/*
 * nssqdisc_root_init_root_assign_callback()
 *	Invoked on the response to assigning shaper node as root
 */
static void nssqdisc_root_init_root_assign_callback(void *app_data,
					struct nss_if_msg *nim)
{
	struct nssqdisc_qdisc *nq = (struct nssqdisc_qdisc *)app_data;

	if (nim->cm.response != NSS_CMN_RESPONSE_ACK) {
		nssqdisc_error("%s: Root assign FAILED for qdisc %p (type %d), "
				"response type: %d\n", __func__, nq->qdisc, nq->type,
				nim->msg.shaper_configure.config.response_type);
		nq->pending_final_state = NSSQDISC_STATE_ROOT_SET_FAIL;
		nssqdisc_root_cleanup_free_node(nq);
		return;
	}

	nssqdisc_info("%s: Qdisc %p (type %d): set as root is done. Response - %d\n",
			__func__, nq->qdisc, nq->type, nim->msg.shaper_configure.config.response_type);
	atomic_set(&nq->state, NSSQDISC_STATE_READY);
}

/*
 * nssqdisc_root_init_alloc_node_callback()
 *	Invoked on the response to creating a shaper node as root
 */
static void nssqdisc_root_init_alloc_node_callback(void *app_data,
					struct nss_if_msg *nim)
{
	struct nssqdisc_qdisc *nq = (struct nssqdisc_qdisc *)app_data;
	nss_tx_status_t rc;
	int msg_type;

	if (nim->cm.response != NSS_CMN_RESPONSE_ACK) {
		nssqdisc_info("%s: Qdisc %p (type %d) root alloc node FAILED "
				"response type: %d\n", __func__, nq->qdisc, nq->type,
				nim->msg.shaper_configure.config.response_type);

		nq->pending_final_state = NSSQDISC_STATE_NODE_ALLOC_FAIL;

		/*
		 * No shaper node created, cleanup from unassigning the shaper
		 */
		nssqdisc_root_cleanup_shaper_unassign(nq);
		return;
	}

	/*
	 * Create and send shaper configure message to the NSS interface
	 */
	msg_type = nssqdisc_get_interface_msg(nq->is_bridge, NSSQDISC_IF_SHAPER_CONFIG);
	nss_cmn_msg_init(&nim->cm, nq->nss_interface_number, msg_type,
			sizeof(struct nss_if_msg), nssqdisc_root_init_root_assign_callback, nq);
	nim->msg.shaper_configure.config.request_type = NSS_SHAPER_CONFIG_TYPE_SET_ROOT;
	nim->msg.shaper_configure.config.msg.set_root_node.qos_tag = nq->qos_tag;
	rc = nss_if_tx_msg(nq->nss_shaping_ctx, nim);

	if (rc == NSS_TX_SUCCESS) {
		return;
	}

	nssqdisc_error("%s: Root assign send command failed: %d\n",
			__func__, rc);

	nq->pending_final_state = NSSQDISC_STATE_ROOT_SET_SEND_FAIL;
	nssqdisc_root_cleanup_free_node(nq);
}

/*
 * nssqdisc_root_init_shaper_assign_callback()
 *	Invoked on the response to a shaper assign config command issued
 */
static void nssqdisc_root_init_shaper_assign_callback(void *app_data,
					struct nss_if_msg *nim)
{
	struct nssqdisc_qdisc *nq = (struct nssqdisc_qdisc *)app_data;
	nss_tx_status_t rc;
	int msg_type;

	if (nim->cm.response != NSS_CMN_RESPONSE_ACK) {
		nssqdisc_warning("%s: Qdisc %x (type %d): shaper assign failed - phys_if response type: %d\n",
				__func__, nq->qos_tag, nq->type, nim->cm.error);
		/*
		 * Unable to assign a shaper, perform cleanup from final stage
		 */
		nq->pending_final_state = NSSQDISC_STATE_SHAPER_ASSIGN_FAILED;
		nssqdisc_root_cleanup_final(nq);
		return;
	}

	if (nim->cm.type != NSS_IF_ISHAPER_ASSIGN && nim->cm.type != NSS_IF_BSHAPER_ASSIGN) {
		nssqdisc_error("%s: Qdisc %x (type %d): shaper assign callback received garbage: %d\n",
				__func__, nq->qos_tag, nq->type, nim->cm.type);
		/*
		 * Unable to assign a shaper, perform cleanup from final stage
		 */
		nq->pending_final_state = NSSQDISC_STATE_SHAPER_ASSIGN_FAILED;
		nssqdisc_root_cleanup_final(nq);
		return;
	} else {
		nssqdisc_info("%s: Qdisc %x (type %d): shaper assign callback received sane message: %d\n",
				__func__, nq->qos_tag, nq->type, nim->cm.type);
	}

	/*
	 * Shaper has been allocated and assigned
	 */
	nq->shaper_id = nim->msg.shaper_assign.new_shaper_id;
	nssqdisc_info("%s: Qdisc %p (type %d), shaper assigned: %u\n",
			__func__, nq->qdisc, nq->type, nq->shaper_id);

	/*
	 * Create and send the shaper configure message to the NSS interface
	 */
	msg_type = nssqdisc_get_interface_msg(nq->is_bridge, NSSQDISC_IF_SHAPER_CONFIG);
	nss_cmn_msg_init(&nim->cm, nq->nss_interface_number, msg_type, sizeof(struct nss_if_msg),
			nssqdisc_root_init_alloc_node_callback, nq);
	nim->msg.shaper_configure.config.request_type = NSS_SHAPER_CONFIG_TYPE_ALLOC_SHAPER_NODE;
	nim->msg.shaper_configure.config.msg.alloc_shaper_node.node_type = nq->type;
	nim->msg.shaper_configure.config.msg.alloc_shaper_node.qos_tag = nq->qos_tag;
	rc = nss_if_tx_msg(nq->nss_shaping_ctx, nim);

	if (rc == NSS_TX_SUCCESS) {
		return;
	}

	/*
	 * Unable to send alloc node command, cleanup from unassigning the shaper
	 */
	nssqdisc_error("%s: Qdisc %p (type %d) create command failed: %d\n",
			__func__, nq->qdisc, nq->type, rc);

	nq->pending_final_state = NSSQDISC_STATE_NODE_ALLOC_SEND_FAIL;
	nssqdisc_root_cleanup_shaper_unassign(nq);
}


/*
 * nssqdisc_child_cleanup_final()
 *	Perform final cleanup of a shaper node after all shaper node
 *	cleanup is complete.
 */
static void nssqdisc_child_cleanup_final(struct nssqdisc_qdisc *nq)
{
	nssqdisc_info("%s: Final cleanup type %d: %p\n", __func__,
			nq->type, nq->qdisc);

	/*
	 * Finally unregister for shaping
	 */
	nssqdisc_info("%s: Unregister for shaping\n", __func__);
	nss_shaper_unregister_shaping(nq->nss_shaping_ctx);

	/*
	 * Now set our final state
	 */
	atomic_set(&nq->state, nq->pending_final_state);
}


/*
 * nssqdisc_child_cleanup_free_node_callback()
 *	Invoked on the response to freeing a child shaper node
 */
static void nssqdisc_child_cleanup_free_node_callback(void *app_data,
					struct nss_if_msg *nim)
{
	struct nssqdisc_qdisc *nq = (struct nssqdisc_qdisc *)app_data;

	if (nim->cm.response != NSS_CMN_RESPONSE_ACK) {
		nssqdisc_info("%s: Qdisc %p (type %d qos_tag %x): child free FAILED response type: %d\n",
				__func__, nq->qdisc, nq->type, nq->qos_tag, nim->msg.shaper_configure.config.response_type);
		return;
	}

	nssqdisc_info("%s: Qdisc %p (type %d): child shaper node "
			"free complete\n", __func__, nq->qdisc, nq->type);

	/*
	 * Perform final cleanup
	 */
	nssqdisc_child_cleanup_final(nq);
}

/*
 * nssqdisc_child_cleanup_free_node()
 *	Free the child shaper node, issue command to do so.
 */
static void nssqdisc_child_cleanup_free_node(struct nssqdisc_qdisc *nq)
{
	struct nss_if_msg nim;
	nss_tx_status_t rc;
	int msg_type;

	nssqdisc_info("%s: Qdisc %p (type %d qos_tag %x): free shaper node command\n",
			__func__, nq->qdisc, nq->type, nq->qos_tag);

	/*
	 * Create and send the shaper configure message to the NSS interface
	 */
	msg_type = nssqdisc_get_interface_msg(nq->is_bridge, NSSQDISC_IF_SHAPER_CONFIG);
	nss_cmn_msg_init(&nim.cm, nq->nss_interface_number, msg_type, sizeof(struct nss_if_msg),
			nssqdisc_child_cleanup_free_node_callback, nq);
	nim.msg.shaper_configure.config.request_type = NSS_SHAPER_CONFIG_TYPE_FREE_SHAPER_NODE;
	nim.msg.shaper_configure.config.msg.free_shaper_node.qos_tag = nq->qos_tag;
	rc = nss_if_tx_msg(nq->nss_shaping_ctx, &nim);

	if (rc == NSS_TX_SUCCESS) {
		return;
	}

	nssqdisc_error("%s: Qdisc %p (type %d): child free node command send "
			"failed: %d, qos tag: %x\n", __func__, nq->qdisc, nq->type,
			rc, nq->qos_tag);

	/*
	 * Perform final cleanup
	 */
	nssqdisc_child_cleanup_final(nq);
}

/*
 * nssqdisc_child_init_alloc_node_callback()
 *	Invoked on the response to creating a child shaper node
 */
static void nssqdisc_child_init_alloc_node_callback(void *app_data, struct nss_if_msg *nim)
{
	struct nssqdisc_qdisc *nq = (struct nssqdisc_qdisc *)app_data;

	if (nim->cm.response != NSS_CMN_RESPONSE_ACK) {
		nssqdisc_error("%s: Qdisc %p (type %d): child alloc node FAILED, response "
				"type: %d\n", __func__, nq->qdisc, nq->type, nim->msg.shaper_configure.config.response_type);
		/*
		 * Cleanup from final stage
		 */
		nq->pending_final_state = NSSQDISC_STATE_NODE_ALLOC_FAIL_CHILD;
		nssqdisc_child_cleanup_final(nq);
		return;
	}

	/*
	 * Shaper node has been allocated
	 */
	nssqdisc_info("%s: Qdisc %p (type %d): shaper node successfully "
			"created as a child node\n", __func__, nq->qdisc, nq->type);

	atomic_set(&nq->state, NSSQDISC_STATE_READY);
}

/*
 * nssqdisc_add_to_tail_protected()
 *	Adds to the qdisc's skb list while holding the bounce protection lock.
 */
static inline void nssqdisc_add_to_tail_protected(struct sk_buff *skb, struct Qdisc *sch)
{
	struct nssqdisc_qdisc *nq = qdisc_priv(sch);

	/*
	 * Since packets can come back from the NSS at any time (in case of bounce),
	 * enqueues and dequeues can cause corruption if not done within locks.
	 */
	spin_lock_bh(&nq->bounce_protection_lock);

	/*
	 * We do not use the qdisc_enqueue_tail() API here in order
	 * to prevent stats from getting updated by the API.
	 */
	__skb_queue_tail(&sch->q, skb);

	spin_unlock_bh(&nq->bounce_protection_lock);
}

/*
 * nssqdisc_add_to_tail()
 *	Adds to list without holding any locks.
 */
static inline void nssqdisc_add_to_tail(struct sk_buff *skb, struct Qdisc *sch)
{
	/*
	 * We do not use the qdisc_enqueue_tail() API here in order
	 * to prevent stats from getting updated by the API.
	 */
	__skb_queue_tail(&sch->q, skb);
}

/*
 * nssqdisc_remove_from_tail_protected()
 *	Removes from the qdisc's skb list while holding the bounce protection lock.
 */
static inline struct sk_buff *nssqdisc_remove_from_tail_protected(struct Qdisc *sch)
{
	struct nssqdisc_qdisc *nq = qdisc_priv(sch);
	struct sk_buff *skb;

	/*
	 * Since packets can come back from the NSS at any time (in case of bounce),
	 * enqueues and dequeues can cause corruption if not done within locks.
	 */
	spin_lock_bh(&nq->bounce_protection_lock);

	/*
	 * We use __skb_dequeue() to ensure that
	 * stats don't get updated twice.
	 */
	skb = __skb_dequeue(&sch->q);

	spin_unlock_bh(&nq->bounce_protection_lock);

	return skb;
}

/*
 * nssqdisc_remove_from_tail()
 *	Removes from list without holding any locks.
 */
static inline struct sk_buff *nssqdisc_remove_from_tail(struct Qdisc *sch)
{
	/*
	 * We use __skb_dequeue() to ensure that
	 * stats don't get updated twice.
	 */
	return __skb_dequeue(&sch->q);
}

/*
 * nssqdisc_bounce_callback()
 *	Enqueues packets bounced back from NSS firmware.
 */
static void nssqdisc_bounce_callback(void *app_data, struct sk_buff *skb)
{
	struct Qdisc *sch = (struct Qdisc *)app_data;

	/*
	 * Enqueue the packet for transmit and schedule a dequeue.
	 * This enqueue has to be protected in order to avoid corruption.
	 */
	nssqdisc_add_to_tail_protected(skb, sch);
	__netif_schedule(sch);
}
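
/*
 * Note: registration of nssqdisc_bounce_callback() is not shown in this
 * excerpt. Presumably it is passed, with the qdisc as app_data, to the
 * register counterparts of the nss_shaper_unregister_shaper_bounce_bridge()/
 * nss_shaper_unregister_shaper_bounce_interface() calls seen in
 * nssqdisc_root_cleanup_final(); this is an assumption based on the cleanup
 * path, not on code visible here.
 */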

/*
 * nssqdisc_peek()
 *	Called to peek at the head of an nss qdisc
 */
static struct sk_buff *nssqdisc_peek(struct Qdisc *sch)
{
	struct nssqdisc_qdisc *nq = qdisc_priv(sch);
	struct sk_buff *skb;

	if (!nq->is_virtual) {
		skb = skb_peek(&sch->q);
	} else {
		spin_lock_bh(&nq->bounce_protection_lock);
		skb = skb_peek(&sch->q);
		spin_unlock_bh(&nq->bounce_protection_lock);
	}

	return skb;
}

/*
 * nssqdisc_drop()
 *	Called to drop the packet at the head of queue
 */
static unsigned int nssqdisc_drop(struct Qdisc *sch)
{
	struct nssqdisc_qdisc *nq = qdisc_priv(sch);
	unsigned int ret;

	if (!nq->is_virtual) {
		ret = __qdisc_queue_drop_head(sch, &sch->q);
	} else {
		spin_lock_bh(&nq->bounce_protection_lock);
		/*
		 * This function is safe to call within locks
		 */
		ret = __qdisc_queue_drop_head(sch, &sch->q);
		spin_unlock_bh(&nq->bounce_protection_lock);
	}

	return ret;
}

/*
 * nssqdisc_reset()
 *	Called when a qdisc is reset
 */
static void nssqdisc_reset(struct Qdisc *sch)
{
	struct nssqdisc_qdisc *nq = qdisc_priv(sch);

	nssqdisc_info("%s: Qdisc %p (type %d) resetting\n",
			__func__, sch, nq->type);

	/*
	 * Delete all packets pending in the output queue and reset stats
	 */
	if (!nq->is_virtual) {
		qdisc_reset_queue(sch);
	} else {
		spin_lock_bh(&nq->bounce_protection_lock);
		/*
		 * This function is safe to call within locks
		 */
		qdisc_reset_queue(sch);
		spin_unlock_bh(&nq->bounce_protection_lock);
	}

	nssqdisc_info("%s: Qdisc %p (type %d) reset complete\n",
			__func__, sch, nq->type);
}

/*
 * nssqdisc_enqueue()
 *	Generic enqueue call for enqueuing packets into NSS for shaping
 */
static int nssqdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct nssqdisc_qdisc *nq = qdisc_priv(sch);
	nss_tx_status_t status;

	/*
	 * If we are not the root qdisc then we should not be getting packets!!
	 */
	if (unlikely(!nq->is_root)) {
		nssqdisc_error("%s: Qdisc %p (type %d): unexpected packet "
				"for child qdisc - skb: %p\n", __func__, sch, nq->type, skb);
		nssqdisc_add_to_tail(skb, sch);
		__netif_schedule(sch);
		return NET_XMIT_SUCCESS;
	}

	/*
	 * Packet enqueued in Linux for transmit.
	 *
	 * What we do here depends upon whether we are a bridge or not. If not a
	 * bridge then it depends on if we are a physical or virtual interface.
	 * The decision we are trying to reach is whether to bounce a packet to
	 * the NSS to be shaped or not.
	 *
	 * is_bridge	is_virtual	Meaning
	 * ---------------------------------------------------------------------------
	 * false	false		Physical interface in NSS
	 *
	 * Action: Simply allow the packet to be dequeued. The packet will be
	 * shaped by the interface shaper in the NSS by the usual transmit path.
	 *
	 *
	 * false	true		Physical interface in Linux.
	 *				NSS still responsible for shaping
	 *
	 * Action: Bounce the packet to the NSS virtual interface that represents
	 * this Linux physical interface for INTERFACE shaping. When the packet is
	 * returned from being shaped we allow it to be dequeued for transmit.
	 *
	 * true		n/a		Logical Linux interface.
	 *				Root qdisc created a virtual interface
	 *				to represent it in the NSS for shaping
	 *				purposes.
	 *
	 * Action: Bounce the packet to the NSS virtual interface (for BRIDGE shaping)
	 * the bridge root qdisc created for it. When the packet is returned from being
	 * shaped we allow it to be dequeued for transmit.
	 */

	if (!nq->is_virtual) {
		/*
		 * TX to an NSS physical - the shaping will occur as part of normal
		 * transmit path.
		 */
		nssqdisc_add_to_tail(skb, sch);
		__netif_schedule(sch);
		return NET_XMIT_SUCCESS;
	}

	if (nq->is_bridge) {
		/*
		 * TX to a bridge, this is to be shaped by the b shaper on the virtual interface created
		 * to represent the bridge interface.
		 */
		status = nss_shaper_bounce_bridge_packet(nq->bounce_context, nq->nss_interface_number, skb);
		if (likely(status == NSS_TX_SUCCESS)) {
			return NET_XMIT_SUCCESS;
		}

		/*
		 * The bounce failed - warn, then drop the packet below.
		 */
		nssqdisc_warning("%s: Qdisc %p (type %d): failed to bounce for bridge %d, skb: %p\n",
				__func__, sch, nq->type, nq->nss_interface_number, skb);

		/*
		 * We were unable to transmit the packet for bridge shaping.
		 * We therefore drop it.
		 */
		kfree_skb(skb);
		return NET_XMIT_DROP;
	}

	/*
	 * TX to a physical Linux (NSS virtual). Bounce packet to NSS for
	 * interface shaping.
	 */
	status = nss_shaper_bounce_interface_packet(nq->bounce_context,
			nq->nss_interface_number, skb);
	if (likely(status == NSS_TX_SUCCESS)) {
		return NET_XMIT_SUCCESS;
	}

	/*
	 * The bounce failed - warn, then drop the packet below.
	 */
	nssqdisc_warning("%s: Qdisc %p (type %d): failed to bounce for "
			"interface: %d, skb: %p\n", __func__, sch, nq->type,
			nq->nss_interface_number, skb);

	/*
	 * We were unable to transmit the packet for interface shaping.
	 * We therefore drop it.
	 */
	kfree_skb(skb);
	return NET_XMIT_DROP;
}
1185
1186/*
1187 * nssqdisc_dequeue()
1188 * Generic dequeue call for dequeuing bounced packets.
1189 */
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001190static inline struct sk_buff *nssqdisc_dequeue(struct Qdisc *sch)
Murat Sezgin7a705422014-01-30 16:09:22 -08001191{
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001192 struct nssqdisc_qdisc *nq = qdisc_priv(sch);
Murat Sezgin7a705422014-01-30 16:09:22 -08001193
1194 /*
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001195 * We use the protected dequeue API if the interface involves bounce.
1196 * That is, a bridge or a virtual interface. Else, we use the unprotected
1197 * API.
Murat Sezgin7a705422014-01-30 16:09:22 -08001198 */
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001199 if (nq->is_virtual) {
1200 return nssqdisc_remove_from_tail_protected(sch);
1201 } else {
1202 return nssqdisc_remove_from_tail(sch);
1203 }
Murat Sezgin7a705422014-01-30 16:09:22 -08001204}
1205
1206/*
1207 * nssqdisc_set_default_callback()
1208 * The callback function for a shaper node set default
1209 */
1210static void nssqdisc_set_default_callback(void *app_data,
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001211 struct nss_if_msg *nim)
Murat Sezgin7a705422014-01-30 16:09:22 -08001212{
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001213 struct nssqdisc_qdisc *nq = (struct nssqdisc_qdisc *)app_data;
Murat Sezgin7a705422014-01-30 16:09:22 -08001214
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001215 if (nim->cm.response != NSS_CMN_RESPONSE_ACK) {
1216 nssqdisc_error("%s: Qdisc %p (type %d): shaper node set default FAILED, response type: %d\n",
1217 __func__, nq->qdisc, nq->type, nim->msg.shaper_configure.config.response_type);
Murat Sezgin7a705422014-01-30 16:09:22 -08001218 atomic_set(&nq->state, NSSQDISC_STATE_FAILED_RESPONSE);
1219 return;
1220 }
1221
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001222 nssqdisc_info("%s: Qdisc %p (type %d): attach complete\n", __func__, nq->qdisc, nq->type);
Murat Sezgin7a705422014-01-30 16:09:22 -08001223 atomic_set(&nq->state, NSSQDISC_STATE_READY);
1224}
1225
1226/*
1227 * nssqdisc_node_set_default()
1228 * Configuration function that sets shaper node as default for packet enqueue
1229 */
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001230static int nssqdisc_set_default(struct nssqdisc_qdisc *nq)
Murat Sezgin7a705422014-01-30 16:09:22 -08001231{
Murat Sezgin7a705422014-01-30 16:09:22 -08001232 int32_t state, rc;
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001233 int msg_type;
1234 struct nss_if_msg nim;
Murat Sezgin7a705422014-01-30 16:09:22 -08001235
1236 nssqdisc_info("%s: Setting qdisc %p (type %d) as default\n", __func__,
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001237 nq->qdisc, nq->type);
Murat Sezgin7a705422014-01-30 16:09:22 -08001238
1239 state = atomic_read(&nq->state);
1240 if (state != NSSQDISC_STATE_READY) {
Sakthi Vignesh Radhakrishnanc5a228c2014-05-13 09:31:41 -07001241 nssqdisc_error("%s: Qdisc %p (type %d): qdisc state not ready: %d\n", __func__,
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001242 nq->qdisc, nq->type, state);
Murat Sezgin7a705422014-01-30 16:09:22 -08001243 BUG();
1244 }
1245
1246 /*
1247 * Set shaper node state to IDLE
1248 */
1249 atomic_set(&nq->state, NSSQDISC_STATE_IDLE);
1250
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001251 /*
1252 * Create the shaper configure message and send it down to the NSS interface
1253 */
1254 msg_type = nssqdisc_get_interface_msg(nq->is_bridge, NSSQDISC_IF_SHAPER_CONFIG);
1255 nss_cmn_msg_init(&nim.cm, nq->nss_interface_number, msg_type, sizeof(struct nss_if_msg),
1256 nssqdisc_set_default_callback, nq);
1257 nim.msg.shaper_configure.config.request_type = NSS_SHAPER_CONFIG_TYPE_SET_DEFAULT;
1258 nim.msg.shaper_configure.config.msg.set_default_node.qos_tag = nq->qos_tag;
1259 rc = nss_if_tx_msg(nq->nss_shaping_ctx, &nim);
Murat Sezgin7a705422014-01-30 16:09:22 -08001260
Murat Sezgin7a705422014-01-30 16:09:22 -08001261 if (rc != NSS_TX_SUCCESS) {
1262 nssqdisc_warning("%s: Failed to send set default message for "
1263 "qdisc type %d\n", __func__, nq->type);
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001264 atomic_set(&nq->state, NSSQDISC_STATE_READY);
Murat Sezgin7a705422014-01-30 16:09:22 -08001265 return -1;
1266 }
1267
1268 /*
1269 * Wait until cleanup operation is complete at which point the state
1270 * shall become idle. NOTE: This relies on the NSS driver to be able
1271 * to operate asynchronously which means kernel preemption is required.
1272 */
1273 while (NSSQDISC_STATE_IDLE == (state = atomic_read(&nq->state))) {
1274 yield();
1275 }
1276
1277 if (state == NSSQDISC_STATE_FAILED_RESPONSE) {
1278 nssqdisc_error("%s: Qdisc %p (type %d): failed to default "
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001279 "State: %d\n", __func__, nq->qdisc, nq->type, state);
1280 atomic_set(&nq->state, NSSQDISC_STATE_READY);
Murat Sezgin7a705422014-01-30 16:09:22 -08001281 return -1;
1282 }
1283
1284 nssqdisc_info("%s: Qdisc %p (type %d): shaper node default complete\n",
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001285 __func__, nq->qdisc, nq->type);
Murat Sezgin7a705422014-01-30 16:09:22 -08001286 return 0;
1287}
1288
1289/*
1290 * nssqdisc_node_attach_callback()
1291 * The callback function for a shaper node attach message
1292 */
1293static void nssqdisc_node_attach_callback(void *app_data,
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001294 struct nss_if_msg *nim)
Murat Sezgin7a705422014-01-30 16:09:22 -08001295{
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001296 struct nssqdisc_qdisc *nq = (struct nssqdisc_qdisc *)app_data;
Murat Sezgin7a705422014-01-30 16:09:22 -08001297
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001298 if (nim->cm.response != NSS_CMN_RESPONSE_ACK) {
1299 nssqdisc_info("%s: Qdisc %p (type %d) shaper node attach FAILED - response "
1300 "type: %d\n", __func__, nq->qdisc, nq->type,
1301 nim->msg.shaper_configure.config.response_type);
Murat Sezgin7a705422014-01-30 16:09:22 -08001302 atomic_set(&nq->state, NSSQDISC_STATE_FAILED_RESPONSE);
1303 return;
1304 }
1305
1306 nssqdisc_info("%s: qdisc type %d: %p, attach complete\n", __func__,
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001307 nq->type, nq->qdisc);
Murat Sezgin7a705422014-01-30 16:09:22 -08001308
1309 atomic_set(&nq->state, NSSQDISC_STATE_READY);
1310}
1311
1312/*
1313 * nssqdisc_node_attach()
1314 * Configuration function that helps attach a child shaper node to a parent.
1315 */
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001316static int nssqdisc_node_attach(struct nssqdisc_qdisc *nq,
1317 struct nss_if_msg *nim, int32_t attach_type)
Murat Sezgin7a705422014-01-30 16:09:22 -08001318{
Murat Sezgin7a705422014-01-30 16:09:22 -08001319 int32_t state, rc;
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001320 int msg_type;
Murat Sezgin7a705422014-01-30 16:09:22 -08001321
1322 nssqdisc_info("%s: Qdisc %p (type %d) attaching\n",
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001323 __func__, nq->qdisc, nq->type);
Murat Sezgin7a705422014-01-30 16:09:22 -08001324
1325 state = atomic_read(&nq->state);
1326 if (state != NSSQDISC_STATE_READY) {
1327 nssqdisc_error("%s: Qdisc %p (type %d): not ready, state: %d\n",
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001328 __func__, nq->qdisc, nq->type, state);
Murat Sezgin7a705422014-01-30 16:09:22 -08001329 BUG();
1330 }
1331
1332 /*
1333 * Set shaper node state to IDLE
1334 */
1335 atomic_set(&nq->state, NSSQDISC_STATE_IDLE);
1336
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001337 /*
1338 * Create the shaper configure message and send it down to the NSS interface
1339 */
1340 msg_type = nssqdisc_get_interface_msg(nq->is_bridge, NSSQDISC_IF_SHAPER_CONFIG);
1341 nss_cmn_msg_init(&nim->cm, nq->nss_interface_number, msg_type, sizeof(struct nss_if_msg),
1342 nssqdisc_node_attach_callback, nq);
1343 nim->msg.shaper_configure.config.request_type = attach_type;
1344 rc = nss_if_tx_msg(nq->nss_shaping_ctx, nim);
Murat Sezgin7a705422014-01-30 16:09:22 -08001345
Murat Sezgin7a705422014-01-30 16:09:22 -08001346 if (rc != NSS_TX_SUCCESS) {
1347 nssqdisc_warning("%s: Failed to send configure message for "
1348 "qdisc type %d\n", __func__, nq->type);
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001349 atomic_set(&nq->state, NSSQDISC_STATE_READY);
Murat Sezgin7a705422014-01-30 16:09:22 -08001350 return -1;
1351 }
1352
1353 /*
1354	 * Wait until the attach operation completes, at which point the state
1355	 * moves out of idle. NOTE: This relies on the NSS driver being able to
1356	 * operate asynchronously, which means kernel preemption is required.
1357 */
1358 while (NSSQDISC_STATE_IDLE == (state = atomic_read(&nq->state))) {
1359 yield();
1360 }
1361
1362 if (state == NSSQDISC_STATE_FAILED_RESPONSE) {
1363 nssqdisc_error("%s: Qdisc %p (type %d) failed to attach child "
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001364 "node, State: %d\n", __func__, nq->qdisc, nq->type, state);
1365 atomic_set(&nq->state, NSSQDISC_STATE_READY);
Murat Sezgin7a705422014-01-30 16:09:22 -08001366 return -1;
1367 }
1368
1369 nssqdisc_info("%s: Qdisc %p (type %d): shaper node attach complete\n",
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001370 __func__, nq->qdisc, nq->type);
Murat Sezgin7a705422014-01-30 16:09:22 -08001371 return 0;
1372}
1373
1374/*
1375 * nssqdisc_node_detach_callback()
1376 * The callback function for a shaper node detach message
1377 */
1378static void nssqdisc_node_detach_callback(void *app_data,
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001379 struct nss_if_msg *nim)
Murat Sezgin7a705422014-01-30 16:09:22 -08001380{
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001381 struct nssqdisc_qdisc *nq = (struct nssqdisc_qdisc *)app_data;
Murat Sezgin7a705422014-01-30 16:09:22 -08001382
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001383 if (nim->cm.response != NSS_CMN_RESPONSE_ACK) {
1384 nssqdisc_info("%s: Qdisc %p (type %d): shaper node detach FAILED - response "
1385 "type: %d\n", __func__, nq->qdisc, nq->type,
1386 nim->msg.shaper_configure.config.response_type);
Murat Sezgin7a705422014-01-30 16:09:22 -08001387 atomic_set(&nq->state, NSSQDISC_STATE_FAILED_RESPONSE);
1388 return;
1389 }
1390
1391 nssqdisc_info("%s: Qdisc %p (type %d): detach complete\n",
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001392 __func__, nq->qdisc, nq->type);
Murat Sezgin7a705422014-01-30 16:09:22 -08001393
1394 atomic_set(&nq->state, NSSQDISC_STATE_READY);
1395}
1396
1397/*
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001398 * nssqdisc_node_detach()
Murat Sezgin7a705422014-01-30 16:09:22 -08001399 *	Configuration function that helps detach a child shaper node from its parent.
1400 */
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001401static int nssqdisc_node_detach(struct nssqdisc_qdisc *nq,
1402 struct nss_if_msg *nim, int32_t detach_type)
Murat Sezgin7a705422014-01-30 16:09:22 -08001403{
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001404 int32_t state, rc, msg_type;
Murat Sezgin7a705422014-01-30 16:09:22 -08001405
1406 nssqdisc_info("%s: Qdisc %p (type %d) detaching\n",
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001407 __func__, nq->qdisc, nq->type);
Murat Sezgin7a705422014-01-30 16:09:22 -08001408
1409 state = atomic_read(&nq->state);
1410 if (state != NSSQDISC_STATE_READY) {
1411 nssqdisc_error("%s: Qdisc %p (type %d): not ready, state: %d\n",
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001412 __func__, nq->qdisc, nq->type, state);
Murat Sezgin7a705422014-01-30 16:09:22 -08001413 BUG();
1414 }
1415
1416 /*
1417 * Set shaper node state to IDLE
1418 */
1419 atomic_set(&nq->state, NSSQDISC_STATE_IDLE);
1420
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001421 /*
1422 * Create and send the shaper configure message to the NSS interface
1423 */
1424 msg_type = nssqdisc_get_interface_msg(nq->is_bridge, NSSQDISC_IF_SHAPER_CONFIG);
1425 nss_cmn_msg_init(&nim->cm, nq->nss_interface_number, msg_type, sizeof(struct nss_if_msg),
1426 nssqdisc_node_detach_callback, nq);
1427 nim->msg.shaper_configure.config.request_type = detach_type;
1428 rc = nss_if_tx_msg(nq->nss_shaping_ctx, nim);
Murat Sezgin7a705422014-01-30 16:09:22 -08001429
Murat Sezgin7a705422014-01-30 16:09:22 -08001430 if (rc != NSS_TX_SUCCESS) {
1431 nssqdisc_warning("%s: Qdisc %p (type %d): Failed to send configure "
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001432			"message\n", __func__, nq->qdisc, nq->type);
1433 atomic_set(&nq->state, NSSQDISC_STATE_READY);
Murat Sezgin7a705422014-01-30 16:09:22 -08001434 return -1;
1435 }
1436
1437 /*
1438	 * Wait until the detach operation completes, at which point the state moves out of idle.
1439	 * NOTE: This relies on the NSS driver being able to operate asynchronously, which means
1440	 * kernel preemption is required.
1441 */
1442 while (NSSQDISC_STATE_IDLE == (state = atomic_read(&nq->state))) {
1443 yield();
1444 }
1445
1446 if (state == NSSQDISC_STATE_FAILED_RESPONSE) {
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001447 nssqdisc_error("%s: Qdisc %p (type %d): failed to detach child node, "
1448 "State: %d\n", __func__, nq->qdisc, nq->type, state);
1449 atomic_set(&nq->state, NSSQDISC_STATE_READY);
Murat Sezgin7a705422014-01-30 16:09:22 -08001450 return -1;
1451 }
1452
1453 nssqdisc_info("%s: Qdisc %p (type %d): shaper node detach complete\n",
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001454 __func__, nq->qdisc, nq->type);
Murat Sezgin7a705422014-01-30 16:09:22 -08001455 return 0;
1456}
1457
1458/*
1459 * nssqdisc_configure_callback()
1460 *	The callback function for a shaper node configure message
1461 */
1462static void nssqdisc_configure_callback(void *app_data,
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001463 struct nss_if_msg *nim)
Murat Sezgin7a705422014-01-30 16:09:22 -08001464{
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001465 struct nssqdisc_qdisc *nq = (struct nssqdisc_qdisc *)app_data;
Murat Sezgin7a705422014-01-30 16:09:22 -08001466
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001467 if (nim->cm.response != NSS_CMN_RESPONSE_ACK) {
1468 nssqdisc_info("%s: Qdisc %p (type %d): shaper node configure FAILED "
1469 "response type: %d\n", __func__, nq->qdisc, nq->type,
1470 nim->msg.shaper_configure.config.response_type);
Murat Sezgin7a705422014-01-30 16:09:22 -08001471 atomic_set(&nq->state, NSSQDISC_STATE_FAILED_RESPONSE);
1472 return;
1473 }
1474
1475 nssqdisc_info("%s: Qdisc %p (type %d): configuration complete\n",
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001476 __func__, nq->qdisc, nq->type);
Murat Sezgin7a705422014-01-30 16:09:22 -08001477 atomic_set(&nq->state, NSSQDISC_STATE_READY);
1478}
1479
1480/*
1481 * nssqdisc_configure()
1482 * Configuration function that aids in tuning of queuing parameters.
1483 */
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001484static int nssqdisc_configure(struct nssqdisc_qdisc *nq,
1485 struct nss_if_msg *nim, int32_t config_type)
Murat Sezgin7a705422014-01-30 16:09:22 -08001486{
Murat Sezgin7a705422014-01-30 16:09:22 -08001487 int32_t state, rc;
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001488 int msg_type;
Murat Sezgin7a705422014-01-30 16:09:22 -08001489
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001490 nssqdisc_info("%s: Qdisc %p (type %d) configuring\n", __func__, nq->qdisc, nq->type);
Murat Sezgin7a705422014-01-30 16:09:22 -08001491
1492 state = atomic_read(&nq->state);
1493 if (state != NSSQDISC_STATE_READY) {
1494 nssqdisc_error("%s: Qdisc %p (type %d): not ready for configure, "
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001495 "state : %d\n", __func__, nq->qdisc, nq->type, state);
Murat Sezgin7a705422014-01-30 16:09:22 -08001496 BUG();
1497 }
1498
1499 /*
1500 * Set shaper node state to IDLE
1501 */
1502 atomic_set(&nq->state, NSSQDISC_STATE_IDLE);
1503
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001504 /*
1505 * Create and send the shaper configure message to the NSS interface
1506 */
1507 msg_type = nssqdisc_get_interface_msg(nq->is_bridge, NSSQDISC_IF_SHAPER_CONFIG);
1508 nss_cmn_msg_init(&nim->cm, nq->nss_interface_number, msg_type, sizeof(struct nss_if_msg),
1509 nssqdisc_configure_callback, nq);
1510 nim->msg.shaper_configure.config.request_type = config_type;
1511 rc = nss_if_tx_msg(nq->nss_shaping_ctx, nim);
Murat Sezgin7a705422014-01-30 16:09:22 -08001512
Murat Sezgin7a705422014-01-30 16:09:22 -08001513 if (rc != NSS_TX_SUCCESS) {
1514 nssqdisc_warning("%s: Qdisc %p (type %d): Failed to send configure "
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001515 "message\n", __func__, nq->qdisc, nq->type);
1516 atomic_set(&nq->state, NSSQDISC_STATE_READY);
Murat Sezgin7a705422014-01-30 16:09:22 -08001517 return -1;
1518 }
1519
1520 /*
1521	 * Wait until the configure operation completes, at which point the state
1522	 * moves out of idle. NOTE: This relies on the NSS driver being able to
1523	 * operate asynchronously, which means kernel preemption is required.
1524 */
1525 while (NSSQDISC_STATE_IDLE == (state = atomic_read(&nq->state))) {
1526 yield();
1527 }
1528
1529 if (state == NSSQDISC_STATE_FAILED_RESPONSE) {
1530 nssqdisc_error("%s: Qdisc %p (type %d): failed to configure shaper "
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001531 "node: State: %d\n", __func__, nq->qdisc, nq->type, state);
Murat Sezgin7a705422014-01-30 16:09:22 -08001532 atomic_set(&nq->state, NSSQDISC_STATE_READY);
1533 return -1;
1534 }
1535
1536 nssqdisc_info("%s: Qdisc %p (type %d): shaper node configure complete\n",
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001537 __func__, nq->qdisc, nq->type);
Murat Sezgin7a705422014-01-30 16:09:22 -08001538 return 0;
1539}
1540
1541/*
1542 * nssqdisc_destroy()
1543 * Destroys a shaper in NSS, and the sequence is based on the position of
1544 *	this qdisc (child or root) and the interface to which it is attached.
1545 */
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001546static void nssqdisc_destroy(struct nssqdisc_qdisc *nq)
Murat Sezgin7a705422014-01-30 16:09:22 -08001547{
Murat Sezgin7a705422014-01-30 16:09:22 -08001548 int32_t state;
1549
1550 nssqdisc_info("%s: Qdisc %p (type %d) destroy\n",
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001551 __func__, nq->qdisc, nq->type);
Murat Sezgin7a705422014-01-30 16:09:22 -08001552
1553
1554 state = atomic_read(&nq->state);
1555 if (state != NSSQDISC_STATE_READY) {
1556 nssqdisc_error("%s: Qdisc %p (type %d): destroy not ready, "
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001557 "state: %d\n", __func__, nq->qdisc, nq->type, state);
Murat Sezgin7a705422014-01-30 16:09:22 -08001558 BUG();
1559 }
1560
1561 /*
1562 * How we begin to tidy up depends on whether we are root or child
1563 */
1564 nq->pending_final_state = NSSQDISC_STATE_IDLE;
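	/*
	 * The asynchronous cleanup chain invoked below leaves the qdisc in this
	 * state once its final callback has run; the wait loop further down
	 * spins until that happens.
	 */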
1565 if (nq->is_root) {
1566
1567 /*
1568 * If this is root on a bridge interface, then unassign
1569 * the bshaper from all the attached interfaces.
1570 */
1571 if (nq->is_bridge) {
1572 nssqdisc_info("%s: Qdisc %p (type %d): is root on bridge. Need to "
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001573 "unassign bshapers from its interfaces\n", __func__, nq->qdisc, nq->type);
1574 nssqdisc_refresh_bshaper_assignment(nq->qdisc, NSSQDISC_SCAN_AND_UNASSIGN_BSHAPER);
Murat Sezgin7a705422014-01-30 16:09:22 -08001575 }
1576
1577 /*
1578 * Begin by freeing the root shaper node
1579 */
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001580 nssqdisc_root_cleanup_free_node(nq);
Murat Sezgin7a705422014-01-30 16:09:22 -08001581 } else {
1582 /*
1583 * Begin by freeing the child shaper node
1584 */
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001585 nssqdisc_child_cleanup_free_node(nq);
Murat Sezgin7a705422014-01-30 16:09:22 -08001586 }
1587
1588 /*
1589 * Wait until cleanup operation is complete at which point the state
1590 * shall become idle. NOTE: This relies on the NSS driver to be able
1591 * to operate asynchronously which means kernel preemption is required.
1592 */
1593 while (NSSQDISC_STATE_IDLE != (state = atomic_read(&nq->state))) {
1594 yield();
1595 }
1596
1597 if (nq->destroy_virtual_interface) {
1598		nss_destroy_virt_if(nq->virtual_interface_context);
1599 }
1600
1601 nssqdisc_info("%s: Qdisc %p (type %d): destroy complete\n",
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001602 __func__, nq->qdisc, nq->type);
Murat Sezgin7a705422014-01-30 16:09:22 -08001603}
1604
1605
1606/*
1607 * nssqdisc_init()
1608 * Initializes a shaper in NSS, based on the position of this qdisc (child or root)
1609 *	and whether it is attached to a normal interface or a bridge interface.
1610 */
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001611static int nssqdisc_init(struct Qdisc *sch, struct nssqdisc_qdisc *nq, nss_shaper_node_type_t type, uint32_t classid)
Murat Sezgin7a705422014-01-30 16:09:22 -08001612{
Murat Sezgin7a705422014-01-30 16:09:22 -08001613 struct Qdisc *root;
1614 u32 parent;
1615 nss_tx_status_t rc;
1616 struct net_device *dev;
1617 int32_t state;
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001618 struct nss_if_msg nim;
1619 int msg_type;
Murat Sezgin7a705422014-01-30 16:09:22 -08001620
1621 /*
1622 * Record our qdisc and type in the private region for handy use
1623 */
1624 nq->qdisc = sch;
1625 nq->type = type;
1626
1627 /*
1628	 * We don't have to destroy a virtual interface unless
1629 * we are the ones who created it. So set it to false
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001630 * by default.
Murat Sezgin7a705422014-01-30 16:09:22 -08001631 */
1632 nq->destroy_virtual_interface = false;
1633
1634 /*
1635 * Set shaper node state to IDLE
1636 */
1637 atomic_set(&nq->state, NSSQDISC_STATE_IDLE);
1638
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001639 /*
1640 * If we are a class, then classid is used as the qos tag.
1641 * Else the qdisc handle will be used as the qos tag.
1642 */
1643 if (classid) {
1644 nq->qos_tag = classid;
1645 nq->is_class = true;
1646 } else {
1647 nq->qos_tag = (uint32_t)sch->handle;
1648 nq->is_class = false;
1649 }
1650
1651 /*
1652 * If our parent is TC_H_ROOT and we are not a class, then we are the root qdisc.
1653	 * Note: classes might have their qdisc as root, but we should not set is_root to
1654	 * true for classes. This is why we also check the classid.
1655 */
1656 if ((sch->parent == TC_H_ROOT) && (!nq->is_class)) {
1657 nssqdisc_info("%s: Qdisc %p (type %d) is root\n", __func__, nq->qdisc, nq->type);
1658 nq->is_root = true;
1659 } else {
1660 nssqdisc_info("%s: Qdisc %p (type %d) not root\n", __func__, nq->qdisc, nq->type);
1661 nq->is_root = false;
1662 }
Murat Sezgin7a705422014-01-30 16:09:22 -08001663
1664 /*
1665	 * The root must be an nss type qdisc (unless, of course, we are going to be the root).
1666	 * This is to prevent mixing NSS qdiscs with other types of qdisc.
1667 */
1668 parent = sch->parent;
1669 root = qdisc_root(sch);
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001670
1671 /*
1672 * Get the net device as it will tell us if we are on a bridge,
1673 * or on a net device that is represented by a virtual NSS interface (e.g. WIFI)
1674 */
1675 dev = qdisc_dev(sch);
1676 nssqdisc_info("%s: Qdisc %p (type %d) init dev: %p\n", __func__, nq->qdisc, nq->type, dev);
1677
1678 /*
1679 * Determine if dev is a bridge or not as this determines if we
1680	 * interact with an I shaper or a B shaper.
1681 */
1682 if (dev->priv_flags == IFF_EBRIDGE) {
1683 nssqdisc_info("%s: Qdisc %p (type %d) init qdisc: %p, is bridge\n",
1684 __func__, nq->qdisc, nq->type, nq->qdisc);
1685 nq->is_bridge = true;
1686 } else {
1687 nssqdisc_info("%s: Qdisc %p (type %d) init qdisc: %p, not bridge\n",
1688 __func__, nq->qdisc, nq->type, nq->qdisc);
1689 nq->is_bridge = false;
1690 }
1691
1692 nssqdisc_info("%s: Qdisc %p (type %d) init root: %p, qos tag: %x, "
1693 "parent: %x rootid: %s owner: %p\n", __func__, nq->qdisc, nq->type, root,
1694 nq->qos_tag, parent, root->ops->id, root->ops->owner);
Murat Sezgin7a705422014-01-30 16:09:22 -08001695
1696 if ((parent != TC_H_ROOT) && (root->ops->owner != THIS_MODULE)) {
Sakthi Vignesh Radhakrishnanc5a228c2014-05-13 09:31:41 -07001697 nssqdisc_warning("%s: NSS qdisc %p (type %d) used along with non-NSS qdiscs,"
1698			" or the interface is currently down\n", __func__, nq->qdisc, nq->type);
Murat Sezgin7a705422014-01-30 16:09:22 -08001699 }
1700
1701 /*
1702 * Register for NSS shaping
1703 */
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001704 nq->nss_shaping_ctx = nss_shaper_register_shaping();
Murat Sezgin7a705422014-01-30 16:09:22 -08001705 if (!nq->nss_shaping_ctx) {
1706 nssqdisc_error("%s: no shaping context returned for type %d\n",
1707 __func__, nq->type);
1708 atomic_set(&nq->state, NSSQDISC_STATE_INIT_FAILED);
1709 return -1;
1710 }
1711
1712 /*
Murat Sezgin7a705422014-01-30 16:09:22 -08001713 * If we are not the root qdisc then we have a simple enough job to do
1714 */
1715 if (!nq->is_root) {
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001716 struct nss_if_msg nim_alloc;
Murat Sezgin7a705422014-01-30 16:09:22 -08001717 nssqdisc_info("%s: Qdisc %p (type %d) initializing non-root qdisc\n",
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001718 __func__, nq->qdisc, nq->type);
Murat Sezgin7a705422014-01-30 16:09:22 -08001719
1720 /*
1721 * The device we are operational on MUST be recognised as an NSS interface.
1722 * NOTE: We do NOT support non-NSS known interfaces in this implementation.
1723 * NOTE: This will still work where the dev is registered as virtual, in which case
1724 * nss_interface_number shall indicate a virtual NSS interface.
1725 */
Abhishek Rastogi99714332014-04-02 19:38:12 +05301726 nq->nss_interface_number = nss_cmn_get_interface_number(nq->nss_shaping_ctx, dev);
Murat Sezgin7a705422014-01-30 16:09:22 -08001727 if (nq->nss_interface_number < 0) {
1728 nssqdisc_error("%s: Qdisc %p (type %d) net device unknown to "
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001729 "nss driver %s\n", __func__, nq->qdisc, nq->type, dev->name);
1730 nss_shaper_unregister_shaping(nq->nss_shaping_ctx);
Murat Sezgin7a705422014-01-30 16:09:22 -08001731 atomic_set(&nq->state, NSSQDISC_STATE_INIT_FAILED);
1732 return -1;
1733 }
1734
1735 /*
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001736 * Set the virtual flag
1737 */
1738 nq->is_virtual = nss_cmn_interface_is_virtual(nq->nss_shaping_ctx, nq->nss_interface_number);
1739
1740 /*
Murat Sezgin7a705422014-01-30 16:09:22 -08001741 * Create a shaper node for requested type.
1742 * Essentially all we need to do is create the shaper node.
1743 */
1744 nssqdisc_info("%s: Qdisc %p (type %d) non-root (child) create\n",
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001745 __func__, nq->qdisc, nq->type);
Murat Sezgin7a705422014-01-30 16:09:22 -08001746
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001747 /*
1748 * Create and send the shaper configure message to the interface
1749 */
1750 msg_type = nssqdisc_get_interface_msg(nq->is_bridge, NSSQDISC_IF_SHAPER_CONFIG);
1751 nss_cmn_msg_init(&nim_alloc.cm, nq->nss_interface_number, msg_type, sizeof(struct nss_if_msg),
1752 nssqdisc_child_init_alloc_node_callback, nq);
1753 nim_alloc.msg.shaper_configure.config.request_type = NSS_SHAPER_CONFIG_TYPE_ALLOC_SHAPER_NODE;
1754 nim_alloc.msg.shaper_configure.config.msg.alloc_shaper_node.node_type = nq->type;
1755 nim_alloc.msg.shaper_configure.config.msg.alloc_shaper_node.qos_tag = nq->qos_tag;
1756 rc = nss_if_tx_msg(nq->nss_shaping_ctx, &nim_alloc);
Murat Sezgin7a705422014-01-30 16:09:22 -08001757
Murat Sezgin7a705422014-01-30 16:09:22 -08001758 if (rc != NSS_TX_SUCCESS) {
1759 nssqdisc_error("%s: Qdisc %p (type %d) create command "
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001760 "failed: %d\n", __func__, nq->qdisc, nq->type, rc);
Murat Sezgin7a705422014-01-30 16:09:22 -08001761 nq->pending_final_state = NSSQDISC_STATE_CHILD_ALLOC_SEND_FAIL;
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001762 nssqdisc_child_cleanup_final(nq);
Murat Sezgin7a705422014-01-30 16:09:22 -08001763 return -1;
1764 }
1765
1766 /*
1767 * Wait until init operation is complete.
1768 * NOTE: This relies on the NSS driver to be able to operate
1769 * asynchronously which means kernel preemption is required.
1770 */
1771 while (NSSQDISC_STATE_IDLE == (state = atomic_read(&nq->state))) {
1772 yield();
1773 }
1774 nssqdisc_info("%s: Qdisc %p (type %d): initialised with state: %d\n",
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001775 __func__, nq->qdisc, nq->type, state);
Murat Sezgin7a705422014-01-30 16:09:22 -08001776 if (state > 0) {
1777 return 0;
1778 }
1779 return -1;
1780 }
1781
1782 /*
1783 * Root qdisc has a lot of work to do. It is responsible for setting up
1784 * the shaper and creating the root and default shaper nodes. Also, when
1785 * operating on a bridge, a virtual NSS interface is created to represent
1786 * bridge shaping. Further, when operating on a bridge, we monitor for
1787 * bridge port changes and assign B shapers to the interfaces of the ports.
1788 */
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001789 nssqdisc_info("%s: init qdisc type %d : %p, ROOT\n", __func__, nq->type, nq->qdisc);
Murat Sezgin7a705422014-01-30 16:09:22 -08001790
1791 /*
1792 * Detect if we are operating on a bridge or interface
1793 */
1794 if (nq->is_bridge) {
1795 nssqdisc_info("%s: Qdisc %p (type %d): initializing root qdisc on "
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001796 "bridge\n", __func__, nq->qdisc, nq->type);
Murat Sezgin7a705422014-01-30 16:09:22 -08001797
1798 /*
1799		 * As we are the root qdisc on this bridge, we have to create a
1800 * virtual interface to represent this bridge in the NSS. This will
1801 * allow us to bounce packets to the NSS for bridge shaping action.
1802 * Also set the destroy virtual interface flag so that it is destroyed
1803 * when the module goes down. If this is not done, the OS waits for
1804 * the interface to be released.
1805 */
1806 nq->virtual_interface_context = nss_create_virt_if(dev);
Murat Sezgin7a705422014-01-30 16:09:22 -08001807 if (!nq->virtual_interface_context) {
1808 nssqdisc_error("%s: Qdisc %p (type %d): cannot create virtual "
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001809 "interface\n", __func__, nq->qdisc, nq->type);
1810 nss_shaper_unregister_shaping(nq->nss_shaping_ctx);
Murat Sezgin7a705422014-01-30 16:09:22 -08001811 atomic_set(&nq->state, NSSQDISC_STATE_INIT_FAILED);
1812 return -1;
1813 }
1814 nssqdisc_info("%s: Qdisc %p (type %d): virtual interface registered "
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001815 "in NSS: %p\n", __func__, nq->qdisc, nq->type, nq->virtual_interface_context);
1816
1817 /*
1818 * Get the virtual interface number, and set the related flags
1819 */
Murat Sezgin7a705422014-01-30 16:09:22 -08001820 nq->nss_interface_number = nss_virt_if_get_interface_num(nq->virtual_interface_context);
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001821 nq->destroy_virtual_interface = true;
1822 nq->is_virtual = true;
Murat Sezgin7a705422014-01-30 16:09:22 -08001823 nssqdisc_info("%s: Qdisc %p (type %d) virtual interface number: %d\n",
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001824 __func__, nq->qdisc, nq->type, nq->nss_interface_number);
Murat Sezgin7a705422014-01-30 16:09:22 -08001825
1826 /*
1827 * The root qdisc will get packets enqueued to it, so it must
1828 * register for bridge bouncing as it will be responsible for
1829 * bouncing packets to the NSS for bridge shaping.
1830 */
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001831 nq->bounce_context = nss_shaper_register_shaper_bounce_bridge(nq->nss_interface_number,
1832 nssqdisc_bounce_callback, nq->qdisc, THIS_MODULE);
Murat Sezgin7a705422014-01-30 16:09:22 -08001833 if (!nq->bounce_context) {
Sakthi Vignesh Radhakrishnanc5a228c2014-05-13 09:31:41 -07001834 nssqdisc_error("%s: Qdisc %p (type %d): is root but cannot register "
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001835 "for bridge bouncing\n", __func__, nq->qdisc, nq->type);
Murat Sezgin7a705422014-01-30 16:09:22 -08001836 nss_destroy_virt_if(nq->virtual_interface_context);
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001837 nss_shaper_unregister_shaping(nq->nss_shaping_ctx);
Murat Sezgin7a705422014-01-30 16:09:22 -08001838 atomic_set(&nq->state, NSSQDISC_STATE_INIT_FAILED);
1839 return -1;
1840 }
1841
1842 } else {
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001843 nssqdisc_info("%s: Qdisc %p (type %d): is interface\n", __func__, nq->qdisc, nq->type);
Murat Sezgin7a705422014-01-30 16:09:22 -08001844
1845 /*
1846 * The device we are operational on MUST be recognised as an NSS interface.
1847 * NOTE: We do NOT support non-NSS known interfaces in this basic implementation.
1848 * NOTE: This will still work where the dev is registered as virtual, in which case
1849 * nss_interface_number shall indicate a virtual NSS interface.
1850 */
Abhishek Rastogi99714332014-04-02 19:38:12 +05301851 nq->nss_interface_number = nss_cmn_get_interface_number(nq->nss_shaping_ctx, dev);
Murat Sezgin7a705422014-01-30 16:09:22 -08001852 if (nq->nss_interface_number < 0) {
1853 nssqdisc_error("%s: Qdisc %p (type %d): interface unknown to nss driver %s\n",
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001854 __func__, nq->qdisc, nq->type, dev->name);
1855 nss_shaper_unregister_shaping(nq->nss_shaping_ctx);
Murat Sezgin7a705422014-01-30 16:09:22 -08001856 atomic_set(&nq->state, NSSQDISC_STATE_INIT_FAILED);
1857 return -1;
1858 }
1859
1860 /*
1861 * Is the interface virtual or not?
1862 * NOTE: If this interface is virtual then we have to bounce packets to it for shaping
1863 */
Abhishek Rastogi99714332014-04-02 19:38:12 +05301864 nq->is_virtual = nss_cmn_interface_is_virtual(nq->nss_shaping_ctx, nq->nss_interface_number);
Murat Sezgin7a705422014-01-30 16:09:22 -08001865 if (!nq->is_virtual) {
1866 nssqdisc_info("%s: Qdisc %p (type %d): interface %u is physical\n",
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001867 __func__, nq->qdisc, nq->type, nq->nss_interface_number);
Murat Sezgin7a705422014-01-30 16:09:22 -08001868 } else {
1869 nssqdisc_info("%s: Qdisc %p (type %d): interface %u is virtual\n",
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001870 __func__, nq->qdisc, nq->type, nq->nss_interface_number);
Murat Sezgin7a705422014-01-30 16:09:22 -08001871
1872 /*
1873 * Register for interface bounce shaping.
1874 */
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001875 nq->bounce_context = nss_shaper_register_shaper_bounce_interface(nq->nss_interface_number,
1876 nssqdisc_bounce_callback, nq->qdisc, THIS_MODULE);
Murat Sezgin7a705422014-01-30 16:09:22 -08001877 if (!nq->bounce_context) {
1878 nssqdisc_error("%s: Qdisc %p (type %d): is root but failed "
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001879 "to register for interface bouncing\n", __func__, nq->qdisc, nq->type);
1880 nss_shaper_unregister_shaping(nq->nss_shaping_ctx);
Murat Sezgin7a705422014-01-30 16:09:22 -08001881 atomic_set(&nq->state, NSSQDISC_STATE_INIT_FAILED);
1882 return -1;
1883 }
1884 }
1885 }
1886
1887 /*
1888 * We need to issue a command to establish a shaper on the interface.
1889 */
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001890
1891 /*
1892 * Create and send the shaper assign message to the NSS interface
1893 */
1894 msg_type = nssqdisc_get_interface_msg(nq->is_bridge, NSSQDISC_IF_SHAPER_ASSIGN);
1895 nss_cmn_msg_init(&nim.cm, nq->nss_interface_number, msg_type, sizeof(struct nss_if_msg),
1896 nssqdisc_root_init_shaper_assign_callback, nq);
1897 nim.msg.shaper_assign.shaper_id = 0; /* Any free shaper will do */
1898 rc = nss_if_tx_msg(nq->nss_shaping_ctx, &nim);
1899
Murat Sezgin7a705422014-01-30 16:09:22 -08001900 if (rc != NSS_TX_SUCCESS) {
1901 nssqdisc_error("%s: shaper assign command failed: %d\n", __func__, rc);
1902 nq->pending_final_state = NSSQDISC_STATE_ASSIGN_SHAPER_SEND_FAIL;
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001903 nssqdisc_root_cleanup_final(nq);
1904 /*
1905 * We dont have to clean up the virtual interface, since this is
1906 * taken care of by the nssqdisc_root_cleanup_final() function.
1907 */
Murat Sezgin7a705422014-01-30 16:09:22 -08001908 return -1;
1909 }
1910
1911 /*
1912 * Wait until init operation is complete.
1913 * NOTE: This relies on the NSS driver to be able to operate asynchronously which means
1914 * kernel preemption is required.
1915 */
1916 nssqdisc_info("%s: Qdisc %p (type %d): Waiting on response from NSS for "
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001917 "shaper assign message\n", __func__, nq->qdisc, nq->type);
Murat Sezgin7a705422014-01-30 16:09:22 -08001918 while (NSSQDISC_STATE_IDLE == (state = atomic_read(&nq->state))) {
1919 yield();
1920 }
1921 nssqdisc_info("%s: Qdisc %p (type %d): is initialised with state: %d\n",
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001922 __func__, nq->qdisc, nq->type, state);
Murat Sezgin7a705422014-01-30 16:09:22 -08001923
1924 if (state > 0) {
1925
1926 /*
1927 * Return if this is not a root qdisc on a bridge interface.
1928 */
1929 if (!nq->is_root || !nq->is_bridge) {
1930 return 0;
1931 }
1932
1933 nssqdisc_info("%s: This is a bridge interface. Linking bridge ...\n",
1934 __func__);
1935 /*
1936 * This is a root qdisc added to a bridge interface. Now we go ahead
1937 * and add this B-shaper to interfaces known to the NSS
1938 */
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001939 if (nssqdisc_refresh_bshaper_assignment(nq->qdisc, NSSQDISC_SCAN_AND_ASSIGN_BSHAPER) < 0) {
1940 nssqdisc_destroy(nq);
Sakthi Vignesh Radhakrishnanc5a228c2014-05-13 09:31:41 -07001941 nssqdisc_error("%s: bridge linking failed\n", __func__);
Murat Sezgin7a705422014-01-30 16:09:22 -08001942 return -1;
1943 }
1944 nssqdisc_info("%s: Bridge linking complete\n", __func__);
1945 return 0;
1946 }
1947
1948 /*
1949 * Destroy any virtual interfaces created by us before returning a failure.
1950 */
1951 if (nq->destroy_virtual_interface) {
1952 nss_destroy_virt_if(nq->virtual_interface_context);
1953 }
1954
1955 return -1;
1956}
1957
1958/*
1959 * nssqdisc_basic_stats_callback()
1960 * Invoked after getting basic stats
1961 */
1962static void nssqdisc_basic_stats_callback(void *app_data,
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001963 struct nss_if_msg *nim)
Murat Sezgin7a705422014-01-30 16:09:22 -08001964{
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001965 struct nssqdisc_qdisc *nq = (struct nssqdisc_qdisc *)app_data;
1966 struct Qdisc *qdisc = nq->qdisc;
1967 struct gnet_stats_basic_packed *bstats; /* Basic class statistics */
1968 struct gnet_stats_queue *qstats; /* Qstats for use by classes */
1969 atomic_t *refcnt;
Murat Sezgin7a705422014-01-30 16:09:22 -08001970
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001971 if (nim->cm.response != NSS_CMN_RESPONSE_ACK) {
1972 nssqdisc_info("%s: Qdisc %p (type %d): Receive stats FAILED - "
Murat Sezgin7a705422014-01-30 16:09:22 -08001973			"response type: %d\n", __func__, qdisc, nq->type,
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001974 nim->msg.shaper_configure.config.response_type);
Murat Sezgin7a705422014-01-30 16:09:22 -08001975 atomic_sub(1, &nq->pending_stat_requests);
1976 return;
1977 }
1978
1979 /*
1980 * Record latest basic stats
1981 */
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07001982 nq->basic_stats_latest = nim->msg.shaper_configure.config.msg.shaper_node_basic_stats_get;
1983
1984 /*
1985 * Get the right stats pointers based on whether it is a class
1986 * or a qdisc.
1987 */
1988 if (nq->is_class) {
1989 bstats = &nq->bstats;
1990 qstats = &nq->qstats;
1991 refcnt = &nq->refcnt;
1992 } else {
1993 bstats = &qdisc->bstats;
1994 qstats = &qdisc->qstats;
1995 refcnt = &qdisc->refcnt;
1996 qdisc->q.qlen = nq->basic_stats_latest.qlen_packets;
1997 }
Murat Sezgin7a705422014-01-30 16:09:22 -08001998
2014 /*
Murat Sezgin7a705422014-01-30 16:09:22 -08002015 * Update qdisc->bstats
2016 */
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002017 bstats->bytes += (__u64)nq->basic_stats_latest.delta.dequeued_bytes;
2018 bstats->packets += nq->basic_stats_latest.delta.dequeued_packets;
Murat Sezgin7a705422014-01-30 16:09:22 -08002019
2020 /*
2021 * Update qdisc->qstats
2022 */
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002023 qstats->backlog = nq->basic_stats_latest.qlen_bytes;
Murat Sezgin7a705422014-01-30 16:09:22 -08002024
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002025 qstats->drops += (nq->basic_stats_latest.delta.enqueued_packets_dropped +
Murat Sezgin7a705422014-01-30 16:09:22 -08002026 nq->basic_stats_latest.delta.dequeued_packets_dropped);
2027
2028 /*
2029 * Update qdisc->qstats
2030 */
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002031 qstats->qlen = nq->basic_stats_latest.qlen_packets;
2032 qstats->requeues = 0;
2033 qstats->overlimits += nq->basic_stats_latest.delta.queue_overrun;
Murat Sezgin7a705422014-01-30 16:09:22 -08002034
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002035 if (atomic_read(refcnt) == 0) {
Murat Sezgin7a705422014-01-30 16:09:22 -08002036 atomic_sub(1, &nq->pending_stat_requests);
2037 return;
2038 }
2039
2040 /*
2041	 * Request the stats again after one second.
2042 */
2043 nq->stats_get_timer.expires += HZ;
2044 if (nq->stats_get_timer.expires <= jiffies) {
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002045 nssqdisc_warning("losing time %lu, jiffies = %lu\n",
Murat Sezgin7a705422014-01-30 16:09:22 -08002046 nq->stats_get_timer.expires, jiffies);
2047 nq->stats_get_timer.expires = jiffies + HZ;
2048 }
2049 add_timer(&nq->stats_get_timer);
2050}
2051
2052/*
2053 * nssqdisc_get_stats_timer_callback()
2054 * Invoked periodically to get updated stats
2055 */
2056static void nssqdisc_get_stats_timer_callback(unsigned long int data)
2057{
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002058 struct nssqdisc_qdisc *nq = (struct nssqdisc_qdisc *)data;
Murat Sezgin7a705422014-01-30 16:09:22 -08002059 nss_tx_status_t rc;
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002060 struct nss_if_msg nim;
2061 int msg_type;
Murat Sezgin7a705422014-01-30 16:09:22 -08002062
2063 /*
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002064 * Create and send the shaper configure message to the NSS interface
Murat Sezgin7a705422014-01-30 16:09:22 -08002065 */
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002066 msg_type = nssqdisc_get_interface_msg(nq->is_bridge, NSSQDISC_IF_SHAPER_CONFIG);
2067 nss_cmn_msg_init(&nim.cm, nq->nss_interface_number, msg_type, sizeof(struct nss_if_msg),
2068 nssqdisc_basic_stats_callback, nq);
2069 nim.msg.shaper_configure.config.request_type = NSS_SHAPER_CONFIG_TYPE_SHAPER_NODE_BASIC_STATS_GET;
2070 nim.msg.shaper_configure.config.msg.shaper_node_basic_stats_get.qos_tag = nq->qos_tag;
2071 rc = nss_if_tx_msg(nq->nss_shaping_ctx, &nim);
2072
Murat Sezgin7a705422014-01-30 16:09:22 -08002073 if (rc != NSS_TX_SUCCESS) {
2074 nssqdisc_error("%s: %p: basic stats get failed to send\n",
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002075 __func__, nq->qdisc);
Murat Sezgin7a705422014-01-30 16:09:22 -08002076 atomic_sub(1, &nq->pending_stat_requests);
2077 }
2078}
2079
2080/*
2081 * nssqdisc_start_basic_stats_polling()
2082 * Call to initiate the stats polling timer
2083 */
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002084static void nssqdisc_start_basic_stats_polling(struct nssqdisc_qdisc *nq)
Murat Sezgin7a705422014-01-30 16:09:22 -08002085{
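	/*
	 * Arm a timer that fires roughly once per second. Each expiry requests
	 * fresh basic stats from the NSS, and the stats callback re-arms the
	 * timer. pending_stat_requests lets the stop/destroy path wait for the
	 * last outstanding request to complete.
	 */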
Murat Sezgin7a705422014-01-30 16:09:22 -08002086 init_timer(&nq->stats_get_timer);
2087 nq->stats_get_timer.function = nssqdisc_get_stats_timer_callback;
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002088 nq->stats_get_timer.data = (unsigned long)nq;
Murat Sezgin7a705422014-01-30 16:09:22 -08002089 nq->stats_get_timer.expires = jiffies + HZ;
2090 atomic_set(&nq->pending_stat_requests, 1);
2091 add_timer(&nq->stats_get_timer);
2092}
2093
2094/*
2095 * nssqdisc_stop_basic_stats_polling()
2096 * Call to stop polling of basic stats
2097 */
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002098static void nssqdisc_stop_basic_stats_polling(struct nssqdisc_qdisc *nq)
Murat Sezgin7a705422014-01-30 16:09:22 -08002099{
Murat Sezgin7a705422014-01-30 16:09:22 -08002100 /*
2101 * We wait until we have received the final stats
2102 */
2103 while (atomic_read(&nq->pending_stat_requests) != 0) {
2104 yield();
2105 }
2106}
2107
2108/*
2109 * nssqdisc_if_event_cb()
2110 * Callback function that is registered to listen to events on net_device.
2111 */
2112static int nssqdisc_if_event_cb(struct notifier_block *unused,
2113 unsigned long event, void *ptr)
2114{
2115 struct net_device *dev = (struct net_device *)ptr;
2116 struct net_device *br;
2117 struct Qdisc *br_qdisc;
2118 int if_num, br_num;
2119
2120 switch (event) {
2121 case NETDEV_BR_JOIN:
2122		nssqdisc_info("Received NETDEV_BR_JOIN on interface %s\n",
2123 dev->name);
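		/*
		 * Fall through: join and leave share the handling below, which
		 * attaches or detaches the B-shaper based on the event type.
		 */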
2124 case NETDEV_BR_LEAVE:
2125		nssqdisc_info("Received NETDEV_BR_LEAVE on interface %s\n",
2126 dev->name);
2127 br = dev->master;
Abhishek Rastogi99714332014-04-02 19:38:12 +05302128 if_num = nss_cmn_get_interface_number(nssqdisc_ctx, dev);
Murat Sezgin7a705422014-01-30 16:09:22 -08002129
2130 if (br == NULL || br->priv_flags != IFF_EBRIDGE) {
2131 nssqdisc_error("Sensed bridge activity on interface %s "
2132 "that is not on any bridge\n", dev->name);
2133 break;
2134 }
2135
Abhishek Rastogi99714332014-04-02 19:38:12 +05302136 br_num = nss_cmn_get_interface_number(nssqdisc_ctx, br);
Murat Sezgin7a705422014-01-30 16:09:22 -08002137 br_qdisc = br->qdisc;
2138 /*
2139 * TODO: Properly ensure that the interface and bridge are
2140 * shaped by us.
2141 */
2142 if (if_num < 0 || br_num < 0) {
2143 nssqdisc_info("No action taken since if_num is %d for %s "
2144 "and br_num is %d for bridge %s\n", if_num,
2145 dev->name, br_num, br->name);
2146 break;
2147 }
2148
2149 /*
2150		 * Call attach or detach according to the event type.
2151 */
2152 if (event == NETDEV_BR_JOIN) {
2153 nssqdisc_info("Instructing interface %s to attach to bridge(%s) "
2154 "shaping\n", dev->name, br->name);
2155 nssqdisc_attach_bshaper(br_qdisc, if_num);
2156 } else if (event == NETDEV_BR_LEAVE) {
2157 nssqdisc_info("Instructing interface %s to detach from bridge(%s) "
2158 "shaping\n",dev->name, br->name);
2159 nssqdisc_detach_bshaper(br_qdisc, if_num);
2160 }
2161
2162 break;
2163 default:
2164		nssqdisc_info("Received unhandled event %lu on interface %s\n", event, dev->name);
2165 break;
2166 }
2167
2168 return NOTIFY_DONE;
2169}
2170
2171static struct notifier_block nssqdisc_device_notifier = {
2172 .notifier_call = nssqdisc_if_event_cb };
2173
2174/* =========================== NSSFIFO ========================= */
2175
2176struct nssfifo_sched_data {
2177 struct nssqdisc_qdisc nq; /* Common base class for all nss qdiscs */
2178 u32 limit; /* Queue length in packets */
2179 /* TODO: Support for queue length in bytes */
2180 u8 set_default; /* Flag to set qdisc as default qdisc for enqueue */
2181};
2182
2183static int nssfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
2184{
2185 return nssqdisc_enqueue(skb, sch);
2186}
2187
2188static struct sk_buff *nssfifo_dequeue(struct Qdisc *sch)
2189{
2190 return nssqdisc_dequeue(sch);
2191}
2192
2193static unsigned int nssfifo_drop(struct Qdisc *sch)
2194{
2195 nssqdisc_info("nssfifo dropping");
2196 return nssqdisc_drop(sch);
2197}
2198
2199static void nssfifo_reset(struct Qdisc *sch)
2200{
2201 nssqdisc_info("nssfifo resetting!");
2202 nssqdisc_reset(sch);
2203}
2204
2205static void nssfifo_destroy(struct Qdisc *sch)
2206{
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002207 struct nssqdisc_qdisc *nq = (struct nssqdisc_qdisc *)qdisc_priv(sch);
2208
Murat Sezgin7a705422014-01-30 16:09:22 -08002209 /*
2210 * Stop the polling of basic stats
2211 */
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002212 nssqdisc_stop_basic_stats_polling(nq);
Murat Sezgin7a705422014-01-30 16:09:22 -08002213
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002214 nssqdisc_destroy(nq);
Murat Sezgin7a705422014-01-30 16:09:22 -08002215 nssqdisc_info("nssfifo destroyed");
2216}
2217
2218static const struct nla_policy nssfifo_policy[TCA_NSSFIFO_MAX + 1] = {
2219 [TCA_NSSFIFO_PARMS] = { .len = sizeof(struct tc_nssfifo_qopt) },
2220};
2221
2222static int nssfifo_change(struct Qdisc *sch, struct nlattr *opt)
2223{
2224 struct nssfifo_sched_data *q;
2225 struct nlattr *na[TCA_NSSFIFO_MAX + 1];
2226 struct tc_nssfifo_qopt *qopt;
2227 int err;
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002228 struct nss_if_msg nim;
Murat Sezgin7a705422014-01-30 16:09:22 -08002229
2230 q = qdisc_priv(sch);
2231
2232 if (opt == NULL) {
2233 return -EINVAL;
2234 }
2235
2236 err = nla_parse_nested(na, TCA_NSSFIFO_MAX, opt, nssfifo_policy);
2237 if (err < 0)
2238 return err;
2239
2240 if (na[TCA_NSSFIFO_PARMS] == NULL)
2241 return -EINVAL;
2242
2243 qopt = nla_data(na[TCA_NSSFIFO_PARMS]);
2244
2245 if (!qopt->limit) {
2246 nssqdisc_error("%s: limit must be non-zero\n", __func__);
2247 return -EINVAL;
2248 }
2249
2250 q->limit = qopt->limit;
2251
2252 /*
2253 * Required for basic stats display
2254 */
2255 sch->limit = qopt->limit;
2256
2257 q->set_default = qopt->set_default;
2258 nssqdisc_info("%s: limit:%u set_default:%u\n", __func__, qopt->limit, qopt->set_default);
2259
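	/*
	 * Push the new queue limit down to the shaper node in the NSS; the NSS
	 * fifo is configured here for tail-drop behaviour once the limit is hit.
	 */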
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002260 nim.msg.shaper_configure.config.msg.shaper_node_config.qos_tag = q->nq.qos_tag;
2261 nim.msg.shaper_configure.config.msg.shaper_node_config.snc.fifo_param.limit = q->limit;
2262 nim.msg.shaper_configure.config.msg.shaper_node_config.snc.fifo_param.drop_mode = NSS_SHAPER_FIFO_DROP_MODE_TAIL;
2263 if (nssqdisc_configure(&q->nq, &nim, NSS_SHAPER_CONFIG_TYPE_FIFO_CHANGE_PARAM) < 0) {
2264 nssqdisc_error("%s: nssfifo %p configuration failed\n", __func__, sch);
Murat Sezgin7a705422014-01-30 16:09:22 -08002265 return -EINVAL;
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002266 }
Murat Sezgin7a705422014-01-30 16:09:22 -08002267
2268 /*
2269 * There is nothing we need to do if the qdisc is not
2270 * set as default qdisc.
2271 */
2272 if (q->set_default == 0)
2273 return 0;
2274
2275 /*
2276 * Set this qdisc to be the default qdisc for enqueuing packets.
2277 */
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002278 if (nssqdisc_set_default(&q->nq) < 0) {
2279 nssqdisc_error("%s: nssfifo %p set_default failed\n", __func__, sch);
Murat Sezgin7a705422014-01-30 16:09:22 -08002280 return -EINVAL;
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002281 }
Murat Sezgin7a705422014-01-30 16:09:22 -08002282
2283 nssqdisc_info("%s: nssfifo queue (qos_tag:%u) set as default\n", __func__, q->nq.qos_tag);
2284 return 0;
2285}
2286
2287static int nssfifo_init(struct Qdisc *sch, struct nlattr *opt)
2288{
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002289 struct nssqdisc_qdisc *nq = qdisc_priv(sch);
2290
Murat Sezgin7a705422014-01-30 16:09:22 -08002291 if (opt == NULL)
2292 return -EINVAL;
2293
2294 nssqdisc_info("Initializing Fifo - type %d\n", NSS_SHAPER_NODE_TYPE_FIFO);
2295 nssfifo_reset(sch);
2296
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002297 if (nssqdisc_init(sch, nq, NSS_SHAPER_NODE_TYPE_FIFO, 0) < 0)
Murat Sezgin7a705422014-01-30 16:09:22 -08002298 return -EINVAL;
2299
2300 nssqdisc_info("NSS fifo initialized - handle %x parent %x\n", sch->handle, sch->parent);
2301 if (nssfifo_change(sch, opt) < 0) {
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002302 nssqdisc_destroy(nq);
Murat Sezgin7a705422014-01-30 16:09:22 -08002303 return -EINVAL;
2304 }
2305
2306 /*
2307 * Start the stats polling timer
2308 */
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002309 nssqdisc_start_basic_stats_polling(nq);
Murat Sezgin7a705422014-01-30 16:09:22 -08002310
2311 return 0;
2312}
2313
2314static int nssfifo_dump(struct Qdisc *sch, struct sk_buff *skb)
2315{
2316 struct nssfifo_sched_data *q;
2317 struct nlattr *opts = NULL;
2318 struct tc_nssfifo_qopt opt;
2319
2320 nssqdisc_info("Nssfifo Dumping!");
2321
2322 q = qdisc_priv(sch);
2323 if (q == NULL) {
2324 return -1;
2325 }
2326
2327 opt.limit = q->limit;
2328
2329 opts = nla_nest_start(skb, TCA_OPTIONS);
2330 if (opts == NULL) {
2331 goto nla_put_failure;
2332 }
2333 if (nla_put(skb, TCA_NSSFIFO_PARMS, sizeof(opt), &opt))
2334 goto nla_put_failure;
2335
2336 return nla_nest_end(skb, opts);
2337
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002338nla_put_failure:
Murat Sezgin7a705422014-01-30 16:09:22 -08002339 nla_nest_cancel(skb, opts);
2340 return -EMSGSIZE;
2341}
2342
2343static struct sk_buff *nssfifo_peek(struct Qdisc *sch)
2344{
2345 nssqdisc_info("Nssfifo Peeking");
2346 return nssqdisc_peek(sch);
2347}
2348
2349static struct Qdisc_ops nsspfifo_qdisc_ops __read_mostly = {
2350 .id = "nsspfifo",
2351 .priv_size = sizeof(struct nssfifo_sched_data),
2352 .enqueue = nssfifo_enqueue,
2353 .dequeue = nssfifo_dequeue,
2354 .peek = nssfifo_peek,
2355 .drop = nssfifo_drop,
2356 .init = nssfifo_init,
2357 .reset = nssfifo_reset,
2358 .destroy = nssfifo_destroy,
2359 .change = nssfifo_change,
2360 .dump = nssfifo_dump,
2361 .owner = THIS_MODULE,
2362};
2363
2364static struct Qdisc_ops nssbfifo_qdisc_ops __read_mostly = {
2365 .id = "nssbfifo",
2366 .priv_size = sizeof(struct nssfifo_sched_data),
2367 .enqueue = nssfifo_enqueue,
2368 .dequeue = nssfifo_dequeue,
2369 .peek = nssfifo_peek,
2370 .drop = nssfifo_drop,
2371 .init = nssfifo_init,
2372 .reset = nssfifo_reset,
2373 .destroy = nssfifo_destroy,
2374 .change = nssfifo_change,
2375 .dump = nssfifo_dump,
2376 .owner = THIS_MODULE,
2377};
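
/*
 * Example usage (illustrative only, assuming an iproute2/tc build that carries
 * the matching NSS qdisc patches; exact option names may differ):
 *
 *	tc qdisc add dev eth0 root handle 1: nsspfifo limit 100 set_default
 *
 * This creates a pfifo-style shaper node for eth0 in the NSS and marks it as
 * the default node for enqueue.
 */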
2378
2379/* =========================== NSSCODEL ========================= */
2380
2381struct nsscodel_stats {
2382 u32 peak_queue_delay; /* Peak delay experienced by a dequeued packet */
2383 u32 peak_drop_delay; /* Peak delay experienced by a packet that is dropped */
2384};
2385
2386struct nsscodel_sched_data {
2387 struct nssqdisc_qdisc nq; /* Common base class for all nss qdiscs */
2388 u32 target; /* Acceptable value of queue delay */
2389 u32 limit; /* Length of queue */
2390 u32 interval; /* Monitoring interval */
2391 u8 set_default; /* Flag to set qdisc as default qdisc for enqueue */
2392 struct nsscodel_stats stats; /* Contains nsscodel related stats */
2393};
2394
2395static int nsscodel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
2396{
2397 return nssqdisc_enqueue(skb, sch);
2398}
2399
2400static struct sk_buff *nsscodel_dequeue(struct Qdisc *sch)
2401{
2402 return nssqdisc_dequeue(sch);
2403}
2404
2405static unsigned int nsscodel_drop(struct Qdisc *sch)
2406{
2407 return nssqdisc_drop(sch);
2408}
2409
2410static void nsscodel_reset(struct Qdisc *sch)
2411{
2412 nssqdisc_info("nsscodel resetting!");
2413 nssqdisc_reset(sch);
2414}
2415
2416static void nsscodel_destroy(struct Qdisc *sch)
2417{
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002418 struct nssqdisc_qdisc *nq = qdisc_priv(sch);
Murat Sezgin7a705422014-01-30 16:09:22 -08002419 /*
2420 * Stop the polling of basic stats
2421 */
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002422 nssqdisc_stop_basic_stats_polling(nq);
2423 nssqdisc_destroy(nq);
Murat Sezgin7a705422014-01-30 16:09:22 -08002424 nssqdisc_info("nsscodel destroyed");
2425}
2426
2427static const struct nla_policy nsscodel_policy[TCA_NSSCODEL_MAX + 1] = {
2428 [TCA_NSSCODEL_PARMS] = { .len = sizeof(struct tc_nsscodel_qopt) },
2429};
2430
2431static int nsscodel_change(struct Qdisc *sch, struct nlattr *opt)
2432{
2433 struct nsscodel_sched_data *q;
2434 struct nlattr *na[TCA_NSSCODEL_MAX + 1];
2435 struct tc_nsscodel_qopt *qopt;
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002436 struct nss_if_msg nim;
Murat Sezgin7a705422014-01-30 16:09:22 -08002437 int err;
2438 struct net_device *dev = qdisc_dev(sch);
2439
2440 q = qdisc_priv(sch);
2441
2442 if (opt == NULL)
2443 return -EINVAL;
2444
2445 err = nla_parse_nested(na, TCA_NSSCODEL_MAX, opt, nsscodel_policy);
2446 if (err < 0)
2447 return err;
2448
2449 if (na[TCA_NSSCODEL_PARMS] == NULL)
2450 return -EINVAL;
2451
2452 qopt = nla_data(na[TCA_NSSCODEL_PARMS]);
2453
2454 if (!qopt->target || !qopt->interval || !qopt->limit) {
2455 nssqdisc_error("nsscodel requires a non-zero value for target, "
2456 "interval and limit\n");
2457 return -EINVAL;
2458 }
2459
2460 q->target = qopt->target;
2461 q->limit = qopt->limit;
2462 q->interval = qopt->interval;
2463 q->set_default = qopt->set_default;
2464
2465 /*
2466 * Required for basic stats display
2467 */
2468 sch->limit = qopt->limit;
2469
2470 nssqdisc_info("Target:%u Limit:%u Interval:%u set_default = %u\n",
2471 q->target, q->limit, q->interval, qopt->set_default);
2472
2473
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002474 nim.msg.shaper_configure.config.msg.shaper_node_config.qos_tag = q->nq.qos_tag;
Murat Sezgin7a705422014-01-30 16:09:22 -08002475 /*
2476	 * Target and interval times need to be provided in milliseconds
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002477	 * (tc provides the time in microseconds, so we divide by 1000)
Murat Sezgin7a705422014-01-30 16:09:22 -08002478 */
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002479 nim.msg.shaper_configure.config.msg.shaper_node_config.snc.codel_param.qlen_max = q->limit;
2480 nim.msg.shaper_configure.config.msg.shaper_node_config.snc.codel_param.cap.interval = q->interval/1000;
2481 nim.msg.shaper_configure.config.msg.shaper_node_config.snc.codel_param.cap.target = q->target/1000;
2482 nim.msg.shaper_configure.config.msg.shaper_node_config.snc.codel_param.cap.mtu = psched_mtu(dev);
2483 nssqdisc_info("%s: MTU size of interface %s is %u bytes\n", __func__, dev->name,
2484 nim.msg.shaper_configure.config.msg.shaper_node_config.snc.codel_param.cap.mtu);
Murat Sezgin7a705422014-01-30 16:09:22 -08002485
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002486 if (nssqdisc_configure(&q->nq, &nim,
Murat Sezgin7a705422014-01-30 16:09:22 -08002487 NSS_SHAPER_CONFIG_TYPE_CODEL_CHANGE_PARAM) < 0) {
2488 return -EINVAL;
2489 }
2490
2491 /*
2492 * There is nothing we need to do if the qdisc is not
2493 * set as default qdisc.
2494 */
2495 if (!q->set_default)
2496 return 0;
2497
2498 /*
2499 * Set this qdisc to be the default qdisc for enqueuing packets.
2500 */
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002501 if (nssqdisc_set_default(&q->nq) < 0)
Murat Sezgin7a705422014-01-30 16:09:22 -08002502 return -EINVAL;
2503
2504 return 0;
2505}
2506
2507static int nsscodel_init(struct Qdisc *sch, struct nlattr *opt)
2508{
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002509 struct nssqdisc_qdisc *nq = qdisc_priv(sch);
2510
Murat Sezgin7a705422014-01-30 16:09:22 -08002511 if (opt == NULL)
2512 return -EINVAL;
2513
2514 nsscodel_reset(sch);
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002515 if (nssqdisc_init(sch, nq, NSS_SHAPER_NODE_TYPE_CODEL, 0) < 0)
Murat Sezgin7a705422014-01-30 16:09:22 -08002516 return -EINVAL;
2517
2518 if (nsscodel_change(sch, opt) < 0) {
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002519 nssqdisc_destroy(nq);
Murat Sezgin7a705422014-01-30 16:09:22 -08002520 return -EINVAL;
2521 }
2522
2523 /*
2524 * Start the stats polling timer
2525 */
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002526 nssqdisc_start_basic_stats_polling(nq);
Murat Sezgin7a705422014-01-30 16:09:22 -08002527
2528 return 0;
2529}
2530
2531static int nsscodel_dump(struct Qdisc *sch, struct sk_buff *skb)
2532{
2533 struct nsscodel_sched_data *q;
2534 struct nlattr *opts = NULL;
2535 struct tc_nsscodel_qopt opt;
2536
2537 nssqdisc_info("NssCodel Dumping!");
2538
2539 q = qdisc_priv(sch);
2540 if (q == NULL) {
2541 return -1;
2542 }
2543
2544 opt.target = q->target;
2545 opt.limit = q->limit;
2546 opt.interval = q->interval;
2547 opts = nla_nest_start(skb, TCA_OPTIONS);
2548 if (opts == NULL) {
2549 goto nla_put_failure;
2550 }
2551 if (nla_put(skb, TCA_NSSCODEL_PARMS, sizeof(opt), &opt))
2552 goto nla_put_failure;
2553
2554 return nla_nest_end(skb, opts);
2555
2556nla_put_failure:
2557 nla_nest_cancel(skb, opts);
2558 return -EMSGSIZE;
2559}
2560
2561static int nsscodel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
2562{
2563 struct nsscodel_sched_data *q = qdisc_priv(sch);
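	/*
	 * Export the peak queue and drop latencies last reported by the NSS in
	 * the basic stats as codel-specific xstats.
	 */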
2564 struct tc_nsscodel_xstats st = {
2565 .peak_queue_delay = q->nq.basic_stats_latest.packet_latency_peak_msec_dequeued,
2566 .peak_drop_delay = q->nq.basic_stats_latest.packet_latency_peak_msec_dropped,
2567 };
2568
2569 return gnet_stats_copy_app(d, &st, sizeof(st));
2570}
2571
2572static struct sk_buff *nsscodel_peek(struct Qdisc *sch)
2573{
2574 nssqdisc_info("Nsscodel Peeking");
2575 return nssqdisc_peek(sch);
2576}
2577
2578
2579static struct Qdisc_ops nsscodel_qdisc_ops __read_mostly = {
2580 .id = "nsscodel",
2581 .priv_size = sizeof(struct nsscodel_sched_data),
2582 .enqueue = nsscodel_enqueue,
2583 .dequeue = nsscodel_dequeue,
2584 .peek = nsscodel_peek,
2585 .drop = nsscodel_drop,
2586 .init = nsscodel_init,
2587 .reset = nsscodel_reset,
2588 .destroy = nsscodel_destroy,
2589 .change = nsscodel_change,
2590 .dump = nsscodel_dump,
2591 .dump_stats = nsscodel_dump_stats,
2592 .owner = THIS_MODULE,
2593};
2594
2595/* =========================== NSSTBL ========================= */
2596
2597struct nsstbl_sched_data {
2598 struct nssqdisc_qdisc nq; /* Common base class for all nss qdiscs */
2599 u32 rate; /* Limiting rate of TBL */
2600 u32 peakrate; /* Maximum rate to control bursts */
2601 u32 burst; /* Maximum allowed burst size */
2602 u32 mtu; /* MTU of the interface attached to */
Murat Sezgin7a705422014-01-30 16:09:22 -08002603 struct Qdisc *qdisc; /* Qdisc to which it is attached to */
2604};
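
/*
 * Illustrative usage, assuming the matching nss qdisc support in tc (the
 * option names shown here may differ from what the tc patches actually
 * expose):
 *
 *	tc qdisc add dev eth0 root handle 1: nsstbl rate 10Mbit burst 16k
 *
 * A child qdisc (e.g. nsscodel or nssprio) can then be grafted under handle
 * 1:. The rate, burst and mtu values are programmed into the CIR bucket of
 * the NSS TBL shaper node, and peakrate (when non-zero) into the PIR bucket;
 * see nsstbl_change() below.
 */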
2605
2606
2607static int nsstbl_enqueue(struct sk_buff *skb, struct Qdisc *sch)
2608{
2609 return nssqdisc_enqueue(skb, sch);
2610}
2611
2612static struct sk_buff *nsstbl_dequeue(struct Qdisc *sch)
2613{
2614 return nssqdisc_dequeue(sch);
2615}
2616
2617static unsigned int nsstbl_drop(struct Qdisc *sch)
2618{
2619 return nssqdisc_drop(sch);
2620}
2621
2622static struct sk_buff *nsstbl_peek(struct Qdisc *sch)
2623{
2624 return nssqdisc_peek(sch);
2625}
2626
2627static void nsstbl_reset(struct Qdisc *sch)
2628{
2629 nssqdisc_reset(sch);
2630}
2631
2632static void nsstbl_destroy(struct Qdisc *sch)
2633{
2634 struct nsstbl_sched_data *q = qdisc_priv(sch);
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002635 struct nss_if_msg nim;
2636
2637 /*
2638 * We must always detach our child node in NSS before destroying it.
2639 * Also, we make sure we dont send down the command for noop qdiscs.
2640 */
2641 if (q->qdisc != &noop_qdisc) {
2642 nim.msg.shaper_configure.config.msg.shaper_node_config.qos_tag = q->nq.qos_tag;
2643 if (nssqdisc_node_detach(&q->nq, &nim,
2644 NSS_SHAPER_CONFIG_TYPE_TBL_DETACH) < 0) {
2645 nssqdisc_error("%s: Failed to detach child %x from nsstbl %x\n",
2646 __func__, q->qdisc->handle, q->nq.qos_tag);
2647 return;
2648 }
2649 }
2650
2651 /*
2652 * Now we can destroy our child qdisc
2653 */
Murat Sezgin7a705422014-01-30 16:09:22 -08002654 qdisc_destroy(q->qdisc);
2655
2656 /*
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002657 * Stop the polling of basic stats and destroy qdisc.
Murat Sezgin7a705422014-01-30 16:09:22 -08002658 */
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002659 nssqdisc_stop_basic_stats_polling(&q->nq);
2660 nssqdisc_destroy(&q->nq);
Murat Sezgin7a705422014-01-30 16:09:22 -08002661}
2662
2663static const struct nla_policy nsstbl_policy[TCA_NSSTBL_MAX + 1] = {
2664 [TCA_NSSTBL_PARMS] = { .len = sizeof(struct tc_nsstbl_qopt) },
2665};
2666
2667static int nsstbl_change(struct Qdisc *sch, struct nlattr *opt)
2668{
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002669 struct nsstbl_sched_data *q = qdisc_priv(sch);
Murat Sezgin7a705422014-01-30 16:09:22 -08002670 struct nlattr *na[TCA_NSSTBL_MAX + 1];
2671 struct tc_nsstbl_qopt *qopt;
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002672 struct nss_if_msg nim;
Murat Sezgin7a705422014-01-30 16:09:22 -08002673 int err;
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002674 struct net_device *dev = qdisc_dev(sch);
Murat Sezgin7a705422014-01-30 16:09:22 -08002675
2676 if (opt == NULL)
2677 return -EINVAL;
2678
2679 err = nla_parse_nested(na, TCA_NSSTBL_MAX, opt, nsstbl_policy);
2680 if (err < 0)
2681 return err;
2682
2683 if (na[TCA_NSSTBL_PARMS] == NULL)
2684 return -EINVAL;
2685
2686 qopt = nla_data(na[TCA_NSSTBL_PARMS]);
2687
	/*
	 * Set the MTU if it wasn't specified explicitly
	 */
2691 if (!qopt->mtu) {
2692 qopt->mtu = psched_mtu(dev);
2693 nssqdisc_info("MTU not provided for nsstbl. Setting it to %s's default %u bytes\n", dev->name, qopt->mtu);
2694 }
2695
2696 /*
Murat Sezgin7a705422014-01-30 16:09:22 -08002697 * Burst size cannot be less than MTU
2698 */
2699 if (qopt->burst < qopt->mtu) {
2700 nssqdisc_error("Burst size: %u is less than the specified MTU: %u\n", qopt->burst, qopt->mtu);
2701 return -EINVAL;
2702 }
2703
2704 /*
Murat Sezgin7a705422014-01-30 16:09:22 -08002705 * Rate can be zero. Therefore we dont do a check on it.
2706 */
2707 q->rate = qopt->rate;
2708 nssqdisc_info("Rate = %u", qopt->rate);
2709 q->burst = qopt->burst;
2710 nssqdisc_info("Burst = %u", qopt->burst);
2711 q->mtu = qopt->mtu;
2712 nssqdisc_info("MTU = %u", qopt->mtu);
2713 q->peakrate = qopt->peakrate;
2714 nssqdisc_info("Peak Rate = %u", qopt->peakrate);
2715
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002716 nim.msg.shaper_configure.config.msg.shaper_node_config.qos_tag = q->nq.qos_tag;
2717 nim.msg.shaper_configure.config.msg.shaper_node_config.snc.tbl_param.lap_cir.rate = q->rate;
2718 nim.msg.shaper_configure.config.msg.shaper_node_config.snc.tbl_param.lap_cir.burst = q->burst;
2719 nim.msg.shaper_configure.config.msg.shaper_node_config.snc.tbl_param.lap_cir.max_size = q->mtu;
2720 nim.msg.shaper_configure.config.msg.shaper_node_config.snc.tbl_param.lap_cir.short_circuit = false;
2721 nim.msg.shaper_configure.config.msg.shaper_node_config.snc.tbl_param.lap_pir.rate = q->peakrate;
Murat Sezgin7a705422014-01-30 16:09:22 -08002722
2723 /*
2724 * It is important to set these two parameters to be the same as MTU.
2725 * This ensures bursts from CIR dont go above the specified peakrate.
2726 */
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002727 nim.msg.shaper_configure.config.msg.shaper_node_config.snc.tbl_param.lap_pir.burst = q->mtu;
2728 nim.msg.shaper_configure.config.msg.shaper_node_config.snc.tbl_param.lap_pir.max_size = q->mtu;
Murat Sezgin7a705422014-01-30 16:09:22 -08002729
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002730 /*
2731 * We can short circuit peakrate limiter if it is not being configured.
2732 */
Murat Sezgin7a705422014-01-30 16:09:22 -08002733 if (q->peakrate) {
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002734 nim.msg.shaper_configure.config.msg.shaper_node_config.snc.tbl_param.lap_pir.short_circuit = false;
Murat Sezgin7a705422014-01-30 16:09:22 -08002735 } else {
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002736 nim.msg.shaper_configure.config.msg.shaper_node_config.snc.tbl_param.lap_pir.short_circuit = true;
Murat Sezgin7a705422014-01-30 16:09:22 -08002737 }
2738
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002739 if (nssqdisc_configure(&q->nq, &nim, NSS_SHAPER_CONFIG_TYPE_TBL_CHANGE_PARAM) < 0) {
Murat Sezgin7a705422014-01-30 16:09:22 -08002740 return -EINVAL;
2741 }
2742
2743 return 0;
2744}
2745
2746static int nsstbl_init(struct Qdisc *sch, struct nlattr *opt)
2747{
2748 struct nsstbl_sched_data *q = qdisc_priv(sch);
2749
2750 if (opt == NULL)
2751 return -EINVAL;
2752
2753 q->qdisc = &noop_qdisc;
2754
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002755 if (nssqdisc_init(sch, &q->nq, NSS_SHAPER_NODE_TYPE_TBL, 0) < 0)
Murat Sezgin7a705422014-01-30 16:09:22 -08002756 return -EINVAL;
2757
2758 if (nsstbl_change(sch, opt) < 0) {
2759 nssqdisc_info("Failed to configure tbl\n");
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002760 nssqdisc_destroy(&q->nq);
Murat Sezgin7a705422014-01-30 16:09:22 -08002761 return -EINVAL;
2762 }
2763
2764 /*
2765 * Start the stats polling timer
2766 */
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002767 nssqdisc_start_basic_stats_polling(&q->nq);
Murat Sezgin7a705422014-01-30 16:09:22 -08002768
2769 return 0;
2770}
2771
2772static int nsstbl_dump(struct Qdisc *sch, struct sk_buff *skb)
2773{
2774 struct nsstbl_sched_data *q = qdisc_priv(sch);
2775 struct nlattr *opts = NULL;
2776 struct tc_nsstbl_qopt opt = {
2777 .rate = q->rate,
2778 .peakrate = q->peakrate,
2779 .burst = q->burst,
2780 .mtu = q->mtu,
2781 };
2782
2783 nssqdisc_info("Nsstbl dumping");
2784 opts = nla_nest_start(skb, TCA_OPTIONS);
2785 if (opts == NULL)
2786 goto nla_put_failure;
2787 NLA_PUT(skb, TCA_NSSTBL_PARMS, sizeof(opt), &opt);
2788 return nla_nest_end(skb, opts);
2789
2790nla_put_failure:
2791 nla_nest_cancel(skb, opts);
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002792 return -EMSGSIZE;
Murat Sezgin7a705422014-01-30 16:09:22 -08002793}
2794
2795static int nsstbl_dump_class(struct Qdisc *sch, unsigned long cl,
2796 struct sk_buff *skb, struct tcmsg *tcm)
2797{
2798 struct nsstbl_sched_data *q = qdisc_priv(sch);
2799 nssqdisc_info("Nsstbl dumping class");
2800
2801 tcm->tcm_handle |= TC_H_MIN(1);
2802 tcm->tcm_info = q->qdisc->handle;
2803
2804 return 0;
2805}
2806
2807static int nsstbl_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
2808 struct Qdisc **old)
2809{
2810 struct nsstbl_sched_data *q = qdisc_priv(sch);
2811 struct nssqdisc_qdisc *nq_new = (struct nssqdisc_qdisc *)qdisc_priv(new);
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002812 struct nss_if_msg nim_attach;
2813 struct nss_if_msg nim_detach;
Murat Sezgin7a705422014-01-30 16:09:22 -08002814
2815 if (new == NULL)
2816 new = &noop_qdisc;
2817
2818 sch_tree_lock(sch);
2819 *old = q->qdisc;
2820 q->qdisc = new;
2821 qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
2822 qdisc_reset(*old);
2823 sch_tree_unlock(sch);
2824
2825 nssqdisc_info("%s:Grafting old: %p with new: %p\n", __func__, *old, new);
2826 if (*old != &noop_qdisc) {
2827 nssqdisc_info("%s: Detaching old: %p\n", __func__, *old);
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002828 nim_detach.msg.shaper_configure.config.msg.shaper_node_config.qos_tag = q->nq.qos_tag;
2829 if (nssqdisc_node_detach(&q->nq, &nim_detach,
Murat Sezgin7a705422014-01-30 16:09:22 -08002830 NSS_SHAPER_CONFIG_TYPE_TBL_DETACH) < 0) {
2831 return -EINVAL;
2832 }
2833 }
2834
2835 if (new != &noop_qdisc) {
2836 nssqdisc_info("%s: Attaching new: %p\n", __func__, new);
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002837 nim_attach.msg.shaper_configure.config.msg.shaper_node_config.qos_tag = q->nq.qos_tag;
2838 nim_attach.msg.shaper_configure.config.msg.shaper_node_config.snc.tbl_attach.child_qos_tag = nq_new->qos_tag;
2839 if (nssqdisc_node_attach(&q->nq, &nim_attach,
Murat Sezgin7a705422014-01-30 16:09:22 -08002840 NSS_SHAPER_CONFIG_TYPE_TBL_ATTACH) < 0) {
2841 return -EINVAL;
2842 }
2843 }
2844
2845 nssqdisc_info("Nsstbl grafted");
2846
2847 return 0;
2848}
2849
2850static struct Qdisc *nsstbl_leaf(struct Qdisc *sch, unsigned long arg)
2851{
2852 struct nsstbl_sched_data *q = qdisc_priv(sch);
2853 nssqdisc_info("Nsstbl returns leaf");
2854 return q->qdisc;
2855}
2856
2857static unsigned long nsstbl_get(struct Qdisc *sch, u32 classid)
2858{
2859 return 1;
2860}
2861
2862static void nsstbl_put(struct Qdisc *sch, unsigned long arg)
2863{
2864}
2865
2866static void nsstbl_walk(struct Qdisc *sch, struct qdisc_walker *walker)
2867{
2868 nssqdisc_info("Nsstbl walk called");
2869 if (!walker->stop) {
2870 if (walker->count >= walker->skip)
2871 if (walker->fn(sch, 1, walker) < 0) {
2872 walker->stop = 1;
2873 return;
2874 }
2875 walker->count++;
2876 }
2877}
2878
2879static const struct Qdisc_class_ops nsstbl_class_ops = {
2880 .graft = nsstbl_graft,
2881 .leaf = nsstbl_leaf,
2882 .get = nsstbl_get,
2883 .put = nsstbl_put,
2884 .walk = nsstbl_walk,
2885 .dump = nsstbl_dump_class,
2886};
2887
2888static struct Qdisc_ops nsstbl_qdisc_ops __read_mostly = {
2889 .next = NULL,
2890 .id = "nsstbl",
2891 .priv_size = sizeof(struct nsstbl_sched_data),
2892 .cl_ops = &nsstbl_class_ops,
2893 .enqueue = nsstbl_enqueue,
2894 .dequeue = nsstbl_dequeue,
2895 .peek = nsstbl_peek,
2896 .drop = nsstbl_drop,
2897 .init = nsstbl_init,
2898 .reset = nsstbl_reset,
2899 .destroy = nsstbl_destroy,
2900 .change = nsstbl_change,
2901 .dump = nsstbl_dump,
2902 .owner = THIS_MODULE,
2903};
2904
2905/* =========================== NSSPRIO ========================= */
2906
2907struct nssprio_sched_data {
2908 struct nssqdisc_qdisc nq; /* Common base class for all nss qdiscs */
2909 int bands; /* Number of priority bands to use */
2910 struct Qdisc *queues[TCA_NSSPRIO_MAX_BANDS];
2911 /* Array of child qdisc holder */
2912};
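
/*
 * Each band of an nssprio qdisc is backed by a separate child qdisc, and the
 * band index doubles as the NSS priority value when children are attached to
 * or detached from the prio shaper node (see nssprio_graft()). Illustrative
 * setup, assuming the matching tc support:
 *
 *	tc qdisc add dev eth0 root handle 1: nssprio bands 3
 */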
2913
2914static int nssprio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
2915{
2916 return nssqdisc_enqueue(skb, sch);
2917}
2918
2919static struct sk_buff *nssprio_dequeue(struct Qdisc *sch)
2920{
2921 return nssqdisc_dequeue(sch);
2922}
2923
2924static unsigned int nssprio_drop(struct Qdisc *sch)
2925{
2926 return nssqdisc_drop(sch);
2927}
2928
2929static struct sk_buff *nssprio_peek(struct Qdisc *sch)
2930{
2931 return nssqdisc_peek(sch);
2932}
2933
2934static void nssprio_reset(struct Qdisc *sch)
2935{
2936 return nssqdisc_reset(sch);
2937}
2938
2939static void nssprio_destroy(struct Qdisc *sch)
2940{
2941 struct nssprio_sched_data *q = qdisc_priv(sch);
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002942 struct nss_if_msg nim;
Murat Sezgin7a705422014-01-30 16:09:22 -08002943 int i;
2944
2945 nssqdisc_info("Destroying prio");
2946
2947 /*
2948 * Destroy all attached child nodes before destroying prio
2949 */
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002950 for (i = 0; i < q->bands; i++) {
2951
2952 /*
2953 * We always detach the shaper in NSS before destroying it.
2954 * It is very important to check for noop qdisc since those dont
2955 * exist in the NSS.
2956 */
2957 if (q->queues[i] != &noop_qdisc) {
2958 nim.msg.shaper_configure.config.msg.shaper_node_config.qos_tag = q->nq.qos_tag;
2959 nim.msg.shaper_configure.config.msg.shaper_node_config.snc.prio_detach.priority = i;
2960 if (nssqdisc_node_detach(&q->nq, &nim,
2961 NSS_SHAPER_CONFIG_TYPE_PRIO_DETACH) < 0) {
2962 nssqdisc_error("%s: Failed to detach child in band %d from prio %x\n",
2963 __func__, i, q->nq.qos_tag);
2964 return;
2965 }
2966 }
2967
2968 /*
2969 * We can now destroy it
2970 */
Murat Sezgin7a705422014-01-30 16:09:22 -08002971 qdisc_destroy(q->queues[i]);
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002972 }
Murat Sezgin7a705422014-01-30 16:09:22 -08002973
2974 /*
2975 * Stop the polling of basic stats
2976 */
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002977 nssqdisc_stop_basic_stats_polling(&q->nq);
Murat Sezgin7a705422014-01-30 16:09:22 -08002978
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07002979 /*
2980 * Destroy the qdisc in NSS
2981 */
2982 nssqdisc_destroy(&q->nq);
Murat Sezgin7a705422014-01-30 16:09:22 -08002983}
2984
static const struct nla_policy nssprio_policy[TCA_NSSPRIO_MAX + 1] = {
	[TCA_NSSPRIO_PARMS] = { .len = sizeof(struct tc_nssprio_qopt) },
2987};
2988
2989static int nssprio_change(struct Qdisc *sch, struct nlattr *opt)
2990{
2991 struct nssprio_sched_data *q;
	struct nlattr *na[TCA_NSSPRIO_MAX + 1];
2993 struct tc_nssprio_qopt *qopt;
2994 int err;
2995
2996 q = qdisc_priv(sch);
2997
2998 if (opt == NULL) {
2999 return -EINVAL;
3000 }
3001
3002 err = nla_parse_nested(na, TCA_NSSPRIO_MAX, opt, nssprio_policy);
3003 if (err < 0) {
3004 return err;
3005 }
3006
3007 if (na[TCA_NSSPRIO_PARMS] == NULL) {
3008 return -EINVAL;
3009 }
3010
3011 qopt = nla_data(na[TCA_NSSPRIO_PARMS]);
3012
3013 if (qopt->bands > TCA_NSSPRIO_MAX_BANDS) {
3014 return -EINVAL;
3015 }
3016
3017 q->bands = qopt->bands;
3018 nssqdisc_info("Bands = %u\n", qopt->bands);
3019
3020 return 0;
3021}
3022
3023static int nssprio_init(struct Qdisc *sch, struct nlattr *opt)
3024{
3025 struct nssprio_sched_data *q = qdisc_priv(sch);
3026 int i;
3027
3028 if (opt == NULL)
3029 return -EINVAL;
3030
3031 for (i = 0; i < TCA_NSSPRIO_MAX_BANDS; i++)
3032 q->queues[i] = &noop_qdisc;
3033
3034 q->bands = 0;
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07003035 if (nssqdisc_init(sch, &q->nq, NSS_SHAPER_NODE_TYPE_PRIO, 0) < 0)
Murat Sezgin7a705422014-01-30 16:09:22 -08003036 return -EINVAL;
3037
3038 nssqdisc_info("Nssprio initialized - handle %x parent %x\n",
3039 sch->handle, sch->parent);
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07003040
Murat Sezgin7a705422014-01-30 16:09:22 -08003041 if (nssprio_change(sch, opt) < 0) {
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07003042 nssqdisc_destroy(&q->nq);
Murat Sezgin7a705422014-01-30 16:09:22 -08003043 return -EINVAL;
3044 }
3045
3046 /*
3047 * Start the stats polling timer
3048 */
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07003049 nssqdisc_start_basic_stats_polling(&q->nq);
Murat Sezgin7a705422014-01-30 16:09:22 -08003050 return 0;
3051}
3052
3053static int nssprio_dump(struct Qdisc *sch, struct sk_buff *skb)
3054{
3055 struct nssprio_sched_data *q = qdisc_priv(sch);
3056 struct nlattr *opts = NULL;
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07003057 struct tc_nssprio_qopt qopt;
Murat Sezgin7a705422014-01-30 16:09:22 -08003058
3059 nssqdisc_info("Nssprio dumping");
3060 qopt.bands = q->bands;
3061
3062 opts = nla_nest_start(skb, TCA_OPTIONS);
3063 if (opts == NULL)
3064 goto nla_put_failure;
3065 NLA_PUT(skb, TCA_NSSPRIO_PARMS, sizeof(qopt), &qopt);
3066 return nla_nest_end(skb, opts);
3067
3068nla_put_failure:
3069 nla_nest_cancel(skb, opts);
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07003070 return -EMSGSIZE;
Murat Sezgin7a705422014-01-30 16:09:22 -08003071}
3072
3073static int nssprio_graft(struct Qdisc *sch, unsigned long arg,
3074 struct Qdisc *new, struct Qdisc **old)
3075{
3076 struct nssprio_sched_data *q = qdisc_priv(sch);
3077 struct nssqdisc_qdisc *nq_new = (struct nssqdisc_qdisc *)qdisc_priv(new);
3078 uint32_t band = (uint32_t)(arg - 1);
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07003079 struct nss_if_msg nim_attach;
3080 struct nss_if_msg nim_detach;
Murat Sezgin7a705422014-01-30 16:09:22 -08003081
3082 nssqdisc_info("Grafting band %u, available bands %u\n", band, q->bands);
3083
3084 if (new == NULL)
3085 new = &noop_qdisc;
3086
3087 if (band > q->bands)
3088 return -EINVAL;
3089
3090 sch_tree_lock(sch);
3091 *old = q->queues[band];
3092 q->queues[band] = new;
3093 qdisc_reset(*old);
3094 sch_tree_unlock(sch);
3095
3096 nssqdisc_info("%s:Grafting old: %p with new: %p\n", __func__, *old, new);
3097 if (*old != &noop_qdisc) {
3098 nssqdisc_info("%s:Detaching old: %p\n", __func__, *old);
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07003099 nim_detach.msg.shaper_configure.config.msg.shaper_node_config.qos_tag = q->nq.qos_tag;
3100 nim_detach.msg.shaper_configure.config.msg.shaper_node_config.snc.prio_detach.priority = band;
3101 if (nssqdisc_node_detach(&q->nq, &nim_detach,
Murat Sezgin7a705422014-01-30 16:09:22 -08003102 NSS_SHAPER_CONFIG_TYPE_PRIO_DETACH) < 0) {
3103 return -EINVAL;
3104 }
3105 }
3106
3107 if (new != &noop_qdisc) {
3108 nssqdisc_info("%s:Attaching new child with qos tag: %x, priority: %u to "
3109 "qos_tag: %x\n", __func__, nq_new->qos_tag, band, q->nq.qos_tag);
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07003110 nim_attach.msg.shaper_configure.config.msg.shaper_node_config.qos_tag = q->nq.qos_tag;
3111 nim_attach.msg.shaper_configure.config.msg.shaper_node_config.snc.prio_attach.child_qos_tag = nq_new->qos_tag;
3112 nim_attach.msg.shaper_configure.config.msg.shaper_node_config.snc.prio_attach.priority = band;
3113 if (nssqdisc_node_attach(&q->nq, &nim_attach,
Murat Sezgin7a705422014-01-30 16:09:22 -08003114 NSS_SHAPER_CONFIG_TYPE_PRIO_ATTACH) < 0) {
3115 return -EINVAL;
3116 }
3117 }
3118 nssqdisc_info("Nssprio grafted");
3119
3120 return 0;
3121}
3122
3123static struct Qdisc *nssprio_leaf(struct Qdisc *sch, unsigned long arg)
3124{
3125 struct nssprio_sched_data *q = qdisc_priv(sch);
3126 uint32_t band = (uint32_t)(arg - 1);
3127
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07003128 nssqdisc_info("Nssprio returns leaf\n");
Murat Sezgin7a705422014-01-30 16:09:22 -08003129
3130 if (band > q->bands)
3131 return NULL;
3132
3133 return q->queues[band];
3134}
3135
3136static unsigned long nssprio_get(struct Qdisc *sch, u32 classid)
3137{
3138 struct nssprio_sched_data *q = qdisc_priv(sch);
3139 unsigned long band = TC_H_MIN(classid);
3140
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07003141 nssqdisc_info("Inside get. Handle - %x Classid - %x Band %lu Available band %u\n", sch->handle, classid, band, q->bands);
Murat Sezgin7a705422014-01-30 16:09:22 -08003142
3143 if (band > q->bands)
3144 return 0;
3145
3146 return band;
3147}
3148
3149static void nssprio_put(struct Qdisc *sch, unsigned long arg)
3150{
	nssqdisc_info("Inside prio put\n");
3152}
3153
3154static void nssprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
3155{
3156 struct nssprio_sched_data *q = qdisc_priv(sch);
3157 int i;
3158
3159 if (arg->stop)
3160 return;
3161
3162 for (i = 0; i < q->bands; i++) {
3163 if (arg->count < arg->skip) {
3164 arg->count++;
3165 continue;
3166 }
3167 if (arg->fn(sch, i + 1, arg) < 0) {
3168 arg->stop = 1;
3169 break;
3170 }
3171 arg->count++;
3172 }
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07003173 nssqdisc_info("Nssprio walk called\n");
Murat Sezgin7a705422014-01-30 16:09:22 -08003174}
3175
3176static int nssprio_dump_class(struct Qdisc *sch, unsigned long cl,
3177 struct sk_buff *skb, struct tcmsg *tcm)
3178{
3179 struct nssprio_sched_data *q = qdisc_priv(sch);
3180
3181 tcm->tcm_handle |= TC_H_MIN(cl);
3182 tcm->tcm_info = q->queues[cl - 1]->handle;
3183
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07003184 nssqdisc_info("Nssprio dumping class\n");
Murat Sezgin7a705422014-01-30 16:09:22 -08003185 return 0;
3186}
3187
3188static int nssprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
3189 struct gnet_dump *d)
3190{
3191 struct nssprio_sched_data *q = qdisc_priv(sch);
3192 struct Qdisc *cl_q;
3193
3194 cl_q = q->queues[cl - 1];
3195 cl_q->qstats.qlen = cl_q->q.qlen;
3196 if (gnet_stats_copy_basic(d, &cl_q->bstats) < 0 ||
3197 gnet_stats_copy_queue(d, &cl_q->qstats) < 0)
3198 return -1;
3199
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07003200 nssqdisc_info("Nssprio dumping class stats\n");
Murat Sezgin7a705422014-01-30 16:09:22 -08003201 return 0;
3202}
3203
3204static const struct Qdisc_class_ops nssprio_class_ops = {
3205 .graft = nssprio_graft,
3206 .leaf = nssprio_leaf,
3207 .get = nssprio_get,
3208 .put = nssprio_put,
3209 .walk = nssprio_walk,
3210 .dump = nssprio_dump_class,
3211 .dump_stats = nssprio_dump_class_stats,
3212};
3213
3214static struct Qdisc_ops nssprio_qdisc_ops __read_mostly = {
3215 .next = NULL,
3216 .id = "nssprio",
3217 .priv_size = sizeof(struct nssprio_sched_data),
3218 .cl_ops = &nssprio_class_ops,
3219 .enqueue = nssprio_enqueue,
3220 .dequeue = nssprio_dequeue,
3221 .peek = nssprio_peek,
3222 .drop = nssprio_drop,
3223 .init = nssprio_init,
3224 .reset = nssprio_reset,
3225 .destroy = nssprio_destroy,
3226 .change = nssprio_change,
3227 .dump = nssprio_dump,
3228 .owner = THIS_MODULE,
3229};
3230
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07003231/* ========================= NSSBF ===================== */
3232
3233struct nssbf_class_data {
3234 struct nssqdisc_qdisc nq; /* Base class used by nssqdisc */
3235 struct Qdisc_class_common cl_common; /* Common class structure */
3236 u32 rate; /* Allowed bandwidth for this class */
3237 u32 burst; /* Allowed burst for this class */
3238 u32 mtu; /* MTU size of the interface */
3239 u32 quantum; /* Quantum allocation for DRR */
3240 struct Qdisc *qdisc; /* Pointer to child qdisc */
3241};
3242
3243struct nssbf_sched_data {
3244 struct nssqdisc_qdisc nq; /* Base class used by nssqdisc */
3245 u16 defcls; /* default class id */
3246 struct nssbf_class_data root; /* root class */
3247 struct Qdisc_class_hash clhash; /* class hash */
3248};
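
/*
 * The nssbf qdisc maps to an NSS_SHAPER_NODE_TYPE_BF shaper node, and every
 * class created under it maps to an NSS_SHAPER_NODE_TYPE_BF_GROUP node that
 * is attached via NSS_SHAPER_CONFIG_TYPE_BF_ATTACH. Classes are tracked in
 * clhash; the embedded root class stands in for the qdisc itself and is
 * never detached or destroyed through the class paths.
 */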
3249
3250static inline struct nssbf_class_data *nssbf_find_class(u32 classid,
3251 struct Qdisc *sch)
3252{
3253 struct nssbf_sched_data *q = qdisc_priv(sch);
3254 struct Qdisc_class_common *clc;
3255 clc = qdisc_class_find(&q->clhash, classid);
3256 if (clc == NULL) {
3257 nssqdisc_warning("%s: Cannot find class with classid %u in qdisc %p hash table %p\n", __func__, classid, sch, &q->clhash);
3258 return NULL;
3259 }
3260 return container_of(clc, struct nssbf_class_data, cl_common);
3261}
3262
3263static const struct nla_policy nssbf_policy[TCA_NSSBF_MAX + 1] = {
3264 [TCA_NSSBF_CLASS_PARMS] = { .len = sizeof(struct tc_nssbf_class_qopt) },
3265};
3266
3267static int nssbf_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
3268 struct nlattr **tca, unsigned long *arg)
3269{
3270 struct nssbf_sched_data *q = qdisc_priv(sch);
3271 struct nssbf_class_data *cl = (struct nssbf_class_data *)*arg;
3272 struct nlattr *opt = tca[TCA_OPTIONS];
3273 struct nlattr *na[TCA_NSSBF_MAX + 1];
3274 struct tc_nssbf_class_qopt *qopt;
3275 int err;
3276 struct nss_if_msg nim_config;
3277 struct net_device *dev = qdisc_dev(sch);
3278
3279 nssqdisc_info("%s: Changing bf class %u\n", __func__, classid);
3280 if (opt == NULL)
3281 return -EINVAL;
3282
3283 err = nla_parse_nested(na, TCA_NSSBF_MAX, opt, nssbf_policy);
3284 if (err < 0)
3285 return err;
3286
3287 if (na[TCA_NSSBF_CLASS_PARMS] == NULL)
3288 return -EINVAL;
3289
3290 /*
3291 * If class with a given classid is not found, we allocate a new one
3292 */
3293 if (!cl) {
3294 struct nss_if_msg nim_attach;
3295 nssqdisc_info("%s: Bf class %u not found. Allocating a new class.\n", __func__, classid);
3296 cl = kzalloc(sizeof(struct nssbf_class_data), GFP_KERNEL);
3297
3298 if (!cl) {
3299 nssqdisc_error("%s: Class allocation failed for classid %u\n", __func__, classid);
3300 return -EINVAL;
3301 }
3302
3303 nssqdisc_info("%s: Bf class %u allocated %p\n", __func__, classid, cl);
3304 cl->cl_common.classid = classid;
3305
3306 /*
3307 * We make the child qdisc a noop qdisc, and
3308 * set reference count to 1. This is important,
3309 * reference count should not be 0.
3310 */
3311 cl->qdisc = &noop_qdisc;
3312 atomic_set(&cl->nq.refcnt, 1);
3313 *arg = (unsigned long)cl;
3314
3315 nssqdisc_info("%s: Adding classid %u to qdisc %p hash queue %p\n", __func__, classid, sch, &q->clhash);
3316
3317 /*
		 * This is where a class gets initialized. Classes do not have an init function
		 * that is registered to Linux. Therefore we initialize the NSSBF_GROUP shaper
		 * here.
3321 */
3322 if (nssqdisc_init(sch, &cl->nq, NSS_SHAPER_NODE_TYPE_BF_GROUP, classid) < 0) {
3323 nssqdisc_error("%s: Nss init for class %u failed\n", __func__, classid);
3324 return -EINVAL;
3325 }
3326
		/*
		 * Set the qos_tag of the parent to which the class needs to be attached.
		 */
3330 nim_attach.msg.shaper_configure.config.msg.shaper_node_config.qos_tag = q->nq.qos_tag;
3331
3332 /*
3333 * Set the child to be this class.
3334 */
3335 nim_attach.msg.shaper_configure.config.msg.shaper_node_config.snc.bf_attach.child_qos_tag = cl->nq.qos_tag;
3336
3337 /*
3338 * Send node_attach command down to the NSS
3339 */
3340 if (nssqdisc_node_attach(&q->nq, &nim_attach,
3341 NSS_SHAPER_CONFIG_TYPE_BF_ATTACH) < 0) {
3342 nssqdisc_error("%s: Nss attach for class %u failed\n", __func__, classid);
3343 return -EINVAL;
3344 }
3345
3346 /*
3347 * Add class to hash tree once it is attached in the NSS
3348 */
3349 sch_tree_lock(sch);
3350 qdisc_class_hash_insert(&q->clhash, &cl->cl_common);
3351 sch_tree_unlock(sch);
3352
3353 /*
3354 * Hash grow should not come within the tree lock
3355 */
3356 qdisc_class_hash_grow(sch, &q->clhash);
3357
3358 /*
3359 * Start the stats polling timer
3360 */
3361 nssqdisc_start_basic_stats_polling(&cl->nq);
3362
3363 nssqdisc_info("%s: Class %u successfully allocated\n", __func__, classid);
3364 }
3365
3366 qopt = nla_data(na[TCA_NSSBF_CLASS_PARMS]);
3367
3368 sch_tree_lock(sch);
3369 cl->rate = qopt->rate;
3370 cl->burst = qopt->burst;
3371
3372 /*
3373 * If MTU and quantum values are not provided, set them to
3374 * the interface's MTU value.
3375 */
3376 if (!qopt->mtu) {
3377 cl->mtu = psched_mtu(dev);
3378 nssqdisc_info("MTU not provided for bf class on interface %s. "
3379 "Setting MTU to %u bytes\n", dev->name, cl->mtu);
3380 } else {
3381 cl->mtu = qopt->mtu;
3382 }
3383
3384 if (!qopt->quantum) {
3385 cl->quantum = psched_mtu(dev);
3386 nssqdisc_info("Quantum value not provided for bf class on interface %s. "
3387 "Setting quantum to %u\n", dev->name, cl->quantum);
3388 } else {
Sakthi Vignesh Radhakrishnan88f7d742014-04-27 23:56:44 -07003389 cl->quantum = qopt->quantum;
Sakthi Vignesh Radhakrishnanef918492014-04-07 14:24:19 -07003390 }
3391
3392 sch_tree_unlock(sch);
3393
3394 /*
3395 * Fill information that needs to be sent down to the NSS for configuring the
3396 * bf class.
3397 */
3398 nim_config.msg.shaper_configure.config.msg.shaper_node_config.qos_tag = cl->nq.qos_tag;
3399 nim_config.msg.shaper_configure.config.msg.shaper_node_config.snc.bf_group_param.quantum = cl->quantum;
3400 nim_config.msg.shaper_configure.config.msg.shaper_node_config.snc.bf_group_param.lap.rate = cl->rate;
3401 nim_config.msg.shaper_configure.config.msg.shaper_node_config.snc.bf_group_param.lap.burst = cl->burst;
3402 nim_config.msg.shaper_configure.config.msg.shaper_node_config.snc.bf_group_param.lap.max_size = cl->mtu;
3403 nim_config.msg.shaper_configure.config.msg.shaper_node_config.snc.bf_group_param.lap.short_circuit = false;
3404
3405 nssqdisc_info("Rate = %u Burst = %u MTU = %u Quantum = %u\n", cl->rate, cl->burst, cl->mtu, cl->quantum);
3406
3407 /*
3408 * Send configure command to the NSS
3409 */
3410 if (nssqdisc_configure(&cl->nq, &nim_config,
3411 NSS_SHAPER_CONFIG_TYPE_BF_GROUP_CHANGE_PARAM) < 0) {
3412 nssqdisc_error("%s: Failed to configure class %u\n", __func__, classid);
3413 return -EINVAL;
3414 }
3415
3416 nssqdisc_info("%s: Class %u changed successfully\n", __func__, classid);
3417 return 0;
3418}
3419
3420static void nssbf_destroy_class(struct Qdisc *sch, struct nssbf_class_data *cl)
3421{
3422 struct nssbf_sched_data *q = qdisc_priv(sch);
3423 struct nss_if_msg nim;
3424
3425 nssqdisc_info("Destroying bf class %p from qdisc %p\n", cl, sch);
3426
3427 /*
3428 * Note, this function gets called even for NSSBF and not just for NSSBF_GROUP.
3429 * If this is BF qdisc then we should not call nssqdisc_destroy or stop polling
3430 * for stats. These two actions will happen inside nssbf_destroy(), which is called
3431 * only for the root qdisc.
3432 */
3433 if (cl == &q->root) {
3434 nssqdisc_info("%s: We do not destroy bf class %p here since this is "
3435 "the qdisc %p\n", __func__, cl, sch);
3436 return;
3437 }
3438
3439 /*
3440 * We always have to detach our child qdisc in NSS, before destroying it.
3441 */
3442 if (cl->qdisc != &noop_qdisc) {
3443 nim.msg.shaper_configure.config.msg.shaper_node_config.qos_tag = cl->nq.qos_tag;
3444 if (nssqdisc_node_detach(&cl->nq, &nim,
3445 NSS_SHAPER_CONFIG_TYPE_BF_GROUP_DETACH) < 0) {
3446 nssqdisc_error("%s: Failed to detach child %x from class %x\n",
3447 __func__, cl->qdisc->handle, q->nq.qos_tag);
3448 return;
3449 }
3450 }
3451
3452 /*
3453 * And now we destroy the child.
3454 */
3455 qdisc_destroy(cl->qdisc);
3456
3457 /*
3458 * Stop the stats polling timer and free class
3459 */
3460 nssqdisc_stop_basic_stats_polling(&cl->nq);
3461
3462 /*
3463 * Destroy the shaper in NSS
3464 */
3465 nssqdisc_destroy(&cl->nq);
3466
3467 /*
3468 * Free class
3469 */
3470 kfree(cl);
3471}
3472
3473static int nssbf_delete_class(struct Qdisc *sch, unsigned long arg)
3474{
3475 struct nssbf_sched_data *q = qdisc_priv(sch);
3476 struct nssbf_class_data *cl = (struct nssbf_class_data *)arg;
3477 struct nss_if_msg nim;
3478 int refcnt;
3479
3480 /*
3481 * Since all classes are leaf nodes in our case, we dont have to make
3482 * that check.
3483 */
3484 if (cl == &q->root)
3485 return -EBUSY;
3486
3487 /*
3488 * The message to NSS should be sent to the parent of this class
3489 */
3490 nssqdisc_info("%s: Detaching bf class: %p\n", __func__, cl);
3491 nim.msg.shaper_configure.config.msg.shaper_node_config.qos_tag = q->nq.qos_tag;
3492 nim.msg.shaper_configure.config.msg.shaper_node_config.snc.bf_detach.child_qos_tag = cl->nq.qos_tag;
3493 if (nssqdisc_node_detach(&q->nq, &nim,
3494 NSS_SHAPER_CONFIG_TYPE_BF_DETACH) < 0) {
3495 return -EINVAL;
3496 }
3497
3498 sch_tree_lock(sch);
3499 qdisc_reset(cl->qdisc);
3500 qdisc_class_hash_remove(&q->clhash, &cl->cl_common);
3501 refcnt = atomic_sub_return(1, &cl->nq.refcnt);
3502 sch_tree_unlock(sch);
3503 if (!refcnt) {
3504 nssqdisc_error("%s: Reference count should not be zero for class %p\n", __func__, cl);
3505 }
3506
3507 return 0;
3508}
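
/*
 * Class lifetime: a class starts with a reference count of 1 (set in
 * nssbf_change_class()), nssbf_get_class() takes an additional reference and
 * the final nssbf_put_class() frees the class via nssbf_destroy_class().
 * Deleting a class detaches it from the parent bf shaper and drops the
 * initial reference.
 */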
3509
3510static int nssbf_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
3511 struct Qdisc **old)
3512{
3513 struct nssbf_class_data *cl = (struct nssbf_class_data *)arg;
3514 struct nss_if_msg nim_detach;
3515 struct nss_if_msg nim_attach;
3516 struct nssqdisc_qdisc *nq_new = qdisc_priv(new);
3517
3518 nssqdisc_info("Grafting class %p\n", sch);
3519 if (new == NULL)
3520 new = &noop_qdisc;
3521
3522 sch_tree_lock(sch);
3523 *old = cl->qdisc;
3524 sch_tree_unlock(sch);
3525
3526 /*
3527 * Since we initially attached a noop qdisc as child (in Linux),
3528 * we do not perform a detach in the NSS if its a noop qdisc.
3529 */
3530 nssqdisc_info("%s:Grafting old: %p with new: %p\n", __func__, *old, new);
3531 if (*old != &noop_qdisc) {
3532 nssqdisc_info("%s: Detaching old: %p\n", __func__, *old);
3533 nim_detach.msg.shaper_configure.config.msg.shaper_node_config.qos_tag = cl->nq.qos_tag;
3534 if (nssqdisc_node_detach(&cl->nq, &nim_detach,
3535 NSS_SHAPER_CONFIG_TYPE_BF_GROUP_DETACH) < 0) {
3536 return -EINVAL;
3537 }
3538 }
3539
3540 /*
3541 * If the new qdisc is a noop qdisc, we do not send down an attach command
3542 * to the NSS.
3543 */
3544 if (new != &noop_qdisc) {
3545 nssqdisc_info("%s: Attaching new: %p\n", __func__, new);
3546 nim_attach.msg.shaper_configure.config.msg.shaper_node_config.qos_tag = cl->nq.qos_tag;
3547 nim_attach.msg.shaper_configure.config.msg.shaper_node_config.snc.bf_group_attach.child_qos_tag = nq_new->qos_tag;
3548 if (nssqdisc_node_attach(&cl->nq, &nim_attach,
3549 NSS_SHAPER_CONFIG_TYPE_BF_GROUP_ATTACH) < 0) {
3550 return -EINVAL;
3551 }
3552 }
3553
3554 /*
3555 * Attach qdisc once it is done in the NSS
3556 */
3557 sch_tree_lock(sch);
3558 cl->qdisc = new;
3559 sch_tree_unlock(sch);
3560
3561 nssqdisc_info("Nssbf grafted");
3562
3563 return 0;
3564}
3565
3566static struct Qdisc *nssbf_leaf_class(struct Qdisc *sch, unsigned long arg)
3567{
3568 struct nssbf_class_data *cl = (struct nssbf_class_data *)arg;
3569 nssqdisc_info("bf class leaf %p\n", cl);
3570
3571 /*
3572 * Since all nssbf groups are leaf nodes, we can always
3573 * return the attached qdisc.
3574 */
3575 return cl->qdisc;
3576}
3577
3578static void nssbf_qlen_notify(struct Qdisc *sch, unsigned long arg)
3579{
3580 nssqdisc_info("bf qlen notify %p\n", sch);
3581 /*
3582 * Gets called when qlen of child changes (Useful for deactivating)
3583 * Not useful for us here.
3584 */
3585}
3586
3587static unsigned long nssbf_get_class(struct Qdisc *sch, u32 classid)
3588{
3589 struct nssbf_class_data *cl = nssbf_find_class(classid, sch);
3590
3591 nssqdisc_info("Get bf class %p - class match = %p\n", sch, cl);
3592
3593 if (cl != NULL)
3594 atomic_add(1, &cl->nq.refcnt);
3595
3596 return (unsigned long)cl;
3597}
3598
3599static void nssbf_put_class(struct Qdisc *sch, unsigned long arg)
3600{
3601 struct nssbf_class_data *cl = (struct nssbf_class_data *)arg;
3602 nssqdisc_info("bf put class for %p\n", cl);
3603
3604 /*
3605 * We are safe to destroy the qdisc if the reference count
3606 * goes down to 0.
3607 */
3608 if (atomic_sub_return(1, &cl->nq.refcnt) == 0) {
3609 nssbf_destroy_class(sch, cl);
3610 }
3611}
3612
3613static int nssbf_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb,
3614 struct tcmsg *tcm)
3615{
3616 struct nssbf_class_data *cl = (struct nssbf_class_data *)arg;
3617 struct nlattr *opts;
3618 struct tc_nssbf_class_qopt qopt;
3619
3620 nssqdisc_info("Dumping class %p of Qdisc %p\n", cl, sch);
3621
3622 qopt.burst = cl->burst;
3623 qopt.rate = cl->rate;
3624 qopt.mtu = cl->mtu;
3625 qopt.quantum = cl->quantum;
3626
	/*
	 * All bf group nodes are root nodes, i.e. they dont
	 * have any more bf groups attached beneath them.
	 */
3631 tcm->tcm_parent = TC_H_ROOT;
3632 tcm->tcm_handle = cl->cl_common.classid;
3633 tcm->tcm_info = cl->qdisc->handle;
3634
3635 opts = nla_nest_start(skb, TCA_OPTIONS);
3636 if (opts == NULL)
3637 goto nla_put_failure;
3638 NLA_PUT(skb, TCA_NSSBF_CLASS_PARMS, sizeof(qopt), &qopt);
3639 return nla_nest_end(skb, opts);
3640
3641nla_put_failure:
3642 nla_nest_cancel(skb, opts);
3643 return -EMSGSIZE;
3644}
3645
3646static int nssbf_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
3647{
3648 struct nssqdisc_qdisc *nq = (struct nssqdisc_qdisc *)arg;
3649
3650 if (gnet_stats_copy_basic(d, &nq->bstats) < 0 ||
3651 gnet_stats_copy_queue(d, &nq->qstats) < 0) {
3652 return -1;
3653 }
3654
3655 return 0;
3656}
3657
3658static void nssbf_walk(struct Qdisc *sch, struct qdisc_walker *arg)
3659{
3660 struct nssbf_sched_data *q = qdisc_priv(sch);
3661 struct hlist_node *n;
3662 struct nssbf_class_data *cl;
3663 unsigned int i;
3664
3665 nssqdisc_info("In bf walk %p\n", sch);
3666 if (arg->stop)
3667 return;
3668
3669 for (i = 0; i < q->clhash.hashsize; i++) {
3670 hlist_for_each_entry(cl, n, &q->clhash.hash[i],
3671 cl_common.hnode) {
3672 if (arg->count < arg->skip) {
3673 arg->count++;
3674 continue;
3675 }
3676 if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
3677 arg->stop = 1;
3678 return;
3679 }
3680 arg->count++;
3681 }
3682 }
3683}
3684
3685static int nssbf_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
3686{
3687 struct nssbf_sched_data *q = qdisc_priv(sch);
3688 struct tc_nssbf_qopt *qopt;
3689 int err;
3690
3691 nssqdisc_info("Init bf qdisc %p\n", sch);
3692 if (opt == NULL || nla_len(opt) < sizeof(*qopt))
3693 return -EINVAL;
3694 qopt = nla_data(opt);
3695
3696 q->defcls = qopt->defcls;
3697 err = qdisc_class_hash_init(&q->clhash);
3698 if (err < 0)
3699 return err;
3700
3701 q->root.cl_common.classid = sch->handle;
3702 q->root.qdisc = &noop_qdisc;
3703
3704 qdisc_class_hash_insert(&q->clhash, &q->root.cl_common);
3705 qdisc_class_hash_grow(sch, &q->clhash);
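
	/*
	 * The embedded root class is keyed by the qdisc handle so lookups on
	 * the qdisc itself resolve to it. It has no shaper node of its own and
	 * is skipped by the class delete/destroy paths.
	 */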
3706
3707 /*
3708 * Initialize the NSSBF shaper in NSS
3709 */
3710 if (nssqdisc_init(sch, &q->nq, NSS_SHAPER_NODE_TYPE_BF, 0) < 0)
3711 return -EINVAL;
3712
3713 nssqdisc_info("Nssbf initialized - handle %x parent %x\n", sch->handle, sch->parent);
3714
3715 /*
3716 * Start the stats polling timer
3717 */
3718 nssqdisc_start_basic_stats_polling(&q->nq);
3719
3720 return 0;
3721}
3722
3723static int nssbf_change_qdisc(struct Qdisc *sch, struct nlattr *opt)
3724{
3725 struct nssbf_sched_data *q = qdisc_priv(sch);
3726 struct tc_nssbf_qopt *qopt;
3727
3728 /*
3729 * NSSBF does not care about the defcls, so we dont send down any
3730 * configuration parameter.
3731 */
3732 nssqdisc_info("Changing bf qdisc %p\n", sch);
3733 if (opt == NULL || nla_len(opt) < sizeof(*qopt))
3734 return -EINVAL;
3735 qopt = nla_data(opt);
3736
3737 sch_tree_lock(sch);
3738 q->defcls = qopt->defcls;
3739 sch_tree_unlock(sch);
3740
3741 return 0;
3742}
3743
3744static void nssbf_reset_class(struct nssbf_class_data *cl)
3745{
3746 nssqdisc_reset(cl->qdisc);
	nssqdisc_info("Nssbf class reset %p\n", cl->qdisc);
3748}
3749
3750static void nssbf_reset_qdisc(struct Qdisc *sch)
3751{
3752 struct nssbf_sched_data *q = qdisc_priv(sch);
3753 struct nssbf_class_data *cl;
3754 struct hlist_node *n;
3755 unsigned int i;
3756
3757 for (i = 0; i < q->clhash.hashsize; i++) {
3758 hlist_for_each_entry(cl, n, &q->clhash.hash[i], cl_common.hnode)
3759 nssbf_reset_class(cl);
3760 }
3761
3762 nssqdisc_reset(sch);
	nssqdisc_info("Nssbf qdisc reset %p\n", sch);
3764}
3765
3766static void nssbf_destroy_qdisc(struct Qdisc *sch)
3767{
3768 struct nssbf_sched_data *q = qdisc_priv(sch);
3769 struct hlist_node *n, *next;
3770 struct nssbf_class_data *cl;
3771 struct nss_if_msg nim;
3772 unsigned int i;
3773
3774 /*
3775 * Destroy all the classes before the root qdisc is destroyed.
3776 */
3777 for (i = 0; i < q->clhash.hashsize; i++) {
3778 hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i], cl_common.hnode) {
3779
3780 /*
3781 * If this is the root class, we dont have to destroy it. This will be taken
3782 * care of by the nssbf_destroy() function.
3783 */
3784 if (cl == &q->root) {
3785 nssqdisc_info("%s: We do not detach or destroy bf class %p here since this is "
3786 "the qdisc %p\n", __func__, cl, sch);
3787 continue;
3788 }
3789
			/*
			 * Reduce refcnt by 1 before destroying. This is to
			 * ensure that polling of stats stops properly.
			 */
3794 atomic_sub(1, &cl->nq.refcnt);
3795
			/*
			 * Detach the class before destroying it. We dont check for a noop qdisc here
			 * since we do not attach any such qdisc at init.
			 */
3800 nim.msg.shaper_configure.config.msg.shaper_node_config.qos_tag = q->nq.qos_tag;
3801 nim.msg.shaper_configure.config.msg.shaper_node_config.snc.bf_detach.child_qos_tag = cl->nq.qos_tag;
3802 if (nssqdisc_node_detach(&q->nq, &nim,
3803 NSS_SHAPER_CONFIG_TYPE_BF_DETACH) < 0) {
3804 nssqdisc_error("%s: Node detach failed for qdisc %x class %x\n",
3805 __func__, cl->nq.qos_tag, q->nq.qos_tag);
3806 return;
3807 }
3808
3809 /*
3810 * Now we can destroy the class.
3811 */
3812 nssbf_destroy_class(sch, cl);
3813 }
3814 }
3815 qdisc_class_hash_destroy(&q->clhash);
3816
3817 /*
3818 * Stop the polling of basic stats
3819 */
3820 nssqdisc_stop_basic_stats_polling(&q->nq);
3821
3822 /*
3823 * Now we can go ahead and destroy the qdisc.
3824 * Note: We dont have to detach ourself from our parent because this
3825 * will be taken care of by the graft call.
3826 */
3827 nssqdisc_destroy(&q->nq);
3828 nssqdisc_info("Nssbf destroyed %p\n", sch);
3829}
3830
3831static int nssbf_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
3832{
3833 struct nssbf_sched_data *q = qdisc_priv(sch);
3834 unsigned char *b = skb_tail_pointer(skb);
3835 struct tc_nssbf_qopt qopt;
3836 struct nlattr *nest;
3837
3838 nssqdisc_info("In bf dump qdisc\n");
3839
3840 nest = nla_nest_start(skb, TCA_OPTIONS);
3841 if (nest == NULL) {
3842 goto nla_put_failure;
3843 }
3844
3845 qopt.defcls = q->defcls;
3846 NLA_PUT(skb, TCA_NSSBF_QDISC_PARMS, sizeof(qopt), &qopt);
3847 nla_nest_end(skb, nest);
3848
3849 return skb->len;
3850
3851 nla_put_failure:
3852 nlmsg_trim(skb, b);
3853 return -1;
3854}
3855
3856static int nssbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
3857{
3858 return nssqdisc_enqueue(skb, sch);
3859}
3860
3861static struct sk_buff *nssbf_dequeue(struct Qdisc *sch)
3862{
3863 return nssqdisc_dequeue(sch);
3864}
3865
3866static unsigned int nssbf_drop(struct Qdisc *sch)
3867{
	nssqdisc_info("In bf drop\n");
3869 return nssqdisc_drop(sch);
3870}
3871
3872static const struct Qdisc_class_ops nssbf_class_ops = {
3873 .change = nssbf_change_class,
3874 .delete = nssbf_delete_class,
3875 .graft = nssbf_graft_class,
3876 .leaf = nssbf_leaf_class,
3877 .qlen_notify = nssbf_qlen_notify,
3878 .get = nssbf_get_class,
3879 .put = nssbf_put_class,
3880 .dump = nssbf_dump_class,
3881 .dump_stats = nssbf_dump_class_stats,
3882 .walk = nssbf_walk
3883};
3884
3885static struct Qdisc_ops nssbf_qdisc_ops __read_mostly = {
3886 .id = "nssbf",
3887 .init = nssbf_init_qdisc,
3888 .change = nssbf_change_qdisc,
3889 .reset = nssbf_reset_qdisc,
3890 .destroy = nssbf_destroy_qdisc,
3891 .dump = nssbf_dump_qdisc,
3892 .enqueue = nssbf_enqueue,
3893 .dequeue = nssbf_dequeue,
3894 .peek = qdisc_peek_dequeued,
3895 .drop = nssbf_drop,
3896 .cl_ops = &nssbf_class_ops,
3897 .priv_size = sizeof(struct nssbf_sched_data),
3898 .owner = THIS_MODULE
3899};
3900
3901/* ========================= NSSWRR ===================== */
3902
3903struct nsswrr_class_data {
3904 struct nssqdisc_qdisc nq; /* Base class used by nssqdisc */
3905 struct Qdisc_class_common cl_common; /* Common class structure */
3906 u32 quantum; /* Quantum allocation for DRR */
3907 struct Qdisc *qdisc; /* Pointer to child qdisc */
3908};
3909
3910struct nsswrr_sched_data {
3911 struct nssqdisc_qdisc nq; /* Base class used by nssqdisc */
3912 struct nsswrr_class_data root; /* root class */
3913 struct Qdisc_class_hash clhash; /* class hash */
3914};
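
/*
 * Note: these handlers also serve the "nsswfq" qdisc. The ops->id string is
 * used to select the NSS operation mode (round robin for nsswrr, fair
 * queueing for nsswfq) in nsswrr_init_qdisc(), and to pick the default
 * quantum in nsswrr_change_class(): 1 for nsswrr and the interface MTU for
 * nsswfq.
 */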
3915
3916static inline struct nsswrr_class_data *nsswrr_find_class(u32 classid,
3917 struct Qdisc *sch)
3918{
3919 struct nsswrr_sched_data *q = qdisc_priv(sch);
3920 struct Qdisc_class_common *clc;
3921 clc = qdisc_class_find(&q->clhash, classid);
3922 if (clc == NULL) {
3923 nssqdisc_warning("%s: Cannot find class with classid %u in qdisc %p hash table %p\n", __func__, classid, sch, &q->clhash);
3924 return NULL;
3925 }
3926 return container_of(clc, struct nsswrr_class_data, cl_common);
3927}
3928
3929static const struct nla_policy nsswrr_policy[TCA_NSSWRR_MAX + 1] = {
3930 [TCA_NSSWRR_CLASS_PARMS] = { .len = sizeof(struct tc_nsswrr_class_qopt) },
3931};
3932
3933static void nsswrr_destroy_class(struct Qdisc *sch, struct nsswrr_class_data *cl)
3934{
3935 struct nsswrr_sched_data *q = qdisc_priv(sch);
3936 struct nss_if_msg nim;
3937
3938 nssqdisc_info("Destroying nsswrr class %p from qdisc %p\n", cl, sch);
3939
3940 /*
3941 * Note, this function gets called even for NSSWRR and not just for NSSWRR_GROUP.
3942 * If this is wrr qdisc then we should not call nssqdisc_destroy or stop polling
3943 * for stats. These two actions will happen inside nsswrr_destroy(), which is called
3944 * only for the root qdisc.
3945 */
3946 if (cl == &q->root) {
3947 nssqdisc_info("%s: We do not destroy nsswrr class %p here since this is "
3948 "the qdisc %p\n", __func__, cl, sch);
3949 return;
3950 }
3951
3952 /*
3953 * We always have to detach our child qdisc in NSS, before destroying it.
3954 */
3955 if (cl->qdisc != &noop_qdisc) {
3956 nim.msg.shaper_configure.config.msg.shaper_node_config.qos_tag = cl->nq.qos_tag;
3957 if (nssqdisc_node_detach(&cl->nq, &nim,
3958 NSS_SHAPER_CONFIG_TYPE_WRR_GROUP_DETACH) < 0) {
3959 nssqdisc_error("%s: Failed to detach child %x from class %x\n",
3960 __func__, cl->qdisc->handle, q->nq.qos_tag);
3961 return;
3962 }
3963 }
3964
3965 /*
3966 * And now we destroy the child.
3967 */
3968 qdisc_destroy(cl->qdisc);
3969
3970 /*
3971 * Stop the stats polling timer and free class
3972 */
3973 nssqdisc_stop_basic_stats_polling(&cl->nq);
3974
3975 /*
3976 * Destroy the shaper in NSS
3977 */
3978 nssqdisc_destroy(&cl->nq);
3979
3980 /*
3981 * Free class
3982 */
3983 kfree(cl);
3984}
3985
3986static int nsswrr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
3987 struct nlattr **tca, unsigned long *arg)
3988{
3989 struct nsswrr_sched_data *q = qdisc_priv(sch);
3990 struct nsswrr_class_data *cl = (struct nsswrr_class_data *)*arg;
3991 struct nlattr *opt = tca[TCA_OPTIONS];
3992 struct nlattr *na[TCA_NSSWRR_MAX + 1];
3993 struct tc_nsswrr_class_qopt *qopt;
3994 struct nss_if_msg nim_config;
3995 struct net_device *dev = qdisc_dev(sch);
3996 bool new_init = false;
3997 int err;
3998
3999 nssqdisc_info("%s: Changing nsswrr class %u\n", __func__, classid);
4000 if (opt == NULL)
4001 return -EINVAL;
4002
4003 err = nla_parse_nested(na, TCA_NSSWRR_MAX, opt, nsswrr_policy);
4004 if (err < 0)
4005 return err;
4006
4007 if (na[TCA_NSSWRR_CLASS_PARMS] == NULL)
4008 return -EINVAL;
4009
4010 /*
4011 * If class with a given classid is not found, we allocate a new one
4012 */
4013 if (!cl) {
4014
4015 struct nss_if_msg nim_attach;
4016
4017 /*
4018 * The class does not already exist, we are newly initializing it.
4019 */
4020 new_init = true;
4021
4022 nssqdisc_info("%s: nsswrr class %u not found. Allocating a new class.\n", __func__, classid);
4023 cl = kzalloc(sizeof(struct nsswrr_class_data), GFP_KERNEL);
4024
4025 if (!cl) {
4026 nssqdisc_error("%s: Class allocation failed for classid %u\n", __func__, classid);
4027 return -EINVAL;
4028 }
4029
		nssqdisc_info("%s: Nsswrr class %u allocated %p\n", __func__, classid, cl);
4031 cl->cl_common.classid = classid;
4032
4033 /*
4034 * We make the child qdisc a noop qdisc, and
4035 * set reference count to 1. This is important,
4036 * reference count should not be 0.
4037 */
4038 cl->qdisc = &noop_qdisc;
4039 atomic_set(&cl->nq.refcnt, 1);
4040 *arg = (unsigned long)cl;
4041
4042 nssqdisc_info("%s: Adding classid %u to qdisc %p hash queue %p\n", __func__, classid, sch, &q->clhash);
4043
4044 /*
		 * This is where a class gets initialized. Classes do not have an init function
		 * that is registered to Linux. Therefore we initialize the NSSWRR_GROUP shaper
		 * here.
4048 */
4049 if (nssqdisc_init(sch, &cl->nq, NSS_SHAPER_NODE_TYPE_WRR_GROUP, classid) < 0) {
4050 nssqdisc_error("%s: Nss init for class %u failed\n", __func__, classid);
4051 return -EINVAL;
4052 }
4053
		/*
		 * Set the qos_tag of the parent to which the class needs to be attached.
		 */
4057 nim_attach.msg.shaper_configure.config.msg.shaper_node_config.qos_tag = q->nq.qos_tag;
4058
4059 /*
4060 * Set the child to be this class.
4061 */
4062 nim_attach.msg.shaper_configure.config.msg.shaper_node_config.snc.wrr_attach.child_qos_tag = cl->nq.qos_tag;
4063
4064 /*
4065 * Send node_attach command down to the NSS
4066 */
4067 if (nssqdisc_node_attach(&q->nq, &nim_attach,
4068 NSS_SHAPER_CONFIG_TYPE_WRR_ATTACH) < 0) {
4069 nssqdisc_error("%s: Nss attach for class %u failed\n", __func__, classid);
4070 nssqdisc_destroy(&cl->nq);
4071 return -EINVAL;
4072 }
4073
4074 /*
4075 * Add class to hash tree once it is attached in the NSS
4076 */
4077 sch_tree_lock(sch);
4078 qdisc_class_hash_insert(&q->clhash, &cl->cl_common);
4079 sch_tree_unlock(sch);
4080
4081 /*
4082 * Hash grow should not come within the tree lock
4083 */
4084 qdisc_class_hash_grow(sch, &q->clhash);
4085
4086 /*
4087 * Start the stats polling timer
4088 */
4089 nssqdisc_start_basic_stats_polling(&cl->nq);
4090
4091 nssqdisc_info("%s: Class %u successfully allocated\n", __func__, classid);
4092 }
4093
4094 qopt = nla_data(na[TCA_NSSWRR_CLASS_PARMS]);
4095
4096 sch_tree_lock(sch);
4097
4098 /*
	 * If the value of quantum is not provided, default it based on the type
	 * of operation (i.e. wrr or wfq)
4101 */
4102 cl->quantum = qopt->quantum;
4103 if (!cl->quantum) {
4104 if (strncmp(sch->ops->id, "nsswrr", 6) == 0) {
4105 cl->quantum = 1;
			nssqdisc_info("Quantum value not provided for nsswrr class on interface %s. "
					"Setting quantum to %u packet(s)\n", dev->name, cl->quantum);
4108 } else if (strncmp(sch->ops->id, "nsswfq", 6) == 0) {
4109 cl->quantum = psched_mtu(dev);
			nssqdisc_info("Quantum value not provided for nsswfq class on interface %s. "
					"Setting quantum to %u bytes\n", dev->name, cl->quantum);
4112 } else {
4113 nssqdisc_error("%s: Unsupported parent type", __func__);
4114 return -EINVAL;
4115 }
4116 }
4117
4118 sch_tree_unlock(sch);
4119
	/*
	 * Fill in the information that needs to be sent down to the NSS for configuring the
	 * wrr class.
	 */
4124 nim_config.msg.shaper_configure.config.msg.shaper_node_config.qos_tag = cl->nq.qos_tag;
4125 nim_config.msg.shaper_configure.config.msg.shaper_node_config.snc.wrr_group_param.quantum = cl->quantum;
4126
4127 nssqdisc_info("Quantum = %u\n", cl->quantum);
4128
4129 /*
4130 * Send configure command to the NSS
4131 */
4132 if (nssqdisc_configure(&cl->nq, &nim_config,
4133 NSS_SHAPER_CONFIG_TYPE_WRR_GROUP_CHANGE_PARAM) < 0) {
4134 nssqdisc_error("%s: Failed to configure class %x\n", __func__, classid);
4135
4136 /*
4137 * We dont have to destroy the class if this was just a
4138 * change command.
4139 */
4140 if (!new_init) {
4141 return -EINVAL;
4142 }
4143
4144 /*
4145 * Else, we have failed in the NSS and we will have to
4146 * destroy the class
4147 */
4148 nsswrr_destroy_class(sch, cl);
4149 return -EINVAL;
4150 }
4151
4152 nssqdisc_info("%s: Class %x changed successfully\n", __func__, classid);
4153 return 0;
4154}
4155
4156static int nsswrr_delete_class(struct Qdisc *sch, unsigned long arg)
4157{
4158 struct nsswrr_sched_data *q = qdisc_priv(sch);
4159 struct nsswrr_class_data *cl = (struct nsswrr_class_data *)arg;
4160 struct nss_if_msg nim;
4161 int refcnt;
4162
4163 /*
4164 * Since all classes are leaf nodes in our case, we dont have to make
4165 * that check.
4166 */
4167 if (cl == &q->root)
4168 return -EBUSY;
4169
4170 /*
4171 * The message to NSS should be sent to the parent of this class
4172 */
4173 nssqdisc_info("%s: Detaching nsswrr class: %p\n", __func__, cl);
4174 nim.msg.shaper_configure.config.msg.shaper_node_config.qos_tag = q->nq.qos_tag;
4175 nim.msg.shaper_configure.config.msg.shaper_node_config.snc.wrr_detach.child_qos_tag = cl->nq.qos_tag;
4176 if (nssqdisc_node_detach(&q->nq, &nim,
4177 NSS_SHAPER_CONFIG_TYPE_WRR_DETACH) < 0) {
4178 return -EINVAL;
4179 }
4180
4181 sch_tree_lock(sch);
4182 qdisc_reset(cl->qdisc);
4183 qdisc_class_hash_remove(&q->clhash, &cl->cl_common);
4184 refcnt = atomic_sub_return(1, &cl->nq.refcnt);
4185 sch_tree_unlock(sch);
4186 if (!refcnt) {
4187 nssqdisc_error("%s: Reference count should not be zero for class %p\n", __func__, cl);
4188 }
4189
4190 return 0;
4191}
4192
4193static int nsswrr_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
4194 struct Qdisc **old)
4195{
4196 struct nsswrr_class_data *cl = (struct nsswrr_class_data *)arg;
4197 struct nss_if_msg nim_detach;
4198 struct nss_if_msg nim_attach;
4199 struct nssqdisc_qdisc *nq_new = qdisc_priv(new);
4200
4201 nssqdisc_info("Grafting class %p\n", sch);
4202 if (new == NULL)
4203 new = &noop_qdisc;
4204
4205 sch_tree_lock(sch);
4206 *old = cl->qdisc;
4207 sch_tree_unlock(sch);
4208
4209 /*
4210 * Since we initially attached a noop qdisc as child (in Linux),
4211 * we do not perform a detach in the NSS if its a noop qdisc.
4212 */
4213 nssqdisc_info("%s:Grafting old: %p with new: %p\n", __func__, *old, new);
4214 if (*old != &noop_qdisc) {
4215 nssqdisc_info("%s: Detaching old: %p\n", __func__, *old);
4216 nim_detach.msg.shaper_configure.config.msg.shaper_node_config.qos_tag = cl->nq.qos_tag;
4217 if (nssqdisc_node_detach(&cl->nq, &nim_detach,
4218 NSS_SHAPER_CONFIG_TYPE_WRR_GROUP_DETACH) < 0) {
4219 return -EINVAL;
4220 }
4221 }
4222
4223 /*
4224 * If the new qdisc is a noop qdisc, we do not send down an attach command
4225 * to the NSS.
4226 */
4227 if (new != &noop_qdisc) {
4228 nssqdisc_info("%s: Attaching new: %p\n", __func__, new);
4229 nim_attach.msg.shaper_configure.config.msg.shaper_node_config.qos_tag = cl->nq.qos_tag;
4230 nim_attach.msg.shaper_configure.config.msg.shaper_node_config.snc.wrr_group_attach.child_qos_tag = nq_new->qos_tag;
4231 if (nssqdisc_node_attach(&cl->nq, &nim_attach,
4232 NSS_SHAPER_CONFIG_TYPE_WRR_GROUP_ATTACH) < 0) {
4233 return -EINVAL;
4234 }
4235 }
4236
4237 /*
4238 * Attach qdisc once it is done in the NSS
4239 */
4240 sch_tree_lock(sch);
4241 cl->qdisc = new;
4242 sch_tree_unlock(sch);
4243
4244 nssqdisc_info("Nsswrr grafted");
4245
4246 return 0;
4247}
4248
4249static struct Qdisc *nsswrr_leaf_class(struct Qdisc *sch, unsigned long arg)
4250{
4251 struct nsswrr_class_data *cl = (struct nsswrr_class_data *)arg;
4252 nssqdisc_info("nsswrr class leaf %p\n", cl);
4253
4254 /*
4255 * Since all nsswrr groups are leaf nodes, we can always
4256 * return the attached qdisc.
4257 */
4258 return cl->qdisc;
4259}
4260
4261static void nsswrr_qlen_notify(struct Qdisc *sch, unsigned long arg)
4262{
4263 nssqdisc_info("nsswrr qlen notify %p\n", sch);
4264 /*
4265 * Gets called when qlen of child changes (Useful for deactivating)
4266 * Not useful for us here.
4267 */
4268}
4269
4270static unsigned long nsswrr_get_class(struct Qdisc *sch, u32 classid)
4271{
4272 struct nsswrr_class_data *cl = nsswrr_find_class(classid, sch);
4273
4274 nssqdisc_info("Get nsswrr class %p - class match = %p\n", sch, cl);
4275
4276 if (cl != NULL)
4277 atomic_add(1, &cl->nq.refcnt);
4278
4279 return (unsigned long)cl;
4280}
4281
4282static void nsswrr_put_class(struct Qdisc *sch, unsigned long arg)
4283{
4284 struct nsswrr_class_data *cl = (struct nsswrr_class_data *)arg;
4285 nssqdisc_info("nsswrr put class for %p\n", cl);
4286
4287 /*
4288 * We are safe to destroy the qdisc if the reference count
4289 * goes down to 0.
4290 */
4291 if (atomic_sub_return(1, &cl->nq.refcnt) == 0) {
4292 nsswrr_destroy_class(sch, cl);
4293 }
4294}
4295
static int nsswrr_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb,
				struct tcmsg *tcm)
{
	struct nsswrr_class_data *cl = (struct nsswrr_class_data *)arg;
	struct nlattr *opts;
	struct tc_nsswrr_class_qopt qopt;

	nssqdisc_info("Dumping class %p of Qdisc %x\n", cl, sch->handle);

	qopt.quantum = cl->quantum;

	/*
	 * All wrr group nodes are attached directly to the root, i.e. they
	 * don't have any more wrr groups attached beneath them.
	 */
	tcm->tcm_parent = TC_H_ROOT;
	tcm->tcm_handle = cl->cl_common.classid;
	tcm->tcm_info = cl->qdisc->handle;

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	NLA_PUT(skb, TCA_NSSWRR_CLASS_PARMS, sizeof(qopt), &qopt);
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static int nsswrr_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
{
	struct nssqdisc_qdisc *nq = (struct nssqdisc_qdisc *)arg;

	if (gnet_stats_copy_basic(d, &nq->bstats) < 0 ||
			gnet_stats_copy_queue(d, &nq->qstats) < 0) {
		return -1;
	}

	return 0;
}

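/*
 * nsswrr_walk()
 *	Iterates over every class in the hash table and invokes the callback
 *	supplied by the stack. This is how operations such as a class listing
 *	enumerate the classes of this qdisc.
 */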
static void nsswrr_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct nsswrr_sched_data *q = qdisc_priv(sch);
	struct hlist_node *n;
	struct nsswrr_class_data *cl;
	unsigned int i;

	nssqdisc_info("In nsswrr walk %p\n", sch);
	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[i],
				cl_common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

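/*
 * nsswrr_init_qdisc()
 *	Creates the WRR shaper node in the NSS and selects its operation mode
 *	from the Qdisc_ops it was instantiated through: round robin for
 *	"nsswrr" and fair queueing for "nsswfq".
 *
 * With the matching iproute2/tc patches the root qdisc would typically be
 * created with one of the following (illustrative only; the exact syntax
 * depends on those patches):
 *
 *	tc qdisc add dev eth0 root handle 1: nsswrr
 *	tc qdisc add dev eth0 root handle 1: nsswfq
 *
 * and classes added beneath it, e.g.:
 *
 *	tc class add dev eth0 parent 1: classid 1:1 nsswrr quantum 1514
 */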
static int nsswrr_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
{
	struct nsswrr_sched_data *q = qdisc_priv(sch);
	int err;
	struct nss_if_msg nim;

	nssqdisc_info("Init nsswrr qdisc %p\n", sch);

	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		return err;

	q->root.cl_common.classid = sch->handle;
	q->root.qdisc = &noop_qdisc;

	qdisc_class_hash_insert(&q->clhash, &q->root.cl_common);
	qdisc_class_hash_grow(sch, &q->clhash);

	/*
	 * Initialize the NSSWRR shaper in the NSS.
	 */
	if (nssqdisc_init(sch, &q->nq, NSS_SHAPER_NODE_TYPE_WRR, 0) < 0)
		return -EINVAL;

	/*
	 * Configure the shaper in the NSS to operate either in round robin
	 * mode (nsswrr) or in fair queueing mode (nsswfq).
	 */
	nim.msg.shaper_configure.config.msg.shaper_node_config.qos_tag = q->nq.qos_tag;
	if (strncmp(sch->ops->id, "nsswrr", 6) == 0) {
		nim.msg.shaper_configure.config.msg.shaper_node_config.snc.wrr_param.operation_mode = NSS_SHAPER_WRR_MODE_ROUND_ROBIN;
	} else if (strncmp(sch->ops->id, "nsswfq", 6) == 0) {
		nim.msg.shaper_configure.config.msg.shaper_node_config.snc.wrr_param.operation_mode = NSS_SHAPER_WRR_MODE_FAIR_QUEUEING;
	} else {
		nssqdisc_error("%s: Unknown qdisc association\n", __func__);
		nssqdisc_destroy(&q->nq);
		return -EINVAL;
	}

	/*
	 * Send the configure command to the NSS.
	 */
	if (nssqdisc_configure(&q->nq, &nim, NSS_SHAPER_CONFIG_TYPE_WRR_CHANGE_PARAM) < 0) {
		nssqdisc_error("%s: Failed to configure nsswrr qdisc %x\n", __func__, q->nq.qos_tag);
		nssqdisc_destroy(&q->nq);
		return -EINVAL;
	}

	nssqdisc_info("Nsswrr initialized - handle %x parent %x\n", sch->handle, sch->parent);

	/*
	 * Start the stats polling timer.
	 */
	nssqdisc_start_basic_stats_polling(&q->nq);

	return 0;
}

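/*
 * nsswrr_change_qdisc()
 *	No qdisc-level parameters are processed here; the tunables (such as
 *	the per-class quantum) are handled through the class operations.
 */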
static int nsswrr_change_qdisc(struct Qdisc *sch, struct nlattr *opt)
{
	return 0;
}

static void nsswrr_reset_class(struct nsswrr_class_data *cl)
{
	nssqdisc_reset(cl->qdisc);
	nssqdisc_info("Nsswrr class reset %p\n", cl->qdisc);
}

static void nsswrr_reset_qdisc(struct Qdisc *sch)
{
	struct nsswrr_sched_data *q = qdisc_priv(sch);
	struct nsswrr_class_data *cl;
	struct hlist_node *n;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[i], cl_common.hnode)
			nsswrr_reset_class(cl);
	}

	nssqdisc_reset(sch);
	nssqdisc_info("Nsswrr qdisc reset %p\n", sch);
}

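/*
 * nsswrr_destroy_qdisc()
 *	Tears the hierarchy down in the reverse order of construction: every
 *	class (except the root class, which is the qdisc itself) is detached
 *	from the WRR shaper node in the NSS and destroyed, the basic stats
 *	polling is stopped, and finally the qdisc's own shaper node is
 *	destroyed.
 */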
static void nsswrr_destroy_qdisc(struct Qdisc *sch)
{
	struct nsswrr_sched_data *q = qdisc_priv(sch);
	struct hlist_node *n, *next;
	struct nsswrr_class_data *cl;
	struct nss_if_msg nim;
	unsigned int i;

	/*
	 * Destroy all the classes before the root qdisc is destroyed.
	 */
	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i], cl_common.hnode) {

			/*
			 * If this is the root class, we don't have to destroy it. It is
			 * taken care of by the nssqdisc_destroy() call below.
			 */
			if (cl == &q->root) {
				nssqdisc_info("%s: We do not detach or destroy nsswrr class %p here since this is "
						"the qdisc %p\n", __func__, cl, sch);
				continue;
			}

			/*
			 * Reduce the refcnt by 1 before destroying. This is to
			 * ensure that the polling of stats stops properly.
			 */
			atomic_sub(1, &cl->nq.refcnt);

			/*
			 * Detach the class before destroying it. We don't check for a noop
			 * qdisc here since we do not attach any such qdisc at init.
			 */
			nim.msg.shaper_configure.config.msg.shaper_node_config.qos_tag = q->nq.qos_tag;
			nim.msg.shaper_configure.config.msg.shaper_node_config.snc.wrr_detach.child_qos_tag = cl->nq.qos_tag;
			if (nssqdisc_node_detach(&q->nq, &nim, NSS_SHAPER_CONFIG_TYPE_WRR_DETACH) < 0) {
				nssqdisc_error("%s: Node detach failed for qdisc %x class %x\n",
						__func__, q->nq.qos_tag, cl->nq.qos_tag);
				return;
			}

			/*
			 * Now we can destroy the class.
			 */
			nsswrr_destroy_class(sch, cl);
		}
	}
	qdisc_class_hash_destroy(&q->clhash);

	/*
	 * Stop the polling of basic stats.
	 */
	nssqdisc_stop_basic_stats_polling(&q->nq);

	/*
	 * Now we can go ahead and destroy the qdisc.
	 * Note: We don't have to detach ourselves from our parent because this
	 * will be taken care of by the graft call.
	 */
	nssqdisc_destroy(&q->nq);
	nssqdisc_info("Nsswrr destroyed %p\n", sch);
}

static int nsswrr_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
{
	nssqdisc_info("Nsswrr dumping qdisc\n");
	return skb->len;
}

static int nsswrr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	return nssqdisc_enqueue(skb, sch);
}

static struct sk_buff *nsswrr_dequeue(struct Qdisc *sch)
{
	return nssqdisc_dequeue(sch);
}

static unsigned int nsswrr_drop(struct Qdisc *sch)
{
	nssqdisc_info("In nsswrr drop\n");
	return nssqdisc_drop(sch);
}

static const struct Qdisc_class_ops nsswrr_class_ops = {
	.change = nsswrr_change_class,
	.delete = nsswrr_delete_class,
	.graft = nsswrr_graft_class,
	.leaf = nsswrr_leaf_class,
	.qlen_notify = nsswrr_qlen_notify,
	.get = nsswrr_get_class,
	.put = nsswrr_put_class,
	.dump = nsswrr_dump_class,
	.dump_stats = nsswrr_dump_class_stats,
	.walk = nsswrr_walk
};

static struct Qdisc_ops nsswrr_qdisc_ops __read_mostly = {
	.id = "nsswrr",
	.init = nsswrr_init_qdisc,
	.change = nsswrr_change_qdisc,
	.reset = nsswrr_reset_qdisc,
	.destroy = nsswrr_destroy_qdisc,
	.dump = nsswrr_dump_qdisc,
	.enqueue = nsswrr_enqueue,
	.dequeue = nsswrr_dequeue,
	.peek = qdisc_peek_dequeued,
	.drop = nsswrr_drop,
	.cl_ops = &nsswrr_class_ops,
	.priv_size = sizeof(struct nsswrr_sched_data),
	.owner = THIS_MODULE
};

static const struct Qdisc_class_ops nsswfq_class_ops = {
	.change = nsswrr_change_class,
	.delete = nsswrr_delete_class,
	.graft = nsswrr_graft_class,
	.leaf = nsswrr_leaf_class,
	.qlen_notify = nsswrr_qlen_notify,
	.get = nsswrr_get_class,
	.put = nsswrr_put_class,
	.dump = nsswrr_dump_class,
	.dump_stats = nsswrr_dump_class_stats,
	.walk = nsswrr_walk
};

static struct Qdisc_ops nsswfq_qdisc_ops __read_mostly = {
	.id = "nsswfq",
	.init = nsswrr_init_qdisc,
	.change = nsswrr_change_qdisc,
	.reset = nsswrr_reset_qdisc,
	.destroy = nsswrr_destroy_qdisc,
	.dump = nsswrr_dump_qdisc,
	.enqueue = nsswrr_enqueue,
	.dequeue = nsswrr_dequeue,
	.peek = qdisc_peek_dequeued,
	.drop = nsswrr_drop,
	.cl_ops = &nsswfq_class_ops,
	.priv_size = sizeof(struct nsswrr_sched_data),
	.owner = THIS_MODULE
};


/* ================== Module registration ================= */

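/*
 * nssqdisc_module_init()
 *	Registers for shaping with the NSS driver, registers each of the NSS
 *	aware qdiscs with the kernel and installs the netdevice notifier used
 *	by the nssqdisc core. Note that if any registration fails, the function
 *	returns immediately and the qdiscs registered before the failure remain
 *	registered.
 */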
static int __init nssqdisc_module_init(void)
{
	int ret;
	nssqdisc_info("Module initializing");
	nssqdisc_ctx = nss_shaper_register_shaping();

	ret = register_qdisc(&nsspfifo_qdisc_ops);
	if (ret != 0)
		return ret;
	nssqdisc_info("NSS pfifo registered");

	ret = register_qdisc(&nssbfifo_qdisc_ops);
	if (ret != 0)
		return ret;
	nssqdisc_info("NSS bfifo registered");

	ret = register_qdisc(&nsscodel_qdisc_ops);
	if (ret != 0)
		return ret;
	nssqdisc_info("NSSCodel registered");

	ret = register_qdisc(&nsstbl_qdisc_ops);
	if (ret != 0)
		return ret;
	nssqdisc_info("NSSTBL registered");

	ret = register_qdisc(&nssprio_qdisc_ops);
	if (ret != 0)
		return ret;
	nssqdisc_info("NSSPRIO registered");

	ret = register_qdisc(&nssbf_qdisc_ops);
	if (ret != 0)
		return ret;
	nssqdisc_info("NSSBF registered");

	ret = register_qdisc(&nsswrr_qdisc_ops);
	if (ret != 0)
		return ret;
	nssqdisc_info("NSSWRR registered");

	ret = register_qdisc(&nsswfq_qdisc_ops);
	if (ret != 0)
		return ret;
	nssqdisc_info("NSSWFQ registered");

	ret = register_netdevice_notifier(&nssqdisc_device_notifier);
	if (ret != 0)
		return ret;
	nssqdisc_info("NSS qdisc device notifiers registered");

	return 0;
}

static void __exit nssqdisc_module_exit(void)
{
	unregister_qdisc(&nsspfifo_qdisc_ops);
	nssqdisc_info("NSSPFIFO Unregistered");

	unregister_qdisc(&nssbfifo_qdisc_ops);
	nssqdisc_info("NSSBFIFO Unregistered");

	unregister_qdisc(&nsscodel_qdisc_ops);
	nssqdisc_info("NSSCODEL Unregistered");

	unregister_qdisc(&nsstbl_qdisc_ops);
	nssqdisc_info("NSSTBL Unregistered");

	unregister_qdisc(&nssprio_qdisc_ops);
	nssqdisc_info("NSSPRIO Unregistered");

	unregister_qdisc(&nssbf_qdisc_ops);
	nssqdisc_info("NSSBF Unregistered\n");

	unregister_qdisc(&nsswrr_qdisc_ops);
	nssqdisc_info("NSSWRR Unregistered\n");

	unregister_qdisc(&nsswfq_qdisc_ops);
	nssqdisc_info("NSSWFQ Unregistered\n");

	unregister_netdevice_notifier(&nssqdisc_device_notifier);
}

module_init(nssqdisc_module_init)
module_exit(nssqdisc_module_exit)

MODULE_LICENSE("GPL");