/*
2 **************************************************************************
3 * Copyright (c) 2014, Qualcomm Atheros, Inc.
4 * Permission to use, copy, modify, and/or distribute this software for
5 * any purpose with or without fee is hereby granted, provided that the
6 * above copyright notice and this permission notice appear in all copies.
7 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
8 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
9 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
10 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
11 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
12 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
13 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
14 **************************************************************************
15 */
16
17/*
18 * Note: This file will be moved into the nss-qdisc directory once the driver
19 * is re-organized.
20 */
21
22#include <linux/module.h>
23#include <linux/types.h>
24#include <linux/kernel.h>
25#include <linux/skbuff.h>
26#include <net/pkt_sched.h>
27#include <net/inet_ecn.h>
28#include <net/netfilter/nf_conntrack.h>
29#include <linux/if_bridge.h>
30#include <linux/list.h>
31#include <nss_api_if.h>
32#include <linux/version.h>
33#include <br_private.h>
34
35/*
36 * NSS QDisc debug macros
37 */
38#if (NSSQDISC_DEBUG_LEVEL < 1)
39#define nssqdisc_assert(fmt, args...)
40#else
41#define nssqdisc_assert(c) if (!(c)) { BUG_ON(!(c)); }
42#endif
43
44#if (NSSQDISC_DEBUG_LEVEL < 2)
45#define nssqdisc_error(fmt, args...)
46#else
47#define nssqdisc_error(fmt, args...) printk(KERN_ERR "%d:ERROR:"fmt, __LINE__, ##args)
48#endif
49
50#if (NSSQDISC_DEBUG_LEVEL < 3)
51#define nssqdisc_warning(fmt, args...)
52#else
53#define nssqdisc_warning(fmt, args...) printk(KERN_WARNING "%d:WARN:"fmt, __LINE__, ##args)
54#endif
55
56#if (NSSQDISC_DEBUG_LEVEL < 4)
57#define nssqdisc_info(fmt, args...)
58#else
59#define nssqdisc_info(fmt, args...) printk(KERN_INFO "%d:INFO:"fmt, __LINE__, ##args)
60#endif
61
62#if (NSSQDISC_DEBUG_LEVEL < 5)
63#define nssqdisc_trace(fmt, args...)
64#else
65#define nssqdisc_trace(fmt, args...) printk(KERN_DEBUG "%d:TRACE:"fmt, __LINE__, ##args)
66#endif
67
68/*
69 * State values
70 */
71#define NSSQDISC_STATE_IDLE 0
72#define NSSQDISC_STATE_READY 1
73#define NSSQDISC_STATE_BUSY 2
74
75#define NSSQDISC_STATE_INIT_FAILED -1
76#define NSSQDISC_STATE_ASSIGN_SHAPER_SEND_FAIL -2
77#define NSSQDISC_STATE_SHAPER_ASSIGN_FAILED -3
78#define NSSQDISC_STATE_NODE_ALLOC_SEND_FAIL -4
79#define NSSQDISC_STATE_NODE_ALLOC_FAIL -5
80#define NSSQDISC_STATE_ROOT_SET_SEND_FAIL -6
81#define NSSQDISC_STATE_ROOT_SET_FAIL -7
82#define NSSQDISC_STATE_DEFAULT_SET_SEND_FAIL -8
83#define NSSQDISC_STATE_DEFAULT_SET_FAIL -9
84#define NSSQDISC_STATE_CHILD_ALLOC_SEND_FAIL -10
85#define NSSQDISC_STATE_NODE_ALLOC_FAIL_CHILD -11
86#define NSSQDISC_STATE_FAILED_RESPONSE -12
87
88#define NSSQDISC_BRIDGE_PORT_MAX 100
89
90void *nssqdisc_ctx; /* Shaping context for nssqdisc */
91
/*
 * struct nssqdisc_qdisc
 *	Per-qdisc private state linking a Linux Qdisc to its NSS shaper node.
 */
struct nssqdisc_qdisc {
	struct Qdisc *qdisc;			/* Handy pointer back to containing qdisc */
	void *nss_shaping_ctx;			/* NSS context for general operations */
	int32_t nss_interface_number;		/* NSS Interface number we are shaping on */
	nss_shaper_node_type_t type;		/* Type of shaper node */
	bool is_root;				/* True if root qdisc on a net device */
	bool is_bridge;				/* True when qdisc is a bridge */
	bool is_virtual;			/* True when this is a non-bridge qdisc BUT
						 * the device is represented as a virtual in
						 * the NSS e.g. perhaps operating on a wifi interface.
						 */
	bool destroy_virtual_interface;		/* Set if the interface is first registered in NSS by
						 * us. This means it needs to be unregistered when the
						 * module goes down.
						 */
	volatile atomic_t state;		/* < 0: Signal that qdisc has 'failed'. 0
						 * indicates 'pending' setup. > 0 is READY.
						 * NOTE: volatile AND atomic - this is polled
						 * AND is used for synchronisation.
						 */
	uint32_t shaper_id;			/* Used when is_root. Child qdiscs use this
						 * information to know what shaper under
						 * which to create shaper nodes
						 */
	uint32_t qos_tag;			/* QoS tag of this node */
	volatile int32_t pending_final_state;	/* Used to let the callback cycle know what
						 * state to set the qdisc in on successful
						 * completion.
						 */
	void *virtual_interface_context;	/* Context provided by the NSS driver for
						 * new interfaces that are registered.
						 */
	void *bounce_context;			/* Context for bounce registration. Bounce
						 * enables packets to be sent to NSS for
						 * shaping purposes, and is returned to
						 * Linux for transmit.
						 */
	void (*stats_update_callback)(void *, struct nss_shaper_response *);
						/* Stats update callback function for qdisc specific
						 * stats update
						 */
	struct timer_list stats_get_timer;	/* Timer used to poll for stats */
	atomic_t pending_stat_requests;		/* Number of pending stats responses */
	struct nss_shaper_response_shaper_node_basic_stats_get_success basic_stats_latest;
						/* Latest stats obtained */
};
138
139/*
140 * nssqdisc bridge update structure
141 */
142struct nssqdisc_bridge_update {
143 int port_list[NSSQDISC_BRIDGE_PORT_MAX];
144 int port_list_count;
145 int unassign_count;
146};
147
148/*
149 * nssqdisc bridge task types
150 */
151enum nssqdisc_bshaper_tasks {
152 NSSQDISC_ASSIGN_BSHAPER,
153 NSSQDISC_UNASSIGN_BSHAPER,
154};
155
156/*
157 * nssqdisc_get_br_port()
158 * Returns the bridge port structure of the bridge to which the device is attached to.
159 */
160static inline struct net_bridge_port *nssqdisc_get_br_port(const struct net_device *dev)
161{
162 struct net_bridge_port *br_port;
163
164 if (!dev)
165 return NULL;
166
167 rcu_read_lock();
168#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0))
169 br_port = br_port_get_rcu(dev);
170#else
171 br_port = rcu_dereference(dev->br_port);
172#endif
173 rcu_read_unlock();
174
175 return br_port;
176}
177
178/*
179 * nssqdisc_attach_bshaper_callback()
180 * Call back funtion for bridge shaper attach to an interface.
181 */
182static void nssqdisc_attach_bshaper_callback(void *app_data, struct nss_shaper_response *response)
183{
184 struct Qdisc *sch = (struct Qdisc *)app_data;
185 struct nssqdisc_qdisc *nq = qdisc_priv(sch);
186
187 if (response->type < 0) {
188 nssqdisc_info("%s: B-shaper attach FAILED - response: %d\n", __func__, response->type);
189 atomic_set(&nq->state, NSSQDISC_STATE_FAILED_RESPONSE);
190 return;
191 }
192
193 nssqdisc_info("%s: B-shaper attach SUCCESS - response %d\n", __func__, response->type);
194 atomic_set(&nq->state, NSSQDISC_STATE_READY);
195}
196
197/*
198 * nssqdisc_attach_bridge()
199 * Attaches a given bridge shaper to a given interface.
200 */
201static int nssqdisc_attach_bshaper(struct Qdisc *sch, uint32_t if_num)
202{
203 struct nss_shaper_configure shaper_assign;
204 struct nssqdisc_qdisc *nq = (struct nssqdisc_qdisc *)qdisc_priv(sch);
205 int32_t state, rc;
206
207 nssqdisc_info("%s: Attaching B-shaper %u to interface %u\n", __func__,
208 nq->shaper_id, if_num);
209
210 state = atomic_read(&nq->state);
211 if (state != NSSQDISC_STATE_READY) {
212 nssqdisc_error("%s: qdisc %p (type %d) is not ready: State - %d\n",
213 __func__, sch, nq->type, state);
214 BUG();
215 }
216
217 /*
218 * Set shaper node state to IDLE
219 */
220 atomic_set(&nq->state, NSSQDISC_STATE_IDLE);
221
222 shaper_assign.interface_num = if_num;
223 shaper_assign.i_shaper = false;
224 shaper_assign.cb = nssqdisc_attach_bshaper_callback;
225 shaper_assign.app_data = sch;
226 shaper_assign.owner = THIS_MODULE;
227 shaper_assign.type = NSS_SHAPER_CONFIG_TYPE_ASSIGN_SHAPER;
228 shaper_assign.mt.unassign_shaper.shaper_num = nq->shaper_id;
229
230 rc = nss_shaper_config_send(nq->nss_shaping_ctx, &shaper_assign);
231 if (rc != NSS_TX_SUCCESS) {
232 nssqdisc_warning("%s: Failed to send bshaper (id: %u) attach for "
233 "interface(if_num: %u)\n", __func__, nq->shaper_id, if_num);
234 return -1;
235 }
236
237 while (NSSQDISC_STATE_IDLE == (state = atomic_read(&nq->state))) {
238 yield();
239 }
240
241 if (state == NSSQDISC_STATE_FAILED_RESPONSE) {
242 nssqdisc_error("%s: Failed to attach B-shaper %u to interface %u\n",
243 __func__, nq->shaper_id, if_num);
244 return -1;
245 }
246
247 nssqdisc_info("%s: Attach of B-shaper %u to interface %u is complete\n",
248 __func__, nq->shaper_id, if_num);
249 return 0;
250}
251
252/*
253 * nssqdisc_detach_bshaper_callback()
254 * Call back function for bridge shaper detach
255 */
256static void nssqdisc_detach_bshaper_callback(void *app_data, struct nss_shaper_response *response)
257{
258 struct Qdisc *sch = (struct Qdisc *)app_data;
259 struct nssqdisc_qdisc *nq = qdisc_priv(sch);
260
261 if (response->type < 0) {
262 nssqdisc_info("%s: B-shaper detach FAILED - response: %d\n",
263 __func__, response->type);
264 atomic_set(&nq->state, NSSQDISC_STATE_FAILED_RESPONSE);
265 return;
266 }
267
268 nssqdisc_info("%s: B-shaper detach SUCCESS\n", __func__);
269 atomic_set(&nq->state, NSSQDISC_STATE_READY);
270}
271
272/*
273 * nssqdisc_detach_bridge()
274 * Detaches a given bridge shaper from a given interface
275 */
276static int nssqdisc_detach_bshaper(struct Qdisc *sch, uint32_t if_num)
277{
278 struct nss_shaper_configure shaper_assign;
279 struct nssqdisc_qdisc *nq = (struct nssqdisc_qdisc *)qdisc_priv(sch);
280 int32_t state, rc;
281
282 nssqdisc_info("%s: Detaching B-shaper %u from interface %u\n",
283 __func__, nq->shaper_id, if_num);
284
285 state = atomic_read(&nq->state);
286 if (state != NSSQDISC_STATE_READY) {
287 nssqdisc_error("%s: qdisc %p (type %d) is not ready: %d\n",
288 __func__, sch, nq->type, state);
289 BUG();
290 }
291
292 /*
293 * Set shaper node state to IDLE
294 */
295 atomic_set(&nq->state, NSSQDISC_STATE_IDLE);
296
297 shaper_assign.interface_num = if_num;
298 shaper_assign.i_shaper = false;
299 shaper_assign.cb = nssqdisc_detach_bshaper_callback;
300 shaper_assign.app_data = sch;
301 shaper_assign.owner = THIS_MODULE;
302 shaper_assign.type = NSS_SHAPER_CONFIG_TYPE_UNASSIGN_SHAPER;
303 shaper_assign.mt.unassign_shaper.shaper_num = nq->shaper_id;
304
305 rc = nss_shaper_config_send(nq->nss_shaping_ctx, &shaper_assign);
306 if (rc != NSS_TX_SUCCESS) {
307 nssqdisc_warning("%s: Failed to send B-shaper (id: %u) detach "
308 "for interface(if_num: %u)\n", __func__, nq->shaper_id, if_num);
309 return -1;
310 }
311
312 nssqdisc_info("%s: Detach of B-shaper %u to interface %u is complete.",
313 __func__, nq->shaper_id, if_num);
314 atomic_set(&nq->state, NSSQDISC_STATE_READY);
315 return 0;
316}
317
318/*
319 * nssqdisc_refresh_bshaper_assignment()
320 * Performs assign on unassign of bshaper for interfaces on the bridge.
321 */
322static int nssqdisc_refresh_bshaper_assignment(struct Qdisc *br_qdisc,
323 enum nssqdisc_bshaper_tasks task)
324{
325 struct net_device *dev;
326 struct net_device *br_dev = qdisc_dev(br_qdisc);
327 struct nssqdisc_qdisc *nq;
328 struct nssqdisc_bridge_update br_update;
329 int i;
330
331 if ((br_qdisc->parent != TC_H_ROOT) && (br_qdisc->parent != TC_H_UNSPEC)) {
332 nssqdisc_error("%s: Qdisc not root qdisc for the bridge interface: "
333 "Handle - %x", __func__, br_qdisc->parent);
334 return -1;
335 }
336
337 nq = qdisc_priv(br_qdisc);
338
339 /*
340 * Initialize the bridge update srtucture.
341 */
342 br_update.port_list_count = 0;
343 br_update.unassign_count = 0;
344
345 read_lock(&dev_base_lock);
346 dev = first_net_device(&init_net);
347 while(dev) {
348 struct net_bridge_port *br_port = nssqdisc_get_br_port(dev);
349 int nss_if_num;
350
351 nssqdisc_info("%s: Scanning device %s", __func__, dev->name);
352 if (!br_port || !br_port->br) {
353 goto nextdev;
354 }
355
356 /*
357 * Dont care if this device is not on the
358 * bridge that is of concern.
359 */
360 if (br_port->br->dev != br_dev) {
361 goto nextdev;
362 }
363
364 /*
365 * If the interface is known to NSS then we will have to shape it.
366 * Irrespective of whether it has an interface qdisc or not.
367 */
368 nss_if_num = nss_get_interface_number(nq->nss_shaping_ctx, dev);
369 if (nss_if_num < 0) {
370 goto nextdev;
371 }
372
373 nssqdisc_info("%s: Will be linking %s to bridge %s\n", __func__,
374 dev->name, br_dev->name);
375 br_update.port_list[br_update.port_list_count++] = nss_if_num;
376nextdev:
377 dev = next_net_device(dev);
378 }
379 read_unlock(&dev_base_lock);
380
381 nssqdisc_info("%s: List count %d\n", __func__, br_update.port_list_count);
382
383 if (task == NSSQDISC_ASSIGN_BSHAPER) {
384 /*
385 * Loop through the ports and assign them with B-shapers.
386 */
387 for (i = 0; i < br_update.port_list_count; i++) {
388 if (nssqdisc_attach_bshaper(br_qdisc, br_update.port_list[i]) >= 0) {
389 nssqdisc_info("%s: Interface %u added to bridge %s\n",
390 __func__, br_update.port_list[i], br_dev->name);
391 continue;
392 }
393 nssqdisc_error("%s: Unable to attach bshaper with shaper-id: %u, "
394 "to interface if_num: %d\n", __func__, nq->shaper_id,
395 br_update.port_list[i]);
396 br_update.unassign_count = i;
397 break;
398 }
399 nssqdisc_info("%s: Unassign count %d\n", __func__, br_update.unassign_count);
400 if (br_update.unassign_count == 0) {
401 return 0;
402 }
403
404 /*
405 * In case of a failure, unassign the B-shapers that were assigned above
406 */
407 for (i = 0; i < br_update.unassign_count; i++) {
408 if (nssqdisc_detach_bshaper(br_qdisc, br_update.port_list[i]) >= 0) {
409 continue;
410 }
411 nssqdisc_error("%s: Unable to detach bshaper with shaper-id: %u, "
412 "from interface if_num: %d\n", __func__, nq->shaper_id,
413 br_update.port_list[i]);
414 BUG();
415 }
416
417 nssqdisc_info("%s: Failed to link interfaces to bridge\n", __func__);
418 return -1;
419 } else if (task == NSSQDISC_UNASSIGN_BSHAPER) {
420 /*
421 * Loop through the ports and assign them with B-shapers.
422 */
423 for (i = 0; i < br_update.port_list_count; i++) {
424 if (nssqdisc_detach_bshaper(br_qdisc, br_update.port_list[i]) >= 0) {
425 nssqdisc_info("%s: Interface %u removed from bridge %s\n",
426 __func__, br_update.port_list[i], br_dev->name);
427 continue;
428 }
429 nssqdisc_error("%s: Unable to detach bshaper with shaper-id: %u, "
430 "from interface if_num: %d\n", __func__, nq->shaper_id,
431 br_update.port_list[i]);
432 BUG();
433 }
434 }
435
436 return 0;
437}
438
439/*
440 * nssqdisc_root_cleanup_final()
441 * Performs final cleanup of a root shaper node after all other
442 * shaper node cleanup is complete.
443 */
444static void nssqdisc_root_cleanup_final(struct Qdisc *sch)
445{
446 struct nssqdisc_qdisc *nq = qdisc_priv(sch);
447
448 nssqdisc_info("%s: Root qdisc %p (type %d) final cleanup\n", __func__,
449 nq->qdisc, nq->type);
450
451 /*
452 * If we are a bridge then we have to unregister for bridge bouncing
453 * AND destroy the virtual interface that provides bridge shaping.
454 */
455 if (nq->is_bridge) {
456 /*
457 * Unregister for bouncing to the NSS for bridge shaping
458 */
459 nssqdisc_info("%s: Unregister for bridge bouncing: %p\n", __func__,
460 nq->bounce_context);
461 nss_unregister_shaper_bounce_bridge(nq->nss_interface_number);
462
463 /*
464 * Unregister the virtual interface we use to act as shaper
465 * for bridge shaping.
466 */
467 nssqdisc_info("%s: Release root bridge virtual interface: %p\n",
468 __func__, nq->virtual_interface_context);
469 nss_destroy_virt_if(nq->virtual_interface_context);
470 }
471
472 /*
473 * If we are a virual interface then we have to unregister for interface
474 * bouncing.
475 */
476 if (nq->is_virtual) {
477 /*
478 * Unregister for interface bouncing of packets
479 */
480 nssqdisc_info("%s: Unregister for interface bouncing: %p\n",
481 __func__, nq->bounce_context);
482 nss_unregister_shaper_bounce_interface(nq->nss_interface_number);
483 }
484
485 /*
486 * Finally unregister for shaping
487 */
488 nssqdisc_info("%s: Unregister for shaping\n", __func__);
489 nss_unregister_shaping(nq->nss_shaping_ctx);
490
491 /*
492 * Now set our final state
493 */
494 atomic_set(&nq->state, nq->pending_final_state);
495}
496
497/*
498 * nssqdisc_root_cleanup_shaper_unassign_callback()
499 * Invoked on the response to a shaper unassign config command issued
500 */
501static void nssqdisc_root_cleanup_shaper_unassign_callback(void *app_data,
502 struct nss_shaper_response *response)
503{
504 struct Qdisc *sch = (struct Qdisc *)app_data;
505 struct nssqdisc_qdisc *nq __attribute__ ((unused)) = qdisc_priv(sch);
506 nssqdisc_info("%s: Root qdisc %p (type %d) shaper unsassign "
507 "response: %d\n", __func__, sch, nq->type, response->type);
508 nssqdisc_root_cleanup_final(sch);
509}
510
511/*
512 * nssqdisc_root_cleanup_shaper_unassign()
513 * Issue command to unassign the shaper
514 */
515static void nssqdisc_root_cleanup_shaper_unassign(struct Qdisc *sch)
516{
517 struct nssqdisc_qdisc *nq = qdisc_priv(sch);
518 struct nss_shaper_configure shaper_unassign;
519 nss_tx_status_t rc;
520
521 nssqdisc_info("%s: Root qdisc %p (type %d): shaper unassign: %d\n",
522 __func__, sch, nq->type, nq->shaper_id);
523
524 shaper_unassign.interface_num = nq->nss_interface_number;
525 shaper_unassign.i_shaper = (nq->is_bridge)? false : true;
526 shaper_unassign.cb = nssqdisc_root_cleanup_shaper_unassign_callback;
527 shaper_unassign.app_data = sch;
528 shaper_unassign.owner = THIS_MODULE;
529 shaper_unassign.type = NSS_SHAPER_CONFIG_TYPE_UNASSIGN_SHAPER;
530 shaper_unassign.mt.unassign_shaper.shaper_num = nq->shaper_id;
531
532 rc = nss_shaper_config_send(nq->nss_shaping_ctx, &shaper_unassign);
533 if (rc == NSS_TX_SUCCESS) {
534 return;
535 }
536
537 nssqdisc_error("%s: Root qdisc %p (type %d): unassign command send failed: "
538 "%d, shaper id: %d\n", __func__, sch, nq->type, rc, nq->shaper_id);
539
540 nssqdisc_root_cleanup_final(sch);
541}
542
543/*
544 * nssqdisc_root_cleanup_free_node_callback()
545 * Invoked on the response to freeing a shaper node
546 */
547static void nssqdisc_root_cleanup_free_node_callback(void *app_data,
548 struct nss_shaper_response *response)
549{
550 struct Qdisc *sch = (struct Qdisc *)app_data;
551 struct nssqdisc_qdisc *nq __attribute__ ((unused)) = qdisc_priv(sch);
552 nssqdisc_info("%s: Root qdisc %p (type %d) free response "
553 "type: %d\n", __func__, sch, nq->type, response->type);
554
555 nssqdisc_root_cleanup_shaper_unassign(sch);
556}
557
558/*
559 * nssqdisc_root_cleanup_free_node()
560 * Free the shaper node, issue command to do so.
561 */
562static void nssqdisc_root_cleanup_free_node(struct Qdisc *sch)
563{
564 struct nssqdisc_qdisc *nq = qdisc_priv(sch);
565 struct nss_shaper_configure shaper_node_free;
566 nss_tx_status_t rc;
567
568 nssqdisc_info("%s: Root qdisc %p (type %d): freeing shaper node\n",
569 __func__, sch, nq->type);
570
571 shaper_node_free.interface_num = nq->nss_interface_number;
572 shaper_node_free.i_shaper = (nq->is_bridge)? false : true;
573 shaper_node_free.cb = nssqdisc_root_cleanup_free_node_callback;
574 shaper_node_free.app_data = sch;
575 shaper_node_free.owner = THIS_MODULE;
576 shaper_node_free.type = NSS_SHAPER_CONFIG_TYPE_FREE_SHAPER_NODE;
577 shaper_node_free.mt.free_shaper_node.qos_tag = nq->qos_tag;
578
579 rc = nss_shaper_config_send(nq->nss_shaping_ctx, &shaper_node_free);
580 if (rc == NSS_TX_SUCCESS) {
581 return;
582 }
583
584 nssqdisc_error("%s: Qdisc %p (type %d): free command send "
585 "failed: %d, qos tag: %x\n", __func__, sch, nq->type,
586 rc, nq->qos_tag);
587
588 /*
589 * Move onto unassigning the shaper instead
590 */
591 nssqdisc_root_cleanup_shaper_unassign(sch);
592}
593
594/*
595 * nssqdisc_root_init_root_assign_callback()
596 * Invoked on the response to assigning shaper node as root
597 */
598static void nssqdisc_root_init_root_assign_callback(void *app_data,
599 struct nss_shaper_response *response)
600{
601 struct Qdisc *sch = (struct Qdisc *)app_data;
602 struct nssqdisc_qdisc *nq = qdisc_priv(sch);
603
604 nssqdisc_info("%s: Root assign response for qdisc %p (type %d), "
605 "response type: %d\n", __func__, sch, nq->type, response->type);
606
607 if (response->type < 0) {
608 nq->pending_final_state = NSSQDISC_STATE_ROOT_SET_FAIL;
609 nssqdisc_root_cleanup_free_node(sch);
610 return;
611 }
612
613 /*
614 * If we are not a root upon a bridge then we are ready
615 */
616 if (!nq->is_bridge) {
617 nssqdisc_info("%s: Qdisc %p (type %d): set as root and "
618 "default, and is READY\n", __func__, sch, nq->type);
619 atomic_set(&nq->state, NSSQDISC_STATE_READY);
620 return;
621 }
622
623 /*
624 * We need to scan the bridge for ports that must have shapers
625 * assigned to them
626 */
627 nssqdisc_info("%s: Qdisc %p (type %d): set as root is done. "
628 "Bridge update..\n", __func__, sch, nq->type);
629
630 atomic_set(&nq->state, NSSQDISC_STATE_READY);
631}
632
633/*
634 * nssqdisc_root_init_alloc_node_callback()
635 * Invoked on the response to creating a shaper node as root
636 */
637static void nssqdisc_root_init_alloc_node_callback(void *app_data,
638 struct nss_shaper_response *response)
639{
640 struct Qdisc *sch = (struct Qdisc *)app_data;
641 struct nssqdisc_qdisc *nq = qdisc_priv(sch);
642 struct nss_shaper_configure root_assign;
643 nss_tx_status_t rc;
644
645 nssqdisc_info("%s: Qdisc %p (type %d) root alloc node "
646 "response type: %d\n", __func__, sch, nq->type,
647 response->type);
648
649 if (response->type < 0) {
650 nq->pending_final_state = NSSQDISC_STATE_NODE_ALLOC_FAIL;
651
652 /*
653 * No shaper node created, cleanup from unsassigning the shaper
654 */
655 nssqdisc_root_cleanup_shaper_unassign(sch);
656 return;
657 }
658
659 /*
660 * Shaper node has been allocated. Next step is to assign
661 * the shaper node as the root node of our shaper.
662 */
663 root_assign.interface_num = nq->nss_interface_number;
664 root_assign.i_shaper = (nq->is_bridge)? false : true;
665 root_assign.cb = nssqdisc_root_init_root_assign_callback;
666 root_assign.app_data = sch;
667 root_assign.owner = THIS_MODULE;
668 root_assign.type = NSS_SHAPER_CONFIG_TYPE_SET_ROOT;
669 root_assign.mt.set_root_node.qos_tag = nq->qos_tag;
670
671 rc = nss_shaper_config_send(nq->nss_shaping_ctx, &root_assign);
672 if (rc == NSS_TX_SUCCESS) {
673 return;
674 }
675
676 nssqdisc_error("%s: Root assign send command failed: %d\n",
677 __func__, rc);
678
679 nq->pending_final_state = NSSQDISC_STATE_ROOT_SET_SEND_FAIL;
680 nssqdisc_root_cleanup_free_node(sch);
681}
682
683/*
684 * nssqdisc_root_init_shaper_assign_callback()
685 * Invoked on the response to a shaper assign config command issued
686 */
687static void nssqdisc_root_init_shaper_assign_callback(void *app_data,
688 struct nss_shaper_response *response)
689{
690 struct Qdisc *sch = (struct Qdisc *)app_data;
691 struct nssqdisc_qdisc *nq = qdisc_priv(sch);
692 struct nss_shaper_configure shaper_node_create;
693 nss_tx_status_t rc;
694
695 nssqdisc_info("%s: Qdisc %p (type %d): shaper assign response type: %d\n",
696 __func__, sch, nq->type, response->type);
697
698 if (response->type < 0) {
699 /*
700 * Unable to assign a shaper, perform cleanup from final stage
701 */
702 nq->pending_final_state = NSSQDISC_STATE_SHAPER_ASSIGN_FAILED;
703 nssqdisc_root_cleanup_final(sch);
704 return;
705 }
706
707 /*
708 * Shaper has been allocated and assigned
709 */
710 nq->shaper_id = response->rt.shaper_assign_success.shaper_num;
711 nssqdisc_info("%s: Qdisc %p (type %d), shaper assigned: %u\n",
712 __func__, sch, nq->type, nq->shaper_id);
713
714 /*
715 * Next step is to allocate our actual shaper node
716 * qos_tag will be the handle we have been given
717 */
718 shaper_node_create.interface_num = nq->nss_interface_number;
719 shaper_node_create.i_shaper = (nq->is_bridge)? false : true;
720 shaper_node_create.cb = nssqdisc_root_init_alloc_node_callback;
721 shaper_node_create.app_data = sch;
722 shaper_node_create.owner = THIS_MODULE;
723 shaper_node_create.type = NSS_SHAPER_CONFIG_TYPE_ALLOC_SHAPER_NODE;
724 shaper_node_create.mt.alloc_shaper_node.node_type = nq->type;
725 shaper_node_create.mt.alloc_shaper_node.qos_tag = nq->qos_tag;
726
727 rc = nss_shaper_config_send(nq->nss_shaping_ctx, &shaper_node_create);
728 if (rc == NSS_TX_SUCCESS) {
729 return;
730 }
731
732 /*
733 * Unable to send alloc node command, cleanup from unassigning the shaper
734 */
735 nssqdisc_error("%s: Qdisc %p (type %d) create command failed: %d\n",
736 __func__, sch, nq->type, rc);
737
738 nq->pending_final_state = NSSQDISC_STATE_NODE_ALLOC_SEND_FAIL;
739 nssqdisc_root_cleanup_shaper_unassign(sch);
740}
741
742
743/*
744 * nssqdisc_child_cleanup_final()
745 * Perform final cleanup of a shaper node after all shaper node
746 * cleanup is complete.
747 */
748static void nssqdisc_child_cleanup_final(struct Qdisc *sch)
749{
750 struct nssqdisc_qdisc *nq = qdisc_priv(sch);
751
752 nssqdisc_info("%s: Final cleanup type %d: %p\n", __func__,
753 nq->type, sch);
754
755 /*
756 * Finally unregister for shaping
757 */
758 nssqdisc_info("%s: Unregister for shaping\n", __func__);
759 nss_unregister_shaping(nq->nss_shaping_ctx);
760
761 /*
762 * Now set our final state
763 */
764 atomic_set(&nq->state, nq->pending_final_state);
765}
766
767
768/*
769 * nssqdisc_child_cleanup_free_node_callback()
770 * Invoked on the response to freeing a child shaper node
771 */
772static void nssqdisc_child_cleanup_free_node_callback(void *app_data,
773 struct nss_shaper_response *response)
774{
775 struct Qdisc *sch = (struct Qdisc *)app_data;
776 struct nssqdisc_qdisc *nq __attribute__((unused)) = qdisc_priv(sch);
777
778 nssqdisc_info("%s: Qdisc %p (type %d): child free response type: %d\n",
779 __func__, sch, nq->type, response->type);
780
781 if (response->type < 0) {
782 nssqdisc_error("%s: Qdisc %p (type %d): free shaper node failed\n",
783 __func__, sch, nq->type);
784 } else {
785 nssqdisc_info("%s: Qdisc %p (type %d): child shaper node "
786 "free complete\n", __func__, sch, nq->type);
787 }
788
789 /*
790 * Perform final cleanup
791 */
792 nssqdisc_child_cleanup_final(sch);
793}
794
795/*
796 * nssqdisc_child_cleanup_free_node()
797 * Free the child shaper node, issue command to do so.
798 */
799static void nssqdisc_child_cleanup_free_node(struct Qdisc *sch)
800{
801 struct nssqdisc_qdisc *nq = qdisc_priv(sch);
802 struct nss_shaper_configure shaper_node_free;
803 nss_tx_status_t rc;
804
805 nssqdisc_info("%s: Qdisc %p (type %d): free shaper node command\n",
806 __func__, sch, nq->type);
807
808 shaper_node_free.interface_num = nq->nss_interface_number;
809 shaper_node_free.i_shaper = (nq->is_bridge)? false : true;
810 shaper_node_free.cb = nssqdisc_child_cleanup_free_node_callback;
811 shaper_node_free.app_data = sch;
812 shaper_node_free.owner = THIS_MODULE;
813 shaper_node_free.type = NSS_SHAPER_CONFIG_TYPE_FREE_SHAPER_NODE;
814 shaper_node_free.mt.free_shaper_node.qos_tag = nq->qos_tag;
815
816 rc = nss_shaper_config_send(nq->nss_shaping_ctx, &shaper_node_free);
817 if (rc == NSS_TX_SUCCESS) {
818 return;
819 }
820
821 nssqdisc_error("%s: Qdisc %p (type %d): child free node command send "
822 "failed: %d, qos tag: %x\n", __func__, sch, nq->type,
823 rc, nq->qos_tag);
824
825 /*
826 * Perform final cleanup
827 */
828 nssqdisc_child_cleanup_final(sch);
829}
830
831/*
832 * nssqdisc_child_init_alloc_node_callback()
833 * Invoked on the response to creating a child shaper node
834 */
835static void nssqdisc_child_init_alloc_node_callback(void *app_data,
836 struct nss_shaper_response *response)
837{
838 struct Qdisc *sch = (struct Qdisc *)app_data;
839 struct nssqdisc_qdisc *nq = qdisc_priv(sch);
840
841 nssqdisc_info("%s: Qdisc %p (type %d): child alloc node, response "
842 "type: %d\n", __func__, sch, nq->type, response->type);
843
844 if (response->type < 0) {
845 /*
846 * Cleanup from final stage
847 */
848 nq->pending_final_state = NSSQDISC_STATE_NODE_ALLOC_FAIL_CHILD;
849 nssqdisc_child_cleanup_final(sch);
850 return;
851 }
852
853 /*
854 * Shaper node has been allocated
855 */
856 nssqdisc_info("%s: Qdisc %p (type %d): shaper node successfully "
857 "created as a child node\n",__func__, sch, nq->type);
858
859 atomic_set(&nq->state, NSSQDISC_STATE_READY);
860}
861
862/*
863 * nssqdisc_bounce_callback()
864 * Enqueues packets bounced back from NSS firmware.
865 */
866static void nssqdisc_bounce_callback(void *app_data, struct sk_buff *skb)
867{
868 struct Qdisc *sch = (struct Qdisc *)app_data;
869
870 /*
871 * All we have to do is enqueue for transmit and schedule a dequeue
872 */
873 __qdisc_enqueue_tail(skb, sch, &sch->q);
874 __netif_schedule(sch);
875}
876
877/*
878 * nssqdisc_peek()
879 * Called to peek at the head of an nss qdisc
880 */
881static struct sk_buff *nssqdisc_peek(struct Qdisc *sch)
882{
883 return skb_peek(&sch->q);
884}
885
886/*
887 * nssqdisc_drop()
888 * Called to drop the packet at the head of queue
889 */
890static unsigned int nssqdisc_drop(struct Qdisc *sch)
891{
892 return __qdisc_queue_drop_head(sch, &sch->q);
893}
894
895/*
896 * nssqdisc_reset()
897 * Called when a qdisc is reset
898 */
899static void nssqdisc_reset(struct Qdisc *sch)
900{
901 struct nssqdisc_qdisc *nq __attribute__ ((unused)) = qdisc_priv(sch);
902
903 nssqdisc_info("%s: Qdisc %p (type %d) resetting\n",
904 __func__, sch, nq->type);
905
906 /*
907 * Delete all packets pending in the output queue and reset stats
908 */
909 qdisc_reset_queue(sch);
910}
911
912/*
913 * nssqdisc_enqueue()
914 * Generic enqueue call for enqueuing packets into NSS for shaping
915 */
916static int nssqdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
917{
918 struct nssqdisc_qdisc *nq = qdisc_priv(sch);
919 nss_tx_status_t status;
920
921 /*
922 * If we are not the root qdisc then we should not be getting packets!!
923 */
924 if (!nq->is_root) {
925 nssqdisc_warning("%s: Qdisc %p (type %d): unexpected packet "
926 "for child qdisc - skb: %p\n", __func__, sch, nq->type, skb);
927 __qdisc_enqueue_tail(skb, sch, &sch->q);
928 __netif_schedule(sch);
929 return NET_XMIT_SUCCESS;
930 }
931
932 /*
933 * Packet enueued in linux for transmit.
934 *
935 * What we do here depends upon whether we are a bridge or not. If not a
936 * bridge then it depends on if we are a physical or virtual interface
937 * The decision we are trying to reach is whether to bounce a packet to
938 * the NSS to be shaped or not.
939 *
940 * is_bridge is_virtual Meaning
941 * ---------------------------------------------------------------------------
942 * false false Physical interface in NSS
943 *
944 * Action: Simply allow the packet to be dequeued. The packet will be
945 * shaped by the interface shaper in the NSS by the usual transmit path.
946 *
947 *
948 * false true Physical interface in Linux.
949 * NSS still responsible for shaping
950 *
951 * Action: Bounce the packet to the NSS virtual interface that represents
952 * this Linux physical interface for INTERFACE shaping. When the packet is
953 * returned from being shaped we allow it to be dequeued for transmit.
954 *
955 * true n/a Logical Linux interface.
956 * Root qdisc created a virtual interface
957 * to represent it in the NSS for shaping
958 * purposes.
959 *
960 * Action: Bounce the packet to the NSS virtual interface (for BRIDGE shaping)
961 * the bridge root qdisc created for it. When the packet is returned from being
962 * shaped we allow it to be dequeued for transmit.
963 */
964
965 if (!nq->is_bridge && !nq->is_virtual) {
966 /*
967 * TX to an NSS physical - the shaping will occur as part of normal
968 * transmit path.
969 */
970 __qdisc_enqueue_tail(skb, sch, &sch->q);
971 __netif_schedule(sch);
972 return NET_XMIT_SUCCESS;
973 }
974
975 if (!nq->is_bridge && nq->is_virtual) {
976 /*
977 * TX to a physical Linux (NSS virtual). Bounce packet to NSS for
978 * interface shaping.
979 */
980 nss_tx_status_t status = nss_shaper_bounce_interface_packet(nq->bounce_context,
981 nq->nss_interface_number, skb);
982 if (status != NSS_TX_SUCCESS) {
983 /*
984 * Just transmit anyway, don't want to loose the packet
985 */
986 nssqdisc_warning("%s: Qdisc %p (type %d): failed to bounce for "
987 "interface: %d, skb: %p\n", __func__, sch, nq->type,
988 nq->nss_interface_number, skb);
989
990 __qdisc_enqueue_tail(skb, sch, &sch->q);
991 __netif_schedule(sch);
992 }
993 return NET_XMIT_SUCCESS;
994 }
995
996 /*
997 * TX to a bridge, this is to be shaped by the b shaper on the virtual interface created
998 * to represent the bridge interface.
999 */
1000 status = nss_shaper_bounce_bridge_packet(nq->bounce_context, nq->nss_interface_number, skb);
1001 if (status != NSS_TX_SUCCESS) {
1002 /*
1003 * Just transmit anyway, don't want to loose the packet
1004 */
1005 nssqdisc_warning("%s: Qdisc %p (type %d): failed to bounce for bridge %d, skb: %p\n",
1006 __func__, sch, nq->type, nq->nss_interface_number, skb);
1007 __qdisc_enqueue_tail(skb, sch, &sch->q);
1008 __netif_schedule(sch);
1009 }
1010 return NET_XMIT_SUCCESS;
1011}
1012
1013/*
1014 * nssqdisc_dequeue()
1015 * Generic dequeue call for dequeuing bounced packets.
1016 */
1017static struct sk_buff *nssqdisc_dequeue(struct Qdisc *sch)
1018{
1019 struct sk_buff *skb;
1020
1021 /*
1022 * We use __skb_dequeue() to ensure that
1023 * stats don't get updated twice.
1024 */
1025 skb = __skb_dequeue(&sch->q);
1026
1027 return skb;
1028}
1029
1030/*
1031 * nssqdisc_set_default_callback()
1032 * The callback function for a shaper node set default
1033 */
1034static void nssqdisc_set_default_callback(void *app_data,
1035 struct nss_shaper_response *response)
1036{
1037 struct Qdisc *sch = (struct Qdisc *)app_data;
1038 struct nssqdisc_qdisc *nq = qdisc_priv(sch);
1039
1040 nssqdisc_info("%s: Qdisc %p (type %d): shaper node set default, response type: %d\n",
1041 __func__, sch, nq->type, response->type);
1042
1043 if (response->type < 0) {
1044 atomic_set(&nq->state, NSSQDISC_STATE_FAILED_RESPONSE);
1045 return;
1046 }
1047
1048 nssqdisc_info("%s: Qdisc %p (type %d): attach complete\n", __func__, sch, nq->type);
1049 atomic_set(&nq->state, NSSQDISC_STATE_READY);
1050}
1051
1052/*
1053 * nssqdisc_node_set_default()
1054 * Configuration function that sets shaper node as default for packet enqueue
1055 */
1056static int nssqdisc_set_default(struct Qdisc *sch)
1057{
1058 struct nssqdisc_qdisc *nq = qdisc_priv(sch);
1059 struct nss_shaper_configure shaper_node_default;
1060 int32_t state, rc;
1061
1062 nssqdisc_info("%s: Setting qdisc %p (type %d) as default\n", __func__,
1063 sch, nq->type);
1064
1065 state = atomic_read(&nq->state);
1066 if (state != NSSQDISC_STATE_READY) {
1067 nssqdisc_error("%s: Qdisc %p (type %d): not ready: %d\n", __func__,
1068 sch, nq->type, state);
1069 BUG();
1070 }
1071
1072 /*
1073 * Set shaper node state to IDLE
1074 */
1075 atomic_set(&nq->state, NSSQDISC_STATE_IDLE);
1076
1077 shaper_node_default.interface_num = nq->nss_interface_number;
1078 shaper_node_default.i_shaper = (nq->is_bridge)? false : true;
1079 shaper_node_default.cb = nssqdisc_set_default_callback;
1080 shaper_node_default.app_data = sch;
1081 shaper_node_default.owner = THIS_MODULE;
1082 shaper_node_default.type = NSS_SHAPER_CONFIG_TYPE_SET_DEFAULT;
1083 shaper_node_default.mt.set_default_node.qos_tag = nq->qos_tag;
1084
1085 rc = nss_shaper_config_send(nq->nss_shaping_ctx, &shaper_node_default);
1086 if (rc != NSS_TX_SUCCESS) {
1087 nssqdisc_warning("%s: Failed to send set default message for "
1088 "qdisc type %d\n", __func__, nq->type);
1089 return -1;
1090 }
1091
1092 /*
1093 * Wait until cleanup operation is complete at which point the state
1094 * shall become idle. NOTE: This relies on the NSS driver to be able
1095 * to operate asynchronously which means kernel preemption is required.
1096 */
1097 while (NSSQDISC_STATE_IDLE == (state = atomic_read(&nq->state))) {
1098 yield();
1099 }
1100
1101 if (state == NSSQDISC_STATE_FAILED_RESPONSE) {
1102 nssqdisc_error("%s: Qdisc %p (type %d): failed to default "
1103 "State: %d\n", __func__, sch, nq->type, state);
1104 return -1;
1105 }
1106
1107 nssqdisc_info("%s: Qdisc %p (type %d): shaper node default complete\n",
1108 __func__, sch, nq->type);
1109 return 0;
1110}
1111
1112/*
1113 * nssqdisc_node_attach_callback()
1114 * The callback function for a shaper node attach message
1115 */
1116static void nssqdisc_node_attach_callback(void *app_data,
1117 struct nss_shaper_response *response)
1118{
1119 struct Qdisc *sch = (struct Qdisc *)app_data;
1120 struct nssqdisc_qdisc *nq = qdisc_priv(sch);
1121
1122 nssqdisc_info("%s: Qdisc %p (type %d) shaper node attach response "
1123 "type: %d\n", __func__, sch, nq->type, response->type);
1124 if (response->type < 0) {
1125 atomic_set(&nq->state, NSSQDISC_STATE_FAILED_RESPONSE);
1126 return;
1127 }
1128
1129 nssqdisc_info("%s: qdisc type %d: %p, attach complete\n", __func__,
1130 nq->type, sch);
1131
1132 atomic_set(&nq->state, NSSQDISC_STATE_READY);
1133}
1134
1135/*
1136 * nssqdisc_node_attach()
1137 * Configuration function that helps attach a child shaper node to a parent.
1138 */
1139static int nssqdisc_node_attach(struct Qdisc *sch,
1140 struct nss_shaper_configure *shaper_node_attach, int32_t attach_type)
1141{
1142 struct nssqdisc_qdisc *nq = qdisc_priv(sch);
1143 int32_t state, rc;
1144
1145 nssqdisc_info("%s: Qdisc %p (type %d) attaching\n",
1146 __func__, sch, nq->type);
1147
1148 state = atomic_read(&nq->state);
1149 if (state != NSSQDISC_STATE_READY) {
1150 nssqdisc_error("%s: Qdisc %p (type %d): not ready, state: %d\n",
1151 __func__, sch, nq->type, state);
1152 BUG();
1153 }
1154
1155 /*
1156 * Set shaper node state to IDLE
1157 */
1158 atomic_set(&nq->state, NSSQDISC_STATE_IDLE);
1159
1160 shaper_node_attach->interface_num = nq->nss_interface_number;
1161 shaper_node_attach->i_shaper = (nq->is_bridge)? false : true;
1162 shaper_node_attach->cb = nssqdisc_node_attach_callback;
1163 shaper_node_attach->app_data = sch;
1164 shaper_node_attach->owner = THIS_MODULE;
1165 shaper_node_attach->type = attach_type;
1166
1167 rc = nss_shaper_config_send(nq->nss_shaping_ctx, shaper_node_attach);
1168 if (rc != NSS_TX_SUCCESS) {
1169 nssqdisc_warning("%s: Failed to send configure message for "
1170 "qdisc type %d\n", __func__, nq->type);
1171 return -1;
1172 }
1173
1174 /*
1175 * Wait until cleanup operation is complete at which point the state
1176 * shall become idle. NOTE: This relies on the NSS driver to be able
1177 * to operate asynchronously which means kernel preemption is required.
1178 */
1179 while (NSSQDISC_STATE_IDLE == (state = atomic_read(&nq->state))) {
1180 yield();
1181 }
1182
1183 if (state == NSSQDISC_STATE_FAILED_RESPONSE) {
1184 nssqdisc_error("%s: Qdisc %p (type %d) failed to attach child "
1185 "node, State: %d\n", __func__, sch, nq->type, state);
1186 return -1;
1187 }
1188
1189 nssqdisc_info("%s: Qdisc %p (type %d): shaper node attach complete\n",
1190 __func__, sch, nq->type);
1191 return 0;
1192}
1193
1194/*
1195 * nssqdisc_node_detach_callback()
1196 * The callback function for a shaper node detach message
1197 */
1198static void nssqdisc_node_detach_callback(void *app_data,
1199 struct nss_shaper_response *response)
1200{
1201 struct Qdisc *sch = (struct Qdisc *)app_data;
1202 struct nssqdisc_qdisc *nq = qdisc_priv(sch);
1203
1204 nssqdisc_info("%s: Qdisc %p (type %d): shaper node detach response "
1205 "type: %d\n", __func__, sch, nq->type, response->type);
1206
1207 if (response->type < 0) {
1208 atomic_set(&nq->state, NSSQDISC_STATE_FAILED_RESPONSE);
1209 return;
1210 }
1211
1212 nssqdisc_info("%s: Qdisc %p (type %d): detach complete\n",
1213 __func__, sch, nq->type);
1214
1215 atomic_set(&nq->state, NSSQDISC_STATE_READY);
1216}
1217
1218/*
1219 * nssqdisc_detach()
1220 * Configuration function that helps detach a child shaper node to a parent.
1221 */
1222static int nssqdisc_node_detach(struct Qdisc *sch,
1223 struct nss_shaper_configure *shaper_node_detach, int32_t detach_type)
1224{
1225 struct nssqdisc_qdisc *nq = qdisc_priv(sch);
1226 int32_t state, rc;
1227
1228 nssqdisc_info("%s: Qdisc %p (type %d) detaching\n",
1229 __func__, sch, nq->type);
1230
1231 state = atomic_read(&nq->state);
1232 if (state != NSSQDISC_STATE_READY) {
1233 nssqdisc_error("%s: Qdisc %p (type %d): not ready, state: %d\n",
1234 __func__, sch, nq->type, state);
1235 BUG();
1236 }
1237
1238 /*
1239 * Set shaper node state to IDLE
1240 */
1241 atomic_set(&nq->state, NSSQDISC_STATE_IDLE);
1242
1243 shaper_node_detach->interface_num = nq->nss_interface_number;
1244 shaper_node_detach->i_shaper = (nq->is_bridge)? false : true;
1245 shaper_node_detach->cb = nssqdisc_node_detach_callback;
1246 shaper_node_detach->app_data = sch;
1247 shaper_node_detach->owner = THIS_MODULE;
1248 shaper_node_detach->type = detach_type;
1249
1250 rc = nss_shaper_config_send(nq->nss_shaping_ctx, shaper_node_detach);
1251 if (rc != NSS_TX_SUCCESS) {
1252 nssqdisc_warning("%s: Qdisc %p (type %d): Failed to send configure "
1253 "message.", __func__, sch, nq->type);
1254 return -1;
1255 }
1256
1257 /*
1258 * Wait until cleanup operation is complete at which point the state shall become idle.
1259 * NOTE: This relies on the NSS driver to be able to operate asynchronously which means
1260 * kernel preemption is required.
1261 */
1262 while (NSSQDISC_STATE_IDLE == (state = atomic_read(&nq->state))) {
1263 yield();
1264 }
1265
1266 if (state == NSSQDISC_STATE_FAILED_RESPONSE) {
1267 nssqdisc_error("%s: Qdisc %p (type %d): failed to attach child node, "
1268 "State: %d\n", __func__, sch, nq->type, state);
1269 return -1;
1270 }
1271
1272 nssqdisc_info("%s: Qdisc %p (type %d): shaper node detach complete\n",
1273 __func__, sch, nq->type);
1274 return 0;
1275}
1276
1277/*
1278 * nssqdisc_configure_callback()
1279 * The call back function for a shaper node configure message
1280 */
1281static void nssqdisc_configure_callback(void *app_data,
1282 struct nss_shaper_response *response)
1283{
1284 struct Qdisc *sch = (struct Qdisc *)app_data;
1285 struct nssqdisc_qdisc *nq = qdisc_priv(sch);
1286
1287 nssqdisc_info("%s: Qdisc %p (type %d): shaper node configure "
1288 "response type: %d\n", __func__, sch, nq->type, response->type);
1289
1290 if (response->type < 0) {
1291 atomic_set(&nq->state, NSSQDISC_STATE_FAILED_RESPONSE);
1292 return;
1293 }
1294
1295 nssqdisc_info("%s: Qdisc %p (type %d): configuration complete\n",
1296 __func__, sch, nq->type);
1297 atomic_set(&nq->state, NSSQDISC_STATE_READY);
1298}
1299
1300/*
1301 * nssqdisc_configure()
1302 * Configuration function that aids in tuning of queuing parameters.
1303 */
1304static int nssqdisc_configure(struct Qdisc *sch,
1305 struct nss_shaper_configure *shaper_node_configure, int32_t config_type)
1306{
1307 struct nssqdisc_qdisc *nq = qdisc_priv(sch);
1308 int32_t state, rc;
1309
1310 nssqdisc_info("%s: Qdisc %p (type %d) configuring\n", __func__, sch, nq->type);
1311
1312 state = atomic_read(&nq->state);
1313 if (state != NSSQDISC_STATE_READY) {
1314 nssqdisc_error("%s: Qdisc %p (type %d): not ready for configure, "
1315 "state : %d\n", __func__, sch, nq->type, state);
1316 BUG();
1317 }
1318
1319 /*
1320 * Set shaper node state to IDLE
1321 */
1322 atomic_set(&nq->state, NSSQDISC_STATE_IDLE);
1323
1324 shaper_node_configure->interface_num = nq->nss_interface_number;
1325 shaper_node_configure->i_shaper = (nq->is_bridge)? false : true;
1326 shaper_node_configure->cb = nssqdisc_configure_callback;
1327 shaper_node_configure->app_data = sch;
1328 shaper_node_configure->owner = THIS_MODULE;
1329 shaper_node_configure->type = config_type;
1330
1331 nssqdisc_info("Sending config type %d\n", config_type);
1332 rc = nss_shaper_config_send(nq->nss_shaping_ctx, shaper_node_configure);
1333 if (rc != NSS_TX_SUCCESS) {
1334 nssqdisc_warning("%s: Qdisc %p (type %d): Failed to send configure "
1335 "message\n", __func__, sch, nq->type);
1336 return -1;
1337 }
1338
1339 /*
1340 * Wait until cleanup operation is complete at which point the state
1341 * shall become idle. NOTE: This relies on the NSS driver to be able
1342 * to operate asynchronously which means kernel preemption is required.
1343 */
1344 while (NSSQDISC_STATE_IDLE == (state = atomic_read(&nq->state))) {
1345 yield();
1346 }
1347
1348 if (state == NSSQDISC_STATE_FAILED_RESPONSE) {
1349 nssqdisc_error("%s: Qdisc %p (type %d): failed to configure shaper "
1350 "node: State: %d\n", __func__, sch, nq->type, state);
1351 atomic_set(&nq->state, NSSQDISC_STATE_READY);
1352 return -1;
1353 }
1354
1355 nssqdisc_info("%s: Qdisc %p (type %d): shaper node configure complete\n",
1356 __func__, sch, nq->type);
1357 return 0;
1358}
1359
1360/*
1361 * nssqdisc_destroy()
1362 * Destroys a shaper in NSS, and the sequence is based on the position of
1363 * this qdisc (child or root) and the interface to which it is attached to.
1364 */
1365static void nssqdisc_destroy(struct Qdisc *sch)
1366{
1367 struct nssqdisc_qdisc *nq = qdisc_priv(sch);
1368 int32_t state;
1369
1370 nssqdisc_info("%s: Qdisc %p (type %d) destroy\n",
1371 __func__, sch, nq->type);
1372
1373
1374 state = atomic_read(&nq->state);
1375 if (state != NSSQDISC_STATE_READY) {
1376 nssqdisc_error("%s: Qdisc %p (type %d): destroy not ready, "
1377 "state: %d\n", __func__, sch, nq->type, state);
1378 BUG();
1379 }
1380
1381 /*
1382 * How we begin to tidy up depends on whether we are root or child
1383 */
1384 nq->pending_final_state = NSSQDISC_STATE_IDLE;
1385 if (nq->is_root) {
1386
1387 /*
1388 * If this is root on a bridge interface, then unassign
1389 * the bshaper from all the attached interfaces.
1390 */
1391 if (nq->is_bridge) {
1392 nssqdisc_info("%s: Qdisc %p (type %d): is root on bridge. Need to "
1393 "unassign bshapers from its interfaces\n", __func__, sch, nq->type);
1394 nssqdisc_refresh_bshaper_assignment(sch, NSSQDISC_UNASSIGN_BSHAPER);
1395 }
1396
1397 /*
1398 * Begin by freeing the root shaper node
1399 */
1400 nssqdisc_root_cleanup_free_node(sch);
1401 } else {
1402 /*
1403 * Begin by freeing the child shaper node
1404 */
1405 nssqdisc_child_cleanup_free_node(sch);
1406 }
1407
1408 /*
1409 * Wait until cleanup operation is complete at which point the state
1410 * shall become idle. NOTE: This relies on the NSS driver to be able
1411 * to operate asynchronously which means kernel preemption is required.
1412 */
1413 while (NSSQDISC_STATE_IDLE != (state = atomic_read(&nq->state))) {
1414 yield();
1415 }
1416
1417 if (nq->destroy_virtual_interface) {
1418 nss_destroy_virt_if((void *)nq->nss_interface_number);
1419 }
1420
1421 nssqdisc_info("%s: Qdisc %p (type %d): destroy complete\n",
1422 __func__, sch, nq->type);
1423}
1424
1425
1426/*
1427 * nssqdisc_init()
1428 * Initializes a shaper in NSS, based on the position of this qdisc (child or root)
1429 * and if its a normal interface or a bridge interface.
1430 */
1431static int nssqdisc_init(struct Qdisc *sch, nss_shaper_node_type_t type)
1432{
1433 struct nssqdisc_qdisc *nq = qdisc_priv(sch);
1434 struct Qdisc *root;
1435 u32 parent;
1436 nss_tx_status_t rc;
1437 struct net_device *dev;
1438 int32_t state;
1439 struct nss_shaper_configure shaper_assign;
1440
1441 /*
1442 * Record our qdisc and type in the private region for handy use
1443 */
1444 nq->qdisc = sch;
1445 nq->type = type;
1446
1447 /*
1448 * We dont have to destroy a virtual interface unless
1449 * we are the ones who created it. So set it to false
1450 * as default.
1451 */
1452 nq->destroy_virtual_interface = false;
1453
1454 /*
1455 * Set shaper node state to IDLE
1456 */
1457 atomic_set(&nq->state, NSSQDISC_STATE_IDLE);
1458
1459 nq->qos_tag = (uint32_t)sch->handle >> 16;
1460
1461 /*
1462 * The root must be of an nss type (unless we are of course going to be root).
1463 * This is to prevent mixing NSS qdisc with other types of qdisc.
1464 */
1465 parent = sch->parent;
1466 root = qdisc_root(sch);
1467 nssqdisc_info("%s: Qdisc %p (type %d) init root: %p, me: %p, my handle: %x, "
1468 "parent: %x rootid: %s owner: %p\n", __func__, sch, nq->type, root,
1469 sch, nq->qos_tag, parent, root->ops->id, root->ops->owner);
1470
1471 if ((parent != TC_H_ROOT) && (root->ops->owner != THIS_MODULE)) {
1472 nssqdisc_error("%s: Qdisc %p (type %d) used outside of NSS shaping "
1473 "framework. Parent: %x ops: %p Our Module: %p\n", __func__,
1474 sch, nq->type, parent, root->ops, THIS_MODULE);
1475
1476 atomic_set(&nq->state, NSSQDISC_STATE_INIT_FAILED);
1477 return -1;
1478 }
1479
1480 /*
1481 * Register for NSS shaping
1482 */
1483 nq->nss_shaping_ctx = nss_register_shaping();
1484 if (!nq->nss_shaping_ctx) {
1485 nssqdisc_error("%s: no shaping context returned for type %d\n",
1486 __func__, nq->type);
1487 atomic_set(&nq->state, NSSQDISC_STATE_INIT_FAILED);
1488 return -1;
1489 }
1490
1491 /*
1492 * Are we the root qdisc?
1493 */
1494 if (parent == TC_H_ROOT) {
1495 nssqdisc_info("%s: Qdisc %p (type %d) is root\n", __func__, sch, nq->type);
1496 nq->is_root = true;
1497 } else {
1498 nssqdisc_info("%s: Qdisc %p (type %d) not root\n", __func__, sch, nq->type);
1499 nq->is_root = false;
1500 }
1501
1502 /*
1503 * Get the net device as it will tell us if we are on a bridge,
1504 * or on a net device that is represented by a virtual NSS interface (e.g. WIFI)
1505 */
1506 dev = qdisc_dev(sch);
1507 nssqdisc_info("%s: Qdisc %p (type %d) init dev: %p\n", __func__, sch, nq->type, dev);
1508
1509 /*
1510 * Determine if dev is a bridge or not as this determines if we
1511 * interract with an I or B shaper
1512 */
1513 if (dev->priv_flags == IFF_EBRIDGE) {
1514 nssqdisc_info("%s: Qdisc %p (type %d) init qdisc: %p, is bridge\n",
1515 __func__, sch, nq->type, nq->qdisc);
1516 nq->is_bridge = true;
1517 } else {
1518 nssqdisc_info("%s: Qdisc %p (type %d) init qdisc: %p, not bridge\n",
1519 __func__, sch, nq->type, nq->qdisc);
1520 nq->is_bridge = false;
1521 }
1522
1523 /*
1524 * If we are not the root qdisc then we have a simple enough job to do
1525 */
1526 if (!nq->is_root) {
1527 struct nss_shaper_configure shaper_node_create;
1528
1529 nssqdisc_info("%s: Qdisc %p (type %d) initializing non-root qdisc\n",
1530 __func__, sch, nq->type);
1531
1532 /*
1533 * The device we are operational on MUST be recognised as an NSS interface.
1534 * NOTE: We do NOT support non-NSS known interfaces in this implementation.
1535 * NOTE: This will still work where the dev is registered as virtual, in which case
1536 * nss_interface_number shall indicate a virtual NSS interface.
1537 */
1538 nq->nss_interface_number = nss_get_interface_number(nq->nss_shaping_ctx, dev);
1539 if (nq->nss_interface_number < 0) {
1540 nssqdisc_error("%s: Qdisc %p (type %d) net device unknown to "
1541 "nss driver %s\n", __func__, sch, nq->type, dev->name);
1542 nss_unregister_shaping(nq->nss_shaping_ctx);
1543 atomic_set(&nq->state, NSSQDISC_STATE_INIT_FAILED);
1544 return -1;
1545 }
1546
1547 /*
1548 * Create a shaper node for requested type.
1549 * Essentially all we need to do is create the shaper node.
1550 */
1551 nssqdisc_info("%s: Qdisc %p (type %d) non-root (child) create\n",
1552 __func__, sch, nq->type);
1553
1554 shaper_node_create.interface_num = nq->nss_interface_number;
1555 shaper_node_create.i_shaper = (nq->is_bridge)? false : true;
1556 shaper_node_create.cb = nssqdisc_child_init_alloc_node_callback;
1557 shaper_node_create.app_data = sch;
1558 shaper_node_create.owner = THIS_MODULE;
1559 shaper_node_create.type = NSS_SHAPER_CONFIG_TYPE_ALLOC_SHAPER_NODE;
1560 shaper_node_create.mt.alloc_shaper_node.node_type = nq->type;
1561 shaper_node_create.mt.alloc_shaper_node.qos_tag = nq->qos_tag;
1562
1563 rc = nss_shaper_config_send(nq->nss_shaping_ctx, &shaper_node_create);
1564 if (rc != NSS_TX_SUCCESS) {
1565 nssqdisc_error("%s: Qdisc %p (type %d) create command "
1566 "failed: %d\n", __func__, sch, nq->type, rc);
1567 nq->pending_final_state = NSSQDISC_STATE_CHILD_ALLOC_SEND_FAIL;
1568 nssqdisc_child_cleanup_final(sch);
1569 return -1;
1570 }
1571
1572 /*
1573 * Wait until init operation is complete.
1574 * NOTE: This relies on the NSS driver to be able to operate
1575 * asynchronously which means kernel preemption is required.
1576 */
1577 while (NSSQDISC_STATE_IDLE == (state = atomic_read(&nq->state))) {
1578 yield();
1579 }
1580 nssqdisc_info("%s: Qdisc %p (type %d): initialised with state: %d\n",
1581 __func__, sch, nq->type, state);
1582 if (state > 0) {
1583 return 0;
1584 }
1585 return -1;
1586 }
1587
1588 /*
1589 * Root qdisc has a lot of work to do. It is responsible for setting up
1590 * the shaper and creating the root and default shaper nodes. Also, when
1591 * operating on a bridge, a virtual NSS interface is created to represent
1592 * bridge shaping. Further, when operating on a bridge, we monitor for
1593 * bridge port changes and assign B shapers to the interfaces of the ports.
1594 */
1595 nssqdisc_info("%s: init qdisc type %d : %p, ROOT\n", __func__, nq->type, sch);
1596
1597 /*
1598 * Detect if we are operating on a bridge or interface
1599 */
1600 if (nq->is_bridge) {
1601 nssqdisc_info("%s: Qdisc %p (type %d): initializing root qdisc on "
1602 "bridge\n", __func__, sch, nq->type);
1603
1604 /*
1605 * As we are a root qdisc on this bridge then we have to create a
1606 * virtual interface to represent this bridge in the NSS. This will
1607 * allow us to bounce packets to the NSS for bridge shaping action.
1608 * Also set the destroy virtual interface flag so that it is destroyed
1609 * when the module goes down. If this is not done, the OS waits for
1610 * the interface to be released.
1611 */
1612 nq->virtual_interface_context = nss_create_virt_if(dev);
1613 nq->destroy_virtual_interface = true;
1614 if (!nq->virtual_interface_context) {
1615 nssqdisc_error("%s: Qdisc %p (type %d): cannot create virtual "
1616 "interface\n", __func__, sch, nq->type);
1617 nss_unregister_shaping(nq->nss_shaping_ctx);
1618 atomic_set(&nq->state, NSSQDISC_STATE_INIT_FAILED);
1619 return -1;
1620 }
1621 nssqdisc_info("%s: Qdisc %p (type %d): virtual interface registered "
1622 "in NSS: %p\n", __func__, sch, nq->type, nq->virtual_interface_context);
1623 nq->nss_interface_number = nss_virt_if_get_interface_num(nq->virtual_interface_context);
1624 nssqdisc_info("%s: Qdisc %p (type %d) virtual interface number: %d\n",
1625 __func__, sch, nq->type, nq->nss_interface_number);
1626
1627 /*
1628 * The root qdisc will get packets enqueued to it, so it must
1629 * register for bridge bouncing as it will be responsible for
1630 * bouncing packets to the NSS for bridge shaping.
1631 */
1632 nq->bounce_context = nss_register_shaper_bounce_bridge(nq->nss_interface_number,
1633 nssqdisc_bounce_callback, sch, THIS_MODULE);
1634 if (!nq->bounce_context) {
1635 nssqdisc_error("%s: Qdisc %p (type %d): root but cannot register "
1636 "for bridge bouncing\n", __func__, sch, nq->type);
1637 nss_destroy_virt_if(nq->virtual_interface_context);
1638 nss_unregister_shaping(nq->nss_shaping_ctx);
1639 atomic_set(&nq->state, NSSQDISC_STATE_INIT_FAILED);
1640 return -1;
1641 }
1642
1643 } else {
1644 nssqdisc_info("%s: Qdisc %p (type %d): is interface\n", __func__, sch, nq->type);
1645
1646 /*
1647 * The device we are operational on MUST be recognised as an NSS interface.
1648 * NOTE: We do NOT support non-NSS known interfaces in this basic implementation.
1649 * NOTE: This will still work where the dev is registered as virtual, in which case
1650 * nss_interface_number shall indicate a virtual NSS interface.
1651 */
1652 nq->nss_interface_number = nss_get_interface_number(nq->nss_shaping_ctx, dev);
1653 if (nq->nss_interface_number < 0) {
1654 nssqdisc_error("%s: Qdisc %p (type %d): interface unknown to nss driver %s\n",
1655 __func__, sch, nq->type, dev->name);
1656 nss_unregister_shaping(nq->nss_shaping_ctx);
1657 atomic_set(&nq->state, NSSQDISC_STATE_INIT_FAILED);
1658 return -1;
1659 }
1660
1661 /*
1662 * Is the interface virtual or not?
1663 * NOTE: If this interface is virtual then we have to bounce packets to it for shaping
1664 */
1665 nq->is_virtual = nss_interface_is_virtual(nq->nss_shaping_ctx, nq->nss_interface_number);
1666 if (!nq->is_virtual) {
1667 nssqdisc_info("%s: Qdisc %p (type %d): interface %u is physical\n",
1668 __func__, sch, nq->type, nq->nss_interface_number);
1669 } else {
1670 nssqdisc_info("%s: Qdisc %p (type %d): interface %u is virtual\n",
1671 __func__, sch, nq->type, nq->nss_interface_number);
1672
1673 /*
1674 * Register for interface bounce shaping.
1675 */
1676 nq->bounce_context = nss_register_shaper_bounce_interface(nq->nss_interface_number,
1677 nssqdisc_bounce_callback, sch, THIS_MODULE);
1678 if (!nq->bounce_context) {
1679 nssqdisc_error("%s: Qdisc %p (type %d): is root but failed "
1680 "to register for interface bouncing\n", __func__, sch, nq->type);
1681 nss_unregister_shaping(nq->nss_shaping_ctx);
1682 atomic_set(&nq->state, NSSQDISC_STATE_INIT_FAILED);
1683 return -1;
1684 }
1685 }
1686 }
1687
1688 /*
1689 * We need to issue a command to establish a shaper on the interface.
1690 */
1691 shaper_assign.interface_num = nq->nss_interface_number;
1692 shaper_assign.i_shaper = (nq->is_bridge)? false : true;
1693 shaper_assign.cb = nssqdisc_root_init_shaper_assign_callback;
1694 shaper_assign.app_data = sch;
1695 shaper_assign.owner = THIS_MODULE;
1696 shaper_assign.type = NSS_SHAPER_CONFIG_TYPE_ASSIGN_SHAPER;
1697 shaper_assign.mt.assign_shaper.shaper_num = 0; /* Any free shaper will do */
1698 rc = nss_shaper_config_send(nq->nss_shaping_ctx, &shaper_assign);
1699 if (rc != NSS_TX_SUCCESS) {
1700 nssqdisc_error("%s: shaper assign command failed: %d\n", __func__, rc);
1701 nq->pending_final_state = NSSQDISC_STATE_ASSIGN_SHAPER_SEND_FAIL;
1702 nssqdisc_root_cleanup_final(sch);
1703 if (nq->destroy_virtual_interface) {
1704 nss_destroy_virt_if(nq->virtual_interface_context);
1705 }
1706 return -1;
1707 }
1708
1709 /*
1710 * Wait until init operation is complete.
1711 * NOTE: This relies on the NSS driver to be able to operate asynchronously which means
1712 * kernel preemption is required.
1713 */
1714 nssqdisc_info("%s: Qdisc %p (type %d): Waiting on response from NSS for "
1715 "shaper assign message\n", __func__, sch, nq->type);
1716 while (NSSQDISC_STATE_IDLE == (state = atomic_read(&nq->state))) {
1717 yield();
1718 }
1719 nssqdisc_info("%s: Qdisc %p (type %d): is initialised with state: %d\n",
1720 __func__, sch, nq->type, state);
1721
1722 if (state > 0) {
1723
1724 /*
1725 * Return if this is not a root qdisc on a bridge interface.
1726 */
1727 if (!nq->is_root || !nq->is_bridge) {
1728 return 0;
1729 }
1730
1731 nssqdisc_info("%s: This is a bridge interface. Linking bridge ...\n",
1732 __func__);
1733 /*
1734 * This is a root qdisc added to a bridge interface. Now we go ahead
1735 * and add this B-shaper to interfaces known to the NSS
1736 */
1737 if (nssqdisc_refresh_bshaper_assignment(sch, NSSQDISC_ASSIGN_BSHAPER) < 0) {
1738 nssqdisc_destroy(sch);
1739 nssqdisc_error("%s: Bridge linking failed\n", __func__);
1740 return -1;
1741 }
1742 nssqdisc_info("%s: Bridge linking complete\n", __func__);
1743 return 0;
1744 }
1745
1746 /*
1747 * Destroy any virtual interfaces created by us before returning a failure.
1748 */
1749 if (nq->destroy_virtual_interface) {
1750 nss_destroy_virt_if(nq->virtual_interface_context);
1751 }
1752
1753 return -1;
1754}
1755
1756/*
1757 * nssqdisc_basic_stats_callback()
1758 * Invoked after getting basic stats
1759 */
1760static void nssqdisc_basic_stats_callback(void *app_data,
1761 struct nss_shaper_response *response)
1762{
1763 struct Qdisc *qdisc = (struct Qdisc *)app_data;
1764 struct nssqdisc_qdisc *nq = qdisc_priv(qdisc);
1765
1766 if (response->type < 0) {
1767 nssqdisc_info("%s: Qdisc %p (type %d): Received stats - "
1768 "response: type: %d\n", __func__, qdisc, nq->type,
1769 response->type);
1770 atomic_sub(1, &nq->pending_stat_requests);
1771 return;
1772 }
1773
1774 /*
1775 * Record latest basic stats
1776 */
1777 nq->basic_stats_latest = response->rt.shaper_node_basic_stats_get_success;
1778
1779 /*
1780 * Update qdisc->bstats
1781 */
1782 qdisc->bstats.bytes += (__u64)nq->basic_stats_latest.delta.dequeued_bytes;
1783 qdisc->bstats.packets += nq->basic_stats_latest.delta.dequeued_packets;
1784
1785 /*
1786 * Update qdisc->qstats
1787 */
1788 qdisc->qstats.backlog = nq->basic_stats_latest.qlen_bytes;
1789 qdisc->q.qlen = nq->basic_stats_latest.qlen_packets;
1790
1791 qdisc->qstats.drops += (nq->basic_stats_latest.delta.enqueued_packets_dropped +
1792 nq->basic_stats_latest.delta.dequeued_packets_dropped);
1793
1794 /*
1795 * Update qdisc->qstats
1796 */
1797 qdisc->qstats.qlen = qdisc->limit;
1798 qdisc->qstats.requeues = 0;
1799 qdisc->qstats.overlimits += nq->basic_stats_latest.delta.queue_overrun;
1800
1801 if (atomic_read(&qdisc->refcnt) == 0) {
1802 atomic_sub(1, &nq->pending_stat_requests);
1803 return;
1804 }
1805
1806 /*
1807 * Requests for stats again, after 1 sec.
1808 */
1809 nq->stats_get_timer.expires += HZ;
1810 if (nq->stats_get_timer.expires <= jiffies) {
1811 nssqdisc_error("losing time %lu, jiffies = %lu\n",
1812 nq->stats_get_timer.expires, jiffies);
1813 nq->stats_get_timer.expires = jiffies + HZ;
1814 }
1815 add_timer(&nq->stats_get_timer);
1816}
1817
1818/*
1819 * nssqdisc_get_stats_timer_callback()
1820 * Invoked periodically to get updated stats
1821 */
1822static void nssqdisc_get_stats_timer_callback(unsigned long int data)
1823{
1824 struct Qdisc *qdisc = (struct Qdisc *)data;
1825 struct nssqdisc_qdisc *nq = qdisc_priv(qdisc);
1826 nss_tx_status_t rc;
1827 struct nss_shaper_configure basic_stats_get;
1828
1829 /*
1830 * Issue command to get stats
1831 * Stats still in progress? If not then send a new poll
1832 */
1833 basic_stats_get.interface_num = nq->nss_interface_number;
1834 basic_stats_get.i_shaper = (nq->is_bridge)? false : true;
1835 basic_stats_get.cb = nssqdisc_basic_stats_callback;
1836 basic_stats_get.app_data = qdisc;
1837 basic_stats_get.owner = THIS_MODULE;
1838 basic_stats_get.type = NSS_SHAPER_CONFIG_TYPE_SHAPER_NODE_BASIC_STATS_GET;
1839 basic_stats_get.mt.shaper_node_basic_stats_get.qos_tag = nq->qos_tag;
1840 rc = nss_shaper_config_send(nq->nss_shaping_ctx, &basic_stats_get);
1841 if (rc != NSS_TX_SUCCESS) {
1842 nssqdisc_error("%s: %p: basic stats get failed to send\n",
1843 __func__, qdisc);
1844 atomic_sub(1, &nq->pending_stat_requests);
1845 }
1846}
1847
1848/*
1849 * nssqdisc_start_basic_stats_polling()
1850 * Call to initiate the stats polling timer
1851 */
1852static void nssqdisc_start_basic_stats_polling(struct Qdisc *sch)
1853{
1854 struct nssqdisc_qdisc *nq = qdisc_priv(sch);
1855
1856 init_timer(&nq->stats_get_timer);
1857 nq->stats_get_timer.function = nssqdisc_get_stats_timer_callback;
1858 nq->stats_get_timer.data = (unsigned long)sch;
1859 nq->stats_get_timer.expires = jiffies + HZ;
1860 atomic_set(&nq->pending_stat_requests, 1);
1861 add_timer(&nq->stats_get_timer);
1862}
1863
1864/*
1865 * nssqdisc_stop_basic_stats_polling()
1866 * Call to stop polling of basic stats
1867 */
1868static void nssqdisc_stop_basic_stats_polling(struct Qdisc *sch)
1869{
1870 struct nssqdisc_qdisc *nq = qdisc_priv(sch);
1871
1872 /*
1873 * We wait until we have received the final stats
1874 */
1875 while (atomic_read(&nq->pending_stat_requests) != 0) {
1876 yield();
1877 }
1878}
1879
1880/*
1881 * nssqdisc_if_event_cb()
1882 * Callback function that is registered to listen to events on net_device.
1883 */
1884static int nssqdisc_if_event_cb(struct notifier_block *unused,
1885 unsigned long event, void *ptr)
1886{
1887 struct net_device *dev = (struct net_device *)ptr;
1888 struct net_device *br;
1889 struct Qdisc *br_qdisc;
1890 int if_num, br_num;
1891
1892 switch (event) {
1893 case NETDEV_BR_JOIN:
1894 nssqdisc_info("Reveived NETDEV_BR_JOIN on interface %s\n",
1895 dev->name);
1896 case NETDEV_BR_LEAVE:
1897 nssqdisc_info("Reveived NETDEV_BR_LEAVE on interface %s\n",
1898 dev->name);
1899 br = dev->master;
1900 if_num = nss_get_interface_number(nssqdisc_ctx, dev);
1901
1902 if (br == NULL || br->priv_flags != IFF_EBRIDGE) {
1903 nssqdisc_error("Sensed bridge activity on interface %s "
1904 "that is not on any bridge\n", dev->name);
1905 break;
1906 }
1907
1908 br_num = nss_get_interface_number(nssqdisc_ctx, br);
1909 br_qdisc = br->qdisc;
1910 /*
1911 * TODO: Properly ensure that the interface and bridge are
1912 * shaped by us.
1913 */
1914 if (if_num < 0 || br_num < 0) {
1915 nssqdisc_info("No action taken since if_num is %d for %s "
1916 "and br_num is %d for bridge %s\n", if_num,
1917 dev->name, br_num, br->name);
1918 break;
1919 }
1920
1921 /*
1922 * Call attach or detach according as per event type.
1923 */
1924 if (event == NETDEV_BR_JOIN) {
1925 nssqdisc_info("Instructing interface %s to attach to bridge(%s) "
1926 "shaping\n", dev->name, br->name);
1927 nssqdisc_attach_bshaper(br_qdisc, if_num);
1928 } else if (event == NETDEV_BR_LEAVE) {
1929 nssqdisc_info("Instructing interface %s to detach from bridge(%s) "
1930 "shaping\n",dev->name, br->name);
1931 nssqdisc_detach_bshaper(br_qdisc, if_num);
1932 }
1933
1934 break;
1935 default:
1936 nssqdisc_info("Received NETDEV_DEFAULT on interface %s\n", dev->name);
1937 break;
1938 }
1939
1940 return NOTIFY_DONE;
1941}
1942
/*
 * Notifier block registered with the netdevice notifier chain in
 * nssqdisc_module_init() to track bridge join/leave activity.
 */
static struct notifier_block nssqdisc_device_notifier = {
		.notifier_call = nssqdisc_if_event_cb };
1945
1946/* =========================== NSSFIFO ========================= */
1947
/*
 * nssfifo_sched_data
 *	Private data for the nsspfifo/nssbfifo qdiscs.
 */
struct nssfifo_sched_data {
	struct nssqdisc_qdisc nq;	/* Common base class for all nss qdiscs */
	u32 limit;			/* Queue length in packets */
					/* TODO: Support for queue length in bytes */
	u8 set_default;			/* Flag to set qdisc as default qdisc for enqueue */
};
1954
/*
 * nssfifo_enqueue()
 *	Delegates to the common NSS qdisc enqueue handler.
 */
static int nssfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	return nssqdisc_enqueue(skb, sch);
}
1959
/*
 * nssfifo_dequeue()
 *	Delegates to the common NSS qdisc dequeue handler.
 */
static struct sk_buff *nssfifo_dequeue(struct Qdisc *sch)
{
	return nssqdisc_dequeue(sch);
}
1964
/*
 * nssfifo_drop()
 *	Delegates to the common NSS qdisc drop handler.
 */
static unsigned int nssfifo_drop(struct Qdisc *sch)
{
	nssqdisc_info("nssfifo dropping");
	return nssqdisc_drop(sch);
}
1970
/*
 * nssfifo_reset()
 *	Delegates to the common NSS qdisc reset handler.
 */
static void nssfifo_reset(struct Qdisc *sch)
{
	nssqdisc_info("nssfifo resetting!");
	nssqdisc_reset(sch);
}
1976
/*
 * nssfifo_destroy()
 *	Tears down the fifo qdisc: waits for stats polling to finish, then
 *	destroys the underlying NSS shaper node state.
 */
static void nssfifo_destroy(struct Qdisc *sch)
{
	/*
	 * Stop the polling of basic stats
	 */
	nssqdisc_stop_basic_stats_polling(sch);

	nssqdisc_destroy(sch);
	nssqdisc_info("nssfifo destroyed");
}
1987
/*
 * Netlink policy: TCA_NSSFIFO_PARMS carries a struct tc_nssfifo_qopt.
 */
static const struct nla_policy nssfifo_policy[TCA_NSSFIFO_MAX + 1] = {
	[TCA_NSSFIFO_PARMS] = { .len = sizeof(struct tc_nssfifo_qopt) },
};
1991
/*
 * nssfifo_change()
 *	Parses and validates netlink parameters, then pushes the new fifo
 *	configuration (limit, tail-drop mode) down to the NSS shaper node.
 *
 * Returns 0 on success or a negative errno. When set_default is non-zero,
 * also marks this qdisc as the default enqueue target.
 */
static int nssfifo_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct nssfifo_sched_data *q;
	struct nlattr *na[TCA_NSSFIFO_MAX + 1];
	struct tc_nssfifo_qopt *qopt;
	int err;
	struct nss_shaper_configure shaper_node_change_param;

	q = qdisc_priv(sch);

	if (opt == NULL) {
		return -EINVAL;
	}

	err = nla_parse_nested(na, TCA_NSSFIFO_MAX, opt, nssfifo_policy);
	if (err < 0)
		return err;

	if (na[TCA_NSSFIFO_PARMS] == NULL)
		return -EINVAL;

	qopt = nla_data(na[TCA_NSSFIFO_PARMS]);

	/* A zero limit is rejected — the NSS fifo needs a real bound. */
	if (!qopt->limit) {
		nssqdisc_error("%s: limit must be non-zero\n", __func__);
		return -EINVAL;
	}

	q->limit = qopt->limit;

	/*
	 * Required for basic stats display
	 */
	sch->limit = qopt->limit;

	q->set_default = qopt->set_default;
	nssqdisc_info("%s: limit:%u set_default:%u\n", __func__, qopt->limit, qopt->set_default);

	/* NOTE(review): only qos_tag and fifo_param are filled in here;
	 * presumably nssqdisc_configure() sets the message type — confirm. */
	shaper_node_change_param.mt.shaper_node_config.qos_tag = q->nq.qos_tag;
	shaper_node_change_param.mt.shaper_node_config.snc.fifo_param.limit = q->limit;
	shaper_node_change_param.mt.shaper_node_config.snc.fifo_param.drop_mode = NSS_SHAPER_FIFO_DROP_MODE_TAIL;
	if (nssqdisc_configure(sch, &shaper_node_change_param, NSS_SHAPER_CONFIG_TYPE_FIFO_CHANGE_PARAM) < 0)
		return -EINVAL;

	/*
	 * There is nothing we need to do if the qdisc is not
	 * set as default qdisc.
	 */
	if (q->set_default == 0)
		return 0;

	/*
	 * Set this qdisc to be the default qdisc for enqueuing packets.
	 */
	if (nssqdisc_set_default(sch) < 0)
		return -EINVAL;

	nssqdisc_info("%s: nssfifo queue (qos_tag:%u) set as default\n", __func__, q->nq.qos_tag);
	return 0;
}
2052
/*
 * nssfifo_init()
 *	Creates the NSS fifo shaper node, applies the initial configuration
 *	and starts the basic stats polling timer.
 *
 * Returns 0 on success, -EINVAL on any failure (the shaper node is torn
 * down again if the initial change fails).
 */
static int nssfifo_init(struct Qdisc *sch, struct nlattr *opt)
{
	if (opt == NULL)
		return -EINVAL;

	nssqdisc_info("Initializing Fifo - type %d\n", NSS_SHAPER_NODE_TYPE_FIFO);
	/* NOTE(review): reset is invoked before nssqdisc_init() — confirm
	 * nssqdisc_reset() is safe on a not-yet-initialized qdisc. */
	nssfifo_reset(sch);

	if (nssqdisc_init(sch, NSS_SHAPER_NODE_TYPE_FIFO) < 0)
		return -EINVAL;

	nssqdisc_info("NSS fifo initialized - handle %x parent %x\n", sch->handle, sch->parent);
	if (nssfifo_change(sch, opt) < 0) {
		nssqdisc_destroy(sch);
		return -EINVAL;
	}

	/*
	 * Start the stats polling timer
	 */
	nssqdisc_start_basic_stats_polling(sch);

	return 0;
}
2077
2078static int nssfifo_dump(struct Qdisc *sch, struct sk_buff *skb)
2079{
2080 struct nssfifo_sched_data *q;
2081 struct nlattr *opts = NULL;
2082 struct tc_nssfifo_qopt opt;
2083
2084 nssqdisc_info("Nssfifo Dumping!");
2085
2086 q = qdisc_priv(sch);
2087 if (q == NULL) {
2088 return -1;
2089 }
2090
2091 opt.limit = q->limit;
2092
2093 opts = nla_nest_start(skb, TCA_OPTIONS);
2094 if (opts == NULL) {
2095 goto nla_put_failure;
2096 }
2097 if (nla_put(skb, TCA_NSSFIFO_PARMS, sizeof(opt), &opt))
2098 goto nla_put_failure;
2099
2100 return nla_nest_end(skb, opts);
2101
2102nla_put_failure:
2103 nla_nest_cancel(skb, opts);
2104 return -EMSGSIZE;
2105}
2106
/*
 * nssfifo_peek()
 *	Delegates to the common NSS qdisc peek handler.
 */
static struct sk_buff *nssfifo_peek(struct Qdisc *sch)
{
	nssqdisc_info("Nssfifo Peeking");
	return nssqdisc_peek(sch);
}
2112
/*
 * Registration table for the nsspfifo qdisc (packet-count limited fifo).
 */
static struct Qdisc_ops nsspfifo_qdisc_ops __read_mostly = {
	.id		=	"nsspfifo",
	.priv_size	=	sizeof(struct nssfifo_sched_data),
	.enqueue	=	nssfifo_enqueue,
	.dequeue	=	nssfifo_dequeue,
	.peek		=	nssfifo_peek,
	.drop		=	nssfifo_drop,
	.init		=	nssfifo_init,
	.reset		=	nssfifo_reset,
	.destroy	=	nssfifo_destroy,
	.change		=	nssfifo_change,
	.dump		=	nssfifo_dump,
	.owner		=	THIS_MODULE,
};
2127
/*
 * Registration table for the nssbfifo qdisc; shares all handlers with
 * nsspfifo and differs only in its id string.
 */
static struct Qdisc_ops nssbfifo_qdisc_ops __read_mostly = {
	.id		=	"nssbfifo",
	.priv_size	=	sizeof(struct nssfifo_sched_data),
	.enqueue	=	nssfifo_enqueue,
	.dequeue	=	nssfifo_dequeue,
	.peek		=	nssfifo_peek,
	.drop		=	nssfifo_drop,
	.init		=	nssfifo_init,
	.reset		=	nssfifo_reset,
	.destroy	=	nssfifo_destroy,
	.change		=	nssfifo_change,
	.dump		=	nssfifo_dump,
	.owner		=	THIS_MODULE,
};
2142
2143/* =========================== NSSCODEL ========================= */
2144
/*
 * nsscodel_stats
 *	Extra statistics reported by the codel shaper node.
 */
struct nsscodel_stats {
	u32 peak_queue_delay;		/* Peak delay experienced by a dequeued packet */
	u32 peak_drop_delay;		/* Peak delay experienced by a packet that is dropped */
};
2149
/*
 * nsscodel_sched_data
 *	Private data for the nsscodel qdisc.
 */
struct nsscodel_sched_data {
	struct nssqdisc_qdisc nq;	/* Common base class for all nss qdiscs */
	u32 target;			/* Acceptable value of queue delay */
	u32 limit;			/* Length of queue */
	u32 interval;			/* Monitoring interval */
	u8 set_default;			/* Flag to set qdisc as default qdisc for enqueue */
	struct nsscodel_stats stats;	/* Contains nsscodel related stats */
};
2158
/*
 * nsscodel_enqueue()
 *	Delegates to the common NSS qdisc enqueue handler.
 */
static int nsscodel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	return nssqdisc_enqueue(skb, sch);
}
2163
/*
 * nsscodel_dequeue()
 *	Delegates to the common NSS qdisc dequeue handler.
 */
static struct sk_buff *nsscodel_dequeue(struct Qdisc *sch)
{
	return nssqdisc_dequeue(sch);
}
2168
/*
 * nsscodel_drop()
 *	Delegates to the common NSS qdisc drop handler.
 */
static unsigned int nsscodel_drop(struct Qdisc *sch)
{
	return nssqdisc_drop(sch);
}
2173
/*
 * nsscodel_reset()
 *	Delegates to the common NSS qdisc reset handler.
 */
static void nsscodel_reset(struct Qdisc *sch)
{
	nssqdisc_info("nsscodel resetting!");
	nssqdisc_reset(sch);
}
2179
/*
 * nsscodel_destroy()
 *	Waits for stats polling to finish, then destroys the NSS codel node.
 */
static void nsscodel_destroy(struct Qdisc *sch)
{
	/*
	 * Stop the polling of basic stats
	 */
	nssqdisc_stop_basic_stats_polling(sch);
	nssqdisc_destroy(sch);
	nssqdisc_info("nsscodel destroyed");
}
2189
/*
 * Netlink policy: TCA_NSSCODEL_PARMS carries a struct tc_nsscodel_qopt.
 */
static const struct nla_policy nsscodel_policy[TCA_NSSCODEL_MAX + 1] = {
	[TCA_NSSCODEL_PARMS] = { .len = sizeof(struct tc_nsscodel_qopt) },
};
2193
/*
 * nsscodel_change()
 *	Validates netlink parameters (target, interval, limit must all be
 *	non-zero) and pushes the new codel configuration to the NSS node.
 *
 * Returns 0 on success or a negative errno.
 */
static int nsscodel_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct nsscodel_sched_data *q;
	struct nlattr *na[TCA_NSSCODEL_MAX + 1];
	struct tc_nsscodel_qopt *qopt;
	struct nss_shaper_configure shaper_node_change_param;
	int err;
	struct net_device *dev = qdisc_dev(sch);

	q = qdisc_priv(sch);

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(na, TCA_NSSCODEL_MAX, opt, nsscodel_policy);
	if (err < 0)
		return err;

	if (na[TCA_NSSCODEL_PARMS] == NULL)
		return -EINVAL;

	qopt = nla_data(na[TCA_NSSCODEL_PARMS]);

	if (!qopt->target || !qopt->interval || !qopt->limit) {
		nssqdisc_error("nsscodel requires a non-zero value for target, "
				"interval and limit\n");
		return -EINVAL;
	}

	q->target = qopt->target;
	q->limit = qopt->limit;
	q->interval = qopt->interval;
	q->set_default = qopt->set_default;

	/*
	 * Required for basic stats display
	 */
	sch->limit = qopt->limit;

	nssqdisc_info("Target:%u Limit:%u Interval:%u set_default = %u\n",
			q->target, q->limit, q->interval, qopt->set_default);


	shaper_node_change_param.mt.shaper_node_config.qos_tag = q->nq.qos_tag;
	/*
	 * Target and interval time needs to be provided in milliseconds
	 * (tc provides us the time in microseconds and therefore we divide by 1000)
	 */
	shaper_node_change_param.mt.shaper_node_config.snc.codel_param.qlen_max = q->limit;
	shaper_node_change_param.mt.shaper_node_config.snc.codel_param.cap.interval = q->interval/1000;
	shaper_node_change_param.mt.shaper_node_config.snc.codel_param.cap.target = q->target/1000;
	shaper_node_change_param.mt.shaper_node_config.snc.codel_param.cap.mtu = dev->mtu;
	nssqdisc_info("%s: MTU size of interface %s is %u\n", __func__, dev->name, dev->mtu);

	if (nssqdisc_configure(sch, &shaper_node_change_param,
			NSS_SHAPER_CONFIG_TYPE_CODEL_CHANGE_PARAM) < 0) {
		return -EINVAL;
	}

	/*
	 * There is nothing we need to do if the qdisc is not
	 * set as default qdisc.
	 */
	if (!q->set_default)
		return 0;

	/*
	 * Set this qdisc to be the default qdisc for enqueuing packets.
	 */
	if (nssqdisc_set_default(sch) < 0)
		return -EINVAL;

	return 0;
}
2268
/*
 * nsscodel_init()
 *	Creates the NSS codel shaper node, applies the initial configuration
 *	and starts the basic stats polling timer.
 *
 * Returns 0 on success, -EINVAL on failure.
 */
static int nsscodel_init(struct Qdisc *sch, struct nlattr *opt)
{
	if (opt == NULL)
		return -EINVAL;

	/* NOTE(review): reset before nssqdisc_init() — confirm this is safe
	 * on a not-yet-initialized qdisc (same pattern as nssfifo_init). */
	nsscodel_reset(sch);
	if (nssqdisc_init(sch, NSS_SHAPER_NODE_TYPE_CODEL) < 0)
		return -EINVAL;

	if (nsscodel_change(sch, opt) < 0) {
		nssqdisc_destroy(sch);
		return -EINVAL;
	}

	/*
	 * Start the stats polling timer
	 */
	nssqdisc_start_basic_stats_polling(sch);

	return 0;
}
2290
/*
 * nsscodel_dump()
 *	Appends the codel configuration (target, limit, interval) to a
 *	netlink dump message.
 *
 * Returns the length consumed, -1 when private data is missing, or
 * -EMSGSIZE when the attribute does not fit.
 */
static int nsscodel_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct nsscodel_sched_data *q;
	struct nlattr *opts = NULL;
	struct tc_nsscodel_qopt opt;

	nssqdisc_info("NssCodel Dumping!");

	q = qdisc_priv(sch);
	if (q == NULL) {
		return -1;
	}

	opt.target = q->target;
	opt.limit = q->limit;
	opt.interval = q->interval;
	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL) {
		goto nla_put_failure;
	}
	if (nla_put(skb, TCA_NSSCODEL_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}
2320
/*
 * nsscodel_dump_stats()
 *	Copies the latest peak queue/drop latency figures (reported by the
 *	NSS basic stats poll) into the class stats dump.
 */
static int nsscodel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct nsscodel_sched_data *q = qdisc_priv(sch);
	struct tc_nsscodel_xstats st = {
		.peak_queue_delay = q->nq.basic_stats_latest.packet_latency_peak_msec_dequeued,
		.peak_drop_delay = q->nq.basic_stats_latest.packet_latency_peak_msec_dropped,
	};

	return gnet_stats_copy_app(d, &st, sizeof(st));
}
2331
/*
 * nsscodel_peek()
 *	Delegates to the common NSS qdisc peek handler.
 */
static struct sk_buff *nsscodel_peek(struct Qdisc *sch)
{
	nssqdisc_info("Nsscodel Peeking");
	return nssqdisc_peek(sch);
}
2337
2338
/*
 * Registration table for the nsscodel qdisc.
 */
static struct Qdisc_ops nsscodel_qdisc_ops __read_mostly = {
	.id		=	"nsscodel",
	.priv_size	=	sizeof(struct nsscodel_sched_data),
	.enqueue	=	nsscodel_enqueue,
	.dequeue	=	nsscodel_dequeue,
	.peek		=	nsscodel_peek,
	.drop		=	nsscodel_drop,
	.init		=	nsscodel_init,
	.reset		=	nsscodel_reset,
	.destroy	=	nsscodel_destroy,
	.change		=	nsscodel_change,
	.dump		=	nsscodel_dump,
	.dump_stats	=	nsscodel_dump_stats,
	.owner		=	THIS_MODULE,
};
2354
2355/* =========================== NSSTBL ========================= */
2356
/*
 * nsstbl_sched_data
 *	Private data for the nsstbl (token bucket limiter) qdisc.
 */
struct nsstbl_sched_data {
	struct nssqdisc_qdisc nq;	/* Common base class for all nss qdiscs */
	u32 rate;			/* Limiting rate of TBL */
	u32 peakrate;			/* Maximum rate to control bursts */
	u32 burst;			/* Maximum allowed burst size */
	u32 mtu;			/* MTU of the interface attached to */
	u32 mpu;			/* Minimum size of a packet (when there is
					 * no data)
					 */
	struct Qdisc *qdisc;		/* Qdisc to which it is attached to */
};
2368
2369
/*
 * nsstbl_enqueue()
 *	Delegates to the common NSS qdisc enqueue handler.
 */
static int nsstbl_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	return nssqdisc_enqueue(skb, sch);
}
2374
/*
 * nsstbl_dequeue()
 *	Delegates to the common NSS qdisc dequeue handler.
 */
static struct sk_buff *nsstbl_dequeue(struct Qdisc *sch)
{
	return nssqdisc_dequeue(sch);
}
2379
/*
 * nsstbl_drop()
 *	Delegates to the common NSS qdisc drop handler.
 */
static unsigned int nsstbl_drop(struct Qdisc *sch)
{
	return nssqdisc_drop(sch);
}
2384
/*
 * nsstbl_peek()
 *	Delegates to the common NSS qdisc peek handler.
 */
static struct sk_buff *nsstbl_peek(struct Qdisc *sch)
{
	return nssqdisc_peek(sch);
}
2389
/*
 * nsstbl_reset()
 *	Delegates to the common NSS qdisc reset handler.
 */
static void nsstbl_reset(struct Qdisc *sch)
{
	nssqdisc_reset(sch);
}
2394
2395static void nsstbl_destroy(struct Qdisc *sch)
2396{
2397 struct nsstbl_sched_data *q = qdisc_priv(sch);
2398 qdisc_destroy(q->qdisc);
2399
2400 /*
2401 * Stop the polling of basic stats
2402 */
2403 nssqdisc_start_basic_stats_polling(sch);
2404 nssqdisc_destroy(sch);
2405}
2406
/*
 * Netlink policy: TCA_NSSTBL_PARMS carries a struct tc_nsstbl_qopt.
 */
static const struct nla_policy nsstbl_policy[TCA_NSSTBL_MAX + 1] = {
	[TCA_NSSTBL_PARMS] = { .len = sizeof(struct tc_nsstbl_qopt) },
};
2410
/*
 * nsstbl_change()
 *	Validates netlink parameters and programs the NSS tbl node's
 *	committed (CIR) and peak (PIR) rate limiters.
 *
 * Returns 0 on success or a negative errno. burst must be >= mtu, and a
 * non-zero peakrate requires a non-zero mtu; rate itself may be zero.
 */
static int nsstbl_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct nsstbl_sched_data *q;
	struct nlattr *na[TCA_NSSTBL_MAX + 1];
	struct tc_nsstbl_qopt *qopt;
	struct nss_shaper_configure shaper_node_change_param;
	int err;

	q = qdisc_priv(sch);

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(na, TCA_NSSTBL_MAX, opt, nsstbl_policy);
	if (err < 0)
		return err;

	if (na[TCA_NSSTBL_PARMS] == NULL)
		return -EINVAL;

	qopt = nla_data(na[TCA_NSSTBL_PARMS]);

	/*
	 * Burst size cannot be less than MTU
	 */
	if (qopt->burst < qopt->mtu) {
		nssqdisc_error("Burst size: %u is less than the specified MTU: %u\n", qopt->burst, qopt->mtu);
		return -EINVAL;
	}

	/*
	 * For peak rate to work, MTU must be specified.
	 */
	if (qopt->peakrate > 0 && qopt->mtu == 0) {
		nssqdisc_error("MTU cannot be zero if peakrate is specified\n");
		return -EINVAL;
	}


	/*
	 * Rate can be zero. Therefore we dont do a check on it.
	 */
	q->rate = qopt->rate;
	nssqdisc_info("Rate = %u", qopt->rate);
	q->burst = qopt->burst;
	nssqdisc_info("Burst = %u", qopt->burst);
	q->mtu = qopt->mtu;
	nssqdisc_info("MTU = %u", qopt->mtu);
	q->peakrate = qopt->peakrate;
	nssqdisc_info("Peak Rate = %u", qopt->peakrate);

	shaper_node_change_param.mt.shaper_node_config.qos_tag = q->nq.qos_tag;
	shaper_node_change_param.mt.shaper_node_config.snc.tbl_param.lap_cir.rate = q->rate;
	shaper_node_change_param.mt.shaper_node_config.snc.tbl_param.lap_cir.burst = q->burst;
	shaper_node_change_param.mt.shaper_node_config.snc.tbl_param.lap_cir.max_size = q->mtu;
	shaper_node_change_param.mt.shaper_node_config.snc.tbl_param.lap_cir.short_circuit = false;
	shaper_node_change_param.mt.shaper_node_config.snc.tbl_param.lap_pir.rate = q->peakrate;

	/*
	 * It is important to set these two parameters to be the same as MTU.
	 * This ensures bursts from CIR dont go above the specified peakrate.
	 */
	shaper_node_change_param.mt.shaper_node_config.snc.tbl_param.lap_pir.burst = q->mtu;
	shaper_node_change_param.mt.shaper_node_config.snc.tbl_param.lap_pir.max_size = q->mtu;

	/* A zero peakrate disables the PIR stage by short-circuiting it. */
	if (q->peakrate) {
		shaper_node_change_param.mt.shaper_node_config.snc.tbl_param.lap_pir.short_circuit = false;
	} else {
		shaper_node_change_param.mt.shaper_node_config.snc.tbl_param.lap_pir.short_circuit = true;
	}

	if (nssqdisc_configure(sch, &shaper_node_change_param,
			NSS_SHAPER_CONFIG_TYPE_TBL_CHANGE_PARAM) < 0) {
		return -EINVAL;
	}

	return 0;
}
2489
/*
 * nsstbl_init()
 *	Creates the NSS tbl shaper node with no child attached (noop),
 *	applies the initial configuration and starts stats polling.
 *
 * Returns 0 on success, -EINVAL on failure.
 */
static int nsstbl_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct nsstbl_sched_data *q = qdisc_priv(sch);

	if (opt == NULL)
		return -EINVAL;

	q->qdisc = &noop_qdisc;

	if (nssqdisc_init(sch, NSS_SHAPER_NODE_TYPE_TBL) < 0)
		return -EINVAL;

	if (nsstbl_change(sch, opt) < 0) {
		nssqdisc_info("Failed to configure tbl\n");
		nssqdisc_destroy(sch);
		return -EINVAL;
	}

	/*
	 * Start the stats polling timer
	 */
	nssqdisc_start_basic_stats_polling(sch);

	return 0;
}
2515
2516static int nsstbl_dump(struct Qdisc *sch, struct sk_buff *skb)
2517{
2518 struct nsstbl_sched_data *q = qdisc_priv(sch);
2519 struct nlattr *opts = NULL;
2520 struct tc_nsstbl_qopt opt = {
2521 .rate = q->rate,
2522 .peakrate = q->peakrate,
2523 .burst = q->burst,
2524 .mtu = q->mtu,
2525 };
2526
2527 nssqdisc_info("Nsstbl dumping");
2528 opts = nla_nest_start(skb, TCA_OPTIONS);
2529 if (opts == NULL)
2530 goto nla_put_failure;
2531 NLA_PUT(skb, TCA_NSSTBL_PARMS, sizeof(opt), &opt);
2532 return nla_nest_end(skb, opts);
2533
2534nla_put_failure:
2535 nla_nest_cancel(skb, opts);
2536 return -EMSGSIZE;
2537}
2538
/*
 * nsstbl_dump_class()
 *	Reports the single tbl class (minor 1) and its child qdisc handle.
 */
static int nsstbl_dump_class(struct Qdisc *sch, unsigned long cl,
				struct sk_buff *skb, struct tcmsg *tcm)
{
	struct nsstbl_sched_data *q = qdisc_priv(sch);
	nssqdisc_info("Nsstbl dumping class");

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}
2550
2551static int nsstbl_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
2552 struct Qdisc **old)
2553{
2554 struct nsstbl_sched_data *q = qdisc_priv(sch);
2555 struct nssqdisc_qdisc *nq_new = (struct nssqdisc_qdisc *)qdisc_priv(new);
2556 struct nss_shaper_configure shaper_node_attach, shaper_node_detach;
2557
2558 if (new == NULL)
2559 new = &noop_qdisc;
2560
2561 sch_tree_lock(sch);
2562 *old = q->qdisc;
2563 q->qdisc = new;
2564 qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
2565 qdisc_reset(*old);
2566 sch_tree_unlock(sch);
2567
2568 nssqdisc_info("%s:Grafting old: %p with new: %p\n", __func__, *old, new);
2569 if (*old != &noop_qdisc) {
2570 nssqdisc_info("%s: Detaching old: %p\n", __func__, *old);
2571 shaper_node_detach.mt.shaper_node_config.qos_tag = q->nq.qos_tag;
2572 if (nssqdisc_node_detach(sch, &shaper_node_detach,
2573 NSS_SHAPER_CONFIG_TYPE_TBL_DETACH) < 0) {
2574 return -EINVAL;
2575 }
2576 }
2577
2578 if (new != &noop_qdisc) {
2579 nssqdisc_info("%s: Attaching new: %p\n", __func__, new);
2580 shaper_node_attach.mt.shaper_node_config.qos_tag = q->nq.qos_tag;
2581 shaper_node_attach.mt.shaper_node_config.snc.tbl_attach.child_qos_tag = nq_new->qos_tag;
2582 if (nssqdisc_node_attach(sch, &shaper_node_attach,
2583 NSS_SHAPER_CONFIG_TYPE_TBL_ATTACH) < 0) {
2584 return -EINVAL;
2585 }
2586 }
2587
2588 nssqdisc_info("Nsstbl grafted");
2589
2590 return 0;
2591}
2592
/*
 * nsstbl_leaf()
 *	Returns the single child qdisc attached to tbl.
 */
static struct Qdisc *nsstbl_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct nsstbl_sched_data *q = qdisc_priv(sch);
	nssqdisc_info("Nsstbl returns leaf");
	return q->qdisc;
}
2599
/*
 * nsstbl_get()
 *	tbl exposes exactly one class; always hand back id 1.
 */
static unsigned long nsstbl_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}
2604
/*
 * nsstbl_put()
 *	Class reference release — nothing to do for tbl.
 */
static void nsstbl_put(struct Qdisc *sch, unsigned long arg)
{
}
2608
/*
 * nsstbl_walk()
 *	Walks tbl's single class (id 1), honouring the walker's
 *	skip/count/stop protocol.
 */
static void nsstbl_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	nssqdisc_info("Nsstbl walk called");
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}
2621
/*
 * Class operations for the single-class nsstbl qdisc.
 */
static const struct Qdisc_class_ops nsstbl_class_ops = {
	.graft		=	nsstbl_graft,
	.leaf		=	nsstbl_leaf,
	.get		=	nsstbl_get,
	.put		=	nsstbl_put,
	.walk		=	nsstbl_walk,
	.dump		=	nsstbl_dump_class,
};
2630
/*
 * Registration table for the nsstbl qdisc.
 */
static struct Qdisc_ops nsstbl_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.id		=	"nsstbl",
	.priv_size	=	sizeof(struct nsstbl_sched_data),
	.cl_ops		=	&nsstbl_class_ops,
	.enqueue	=	nsstbl_enqueue,
	.dequeue	=	nsstbl_dequeue,
	.peek		=	nsstbl_peek,
	.drop		=	nsstbl_drop,
	.init		=	nsstbl_init,
	.reset		=	nsstbl_reset,
	.destroy	=	nsstbl_destroy,
	.change		=	nsstbl_change,
	.dump		=	nsstbl_dump,
	.owner		=	THIS_MODULE,
};
2647
2648/* =========================== NSSPRIO ========================= */
2649
/*
 * nssprio_sched_data
 *	Private data for the nssprio qdisc.
 */
struct nssprio_sched_data {
	struct nssqdisc_qdisc nq;	/* Common base class for all nss qdiscs */
	int bands;			/* Number of priority bands to use */
	struct Qdisc *queues[TCA_NSSPRIO_MAX_BANDS];
					/* Array of child qdisc holder */
};
2656
/*
 * nssprio_enqueue()
 *	Delegates to the common NSS qdisc enqueue handler.
 */
static int nssprio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	return nssqdisc_enqueue(skb, sch);
}
2661
/*
 * nssprio_dequeue()
 *	Delegates to the common NSS qdisc dequeue handler.
 */
static struct sk_buff *nssprio_dequeue(struct Qdisc *sch)
{
	return nssqdisc_dequeue(sch);
}
2666
/*
 * nssprio_drop()
 *	Delegates to the common NSS qdisc drop handler.
 */
static unsigned int nssprio_drop(struct Qdisc *sch)
{
	return nssqdisc_drop(sch);
}
2671
/*
 * nssprio_peek()
 *	Delegates to the common NSS qdisc peek handler.
 */
static struct sk_buff *nssprio_peek(struct Qdisc *sch)
{
	return nssqdisc_peek(sch);
}
2676
/*
 * nssprio_reset()
 *	Delegates to the common NSS qdisc reset handler.
 */
static void nssprio_reset(struct Qdisc *sch)
{
	return nssqdisc_reset(sch);
}
2681
/*
 * nssprio_destroy()
 *	Destroys all attached child qdiscs, waits for stats polling to
 *	finish, then destroys the NSS prio node itself.
 */
static void nssprio_destroy(struct Qdisc *sch)
{
	struct nssprio_sched_data *q = qdisc_priv(sch);
	int i;

	nssqdisc_info("Destroying prio");

	/*
	 * Destroy all attached child nodes before destroying prio
	 */
	for (i = 0; i < q->bands; i++)
		qdisc_destroy(q->queues[i]);

	/*
	 * Stop the polling of basic stats
	 */
	nssqdisc_stop_basic_stats_polling(sch);

	nssqdisc_destroy(sch);
}
2702
2703static const struct nla_policy nssprio_policy[TCA_NSSTBL_MAX + 1] = {
2704 [TCA_NSSTBL_PARMS] = { .len = sizeof(struct tc_nssprio_qopt) },
2705};
2706
2707static int nssprio_change(struct Qdisc *sch, struct nlattr *opt)
2708{
2709 struct nssprio_sched_data *q;
2710 struct nlattr *na[TCA_NSSTBL_MAX + 1];
2711 struct tc_nssprio_qopt *qopt;
2712 int err;
2713
2714 q = qdisc_priv(sch);
2715
2716 if (opt == NULL) {
2717 return -EINVAL;
2718 }
2719
2720 err = nla_parse_nested(na, TCA_NSSPRIO_MAX, opt, nssprio_policy);
2721 if (err < 0) {
2722 return err;
2723 }
2724
2725 if (na[TCA_NSSPRIO_PARMS] == NULL) {
2726 return -EINVAL;
2727 }
2728
2729 qopt = nla_data(na[TCA_NSSPRIO_PARMS]);
2730
2731 if (qopt->bands > TCA_NSSPRIO_MAX_BANDS) {
2732 return -EINVAL;
2733 }
2734
2735 q->bands = qopt->bands;
2736 nssqdisc_info("Bands = %u\n", qopt->bands);
2737
2738 return 0;
2739}
2740
/*
 * nssprio_init()
 *	Creates the NSS prio shaper node with all bands pointing at the
 *	noop qdisc, applies the initial band count and starts stats polling.
 *
 * Returns 0 on success, -EINVAL on failure.
 */
static int nssprio_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct nssprio_sched_data *q = qdisc_priv(sch);
	int i;

	if (opt == NULL)
		return -EINVAL;

	for (i = 0; i < TCA_NSSPRIO_MAX_BANDS; i++)
		q->queues[i] = &noop_qdisc;

	q->bands = 0;
	if (nssqdisc_init(sch, NSS_SHAPER_NODE_TYPE_PRIO) < 0)
		return -EINVAL;

	nssqdisc_info("Nssprio initialized - handle %x parent %x\n",
			sch->handle, sch->parent);
	if (nssprio_change(sch, opt) < 0) {
		nssqdisc_destroy(sch);
		return -EINVAL;
	}

	/*
	 * Start the stats polling timer
	 */
	nssqdisc_start_basic_stats_polling(sch);
	return 0;
}
2769
2770static int nssprio_dump(struct Qdisc *sch, struct sk_buff *skb)
2771{
2772 struct nssprio_sched_data *q = qdisc_priv(sch);
2773 struct nlattr *opts = NULL;
2774 struct tc_nssprio_qopt qopt;
2775
2776 nssqdisc_info("Nssprio dumping");
2777 qopt.bands = q->bands;
2778
2779 opts = nla_nest_start(skb, TCA_OPTIONS);
2780 if (opts == NULL)
2781 goto nla_put_failure;
2782 NLA_PUT(skb, TCA_NSSPRIO_PARMS, sizeof(qopt), &qopt);
2783 return nla_nest_end(skb, opts);
2784
2785nla_put_failure:
2786 nla_nest_cancel(skb, opts);
2787 return -EMSGSIZE;
2788}
2789
2790static int nssprio_graft(struct Qdisc *sch, unsigned long arg,
2791 struct Qdisc *new, struct Qdisc **old)
2792{
2793 struct nssprio_sched_data *q = qdisc_priv(sch);
2794 struct nssqdisc_qdisc *nq_new = (struct nssqdisc_qdisc *)qdisc_priv(new);
2795 uint32_t band = (uint32_t)(arg - 1);
2796 struct nss_shaper_configure shaper_node_attach, shaper_node_detach;
2797
2798 nssqdisc_info("Grafting band %u, available bands %u\n", band, q->bands);
2799
2800 if (new == NULL)
2801 new = &noop_qdisc;
2802
2803 if (band > q->bands)
2804 return -EINVAL;
2805
2806 sch_tree_lock(sch);
2807 *old = q->queues[band];
2808 q->queues[band] = new;
2809 qdisc_reset(*old);
2810 sch_tree_unlock(sch);
2811
2812 nssqdisc_info("%s:Grafting old: %p with new: %p\n", __func__, *old, new);
2813 if (*old != &noop_qdisc) {
2814 nssqdisc_info("%s:Detaching old: %p\n", __func__, *old);
2815 shaper_node_detach.mt.shaper_node_config.qos_tag = q->nq.qos_tag;
2816 shaper_node_detach.mt.shaper_node_config.snc.prio_detach.priority = band;
2817 if (nssqdisc_node_detach(sch, &shaper_node_detach,
2818 NSS_SHAPER_CONFIG_TYPE_PRIO_DETACH) < 0) {
2819 return -EINVAL;
2820 }
2821 }
2822
2823 if (new != &noop_qdisc) {
2824 nssqdisc_info("%s:Attaching new child with qos tag: %x, priority: %u to "
2825 "qos_tag: %x\n", __func__, nq_new->qos_tag, band, q->nq.qos_tag);
2826 shaper_node_attach.mt.shaper_node_config.qos_tag = q->nq.qos_tag;
2827 shaper_node_attach.mt.shaper_node_config.snc.prio_attach.child_qos_tag = nq_new->qos_tag;
2828 shaper_node_attach.mt.shaper_node_config.snc.prio_attach.priority = band;
2829 if (nssqdisc_node_attach(sch, &shaper_node_attach,
2830 NSS_SHAPER_CONFIG_TYPE_PRIO_ATTACH) < 0) {
2831 return -EINVAL;
2832 }
2833 }
2834 nssqdisc_info("Nssprio grafted");
2835
2836 return 0;
2837}
2838
2839static struct Qdisc *nssprio_leaf(struct Qdisc *sch, unsigned long arg)
2840{
2841 struct nssprio_sched_data *q = qdisc_priv(sch);
2842 uint32_t band = (uint32_t)(arg - 1);
2843
2844 nssqdisc_info("Nssprio returns leaf");
2845
2846 if (band > q->bands)
2847 return NULL;
2848
2849 return q->queues[band];
2850}
2851
/*
 * nssprio_get()
 *	Maps a classid to a class reference. Band here is the 1-based minor
 *	number (valid 1 .. bands), so the "band > q->bands" check is correct;
 *	0 is returned for out-of-range ids (not found).
 */
static unsigned long nssprio_get(struct Qdisc *sch, u32 classid)
{
	struct nssprio_sched_data *q = qdisc_priv(sch);
	unsigned long band = TC_H_MIN(classid);

	nssqdisc_info("Inside get. Handle - %x Classid - %x Band %lu Available band %u", sch->handle, classid, band, q->bands);

	if (band > q->bands)
		return 0;

	return band;
}
2864
/*
 * nssprio_put()
 *	Class reference release — nothing to do for nssprio.
 */
static void nssprio_put(struct Qdisc *sch, unsigned long arg)
{
	/* Bug fix: the trace previously said "Inside prio get". */
	nssqdisc_info("Inside prio put\n");
}
2869
/*
 * nssprio_walk()
 *	Iterates over all configured bands (reported as 1-based class ids),
 *	honouring the walker's skip/count/stop protocol.
 */
static void nssprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct nssprio_sched_data *q = qdisc_priv(sch);
	int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->bands; i++) {
		if (arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, i + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
	nssqdisc_info("Nssprio walk called");
}
2891
/*
 * nssprio_dump_class()
 *	Reports a band's class handle and the attached child qdisc handle.
 *	NOTE(review): cl is indexed into queues[] without a bounds check —
 *	presumably the core only passes ids returned by get/walk; confirm.
 */
static int nssprio_dump_class(struct Qdisc *sch, unsigned long cl,
				struct sk_buff *skb, struct tcmsg *tcm)
{
	struct nssprio_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(cl);
	tcm->tcm_info = q->queues[cl - 1]->handle;

	nssqdisc_info("Nssprio dumping class");
	return 0;
}
2903
/*
 * nssprio_dump_class_stats()
 *	Copies the child qdisc's basic and queue statistics for one band
 *	into the class stats dump. Returns -1 on copy failure.
 */
static int nssprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
					struct gnet_dump *d)
{
	struct nssprio_sched_data *q = qdisc_priv(sch);
	struct Qdisc *cl_q;

	cl_q = q->queues[cl - 1];
	cl_q->qstats.qlen = cl_q->q.qlen;
	if (gnet_stats_copy_basic(d, &cl_q->bstats) < 0 ||
	    gnet_stats_copy_queue(d, &cl_q->qstats) < 0)
		return -1;

	nssqdisc_info("Nssprio dumping class stats");
	return 0;
}
2919
/*
 * Class operations for the multi-band nssprio qdisc.
 */
static const struct Qdisc_class_ops nssprio_class_ops = {
	.graft		=	nssprio_graft,
	.leaf		=	nssprio_leaf,
	.get		=	nssprio_get,
	.put		=	nssprio_put,
	.walk		=	nssprio_walk,
	.dump		=	nssprio_dump_class,
	.dump_stats	=	nssprio_dump_class_stats,
};
2929
/*
 * Registration table for the nssprio qdisc.
 */
static struct Qdisc_ops nssprio_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.id		=	"nssprio",
	.priv_size	=	sizeof(struct nssprio_sched_data),
	.cl_ops		=	&nssprio_class_ops,
	.enqueue	=	nssprio_enqueue,
	.dequeue	=	nssprio_dequeue,
	.peek		=	nssprio_peek,
	.drop		=	nssprio_drop,
	.init		=	nssprio_init,
	.reset		=	nssprio_reset,
	.destroy	=	nssprio_destroy,
	.change		=	nssprio_change,
	.dump		=	nssprio_dump,
	.owner		=	THIS_MODULE,
};
2946
2947/* ================== Module registration ================= */
2948
2949static int __init nssqdisc_module_init(void)
2950{
2951 int ret;
2952 nssqdisc_info("Module initializing");
2953 nssqdisc_ctx = nss_register_shaping();
2954
2955 ret = register_qdisc(&nsspfifo_qdisc_ops);
2956 if (ret != 0)
2957 return ret;
2958 nssqdisc_info("NSS pfifo registered");
2959
2960 ret = register_qdisc(&nssbfifo_qdisc_ops);
2961 if (ret != 0)
2962 return ret;
2963 nssqdisc_info("NSS bfifo registered");
2964
2965 ret = register_qdisc(&nsscodel_qdisc_ops);
2966 if (ret != 0)
2967 return ret;
2968 nssqdisc_info("NSSCodel registered");
2969
2970 ret = register_qdisc(&nsstbl_qdisc_ops);
2971 if (ret != 0)
2972 return ret;
2973 nssqdisc_info("NSSTBL registered");
2974
2975 ret = register_qdisc(&nssprio_qdisc_ops);
2976 if (ret != 0)
2977 return ret;
2978 nssqdisc_info("NSSPRIO registered");
2979
2980 ret = register_netdevice_notifier(&nssqdisc_device_notifier);
2981 if (ret != 0)
2982 return ret;
2983 nssqdisc_info("NSS qdisc device notifiers registered");
2984
2985 return 0;
2986}
2987
/*
 * nssqdisc_module_exit()
 *	Unregisters all NSS qdisc types and the netdevice notifier.
 */
static void __exit nssqdisc_module_exit(void)
{
	unregister_qdisc(&nsspfifo_qdisc_ops);
	nssqdisc_info("NSSPFIFO Unregistered");
	unregister_qdisc(&nssbfifo_qdisc_ops);
	nssqdisc_info("NSSBFIFO Unregistered");
	unregister_qdisc(&nsscodel_qdisc_ops);
	nssqdisc_info("NSSCODEL Unregistered");
	unregister_qdisc(&nsstbl_qdisc_ops);
	nssqdisc_info("NSSTBL Unregistered");
	unregister_qdisc(&nssprio_qdisc_ops);
	nssqdisc_info("NSSPRIO Unregistered");
	unregister_netdevice_notifier(&nssqdisc_device_notifier);
}
3002
/*
 * Module entry/exit points and license declaration.
 */
module_init(nssqdisc_module_init)
module_exit(nssqdisc_module_exit)

MODULE_LICENSE("GPL");