/*
 **************************************************************************
 * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all copies.
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 **************************************************************************
 */

/*
 * nss_stats.c
 *	NSS stats APIs
 *
 */

#include "nss_core.h"

/*
 * Maximum string length:
 * This should be equal to maximum string size of any stats
 * inclusive of stats value
 */
#define NSS_STATS_MAX_STR_LENGTH 96

/*
 * Global variables/extern declarations
 */
extern struct nss_top_instance nss_top_main;

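/*
 * Shadow copy of the per-session PPPoE exception statistics: filled while
 * holding the stats lock and printed after the lock is released
 * (see nss_stats_pppoe_read()).
 */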
uint64_t stats_shadow_pppoe_except[NSS_PPPOE_NUM_SESSION_PER_INTERFACE][NSS_PPPOE_EXCEPTION_EVENT_MAX];

/*
 * Private data for every file descriptor
 */
struct nss_stats_data {
	uint32_t if_num;	/**< Interface number for CAPWAP stats */
	uint32_t index;		/**< Index for GRE_REDIR stats */
};

/*
 * Statistics structures
 */

/*
 * nss_stats_str_ipv4
 *	IPv4 stats strings
 */
static int8_t *nss_stats_str_ipv4[NSS_STATS_IPV4_MAX] = {
	"rx_pkts",
	"rx_bytes",
	"tx_pkts",
	"tx_bytes",
	"create_requests",
	"create_collisions",
	"create_invalid_interface",
	"destroy_requests",
	"destroy_misses",
	"hash_hits",
	"hash_reorders",
	"flushes",
	"evictions",
	"fragmentations"
};

/*
 * nss_stats_str_ipv4_reasm
 *	IPv4 reassembly stats strings
 */
static int8_t *nss_stats_str_ipv4_reasm[NSS_STATS_IPV4_REASM_MAX] = {
	"evictions",
	"alloc_fails",
	"timeouts",
};

/*
 * nss_stats_str_ipv6
 *	IPv6 stats strings
 */
static int8_t *nss_stats_str_ipv6[NSS_STATS_IPV6_MAX] = {
	"rx_pkts",
	"rx_bytes",
	"tx_pkts",
	"tx_bytes",
	"create_requests",
	"create_collisions",
	"create_invalid_interface",
	"destroy_requests",
	"destroy_misses",
	"hash_hits",
	"hash_reorders",
	"flushes",
	"evictions",
};

/*
 * nss_stats_str_n2h
 *	N2H stats strings
 */
static int8_t *nss_stats_str_n2h[NSS_STATS_N2H_MAX] = {
	"queue_dropped",
	"ticks",
	"worst_ticks",
	"iterations",
	"pbuf_ocm_alloc_fails",
	"pbuf_ocm_free_count",
	"pbuf_ocm_total_count",
	"pbuf_default_alloc_fails",
	"pbuf_default_free_count",
	"pbuf_default_total_count",
	"payload_fails",
	"payload_free_count",
	"h2n_control_packets",
	"h2n_control_bytes",
	"n2h_control_packets",
	"n2h_control_bytes",
	"h2n_data_packets",
	"h2n_data_bytes",
	"n2h_data_packets",
	"n2h_data_bytes",
};

/*
 * nss_stats_str_lso_rx
 *	LSO_RX stats strings
 */
static int8_t *nss_stats_str_lso_rx[NSS_STATS_LSO_RX_MAX] = {
	"tx_dropped",
	"dropped",
	"pbuf_alloc_fail",
	"pbuf_reference_fail"
};

/*
 * nss_stats_str_drv
 *	Host driver stats strings
 */
static int8_t *nss_stats_str_drv[NSS_STATS_DRV_MAX] = {
	"nbuf_alloc_errors",
	"tx_queue_full[0]",
	"tx_queue_full[1]",
	"tx_buffers_empty",
	"tx_buffers_pkt",
	"tx_buffers_cmd",
	"tx_buffers_crypto",
	"rx_buffers_empty",
	"rx_buffers_pkt",
	"rx_buffers_cmd_resp",
	"rx_buffers_status_sync",
	"rx_buffers_crypto",
	"rx_buffers_virtual",
	"tx_skb_simple",
	"tx_skb_nr_frags",
	"tx_skb_fraglist",
	"rx_skb_simple",
	"rx_skb_nr_frags",
	"rx_skb_fraglist",
	"rx_bad_desciptor",
	"nss_skb_count"
};

/*
 * nss_stats_str_pppoe
 *	PPPoE stats strings
 */
static int8_t *nss_stats_str_pppoe[NSS_STATS_PPPOE_MAX] = {
	"create_requests",
	"create_failures",
	"destroy_requests",
	"destroy_misses"
};

/*
 * nss_stats_str_gmac
 *	GMAC stats strings
 */
static int8_t *nss_stats_str_gmac[NSS_STATS_GMAC_MAX] = {
	"ticks",
	"worst_ticks",
	"iterations"
};

/*
 * nss_stats_str_node
 *	Interface stats strings per node
 */
static int8_t *nss_stats_str_node[NSS_STATS_NODE_MAX] = {
	"rx_packets",
	"rx_bytes",
	"rx_dropped",
	"tx_packets",
	"tx_bytes"
};

/*
 * nss_stats_str_eth_rx
 *	eth_rx stats strings
 */
static int8_t *nss_stats_str_eth_rx[NSS_STATS_ETH_RX_MAX] = {
	"ticks",
	"worst_ticks",
	"iterations"
};

/*
 * nss_stats_str_if_exception_eth_rx
 *	Interface stats strings for eth_rx exceptions
 */
static int8_t *nss_stats_str_if_exception_eth_rx[NSS_EXCEPTION_EVENT_ETH_RX_MAX] = {
	"UNKNOWN_L3_PROTOCOL"
};

/*
 * nss_stats_str_if_exception_ipv4
 *	Interface stats strings for ipv4 exceptions
 */
static int8_t *nss_stats_str_if_exception_ipv4[NSS_EXCEPTION_EVENT_IPV4_MAX] = {
	"IPV4_ICMP_HEADER_INCOMPLETE",
	"IPV4_ICMP_UNHANDLED_TYPE",
	"IPV4_ICMP_IPV4_HEADER_INCOMPLETE",
	"IPV4_ICMP_IPV4_UDP_HEADER_INCOMPLETE",
	"IPV4_ICMP_IPV4_TCP_HEADER_INCOMPLETE",
	"IPV4_ICMP_IPV4_UNKNOWN_PROTOCOL",
	"IPV4_ICMP_NO_ICME",
	"IPV4_ICMP_FLUSH_TO_HOST",
	"IPV4_TCP_HEADER_INCOMPLETE",
	"IPV4_TCP_NO_ICME",
	"IPV4_TCP_IP_OPTION",
	"IPV4_TCP_IP_FRAGMENT",
	"IPV4_TCP_SMALL_TTL",
	"IPV4_TCP_NEEDS_FRAGMENTATION",
	"IPV4_TCP_FLAGS",
	"IPV4_TCP_SEQ_EXCEEDS_RIGHT_EDGE",
	"IPV4_TCP_SMALL_DATA_OFFS",
	"IPV4_TCP_BAD_SACK",
	"IPV4_TCP_BIG_DATA_OFFS",
	"IPV4_TCP_SEQ_BEFORE_LEFT_EDGE",
	"IPV4_TCP_ACK_EXCEEDS_RIGHT_EDGE",
	"IPV4_TCP_ACK_BEFORE_LEFT_EDGE",
	"IPV4_UDP_HEADER_INCOMPLETE",
	"IPV4_UDP_NO_ICME",
	"IPV4_UDP_IP_OPTION",
	"IPV4_UDP_IP_FRAGMENT",
	"IPV4_UDP_SMALL_TTL",
	"IPV4_UDP_NEEDS_FRAGMENTATION",
	"IPV4_WRONG_TARGET_MAC",
	"IPV4_HEADER_INCOMPLETE",
	"IPV4_BAD_TOTAL_LENGTH",
	"IPV4_BAD_CHECKSUM",
	"IPV4_NON_INITIAL_FRAGMENT",
	"IPV4_DATAGRAM_INCOMPLETE",
	"IPV4_OPTIONS_INCOMPLETE",
	"IPV4_UNKNOWN_PROTOCOL",
	"IPV4_ESP_HEADER_INCOMPLETE",
	"IPV4_ESP_NO_ICME",
	"IPV4_ESP_IP_OPTION",
	"IPV4_ESP_IP_FRAGMENT",
	"IPV4_ESP_SMALL_TTL",
	"IPV4_ESP_NEEDS_FRAGMENTATION",
	"IPV4_INGRESS_VID_MISMATCH",
	"IPV4_INGRESS_VID_MISSING",
	"IPV4_6RD_NO_ICME",
	"IPV4_6RD_IP_OPTION",
	"IPV4_6RD_IP_FRAGMENT",
	"IPV4_6RD_NEEDS_FRAGMENTATION",
	"IPV4_DSCP_MARKING_MISMATCH",
	"IPV4_VLAN_MARKING_MISMATCH",
	"IPV4_INTERFACE_MISMATCH",
	"IPV4_GRE_HEADER_INCOMPLETE",
	"IPV4_GRE_NO_ICME",
	"IPV4_GRE_IP_OPTION",
	"IPV4_GRE_IP_FRAGMENT",
	"IPV4_GRE_SMALL_TTL",
	"IPV4_GRE_NEEDS_FRAGMENTATION",
	"IPV4_FRAG_DF_SET",
	"IPV4_FRAG_FAIL",
	"IPV4_DESTROY",
	"IPV4_ICMP_IPV4_UDPLITE_HEADER_INCOMPLETE",
	"IPV4_UDPLITE_HEADER_INCOMPLETE",
	"IPV4_UDPLITE_NO_ICME",
	"IPV4_UDPLITE_IP_OPTION",
	"IPV4_UDPLITE_IP_FRAGMENT",
	"IPV4_UDPLITE_SMALL_TTL",
	"IPV4_UDPLITE_NEEDS_FRAGMENTATION"
};

/*
 * nss_stats_str_if_exception_ipv6
 *	Interface stats strings for ipv6 exceptions
 */
static int8_t *nss_stats_str_if_exception_ipv6[NSS_EXCEPTION_EVENT_IPV6_MAX] = {
	"IPV6_ICMP_HEADER_INCOMPLETE",
	"IPV6_ICMP_UNHANDLED_TYPE",
	"IPV6_ICMP_IPV6_HEADER_INCOMPLETE",
	"IPV6_ICMP_IPV6_UDP_HEADER_INCOMPLETE",
	"IPV6_ICMP_IPV6_TCP_HEADER_INCOMPLETE",
	"IPV6_ICMP_IPV6_UNKNOWN_PROTOCOL",
	"IPV6_ICMP_NO_ICME",
	"IPV6_ICMP_FLUSH_TO_HOST",
	"IPV6_TCP_HEADER_INCOMPLETE",
	"IPV6_TCP_NO_ICME",
	"IPV6_TCP_SMALL_HOP_LIMIT",
	"IPV6_TCP_NEEDS_FRAGMENTATION",
	"IPV6_TCP_FLAGS",
	"IPV6_TCP_SEQ_EXCEEDS_RIGHT_EDGE",
	"IPV6_TCP_SMALL_DATA_OFFS",
	"IPV6_TCP_BAD_SACK",
	"IPV6_TCP_BIG_DATA_OFFS",
	"IPV6_TCP_SEQ_BEFORE_LEFT_EDGE",
	"IPV6_TCP_ACK_EXCEEDS_RIGHT_EDGE",
	"IPV6_TCP_ACK_BEFORE_LEFT_EDGE",
	"IPV6_UDP_HEADER_INCOMPLETE",
	"IPV6_UDP_NO_ICME",
	"IPV6_UDP_SMALL_HOP_LIMIT",
	"IPV6_UDP_NEEDS_FRAGMENTATION",
	"IPV6_WRONG_TARGET_MAC",
	"IPV6_HEADER_INCOMPLETE",
	"IPV6_UNKNOWN_PROTOCOL",
	"IPV6_INGRESS_VID_MISMATCH",
	"IPV6_INGRESS_VID_MISSING",
	"IPV6_DSCP_MARKING_MISMATCH",
	"IPV6_VLAN_MARKING_MISMATCH",
	"IPV6_INTERFACE_MISMATCH",
	"IPV6_GRE_NO_ICME",
	"IPV6_GRE_NEEDS_FRAGMENTATION",
	"IPV6_GRE_SMALL_HOP_LIMIT",
	"IPV6_DESTROY",
	"IPV6_ICMP_IPV6_UDPLITE_HEADER_INCOMPLETE",
	"IPV6_UDPLITE_HEADER_INCOMPLETE",
	"IPV6_UDPLITE_NO_ICME",
	"IPV6_UDPLITE_SMALL_HOP_LIMIT",
	"IPV6_UDPLITE_NEEDS_FRAGMENTATION"
};

/*
 * nss_stats_str_if_exception_pppoe
 *	Interface stats strings for PPPoE exceptions
 */
static int8_t *nss_stats_str_if_exception_pppoe[NSS_PPPOE_EXCEPTION_EVENT_MAX] = {
	"PPPOE_WRONG_VERSION_OR_TYPE",
	"PPPOE_WRONG_CODE",
	"PPPOE_HEADER_INCOMPLETE",
	"PPPOE_UNSUPPORTED_PPP_PROTOCOL",
	"PPPOE_INTERFACE_MISMATCH"
};

/*
 * nss_stats_ipv4_read()
 *	Read IPV4 stats
 */
static ssize_t nss_stats_ipv4_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
{
	int32_t i;
	/*
	 * max output lines = #stats + start tag line + end tag line + three blank lines
	 */
	uint32_t max_output_lines = (NSS_STATS_NODE_MAX + 2) + (NSS_STATS_IPV4_MAX + 3) + (NSS_EXCEPTION_EVENT_IPV4_MAX + 3) + 5;
	size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
	size_t size_wr = 0;
	ssize_t bytes_read = 0;
	uint64_t *stats_shadow;

	char *lbuf = kzalloc(size_al, GFP_KERNEL);
	if (unlikely(lbuf == NULL)) {
		nss_warning("Could not allocate memory for local statistics buffer");
		return 0;
	}

	/*
	 * Note: The assumption here is that exception event count is larger than other statistics count for IPv4
	 */
	stats_shadow = kzalloc(NSS_EXCEPTION_EVENT_IPV4_MAX * 8, GFP_KERNEL);
	if (unlikely(stats_shadow == NULL)) {
		nss_warning("Could not allocate memory for local shadow buffer");
		kfree(lbuf);
		return 0;
	}

	size_wr = scnprintf(lbuf, size_al, "ipv4 stats start:\n\n");

	/*
	 * Common node stats
	 */
	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "common node stats:\n\n");
	spin_lock_bh(&nss_top_main.stats_lock);
	for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
		stats_shadow[i] = nss_top_main.stats_node[NSS_IPV4_RX_INTERFACE][i];
	}

	spin_unlock_bh(&nss_top_main.stats_lock);

	for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_node[i], stats_shadow[i]);
	}

	/*
	 * IPv4 node stats
	 */
	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nipv4 node stats:\n\n");

	spin_lock_bh(&nss_top_main.stats_lock);
	for (i = 0; (i < NSS_STATS_IPV4_MAX); i++) {
		stats_shadow[i] = nss_top_main.stats_ipv4[i];
	}

	spin_unlock_bh(&nss_top_main.stats_lock);

	for (i = 0; (i < NSS_STATS_IPV4_MAX); i++) {
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_ipv4[i], stats_shadow[i]);
	}

	/*
	 * Exception stats
	 */
	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nipv4 exception stats:\n\n");

	spin_lock_bh(&nss_top_main.stats_lock);
	for (i = 0; (i < NSS_EXCEPTION_EVENT_IPV4_MAX); i++) {
		stats_shadow[i] = nss_top_main.stats_if_exception_ipv4[i];
	}

	spin_unlock_bh(&nss_top_main.stats_lock);

	for (i = 0; (i < NSS_EXCEPTION_EVENT_IPV4_MAX); i++) {
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_if_exception_ipv4[i], stats_shadow[i]);
	}

	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nipv4 stats end\n\n");
	bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
	kfree(lbuf);
	kfree(stats_shadow);

	return bytes_read;
}

/*
 * nss_stats_ipv4_reasm_read()
 *	Read IPV4 reassembly stats
 */
static ssize_t nss_stats_ipv4_reasm_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
{
	int32_t i;
	/*
	 * max output lines = #stats + start tag line + end tag line + three blank lines
	 */
	uint32_t max_output_lines = (NSS_STATS_NODE_MAX + 2) + (NSS_STATS_IPV4_REASM_MAX + 3) + 5;
	size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
	size_t size_wr = 0;
	ssize_t bytes_read = 0;
	uint64_t *stats_shadow;

	char *lbuf = kzalloc(size_al, GFP_KERNEL);
	if (unlikely(lbuf == NULL)) {
		nss_warning("Could not allocate memory for local statistics buffer");
		return 0;
	}

	stats_shadow = kzalloc(NSS_STATS_IPV4_REASM_MAX * 8, GFP_KERNEL);
	if (unlikely(stats_shadow == NULL)) {
		nss_warning("Could not allocate memory for local shadow buffer");
		kfree(lbuf);
		return 0;
	}

	size_wr = scnprintf(lbuf, size_al, "ipv4 reasm stats start:\n\n");

	/*
	 * Common node stats
	 */
	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "common node stats:\n\n");
	spin_lock_bh(&nss_top_main.stats_lock);
	for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
		stats_shadow[i] = nss_top_main.stats_node[NSS_IPV4_REASM_INTERFACE][i];
	}

	spin_unlock_bh(&nss_top_main.stats_lock);

	for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_node[i], stats_shadow[i]);
	}

	/*
	 * IPv4 reasm node stats
	 */
	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nipv4 reasm node stats:\n\n");

	spin_lock_bh(&nss_top_main.stats_lock);
	for (i = 0; (i < NSS_STATS_IPV4_REASM_MAX); i++) {
		stats_shadow[i] = nss_top_main.stats_ipv4_reasm[i];
	}

	spin_unlock_bh(&nss_top_main.stats_lock);

	for (i = 0; (i < NSS_STATS_IPV4_REASM_MAX); i++) {
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_ipv4_reasm[i], stats_shadow[i]);
	}

	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nipv4 reasm stats end\n\n");
	bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
	kfree(lbuf);
	kfree(stats_shadow);

	return bytes_read;
}

/*
 * nss_stats_ipv6_read()
 *	Read IPV6 stats
 */
static ssize_t nss_stats_ipv6_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
{
	int32_t i;

	/*
	 * max output lines = #stats + start tag line + end tag line + three blank lines
	 */
	uint32_t max_output_lines = (NSS_STATS_NODE_MAX + 2) + (NSS_STATS_IPV6_MAX + 3) + (NSS_EXCEPTION_EVENT_IPV6_MAX + 3) + 5;
	size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
	size_t size_wr = 0;
	ssize_t bytes_read = 0;
	uint64_t *stats_shadow;

	char *lbuf = kzalloc(size_al, GFP_KERNEL);
	if (unlikely(lbuf == NULL)) {
		nss_warning("Could not allocate memory for local statistics buffer");
		return 0;
	}

	/*
	 * Note: The assumption here is that exception event count is larger than other statistics count for IPv6
	 */
	stats_shadow = kzalloc(NSS_EXCEPTION_EVENT_IPV6_MAX * 8, GFP_KERNEL);
	if (unlikely(stats_shadow == NULL)) {
		nss_warning("Could not allocate memory for local shadow buffer");
		kfree(lbuf);
		return 0;
	}

	size_wr = scnprintf(lbuf, size_al, "ipv6 stats start:\n\n");

	/*
	 * Common node stats
	 */
	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "common node stats:\n\n");
	spin_lock_bh(&nss_top_main.stats_lock);
	for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
		stats_shadow[i] = nss_top_main.stats_node[NSS_IPV6_RX_INTERFACE][i];
	}

	spin_unlock_bh(&nss_top_main.stats_lock);

	for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_node[i], stats_shadow[i]);
	}

	/*
	 * IPv6 node stats
	 */
	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nipv6 node stats:\n\n");

	spin_lock_bh(&nss_top_main.stats_lock);
	for (i = 0; (i < NSS_STATS_IPV6_MAX); i++) {
		stats_shadow[i] = nss_top_main.stats_ipv6[i];
	}

	spin_unlock_bh(&nss_top_main.stats_lock);

	for (i = 0; (i < NSS_STATS_IPV6_MAX); i++) {
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_ipv6[i], stats_shadow[i]);
	}

	/*
	 * Exception stats
	 */
	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nipv6 exception stats:\n\n");

	spin_lock_bh(&nss_top_main.stats_lock);
	for (i = 0; (i < NSS_EXCEPTION_EVENT_IPV6_MAX); i++) {
		stats_shadow[i] = nss_top_main.stats_if_exception_ipv6[i];
	}

	spin_unlock_bh(&nss_top_main.stats_lock);

	for (i = 0; (i < NSS_EXCEPTION_EVENT_IPV6_MAX); i++) {
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_if_exception_ipv6[i], stats_shadow[i]);
	}

	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nipv6 stats end\n\n");
	bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
	kfree(lbuf);
	kfree(stats_shadow);

	return bytes_read;
}

/*
 * nss_stats_eth_rx_read()
 *	Read ETH_RX stats
 */
static ssize_t nss_stats_eth_rx_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
{
	int32_t i;

	/*
	 * max output lines = #stats + start tag line + end tag line + three blank lines
	 */
	uint32_t max_output_lines = (NSS_STATS_NODE_MAX + 2) + (NSS_STATS_ETH_RX_MAX + 3) + (NSS_EXCEPTION_EVENT_ETH_RX_MAX + 3) + 5;
	size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
	size_t size_wr = 0;
	ssize_t bytes_read = 0;
	uint64_t *stats_shadow;

	char *lbuf = kzalloc(size_al, GFP_KERNEL);
	if (unlikely(lbuf == NULL)) {
		nss_warning("Could not allocate memory for local statistics buffer");
		return 0;
	}

	/*
	 * Note: The assumption here is that we do not have more than 64 stats
	 */
	stats_shadow = kzalloc(64 * 8, GFP_KERNEL);
	if (unlikely(stats_shadow == NULL)) {
		nss_warning("Could not allocate memory for local shadow buffer");
		kfree(lbuf);
		return 0;
	}

	size_wr = scnprintf(lbuf, size_al, "eth_rx stats start:\n\n");

	/*
	 * Common node stats
	 */
	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "common node stats:\n\n");
	spin_lock_bh(&nss_top_main.stats_lock);
	for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
		stats_shadow[i] = nss_top_main.stats_node[NSS_ETH_RX_INTERFACE][i];
	}

	spin_unlock_bh(&nss_top_main.stats_lock);

	for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_node[i], stats_shadow[i]);
	}

	/*
	 * eth_rx node stats
	 */
	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\neth_rx node stats:\n\n");
	spin_lock_bh(&nss_top_main.stats_lock);
	for (i = 0; (i < NSS_STATS_ETH_RX_MAX); i++) {
		stats_shadow[i] = nss_top_main.stats_eth_rx[i];
	}

	spin_unlock_bh(&nss_top_main.stats_lock);

	for (i = 0; (i < NSS_STATS_ETH_RX_MAX); i++) {
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_eth_rx[i], stats_shadow[i]);
	}

	/*
	 * Exception stats
	 */
	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\neth_rx exception stats:\n\n");

	spin_lock_bh(&nss_top_main.stats_lock);
	for (i = 0; (i < NSS_EXCEPTION_EVENT_ETH_RX_MAX); i++) {
		stats_shadow[i] = nss_top_main.stats_if_exception_eth_rx[i];
	}

	spin_unlock_bh(&nss_top_main.stats_lock);

	for (i = 0; (i < NSS_EXCEPTION_EVENT_ETH_RX_MAX); i++) {
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_if_exception_eth_rx[i], stats_shadow[i]);
	}

	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\neth_rx stats end\n\n");
	bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
	kfree(lbuf);
	kfree(stats_shadow);

	return bytes_read;
}

/*
 * nss_stats_n2h_read()
 *	Read N2H stats
 */
static ssize_t nss_stats_n2h_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
{
	int32_t i;

	/*
	 * max output lines = #stats + start tag line + end tag line + three blank lines
	 */
	uint32_t max_output_lines = (NSS_STATS_NODE_MAX + 2) + (NSS_STATS_N2H_MAX + 3) + 5;
	size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
	size_t size_wr = 0;
	ssize_t bytes_read = 0;
	uint64_t *stats_shadow;
	int max = NSS_STATS_N2H_MAX - NSS_STATS_NODE_MAX;

	char *lbuf = kzalloc(size_al, GFP_KERNEL);
	if (unlikely(lbuf == NULL)) {
		nss_warning("Could not allocate memory for local statistics buffer");
		return 0;
	}

	stats_shadow = kzalloc(NSS_STATS_N2H_MAX * 8, GFP_KERNEL);
	if (unlikely(stats_shadow == NULL)) {
		nss_warning("Could not allocate memory for local shadow buffer");
		kfree(lbuf);
		return 0;
	}

	size_wr = scnprintf(lbuf, size_al, "n2h stats start:\n\n");

	/*
	 * Common node stats
	 */
	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "common node stats:\n\n");
	spin_lock_bh(&nss_top_main.stats_lock);
	for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
		stats_shadow[i] = nss_top_main.nss[0].stats_n2h[i];
	}

	spin_unlock_bh(&nss_top_main.stats_lock);

	for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_node[i], stats_shadow[i]);
	}

	/*
	 * N2H node stats
	 */
	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nn2h node stats:\n\n");
	spin_lock_bh(&nss_top_main.stats_lock);
	for (i = NSS_STATS_NODE_MAX; (i < NSS_STATS_N2H_MAX); i++) {
		stats_shadow[i] = nss_top_main.nss[0].stats_n2h[i];
	}

	spin_unlock_bh(&nss_top_main.stats_lock);

	for (i = 0; i < max; i++) {
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_n2h[i], stats_shadow[i + NSS_STATS_NODE_MAX]);
	}

	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nn2h stats end\n\n");
	bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
	kfree(lbuf);
	kfree(stats_shadow);

	return bytes_read;
}

/*
 * nss_stats_lso_rx_read()
 *	Read LSO_RX stats
 */
static ssize_t nss_stats_lso_rx_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
{
	int32_t i;

	/*
	 * max output lines = #stats + start tag line + end tag line + three blank lines
	 */
	uint32_t max_output_lines = (NSS_STATS_NODE_MAX + 2) + (NSS_STATS_LSO_RX_MAX + 3) + 5;
	size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
	size_t size_wr = 0;
	ssize_t bytes_read = 0;
	uint64_t *stats_shadow;

	char *lbuf = kzalloc(size_al, GFP_KERNEL);
	if (unlikely(lbuf == NULL)) {
		nss_warning("Could not allocate memory for local statistics buffer");
		return 0;
	}

	stats_shadow = kzalloc(NSS_STATS_LSO_RX_MAX * 8, GFP_KERNEL);
	if (unlikely(stats_shadow == NULL)) {
		nss_warning("Could not allocate memory for local shadow buffer");
		kfree(lbuf);
		return 0;
	}

	size_wr = scnprintf(lbuf, size_al, "lso_rx stats start:\n\n");

	/*
	 * Common node stats
	 */
	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "common node stats:\n\n");
	spin_lock_bh(&nss_top_main.stats_lock);
	for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
		stats_shadow[i] = nss_top_main.stats_node[NSS_LSO_RX_INTERFACE][i];
	}

	spin_unlock_bh(&nss_top_main.stats_lock);

	for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_node[i], stats_shadow[i]);
	}

	/*
	 * lso_rx node stats
	 */
	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nlso_rx node stats:\n\n");
	spin_lock_bh(&nss_top_main.stats_lock);
	for (i = 0; (i < NSS_STATS_LSO_RX_MAX); i++) {
		stats_shadow[i] = nss_top_main.stats_lso_rx[i];
	}

	spin_unlock_bh(&nss_top_main.stats_lock);

	for (i = 0; i < NSS_STATS_LSO_RX_MAX; i++) {
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_lso_rx[i], stats_shadow[i]);
	}

	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nlso_rx stats end\n\n");
	bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
	kfree(lbuf);
	kfree(stats_shadow);

	return bytes_read;
}

/*
 * nss_stats_drv_read()
 *	Read HLOS driver stats
 */
static ssize_t nss_stats_drv_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
{
	int32_t i;

	/*
	 * max output lines = #stats + start tag line + end tag line + three blank lines
	 */
	uint32_t max_output_lines = NSS_STATS_DRV_MAX + 5;
	size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
	size_t size_wr = 0;
	ssize_t bytes_read = 0;
	uint64_t *stats_shadow;

	char *lbuf = kzalloc(size_al, GFP_KERNEL);
	if (unlikely(lbuf == NULL)) {
		nss_warning("Could not allocate memory for local statistics buffer");
		return 0;
	}

	stats_shadow = kzalloc(NSS_STATS_DRV_MAX * 8, GFP_KERNEL);
	if (unlikely(stats_shadow == NULL)) {
		nss_warning("Could not allocate memory for local shadow buffer");
		kfree(lbuf);
		return 0;
	}

	size_wr = scnprintf(lbuf, size_al, "drv stats start:\n\n");
	spin_lock_bh(&nss_top_main.stats_lock);
	for (i = 0; (i < NSS_STATS_DRV_MAX); i++) {
		stats_shadow[i] = nss_top_main.stats_drv[i];
	}

	spin_unlock_bh(&nss_top_main.stats_lock);

	for (i = 0; (i < NSS_STATS_DRV_MAX); i++) {
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_drv[i], stats_shadow[i]);
	}

	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\ndrv stats end\n\n");
	bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
	kfree(lbuf);
	kfree(stats_shadow);

	return bytes_read;
}

/*
 * nss_stats_pppoe_read()
 *	Read PPPoE stats
 */
static ssize_t nss_stats_pppoe_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
{
	int32_t i, j, k;

	/*
	 * max output lines = #stats + start tag line + end tag line + three blank lines
	 */
	uint32_t max_output_lines = (NSS_STATS_NODE_MAX + 2) + (NSS_STATS_PPPOE_MAX + 3) +
					((NSS_MAX_PHYSICAL_INTERFACES * NSS_PPPOE_NUM_SESSION_PER_INTERFACE * (NSS_PPPOE_EXCEPTION_EVENT_MAX + 5)) + 3) + 5;
	size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
	size_t size_wr = 0;
	ssize_t bytes_read = 0;
	uint64_t *stats_shadow;

	char *lbuf = kzalloc(size_al, GFP_KERNEL);
	if (unlikely(lbuf == NULL)) {
		nss_warning("Could not allocate memory for local statistics buffer");
		return 0;
	}

	stats_shadow = kzalloc(64 * 8, GFP_KERNEL);
	if (unlikely(stats_shadow == NULL)) {
		nss_warning("Could not allocate memory for local shadow buffer");
		kfree(lbuf);
		return 0;
	}

	size_wr = scnprintf(lbuf, size_al, "pppoe stats start:\n\n");

	/*
	 * Common node stats
	 */
	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "common node stats:\n\n");
	spin_lock_bh(&nss_top_main.stats_lock);
	for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
		stats_shadow[i] = nss_top_main.stats_node[NSS_PPPOE_RX_INTERFACE][i];
	}

	spin_unlock_bh(&nss_top_main.stats_lock);

	for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_node[i], stats_shadow[i]);
	}

	/*
	 * PPPoE node stats
	 */
	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "pppoe node stats:\n\n");
	spin_lock_bh(&nss_top_main.stats_lock);
	for (i = 0; (i < NSS_STATS_PPPOE_MAX); i++) {
		stats_shadow[i] = nss_top_main.stats_pppoe[i];
	}

	spin_unlock_bh(&nss_top_main.stats_lock);

	for (i = 0; (i < NSS_STATS_PPPOE_MAX); i++) {
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_pppoe[i], stats_shadow[i]);
	}

	/*
	 * Exception stats
	 */
	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nException PPPoE:\n\n");

	for (j = 0; j < NSS_MAX_PHYSICAL_INTERFACES; j++) {
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nInterface %d:\n\n", j);

		spin_lock_bh(&nss_top_main.stats_lock);
		for (k = 0; k < NSS_PPPOE_NUM_SESSION_PER_INTERFACE; k++) {
			for (i = 0; (i < NSS_PPPOE_EXCEPTION_EVENT_MAX); i++) {
				stats_shadow_pppoe_except[k][i] = nss_top_main.stats_if_exception_pppoe[j][k][i];
			}
		}

		spin_unlock_bh(&nss_top_main.stats_lock);

		for (k = 0; k < NSS_PPPOE_NUM_SESSION_PER_INTERFACE; k++) {
			size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "%d. Session\n", k);
			for (i = 0; (i < NSS_PPPOE_EXCEPTION_EVENT_MAX); i++) {
				size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
						"%s = %llu\n",
						nss_stats_str_if_exception_pppoe[i],
						stats_shadow_pppoe_except[k][i]);
			}
		}

		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\npppoe stats end\n\n");
	}

	bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
	kfree(lbuf);
	kfree(stats_shadow);

	return bytes_read;
}

/*
 * nss_stats_gmac_read()
 *	Read GMAC stats
 */
static ssize_t nss_stats_gmac_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
{
	uint32_t i, id;

	/*
	 * max output lines = ((#stats + start tag + one blank) * #GMACs) + start/end tag + 3 blank
	 */
	uint32_t max_output_lines = ((NSS_STATS_GMAC_MAX + 2) * NSS_MAX_PHYSICAL_INTERFACES) + 5;
	size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
	size_t size_wr = 0;
	ssize_t bytes_read = 0;
	uint64_t *stats_shadow;

	char *lbuf = kzalloc(size_al, GFP_KERNEL);
	if (unlikely(lbuf == NULL)) {
		nss_warning("Could not allocate memory for local statistics buffer");
		return 0;
	}

	stats_shadow = kzalloc(NSS_STATS_GMAC_MAX * 8, GFP_KERNEL);
	if (unlikely(stats_shadow == NULL)) {
		nss_warning("Could not allocate memory for local shadow buffer");
		kfree(lbuf);
		return 0;
	}

	size_wr = scnprintf(lbuf, size_al, "gmac stats start:\n\n");

	for (id = 0; id < NSS_MAX_PHYSICAL_INTERFACES; id++) {
		spin_lock_bh(&nss_top_main.stats_lock);
		for (i = 0; (i < NSS_STATS_GMAC_MAX); i++) {
			stats_shadow[i] = nss_top_main.stats_gmac[id][i];
		}

		spin_unlock_bh(&nss_top_main.stats_lock);

		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "GMAC ID: %d\n", id);
		for (i = 0; (i < NSS_STATS_GMAC_MAX); i++) {
			size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_gmac[i], stats_shadow[i]);
		}
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n");
	}

	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\ngmac stats end\n\n");
	bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
	kfree(lbuf);
	kfree(stats_shadow);

	return bytes_read;
}

/*
 * nss_stats_sjack_read()
 *	Read SJACK stats
 */
static ssize_t nss_stats_sjack_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
{
	int32_t i;
	/*
	 * max output lines = #stats + start tag line + end tag line + three blank lines
	 */
	uint32_t max_output_lines = NSS_STATS_NODE_MAX + 5;
	size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
	size_t size_wr = 0;
	ssize_t bytes_read = 0;
	uint64_t *stats_shadow;

	char *lbuf = kzalloc(size_al, GFP_KERNEL);
	if (unlikely(lbuf == NULL)) {
		nss_warning("Could not allocate memory for local statistics buffer");
		return 0;
	}

	stats_shadow = kzalloc(NSS_STATS_NODE_MAX * 8, GFP_KERNEL);
	if (unlikely(stats_shadow == NULL)) {
		nss_warning("Could not allocate memory for local shadow buffer");
		kfree(lbuf);
		return 0;
	}

	size_wr = scnprintf(lbuf, size_al, "sjack stats start:\n\n");

	/*
	 * Common node stats
	 */
	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "common node stats:\n\n");
	spin_lock_bh(&nss_top_main.stats_lock);
	for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
		stats_shadow[i] = nss_top_main.stats_node[NSS_SJACK_INTERFACE][i];
	}

	spin_unlock_bh(&nss_top_main.stats_lock);

	for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
				"%s = %llu\n", nss_stats_str_node[i], stats_shadow[i]);
	}

	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nsjack stats end\n\n");

	bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
	kfree(lbuf);
	kfree(stats_shadow);

	return bytes_read;
}

/*
 * Make a row for CAPWAP encap stats.
 */
static ssize_t nss_stats_capwap_encap(char *line, int len, int i, struct nss_capwap_tunnel_stats *s)
{
	char *header[] = { "TX Packets", "TX Bytes", "TX Drops", "Fragments", "QFull", "MemFail", "Unknown" };
	uint64_t tcnt = 0;

	switch (i) {
	case 0:
		tcnt = s->pnode_stats.tx_packets;
		break;
	case 1:
		tcnt = s->pnode_stats.tx_bytes;
		break;
	case 2:
		tcnt = s->tx_dropped;
		break;
	case 3:
		tcnt = s->tx_segments;
		break;
	case 4:
		tcnt = s->tx_queue_full_drops;
		break;
	case 5:
		tcnt = s->tx_mem_failure_drops;
		break;
	default:
		i = 6;
		break;
	}

	return (snprintf(line, len, "%14s %llu\n", header[i], tcnt));
}

/*
 * Make a row for CAPWAP decap stats.
 */
static ssize_t nss_stats_capwap_decap(char *line, int len, int i, struct nss_capwap_tunnel_stats *s)
{
	char *header[] = { "RX Packets", "RX Bytes", "RX Dropped", "DTLS pkts", "Fragments", "OSzDrop", "FTimeout", "FDup", "QFull", "MemFail", "Unknown" };
	uint64_t tcnt = 0;

	switch (i) {
	case 0:
		tcnt = s->pnode_stats.rx_packets;
		break;
	case 1:
		tcnt = s->pnode_stats.rx_bytes;
		break;
	case 2:
		tcnt = s->pnode_stats.rx_dropped;
		break;
	case 3:
		tcnt = s->dtls_pkts;
		break;
	case 4:
		tcnt = s->rx_segments;
		break;
	case 5:
		tcnt = s->oversize_drops;
		break;
	case 6:
		tcnt = s->frag_timeout_drops;
		break;
	case 7:
		tcnt = s->rx_dup_frag;
		break;
	case 8:
		tcnt = s->rx_queue_full_drops;
		return (snprintf(line, len, "%14s: %llu (n2h: %llu)\n", header[i], tcnt, s->rx_n2h_queue_full_drops));
	case 9:
		tcnt = s->rx_mem_failure_drops;
		break;
	default:
		i = 10;
		break;
	}

	return (snprintf(line, len, "%14s: %llu\n", header[i], tcnt));
}

/*
 * nss_stats_capwap_read()
 *	Read CAPWAP stats (type == 1 selects encap rows, otherwise decap rows)
 */
static ssize_t nss_stats_capwap_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos, uint16_t type)
{
	struct nss_stats_data *data = fp->private_data;
	ssize_t bytes_read = 0;
	struct nss_capwap_tunnel_stats stats;
	size_t bytes;
	char line[80];
	int start, end;
	uint32_t if_num = NSS_DYNAMIC_IF_START;
	uint32_t max_if_num = NSS_DYNAMIC_IF_START + NSS_MAX_DYNAMIC_INTERFACES;

	if (data) {
		if_num = data->if_num;
	}

	/*
	 * If we are done accommodating all the CAPWAP tunnels.
	 */
	if (if_num > max_if_num) {
		return 0;
	}

	for (; if_num <= max_if_num; if_num++) {
		bool isthere;

		if (nss_is_dynamic_interface(if_num) == false) {
			continue;
		}

		if (nss_dynamic_interface_get_type(if_num) != NSS_DYNAMIC_INTERFACE_TYPE_CAPWAP) {
			continue;
		}

		/*
		 * If CAPWAP tunnel does not exist, then isthere will be false.
		 */
		isthere = nss_capwap_get_stats(if_num, &stats);
		if (!isthere) {
			continue;
		}

		bytes = snprintf(line, sizeof(line), "(%2d) %9s %s\n", if_num, "Stats", "Total");
		if ((bytes_read + bytes) > sz) {
			break;
		}

		if (copy_to_user(ubuf + bytes_read, line, bytes) != 0) {
			bytes_read = -EFAULT;
			goto fail;
		}
		bytes_read += bytes;
		start = 0;
		if (type == 1) {
			end = 5;	/* encap */
		} else {
			end = 9;	/* decap */
		}
		while (bytes_read < sz && start < end) {
			if (type == 1) {
				bytes = nss_stats_capwap_encap(line, sizeof(line), start, &stats);
			} else {
				bytes = nss_stats_capwap_decap(line, sizeof(line), start, &stats);
			}

			if ((bytes_read + bytes) > sz)
				break;

			if (copy_to_user(ubuf + bytes_read, line, bytes) != 0) {
				bytes_read = -EFAULT;
				goto fail;
			}

			bytes_read += bytes;
			start++;
		}
	}

	if (bytes_read > 0) {
		*ppos = bytes_read;
	}

	if (data) {
		data->if_num = if_num;
	}
fail:
	return bytes_read;
}

/*
 * nss_stats_capwap_decap_read()
 *	Read CAPWAP decap stats
 */
static ssize_t nss_stats_capwap_decap_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
{
	return (nss_stats_capwap_read(fp, ubuf, sz, ppos, 0));
}

/*
 * nss_stats_capwap_encap_read()
 *	Read CAPWAP encap stats
 */
static ssize_t nss_stats_capwap_encap_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
{
	return (nss_stats_capwap_read(fp, ubuf, sz, ppos, 1));
}

/*
 * nss_stats_gre_redir()
 *	Make a row for GRE_REDIR stats.
 */
static ssize_t nss_stats_gre_redir(char *line, int len, int i, struct nss_gre_redir_tunnel_stats *s)
{
	char *header[] = { "TX Packets", "TX Bytes", "TX Drops", "RX Packets", "RX Bytes", "Rx Drops" };
	uint64_t tcnt = 0;

	switch (i) {
	case 0:
		tcnt = s->node_stats.tx_packets;
		break;
	case 1:
		tcnt = s->node_stats.tx_bytes;
		break;
	case 2:
		tcnt = s->tx_dropped;
		break;
	case 3:
		tcnt = s->node_stats.rx_packets;
		break;
	case 4:
		tcnt = s->node_stats.rx_bytes;
		break;
	case 5:
		tcnt = s->node_stats.rx_dropped;
		break;
	default:
		i = 6;
		break;
	}

	return (snprintf(line, len, "%s = %llu\n", header[i], tcnt));
}

/*
 * nss_stats_gre_redir_read()
 *	READ gre_redir tunnel stats.
 */
static ssize_t nss_stats_gre_redir_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
{
	struct nss_stats_data *data = fp->private_data;
	ssize_t bytes_read = 0;
	struct nss_gre_redir_tunnel_stats stats;
	size_t bytes;
	char line[80];
	int start, end;
	int index = 0;

	if (data) {
		index = data->index;
	}

	/*
	 * If we are done accommodating all the GRE_REDIR tunnels.
	 */
	if (index >= NSS_GRE_REDIR_MAX_INTERFACES) {
		return 0;
	}

	for (; index < NSS_GRE_REDIR_MAX_INTERFACES; index++) {
		bool isthere;

		/*
		 * If gre_redir tunnel does not exist, then isthere will be false.
		 */
		isthere = nss_gre_redir_get_stats(index, &stats);
		if (!isthere) {
			continue;
		}

		bytes = snprintf(line, sizeof(line), "\nTunnel if_num: %2d\n", stats.if_num);
		if ((bytes_read + bytes) > sz) {
			break;
		}

		if (copy_to_user(ubuf + bytes_read, line, bytes) != 0) {
			bytes_read = -EFAULT;
			goto fail;
		}
		bytes_read += bytes;
		start = 0;
		end = 6;
		while (bytes_read < sz && start < end) {
			bytes = nss_stats_gre_redir(line, sizeof(line), start, &stats);

			if ((bytes_read + bytes) > sz)
				break;

			if (copy_to_user(ubuf + bytes_read, line, bytes) != 0) {
				bytes_read = -EFAULT;
				goto fail;
			}

			bytes_read += bytes;
			start++;
		}
	}

	if (bytes_read > 0) {
		*ppos = bytes_read;
	}

	if (data) {
		data->index = index;
	}

fail:
	return bytes_read;
}

/*
 * nss_stats_open()
 */
static int nss_stats_open(struct inode *inode, struct file *filp)
{
	struct nss_stats_data *data = NULL;

	data = kzalloc(sizeof(struct nss_stats_data), GFP_KERNEL);
	if (!data) {
		return -ENOMEM;
	}
	memset(data, 0, sizeof (struct nss_stats_data));
	data->if_num = NSS_DYNAMIC_IF_START;
	data->index = 0;
	filp->private_data = data;

	return 0;
}

/*
 * nss_stats_release()
 */
static int nss_stats_release(struct inode *inode, struct file *filp)
{
	struct nss_stats_data *data = filp->private_data;

	if (data) {
		kfree(data);
	}

	return 0;
}

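/*
 * NSS_STATS_DECLARE_FILE_OPERATIONS()
 *	Declares the debugfs file_operations structure for a given stats read handler.
 */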
#define NSS_STATS_DECLARE_FILE_OPERATIONS(name) \
static const struct file_operations nss_stats_##name##_ops = { \
	.open = nss_stats_open, \
	.read = nss_stats_##name##_read, \
	.llseek = generic_file_llseek, \
	.release = nss_stats_release, \
};

/*
 * ipv4_stats_ops
 */
NSS_STATS_DECLARE_FILE_OPERATIONS(ipv4)

/*
 * ipv4_reasm_stats_ops
 */
NSS_STATS_DECLARE_FILE_OPERATIONS(ipv4_reasm)

/*
 * ipv6_stats_ops
 */
NSS_STATS_DECLARE_FILE_OPERATIONS(ipv6)

/*
 * n2h_stats_ops
 */
NSS_STATS_DECLARE_FILE_OPERATIONS(n2h)

/*
 * lso_rx_stats_ops
 */
NSS_STATS_DECLARE_FILE_OPERATIONS(lso_rx)

/*
 * drv_stats_ops
 */
NSS_STATS_DECLARE_FILE_OPERATIONS(drv)

/*
 * pppoe_stats_ops
 */
NSS_STATS_DECLARE_FILE_OPERATIONS(pppoe)

/*
 * gmac_stats_ops
 */
NSS_STATS_DECLARE_FILE_OPERATIONS(gmac)

/*
 * capwap_stats_ops
 */
NSS_STATS_DECLARE_FILE_OPERATIONS(capwap_encap)
NSS_STATS_DECLARE_FILE_OPERATIONS(capwap_decap)

/*
 * eth_rx_stats_ops
 */
NSS_STATS_DECLARE_FILE_OPERATIONS(eth_rx)

/*
 * gre_redir_ops
 */
NSS_STATS_DECLARE_FILE_OPERATIONS(gre_redir)

/*
 * sjack_stats_ops
 */
NSS_STATS_DECLARE_FILE_OPERATIONS(sjack)

/*
 * nss_stats_init()
 *	Enable NSS statistics
 */
void nss_stats_init(void)
{
	/*
	 * NSS driver entry
	 */
	nss_top_main.top_dentry = debugfs_create_dir("qca-nss-drv", NULL);
	if (unlikely(nss_top_main.top_dentry == NULL)) {
		nss_warning("Failed to create qca-nss-drv directory in debugfs");

		/*
		 * Non-availability of the debugfs directory is not a catastrophe.
		 * We can still go ahead with other initialization.
		 */
		return;
	}

	nss_top_main.stats_dentry = debugfs_create_dir("stats", nss_top_main.top_dentry);
	if (unlikely(nss_top_main.stats_dentry == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats directory in debugfs");

		/*
		 * Non-availability of the debugfs directory is not a catastrophe.
		 * We can still go ahead with the rest of initialization.
		 */
		return;
	}

	/*
	 * Create files to obtain statistics
	 */

	/*
	 * ipv4_stats
	 */
	nss_top_main.ipv4_dentry = debugfs_create_file("ipv4", 0400,
						nss_top_main.stats_dentry, &nss_top_main, &nss_stats_ipv4_ops);
	if (unlikely(nss_top_main.ipv4_dentry == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats/ipv4 file in debugfs");
		return;
	}

	/*
	 * ipv4_reasm_stats
	 */
	nss_top_main.ipv4_reasm_dentry = debugfs_create_file("ipv4_reasm", 0400,
						nss_top_main.stats_dentry, &nss_top_main, &nss_stats_ipv4_reasm_ops);
	if (unlikely(nss_top_main.ipv4_reasm_dentry == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats/ipv4_reasm file in debugfs");
		return;
	}

	/*
	 * ipv6_stats
	 */
	nss_top_main.ipv6_dentry = debugfs_create_file("ipv6", 0400,
						nss_top_main.stats_dentry, &nss_top_main, &nss_stats_ipv6_ops);
	if (unlikely(nss_top_main.ipv6_dentry == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats/ipv6 file in debugfs");
		return;
	}

	/*
	 * eth_rx_stats
	 */
	nss_top_main.eth_rx_dentry = debugfs_create_file("eth_rx", 0400,
						nss_top_main.stats_dentry, &nss_top_main, &nss_stats_eth_rx_ops);
	if (unlikely(nss_top_main.eth_rx_dentry == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats/eth_rx file in debugfs");
		return;
	}

	/*
	 * n2h_stats
	 */
	nss_top_main.n2h_dentry = debugfs_create_file("n2h", 0400,
						nss_top_main.stats_dentry, &nss_top_main, &nss_stats_n2h_ops);
	if (unlikely(nss_top_main.n2h_dentry == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats/n2h file in debugfs");
		return;
	}

	/*
	 * lso_rx_stats
	 */
	nss_top_main.lso_rx_dentry = debugfs_create_file("lso_rx", 0400,
						nss_top_main.stats_dentry, &nss_top_main, &nss_stats_lso_rx_ops);
	if (unlikely(nss_top_main.lso_rx_dentry == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats/lso_rx file in debugfs");
		return;
	}

	/*
	 * drv_stats
	 */
	nss_top_main.drv_dentry = debugfs_create_file("drv", 0400,
						nss_top_main.stats_dentry, &nss_top_main, &nss_stats_drv_ops);
	if (unlikely(nss_top_main.drv_dentry == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats/drv file in debugfs");
		return;
	}

	/*
	 * pppoe_stats
	 */
	nss_top_main.pppoe_dentry = debugfs_create_file("pppoe", 0400,
						nss_top_main.stats_dentry, &nss_top_main, &nss_stats_pppoe_ops);
	if (unlikely(nss_top_main.pppoe_dentry == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats/pppoe file in debugfs");
		return;
	}

	/*
	 * gmac_stats
	 */
	nss_top_main.gmac_dentry = debugfs_create_file("gmac", 0400,
						nss_top_main.stats_dentry, &nss_top_main, &nss_stats_gmac_ops);
	if (unlikely(nss_top_main.gmac_dentry == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats/gmac file in debugfs");
		return;
	}

	/*
	 * CAPWAP stats.
	 */
	nss_top_main.capwap_encap_dentry = debugfs_create_file("capwap_encap", 0400,
						nss_top_main.stats_dentry, &nss_top_main, &nss_stats_capwap_encap_ops);
	if (unlikely(nss_top_main.capwap_encap_dentry == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats/capwap_encap file in debugfs");
		return;
	}

	nss_top_main.capwap_decap_dentry = debugfs_create_file("capwap_decap", 0400,
						nss_top_main.stats_dentry, &nss_top_main, &nss_stats_capwap_decap_ops);
	if (unlikely(nss_top_main.capwap_decap_dentry == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats/capwap_decap file in debugfs");
		return;
	}

	/*
	 * GRE_REDIR stats
	 */
	nss_top_main.gre_redir_dentry = debugfs_create_file("gre_redir", 0400,
						nss_top_main.stats_dentry, &nss_top_main, &nss_stats_gre_redir_ops);
	if (unlikely(nss_top_main.gre_redir_dentry == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats/gre_redir file in debugfs");
		return;
	}

	/*
	 * SJACK stats
	 */
	nss_top_main.sjack_dentry = debugfs_create_file("sjack", 0400,
						nss_top_main.stats_dentry, &nss_top_main, &nss_stats_sjack_ops);
	if (unlikely(nss_top_main.sjack_dentry == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats/sjack file in debugfs");
		return;
	}

	nss_log_init();
}


/*
 * nss_stats_clean()
 *	Cleanup NSS statistics files
 */
void nss_stats_clean(void)
{
	/*
	 * Remove debugfs tree
	 */
	if (likely(nss_top_main.top_dentry != NULL)) {
		debugfs_remove_recursive(nss_top_main.top_dentry);
	}
}