/*
 **************************************************************************
 * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all copies.
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 **************************************************************************
 */

/*
 * nss_stats.c
 *	NSS stats APIs
 *
 */

#include "nss_core.h"
#include "nss_dtls_stats.h"
#include "nss_gre_tunnel_stats.h"

/*
 * Maximum string length:
 * This should be equal to maximum string size of any stats
 * inclusive of stats value
 */
#define NSS_STATS_MAX_STR_LENGTH 96
#define NSS_STATS_WIFILI_MAX (NSS_STATS_WIFILI_TXRX_MAX + NSS_STATS_WIFILI_TCL_MAX + \
				NSS_STATS_WIFILI_TX_DESC_FREE_MAX + NSS_STATS_WIFILI_REO_MAX + \
				NSS_STATS_WIFILI_TX_DESC_MAX + NSS_STATS_WIFILI_EXT_TX_DESC_MAX + \
				NSS_STATS_WIFILI_RX_DESC_MAX + NSS_STATS_WIFILI_RXDMA_DESC_MAX)

extern int32_t nss_tx_rx_virt_if_copy_stats(int32_t if_num, int i, char *line);

uint64_t stats_shadow_pppoe_except[NSS_PPPOE_NUM_SESSION_PER_INTERFACE][NSS_PPPOE_EXCEPTION_EVENT_MAX];

/*
 * Private data for every file descriptor
 */
struct nss_stats_data {
	uint32_t if_num;	/**< Interface number for stats */
	uint32_t index;		/**< Index for GRE_REDIR stats */
	uint32_t edma_id;	/**< EDMA port ID or ring ID */
};
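
/*
 * Illustrative sketch, compiled out: the per-descriptor context above is
 * normally allocated in a debugfs open handler and hung off
 * filp->private_data, which is how the read handlers below obtain their
 * if_num/edma_id. The handler name and the use of inode->i_private to carry
 * the identifier are assumptions for illustration, not the driver's actual
 * wiring.
 */
#if 0
static int nss_stats_open_example(struct inode *inode, struct file *filp)
{
	struct nss_stats_data *data;

	data = kzalloc(sizeof(struct nss_stats_data), GFP_KERNEL);
	if (unlikely(data == NULL)) {
		return -ENOMEM;
	}

	/*
	 * Assumption: the interface/ring identifier was stashed in i_private
	 * when the debugfs file was created.
	 */
	data->if_num = (uint32_t)(unsigned long)inode->i_private;
	filp->private_data = data;
	return 0;
}
#endif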

/*
 * Statistics structures
 */

/*
 * nss_stats_str_ipv4
 *	IPv4 stats strings
 */
static int8_t *nss_stats_str_ipv4[NSS_STATS_IPV4_MAX] = {
	"rx_pkts",
	"rx_bytes",
	"tx_pkts",
	"tx_bytes",
	"create_requests",
	"create_collisions",
	"create_invalid_interface",
	"destroy_requests",
	"destroy_misses",
	"hash_hits",
	"hash_reorders",
	"flushes",
	"evictions",
	"fragmentations",
	"mc_create_requests",
	"mc_update_requests",
	"mc_create_invalid_interface",
	"mc_destroy_requests",
	"mc_destroy_misses",
	"mc_flushes",
};

/*
 * nss_stats_str_ipv4_reasm
 *	IPv4 reassembly stats strings
 */
static int8_t *nss_stats_str_ipv4_reasm[NSS_STATS_IPV4_REASM_MAX] = {
	"evictions",
	"alloc_fails",
	"timeouts",
};

/*
 * nss_stats_str_ipv6
 *	IPv6 stats strings
 */
static int8_t *nss_stats_str_ipv6[NSS_STATS_IPV6_MAX] = {
	"rx_pkts",
	"rx_bytes",
	"tx_pkts",
	"tx_bytes",
	"create_requests",
	"create_collisions",
	"create_invalid_interface",
	"destroy_requests",
	"destroy_misses",
	"hash_hits",
	"hash_reorders",
	"flushes",
	"evictions",
	"fragmentations",
	"frag_fails",
	"mc_create_requests",
	"mc_update_requests",
	"mc_create_invalid_interface",
	"mc_destroy_requests",
	"mc_destroy_misses",
	"mc_flushes",
};

/*
 * nss_stats_str_ipv6_reasm
 *	IPv6 reassembly stats strings
 */
static int8_t *nss_stats_str_ipv6_reasm[NSS_STATS_IPV6_REASM_MAX] = {
	"alloc_fails",
	"timeouts",
	"discards",
};

/*
 * nss_stats_str_n2h
 *	N2H stats strings
 */
static int8_t *nss_stats_str_n2h[NSS_STATS_N2H_MAX] = {
	"queue_dropped",
	"ticks",
	"worst_ticks",
	"iterations",
	"pbuf_ocm_alloc_fails",
	"pbuf_ocm_free_count",
	"pbuf_ocm_total_count",
	"pbuf_default_alloc_fails",
	"pbuf_default_free_count",
	"pbuf_default_total_count",
	"payload_fails",
	"payload_free_count",
	"h2n_control_packets",
	"h2n_control_bytes",
	"n2h_control_packets",
	"n2h_control_bytes",
	"h2n_data_packets",
	"h2n_data_bytes",
	"n2h_data_packets",
	"n2h_data_bytes",
	"n2h_tot_payloads",
	"n2h_data_interface_invalid",
};

/*
 * nss_stats_str_lso_rx
 *	LSO_RX stats strings
 */
static int8_t *nss_stats_str_lso_rx[NSS_STATS_LSO_RX_MAX] = {
	"tx_dropped",
	"dropped",
	"pbuf_alloc_fail",
	"pbuf_reference_fail"
};

/*
 * nss_stats_str_drv
 *	Host driver stats strings
 */
static int8_t *nss_stats_str_drv[NSS_STATS_DRV_MAX] = {
	"nbuf_alloc_errors",
	"tx_queue_full[0]",
	"tx_queue_full[1]",
	"tx_buffers_empty",
	"tx_buffers_pkt",
	"tx_buffers_cmd",
	"tx_buffers_crypto",
	"tx_buffers_reuse",
	"rx_buffers_empty",
	"rx_buffers_pkt",
	"rx_buffers_cmd_resp",
	"rx_buffers_status_sync",
	"rx_buffers_crypto",
	"rx_buffers_virtual",
	"tx_skb_simple",
	"tx_skb_nr_frags",
	"tx_skb_fraglist",
	"rx_skb_simple",
	"rx_skb_nr_frags",
	"rx_skb_fraglist",
	"rx_bad_desciptor",
	"nss_skb_count",
	"rx_chain_seg_processed",
	"rx_frag_seg_processed"
};

/*
 * nss_stats_str_pppoe
 *	PPPoE stats strings
 */
static int8_t *nss_stats_str_pppoe[NSS_STATS_PPPOE_MAX] = {
	"create_requests",
	"create_failures",
	"destroy_requests",
	"destroy_misses"
};

/*
 * nss_stats_str_gmac
 *	GMAC stats strings
 */
static int8_t *nss_stats_str_gmac[NSS_STATS_GMAC_MAX] = {
	"ticks",
	"worst_ticks",
	"iterations"
};

/*
 * nss_stats_str_edma_tx
 */
static int8_t *nss_stats_str_edma_tx[NSS_STATS_EDMA_TX_MAX] = {
	"tx_err",
	"tx_dropped",
	"desc_cnt"
};

/*
 * nss_stats_str_edma_rx
 */
static int8_t *nss_stats_str_edma_rx[NSS_STATS_EDMA_RX_MAX] = {
	"rx_csum_err",
	"desc_cnt"
};

/*
 * nss_stats_str_edma_txcmpl
 */
static int8_t *nss_stats_str_edma_txcmpl[NSS_STATS_EDMA_TXCMPL_MAX] = {
	"desc_cnt"
};

/*
 * nss_stats_str_edma_rxfill
 */
static int8_t *nss_stats_str_edma_rxfill[NSS_STATS_EDMA_RXFILL_MAX] = {
	"desc_cnt"
};

/*
 * nss_stats_str_edma_port_type
 */
static int8_t *nss_stats_str_edma_port_type[NSS_EDMA_PORT_TYPE_MAX] = {
	"physical_port",
	"virtual_port"
};

/*
 * nss_stats_str_edma_port_ring_map
 */
static int8_t *nss_stats_str_edma_port_ring_map[NSS_EDMA_PORT_RING_MAP_MAX] = {
	"rx_ring",
	"tx_ring"
};

/*
 * nss_stats_str_node
 *	Interface stats strings per node
 */
static int8_t *nss_stats_str_node[NSS_STATS_NODE_MAX] = {
	"rx_packets",
	"rx_bytes",
	"rx_dropped",
	"tx_packets",
	"tx_bytes"
};

/*
 * nss_stats_str_eth_rx
 *	eth_rx stats strings
 */
static int8_t *nss_stats_str_eth_rx[NSS_STATS_ETH_RX_MAX] = {
	"ticks",
	"worst_ticks",
	"iterations"
};

/*
 * nss_stats_str_if_exception_eth_rx
 *	Interface stats strings for eth_rx exceptions
 */
static int8_t *nss_stats_str_if_exception_eth_rx[NSS_EXCEPTION_EVENT_ETH_RX_MAX] = {
	"UNKNOWN_L3_PROTOCOL",
	"ETH_HDR_MISSING",
	"VLAN_MISSING",
	"TRUSTSEC_HDR_MISSING"
};

/*
 * nss_stats_str_if_exception_ipv4
 *	Interface stats strings for ipv4 exceptions
 */
static int8_t *nss_stats_str_if_exception_ipv4[NSS_EXCEPTION_EVENT_IPV4_MAX] = {
	"IPV4_ICMP_HEADER_INCOMPLETE",
	"IPV4_ICMP_UNHANDLED_TYPE",
	"IPV4_ICMP_IPV4_HEADER_INCOMPLETE",
	"IPV4_ICMP_IPV4_UDP_HEADER_INCOMPLETE",
	"IPV4_ICMP_IPV4_TCP_HEADER_INCOMPLETE",
	"IPV4_ICMP_IPV4_UNKNOWN_PROTOCOL",
	"IPV4_ICMP_NO_ICME",
	"IPV4_ICMP_FLUSH_TO_HOST",
	"IPV4_TCP_HEADER_INCOMPLETE",
	"IPV4_TCP_NO_ICME",
	"IPV4_TCP_IP_OPTION",
	"IPV4_TCP_IP_FRAGMENT",
	"IPV4_TCP_SMALL_TTL",
	"IPV4_TCP_NEEDS_FRAGMENTATION",
	"IPV4_TCP_FLAGS",
	"IPV4_TCP_SEQ_EXCEEDS_RIGHT_EDGE",
	"IPV4_TCP_SMALL_DATA_OFFS",
	"IPV4_TCP_BAD_SACK",
	"IPV4_TCP_BIG_DATA_OFFS",
	"IPV4_TCP_SEQ_BEFORE_LEFT_EDGE",
	"IPV4_TCP_ACK_EXCEEDS_RIGHT_EDGE",
	"IPV4_TCP_ACK_BEFORE_LEFT_EDGE",
	"IPV4_UDP_HEADER_INCOMPLETE",
	"IPV4_UDP_NO_ICME",
	"IPV4_UDP_IP_OPTION",
	"IPV4_UDP_IP_FRAGMENT",
	"IPV4_UDP_SMALL_TTL",
	"IPV4_UDP_NEEDS_FRAGMENTATION",
	"IPV4_WRONG_TARGET_MAC",
	"IPV4_HEADER_INCOMPLETE",
	"IPV4_BAD_TOTAL_LENGTH",
	"IPV4_BAD_CHECKSUM",
	"IPV4_NON_INITIAL_FRAGMENT",
	"IPV4_DATAGRAM_INCOMPLETE",
	"IPV4_OPTIONS_INCOMPLETE",
	"IPV4_UNKNOWN_PROTOCOL",
	"IPV4_ESP_HEADER_INCOMPLETE",
	"IPV4_ESP_NO_ICME",
	"IPV4_ESP_IP_OPTION",
	"IPV4_ESP_IP_FRAGMENT",
	"IPV4_ESP_SMALL_TTL",
	"IPV4_ESP_NEEDS_FRAGMENTATION",
	"IPV4_INGRESS_VID_MISMATCH",
	"IPV4_INGRESS_VID_MISSING",
	"IPV4_6RD_NO_ICME",
	"IPV4_6RD_IP_OPTION",
	"IPV4_6RD_IP_FRAGMENT",
	"IPV4_6RD_NEEDS_FRAGMENTATION",
	"IPV4_DSCP_MARKING_MISMATCH",
	"IPV4_VLAN_MARKING_MISMATCH",
	"IPV4_DEPRECATED",
	"IPV4_GRE_HEADER_INCOMPLETE",
	"IPV4_GRE_NO_ICME",
	"IPV4_GRE_IP_OPTION",
	"IPV4_GRE_IP_FRAGMENT",
	"IPV4_GRE_SMALL_TTL",
	"IPV4_GRE_NEEDS_FRAGMENTATION",
	"IPV4_PPTP_GRE_SESSION_MATCH_FAIL",
	"IPV4_PPTP_GRE_INVALID_PROTO",
	"IPV4_PPTP_GRE_NO_CME",
	"IPV4_PPTP_GRE_IP_OPTION",
	"IPV4_PPTP_GRE_IP_FRAGMENT",
	"IPV4_PPTP_GRE_SMALL_TTL",
	"IPV4_PPTP_GRE_NEEDS_FRAGMENTATION",
	"IPV4_DESTROY",
	"IPV4_FRAG_DF_SET",
	"IPV4_FRAG_FAIL",
	"IPV4_ICMP_IPV4_UDPLITE_HEADER_INCOMPLETE",
	"IPV4_UDPLITE_HEADER_INCOMPLETE",
	"IPV4_UDPLITE_NO_ICME",
	"IPV4_UDPLITE_IP_OPTION",
	"IPV4_UDPLITE_IP_FRAGMENT",
	"IPV4_UDPLITE_SMALL_TTL",
	"IPV4_UDPLITE_NEEDS_FRAGMENTATION",
	"IPV4_MC_UDP_NO_ICME",
	"IPV4_MC_MEM_ALLOC_FAILURE",
	"IPV4_MC_UPDATE_FAILURE",
	"IPV4_MC_PBUF_ALLOC_FAILURE"
};

/*
 * nss_stats_str_if_exception_ipv6
 *	Interface stats strings for ipv6 exceptions
 */
static int8_t *nss_stats_str_if_exception_ipv6[NSS_EXCEPTION_EVENT_IPV6_MAX] = {
	"IPV6_ICMP_HEADER_INCOMPLETE",
	"IPV6_ICMP_UNHANDLED_TYPE",
	"IPV6_ICMP_IPV6_HEADER_INCOMPLETE",
	"IPV6_ICMP_IPV6_UDP_HEADER_INCOMPLETE",
	"IPV6_ICMP_IPV6_TCP_HEADER_INCOMPLETE",
	"IPV6_ICMP_IPV6_UNKNOWN_PROTOCOL",
	"IPV6_ICMP_NO_ICME",
	"IPV6_ICMP_FLUSH_TO_HOST",
	"IPV6_TCP_HEADER_INCOMPLETE",
	"IPV6_TCP_NO_ICME",
	"IPV6_TCP_SMALL_HOP_LIMIT",
	"IPV6_TCP_NEEDS_FRAGMENTATION",
	"IPV6_TCP_FLAGS",
	"IPV6_TCP_SEQ_EXCEEDS_RIGHT_EDGE",
	"IPV6_TCP_SMALL_DATA_OFFS",
	"IPV6_TCP_BAD_SACK",
	"IPV6_TCP_BIG_DATA_OFFS",
	"IPV6_TCP_SEQ_BEFORE_LEFT_EDGE",
	"IPV6_TCP_ACK_EXCEEDS_RIGHT_EDGE",
	"IPV6_TCP_ACK_BEFORE_LEFT_EDGE",
	"IPV6_UDP_HEADER_INCOMPLETE",
	"IPV6_UDP_NO_ICME",
	"IPV6_UDP_SMALL_HOP_LIMIT",
	"IPV6_UDP_NEEDS_FRAGMENTATION",
	"IPV6_WRONG_TARGET_MAC",
	"IPV6_HEADER_INCOMPLETE",
	"IPV6_UNKNOWN_PROTOCOL",
	"IPV6_INGRESS_VID_MISMATCH",
	"IPV6_INGRESS_VID_MISSING",
	"IPV6_DSCP_MARKING_MISMATCH",
	"IPV6_VLAN_MARKING_MISMATCH",
	"IPV6_DEPRECATED",
	"IPV6_GRE_NO_ICME",
	"IPV6_GRE_NEEDS_FRAGMENTATION",
	"IPV6_GRE_SMALL_HOP_LIMIT",
	"IPV6_DESTROY",
	"IPV6_ICMP_IPV6_UDPLITE_HEADER_INCOMPLETE",
	"IPV6_UDPLITE_HEADER_INCOMPLETE",
	"IPV6_UDPLITE_NO_ICME",
	"IPV6_UDPLITE_SMALL_HOP_LIMIT",
	"IPV6_UDPLITE_NEEDS_FRAGMENTATION",
	"IPV6_MC_UDP_NO_ICME",
	"IPV6_MC_MEM_ALLOC_FAILURE",
	"IPV6_MC_UPDATE_FAILURE",
	"IPV6_MC_PBUF_ALLOC_FAILURE",
	"IPV6_ESP_HEADER_INCOMPLETE",
	"IPV6_ESP_NO_ICME",
	"IPV6_ESP_IP_FRAGMENT",
	"IPV6_ESP_SMALL_HOP_LIMIT",
	"IPV6_ESP_NEEDS_FRAGMENTATION"
};

/*
 * nss_stats_str_if_exception_pppoe
 *	Interface stats strings for PPPoE exceptions
 */
static int8_t *nss_stats_str_if_exception_pppoe[NSS_PPPOE_EXCEPTION_EVENT_MAX] = {
	"PPPOE_WRONG_VERSION_OR_TYPE",
	"PPPOE_WRONG_CODE",
	"PPPOE_HEADER_INCOMPLETE",
	"PPPOE_UNSUPPORTED_PPP_PROTOCOL",
	"PPPOE_DEPRECATED"
};

/*
 * nss_stats_str_wifi
 *	Wifi statistics strings
 */
static int8_t *nss_stats_str_wifi[NSS_STATS_WIFI_MAX] = {
	"RX_PACKETS",
	"RX_DROPPED",
	"TX_PACKETS",
	"TX_DROPPED",
	"TX_TRANSMIT_COMPLETED",
	"TX_MGMT_RECEIVED",
	"TX_MGMT_TRANSMITTED",
	"TX_MGMT_DROPPED",
	"TX_MGMT_COMPLETED",
	"TX_INV_PEER_ENQ_CNT",
	"RX_INV_PEER_RCV_CNT",
	"RX_PN_CHECK_FAILED",
	"RX_PKTS_DELIVERD",
	"RX_BYTES_DELIVERED",
	"TX_BYTES_COMPLETED",
	"RX_DELIVER_UNALIGNED_DROP_CNT",
	"TIDQ_ENQUEUE_CNT_0",
	"TIDQ_ENQUEUE_CNT_1",
	"TIDQ_ENQUEUE_CNT_2",
	"TIDQ_ENQUEUE_CNT_3",
	"TIDQ_ENQUEUE_CNT_4",
	"TIDQ_ENQUEUE_CNT_5",
	"TIDQ_ENQUEUE_CNT_6",
	"TIDQ_ENQUEUE_CNT_7",
	"TIDQ_DEQUEUE_CNT_0",
	"TIDQ_DEQUEUE_CNT_1",
	"TIDQ_DEQUEUE_CNT_2",
	"TIDQ_DEQUEUE_CNT_3",
	"TIDQ_DEQUEUE_CNT_4",
	"TIDQ_DEQUEUE_CNT_5",
	"TIDQ_DEQUEUE_CNT_6",
	"TIDQ_DEQUEUE_CNT_7",
	"TIDQ_ENQUEUE_FAIL_CNT_0",
	"TIDQ_ENQUEUE_FAIL_CNT_1",
	"TIDQ_ENQUEUE_FAIL_CNT_2",
	"TIDQ_ENQUEUE_FAIL_CNT_3",
	"TIDQ_ENQUEUE_FAIL_CNT_4",
	"TIDQ_ENQUEUE_FAIL_CNT_5",
	"TIDQ_ENQUEUE_FAIL_CNT_6",
	"TIDQ_ENQUEUE_FAIL_CNT_7",
	"TIDQ_TTL_EXPIRE_CNT_0",
	"TIDQ_TTL_EXPIRE_CNT_1",
	"TIDQ_TTL_EXPIRE_CNT_2",
	"TIDQ_TTL_EXPIRE_CNT_3",
	"TIDQ_TTL_EXPIRE_CNT_4",
	"TIDQ_TTL_EXPIRE_CNT_5",
	"TIDQ_TTL_EXPIRE_CNT_6",
	"TIDQ_TTL_EXPIRE_CNT_7",
	"TIDQ_DEQUEUE_REQ_CNT_0",
	"TIDQ_DEQUEUE_REQ_CNT_1",
	"TIDQ_DEQUEUE_REQ_CNT_2",
	"TIDQ_DEQUEUE_REQ_CNT_3",
	"TIDQ_DEQUEUE_REQ_CNT_4",
	"TIDQ_DEQUEUE_REQ_CNT_5",
	"TIDQ_DEQUEUE_REQ_CNT_6",
	"TIDQ_DEQUEUE_REQ_CNT_7",
	"TOTAL_TIDQ_DEPTH",
	"RX_HTT_FETCH_CNT",
	"TOTAL_TIDQ_BYPASS_CNT",
	"GLOBAL_Q_FULL_CNT",
	"TIDQ_FULL_CNT",
};

/*
 * nss_stats_str_wifili_txrx
 *	wifili txrx statistics
 */
static int8_t *nss_stats_str_wifili_txrx[NSS_STATS_WIFILI_TXRX_MAX] = {
	"WIFILI_RX_MSDU_ERROR",
	"WIFILI_RX_INV_PEER_RCV",
	"WIFILI_RX_WDS_SRCPORT_EXCEPTION",
	"WIFILI_RX_WDS_SRCPORT_EXCEPTION_FAIL",
	"WIFILI_RX_DELIVERD",
	"WIFILI_RX_DELIVER_DROPPED",
	"WIFILI_RX_INTRA_BSS_UCAST",
	"WIFILI_RX_INTRA_BSS_UCAST_FAIL",
	"WIFILI_RX_INTRA_BSS_MCAST",
	"WIFILI_RX_INTRA_BSS_MCAST_FAIL",
	"WIFILI_RX_SG_RCV_FAIL",
	"WIFILI_TX_ENQUEUE",
	"WIFILI_TX_ENQUEUE_DROP",
	"WIFILI_TX_DEQUEUE",
	"WIFILI_TX_HW_ENQUEUE_FAIL",
	"WIFILI_TX_SENT_COUNT",
};

/*
 * nss_stats_str_wifili_tcl
 *	wifili tcl stats
 */
static int8_t *nss_stats_str_wifili_tcl[NSS_STATS_WIFILI_TCL_MAX] = {
	"WIFILI_TCL_NO_HW_DESC",
	"WIFILI_TCL_RING_FULL",
	"WIFILI_TCL_RING_SENT",
};

/*
 * nss_stats_str_wifili_tx_comp
 *	wifili tx comp stats
 */
static int8_t *nss_stats_str_wifili_tx_comp[NSS_STATS_WIFILI_TX_DESC_FREE_MAX] = {
	"WIFILI_TX_DESC_FREE_INV_BUFSRC",
	"WIFILI_TX_DESC_FREE_INV_COOKIE",
	"WIFILI_TX_DESC_FREE_HW_RING_EMPTY",
	"WIFILI_TX_DESC_FREE_REAPED",
};

/*
 * nss_stats_str_wifili_reo
 *	wifili tx reo stats
 */
static int8_t *nss_stats_str_wifili_reo[NSS_STATS_WIFILI_REO_MAX] = {
	"WIFILI_REO_ERROR",
	"WIFILI_REO_REAPED",
	"WIFILI_REO_INV_COOKIE",
};

/*
 * nss_stats_str_wifili_txsw_pool
 *	wifili tx desc stats
 */
static int8_t *nss_stats_str_wifili_txsw_pool[NSS_STATS_WIFILI_TX_DESC_MAX] = {
	"WIFILI_TX_DESC_IN_USE",
	"WIFILI_TX_DESC_ALLOC_FAIL",
	"WIFILI_TX_DESC_ALREADY_ALLOCATED",
	"WIFILI_TX_DESC_INVALID_FREE",
	"WIFILI_TX_DESC_FREE_SRC_FW",
	"WIFILI_TX_DESC_FREE_COMPLETION",
	"WIFILI_TX_DESC_NO_PB",
};

/*
 * nss_stats_str_wifili_ext_txsw_pool
 *	wifili tx ext desc stats
 */
static uint8_t *nss_stats_str_wifili_ext_txsw_pool[NSS_STATS_WIFILI_EXT_TX_DESC_MAX] = {
	"WIFILI_EXT_TX_DESC_IN_USE",
	"WIFILI_EXT_TX_DESC_ALLOC_FAIL",
	"WIFILI_EXT_TX_DESC_ALREADY_ALLOCATED",
	"WIFILI_EXT_TX_DESC_INVALID_FREE",
};

/*
 * nss_stats_str_wifili_rxdma_pool
 *	wifili rx desc stats
 */
static int8_t *nss_stats_str_wifili_rxdma_pool[NSS_STATS_WIFILI_RX_DESC_MAX] = {
	"WIFILI_RX_DESC_NO_PB",
	"WIFILI_RX_DESC_ALLOC_FAIL",
	"WIFILI_RX_DESC_IN_USE",
};

/*
 * nss_stats_str_wifili_rxdma_ring
 *	wifili rx dma ring stats
 */
static int8_t *nss_stats_str_wifili_rxdma_ring[NSS_STATS_WIFILI_RXDMA_DESC_MAX] = {
	"WIFILI_RXDMA_HW_DESC_UNAVAILABLE",
};

/*
 * nss_stats_str_wifili_wbm
 *	wifili wbm ring stats
 */
static int8_t *nss_stats_str_wifili_wbm[NSS_STATS_WIFILI_WBM_MAX] = {
	"WIFILI_WBM_SRC_DMA",
	"WIFILI_WBM_SRC_DMA_CODE_INV",
	"WIFILI_WBM_SRC_REO",
	"WIFILI_WBM_SRC_REO_CODE_NULLQ",
	"WIFILI_WBM_SRC_REO_CODE_INV",
	"WIFILI_WBM_SRC_INV",
};

/*
 * nss_stats_str_portid
 *	PortID statistics strings
 */
static int8_t *nss_stats_str_portid[NSS_STATS_PORTID_MAX] = {
	"RX_INVALID_HEADER",
};

/*
 * nss_stats_str_dtls_session_stats
 *	DTLS statistics strings for nss session stats
 */
static int8_t *nss_stats_str_dtls_session_debug_stats[NSS_STATS_DTLS_SESSION_MAX] = {
	"RX_PKTS",
	"TX_PKTS",
	"RX_DROPPED",
	"RX_AUTH_DONE",
	"TX_AUTH_DONE",
	"RX_CIPHER_DONE",
	"TX_CIPHER_DONE",
	"RX_CBUF_ALLOC_FAIL",
	"TX_CBUF_ALLOC_FAIL",
	"TX_CENQUEUE_FAIL",
	"RX_CENQUEUE_FAIL",
	"TX_DROPPED_HROOM",
	"TX_DROPPED_TROOM",
	"TX_FORWARD_ENQUEUE_FAIL",
	"RX_FORWARD_ENQUEUE_FAIL",
	"RX_INVALID_VERSION",
	"RX_INVALID_EPOCH",
	"RX_MALFORMED",
	"RX_CIPHER_FAIL",
	"RX_AUTH_FAIL",
	"RX_CAPWAP_CLASSIFY_FAIL",
	"RX_SINGLE_REC_DGRAM",
	"RX_MULTI_REC_DGRAM",
	"RX_REPLAY_FAIL",
	"RX_REPLAY_DUPLICATE",
	"RX_REPLAY_OUT_OF_WINDOW",
	"OUTFLOW_QUEUE_FULL",
	"DECAP_QUEUE_FULL",
	"PBUF_ALLOC_FAIL",
	"PBUF_COPY_FAIL",
	"EPOCH",
	"TX_SEQ_HIGH",
	"TX_SEQ_LOW",
};

/*
 * nss_stats_str_gre_tunnel_session_stats
 *	GRE Tunnel statistics strings for nss session stats
 */
static int8_t *nss_stats_str_gre_tunnel_session_debug_stats[NSS_STATS_GRE_TUNNEL_SESSION_MAX] = {
	"RX_PKTS",
	"TX_PKTS",
	"RX_DROPPED",
	"RX_MALFORMED",
	"RX_INVALID_PROT",
	"DECAP_QUEUE_FULL",
	"RX_SINGLE_REC_DGRAM",
	"RX_INVALID_REC_DGRAM",
	"BUFFER_ALLOC_FAIL",
	"BUFFER_COPY_FAIL",
	"OUTFLOW_QUEUE_FULL",
	"TX_DROPPED_HROOM",
	"RX_CBUFFER_ALLOC_FAIL",
	"RX_CENQUEUE_FAIL",
	"RX_DECRYPT_DONE",
	"RX_FORWARD_ENQUEUE_FAIL",
	"TX_CBUFFER_ALLOC_FAIL",
	"TX_CENQUEUE_FAIL",
	"TX_DROPPED_TROOM",
	"TX_FORWARD_ENQUEUE_FAIL",
	"TX_CIPHER_DONE",
	"CRYPTO_NOSUPP",
};

/*
 * nss_stats_str_l2tpv2_session_stats
 *	l2tpv2 statistics strings for nss session stats
 */
static int8_t *nss_stats_str_l2tpv2_session_debug_stats[NSS_STATS_L2TPV2_SESSION_MAX] = {
	"RX_PPP_LCP_PKTS",
	"RX_EXP_PKTS",
	"ENCAP_PBUF_ALLOC_FAIL",
	"DECAP_PBUF_ALLOC_FAIL"
};

/*
 * nss_stats_str_map_t_instance_stats
 *	map_t statistics strings for nss session stats
 */
static int8_t *nss_stats_str_map_t_instance_debug_stats[NSS_STATS_MAP_T_MAX] = {
	"MAP_T_V4_TO_V6_PBUF_EXCEPTION_PKTS",
	"MAP_T_V4_TO_V6_PBUF_NO_MATCHING_RULE",
	"MAP_T_V4_TO_V6_PBUF_NOT_TCP_OR_UDP",
	"MAP_T_V4_TO_V6_RULE_ERR_LOCAL_PSID",
	"MAP_T_V4_TO_V6_RULE_ERR_LOCAL_IPV6",
	"MAP_T_V4_TO_V6_RULE_ERR_REMOTE_PSID",
	"MAP_T_V4_TO_V6_RULE_ERR_REMOTE_EA_BITS",
	"MAP_T_V4_TO_V6_RULE_ERR_REMOTE_IPV6",
	"MAP_T_V6_TO_V4_PBUF_EXCEPTION_PKTS",
	"MAP_T_V6_TO_V4_PBUF_NO_MATCHING_RULE",
	"MAP_T_V6_TO_V4_PBUF_NOT_TCP_OR_UDP",
	"MAP_T_V6_TO_V4_RULE_ERR_LOCAL_IPV4",
	"MAP_T_V6_TO_V4_RULE_ERR_REMOTE_IPV4"
};

/*
 * nss_stats_str_gre_base_stats
 *	GRE debug statistics strings for base types
 */
static int8_t *nss_stats_str_gre_base_debug_stats[NSS_STATS_GRE_BASE_DEBUG_MAX] = {
	"GRE_BASE_RX_PACKETS",
	"GRE_BASE_RX_DROPPED",
	"GRE_BASE_EXP_ETH_HDR_MISSING",
	"GRE_BASE_EXP_ETH_TYPE_NON_IP",
	"GRE_BASE_EXP_IP_UNKNOWN_PROTOCOL",
	"GRE_BASE_EXP_IP_HEADER_INCOMPLETE",
	"GRE_BASE_EXP_IP_BAD_TOTAL_LENGTH",
	"GRE_BASE_EXP_IP_BAD_CHECKSUM",
	"GRE_BASE_EXP_IP_DATAGRAM_INCOMPLETE",
	"GRE_BASE_EXP_IP_FRAGMENT",
	"GRE_BASE_EXP_IP_OPTIONS_INCOMPLETE",
	"GRE_BASE_EXP_IP_WITH_OPTIONS",
	"GRE_BASE_EXP_IPV6_UNKNOWN_PROTOCOL",
	"GRE_BASE_EXP_IPV6_HEADER_INCOMPLETE",
	"GRE_BASE_EXP_GRE_UNKNOWN_SESSION",
	"GRE_BASE_EXP_GRE_NODE_INACTIVE",
};

/*
 * nss_stats_str_gre_session_stats
 *	GRE debug statistics strings for sessions
 */
static int8_t *nss_stats_str_gre_session_debug_stats[NSS_STATS_GRE_SESSION_DEBUG_MAX] = {
	"GRE_SESSION_PBUF_ALLOC_FAIL",
	"GRE_SESSION_DECAP_FORWARD_ENQUEUE_FAIL",
	"GRE_SESSION_ENCAP_FORWARD_ENQUEUE_FAIL",
	"GRE_SESSION_DECAP_TX_FORWARDED",
	"GRE_SESSION_ENCAP_RX_RECEIVED",
	"GRE_SESSION_ENCAP_RX_DROPPED",
	"GRE_SESSION_ENCAP_RX_LINEAR_FAIL",
	"GRE_SESSION_EXP_RX_KEY_ERROR",
	"GRE_SESSION_EXP_RX_SEQ_ERROR",
	"GRE_SESSION_EXP_RX_CS_ERROR",
	"GRE_SESSION_EXP_RX_FLAG_MISMATCH",
	"GRE_SESSION_EXP_RX_MALFORMED",
	"GRE_SESSION_EXP_RX_INVALID_PROTOCOL",
	"GRE_SESSION_EXP_RX_NO_HEADROOM",
};

/*
 * nss_stats_str_ppe_conn
 *	PPE statistics strings for nss flow stats
 */
static int8_t *nss_stats_str_ppe_conn[NSS_STATS_PPE_CONN_MAX] = {
	"v4 routed flows",
	"v4 bridge flows",
	"v4 conn create req",
	"v4 conn create fail",
	"v4 conn destroy req",
	"v4 conn destroy fail",
	"v4 conn MC create req",
	"v4 conn MC create fail",
	"v4 conn MC update req",
	"v4 conn MC update fail",
	"v4 conn MC delete req",
	"v4 conn MC delete fail",

	"v6 routed flows",
	"v6 bridge flows",
	"v6 conn create req",
	"v6 conn create fail",
	"v6 conn destroy req",
	"v6 conn destroy fail",
	"v6 conn MC create req",
	"v6 conn MC create fail",
	"v6 conn MC update req",
	"v6 conn MC update fail",
	"v6 conn MC delete req",
	"v6 conn MC delete fail",

	"conn fail - vp full",
	"conn fail - nexthop full",
	"conn fail - flow full",
	"conn fail - host full",
	"conn fail - pub-ip full",
	"conn fail - port not setup",
	"conn fail - rw fifo full",
	"conn fail - flow cmd failure",
	"conn fail - unknown proto",
	"conn fail - ppe not responding",
	"conn fail - CE opaque invalid",
	"conn fail - fqg full"
};

/*
 * nss_stats_str_ppe_l3
 *	PPE statistics strings for nss debug stats
 */
static int8_t *nss_stats_str_ppe_l3[NSS_STATS_PPE_L3_MAX] = {
	"PPE L3 dbg reg 0",
	"PPE L3 dbg reg 1",
	"PPE L3 dbg reg 2",
	"PPE L3 dbg reg 3",
	"PPE L3 dbg reg 4",
	"PPE L3 dbg reg port",
};

/*
 * nss_stats_str_ppe_code
 *	PPE statistics strings for nss debug stats
 */
static int8_t *nss_stats_str_ppe_code[NSS_STATS_PPE_CODE_MAX] = {
	"PPE CPU_CODE",
	"PPE DROP_CODE",
};

/*
 * nss_stats_str_pptp_session_stats
 *	PPTP statistics strings for nss session stats
 */
static int8_t *nss_stats_str_pptp_session_debug_stats[NSS_STATS_PPTP_SESSION_MAX] = {
	"ENCAP_RX_PACKETS",
	"ENCAP_RX_BYTES",
	"ENCAP_TX_PACKETS",
	"ENCAP_TX_BYTES",
	"ENCAP_RX_DROP",
	"DECAP_RX_PACKETS",
	"DECAP_RX_BYTES",
	"DECAP_TX_PACKETS",
	"DECAP_TX_BYTES",
	"DECAP_RX_DROP",
	"ENCAP_HEADROOM_ERR",
	"ENCAP_SMALL_SIZE",
	"ENCAP_PNODE_ENQUEUE_FAIL",
	"DECAP_NO_SEQ_NOR_ACK",
	"DECAP_INVAL_GRE_FLAGS",
	"DECAP_INVAL_GRE_PROTO",
	"DECAP_WRONG_SEQ",
	"DECAP_INVAL_PPP_HDR",
	"DECAP_PPP_LCP",
	"DECAP_UNSUPPORTED_PPP_PROTO",
	"DECAP_PNODE_ENQUEUE_FAIL",
};

/*
 * nss_stats_str_trustsec_tx
 *	Trustsec TX stats strings
 */
static int8_t *nss_stats_str_trustsec_tx[NSS_STATS_TRUSTSEC_TX_MAX] = {
	"INVALID_SRC",
	"UNCONFIGURED_SRC",
	"HEADROOM_NOT_ENOUGH",
};

/*
 * nss_stats_ipv4_read()
 *	Read IPV4 stats
 */
static ssize_t nss_stats_ipv4_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
{
	int32_t i;
	/*
	 * max output lines = #stats + start tag line + end tag line + three blank lines
	 */
	uint32_t max_output_lines = (NSS_STATS_NODE_MAX + 2) + (NSS_STATS_IPV4_MAX + 3) + (NSS_EXCEPTION_EVENT_IPV4_MAX + 3) + 5;
	size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
	size_t size_wr = 0;
	ssize_t bytes_read = 0;
	uint64_t *stats_shadow;

	char *lbuf = kzalloc(size_al, GFP_KERNEL);
	if (unlikely(lbuf == NULL)) {
		nss_warning("Could not allocate memory for local statistics buffer");
		return 0;
	}

	/*
	 * Note: The assumption here is that exception event count is larger than other statistics count for IPv4
	 */
	stats_shadow = kzalloc(NSS_EXCEPTION_EVENT_IPV4_MAX * 8, GFP_KERNEL);
	if (unlikely(stats_shadow == NULL)) {
		nss_warning("Could not allocate memory for local shadow buffer");
		kfree(lbuf);
		return 0;
	}

	size_wr = scnprintf(lbuf, size_al, "ipv4 stats start:\n\n");

	/*
	 * Common node stats
	 */
	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "common node stats:\n\n");
	spin_lock_bh(&nss_top_main.stats_lock);
	for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
		stats_shadow[i] = nss_top_main.stats_node[NSS_IPV4_RX_INTERFACE][i];
	}

	spin_unlock_bh(&nss_top_main.stats_lock);

	for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_node[i], stats_shadow[i]);
	}

	/*
	 * IPv4 node stats
	 */
	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nipv4 node stats:\n\n");

	spin_lock_bh(&nss_top_main.stats_lock);
	for (i = 0; (i < NSS_STATS_IPV4_MAX); i++) {
		stats_shadow[i] = nss_top_main.stats_ipv4[i];
	}

	spin_unlock_bh(&nss_top_main.stats_lock);

	for (i = 0; (i < NSS_STATS_IPV4_MAX); i++) {
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_ipv4[i], stats_shadow[i]);
	}

	/*
	 * Exception stats
	 */
	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nipv4 exception stats:\n\n");

	spin_lock_bh(&nss_top_main.stats_lock);
	for (i = 0; (i < NSS_EXCEPTION_EVENT_IPV4_MAX); i++) {
		stats_shadow[i] = nss_top_main.stats_if_exception_ipv4[i];
	}

	spin_unlock_bh(&nss_top_main.stats_lock);

	for (i = 0; (i < NSS_EXCEPTION_EVENT_IPV4_MAX); i++) {
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_if_exception_ipv4[i], stats_shadow[i]);
	}

	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nipv4 stats end\n\n");
	bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
	kfree(lbuf);
	kfree(stats_shadow);

	return bytes_read;
}
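
/*
 * Illustrative sketch, compiled out: every read handler in this file repeats
 * the snapshot-then-format pattern seen in nss_stats_ipv4_read() above (copy
 * the counters to a shadow buffer under stats_lock, then print them outside
 * the lock). A hypothetical helper expressing that pattern is shown here; its
 * name and signature are assumptions, not driver API.
 */
#if 0
static size_t nss_stats_fill_block(char *lbuf, size_t size_wr, size_t size_al,
				int8_t **names, uint64_t *counters, uint32_t count)
{
	uint64_t shadow[64];		/* like the callers, assumes at most 64 stats */
	uint32_t i;

	spin_lock_bh(&nss_top_main.stats_lock);
	for (i = 0; i < count; i++) {
		shadow[i] = counters[i];
	}
	spin_unlock_bh(&nss_top_main.stats_lock);

	for (i = 0; i < count; i++) {
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", names[i], shadow[i]);
	}

	return size_wr;
}
#endif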

/*
 * nss_stats_ipv4_reasm_read()
 *	Read IPV4 reassembly stats
 */
static ssize_t nss_stats_ipv4_reasm_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
{
	int32_t i;
	/*
	 * max output lines = #stats + start tag line + end tag line + three blank lines
	 */
	uint32_t max_output_lines = (NSS_STATS_NODE_MAX + 2) + (NSS_STATS_IPV4_REASM_MAX + 3) + 5;
	size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
	size_t size_wr = 0;
	ssize_t bytes_read = 0;
	uint64_t *stats_shadow;

	char *lbuf = kzalloc(size_al, GFP_KERNEL);
	if (unlikely(lbuf == NULL)) {
		nss_warning("Could not allocate memory for local statistics buffer");
		return 0;
	}

	stats_shadow = kzalloc(NSS_STATS_IPV4_REASM_MAX * 8, GFP_KERNEL);
	if (unlikely(stats_shadow == NULL)) {
		nss_warning("Could not allocate memory for local shadow buffer");
		kfree(lbuf);
		return 0;
	}

	size_wr = scnprintf(lbuf, size_al, "ipv4 reasm stats start:\n\n");

	/*
	 * Common node stats
	 */
	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "common node stats:\n\n");
	spin_lock_bh(&nss_top_main.stats_lock);
	for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
		stats_shadow[i] = nss_top_main.stats_node[NSS_IPV4_REASM_INTERFACE][i];
	}

	spin_unlock_bh(&nss_top_main.stats_lock);

	for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_node[i], stats_shadow[i]);
	}

	/*
	 * IPv4 reasm node stats
	 */
	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nipv4 reasm node stats:\n\n");

	spin_lock_bh(&nss_top_main.stats_lock);
	for (i = 0; (i < NSS_STATS_IPV4_REASM_MAX); i++) {
		stats_shadow[i] = nss_top_main.stats_ipv4_reasm[i];
	}

	spin_unlock_bh(&nss_top_main.stats_lock);

	for (i = 0; (i < NSS_STATS_IPV4_REASM_MAX); i++) {
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_ipv4_reasm[i], stats_shadow[i]);
	}

	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nipv4 reasm stats end\n\n");
	bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
	kfree(lbuf);
	kfree(stats_shadow);

	return bytes_read;
}

/*
 * nss_stats_ipv6_read()
 *	Read IPV6 stats
 */
static ssize_t nss_stats_ipv6_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
{
	int32_t i;

	/*
	 * max output lines = #stats + start tag line + end tag line + three blank lines
	 */
	uint32_t max_output_lines = (NSS_STATS_NODE_MAX + 2) + (NSS_STATS_IPV6_MAX + 3) + (NSS_EXCEPTION_EVENT_IPV6_MAX + 3) + 5;
	size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
	size_t size_wr = 0;
	ssize_t bytes_read = 0;
	uint64_t *stats_shadow;

	char *lbuf = kzalloc(size_al, GFP_KERNEL);
	if (unlikely(lbuf == NULL)) {
		nss_warning("Could not allocate memory for local statistics buffer");
		return 0;
	}

	/*
	 * Note: The assumption here is that exception event count is larger than other statistics count for IPv6
	 */
	stats_shadow = kzalloc(NSS_EXCEPTION_EVENT_IPV6_MAX * 8, GFP_KERNEL);
	if (unlikely(stats_shadow == NULL)) {
		nss_warning("Could not allocate memory for local shadow buffer");
		kfree(lbuf);
		return 0;
	}

	size_wr = scnprintf(lbuf, size_al, "ipv6 stats start:\n\n");

	/*
	 * Common node stats
	 */
	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "common node stats:\n\n");
	spin_lock_bh(&nss_top_main.stats_lock);
	for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
		stats_shadow[i] = nss_top_main.stats_node[NSS_IPV6_RX_INTERFACE][i];
	}

	spin_unlock_bh(&nss_top_main.stats_lock);

	for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_node[i], stats_shadow[i]);
	}

	/*
	 * IPv6 node stats
	 */
	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nipv6 node stats:\n\n");

	spin_lock_bh(&nss_top_main.stats_lock);
	for (i = 0; (i < NSS_STATS_IPV6_MAX); i++) {
		stats_shadow[i] = nss_top_main.stats_ipv6[i];
	}

	spin_unlock_bh(&nss_top_main.stats_lock);

	for (i = 0; (i < NSS_STATS_IPV6_MAX); i++) {
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_ipv6[i], stats_shadow[i]);
	}

	/*
	 * Exception stats
	 */
	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nipv6 exception stats:\n\n");

	spin_lock_bh(&nss_top_main.stats_lock);
	for (i = 0; (i < NSS_EXCEPTION_EVENT_IPV6_MAX); i++) {
		stats_shadow[i] = nss_top_main.stats_if_exception_ipv6[i];
	}

	spin_unlock_bh(&nss_top_main.stats_lock);

	for (i = 0; (i < NSS_EXCEPTION_EVENT_IPV6_MAX); i++) {
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_if_exception_ipv6[i], stats_shadow[i]);
	}

	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nipv6 stats end\n\n");
	bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
	kfree(lbuf);
	kfree(stats_shadow);

	return bytes_read;
}

/*
 * nss_stats_ipv6_reasm_read()
 *	Read IPV6 reassembly stats
 */
static ssize_t nss_stats_ipv6_reasm_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
{
	int32_t i;
	/*
	 * max output lines = #stats + start tag line + end tag line + three blank lines
	 */
	uint32_t max_output_lines = (NSS_STATS_NODE_MAX + 2) + (NSS_STATS_IPV6_REASM_MAX + 3) + 5;
	size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
	size_t size_wr = 0;
	ssize_t bytes_read = 0;
	uint64_t *stats_shadow;

	char *lbuf = kzalloc(size_al, GFP_KERNEL);
	if (unlikely(lbuf == NULL)) {
		nss_warning("Could not allocate memory for local statistics buffer");
		return 0;
	}

	stats_shadow = kzalloc(NSS_STATS_IPV6_REASM_MAX * 8, GFP_KERNEL);
	if (unlikely(stats_shadow == NULL)) {
		nss_warning("Could not allocate memory for local shadow buffer");
		kfree(lbuf);
		return 0;
	}

	size_wr = scnprintf(lbuf, size_al, "ipv6 reasm stats start:\n\n");

	/*
	 * Common node stats
	 */
	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "common node stats:\n\n");
	spin_lock_bh(&nss_top_main.stats_lock);
	for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
		stats_shadow[i] = nss_top_main.stats_node[NSS_IPV6_REASM_INTERFACE][i];
	}

	spin_unlock_bh(&nss_top_main.stats_lock);

	for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_node[i], stats_shadow[i]);
	}

	/*
	 * IPv6 reasm node stats
	 */
	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nipv6 reasm node stats:\n\n");

	spin_lock_bh(&nss_top_main.stats_lock);
	for (i = 0; (i < NSS_STATS_IPV6_REASM_MAX); i++) {
		stats_shadow[i] = nss_top_main.stats_ipv6_reasm[i];
	}

	spin_unlock_bh(&nss_top_main.stats_lock);

	for (i = 0; (i < NSS_STATS_IPV6_REASM_MAX); i++) {
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_ipv6_reasm[i], stats_shadow[i]);
	}

	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nipv6 reasm stats end\n\n");
	bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
	kfree(lbuf);
	kfree(stats_shadow);

	return bytes_read;
}

/*
 * nss_stats_edma_port_stats_read()
 *	Read EDMA port stats
 */
static ssize_t nss_stats_edma_port_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
{
	int32_t i;

	/*
	 * max output lines = #stats + start tag line + end tag line + three blank lines
	 */
	uint32_t max_output_lines = (NSS_STATS_NODE_MAX + 2) + 3;
	size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
	size_t size_wr = 0;
	ssize_t bytes_read = 0;
	uint64_t *stats_shadow;
	struct nss_stats_data *data = fp->private_data;

	char *lbuf = kzalloc(size_al, GFP_KERNEL);
	if (unlikely(lbuf == NULL)) {
		nss_warning("Could not allocate memory for local statistics buffer");
		return 0;
	}

	/*
	 * Note: The assumption here is that we do not have more than 64 stats
	 */
	stats_shadow = kzalloc(64 * 8, GFP_KERNEL);
	if (unlikely(stats_shadow == NULL)) {
		nss_warning("Could not allocate memory for local shadow buffer");
		kfree(lbuf);
		return 0;
	}

	size_wr = scnprintf(lbuf, size_al, "edma stats start:\n\n");

	/*
	 * Common node stats
	 */
	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "edma port %d stats:\n\n", data->edma_id);
	spin_lock_bh(&nss_top_main.stats_lock);
	for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
		stats_shadow[i] = nss_top_main.stats_edma.port[data->edma_id].port_stats[i];
	}

	spin_unlock_bh(&nss_top_main.stats_lock);

	for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_node[i], stats_shadow[i]);
	}

	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nedma stats end\n\n");
	bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
	kfree(lbuf);
	kfree(stats_shadow);

	return bytes_read;
}

/*
 * nss_stats_edma_port_type_read()
 *	Read EDMA port type
 */
static ssize_t nss_stats_edma_port_type_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
{
	/*
	 * max output lines = #stats + start tag line + end tag line + three blank lines
	 */
	uint32_t max_output_lines = (1 + 2) + 3;
	size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
	size_t size_wr = 0;
	ssize_t bytes_read = 0;
	uint64_t port_type;
	struct nss_stats_data *data = fp->private_data;

	char *lbuf = kzalloc(size_al, GFP_KERNEL);
	if (unlikely(lbuf == NULL)) {
		nss_warning("Could not allocate memory for local statistics buffer");
		return 0;
	}

	size_wr = scnprintf(lbuf, size_al, "edma port type start:\n\n");
	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "edma port %d type:\n\n", data->edma_id);

	/*
	 * Port type
	 */
	spin_lock_bh(&nss_top_main.stats_lock);
	port_type = nss_top_main.stats_edma.port[data->edma_id].port_type;
	spin_unlock_bh(&nss_top_main.stats_lock);

	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
				"port_type = %s\n", nss_stats_str_edma_port_type[port_type]);

	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nedma stats end\n");
	bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
	kfree(lbuf);

	return bytes_read;
}

/*
 * nss_stats_edma_port_ring_map_read()
 *	Read EDMA port ring map
 */
static ssize_t nss_stats_edma_port_ring_map_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
{
	int32_t i;

	/*
	 * max output lines = #stats + start tag line + end tag line + three blank lines
	 */
	uint32_t max_output_lines = (4 + 2) + 3;
	size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
	size_t size_wr = 0;
	ssize_t bytes_read = 0;
	uint64_t *stats_shadow;
	struct nss_stats_data *data = fp->private_data;

	char *lbuf = kzalloc(size_al, GFP_KERNEL);
	if (unlikely(lbuf == NULL)) {
		nss_warning("Could not allocate memory for local statistics buffer");
		return 0;
	}

	/*
	 * Note: The assumption here is that we do not have more than 64 stats
	 */
	stats_shadow = kzalloc(64 * 8, GFP_KERNEL);
	if (unlikely(stats_shadow == NULL)) {
		nss_warning("Could not allocate memory for local shadow buffer");
		kfree(lbuf);
		return 0;
	}

	size_wr = scnprintf(lbuf, size_al, "edma port ring map start:\n\n");

	/*
	 * Port ring map
	 */
	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "edma port %d ring map:\n\n", data->edma_id);
	spin_lock_bh(&nss_top_main.stats_lock);
	for (i = 0; i < NSS_EDMA_PORT_RING_MAP_MAX; i++) {
		stats_shadow[i] = nss_top_main.stats_edma.port[data->edma_id].port_ring_map[i];
	}

	spin_unlock_bh(&nss_top_main.stats_lock);

	for (i = 0; i < NSS_EDMA_PORT_RING_MAP_MAX; i++) {
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_edma_port_ring_map[i], stats_shadow[i]);
	}

	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nedma stats end\n\n");
	bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
	kfree(lbuf);
	kfree(stats_shadow);

	return bytes_read;
}

/*
 * nss_stats_edma_txring_read()
 *	Read EDMA Tx ring stats
 */
static ssize_t nss_stats_edma_txring_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
{
	int32_t i;

	/*
	 * max output lines = #stats + start tag line + end tag line + three blank lines
	 */
	uint32_t max_output_lines = (NSS_STATS_EDMA_TX_MAX + 2) + 3;
	size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
	size_t size_wr = 0;
	ssize_t bytes_read = 0;
	uint64_t *stats_shadow;
	struct nss_stats_data *data = fp->private_data;

	char *lbuf = kzalloc(size_al, GFP_KERNEL);
	if (unlikely(lbuf == NULL)) {
		nss_warning("Could not allocate memory for local statistics buffer");
		return 0;
	}

	/*
	 * Note: The assumption here is that we do not have more than 64 stats
	 */
	stats_shadow = kzalloc(64 * 8, GFP_KERNEL);
	if (unlikely(stats_shadow == NULL)) {
		nss_warning("Could not allocate memory for local shadow buffer");
		kfree(lbuf);
		return 0;
	}

	size_wr = scnprintf(lbuf, size_al, "edma Tx ring stats start:\n\n");

	/*
	 * Tx ring stats
	 */
	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "Tx ring %d stats:\n\n", data->edma_id);
	spin_lock_bh(&nss_top_main.stats_lock);
	for (i = 0; i < NSS_STATS_EDMA_TX_MAX; i++) {
		stats_shadow[i] = nss_top_main.stats_edma.tx_stats[data->edma_id][i];
	}

	spin_unlock_bh(&nss_top_main.stats_lock);

	for (i = 0; i < NSS_STATS_EDMA_TX_MAX; i++) {
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_edma_tx[i], stats_shadow[i]);
	}

	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nedma Tx ring stats end\n\n");
	bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
	kfree(lbuf);
	kfree(stats_shadow);

	return bytes_read;
}

/*
 * nss_stats_edma_rxring_read()
 *	Read EDMA rxring stats
 */
static ssize_t nss_stats_edma_rxring_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
{
	int32_t i;

	/*
	 * max output lines = #stats + start tag line + end tag line + three blank lines
	 */
	uint32_t max_output_lines = (NSS_STATS_EDMA_RX_MAX + 2) + 3;
	size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
	size_t size_wr = 0;
	ssize_t bytes_read = 0;
	uint64_t *stats_shadow;
	struct nss_stats_data *data = fp->private_data;

	char *lbuf = kzalloc(size_al, GFP_KERNEL);
	if (unlikely(lbuf == NULL)) {
		nss_warning("Could not allocate memory for local statistics buffer");
		return 0;
	}

	/*
	 * Note: The assumption here is that we do not have more than 64 stats
	 */
	stats_shadow = kzalloc(64 * 8, GFP_KERNEL);
	if (unlikely(stats_shadow == NULL)) {
		nss_warning("Could not allocate memory for local shadow buffer");
		kfree(lbuf);
		return 0;
	}

	size_wr = scnprintf(lbuf, size_al, "edma Rx ring stats start:\n\n");

	/*
	 * RX ring stats
	 */
	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "Rx ring %d stats:\n\n", data->edma_id);
	spin_lock_bh(&nss_top_main.stats_lock);
	for (i = 0; i < NSS_STATS_EDMA_RX_MAX; i++) {
		stats_shadow[i] = nss_top_main.stats_edma.rx_stats[data->edma_id][i];
	}

	spin_unlock_bh(&nss_top_main.stats_lock);

	for (i = 0; i < NSS_STATS_EDMA_RX_MAX; i++) {
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_edma_rx[i], stats_shadow[i]);
	}

	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nedma Rx ring stats end\n\n");
	bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
	kfree(lbuf);
	kfree(stats_shadow);

	return bytes_read;
}

/*
 * nss_stats_edma_txcmplring_read()
 *	Read EDMA txcmplring stats
 */
static ssize_t nss_stats_edma_txcmplring_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
{
	int32_t i;

	/*
	 * max output lines = #stats + start tag line + end tag line + three blank lines
	 */
	uint32_t max_output_lines = (NSS_STATS_EDMA_TXCMPL_MAX + 2) + 3;
	size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
	size_t size_wr = 0;
	ssize_t bytes_read = 0;
	uint64_t *stats_shadow;
	struct nss_stats_data *data = fp->private_data;

	char *lbuf = kzalloc(size_al, GFP_KERNEL);
	if (unlikely(lbuf == NULL)) {
		nss_warning("Could not allocate memory for local statistics buffer");
		return 0;
	}

	/*
	 * Note: The assumption here is that we do not have more than 64 stats
	 */
	stats_shadow = kzalloc(64 * 8, GFP_KERNEL);
	if (unlikely(stats_shadow == NULL)) {
		nss_warning("Could not allocate memory for local shadow buffer");
		kfree(lbuf);
		return 0;
	}

	size_wr = scnprintf(lbuf, size_al, "edma Tx cmpl ring stats start:\n\n");

	/*
	 * Tx cmpl ring stats
	 */
	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "Tx cmpl ring %d stats:\n\n", data->edma_id);
	spin_lock_bh(&nss_top_main.stats_lock);
	for (i = 0; i < NSS_STATS_EDMA_TXCMPL_MAX; i++) {
		stats_shadow[i] = nss_top_main.stats_edma.txcmpl_stats[data->edma_id][i];
	}

	spin_unlock_bh(&nss_top_main.stats_lock);

	for (i = 0; i < NSS_STATS_EDMA_TXCMPL_MAX; i++) {
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_edma_txcmpl[i], stats_shadow[i]);
	}

	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nedma Tx cmpl ring stats end\n\n");
	bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
	kfree(lbuf);
	kfree(stats_shadow);

	return bytes_read;
}

/*
 * nss_stats_edma_rxfillring_read()
 *	Read EDMA rxfillring stats
 */
static ssize_t nss_stats_edma_rxfillring_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
{
	int32_t i;

	/*
	 * max output lines = #stats + start tag line + end tag line + three blank lines
	 */
	uint32_t max_output_lines = (NSS_STATS_EDMA_RXFILL_MAX + 2) + 3;
	size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
	size_t size_wr = 0;
	ssize_t bytes_read = 0;
	uint64_t *stats_shadow;
	struct nss_stats_data *data = fp->private_data;

	char *lbuf = kzalloc(size_al, GFP_KERNEL);
	if (unlikely(lbuf == NULL)) {
		nss_warning("Could not allocate memory for local statistics buffer");
		return 0;
	}

	/*
	 * Note: The assumption here is that we do not have more than 64 stats
	 */
	stats_shadow = kzalloc(64 * 8, GFP_KERNEL);
	if (unlikely(stats_shadow == NULL)) {
		nss_warning("Could not allocate memory for local shadow buffer");
		kfree(lbuf);
		return 0;
	}

	size_wr = scnprintf(lbuf, size_al, "edma Rx fill ring stats start:\n\n");

	/*
	 * Rx fill ring stats
	 */
	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "Rx fill ring %d stats:\n\n", data->edma_id);
	spin_lock_bh(&nss_top_main.stats_lock);
	for (i = 0; i < NSS_STATS_EDMA_RXFILL_MAX; i++) {
		stats_shadow[i] = nss_top_main.stats_edma.rxfill_stats[data->edma_id][i];
	}

	spin_unlock_bh(&nss_top_main.stats_lock);

	for (i = 0; i < NSS_STATS_EDMA_RXFILL_MAX; i++) {
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_edma_rxfill[i], stats_shadow[i]);
	}

	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nedma Rx fill ring stats end\n\n");
	bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
	kfree(lbuf);
	kfree(stats_shadow);

	return bytes_read;
}

/*
 * nss_stats_eth_rx_read()
 *	Read ETH_RX stats
 */
static ssize_t nss_stats_eth_rx_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
{
	int32_t i;

	/*
	 * max output lines = #stats + start tag line + end tag line + three blank lines
	 */
	uint32_t max_output_lines = (NSS_STATS_NODE_MAX + 2) + (NSS_STATS_ETH_RX_MAX + 3) + (NSS_EXCEPTION_EVENT_ETH_RX_MAX + 3) + 5;
	size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
	size_t size_wr = 0;
	ssize_t bytes_read = 0;
	uint64_t *stats_shadow;

	char *lbuf = kzalloc(size_al, GFP_KERNEL);
	if (unlikely(lbuf == NULL)) {
		nss_warning("Could not allocate memory for local statistics buffer");
		return 0;
	}

	/*
	 * Note: The assumption here is that we do not have more than 64 stats
	 */
	stats_shadow = kzalloc(64 * 8, GFP_KERNEL);
	if (unlikely(stats_shadow == NULL)) {
		nss_warning("Could not allocate memory for local shadow buffer");
		kfree(lbuf);
		return 0;
	}

	size_wr = scnprintf(lbuf, size_al, "eth_rx stats start:\n\n");

	/*
	 * Common node stats
	 */
	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "common node stats:\n\n");
	spin_lock_bh(&nss_top_main.stats_lock);
	for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
		stats_shadow[i] = nss_top_main.stats_node[NSS_ETH_RX_INTERFACE][i];
	}

	spin_unlock_bh(&nss_top_main.stats_lock);

	for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_node[i], stats_shadow[i]);
	}

	/*
	 * eth_rx node stats
	 */
	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\neth_rx node stats:\n\n");
	spin_lock_bh(&nss_top_main.stats_lock);
	for (i = 0; (i < NSS_STATS_ETH_RX_MAX); i++) {
		stats_shadow[i] = nss_top_main.stats_eth_rx[i];
	}

	spin_unlock_bh(&nss_top_main.stats_lock);

	for (i = 0; (i < NSS_STATS_ETH_RX_MAX); i++) {
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_eth_rx[i], stats_shadow[i]);
	}

	/*
	 * Exception stats
	 */
	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\neth_rx exception stats:\n\n");

	spin_lock_bh(&nss_top_main.stats_lock);
	for (i = 0; (i < NSS_EXCEPTION_EVENT_ETH_RX_MAX); i++) {
		stats_shadow[i] = nss_top_main.stats_if_exception_eth_rx[i];
	}

	spin_unlock_bh(&nss_top_main.stats_lock);

	for (i = 0; (i < NSS_EXCEPTION_EVENT_ETH_RX_MAX); i++) {
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_if_exception_eth_rx[i], stats_shadow[i]);
	}

	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\neth_rx stats end\n\n");
	bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
	kfree(lbuf);
	kfree(stats_shadow);

	return bytes_read;
}
1713
1714/*
1715 * nss_stats_n2h_read()
1716 * Read N2H stats
1717 */
1718static ssize_t nss_stats_n2h_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
1719{
1720 int32_t i;
1721
1722 /*
1723 * max output lines = #stats + start tag line + end tag line + three blank lines
1724 */
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301725 uint32_t max_output_lines = (NSS_STATS_NODE_MAX + 2) + (NSS_STATS_N2H_MAX + 3) + 5;
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301726 size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
1727 size_t size_wr = 0;
1728 ssize_t bytes_read = 0;
1729 uint64_t *stats_shadow;
Murat Sezgin0c0561d2014-04-09 18:55:58 -07001730 int max = NSS_STATS_N2H_MAX - NSS_STATS_NODE_MAX;
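	/*
	 * The first NSS_STATS_NODE_MAX entries of stats_n2h[] hold the common
	 * node counters; the N2H-specific counters follow them. That is why the
	 * second print loop below runs over "max" entries but indexes the shadow
	 * buffer at i + NSS_STATS_NODE_MAX.
	 */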
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301731
1732 char *lbuf = kzalloc(size_al, GFP_KERNEL);
1733 if (unlikely(lbuf == NULL)) {
1734 nss_warning("Could not allocate memory for local statistics buffer");
1735 return 0;
1736 }
1737
1738 stats_shadow = kzalloc(NSS_STATS_N2H_MAX * 8, GFP_KERNEL);
1739 if (unlikely(stats_shadow == NULL)) {
1740 nss_warning("Could not allocate memory for local shadow buffer");
Ankit Dhanuka14999992014-11-12 15:35:11 +05301741 kfree(lbuf);
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301742 return 0;
1743 }
1744
1745 size_wr = scnprintf(lbuf, size_al, "n2h stats start:\n\n");
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301746
1747 /*
1748 * Common node stats
1749 */
Abhishek Rastogia1a07972014-04-01 19:43:33 +05301750 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "common node stats:\n\n");
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301751 spin_lock_bh(&nss_top_main.stats_lock);
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301752 for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
1753 stats_shadow[i] = nss_top_main.nss[0].stats_n2h[i];
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301754 }
1755
1756 spin_unlock_bh(&nss_top_main.stats_lock);
1757
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301758 for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
1759 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
1760 "%s = %llu\n", nss_stats_str_node[i], stats_shadow[i]);
1761 }
1762
1763 /*
1764 * N2H node stats
1765 */
Abhishek Rastogia1a07972014-04-01 19:43:33 +05301766 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nn2h node stats:\n\n");
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301767 spin_lock_bh(&nss_top_main.stats_lock);
1768 for (i = NSS_STATS_NODE_MAX; (i < NSS_STATS_N2H_MAX); i++) {
1769 stats_shadow[i] = nss_top_main.nss[0].stats_n2h[i];
1770 }
1771
1772 spin_unlock_bh(&nss_top_main.stats_lock);
1773
Murat Sezgin0c0561d2014-04-09 18:55:58 -07001774 for (i = 0; i < max; i++) {
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301775 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
Murat Sezgin0c0561d2014-04-09 18:55:58 -07001776 "%s = %llu\n", nss_stats_str_n2h[i], stats_shadow[i + NSS_STATS_NODE_MAX]);
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301777 }
1778
1779 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nn2h stats end\n\n");
1780 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
1781 kfree(lbuf);
1782 kfree(stats_shadow);
1783
1784 return bytes_read;
1785}
1786
1787/*
Thomas Wuc3e382c2014-10-29 15:35:13 -07001788 * nss_stats_lso_rx_read()
1789 * Read LSO_RX stats
1790 */
1791static ssize_t nss_stats_lso_rx_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
1792{
1793 int32_t i;
1794
1795 /*
1796 * max output lines = #stats + start tag line + end tag line + three blank lines
1797 */
1798 uint32_t max_output_lines = (NSS_STATS_NODE_MAX + 2) + (NSS_STATS_LSO_RX_MAX + 3) + 5;
1799 size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
1800 size_t size_wr = 0;
1801 ssize_t bytes_read = 0;
1802 uint64_t *stats_shadow;
1803
1804 char *lbuf = kzalloc(size_al, GFP_KERNEL);
1805 if (unlikely(lbuf == NULL)) {
1806 nss_warning("Could not allocate memory for local statistics buffer");
1807 return 0;
1808 }
1809
1810 stats_shadow = kzalloc(NSS_STATS_LSO_RX_MAX * 8, GFP_KERNEL);
1811 if (unlikely(stats_shadow == NULL)) {
1812 nss_warning("Could not allocate memory for local shadow buffer");
Ankit Dhanuka14999992014-11-12 15:35:11 +05301813 kfree(lbuf);
Thomas Wuc3e382c2014-10-29 15:35:13 -07001814 return 0;
1815 }
1816
1817 size_wr = scnprintf(lbuf, size_al, "lso_rx stats start:\n\n");
1818
1819 /*
1820 * Common node stats
1821 */
1822 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "common node stats:\n\n");
1823 spin_lock_bh(&nss_top_main.stats_lock);
1824 for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
1825 stats_shadow[i] = nss_top_main.stats_node[NSS_LSO_RX_INTERFACE][i];
1826 }
1827
1828 spin_unlock_bh(&nss_top_main.stats_lock);
1829
1830 for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
1831 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
1832 "%s = %llu\n", nss_stats_str_node[i], stats_shadow[i]);
1833 }
1834
1835 /*
1836 * lso_rx node stats
1837 */
1838 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nlso_rx node stats:\n\n");
1839 spin_lock_bh(&nss_top_main.stats_lock);
1840 for (i = 0; (i < NSS_STATS_LSO_RX_MAX); i++) {
1841 stats_shadow[i] = nss_top_main.stats_lso_rx[i];
1842 }
1843
1844 spin_unlock_bh(&nss_top_main.stats_lock);
1845
1846 for (i = 0; i < NSS_STATS_LSO_RX_MAX; i++) {
1847 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
1848 "%s = %llu\n", nss_stats_str_lso_rx[i], stats_shadow[i]);
1849 }
1850
1851 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nlso_rx stats end\n\n");
1852 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
1853 kfree(lbuf);
1854 kfree(stats_shadow);
1855
1856 return bytes_read;
1857}
1858
1859/*
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301860 * nss_stats_drv_read()
1861 * Read HLOS driver stats
1862 */
1863static ssize_t nss_stats_drv_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
1864{
1865 int32_t i;
1866
1867 /*
1868 * max output lines = #stats + start tag line + end tag line + three blank lines
1869 */
1870 uint32_t max_output_lines = NSS_STATS_DRV_MAX + 5;
1871 size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
1872 size_t size_wr = 0;
1873 ssize_t bytes_read = 0;
1874 uint64_t *stats_shadow;
1875
1876 char *lbuf = kzalloc(size_al, GFP_KERNEL);
1877 if (unlikely(lbuf == NULL)) {
1878 nss_warning("Could not allocate memory for local statistics buffer");
1879 return 0;
1880 }
1881
1882 stats_shadow = kzalloc(NSS_STATS_DRV_MAX * 8, GFP_KERNEL);
1883 if (unlikely(stats_shadow == NULL)) {
1884 nss_warning("Could not allocate memory for local shadow buffer");
Ankit Dhanuka14999992014-11-12 15:35:11 +05301885 kfree(lbuf);
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301886 return 0;
1887 }
1888
1889 size_wr = scnprintf(lbuf, size_al, "drv stats start:\n\n");
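	/*
	 * Unlike the other readers, the HLOS driver counters are not snapshotted
	 * under stats_lock; each one is read individually through
	 * NSS_PKT_STATS_READ(), so values of different counters may not be
	 * mutually consistent.
	 */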
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301890 for (i = 0; (i < NSS_STATS_DRV_MAX); i++) {
Sundarajan Srinivasan62fee7e2015-01-22 11:13:10 -08001891 stats_shadow[i] = NSS_PKT_STATS_READ(&nss_top_main.stats_drv[i]);
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301892 }
1893
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301894 for (i = 0; (i < NSS_STATS_DRV_MAX); i++) {
1895 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
1896 "%s = %llu\n", nss_stats_str_drv[i], stats_shadow[i]);
1897 }
1898
1899 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\ndrv stats end\n\n");
1900 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
1901 kfree(lbuf);
1902 kfree(stats_shadow);
1903
1904 return bytes_read;
1905}
1906
1907/*
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301908 * nss_stats_pppoe_read()
1909 * Read PPPoE stats
1910 */
1911static ssize_t nss_stats_pppoe_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
1912{
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301913 int32_t i, j, k;
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301914
1915 /*
1916 * max output lines = #stats + start tag line + end tag line + three blank lines
1917 */
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301918 uint32_t max_output_lines = (NSS_STATS_NODE_MAX + 2) + (NSS_STATS_PPPOE_MAX + 3) +
Ankit Dhanukaa1569ce2014-05-13 19:58:06 +05301919 ((NSS_MAX_PHYSICAL_INTERFACES * NSS_PPPOE_NUM_SESSION_PER_INTERFACE * (NSS_PPPOE_EXCEPTION_EVENT_MAX + 5)) + 3) + 5;
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301920 size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
1921 size_t size_wr = 0;
1922 ssize_t bytes_read = 0;
1923 uint64_t *stats_shadow;
1924
1925 char *lbuf = kzalloc(size_al, GFP_KERNEL);
1926 if (unlikely(lbuf == NULL)) {
1927 nss_warning("Could not allocate memory for local statistics buffer");
1928 return 0;
1929 }
1930
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301931 stats_shadow = kzalloc(64 * 8, GFP_KERNEL);
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301932 if (unlikely(stats_shadow == NULL)) {
1933 nss_warning("Could not allocate memory for local shadow buffer");
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301934 kfree(lbuf);
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301935 return 0;
1936 }
1937
1938 size_wr = scnprintf(lbuf, size_al, "pppoe stats start:\n\n");
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301939
1940 /*
1941 * Common node stats
1942 */
Abhishek Rastogia1a07972014-04-01 19:43:33 +05301943 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "common node stats:\n\n");
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301944 spin_lock_bh(&nss_top_main.stats_lock);
1945 for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
1946 stats_shadow[i] = nss_top_main.stats_node[NSS_PPPOE_RX_INTERFACE][i];
1947 }
1948
1949 spin_unlock_bh(&nss_top_main.stats_lock);
1950
1951 for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
1952 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
1953 "%s = %llu\n", nss_stats_str_node[i], stats_shadow[i]);
1954 }
1955
1956 /*
1957 * PPPoE node stats
1958 */
Murat Sezgin2f9241a2015-06-25 13:01:51 -07001959 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\npppoe node stats:\n\n");
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301960 spin_lock_bh(&nss_top_main.stats_lock);
1961 for (i = 0; (i < NSS_STATS_PPPOE_MAX); i++) {
1962 stats_shadow[i] = nss_top_main.stats_pppoe[i];
1963 }
1964
1965 spin_unlock_bh(&nss_top_main.stats_lock);
1966
1967 for (i = 0; (i < NSS_STATS_PPPOE_MAX); i++) {
1968 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
1969 "%s = %llu\n", nss_stats_str_pppoe[i], stats_shadow[i]);
1970 }
1971
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301972 /*
1973 * Exception stats
1974 */
1975 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nException PPPoE:\n\n");
1976
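	/*
	 * stats_if_exception_pppoe[] is indexed by 1-based interface and session
	 * numbers, which is why the j and k loops below start at 1 while the
	 * local shadow array is indexed with k - 1.
	 */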
Murat Sezgin2f9241a2015-06-25 13:01:51 -07001977 for (j = 1; j <= NSS_MAX_PHYSICAL_INTERFACES; j++) {
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301978 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nInterface %d:\n\n", j);
1979
1980 spin_lock_bh(&nss_top_main.stats_lock);
Murat Sezgin2f9241a2015-06-25 13:01:51 -07001981 for (k = 1; k <= NSS_PPPOE_NUM_SESSION_PER_INTERFACE; k++) {
Ankit Dhanukaa1569ce2014-05-13 19:58:06 +05301982 for (i = 0; (i < NSS_PPPOE_EXCEPTION_EVENT_MAX); i++) {
Murat Sezgin2f9241a2015-06-25 13:01:51 -07001983 stats_shadow_pppoe_except[k - 1][i] = nss_top_main.stats_if_exception_pppoe[j][k][i];
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301984 }
1985 }
1986
1987 spin_unlock_bh(&nss_top_main.stats_lock);
1988
Murat Sezgin2f9241a2015-06-25 13:01:51 -07001989 for (k = 1; k <= NSS_PPPOE_NUM_SESSION_PER_INTERFACE; k++) {
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301990 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "%d. Session\n", k);
Ankit Dhanukaa1569ce2014-05-13 19:58:06 +05301991 for (i = 0; (i < NSS_PPPOE_EXCEPTION_EVENT_MAX); i++) {
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301992 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
1993 "%s = %llu\n",
1994 nss_stats_str_if_exception_pppoe[i],
Murat Sezgin2f9241a2015-06-25 13:01:51 -07001995 stats_shadow_pppoe_except[k - 1][i]);
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301996 }
1997 }
1998
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301999 }
2000
Murat Sezgin2f9241a2015-06-25 13:01:51 -07002001 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\npppoe stats end\n\n");
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05302002 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
2003 kfree(lbuf);
2004 kfree(stats_shadow);
2005
2006 return bytes_read;
2007}
2008
2009/*
2010 * nss_stats_gmac_read()
2011 * Read GMAC stats
2012 */
2013static ssize_t nss_stats_gmac_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
2014{
2015 uint32_t i, id;
2016
2017 /*
2018 * max output lines = ((#stats + start tag + one blank) * #GMACs) + start/end tag + 3 blank
2019 */
2020 uint32_t max_output_lines = ((NSS_STATS_GMAC_MAX + 2) * NSS_MAX_PHYSICAL_INTERFACES) + 5;
2021 size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
2022 size_t size_wr = 0;
2023 ssize_t bytes_read = 0;
2024 uint64_t *stats_shadow;
2025
2026 char *lbuf = kzalloc(size_al, GFP_KERNEL);
2027 if (unlikely(lbuf == NULL)) {
2028 nss_warning("Could not allocate memory for local statistics buffer");
2029 return 0;
2030 }
2031
2032 stats_shadow = kzalloc(NSS_STATS_GMAC_MAX * 8, GFP_KERNEL);
2033 if (unlikely(stats_shadow == NULL)) {
2034 nss_warning("Could not allocate memory for local shadow buffer");
Ankit Dhanuka14999992014-11-12 15:35:11 +05302035 kfree(lbuf);
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05302036 return 0;
2037 }
2038
2039 size_wr = scnprintf(lbuf, size_al, "gmac stats start:\n\n");
2040
2041 for (id = 0; id < NSS_MAX_PHYSICAL_INTERFACES; id++) {
2042 spin_lock_bh(&nss_top_main.stats_lock);
2043 for (i = 0; (i < NSS_STATS_GMAC_MAX); i++) {
2044 stats_shadow[i] = nss_top_main.stats_gmac[id][i];
2045 }
2046
2047 spin_unlock_bh(&nss_top_main.stats_lock);
2048
2049 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "GMAC ID: %d\n", id);
2050 for (i = 0; (i < NSS_STATS_GMAC_MAX); i++) {
2051 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2052 "%s = %llu\n", nss_stats_str_gmac[i], stats_shadow[i]);
2053 }
Aniruddha Paul1b170c22017-05-29 12:30:39 +05302054 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n");
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05302055 }
2056
2057 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\ngmac stats end\n\n");
2058 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
2059 kfree(lbuf);
2060 kfree(stats_shadow);
2061
2062 return bytes_read;
2063}
2064
Saurabh Misra09dddeb2014-09-30 16:38:07 -07002065/*
Bharath M Kumarcc666e92014-12-24 19:17:28 +05302066 * nss_stats_wifi_read()
Stephen Wangaed46332016-12-12 17:29:03 -08002067 * Read wifi statistics
Bharath M Kumarcc666e92014-12-24 19:17:28 +05302068 */
2069static ssize_t nss_stats_wifi_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
2070{
2071 uint32_t i, id;
2072
2073 /*
2074 * max output lines = ((#stats + start tag + one blank) * #WIFI RADIOs) + start/end tag + 3 blank
2075 */
2076 uint32_t max_output_lines = ((NSS_STATS_WIFI_MAX + 2) * NSS_MAX_WIFI_RADIO_INTERFACES) + 5;
2077 size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
2078 size_t size_wr = 0;
2079 ssize_t bytes_read = 0;
2080 uint64_t *stats_shadow;
2081
2082 char *lbuf = kzalloc(size_al, GFP_KERNEL);
2083 if (unlikely(lbuf == NULL)) {
2084 nss_warning("Could not allocate memory for local statistics buffer");
2085 return 0;
2086 }
2087
2088 stats_shadow = kzalloc(NSS_STATS_WIFI_MAX * 8, GFP_KERNEL);
2089 if (unlikely(stats_shadow == NULL)) {
2090 nss_warning("Could not allocate memory for local shadow buffer");
2091 kfree(lbuf);
2092 return 0;
2093 }
2094
2095 size_wr = scnprintf(lbuf, size_al, "wifi stats start:\n\n");
2096
2097 for (id = 0; id < NSS_MAX_WIFI_RADIO_INTERFACES; id++) {
2098 spin_lock_bh(&nss_top_main.stats_lock);
2099 for (i = 0; (i < NSS_STATS_WIFI_MAX); i++) {
2100 stats_shadow[i] = nss_top_main.stats_wifi[id][i];
2101 }
2102
2103 spin_unlock_bh(&nss_top_main.stats_lock);
2104
2105 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "WIFI ID: %d\n", id);
2106 for (i = 0; (i < NSS_STATS_WIFI_MAX); i++) {
2107 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2108 "%s = %llu\n", nss_stats_str_wifi[i], stats_shadow[i]);
2109 }
 2110		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n");
2111 }
2112
2113 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nwifi stats end\n\n");
2114 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
2115 kfree(lbuf);
2116 kfree(stats_shadow);
2117
2118 return bytes_read;
2119}
2120
2121/*
Aniruddha Paul1b170c22017-05-29 12:30:39 +05302122 * nss_stats_wifili_read()
2123 * Read wifili statistics
2124 */
2125static ssize_t nss_stats_wifili_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
2126{
2127 uint32_t i, j;
2128
2129 /*
 2130	 * max output lines = ((#stats + pdev ID line + eight blank lines) * #pdevs) + #WBM stats + start/end tag + 3 blank
 2131	 */
 2132	uint32_t max_output_lines = (((NSS_STATS_WIFILI_MAX + 9) * NSS_WIFILI_MAX_PDEV_NUM_MSG) +
2133 NSS_STATS_WIFILI_WBM_MAX + 5);
2134 size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
2135 size_t size_wr = 0;
2136 ssize_t bytes_read = 0;
2137 uint64_t *stats_shadow;
2138
2139 char *lbuf = kzalloc(size_al, GFP_KERNEL);
2140 if (unlikely(lbuf == NULL)) {
2141 nss_warning("Could not allocate memory for local statistics buffer");
2142 return 0;
2143 }
2144
2145 /*
 2146	 * Size the shadow buffer for the largest wifili stats group.
 2147	 *
 2148	 * NOTE: the txrx stats array is the biggest of all the wifili stats arrays.
2149 */
2150 stats_shadow = kzalloc(NSS_STATS_WIFILI_TXRX_MAX * 8, GFP_KERNEL);
2151 if (unlikely(stats_shadow == NULL)) {
2152 nss_warning("Could not allocate memory for local shadow buffer");
2153 kfree(lbuf);
2154 return 0;
2155 }
2156
2157 size_wr = scnprintf(lbuf, size_al, "wifili stats start:\n\n");
2158
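	/*
	 * For every pdev the following groups are printed in turn: TXRX, TCL
	 * ring, TX completion, REO ring, TX SW pool, TX extension SW pool,
	 * RXDMA pool and RXDMA ring. The WBM ring counters are global and are
	 * printed once, after the per-pdev loop.
	 */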
2159 for (i = 0; i < NSS_WIFILI_MAX_PDEV_NUM_MSG; i++) {
2160
2161 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "WIFILI ID: %d\n", i);
2162
2163 spin_lock_bh(&nss_top_main.stats_lock);
2164 for (j = 0; (j < NSS_STATS_WIFILI_TXRX_MAX); j++) {
2165 stats_shadow[j] = nss_top_main.stats_wifili.stats_txrx[i][j];
2166 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2167 "%s = %llu\n", nss_stats_str_wifili_txrx[j], stats_shadow[j]);
2168 }
2169
2170 spin_unlock_bh(&nss_top_main.stats_lock);
2171 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n");
2172
2173 /*
 2174		 * Filling TCL ring stats
2175 */
2176 spin_lock_bh(&nss_top_main.stats_lock);
2177 for (j = 0; (j < NSS_STATS_WIFILI_TCL_MAX); j++) {
2178 stats_shadow[j] = nss_top_main.stats_wifili.stats_tcl_ring[i][j];
2179 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2180 "%s = %llu\n", nss_stats_str_wifili_tcl[j], stats_shadow[j]);
2181 }
2182
2183 spin_unlock_bh(&nss_top_main.stats_lock);
2184 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n");
2185
2186 /*
 2187		 * Filling TX completion stats
2188 */
2189 spin_lock_bh(&nss_top_main.stats_lock);
2190 for (j = 0; (j < NSS_STATS_WIFILI_TX_DESC_FREE_MAX); j++) {
2191 stats_shadow[j] = nss_top_main.stats_wifili.stats_tx_comp[i][j];
2192 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2193 "%s = %llu\n", nss_stats_str_wifili_tx_comp[j], stats_shadow[j]);
2194 }
2195
2196 spin_unlock_bh(&nss_top_main.stats_lock);
2197 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n");
2198
2199 /*
 2200		 * Filling REO ring stats
2201 */
2202 spin_lock_bh(&nss_top_main.stats_lock);
2203 for (j = 0; (j < NSS_STATS_WIFILI_REO_MAX); j++) {
2204 stats_shadow[j] = nss_top_main.stats_wifili.stats_reo[i][j];
2205 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2206 "%s = %llu\n", nss_stats_str_wifili_reo[j], stats_shadow[j]);
2207 }
2208
2209 spin_unlock_bh(&nss_top_main.stats_lock);
2210 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n");
2211
2212 /*
 2213		 * Filling TX SW pool stats
2214 */
2215 spin_lock_bh(&nss_top_main.stats_lock);
2216 for (j = 0; (j < NSS_STATS_WIFILI_TX_DESC_MAX); j++) {
2217 stats_shadow[j] = nss_top_main.stats_wifili.stats_tx_desc[i][j];
2218 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2219 "%s = %llu\n", nss_stats_str_wifili_txsw_pool[j], stats_shadow[j]);
2220 }
2221
2222 spin_unlock_bh(&nss_top_main.stats_lock);
2223 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n");
2224
2225 /*
 2226		 * Filling TX extension SW pool stats
2227 */
2228 spin_lock_bh(&nss_top_main.stats_lock);
2229 for (j = 0; (j < NSS_STATS_WIFILI_EXT_TX_DESC_MAX); j++) {
2230 stats_shadow[j] = nss_top_main.stats_wifili.stats_ext_tx_desc[i][j];
2231 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2232 "%s = %llu\n", nss_stats_str_wifili_ext_txsw_pool[j], stats_shadow[j]);
2233 }
2234
2235 spin_unlock_bh(&nss_top_main.stats_lock);
2236 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n");
2237
2238 /*
 2239		 * Filling RXDMA pool stats
2240 */
2241 spin_lock_bh(&nss_top_main.stats_lock);
2242 for (j = 0; (j < NSS_STATS_WIFILI_RX_DESC_MAX); j++) {
2243 stats_shadow[j] = nss_top_main.stats_wifili.stats_rx_desc[i][j];
2244 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2245 "%s = %llu\n", nss_stats_str_wifili_rxdma_pool[j], stats_shadow[j]);
2246 }
2247
2248 spin_unlock_bh(&nss_top_main.stats_lock);
2249 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n");
2250
2251 /*
 2252		 * Filling RXDMA ring stats
2253 */
2254 spin_lock_bh(&nss_top_main.stats_lock);
2255 for (j = 0; (j < NSS_STATS_WIFILI_RXDMA_DESC_MAX); j++) {
2256 stats_shadow[j] = nss_top_main.stats_wifili.stats_rxdma[i][j];
2257 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2258 "%s = %llu\n", nss_stats_str_wifili_rxdma_ring[j], stats_shadow[j]);
2259 }
2260
2261 spin_unlock_bh(&nss_top_main.stats_lock);
2262 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n");
2263
2264 }
2265
2266 /*
 2267	 * Filling WBM ring stats
2268 */
2269 spin_lock_bh(&nss_top_main.stats_lock);
2270 for (j = 0; (j < NSS_STATS_WIFILI_WBM_MAX); j++) {
2271 stats_shadow[j] = nss_top_main.stats_wifili.stats_wbm[j];
2272 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2273 "%s = %llu\n", nss_stats_str_wifili_wbm[j], stats_shadow[j]);
2274 }
2275
2276 spin_unlock_bh(&nss_top_main.stats_lock);
2277 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nwifili stats end\n\n");
2278
2279 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
2280 kfree(lbuf);
2281 kfree(stats_shadow);
2282
2283 return bytes_read;
2284}
2285
2286/*
Tushar Mathurff8741b2015-12-02 20:28:59 +05302287 * nss_stats_dtls_read()
Thomas Wu71c5ecc2016-06-21 11:15:52 -07002288 * Read DTLS session statistics
Tushar Mathurff8741b2015-12-02 20:28:59 +05302289 */
2290static ssize_t nss_stats_dtls_read(struct file *fp, char __user *ubuf,
2291 size_t sz, loff_t *ppos)
2292{
2293 uint32_t max_output_lines = 2 + (NSS_MAX_DTLS_SESSIONS
2294 * (NSS_STATS_DTLS_SESSION_MAX + 2)) + 2;
2295 size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
2296 size_t size_wr = 0;
2297 ssize_t bytes_read = 0;
2298 struct net_device *dev;
2299 int id, i;
2300 struct nss_stats_dtls_session_debug *dtls_session_stats = NULL;
2301
2302 char *lbuf = kzalloc(size_al, GFP_KERNEL);
2303 if (unlikely(lbuf == NULL)) {
2304 nss_warning("Could not allocate memory for local statistics buffer");
2305 return 0;
2306 }
2307
2308 dtls_session_stats = kzalloc((sizeof(struct nss_stats_dtls_session_debug)
2309 * NSS_MAX_DTLS_SESSIONS), GFP_KERNEL);
2310 if (unlikely(dtls_session_stats == NULL)) {
2311 nss_warning("Could not allocate memory for populating DTLS stats");
2312 kfree(lbuf);
2313 return 0;
2314 }
2315
2316 /*
2317 * Get all stats
2318 */
2319 nss_dtls_session_debug_stats_get(dtls_session_stats);
2320
2321 /*
2322 * Session stats
2323 */
2324 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2325 "\nDTLS session stats start:\n\n");
2326
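	/*
	 * Valid sessions are assumed to be packed at the start of the array, so
	 * the scan stops at the first entry that is not marked valid. When the
	 * interface index resolves to a netdevice, dev_get_by_index() takes a
	 * reference that is dropped with dev_put() once the name has been
	 * printed.
	 */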
2327 for (id = 0; id < NSS_MAX_DTLS_SESSIONS; id++) {
2328 if (!dtls_session_stats[id].valid)
2329 break;
2330
2331 dev = dev_get_by_index(&init_net, dtls_session_stats[id].if_index);
2332 if (likely(dev)) {
2333 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2334 "%d. nss interface id=%d, netdevice=%s\n",
2335 id, dtls_session_stats[id].if_num,
2336 dev->name);
2337 dev_put(dev);
2338 } else {
2339 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2340 "%d. nss interface id=%d\n", id,
2341 dtls_session_stats[id].if_num);
2342 }
2343
2344 for (i = 0; i < NSS_STATS_DTLS_SESSION_MAX; i++) {
2345 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2346 "\t%s = %llu\n",
2347 nss_stats_str_dtls_session_debug_stats[i],
2348 dtls_session_stats[id].stats[i]);
2349 }
2350
2351 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n");
2352 }
2353
2354 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2355 "\nDTLS session stats end\n");
2356 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, size_wr);
2357
2358 kfree(dtls_session_stats);
2359 kfree(lbuf);
2360 return bytes_read;
2361}
2362
Tushar Mathurff8741b2015-12-02 20:28:59 +05302363/*
Thomas Wu71c5ecc2016-06-21 11:15:52 -07002364 * nss_stats_gre_tunnel_read()
2365 * Read GRE Tunnel session statistics
2366 */
2367static ssize_t nss_stats_gre_tunnel_read(struct file *fp, char __user *ubuf,
2368 size_t sz, loff_t *ppos)
2369{
2370 uint32_t max_output_lines = 2 + (NSS_MAX_GRE_TUNNEL_SESSIONS
2371 * (NSS_STATS_GRE_TUNNEL_SESSION_MAX + 2)) + 2;
2372 size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
2373 size_t size_wr = 0;
2374 ssize_t bytes_read = 0;
2375 struct net_device *dev;
2376 int id, i;
2377 struct nss_stats_gre_tunnel_session_debug *gre_tunnel_session_stats = NULL;
2378
2379 char *lbuf = kzalloc(size_al, GFP_KERNEL);
2380 if (unlikely(lbuf == NULL)) {
2381 nss_warning("Could not allocate memory for local statistics buffer");
2382 return 0;
2383 }
2384
2385 gre_tunnel_session_stats = kzalloc((sizeof(struct nss_stats_gre_tunnel_session_debug)
2386 * NSS_MAX_GRE_TUNNEL_SESSIONS), GFP_KERNEL);
2387 if (unlikely(gre_tunnel_session_stats == NULL)) {
2388 nss_warning("Could not allocate memory for populating GRE Tunnel stats");
2389 kfree(lbuf);
2390 return 0;
2391 }
2392
2393 /*
2394 * Get all stats
2395 */
2396 nss_gre_tunnel_session_debug_stats_get(gre_tunnel_session_stats);
2397
2398 /*
2399 * Session stats
2400 */
2401 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2402 "\nGRE Tunnel session stats start:\n\n");
2403
2404 for (id = 0; id < NSS_MAX_GRE_TUNNEL_SESSIONS; id++) {
2405 if (!gre_tunnel_session_stats[id].valid)
2406 break;
2407
2408 dev = dev_get_by_index(&init_net, gre_tunnel_session_stats[id].if_index);
2409 if (likely(dev)) {
2410 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2411 "%d. nss interface id=%d, netdevice=%s\n",
2412 id, gre_tunnel_session_stats[id].if_num,
2413 dev->name);
2414 dev_put(dev);
2415 } else {
2416 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2417 "%d. nss interface id=%d\n", id,
2418 gre_tunnel_session_stats[id].if_num);
2419 }
2420
2421 for (i = 0; i < NSS_STATS_GRE_TUNNEL_SESSION_MAX; i++) {
2422 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2423 "\t%s = %llu\n",
2424 nss_stats_str_gre_tunnel_session_debug_stats[i],
2425 gre_tunnel_session_stats[id].stats[i]);
2426 }
2427
2428 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n");
2429 }
2430
2431 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2432 "\nGRE Tunnel session stats end\n");
2433 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, size_wr);
2434
2435 kfree(gre_tunnel_session_stats);
2436 kfree(lbuf);
2437 return bytes_read;
2438}
2439
2440/*
ratheesh kannoth7af985d2015-06-24 15:08:40 +05302441 * nss_stats_l2tpv2_read()
2442 * Read l2tpv2 statistics
2443 */
2444static ssize_t nss_stats_l2tpv2_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
2445{
2446
2447 uint32_t max_output_lines = 2 /* header & footer for session stats */
 2448				+ NSS_MAX_L2TPV2_DYNAMIC_INTERFACES * (NSS_STATS_L2TPV2_SESSION_MAX + 2) /* session stats */
 2449				+ 2;
 2450	size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
2451 size_t size_wr = 0;
2452 ssize_t bytes_read = 0;
2453 struct net_device *dev;
2454 struct nss_stats_l2tpv2_session_debug l2tpv2_session_stats[NSS_MAX_L2TPV2_DYNAMIC_INTERFACES];
2455 int id, i;
2456
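	/*
	 * The per-session snapshot array is kept on the stack here (as in the
	 * map_t and pptp readers below), unlike the DTLS and GRE tunnel readers
	 * above which allocate theirs with kzalloc().
	 */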
2457 char *lbuf = kzalloc(size_al, GFP_KERNEL);
2458 if (unlikely(lbuf == NULL)) {
2459 nss_warning("Could not allocate memory for local statistics buffer");
2460 return 0;
2461 }
2462
2463 memset(&l2tpv2_session_stats, 0, sizeof(struct nss_stats_l2tpv2_session_debug) * NSS_MAX_L2TPV2_DYNAMIC_INTERFACES);
2464
2465 /*
2466 * Get all stats
2467 */
2468 nss_l2tpv2_session_debug_stats_get((void *)&l2tpv2_session_stats);
2469
2470 /*
2471 * Session stats
2472 */
2473 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nl2tp v2 session stats start:\n\n");
2474 for (id = 0; id < NSS_MAX_L2TPV2_DYNAMIC_INTERFACES; id++) {
2475
2476 if (!l2tpv2_session_stats[id].valid) {
2477 break;
2478 }
2479
2480 dev = dev_get_by_index(&init_net, l2tpv2_session_stats[id].if_index);
2481 if (likely(dev)) {
2482
2483 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "%d. nss interface id=%d, netdevice=%s\n", id,
2484 l2tpv2_session_stats[id].if_num, dev->name);
2485 dev_put(dev);
2486 } else {
2487 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "%d. nss interface id=%d\n", id,
2488 l2tpv2_session_stats[id].if_num);
2489 }
2490
2491 for (i = 0; i < NSS_STATS_L2TPV2_SESSION_MAX; i++) {
2492 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2493 "\t%s = %llu\n", nss_stats_str_l2tpv2_session_debug_stats[i],
2494 l2tpv2_session_stats[id].stats[i]);
2495 }
2496 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n");
2497 }
2498
2499 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nl2tp v2 session stats end\n");
2500 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, size_wr);
2501
2502 kfree(lbuf);
2503 return bytes_read;
2504}
2505
2506/*
ratheesh kannotha1245c32015-11-04 16:45:43 +05302507 * nss_stats_map_t_read()
2508 * Read map_t statistics
2509 */
2510static ssize_t nss_stats_map_t_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
2511{
2512
2513 uint32_t max_output_lines = 2 /* header & footer for instance stats */
 2514				+ NSS_MAX_MAP_T_DYNAMIC_INTERFACES * (NSS_STATS_MAP_T_MAX + 2) /* instance stats */
2515 + 2;
2516 size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
2517 size_t size_wr = 0;
2518 ssize_t bytes_read = 0;
2519 struct net_device *dev;
2520 struct nss_stats_map_t_instance_debug map_t_instance_stats[NSS_MAX_MAP_T_DYNAMIC_INTERFACES];
2521 int id, i;
2522
2523 char *lbuf = kzalloc(size_al, GFP_KERNEL);
2524 if (unlikely(!lbuf)) {
2525 nss_warning("Could not allocate memory for local statistics buffer");
2526 return 0;
2527 }
2528
2529 memset(&map_t_instance_stats, 0, sizeof(struct nss_stats_map_t_instance_debug) * NSS_MAX_MAP_T_DYNAMIC_INTERFACES);
2530
2531 /*
2532 * Get all stats
2533 */
2534 nss_map_t_instance_debug_stats_get((void *)&map_t_instance_stats);
2535
2536 /*
2537 * Session stats
2538 */
2539 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nmap_t instance stats start:\n\n");
2540 for (id = 0; id < NSS_MAX_MAP_T_DYNAMIC_INTERFACES; id++) {
2541
2542 if (!map_t_instance_stats[id].valid) {
2543 break;
2544 }
2545
2546 dev = dev_get_by_index(&init_net, map_t_instance_stats[id].if_index);
2547 if (likely(dev)) {
2548
2549 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "%d. nss interface id=%d, netdevice=%s\n", id,
2550 map_t_instance_stats[id].if_num, dev->name);
2551 dev_put(dev);
2552 } else {
2553 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "%d. nss interface id=%d\n", id,
2554 map_t_instance_stats[id].if_num);
2555 }
2556
2557 for (i = 0; i < NSS_STATS_MAP_T_MAX; i++) {
2558 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2559 "\t%s = %llu\n", nss_stats_str_map_t_instance_debug_stats[i],
2560 map_t_instance_stats[id].stats[i]);
2561 }
2562 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n");
2563 }
2564
2565 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nmap_t instance stats end\n");
2566 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, size_wr);
2567
2568 kfree(lbuf);
2569 return bytes_read;
2570}
2571
ratheesh kannotheb2a0a82017-05-04 09:20:17 +05302572 /*
2573 * nss_stats_gre_read()
2574 * Read GRE statistics
2575 */
2576static ssize_t nss_stats_gre_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
2577{
2578 uint32_t max_output_lines = 2 /* header & footer for base debug stats */
2579 + 2 /* header & footer for session debug stats */
2580 + NSS_STATS_GRE_BASE_DEBUG_MAX /* Base debug */
 2581				+ NSS_GRE_MAX_DEBUG_SESSION_STATS * (NSS_STATS_GRE_SESSION_DEBUG_MAX + 2) /* session stats */
2582 + 2;
2583 size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
2584 size_t size_wr = 0;
2585 ssize_t bytes_read = 0;
2586 struct net_device *dev;
2587 struct nss_stats_gre_session_debug *sstats;
2588 struct nss_stats_gre_base_debug *bstats;
2589 int id, i;
2590
2591 char *lbuf = kzalloc(size_al, GFP_KERNEL);
2592 if (unlikely(!lbuf)) {
2593 nss_warning("Could not allocate memory for local statistics buffer");
2594 return 0;
2595 }
2596
2597 bstats = kzalloc(sizeof(struct nss_stats_gre_base_debug), GFP_KERNEL);
2598 if (unlikely(!bstats)) {
2599 nss_warning("Could not allocate memory for base debug statistics buffer");
2600 kfree(lbuf);
2601 return 0;
2602 }
2603
2604 sstats = kzalloc(sizeof(struct nss_stats_gre_session_debug) * NSS_GRE_MAX_DEBUG_SESSION_STATS, GFP_KERNEL);
2605 if (unlikely(!sstats)) {
 2606		nss_warning("Could not allocate memory for session debug statistics buffer");
2607 kfree(lbuf);
2608 kfree(bstats);
2609 return 0;
2610 }
2611
2612 /*
2613 * Get all base stats
2614 */
2615 nss_gre_base_debug_stats_get((void *)bstats, sizeof(struct nss_stats_gre_base_debug));
2616 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\ngre Base stats start:\n\n");
2617 for (i = 0; i < NSS_STATS_GRE_BASE_DEBUG_MAX; i++) {
2618 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2619 "\t%s = %llu\n", nss_stats_str_gre_base_debug_stats[i],
2620 bstats->stats[i]);
2621 }
2622
 2623	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\ngre Base stats end\n\n");
2624
2625 /*
2626 * Get all session stats
2627 */
2628 nss_gre_session_debug_stats_get(sstats, sizeof(struct nss_stats_gre_session_debug) * NSS_GRE_MAX_DEBUG_SESSION_STATS);
2629 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\ngre Session stats start:\n\n");
2630
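	/*
	 * Session slots may be sparse here, so entries that are not valid are
	 * skipped with continue rather than ending the scan, unlike the other
	 * per-session readers in this file which break on the first invalid
	 * entry.
	 */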
2631 for (id = 0; id < NSS_GRE_MAX_DEBUG_SESSION_STATS; id++) {
2632
2633 if (!((sstats + id)->valid)) {
2634 continue;
2635 }
2636
2637 dev = dev_get_by_index(&init_net, (sstats + id)->if_index);
2638 if (likely(dev)) {
2639
2640 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "%d. nss interface id=%d, netdevice=%s\n", id,
2641 (sstats + id)->if_num, dev->name);
2642 dev_put(dev);
2643 } else {
2644 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "%d. nss interface id=%d\n", id,
2645 (sstats + id)->if_num);
2646 }
2647
2648 for (i = 0; i < NSS_STATS_GRE_SESSION_DEBUG_MAX; i++) {
2649 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2650 "\t%s = %llu\n", nss_stats_str_gre_session_debug_stats[i],
2651 (sstats + id)->stats[i]);
2652 }
2653 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n");
2654 }
2655
2656 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\ngre Session stats end\n");
2657 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, size_wr);
2658
2659 kfree(sstats);
2660 kfree(bstats);
2661 kfree(lbuf);
2662 return bytes_read;
2663}
2664
ratheesh kannotha1245c32015-11-04 16:45:43 +05302665/*
Amit Gupta316729b2016-08-12 12:21:15 +05302666 * nss_stats_ppe_conn_read()
2667 * Read ppe connection stats
2668 */
2669static ssize_t nss_stats_ppe_conn_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
2670{
2671
2672 int i;
2673 char *lbuf = NULL;
2674 size_t size_wr = 0;
2675 ssize_t bytes_read = 0;
2676 uint32_t ppe_stats[NSS_STATS_PPE_CONN_MAX];
2677 uint32_t max_output_lines = 2 /* header & footer for session stats */
2678 + NSS_STATS_PPE_CONN_MAX /* PPE flow counters */
2679 + 2;
2680 size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
2681
Amit Gupta316729b2016-08-12 12:21:15 +05302682 lbuf = kzalloc(size_al, GFP_KERNEL);
2683 if (unlikely(lbuf == NULL)) {
2684 nss_warning("Could not allocate memory for local statistics buffer");
2685 return 0;
2686 }
2687
2688 memset(&ppe_stats, 0, sizeof(uint32_t) * NSS_STATS_PPE_CONN_MAX);
2689
2690 /*
2691 * Get all stats
2692 */
2693 nss_ppe_stats_conn_get(ppe_stats);
2694
2695 /*
2696 * flow stats
2697 */
2698 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nppe flow counters start:\n\n");
2699
2700 for (i = 0; i < NSS_STATS_PPE_CONN_MAX; i++) {
2701 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2702 "\t%s = %u\n", nss_stats_str_ppe_conn[i],
2703 ppe_stats[i]);
2704 }
2705
2706 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n");
2707
2708 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nppe flow counters end\n");
2709 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, size_wr);
2710
2711 kfree(lbuf);
2712 return bytes_read;
2713}
2714
2715/*
2716 * nss_stats_ppe_l3_read()
2717 * Read ppe L3 debug stats
2718 */
2719static ssize_t nss_stats_ppe_l3_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
2720{
2721
2722 int i;
2723 char *lbuf = NULL;
2724 size_t size_wr = 0;
2725 ssize_t bytes_read = 0;
2726 uint32_t ppe_stats[NSS_STATS_PPE_L3_MAX];
 2727	uint32_t max_output_lines = 2 /* header & footer for L3 debug stats */
 2728				+ NSS_STATS_PPE_L3_MAX /* PPE L3 debug counters */
2729 + 2;
2730 size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
2731
2732 lbuf = kzalloc(size_al, GFP_KERNEL);
2733 if (unlikely(!lbuf)) {
2734 nss_warning("Could not allocate memory for local statistics buffer");
2735 return 0;
2736 }
2737
2738 memset(ppe_stats, 0, sizeof(uint32_t) * NSS_STATS_PPE_L3_MAX);
2739
2740 /*
2741 * Get all stats
2742 */
2743 nss_ppe_stats_l3_get(ppe_stats);
2744
2745 /*
 2746	 * L3 debug stats
2747 */
2748 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nppe l3 debug stats start:\n\n");
2749
2750 for (i = 0; i < NSS_STATS_PPE_L3_MAX; i++) {
2751 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2752 "\t%s = 0x%x\n", nss_stats_str_ppe_l3[i],
2753 ppe_stats[i]);
2754 }
2755
2756 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n");
2757
2758 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nppe l3 debug stats end\n");
2759 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, size_wr);
2760
2761 kfree(lbuf);
2762 return bytes_read;
2763}
2764
2765/*
2766 * nss_stats_ppe_code_read()
2767 * Read ppe CPU & DROP code
2768 */
2769static ssize_t nss_stats_ppe_code_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
2770{
2771
2772 int i;
2773 char *lbuf = NULL;
2774 size_t size_wr = 0;
2775 ssize_t bytes_read = 0;
2776 uint32_t ppe_stats[NSS_STATS_PPE_CODE_MAX];
 2777	uint32_t max_output_lines = 2 /* header & footer for CPU/drop code stats */
 2778				+ NSS_STATS_PPE_CODE_MAX /* PPE CPU and drop code counters */
2779 + 2;
2780 size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
2781
2782 lbuf = kzalloc(size_al, GFP_KERNEL);
2783 if (unlikely(!lbuf)) {
2784 nss_warning("Could not allocate memory for local statistics buffer");
2785 return 0;
2786 }
2787
2788 memset(ppe_stats, 0, sizeof(uint32_t) * NSS_STATS_PPE_CODE_MAX);
2789
2790 /*
2791 * Get all stats
2792 */
2793 nss_ppe_stats_code_get(ppe_stats);
2794
2795 /*
 2796	 * CPU and drop code stats
2797 */
2798 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nppe session stats start:\n\n");
2799
2800 for (i = 0; i < NSS_STATS_PPE_CODE_MAX; i++) {
2801 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2802 "\t%s = %u\n", nss_stats_str_ppe_code[i],
2803 ppe_stats[i]);
2804 }
2805
2806 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n");
2807
2808 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nppe session stats end\n");
2809 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, size_wr);
2810
2811 kfree(lbuf);
2812 return bytes_read;
2813}
2814
2815/*
Shyam Sunder66e889d2015-11-02 15:31:20 +05302816 * nss_stats_pptp_read()
2817 * Read pptp statistics
2818 */
2819static ssize_t nss_stats_pptp_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
2820{
2821
2822 uint32_t max_output_lines = 2 /* header & footer for session stats */
 2823				+ NSS_MAX_PPTP_DYNAMIC_INTERFACES * (NSS_STATS_PPTP_SESSION_MAX + 2) /* session stats */
 2824				+ 2;
 2825	size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
2826 size_t size_wr = 0;
2827 ssize_t bytes_read = 0;
2828 struct net_device *dev;
2829 struct nss_stats_pptp_session_debug pptp_session_stats[NSS_MAX_PPTP_DYNAMIC_INTERFACES];
2830 int id, i;
2831
2832 char *lbuf = kzalloc(size_al, GFP_KERNEL);
2833 if (unlikely(lbuf == NULL)) {
2834 nss_warning("Could not allocate memory for local statistics buffer");
2835 return 0;
2836 }
2837
2838 memset(&pptp_session_stats, 0, sizeof(struct nss_stats_pptp_session_debug) * NSS_MAX_PPTP_DYNAMIC_INTERFACES);
2839
2840 /*
2841 * Get all stats
2842 */
2843 nss_pptp_session_debug_stats_get((void *)&pptp_session_stats);
2844
2845 /*
2846 * Session stats
2847 */
2848 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\npptp session stats start:\n\n");
2849 for (id = 0; id < NSS_MAX_PPTP_DYNAMIC_INTERFACES; id++) {
2850
2851 if (!pptp_session_stats[id].valid) {
2852 break;
2853 }
2854
2855 dev = dev_get_by_index(&init_net, pptp_session_stats[id].if_index);
2856 if (likely(dev)) {
2857
2858 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "%d. nss interface id=%d, netdevice=%s\n", id,
2859 pptp_session_stats[id].if_num, dev->name);
2860 dev_put(dev);
2861 } else {
2862 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "%d. nss interface id=%d\n", id,
2863 pptp_session_stats[id].if_num);
2864 }
2865
2866 for (i = 0; i < NSS_STATS_PPTP_SESSION_MAX; i++) {
2867 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2868 "\t%s = %llu\n", nss_stats_str_pptp_session_debug_stats[i],
2869 pptp_session_stats[id].stats[i]);
2870 }
2871 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n");
2872 }
2873
2874 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\npptp session stats end\n");
2875 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, size_wr);
2876
2877 kfree(lbuf);
2878 return bytes_read;
2879}
2880
2881/*
Ankit Dhanuka14999992014-11-12 15:35:11 +05302882 * nss_stats_sjack_read()
2883 * Read SJACK stats
2884 */
2885static ssize_t nss_stats_sjack_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
2886{
2887 int32_t i;
2888 /*
2889 * max output lines = #stats + start tag line + end tag line + three blank lines
2890 */
2891 uint32_t max_output_lines = NSS_STATS_NODE_MAX + 5;
2892 size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
2893 size_t size_wr = 0;
2894 ssize_t bytes_read = 0;
2895 uint64_t *stats_shadow;
2896
2897 char *lbuf = kzalloc(size_al, GFP_KERNEL);
2898 if (unlikely(lbuf == NULL)) {
2899 nss_warning("Could not allocate memory for local statistics buffer");
2900 return 0;
2901 }
2902
2903 stats_shadow = kzalloc(NSS_STATS_NODE_MAX * 8, GFP_KERNEL);
2904 if (unlikely(stats_shadow == NULL)) {
2905 nss_warning("Could not allocate memory for local shadow buffer");
2906 kfree(lbuf);
2907 return 0;
2908 }
2909
2910 size_wr = scnprintf(lbuf, size_al, "sjack stats start:\n\n");
2911
2912 /*
2913 * Common node stats
2914 */
2915 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "common node stats:\n\n");
2916 spin_lock_bh(&nss_top_main.stats_lock);
2917 for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
2918 stats_shadow[i] = nss_top_main.stats_node[NSS_SJACK_INTERFACE][i];
2919 }
2920
2921 spin_unlock_bh(&nss_top_main.stats_lock);
2922
2923 for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
2924 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2925 "%s = %llu\n", nss_stats_str_node[i], stats_shadow[i]);
2926 }
2927
2928 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nsjack stats end\n\n");
2929
2930 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
2931 kfree(lbuf);
2932 kfree(stats_shadow);
2933
2934 return bytes_read;
2935}
2936
2937/*
Stephen Wang9779d952015-10-28 11:39:07 -07002938 * nss_stats_portid_read()
2939 * Read PortID stats
2940 */
2941static ssize_t nss_stats_portid_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
2942{
2943 int32_t i;
2944 /*
2945 * max output lines = #stats + start tag line + end tag line + three blank lines
2946 */
2947 uint32_t max_output_lines = NSS_STATS_NODE_MAX + NSS_STATS_PORTID_MAX + 5;
2948 size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
2949 size_t size_wr = 0;
2950 ssize_t bytes_read = 0;
2951 uint64_t *stats_shadow;
2952
2953 char *lbuf = kzalloc(size_al, GFP_KERNEL);
2954 if (unlikely(lbuf == NULL)) {
2955 nss_warning("Could not allocate memory for local statistics buffer");
2956 return 0;
2957 }
2958
2959 stats_shadow = kzalloc(NSS_STATS_NODE_MAX * 8, GFP_KERNEL);
2960 if (unlikely(stats_shadow == NULL)) {
2961 nss_warning("Could not allocate memory for local shadow buffer");
2962 kfree(lbuf);
2963 return 0;
2964 }
2965
2966 size_wr = scnprintf(lbuf, size_al, "portid stats start:\n\n");
2967
2968 /*
2969 * Common node stats
2970 */
2971 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "common node stats:\n\n");
2972 spin_lock_bh(&nss_top_main.stats_lock);
2973 for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
2974 stats_shadow[i] = nss_top_main.stats_node[NSS_PORTID_INTERFACE][i];
2975 }
2976
2977 spin_unlock_bh(&nss_top_main.stats_lock);
2978
2979 for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
2980 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2981 "%s = %llu\n", nss_stats_str_node[i], stats_shadow[i]);
2982 }
2983
2984 /*
2985 * PortID node stats
2986 */
2987 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nportid node stats:\n\n");
2988
2989 spin_lock_bh(&nss_top_main.stats_lock);
2990 for (i = 0; (i < NSS_STATS_PORTID_MAX); i++) {
2991 stats_shadow[i] = nss_top_main.stats_portid[i];
2992 }
2993
2994 spin_unlock_bh(&nss_top_main.stats_lock);
2995
2996 for (i = 0; (i < NSS_STATS_PORTID_MAX); i++) {
2997 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2998 "%s = %llu\n", nss_stats_str_portid[i], stats_shadow[i]);
2999 }
3000
3001 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nportid stats end\n\n");
3002
3003 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
3004 kfree(lbuf);
3005 kfree(stats_shadow);
3006
3007 return bytes_read;
3008}
3009
3010/*
Thomas Wu71c5ecc2016-06-21 11:15:52 -07003011 * nss_stats_capwap_encap()
3012 * Make a row for CAPWAP encap stats.
Saurabh Misra09dddeb2014-09-30 16:38:07 -07003013 */
3014static ssize_t nss_stats_capwap_encap(char *line, int len, int i, struct nss_capwap_tunnel_stats *s)
3015{
Saurabh Misra3f66e872015-04-03 11:30:42 -07003016 char *header[] = { "packets", "bytes", "fragments", "drop_ref", "drop_ver", "drop_unalign",
3017 "drop_hroom", "drop_dtls", "drop_nwireless", "drop_qfull", "drop_memfail", "unknown" };
Saurabh Misra09dddeb2014-09-30 16:38:07 -07003018 uint64_t tcnt = 0;
3019
3020 switch (i) {
3021 case 0:
3022 tcnt = s->pnode_stats.tx_packets;
3023 break;
3024 case 1:
3025 tcnt = s->pnode_stats.tx_bytes;
3026 break;
3027 case 2:
Saurabh Misra09dddeb2014-09-30 16:38:07 -07003028 tcnt = s->tx_segments;
3029 break;
Saurabh Misra3f66e872015-04-03 11:30:42 -07003030 case 3:
3031 tcnt = s->tx_dropped_sg_ref;
3032 break;
Saurabh Misra09dddeb2014-09-30 16:38:07 -07003033 case 4:
Saurabh Misra3f66e872015-04-03 11:30:42 -07003034 tcnt = s->tx_dropped_ver_mis;
Saurabh Misra09dddeb2014-09-30 16:38:07 -07003035 break;
3036 case 5:
Saurabh Misra3f66e872015-04-03 11:30:42 -07003037 tcnt = s->tx_dropped_unalign;
3038 break;
3039 case 6:
3040 tcnt = s->tx_dropped_hroom;
3041 break;
3042 case 7:
3043 tcnt = s->tx_dropped_dtls;
3044 break;
3045 case 8:
3046 tcnt = s->tx_dropped_nwireless;
3047 break;
3048 case 9:
3049 tcnt = s->tx_queue_full_drops;
3050 break;
3051 case 10:
Saurabh Misra09dddeb2014-09-30 16:38:07 -07003052 tcnt = s->tx_mem_failure_drops;
3053 break;
3054 default:
Saurabh Misra3f66e872015-04-03 11:30:42 -07003055 return 0;
Saurabh Misra09dddeb2014-09-30 16:38:07 -07003056 }
3057
Saurabh Misra3f66e872015-04-03 11:30:42 -07003058 return (snprintf(line, len, "%s = %llu\n", header[i], tcnt));
Saurabh Misra09dddeb2014-09-30 16:38:07 -07003059}
3060
3061/*
Thomas Wu71c5ecc2016-06-21 11:15:52 -07003062 * nss_stats_capwap_decap()
3063 * Make a row for CAPWAP decap stats.
Saurabh Misra09dddeb2014-09-30 16:38:07 -07003064 */
3065static ssize_t nss_stats_capwap_decap(char *line, int len, int i, struct nss_capwap_tunnel_stats *s)
3066{
Saurabh Misra3f66e872015-04-03 11:30:42 -07003067 char *header[] = { "packets", "bytes", "DTLS_pkts", "fragments", "rx_dropped", "drop_oversize",
3068 "drop_frag_timeout", "drop_frag_dup", "drop_frag_gap", "drop_qfull", "drop_memfail",
3069 "drop_csum", "drop_malformed", "unknown" };
Saurabh Misra09dddeb2014-09-30 16:38:07 -07003070 uint64_t tcnt = 0;
3071
Aniruddha Paul1b170c22017-05-29 12:30:39 +05303072 switch (i) {
Saurabh Misra09dddeb2014-09-30 16:38:07 -07003073 case 0:
3074 tcnt = s->pnode_stats.rx_packets;
3075 break;
3076 case 1:
3077 tcnt = s->pnode_stats.rx_bytes;
3078 break;
3079 case 2:
Saurabh Misra09dddeb2014-09-30 16:38:07 -07003080 tcnt = s->dtls_pkts;
3081 break;
Saurabh Misra3f66e872015-04-03 11:30:42 -07003082 case 3:
Saurabh Misra09dddeb2014-09-30 16:38:07 -07003083 tcnt = s->rx_segments;
3084 break;
Saurabh Misra3f66e872015-04-03 11:30:42 -07003085 case 4:
3086 tcnt = s->pnode_stats.rx_dropped;
3087 break;
Saurabh Misra09dddeb2014-09-30 16:38:07 -07003088 case 5:
Saurabh Misra3f66e872015-04-03 11:30:42 -07003089 tcnt = s->rx_oversize_drops;
Saurabh Misra09dddeb2014-09-30 16:38:07 -07003090 break;
3091 case 6:
Saurabh Misra3f66e872015-04-03 11:30:42 -07003092 tcnt = s->rx_frag_timeout_drops;
Saurabh Misra09dddeb2014-09-30 16:38:07 -07003093 break;
3094 case 7:
3095 tcnt = s->rx_dup_frag;
3096 break;
3097 case 8:
Saurabh Misra3f66e872015-04-03 11:30:42 -07003098 tcnt = s->rx_frag_gap_drops;
3099 break;
Saurabh Misra09dddeb2014-09-30 16:38:07 -07003100 case 9:
Saurabh Misra3f66e872015-04-03 11:30:42 -07003101 tcnt = s->rx_queue_full_drops;
3102 return (snprintf(line, len, "%s = %llu (n2h = %llu)\n", header[i], tcnt, s->rx_n2h_queue_full_drops));
3103 case 10:
Saurabh Misra09dddeb2014-09-30 16:38:07 -07003104 tcnt = s->rx_mem_failure_drops;
3105 break;
Saurabh Misra3f66e872015-04-03 11:30:42 -07003106 case 11:
3107 tcnt = s->rx_csum_drops;
Saurabh Misra09dddeb2014-09-30 16:38:07 -07003108 break;
Saurabh Misra3f66e872015-04-03 11:30:42 -07003109 case 12:
3110 tcnt = s->rx_malformed;
3111 break;
3112 default:
3113 return 0;
Saurabh Misra09dddeb2014-09-30 16:38:07 -07003114 }
3115
Saurabh Misra3f66e872015-04-03 11:30:42 -07003116 return (snprintf(line, len, "%s = %llu\n", header[i], tcnt));
Saurabh Misra09dddeb2014-09-30 16:38:07 -07003117}
3118
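/*
 * The CAPWAP reader below does not build the whole report in a kernel buffer.
 * It walks the dynamic interfaces and emits one line at a time through the
 * encap/decap helpers above, copying each line straight to userspace with
 * copy_to_user(); a helper returning 0 marks the end of its table. The
 * interface number reached is saved back into the per-file nss_stats_data so
 * that a subsequent read can continue from where this one stopped.
 */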
3119/*
3120 * nss_stats_capwap_read()
3121 * Read CAPWAP stats
3122 */
3123static ssize_t nss_stats_capwap_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos, uint16_t type)
3124{
3125 struct nss_stats_data *data = fp->private_data;
3126 ssize_t bytes_read = 0;
3127 struct nss_capwap_tunnel_stats stats;
3128 size_t bytes;
3129 char line[80];
Saurabh Misra3f66e872015-04-03 11:30:42 -07003130 int start;
Saurabh Misra09dddeb2014-09-30 16:38:07 -07003131 uint32_t if_num = NSS_DYNAMIC_IF_START;
3132 uint32_t max_if_num = NSS_DYNAMIC_IF_START + NSS_MAX_DYNAMIC_INTERFACES;
3133
3134 if (data) {
3135 if_num = data->if_num;
3136 }
3137
3138 /*
 3139	 * If we have already covered all possible CAPWAP tunnel interfaces, stop.
3140 */
3141 if (if_num > max_if_num) {
3142 return 0;
3143 }
3144
3145 for (; if_num <= max_if_num; if_num++) {
3146 bool isthere;
3147
3148 if (nss_is_dynamic_interface(if_num) == false) {
3149 continue;
3150 }
3151
3152 if (nss_dynamic_interface_get_type(if_num) != NSS_DYNAMIC_INTERFACE_TYPE_CAPWAP) {
3153 continue;
3154 }
3155
3156 /*
 3157		 * If the CAPWAP tunnel does not exist, isthere will be false.
3158 */
3159 isthere = nss_capwap_get_stats(if_num, &stats);
3160 if (!isthere) {
3161 continue;
3162 }
3163
Saurabh Misra3f66e872015-04-03 11:30:42 -07003164 bytes = snprintf(line, sizeof(line), "----if_num : %2d----\n", if_num);
Saurabh Misra09dddeb2014-09-30 16:38:07 -07003165 if ((bytes_read + bytes) > sz) {
3166 break;
3167 }
3168
3169 if (copy_to_user(ubuf + bytes_read, line, bytes) != 0) {
3170 bytes_read = -EFAULT;
3171 goto fail;
3172 }
3173 bytes_read += bytes;
3174 start = 0;
Saurabh Misra3f66e872015-04-03 11:30:42 -07003175 while (bytes_read < sz) {
Saurabh Misra09dddeb2014-09-30 16:38:07 -07003176 if (type == 1) {
3177 bytes = nss_stats_capwap_encap(line, sizeof(line), start, &stats);
3178 } else {
3179 bytes = nss_stats_capwap_decap(line, sizeof(line), start, &stats);
3180 }
3181
			/*
			 * If we don't have any more lines in decap/encap.
			 */
3185 if (bytes == 0) {
3186 break;
3187 }
3188
Saurabh Misra09dddeb2014-09-30 16:38:07 -07003189 if ((bytes_read + bytes) > sz)
3190 break;
3191
3192 if (copy_to_user(ubuf + bytes_read, line, bytes) != 0) {
3193 bytes_read = -EFAULT;
3194 goto fail;
3195 }
3196
3197 bytes_read += bytes;
3198 start++;
3199 }
3200 }
3201
3202 if (bytes_read > 0) {
3203 *ppos = bytes_read;
3204 }
3205
3206 if (data) {
3207 data->if_num = if_num;
3208 }
3209fail:
3210 return bytes_read;
3211}
3212
3213/*
3214 * nss_stats_capwap_decap_read()
3215 * Read CAPWAP decap stats
3216 */
3217static ssize_t nss_stats_capwap_decap_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
3218{
3219 return (nss_stats_capwap_read(fp, ubuf, sz, ppos, 0));
3220}
3221
3222/*
3223 * nss_stats_capwap_encap_read()
3224 * Read CAPWAP encap stats
3225 */
3226static ssize_t nss_stats_capwap_encap_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
3227{
3228 return (nss_stats_capwap_read(fp, ubuf, sz, ppos, 1));
3229}
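
/*
 * Usage sketch: with debugfs mounted at its conventional location (the exact
 * mount point is system dependent), the per-tunnel CAPWAP counters exposed by
 * the two handlers above can be dumped with, for example:
 *
 *	cat /sys/kernel/debug/qca-nss-drv/stats/capwap_encap
 *	cat /sys/kernel/debug/qca-nss-drv/stats/capwap_decap
 *
 * Each read walks the dynamic interfaces of type CAPWAP and prints one block
 * of counters per active tunnel.
 */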
3230
/*
 * nss_stats_gre_redir()
 *	Make a row for GRE_REDIR stats.
 */
3235static ssize_t nss_stats_gre_redir(char *line, int len, int i, struct nss_gre_redir_tunnel_stats *s)
3236{
	char *header[] = { "TX Packets", "TX Bytes", "TX Drops", "RX Packets", "RX Bytes", "RX Drops" };
3238 uint64_t tcnt = 0;
3239
3240 switch (i) {
3241 case 0:
3242 tcnt = s->node_stats.tx_packets;
3243 break;
3244 case 1:
3245 tcnt = s->node_stats.tx_bytes;
3246 break;
3247 case 2:
3248 tcnt = s->tx_dropped;
3249 break;
3250 case 3:
3251 tcnt = s->node_stats.rx_packets;
3252 break;
3253 case 4:
3254 tcnt = s->node_stats.rx_bytes;
3255 break;
3256 case 5:
3257 tcnt = s->node_stats.rx_dropped;
3258 break;
3259 default:
Radha krishna Simha Jigurudf53f022015-11-09 12:31:26 +05303260 return 0;
Ankit Dhanuka6228ebd2014-11-05 17:26:01 +05303261 }
3262
3263 return (snprintf(line, len, "%s = %llu\n", header[i], tcnt));
3264}
3265
/*
 * nss_stats_gre_redir_read()
 *	Read gre_redir tunnel stats.
 */
3270static ssize_t nss_stats_gre_redir_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
3271{
3272 struct nss_stats_data *data = fp->private_data;
3273 ssize_t bytes_read = 0;
3274 struct nss_gre_redir_tunnel_stats stats;
3275 size_t bytes;
3276 char line[80];
3277 int start, end;
3278 int index = 0;
3279
3280 if (data) {
3281 index = data->index;
3282 }
3283
	/*
	 * If we are done accommodating all the GRE_REDIR tunnels, there is nothing more to report.
	 */
3287 if (index >= NSS_GRE_REDIR_MAX_INTERFACES) {
3288 return 0;
3289 }
3290
3291 for (; index < NSS_GRE_REDIR_MAX_INTERFACES; index++) {
3292 bool isthere;
3293
		/*
		 * If the gre_redir tunnel does not exist, isthere will be false.
		 */
3297 isthere = nss_gre_redir_get_stats(index, &stats);
3298 if (!isthere) {
3299 continue;
3300 }
3301
3302 bytes = snprintf(line, sizeof(line), "\nTunnel if_num: %2d\n", stats.if_num);
3303 if ((bytes_read + bytes) > sz) {
3304 break;
3305 }
3306
3307 if (copy_to_user(ubuf + bytes_read, line, bytes) != 0) {
3308 bytes_read = -EFAULT;
3309 goto fail;
3310 }
3311 bytes_read += bytes;
3312 start = 0;
3313 end = 6;
3314 while (bytes_read < sz && start < end) {
3315 bytes = nss_stats_gre_redir(line, sizeof(line), start, &stats);
3316
3317 if ((bytes_read + bytes) > sz)
3318 break;
3319
3320 if (copy_to_user(ubuf + bytes_read, line, bytes) != 0) {
3321 bytes_read = -EFAULT;
3322 goto fail;
3323 }
3324
3325 bytes_read += bytes;
3326 start++;
3327 }
3328 }
3329
3330 if (bytes_read > 0) {
3331 *ppos = bytes_read;
3332 }
3333
3334 if (data) {
3335 data->index = index;
3336 }
3337
3338fail:
3339 return bytes_read;
3340}
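
/*
 * Note: data->index is written back before returning, so a subsequent read()
 * on the same file descriptor resumes from the first tunnel that has not yet
 * been reported instead of starting again from index 0. The CAPWAP and
 * dynamic-interface readers use the same pattern with data->if_num.
 */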
3341
/*
 * nss_stats_wifi_if_read()
 *	Read wifi_if statistics
 */
3346static ssize_t nss_stats_wifi_if_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
3347{
3348 struct nss_stats_data *data = fp->private_data;
3349 int32_t if_num = NSS_DYNAMIC_IF_START;
3350 int32_t max_if_num = if_num + NSS_MAX_DYNAMIC_INTERFACES;
3351 size_t bytes = 0;
3352 ssize_t bytes_read = 0;
3353 char line[80];
3354 int start, end;
3355
3356 if (data) {
3357 if_num = data->if_num;
3358 }
3359
3360 if (if_num > max_if_num) {
3361 return 0;
3362 }
3363
3364 for (; if_num < max_if_num; if_num++) {
3365 if (nss_dynamic_interface_get_type(if_num) != NSS_DYNAMIC_INTERFACE_TYPE_WIFI)
3366 continue;
3367
3368 bytes = scnprintf(line, sizeof(line), "if_num %d stats start:\n\n", if_num);
3369 if ((bytes_read + bytes) > sz)
3370 break;
3371
3372 if (copy_to_user(ubuf + bytes_read, line, bytes) != 0) {
3373 bytes_read = -EFAULT;
3374 goto end;
3375 }
3376
3377 bytes_read += bytes;
3378
3379 start = 0;
3380 end = 7;
3381 while (bytes_read < sz && start < end) {
3382 bytes = nss_wifi_if_copy_stats(if_num, start, line);
3383 if (!bytes)
3384 break;
3385
3386 if ((bytes_read + bytes) > sz)
3387 break;
3388
3389 if (copy_to_user(ubuf + bytes_read, line, bytes) != 0) {
3390 bytes_read = -EFAULT;
3391 goto end;
3392 }
3393
3394 bytes_read += bytes;
3395 start++;
3396 }
3397
3398 bytes = scnprintf(line, sizeof(line), "if_num %d stats end:\n\n", if_num);
3399 if (bytes_read > (sz - bytes))
3400 break;
3401
3402 if (copy_to_user(ubuf + bytes_read, line, bytes) != 0) {
3403 bytes_read = -EFAULT;
3404 goto end;
3405 }
3406
3407 bytes_read += bytes;
3408 }
3409
3410 if (bytes_read > 0) {
3411 *ppos = bytes_read;
3412 }
3413
3414 if (data) {
3415 data->if_num = if_num;
3416 }
3417
3418end:
3419 return bytes_read;
3420}
3421
/*
 * nss_stats_virt_if_read()
 *	Read virt_if statistics
 */
3426static ssize_t nss_stats_virt_if_read(struct file *fp, char __user *ubuf,
3427 size_t sz, loff_t *ppos)
3428{
3429 struct nss_stats_data *data = fp->private_data;
3430 int32_t if_num = NSS_DYNAMIC_IF_START;
3431 int32_t max_if_num = if_num + NSS_MAX_DYNAMIC_INTERFACES;
3432 size_t bytes = 0;
3433 ssize_t bytes_read = 0;
3434 char line[80];
3435 int start, end;
3436
3437 if (data) {
3438 if_num = data->if_num;
3439 }
3440
3441 if (if_num > max_if_num) {
3442 return 0;
3443 }
3444
3445 for (; if_num < max_if_num; if_num++) {
3446 if (nss_dynamic_interface_get_type(if_num) != NSS_DYNAMIC_INTERFACE_TYPE_802_3_REDIR)
3447 continue;
3448
3449 bytes = scnprintf(line, sizeof(line), "if_num %d stats start:\n\n", if_num);
3450 if ((bytes_read + bytes) > sz)
3451 break;
3452
3453 if (copy_to_user(ubuf + bytes_read, line, bytes) != 0) {
3454 bytes_read = -EFAULT;
3455 goto end;
3456 }
3457
3458 bytes_read += bytes;
3459
3460 start = 0;
3461 end = 7;
3462 while (bytes_read < sz && start < end) {
3463 bytes = nss_virt_if_copy_stats(if_num, start, line);
3464 if (!bytes)
3465 break;
3466
3467 if ((bytes_read + bytes) > sz)
3468 break;
3469
3470 if (copy_to_user(ubuf + bytes_read, line, bytes) != 0) {
3471 bytes_read = -EFAULT;
3472 goto end;
3473 }
3474
3475 bytes_read += bytes;
3476 start++;
3477 }
3478
3479 bytes = scnprintf(line, sizeof(line), "if_num %d stats end:\n\n", if_num);
3480 if (bytes_read > (sz - bytes))
3481 break;
3482
3483 if (copy_to_user(ubuf + bytes_read, line, bytes) != 0) {
3484 bytes_read = -EFAULT;
3485 goto end;
3486 }
3487
3488 bytes_read += bytes;
3489 }
3490
3491 if (bytes_read > 0) {
3492 *ppos = bytes_read;
3493 }
3494
3495 if (data) {
3496 data->if_num = if_num;
3497 }
3498
3499end:
3500 return bytes_read;
3501}
3502
/*
 * nss_stats_tx_rx_virt_if_read()
 *	Read tx_rx_virt_if statistics
 */
3507static ssize_t nss_stats_tx_rx_virt_if_read(struct file *fp, char __user *ubuf,
3508 size_t sz, loff_t *ppos)
3509{
3510 struct nss_stats_data *data = fp->private_data;
3511 int32_t if_num = NSS_DYNAMIC_IF_START;
3512 int32_t max_if_num = if_num + NSS_MAX_DYNAMIC_INTERFACES;
3513 size_t bytes = 0;
3514 ssize_t bytes_read = 0;
3515 char line[80];
3516 int start, end;
3517
3518 if (data) {
3519 if_num = data->if_num;
3520 }
3521
3522 if (if_num > max_if_num) {
3523 return 0;
3524 }
3525
3526 for (; if_num < max_if_num; if_num++) {
3527 if (nss_dynamic_interface_get_type(if_num) != NSS_DYNAMIC_INTERFACE_TYPE_VIRTIF_DEPRECATED)
3528 continue;
3529
3530 bytes = scnprintf(line, sizeof(line), "if_num %d stats start:\n\n", if_num);
3531 if ((bytes_read + bytes) > sz)
3532 break;
3533
3534 if (copy_to_user(ubuf + bytes_read, line, bytes) != 0) {
3535 bytes_read = -EFAULT;
3536 goto end;
3537 }
3538
3539 bytes_read += bytes;
3540
3541 start = 0;
3542 end = 7;
3543 while (bytes_read < sz && start < end) {
3544 bytes = nss_tx_rx_virt_if_copy_stats(if_num, start, line);
3545 if (!bytes)
3546 break;
3547
3548 if ((bytes_read + bytes) > sz)
3549 break;
3550
3551 if (copy_to_user(ubuf + bytes_read, line, bytes) != 0) {
3552 bytes_read = -EFAULT;
3553 goto end;
3554 }
3555
3556 bytes_read += bytes;
3557 start++;
3558 }
3559
3560 bytes = scnprintf(line, sizeof(line), "if_num %d stats end:\n\n", if_num);
3561 if (bytes_read > (sz - bytes))
3562 break;
3563
3564 if (copy_to_user(ubuf + bytes_read, line, bytes) != 0) {
3565 bytes_read = -EFAULT;
3566 goto end;
3567 }
3568
3569 bytes_read += bytes;
3570 }
3571
3572 if (bytes_read > 0) {
3573 *ppos = bytes_read;
3574 }
3575
3576 if (data) {
3577 data->if_num = if_num;
3578 }
3579
3580end:
3581 return bytes_read;
3582}
3583
/*
 * nss_stats_trustsec_tx_read()
 *	Read trustsec_tx stats
 */
3588static ssize_t nss_stats_trustsec_tx_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
3589{
3590 int32_t i;
3591
	/*
	 * max output lines = #common node stats + #trustsec_tx stats + start/end tag lines + blank lines
	 */
3595 uint32_t max_output_lines = (NSS_STATS_NODE_MAX + 2) + (NSS_STATS_TRUSTSEC_TX_MAX + 3) + 5;
3596 size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
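	/*
	 * Worked example of the sizing above: every formatted line is budgeted
	 * at NSS_STATS_MAX_STR_LENGTH bytes and max_output_lines counts the
	 * common node counters, the trustsec_tx counters, their start/end tag
	 * lines and the surrounding blank lines, so size_al is an upper bound
	 * on the output and the scnprintf() calls below cannot overrun lbuf.
	 */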
3597 size_t size_wr = 0;
3598 ssize_t bytes_read = 0;
3599 uint64_t *stats_shadow;
3600
3601 char *lbuf = kzalloc(size_al, GFP_KERNEL);
3602 if (unlikely(lbuf == NULL)) {
3603 nss_warning("Could not allocate memory for local statistics buffer");
3604 return 0;
3605 }
3606
3607 stats_shadow = kzalloc(NSS_STATS_NODE_MAX * 8, GFP_KERNEL);
3608 if (unlikely(stats_shadow == NULL)) {
3609 nss_warning("Could not allocate memory for local shadow buffer");
3610 kfree(lbuf);
3611 return 0;
3612 }
3613
3614 size_wr = scnprintf(lbuf, size_al, "trustsec_tx stats start:\n\n");
3615
3616 /*
3617 * Common node stats
3618 */
3619 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "common node stats:\n\n");
3620 spin_lock_bh(&nss_top_main.stats_lock);
3621 for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
3622 stats_shadow[i] = nss_top_main.stats_node[NSS_TRUSTSEC_TX_INTERFACE][i];
3623 }
3624
3625 spin_unlock_bh(&nss_top_main.stats_lock);
3626
3627 for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
3628 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
3629 "%s = %llu\n", nss_stats_str_node[i], stats_shadow[i]);
3630 }
3631
3632 /*
3633 * TrustSec TX node stats
3634 */
3635 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\ntrustsec tx node stats:\n\n");
3636
3637 spin_lock_bh(&nss_top_main.stats_lock);
3638 for (i = 0; (i < NSS_STATS_TRUSTSEC_TX_MAX); i++) {
3639 stats_shadow[i] = nss_top_main.stats_trustsec_tx[i];
3640 }
3641
3642 spin_unlock_bh(&nss_top_main.stats_lock);
3643
3644 for (i = 0; (i < NSS_STATS_TRUSTSEC_TX_MAX); i++) {
3645 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
3646 "%s = %llu\n", nss_stats_str_trustsec_tx[i], stats_shadow[i]);
3647 }
3648
3649 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\ntrustsec tx stats end\n\n");
3650 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
3651 kfree(lbuf);
3652 kfree(stats_shadow);
3653
3654 return bytes_read;
3655}
3656
/*
 * nss_stats_open()
 */
static int nss_stats_open(struct inode *inode, struct file *filp)
{
	struct nss_stats_data *data = NULL;

	data = kzalloc(sizeof(struct nss_stats_data), GFP_KERNEL);
	if (!data) {
		return -ENOMEM;
	}

	data->if_num = NSS_DYNAMIC_IF_START;
	data->index = 0;
	data->edma_id = (nss_ptr_t)inode->i_private;
	filp->private_data = data;

	return 0;
}
3676
3677/*
3678 * nss_stats_release()
3679 */
3680static int nss_stats_release(struct inode *inode, struct file *filp)
3681{
3682 struct nss_stats_data *data = filp->private_data;
3683
3684 if (data) {
3685 kfree(data);
3686 }
3687
3688 return 0;
3689}
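
/*
 * The open/read/release handlers above give every reader of a stats file its
 * own cursor: nss_stats_open() allocates a struct nss_stats_data and seeds
 * if_num/index/edma_id, the read handlers advance that cursor as they emit
 * lines, and nss_stats_release() frees it on close. A rough userspace view of
 * the same sequence (illustrative only, assuming the conventional debugfs
 * mount point):
 *
 *	int fd = open("/sys/kernel/debug/qca-nss-drv/stats/gre_redir", O_RDONLY);
 *	ssize_t n;
 *
 *	while ((n = read(fd, buf, sizeof(buf))) > 0)
 *		fwrite(buf, 1, n, stdout);
 *	close(fd);
 */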
3690
#define NSS_STATS_DECLARE_FILE_OPERATIONS(name) \
static const struct file_operations nss_stats_##name##_ops = { \
	.open = nss_stats_open, \
	.read = nss_stats_##name##_read, \
	.llseek = generic_file_llseek, \
	.release = nss_stats_release, \
};
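
/*
 * For example, NSS_STATS_DECLARE_FILE_OPERATIONS(ipv4) defines
 * nss_stats_ipv4_ops with .read wired to nss_stats_ipv4_read() and the
 * shared nss_stats_open()/nss_stats_release() handlers, ready to be passed
 * to debugfs_create_file() in nss_stats_init() below.
 */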
3698
3699/*
3700 * nss_ipv4_stats_ops
3701 */
3702NSS_STATS_DECLARE_FILE_OPERATIONS(ipv4)
3703
3704/*
Selin Dag6d9b0c12014-11-04 18:27:21 -08003705 * ipv4_reasm_stats_ops
3706 */
3707NSS_STATS_DECLARE_FILE_OPERATIONS(ipv4_reasm)
3708
3709/*
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05303710 * ipv6_stats_ops
3711 */
3712NSS_STATS_DECLARE_FILE_OPERATIONS(ipv6)
3713
3714/*
Selin Dag60a2f5b2015-06-29 14:39:49 -07003715 * ipv6_reasm_stats_ops
3716 */
3717NSS_STATS_DECLARE_FILE_OPERATIONS(ipv6_reasm)
3718
3719/*
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05303720 * n2h_stats_ops
3721 */
3722NSS_STATS_DECLARE_FILE_OPERATIONS(n2h)
Thomas Wuc3e382c2014-10-29 15:35:13 -07003723
3724/*
3725 * lso_rx_stats_ops
3726 */
3727NSS_STATS_DECLARE_FILE_OPERATIONS(lso_rx)
3728
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05303729/*
3730 * drv_stats_ops
3731 */
3732NSS_STATS_DECLARE_FILE_OPERATIONS(drv)
3733
3734/*
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05303735 * pppoe_stats_ops
3736 */
3737NSS_STATS_DECLARE_FILE_OPERATIONS(pppoe)
3738
3739/*
ratheesh kannoth7af985d2015-06-24 15:08:40 +05303740 * l2tpv2_stats_ops
3741 */
3742NSS_STATS_DECLARE_FILE_OPERATIONS(l2tpv2)
3743
3744/*
ratheesh kannotha1245c32015-11-04 16:45:43 +05303745 * map_t_stats_ops
3746 */
3747NSS_STATS_DECLARE_FILE_OPERATIONS(map_t)
3748
3749/*
ratheesh kannotheb2a0a82017-05-04 09:20:17 +05303750 * gre_stats_ops
3751 */
3752NSS_STATS_DECLARE_FILE_OPERATIONS(gre)
3753
3754/*
Amit Gupta316729b2016-08-12 12:21:15 +05303755 * ppe_stats_ops
3756 */
3757NSS_STATS_DECLARE_FILE_OPERATIONS(ppe_conn)
3758NSS_STATS_DECLARE_FILE_OPERATIONS(ppe_l3)
3759NSS_STATS_DECLARE_FILE_OPERATIONS(ppe_code)
3760
3761/*
Shyam Sunder66e889d2015-11-02 15:31:20 +05303762 * pptp_stats_ops
3763 */
3764NSS_STATS_DECLARE_FILE_OPERATIONS(pptp)
3765
3766/*
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05303767 * gmac_stats_ops
3768 */
3769NSS_STATS_DECLARE_FILE_OPERATIONS(gmac)
3770
3771/*
Saurabh Misra09dddeb2014-09-30 16:38:07 -07003772 * capwap_stats_ops
3773 */
3774NSS_STATS_DECLARE_FILE_OPERATIONS(capwap_encap)
3775NSS_STATS_DECLARE_FILE_OPERATIONS(capwap_decap)
3776
3777/*
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05303778 * eth_rx_stats_ops
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05303779 */
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05303780NSS_STATS_DECLARE_FILE_OPERATIONS(eth_rx)
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05303781
3782/*
Shashank Balashankar512cb602016-08-01 17:57:42 -07003783 * edma_port_stats_ops
3784 */
3785NSS_STATS_DECLARE_FILE_OPERATIONS(edma_port_stats)
3786
3787/*
3788 * edma_port_type_ops
3789 */
3790NSS_STATS_DECLARE_FILE_OPERATIONS(edma_port_type)
3791
3792/*
3793 * edma_port_ring_map_ops
3794 */
3795NSS_STATS_DECLARE_FILE_OPERATIONS(edma_port_ring_map)
3796
3797/*
3798 * edma_txring_stats_ops
3799 */
3800NSS_STATS_DECLARE_FILE_OPERATIONS(edma_txring)
3801
3802/*
3803 * edma_rxring_stats_ops
3804 */
3805NSS_STATS_DECLARE_FILE_OPERATIONS(edma_rxring)
3806
3807/*
3808 * edma_txcmplring_stats_ops
3809 */
3810NSS_STATS_DECLARE_FILE_OPERATIONS(edma_txcmplring)
3811
3812/*
3813 * edma_rxfillring_stats_ops
3814 */
3815NSS_STATS_DECLARE_FILE_OPERATIONS(edma_rxfillring)
3816
3817/*
Ankit Dhanuka6228ebd2014-11-05 17:26:01 +05303818 * gre_redir_ops
3819 */
3820NSS_STATS_DECLARE_FILE_OPERATIONS(gre_redir)
3821
3822/*
Ankit Dhanuka14999992014-11-12 15:35:11 +05303823 * sjack_stats_ops
3824 */
3825NSS_STATS_DECLARE_FILE_OPERATIONS(sjack)
3826
Stephen Wang9779d952015-10-28 11:39:07 -07003827/*
3828 * portid_ops
3829 */
3830NSS_STATS_DECLARE_FILE_OPERATIONS(portid)
3831
Sundarajan Srinivasan273d9002015-03-03 15:43:16 -08003832NSS_STATS_DECLARE_FILE_OPERATIONS(wifi_if)
3833
Sundarajan Srinivasanab2c8562015-06-09 16:14:10 -07003834NSS_STATS_DECLARE_FILE_OPERATIONS(virt_if)
3835
Sundarajan Srinivasancd1631b2015-06-18 01:23:30 -07003836NSS_STATS_DECLARE_FILE_OPERATIONS(tx_rx_virt_if)
3837
Ankit Dhanuka14999992014-11-12 15:35:11 +05303838/*
Bharath M Kumarcc666e92014-12-24 19:17:28 +05303839 * wifi_stats_ops
3840 */
3841NSS_STATS_DECLARE_FILE_OPERATIONS(wifi)
3842
3843/*
Tushar Mathurff8741b2015-12-02 20:28:59 +05303844 * dtls_stats_ops
3845 */
3846NSS_STATS_DECLARE_FILE_OPERATIONS(dtls)
3847
3848/*
Thomas Wu71c5ecc2016-06-21 11:15:52 -07003849 * gre_tunnel_stats_ops
3850 */
3851NSS_STATS_DECLARE_FILE_OPERATIONS(gre_tunnel)
3852
3853/*
Stephen Wangec5a85c2016-09-08 23:32:27 -07003854 * trustsec_tx_stats_ops
3855 */
3856NSS_STATS_DECLARE_FILE_OPERATIONS(trustsec_tx)
3857
3858/*
Aniruddha Paul1b170c22017-05-29 12:30:39 +05303859 * wifili_stats_ops
3860 */
3861NSS_STATS_DECLARE_FILE_OPERATIONS(wifili)
3862
3863/*
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05303864 * nss_stats_init()
3865 * Enable NSS statistics
3866 */
3867void nss_stats_init(void)
3868{
Shashank Balashankar512cb602016-08-01 17:57:42 -07003869 int i = 0;
3870 struct dentry *edma_d = NULL;
3871 struct dentry *edma_port_dir_d = NULL;
3872 struct dentry *edma_port_d = NULL;
3873 struct dentry *edma_port_type_d = NULL;
3874 struct dentry *edma_port_stats_d = NULL;
3875 struct dentry *edma_port_ring_map_d = NULL;
3876
3877 struct dentry *edma_rings_dir_d = NULL;
3878 struct dentry *edma_tx_dir_d = NULL;
3879 struct dentry *edma_tx_d = NULL;
3880 struct dentry *edma_rx_dir_d = NULL;
3881 struct dentry *edma_rx_d = NULL;
3882 struct dentry *edma_txcmpl_dir_d = NULL;
3883 struct dentry *edma_txcmpl_d = NULL;
3884 struct dentry *edma_rxfill_dir_d = NULL;
3885 struct dentry *edma_rxfill_d = NULL;
3886
3887 char file_name[10];
3888
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05303889 /*
3890 * NSS driver entry
3891 */
3892 nss_top_main.top_dentry = debugfs_create_dir("qca-nss-drv", NULL);
3893 if (unlikely(nss_top_main.top_dentry == NULL)) {
3894 nss_warning("Failed to create qca-nss-drv directory in debugfs");
3895
		/*
		 * Non-availability of the debugfs directory is not a catastrophe.
		 * We can still go ahead with other initialization.
		 */
3900 return;
3901 }
3902
	nss_top_main.stats_dentry = debugfs_create_dir("stats", nss_top_main.top_dentry);
	if (unlikely(nss_top_main.stats_dentry == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats directory in debugfs");

		/*
		 * Non-availability of the debugfs directory is not a catastrophe.
		 * We can still go ahead with the rest of the initialization.
		 */
		return;
	}
3913
3914 /*
3915 * Create files to obtain statistics
3916 */
3917
3918 /*
3919 * ipv4_stats
3920 */
3921 nss_top_main.ipv4_dentry = debugfs_create_file("ipv4", 0400,
3922 nss_top_main.stats_dentry, &nss_top_main, &nss_stats_ipv4_ops);
3923 if (unlikely(nss_top_main.ipv4_dentry == NULL)) {
Abhishek Rastogi80f4eb12013-09-24 14:31:21 +05303924 nss_warning("Failed to create qca-nss-drv/stats/ipv4 file in debugfs");
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05303925 return;
3926 }
3927
3928 /*
Selin Dag6d9b0c12014-11-04 18:27:21 -08003929 * ipv4_reasm_stats
3930 */
3931 nss_top_main.ipv4_reasm_dentry = debugfs_create_file("ipv4_reasm", 0400,
3932 nss_top_main.stats_dentry, &nss_top_main, &nss_stats_ipv4_reasm_ops);
3933 if (unlikely(nss_top_main.ipv4_reasm_dentry == NULL)) {
3934 nss_warning("Failed to create qca-nss-drv/stats/ipv4_reasm file in debugfs");
3935 return;
3936 }
3937
3938 /*
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05303939 * ipv6_stats
3940 */
3941 nss_top_main.ipv6_dentry = debugfs_create_file("ipv6", 0400,
3942 nss_top_main.stats_dentry, &nss_top_main, &nss_stats_ipv6_ops);
3943 if (unlikely(nss_top_main.ipv6_dentry == NULL)) {
Abhishek Rastogi80f4eb12013-09-24 14:31:21 +05303944 nss_warning("Failed to create qca-nss-drv/stats/ipv6 file in debugfs");
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05303945 return;
3946 }
3947
3948 /*
Selin Dag60a2f5b2015-06-29 14:39:49 -07003949 * ipv6_reasm_stats
3950 */
3951 nss_top_main.ipv6_reasm_dentry = debugfs_create_file("ipv6_reasm", 0400,
3952 nss_top_main.stats_dentry, &nss_top_main, &nss_stats_ipv6_reasm_ops);
3953 if (unlikely(nss_top_main.ipv6_reasm_dentry == NULL)) {
3954 nss_warning("Failed to create qca-nss-drv/stats/ipv6_reasm file in debugfs");
3955 return;
3956 }
3957
	/*
	 * eth_rx_stats
	 */
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05303961 nss_top_main.eth_rx_dentry = debugfs_create_file("eth_rx", 0400,
3962 nss_top_main.stats_dentry, &nss_top_main, &nss_stats_eth_rx_ops);
3963 if (unlikely(nss_top_main.eth_rx_dentry == NULL)) {
3964 nss_warning("Failed to create qca-nss-drv/stats/eth_rx file in debugfs");
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05303965 return;
3966 }
3967
3968 /*
Shashank Balashankar512cb602016-08-01 17:57:42 -07003969 * edma stats
3970 */
3971 edma_d = debugfs_create_dir("edma", nss_top_main.stats_dentry);
3972 if (unlikely(edma_d == NULL)) {
3973 nss_warning("Failed to create qca-nss-drv/stats/edma directory in debugfs");
3974 return;
3975 }
3976
3977 /*
3978 * edma port stats
3979 */
3980 edma_port_dir_d = debugfs_create_dir("ports", edma_d);
3981 if (unlikely(edma_port_dir_d == NULL)) {
3982 nss_warning("Failed to create qca-nss-drv/stats/edma/ports directory in debugfs");
3983 return;
3984 }
3985
3986 for (i = 0; i < NSS_EDMA_NUM_PORTS_MAX; i++) {
3987 memset(file_name, 0, sizeof(file_name));
3988 snprintf(file_name, sizeof(file_name), "%d", i);
3989 edma_port_d = NULL;
3990 edma_port_stats_d = NULL;
3991 edma_port_type_d = NULL;
3992 edma_port_ring_map_d = NULL;
3993
3994 edma_port_d = debugfs_create_dir(file_name, edma_port_dir_d);
3995 if (unlikely(edma_port_d == NULL)) {
3996 nss_warning("Failed to create qca-nss-drv/stats/edma/ports/%d dir in debugfs", i);
3997 return;
3998 }
3999
Stephen Wangaed46332016-12-12 17:29:03 -08004000 edma_port_stats_d = debugfs_create_file("stats", 0400, edma_port_d, (void *)(nss_ptr_t)i, &nss_stats_edma_port_stats_ops);
Shashank Balashankar512cb602016-08-01 17:57:42 -07004001 if (unlikely(edma_port_stats_d == NULL)) {
4002 nss_warning("Failed to create qca-nss-drv/stats/edma/ports/%d/stats file in debugfs", i);
4003 return;
4004 }
4005
Stephen Wangaed46332016-12-12 17:29:03 -08004006 edma_port_type_d = debugfs_create_file("type", 0400, edma_port_d, (void *)(nss_ptr_t)i, &nss_stats_edma_port_type_ops);
Shashank Balashankar512cb602016-08-01 17:57:42 -07004007 if (unlikely(edma_port_type_d == NULL)) {
4008 nss_warning("Failed to create qca-nss-drv/stats/edma/ports/%d/type file in debugfs", i);
4009 return;
4010 }
4011
Stephen Wangaed46332016-12-12 17:29:03 -08004012 edma_port_ring_map_d = debugfs_create_file("ring_map", 0400, edma_port_d, (void *)(nss_ptr_t)i, &nss_stats_edma_port_ring_map_ops);
Shashank Balashankar512cb602016-08-01 17:57:42 -07004013 if (unlikely(edma_port_ring_map_d == NULL)) {
4014 nss_warning("Failed to create qca-nss-drv/stats/edma/ports/%d/ring_map file in debugfs", i);
4015 return;
4016 }
4017 }
4018
4019 /*
4020 * edma ring stats
4021 */
4022 edma_rings_dir_d = debugfs_create_dir("rings", edma_d);
4023 if (unlikely(edma_rings_dir_d == NULL)) {
4024 nss_warning("Failed to create qca-nss-drv/stats/edma/rings directory in debugfs");
4025 return;
4026 }
4027
4028 /*
4029 * edma tx ring stats
4030 */
4031 edma_tx_dir_d = debugfs_create_dir("tx", edma_rings_dir_d);
4032 if (unlikely(edma_tx_dir_d == NULL)) {
4033 nss_warning("Failed to create qca-nss-drv/stats/edma/rings/tx directory in debugfs");
4034 return;
4035 }
4036
4037 for (i = 0; i < NSS_EDMA_NUM_TX_RING_MAX; i++) {
4038 memset(file_name, 0, sizeof(file_name));
4039 scnprintf(file_name, sizeof(file_name), "%d", i);
4040 edma_tx_d = NULL;
Stephen Wangaed46332016-12-12 17:29:03 -08004041 edma_tx_d = debugfs_create_file(file_name, 0400, edma_tx_dir_d, (void *)(nss_ptr_t)i, &nss_stats_edma_txring_ops);
Shashank Balashankar512cb602016-08-01 17:57:42 -07004042 if (unlikely(edma_tx_d == NULL)) {
4043 nss_warning("Failed to create qca-nss-drv/stats/edma/rings/tx/%d file in debugfs", i);
4044 return;
4045 }
4046 }
4047
4048 /*
4049 * edma rx ring stats
4050 */
4051 edma_rx_dir_d = debugfs_create_dir("rx", edma_rings_dir_d);
4052 if (unlikely(edma_rx_dir_d == NULL)) {
4053 nss_warning("Failed to create qca-nss-drv/stats/edma/rings/rx directory in debugfs");
4054 return;
4055 }
4056
4057 for (i = 0; i < NSS_EDMA_NUM_RX_RING_MAX; i++) {
4058 memset(file_name, 0, sizeof(file_name));
4059 scnprintf(file_name, sizeof(file_name), "%d", i);
4060 edma_rx_d = NULL;
Stephen Wangaed46332016-12-12 17:29:03 -08004061 edma_rx_d = debugfs_create_file(file_name, 0400, edma_rx_dir_d, (void *)(nss_ptr_t)i, &nss_stats_edma_rxring_ops);
Shashank Balashankar512cb602016-08-01 17:57:42 -07004062 if (unlikely(edma_rx_d == NULL)) {
4063 nss_warning("Failed to create qca-nss-drv/stats/edma/rings/rx/%d file in debugfs", i);
4064 return;
4065 }
4066 }
4067
4068 /*
4069 * edma tx cmpl ring stats
4070 */
4071 edma_txcmpl_dir_d = debugfs_create_dir("txcmpl", edma_rings_dir_d);
4072 if (unlikely(edma_txcmpl_dir_d == NULL)) {
4073 nss_warning("Failed to create qca-nss-drv/stats/edma/rings/txcmpl directory in debugfs");
4074 return;
4075 }
4076
4077 for (i = 0; i < NSS_EDMA_NUM_TXCMPL_RING_MAX; i++) {
4078 memset(file_name, 0, sizeof(file_name));
4079 scnprintf(file_name, sizeof(file_name), "%d", i);
4080 edma_txcmpl_d = NULL;
Stephen Wangaed46332016-12-12 17:29:03 -08004081 edma_txcmpl_d = debugfs_create_file(file_name, 0400, edma_txcmpl_dir_d, (void *)(nss_ptr_t)i, &nss_stats_edma_txcmplring_ops);
Shashank Balashankar512cb602016-08-01 17:57:42 -07004082 if (unlikely(edma_txcmpl_d == NULL)) {
4083 nss_warning("Failed to create qca-nss-drv/stats/edma/rings/txcmpl/%d file in debugfs", i);
4084 return;
4085 }
4086 }
4087
4088 /*
4089 * edma rx fill ring stats
4090 */
4091 edma_rxfill_dir_d = debugfs_create_dir("rxfill", edma_rings_dir_d);
4092 if (unlikely(edma_rxfill_dir_d == NULL)) {
4093 nss_warning("Failed to create qca-nss-drv/stats/edma/rings/rxfill directory in debugfs");
4094 return;
4095 }
4096
4097 for (i = 0; i < NSS_EDMA_NUM_RXFILL_RING_MAX; i++) {
4098 memset(file_name, 0, sizeof(file_name));
4099 scnprintf(file_name, sizeof(file_name), "%d", i);
4100 edma_rxfill_d = NULL;
Stephen Wangaed46332016-12-12 17:29:03 -08004101 edma_rxfill_d = debugfs_create_file(file_name, 0400, edma_rxfill_dir_d, (void *)(nss_ptr_t)i, &nss_stats_edma_rxfillring_ops);
Shashank Balashankar512cb602016-08-01 17:57:42 -07004102 if (unlikely(edma_rxfill_d == NULL)) {
4103 nss_warning("Failed to create qca-nss-drv/stats/edma/rings/rxfill/%d file in debugfs", i);
4104 return;
4105 }
4106 }
4107
4108 /*
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05304109 * n2h_stats
4110 */
4111 nss_top_main.n2h_dentry = debugfs_create_file("n2h", 0400,
4112 nss_top_main.stats_dentry, &nss_top_main, &nss_stats_n2h_ops);
4113 if (unlikely(nss_top_main.n2h_dentry == NULL)) {
4114 nss_warning("Failed to create qca-nss-drv/stats/n2h directory in debugfs");
4115 return;
4116 }
4117
4118 /*
Thomas Wuc3e382c2014-10-29 15:35:13 -07004119 * lso_rx_stats
4120 */
4121 nss_top_main.lso_rx_dentry = debugfs_create_file("lso_rx", 0400,
4122 nss_top_main.stats_dentry, &nss_top_main, &nss_stats_lso_rx_ops);
4123 if (unlikely(nss_top_main.lso_rx_dentry == NULL)) {
4124 nss_warning("Failed to create qca-nss-drv/stats/lso_rx file in debugfs");
4125 return;
4126 }
4127
4128 /*
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05304129 * drv_stats
4130 */
4131 nss_top_main.drv_dentry = debugfs_create_file("drv", 0400,
4132 nss_top_main.stats_dentry, &nss_top_main, &nss_stats_drv_ops);
4133 if (unlikely(nss_top_main.drv_dentry == NULL)) {
4134 nss_warning("Failed to create qca-nss-drv/stats/drv directory in debugfs");
4135 return;
4136 }
4137
4138 /*
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05304139 * pppoe_stats
4140 */
4141 nss_top_main.pppoe_dentry = debugfs_create_file("pppoe", 0400,
4142 nss_top_main.stats_dentry, &nss_top_main, &nss_stats_pppoe_ops);
4143 if (unlikely(nss_top_main.pppoe_dentry == NULL)) {
Abhishek Rastogi80f4eb12013-09-24 14:31:21 +05304144 nss_warning("Failed to create qca-nss-drv/stats/pppoe file in debugfs");
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05304145 return;
4146 }
4147
4148 /*
4149 * gmac_stats
4150 */
4151 nss_top_main.gmac_dentry = debugfs_create_file("gmac", 0400,
4152 nss_top_main.stats_dentry, &nss_top_main, &nss_stats_gmac_ops);
4153 if (unlikely(nss_top_main.gmac_dentry == NULL)) {
Abhishek Rastogi80f4eb12013-09-24 14:31:21 +05304154 nss_warning("Failed to create qca-nss-drv/stats/gmac file in debugfs");
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05304155 return;
4156 }
Saurabh Misra09dddeb2014-09-30 16:38:07 -07004157
4158 /*
4159 * CAPWAP stats.
4160 */
4161 nss_top_main.capwap_encap_dentry = debugfs_create_file("capwap_encap", 0400,
4162 nss_top_main.stats_dentry, &nss_top_main, &nss_stats_capwap_encap_ops);
4163 if (unlikely(nss_top_main.capwap_encap_dentry == NULL)) {
4164 nss_warning("Failed to create qca-nss-drv/stats/capwap_encap file in debugfs");
4165 return;
4166 }
4167
4168 nss_top_main.capwap_decap_dentry = debugfs_create_file("capwap_decap", 0400,
4169 nss_top_main.stats_dentry, &nss_top_main, &nss_stats_capwap_decap_ops);
4170 if (unlikely(nss_top_main.capwap_decap_dentry == NULL)) {
4171 nss_warning("Failed to create qca-nss-drv/stats/capwap_decap file in debugfs");
4172 return;
4173 }
Ankit Dhanuka6228ebd2014-11-05 17:26:01 +05304174
4175 /*
4176 * GRE_REDIR stats
4177 */
4178 nss_top_main.gre_redir_dentry = debugfs_create_file("gre_redir", 0400,
Ankit Dhanuka14999992014-11-12 15:35:11 +05304179 nss_top_main.stats_dentry, &nss_top_main, &nss_stats_gre_redir_ops);
Ankit Dhanuka6228ebd2014-11-05 17:26:01 +05304180 if (unlikely(nss_top_main.gre_redir_dentry == NULL)) {
4181 nss_warning("Failed to create qca-nss-drv/stats/gre_redir file in debugfs");
4182 return;
4183 }
Ankit Dhanuka14999992014-11-12 15:35:11 +05304184
4185 /*
4186 * SJACK stats
4187 */
4188 nss_top_main.sjack_dentry = debugfs_create_file("sjack", 0400,
4189 nss_top_main.stats_dentry, &nss_top_main, &nss_stats_sjack_ops);
4190 if (unlikely(nss_top_main.sjack_dentry == NULL)) {
4191 nss_warning("Failed to create qca-nss-drv/stats/sjack file in debugfs");
4192 return;
4193 }
Saurabh Misra96998db2014-07-10 12:15:48 -07004194
Bharath M Kumarcc666e92014-12-24 19:17:28 +05304195 /*
Stephen Wang9779d952015-10-28 11:39:07 -07004196 * PORTID stats
4197 */
4198 nss_top_main.portid_dentry = debugfs_create_file("portid", 0400,
4199 nss_top_main.stats_dentry, &nss_top_main, &nss_stats_portid_ops);
4200 if (unlikely(nss_top_main.portid_dentry == NULL)) {
4201 nss_warning("Failed to create qca-nss-drv/stats/portid file in debugfs");
4202 return;
4203 }
4204
4205 /*
Bharath M Kumarcc666e92014-12-24 19:17:28 +05304206 * WIFI stats
4207 */
4208 nss_top_main.wifi_dentry = debugfs_create_file("wifi", 0400,
4209 nss_top_main.stats_dentry, &nss_top_main, &nss_stats_wifi_ops);
4210 if (unlikely(nss_top_main.wifi_dentry == NULL)) {
4211 nss_warning("Failed to create qca-nss-drv/stats/wifi file in debugfs");
4212 return;
4213 }
4214
Sundarajan Srinivasan273d9002015-03-03 15:43:16 -08004215 /*
4216 * wifi_if stats
4217 */
4218 nss_top_main.wifi_if_dentry = debugfs_create_file("wifi_if", 0400,
4219 nss_top_main.stats_dentry, &nss_top_main, &nss_stats_wifi_if_ops);
4220 if (unlikely(nss_top_main.wifi_if_dentry == NULL)) {
4221 nss_warning("Failed to create qca-nss-drv/stats/wifi_if file in debugfs");
4222 return;
4223 }
4224
Sundarajan Srinivasanab2c8562015-06-09 16:14:10 -07004225 nss_top_main.virt_if_dentry = debugfs_create_file("virt_if", 0400,
4226 nss_top_main.stats_dentry, &nss_top_main, &nss_stats_virt_if_ops);
4227 if (unlikely(nss_top_main.virt_if_dentry == NULL)) {
4228 nss_warning("Failed to create qca-nss-drv/stats/virt_if file in debugfs");
4229 return;
4230 }
4231
	nss_top_main.tx_rx_virt_if_dentry = debugfs_create_file("tx_rx_virt_if", 0400,
						nss_top_main.stats_dentry, &nss_top_main, &nss_stats_tx_rx_virt_if_ops);
	if (unlikely(nss_top_main.tx_rx_virt_if_dentry == NULL)) {
4235 nss_warning("Failed to create qca-nss-drv/stats/tx_rx_virt_if file in debugfs");
4236 return;
4237 }
4238
ratheesh kannoth7af985d2015-06-24 15:08:40 +05304239 /*
4240 * L2TPV2 Stats
4241 */
4242 nss_top_main.l2tpv2_dentry = debugfs_create_file("l2tpv2", 0400,
4243 nss_top_main.stats_dentry, &nss_top_main, &nss_stats_l2tpv2_ops);
4244 if (unlikely(nss_top_main.l2tpv2_dentry == NULL)) {
4245 nss_warning("Failed to create qca-nss-drv/stats/l2tpv2 file in debugfs");
4246 return;
4247 }
Shyam Sunder66e889d2015-11-02 15:31:20 +05304248
4249 /*
ratheesh kannotha1245c32015-11-04 16:45:43 +05304250 * Map-t Stats
4251 */
4252 nss_top_main.map_t_dentry = debugfs_create_file("map_t", 0400,
4253 nss_top_main.stats_dentry, &nss_top_main, &nss_stats_map_t_ops);
4254 if (unlikely(nss_top_main.map_t_dentry == NULL)) {
4255 nss_warning("Failed to create qca-nss-drv/stats/map_t file in debugfs");
4256 return;
4257 }
4258
4259 /*
ratheesh kannotheb2a0a82017-05-04 09:20:17 +05304260 * GRE statistics
4261 */
4262 nss_top_main.gre_dentry = debugfs_create_file("gre", 0400,
4263 nss_top_main.stats_dentry, &nss_top_main, &nss_stats_gre_ops);
4264 if (unlikely(nss_top_main.gre_dentry == NULL)) {
4265 nss_warning("Failed to create qca-nss-drv/stats/gre file in debugfs");
4266 return;
4267 }
4268
4269 /*
Amit Gupta316729b2016-08-12 12:21:15 +05304270 * PPE Stats
4271 */
4272 nss_top_main.ppe_dentry = debugfs_create_dir("ppe", nss_top_main.stats_dentry);
4273 if (unlikely(nss_top_main.ppe_dentry == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats/ppe directory in debugfs");
4275 return;
4276 }
4277
	nss_top_main.ppe_conn_dentry = debugfs_create_file("connection", 0400,
						nss_top_main.ppe_dentry, &nss_top_main, &nss_stats_ppe_conn_ops);
	if (unlikely(nss_top_main.ppe_conn_dentry == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats/ppe/connection file in debugfs");
	}

	nss_top_main.ppe_l3_dentry = debugfs_create_file("l3", 0400,
						nss_top_main.ppe_dentry, &nss_top_main, &nss_stats_ppe_l3_ops);
	if (unlikely(nss_top_main.ppe_l3_dentry == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats/ppe/l3 file in debugfs");
	}

	nss_top_main.ppe_l3_dentry = debugfs_create_file("ppe_code", 0400,
						nss_top_main.ppe_dentry, &nss_top_main, &nss_stats_ppe_code_ops);
	if (unlikely(nss_top_main.ppe_l3_dentry == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats/ppe/ppe_code file in debugfs");
	}
4295
4296 /*
Shyam Sunder66e889d2015-11-02 15:31:20 +05304297 * PPTP Stats
4298 */
4299 nss_top_main.pptp_dentry = debugfs_create_file("pptp", 0400,
4300 nss_top_main.stats_dentry, &nss_top_main, &nss_stats_pptp_ops);
4301 if (unlikely(nss_top_main.pptp_dentry == NULL)) {
4302 nss_warning("Failed to create qca-nss-drv/stats/pptp file in debugfs");
Tushar Mathurff8741b2015-12-02 20:28:59 +05304303 }
4304
4305 /*
4306 * DTLS Stats
4307 */
4308 nss_top_main.dtls_dentry = debugfs_create_file("dtls", 0400,
4309 nss_top_main.stats_dentry,
4310 &nss_top_main,
4311 &nss_stats_dtls_ops);
4312 if (unlikely(nss_top_main.dtls_dentry == NULL)) {
4313 nss_warning("Failed to create qca-nss-drv/stats/dtls file in debugfs");
Shyam Sunder66e889d2015-11-02 15:31:20 +05304314 return;
4315 }
4316
Thomas Wu71c5ecc2016-06-21 11:15:52 -07004317 /*
4318 * GRE Tunnel Stats
4319 */
4320 nss_top_main.gre_tunnel_dentry = debugfs_create_file("gre_tunnel", 0400,
4321 nss_top_main.stats_dentry,
4322 &nss_top_main,
4323 &nss_stats_gre_tunnel_ops);
4324 if (unlikely(nss_top_main.gre_tunnel_dentry == NULL)) {
4325 nss_warning("Failed to create qca-nss-drv/stats/gre_tunnel file in debugfs");
4326 return;
4327 }
4328
Stephen Wangec5a85c2016-09-08 23:32:27 -07004329 /*
4330 * TrustSec TX Stats
4331 */
4332 nss_top_main.trustsec_tx_dentry = debugfs_create_file("trustsec_tx", 0400,
4333 nss_top_main.stats_dentry,
4334 &nss_top_main,
4335 &nss_stats_trustsec_tx_ops);
4336 if (unlikely(nss_top_main.trustsec_tx_dentry == NULL)) {
4337 nss_warning("Failed to create qca-nss-drv/stats/trustsec_tx file in debugfs");
4338 return;
4339 }
4340
Aniruddha Paul1b170c22017-05-29 12:30:39 +05304341 /*
4342 * WIFILI stats
4343 */
4344 nss_top_main.wifili_dentry = debugfs_create_file("wifili", 0400,
4345 nss_top_main.stats_dentry,
4346 &nss_top_main, &nss_stats_wifili_ops);
4347 if (unlikely(nss_top_main.wifili_dentry == NULL)) {
4348 nss_warning("Failed to create qca-nss-drv/stats/wifili file in debugfs");
4349 return;
4350 }
4351
Saurabh Misra96998db2014-07-10 12:15:48 -07004352 nss_log_init();
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05304353}
4354
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05304355/*
4356 * nss_stats_clean()
4357 * Cleanup NSS statistics files
4358 */
4359void nss_stats_clean(void)
4360{
4361 /*
4362 * Remove debugfs tree
4363 */
4364 if (likely(nss_top_main.top_dentry != NULL)) {
4365 debugfs_remove_recursive(nss_top_main.top_dentry);
Stephen Wangdc8b5322015-06-27 20:11:50 -07004366 nss_top_main.top_dentry = NULL;
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05304367 }
4368}