/*
 **************************************************************************
 * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all copies.
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 **************************************************************************
 */

/*
 * nss_stats.c
 *	NSS stats APIs
 *
 */

#include "nss_core.h"
#include "nss_dtls_stats.h"
#include "nss_gre_tunnel_stats.h"

/*
 * Maximum string length:
 * This should be equal to maximum string size of any stats
 * inclusive of stats value
 */
#define NSS_STATS_MAX_STR_LENGTH 96

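/*
 * Note (editorial, illustrative only): each debugfs read handler below sizes
 * its local print buffer as
 *
 *	size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
 *
 * where max_output_lines counts the stat lines plus the start/end banners and
 * blank separator lines, so every formatted line is assumed to fit within 96
 * characters including the counter value.
 */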
extern int32_t nss_tx_rx_virt_if_copy_stats(int32_t if_num, int i, char *line);

uint64_t stats_shadow_pppoe_except[NSS_PPPOE_NUM_SESSION_PER_INTERFACE][NSS_PPPOE_EXCEPTION_EVENT_MAX];

/*
 * Private data for every file descriptor
 */
struct nss_stats_data {
	uint32_t if_num;	/**< Interface number for stats */
	uint32_t index;		/**< Index for GRE_REDIR stats */
	uint32_t edma_id;	/**< EDMA port ID or ring ID */
};
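/*
 * Note (editorial): the read handlers below fetch this structure from
 * fp->private_data to know which interface, GRE_REDIR index or EDMA
 * port/ring a given debugfs file refers to; it is presumably populated by
 * the corresponding open handler, which is not part of this excerpt.
 */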

/*
 * Statistics structures
 */

/*
 * nss_stats_str_ipv4
 *	IPv4 stats strings
 */
static int8_t *nss_stats_str_ipv4[NSS_STATS_IPV4_MAX] = {
	"rx_pkts",
	"rx_bytes",
	"tx_pkts",
	"tx_bytes",
	"create_requests",
	"create_collisions",
	"create_invalid_interface",
	"destroy_requests",
	"destroy_misses",
	"hash_hits",
	"hash_reorders",
	"flushes",
	"evictions",
	"fragmentations",
	"mc_create_requests",
	"mc_update_requests",
	"mc_create_invalid_interface",
	"mc_destroy_requests",
	"mc_destroy_misses",
	"mc_flushes",
};

/*
 * nss_stats_str_ipv4_reasm
 *	IPv4 reassembly stats strings
 */
static int8_t *nss_stats_str_ipv4_reasm[NSS_STATS_IPV4_REASM_MAX] = {
	"evictions",
	"alloc_fails",
	"timeouts",
};

/*
 * nss_stats_str_ipv6
 *	IPv6 stats strings
 */
static int8_t *nss_stats_str_ipv6[NSS_STATS_IPV6_MAX] = {
	"rx_pkts",
	"rx_bytes",
	"tx_pkts",
	"tx_bytes",
	"create_requests",
	"create_collisions",
	"create_invalid_interface",
	"destroy_requests",
	"destroy_misses",
	"hash_hits",
	"hash_reorders",
	"flushes",
	"evictions",
	"fragmentations",
	"frag_fails",
	"mc_create_requests",
	"mc_update_requests",
	"mc_create_invalid_interface",
	"mc_destroy_requests",
	"mc_destroy_misses",
	"mc_flushes",
};

/*
 * nss_stats_str_ipv6_reasm
 *	IPv6 reassembly stats strings
 */
static int8_t *nss_stats_str_ipv6_reasm[NSS_STATS_IPV6_REASM_MAX] = {
	"alloc_fails",
	"timeouts",
	"discards",
};

/*
 * nss_stats_str_n2h
 *	N2H stats strings
 */
static int8_t *nss_stats_str_n2h[NSS_STATS_N2H_MAX] = {
	"queue_dropped",
	"ticks",
	"worst_ticks",
	"iterations",
	"pbuf_ocm_alloc_fails",
	"pbuf_ocm_free_count",
	"pbuf_ocm_total_count",
	"pbuf_default_alloc_fails",
	"pbuf_default_free_count",
	"pbuf_default_total_count",
	"payload_fails",
	"payload_free_count",
	"h2n_control_packets",
	"h2n_control_bytes",
	"n2h_control_packets",
	"n2h_control_bytes",
	"h2n_data_packets",
	"h2n_data_bytes",
	"n2h_data_packets",
	"n2h_data_bytes",
	"n2h_tot_payloads",
	"n2h_data_interface_invalid",
};

/*
 * nss_stats_str_lso_rx
 *	LSO_RX stats strings
 */
static int8_t *nss_stats_str_lso_rx[NSS_STATS_LSO_RX_MAX] = {
	"tx_dropped",
	"dropped",
	"pbuf_alloc_fail",
	"pbuf_reference_fail"
};

/*
 * nss_stats_str_drv
 *	Host driver stats strings
 */
static int8_t *nss_stats_str_drv[NSS_STATS_DRV_MAX] = {
	"nbuf_alloc_errors",
	"tx_queue_full[0]",
	"tx_queue_full[1]",
	"tx_buffers_empty",
	"tx_buffers_pkt",
	"tx_buffers_cmd",
	"tx_buffers_crypto",
	"tx_buffers_reuse",
	"rx_buffers_empty",
	"rx_buffers_pkt",
	"rx_buffers_cmd_resp",
	"rx_buffers_status_sync",
	"rx_buffers_crypto",
	"rx_buffers_virtual",
	"tx_skb_simple",
	"tx_skb_nr_frags",
	"tx_skb_fraglist",
	"rx_skb_simple",
	"rx_skb_nr_frags",
	"rx_skb_fraglist",
	"rx_bad_desciptor",
	"nss_skb_count",
	"rx_chain_seg_processed",
	"rx_frag_seg_processed"
};

/*
 * nss_stats_str_pppoe
 *	PPPoE stats strings
 */
static int8_t *nss_stats_str_pppoe[NSS_STATS_PPPOE_MAX] = {
	"create_requests",
	"create_failures",
	"destroy_requests",
	"destroy_misses"
};

/*
 * nss_stats_str_gmac
 *	GMAC stats strings
 */
static int8_t *nss_stats_str_gmac[NSS_STATS_GMAC_MAX] = {
	"ticks",
	"worst_ticks",
	"iterations"
};

/*
 * nss_stats_str_edma_tx
 */
static int8_t *nss_stats_str_edma_tx[NSS_STATS_EDMA_TX_MAX] = {
	"tx_err",
	"tx_dropped",
	"desc_cnt"
};

/*
 * nss_stats_str_edma_rx
 */
static int8_t *nss_stats_str_edma_rx[NSS_STATS_EDMA_RX_MAX] = {
	"rx_csum_err",
	"desc_cnt"
};

/*
 * nss_stats_str_edma_txcmpl
 */
static int8_t *nss_stats_str_edma_txcmpl[NSS_STATS_EDMA_TXCMPL_MAX] = {
	"desc_cnt"
};

/*
 * nss_stats_str_edma_rxfill
 */
static int8_t *nss_stats_str_edma_rxfill[NSS_STATS_EDMA_RXFILL_MAX] = {
	"desc_cnt"
};

/*
 * nss_stats_str_edma_port_type
 */
static int8_t *nss_stats_str_edma_port_type[NSS_EDMA_PORT_TYPE_MAX] = {
	"physical_port",
	"virtual_port"
};

/*
 * nss_stats_str_edma_port_ring_map
 */
static int8_t *nss_stats_str_edma_port_ring_map[NSS_EDMA_PORT_RING_MAP_MAX] = {
	"rx_ring",
	"tx_ring"
};
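/*
 * Note (editorial): the EDMA strings above are intentionally split per
 * object type: per-ring counters (Tx, Rx, Tx-completion, Rx-fill) and
 * per-port attributes (port type, port-to-ring map). The EDMA read handlers
 * further below select the ring or port with the edma_id carried in the
 * per-file nss_stats_data.
 */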

/*
 * nss_stats_str_node
 *	Interface stats strings per node
 */
static int8_t *nss_stats_str_node[NSS_STATS_NODE_MAX] = {
	"rx_packets",
	"rx_bytes",
	"rx_dropped",
	"tx_packets",
	"tx_bytes"
};

/*
 * nss_stats_str_eth_rx
 *	eth_rx stats strings
 */
static int8_t *nss_stats_str_eth_rx[NSS_STATS_ETH_RX_MAX] = {
	"ticks",
	"worst_ticks",
	"iterations"
};

/*
 * nss_stats_str_if_exception_eth_rx
 *	Interface stats strings for eth_rx exceptions
 */
static int8_t *nss_stats_str_if_exception_eth_rx[NSS_EXCEPTION_EVENT_ETH_RX_MAX] = {
	"UNKNOWN_L3_PROTOCOL",
	"ETH_HDR_MISSING",
	"VLAN_MISSING",
	"TRUSTSEC_HDR_MISSING"
};

/*
 * nss_stats_str_if_exception_ipv4
 *	Interface stats strings for ipv4 exceptions
 */
static int8_t *nss_stats_str_if_exception_ipv4[NSS_EXCEPTION_EVENT_IPV4_MAX] = {
	"IPV4_ICMP_HEADER_INCOMPLETE",
	"IPV4_ICMP_UNHANDLED_TYPE",
	"IPV4_ICMP_IPV4_HEADER_INCOMPLETE",
	"IPV4_ICMP_IPV4_UDP_HEADER_INCOMPLETE",
	"IPV4_ICMP_IPV4_TCP_HEADER_INCOMPLETE",
	"IPV4_ICMP_IPV4_UNKNOWN_PROTOCOL",
	"IPV4_ICMP_NO_ICME",
	"IPV4_ICMP_FLUSH_TO_HOST",
	"IPV4_TCP_HEADER_INCOMPLETE",
	"IPV4_TCP_NO_ICME",
	"IPV4_TCP_IP_OPTION",
	"IPV4_TCP_IP_FRAGMENT",
	"IPV4_TCP_SMALL_TTL",
	"IPV4_TCP_NEEDS_FRAGMENTATION",
	"IPV4_TCP_FLAGS",
	"IPV4_TCP_SEQ_EXCEEDS_RIGHT_EDGE",
	"IPV4_TCP_SMALL_DATA_OFFS",
	"IPV4_TCP_BAD_SACK",
	"IPV4_TCP_BIG_DATA_OFFS",
	"IPV4_TCP_SEQ_BEFORE_LEFT_EDGE",
	"IPV4_TCP_ACK_EXCEEDS_RIGHT_EDGE",
	"IPV4_TCP_ACK_BEFORE_LEFT_EDGE",
	"IPV4_UDP_HEADER_INCOMPLETE",
	"IPV4_UDP_NO_ICME",
	"IPV4_UDP_IP_OPTION",
	"IPV4_UDP_IP_FRAGMENT",
	"IPV4_UDP_SMALL_TTL",
	"IPV4_UDP_NEEDS_FRAGMENTATION",
	"IPV4_WRONG_TARGET_MAC",
	"IPV4_HEADER_INCOMPLETE",
	"IPV4_BAD_TOTAL_LENGTH",
	"IPV4_BAD_CHECKSUM",
	"IPV4_NON_INITIAL_FRAGMENT",
	"IPV4_DATAGRAM_INCOMPLETE",
	"IPV4_OPTIONS_INCOMPLETE",
	"IPV4_UNKNOWN_PROTOCOL",
	"IPV4_ESP_HEADER_INCOMPLETE",
	"IPV4_ESP_NO_ICME",
	"IPV4_ESP_IP_OPTION",
	"IPV4_ESP_IP_FRAGMENT",
	"IPV4_ESP_SMALL_TTL",
	"IPV4_ESP_NEEDS_FRAGMENTATION",
	"IPV4_INGRESS_VID_MISMATCH",
	"IPV4_INGRESS_VID_MISSING",
	"IPV4_6RD_NO_ICME",
	"IPV4_6RD_IP_OPTION",
	"IPV4_6RD_IP_FRAGMENT",
	"IPV4_6RD_NEEDS_FRAGMENTATION",
	"IPV4_DSCP_MARKING_MISMATCH",
	"IPV4_VLAN_MARKING_MISMATCH",
	"IPV4_DEPRECATED",
	"IPV4_GRE_HEADER_INCOMPLETE",
	"IPV4_GRE_NO_ICME",
	"IPV4_GRE_IP_OPTION",
	"IPV4_GRE_IP_FRAGMENT",
	"IPV4_GRE_SMALL_TTL",
	"IPV4_GRE_NEEDS_FRAGMENTATION",
	"IPV4_PPTP_GRE_SESSION_MATCH_FAIL",
	"IPV4_PPTP_GRE_INVALID_PROTO",
	"IPV4_PPTP_GRE_NO_CME",
	"IPV4_PPTP_GRE_IP_OPTION",
	"IPV4_PPTP_GRE_IP_FRAGMENT",
	"IPV4_PPTP_GRE_SMALL_TTL",
	"IPV4_PPTP_GRE_NEEDS_FRAGMENTATION",
	"IPV4_DESTROY",
	"IPV4_FRAG_DF_SET",
	"IPV4_FRAG_FAIL",
	"IPV4_ICMP_IPV4_UDPLITE_HEADER_INCOMPLETE",
	"IPV4_UDPLITE_HEADER_INCOMPLETE",
	"IPV4_UDPLITE_NO_ICME",
	"IPV4_UDPLITE_IP_OPTION",
	"IPV4_UDPLITE_IP_FRAGMENT",
	"IPV4_UDPLITE_SMALL_TTL",
	"IPV4_UDPLITE_NEEDS_FRAGMENTATION",
	"IPV4_MC_UDP_NO_ICME",
	"IPV4_MC_MEM_ALLOC_FAILURE",
	"IPV4_MC_UPDATE_FAILURE",
	"IPV4_MC_PBUF_ALLOC_FAILURE"
};

/*
 * nss_stats_str_if_exception_ipv6
 *	Interface stats strings for ipv6 exceptions
 */
static int8_t *nss_stats_str_if_exception_ipv6[NSS_EXCEPTION_EVENT_IPV6_MAX] = {
	"IPV6_ICMP_HEADER_INCOMPLETE",
	"IPV6_ICMP_UNHANDLED_TYPE",
	"IPV6_ICMP_IPV6_HEADER_INCOMPLETE",
	"IPV6_ICMP_IPV6_UDP_HEADER_INCOMPLETE",
	"IPV6_ICMP_IPV6_TCP_HEADER_INCOMPLETE",
	"IPV6_ICMP_IPV6_UNKNOWN_PROTOCOL",
	"IPV6_ICMP_NO_ICME",
	"IPV6_ICMP_FLUSH_TO_HOST",
	"IPV6_TCP_HEADER_INCOMPLETE",
	"IPV6_TCP_NO_ICME",
	"IPV6_TCP_SMALL_HOP_LIMIT",
	"IPV6_TCP_NEEDS_FRAGMENTATION",
	"IPV6_TCP_FLAGS",
	"IPV6_TCP_SEQ_EXCEEDS_RIGHT_EDGE",
	"IPV6_TCP_SMALL_DATA_OFFS",
	"IPV6_TCP_BAD_SACK",
	"IPV6_TCP_BIG_DATA_OFFS",
	"IPV6_TCP_SEQ_BEFORE_LEFT_EDGE",
	"IPV6_TCP_ACK_EXCEEDS_RIGHT_EDGE",
	"IPV6_TCP_ACK_BEFORE_LEFT_EDGE",
	"IPV6_UDP_HEADER_INCOMPLETE",
	"IPV6_UDP_NO_ICME",
	"IPV6_UDP_SMALL_HOP_LIMIT",
	"IPV6_UDP_NEEDS_FRAGMENTATION",
	"IPV6_WRONG_TARGET_MAC",
	"IPV6_HEADER_INCOMPLETE",
	"IPV6_UNKNOWN_PROTOCOL",
	"IPV6_INGRESS_VID_MISMATCH",
	"IPV6_INGRESS_VID_MISSING",
	"IPV6_DSCP_MARKING_MISMATCH",
	"IPV6_VLAN_MARKING_MISMATCH",
	"IPV6_DEPRECATED",
	"IPV6_GRE_NO_ICME",
	"IPV6_GRE_NEEDS_FRAGMENTATION",
	"IPV6_GRE_SMALL_HOP_LIMIT",
	"IPV6_DESTROY",
	"IPV6_ICMP_IPV6_UDPLITE_HEADER_INCOMPLETE",
	"IPV6_UDPLITE_HEADER_INCOMPLETE",
	"IPV6_UDPLITE_NO_ICME",
	"IPV6_UDPLITE_SMALL_HOP_LIMIT",
	"IPV6_UDPLITE_NEEDS_FRAGMENTATION",
	"IPV6_MC_UDP_NO_ICME",
	"IPV6_MC_MEM_ALLOC_FAILURE",
	"IPV6_MC_UPDATE_FAILURE",
	"IPV6_MC_PBUF_ALLOC_FAILURE",
	"IPV6_ESP_HEADER_INCOMPLETE",
	"IPV6_ESP_NO_ICME",
	"IPV6_ESP_IP_FRAGMENT",
	"IPV6_ESP_SMALL_HOP_LIMIT",
	"IPV6_ESP_NEEDS_FRAGMENTATION"
};

/*
 * nss_stats_str_if_exception_pppoe
 *	Interface stats strings for PPPoE exceptions
 */
static int8_t *nss_stats_str_if_exception_pppoe[NSS_PPPOE_EXCEPTION_EVENT_MAX] = {
	"PPPOE_WRONG_VERSION_OR_TYPE",
	"PPPOE_WRONG_CODE",
	"PPPOE_HEADER_INCOMPLETE",
	"PPPOE_UNSUPPORTED_PPP_PROTOCOL",
	"PPPOE_DEPRECATED"
};

/*
 * nss_stats_str_wifi
 *	Wifi statistics strings
 */
static int8_t *nss_stats_str_wifi[NSS_STATS_WIFI_MAX] = {
	"RX_PACKETS",
	"RX_DROPPED",
	"TX_PACKETS",
	"TX_DROPPED",
	"TX_TRANSMIT_COMPLETED",
	"TX_MGMT_RECEIVED",
	"TX_MGMT_TRANSMITTED",
	"TX_MGMT_DROPPED",
	"TX_MGMT_COMPLETED",
	"TX_INV_PEER_ENQ_CNT",
	"RX_INV_PEER_RCV_CNT",
	"RX_PN_CHECK_FAILED",
	"RX_PKTS_DELIVERD",
	"RX_BYTES_DELIVERED",
	"TX_BYTES_COMPLETED",
	"RX_DELIVER_UNALIGNED_DROP_CNT",
	"TIDQ_ENQUEUE_CNT_0",
	"TIDQ_ENQUEUE_CNT_1",
	"TIDQ_ENQUEUE_CNT_2",
	"TIDQ_ENQUEUE_CNT_3",
	"TIDQ_ENQUEUE_CNT_4",
	"TIDQ_ENQUEUE_CNT_5",
	"TIDQ_ENQUEUE_CNT_6",
	"TIDQ_ENQUEUE_CNT_7",
	"TIDQ_DEQUEUE_CNT_0",
	"TIDQ_DEQUEUE_CNT_1",
	"TIDQ_DEQUEUE_CNT_2",
	"TIDQ_DEQUEUE_CNT_3",
	"TIDQ_DEQUEUE_CNT_4",
	"TIDQ_DEQUEUE_CNT_5",
	"TIDQ_DEQUEUE_CNT_6",
	"TIDQ_DEQUEUE_CNT_7",
	"TIDQ_ENQUEUE_FAIL_CNT_0",
	"TIDQ_ENQUEUE_FAIL_CNT_1",
	"TIDQ_ENQUEUE_FAIL_CNT_2",
	"TIDQ_ENQUEUE_FAIL_CNT_3",
	"TIDQ_ENQUEUE_FAIL_CNT_4",
	"TIDQ_ENQUEUE_FAIL_CNT_5",
	"TIDQ_ENQUEUE_FAIL_CNT_6",
	"TIDQ_ENQUEUE_FAIL_CNT_7",
	"TIDQ_TTL_EXPIRE_CNT_0",
	"TIDQ_TTL_EXPIRE_CNT_1",
	"TIDQ_TTL_EXPIRE_CNT_2",
	"TIDQ_TTL_EXPIRE_CNT_3",
	"TIDQ_TTL_EXPIRE_CNT_4",
	"TIDQ_TTL_EXPIRE_CNT_5",
	"TIDQ_TTL_EXPIRE_CNT_6",
	"TIDQ_TTL_EXPIRE_CNT_7",
	"TIDQ_DEQUEUE_REQ_CNT_0",
	"TIDQ_DEQUEUE_REQ_CNT_1",
	"TIDQ_DEQUEUE_REQ_CNT_2",
	"TIDQ_DEQUEUE_REQ_CNT_3",
	"TIDQ_DEQUEUE_REQ_CNT_4",
	"TIDQ_DEQUEUE_REQ_CNT_5",
	"TIDQ_DEQUEUE_REQ_CNT_6",
	"TIDQ_DEQUEUE_REQ_CNT_7",
	"TOTAL_TIDQ_DEPTH",
	"RX_HTT_FETCH_CNT",
	"TOTAL_TIDQ_BYPASS_CNT",
	"GLOBAL_Q_FULL_CNT",
	"TIDQ_FULL_CNT",
};

/*
 * nss_stats_str_portid
 *	PortID statistics strings
 */
static int8_t *nss_stats_str_portid[NSS_STATS_PORTID_MAX] = {
	"RX_INVALID_HEADER",
};

/*
 * nss_stats_str_dtls_session_stats
 *	DTLS statistics strings for nss session stats
 */
static int8_t *nss_stats_str_dtls_session_debug_stats[NSS_STATS_DTLS_SESSION_MAX] = {
	"RX_PKTS",
	"TX_PKTS",
	"RX_DROPPED",
	"RX_AUTH_DONE",
	"TX_AUTH_DONE",
	"RX_CIPHER_DONE",
	"TX_CIPHER_DONE",
	"RX_CBUF_ALLOC_FAIL",
	"TX_CBUF_ALLOC_FAIL",
	"TX_CENQUEUE_FAIL",
	"RX_CENQUEUE_FAIL",
	"TX_DROPPED_HROOM",
	"TX_DROPPED_TROOM",
	"TX_FORWARD_ENQUEUE_FAIL",
	"RX_FORWARD_ENQUEUE_FAIL",
	"RX_INVALID_VERSION",
	"RX_INVALID_EPOCH",
	"RX_MALFORMED",
	"RX_CIPHER_FAIL",
	"RX_AUTH_FAIL",
	"RX_CAPWAP_CLASSIFY_FAIL",
	"RX_SINGLE_REC_DGRAM",
	"RX_MULTI_REC_DGRAM",
	"RX_REPLAY_FAIL",
	"RX_REPLAY_DUPLICATE",
	"RX_REPLAY_OUT_OF_WINDOW",
	"OUTFLOW_QUEUE_FULL",
	"DECAP_QUEUE_FULL",
	"PBUF_ALLOC_FAIL",
	"PBUF_COPY_FAIL",
	"EPOCH",
	"TX_SEQ_HIGH",
	"TX_SEQ_LOW",
};

/*
 * nss_stats_str_gre_tunnel_session_stats
 *	GRE Tunnel statistics strings for nss session stats
 */
static int8_t *nss_stats_str_gre_tunnel_session_debug_stats[NSS_STATS_GRE_TUNNEL_SESSION_MAX] = {
	"RX_PKTS",
	"TX_PKTS",
	"RX_DROPPED",
	"RX_MALFORMED",
	"RX_INVALID_PROT",
	"DECAP_QUEUE_FULL",
	"RX_SINGLE_REC_DGRAM",
	"RX_INVALID_REC_DGRAM",
	"BUFFER_ALLOC_FAIL",
	"BUFFER_COPY_FAIL",
	"OUTFLOW_QUEUE_FULL",
	"TX_DROPPED_HROOM",
	"RX_CBUFFER_ALLOC_FAIL",
	"RX_CENQUEUE_FAIL",
	"RX_DECRYPT_DONE",
	"RX_FORWARD_ENQUEUE_FAIL",
	"TX_CBUFFER_ALLOC_FAIL",
	"TX_CENQUEUE_FAIL",
	"TX_DROPPED_TROOM",
	"TX_FORWARD_ENQUEUE_FAIL",
	"TX_CIPHER_DONE",
	"CRYPTO_NOSUPP",
};

/*
 * nss_stats_str_l2tpv2_session_stats
 *	l2tpv2 statistics strings for nss session stats
 */
static int8_t *nss_stats_str_l2tpv2_session_debug_stats[NSS_STATS_L2TPV2_SESSION_MAX] = {
	"RX_PPP_LCP_PKTS",
	"RX_EXP_PKTS",
	"ENCAP_PBUF_ALLOC_FAIL",
	"DECAP_PBUF_ALLOC_FAIL"
};

/*
 * nss_stats_str_map_t_instance_stats
 *	map_t statistics strings for nss session stats
 */
static int8_t *nss_stats_str_map_t_instance_debug_stats[NSS_STATS_MAP_T_MAX] = {
	"MAP_T_V4_TO_V6_PBUF_EXCEPTION_PKTS",
	"MAP_T_V4_TO_V6_PBUF_NO_MATCHING_RULE",
	"MAP_T_V4_TO_V6_PBUF_NOT_TCP_OR_UDP",
	"MAP_T_V4_TO_V6_RULE_ERR_LOCAL_PSID",
	"MAP_T_V4_TO_V6_RULE_ERR_LOCAL_IPV6",
	"MAP_T_V4_TO_V6_RULE_ERR_REMOTE_PSID",
	"MAP_T_V4_TO_V6_RULE_ERR_REMOTE_EA_BITS",
	"MAP_T_V4_TO_V6_RULE_ERR_REMOTE_IPV6",
	"MAP_T_V6_TO_V4_PBUF_EXCEPTION_PKTS",
	"MAP_T_V6_TO_V4_PBUF_NO_MATCHING_RULE",
	"MAP_T_V6_TO_V4_PBUF_NOT_TCP_OR_UDP",
	"MAP_T_V6_TO_V4_RULE_ERR_LOCAL_IPV4",
	"MAP_T_V6_TO_V4_RULE_ERR_REMOTE_IPV4"
};

/*
 * nss_stats_str_gre_base_stats
 *	GRE debug statistics strings for base types
 */
static int8_t *nss_stats_str_gre_base_debug_stats[NSS_STATS_GRE_BASE_DEBUG_MAX] = {
	"GRE_BASE_RX_PACKETS",
	"GRE_BASE_RX_DROPPED",
	"GRE_BASE_EXP_ETH_HDR_MISSING",
	"GRE_BASE_EXP_ETH_TYPE_NON_IP",
	"GRE_BASE_EXP_IP_UNKNOWN_PROTOCOL",
	"GRE_BASE_EXP_IP_HEADER_INCOMPLETE",
	"GRE_BASE_EXP_IP_BAD_TOTAL_LENGTH",
	"GRE_BASE_EXP_IP_BAD_CHECKSUM",
	"GRE_BASE_EXP_IP_DATAGRAM_INCOMPLETE",
	"GRE_BASE_EXP_IP_FRAGMENT",
	"GRE_BASE_EXP_IP_OPTIONS_INCOMPLETE",
	"GRE_BASE_EXP_IP_WITH_OPTIONS",
	"GRE_BASE_EXP_IPV6_UNKNOWN_PROTOCOL",
	"GRE_BASE_EXP_IPV6_HEADER_INCOMPLETE",
	"GRE_BASE_EXP_GRE_UNKNOWN_SESSION",
	"GRE_BASE_EXP_GRE_NODE_INACTIVE",
};

/*
 * nss_stats_str_gre_session_stats
 *	GRE debug statistics strings for sessions
 */
static int8_t *nss_stats_str_gre_session_debug_stats[NSS_STATS_GRE_SESSION_DEBUG_MAX] = {
	"GRE_SESSION_PBUF_ALLOC_FAIL",
	"GRE_SESSION_DECAP_FORWARD_ENQUEUE_FAIL",
	"GRE_SESSION_ENCAP_FORWARD_ENQUEUE_FAIL",
	"GRE_SESSION_DECAP_TX_FORWARDED",
	"GRE_SESSION_ENCAP_RX_RECEIVED",
	"GRE_SESSION_ENCAP_RX_DROPPED",
	"GRE_SESSION_ENCAP_RX_LINEAR_FAIL",
	"GRE_SESSION_EXP_RX_KEY_ERROR",
	"GRE_SESSION_EXP_RX_SEQ_ERROR",
	"GRE_SESSION_EXP_RX_CS_ERROR",
	"GRE_SESSION_EXP_RX_FLAG_MISMATCH",
	"GRE_SESSION_EXP_RX_MALFORMED",
	"GRE_SESSION_EXP_RX_INVALID_PROTOCOL",
	"GRE_SESSION_EXP_RX_NO_HEADROOM",
};

/*
 * nss_stats_str_ppe_conn
 *	PPE statistics strings for nss flow stats
 */
static int8_t *nss_stats_str_ppe_conn[NSS_STATS_PPE_CONN_MAX] = {
	"v4 routed flows",
	"v4 bridge flows",
	"v4 conn create req",
	"v4 conn create fail",
	"v4 conn destroy req",
	"v4 conn destroy fail",

	"v6 routed flows",
	"v6 bridge flows",
	"v6 conn create req",
	"v6 conn create fail",
	"v6 conn destroy req",
	"v6 conn destroy fail",

	"conn fail - nexthop full",
	"conn fail - flow full",
	"conn fail - host full",
	"conn fail - pub-ip full",
	"conn fail - port not setup",
	"conn fail - rw fifo full",
	"conn fail - unknown proto",
	"conn fail - ppe not responding",
	"conn fail - fqg full"
};

/*
 * nss_stats_str_ppe_l3
 *	PPE statistics strings for nss debug stats
 */
static int8_t *nss_stats_str_ppe_l3[NSS_STATS_PPE_L3_MAX] = {
	"PPE L3 dbg reg 0",
	"PPE L3 dbg reg 1",
	"PPE L3 dbg reg 2",
	"PPE L3 dbg reg 3",
	"PPE L3 dbg reg 4",
	"PPE L3 dbg reg port",
};

/*
 * nss_stats_str_ppe_code
 *	PPE statistics strings for nss debug stats
 */
static int8_t *nss_stats_str_ppe_code[NSS_STATS_PPE_CODE_MAX] = {
	"PPE CPU_CODE",
	"PPE DROP_CODE",
};

/*
 * nss_stats_str_pptp_session_stats
 *	PPTP statistics strings for nss session stats
 */
static int8_t *nss_stats_str_pptp_session_debug_stats[NSS_STATS_PPTP_SESSION_MAX] = {
	"ENCAP_RX_PACKETS",
	"ENCAP_RX_BYTES",
	"ENCAP_TX_PACKETS",
	"ENCAP_TX_BYTES",
	"ENCAP_RX_DROP",
	"DECAP_RX_PACKETS",
	"DECAP_RX_BYTES",
	"DECAP_TX_PACKETS",
	"DECAP_TX_BYTES",
	"DECAP_RX_DROP",
	"ENCAP_HEADROOM_ERR",
	"ENCAP_SMALL_SIZE",
	"ENCAP_PNODE_ENQUEUE_FAIL",
	"DECAP_NO_SEQ_NOR_ACK",
	"DECAP_INVAL_GRE_FLAGS",
	"DECAP_INVAL_GRE_PROTO",
	"DECAP_WRONG_SEQ",
	"DECAP_INVAL_PPP_HDR",
	"DECAP_PPP_LCP",
	"DECAP_UNSUPPORTED_PPP_PROTO",
	"DECAP_PNODE_ENQUEUE_FAIL",
};

/*
 * nss_stats_str_trustsec_tx
 *	Trustsec TX stats strings
 */
static int8_t *nss_stats_str_trustsec_tx[NSS_STATS_TRUSTSEC_TX_MAX] = {
	"INVALID_SRC",
	"UNCONFIGURED_SRC",
	"HEADROOM_NOT_ENOUGH",
};

/*
 * nss_stats_ipv4_read()
 *	Read IPV4 stats
 */
static ssize_t nss_stats_ipv4_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
{
	int32_t i;
	/*
	 * max output lines = #stats + start tag line + end tag line + three blank lines
	 */
	uint32_t max_output_lines = (NSS_STATS_NODE_MAX + 2) + (NSS_STATS_IPV4_MAX + 3) + (NSS_EXCEPTION_EVENT_IPV4_MAX + 3) + 5;
	size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
	size_t size_wr = 0;
	ssize_t bytes_read = 0;
	uint64_t *stats_shadow;

	char *lbuf = kzalloc(size_al, GFP_KERNEL);
	if (unlikely(lbuf == NULL)) {
		nss_warning("Could not allocate memory for local statistics buffer");
		return 0;
	}

	/*
	 * Note: The assumption here is that exception event count is larger than other statistics count for IPv4
	 */
	stats_shadow = kzalloc(NSS_EXCEPTION_EVENT_IPV4_MAX * 8, GFP_KERNEL);
	if (unlikely(stats_shadow == NULL)) {
		nss_warning("Could not allocate memory for local shadow buffer");
		kfree(lbuf);
		return 0;
	}

	size_wr = scnprintf(lbuf, size_al, "ipv4 stats start:\n\n");

	/*
	 * Common node stats
	 */
	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "common node stats:\n\n");
	spin_lock_bh(&nss_top_main.stats_lock);
	for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
		stats_shadow[i] = nss_top_main.stats_node[NSS_IPV4_RX_INTERFACE][i];
	}

	spin_unlock_bh(&nss_top_main.stats_lock);

	for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_node[i], stats_shadow[i]);
	}

	/*
	 * IPv4 node stats
	 */
	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nipv4 node stats:\n\n");

	spin_lock_bh(&nss_top_main.stats_lock);
	for (i = 0; (i < NSS_STATS_IPV4_MAX); i++) {
		stats_shadow[i] = nss_top_main.stats_ipv4[i];
	}

	spin_unlock_bh(&nss_top_main.stats_lock);

	for (i = 0; (i < NSS_STATS_IPV4_MAX); i++) {
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_ipv4[i], stats_shadow[i]);
	}

	/*
	 * Exception stats
	 */
	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nipv4 exception stats:\n\n");

	spin_lock_bh(&nss_top_main.stats_lock);
	for (i = 0; (i < NSS_EXCEPTION_EVENT_IPV4_MAX); i++) {
		stats_shadow[i] = nss_top_main.stats_if_exception_ipv4[i];
	}

	spin_unlock_bh(&nss_top_main.stats_lock);

	for (i = 0; (i < NSS_EXCEPTION_EVENT_IPV4_MAX); i++) {
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_if_exception_ipv4[i], stats_shadow[i]);
	}

	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nipv4 stats end\n\n");
	bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
	kfree(lbuf);
	kfree(stats_shadow);

	return bytes_read;
}
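/*
 * Note (editorial): nss_stats_ipv4_read() above illustrates the pattern that
 * every read handler in this file follows, roughly:
 *
 *	lbuf = kzalloc(size_al, GFP_KERNEL);		allocate print buffer
 *	stats_shadow = kzalloc(..., GFP_KERNEL);	allocate snapshot buffer
 *	spin_lock_bh(&nss_top_main.stats_lock);		copy counters to the shadow
 *	spin_unlock_bh(&nss_top_main.stats_lock);	(lock held only while copying)
 *	scnprintf(...);					format from the shadow copy
 *	simple_read_from_buffer(...);			copy the text to user space
 *
 * The shadow copy keeps the spinlock hold time short and lets the formatting
 * run without the lock held.
 */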

/*
 * nss_stats_ipv4_reasm_read()
 *	Read IPV4 reassembly stats
 */
static ssize_t nss_stats_ipv4_reasm_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
{
	int32_t i;
	/*
	 * max output lines = #stats + start tag line + end tag line + three blank lines
	 */
	uint32_t max_output_lines = (NSS_STATS_NODE_MAX + 2) + (NSS_STATS_IPV4_REASM_MAX + 3) + 5;
	size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
	size_t size_wr = 0;
	ssize_t bytes_read = 0;
	uint64_t *stats_shadow;

	char *lbuf = kzalloc(size_al, GFP_KERNEL);
	if (unlikely(lbuf == NULL)) {
		nss_warning("Could not allocate memory for local statistics buffer");
		return 0;
	}

	stats_shadow = kzalloc(NSS_STATS_IPV4_REASM_MAX * 8, GFP_KERNEL);
	if (unlikely(stats_shadow == NULL)) {
		nss_warning("Could not allocate memory for local shadow buffer");
		kfree(lbuf);
		return 0;
	}

	size_wr = scnprintf(lbuf, size_al, "ipv4 reasm stats start:\n\n");

	/*
	 * Common node stats
	 */
	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "common node stats:\n\n");
	spin_lock_bh(&nss_top_main.stats_lock);
	for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
		stats_shadow[i] = nss_top_main.stats_node[NSS_IPV4_REASM_INTERFACE][i];
	}

	spin_unlock_bh(&nss_top_main.stats_lock);

	for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_node[i], stats_shadow[i]);
	}

	/*
	 * IPv4 reasm node stats
	 */
	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nipv4 reasm node stats:\n\n");

	spin_lock_bh(&nss_top_main.stats_lock);
	for (i = 0; (i < NSS_STATS_IPV4_REASM_MAX); i++) {
		stats_shadow[i] = nss_top_main.stats_ipv4_reasm[i];
	}

	spin_unlock_bh(&nss_top_main.stats_lock);

	for (i = 0; (i < NSS_STATS_IPV4_REASM_MAX); i++) {
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_ipv4_reasm[i], stats_shadow[i]);
	}

	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nipv4 reasm stats end\n\n");
	bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
	kfree(lbuf);
	kfree(stats_shadow);

	return bytes_read;
}

/*
 * nss_stats_ipv6_read()
 *	Read IPV6 stats
 */
static ssize_t nss_stats_ipv6_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
{
	int32_t i;

	/*
	 * max output lines = #stats + start tag line + end tag line + three blank lines
	 */
	uint32_t max_output_lines = (NSS_STATS_NODE_MAX + 2) + (NSS_STATS_IPV6_MAX + 3) + (NSS_EXCEPTION_EVENT_IPV6_MAX + 3) + 5;
	size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
	size_t size_wr = 0;
	ssize_t bytes_read = 0;
	uint64_t *stats_shadow;

	char *lbuf = kzalloc(size_al, GFP_KERNEL);
	if (unlikely(lbuf == NULL)) {
		nss_warning("Could not allocate memory for local statistics buffer");
		return 0;
	}

	/*
	 * Note: The assumption here is that exception event count is larger than other statistics count for IPv6
	 */
	stats_shadow = kzalloc(NSS_EXCEPTION_EVENT_IPV6_MAX * 8, GFP_KERNEL);
	if (unlikely(stats_shadow == NULL)) {
		nss_warning("Could not allocate memory for local shadow buffer");
		kfree(lbuf);
		return 0;
	}

	size_wr = scnprintf(lbuf, size_al, "ipv6 stats start:\n\n");

	/*
	 * Common node stats
	 */
	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "common node stats:\n\n");
	spin_lock_bh(&nss_top_main.stats_lock);
	for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
		stats_shadow[i] = nss_top_main.stats_node[NSS_IPV6_RX_INTERFACE][i];
	}

	spin_unlock_bh(&nss_top_main.stats_lock);

	for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_node[i], stats_shadow[i]);
	}

	/*
	 * IPv6 node stats
	 */
	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nipv6 node stats:\n\n");

	spin_lock_bh(&nss_top_main.stats_lock);
	for (i = 0; (i < NSS_STATS_IPV6_MAX); i++) {
		stats_shadow[i] = nss_top_main.stats_ipv6[i];
	}

	spin_unlock_bh(&nss_top_main.stats_lock);

	for (i = 0; (i < NSS_STATS_IPV6_MAX); i++) {
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_ipv6[i], stats_shadow[i]);
	}

	/*
	 * Exception stats
	 */
	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nipv6 exception stats:\n\n");

	spin_lock_bh(&nss_top_main.stats_lock);
	for (i = 0; (i < NSS_EXCEPTION_EVENT_IPV6_MAX); i++) {
		stats_shadow[i] = nss_top_main.stats_if_exception_ipv6[i];
	}

	spin_unlock_bh(&nss_top_main.stats_lock);

	for (i = 0; (i < NSS_EXCEPTION_EVENT_IPV6_MAX); i++) {
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_if_exception_ipv6[i], stats_shadow[i]);
	}

	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nipv6 stats end\n\n");
	bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
	kfree(lbuf);
	kfree(stats_shadow);

	return bytes_read;
}

/*
 * nss_stats_ipv6_reasm_read()
 *	Read IPV6 reassembly stats
 */
static ssize_t nss_stats_ipv6_reasm_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
{
	int32_t i;
	/*
	 * max output lines = #stats + start tag line + end tag line + three blank lines
	 */
	uint32_t max_output_lines = (NSS_STATS_NODE_MAX + 2) + (NSS_STATS_IPV6_REASM_MAX + 3) + 5;
	size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
	size_t size_wr = 0;
	ssize_t bytes_read = 0;
	uint64_t *stats_shadow;

	char *lbuf = kzalloc(size_al, GFP_KERNEL);
	if (unlikely(lbuf == NULL)) {
		nss_warning("Could not allocate memory for local statistics buffer");
		return 0;
	}

	stats_shadow = kzalloc(NSS_STATS_IPV6_REASM_MAX * 8, GFP_KERNEL);
	if (unlikely(stats_shadow == NULL)) {
		nss_warning("Could not allocate memory for local shadow buffer");
		kfree(lbuf);
		return 0;
	}

	size_wr = scnprintf(lbuf, size_al, "ipv6 reasm stats start:\n\n");

	/*
	 * Common node stats
	 */
	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "common node stats:\n\n");
	spin_lock_bh(&nss_top_main.stats_lock);
	for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
		stats_shadow[i] = nss_top_main.stats_node[NSS_IPV6_REASM_INTERFACE][i];
	}

	spin_unlock_bh(&nss_top_main.stats_lock);

	for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_node[i], stats_shadow[i]);
	}

	/*
	 * Ipv6 reasm node stats
	 */
	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nipv6 reasm node stats:\n\n");

	spin_lock_bh(&nss_top_main.stats_lock);
	for (i = 0; (i < NSS_STATS_IPV6_REASM_MAX); i++) {
		stats_shadow[i] = nss_top_main.stats_ipv6_reasm[i];
	}

	spin_unlock_bh(&nss_top_main.stats_lock);

	for (i = 0; (i < NSS_STATS_IPV6_REASM_MAX); i++) {
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_ipv6_reasm[i], stats_shadow[i]);
	}

	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nipv6 reasm stats end\n\n");
	bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
	kfree(lbuf);
	kfree(stats_shadow);

	return bytes_read;
}

/*
 * nss_stats_edma_port_stats_read()
 *	Read EDMA port stats
 */
static ssize_t nss_stats_edma_port_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
{
	int32_t i;

	/*
	 * max output lines = #stats + start tag line + end tag line + three blank lines
	 */
	uint32_t max_output_lines = (NSS_STATS_NODE_MAX + 2) + 3;
	size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
	size_t size_wr = 0;
	ssize_t bytes_read = 0;
	uint64_t *stats_shadow;
	struct nss_stats_data *data = fp->private_data;

	char *lbuf = kzalloc(size_al, GFP_KERNEL);
	if (unlikely(lbuf == NULL)) {
		nss_warning("Could not allocate memory for local statistics buffer");
		return 0;
	}

	/*
	 * Note: The assumption here is that we do not have more than 64 stats
	 */
	stats_shadow = kzalloc(64 * 8, GFP_KERNEL);
	if (unlikely(stats_shadow == NULL)) {
		nss_warning("Could not allocate memory for local shadow buffer");
		kfree(lbuf);
		return 0;
	}

	size_wr = scnprintf(lbuf, size_al, "edma stats start:\n\n");

	/*
	 * Common node stats
	 */
	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "edma port %d stats:\n\n", data->edma_id);
	spin_lock_bh(&nss_top_main.stats_lock);
	for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
		stats_shadow[i] = nss_top_main.stats_edma.port[data->edma_id].port_stats[i];
	}

	spin_unlock_bh(&nss_top_main.stats_lock);

	for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_node[i], stats_shadow[i]);
	}

	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nedma stats end\n\n");
	bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
	kfree(lbuf);
	kfree(stats_shadow);

	return bytes_read;
}

/*
 * nss_stats_edma_port_type_read()
 *	Read EDMA port type
 */
static ssize_t nss_stats_edma_port_type_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
{
	/*
	 * max output lines = #stats + start tag line + end tag line + three blank lines
	 */
	uint32_t max_output_lines = (1 + 2) + 3;
	size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
	size_t size_wr = 0;
	ssize_t bytes_read = 0;
	uint64_t port_type;
	struct nss_stats_data *data = fp->private_data;

	char *lbuf = kzalloc(size_al, GFP_KERNEL);
	if (unlikely(lbuf == NULL)) {
		nss_warning("Could not allocate memory for local statistics buffer");
		return 0;
	}

	size_wr = scnprintf(lbuf, size_al, "edma port type start:\n\n");
	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "edma port %d type:\n\n", data->edma_id);

	/*
	 * Port type
	 */
	spin_lock_bh(&nss_top_main.stats_lock);
	port_type = nss_top_main.stats_edma.port[data->edma_id].port_type;
	spin_unlock_bh(&nss_top_main.stats_lock);

	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
				"port_type = %s\n", nss_stats_str_edma_port_type[port_type]);

	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nedma stats end\n");
	bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
	kfree(lbuf);

	return bytes_read;
}

/*
 * nss_stats_edma_port_ring_map_read()
 *	Read EDMA port ring map
 */
static ssize_t nss_stats_edma_port_ring_map_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
{
	int32_t i;

	/*
	 * max output lines = #stats + start tag line + end tag line + three blank lines
	 */
	uint32_t max_output_lines = (4 + 2) + 3;
	size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
	size_t size_wr = 0;
	ssize_t bytes_read = 0;
	uint64_t *stats_shadow;
	struct nss_stats_data *data = fp->private_data;

	char *lbuf = kzalloc(size_al, GFP_KERNEL);
	if (unlikely(lbuf == NULL)) {
		nss_warning("Could not allocate memory for local statistics buffer");
		return 0;
	}

	/*
	 * Note: The assumption here is that we do not have more than 64 stats
	 */
	stats_shadow = kzalloc(64 * 8, GFP_KERNEL);
	if (unlikely(stats_shadow == NULL)) {
		nss_warning("Could not allocate memory for local shadow buffer");
		kfree(lbuf);
		return 0;
	}

	size_wr = scnprintf(lbuf, size_al, "edma port ring map start:\n\n");

	/*
	 * Port ring map
	 */
	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "edma port %d ring map:\n\n", data->edma_id);
	spin_lock_bh(&nss_top_main.stats_lock);
	for (i = 0; i < NSS_EDMA_PORT_RING_MAP_MAX; i++) {
		stats_shadow[i] = nss_top_main.stats_edma.port[data->edma_id].port_ring_map[i];
	}

	spin_unlock_bh(&nss_top_main.stats_lock);

	for (i = 0; i < NSS_EDMA_PORT_RING_MAP_MAX; i++) {
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_edma_port_ring_map[i], stats_shadow[i]);
	}

	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nedma stats end\n\n");
	bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
	kfree(lbuf);
	kfree(stats_shadow);

	return bytes_read;
}

/*
 * nss_stats_edma_txring_read()
 *	Read EDMA Tx ring stats
 */
static ssize_t nss_stats_edma_txring_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
{
	int32_t i;

	/*
	 * max output lines = #stats + start tag line + end tag line + three blank lines
	 */
	uint32_t max_output_lines = (NSS_STATS_EDMA_TX_MAX + 2) + 3;
	size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
	size_t size_wr = 0;
	ssize_t bytes_read = 0;
	uint64_t *stats_shadow;
	struct nss_stats_data *data = fp->private_data;

	char *lbuf = kzalloc(size_al, GFP_KERNEL);
	if (unlikely(lbuf == NULL)) {
		nss_warning("Could not allocate memory for local statistics buffer");
		return 0;
	}

	/*
	 * Note: The assumption here is that we do not have more than 64 stats
	 */
	stats_shadow = kzalloc(64 * 8, GFP_KERNEL);
	if (unlikely(stats_shadow == NULL)) {
		nss_warning("Could not allocate memory for local shadow buffer");
		kfree(lbuf);
		return 0;
	}

	size_wr = scnprintf(lbuf, size_al, "edma Tx ring stats start:\n\n");

	/*
	 * Tx ring stats
	 */
	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "Tx ring %d stats:\n\n", data->edma_id);
	spin_lock_bh(&nss_top_main.stats_lock);
	for (i = 0; i < NSS_STATS_EDMA_TX_MAX; i++) {
		stats_shadow[i] = nss_top_main.stats_edma.tx_stats[data->edma_id][i];
	}

	spin_unlock_bh(&nss_top_main.stats_lock);

	for (i = 0; i < NSS_STATS_EDMA_TX_MAX; i++) {
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_edma_tx[i], stats_shadow[i]);
	}

	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nedma Tx ring stats end\n\n");
	bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
	kfree(lbuf);
	kfree(stats_shadow);

	return bytes_read;
}

/*
 * nss_stats_edma_rxring_read()
 *	Read EDMA rxring stats
 */
static ssize_t nss_stats_edma_rxring_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
{
	int32_t i;

	/*
	 * max output lines = #stats + start tag line + end tag line + three blank lines
	 */
	uint32_t max_output_lines = (NSS_STATS_EDMA_RX_MAX + 2) + 3;
	size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
	size_t size_wr = 0;
	ssize_t bytes_read = 0;
	uint64_t *stats_shadow;
	struct nss_stats_data *data = fp->private_data;

	char *lbuf = kzalloc(size_al, GFP_KERNEL);
	if (unlikely(lbuf == NULL)) {
		nss_warning("Could not allocate memory for local statistics buffer");
		return 0;
	}

	/*
	 * Note: The assumption here is that we do not have more than 64 stats
	 */
	stats_shadow = kzalloc(64 * 8, GFP_KERNEL);
	if (unlikely(stats_shadow == NULL)) {
		nss_warning("Could not allocate memory for local shadow buffer");
		kfree(lbuf);
		return 0;
	}

	size_wr = scnprintf(lbuf, size_al, "edma Rx ring stats start:\n\n");

	/*
	 * RX ring stats
	 */
	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "Rx ring %d stats:\n\n", data->edma_id);
	spin_lock_bh(&nss_top_main.stats_lock);
	for (i = 0; i < NSS_STATS_EDMA_RX_MAX; i++) {
		stats_shadow[i] = nss_top_main.stats_edma.rx_stats[data->edma_id][i];
	}

	spin_unlock_bh(&nss_top_main.stats_lock);

	for (i = 0; i < NSS_STATS_EDMA_RX_MAX; i++) {
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_edma_rx[i], stats_shadow[i]);
	}

	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nedma Rx ring stats end\n\n");
	bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
	kfree(lbuf);
	kfree(stats_shadow);

	return bytes_read;
}

/*
 * nss_stats_edma_txcmplring_read()
 *	Read EDMA txcmplring stats
 */
static ssize_t nss_stats_edma_txcmplring_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
{
	int32_t i;

	/*
	 * max output lines = #stats + start tag line + end tag line + three blank lines
	 */
	uint32_t max_output_lines = (NSS_STATS_EDMA_TXCMPL_MAX + 2) + 3;
	size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
	size_t size_wr = 0;
	ssize_t bytes_read = 0;
	uint64_t *stats_shadow;
	struct nss_stats_data *data = fp->private_data;

	char *lbuf = kzalloc(size_al, GFP_KERNEL);
	if (unlikely(lbuf == NULL)) {
		nss_warning("Could not allocate memory for local statistics buffer");
		return 0;
	}

	/*
	 * Note: The assumption here is that we do not have more than 64 stats
	 */
	stats_shadow = kzalloc(64 * 8, GFP_KERNEL);
	if (unlikely(stats_shadow == NULL)) {
		nss_warning("Could not allocate memory for local shadow buffer");
		kfree(lbuf);
		return 0;
	}

	size_wr = scnprintf(lbuf, size_al, "edma Tx cmpl ring stats start:\n\n");

	/*
	 * Tx cmpl ring stats
	 */
	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "Tx cmpl ring %d stats:\n\n", data->edma_id);
	spin_lock_bh(&nss_top_main.stats_lock);
	for (i = 0; i < NSS_STATS_EDMA_TXCMPL_MAX; i++) {
		stats_shadow[i] = nss_top_main.stats_edma.txcmpl_stats[data->edma_id][i];
	}

	spin_unlock_bh(&nss_top_main.stats_lock);

	for (i = 0; i < NSS_STATS_EDMA_TXCMPL_MAX; i++) {
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_edma_txcmpl[i], stats_shadow[i]);
	}

	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nedma Tx cmpl ring stats end\n\n");
	bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
	kfree(lbuf);
	kfree(stats_shadow);

	return bytes_read;
}

/*
 * nss_stats_edma_rxfillring_read()
 *	Read EDMA rxfillring stats
 */
static ssize_t nss_stats_edma_rxfillring_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
{
	int32_t i;

	/*
	 * max output lines = #stats + start tag line + end tag line + three blank lines
	 */
	uint32_t max_output_lines = (NSS_STATS_EDMA_RXFILL_MAX + 2) + 3;
	size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
	size_t size_wr = 0;
	ssize_t bytes_read = 0;
	uint64_t *stats_shadow;
	struct nss_stats_data *data = fp->private_data;

	char *lbuf = kzalloc(size_al, GFP_KERNEL);
	if (unlikely(lbuf == NULL)) {
		nss_warning("Could not allocate memory for local statistics buffer");
		return 0;
	}

	/*
	 * Note: The assumption here is that we do not have more than 64 stats
	 */
	stats_shadow = kzalloc(64 * 8, GFP_KERNEL);
	if (unlikely(stats_shadow == NULL)) {
		nss_warning("Could not allocate memory for local shadow buffer");
		kfree(lbuf);
		return 0;
	}

	size_wr = scnprintf(lbuf, size_al, "edma Rx fill ring stats start:\n\n");

	/*
	 * Rx fill ring stats
	 */
	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "Rx fill ring %d stats:\n\n", data->edma_id);
	spin_lock_bh(&nss_top_main.stats_lock);
	for (i = 0; i < NSS_STATS_EDMA_RXFILL_MAX; i++) {
		stats_shadow[i] = nss_top_main.stats_edma.rxfill_stats[data->edma_id][i];
	}

	spin_unlock_bh(&nss_top_main.stats_lock);

	for (i = 0; i < NSS_STATS_EDMA_RXFILL_MAX; i++) {
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_edma_rxfill[i], stats_shadow[i]);
	}

	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nedma Rx fill ring stats end\n\n");
	bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
	kfree(lbuf);
	kfree(stats_shadow);

	return bytes_read;
}

/*
 * nss_stats_eth_rx_read()
 *	Read ETH_RX stats
 */
static ssize_t nss_stats_eth_rx_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
{
	int32_t i;

	/*
	 * max output lines = #stats + start tag line + end tag line + three blank lines
	 */
	uint32_t max_output_lines = (NSS_STATS_NODE_MAX + 2) + (NSS_STATS_ETH_RX_MAX + 3) + (NSS_EXCEPTION_EVENT_ETH_RX_MAX + 3) + 5;
	size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
	size_t size_wr = 0;
	ssize_t bytes_read = 0;
	uint64_t *stats_shadow;

	char *lbuf = kzalloc(size_al, GFP_KERNEL);
	if (unlikely(lbuf == NULL)) {
		nss_warning("Could not allocate memory for local statistics buffer");
		return 0;
	}

	/*
	 * Note: The assumption here is that we do not have more than 64 stats
	 */
	stats_shadow = kzalloc(64 * 8, GFP_KERNEL);
	if (unlikely(stats_shadow == NULL)) {
		nss_warning("Could not allocate memory for local shadow buffer");
		kfree(lbuf);
		return 0;
	}

	size_wr = scnprintf(lbuf, size_al, "eth_rx stats start:\n\n");

	/*
	 * Common node stats
	 */
	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "common node stats:\n\n");
	spin_lock_bh(&nss_top_main.stats_lock);
	for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
		stats_shadow[i] = nss_top_main.stats_node[NSS_ETH_RX_INTERFACE][i];
	}

	spin_unlock_bh(&nss_top_main.stats_lock);

	for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_node[i], stats_shadow[i]);
	}

	/*
	 * eth_rx node stats
	 */
	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\neth_rx node stats:\n\n");
	spin_lock_bh(&nss_top_main.stats_lock);
	for (i = 0; (i < NSS_STATS_ETH_RX_MAX); i++) {
		stats_shadow[i] = nss_top_main.stats_eth_rx[i];
	}

	spin_unlock_bh(&nss_top_main.stats_lock);

	for (i = 0; (i < NSS_STATS_ETH_RX_MAX); i++) {
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_eth_rx[i], stats_shadow[i]);
	}

	/*
	 * Exception stats
	 */
	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\neth_rx exception stats:\n\n");

	spin_lock_bh(&nss_top_main.stats_lock);
	for (i = 0; (i < NSS_EXCEPTION_EVENT_ETH_RX_MAX); i++) {
		stats_shadow[i] = nss_top_main.stats_if_exception_eth_rx[i];
	}

	spin_unlock_bh(&nss_top_main.stats_lock);

	for (i = 0; (i < NSS_EXCEPTION_EVENT_ETH_RX_MAX); i++) {
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_if_exception_eth_rx[i], stats_shadow[i]);
	}

	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\neth_rx stats end\n\n");
	bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
	kfree(lbuf);
	kfree(stats_shadow);

	return bytes_read;
}

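/*
 * Note (editorial): the per-core stats_n2h[] array keeps the common node
 * counters in its first NSS_STATS_NODE_MAX slots. nss_stats_n2h_read() below
 * therefore prints those first and then pairs nss_stats_str_n2h[i] with
 * stats_shadow[i + NSS_STATS_NODE_MAX] for the remaining N2H-specific
 * counters; the index offset in its final loop is intentional.
 */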
/*
 * nss_stats_n2h_read()
 *	Read N2H stats
 */
1589static ssize_t nss_stats_n2h_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
1590{
1591 int32_t i;
1592
1593 /*
1594 * max output lines = #stats + start tag line + end tag line + three blank lines
1595 */
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301596 uint32_t max_output_lines = (NSS_STATS_NODE_MAX + 2) + (NSS_STATS_N2H_MAX + 3) + 5;
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301597 size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
1598 size_t size_wr = 0;
1599 ssize_t bytes_read = 0;
1600 uint64_t *stats_shadow;
Murat Sezgin0c0561d2014-04-09 18:55:58 -07001601 int max = NSS_STATS_N2H_MAX - NSS_STATS_NODE_MAX;
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301602
1603 char *lbuf = kzalloc(size_al, GFP_KERNEL);
1604 if (unlikely(lbuf == NULL)) {
1605 nss_warning("Could not allocate memory for local statistics buffer");
1606 return 0;
1607 }
1608
1609 stats_shadow = kzalloc(NSS_STATS_N2H_MAX * 8, GFP_KERNEL);
1610 if (unlikely(stats_shadow == NULL)) {
1611 nss_warning("Could not allocate memory for local shadow buffer");
Ankit Dhanuka14999992014-11-12 15:35:11 +05301612 kfree(lbuf);
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301613 return 0;
1614 }
1615
1616 size_wr = scnprintf(lbuf, size_al, "n2h stats start:\n\n");
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301617
1618 /*
1619 * Common node stats
1620 */
Abhishek Rastogia1a07972014-04-01 19:43:33 +05301621 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "common node stats:\n\n");
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301622 spin_lock_bh(&nss_top_main.stats_lock);
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301623 for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
1624 stats_shadow[i] = nss_top_main.nss[0].stats_n2h[i];
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301625 }
1626
1627 spin_unlock_bh(&nss_top_main.stats_lock);
1628
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301629 for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
1630 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
1631 "%s = %llu\n", nss_stats_str_node[i], stats_shadow[i]);
1632 }
1633
1634 /*
1635 * N2H node stats
1636 */
Abhishek Rastogia1a07972014-04-01 19:43:33 +05301637 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nn2h node stats:\n\n");
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301638 spin_lock_bh(&nss_top_main.stats_lock);
1639 for (i = NSS_STATS_NODE_MAX; (i < NSS_STATS_N2H_MAX); i++) {
1640 stats_shadow[i] = nss_top_main.nss[0].stats_n2h[i];
1641 }
1642
1643 spin_unlock_bh(&nss_top_main.stats_lock);
1644
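	/*
	 * The first NSS_STATS_NODE_MAX entries of stats_n2h are the common node
	 * counters printed above, so the N2H-specific rows start at that offset
	 * in stats_shadow.
	 */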
Murat Sezgin0c0561d2014-04-09 18:55:58 -07001645 for (i = 0; i < max; i++) {
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301646 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
Murat Sezgin0c0561d2014-04-09 18:55:58 -07001647 "%s = %llu\n", nss_stats_str_n2h[i], stats_shadow[i + NSS_STATS_NODE_MAX]);
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301648 }
1649
1650 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nn2h stats end\n\n");
1651 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
1652 kfree(lbuf);
1653 kfree(stats_shadow);
1654
1655 return bytes_read;
1656}
1657
1658/*
Thomas Wuc3e382c2014-10-29 15:35:13 -07001659 * nss_stats_lso_rx_read()
1660 * Read LSO_RX stats
1661 */
1662static ssize_t nss_stats_lso_rx_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
1663{
1664 int32_t i;
1665
1666 /*
1667 * max output lines = #stats + start tag line + end tag line + three blank lines
1668 */
1669 uint32_t max_output_lines = (NSS_STATS_NODE_MAX + 2) + (NSS_STATS_LSO_RX_MAX + 3) + 5;
1670 size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
1671 size_t size_wr = 0;
1672 ssize_t bytes_read = 0;
1673 uint64_t *stats_shadow;
1674
1675 char *lbuf = kzalloc(size_al, GFP_KERNEL);
1676 if (unlikely(lbuf == NULL)) {
1677 nss_warning("Could not allocate memory for local statistics buffer");
1678 return 0;
1679 }
1680
1681 stats_shadow = kzalloc(NSS_STATS_LSO_RX_MAX * 8, GFP_KERNEL);
1682 if (unlikely(stats_shadow == NULL)) {
1683 nss_warning("Could not allocate memory for local shadow buffer");
Ankit Dhanuka14999992014-11-12 15:35:11 +05301684 kfree(lbuf);
Thomas Wuc3e382c2014-10-29 15:35:13 -07001685 return 0;
1686 }
1687
1688 size_wr = scnprintf(lbuf, size_al, "lso_rx stats start:\n\n");
1689
1690 /*
1691 * Common node stats
1692 */
1693 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "common node stats:\n\n");
1694 spin_lock_bh(&nss_top_main.stats_lock);
1695 for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
1696 stats_shadow[i] = nss_top_main.stats_node[NSS_LSO_RX_INTERFACE][i];
1697 }
1698
1699 spin_unlock_bh(&nss_top_main.stats_lock);
1700
1701 for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
1702 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
1703 "%s = %llu\n", nss_stats_str_node[i], stats_shadow[i]);
1704 }
1705
1706 /*
1707 * lso_rx node stats
1708 */
1709 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nlso_rx node stats:\n\n");
1710 spin_lock_bh(&nss_top_main.stats_lock);
1711 for (i = 0; (i < NSS_STATS_LSO_RX_MAX); i++) {
1712 stats_shadow[i] = nss_top_main.stats_lso_rx[i];
1713 }
1714
1715 spin_unlock_bh(&nss_top_main.stats_lock);
1716
1717 for (i = 0; i < NSS_STATS_LSO_RX_MAX; i++) {
1718 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
1719 "%s = %llu\n", nss_stats_str_lso_rx[i], stats_shadow[i]);
1720 }
1721
1722 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nlso_rx stats end\n\n");
1723 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
1724 kfree(lbuf);
1725 kfree(stats_shadow);
1726
1727 return bytes_read;
1728}
1729
1730/*
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301731 * nss_stats_drv_read()
1732 * Read HLOS driver stats
1733 */
1734static ssize_t nss_stats_drv_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
1735{
1736 int32_t i;
1737
1738 /*
1739 * max output lines = #stats + start tag line + end tag line + three blank lines
1740 */
1741 uint32_t max_output_lines = NSS_STATS_DRV_MAX + 5;
1742 size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
1743 size_t size_wr = 0;
1744 ssize_t bytes_read = 0;
1745 uint64_t *stats_shadow;
1746
1747 char *lbuf = kzalloc(size_al, GFP_KERNEL);
1748 if (unlikely(lbuf == NULL)) {
1749 nss_warning("Could not allocate memory for local statistics buffer");
1750 return 0;
1751 }
1752
1753 stats_shadow = kzalloc(NSS_STATS_DRV_MAX * 8, GFP_KERNEL);
1754 if (unlikely(stats_shadow == NULL)) {
1755 nss_warning("Could not allocate memory for local shadow buffer");
Ankit Dhanuka14999992014-11-12 15:35:11 +05301756 kfree(lbuf);
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301757 return 0;
1758 }
1759
1760 size_wr = scnprintf(lbuf, size_al, "drv stats start:\n\n");
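	/*
	 * Unlike the other readers, no stats_lock is taken here: each HLOS
	 * driver counter is read individually through NSS_PKT_STATS_READ().
	 */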
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301761 for (i = 0; (i < NSS_STATS_DRV_MAX); i++) {
Sundarajan Srinivasan62fee7e2015-01-22 11:13:10 -08001762 stats_shadow[i] = NSS_PKT_STATS_READ(&nss_top_main.stats_drv[i]);
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301763 }
1764
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301765 for (i = 0; (i < NSS_STATS_DRV_MAX); i++) {
1766 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
1767 "%s = %llu\n", nss_stats_str_drv[i], stats_shadow[i]);
1768 }
1769
1770 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\ndrv stats end\n\n");
1771 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
1772 kfree(lbuf);
1773 kfree(stats_shadow);
1774
1775 return bytes_read;
1776}
1777
1778/*
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301779 * nss_stats_pppoe_read()
1780 * Read PPPoE stats
1781 */
1782static ssize_t nss_stats_pppoe_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
1783{
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301784 int32_t i, j, k;
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301785
1786 /*
1787 * max output lines = #stats + start tag line + end tag line + three blank lines
1788 */
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301789 uint32_t max_output_lines = (NSS_STATS_NODE_MAX + 2) + (NSS_STATS_PPPOE_MAX + 3) +
Ankit Dhanukaa1569ce2014-05-13 19:58:06 +05301790 ((NSS_MAX_PHYSICAL_INTERFACES * NSS_PPPOE_NUM_SESSION_PER_INTERFACE * (NSS_PPPOE_EXCEPTION_EVENT_MAX + 5)) + 3) + 5;
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301791 size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
1792 size_t size_wr = 0;
1793 ssize_t bytes_read = 0;
1794 uint64_t *stats_shadow;
1795
1796 char *lbuf = kzalloc(size_al, GFP_KERNEL);
1797 if (unlikely(lbuf == NULL)) {
1798 nss_warning("Could not allocate memory for local statistics buffer");
1799 return 0;
1800 }
1801
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301802 stats_shadow = kzalloc(64 * 8, GFP_KERNEL);
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301803 if (unlikely(stats_shadow == NULL)) {
1804 nss_warning("Could not allocate memory for local shadow buffer");
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301805 kfree(lbuf);
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301806 return 0;
1807 }
1808
1809 size_wr = scnprintf(lbuf, size_al, "pppoe stats start:\n\n");
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301810
1811 /*
1812 * Common node stats
1813 */
Abhishek Rastogia1a07972014-04-01 19:43:33 +05301814 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "common node stats:\n\n");
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301815 spin_lock_bh(&nss_top_main.stats_lock);
1816 for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
1817 stats_shadow[i] = nss_top_main.stats_node[NSS_PPPOE_RX_INTERFACE][i];
1818 }
1819
1820 spin_unlock_bh(&nss_top_main.stats_lock);
1821
1822 for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
1823 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
1824 "%s = %llu\n", nss_stats_str_node[i], stats_shadow[i]);
1825 }
1826
1827 /*
1828 * PPPoE node stats
1829 */
Murat Sezgin2f9241a2015-06-25 13:01:51 -07001830 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\npppoe node stats:\n\n");
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301831 spin_lock_bh(&nss_top_main.stats_lock);
1832 for (i = 0; (i < NSS_STATS_PPPOE_MAX); i++) {
1833 stats_shadow[i] = nss_top_main.stats_pppoe[i];
1834 }
1835
1836 spin_unlock_bh(&nss_top_main.stats_lock);
1837
1838 for (i = 0; (i < NSS_STATS_PPPOE_MAX); i++) {
1839 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
1840 "%s = %llu\n", nss_stats_str_pppoe[i], stats_shadow[i]);
1841 }
1842
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301843 /*
1844 * Exception stats
1845 */
1846 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nException PPPoE:\n\n");
1847
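	/*
	 * Per-session exception counters use 1-based interface and session
	 * indices; each snapshot is staged in stats_shadow_pppoe_except under
	 * the lock and printed after the lock is dropped.
	 */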
Murat Sezgin2f9241a2015-06-25 13:01:51 -07001848 for (j = 1; j <= NSS_MAX_PHYSICAL_INTERFACES; j++) {
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301849 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nInterface %d:\n\n", j);
1850
1851 spin_lock_bh(&nss_top_main.stats_lock);
Murat Sezgin2f9241a2015-06-25 13:01:51 -07001852 for (k = 1; k <= NSS_PPPOE_NUM_SESSION_PER_INTERFACE; k++) {
Ankit Dhanukaa1569ce2014-05-13 19:58:06 +05301853 for (i = 0; (i < NSS_PPPOE_EXCEPTION_EVENT_MAX); i++) {
Murat Sezgin2f9241a2015-06-25 13:01:51 -07001854 stats_shadow_pppoe_except[k - 1][i] = nss_top_main.stats_if_exception_pppoe[j][k][i];
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301855 }
1856 }
1857
1858 spin_unlock_bh(&nss_top_main.stats_lock);
1859
Murat Sezgin2f9241a2015-06-25 13:01:51 -07001860 for (k = 1; k <= NSS_PPPOE_NUM_SESSION_PER_INTERFACE; k++) {
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301861 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "%d. Session\n", k);
Ankit Dhanukaa1569ce2014-05-13 19:58:06 +05301862 for (i = 0; (i < NSS_PPPOE_EXCEPTION_EVENT_MAX); i++) {
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301863 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
1864 "%s = %llu\n",
1865 nss_stats_str_if_exception_pppoe[i],
Murat Sezgin2f9241a2015-06-25 13:01:51 -07001866 stats_shadow_pppoe_except[k - 1][i]);
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301867 }
1868 }
1869
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301870 }
1871
Murat Sezgin2f9241a2015-06-25 13:01:51 -07001872 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\npppoe stats end\n\n");
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301873 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
1874 kfree(lbuf);
1875 kfree(stats_shadow);
1876
1877 return bytes_read;
1878}
1879
1880/*
1881 * nss_stats_gmac_read()
1882 * Read GMAC stats
1883 */
1884static ssize_t nss_stats_gmac_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
1885{
1886 uint32_t i, id;
1887
1888 /*
1889 * max output lines = ((#stats + start tag + one blank) * #GMACs) + start/end tag + 3 blank
1890 */
1891 uint32_t max_output_lines = ((NSS_STATS_GMAC_MAX + 2) * NSS_MAX_PHYSICAL_INTERFACES) + 5;
1892 size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
1893 size_t size_wr = 0;
1894 ssize_t bytes_read = 0;
1895 uint64_t *stats_shadow;
1896
1897 char *lbuf = kzalloc(size_al, GFP_KERNEL);
1898 if (unlikely(lbuf == NULL)) {
1899 nss_warning("Could not allocate memory for local statistics buffer");
1900 return 0;
1901 }
1902
1903 stats_shadow = kzalloc(NSS_STATS_GMAC_MAX * 8, GFP_KERNEL);
1904 if (unlikely(stats_shadow == NULL)) {
1905 nss_warning("Could not allocate memory for local shadow buffer");
Ankit Dhanuka14999992014-11-12 15:35:11 +05301906 kfree(lbuf);
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301907 return 0;
1908 }
1909
1910 size_wr = scnprintf(lbuf, size_al, "gmac stats start:\n\n");
1911
1912 for (id = 0; id < NSS_MAX_PHYSICAL_INTERFACES; id++) {
1913 spin_lock_bh(&nss_top_main.stats_lock);
1914 for (i = 0; (i < NSS_STATS_GMAC_MAX); i++) {
1915 stats_shadow[i] = nss_top_main.stats_gmac[id][i];
1916 }
1917
1918 spin_unlock_bh(&nss_top_main.stats_lock);
1919
1920 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "GMAC ID: %d\n", id);
1921 for (i = 0; (i < NSS_STATS_GMAC_MAX); i++) {
1922 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
1923 "%s = %llu\n", nss_stats_str_gmac[i], stats_shadow[i]);
1924 }
1925		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n");
1926 }
1927
1928 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\ngmac stats end\n\n");
1929 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
1930 kfree(lbuf);
1931 kfree(stats_shadow);
1932
1933 return bytes_read;
1934}
1935
Saurabh Misra09dddeb2014-09-30 16:38:07 -07001936/*
Bharath M Kumarcc666e92014-12-24 19:17:28 +05301937 * nss_stats_wifi_read()
Stephen Wangaed46332016-12-12 17:29:03 -08001938 * Read wifi statistics
Bharath M Kumarcc666e92014-12-24 19:17:28 +05301939 */
1940static ssize_t nss_stats_wifi_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
1941{
1942 uint32_t i, id;
1943
1944 /*
1945 * max output lines = ((#stats + start tag + one blank) * #WIFI RADIOs) + start/end tag + 3 blank
1946 */
1947 uint32_t max_output_lines = ((NSS_STATS_WIFI_MAX + 2) * NSS_MAX_WIFI_RADIO_INTERFACES) + 5;
1948 size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
1949 size_t size_wr = 0;
1950 ssize_t bytes_read = 0;
1951 uint64_t *stats_shadow;
1952
1953 char *lbuf = kzalloc(size_al, GFP_KERNEL);
1954 if (unlikely(lbuf == NULL)) {
1955 nss_warning("Could not allocate memory for local statistics buffer");
1956 return 0;
1957 }
1958
1959 stats_shadow = kzalloc(NSS_STATS_WIFI_MAX * 8, GFP_KERNEL);
1960 if (unlikely(stats_shadow == NULL)) {
1961 nss_warning("Could not allocate memory for local shadow buffer");
1962 kfree(lbuf);
1963 return 0;
1964 }
1965
1966 size_wr = scnprintf(lbuf, size_al, "wifi stats start:\n\n");
1967
1968 for (id = 0; id < NSS_MAX_WIFI_RADIO_INTERFACES; id++) {
1969 spin_lock_bh(&nss_top_main.stats_lock);
1970 for (i = 0; (i < NSS_STATS_WIFI_MAX); i++) {
1971 stats_shadow[i] = nss_top_main.stats_wifi[id][i];
1972 }
1973
1974 spin_unlock_bh(&nss_top_main.stats_lock);
1975
1976 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "WIFI ID: %d\n", id);
1977 for (i = 0; (i < NSS_STATS_WIFI_MAX); i++) {
1978 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
1979 "%s = %llu\n", nss_stats_str_wifi[i], stats_shadow[i]);
1980 }
1981		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n");
1982 }
1983
1984 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nwifi stats end\n\n");
1985 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
1986 kfree(lbuf);
1987 kfree(stats_shadow);
1988
1989 return bytes_read;
1990}
1991
1992/*
Tushar Mathurff8741b2015-12-02 20:28:59 +05301993 * nss_stats_dtls_read()
Thomas Wu71c5ecc2016-06-21 11:15:52 -07001994 * Read DTLS session statistics
Tushar Mathurff8741b2015-12-02 20:28:59 +05301995 */
1996static ssize_t nss_stats_dtls_read(struct file *fp, char __user *ubuf,
1997 size_t sz, loff_t *ppos)
1998{
1999 uint32_t max_output_lines = 2 + (NSS_MAX_DTLS_SESSIONS
2000 * (NSS_STATS_DTLS_SESSION_MAX + 2)) + 2;
2001 size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
2002 size_t size_wr = 0;
2003 ssize_t bytes_read = 0;
2004 struct net_device *dev;
2005 int id, i;
2006 struct nss_stats_dtls_session_debug *dtls_session_stats = NULL;
2007
2008 char *lbuf = kzalloc(size_al, GFP_KERNEL);
2009 if (unlikely(lbuf == NULL)) {
2010 nss_warning("Could not allocate memory for local statistics buffer");
2011 return 0;
2012 }
2013
2014 dtls_session_stats = kzalloc((sizeof(struct nss_stats_dtls_session_debug)
2015 * NSS_MAX_DTLS_SESSIONS), GFP_KERNEL);
2016 if (unlikely(dtls_session_stats == NULL)) {
2017 nss_warning("Could not allocate memory for populating DTLS stats");
2018 kfree(lbuf);
2019 return 0;
2020 }
2021
2022 /*
2023 * Get all stats
2024 */
2025 nss_dtls_session_debug_stats_get(dtls_session_stats);
2026
2027 /*
2028 * Session stats
2029 */
2030 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2031 "\nDTLS session stats start:\n\n");
2032
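	/*
	 * Valid entries are assumed to be packed from index 0, so the first
	 * entry with valid == 0 ends the scan.
	 */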
2033 for (id = 0; id < NSS_MAX_DTLS_SESSIONS; id++) {
2034 if (!dtls_session_stats[id].valid)
2035 break;
2036
2037 dev = dev_get_by_index(&init_net, dtls_session_stats[id].if_index);
2038 if (likely(dev)) {
2039 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2040 "%d. nss interface id=%d, netdevice=%s\n",
2041 id, dtls_session_stats[id].if_num,
2042 dev->name);
2043 dev_put(dev);
2044 } else {
2045 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2046 "%d. nss interface id=%d\n", id,
2047 dtls_session_stats[id].if_num);
2048 }
2049
2050 for (i = 0; i < NSS_STATS_DTLS_SESSION_MAX; i++) {
2051 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2052 "\t%s = %llu\n",
2053 nss_stats_str_dtls_session_debug_stats[i],
2054 dtls_session_stats[id].stats[i]);
2055 }
2056
2057 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n");
2058 }
2059
2060 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2061 "\nDTLS session stats end\n");
2062 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, size_wr);
2063
2064 kfree(dtls_session_stats);
2065 kfree(lbuf);
2066 return bytes_read;
2067}
2068
Tushar Mathurff8741b2015-12-02 20:28:59 +05302069/*
Thomas Wu71c5ecc2016-06-21 11:15:52 -07002070 * nss_stats_gre_tunnel_read()
2071 * Read GRE Tunnel session statistics
2072 */
2073static ssize_t nss_stats_gre_tunnel_read(struct file *fp, char __user *ubuf,
2074 size_t sz, loff_t *ppos)
2075{
2076 uint32_t max_output_lines = 2 + (NSS_MAX_GRE_TUNNEL_SESSIONS
2077 * (NSS_STATS_GRE_TUNNEL_SESSION_MAX + 2)) + 2;
2078 size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
2079 size_t size_wr = 0;
2080 ssize_t bytes_read = 0;
2081 struct net_device *dev;
2082 int id, i;
2083 struct nss_stats_gre_tunnel_session_debug *gre_tunnel_session_stats = NULL;
2084
2085 char *lbuf = kzalloc(size_al, GFP_KERNEL);
2086 if (unlikely(lbuf == NULL)) {
2087 nss_warning("Could not allocate memory for local statistics buffer");
2088 return 0;
2089 }
2090
2091 gre_tunnel_session_stats = kzalloc((sizeof(struct nss_stats_gre_tunnel_session_debug)
2092 * NSS_MAX_GRE_TUNNEL_SESSIONS), GFP_KERNEL);
2093 if (unlikely(gre_tunnel_session_stats == NULL)) {
2094 nss_warning("Could not allocate memory for populating GRE Tunnel stats");
2095 kfree(lbuf);
2096 return 0;
2097 }
2098
2099 /*
2100 * Get all stats
2101 */
2102 nss_gre_tunnel_session_debug_stats_get(gre_tunnel_session_stats);
2103
2104 /*
2105 * Session stats
2106 */
2107 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2108 "\nGRE Tunnel session stats start:\n\n");
2109
2110 for (id = 0; id < NSS_MAX_GRE_TUNNEL_SESSIONS; id++) {
2111 if (!gre_tunnel_session_stats[id].valid)
2112 break;
2113
2114 dev = dev_get_by_index(&init_net, gre_tunnel_session_stats[id].if_index);
2115 if (likely(dev)) {
2116 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2117 "%d. nss interface id=%d, netdevice=%s\n",
2118 id, gre_tunnel_session_stats[id].if_num,
2119 dev->name);
2120 dev_put(dev);
2121 } else {
2122 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2123 "%d. nss interface id=%d\n", id,
2124 gre_tunnel_session_stats[id].if_num);
2125 }
2126
2127 for (i = 0; i < NSS_STATS_GRE_TUNNEL_SESSION_MAX; i++) {
2128 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2129 "\t%s = %llu\n",
2130 nss_stats_str_gre_tunnel_session_debug_stats[i],
2131 gre_tunnel_session_stats[id].stats[i]);
2132 }
2133
2134 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n");
2135 }
2136
2137 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2138 "\nGRE Tunnel session stats end\n");
2139 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, size_wr);
2140
2141 kfree(gre_tunnel_session_stats);
2142 kfree(lbuf);
2143 return bytes_read;
2144}
2145
2146/*
ratheesh kannoth7af985d2015-06-24 15:08:40 +05302147 * nss_stats_l2tpv2_read()
2148 * Read l2tpv2 statistics
2149 */
2150static ssize_t nss_stats_l2tpv2_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
2151{
2152
2153 uint32_t max_output_lines = 2 /* header & footer for session stats */
2154 + NSS_MAX_L2TPV2_DYNAMIC_INTERFACES * (NSS_STATS_L2TPV2_SESSION_MAX + 2) /*session stats */
2155 + 2;
2156	size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
2157 size_t size_wr = 0;
2158 ssize_t bytes_read = 0;
2159 struct net_device *dev;
2160 struct nss_stats_l2tpv2_session_debug l2tpv2_session_stats[NSS_MAX_L2TPV2_DYNAMIC_INTERFACES];
2161 int id, i;
2162
2163 char *lbuf = kzalloc(size_al, GFP_KERNEL);
2164 if (unlikely(lbuf == NULL)) {
2165 nss_warning("Could not allocate memory for local statistics buffer");
2166 return 0;
2167 }
2168
2169 memset(&l2tpv2_session_stats, 0, sizeof(struct nss_stats_l2tpv2_session_debug) * NSS_MAX_L2TPV2_DYNAMIC_INTERFACES);
2170
2171 /*
2172 * Get all stats
2173 */
2174 nss_l2tpv2_session_debug_stats_get((void *)&l2tpv2_session_stats);
2175
2176 /*
2177 * Session stats
2178 */
2179 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nl2tp v2 session stats start:\n\n");
2180 for (id = 0; id < NSS_MAX_L2TPV2_DYNAMIC_INTERFACES; id++) {
2181
2182 if (!l2tpv2_session_stats[id].valid) {
2183 break;
2184 }
2185
2186 dev = dev_get_by_index(&init_net, l2tpv2_session_stats[id].if_index);
2187 if (likely(dev)) {
2188
2189 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "%d. nss interface id=%d, netdevice=%s\n", id,
2190 l2tpv2_session_stats[id].if_num, dev->name);
2191 dev_put(dev);
2192 } else {
2193 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "%d. nss interface id=%d\n", id,
2194 l2tpv2_session_stats[id].if_num);
2195 }
2196
2197 for (i = 0; i < NSS_STATS_L2TPV2_SESSION_MAX; i++) {
2198 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2199 "\t%s = %llu\n", nss_stats_str_l2tpv2_session_debug_stats[i],
2200 l2tpv2_session_stats[id].stats[i]);
2201 }
2202 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n");
2203 }
2204
2205 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nl2tp v2 session stats end\n");
2206 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, size_wr);
2207
2208 kfree(lbuf);
2209 return bytes_read;
2210}
2211
2212/*
ratheesh kannotha1245c32015-11-04 16:45:43 +05302213 * nss_stats_map_t_read()
2214 * Read map_t statistics
2215 */
2216static ssize_t nss_stats_map_t_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
2217{
2218
2219 uint32_t max_output_lines = 2 /* header & footer for instance stats */
2220 + NSS_MAX_MAP_T_DYNAMIC_INTERFACES * (NSS_STATS_MAP_T_MAX + 2) /*instance stats */
2221 + 2;
2222 size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
2223 size_t size_wr = 0;
2224 ssize_t bytes_read = 0;
2225 struct net_device *dev;
2226 struct nss_stats_map_t_instance_debug map_t_instance_stats[NSS_MAX_MAP_T_DYNAMIC_INTERFACES];
2227 int id, i;
2228
2229 char *lbuf = kzalloc(size_al, GFP_KERNEL);
2230 if (unlikely(!lbuf)) {
2231 nss_warning("Could not allocate memory for local statistics buffer");
2232 return 0;
2233 }
2234
2235 memset(&map_t_instance_stats, 0, sizeof(struct nss_stats_map_t_instance_debug) * NSS_MAX_MAP_T_DYNAMIC_INTERFACES);
2236
2237 /*
2238 * Get all stats
2239 */
2240 nss_map_t_instance_debug_stats_get((void *)&map_t_instance_stats);
2241
2242 /*
2243 * Session stats
2244 */
2245 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nmap_t instance stats start:\n\n");
2246 for (id = 0; id < NSS_MAX_MAP_T_DYNAMIC_INTERFACES; id++) {
2247
2248 if (!map_t_instance_stats[id].valid) {
2249 break;
2250 }
2251
2252 dev = dev_get_by_index(&init_net, map_t_instance_stats[id].if_index);
2253 if (likely(dev)) {
2254
2255 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "%d. nss interface id=%d, netdevice=%s\n", id,
2256 map_t_instance_stats[id].if_num, dev->name);
2257 dev_put(dev);
2258 } else {
2259 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "%d. nss interface id=%d\n", id,
2260 map_t_instance_stats[id].if_num);
2261 }
2262
2263 for (i = 0; i < NSS_STATS_MAP_T_MAX; i++) {
2264 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2265 "\t%s = %llu\n", nss_stats_str_map_t_instance_debug_stats[i],
2266 map_t_instance_stats[id].stats[i]);
2267 }
2268 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n");
2269 }
2270
2271 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nmap_t instance stats end\n");
2272 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, size_wr);
2273
2274 kfree(lbuf);
2275 return bytes_read;
2276}
2277
ratheesh kannotheb2a0a82017-05-04 09:20:17 +05302278 /*
2279 * nss_stats_gre_read()
2280 * Read GRE statistics
2281 */
2282static ssize_t nss_stats_gre_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
2283{
2284 uint32_t max_output_lines = 2 /* header & footer for base debug stats */
2285 + 2 /* header & footer for session debug stats */
2286 + NSS_STATS_GRE_BASE_DEBUG_MAX /* Base debug */
2287 + NSS_GRE_MAX_DEBUG_SESSION_STATS * (NSS_STATS_GRE_SESSION_DEBUG_MAX + 2) /*session stats */
2288 + 2;
2289 size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
2290 size_t size_wr = 0;
2291 ssize_t bytes_read = 0;
2292 struct net_device *dev;
2293 struct nss_stats_gre_session_debug *sstats;
2294 struct nss_stats_gre_base_debug *bstats;
2295 int id, i;
2296
2297 char *lbuf = kzalloc(size_al, GFP_KERNEL);
2298 if (unlikely(!lbuf)) {
2299 nss_warning("Could not allocate memory for local statistics buffer");
2300 return 0;
2301 }
2302
2303 bstats = kzalloc(sizeof(struct nss_stats_gre_base_debug), GFP_KERNEL);
2304 if (unlikely(!bstats)) {
2305 nss_warning("Could not allocate memory for base debug statistics buffer");
2306 kfree(lbuf);
2307 return 0;
2308 }
2309
2310 sstats = kzalloc(sizeof(struct nss_stats_gre_session_debug) * NSS_GRE_MAX_DEBUG_SESSION_STATS, GFP_KERNEL);
2311 if (unlikely(!sstats)) {
2312		nss_warning("Could not allocate memory for session debug statistics buffer");
2313 kfree(lbuf);
2314 kfree(bstats);
2315 return 0;
2316 }
2317
2318 /*
2319 * Get all base stats
2320 */
2321 nss_gre_base_debug_stats_get((void *)bstats, sizeof(struct nss_stats_gre_base_debug));
2322 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\ngre Base stats start:\n\n");
2323 for (i = 0; i < NSS_STATS_GRE_BASE_DEBUG_MAX; i++) {
2324 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2325 "\t%s = %llu\n", nss_stats_str_gre_base_debug_stats[i],
2326 bstats->stats[i]);
2327 }
2328
2329	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\ngre Base stats end\n\n");
2330
2331 /*
2332 * Get all session stats
2333 */
2334 nss_gre_session_debug_stats_get(sstats, sizeof(struct nss_stats_gre_session_debug) * NSS_GRE_MAX_DEBUG_SESSION_STATS);
2335 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\ngre Session stats start:\n\n");
2336
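	/*
	 * Unlike the DTLS and GRE tunnel readers above, invalid slots are
	 * skipped with continue rather than ending the scan, so sessions may
	 * be sparse.
	 */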
2337 for (id = 0; id < NSS_GRE_MAX_DEBUG_SESSION_STATS; id++) {
2338
2339 if (!((sstats + id)->valid)) {
2340 continue;
2341 }
2342
2343 dev = dev_get_by_index(&init_net, (sstats + id)->if_index);
2344 if (likely(dev)) {
2345
2346 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "%d. nss interface id=%d, netdevice=%s\n", id,
2347 (sstats + id)->if_num, dev->name);
2348 dev_put(dev);
2349 } else {
2350 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "%d. nss interface id=%d\n", id,
2351 (sstats + id)->if_num);
2352 }
2353
2354 for (i = 0; i < NSS_STATS_GRE_SESSION_DEBUG_MAX; i++) {
2355 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2356 "\t%s = %llu\n", nss_stats_str_gre_session_debug_stats[i],
2357 (sstats + id)->stats[i]);
2358 }
2359 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n");
2360 }
2361
2362 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\ngre Session stats end\n");
2363 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, size_wr);
2364
2365 kfree(sstats);
2366 kfree(bstats);
2367 kfree(lbuf);
2368 return bytes_read;
2369}
2370
ratheesh kannotha1245c32015-11-04 16:45:43 +05302371/*
Amit Gupta316729b2016-08-12 12:21:15 +05302372 * nss_stats_ppe_conn_read()
2373 * Read ppe connection stats
2374 */
2375static ssize_t nss_stats_ppe_conn_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
2376{
2377
2378 int i;
2379 char *lbuf = NULL;
2380 size_t size_wr = 0;
2381 ssize_t bytes_read = 0;
2382 uint32_t ppe_stats[NSS_STATS_PPE_CONN_MAX];
2383 uint32_t max_output_lines = 2 /* header & footer for session stats */
2384 + NSS_STATS_PPE_CONN_MAX /* PPE flow counters */
2385 + 2;
2386 size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
2387
Amit Gupta316729b2016-08-12 12:21:15 +05302388 lbuf = kzalloc(size_al, GFP_KERNEL);
2389 if (unlikely(lbuf == NULL)) {
2390 nss_warning("Could not allocate memory for local statistics buffer");
2391 return 0;
2392 }
2393
2394 memset(&ppe_stats, 0, sizeof(uint32_t) * NSS_STATS_PPE_CONN_MAX);
2395
2396 /*
2397 * Get all stats
2398 */
2399 nss_ppe_stats_conn_get(ppe_stats);
2400
2401 /*
2402 * flow stats
2403 */
2404 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nppe flow counters start:\n\n");
2405
2406 for (i = 0; i < NSS_STATS_PPE_CONN_MAX; i++) {
2407 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2408 "\t%s = %u\n", nss_stats_str_ppe_conn[i],
2409 ppe_stats[i]);
2410 }
2411
2412 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n");
2413
2414 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nppe flow counters end\n");
2415 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, size_wr);
2416
2417 kfree(lbuf);
2418 return bytes_read;
2419}
2420
2421/*
2422 * nss_stats_ppe_l3_read()
2423 * Read ppe L3 debug stats
2424 */
2425static ssize_t nss_stats_ppe_l3_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
2426{
2427
2428 int i;
2429 char *lbuf = NULL;
2430 size_t size_wr = 0;
2431 ssize_t bytes_read = 0;
2432 uint32_t ppe_stats[NSS_STATS_PPE_L3_MAX];
2433	uint32_t max_output_lines = 2 /* header & footer for l3 debug stats */
2434		+ NSS_STATS_PPE_L3_MAX /* PPE L3 debug counters */
2435 + 2;
2436 size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
2437
2438 lbuf = kzalloc(size_al, GFP_KERNEL);
2439 if (unlikely(!lbuf)) {
2440 nss_warning("Could not allocate memory for local statistics buffer");
2441 return 0;
2442 }
2443
2444 memset(ppe_stats, 0, sizeof(uint32_t) * NSS_STATS_PPE_L3_MAX);
2445
2446 /*
2447 * Get all stats
2448 */
2449 nss_ppe_stats_l3_get(ppe_stats);
2450
2451 /*
2452	 * L3 debug stats
2453 */
2454 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nppe l3 debug stats start:\n\n");
2455
2456 for (i = 0; i < NSS_STATS_PPE_L3_MAX; i++) {
2457 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2458 "\t%s = 0x%x\n", nss_stats_str_ppe_l3[i],
2459 ppe_stats[i]);
2460 }
2461
2462 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n");
2463
2464 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nppe l3 debug stats end\n");
2465 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, size_wr);
2466
2467 kfree(lbuf);
2468 return bytes_read;
2469}
2470
2471/*
2472 * nss_stats_ppe_code_read()
2473 * Read ppe CPU & DROP code
2474 */
2475static ssize_t nss_stats_ppe_code_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
2476{
2477
2478 int i;
2479 char *lbuf = NULL;
2480 size_t size_wr = 0;
2481 ssize_t bytes_read = 0;
2482 uint32_t ppe_stats[NSS_STATS_PPE_CODE_MAX];
2483	uint32_t max_output_lines = 2 /* header & footer for code stats */
2484		+ NSS_STATS_PPE_CODE_MAX /* PPE CPU and drop code counters */
2485 + 2;
2486 size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
2487
2488 lbuf = kzalloc(size_al, GFP_KERNEL);
2489 if (unlikely(!lbuf)) {
2490 nss_warning("Could not allocate memory for local statistics buffer");
2491 return 0;
2492 }
2493
2494 memset(ppe_stats, 0, sizeof(uint32_t) * NSS_STATS_PPE_CODE_MAX);
2495
2496 /*
2497 * Get all stats
2498 */
2499 nss_ppe_stats_code_get(ppe_stats);
2500
2501 /*
2502	 * CPU and drop code stats
2503 */
2504	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nppe code stats start:\n\n");
2505
2506 for (i = 0; i < NSS_STATS_PPE_CODE_MAX; i++) {
2507 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2508 "\t%s = %u\n", nss_stats_str_ppe_code[i],
2509 ppe_stats[i]);
2510 }
2511
2512 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n");
2513
2514	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nppe code stats end\n");
2515 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, size_wr);
2516
2517 kfree(lbuf);
2518 return bytes_read;
2519}
2520
2521/*
Shyam Sunder66e889d2015-11-02 15:31:20 +05302522 * nss_stats_pptp_read()
2523 * Read pptp statistics
2524 */
2525static ssize_t nss_stats_pptp_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
2526{
2527
2528 uint32_t max_output_lines = 2 /* header & footer for session stats */
2529 + NSS_MAX_PPTP_DYNAMIC_INTERFACES * (NSS_STATS_PPTP_SESSION_MAX + 2) /*session stats */
2530 + 2;
2531	size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
2532 size_t size_wr = 0;
2533 ssize_t bytes_read = 0;
2534 struct net_device *dev;
2535 struct nss_stats_pptp_session_debug pptp_session_stats[NSS_MAX_PPTP_DYNAMIC_INTERFACES];
2536 int id, i;
2537
2538 char *lbuf = kzalloc(size_al, GFP_KERNEL);
2539 if (unlikely(lbuf == NULL)) {
2540 nss_warning("Could not allocate memory for local statistics buffer");
2541 return 0;
2542 }
2543
2544 memset(&pptp_session_stats, 0, sizeof(struct nss_stats_pptp_session_debug) * NSS_MAX_PPTP_DYNAMIC_INTERFACES);
2545
2546 /*
2547 * Get all stats
2548 */
2549 nss_pptp_session_debug_stats_get((void *)&pptp_session_stats);
2550
2551 /*
2552 * Session stats
2553 */
2554 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\npptp session stats start:\n\n");
2555 for (id = 0; id < NSS_MAX_PPTP_DYNAMIC_INTERFACES; id++) {
2556
2557 if (!pptp_session_stats[id].valid) {
2558 break;
2559 }
2560
2561 dev = dev_get_by_index(&init_net, pptp_session_stats[id].if_index);
2562 if (likely(dev)) {
2563
2564 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "%d. nss interface id=%d, netdevice=%s\n", id,
2565 pptp_session_stats[id].if_num, dev->name);
2566 dev_put(dev);
2567 } else {
2568 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "%d. nss interface id=%d\n", id,
2569 pptp_session_stats[id].if_num);
2570 }
2571
2572 for (i = 0; i < NSS_STATS_PPTP_SESSION_MAX; i++) {
2573 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2574 "\t%s = %llu\n", nss_stats_str_pptp_session_debug_stats[i],
2575 pptp_session_stats[id].stats[i]);
2576 }
2577 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n");
2578 }
2579
2580 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\npptp session stats end\n");
2581 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, size_wr);
2582
2583 kfree(lbuf);
2584 return bytes_read;
2585}
2586
2587/*
Ankit Dhanuka14999992014-11-12 15:35:11 +05302588 * nss_stats_sjack_read()
2589 * Read SJACK stats
2590 */
2591static ssize_t nss_stats_sjack_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
2592{
2593 int32_t i;
2594 /*
2595 * max output lines = #stats + start tag line + end tag line + three blank lines
2596 */
2597 uint32_t max_output_lines = NSS_STATS_NODE_MAX + 5;
2598 size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
2599 size_t size_wr = 0;
2600 ssize_t bytes_read = 0;
2601 uint64_t *stats_shadow;
2602
2603 char *lbuf = kzalloc(size_al, GFP_KERNEL);
2604 if (unlikely(lbuf == NULL)) {
2605 nss_warning("Could not allocate memory for local statistics buffer");
2606 return 0;
2607 }
2608
2609 stats_shadow = kzalloc(NSS_STATS_NODE_MAX * 8, GFP_KERNEL);
2610 if (unlikely(stats_shadow == NULL)) {
2611 nss_warning("Could not allocate memory for local shadow buffer");
2612 kfree(lbuf);
2613 return 0;
2614 }
2615
2616 size_wr = scnprintf(lbuf, size_al, "sjack stats start:\n\n");
2617
2618 /*
2619 * Common node stats
2620 */
2621 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "common node stats:\n\n");
2622 spin_lock_bh(&nss_top_main.stats_lock);
2623 for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
2624 stats_shadow[i] = nss_top_main.stats_node[NSS_SJACK_INTERFACE][i];
2625 }
2626
2627 spin_unlock_bh(&nss_top_main.stats_lock);
2628
2629 for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
2630 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2631 "%s = %llu\n", nss_stats_str_node[i], stats_shadow[i]);
2632 }
2633
2634 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nsjack stats end\n\n");
2635
2636 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
2637 kfree(lbuf);
2638 kfree(stats_shadow);
2639
2640 return bytes_read;
2641}
2642
2643/*
Stephen Wang9779d952015-10-28 11:39:07 -07002644 * nss_stats_portid_read()
2645 * Read PortID stats
2646 */
2647static ssize_t nss_stats_portid_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
2648{
2649 int32_t i;
2650 /*
2651 * max output lines = #stats + start tag line + end tag line + three blank lines
2652 */
2653 uint32_t max_output_lines = NSS_STATS_NODE_MAX + NSS_STATS_PORTID_MAX + 5;
2654 size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
2655 size_t size_wr = 0;
2656 ssize_t bytes_read = 0;
2657 uint64_t *stats_shadow;
2658
2659 char *lbuf = kzalloc(size_al, GFP_KERNEL);
2660 if (unlikely(lbuf == NULL)) {
2661 nss_warning("Could not allocate memory for local statistics buffer");
2662 return 0;
2663 }
2664
2665 stats_shadow = kzalloc(NSS_STATS_NODE_MAX * 8, GFP_KERNEL);
2666 if (unlikely(stats_shadow == NULL)) {
2667 nss_warning("Could not allocate memory for local shadow buffer");
2668 kfree(lbuf);
2669 return 0;
2670 }
2671
2672 size_wr = scnprintf(lbuf, size_al, "portid stats start:\n\n");
2673
2674 /*
2675 * Common node stats
2676 */
2677 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "common node stats:\n\n");
2678 spin_lock_bh(&nss_top_main.stats_lock);
2679 for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
2680 stats_shadow[i] = nss_top_main.stats_node[NSS_PORTID_INTERFACE][i];
2681 }
2682
2683 spin_unlock_bh(&nss_top_main.stats_lock);
2684
2685 for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
2686 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2687 "%s = %llu\n", nss_stats_str_node[i], stats_shadow[i]);
2688 }
2689
2690 /*
2691 * PortID node stats
2692 */
2693 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nportid node stats:\n\n");
2694
2695 spin_lock_bh(&nss_top_main.stats_lock);
2696 for (i = 0; (i < NSS_STATS_PORTID_MAX); i++) {
2697 stats_shadow[i] = nss_top_main.stats_portid[i];
2698 }
2699
2700 spin_unlock_bh(&nss_top_main.stats_lock);
2701
2702 for (i = 0; (i < NSS_STATS_PORTID_MAX); i++) {
2703 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2704 "%s = %llu\n", nss_stats_str_portid[i], stats_shadow[i]);
2705 }
2706
2707 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nportid stats end\n\n");
2708
2709 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
2710 kfree(lbuf);
2711 kfree(stats_shadow);
2712
2713 return bytes_read;
2714}
2715
2716/*
Thomas Wu71c5ecc2016-06-21 11:15:52 -07002717 * nss_stats_capwap_encap()
2718 * Make a row for CAPWAP encap stats.
Saurabh Misra09dddeb2014-09-30 16:38:07 -07002719 */
2720static ssize_t nss_stats_capwap_encap(char *line, int len, int i, struct nss_capwap_tunnel_stats *s)
2721{
Saurabh Misra3f66e872015-04-03 11:30:42 -07002722 char *header[] = { "packets", "bytes", "fragments", "drop_ref", "drop_ver", "drop_unalign",
2723 "drop_hroom", "drop_dtls", "drop_nwireless", "drop_qfull", "drop_memfail", "unknown" };
Saurabh Misra09dddeb2014-09-30 16:38:07 -07002724 uint64_t tcnt = 0;
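	/*
	 * The caller walks i upward from 0 and stops when this helper returns 0,
	 * i.e. when every encap row for the tunnel has been produced.
	 */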
2725
2726 switch (i) {
2727 case 0:
2728 tcnt = s->pnode_stats.tx_packets;
2729 break;
2730 case 1:
2731 tcnt = s->pnode_stats.tx_bytes;
2732 break;
2733 case 2:
Saurabh Misra09dddeb2014-09-30 16:38:07 -07002734 tcnt = s->tx_segments;
2735 break;
Saurabh Misra3f66e872015-04-03 11:30:42 -07002736 case 3:
2737 tcnt = s->tx_dropped_sg_ref;
2738 break;
Saurabh Misra09dddeb2014-09-30 16:38:07 -07002739 case 4:
Saurabh Misra3f66e872015-04-03 11:30:42 -07002740 tcnt = s->tx_dropped_ver_mis;
Saurabh Misra09dddeb2014-09-30 16:38:07 -07002741 break;
2742 case 5:
Saurabh Misra3f66e872015-04-03 11:30:42 -07002743 tcnt = s->tx_dropped_unalign;
2744 break;
2745 case 6:
2746 tcnt = s->tx_dropped_hroom;
2747 break;
2748 case 7:
2749 tcnt = s->tx_dropped_dtls;
2750 break;
2751 case 8:
2752 tcnt = s->tx_dropped_nwireless;
2753 break;
2754 case 9:
2755 tcnt = s->tx_queue_full_drops;
2756 break;
2757 case 10:
Saurabh Misra09dddeb2014-09-30 16:38:07 -07002758 tcnt = s->tx_mem_failure_drops;
2759 break;
2760 default:
Saurabh Misra3f66e872015-04-03 11:30:42 -07002761 return 0;
Saurabh Misra09dddeb2014-09-30 16:38:07 -07002762 }
2763
Saurabh Misra3f66e872015-04-03 11:30:42 -07002764 return (snprintf(line, len, "%s = %llu\n", header[i], tcnt));
Saurabh Misra09dddeb2014-09-30 16:38:07 -07002765}
2766
2767/*
Thomas Wu71c5ecc2016-06-21 11:15:52 -07002768 * nss_stats_capwap_decap()
2769 * Make a row for CAPWAP decap stats.
Saurabh Misra09dddeb2014-09-30 16:38:07 -07002770 */
2771static ssize_t nss_stats_capwap_decap(char *line, int len, int i, struct nss_capwap_tunnel_stats *s)
2772{
Saurabh Misra3f66e872015-04-03 11:30:42 -07002773 char *header[] = { "packets", "bytes", "DTLS_pkts", "fragments", "rx_dropped", "drop_oversize",
2774 "drop_frag_timeout", "drop_frag_dup", "drop_frag_gap", "drop_qfull", "drop_memfail",
2775 "drop_csum", "drop_malformed", "unknown" };
Saurabh Misra09dddeb2014-09-30 16:38:07 -07002776 uint64_t tcnt = 0;
2777
2778	switch (i) {
2779 case 0:
2780 tcnt = s->pnode_stats.rx_packets;
2781 break;
2782 case 1:
2783 tcnt = s->pnode_stats.rx_bytes;
2784 break;
2785 case 2:
Saurabh Misra09dddeb2014-09-30 16:38:07 -07002786 tcnt = s->dtls_pkts;
2787 break;
Saurabh Misra3f66e872015-04-03 11:30:42 -07002788 case 3:
Saurabh Misra09dddeb2014-09-30 16:38:07 -07002789 tcnt = s->rx_segments;
2790 break;
Saurabh Misra3f66e872015-04-03 11:30:42 -07002791 case 4:
2792 tcnt = s->pnode_stats.rx_dropped;
2793 break;
Saurabh Misra09dddeb2014-09-30 16:38:07 -07002794 case 5:
Saurabh Misra3f66e872015-04-03 11:30:42 -07002795 tcnt = s->rx_oversize_drops;
Saurabh Misra09dddeb2014-09-30 16:38:07 -07002796 break;
2797 case 6:
Saurabh Misra3f66e872015-04-03 11:30:42 -07002798 tcnt = s->rx_frag_timeout_drops;
Saurabh Misra09dddeb2014-09-30 16:38:07 -07002799 break;
2800 case 7:
2801 tcnt = s->rx_dup_frag;
2802 break;
2803 case 8:
Saurabh Misra3f66e872015-04-03 11:30:42 -07002804 tcnt = s->rx_frag_gap_drops;
2805 break;
Saurabh Misra09dddeb2014-09-30 16:38:07 -07002806 case 9:
Saurabh Misra3f66e872015-04-03 11:30:42 -07002807 tcnt = s->rx_queue_full_drops;
2808 return (snprintf(line, len, "%s = %llu (n2h = %llu)\n", header[i], tcnt, s->rx_n2h_queue_full_drops));
2809 case 10:
Saurabh Misra09dddeb2014-09-30 16:38:07 -07002810 tcnt = s->rx_mem_failure_drops;
2811 break;
Saurabh Misra3f66e872015-04-03 11:30:42 -07002812 case 11:
2813 tcnt = s->rx_csum_drops;
Saurabh Misra09dddeb2014-09-30 16:38:07 -07002814 break;
Saurabh Misra3f66e872015-04-03 11:30:42 -07002815 case 12:
2816 tcnt = s->rx_malformed;
2817 break;
2818 default:
2819 return 0;
Saurabh Misra09dddeb2014-09-30 16:38:07 -07002820 }
2821
Saurabh Misra3f66e872015-04-03 11:30:42 -07002822 return (snprintf(line, len, "%s = %llu\n", header[i], tcnt));
Saurabh Misra09dddeb2014-09-30 16:38:07 -07002823}
2824
2825/*
2826 * nss_stats_capwap_read()
2827 * Read CAPWAP stats
2828 */
2829static ssize_t nss_stats_capwap_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos, uint16_t type)
2830{
2831 struct nss_stats_data *data = fp->private_data;
2832 ssize_t bytes_read = 0;
2833 struct nss_capwap_tunnel_stats stats;
2834 size_t bytes;
2835 char line[80];
Saurabh Misra3f66e872015-04-03 11:30:42 -07002836 int start;
Saurabh Misra09dddeb2014-09-30 16:38:07 -07002837 uint32_t if_num = NSS_DYNAMIC_IF_START;
2838 uint32_t max_if_num = NSS_DYNAMIC_IF_START + NSS_MAX_DYNAMIC_INTERFACES;
2839
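	/*
	 * fp->private_data holds the read cursor, so a later read() call
	 * resumes at the tunnel where the previous call ran out of space.
	 */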
2840 if (data) {
2841 if_num = data->if_num;
2842 }
2843
2844 /*
2845	 * If we are done accommodating all the CAPWAP tunnels.
2846 */
2847 if (if_num > max_if_num) {
2848 return 0;
2849 }
2850
2851 for (; if_num <= max_if_num; if_num++) {
2852 bool isthere;
2853
2854 if (nss_is_dynamic_interface(if_num) == false) {
2855 continue;
2856 }
2857
2858 if (nss_dynamic_interface_get_type(if_num) != NSS_DYNAMIC_INTERFACE_TYPE_CAPWAP) {
2859 continue;
2860 }
2861
2862 /*
2863		 * If the CAPWAP tunnel does not exist, isthere will be false.
2864 */
2865 isthere = nss_capwap_get_stats(if_num, &stats);
2866 if (!isthere) {
2867 continue;
2868 }
2869
Saurabh Misra3f66e872015-04-03 11:30:42 -07002870 bytes = snprintf(line, sizeof(line), "----if_num : %2d----\n", if_num);
Saurabh Misra09dddeb2014-09-30 16:38:07 -07002871 if ((bytes_read + bytes) > sz) {
2872 break;
2873 }
2874
2875 if (copy_to_user(ubuf + bytes_read, line, bytes) != 0) {
2876 bytes_read = -EFAULT;
2877 goto fail;
2878 }
2879 bytes_read += bytes;
2880 start = 0;
Saurabh Misra3f66e872015-04-03 11:30:42 -07002881 while (bytes_read < sz) {
Saurabh Misra09dddeb2014-09-30 16:38:07 -07002882 if (type == 1) {
2883 bytes = nss_stats_capwap_encap(line, sizeof(line), start, &stats);
2884 } else {
2885 bytes = nss_stats_capwap_decap(line, sizeof(line), start, &stats);
2886 }
2887
Saurabh Misra3f66e872015-04-03 11:30:42 -07002888 /*
2889 * If we don't have any more lines in decap/encap.
2890 */
2891 if (bytes == 0) {
2892 break;
2893 }
2894
Saurabh Misra09dddeb2014-09-30 16:38:07 -07002895 if ((bytes_read + bytes) > sz)
2896 break;
2897
2898 if (copy_to_user(ubuf + bytes_read, line, bytes) != 0) {
2899 bytes_read = -EFAULT;
2900 goto fail;
2901 }
2902
2903 bytes_read += bytes;
2904 start++;
2905 }
2906 }
2907
2908 if (bytes_read > 0) {
2909 *ppos = bytes_read;
2910 }
2911
2912 if (data) {
2913 data->if_num = if_num;
2914 }
2915fail:
2916 return bytes_read;
2917}
2918
2919/*
2920 * nss_stats_capwap_decap_read()
2921 * Read CAPWAP decap stats
2922 */
2923static ssize_t nss_stats_capwap_decap_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
2924{
2925 return (nss_stats_capwap_read(fp, ubuf, sz, ppos, 0));
2926}
2927
2928/*
2929 * nss_stats_capwap_encap_read()
2930 * Read CAPWAP encap stats
2931 */
2932static ssize_t nss_stats_capwap_encap_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
2933{
2934 return (nss_stats_capwap_read(fp, ubuf, sz, ppos, 1));
2935}
2936
2937/*
Ankit Dhanuka6228ebd2014-11-05 17:26:01 +05302938 * nss_stats_gre_redir()
Thomas Wu71c5ecc2016-06-21 11:15:52 -07002939 * Make a row for GRE_REDIR stats.
Ankit Dhanuka6228ebd2014-11-05 17:26:01 +05302940 */
2941static ssize_t nss_stats_gre_redir(char *line, int len, int i, struct nss_gre_redir_tunnel_stats *s)
2942{
2943 char *header[] = { "TX Packets", "TX Bytes", "TX Drops", "RX Packets", "RX Bytes", "Rx Drops" };
2944 uint64_t tcnt = 0;
2945
2946 switch (i) {
2947 case 0:
2948 tcnt = s->node_stats.tx_packets;
2949 break;
2950 case 1:
2951 tcnt = s->node_stats.tx_bytes;
2952 break;
2953 case 2:
2954 tcnt = s->tx_dropped;
2955 break;
2956 case 3:
2957 tcnt = s->node_stats.rx_packets;
2958 break;
2959 case 4:
2960 tcnt = s->node_stats.rx_bytes;
2961 break;
2962 case 5:
2963 tcnt = s->node_stats.rx_dropped;
2964 break;
2965 default:
Radha krishna Simha Jigurudf53f022015-11-09 12:31:26 +05302966 return 0;
Ankit Dhanuka6228ebd2014-11-05 17:26:01 +05302967 }
2968
2969 return (snprintf(line, len, "%s = %llu\n", header[i], tcnt));
2970}
2971
2972/*
2973 * nss_stats_gre_redir_read()
Thomas Wu71c5ecc2016-06-21 11:15:52 -07002974 *	Read gre_redir tunnel stats.
Ankit Dhanuka6228ebd2014-11-05 17:26:01 +05302975 */
2976static ssize_t nss_stats_gre_redir_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
2977{
2978 struct nss_stats_data *data = fp->private_data;
2979 ssize_t bytes_read = 0;
2980 struct nss_gre_redir_tunnel_stats stats;
2981 size_t bytes;
2982 char line[80];
2983 int start, end;
2984 int index = 0;
2985
2986 if (data) {
2987 index = data->index;
2988 }
2989
2990 /*
2991	 * If we are done accommodating all the GRE_REDIR tunnels.
2992 */
2993 if (index >= NSS_GRE_REDIR_MAX_INTERFACES) {
2994 return 0;
2995 }
2996
2997 for (; index < NSS_GRE_REDIR_MAX_INTERFACES; index++) {
2998 bool isthere;
2999
3000 /*
3001		 * If the gre_redir tunnel does not exist, isthere will be false.
3002 */
3003 isthere = nss_gre_redir_get_stats(index, &stats);
3004 if (!isthere) {
3005 continue;
3006 }
3007
3008 bytes = snprintf(line, sizeof(line), "\nTunnel if_num: %2d\n", stats.if_num);
3009 if ((bytes_read + bytes) > sz) {
3010 break;
3011 }
3012
3013 if (copy_to_user(ubuf + bytes_read, line, bytes) != 0) {
3014 bytes_read = -EFAULT;
3015 goto fail;
3016 }
3017 bytes_read += bytes;
3018 start = 0;
3019 end = 6;
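		/*
		 * Six rows per tunnel, matching the header[] table in
		 * nss_stats_gre_redir().
		 */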
3020 while (bytes_read < sz && start < end) {
3021 bytes = nss_stats_gre_redir(line, sizeof(line), start, &stats);
3022
3023 if ((bytes_read + bytes) > sz)
3024 break;
3025
3026 if (copy_to_user(ubuf + bytes_read, line, bytes) != 0) {
3027 bytes_read = -EFAULT;
3028 goto fail;
3029 }
3030
3031 bytes_read += bytes;
3032 start++;
3033 }
3034 }
3035
3036 if (bytes_read > 0) {
3037 *ppos = bytes_read;
3038 }
3039
3040 if (data) {
3041 data->index = index;
3042 }
3043
3044fail:
3045 return bytes_read;
3046}
3047
3048/*
Sundarajan Srinivasan273d9002015-03-03 15:43:16 -08003049 * nss_stats_wifi_if_read()
3050 * Read wifi_if statistics
3051 */
3052static ssize_t nss_stats_wifi_if_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
3053{
3054 struct nss_stats_data *data = fp->private_data;
3055 int32_t if_num = NSS_DYNAMIC_IF_START;
3056 int32_t max_if_num = if_num + NSS_MAX_DYNAMIC_INTERFACES;
3057 size_t bytes = 0;
3058 ssize_t bytes_read = 0;
3059 char line[80];
3060 int start, end;
3061
3062 if (data) {
3063 if_num = data->if_num;
3064 }
3065
3066 if (if_num > max_if_num) {
3067 return 0;
3068 }
3069
3070 for (; if_num < max_if_num; if_num++) {
3071 if (nss_dynamic_interface_get_type(if_num) != NSS_DYNAMIC_INTERFACE_TYPE_WIFI)
3072 continue;
3073
3074 bytes = scnprintf(line, sizeof(line), "if_num %d stats start:\n\n", if_num);
3075 if ((bytes_read + bytes) > sz)
3076 break;
3077
3078 if (copy_to_user(ubuf + bytes_read, line, bytes) != 0) {
3079 bytes_read = -EFAULT;
3080 goto end;
3081 }
3082
3083 bytes_read += bytes;
3084
3085 start = 0;
3086 end = 7;
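		/*
		 * nss_wifi_if_copy_stats() emits one formatted row per call; the
		 * loop stops after seven rows or when the helper returns 0.
		 */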
3087 while (bytes_read < sz && start < end) {
3088 bytes = nss_wifi_if_copy_stats(if_num, start, line);
3089 if (!bytes)
3090 break;
3091
3092 if ((bytes_read + bytes) > sz)
3093 break;
3094
3095 if (copy_to_user(ubuf + bytes_read, line, bytes) != 0) {
3096 bytes_read = -EFAULT;
3097 goto end;
3098 }
3099
3100 bytes_read += bytes;
3101 start++;
3102 }
3103
3104 bytes = scnprintf(line, sizeof(line), "if_num %d stats end:\n\n", if_num);
3105 if (bytes_read > (sz - bytes))
3106 break;
3107
3108 if (copy_to_user(ubuf + bytes_read, line, bytes) != 0) {
3109 bytes_read = -EFAULT;
3110 goto end;
3111 }
3112
3113 bytes_read += bytes;
3114 }
3115
3116 if (bytes_read > 0) {
3117 *ppos = bytes_read;
3118 }
3119
3120 if (data) {
3121 data->if_num = if_num;
3122 }
3123
3124end:
3125 return bytes_read;
3126}
3127
3128/*
Sundarajan Srinivasanab2c8562015-06-09 16:14:10 -07003129 * nss_stats_virt_if_read()
3130 * Read virt_if statistics
3131 */
3132static ssize_t nss_stats_virt_if_read(struct file *fp, char __user *ubuf,
3133 size_t sz, loff_t *ppos)
3134{
3135 struct nss_stats_data *data = fp->private_data;
3136 int32_t if_num = NSS_DYNAMIC_IF_START;
3137 int32_t max_if_num = if_num + NSS_MAX_DYNAMIC_INTERFACES;
3138 size_t bytes = 0;
3139 ssize_t bytes_read = 0;
3140 char line[80];
3141 int start, end;
3142
3143 if (data) {
3144 if_num = data->if_num;
3145 }
3146
3147 if (if_num > max_if_num) {
3148 return 0;
3149 }
3150
3151 for (; if_num < max_if_num; if_num++) {
3152 if (nss_dynamic_interface_get_type(if_num) != NSS_DYNAMIC_INTERFACE_TYPE_802_3_REDIR)
3153 continue;
3154
3155 bytes = scnprintf(line, sizeof(line), "if_num %d stats start:\n\n", if_num);
3156 if ((bytes_read + bytes) > sz)
3157 break;
3158
3159 if (copy_to_user(ubuf + bytes_read, line, bytes) != 0) {
3160 bytes_read = -EFAULT;
3161 goto end;
3162 }
3163
3164 bytes_read += bytes;
3165
3166 start = 0;
3167 end = 7;
3168 while (bytes_read < sz && start < end) {
3169 bytes = nss_virt_if_copy_stats(if_num, start, line);
3170 if (!bytes)
3171 break;
3172
3173 if ((bytes_read + bytes) > sz)
3174 break;
3175
3176 if (copy_to_user(ubuf + bytes_read, line, bytes) != 0) {
3177 bytes_read = -EFAULT;
3178 goto end;
3179 }
3180
3181 bytes_read += bytes;
3182 start++;
3183 }
3184
3185 bytes = scnprintf(line, sizeof(line), "if_num %d stats end:\n\n", if_num);
3186 if (bytes_read > (sz - bytes))
3187 break;
3188
3189 if (copy_to_user(ubuf + bytes_read, line, bytes) != 0) {
3190 bytes_read = -EFAULT;
3191 goto end;
3192 }
3193
3194 bytes_read += bytes;
3195 }
3196
3197 if (bytes_read > 0) {
3198 *ppos = bytes_read;
3199 }
3200
3201 if (data) {
3202 data->if_num = if_num;
3203 }
3204
3205end:
3206 return bytes_read;
3207}
3208
3209/*
Sundarajan Srinivasancd1631b2015-06-18 01:23:30 -07003210 * nss_stats_tx_rx_virt_if_read()
3211 * Read tx_rx_virt_if statistics
3212 */
3213static ssize_t nss_stats_tx_rx_virt_if_read(struct file *fp, char __user *ubuf,
3214 size_t sz, loff_t *ppos)
3215{
3216 struct nss_stats_data *data = fp->private_data;
3217 int32_t if_num = NSS_DYNAMIC_IF_START;
3218 int32_t max_if_num = if_num + NSS_MAX_DYNAMIC_INTERFACES;
3219 size_t bytes = 0;
3220 ssize_t bytes_read = 0;
3221 char line[80];
3222 int start, end;
3223
3224 if (data) {
3225 if_num = data->if_num;
3226 }
3227
3228 if (if_num > max_if_num) {
3229 return 0;
3230 }
3231
3232 for (; if_num < max_if_num; if_num++) {
3233 if (nss_dynamic_interface_get_type(if_num) != NSS_DYNAMIC_INTERFACE_TYPE_VIRTIF_DEPRECATED)
3234 continue;
3235
3236 bytes = scnprintf(line, sizeof(line), "if_num %d stats start:\n\n", if_num);
3237 if ((bytes_read + bytes) > sz)
3238 break;
3239
3240 if (copy_to_user(ubuf + bytes_read, line, bytes) != 0) {
3241 bytes_read = -EFAULT;
3242 goto end;
3243 }
3244
3245 bytes_read += bytes;
3246
3247 start = 0;
3248 end = 7;
3249 while (bytes_read < sz && start < end) {
3250 bytes = nss_tx_rx_virt_if_copy_stats(if_num, start, line);
3251 if (!bytes)
3252 break;
3253
3254 if ((bytes_read + bytes) > sz)
3255 break;
3256
3257 if (copy_to_user(ubuf + bytes_read, line, bytes) != 0) {
3258 bytes_read = -EFAULT;
3259 goto end;
3260 }
3261
3262 bytes_read += bytes;
3263 start++;
3264 }
3265
3266 bytes = scnprintf(line, sizeof(line), "if_num %d stats end:\n\n", if_num);
3267 if (bytes_read > (sz - bytes))
3268 break;
3269
3270 if (copy_to_user(ubuf + bytes_read, line, bytes) != 0) {
3271 bytes_read = -EFAULT;
3272 goto end;
3273 }
3274
3275 bytes_read += bytes;
3276 }
3277
3278 if (bytes_read > 0) {
3279 *ppos = bytes_read;
3280 }
3281
3282 if (data) {
3283 data->if_num = if_num;
3284 }
3285
3286end:
3287 return bytes_read;
3288}
3289
3290/*
Stephen Wangec5a85c2016-09-08 23:32:27 -07003291 * nss_stats_trustsec_tx_read()
3292 * Read trustsec_tx stats
3293 */
3294static ssize_t nss_stats_trustsec_tx_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
3295{
3296 int32_t i;
3297
3298 /*
3299 * max output lines = #stats + start tag line + end tag line + three blank lines
3300 */
3301 uint32_t max_output_lines = (NSS_STATS_NODE_MAX + 2) + (NSS_STATS_TRUSTSEC_TX_MAX + 3) + 5;
3302 size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
3303 size_t size_wr = 0;
3304 ssize_t bytes_read = 0;
3305 uint64_t *stats_shadow;
3306
3307 char *lbuf = kzalloc(size_al, GFP_KERNEL);
3308 if (unlikely(lbuf == NULL)) {
3309 nss_warning("Could not allocate memory for local statistics buffer");
3310 return 0;
3311 }
3312
3313 stats_shadow = kzalloc(NSS_STATS_NODE_MAX * 8, GFP_KERNEL);
3314 if (unlikely(stats_shadow == NULL)) {
3315 nss_warning("Could not allocate memory for local shadow buffer");
3316 kfree(lbuf);
3317 return 0;
3318 }
3319
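	/*
	 * Snapshot the counters into stats_shadow under stats_lock and format
	 * them afterwards, so the scnprintf() work runs outside the lock. The
	 * shadow buffer is reused below for the trustsec_tx node stats, which
	 * assumes NSS_STATS_TRUSTSEC_TX_MAX <= NSS_STATS_NODE_MAX.
	 */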
3320 size_wr = scnprintf(lbuf, size_al, "trustsec_tx stats start:\n\n");
3321
3322 /*
3323 * Common node stats
3324 */
3325 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "common node stats:\n\n");
3326 spin_lock_bh(&nss_top_main.stats_lock);
3327 for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
3328 stats_shadow[i] = nss_top_main.stats_node[NSS_TRUSTSEC_TX_INTERFACE][i];
3329 }
3330
3331 spin_unlock_bh(&nss_top_main.stats_lock);
3332
3333 for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
3334 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
3335 "%s = %llu\n", nss_stats_str_node[i], stats_shadow[i]);
3336 }
3337
3338 /*
3339 * TrustSec TX node stats
3340 */
3341 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\ntrustsec tx node stats:\n\n");
3342
3343 spin_lock_bh(&nss_top_main.stats_lock);
3344 for (i = 0; (i < NSS_STATS_TRUSTSEC_TX_MAX); i++) {
3345 stats_shadow[i] = nss_top_main.stats_trustsec_tx[i];
3346 }
3347
3348 spin_unlock_bh(&nss_top_main.stats_lock);
3349
3350 for (i = 0; (i < NSS_STATS_TRUSTSEC_TX_MAX); i++) {
3351 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
3352 "%s = %llu\n", nss_stats_str_trustsec_tx[i], stats_shadow[i]);
3353 }
3354
3355 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\ntrustsec tx stats end\n\n");
3356 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
3357 kfree(lbuf);
3358 kfree(stats_shadow);
3359
3360 return bytes_read;
3361}
3362
3363/*
Saurabh Misra09dddeb2014-09-30 16:38:07 -07003364 * nss_stats_open()
3365 */
3366static int nss_stats_open(struct inode *inode, struct file *filp)
3367{
3368 struct nss_stats_data *data = NULL;
3369
3370 data = kzalloc(sizeof(struct nss_stats_data), GFP_KERNEL);
3371 if (!data) {
3372 return -ENOMEM;
3373 }
3375 data->if_num = NSS_DYNAMIC_IF_START;
Ankit Dhanuka6228ebd2014-11-05 17:26:01 +05303376 data->index = 0;
Stephen Wangaed46332016-12-12 17:29:03 -08003377 data->edma_id = (nss_ptr_t)inode->i_private;
Saurabh Misra09dddeb2014-09-30 16:38:07 -07003378 filp->private_data = data;
3379
3380 return 0;
3381}
3382
3383/*
3384 * nss_stats_release()
3385 */
3386static int nss_stats_release(struct inode *inode, struct file *filp)
3387{
3388 struct nss_stats_data *data = filp->private_data;
3389
3390 if (data) {
3391 kfree(data);
3392 }
3393
3394 return 0;
3395}
3396
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05303397#define NSS_STATS_DECLARE_FILE_OPERATIONS(name) \
3398static const struct file_operations nss_stats_##name##_ops = { \
Saurabh Misra09dddeb2014-09-30 16:38:07 -07003399 .open = nss_stats_open, \
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05303400 .read = nss_stats_##name##_read, \
3401 .llseek = generic_file_llseek, \
Saurabh Misra09dddeb2014-09-30 16:38:07 -07003402 .release = nss_stats_release, \
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05303403};
3404
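/*
 * For reference, NSS_STATS_DECLARE_FILE_OPERATIONS(ipv4) expands to:
 *
 *	static const struct file_operations nss_stats_ipv4_ops = {
 *		.open = nss_stats_open,
 *		.read = nss_stats_ipv4_read,
 *		.llseek = generic_file_llseek,
 *		.release = nss_stats_release,
 *	};
 *
 * i.e. every stats file shares the same open/release handlers and differs
 * only in its read callback.
 */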
3405/*
3406 * nss_ipv4_stats_ops
3407 */
3408NSS_STATS_DECLARE_FILE_OPERATIONS(ipv4)
3409
3410/*
Selin Dag6d9b0c12014-11-04 18:27:21 -08003411 * ipv4_reasm_stats_ops
3412 */
3413NSS_STATS_DECLARE_FILE_OPERATIONS(ipv4_reasm)
3414
3415/*
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05303416 * ipv6_stats_ops
3417 */
3418NSS_STATS_DECLARE_FILE_OPERATIONS(ipv6)
3419
3420/*
Selin Dag60a2f5b2015-06-29 14:39:49 -07003421 * ipv6_reasm_stats_ops
3422 */
3423NSS_STATS_DECLARE_FILE_OPERATIONS(ipv6_reasm)
3424
3425/*
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05303426 * n2h_stats_ops
3427 */
3428NSS_STATS_DECLARE_FILE_OPERATIONS(n2h)
Thomas Wuc3e382c2014-10-29 15:35:13 -07003429
3430/*
3431 * lso_rx_stats_ops
3432 */
3433NSS_STATS_DECLARE_FILE_OPERATIONS(lso_rx)
3434
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05303435/*
3436 * drv_stats_ops
3437 */
3438NSS_STATS_DECLARE_FILE_OPERATIONS(drv)
3439
3440/*
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05303441 * pppoe_stats_ops
3442 */
3443NSS_STATS_DECLARE_FILE_OPERATIONS(pppoe)
3444
3445/*
ratheesh kannoth7af985d2015-06-24 15:08:40 +05303446 * l2tpv2_stats_ops
3447 */
3448NSS_STATS_DECLARE_FILE_OPERATIONS(l2tpv2)
3449
3450/*
ratheesh kannotha1245c32015-11-04 16:45:43 +05303451 * map_t_stats_ops
3452 */
3453NSS_STATS_DECLARE_FILE_OPERATIONS(map_t)
3454
3455/*
ratheesh kannotheb2a0a82017-05-04 09:20:17 +05303456 * gre_stats_ops
3457 */
3458NSS_STATS_DECLARE_FILE_OPERATIONS(gre)
3459
3460/*
Amit Gupta316729b2016-08-12 12:21:15 +05303461 * ppe_stats_ops
3462 */
3463NSS_STATS_DECLARE_FILE_OPERATIONS(ppe_conn)
3464NSS_STATS_DECLARE_FILE_OPERATIONS(ppe_l3)
3465NSS_STATS_DECLARE_FILE_OPERATIONS(ppe_code)
3466
3467/*
Shyam Sunder66e889d2015-11-02 15:31:20 +05303468 * pptp_stats_ops
3469 */
3470NSS_STATS_DECLARE_FILE_OPERATIONS(pptp)
3471
3472/*
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05303473 * gmac_stats_ops
3474 */
3475NSS_STATS_DECLARE_FILE_OPERATIONS(gmac)
3476
3477/*
Saurabh Misra09dddeb2014-09-30 16:38:07 -07003478 * capwap_stats_ops
3479 */
3480NSS_STATS_DECLARE_FILE_OPERATIONS(capwap_encap)
3481NSS_STATS_DECLARE_FILE_OPERATIONS(capwap_decap)
3482
3483/*
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05303484 * eth_rx_stats_ops
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05303485 */
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05303486NSS_STATS_DECLARE_FILE_OPERATIONS(eth_rx)
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05303487
3488/*
Shashank Balashankar512cb602016-08-01 17:57:42 -07003489 * edma_port_stats_ops
3490 */
3491NSS_STATS_DECLARE_FILE_OPERATIONS(edma_port_stats)
3492
3493/*
3494 * edma_port_type_ops
3495 */
3496NSS_STATS_DECLARE_FILE_OPERATIONS(edma_port_type)
3497
3498/*
3499 * edma_port_ring_map_ops
3500 */
3501NSS_STATS_DECLARE_FILE_OPERATIONS(edma_port_ring_map)
3502
3503/*
3504 * edma_txring_stats_ops
3505 */
3506NSS_STATS_DECLARE_FILE_OPERATIONS(edma_txring)
3507
3508/*
3509 * edma_rxring_stats_ops
3510 */
3511NSS_STATS_DECLARE_FILE_OPERATIONS(edma_rxring)
3512
3513/*
3514 * edma_txcmplring_stats_ops
3515 */
3516NSS_STATS_DECLARE_FILE_OPERATIONS(edma_txcmplring)
3517
3518/*
3519 * edma_rxfillring_stats_ops
3520 */
3521NSS_STATS_DECLARE_FILE_OPERATIONS(edma_rxfillring)
3522
3523/*
Ankit Dhanuka6228ebd2014-11-05 17:26:01 +05303524 * gre_redir_ops
3525 */
3526NSS_STATS_DECLARE_FILE_OPERATIONS(gre_redir)
3527
3528/*
Ankit Dhanuka14999992014-11-12 15:35:11 +05303529 * sjack_stats_ops
3530 */
3531NSS_STATS_DECLARE_FILE_OPERATIONS(sjack)
3532
Stephen Wang9779d952015-10-28 11:39:07 -07003533/*
3534 * portid_ops
3535 */
3536NSS_STATS_DECLARE_FILE_OPERATIONS(portid)
3537
Sundarajan Srinivasan273d9002015-03-03 15:43:16 -08003538NSS_STATS_DECLARE_FILE_OPERATIONS(wifi_if)
3539
Sundarajan Srinivasanab2c8562015-06-09 16:14:10 -07003540NSS_STATS_DECLARE_FILE_OPERATIONS(virt_if)
3541
Sundarajan Srinivasancd1631b2015-06-18 01:23:30 -07003542NSS_STATS_DECLARE_FILE_OPERATIONS(tx_rx_virt_if)
3543
Ankit Dhanuka14999992014-11-12 15:35:11 +05303544/*
Bharath M Kumarcc666e92014-12-24 19:17:28 +05303545 * wifi_stats_ops
3546 */
3547NSS_STATS_DECLARE_FILE_OPERATIONS(wifi)
3548
3549/*
Tushar Mathurff8741b2015-12-02 20:28:59 +05303550 * dtls_stats_ops
3551 */
3552NSS_STATS_DECLARE_FILE_OPERATIONS(dtls)
3553
3554/*
Thomas Wu71c5ecc2016-06-21 11:15:52 -07003555 * gre_tunnel_stats_ops
3556 */
3557NSS_STATS_DECLARE_FILE_OPERATIONS(gre_tunnel)
3558
3559/*
Stephen Wangec5a85c2016-09-08 23:32:27 -07003560 * trustsec_tx_stats_ops
3561 */
3562NSS_STATS_DECLARE_FILE_OPERATIONS(trustsec_tx)
3563
3564/*
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05303565 * nss_stats_init()
3566 * Enable NSS statistics
3567 */
3568void nss_stats_init(void)
3569{
Shashank Balashankar512cb602016-08-01 17:57:42 -07003570 int i = 0;
3571 struct dentry *edma_d = NULL;
3572 struct dentry *edma_port_dir_d = NULL;
3573 struct dentry *edma_port_d = NULL;
3574 struct dentry *edma_port_type_d = NULL;
3575 struct dentry *edma_port_stats_d = NULL;
3576 struct dentry *edma_port_ring_map_d = NULL;
3577
3578 struct dentry *edma_rings_dir_d = NULL;
3579 struct dentry *edma_tx_dir_d = NULL;
3580 struct dentry *edma_tx_d = NULL;
3581 struct dentry *edma_rx_dir_d = NULL;
3582 struct dentry *edma_rx_d = NULL;
3583 struct dentry *edma_txcmpl_dir_d = NULL;
3584 struct dentry *edma_txcmpl_d = NULL;
3585 struct dentry *edma_rxfill_dir_d = NULL;
3586 struct dentry *edma_rxfill_d = NULL;
3587
3588 char file_name[10];
3589
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05303590 /*
3591 * NSS driver entry
3592 */
3593 nss_top_main.top_dentry = debugfs_create_dir("qca-nss-drv", NULL);
3594 if (unlikely(nss_top_main.top_dentry == NULL)) {
3595 nss_warning("Failed to create qca-nss-drv directory in debugfs");
3596
3597 /*
3598 * Non-availability of the debugfs directory is not a catastrophe
3599 * We can still go ahead with other initialization
3600 */
3601 return;
3602 }
3603
3604 nss_top_main.stats_dentry = debugfs_create_dir("stats", nss_top_main.top_dentry);
Abhishek Rastogi80f4eb12013-09-24 14:31:21 +05303605 if (unlikely(nss_top_main.stats_dentry == NULL)) {
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05303606 nss_warning("Failed to create qca-nss-drv/stats directory in debugfs");
3607
3608 /*
3609 * Non-availability of the debugfs directory is not a catastrophe
3610 * We can still go ahead with the rest of the initialization
3611 */
3612 return;
3613 }
3614
3615 /*
3616 * Create files to obtain statistics
3617 */
3618
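/*
 * The debugfs tree built below looks roughly like this (assuming debugfs is
 * mounted at the usual /sys/kernel/debug):
 *
 *	qca-nss-drv/stats/
 *		ipv4, ipv4_reasm, ipv6, ipv6_reasm, eth_rx, n2h, lso_rx, drv,
 *		pppoe, gmac, capwap_encap, capwap_decap, gre_redir, sjack,
 *		portid, wifi, wifi_if, virt_if, tx_rx_virt_if, l2tpv2, map_t,
 *		gre, pptp, dtls, gre_tunnel, trustsec_tx
 *		ppe/{connection, l3, ppe_code}
 *		edma/ports/<port>/{stats, type, ring_map}
 *		edma/rings/{tx, rx, txcmpl, rxfill}/<ring>
 */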
3619 /*
3620 * ipv4_stats
3621 */
3622 nss_top_main.ipv4_dentry = debugfs_create_file("ipv4", 0400,
3623 nss_top_main.stats_dentry, &nss_top_main, &nss_stats_ipv4_ops);
3624 if (unlikely(nss_top_main.ipv4_dentry == NULL)) {
Abhishek Rastogi80f4eb12013-09-24 14:31:21 +05303625 nss_warning("Failed to create qca-nss-drv/stats/ipv4 file in debugfs");
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05303626 return;
3627 }
3628
3629 /*
Selin Dag6d9b0c12014-11-04 18:27:21 -08003630 * ipv4_reasm_stats
3631 */
3632 nss_top_main.ipv4_reasm_dentry = debugfs_create_file("ipv4_reasm", 0400,
3633 nss_top_main.stats_dentry, &nss_top_main, &nss_stats_ipv4_reasm_ops);
3634 if (unlikely(nss_top_main.ipv4_reasm_dentry == NULL)) {
3635 nss_warning("Failed to create qca-nss-drv/stats/ipv4_reasm file in debugfs");
3636 return;
3637 }
3638
3639 /*
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05303640 * ipv6_stats
3641 */
3642 nss_top_main.ipv6_dentry = debugfs_create_file("ipv6", 0400,
3643 nss_top_main.stats_dentry, &nss_top_main, &nss_stats_ipv6_ops);
3644 if (unlikely(nss_top_main.ipv6_dentry == NULL)) {
Abhishek Rastogi80f4eb12013-09-24 14:31:21 +05303645 nss_warning("Failed to create qca-nss-drv/stats/ipv6 file in debugfs");
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05303646 return;
3647 }
3648
3649 /*
Selin Dag60a2f5b2015-06-29 14:39:49 -07003650 * ipv6_reasm_stats
3651 */
3652 nss_top_main.ipv6_reasm_dentry = debugfs_create_file("ipv6_reasm", 0400,
3653 nss_top_main.stats_dentry, &nss_top_main, &nss_stats_ipv6_reasm_ops);
3654 if (unlikely(nss_top_main.ipv6_reasm_dentry == NULL)) {
3655 nss_warning("Failed to create qca-nss-drv/stats/ipv6_reasm file in debugfs");
3656 return;
3657 }
3658
3659 /*
3660 * eth_rx_stats
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05303661 */
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05303662 nss_top_main.eth_rx_dentry = debugfs_create_file("eth_rx", 0400,
3663 nss_top_main.stats_dentry, &nss_top_main, &nss_stats_eth_rx_ops);
3664 if (unlikely(nss_top_main.eth_rx_dentry == NULL)) {
3665 nss_warning("Failed to create qca-nss-drv/stats/eth_rx file in debugfs");
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05303666 return;
3667 }
3668
3669 /*
Shashank Balashankar512cb602016-08-01 17:57:42 -07003670 * edma stats
3671 */
3672 edma_d = debugfs_create_dir("edma", nss_top_main.stats_dentry);
3673 if (unlikely(edma_d == NULL)) {
3674 nss_warning("Failed to create qca-nss-drv/stats/edma directory in debugfs");
3675 return;
3676 }
3677
3678 /*
3679 * edma port stats
3680 */
3681 edma_port_dir_d = debugfs_create_dir("ports", edma_d);
3682 if (unlikely(edma_port_dir_d == NULL)) {
3683 nss_warning("Failed to create qca-nss-drv/stats/edma/ports directory in debugfs");
3684 return;
3685 }
3686
3687 for (i = 0; i < NSS_EDMA_NUM_PORTS_MAX; i++) {
3688 memset(file_name, 0, sizeof(file_name));
3689 snprintf(file_name, sizeof(file_name), "%d", i);
3690 edma_port_d = NULL;
3691 edma_port_stats_d = NULL;
3692 edma_port_type_d = NULL;
3693 edma_port_ring_map_d = NULL;
3694
3695 edma_port_d = debugfs_create_dir(file_name, edma_port_dir_d);
3696 if (unlikely(edma_port_d == NULL)) {
3697 nss_warning("Failed to create qca-nss-drv/stats/edma/ports/%d dir in debugfs", i);
3698 return;
3699 }
3700
Stephen Wangaed46332016-12-12 17:29:03 -08003701 edma_port_stats_d = debugfs_create_file("stats", 0400, edma_port_d, (void *)(nss_ptr_t)i, &nss_stats_edma_port_stats_ops);
Shashank Balashankar512cb602016-08-01 17:57:42 -07003702 if (unlikely(edma_port_stats_d == NULL)) {
3703 nss_warning("Failed to create qca-nss-drv/stats/edma/ports/%d/stats file in debugfs", i);
3704 return;
3705 }
3706
Stephen Wangaed46332016-12-12 17:29:03 -08003707 edma_port_type_d = debugfs_create_file("type", 0400, edma_port_d, (void *)(nss_ptr_t)i, &nss_stats_edma_port_type_ops);
Shashank Balashankar512cb602016-08-01 17:57:42 -07003708 if (unlikely(edma_port_type_d == NULL)) {
3709 nss_warning("Failed to create qca-nss-drv/stats/edma/ports/%d/type file in debugfs", i);
3710 return;
3711 }
3712
Stephen Wangaed46332016-12-12 17:29:03 -08003713 edma_port_ring_map_d = debugfs_create_file("ring_map", 0400, edma_port_d, (void *)(nss_ptr_t)i, &nss_stats_edma_port_ring_map_ops);
Shashank Balashankar512cb602016-08-01 17:57:42 -07003714 if (unlikely(edma_port_ring_map_d == NULL)) {
3715 nss_warning("Failed to create qca-nss-drv/stats/edma/ports/%d/ring_map file in debugfs", i);
3716 return;
3717 }
3718 }
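	/*
	 * Each per-port file above (and each per-ring file below) passes its
	 * index as the debugfs private data; debugfs stores it in
	 * inode->i_private, and nss_stats_open() copies it into
	 * nss_stats_data->edma_id for the EDMA read handlers.
	 */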
3719
3720 /*
3721 * edma ring stats
3722 */
3723 edma_rings_dir_d = debugfs_create_dir("rings", edma_d);
3724 if (unlikely(edma_rings_dir_d == NULL)) {
3725 nss_warning("Failed to create qca-nss-drv/stats/edma/rings directory in debugfs");
3726 return;
3727 }
3728
3729 /*
3730 * edma tx ring stats
3731 */
3732 edma_tx_dir_d = debugfs_create_dir("tx", edma_rings_dir_d);
3733 if (unlikely(edma_tx_dir_d == NULL)) {
3734 nss_warning("Failed to create qca-nss-drv/stats/edma/rings/tx directory in debugfs");
3735 return;
3736 }
3737
3738 for (i = 0; i < NSS_EDMA_NUM_TX_RING_MAX; i++) {
3739 memset(file_name, 0, sizeof(file_name));
3740 scnprintf(file_name, sizeof(file_name), "%d", i);
3741 edma_tx_d = NULL;
Stephen Wangaed46332016-12-12 17:29:03 -08003742 edma_tx_d = debugfs_create_file(file_name, 0400, edma_tx_dir_d, (void *)(nss_ptr_t)i, &nss_stats_edma_txring_ops);
Shashank Balashankar512cb602016-08-01 17:57:42 -07003743 if (unlikely(edma_tx_d == NULL)) {
3744 nss_warning("Failed to create qca-nss-drv/stats/edma/rings/tx/%d file in debugfs", i);
3745 return;
3746 }
3747 }
3748
3749 /*
3750 * edma rx ring stats
3751 */
3752 edma_rx_dir_d = debugfs_create_dir("rx", edma_rings_dir_d);
3753 if (unlikely(edma_rx_dir_d == NULL)) {
3754 nss_warning("Failed to create qca-nss-drv/stats/edma/rings/rx directory in debugfs");
3755 return;
3756 }
3757
3758 for (i = 0; i < NSS_EDMA_NUM_RX_RING_MAX; i++) {
3759 memset(file_name, 0, sizeof(file_name));
3760 scnprintf(file_name, sizeof(file_name), "%d", i);
3761 edma_rx_d = NULL;
Stephen Wangaed46332016-12-12 17:29:03 -08003762 edma_rx_d = debugfs_create_file(file_name, 0400, edma_rx_dir_d, (void *)(nss_ptr_t)i, &nss_stats_edma_rxring_ops);
Shashank Balashankar512cb602016-08-01 17:57:42 -07003763 if (unlikely(edma_rx_d == NULL)) {
3764 nss_warning("Failed to create qca-nss-drv/stats/edma/rings/rx/%d file in debugfs", i);
3765 return;
3766 }
3767 }
3768
3769 /*
3770 * edma tx cmpl ring stats
3771 */
3772 edma_txcmpl_dir_d = debugfs_create_dir("txcmpl", edma_rings_dir_d);
3773 if (unlikely(edma_txcmpl_dir_d == NULL)) {
3774 nss_warning("Failed to create qca-nss-drv/stats/edma/rings/txcmpl directory in debugfs");
3775 return;
3776 }
3777
3778 for (i = 0; i < NSS_EDMA_NUM_TXCMPL_RING_MAX; i++) {
3779 memset(file_name, 0, sizeof(file_name));
3780 scnprintf(file_name, sizeof(file_name), "%d", i);
3781 edma_txcmpl_d = NULL;
Stephen Wangaed46332016-12-12 17:29:03 -08003782 edma_txcmpl_d = debugfs_create_file(file_name, 0400, edma_txcmpl_dir_d, (void *)(nss_ptr_t)i, &nss_stats_edma_txcmplring_ops);
Shashank Balashankar512cb602016-08-01 17:57:42 -07003783 if (unlikely(edma_txcmpl_d == NULL)) {
3784 nss_warning("Failed to create qca-nss-drv/stats/edma/rings/txcmpl/%d file in debugfs", i);
3785 return;
3786 }
3787 }
3788
3789 /*
3790 * edma rx fill ring stats
3791 */
3792 edma_rxfill_dir_d = debugfs_create_dir("rxfill", edma_rings_dir_d);
3793 if (unlikely(edma_rxfill_dir_d == NULL)) {
3794 nss_warning("Failed to create qca-nss-drv/stats/edma/rings/rxfill directory in debugfs");
3795 return;
3796 }
3797
3798 for (i = 0; i < NSS_EDMA_NUM_RXFILL_RING_MAX; i++) {
3799 memset(file_name, 0, sizeof(file_name));
3800 scnprintf(file_name, sizeof(file_name), "%d", i);
3801 edma_rxfill_d = NULL;
Stephen Wangaed46332016-12-12 17:29:03 -08003802 edma_rxfill_d = debugfs_create_file(file_name, 0400, edma_rxfill_dir_d, (void *)(nss_ptr_t)i, &nss_stats_edma_rxfillring_ops);
Shashank Balashankar512cb602016-08-01 17:57:42 -07003803 if (unlikely(edma_rxfill_d == NULL)) {
3804 nss_warning("Failed to create qca-nss-drv/stats/edma/rings/rxfill/%d file in debugfs", i);
3805 return;
3806 }
3807 }
3808
3809 /*
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05303810 * n2h_stats
3811 */
3812 nss_top_main.n2h_dentry = debugfs_create_file("n2h", 0400,
3813 nss_top_main.stats_dentry, &nss_top_main, &nss_stats_n2h_ops);
3814 if (unlikely(nss_top_main.n2h_dentry == NULL)) {
3815 nss_warning("Failed to create qca-nss-drv/stats/n2h file in debugfs");
3816 return;
3817 }
3818
3819 /*
Thomas Wuc3e382c2014-10-29 15:35:13 -07003820 * lso_rx_stats
3821 */
3822 nss_top_main.lso_rx_dentry = debugfs_create_file("lso_rx", 0400,
3823 nss_top_main.stats_dentry, &nss_top_main, &nss_stats_lso_rx_ops);
3824 if (unlikely(nss_top_main.lso_rx_dentry == NULL)) {
3825 nss_warning("Failed to create qca-nss-drv/stats/lso_rx file in debugfs");
3826 return;
3827 }
3828
3829 /*
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05303830 * drv_stats
3831 */
3832 nss_top_main.drv_dentry = debugfs_create_file("drv", 0400,
3833 nss_top_main.stats_dentry, &nss_top_main, &nss_stats_drv_ops);
3834 if (unlikely(nss_top_main.drv_dentry == NULL)) {
3835 nss_warning("Failed to create qca-nss-drv/stats/drv file in debugfs");
3836 return;
3837 }
3838
3839 /*
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05303840 * pppoe_stats
3841 */
3842 nss_top_main.pppoe_dentry = debugfs_create_file("pppoe", 0400,
3843 nss_top_main.stats_dentry, &nss_top_main, &nss_stats_pppoe_ops);
3844 if (unlikely(nss_top_main.pppoe_dentry == NULL)) {
Abhishek Rastogi80f4eb12013-09-24 14:31:21 +05303845 nss_warning("Failed to create qca-nss-drv/stats/pppoe file in debugfs");
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05303846 return;
3847 }
3848
3849 /*
3850 * gmac_stats
3851 */
3852 nss_top_main.gmac_dentry = debugfs_create_file("gmac", 0400,
3853 nss_top_main.stats_dentry, &nss_top_main, &nss_stats_gmac_ops);
3854 if (unlikely(nss_top_main.gmac_dentry == NULL)) {
Abhishek Rastogi80f4eb12013-09-24 14:31:21 +05303855 nss_warning("Failed to create qca-nss-drv/stats/gmac file in debugfs");
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05303856 return;
3857 }
Saurabh Misra09dddeb2014-09-30 16:38:07 -07003858
3859 /*
3860 * CAPWAP stats.
3861 */
3862 nss_top_main.capwap_encap_dentry = debugfs_create_file("capwap_encap", 0400,
3863 nss_top_main.stats_dentry, &nss_top_main, &nss_stats_capwap_encap_ops);
3864 if (unlikely(nss_top_main.capwap_encap_dentry == NULL)) {
3865 nss_warning("Failed to create qca-nss-drv/stats/capwap_encap file in debugfs");
3866 return;
3867 }
3868
3869 nss_top_main.capwap_decap_dentry = debugfs_create_file("capwap_decap", 0400,
3870 nss_top_main.stats_dentry, &nss_top_main, &nss_stats_capwap_decap_ops);
3871 if (unlikely(nss_top_main.capwap_decap_dentry == NULL)) {
3872 nss_warning("Failed to create qca-nss-drv/stats/capwap_decap file in debugfs");
3873 return;
3874 }
Ankit Dhanuka6228ebd2014-11-05 17:26:01 +05303875
3876 /*
3877 * GRE_REDIR stats
3878 */
3879 nss_top_main.gre_redir_dentry = debugfs_create_file("gre_redir", 0400,
Ankit Dhanuka14999992014-11-12 15:35:11 +05303880 nss_top_main.stats_dentry, &nss_top_main, &nss_stats_gre_redir_ops);
Ankit Dhanuka6228ebd2014-11-05 17:26:01 +05303881 if (unlikely(nss_top_main.gre_redir_dentry == NULL)) {
3882 nss_warning("Failed to create qca-nss-drv/stats/gre_redir file in debugfs");
3883 return;
3884 }
Ankit Dhanuka14999992014-11-12 15:35:11 +05303885
3886 /*
3887 * SJACK stats
3888 */
3889 nss_top_main.sjack_dentry = debugfs_create_file("sjack", 0400,
3890 nss_top_main.stats_dentry, &nss_top_main, &nss_stats_sjack_ops);
3891 if (unlikely(nss_top_main.sjack_dentry == NULL)) {
3892 nss_warning("Failed to create qca-nss-drv/stats/sjack file in debugfs");
3893 return;
3894 }
Saurabh Misra96998db2014-07-10 12:15:48 -07003895
Bharath M Kumarcc666e92014-12-24 19:17:28 +05303896 /*
Stephen Wang9779d952015-10-28 11:39:07 -07003897 * PORTID stats
3898 */
3899 nss_top_main.portid_dentry = debugfs_create_file("portid", 0400,
3900 nss_top_main.stats_dentry, &nss_top_main, &nss_stats_portid_ops);
3901 if (unlikely(nss_top_main.portid_dentry == NULL)) {
3902 nss_warning("Failed to create qca-nss-drv/stats/portid file in debugfs");
3903 return;
3904 }
3905
3906 /*
Bharath M Kumarcc666e92014-12-24 19:17:28 +05303907 * WIFI stats
3908 */
3909 nss_top_main.wifi_dentry = debugfs_create_file("wifi", 0400,
3910 nss_top_main.stats_dentry, &nss_top_main, &nss_stats_wifi_ops);
3911 if (unlikely(nss_top_main.wifi_dentry == NULL)) {
3912 nss_warning("Failed to create qca-nss-drv/stats/wifi file in debugfs");
3913 return;
3914 }
3915
Sundarajan Srinivasan273d9002015-03-03 15:43:16 -08003916 /*
3917 * wifi_if stats
3918 */
3919 nss_top_main.wifi_if_dentry = debugfs_create_file("wifi_if", 0400,
3920 nss_top_main.stats_dentry, &nss_top_main, &nss_stats_wifi_if_ops);
3921 if (unlikely(nss_top_main.wifi_if_dentry == NULL)) {
3922 nss_warning("Failed to create qca-nss-drv/stats/wifi_if file in debugfs");
3923 return;
3924 }
3925
Sundarajan Srinivasanab2c8562015-06-09 16:14:10 -07003926 nss_top_main.virt_if_dentry = debugfs_create_file("virt_if", 0400,
3927 nss_top_main.stats_dentry, &nss_top_main, &nss_stats_virt_if_ops);
3928 if (unlikely(nss_top_main.virt_if_dentry == NULL)) {
3929 nss_warning("Failed to create qca-nss-drv/stats/virt_if file in debugfs");
3930 return;
3931 }
3932
Sundarajan Srinivasancd1631b2015-06-18 01:23:30 -07003933 nss_top_main.tx_rx_virt_if_dentry = debugfs_create_file("tx_rx_virt_if", 0400,
3934 nss_top_main.stats_dentry, &nss_top_main, &nss_stats_tx_rx_virt_if_ops);
3935 if (unlikely(nss_top_main.tx_rx_virt_if_dentry == NULL)) {
3936 nss_warning("Failed to create qca-nss-drv/stats/tx_rx_virt_if file in debugfs");
3937 return;
3938 }
3939
ratheesh kannoth7af985d2015-06-24 15:08:40 +05303940 /*
3941 * L2TPV2 Stats
3942 */
3943 nss_top_main.l2tpv2_dentry = debugfs_create_file("l2tpv2", 0400,
3944 nss_top_main.stats_dentry, &nss_top_main, &nss_stats_l2tpv2_ops);
3945 if (unlikely(nss_top_main.l2tpv2_dentry == NULL)) {
3946 nss_warning("Failed to create qca-nss-drv/stats/l2tpv2 file in debugfs");
3947 return;
3948 }
Shyam Sunder66e889d2015-11-02 15:31:20 +05303949
3950 /*
ratheesh kannotha1245c32015-11-04 16:45:43 +05303951 * Map-t Stats
3952 */
3953 nss_top_main.map_t_dentry = debugfs_create_file("map_t", 0400,
3954 nss_top_main.stats_dentry, &nss_top_main, &nss_stats_map_t_ops);
3955 if (unlikely(nss_top_main.map_t_dentry == NULL)) {
3956 nss_warning("Failed to create qca-nss-drv/stats/map_t file in debugfs");
3957 return;
3958 }
3959
3960 /*
ratheesh kannotheb2a0a82017-05-04 09:20:17 +05303961 * GRE statistics
3962 */
3963 nss_top_main.gre_dentry = debugfs_create_file("gre", 0400,
3964 nss_top_main.stats_dentry, &nss_top_main, &nss_stats_gre_ops);
3965 if (unlikely(nss_top_main.gre_dentry == NULL)) {
3966 nss_warning("Failed to create qca-nss-drv/stats/gre file in debugfs");
3967 return;
3968 }
3969
3970 /*
Amit Gupta316729b2016-08-12 12:21:15 +05303971 * PPE Stats
3972 */
3973 nss_top_main.ppe_dentry = debugfs_create_dir("ppe", nss_top_main.stats_dentry);
3974 if (unlikely(nss_top_main.ppe_dentry == NULL)) {
3975 nss_warning("Failed to create qca-nss-drv/stats/ppe directory in debugfs");
3976 return;
3977 }
3978
3979 nss_top_main.ppe_conn_dentry = debugfs_create_file("connection", 0400,
3980 nss_top_main.ppe_dentry, &nss_top_main, &nss_stats_ppe_conn_ops);
3981 if (unlikely(nss_top_main.ppe_conn_dentry == NULL)) {
3982 nss_warning("Failed to create qca-nss-drv/stats/ppe/connection file in debugfs");
3983 }
3984
3985 nss_top_main.ppe_l3_dentry = debugfs_create_file("l3", 0400,
3986 nss_top_main.ppe_dentry, &nss_top_main, &nss_stats_ppe_l3_ops);
3987 if (unlikely(nss_top_main.ppe_l3_dentry == NULL)) {
3988 nss_warning("Failed to create qca-nss-drv/stats/ppe/l3 file in debugfs");
3989 }
3990
3991 nss_top_main.ppe_code_dentry = debugfs_create_file("ppe_code", 0400,
3992 nss_top_main.ppe_dentry, &nss_top_main, &nss_stats_ppe_code_ops);
3993 if (unlikely(nss_top_main.ppe_code_dentry == NULL)) {
3994 nss_warning("Failed to create qca-nss-drv/stats/ppe/ppe_code file in debugfs");
3995 }
3996
3997 /*
Shyam Sunder66e889d2015-11-02 15:31:20 +05303998 * PPTP Stats
3999 */
4000 nss_top_main.pptp_dentry = debugfs_create_file("pptp", 0400,
4001 nss_top_main.stats_dentry, &nss_top_main, &nss_stats_pptp_ops);
4002 if (unlikely(nss_top_main.pptp_dentry == NULL)) {
4003 nss_warning("Failed to create qca-nss-drv/stats/pptp file in debugfs");
Tushar Mathurff8741b2015-12-02 20:28:59 +05304004 }
4005
4006 /*
4007 * DTLS Stats
4008 */
4009 nss_top_main.dtls_dentry = debugfs_create_file("dtls", 0400,
4010 nss_top_main.stats_dentry,
4011 &nss_top_main,
4012 &nss_stats_dtls_ops);
4013 if (unlikely(nss_top_main.dtls_dentry == NULL)) {
4014 nss_warning("Failed to create qca-nss-drv/stats/dtls file in debugfs");
Shyam Sunder66e889d2015-11-02 15:31:20 +05304015 return;
4016 }
4017
Thomas Wu71c5ecc2016-06-21 11:15:52 -07004018 /*
4019 * GRE Tunnel Stats
4020 */
4021 nss_top_main.gre_tunnel_dentry = debugfs_create_file("gre_tunnel", 0400,
4022 nss_top_main.stats_dentry,
4023 &nss_top_main,
4024 &nss_stats_gre_tunnel_ops);
4025 if (unlikely(nss_top_main.gre_tunnel_dentry == NULL)) {
4026 nss_warning("Failed to create qca-nss-drv/stats/gre_tunnel file in debugfs");
4027 return;
4028 }
4029
Stephen Wangec5a85c2016-09-08 23:32:27 -07004030 /*
4031 * TrustSec TX Stats
4032 */
4033 nss_top_main.trustsec_tx_dentry = debugfs_create_file("trustsec_tx", 0400,
4034 nss_top_main.stats_dentry,
4035 &nss_top_main,
4036 &nss_stats_trustsec_tx_ops);
4037 if (unlikely(nss_top_main.trustsec_tx_dentry == NULL)) {
4038 nss_warning("Failed to create qca-nss-drv/stats/trustsec_tx file in debugfs");
4039 return;
4040 }
4041
Saurabh Misra96998db2014-07-10 12:15:48 -07004042 nss_log_init();
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05304043}
4044
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05304045/*
4046 * nss_stats_clean()
4047 * Cleanup NSS statistics files
4048 */
4049void nss_stats_clean(void)
4050{
4051 /*
4052 * Remove debugfs tree
4053 */
4054 if (likely(nss_top_main.top_dentry != NULL)) {
4055 debugfs_remove_recursive(nss_top_main.top_dentry);
Stephen Wangdc8b5322015-06-27 20:11:50 -07004056 nss_top_main.top_dentry = NULL;
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05304057 }
4058}