blob: 59c1ff24388e86dc3073224ed9a2d85b6ddcb0c9 [file] [log] [blame]
Radhakrishna Jiguru1c9b2252013-08-27 23:57:48 +05301/*
2 **************************************************************************
Stephen Wangaed46332016-12-12 17:29:03 -08003 * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
Radhakrishna Jiguru1c9b2252013-08-27 23:57:48 +05304 * Permission to use, copy, modify, and/or distribute this software for
5 * any purpose with or without fee is hereby granted, provided that the
6 * above copyright notice and this permission notice appear in all copies.
7 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
8 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
9 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
10 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
11 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
12 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
13 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
14 **************************************************************************
15 */
Abhishek Rastogi38cffff2013-06-02 11:25:47 +053016
17/*
18 * nss_stats.c
19 * NSS stats APIs
20 *
21 */
22
23#include "nss_core.h"
Tushar Mathurff8741b2015-12-02 20:28:59 +053024#include "nss_dtls_stats.h"
Thomas Wu71c5ecc2016-06-21 11:15:52 -070025#include "nss_gre_tunnel_stats.h"
Abhishek Rastogi38cffff2013-06-02 11:25:47 +053026
27/*
28 * Maximum string length:
29 * This should be equal to maximum string size of any stats
30 * inclusive of stats value
31 */
32#define NSS_STATS_MAX_STR_LENGTH 96
33
Sundarajan Srinivasancd1631b2015-06-18 01:23:30 -070034extern int32_t nss_tx_rx_virt_if_copy_stats(int32_t if_num, int i, char *line);
35
Ankit Dhanukaa1569ce2014-05-13 19:58:06 +053036uint64_t stats_shadow_pppoe_except[NSS_PPPOE_NUM_SESSION_PER_INTERFACE][NSS_PPPOE_EXCEPTION_EVENT_MAX];
Abhishek Rastogi84d95d02014-03-26 19:31:31 +053037
Abhishek Rastogi38cffff2013-06-02 11:25:47 +053038/*
Saurabh Misra09dddeb2014-09-30 16:38:07 -070039 * Private data for every file descriptor
40 */
41struct nss_stats_data {
Sundarajan Srinivasan273d9002015-03-03 15:43:16 -080042 uint32_t if_num; /**< Interface number for stats */
Ankit Dhanuka6228ebd2014-11-05 17:26:01 +053043 uint32_t index; /**< Index for GRE_REDIR stats */
Shashank Balashankar512cb602016-08-01 17:57:42 -070044 uint32_t edma_id; /**< EDMA port ID or ring ID */
Saurabh Misra09dddeb2014-09-30 16:38:07 -070045};
46
47/*
Abhishek Rastogi38cffff2013-06-02 11:25:47 +053048 * Statistics structures
49 */
50
51/*
52 * nss_stats_str_ipv4
53 * IPv4 stats strings
54 */
55static int8_t *nss_stats_str_ipv4[NSS_STATS_IPV4_MAX] = {
56 "rx_pkts",
57 "rx_bytes",
58 "tx_pkts",
59 "tx_bytes",
60 "create_requests",
61 "create_collisions",
62 "create_invalid_interface",
63 "destroy_requests",
64 "destroy_misses",
65 "hash_hits",
66 "hash_reorders",
67 "flushes",
Selin Dag60ea2b22014-11-05 09:36:22 -080068 "evictions",
Kiran Kumar C. S. K12998002014-09-04 17:09:03 +053069 "fragmentations",
70 "mc_create_requests",
71 "mc_update_requests",
72 "mc_create_invalid_interface",
73 "mc_destroy_requests",
74 "mc_destroy_misses",
75 "mc_flushes",
Abhishek Rastogi38cffff2013-06-02 11:25:47 +053076};
77
78/*
Selin Dag6d9b0c12014-11-04 18:27:21 -080079 * nss_stats_str_ipv4_reasm
80 * IPv4 reassembly stats strings
81 */
82static int8_t *nss_stats_str_ipv4_reasm[NSS_STATS_IPV4_REASM_MAX] = {
83 "evictions",
84 "alloc_fails",
85 "timeouts",
86};
87
88/*
Abhishek Rastogi38cffff2013-06-02 11:25:47 +053089 * nss_stats_str_ipv6
90 * IPv6 stats strings
91 */
92static int8_t *nss_stats_str_ipv6[NSS_STATS_IPV6_MAX] = {
93 "rx_pkts",
94 "rx_bytes",
95 "tx_pkts",
96 "tx_bytes",
97 "create_requests",
98 "create_collisions",
99 "create_invalid_interface",
100 "destroy_requests",
101 "destroy_misses",
102 "hash_hits",
103 "hash_reorders",
104 "flushes",
105 "evictions",
Selin Dag5d68caa2015-05-12 13:23:33 -0700106 "fragmentations",
107 "frag_fails",
Kiran Kumar C. S. K12998002014-09-04 17:09:03 +0530108 "mc_create_requests",
109 "mc_update_requests",
110 "mc_create_invalid_interface",
111 "mc_destroy_requests",
112 "mc_destroy_misses",
113 "mc_flushes",
Abhishek Rastogi38cffff2013-06-02 11:25:47 +0530114};
115
116/*
Selin Dag60a2f5b2015-06-29 14:39:49 -0700117 * nss_stats_str_ipv6_reasm
118 * IPv6 reassembly stats strings
119 */
120static int8_t *nss_stats_str_ipv6_reasm[NSS_STATS_IPV6_REASM_MAX] = {
121 "alloc_fails",
122 "timeouts",
123 "discards",
124};
125
126/*
Abhishek Rastogi38cffff2013-06-02 11:25:47 +0530127 * nss_stats_str_n2h
128 * N2H stats strings
129 */
130static int8_t *nss_stats_str_n2h[NSS_STATS_N2H_MAX] = {
131 "queue_dropped",
132 "ticks",
133 "worst_ticks",
Murat Sezgin0c0561d2014-04-09 18:55:58 -0700134 "iterations",
Thomas Wu3fd8dd72014-06-11 15:57:05 -0700135 "pbuf_ocm_alloc_fails",
136 "pbuf_ocm_free_count",
137 "pbuf_ocm_total_count",
138 "pbuf_default_alloc_fails",
139 "pbuf_default_free_count",
140 "pbuf_default_total_count",
Sakthi Vignesh Radhakrishnan2a8ee962014-11-22 13:35:38 -0800141 "payload_fails",
Thomas Wu53679842015-01-22 13:37:35 -0800142 "payload_free_count",
Sakthi Vignesh Radhakrishnan2a8ee962014-11-22 13:35:38 -0800143 "h2n_control_packets",
144 "h2n_control_bytes",
145 "n2h_control_packets",
146 "n2h_control_bytes",
147 "h2n_data_packets",
148 "h2n_data_bytes",
149 "n2h_data_packets",
150 "n2h_data_bytes",
Saurabh Misra71034db2015-06-04 16:18:38 -0700151 "n2h_tot_payloads",
Guojun Jin85dfa7b2015-09-02 15:13:56 -0700152 "n2h_data_interface_invalid",
Abhishek Rastogi38cffff2013-06-02 11:25:47 +0530153};
154
155/*
Thomas Wuc3e382c2014-10-29 15:35:13 -0700156 * nss_stats_str_lso_rx
157 * LSO_RX stats strings
158 */
159static int8_t *nss_stats_str_lso_rx[NSS_STATS_LSO_RX_MAX] = {
160 "tx_dropped",
161 "dropped",
162 "pbuf_alloc_fail",
163 "pbuf_reference_fail"
164};
165
166/*
Abhishek Rastogi38cffff2013-06-02 11:25:47 +0530167 * nss_stats_str_drv
168 * Host driver stats strings
169 */
170static int8_t *nss_stats_str_drv[NSS_STATS_DRV_MAX] = {
171 "nbuf_alloc_errors",
172 "tx_queue_full[0]",
173 "tx_queue_full[1]",
174 "tx_buffers_empty",
175 "tx_buffers_pkt",
176 "tx_buffers_cmd",
177 "tx_buffers_crypto",
Murat Sezginb6e1a012015-09-29 14:06:37 -0700178 "tx_buffers_reuse",
Abhishek Rastogi38cffff2013-06-02 11:25:47 +0530179 "rx_buffers_empty",
180 "rx_buffers_pkt",
181 "rx_buffers_cmd_resp",
182 "rx_buffers_status_sync",
183 "rx_buffers_crypto",
Thomas Wu0acd8162014-12-07 15:43:39 -0800184 "rx_buffers_virtual",
185 "tx_skb_simple",
186 "tx_skb_nr_frags",
187 "tx_skb_fraglist",
188 "rx_skb_simple",
189 "rx_skb_nr_frags",
190 "rx_skb_fraglist",
Sundarajan Srinivasan6e0366b2015-01-20 12:10:42 -0800191 "rx_bad_desciptor",
Thomas Wu1fbf5212015-06-04 14:38:40 -0700192 "nss_skb_count",
193 "rx_chain_seg_processed",
194 "rx_frag_seg_processed"
Abhishek Rastogi38cffff2013-06-02 11:25:47 +0530195};
196
197/*
Abhishek Rastogi38cffff2013-06-02 11:25:47 +0530198 * nss_stats_str_pppoe
199 * PPPoE stats strings
200 */
201static int8_t *nss_stats_str_pppoe[NSS_STATS_PPPOE_MAX] = {
202 "create_requests",
203 "create_failures",
204 "destroy_requests",
205 "destroy_misses"
206};
207
208/*
209 * nss_stats_str_gmac
210 * GMAC stats strings
211 */
212static int8_t *nss_stats_str_gmac[NSS_STATS_GMAC_MAX] = {
213 "ticks",
214 "worst_ticks",
215 "iterations"
216};
217
218/*
Shashank Balashankar512cb602016-08-01 17:57:42 -0700219 * nss_stats_str_edma_tx
220 */
221static int8_t *nss_stats_str_edma_tx[NSS_STATS_EDMA_TX_MAX] = {
222 "tx_err",
223 "tx_dropped",
224 "desc_cnt"
225};
226
227/*
228 * nss_stats_str_edma_rx
229 */
230static int8_t *nss_stats_str_edma_rx[NSS_STATS_EDMA_RX_MAX] = {
231 "rx_csum_err",
232 "desc_cnt"
233};
234
235/*
236 * nss_stats_str_edma_txcmpl
237 */
238static int8_t *nss_stats_str_edma_txcmpl[NSS_STATS_EDMA_TXCMPL_MAX] = {
239 "desc_cnt"
240};
241
242/*
243 * nss_stats_str_edma_rxfill
244 */
245static int8_t *nss_stats_str_edma_rxfill[NSS_STATS_EDMA_RXFILL_MAX] = {
246 "desc_cnt"
247};
248
249/*
250 * nss_stats_str_edma_port_type
251 */
252static int8_t *nss_stats_str_edma_port_type[NSS_EDMA_PORT_TYPE_MAX] = {
253 "physical_port",
254 "virtual_port"
255};
256
257/*
258 * nss_stats_str_edma_port_ring_map
259 */
260static int8_t *nss_stats_str_edma_port_ring_map[NSS_EDMA_PORT_RING_MAP_MAX] = {
261 "rx_ring",
262 "tx_ring"
263};
264
265/*
Abhishek Rastogi84d95d02014-03-26 19:31:31 +0530266 * nss_stats_str_node
267 * Interface stats strings per node
Abhishek Rastogi38cffff2013-06-02 11:25:47 +0530268 */
Abhishek Rastogi84d95d02014-03-26 19:31:31 +0530269static int8_t *nss_stats_str_node[NSS_STATS_NODE_MAX] = {
Abhishek Rastogi38cffff2013-06-02 11:25:47 +0530270 "rx_packets",
271 "rx_bytes",
Abhishek Rastogi84d95d02014-03-26 19:31:31 +0530272 "rx_dropped",
Abhishek Rastogi38cffff2013-06-02 11:25:47 +0530273 "tx_packets",
274 "tx_bytes"
275};
276
277/*
Murat Sezgin99dab642014-08-28 14:40:34 -0700278 * nss_stats_str_eth_rx
279 * eth_rx stats strings
280 */
281static int8_t *nss_stats_str_eth_rx[NSS_STATS_ETH_RX_MAX] = {
282 "ticks",
283 "worst_ticks",
284 "iterations"
285};
286
287/*
Abhishek Rastogi38cffff2013-06-02 11:25:47 +0530288 * nss_stats_str_if_exception_unknown
289 * Interface stats strings for unknown exceptions
290 */
Abhishek Rastogi84d95d02014-03-26 19:31:31 +0530291static int8_t *nss_stats_str_if_exception_eth_rx[NSS_EXCEPTION_EVENT_ETH_RX_MAX] = {
Selin Dag2e8e48c2015-02-20 15:51:55 -0800292 "UNKNOWN_L3_PROTOCOL",
293 "ETH_HDR_MISSING",
Stephen Wangec5a85c2016-09-08 23:32:27 -0700294 "VLAN_MISSING",
295 "TRUSTSEC_HDR_MISSING"
Abhishek Rastogi38cffff2013-06-02 11:25:47 +0530296};
297
298/*
299 * nss_stats_str_if_exception_ipv4
300 * Interface stats strings for ipv4 exceptions
301 */
302static int8_t *nss_stats_str_if_exception_ipv4[NSS_EXCEPTION_EVENT_IPV4_MAX] = {
303 "IPV4_ICMP_HEADER_INCOMPLETE",
304 "IPV4_ICMP_UNHANDLED_TYPE",
305 "IPV4_ICMP_IPV4_HEADER_INCOMPLETE",
306 "IPV4_ICMP_IPV4_UDP_HEADER_INCOMPLETE",
307 "IPV4_ICMP_IPV4_TCP_HEADER_INCOMPLETE",
308 "IPV4_ICMP_IPV4_UNKNOWN_PROTOCOL",
309 "IPV4_ICMP_NO_ICME",
310 "IPV4_ICMP_FLUSH_TO_HOST",
311 "IPV4_TCP_HEADER_INCOMPLETE",
312 "IPV4_TCP_NO_ICME",
313 "IPV4_TCP_IP_OPTION",
314 "IPV4_TCP_IP_FRAGMENT",
315 "IPV4_TCP_SMALL_TTL",
316 "IPV4_TCP_NEEDS_FRAGMENTATION",
317 "IPV4_TCP_FLAGS",
318 "IPV4_TCP_SEQ_EXCEEDS_RIGHT_EDGE",
319 "IPV4_TCP_SMALL_DATA_OFFS",
320 "IPV4_TCP_BAD_SACK",
321 "IPV4_TCP_BIG_DATA_OFFS",
322 "IPV4_TCP_SEQ_BEFORE_LEFT_EDGE",
323 "IPV4_TCP_ACK_EXCEEDS_RIGHT_EDGE",
324 "IPV4_TCP_ACK_BEFORE_LEFT_EDGE",
325 "IPV4_UDP_HEADER_INCOMPLETE",
326 "IPV4_UDP_NO_ICME",
327 "IPV4_UDP_IP_OPTION",
328 "IPV4_UDP_IP_FRAGMENT",
329 "IPV4_UDP_SMALL_TTL",
330 "IPV4_UDP_NEEDS_FRAGMENTATION",
331 "IPV4_WRONG_TARGET_MAC",
332 "IPV4_HEADER_INCOMPLETE",
333 "IPV4_BAD_TOTAL_LENGTH",
334 "IPV4_BAD_CHECKSUM",
335 "IPV4_NON_INITIAL_FRAGMENT",
336 "IPV4_DATAGRAM_INCOMPLETE",
337 "IPV4_OPTIONS_INCOMPLETE",
Radha krishna Simha Jiguru59a1a1c2014-01-27 18:29:40 +0530338 "IPV4_UNKNOWN_PROTOCOL",
339 "IPV4_ESP_HEADER_INCOMPLETE",
340 "IPV4_ESP_NO_ICME",
341 "IPV4_ESP_IP_OPTION",
342 "IPV4_ESP_IP_FRAGMENT",
343 "IPV4_ESP_SMALL_TTL",
344 "IPV4_ESP_NEEDS_FRAGMENTATION",
345 "IPV4_INGRESS_VID_MISMATCH",
Thomas Wu8d6f4b22014-06-09 14:46:18 -0700346 "IPV4_INGRESS_VID_MISSING",
Radha krishna Simha Jiguru59a1a1c2014-01-27 18:29:40 +0530347 "IPV4_6RD_NO_ICME",
348 "IPV4_6RD_IP_OPTION",
349 "IPV4_6RD_IP_FRAGMENT",
Murat Sezgin0c0561d2014-04-09 18:55:58 -0700350 "IPV4_6RD_NEEDS_FRAGMENTATION",
351 "IPV4_DSCP_MARKING_MISMATCH",
Murat Sezgin7c5956a2014-05-12 09:59:51 -0700352 "IPV4_VLAN_MARKING_MISMATCH",
Murat Sezgin49d21d12016-02-03 17:36:47 -0800353 "IPV4_DEPRECATED",
Radha krishna Simha Jiguru00cfe562014-10-21 16:22:12 +0530354 "IPV4_GRE_HEADER_INCOMPLETE",
355 "IPV4_GRE_NO_ICME",
356 "IPV4_GRE_IP_OPTION",
357 "IPV4_GRE_IP_FRAGMENT",
358 "IPV4_GRE_SMALL_TTL",
359 "IPV4_GRE_NEEDS_FRAGMENTATION",
Shyam Sundere351f1b2015-12-17 14:11:51 +0530360 "IPV4_PPTP_GRE_SESSION_MATCH_FAIL",
361 "IPV4_PPTP_GRE_INVALID_PROTO",
362 "IPV4_PPTP_GRE_NO_CME",
363 "IPV4_PPTP_GRE_IP_OPTION",
364 "IPV4_PPTP_GRE_IP_FRAGMENT",
365 "IPV4_PPTP_GRE_SMALL_TTL",
366 "IPV4_PPTP_GRE_NEEDS_FRAGMENTATION",
367 "IPV4_DESTROY",
Selin Dag60ea2b22014-11-05 09:36:22 -0800368 "IPV4_FRAG_DF_SET",
369 "IPV4_FRAG_FAIL",
Saurabh Misra5b07cc02015-01-15 14:20:58 -0800370 "IPV4_ICMP_IPV4_UDPLITE_HEADER_INCOMPLETE",
371 "IPV4_UDPLITE_HEADER_INCOMPLETE",
372 "IPV4_UDPLITE_NO_ICME",
373 "IPV4_UDPLITE_IP_OPTION",
374 "IPV4_UDPLITE_IP_FRAGMENT",
375 "IPV4_UDPLITE_SMALL_TTL",
Kiran Kumar C. S. K12998002014-09-04 17:09:03 +0530376 "IPV4_UDPLITE_NEEDS_FRAGMENTATION",
377 "IPV4_MC_UDP_NO_ICME",
378 "IPV4_MC_MEM_ALLOC_FAILURE",
379 "IPV4_MC_UPDATE_FAILURE",
380 "IPV4_MC_PBUF_ALLOC_FAILURE"
Abhishek Rastogi38cffff2013-06-02 11:25:47 +0530381};
382
383/*
384 * nss_stats_str_if_exception_ipv6
385 * Interface stats strings for ipv6 exceptions
386 */
387static int8_t *nss_stats_str_if_exception_ipv6[NSS_EXCEPTION_EVENT_IPV6_MAX] = {
388 "IPV6_ICMP_HEADER_INCOMPLETE",
389 "IPV6_ICMP_UNHANDLED_TYPE",
390 "IPV6_ICMP_IPV6_HEADER_INCOMPLETE",
391 "IPV6_ICMP_IPV6_UDP_HEADER_INCOMPLETE",
392 "IPV6_ICMP_IPV6_TCP_HEADER_INCOMPLETE",
393 "IPV6_ICMP_IPV6_UNKNOWN_PROTOCOL",
394 "IPV6_ICMP_NO_ICME",
395 "IPV6_ICMP_FLUSH_TO_HOST",
396 "IPV6_TCP_HEADER_INCOMPLETE",
397 "IPV6_TCP_NO_ICME",
398 "IPV6_TCP_SMALL_HOP_LIMIT",
399 "IPV6_TCP_NEEDS_FRAGMENTATION",
400 "IPV6_TCP_FLAGS",
401 "IPV6_TCP_SEQ_EXCEEDS_RIGHT_EDGE",
402 "IPV6_TCP_SMALL_DATA_OFFS",
403 "IPV6_TCP_BAD_SACK",
404 "IPV6_TCP_BIG_DATA_OFFS",
405 "IPV6_TCP_SEQ_BEFORE_LEFT_EDGE",
406 "IPV6_TCP_ACK_EXCEEDS_RIGHT_EDGE",
407 "IPV6_TCP_ACK_BEFORE_LEFT_EDGE",
408 "IPV6_UDP_HEADER_INCOMPLETE",
409 "IPV6_UDP_NO_ICME",
410 "IPV6_UDP_SMALL_HOP_LIMIT",
411 "IPV6_UDP_NEEDS_FRAGMENTATION",
412 "IPV6_WRONG_TARGET_MAC",
413 "IPV6_HEADER_INCOMPLETE",
Radha krishna Simha Jiguru59a1a1c2014-01-27 18:29:40 +0530414 "IPV6_UNKNOWN_PROTOCOL",
Murat Sezgin0c0561d2014-04-09 18:55:58 -0700415 "IPV6_INGRESS_VID_MISMATCH",
Thomas Wu8d6f4b22014-06-09 14:46:18 -0700416 "IPV6_INGRESS_VID_MISSING",
Murat Sezgin0c0561d2014-04-09 18:55:58 -0700417 "IPV6_DSCP_MARKING_MISMATCH",
418 "IPV6_VLAN_MARKING_MISMATCH",
Murat Sezgin49d21d12016-02-03 17:36:47 -0800419 "IPV6_DEPRECATED",
Saurabh Misra5b07cc02015-01-15 14:20:58 -0800420 "IPV6_GRE_NO_ICME",
421 "IPV6_GRE_NEEDS_FRAGMENTATION",
422 "IPV6_GRE_SMALL_HOP_LIMIT",
423 "IPV6_DESTROY",
424 "IPV6_ICMP_IPV6_UDPLITE_HEADER_INCOMPLETE",
425 "IPV6_UDPLITE_HEADER_INCOMPLETE",
426 "IPV6_UDPLITE_NO_ICME",
427 "IPV6_UDPLITE_SMALL_HOP_LIMIT",
Kiran Kumar C. S. K12998002014-09-04 17:09:03 +0530428 "IPV6_UDPLITE_NEEDS_FRAGMENTATION",
429 "IPV6_MC_UDP_NO_ICME",
430 "IPV6_MC_MEM_ALLOC_FAILURE",
431 "IPV6_MC_UPDATE_FAILURE",
mandrw7125bac2016-01-14 19:36:46 +0530432 "IPV6_MC_PBUF_ALLOC_FAILURE",
433 "IPV6_ESP_HEADER_INCOMPLETE",
434 "IPV6_ESP_NO_ICME",
435 "IPV6_ESP_IP_FRAGMENT",
436 "IPV6_ESP_SMALL_HOP_LIMIT",
437 "IPV6_ESP_NEEDS_FRAGMENTATION"
Abhishek Rastogi38cffff2013-06-02 11:25:47 +0530438};
439
440/*
441 * nss_stats_str_if_exception_pppoe
442 * Interface stats strings for PPPoE exceptions
443 */
Ankit Dhanukaa1569ce2014-05-13 19:58:06 +0530444static int8_t *nss_stats_str_if_exception_pppoe[NSS_PPPOE_EXCEPTION_EVENT_MAX] = {
Abhishek Rastogi38cffff2013-06-02 11:25:47 +0530445 "PPPOE_WRONG_VERSION_OR_TYPE",
446 "PPPOE_WRONG_CODE",
447 "PPPOE_HEADER_INCOMPLETE",
Murat Sezgin7c5956a2014-05-12 09:59:51 -0700448 "PPPOE_UNSUPPORTED_PPP_PROTOCOL",
Murat Sezgin49d21d12016-02-03 17:36:47 -0800449 "PPPOE_DEPRECATED"
Abhishek Rastogi38cffff2013-06-02 11:25:47 +0530450};
451
452/*
Bharath M Kumarcc666e92014-12-24 19:17:28 +0530453 * nss_stats_str_wifi
454 * Wifi statistics strings
455 */
456static int8_t *nss_stats_str_wifi[NSS_STATS_WIFI_MAX] = {
457 "RX_PACKETS",
458 "RX_DROPPED",
459 "TX_PACKETS",
460 "TX_DROPPED",
461 "TX_TRANSMIT_COMPLETED",
462 "TX_MGMT_RECEIVED",
463 "TX_MGMT_TRANSMITTED",
464 "TX_MGMT_DROPPED",
465 "TX_MGMT_COMPLETED",
466 "TX_INV_PEER_ENQ_CNT",
467 "RX_INV_PEER_RCV_CNT",
468 "RX_PN_CHECK_FAILED",
469 "RX_PKTS_DELIVERD",
Radha krishna Simha Jiguru36304d12015-12-03 20:21:02 +0530470 "RX_BYTES_DELIVERED",
471 "TX_BYTES_COMPLETED",
Pamidipati, Vijay670ce7e2016-03-15 16:46:59 +0530472 "RX_DELIVER_UNALIGNED_DROP_CNT",
473 "TIDQ_ENQUEUE_CNT_0",
474 "TIDQ_ENQUEUE_CNT_1",
475 "TIDQ_ENQUEUE_CNT_2",
476 "TIDQ_ENQUEUE_CNT_3",
477 "TIDQ_ENQUEUE_CNT_4",
478 "TIDQ_ENQUEUE_CNT_5",
479 "TIDQ_ENQUEUE_CNT_6",
480 "TIDQ_ENQUEUE_CNT_7",
481 "TIDQ_DEQUEUE_CNT_0",
482 "TIDQ_DEQUEUE_CNT_1",
483 "TIDQ_DEQUEUE_CNT_2",
484 "TIDQ_DEQUEUE_CNT_3",
485 "TIDQ_DEQUEUE_CNT_4",
486 "TIDQ_DEQUEUE_CNT_5",
487 "TIDQ_DEQUEUE_CNT_6",
488 "TIDQ_DEQUEUE_CNT_7",
489 "TIDQ_ENQUEUE_FAIL_CNT_0",
490 "TIDQ_ENQUEUE_FAIL_CNT_1",
491 "TIDQ_ENQUEUE_FAIL_CNT_2",
492 "TIDQ_ENQUEUE_FAIL_CNT_3",
493 "TIDQ_ENQUEUE_FAIL_CNT_4",
494 "TIDQ_ENQUEUE_FAIL_CNT_5",
495 "TIDQ_ENQUEUE_FAIL_CNT_6",
496 "TIDQ_ENQUEUE_FAIL_CNT_7",
497 "TIDQ_TTL_EXPIRE_CNT_0",
498 "TIDQ_TTL_EXPIRE_CNT_1",
499 "TIDQ_TTL_EXPIRE_CNT_2",
500 "TIDQ_TTL_EXPIRE_CNT_3",
501 "TIDQ_TTL_EXPIRE_CNT_4",
502 "TIDQ_TTL_EXPIRE_CNT_5",
503 "TIDQ_TTL_EXPIRE_CNT_6",
504 "TIDQ_TTL_EXPIRE_CNT_7",
505 "TIDQ_DEQUEUE_REQ_CNT_0",
506 "TIDQ_DEQUEUE_REQ_CNT_1",
507 "TIDQ_DEQUEUE_REQ_CNT_2",
508 "TIDQ_DEQUEUE_REQ_CNT_3",
509 "TIDQ_DEQUEUE_REQ_CNT_4",
510 "TIDQ_DEQUEUE_REQ_CNT_5",
511 "TIDQ_DEQUEUE_REQ_CNT_6",
512 "TIDQ_DEQUEUE_REQ_CNT_7",
513 "TOTAL_TIDQ_DEPTH",
514 "RX_HTT_FETCH_CNT",
515 "TOTAL_TIDQ_BYPASS_CNT",
516 "GLOBAL_Q_FULL_CNT",
517 "TIDQ_FULL_CNT",
Bharath M Kumarcc666e92014-12-24 19:17:28 +0530518};
519
520/*
Stephen Wang9779d952015-10-28 11:39:07 -0700521 * nss_stats_str_portid
522 * PortID statistics strings
523 */
524static int8_t *nss_stats_str_portid[NSS_STATS_PORTID_MAX] = {
525 "RX_INVALID_HEADER",
526};
527
528/*
Tushar Mathurff8741b2015-12-02 20:28:59 +0530529 * nss_stats_str_dtls_session_stats
530 * DTLS statistics strings for nss session stats
531 */
532static int8_t *nss_stats_str_dtls_session_debug_stats[NSS_STATS_DTLS_SESSION_MAX] = {
533 "RX_PKTS",
534 "TX_PKTS",
535 "RX_DROPPED",
536 "RX_AUTH_DONE",
537 "TX_AUTH_DONE",
538 "RX_CIPHER_DONE",
539 "TX_CIPHER_DONE",
540 "RX_CBUF_ALLOC_FAIL",
541 "TX_CBUF_ALLOC_FAIL",
542 "TX_CENQUEUE_FAIL",
543 "RX_CENQUEUE_FAIL",
544 "TX_DROPPED_HROOM",
545 "TX_DROPPED_TROOM",
546 "TX_FORWARD_ENQUEUE_FAIL",
547 "RX_FORWARD_ENQUEUE_FAIL",
548 "RX_INVALID_VERSION",
549 "RX_INVALID_EPOCH",
550 "RX_MALFORMED",
551 "RX_CIPHER_FAIL",
552 "RX_AUTH_FAIL",
553 "RX_CAPWAP_CLASSIFY_FAIL",
554 "RX_SINGLE_REC_DGRAM",
555 "RX_MULTI_REC_DGRAM",
556 "RX_REPLAY_FAIL",
557 "RX_REPLAY_DUPLICATE",
558 "RX_REPLAY_OUT_OF_WINDOW",
559 "OUTFLOW_QUEUE_FULL",
560 "DECAP_QUEUE_FULL",
561 "PBUF_ALLOC_FAIL",
562 "PBUF_COPY_FAIL",
563 "EPOCH",
564 "TX_SEQ_HIGH",
565 "TX_SEQ_LOW",
566};
567
568/*
Thomas Wu71c5ecc2016-06-21 11:15:52 -0700569 * nss_stats_str_gre_tunnel_session_stats
570 * GRE Tunnel statistics strings for nss session stats
571 */
572static int8_t *nss_stats_str_gre_tunnel_session_debug_stats[NSS_STATS_GRE_TUNNEL_SESSION_MAX] = {
573 "RX_PKTS",
574 "TX_PKTS",
575 "RX_DROPPED",
576 "RX_MALFORMED",
577 "RX_INVALID_PROT",
578 "DECAP_QUEUE_FULL",
579 "RX_SINGLE_REC_DGRAM",
580 "RX_INVALID_REC_DGRAM",
581 "BUFFER_ALLOC_FAIL",
582 "BUFFER_COPY_FAIL",
583 "OUTFLOW_QUEUE_FULL",
584 "TX_DROPPED_HROOM",
585 "RX_CBUFFER_ALLOC_FAIL",
586 "RX_CENQUEUE_FAIL",
587 "RX_DECRYPT_DONE",
588 "RX_FORWARD_ENQUEUE_FAIL",
589 "TX_CBUFFER_ALLOC_FAIL",
590 "TX_CENQUEUE_FAIL",
591 "TX_DROPPED_TROOM",
592 "TX_FORWARD_ENQUEUE_FAIL",
593 "TX_CIPHER_DONE",
594 "CRYPTO_NOSUPP",
595};
596
597/*
ratheesh kannoth7af985d2015-06-24 15:08:40 +0530598 * nss_stats_str_l2tpv2_session_stats
599 * l2tpv2 statistics strings for nss session stats
600 */
601static int8_t *nss_stats_str_l2tpv2_session_debug_stats[NSS_STATS_L2TPV2_SESSION_MAX] = {
602 "RX_PPP_LCP_PKTS",
603 "RX_EXP_PKTS",
604 "ENCAP_PBUF_ALLOC_FAIL",
605 "DECAP_PBUF_ALLOC_FAIL"
606};
607
608/*
ratheesh kannotha1245c32015-11-04 16:45:43 +0530609 * nss_stats_str_map_t_instance_stats
610 * map_t statistics strings for nss session stats
611 */
612static int8_t *nss_stats_str_map_t_instance_debug_stats[NSS_STATS_MAP_T_MAX] = {
613 "MAP_T_V4_TO_V6_PBUF_EXCEPTION_PKTS",
614 "MAP_T_V4_TO_V6_PBUF_NO_MATCHING_RULE",
615 "MAP_T_V4_TO_V6_PBUF_NOT_TCP_OR_UDP",
ratheesh kannoth32b6c422016-06-05 10:08:15 +0530616 "MAP_T_V4_TO_V6_RULE_ERR_LOCAL_PSID",
ratheesh kannotha1245c32015-11-04 16:45:43 +0530617 "MAP_T_V4_TO_V6_RULE_ERR_LOCAL_IPV6",
618 "MAP_T_V4_TO_V6_RULE_ERR_REMOTE_PSID",
619 "MAP_T_V4_TO_V6_RULE_ERR_REMOTE_EA_BITS",
620 "MAP_T_V4_TO_V6_RULE_ERR_REMOTE_IPV6",
621 "MAP_T_V6_TO_V4_PBUF_EXCEPTION_PKTS",
622 "MAP_T_V6_TO_V4_PBUF_NO_MATCHING_RULE",
623 "MAP_T_V6_TO_V4_PBUF_NOT_TCP_OR_UDP",
624 "MAP_T_V6_TO_V4_RULE_ERR_LOCAL_IPV4",
625 "MAP_T_V6_TO_V4_RULE_ERR_REMOTE_IPV4"
626};
627
628/*
Amit Gupta316729b2016-08-12 12:21:15 +0530629 * nss_stats_str_ppe_conn
630 * PPE statistics strings for nss flow stats
631 */
632static int8_t *nss_stats_str_ppe_conn[NSS_STATS_PPE_CONN_MAX] = {
633 "v4 routed flows",
634 "v4 bridge flows",
635 "v4 conn create req",
636 "v4 conn create fail",
637 "v4 conn destroy req",
638 "v4 conn destroy fail",
639
640 "v6 routed flows",
641 "v6 bridge flows",
642 "v6 conn create req",
643 "v6 conn create fail",
644 "v6 conn destroy req",
645 "v6 conn destroy fail",
646
647 "conn fail - nexthop full",
648 "conn fail - flow full",
649 "conn fail - host full",
650 "conn fail - pub-ip full",
651 "conn fail - port not setup",
652 "conn fail - rw fifo full",
653 "conn fail - unknown proto",
654 "conn fail - ppe not responding",
655};
656
657/*
658 * nss_stats_str_ppe_l3
659 * PPE statistics strings for nss debug stats
660 */
661static int8_t *nss_stats_str_ppe_l3[NSS_STATS_PPE_L3_MAX] = {
662 "PPE L3 dbg reg 0",
663 "PPE L3 dbg reg 1",
664 "PPE L3 dbg reg 2",
665 "PPE L3 dbg reg 3",
666 "PPE L3 dbg reg 4",
667 "PPE L3 dbg reg port",
668};
669
670/*
671 * nss_stats_str_ppe_code
672 * PPE statistics strings for nss debug stats
673 */
674static int8_t *nss_stats_str_ppe_code[NSS_STATS_PPE_CODE_MAX] = {
675 "PPE CPU_CODE",
676 "PPE DROP_CODE",
677};
678
679/*
Shyam Sunder66e889d2015-11-02 15:31:20 +0530680 * nss_stats_str_ppt_session_stats
681 * PPTP statistics strings for nss session stats
682 */
683static int8_t *nss_stats_str_pptp_session_debug_stats[NSS_STATS_PPTP_SESSION_MAX] = {
Shyam Sundere351f1b2015-12-17 14:11:51 +0530684 "ENCAP_RX_PACKETS",
685 "ENCAP_RX_BYTES",
686 "ENCAP_TX_PACKETS",
687 "ENCAP_TX_BYTES",
688 "ENCAP_RX_DROP",
689 "DECAP_RX_PACKETS",
690 "DECAP_RX_BYTES",
691 "DECAP_TX_PACKETS",
692 "DECAP_TX_BYTES",
693 "DECAP_RX_DROP",
694 "ENCAP_HEADROOM_ERR",
695 "ENCAP_SMALL_SIZE",
696 "ENCAP_PNODE_ENQUEUE_FAIL",
697 "DECAP_NO_SEQ_NOR_ACK",
698 "DECAP_INVAL_GRE_FLAGS",
699 "DECAP_INVAL_GRE_PROTO",
700 "DECAP_WRONG_SEQ",
701 "DECAP_INVAL_PPP_HDR",
702 "DECAP_PPP_LCP",
703 "DECAP_UNSUPPORTED_PPP_PROTO",
704 "DECAP_PNODE_ENQUEUE_FAIL",
Shyam Sunder66e889d2015-11-02 15:31:20 +0530705};
706
707/*
Stephen Wangec5a85c2016-09-08 23:32:27 -0700708 * nss_stats_str_trustsec_tx
709 * Trustsec TX stats strings
710 */
711static int8_t *nss_stats_str_trustsec_tx[NSS_STATS_TRUSTSEC_TX_MAX] = {
712 "INVALID_SRC",
713 "UNCONFIGURED_SRC",
714 "HEADROOM_NOT_ENOUGH",
715};
716
717/*
Abhishek Rastogi38cffff2013-06-02 11:25:47 +0530718 * nss_stats_ipv4_read()
719 * Read IPV4 stats
720 */
721static ssize_t nss_stats_ipv4_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
722{
723 int32_t i;
Abhishek Rastogi38cffff2013-06-02 11:25:47 +0530724 /*
725 * max output lines = #stats + start tag line + end tag line + three blank lines
726 */
Abhishek Rastogi84d95d02014-03-26 19:31:31 +0530727 uint32_t max_output_lines = (NSS_STATS_NODE_MAX + 2) + (NSS_STATS_IPV4_MAX + 3) + (NSS_EXCEPTION_EVENT_IPV4_MAX + 3) + 5;
Abhishek Rastogi38cffff2013-06-02 11:25:47 +0530728 size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
729 size_t size_wr = 0;
730 ssize_t bytes_read = 0;
731 uint64_t *stats_shadow;
732
733 char *lbuf = kzalloc(size_al, GFP_KERNEL);
734 if (unlikely(lbuf == NULL)) {
735 nss_warning("Could not allocate memory for local statistics buffer");
736 return 0;
737 }
738
Abhishek Rastogi84d95d02014-03-26 19:31:31 +0530739 /*
740 * Note: The assumption here is that exception event count is larger than other statistics count for IPv4
741 */
742 stats_shadow = kzalloc(NSS_EXCEPTION_EVENT_IPV4_MAX * 8, GFP_KERNEL);
Abhishek Rastogi38cffff2013-06-02 11:25:47 +0530743 if (unlikely(stats_shadow == NULL)) {
744 nss_warning("Could not allocate memory for local shadow buffer");
Ankit Dhanuka14999992014-11-12 15:35:11 +0530745 kfree(lbuf);
Abhishek Rastogi38cffff2013-06-02 11:25:47 +0530746 return 0;
747 }
748
Abhishek Rastogia1a07972014-04-01 19:43:33 +0530749 size_wr = scnprintf(lbuf, size_al, "ipv4 stats start:\n\n");
Abhishek Rastogi84d95d02014-03-26 19:31:31 +0530750
751 /*
752 * Common node stats
753 */
Abhishek Rastogia1a07972014-04-01 19:43:33 +0530754 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "common node stats:\n\n");
Abhishek Rastogi84d95d02014-03-26 19:31:31 +0530755 spin_lock_bh(&nss_top_main.stats_lock);
756 for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
757 stats_shadow[i] = nss_top_main.stats_node[NSS_IPV4_RX_INTERFACE][i];
758 }
759
760 spin_unlock_bh(&nss_top_main.stats_lock);
761
762 for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
763 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
764 "%s = %llu\n", nss_stats_str_node[i], stats_shadow[i]);
765 }
766
767 /*
768 * IPv4 node stats
769 */
Abhishek Rastogia1a07972014-04-01 19:43:33 +0530770 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nipv4 node stats:\n\n");
Abhishek Rastogi84d95d02014-03-26 19:31:31 +0530771
Abhishek Rastogi38cffff2013-06-02 11:25:47 +0530772 spin_lock_bh(&nss_top_main.stats_lock);
773 for (i = 0; (i < NSS_STATS_IPV4_MAX); i++) {
774 stats_shadow[i] = nss_top_main.stats_ipv4[i];
775 }
776
777 spin_unlock_bh(&nss_top_main.stats_lock);
778
779 for (i = 0; (i < NSS_STATS_IPV4_MAX); i++) {
780 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
781 "%s = %llu\n", nss_stats_str_ipv4[i], stats_shadow[i]);
782 }
783
Abhishek Rastogi84d95d02014-03-26 19:31:31 +0530784 /*
785 * Exception stats
786 */
Abhishek Rastogia1a07972014-04-01 19:43:33 +0530787 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nipv4 exception stats:\n\n");
Abhishek Rastogi84d95d02014-03-26 19:31:31 +0530788
789 spin_lock_bh(&nss_top_main.stats_lock);
790 for (i = 0; (i < NSS_EXCEPTION_EVENT_IPV4_MAX); i++) {
791 stats_shadow[i] = nss_top_main.stats_if_exception_ipv4[i];
792 }
793
794 spin_unlock_bh(&nss_top_main.stats_lock);
795
796 for (i = 0; (i < NSS_EXCEPTION_EVENT_IPV4_MAX); i++) {
797 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
798 "%s = %llu\n", nss_stats_str_if_exception_ipv4[i], stats_shadow[i]);
799 }
800
Abhishek Rastogia1a07972014-04-01 19:43:33 +0530801 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nipv4 stats end\n\n");
Abhishek Rastogi38cffff2013-06-02 11:25:47 +0530802 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
803 kfree(lbuf);
804 kfree(stats_shadow);
805
806 return bytes_read;
807}
808
809/*
Selin Dag6d9b0c12014-11-04 18:27:21 -0800810 * nss_stats_ipv4_reasm_read()
811 * Read IPV4 reassembly stats
812 */
813static ssize_t nss_stats_ipv4_reasm_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
814{
815 int32_t i;
816 /*
817 * max output lines = #stats + start tag line + end tag line + three blank lines
818 */
819 uint32_t max_output_lines = (NSS_STATS_NODE_MAX + 2) + (NSS_STATS_IPV4_REASM_MAX + 3) + 5;
820 size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
821 size_t size_wr = 0;
822 ssize_t bytes_read = 0;
823 uint64_t *stats_shadow;
824
825 char *lbuf = kzalloc(size_al, GFP_KERNEL);
826 if (unlikely(lbuf == NULL)) {
827 nss_warning("Could not allocate memory for local statistics buffer");
828 return 0;
829 }
830
831 stats_shadow = kzalloc(NSS_STATS_IPV4_REASM_MAX * 8, GFP_KERNEL);
832 if (unlikely(stats_shadow == NULL)) {
833 nss_warning("Could not allocate memory for local shadow buffer");
Ankit Dhanuka14999992014-11-12 15:35:11 +0530834 kfree(lbuf);
Selin Dag6d9b0c12014-11-04 18:27:21 -0800835 return 0;
836 }
837
838 size_wr = scnprintf(lbuf, size_al, "ipv4 reasm stats start:\n\n");
839
840 /*
841 * Common node stats
842 */
843 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "common node stats:\n\n");
844 spin_lock_bh(&nss_top_main.stats_lock);
845 for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
846 stats_shadow[i] = nss_top_main.stats_node[NSS_IPV4_REASM_INTERFACE][i];
847 }
848
849 spin_unlock_bh(&nss_top_main.stats_lock);
850
851 for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
852 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
853 "%s = %llu\n", nss_stats_str_node[i], stats_shadow[i]);
854 }
855
856 /*
857 * IPv4 reasm node stats
858 */
859 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nipv4 reasm node stats:\n\n");
860
861 spin_lock_bh(&nss_top_main.stats_lock);
862 for (i = 0; (i < NSS_STATS_IPV4_REASM_MAX); i++) {
863 stats_shadow[i] = nss_top_main.stats_ipv4_reasm[i];
864 }
865
866 spin_unlock_bh(&nss_top_main.stats_lock);
867
868 for (i = 0; (i < NSS_STATS_IPV4_REASM_MAX); i++) {
869 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
870 "%s = %llu\n", nss_stats_str_ipv4_reasm[i], stats_shadow[i]);
871 }
872
873 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nipv4 reasm stats end\n\n");
874 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
875 kfree(lbuf);
876 kfree(stats_shadow);
877
878 return bytes_read;
879}
880
881/*
Abhishek Rastogi38cffff2013-06-02 11:25:47 +0530882 * nss_stats_ipv6_read()
Abhishek Rastogi84d95d02014-03-26 19:31:31 +0530883 * Read IPV6 stats
Abhishek Rastogi38cffff2013-06-02 11:25:47 +0530884 */
885static ssize_t nss_stats_ipv6_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
886{
887 int32_t i;
888
889 /*
890 * max output lines = #stats + start tag line + end tag line + three blank lines
891 */
Abhishek Rastogi84d95d02014-03-26 19:31:31 +0530892 uint32_t max_output_lines = (NSS_STATS_NODE_MAX + 2) + (NSS_STATS_IPV6_MAX + 3) + (NSS_EXCEPTION_EVENT_IPV6_MAX + 3) + 5;
Abhishek Rastogi38cffff2013-06-02 11:25:47 +0530893 size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
894 size_t size_wr = 0;
895 ssize_t bytes_read = 0;
896 uint64_t *stats_shadow;
897
898 char *lbuf = kzalloc(size_al, GFP_KERNEL);
899 if (unlikely(lbuf == NULL)) {
900 nss_warning("Could not allocate memory for local statistics buffer");
901 return 0;
902 }
903
Abhishek Rastogi84d95d02014-03-26 19:31:31 +0530904 /*
905 * Note: The assumption here is that exception event count is larger than other statistics count for IPv4
906 */
907 stats_shadow = kzalloc(NSS_EXCEPTION_EVENT_IPV6_MAX * 8, GFP_KERNEL);
Abhishek Rastogi38cffff2013-06-02 11:25:47 +0530908 if (unlikely(stats_shadow == NULL)) {
909 nss_warning("Could not allocate memory for local shadow buffer");
Ankit Dhanuka14999992014-11-12 15:35:11 +0530910 kfree(lbuf);
Abhishek Rastogi38cffff2013-06-02 11:25:47 +0530911 return 0;
912 }
913
Abhishek Rastogia1a07972014-04-01 19:43:33 +0530914 size_wr = scnprintf(lbuf, size_al, "ipv6 stats start:\n\n");
Abhishek Rastogi84d95d02014-03-26 19:31:31 +0530915
916 /*
917 * Common node stats
918 */
Abhishek Rastogia1a07972014-04-01 19:43:33 +0530919 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "common node stats:\n\n");
Abhishek Rastogi84d95d02014-03-26 19:31:31 +0530920 spin_lock_bh(&nss_top_main.stats_lock);
921 for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
922 stats_shadow[i] = nss_top_main.stats_node[NSS_IPV6_RX_INTERFACE][i];
923 }
924
925 spin_unlock_bh(&nss_top_main.stats_lock);
926
927 for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
928 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
929 "%s = %llu\n", nss_stats_str_node[i], stats_shadow[i]);
930 }
931
932 /*
933 * IPv6 node stats
934 */
Abhishek Rastogia1a07972014-04-01 19:43:33 +0530935 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nipv6 node stats:\n\n");
Abhishek Rastogi84d95d02014-03-26 19:31:31 +0530936
Abhishek Rastogi38cffff2013-06-02 11:25:47 +0530937 spin_lock_bh(&nss_top_main.stats_lock);
938 for (i = 0; (i < NSS_STATS_IPV6_MAX); i++) {
939 stats_shadow[i] = nss_top_main.stats_ipv6[i];
940 }
941
942 spin_unlock_bh(&nss_top_main.stats_lock);
943
944 for (i = 0; (i < NSS_STATS_IPV6_MAX); i++) {
945 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
946 "%s = %llu\n", nss_stats_str_ipv6[i], stats_shadow[i]);
947 }
948
Abhishek Rastogi84d95d02014-03-26 19:31:31 +0530949 /*
950 * Exception stats
951 */
Abhishek Rastogia1a07972014-04-01 19:43:33 +0530952 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nipv6 exception stats:\n\n");
Abhishek Rastogi84d95d02014-03-26 19:31:31 +0530953
954 spin_lock_bh(&nss_top_main.stats_lock);
955 for (i = 0; (i < NSS_EXCEPTION_EVENT_IPV6_MAX); i++) {
956 stats_shadow[i] = nss_top_main.stats_if_exception_ipv6[i];
957 }
958
959 spin_unlock_bh(&nss_top_main.stats_lock);
960
961 for (i = 0; (i < NSS_EXCEPTION_EVENT_IPV6_MAX); i++) {
962 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
963 "%s = %llu\n", nss_stats_str_if_exception_ipv6[i], stats_shadow[i]);
964 }
965
966 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,"\nipv6 stats end\n\n");
Abhishek Rastogi38cffff2013-06-02 11:25:47 +0530967 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
968 kfree(lbuf);
969 kfree(stats_shadow);
970
971 return bytes_read;
972}
973
974/*
Selin Dag60a2f5b2015-06-29 14:39:49 -0700975 * nss_stats_ipv6_reasm_read()
976 * Read IPV6 reassembly stats
977 */
978static ssize_t nss_stats_ipv6_reasm_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
979{
980 int32_t i;
981 /*
982 * max output lines = #stats + start tag line + end tag line + three blank lines
983 */
984 uint32_t max_output_lines = (NSS_STATS_NODE_MAX + 2) + (NSS_STATS_IPV6_REASM_MAX + 3) + 5;
985 size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
986 size_t size_wr = 0;
987 ssize_t bytes_read = 0;
988 uint64_t *stats_shadow;
989
990 char *lbuf = kzalloc(size_al, GFP_KERNEL);
991 if (unlikely(lbuf == NULL)) {
992 nss_warning("Could not allocate memory for local statistics buffer");
993 return 0;
994 }
995
996 stats_shadow = kzalloc(NSS_STATS_IPV6_REASM_MAX * 8, GFP_KERNEL);
997 if (unlikely(stats_shadow == NULL)) {
998 nss_warning("Could not allocate memory for local shadow buffer");
999 kfree(lbuf);
1000 return 0;
1001 }
1002
1003 size_wr = scnprintf(lbuf, size_al, "ipv6 reasm stats start:\n\n");
1004
1005 /*
1006 * Common node stats
1007 */
1008 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "common node stats:\n\n");
1009 spin_lock_bh(&nss_top_main.stats_lock);
1010 for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
1011 stats_shadow[i] = nss_top_main.stats_node[NSS_IPV6_REASM_INTERFACE][i];
1012 }
1013
1014 spin_unlock_bh(&nss_top_main.stats_lock);
1015
1016 for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
1017 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
1018 "%s = %llu\n", nss_stats_str_node[i], stats_shadow[i]);
1019 }
1020
1021 /*
1022 * Ipv6 reasm node stats
1023 */
1024 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nipv6 reasm node stats:\n\n");
1025
1026 spin_lock_bh(&nss_top_main.stats_lock);
1027 for (i = 0; (i < NSS_STATS_IPV6_REASM_MAX); i++) {
1028 stats_shadow[i] = nss_top_main.stats_ipv6_reasm[i];
1029 }
1030
1031 spin_unlock_bh(&nss_top_main.stats_lock);
1032
1033 for (i = 0; (i < NSS_STATS_IPV6_REASM_MAX); i++) {
1034 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
1035 "%s = %llu\n", nss_stats_str_ipv6_reasm[i], stats_shadow[i]);
1036 }
1037
1038 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nipv6 reasm stats end\n\n");
1039 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
1040 kfree(lbuf);
1041 kfree(stats_shadow);
1042
1043 return bytes_read;
1044}
1045
1046/*
Shashank Balashankar512cb602016-08-01 17:57:42 -07001047 * nss_stats_edma_port_stats_read()
1048 * Read EDMA port stats
1049 */
1050static ssize_t nss_stats_edma_port_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
1051{
1052 int32_t i;
1053
1054 /*
1055 * max output lines = #stats + start tag line + end tag line + three blank lines
1056 */
1057 uint32_t max_output_lines = (NSS_STATS_NODE_MAX + 2) + 3;
1058 size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
1059 size_t size_wr = 0;
1060 ssize_t bytes_read = 0;
1061 uint64_t *stats_shadow;
1062 struct nss_stats_data *data = fp->private_data;
1063
1064 char *lbuf = kzalloc(size_al, GFP_KERNEL);
1065 if (unlikely(lbuf == NULL)) {
1066 nss_warning("Could not allocate memory for local statistics buffer");
1067 return 0;
1068 }
1069
1070 /*
1071 * Note: The assumption here is that we do not have more than 64 stats
1072 */
1073 stats_shadow = kzalloc(64 * 8, GFP_KERNEL);
1074 if (unlikely(stats_shadow == NULL)) {
1075 nss_warning("Could not allocate memory for local shadow buffer");
1076 kfree(lbuf);
1077 return 0;
1078 }
1079
1080 size_wr = scnprintf(lbuf, size_al, "edma stats start:\n\n");
1081
1082 /*
1083 * Common node stats
1084 */
1085 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "edma port %d stats:\n\n", data->edma_id);
1086 spin_lock_bh(&nss_top_main.stats_lock);
1087 for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
1088 stats_shadow[i] = nss_top_main.stats_edma.port[data->edma_id].port_stats[i];
1089 }
1090
1091 spin_unlock_bh(&nss_top_main.stats_lock);
1092
1093 for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
1094 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
1095 "%s = %llu\n", nss_stats_str_node[i], stats_shadow[i]);
1096 }
1097
1098 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nedma stats end\n\n");
1099 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
1100 kfree(lbuf);
1101 kfree(stats_shadow);
1102
1103 return bytes_read;
1104}
1105
1106/*
1107 * nss_stats_edma_port_type_read()
1108 * Read EDMA port type
1109 */
1110static ssize_t nss_stats_edma_port_type_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
1111{
Shashank Balashankar512cb602016-08-01 17:57:42 -07001112 /*
1113 * max output lines = #stats + start tag line + end tag line + three blank lines
1114 */
1115 uint32_t max_output_lines = (1 + 2) + 3;
1116 size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
1117 size_t size_wr = 0;
1118 ssize_t bytes_read = 0;
1119 uint64_t port_type;
1120 struct nss_stats_data *data = fp->private_data;
1121
1122 char *lbuf = kzalloc(size_al, GFP_KERNEL);
1123 if (unlikely(lbuf == NULL)) {
1124 nss_warning("Could not allocate memory for local statistics buffer");
1125 return 0;
1126 }
1127
1128 size_wr = scnprintf(lbuf, size_al, "edma port type start:\n\n");
1129 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "edma port %d type:\n\n", data->edma_id);
1130
1131 /*
1132 * Port type
1133 */
1134 spin_lock_bh(&nss_top_main.stats_lock);
1135 port_type = nss_top_main.stats_edma.port[data->edma_id].port_type;
1136 spin_unlock_bh(&nss_top_main.stats_lock);
1137
1138 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
1139 "port_type = %s\n", nss_stats_str_edma_port_type[port_type]);
1140
1141 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nedma stats end\n");
1142 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
1143 kfree(lbuf);
1144
1145 return bytes_read;
1146}
1147
1148/*
1149 * nss_stats_edma_port_ring_map_read()
1150 * Read EDMA port ring map
1151 */
1152static ssize_t nss_stats_edma_port_ring_map_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
1153{
1154 int32_t i;
1155
1156 /*
1157 * max output lines = #stats + start tag line + end tag line + three blank lines
1158 */
1159 uint32_t max_output_lines = (4 + 2) + 3;
1160 size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
1161 size_t size_wr = 0;
1162 ssize_t bytes_read = 0;
1163 uint64_t *stats_shadow;
1164 struct nss_stats_data *data = fp->private_data;
1165
1166 char *lbuf = kzalloc(size_al, GFP_KERNEL);
1167 if (unlikely(lbuf == NULL)) {
1168 nss_warning("Could not allocate memory for local statistics buffer");
1169 return 0;
1170 }
1171
1172 /*
1173 * Note: The assumption here is that we do not have more than 64 stats
1174 */
1175 stats_shadow = kzalloc(64 * 8, GFP_KERNEL);
1176 if (unlikely(stats_shadow == NULL)) {
1177 nss_warning("Could not allocate memory for local shadow buffer");
1178 kfree(lbuf);
1179 return 0;
1180 }
1181
1182 size_wr = scnprintf(lbuf, size_al, "edma port ring map start:\n\n");
1183
1184 /*
1185 * Port ring map
1186 */
1187 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "edma port %d ring map:\n\n", data->edma_id);
1188 spin_lock_bh(&nss_top_main.stats_lock);
1189 for (i = 0; i < NSS_EDMA_PORT_RING_MAP_MAX; i++) {
1190 stats_shadow[i] = nss_top_main.stats_edma.port[data->edma_id].port_ring_map[i];
1191 }
1192
1193 spin_unlock_bh(&nss_top_main.stats_lock);
1194
1195 for (i = 0; i < NSS_EDMA_PORT_RING_MAP_MAX; i++) {
1196 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
1197 "%s = %llu\n", nss_stats_str_edma_port_ring_map[i], stats_shadow[i]);
1198 }
1199
1200 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nedma stats end\n\n");
1201 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
1202 kfree(lbuf);
1203 kfree(stats_shadow);
1204
1205 return bytes_read;
1206}
1207
1208/*
1209 * nss_stats_edma_txring_read()
1210 * Read EDMA Tx ring stats
1211 */
1212static ssize_t nss_stats_edma_txring_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
1213{
1214 int32_t i;
1215
1216 /*
1217 * max output lines = #stats + start tag line + end tag line + three blank lines
1218 */
1219 uint32_t max_output_lines = (NSS_STATS_EDMA_TX_MAX + 2) + 3;
1220 size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
1221 size_t size_wr = 0;
1222 ssize_t bytes_read = 0;
1223 uint64_t *stats_shadow;
1224 struct nss_stats_data *data = fp->private_data;
1225
1226 char *lbuf = kzalloc(size_al, GFP_KERNEL);
1227 if (unlikely(lbuf == NULL)) {
1228 nss_warning("Could not allocate memory for local statistics buffer");
1229 return 0;
1230 }
1231
1232 /*
1233 * Note: The assumption here is that we do not have more than 64 stats
1234 */
1235 stats_shadow = kzalloc(64 * 8, GFP_KERNEL);
1236 if (unlikely(stats_shadow == NULL)) {
1237 nss_warning("Could not allocate memory for local shadow buffer");
1238 kfree(lbuf);
1239 return 0;
1240 }
1241
1242 size_wr = scnprintf(lbuf, size_al, "edma Tx ring stats start:\n\n");
1243
1244 /*
1245 * Tx ring stats
1246 */
1247 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "Tx ring %d stats:\n\n", data->edma_id);
1248 spin_lock_bh(&nss_top_main.stats_lock);
1249 for (i = 0; i < NSS_STATS_EDMA_TX_MAX; i++) {
1250 stats_shadow[i] = nss_top_main.stats_edma.tx_stats[data->edma_id][i];
1251 }
1252
1253 spin_unlock_bh(&nss_top_main.stats_lock);
1254
1255 for (i = 0; i < NSS_STATS_EDMA_TX_MAX; i++) {
1256 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
1257 "%s = %llu\n", nss_stats_str_edma_tx[i], stats_shadow[i]);
1258 }
1259
1260 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nedma Tx ring stats end\n\n");
1261 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
1262 kfree(lbuf);
1263 kfree(stats_shadow);
1264
1265 return bytes_read;
1266}
1267
1268/*
1269 * nss_stats_edma_rxring_read()
1270 * Read EDMA rxring stats
1271 */
1272static ssize_t nss_stats_edma_rxring_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
1273{
1274 int32_t i;
1275
1276 /*
1277 * max output lines = #stats + start tag line + end tag line + three blank lines
1278 */
1279 uint32_t max_output_lines = (NSS_STATS_EDMA_RX_MAX + 2) + 3;
1280 size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
1281 size_t size_wr = 0;
1282 ssize_t bytes_read = 0;
1283 uint64_t *stats_shadow;
1284 struct nss_stats_data *data = fp->private_data;
1285
1286 char *lbuf = kzalloc(size_al, GFP_KERNEL);
1287 if (unlikely(lbuf == NULL)) {
1288 nss_warning("Could not allocate memory for local statistics buffer");
1289 return 0;
1290 }
1291
1292 /*
1293 * Note: The assumption here is that we do not have more than 64 stats
1294 */
1295 stats_shadow = kzalloc(64 * 8, GFP_KERNEL);
1296 if (unlikely(stats_shadow == NULL)) {
1297 nss_warning("Could not allocate memory for local shadow buffer");
1298 kfree(lbuf);
1299 return 0;
1300 }
1301
1302 size_wr = scnprintf(lbuf, size_al, "edma Rx ring stats start:\n\n");
1303
1304 /*
1305 * RX ring stats
1306 */
1307 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "Rx ring %d stats:\n\n", data->edma_id);
1308 spin_lock_bh(&nss_top_main.stats_lock);
1309 for (i = 0; i < NSS_STATS_EDMA_RX_MAX; i++) {
1310 stats_shadow[i] = nss_top_main.stats_edma.rx_stats[data->edma_id][i];
1311 }
1312
1313 spin_unlock_bh(&nss_top_main.stats_lock);
1314
1315 for (i = 0; i < NSS_STATS_EDMA_RX_MAX; i++) {
1316 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
1317 "%s = %llu\n", nss_stats_str_edma_rx[i], stats_shadow[i]);
1318 }
1319
1320 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nedma Rx ring stats end\n\n");
1321 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
1322 kfree(lbuf);
1323 kfree(stats_shadow);
1324
1325 return bytes_read;
1326}
1327
1328/*
1329 * nss_stats_edma_txcmplring_read()
1330 * Read EDMA txcmplring stats
1331 */
1332static ssize_t nss_stats_edma_txcmplring_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
1333{
1334 int32_t i;
1335
1336 /*
1337 * max output lines = #stats + start tag line + end tag line + three blank lines
1338 */
1339 uint32_t max_output_lines = (NSS_STATS_EDMA_TXCMPL_MAX + 2) + 3;
1340 size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
1341 size_t size_wr = 0;
1342 ssize_t bytes_read = 0;
1343 uint64_t *stats_shadow;
1344 struct nss_stats_data *data = fp->private_data;
1345
1346 char *lbuf = kzalloc(size_al, GFP_KERNEL);
1347 if (unlikely(lbuf == NULL)) {
1348 nss_warning("Could not allocate memory for local statistics buffer");
1349 return 0;
1350 }
1351
1352 /*
1353 * Note: The assumption here is that we do not have more than 64 stats
1354 */
1355 stats_shadow = kzalloc(64 * 8, GFP_KERNEL);
1356 if (unlikely(stats_shadow == NULL)) {
1357 nss_warning("Could not allocate memory for local shadow buffer");
1358 kfree(lbuf);
1359 return 0;
1360 }
1361
1362 size_wr = scnprintf(lbuf, size_al, "edma Tx cmpl ring stats start:\n\n");
1363
1364 /*
1365 * Tx cmpl ring stats
1366 */
1367 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "Tx cmpl ring %d stats:\n\n", data->edma_id);
1368 spin_lock_bh(&nss_top_main.stats_lock);
1369 for (i = 0; i < NSS_STATS_EDMA_TXCMPL_MAX; i++) {
1370 stats_shadow[i] = nss_top_main.stats_edma.txcmpl_stats[data->edma_id][i];
1371 }
1372
1373 spin_unlock_bh(&nss_top_main.stats_lock);
1374
1375 for (i = 0; i < NSS_STATS_EDMA_TXCMPL_MAX; i++) {
1376 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
1377 "%s = %llu\n", nss_stats_str_edma_txcmpl[i], stats_shadow[i]);
1378 }
1379
1380 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nedma Tx cmpl ring stats end\n\n");
1381 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
1382 kfree(lbuf);
1383 kfree(stats_shadow);
1384
1385 return bytes_read;
1386}
1387
1388/*
1389 * nss_stats_edma_rxfillring_read()
1390 * Read EDMA rxfillring stats
1391 */
1392static ssize_t nss_stats_edma_rxfillring_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
1393{
1394 int32_t i;
1395
1396 /*
1397 * max output lines = #stats + start tag line + end tag line + three blank lines
1398 */
1399 uint32_t max_output_lines = (NSS_STATS_EDMA_RXFILL_MAX + 2) + 3;
1400 size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
1401 size_t size_wr = 0;
1402 ssize_t bytes_read = 0;
1403 uint64_t *stats_shadow;
1404 struct nss_stats_data *data = fp->private_data;
1405
1406 char *lbuf = kzalloc(size_al, GFP_KERNEL);
1407 if (unlikely(lbuf == NULL)) {
1408 nss_warning("Could not allocate memory for local statistics buffer");
1409 return 0;
1410 }
1411
1412 /*
1413 * Note: The assumption here is that we do not have more than 64 stats
1414 */
1415 stats_shadow = kzalloc(64 * 8, GFP_KERNEL);
1416 if (unlikely(stats_shadow == NULL)) {
1417 nss_warning("Could not allocate memory for local shadow buffer");
1418 kfree(lbuf);
1419 return 0;
1420 }
1421
1422 size_wr = scnprintf(lbuf, size_al, "edma Rx fill ring stats start:\n\n");
1423
1424 /*
1425 * Rx fill ring stats
1426 */
1427 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "Rx fill ring %d stats:\n\n", data->edma_id);
1428 spin_lock_bh(&nss_top_main.stats_lock);
1429 for (i = 0; i < NSS_STATS_EDMA_RXFILL_MAX; i++) {
1430 stats_shadow[i] = nss_top_main.stats_edma.rxfill_stats[data->edma_id][i];
1431 }
1432
1433 spin_unlock_bh(&nss_top_main.stats_lock);
1434
1435 for (i = 0; i < NSS_STATS_EDMA_RXFILL_MAX; i++) {
1436 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
1437 "%s = %llu\n", nss_stats_str_edma_rxfill[i], stats_shadow[i]);
1438 }
1439
1440 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nedma Rx fill ring stats end\n\n");
1441 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
1442 kfree(lbuf);
1443 kfree(stats_shadow);
1444
1445 return bytes_read;
1446}
1447
1448/*
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301449 * nss_stats_eth_rx_read()
1450 * Read ETH_RX stats
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301451 */
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301452static ssize_t nss_stats_eth_rx_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301453{
1454 int32_t i;
1455
1456 /*
1457 * max output lines = #stats + start tag line + end tag line + three blank lines
1458 */
Murat Sezgin99dab642014-08-28 14:40:34 -07001459 uint32_t max_output_lines = (NSS_STATS_NODE_MAX + 2) + (NSS_STATS_ETH_RX_MAX + 3) + (NSS_EXCEPTION_EVENT_ETH_RX_MAX + 3) + 5;
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301460 size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
1461 size_t size_wr = 0;
1462 ssize_t bytes_read = 0;
1463 uint64_t *stats_shadow;
1464
1465 char *lbuf = kzalloc(size_al, GFP_KERNEL);
1466 if (unlikely(lbuf == NULL)) {
1467 nss_warning("Could not allocate memory for local statistics buffer");
1468 return 0;
1469 }
1470
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301471 /*
1472 * Note: The assumption here is that we do not have more than 64 stats
1473 */
1474 stats_shadow = kzalloc(64 * 8, GFP_KERNEL);
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301475 if (unlikely(stats_shadow == NULL)) {
1476 nss_warning("Could not allocate memory for local shadow buffer");
Ankit Dhanuka14999992014-11-12 15:35:11 +05301477 kfree(lbuf);
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301478 return 0;
1479 }
1480
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301481 size_wr = scnprintf(lbuf, size_al,"eth_rx stats start:\n\n");
1482
1483 /*
1484 * Common node stats
1485 */
Abhishek Rastogia1a07972014-04-01 19:43:33 +05301486 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "common node stats:\n\n");
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301487 spin_lock_bh(&nss_top_main.stats_lock);
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301488 for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
1489 stats_shadow[i] = nss_top_main.stats_node[NSS_ETH_RX_INTERFACE][i];
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301490 }
1491
1492 spin_unlock_bh(&nss_top_main.stats_lock);
1493
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301494 for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301495 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301496 "%s = %llu\n", nss_stats_str_node[i], stats_shadow[i]);
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301497 }
1498
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301499 /*
Murat Sezgin99dab642014-08-28 14:40:34 -07001500 * eth_rx node stats
1501 */
1502 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\neth_rx node stats:\n\n");
1503 spin_lock_bh(&nss_top_main.stats_lock);
1504 for (i = 0; (i < NSS_STATS_ETH_RX_MAX); i++) {
1505 stats_shadow[i] = nss_top_main.stats_eth_rx[i];
1506 }
1507
1508 spin_unlock_bh(&nss_top_main.stats_lock);
1509
1510 for (i = 0; (i < NSS_STATS_ETH_RX_MAX); i++) {
1511 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
1512 "%s = %llu\n", nss_stats_str_eth_rx[i], stats_shadow[i]);
1513 }
1514
1515 /*
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301516 * Exception stats
1517 */
Abhishek Rastogia1a07972014-04-01 19:43:33 +05301518 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\neth_rx exception stats:\n\n");
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301519
1520 spin_lock_bh(&nss_top_main.stats_lock);
1521 for (i = 0; (i < NSS_EXCEPTION_EVENT_ETH_RX_MAX); i++) {
1522 stats_shadow[i] = nss_top_main.stats_if_exception_eth_rx[i];
1523 }
1524
1525 spin_unlock_bh(&nss_top_main.stats_lock);
1526
1527 for (i = 0; (i < NSS_EXCEPTION_EVENT_ETH_RX_MAX); i++) {
1528 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
1529 "%s = %llu\n", nss_stats_str_if_exception_eth_rx[i], stats_shadow[i]);
1530 }
1531
1532 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,"\neth_rx stats end\n\n");
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301533 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
1534 kfree(lbuf);
1535 kfree(stats_shadow);
1536
1537 return bytes_read;
1538}
1539
1540/*
1541 * nss_stats_n2h_read()
1542 * Read N2H stats
1543 */
1544static ssize_t nss_stats_n2h_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
1545{
1546 int32_t i;
1547
1548 /*
1549 * max output lines = #stats + start tag line + end tag line + three blank lines
1550 */
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301551 uint32_t max_output_lines = (NSS_STATS_NODE_MAX + 2) + (NSS_STATS_N2H_MAX + 3) + 5;
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301552 size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
1553 size_t size_wr = 0;
1554 ssize_t bytes_read = 0;
1555 uint64_t *stats_shadow;
Murat Sezgin0c0561d2014-04-09 18:55:58 -07001556 int max = NSS_STATS_N2H_MAX - NSS_STATS_NODE_MAX;
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301557
1558 char *lbuf = kzalloc(size_al, GFP_KERNEL);
1559 if (unlikely(lbuf == NULL)) {
1560 nss_warning("Could not allocate memory for local statistics buffer");
1561 return 0;
1562 }
1563
1564 stats_shadow = kzalloc(NSS_STATS_N2H_MAX * 8, GFP_KERNEL);
1565 if (unlikely(stats_shadow == NULL)) {
1566 nss_warning("Could not allocate memory for local shadow buffer");
Ankit Dhanuka14999992014-11-12 15:35:11 +05301567 kfree(lbuf);
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301568 return 0;
1569 }
1570
1571 size_wr = scnprintf(lbuf, size_al, "n2h stats start:\n\n");
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301572
1573 /*
1574 * Common node stats
1575 */
Abhishek Rastogia1a07972014-04-01 19:43:33 +05301576 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "common node stats:\n\n");
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301577 spin_lock_bh(&nss_top_main.stats_lock);
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301578 for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
1579 stats_shadow[i] = nss_top_main.nss[0].stats_n2h[i];
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301580 }
1581
1582 spin_unlock_bh(&nss_top_main.stats_lock);
1583
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301584 for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
1585 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
1586 "%s = %llu\n", nss_stats_str_node[i], stats_shadow[i]);
1587 }
1588
1589 /*
1590 * N2H node stats
1591 */
Abhishek Rastogia1a07972014-04-01 19:43:33 +05301592 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nn2h node stats:\n\n");
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301593 spin_lock_bh(&nss_top_main.stats_lock);
1594 for (i = NSS_STATS_NODE_MAX; (i < NSS_STATS_N2H_MAX); i++) {
1595 stats_shadow[i] = nss_top_main.nss[0].stats_n2h[i];
1596 }
1597
1598 spin_unlock_bh(&nss_top_main.stats_lock);
1599
Murat Sezgin0c0561d2014-04-09 18:55:58 -07001600 for (i = 0; i < max; i++) {
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301601 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
Murat Sezgin0c0561d2014-04-09 18:55:58 -07001602 "%s = %llu\n", nss_stats_str_n2h[i], stats_shadow[i + NSS_STATS_NODE_MAX]);
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301603 }
1604
1605 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nn2h stats end\n\n");
1606 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
1607 kfree(lbuf);
1608 kfree(stats_shadow);
1609
1610 return bytes_read;
1611}
1612
1613/*
Thomas Wuc3e382c2014-10-29 15:35:13 -07001614 * nss_stats_lso_rx_read()
1615 * Read LSO_RX stats
1616 */
1617static ssize_t nss_stats_lso_rx_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
1618{
1619 int32_t i;
1620
1621 /*
1622 	 * max output lines = common node stats block + lso_rx node stats block + start/end tags and blank lines
1623 */
1624 uint32_t max_output_lines = (NSS_STATS_NODE_MAX + 2) + (NSS_STATS_LSO_RX_MAX + 3) + 5;
1625 size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
1626 size_t size_wr = 0;
1627 ssize_t bytes_read = 0;
1628 uint64_t *stats_shadow;
1629
1630 char *lbuf = kzalloc(size_al, GFP_KERNEL);
1631 if (unlikely(lbuf == NULL)) {
1632 nss_warning("Could not allocate memory for local statistics buffer");
1633 return 0;
1634 }
1635
1636 stats_shadow = kzalloc(NSS_STATS_LSO_RX_MAX * 8, GFP_KERNEL);
1637 if (unlikely(stats_shadow == NULL)) {
1638 nss_warning("Could not allocate memory for local shadow buffer");
Ankit Dhanuka14999992014-11-12 15:35:11 +05301639 kfree(lbuf);
Thomas Wuc3e382c2014-10-29 15:35:13 -07001640 return 0;
1641 }
1642
1643 size_wr = scnprintf(lbuf, size_al, "lso_rx stats start:\n\n");
1644
1645 /*
1646 * Common node stats
1647 */
1648 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "common node stats:\n\n");
1649 spin_lock_bh(&nss_top_main.stats_lock);
1650 for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
1651 stats_shadow[i] = nss_top_main.stats_node[NSS_LSO_RX_INTERFACE][i];
1652 }
1653
1654 spin_unlock_bh(&nss_top_main.stats_lock);
1655
1656 for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
1657 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
1658 "%s = %llu\n", nss_stats_str_node[i], stats_shadow[i]);
1659 }
1660
1661 /*
1662 * lso_rx node stats
1663 */
1664 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nlso_rx node stats:\n\n");
1665 spin_lock_bh(&nss_top_main.stats_lock);
1666 for (i = 0; (i < NSS_STATS_LSO_RX_MAX); i++) {
1667 stats_shadow[i] = nss_top_main.stats_lso_rx[i];
1668 }
1669
1670 spin_unlock_bh(&nss_top_main.stats_lock);
1671
1672 for (i = 0; i < NSS_STATS_LSO_RX_MAX; i++) {
1673 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
1674 "%s = %llu\n", nss_stats_str_lso_rx[i], stats_shadow[i]);
1675 }
1676
1677 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nlso_rx stats end\n\n");
1678 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
1679 kfree(lbuf);
1680 kfree(stats_shadow);
1681
1682 return bytes_read;
1683}
1684
1685/*
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301686 * nss_stats_drv_read()
1687 * Read HLOS driver stats
1688 */
1689static ssize_t nss_stats_drv_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
1690{
1691 int32_t i;
1692
1693 /*
1694 * max output lines = #stats + start tag line + end tag line + three blank lines
1695 */
1696 uint32_t max_output_lines = NSS_STATS_DRV_MAX + 5;
1697 size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
1698 size_t size_wr = 0;
1699 ssize_t bytes_read = 0;
1700 uint64_t *stats_shadow;
1701
1702 char *lbuf = kzalloc(size_al, GFP_KERNEL);
1703 if (unlikely(lbuf == NULL)) {
1704 nss_warning("Could not allocate memory for local statistics buffer");
1705 return 0;
1706 }
1707
1708 stats_shadow = kzalloc(NSS_STATS_DRV_MAX * 8, GFP_KERNEL);
1709 if (unlikely(stats_shadow == NULL)) {
1710 nss_warning("Could not allocate memory for local shadow buffer");
Ankit Dhanuka14999992014-11-12 15:35:11 +05301711 kfree(lbuf);
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301712 return 0;
1713 }
1714
1715 size_wr = scnprintf(lbuf, size_al, "drv stats start:\n\n");
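	/*
	 * The drv counters are snapshotted with NSS_PKT_STATS_READ() per
	 * counter, which is assumed to read each counter atomically; unlike
	 * the other readers, no stats_lock is taken around this loop.
	 */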
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301716 for (i = 0; (i < NSS_STATS_DRV_MAX); i++) {
Sundarajan Srinivasan62fee7e2015-01-22 11:13:10 -08001717 stats_shadow[i] = NSS_PKT_STATS_READ(&nss_top_main.stats_drv[i]);
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301718 }
1719
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301720 for (i = 0; (i < NSS_STATS_DRV_MAX); i++) {
1721 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
1722 "%s = %llu\n", nss_stats_str_drv[i], stats_shadow[i]);
1723 }
1724
1725 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\ndrv stats end\n\n");
1726 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
1727 kfree(lbuf);
1728 kfree(stats_shadow);
1729
1730 return bytes_read;
1731}
1732
1733/*
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301734 * nss_stats_pppoe_read()
1735 * Read PPPoE stats
1736 */
1737static ssize_t nss_stats_pppoe_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
1738{
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301739 int32_t i, j, k;
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301740
1741 /*
1742 	 * max output lines = common node stats block + pppoe node stats block + per-interface/per-session exception blocks + start/end tags and blank lines
1743 */
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301744 uint32_t max_output_lines = (NSS_STATS_NODE_MAX + 2) + (NSS_STATS_PPPOE_MAX + 3) +
Ankit Dhanukaa1569ce2014-05-13 19:58:06 +05301745 ((NSS_MAX_PHYSICAL_INTERFACES * NSS_PPPOE_NUM_SESSION_PER_INTERFACE * (NSS_PPPOE_EXCEPTION_EVENT_MAX + 5)) + 3) + 5;
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301746 size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
1747 size_t size_wr = 0;
1748 ssize_t bytes_read = 0;
1749 uint64_t *stats_shadow;
1750
1751 char *lbuf = kzalloc(size_al, GFP_KERNEL);
1752 if (unlikely(lbuf == NULL)) {
1753 nss_warning("Could not allocate memory for local statistics buffer");
1754 return 0;
1755 }
1756
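	/*
	 * 64 entries is assumed to be a safe upper bound for the largest
	 * group snapshotted below (common node, PPPoE node and per-session
	 * exception counters).
	 */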
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301757 stats_shadow = kzalloc(64 * 8, GFP_KERNEL);
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301758 if (unlikely(stats_shadow == NULL)) {
1759 nss_warning("Could not allocate memory for local shadow buffer");
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301760 kfree(lbuf);
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301761 return 0;
1762 }
1763
1764 size_wr = scnprintf(lbuf, size_al, "pppoe stats start:\n\n");
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301765
1766 /*
1767 * Common node stats
1768 */
Abhishek Rastogia1a07972014-04-01 19:43:33 +05301769 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "common node stats:\n\n");
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301770 spin_lock_bh(&nss_top_main.stats_lock);
1771 for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
1772 stats_shadow[i] = nss_top_main.stats_node[NSS_PPPOE_RX_INTERFACE][i];
1773 }
1774
1775 spin_unlock_bh(&nss_top_main.stats_lock);
1776
1777 for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
1778 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
1779 "%s = %llu\n", nss_stats_str_node[i], stats_shadow[i]);
1780 }
1781
1782 /*
1783 * PPPoE node stats
1784 */
Murat Sezgin2f9241a2015-06-25 13:01:51 -07001785 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\npppoe node stats:\n\n");
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301786 spin_lock_bh(&nss_top_main.stats_lock);
1787 for (i = 0; (i < NSS_STATS_PPPOE_MAX); i++) {
1788 stats_shadow[i] = nss_top_main.stats_pppoe[i];
1789 }
1790
1791 spin_unlock_bh(&nss_top_main.stats_lock);
1792
1793 for (i = 0; (i < NSS_STATS_PPPOE_MAX); i++) {
1794 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
1795 "%s = %llu\n", nss_stats_str_pppoe[i], stats_shadow[i]);
1796 }
1797
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301798 /*
1799 * Exception stats
1800 */
1801 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nException PPPoE:\n\n");
1802
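	/*
	 * Interfaces and sessions are indexed from 1 in
	 * stats_if_exception_pppoe[], while the local shadow array is
	 * 0-based, hence the k - 1 below.
	 */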
Murat Sezgin2f9241a2015-06-25 13:01:51 -07001803 for (j = 1; j <= NSS_MAX_PHYSICAL_INTERFACES; j++) {
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301804 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nInterface %d:\n\n", j);
1805
1806 spin_lock_bh(&nss_top_main.stats_lock);
Murat Sezgin2f9241a2015-06-25 13:01:51 -07001807 for (k = 1; k <= NSS_PPPOE_NUM_SESSION_PER_INTERFACE; k++) {
Ankit Dhanukaa1569ce2014-05-13 19:58:06 +05301808 for (i = 0; (i < NSS_PPPOE_EXCEPTION_EVENT_MAX); i++) {
Murat Sezgin2f9241a2015-06-25 13:01:51 -07001809 stats_shadow_pppoe_except[k - 1][i] = nss_top_main.stats_if_exception_pppoe[j][k][i];
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301810 }
1811 }
1812
1813 spin_unlock_bh(&nss_top_main.stats_lock);
1814
Murat Sezgin2f9241a2015-06-25 13:01:51 -07001815 for (k = 1; k <= NSS_PPPOE_NUM_SESSION_PER_INTERFACE; k++) {
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301816 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "%d. Session\n", k);
Ankit Dhanukaa1569ce2014-05-13 19:58:06 +05301817 for (i = 0; (i < NSS_PPPOE_EXCEPTION_EVENT_MAX); i++) {
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301818 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
1819 "%s = %llu\n",
1820 nss_stats_str_if_exception_pppoe[i],
Murat Sezgin2f9241a2015-06-25 13:01:51 -07001821 stats_shadow_pppoe_except[k - 1][i]);
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301822 }
1823 }
1824
Abhishek Rastogi84d95d02014-03-26 19:31:31 +05301825 }
1826
Murat Sezgin2f9241a2015-06-25 13:01:51 -07001827 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\npppoe stats end\n\n");
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301828 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
1829 kfree(lbuf);
1830 kfree(stats_shadow);
1831
1832 return bytes_read;
1833}
1834
1835/*
1836 * nss_stats_gmac_read()
1837 * Read GMAC stats
1838 */
1839static ssize_t nss_stats_gmac_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
1840{
1841 uint32_t i, id;
1842
1843 /*
1844 * max output lines = ((#stats + start tag + one blank) * #GMACs) + start/end tag + 3 blank
1845 */
1846 uint32_t max_output_lines = ((NSS_STATS_GMAC_MAX + 2) * NSS_MAX_PHYSICAL_INTERFACES) + 5;
1847 size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
1848 size_t size_wr = 0;
1849 ssize_t bytes_read = 0;
1850 uint64_t *stats_shadow;
1851
1852 char *lbuf = kzalloc(size_al, GFP_KERNEL);
1853 if (unlikely(lbuf == NULL)) {
1854 nss_warning("Could not allocate memory for local statistics buffer");
1855 return 0;
1856 }
1857
1858 stats_shadow = kzalloc(NSS_STATS_GMAC_MAX * 8, GFP_KERNEL);
1859 if (unlikely(stats_shadow == NULL)) {
1860 nss_warning("Could not allocate memory for local shadow buffer");
Ankit Dhanuka14999992014-11-12 15:35:11 +05301861 kfree(lbuf);
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05301862 return 0;
1863 }
1864
1865 size_wr = scnprintf(lbuf, size_al, "gmac stats start:\n\n");
1866
1867 for (id = 0; id < NSS_MAX_PHYSICAL_INTERFACES; id++) {
1868 spin_lock_bh(&nss_top_main.stats_lock);
1869 for (i = 0; (i < NSS_STATS_GMAC_MAX); i++) {
1870 stats_shadow[i] = nss_top_main.stats_gmac[id][i];
1871 }
1872
1873 spin_unlock_bh(&nss_top_main.stats_lock);
1874
1875 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "GMAC ID: %d\n", id);
1876 for (i = 0; (i < NSS_STATS_GMAC_MAX); i++) {
1877 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
1878 "%s = %llu\n", nss_stats_str_gmac[i], stats_shadow[i]);
1879 }
1880 		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n");
1881 }
1882
1883 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\ngmac stats end\n\n");
1884 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
1885 kfree(lbuf);
1886 kfree(stats_shadow);
1887
1888 return bytes_read;
1889}
1890
Saurabh Misra09dddeb2014-09-30 16:38:07 -07001891/*
Bharath M Kumarcc666e92014-12-24 19:17:28 +05301892 * nss_stats_wifi_read()
Stephen Wangaed46332016-12-12 17:29:03 -08001893 * Read wifi statistics
Bharath M Kumarcc666e92014-12-24 19:17:28 +05301894 */
1895static ssize_t nss_stats_wifi_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
1896{
1897 uint32_t i, id;
1898
1899 /*
1900 * max output lines = ((#stats + start tag + one blank) * #WIFI RADIOs) + start/end tag + 3 blank
1901 */
1902 uint32_t max_output_lines = ((NSS_STATS_WIFI_MAX + 2) * NSS_MAX_WIFI_RADIO_INTERFACES) + 5;
1903 size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
1904 size_t size_wr = 0;
1905 ssize_t bytes_read = 0;
1906 uint64_t *stats_shadow;
1907
1908 char *lbuf = kzalloc(size_al, GFP_KERNEL);
1909 if (unlikely(lbuf == NULL)) {
1910 nss_warning("Could not allocate memory for local statistics buffer");
1911 return 0;
1912 }
1913
1914 stats_shadow = kzalloc(NSS_STATS_WIFI_MAX * 8, GFP_KERNEL);
1915 if (unlikely(stats_shadow == NULL)) {
1916 nss_warning("Could not allocate memory for local shadow buffer");
1917 kfree(lbuf);
1918 return 0;
1919 }
1920
1921 size_wr = scnprintf(lbuf, size_al, "wifi stats start:\n\n");
1922
1923 for (id = 0; id < NSS_MAX_WIFI_RADIO_INTERFACES; id++) {
1924 spin_lock_bh(&nss_top_main.stats_lock);
1925 for (i = 0; (i < NSS_STATS_WIFI_MAX); i++) {
1926 stats_shadow[i] = nss_top_main.stats_wifi[id][i];
1927 }
1928
1929 spin_unlock_bh(&nss_top_main.stats_lock);
1930
1931 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "WIFI ID: %d\n", id);
1932 for (i = 0; (i < NSS_STATS_WIFI_MAX); i++) {
1933 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
1934 "%s = %llu\n", nss_stats_str_wifi[i], stats_shadow[i]);
1935 }
1936 		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n");
1937 }
1938
1939 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nwifi stats end\n\n");
1940 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
1941 kfree(lbuf);
1942 kfree(stats_shadow);
1943
1944 return bytes_read;
1945}
1946
1947/*
Tushar Mathurff8741b2015-12-02 20:28:59 +05301948 * nss_stats_dtls_read()
Thomas Wu71c5ecc2016-06-21 11:15:52 -07001949 * Read DTLS session statistics
Tushar Mathurff8741b2015-12-02 20:28:59 +05301950 */
1951static ssize_t nss_stats_dtls_read(struct file *fp, char __user *ubuf,
1952 size_t sz, loff_t *ppos)
1953{
1954 uint32_t max_output_lines = 2 + (NSS_MAX_DTLS_SESSIONS
1955 * (NSS_STATS_DTLS_SESSION_MAX + 2)) + 2;
1956 size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
1957 size_t size_wr = 0;
1958 ssize_t bytes_read = 0;
1959 struct net_device *dev;
1960 int id, i;
1961 struct nss_stats_dtls_session_debug *dtls_session_stats = NULL;
1962
1963 char *lbuf = kzalloc(size_al, GFP_KERNEL);
1964 if (unlikely(lbuf == NULL)) {
1965 nss_warning("Could not allocate memory for local statistics buffer");
1966 return 0;
1967 }
1968
1969 dtls_session_stats = kzalloc((sizeof(struct nss_stats_dtls_session_debug)
1970 * NSS_MAX_DTLS_SESSIONS), GFP_KERNEL);
1971 if (unlikely(dtls_session_stats == NULL)) {
1972 nss_warning("Could not allocate memory for populating DTLS stats");
1973 kfree(lbuf);
1974 return 0;
1975 }
1976
1977 /*
1978 * Get all stats
1979 */
1980 nss_dtls_session_debug_stats_get(dtls_session_stats);
1981
1982 /*
1983 * Session stats
1984 */
1985 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
1986 "\nDTLS session stats start:\n\n");
1987
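	/*
	 * Valid entries are assumed to be packed from index 0, so the first
	 * invalid entry marks the end of the active sessions.
	 */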
1988 for (id = 0; id < NSS_MAX_DTLS_SESSIONS; id++) {
1989 if (!dtls_session_stats[id].valid)
1990 break;
1991
1992 dev = dev_get_by_index(&init_net, dtls_session_stats[id].if_index);
1993 if (likely(dev)) {
1994 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
1995 "%d. nss interface id=%d, netdevice=%s\n",
1996 id, dtls_session_stats[id].if_num,
1997 dev->name);
1998 dev_put(dev);
1999 } else {
2000 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2001 "%d. nss interface id=%d\n", id,
2002 dtls_session_stats[id].if_num);
2003 }
2004
2005 for (i = 0; i < NSS_STATS_DTLS_SESSION_MAX; i++) {
2006 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2007 "\t%s = %llu\n",
2008 nss_stats_str_dtls_session_debug_stats[i],
2009 dtls_session_stats[id].stats[i]);
2010 }
2011
2012 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n");
2013 }
2014
2015 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2016 "\nDTLS session stats end\n");
2017 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, size_wr);
2018
2019 kfree(dtls_session_stats);
2020 kfree(lbuf);
2021 return bytes_read;
2022}
2023
Tushar Mathurff8741b2015-12-02 20:28:59 +05302024/*
Thomas Wu71c5ecc2016-06-21 11:15:52 -07002025 * nss_stats_gre_tunnel_read()
2026 * Read GRE Tunnel session statistics
2027 */
2028static ssize_t nss_stats_gre_tunnel_read(struct file *fp, char __user *ubuf,
2029 size_t sz, loff_t *ppos)
2030{
2031 uint32_t max_output_lines = 2 + (NSS_MAX_GRE_TUNNEL_SESSIONS
2032 * (NSS_STATS_GRE_TUNNEL_SESSION_MAX + 2)) + 2;
2033 size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
2034 size_t size_wr = 0;
2035 ssize_t bytes_read = 0;
2036 struct net_device *dev;
2037 int id, i;
2038 struct nss_stats_gre_tunnel_session_debug *gre_tunnel_session_stats = NULL;
2039
2040 char *lbuf = kzalloc(size_al, GFP_KERNEL);
2041 if (unlikely(lbuf == NULL)) {
2042 nss_warning("Could not allocate memory for local statistics buffer");
2043 return 0;
2044 }
2045
2046 gre_tunnel_session_stats = kzalloc((sizeof(struct nss_stats_gre_tunnel_session_debug)
2047 * NSS_MAX_GRE_TUNNEL_SESSIONS), GFP_KERNEL);
2048 if (unlikely(gre_tunnel_session_stats == NULL)) {
2049 nss_warning("Could not allocate memory for populating GRE Tunnel stats");
2050 kfree(lbuf);
2051 return 0;
2052 }
2053
2054 /*
2055 * Get all stats
2056 */
2057 nss_gre_tunnel_session_debug_stats_get(gre_tunnel_session_stats);
2058
2059 /*
2060 * Session stats
2061 */
2062 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2063 "\nGRE Tunnel session stats start:\n\n");
2064
2065 for (id = 0; id < NSS_MAX_GRE_TUNNEL_SESSIONS; id++) {
2066 if (!gre_tunnel_session_stats[id].valid)
2067 break;
2068
2069 dev = dev_get_by_index(&init_net, gre_tunnel_session_stats[id].if_index);
2070 if (likely(dev)) {
2071 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2072 "%d. nss interface id=%d, netdevice=%s\n",
2073 id, gre_tunnel_session_stats[id].if_num,
2074 dev->name);
2075 dev_put(dev);
2076 } else {
2077 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2078 "%d. nss interface id=%d\n", id,
2079 gre_tunnel_session_stats[id].if_num);
2080 }
2081
2082 for (i = 0; i < NSS_STATS_GRE_TUNNEL_SESSION_MAX; i++) {
2083 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2084 "\t%s = %llu\n",
2085 nss_stats_str_gre_tunnel_session_debug_stats[i],
2086 gre_tunnel_session_stats[id].stats[i]);
2087 }
2088
2089 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n");
2090 }
2091
2092 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2093 "\nGRE Tunnel session stats end\n");
2094 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, size_wr);
2095
2096 kfree(gre_tunnel_session_stats);
2097 kfree(lbuf);
2098 return bytes_read;
2099}
2100
2101/*
ratheesh kannoth7af985d2015-06-24 15:08:40 +05302102 * nss_stats_l2tpv2_read()
2103 * Read l2tpv2 statistics
2104 */
2105static ssize_t nss_stats_l2tpv2_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
2106{
2107
2108 uint32_t max_output_lines = 2 /* header & footer for session stats */
2109 				+ NSS_MAX_L2TPV2_DYNAMIC_INTERFACES * (NSS_STATS_L2TPV2_SESSION_MAX + 2) /* session stats */
2110 + 2;
2111 	size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
2112 size_t size_wr = 0;
2113 ssize_t bytes_read = 0;
2114 struct net_device *dev;
2115 struct nss_stats_l2tpv2_session_debug l2tpv2_session_stats[NSS_MAX_L2TPV2_DYNAMIC_INTERFACES];
2116 int id, i;
2117
2118 char *lbuf = kzalloc(size_al, GFP_KERNEL);
2119 if (unlikely(lbuf == NULL)) {
2120 nss_warning("Could not allocate memory for local statistics buffer");
2121 return 0;
2122 }
2123
2124 memset(&l2tpv2_session_stats, 0, sizeof(struct nss_stats_l2tpv2_session_debug) * NSS_MAX_L2TPV2_DYNAMIC_INTERFACES);
2125
2126 /*
2127 * Get all stats
2128 */
2129 nss_l2tpv2_session_debug_stats_get((void *)&l2tpv2_session_stats);
2130
2131 /*
2132 * Session stats
2133 */
2134 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nl2tp v2 session stats start:\n\n");
2135 for (id = 0; id < NSS_MAX_L2TPV2_DYNAMIC_INTERFACES; id++) {
2136
2137 if (!l2tpv2_session_stats[id].valid) {
2138 break;
2139 }
2140
2141 dev = dev_get_by_index(&init_net, l2tpv2_session_stats[id].if_index);
2142 if (likely(dev)) {
2143
2144 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "%d. nss interface id=%d, netdevice=%s\n", id,
2145 l2tpv2_session_stats[id].if_num, dev->name);
2146 dev_put(dev);
2147 } else {
2148 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "%d. nss interface id=%d\n", id,
2149 l2tpv2_session_stats[id].if_num);
2150 }
2151
2152 for (i = 0; i < NSS_STATS_L2TPV2_SESSION_MAX; i++) {
2153 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2154 "\t%s = %llu\n", nss_stats_str_l2tpv2_session_debug_stats[i],
2155 l2tpv2_session_stats[id].stats[i]);
2156 }
2157 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n");
2158 }
2159
2160 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nl2tp v2 session stats end\n");
2161 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, size_wr);
2162
2163 kfree(lbuf);
2164 return bytes_read;
2165}
2166
2167/*
ratheesh kannotha1245c32015-11-04 16:45:43 +05302168 * nss_stats_map_t_read()
2169 * Read map_t statistics
2170 */
2171static ssize_t nss_stats_map_t_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
2172{
2173
2174 uint32_t max_output_lines = 2 /* header & footer for instance stats */
2175 				+ NSS_MAX_MAP_T_DYNAMIC_INTERFACES * (NSS_STATS_MAP_T_MAX + 2) /* instance stats */
2176 + 2;
2177 size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
2178 size_t size_wr = 0;
2179 ssize_t bytes_read = 0;
2180 struct net_device *dev;
2181 struct nss_stats_map_t_instance_debug map_t_instance_stats[NSS_MAX_MAP_T_DYNAMIC_INTERFACES];
2182 int id, i;
2183
2184 char *lbuf = kzalloc(size_al, GFP_KERNEL);
2185 if (unlikely(!lbuf)) {
2186 nss_warning("Could not allocate memory for local statistics buffer");
2187 return 0;
2188 }
2189
2190 memset(&map_t_instance_stats, 0, sizeof(struct nss_stats_map_t_instance_debug) * NSS_MAX_MAP_T_DYNAMIC_INTERFACES);
2191
2192 /*
2193 * Get all stats
2194 */
2195 nss_map_t_instance_debug_stats_get((void *)&map_t_instance_stats);
2196
2197 /*
2198 * Session stats
2199 */
2200 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nmap_t instance stats start:\n\n");
2201 for (id = 0; id < NSS_MAX_MAP_T_DYNAMIC_INTERFACES; id++) {
2202
2203 if (!map_t_instance_stats[id].valid) {
2204 break;
2205 }
2206
2207 dev = dev_get_by_index(&init_net, map_t_instance_stats[id].if_index);
2208 if (likely(dev)) {
2209
2210 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "%d. nss interface id=%d, netdevice=%s\n", id,
2211 map_t_instance_stats[id].if_num, dev->name);
2212 dev_put(dev);
2213 } else {
2214 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "%d. nss interface id=%d\n", id,
2215 map_t_instance_stats[id].if_num);
2216 }
2217
2218 for (i = 0; i < NSS_STATS_MAP_T_MAX; i++) {
2219 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2220 "\t%s = %llu\n", nss_stats_str_map_t_instance_debug_stats[i],
2221 map_t_instance_stats[id].stats[i]);
2222 }
2223 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n");
2224 }
2225
2226 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nmap_t instance stats end\n");
2227 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, size_wr);
2228
2229 kfree(lbuf);
2230 return bytes_read;
2231}
2232
2233/*
Amit Gupta316729b2016-08-12 12:21:15 +05302234 * nss_stats_ppe_conn_read()
2235 * Read ppe connection stats
2236 */
2237static ssize_t nss_stats_ppe_conn_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
2238{
2239
2240 int i;
2241 char *lbuf = NULL;
2242 size_t size_wr = 0;
2243 ssize_t bytes_read = 0;
2244 uint32_t ppe_stats[NSS_STATS_PPE_CONN_MAX];
2245 uint32_t max_output_lines = 2 /* header & footer for session stats */
2246 + NSS_STATS_PPE_CONN_MAX /* PPE flow counters */
2247 + 2;
2248 size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
2249
2250
2251 lbuf = kzalloc(size_al, GFP_KERNEL);
2252 if (unlikely(lbuf == NULL)) {
2253 nss_warning("Could not allocate memory for local statistics buffer");
2254 return 0;
2255 }
2256
2257 memset(&ppe_stats, 0, sizeof(uint32_t) * NSS_STATS_PPE_CONN_MAX);
2258
2259 /*
2260 * Get all stats
2261 */
2262 nss_ppe_stats_conn_get(ppe_stats);
2263
2264 /*
2265 * flow stats
2266 */
2267 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nppe flow counters start:\n\n");
2268
2269 for (i = 0; i < NSS_STATS_PPE_CONN_MAX; i++) {
2270 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2271 "\t%s = %u\n", nss_stats_str_ppe_conn[i],
2272 ppe_stats[i]);
2273 }
2274
2275 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n");
2276
2277 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nppe flow counters end\n");
2278 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, size_wr);
2279
2280 kfree(lbuf);
2281 return bytes_read;
2282}
2283
2284/*
2285 * nss_stats_ppe_l3_read()
2286 * Read ppe L3 debug stats
2287 */
2288static ssize_t nss_stats_ppe_l3_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
2289{
2290
2291 int i;
2292 char *lbuf = NULL;
2293 size_t size_wr = 0;
2294 ssize_t bytes_read = 0;
2295 uint32_t ppe_stats[NSS_STATS_PPE_L3_MAX];
2296 uint32_t max_output_lines = 2 /* header & footer for session stats */
2297 					+ NSS_STATS_PPE_L3_MAX /* PPE L3 debug counters */
2298 + 2;
2299 size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
2300
2301 lbuf = kzalloc(size_al, GFP_KERNEL);
2302 if (unlikely(!lbuf)) {
2303 nss_warning("Could not allocate memory for local statistics buffer");
2304 return 0;
2305 }
2306
2307 memset(ppe_stats, 0, sizeof(uint32_t) * NSS_STATS_PPE_L3_MAX);
2308
2309 /*
2310 * Get all stats
2311 */
2312 nss_ppe_stats_l3_get(ppe_stats);
2313
2314 /*
2315 	 * L3 debug stats
2316 */
2317 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nppe l3 debug stats start:\n\n");
2318
2319 for (i = 0; i < NSS_STATS_PPE_L3_MAX; i++) {
2320 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2321 "\t%s = 0x%x\n", nss_stats_str_ppe_l3[i],
2322 ppe_stats[i]);
2323 }
2324
2325 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n");
2326
2327 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nppe l3 debug stats end\n");
2328 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, size_wr);
2329
2330 kfree(lbuf);
2331 return bytes_read;
2332}
2333
2334/*
2335 * nss_stats_ppe_code_read()
2336 * Read ppe CPU & DROP code
2337 */
2338static ssize_t nss_stats_ppe_code_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
2339{
2340
2341 int i;
2342 char *lbuf = NULL;
2343 size_t size_wr = 0;
2344 ssize_t bytes_read = 0;
2345 uint32_t ppe_stats[NSS_STATS_PPE_CODE_MAX];
2346 uint32_t max_output_lines = 2 /* header & footer for session stats */
2347 					+ NSS_STATS_PPE_CODE_MAX /* PPE CPU and drop code counters */
2348 + 2;
2349 size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
2350
2351 lbuf = kzalloc(size_al, GFP_KERNEL);
2352 if (unlikely(!lbuf)) {
2353 nss_warning("Could not allocate memory for local statistics buffer");
2354 return 0;
2355 }
2356
2357 memset(ppe_stats, 0, sizeof(uint32_t) * NSS_STATS_PPE_CODE_MAX);
2358
2359 /*
2360 * Get all stats
2361 */
2362 nss_ppe_stats_code_get(ppe_stats);
2363
2364 /*
2365 	 * CPU and drop code stats
2366 */
2367 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nppe session stats start:\n\n");
2368
2369 for (i = 0; i < NSS_STATS_PPE_CODE_MAX; i++) {
2370 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2371 "\t%s = %u\n", nss_stats_str_ppe_code[i],
2372 ppe_stats[i]);
2373 }
2374
2375 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n");
2376
2377 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nppe session stats end\n");
2378 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, size_wr);
2379
2380 kfree(lbuf);
2381 return bytes_read;
2382}
2383
2384/*
Shyam Sunder66e889d2015-11-02 15:31:20 +05302385 * nss_stats_pptp_read()
2386 * Read pptp statistics
2387 */
2388static ssize_t nss_stats_pptp_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
2389{
2390
2391 uint32_t max_output_lines = 2 /* header & footer for session stats */
2392 				+ NSS_MAX_PPTP_DYNAMIC_INTERFACES * (NSS_STATS_PPTP_SESSION_MAX + 2) /* session stats */
2393 + 2;
2394 	size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
2395 size_t size_wr = 0;
2396 ssize_t bytes_read = 0;
2397 struct net_device *dev;
2398 struct nss_stats_pptp_session_debug pptp_session_stats[NSS_MAX_PPTP_DYNAMIC_INTERFACES];
2399 int id, i;
2400
2401 char *lbuf = kzalloc(size_al, GFP_KERNEL);
2402 if (unlikely(lbuf == NULL)) {
2403 nss_warning("Could not allocate memory for local statistics buffer");
2404 return 0;
2405 }
2406
2407 memset(&pptp_session_stats, 0, sizeof(struct nss_stats_pptp_session_debug) * NSS_MAX_PPTP_DYNAMIC_INTERFACES);
2408
2409 /*
2410 * Get all stats
2411 */
2412 nss_pptp_session_debug_stats_get((void *)&pptp_session_stats);
2413
2414 /*
2415 * Session stats
2416 */
2417 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\npptp session stats start:\n\n");
2418 for (id = 0; id < NSS_MAX_PPTP_DYNAMIC_INTERFACES; id++) {
2419
2420 if (!pptp_session_stats[id].valid) {
2421 break;
2422 }
2423
2424 dev = dev_get_by_index(&init_net, pptp_session_stats[id].if_index);
2425 if (likely(dev)) {
2426
2427 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "%d. nss interface id=%d, netdevice=%s\n", id,
2428 pptp_session_stats[id].if_num, dev->name);
2429 dev_put(dev);
2430 } else {
2431 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "%d. nss interface id=%d\n", id,
2432 pptp_session_stats[id].if_num);
2433 }
2434
2435 for (i = 0; i < NSS_STATS_PPTP_SESSION_MAX; i++) {
2436 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2437 "\t%s = %llu\n", nss_stats_str_pptp_session_debug_stats[i],
2438 pptp_session_stats[id].stats[i]);
2439 }
2440 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n");
2441 }
2442
2443 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\npptp session stats end\n");
2444 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, size_wr);
2445
2446 kfree(lbuf);
2447 return bytes_read;
2448}
2449
2450/*
Ankit Dhanuka14999992014-11-12 15:35:11 +05302451 * nss_stats_sjack_read()
2452 * Read SJACK stats
2453 */
2454static ssize_t nss_stats_sjack_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
2455{
2456 int32_t i;
2457 /*
2458 * max output lines = #stats + start tag line + end tag line + three blank lines
2459 */
2460 uint32_t max_output_lines = NSS_STATS_NODE_MAX + 5;
2461 size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
2462 size_t size_wr = 0;
2463 ssize_t bytes_read = 0;
2464 uint64_t *stats_shadow;
2465
2466 char *lbuf = kzalloc(size_al, GFP_KERNEL);
2467 if (unlikely(lbuf == NULL)) {
2468 nss_warning("Could not allocate memory for local statistics buffer");
2469 return 0;
2470 }
2471
2472 stats_shadow = kzalloc(NSS_STATS_NODE_MAX * 8, GFP_KERNEL);
2473 if (unlikely(stats_shadow == NULL)) {
2474 nss_warning("Could not allocate memory for local shadow buffer");
2475 kfree(lbuf);
2476 return 0;
2477 }
2478
2479 size_wr = scnprintf(lbuf, size_al, "sjack stats start:\n\n");
2480
2481 /*
2482 * Common node stats
2483 */
2484 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "common node stats:\n\n");
2485 spin_lock_bh(&nss_top_main.stats_lock);
2486 for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
2487 stats_shadow[i] = nss_top_main.stats_node[NSS_SJACK_INTERFACE][i];
2488 }
2489
2490 spin_unlock_bh(&nss_top_main.stats_lock);
2491
2492 for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
2493 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2494 "%s = %llu\n", nss_stats_str_node[i], stats_shadow[i]);
2495 }
2496
2497 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nsjack stats end\n\n");
2498
2499 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
2500 kfree(lbuf);
2501 kfree(stats_shadow);
2502
2503 return bytes_read;
2504}
2505
2506/*
Stephen Wang9779d952015-10-28 11:39:07 -07002507 * nss_stats_portid_read()
2508 * Read PortID stats
2509 */
2510static ssize_t nss_stats_portid_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
2511{
2512 int32_t i;
2513 /*
2514 * max output lines = #stats + start tag line + end tag line + three blank lines
2515 */
2516 uint32_t max_output_lines = NSS_STATS_NODE_MAX + NSS_STATS_PORTID_MAX + 5;
2517 size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
2518 size_t size_wr = 0;
2519 ssize_t bytes_read = 0;
2520 uint64_t *stats_shadow;
2521
2522 char *lbuf = kzalloc(size_al, GFP_KERNEL);
2523 if (unlikely(lbuf == NULL)) {
2524 nss_warning("Could not allocate memory for local statistics buffer");
2525 return 0;
2526 }
2527
2528 stats_shadow = kzalloc(NSS_STATS_NODE_MAX * 8, GFP_KERNEL);
2529 if (unlikely(stats_shadow == NULL)) {
2530 nss_warning("Could not allocate memory for local shadow buffer");
2531 kfree(lbuf);
2532 return 0;
2533 }
2534
2535 size_wr = scnprintf(lbuf, size_al, "portid stats start:\n\n");
2536
2537 /*
2538 * Common node stats
2539 */
2540 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "common node stats:\n\n");
2541 spin_lock_bh(&nss_top_main.stats_lock);
2542 for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
2543 stats_shadow[i] = nss_top_main.stats_node[NSS_PORTID_INTERFACE][i];
2544 }
2545
2546 spin_unlock_bh(&nss_top_main.stats_lock);
2547
2548 for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
2549 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2550 "%s = %llu\n", nss_stats_str_node[i], stats_shadow[i]);
2551 }
2552
2553 /*
2554 * PortID node stats
2555 */
2556 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nportid node stats:\n\n");
2557
2558 spin_lock_bh(&nss_top_main.stats_lock);
2559 for (i = 0; (i < NSS_STATS_PORTID_MAX); i++) {
2560 stats_shadow[i] = nss_top_main.stats_portid[i];
2561 }
2562
2563 spin_unlock_bh(&nss_top_main.stats_lock);
2564
2565 for (i = 0; (i < NSS_STATS_PORTID_MAX); i++) {
2566 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
2567 "%s = %llu\n", nss_stats_str_portid[i], stats_shadow[i]);
2568 }
2569
2570 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nportid stats end\n\n");
2571
2572 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
2573 kfree(lbuf);
2574 kfree(stats_shadow);
2575
2576 return bytes_read;
2577}
2578
2579/*
Thomas Wu71c5ecc2016-06-21 11:15:52 -07002580 * nss_stats_capwap_encap()
2581 * Make a row for CAPWAP encap stats.
Saurabh Misra09dddeb2014-09-30 16:38:07 -07002582 */
2583static ssize_t nss_stats_capwap_encap(char *line, int len, int i, struct nss_capwap_tunnel_stats *s)
2584{
Saurabh Misra3f66e872015-04-03 11:30:42 -07002585 char *header[] = { "packets", "bytes", "fragments", "drop_ref", "drop_ver", "drop_unalign",
2586 "drop_hroom", "drop_dtls", "drop_nwireless", "drop_qfull", "drop_memfail", "unknown" };
Saurabh Misra09dddeb2014-09-30 16:38:07 -07002587 uint64_t tcnt = 0;
2588
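	/*
	 * Each call formats one "name = value" row selected by i; the
	 * default case returns 0 so the caller knows the table is done.
	 * The trailing "unknown" label is never emitted.
	 */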
2589 switch (i) {
2590 case 0:
2591 tcnt = s->pnode_stats.tx_packets;
2592 break;
2593 case 1:
2594 tcnt = s->pnode_stats.tx_bytes;
2595 break;
2596 case 2:
Saurabh Misra09dddeb2014-09-30 16:38:07 -07002597 tcnt = s->tx_segments;
2598 break;
Saurabh Misra3f66e872015-04-03 11:30:42 -07002599 case 3:
2600 tcnt = s->tx_dropped_sg_ref;
2601 break;
Saurabh Misra09dddeb2014-09-30 16:38:07 -07002602 case 4:
Saurabh Misra3f66e872015-04-03 11:30:42 -07002603 tcnt = s->tx_dropped_ver_mis;
Saurabh Misra09dddeb2014-09-30 16:38:07 -07002604 break;
2605 case 5:
Saurabh Misra3f66e872015-04-03 11:30:42 -07002606 tcnt = s->tx_dropped_unalign;
2607 break;
2608 case 6:
2609 tcnt = s->tx_dropped_hroom;
2610 break;
2611 case 7:
2612 tcnt = s->tx_dropped_dtls;
2613 break;
2614 case 8:
2615 tcnt = s->tx_dropped_nwireless;
2616 break;
2617 case 9:
2618 tcnt = s->tx_queue_full_drops;
2619 break;
2620 case 10:
Saurabh Misra09dddeb2014-09-30 16:38:07 -07002621 tcnt = s->tx_mem_failure_drops;
2622 break;
2623 default:
Saurabh Misra3f66e872015-04-03 11:30:42 -07002624 return 0;
Saurabh Misra09dddeb2014-09-30 16:38:07 -07002625 }
2626
Saurabh Misra3f66e872015-04-03 11:30:42 -07002627 return (snprintf(line, len, "%s = %llu\n", header[i], tcnt));
Saurabh Misra09dddeb2014-09-30 16:38:07 -07002628}
2629
2630/*
Thomas Wu71c5ecc2016-06-21 11:15:52 -07002631 * nss_stats_capwap_decap()
2632 * Make a row for CAPWAP decap stats.
Saurabh Misra09dddeb2014-09-30 16:38:07 -07002633 */
2634static ssize_t nss_stats_capwap_decap(char *line, int len, int i, struct nss_capwap_tunnel_stats *s)
2635{
Saurabh Misra3f66e872015-04-03 11:30:42 -07002636 char *header[] = { "packets", "bytes", "DTLS_pkts", "fragments", "rx_dropped", "drop_oversize",
2637 "drop_frag_timeout", "drop_frag_dup", "drop_frag_gap", "drop_qfull", "drop_memfail",
2638 "drop_csum", "drop_malformed", "unknown" };
Saurabh Misra09dddeb2014-09-30 16:38:07 -07002639 uint64_t tcnt = 0;
2640
2641 	switch (i) {
2642 case 0:
2643 tcnt = s->pnode_stats.rx_packets;
2644 break;
2645 case 1:
2646 tcnt = s->pnode_stats.rx_bytes;
2647 break;
2648 case 2:
Saurabh Misra09dddeb2014-09-30 16:38:07 -07002649 tcnt = s->dtls_pkts;
2650 break;
Saurabh Misra3f66e872015-04-03 11:30:42 -07002651 case 3:
Saurabh Misra09dddeb2014-09-30 16:38:07 -07002652 tcnt = s->rx_segments;
2653 break;
Saurabh Misra3f66e872015-04-03 11:30:42 -07002654 case 4:
2655 tcnt = s->pnode_stats.rx_dropped;
2656 break;
Saurabh Misra09dddeb2014-09-30 16:38:07 -07002657 case 5:
Saurabh Misra3f66e872015-04-03 11:30:42 -07002658 tcnt = s->rx_oversize_drops;
Saurabh Misra09dddeb2014-09-30 16:38:07 -07002659 break;
2660 case 6:
Saurabh Misra3f66e872015-04-03 11:30:42 -07002661 tcnt = s->rx_frag_timeout_drops;
Saurabh Misra09dddeb2014-09-30 16:38:07 -07002662 break;
2663 case 7:
2664 tcnt = s->rx_dup_frag;
2665 break;
2666 case 8:
Saurabh Misra3f66e872015-04-03 11:30:42 -07002667 tcnt = s->rx_frag_gap_drops;
2668 break;
Saurabh Misra09dddeb2014-09-30 16:38:07 -07002669 case 9:
Saurabh Misra3f66e872015-04-03 11:30:42 -07002670 tcnt = s->rx_queue_full_drops;
2671 return (snprintf(line, len, "%s = %llu (n2h = %llu)\n", header[i], tcnt, s->rx_n2h_queue_full_drops));
2672 case 10:
Saurabh Misra09dddeb2014-09-30 16:38:07 -07002673 tcnt = s->rx_mem_failure_drops;
2674 break;
Saurabh Misra3f66e872015-04-03 11:30:42 -07002675 case 11:
2676 tcnt = s->rx_csum_drops;
Saurabh Misra09dddeb2014-09-30 16:38:07 -07002677 break;
Saurabh Misra3f66e872015-04-03 11:30:42 -07002678 case 12:
2679 tcnt = s->rx_malformed;
2680 break;
2681 default:
2682 return 0;
Saurabh Misra09dddeb2014-09-30 16:38:07 -07002683 }
2684
Saurabh Misra3f66e872015-04-03 11:30:42 -07002685 return (snprintf(line, len, "%s = %llu\n", header[i], tcnt));
Saurabh Misra09dddeb2014-09-30 16:38:07 -07002686}
2687
2688/*
2689 * nss_stats_capwap_read()
2690 * Read CAPWAP stats
2691 */
2692static ssize_t nss_stats_capwap_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos, uint16_t type)
2693{
2694 struct nss_stats_data *data = fp->private_data;
2695 ssize_t bytes_read = 0;
2696 struct nss_capwap_tunnel_stats stats;
2697 size_t bytes;
2698 char line[80];
Saurabh Misra3f66e872015-04-03 11:30:42 -07002699 int start;
Saurabh Misra09dddeb2014-09-30 16:38:07 -07002700 uint32_t if_num = NSS_DYNAMIC_IF_START;
2701 uint32_t max_if_num = NSS_DYNAMIC_IF_START + NSS_MAX_DYNAMIC_INTERFACES;
2702
2703 if (data) {
2704 if_num = data->if_num;
2705 }
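	/*
	 * data->if_num persists the scan position across read() calls, so a
	 * subsequent read resumes from where the previous one stopped.
	 */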
2706
2707 /*
2708 	 * If we are done accommodating all the CAPWAP tunnels.
2709 */
2710 if (if_num > max_if_num) {
2711 return 0;
2712 }
2713
2714 for (; if_num <= max_if_num; if_num++) {
2715 bool isthere;
2716
2717 if (nss_is_dynamic_interface(if_num) == false) {
2718 continue;
2719 }
2720
2721 if (nss_dynamic_interface_get_type(if_num) != NSS_DYNAMIC_INTERFACE_TYPE_CAPWAP) {
2722 continue;
2723 }
2724
2725 /*
2726 	 * If the CAPWAP tunnel does not exist, isthere will be false.
2727 */
2728 isthere = nss_capwap_get_stats(if_num, &stats);
2729 if (!isthere) {
2730 continue;
2731 }
2732
Saurabh Misra3f66e872015-04-03 11:30:42 -07002733 bytes = snprintf(line, sizeof(line), "----if_num : %2d----\n", if_num);
Saurabh Misra09dddeb2014-09-30 16:38:07 -07002734 if ((bytes_read + bytes) > sz) {
2735 break;
2736 }
2737
2738 if (copy_to_user(ubuf + bytes_read, line, bytes) != 0) {
2739 bytes_read = -EFAULT;
2740 goto fail;
2741 }
2742 bytes_read += bytes;
2743 start = 0;
Saurabh Misra3f66e872015-04-03 11:30:42 -07002744 while (bytes_read < sz) {
Saurabh Misra09dddeb2014-09-30 16:38:07 -07002745 if (type == 1) {
2746 bytes = nss_stats_capwap_encap(line, sizeof(line), start, &stats);
2747 } else {
2748 bytes = nss_stats_capwap_decap(line, sizeof(line), start, &stats);
2749 }
2750
Saurabh Misra3f66e872015-04-03 11:30:42 -07002751 /*
2752 * If we don't have any more lines in decap/encap.
2753 */
2754 if (bytes == 0) {
2755 break;
2756 }
2757
Saurabh Misra09dddeb2014-09-30 16:38:07 -07002758 if ((bytes_read + bytes) > sz)
2759 break;
2760
2761 if (copy_to_user(ubuf + bytes_read, line, bytes) != 0) {
2762 bytes_read = -EFAULT;
2763 goto fail;
2764 }
2765
2766 bytes_read += bytes;
2767 start++;
2768 }
2769 }
2770
2771 if (bytes_read > 0) {
2772 *ppos = bytes_read;
2773 }
2774
2775 if (data) {
2776 data->if_num = if_num;
2777 }
2778fail:
2779 return bytes_read;
2780}
2781
2782/*
2783 * nss_stats_capwap_decap_read()
2784 * Read CAPWAP decap stats
2785 */
2786static ssize_t nss_stats_capwap_decap_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
2787{
2788 return (nss_stats_capwap_read(fp, ubuf, sz, ppos, 0));
2789}
2790
2791/*
2792 * nss_stats_capwap_encap_read()
2793 * Read CAPWAP encap stats
2794 */
2795static ssize_t nss_stats_capwap_encap_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
2796{
2797 return (nss_stats_capwap_read(fp, ubuf, sz, ppos, 1));
2798}
2799
2800/*
Ankit Dhanuka6228ebd2014-11-05 17:26:01 +05302801 * nss_stats_gre_redir()
Thomas Wu71c5ecc2016-06-21 11:15:52 -07002802 * Make a row for GRE_REDIR stats.
Ankit Dhanuka6228ebd2014-11-05 17:26:01 +05302803 */
2804static ssize_t nss_stats_gre_redir(char *line, int len, int i, struct nss_gre_redir_tunnel_stats *s)
2805{
2806 char *header[] = { "TX Packets", "TX Bytes", "TX Drops", "RX Packets", "RX Bytes", "Rx Drops" };
2807 uint64_t tcnt = 0;
2808
2809 switch (i) {
2810 case 0:
2811 tcnt = s->node_stats.tx_packets;
2812 break;
2813 case 1:
2814 tcnt = s->node_stats.tx_bytes;
2815 break;
2816 case 2:
2817 tcnt = s->tx_dropped;
2818 break;
2819 case 3:
2820 tcnt = s->node_stats.rx_packets;
2821 break;
2822 case 4:
2823 tcnt = s->node_stats.rx_bytes;
2824 break;
2825 case 5:
2826 tcnt = s->node_stats.rx_dropped;
2827 break;
2828 default:
Radha krishna Simha Jigurudf53f022015-11-09 12:31:26 +05302829 return 0;
Ankit Dhanuka6228ebd2014-11-05 17:26:01 +05302830 }
2831
2832 return (snprintf(line, len, "%s = %llu\n", header[i], tcnt));
2833}
2834
2835/*
2836 * nss_stats_gre_redir_read()
Thomas Wu71c5ecc2016-06-21 11:15:52 -07002837 *	Read gre_redir tunnel stats.
Ankit Dhanuka6228ebd2014-11-05 17:26:01 +05302838 */
2839static ssize_t nss_stats_gre_redir_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
2840{
2841 struct nss_stats_data *data = fp->private_data;
2842 ssize_t bytes_read = 0;
2843 struct nss_gre_redir_tunnel_stats stats;
2844 size_t bytes;
2845 char line[80];
2846 int start, end;
2847 int index = 0;
2848
2849 if (data) {
2850 index = data->index;
2851 }
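	/*
	 * data->index persists the tunnel scan position across read() calls.
	 */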
2852
2853 /*
2854 	 * If we are done accommodating all the GRE_REDIR tunnels.
2855 */
2856 if (index >= NSS_GRE_REDIR_MAX_INTERFACES) {
2857 return 0;
2858 }
2859
2860 for (; index < NSS_GRE_REDIR_MAX_INTERFACES; index++) {
2861 bool isthere;
2862
2863 /*
2864 	 * If the gre_redir tunnel does not exist, isthere will be false.
2865 */
2866 isthere = nss_gre_redir_get_stats(index, &stats);
2867 if (!isthere) {
2868 continue;
2869 }
2870
2871 bytes = snprintf(line, sizeof(line), "\nTunnel if_num: %2d\n", stats.if_num);
2872 if ((bytes_read + bytes) > sz) {
2873 break;
2874 }
2875
2876 if (copy_to_user(ubuf + bytes_read, line, bytes) != 0) {
2877 bytes_read = -EFAULT;
2878 goto fail;
2879 }
2880 bytes_read += bytes;
2881 start = 0;
2882 end = 6;
2883 while (bytes_read < sz && start < end) {
2884 bytes = nss_stats_gre_redir(line, sizeof(line), start, &stats);
2885
2886 if ((bytes_read + bytes) > sz)
2887 break;
2888
2889 if (copy_to_user(ubuf + bytes_read, line, bytes) != 0) {
2890 bytes_read = -EFAULT;
2891 goto fail;
2892 }
2893
2894 bytes_read += bytes;
2895 start++;
2896 }
2897 }
2898
2899 if (bytes_read > 0) {
2900 *ppos = bytes_read;
2901 }
2902
2903 if (data) {
2904 data->index = index;
2905 }
2906
2907fail:
2908 return bytes_read;
2909}
2910
2911/*
Sundarajan Srinivasan273d9002015-03-03 15:43:16 -08002912 * nss_stats_wifi_if_read()
2913 * Read wifi_if statistics
2914 */
2915static ssize_t nss_stats_wifi_if_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
2916{
2917 struct nss_stats_data *data = fp->private_data;
2918 int32_t if_num = NSS_DYNAMIC_IF_START;
2919 int32_t max_if_num = if_num + NSS_MAX_DYNAMIC_INTERFACES;
2920 size_t bytes = 0;
2921 ssize_t bytes_read = 0;
2922 char line[80];
2923 int start, end;
2924
2925 if (data) {
2926 if_num = data->if_num;
2927 }
2928
2929 if (if_num > max_if_num) {
2930 return 0;
2931 }
2932
2933 for (; if_num < max_if_num; if_num++) {
2934 if (nss_dynamic_interface_get_type(if_num) != NSS_DYNAMIC_INTERFACE_TYPE_WIFI)
2935 continue;
2936
2937 bytes = scnprintf(line, sizeof(line), "if_num %d stats start:\n\n", if_num);
2938 if ((bytes_read + bytes) > sz)
2939 break;
2940
2941 if (copy_to_user(ubuf + bytes_read, line, bytes) != 0) {
2942 bytes_read = -EFAULT;
2943 goto end;
2944 }
2945
2946 bytes_read += bytes;
2947
2948 start = 0;
2949 end = 7;
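		/*
		 * nss_wifi_if_copy_stats() emits one row per call; rows 0..6
		 * are requested here, and a return of 0 ends the set early.
		 */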
2950 while (bytes_read < sz && start < end) {
2951 bytes = nss_wifi_if_copy_stats(if_num, start, line);
2952 if (!bytes)
2953 break;
2954
2955 if ((bytes_read + bytes) > sz)
2956 break;
2957
2958 if (copy_to_user(ubuf + bytes_read, line, bytes) != 0) {
2959 bytes_read = -EFAULT;
2960 goto end;
2961 }
2962
2963 bytes_read += bytes;
2964 start++;
2965 }
2966
2967 bytes = scnprintf(line, sizeof(line), "if_num %d stats end:\n\n", if_num);
2968 if (bytes_read > (sz - bytes))
2969 break;
2970
2971 if (copy_to_user(ubuf + bytes_read, line, bytes) != 0) {
2972 bytes_read = -EFAULT;
2973 goto end;
2974 }
2975
2976 bytes_read += bytes;
2977 }
2978
2979 if (bytes_read > 0) {
2980 *ppos = bytes_read;
2981 }
2982
2983 if (data) {
2984 data->if_num = if_num;
2985 }
2986
2987end:
2988 return bytes_read;
2989}
2990
2991/*
Sundarajan Srinivasanab2c8562015-06-09 16:14:10 -07002992 * nss_stats_virt_if_read()
2993 * Read virt_if statistics
2994 */
2995static ssize_t nss_stats_virt_if_read(struct file *fp, char __user *ubuf,
2996 size_t sz, loff_t *ppos)
2997{
2998 struct nss_stats_data *data = fp->private_data;
2999 int32_t if_num = NSS_DYNAMIC_IF_START;
3000 int32_t max_if_num = if_num + NSS_MAX_DYNAMIC_INTERFACES;
3001 size_t bytes = 0;
3002 ssize_t bytes_read = 0;
3003 char line[80];
3004 int start, end;
3005
3006 if (data) {
3007 if_num = data->if_num;
3008 }
3009
3010 if (if_num > max_if_num) {
3011 return 0;
3012 }
3013
3014 for (; if_num < max_if_num; if_num++) {
3015 if (nss_dynamic_interface_get_type(if_num) != NSS_DYNAMIC_INTERFACE_TYPE_802_3_REDIR)
3016 continue;
3017
3018 bytes = scnprintf(line, sizeof(line), "if_num %d stats start:\n\n", if_num);
3019 if ((bytes_read + bytes) > sz)
3020 break;
3021
3022 if (copy_to_user(ubuf + bytes_read, line, bytes) != 0) {
3023 bytes_read = -EFAULT;
3024 goto end;
3025 }
3026
3027 bytes_read += bytes;
3028
3029 start = 0;
3030 end = 7;
3031 while (bytes_read < sz && start < end) {
3032 bytes = nss_virt_if_copy_stats(if_num, start, line);
3033 if (!bytes)
3034 break;
3035
3036 if ((bytes_read + bytes) > sz)
3037 break;
3038
3039 if (copy_to_user(ubuf + bytes_read, line, bytes) != 0) {
3040 bytes_read = -EFAULT;
3041 goto end;
3042 }
3043
3044 bytes_read += bytes;
3045 start++;
3046 }
3047
3048 bytes = scnprintf(line, sizeof(line), "if_num %d stats end:\n\n", if_num);
3049 if (bytes_read > (sz - bytes))
3050 break;
3051
3052 if (copy_to_user(ubuf + bytes_read, line, bytes) != 0) {
3053 bytes_read = -EFAULT;
3054 goto end;
3055 }
3056
3057 bytes_read += bytes;
3058 }
3059
3060 if (bytes_read > 0) {
3061 *ppos = bytes_read;
3062 }
3063
3064 if (data) {
3065 data->if_num = if_num;
3066 }
3067
3068end:
3069 return bytes_read;
3070}
3071
3072/*
Sundarajan Srinivasancd1631b2015-06-18 01:23:30 -07003073 * nss_stats_tx_rx_virt_if_read()
3074 * Read tx_rx_virt_if statistics
3075 */
3076static ssize_t nss_stats_tx_rx_virt_if_read(struct file *fp, char __user *ubuf,
3077 size_t sz, loff_t *ppos)
3078{
3079 struct nss_stats_data *data = fp->private_data;
3080 int32_t if_num = NSS_DYNAMIC_IF_START;
3081 int32_t max_if_num = if_num + NSS_MAX_DYNAMIC_INTERFACES;
3082 size_t bytes = 0;
3083 ssize_t bytes_read = 0;
3084 char line[80];
3085 int start, end;
3086
3087 if (data) {
3088 if_num = data->if_num;
3089 }
3090
3091 if (if_num > max_if_num) {
3092 return 0;
3093 }
3094
3095 for (; if_num < max_if_num; if_num++) {
3096 if (nss_dynamic_interface_get_type(if_num) != NSS_DYNAMIC_INTERFACE_TYPE_VIRTIF_DEPRECATED)
3097 continue;
3098
3099 bytes = scnprintf(line, sizeof(line), "if_num %d stats start:\n\n", if_num);
3100 if ((bytes_read + bytes) > sz)
3101 break;
3102
3103 if (copy_to_user(ubuf + bytes_read, line, bytes) != 0) {
3104 bytes_read = -EFAULT;
3105 goto end;
3106 }
3107
3108 bytes_read += bytes;
3109
3110 start = 0;
3111 end = 7;
3112 while (bytes_read < sz && start < end) {
3113 bytes = nss_tx_rx_virt_if_copy_stats(if_num, start, line);
3114 if (!bytes)
3115 break;
3116
3117 if ((bytes_read + bytes) > sz)
3118 break;
3119
3120 if (copy_to_user(ubuf + bytes_read, line, bytes) != 0) {
3121 bytes_read = -EFAULT;
3122 goto end;
3123 }
3124
3125 bytes_read += bytes;
3126 start++;
3127 }
3128
3129 bytes = scnprintf(line, sizeof(line), "if_num %d stats end:\n\n", if_num);
3130 if (bytes_read > (sz - bytes))
3131 break;
3132
3133 if (copy_to_user(ubuf + bytes_read, line, bytes) != 0) {
3134 bytes_read = -EFAULT;
3135 goto end;
3136 }
3137
3138 bytes_read += bytes;
3139 }
3140
3141 if (bytes_read > 0) {
3142 *ppos = bytes_read;
3143 }
3144
3145 if (data) {
3146 data->if_num = if_num;
3147 }
3148
3149end:
3150 return bytes_read;
3151}
3152
3153/*
Stephen Wangec5a85c2016-09-08 23:32:27 -07003154 * nss_stats_trustsec_tx_read()
3155 * Read trustsec_tx stats
3156 */
3157static ssize_t nss_stats_trustsec_tx_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
3158{
3159 int32_t i;
3160
3161 /*
3162 	 * max output lines = common node stats block + trustsec_tx node stats block + start/end tags and blank lines
3163 */
3164 uint32_t max_output_lines = (NSS_STATS_NODE_MAX + 2) + (NSS_STATS_TRUSTSEC_TX_MAX + 3) + 5;
3165 size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
3166 size_t size_wr = 0;
3167 ssize_t bytes_read = 0;
3168 uint64_t *stats_shadow;
3169
3170 char *lbuf = kzalloc(size_al, GFP_KERNEL);
3171 if (unlikely(lbuf == NULL)) {
3172 nss_warning("Could not allocate memory for local statistics buffer");
3173 return 0;
3174 }
3175
3176 stats_shadow = kzalloc(NSS_STATS_NODE_MAX * 8, GFP_KERNEL);
3177 if (unlikely(stats_shadow == NULL)) {
3178 nss_warning("Could not allocate memory for local shadow buffer");
3179 kfree(lbuf);
3180 return 0;
3181 }
3182
3183 size_wr = scnprintf(lbuf, size_al, "trustsec_tx stats start:\n\n");
3184
3185 /*
3186 * Common node stats
3187 */
3188 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "common node stats:\n\n");
3189 spin_lock_bh(&nss_top_main.stats_lock);
3190 for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
3191 stats_shadow[i] = nss_top_main.stats_node[NSS_TRUSTSEC_TX_INTERFACE][i];
3192 }
3193
3194 spin_unlock_bh(&nss_top_main.stats_lock);
3195
3196 for (i = 0; (i < NSS_STATS_NODE_MAX); i++) {
3197 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
3198 "%s = %llu\n", nss_stats_str_node[i], stats_shadow[i]);
3199 }
3200
3201 /*
3202 * TrustSec TX node stats
3203 */
3204 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\ntrustsec tx node stats:\n\n");
3205
3206 spin_lock_bh(&nss_top_main.stats_lock);
3207 for (i = 0; (i < NSS_STATS_TRUSTSEC_TX_MAX); i++) {
3208 stats_shadow[i] = nss_top_main.stats_trustsec_tx[i];
3209 }
3210
3211 spin_unlock_bh(&nss_top_main.stats_lock);
3212
3213 for (i = 0; (i < NSS_STATS_TRUSTSEC_TX_MAX); i++) {
3214 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
3215 "%s = %llu\n", nss_stats_str_trustsec_tx[i], stats_shadow[i]);
3216 }
3217
3218 size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\ntrustsec tx stats end\n\n");
3219 bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
3220 kfree(lbuf);
3221 kfree(stats_shadow);
3222
3223 return bytes_read;
3224}
3225
3226/*
Saurabh Misra09dddeb2014-09-30 16:38:07 -07003227 * nss_stats_open()
3228 */
3229static int nss_stats_open(struct inode *inode, struct file *filp)
3230{
3231 struct nss_stats_data *data = NULL;
3232
3233 data = kzalloc(sizeof(struct nss_stats_data), GFP_KERNEL);
3234 if (!data) {
3235 return -ENOMEM;
3236 }
3237 	memset(data, 0, sizeof(struct nss_stats_data));
3238 data->if_num = NSS_DYNAMIC_IF_START;
Ankit Dhanuka6228ebd2014-11-05 17:26:01 +05303239 data->index = 0;
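	/*
	 * inode->i_private is supplied at debugfs file creation; for the
	 * per-port/per-ring EDMA files it is assumed to carry the port or
	 * ring identifier.
	 */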
Stephen Wangaed46332016-12-12 17:29:03 -08003240 data->edma_id = (nss_ptr_t)inode->i_private;
Saurabh Misra09dddeb2014-09-30 16:38:07 -07003241 filp->private_data = data;
3242
3243 return 0;
3244}
3245
3246/*
3247 * nss_stats_release()
3248 */
3249static int nss_stats_release(struct inode *inode, struct file *filp)
3250{
3251 struct nss_stats_data *data = filp->private_data;
3252
3253 if (data) {
3254 kfree(data);
3255 }
3256
3257 return 0;
3258}
3259
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05303260#define NSS_STATS_DECLARE_FILE_OPERATIONS(name) \
3261static const struct file_operations nss_stats_##name##_ops = { \
Saurabh Misra09dddeb2014-09-30 16:38:07 -07003262 .open = nss_stats_open, \
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05303263 .read = nss_stats_##name##_read, \
3264 .llseek = generic_file_llseek, \
Saurabh Misra09dddeb2014-09-30 16:38:07 -07003265 .release = nss_stats_release, \
Abhishek Rastogi38cffff2013-06-02 11:25:47 +05303266};
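
/*
 * For example, NSS_STATS_DECLARE_FILE_OPERATIONS(ipv4) defines
 * 'nss_stats_ipv4_ops' wired to nss_stats_ipv4_read(), sharing the
 * open/release helpers above.
 */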

/*
 * nss_ipv4_stats_ops
 */
NSS_STATS_DECLARE_FILE_OPERATIONS(ipv4)

/*
 * ipv4_reasm_stats_ops
 */
NSS_STATS_DECLARE_FILE_OPERATIONS(ipv4_reasm)

/*
 * ipv6_stats_ops
 */
NSS_STATS_DECLARE_FILE_OPERATIONS(ipv6)

/*
 * ipv6_reasm_stats_ops
 */
NSS_STATS_DECLARE_FILE_OPERATIONS(ipv6_reasm)

/*
 * n2h_stats_ops
 */
NSS_STATS_DECLARE_FILE_OPERATIONS(n2h)

/*
 * lso_rx_stats_ops
 */
NSS_STATS_DECLARE_FILE_OPERATIONS(lso_rx)

/*
 * drv_stats_ops
 */
NSS_STATS_DECLARE_FILE_OPERATIONS(drv)

/*
 * pppoe_stats_ops
 */
NSS_STATS_DECLARE_FILE_OPERATIONS(pppoe)

/*
 * l2tpv2_stats_ops
 */
NSS_STATS_DECLARE_FILE_OPERATIONS(l2tpv2)

/*
 * map_t_stats_ops
 */
NSS_STATS_DECLARE_FILE_OPERATIONS(map_t)

/*
 * ppe_stats_ops
 */
NSS_STATS_DECLARE_FILE_OPERATIONS(ppe_conn)
NSS_STATS_DECLARE_FILE_OPERATIONS(ppe_l3)
NSS_STATS_DECLARE_FILE_OPERATIONS(ppe_code)

/*
 * pptp_stats_ops
 */
NSS_STATS_DECLARE_FILE_OPERATIONS(pptp)

/*
 * gmac_stats_ops
 */
NSS_STATS_DECLARE_FILE_OPERATIONS(gmac)

/*
 * capwap_stats_ops
 */
NSS_STATS_DECLARE_FILE_OPERATIONS(capwap_encap)
NSS_STATS_DECLARE_FILE_OPERATIONS(capwap_decap)

/*
 * eth_rx_stats_ops
 */
NSS_STATS_DECLARE_FILE_OPERATIONS(eth_rx)

/*
 * edma_port_stats_ops
 */
NSS_STATS_DECLARE_FILE_OPERATIONS(edma_port_stats)

/*
 * edma_port_type_ops
 */
NSS_STATS_DECLARE_FILE_OPERATIONS(edma_port_type)

/*
 * edma_port_ring_map_ops
 */
NSS_STATS_DECLARE_FILE_OPERATIONS(edma_port_ring_map)

/*
 * edma_txring_stats_ops
 */
NSS_STATS_DECLARE_FILE_OPERATIONS(edma_txring)

/*
 * edma_rxring_stats_ops
 */
NSS_STATS_DECLARE_FILE_OPERATIONS(edma_rxring)

/*
 * edma_txcmplring_stats_ops
 */
NSS_STATS_DECLARE_FILE_OPERATIONS(edma_txcmplring)

/*
 * edma_rxfillring_stats_ops
 */
NSS_STATS_DECLARE_FILE_OPERATIONS(edma_rxfillring)

/*
 * gre_redir_ops
 */
NSS_STATS_DECLARE_FILE_OPERATIONS(gre_redir)

/*
 * sjack_stats_ops
 */
NSS_STATS_DECLARE_FILE_OPERATIONS(sjack)

/*
 * portid_ops
 */
NSS_STATS_DECLARE_FILE_OPERATIONS(portid)

NSS_STATS_DECLARE_FILE_OPERATIONS(wifi_if)

NSS_STATS_DECLARE_FILE_OPERATIONS(virt_if)

NSS_STATS_DECLARE_FILE_OPERATIONS(tx_rx_virt_if)

/*
 * wifi_stats_ops
 */
NSS_STATS_DECLARE_FILE_OPERATIONS(wifi)

/*
 * dtls_stats_ops
 */
NSS_STATS_DECLARE_FILE_OPERATIONS(dtls)

/*
 * gre_tunnel_stats_ops
 */
NSS_STATS_DECLARE_FILE_OPERATIONS(gre_tunnel)

/*
 * trustsec_tx_stats_ops
 */
NSS_STATS_DECLARE_FILE_OPERATIONS(trustsec_tx)

/*
 * nss_stats_init()
 *	Enable NSS statistics
 */
void nss_stats_init(void)
{
	int i = 0;
	struct dentry *edma_d = NULL;
	struct dentry *edma_port_dir_d = NULL;
	struct dentry *edma_port_d = NULL;
	struct dentry *edma_port_type_d = NULL;
	struct dentry *edma_port_stats_d = NULL;
	struct dentry *edma_port_ring_map_d = NULL;

	struct dentry *edma_rings_dir_d = NULL;
	struct dentry *edma_tx_dir_d = NULL;
	struct dentry *edma_tx_d = NULL;
	struct dentry *edma_rx_dir_d = NULL;
	struct dentry *edma_rx_d = NULL;
	struct dentry *edma_txcmpl_dir_d = NULL;
	struct dentry *edma_txcmpl_d = NULL;
	struct dentry *edma_rxfill_dir_d = NULL;
	struct dentry *edma_rxfill_d = NULL;

	char file_name[10];

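	/*
	 * The code below builds the following debugfs hierarchy (assuming
	 * debugfs is mounted, typically at /sys/kernel/debug):
	 *
	 *	qca-nss-drv/
	 *	  stats/
	 *	    ipv4, ipv4_reasm, ipv6, ipv6_reasm, eth_rx, n2h, lso_rx,
	 *	    drv, pppoe, gmac, capwap_encap, capwap_decap, gre_redir,
	 *	    sjack, portid, wifi, wifi_if, virt_if, tx_rx_virt_if,
	 *	    l2tpv2, map_t, pptp, dtls, gre_tunnel, trustsec_tx
	 *	    ppe/{connection, l3, ppe_code}
	 *	    edma/ports/<N>/{stats, type, ring_map}
	 *	    edma/rings/{tx, rx, txcmpl, rxfill}/<N>
	 */
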
	/*
	 * NSS driver entry
	 */
	nss_top_main.top_dentry = debugfs_create_dir("qca-nss-drv", NULL);
	if (unlikely(nss_top_main.top_dentry == NULL)) {
		nss_warning("Failed to create qca-nss-drv directory in debugfs");

		/*
		 * Non-availability of the debugfs directory is not a catastrophe.
		 * We can still go ahead with other initialization.
		 */
		return;
	}

	nss_top_main.stats_dentry = debugfs_create_dir("stats", nss_top_main.top_dentry);
	if (unlikely(nss_top_main.stats_dentry == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats directory in debugfs");

		/*
		 * Non-availability of the debugfs directory is not a catastrophe.
		 * We can still go ahead with the rest of initialization.
		 */
		return;
	}

	/*
	 * Create files to obtain statistics
	 */

	/*
	 * ipv4_stats
	 */
	nss_top_main.ipv4_dentry = debugfs_create_file("ipv4", 0400,
					nss_top_main.stats_dentry, &nss_top_main, &nss_stats_ipv4_ops);
	if (unlikely(nss_top_main.ipv4_dentry == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats/ipv4 file in debugfs");
		return;
	}

	/*
	 * ipv4_reasm_stats
	 */
	nss_top_main.ipv4_reasm_dentry = debugfs_create_file("ipv4_reasm", 0400,
					nss_top_main.stats_dentry, &nss_top_main, &nss_stats_ipv4_reasm_ops);
	if (unlikely(nss_top_main.ipv4_reasm_dentry == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats/ipv4_reasm file in debugfs");
		return;
	}

	/*
	 * ipv6_stats
	 */
	nss_top_main.ipv6_dentry = debugfs_create_file("ipv6", 0400,
					nss_top_main.stats_dentry, &nss_top_main, &nss_stats_ipv6_ops);
	if (unlikely(nss_top_main.ipv6_dentry == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats/ipv6 file in debugfs");
		return;
	}

	/*
	 * ipv6_reasm_stats
	 */
	nss_top_main.ipv6_reasm_dentry = debugfs_create_file("ipv6_reasm", 0400,
					nss_top_main.stats_dentry, &nss_top_main, &nss_stats_ipv6_reasm_ops);
	if (unlikely(nss_top_main.ipv6_reasm_dentry == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats/ipv6_reasm file in debugfs");
		return;
	}

	/*
	 * eth_rx_stats
	 */
	nss_top_main.eth_rx_dentry = debugfs_create_file("eth_rx", 0400,
					nss_top_main.stats_dentry, &nss_top_main, &nss_stats_eth_rx_ops);
	if (unlikely(nss_top_main.eth_rx_dentry == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats/eth_rx file in debugfs");
		return;
	}

	/*
	 * edma stats
	 */
	edma_d = debugfs_create_dir("edma", nss_top_main.stats_dentry);
	if (unlikely(edma_d == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats/edma directory in debugfs");
		return;
	}

	/*
	 * edma port stats
	 */
	edma_port_dir_d = debugfs_create_dir("ports", edma_d);
	if (unlikely(edma_port_dir_d == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats/edma/ports directory in debugfs");
		return;
	}

	for (i = 0; i < NSS_EDMA_NUM_PORTS_MAX; i++) {
		memset(file_name, 0, sizeof(file_name));
		snprintf(file_name, sizeof(file_name), "%d", i);
		edma_port_d = NULL;
		edma_port_stats_d = NULL;
		edma_port_type_d = NULL;
		edma_port_ring_map_d = NULL;

		edma_port_d = debugfs_create_dir(file_name, edma_port_dir_d);
		if (unlikely(edma_port_d == NULL)) {
			nss_warning("Failed to create qca-nss-drv/stats/edma/ports/%d dir in debugfs", i);
			return;
		}

		edma_port_stats_d = debugfs_create_file("stats", 0400, edma_port_d, (void *)(nss_ptr_t)i, &nss_stats_edma_port_stats_ops);
		if (unlikely(edma_port_stats_d == NULL)) {
			nss_warning("Failed to create qca-nss-drv/stats/edma/ports/%d/stats file in debugfs", i);
			return;
		}

		edma_port_type_d = debugfs_create_file("type", 0400, edma_port_d, (void *)(nss_ptr_t)i, &nss_stats_edma_port_type_ops);
		if (unlikely(edma_port_type_d == NULL)) {
			nss_warning("Failed to create qca-nss-drv/stats/edma/ports/%d/type file in debugfs", i);
			return;
		}

		edma_port_ring_map_d = debugfs_create_file("ring_map", 0400, edma_port_d, (void *)(nss_ptr_t)i, &nss_stats_edma_port_ring_map_ops);
		if (unlikely(edma_port_ring_map_d == NULL)) {
			nss_warning("Failed to create qca-nss-drv/stats/edma/ports/%d/ring_map file in debugfs", i);
			return;
		}
	}

	/*
	 * edma ring stats
	 */
	edma_rings_dir_d = debugfs_create_dir("rings", edma_d);
	if (unlikely(edma_rings_dir_d == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats/edma/rings directory in debugfs");
		return;
	}

	/*
	 * edma tx ring stats
	 */
	edma_tx_dir_d = debugfs_create_dir("tx", edma_rings_dir_d);
	if (unlikely(edma_tx_dir_d == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats/edma/rings/tx directory in debugfs");
		return;
	}

	for (i = 0; i < NSS_EDMA_NUM_TX_RING_MAX; i++) {
		memset(file_name, 0, sizeof(file_name));
		scnprintf(file_name, sizeof(file_name), "%d", i);
		edma_tx_d = NULL;
		edma_tx_d = debugfs_create_file(file_name, 0400, edma_tx_dir_d, (void *)(nss_ptr_t)i, &nss_stats_edma_txring_ops);
		if (unlikely(edma_tx_d == NULL)) {
			nss_warning("Failed to create qca-nss-drv/stats/edma/rings/tx/%d file in debugfs", i);
			return;
		}
	}

	/*
	 * edma rx ring stats
	 */
	edma_rx_dir_d = debugfs_create_dir("rx", edma_rings_dir_d);
	if (unlikely(edma_rx_dir_d == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats/edma/rings/rx directory in debugfs");
		return;
	}

	for (i = 0; i < NSS_EDMA_NUM_RX_RING_MAX; i++) {
		memset(file_name, 0, sizeof(file_name));
		scnprintf(file_name, sizeof(file_name), "%d", i);
		edma_rx_d = NULL;
		edma_rx_d = debugfs_create_file(file_name, 0400, edma_rx_dir_d, (void *)(nss_ptr_t)i, &nss_stats_edma_rxring_ops);
		if (unlikely(edma_rx_d == NULL)) {
			nss_warning("Failed to create qca-nss-drv/stats/edma/rings/rx/%d file in debugfs", i);
			return;
		}
	}

	/*
	 * edma tx cmpl ring stats
	 */
	edma_txcmpl_dir_d = debugfs_create_dir("txcmpl", edma_rings_dir_d);
	if (unlikely(edma_txcmpl_dir_d == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats/edma/rings/txcmpl directory in debugfs");
		return;
	}

	for (i = 0; i < NSS_EDMA_NUM_TXCMPL_RING_MAX; i++) {
		memset(file_name, 0, sizeof(file_name));
		scnprintf(file_name, sizeof(file_name), "%d", i);
		edma_txcmpl_d = NULL;
		edma_txcmpl_d = debugfs_create_file(file_name, 0400, edma_txcmpl_dir_d, (void *)(nss_ptr_t)i, &nss_stats_edma_txcmplring_ops);
		if (unlikely(edma_txcmpl_d == NULL)) {
			nss_warning("Failed to create qca-nss-drv/stats/edma/rings/txcmpl/%d file in debugfs", i);
			return;
		}
	}

	/*
	 * edma rx fill ring stats
	 */
	edma_rxfill_dir_d = debugfs_create_dir("rxfill", edma_rings_dir_d);
	if (unlikely(edma_rxfill_dir_d == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats/edma/rings/rxfill directory in debugfs");
		return;
	}

	for (i = 0; i < NSS_EDMA_NUM_RXFILL_RING_MAX; i++) {
		memset(file_name, 0, sizeof(file_name));
		scnprintf(file_name, sizeof(file_name), "%d", i);
		edma_rxfill_d = NULL;
		edma_rxfill_d = debugfs_create_file(file_name, 0400, edma_rxfill_dir_d, (void *)(nss_ptr_t)i, &nss_stats_edma_rxfillring_ops);
		if (unlikely(edma_rxfill_d == NULL)) {
			nss_warning("Failed to create qca-nss-drv/stats/edma/rings/rxfill/%d file in debugfs", i);
			return;
		}
	}

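	/*
	 * At this point the per-port and per-ring EDMA stats are exposed as,
	 * for example, qca-nss-drv/stats/edma/ports/0/stats and
	 * qca-nss-drv/stats/edma/rings/tx/0 under the debugfs mount point.
	 * Each file's inode->i_private carries the port or ring index, which
	 * nss_stats_open() stores in nss_stats_data.edma_id for the read
	 * handler.
	 */
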
	/*
	 * n2h_stats
	 */
	nss_top_main.n2h_dentry = debugfs_create_file("n2h", 0400,
					nss_top_main.stats_dentry, &nss_top_main, &nss_stats_n2h_ops);
	if (unlikely(nss_top_main.n2h_dentry == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats/n2h file in debugfs");
		return;
	}

	/*
	 * lso_rx_stats
	 */
	nss_top_main.lso_rx_dentry = debugfs_create_file("lso_rx", 0400,
					nss_top_main.stats_dentry, &nss_top_main, &nss_stats_lso_rx_ops);
	if (unlikely(nss_top_main.lso_rx_dentry == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats/lso_rx file in debugfs");
		return;
	}

	/*
	 * drv_stats
	 */
	nss_top_main.drv_dentry = debugfs_create_file("drv", 0400,
					nss_top_main.stats_dentry, &nss_top_main, &nss_stats_drv_ops);
	if (unlikely(nss_top_main.drv_dentry == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats/drv file in debugfs");
		return;
	}

	/*
	 * pppoe_stats
	 */
	nss_top_main.pppoe_dentry = debugfs_create_file("pppoe", 0400,
					nss_top_main.stats_dentry, &nss_top_main, &nss_stats_pppoe_ops);
	if (unlikely(nss_top_main.pppoe_dentry == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats/pppoe file in debugfs");
		return;
	}

	/*
	 * gmac_stats
	 */
	nss_top_main.gmac_dentry = debugfs_create_file("gmac", 0400,
					nss_top_main.stats_dentry, &nss_top_main, &nss_stats_gmac_ops);
	if (unlikely(nss_top_main.gmac_dentry == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats/gmac file in debugfs");
		return;
	}

	/*
	 * CAPWAP stats.
	 */
	nss_top_main.capwap_encap_dentry = debugfs_create_file("capwap_encap", 0400,
					nss_top_main.stats_dentry, &nss_top_main, &nss_stats_capwap_encap_ops);
	if (unlikely(nss_top_main.capwap_encap_dentry == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats/capwap_encap file in debugfs");
		return;
	}

	nss_top_main.capwap_decap_dentry = debugfs_create_file("capwap_decap", 0400,
					nss_top_main.stats_dentry, &nss_top_main, &nss_stats_capwap_decap_ops);
	if (unlikely(nss_top_main.capwap_decap_dentry == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats/capwap_decap file in debugfs");
		return;
	}

	/*
	 * GRE_REDIR stats
	 */
	nss_top_main.gre_redir_dentry = debugfs_create_file("gre_redir", 0400,
					nss_top_main.stats_dentry, &nss_top_main, &nss_stats_gre_redir_ops);
	if (unlikely(nss_top_main.gre_redir_dentry == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats/gre_redir file in debugfs");
		return;
	}

	/*
	 * SJACK stats
	 */
	nss_top_main.sjack_dentry = debugfs_create_file("sjack", 0400,
					nss_top_main.stats_dentry, &nss_top_main, &nss_stats_sjack_ops);
	if (unlikely(nss_top_main.sjack_dentry == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats/sjack file in debugfs");
		return;
	}

	/*
	 * PORTID stats
	 */
	nss_top_main.portid_dentry = debugfs_create_file("portid", 0400,
					nss_top_main.stats_dentry, &nss_top_main, &nss_stats_portid_ops);
	if (unlikely(nss_top_main.portid_dentry == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats/portid file in debugfs");
		return;
	}

	/*
	 * WIFI stats
	 */
	nss_top_main.wifi_dentry = debugfs_create_file("wifi", 0400,
					nss_top_main.stats_dentry, &nss_top_main, &nss_stats_wifi_ops);
	if (unlikely(nss_top_main.wifi_dentry == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats/wifi file in debugfs");
		return;
	}

	/*
	 * wifi_if stats
	 */
	nss_top_main.wifi_if_dentry = debugfs_create_file("wifi_if", 0400,
					nss_top_main.stats_dentry, &nss_top_main, &nss_stats_wifi_if_ops);
	if (unlikely(nss_top_main.wifi_if_dentry == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats/wifi_if file in debugfs");
		return;
	}

	nss_top_main.virt_if_dentry = debugfs_create_file("virt_if", 0400,
					nss_top_main.stats_dentry, &nss_top_main, &nss_stats_virt_if_ops);
	if (unlikely(nss_top_main.virt_if_dentry == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats/virt_if file in debugfs");
		return;
	}

	nss_top_main.tx_rx_virt_if_dentry = debugfs_create_file("tx_rx_virt_if", 0400,
					nss_top_main.stats_dentry, &nss_top_main, &nss_stats_tx_rx_virt_if_ops);
	if (unlikely(nss_top_main.tx_rx_virt_if_dentry == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats/tx_rx_virt_if file in debugfs");
		return;
	}

	/*
	 * L2TPV2 Stats
	 */
	nss_top_main.l2tpv2_dentry = debugfs_create_file("l2tpv2", 0400,
					nss_top_main.stats_dentry, &nss_top_main, &nss_stats_l2tpv2_ops);
	if (unlikely(nss_top_main.l2tpv2_dentry == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats/l2tpv2 file in debugfs");
		return;
	}

	/*
	 * MAP-T Stats
	 */
	nss_top_main.map_t_dentry = debugfs_create_file("map_t", 0400,
					nss_top_main.stats_dentry, &nss_top_main, &nss_stats_map_t_ops);
	if (unlikely(nss_top_main.map_t_dentry == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats/map_t file in debugfs");
		return;
	}

	/*
	 * PPE Stats
	 */
	nss_top_main.ppe_dentry = debugfs_create_dir("ppe", nss_top_main.stats_dentry);
	if (unlikely(nss_top_main.ppe_dentry == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats/ppe directory in debugfs");
		return;
	}

	nss_top_main.ppe_conn_dentry = debugfs_create_file("connection", 0400,
					nss_top_main.ppe_dentry, &nss_top_main, &nss_stats_ppe_conn_ops);
	if (unlikely(nss_top_main.ppe_conn_dentry == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats/ppe/connection file in debugfs");
	}

	nss_top_main.ppe_l3_dentry = debugfs_create_file("l3", 0400,
					nss_top_main.ppe_dentry, &nss_top_main, &nss_stats_ppe_l3_ops);
	if (unlikely(nss_top_main.ppe_l3_dentry == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats/ppe/l3 file in debugfs");
	}

	nss_top_main.ppe_code_dentry = debugfs_create_file("ppe_code", 0400,
					nss_top_main.ppe_dentry, &nss_top_main, &nss_stats_ppe_code_ops);
	if (unlikely(nss_top_main.ppe_code_dentry == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats/ppe/ppe_code file in debugfs");
	}

	/*
	 * PPTP Stats
	 */
	nss_top_main.pptp_dentry = debugfs_create_file("pptp", 0400,
					nss_top_main.stats_dentry, &nss_top_main, &nss_stats_pptp_ops);
	if (unlikely(nss_top_main.pptp_dentry == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats/pptp file in debugfs");
	}

	/*
	 * DTLS Stats
	 */
	nss_top_main.dtls_dentry = debugfs_create_file("dtls", 0400,
					nss_top_main.stats_dentry, &nss_top_main, &nss_stats_dtls_ops);
	if (unlikely(nss_top_main.dtls_dentry == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats/dtls file in debugfs");
		return;
	}

	/*
	 * GRE Tunnel Stats
	 */
	nss_top_main.gre_tunnel_dentry = debugfs_create_file("gre_tunnel", 0400,
					nss_top_main.stats_dentry, &nss_top_main, &nss_stats_gre_tunnel_ops);
	if (unlikely(nss_top_main.gre_tunnel_dentry == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats/gre_tunnel file in debugfs");
		return;
	}

	/*
	 * TrustSec TX Stats
	 */
	nss_top_main.trustsec_tx_dentry = debugfs_create_file("trustsec_tx", 0400,
					nss_top_main.stats_dentry, &nss_top_main, &nss_stats_trustsec_tx_ops);
	if (unlikely(nss_top_main.trustsec_tx_dentry == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats/trustsec_tx file in debugfs");
		return;
	}

	nss_log_init();
}

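/*
 * Example (assuming debugfs is mounted at /sys/kernel/debug): once
 * nss_stats_init() has run, the per-feature counters can be read from
 * userspace with a plain read, e.g.:
 *
 *	cat /sys/kernel/debug/qca-nss-drv/stats/ipv4
 *	cat /sys/kernel/debug/qca-nss-drv/stats/edma/ports/0/stats
 */
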
/*
 * nss_stats_clean()
 * 	Cleanup NSS statistics files
 */
void nss_stats_clean(void)
{
	/*
	 * Remove debugfs tree
	 */
	if (likely(nss_top_main.top_dentry != NULL)) {
		debugfs_remove_recursive(nss_top_main.top_dentry);
		nss_top_main.top_dentry = NULL;
	}
}