/*
 **************************************************************************
 * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all copies.
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 **************************************************************************
 */
/*
 * nss_log.c
 *	NSS FW debug logger retrieval from DDR (memory)
 */
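
/*
 * Usage sketch (assuming debugfs is mounted at /sys/kernel/debug; the exact
 * sysctl path for nss_ctl_logbuf is registered elsewhere in this driver):
 *	- write the desired number of entries to the nss_ctl_logbuf sysctl to
 *	  allocate the per-core log buffers (see nss_logbuffer_handler()),
 *	- read /sys/kernel/debug/qca-nss-drv/logs/core<N>, e.g. with cat or
 *	  tail, to retrieve the firmware log of core N.
 */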
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/posix-timers.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <nss_hal.h>
#include "nss_core.h"
#include "nss_log.h"

/*
 * Private data for each device file open instance
 */
struct nss_log_data {
	void *load_mem;		/* Pointer to struct nss_log_descriptor - descriptor data */
	dma_addr_t dma_addr;	/* Handle to DMA */
	uint32_t last_entry;	/* Last known sampled entry (or index) */
	uint32_t nentries;	/* Caches the total number of entries of log buffer */
	int nss_id;		/* NSS Core id being used */
};

/*
 * Saves the ring buffer address for logging per NSS core
 */
struct nss_ring_buffer_addr {
	void *addr;		/* Pointer to struct nss_log_descriptor */
	dma_addr_t dma_addr;	/* DMA Handle */
	uint32_t nentries;	/* Number of entries in the ring buffer */
	int refcnt;		/* Reference count */
};

static struct nss_ring_buffer_addr nss_rbe[NSS_MAX_CORES];

static DEFINE_MUTEX(nss_log_mutex);
static wait_queue_head_t nss_log_wq;
static nss_log_msg_callback_t nss_debug_interface_cb;
static void *nss_debug_interface_app_data = NULL;

static wait_queue_head_t msg_wq;
enum nss_cmn_response msg_response;
static bool msg_event;

/*
 * nss_log_llseek()
 *	Seek operation.
 */
static loff_t nss_log_llseek(struct file *file, loff_t offset, int origin)
{
	struct nss_log_data *data = file->private_data;

	switch (origin) {
	case SEEK_SET:
		break;
	case SEEK_CUR:
		offset += file->f_pos;
		break;
	case SEEK_END:
		offset = ((data->nentries * sizeof(struct nss_log_entry)) + sizeof(struct nss_log_descriptor)) - offset;
		break;
	default:
		return -EINVAL;
	}

	return (offset >= 0) ? (file->f_pos = offset) : -EINVAL;
}

/*
 * nss_log_open()
 *	Open operation for our device. We let any number of instances run concurrently.
 */
static int nss_log_open(struct inode *inode, struct file *filp)
{
	struct nss_log_data *data = NULL;
	struct nss_top_instance *nss_top;
	struct nss_ctx_instance *nss_ctx;
	int nss_id;

	/*
	 * i_private is passed to us by debugfs_create_file()
	 */
	nss_id = (int)(nss_ptr_t)inode->i_private;
	if (nss_id < 0 || nss_id >= NSS_MAX_CORES) {
		nss_warning("nss_id is not valid: %d\n", nss_id);
		return -ENODEV;
	}

	nss_top = &nss_top_main;
	nss_ctx = &nss_top->nss[nss_id];

	data = kzalloc(sizeof(struct nss_log_data), GFP_KERNEL);
	if (!data) {
		nss_warning("%p: Failed to allocate memory for log_data", nss_ctx);
		return -ENOMEM;
	}

	mutex_lock(&nss_log_mutex);
	if (!nss_rbe[nss_id].addr) {
		mutex_unlock(&nss_log_mutex);
		kfree(data);
		nss_warning("%p: Ring buffer not configured yet for nss_id:%d", nss_ctx, nss_id);
		return -EIO;
	}

	/*
	 * Actual ring buffer.
	 */
	data->load_mem = nss_rbe[nss_id].addr;
	data->last_entry = 0;
	data->nentries = nss_rbe[nss_id].nentries;
	data->dma_addr = nss_rbe[nss_id].dma_addr;

	/*
	 * Increment the reference count so that we don't free
	 * the memory while it is being read.
	 */
	nss_rbe[nss_id].refcnt++;
	data->nss_id = nss_id;
	filp->private_data = data;
	mutex_unlock(&nss_log_mutex);

	return 0;
}

/*
 * nss_log_release()
 *	Called when close() is invoked on the file descriptor. Drops our
 *	reference on the per-core ring buffer and frees the private data.
 */
static int nss_log_release(struct inode *inode, struct file *filp)
{
	struct nss_log_data *data = filp->private_data;

	if (!data) {
		return -EINVAL;
	}

	mutex_lock(&nss_log_mutex);
	nss_rbe[data->nss_id].refcnt--;
	BUG_ON(nss_rbe[data->nss_id].refcnt < 0);
	if (nss_rbe[data->nss_id].refcnt == 0) {
		wake_up(&nss_log_wq);
	}
	mutex_unlock(&nss_log_mutex);
	kfree(data);
	return 0;
}

/*
 * nss_log_current_entry()
 *	Reads current entry index from NSS log descriptor.
 */
static uint32_t nss_log_current_entry(struct nss_log_descriptor *desc)
{
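	/*
	 * The descriptor is written by the NSS firmware; the read barrier
	 * keeps the load of current_entry ordered after the caller's
	 * dma_sync_single_for_cpu() of the descriptor.
	 */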
	rmb();
	return desc->current_entry;
}

/*
 * nss_log_read()
 *	Read operation lets commands like cat and tail read our memory log buffer data.
 */
static ssize_t nss_log_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
{
	struct nss_log_data *data = filp->private_data;
	struct nss_log_descriptor *desc;
	ssize_t bytes = 0;
	size_t b;
	struct nss_log_entry *rb;
	uint32_t entry;
	uint32_t offset, index;
	char msg[NSS_LOG_OUTPUT_LINE_SIZE];

	if (!data) {
		return -EINVAL;
	}

	desc = data->load_mem;
	if (!desc) {
		nss_warning("%p: load_mem is NULL", data);
		return -EINVAL;
	}

	/*
	 * If the buffer is too small to fit even one entry.
	 */
	if (size < NSS_LOG_OUTPUT_LINE_SIZE) {
		return 0;
	}

	/*
	 * Get the current index
	 */
	dma_sync_single_for_cpu(NULL, data->dma_addr, sizeof(struct nss_log_descriptor), DMA_FROM_DEVICE);
	entry = nss_log_current_entry(desc);

	/*
	 * If the current and last sampled indexes are the same, bail out.
	 */
	if (unlikely(data->last_entry == entry)) {
		return 0;
	}

	/*
	 * If this is the first read (after open) on our device file.
	 */
	if (unlikely(*ppos == 0)) {
		/*
		 * If the log buffer has rolled over, which is almost always
		 * the case: current_entry counts every entry the FW has ever
		 * produced, so anything older than (entry - nentries) has
		 * already been overwritten.
		 */
		if (likely(entry > data->nentries)) {
			/*
			 * Start from the oldest entry that is still
			 * present in the ring buffer.
			 */
			data->last_entry = entry - data->nentries;
		} else {
			data->last_entry = 0;
		}
	} else if (unlikely(entry > data->nentries && ((entry - data->nentries) > data->last_entry))) {
		/*
		 * If the FW is producing entries faster than we can consume
		 * them, restrict our iteration to what is still in the ring.
		 */
		data->last_entry = entry - data->nentries;
	}

	/*
	 * Iterate over indexes.
	 */
	while (entry > data->last_entry) {
		index = offset = (data->last_entry % data->nentries);
		offset = (offset * sizeof(struct nss_log_entry))
			+ offsetof(struct nss_log_descriptor, log_ring_buffer);

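		/*
		 * Sync only the entry we are about to read from the DMA
		 * buffer before dereferencing it.
		 */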
		dma_sync_single_for_cpu(NULL, data->dma_addr + offset,
				sizeof(struct nss_log_entry), DMA_FROM_DEVICE);
		rb = &desc->log_ring_buffer[index];

		b = snprintf(msg, sizeof(msg), NSS_LOG_LINE_FORMAT,
				rb->thread_num, rb->timestamp, rb->message);

		data->last_entry++;

		/*
		 * Copy to the user buffer; if that fails, return failure.
		 */
		if (copy_to_user(buf + bytes, msg, b) == 0) {
			bytes += b;
		} else {
			bytes = -EFAULT;
			break;
		}

		/*
		 * If we ran out of space in the buffer.
		 */
		if ((bytes + NSS_LOG_OUTPUT_LINE_SIZE) >= size)
			break;
	}

	if (bytes > 0)
		*ppos = bytes;

	return bytes;
}

struct file_operations nss_logs_core_ops = {
	.owner = THIS_MODULE,
	.open = nss_log_open,
	.read = nss_log_read,
	.release = nss_log_release,
	.llseek = nss_log_llseek,
};

/*
 * nss_debug_interface_set_callback()
 *	Sets the callback
 */
void nss_debug_interface_set_callback(nss_log_msg_callback_t cb, void *app_data)
{
	nss_debug_interface_cb = cb;
	nss_debug_interface_app_data = app_data;
}

/*
 * nss_debug_interface_event()
 *	Received an event from NSS FW
 */
static void nss_debug_interface_event(void *app_data, struct nss_debug_interface_msg *nim)
{
	struct nss_cmn_msg *ncm = (struct nss_cmn_msg *)nim;

	msg_response = ncm->response;
	msg_event = true;
	wake_up(&msg_wq);
}

/*
 * nss_debug_interface_handler()
 *	Handle NSS -> HLOS messages for the debug interface.
 */
static void nss_debug_interface_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, __attribute__((unused)) void *app_data)
{
	struct nss_debug_interface_msg *ntm = (struct nss_debug_interface_msg *)ncm;
	nss_log_msg_callback_t cb;

	BUG_ON(ncm->interface != NSS_DEBUG_INTERFACE);

	/*
	 * Is this a valid request/response packet?
	 */
	if (ncm->type > NSS_DEBUG_INTERFACE_TYPE_MAX) {
		nss_warning("%p: received invalid message %d for debug interface", nss_ctx, ncm->type);
		return;
	}

	if (nss_cmn_get_msg_len(ncm) > sizeof(struct nss_debug_interface_msg)) {
		nss_warning("%p: Length of message is greater than required: %d", nss_ctx, nss_cmn_get_msg_len(ncm));
		return;
	}

	nss_core_log_msg_failures(nss_ctx, ncm);

	/*
	 * Update the callback and app_data for NOTIFY messages.
	 */
	if (ncm->response == NSS_CMM_RESPONSE_NOTIFY) {
		ncm->cb = (nss_ptr_t)nss_debug_interface_cb;
		ncm->app_data = (nss_ptr_t)nss_debug_interface_app_data;
	}

	/*
	 * Do we have a callback?
	 */
	if (!ncm->cb) {
		nss_trace("%p: cb is null for interface %d", nss_ctx, ncm->interface);
		return;
	}

	cb = (nss_log_msg_callback_t)ncm->cb;
	cb((void *)ncm->app_data, ntm);
}

/*
 * nss_debug_interface_tx()
 *	Transmit a debug interface message to NSS FW
 */
static nss_tx_status_t nss_debug_interface_tx(struct nss_ctx_instance *nss_ctx, struct nss_debug_interface_msg *msg)
{
	struct nss_debug_interface_msg *nm;
	struct nss_cmn_msg *ncm = &msg->cm;
	struct sk_buff *nbuf;
	int32_t status;

	if (unlikely(nss_ctx->state != NSS_CORE_STATE_INITIALIZED)) {
		nss_warning("%p: debug if msg dropped as core not ready", nss_ctx);
		return NSS_TX_FAILURE_NOT_READY;
	}

	/*
	 * Sanity check the message
	 */
	if (ncm->interface != NSS_DEBUG_INTERFACE) {
		nss_warning("%p: tx request for another interface: %d", nss_ctx, ncm->interface);
		return NSS_TX_FAILURE;
	}

	if (ncm->type > NSS_DEBUG_INTERFACE_TYPE_MAX) {
		nss_warning("%p: message type out of range: %d", nss_ctx, ncm->type);
		return NSS_TX_FAILURE;
	}

	if (nss_cmn_get_msg_len(ncm) > sizeof(struct nss_debug_interface_msg)) {
		nss_warning("%p: message length is invalid: %d", nss_ctx, nss_cmn_get_msg_len(ncm));
		return NSS_TX_FAILURE;
	}

	nbuf = dev_alloc_skb(NSS_NBUF_PAYLOAD_SIZE);
	if (unlikely(!nbuf)) {
		NSS_PKT_STATS_INCREMENT(nss_ctx, &nss_ctx->nss_top->stats_drv[NSS_STATS_DRV_NBUF_ALLOC_FAILS]);
		nss_warning("%p: msg dropped as command allocation failed", nss_ctx);
		return NSS_TX_FAILURE;
	}

	/*
	 * Copy the message to our skb
	 */
	nm = (struct nss_debug_interface_msg *)skb_put(nbuf, sizeof(struct nss_debug_interface_msg));
	memcpy(nm, msg, sizeof(struct nss_debug_interface_msg));

	status = nss_core_send_buffer(nss_ctx, 0, nbuf, NSS_IF_CMD_QUEUE, H2N_BUFFER_CTRL, 0);
	if (status != NSS_CORE_STATUS_SUCCESS) {
		dev_kfree_skb_any(nbuf);
		nss_warning("%p: Unable to enqueue 'debug if message'\n", nss_ctx);
		return NSS_TX_FAILURE;
	}

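	/*
	 * Kick the NSS core so it picks the command up from the queue.
	 */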
	nss_hal_send_interrupt(nss_ctx, NSS_H2N_INTR_DATA_COMMAND_QUEUE);

	NSS_PKT_STATS_INCREMENT(nss_ctx, &nss_ctx->nss_top->stats_drv[NSS_STATS_DRV_TX_CMD_REQ]);
	return NSS_TX_SUCCESS;
}

/*
 * nss_debug_log_buffer_alloc()
 *	Allocates and initializes the log buffer used by the NSS FW for logging.
 */
bool nss_debug_log_buffer_alloc(uint8_t nss_id, uint32_t nentry)
{
	struct nss_ring_buffer_addr old_rbe;
	struct nss_debug_interface_msg msg;
	struct nss_debug_log_memory_msg *dbg;
	struct nss_top_instance *nss_top;
	struct nss_ctx_instance *nss_ctx;
	dma_addr_t dma_addr;
	uint32_t size;
	void *addr = NULL;
	nss_tx_status_t status;
	bool err = false;
	bool old_state = false;

	if (nss_id >= NSS_MAX_CORES) {
		return false;
	}

	nss_top = &nss_top_main;
	nss_ctx = &nss_top->nss[nss_id];

	if (nss_ctx->state != NSS_CORE_STATE_INITIALIZED) {
		nss_warning("%p: NSS Core:%d is not initialized yet\n", nss_ctx, nss_id);
		return false;
	}

	memset(&msg, 0, sizeof(struct nss_debug_interface_msg));

	size = sizeof(struct nss_log_descriptor) + (sizeof(struct nss_log_entry) * nentry);
	addr = kmalloc(size, GFP_ATOMIC);
	if (!addr) {
		nss_warning("%p: Failed to allocate memory for logging (size:%d)\n", nss_ctx, size);
		return false;
	}

	memset(addr, 0, size);
	dma_addr = (uint32_t)dma_map_single(nss_ctx->dev, addr, size, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(nss_ctx->dev, dma_addr))) {
		nss_warning("%p: Failed to map address in DMA", nss_ctx);
		goto fail2;
	}

	/*
	 * If we already have a ring buffer associated with this nss_id, then
	 * we must wait before we attach a new ring buffer.
	 */
	mutex_lock(&nss_log_mutex);
	if (nss_rbe[nss_id].addr) {
		mutex_unlock(&nss_log_mutex);
		if (!wait_event_timeout(nss_log_wq, nss_rbe[nss_id].refcnt == 0, 5 * HZ)) {
			nss_warning("%p: Timeout waiting for refcnt to become 0\n", nss_ctx);
			goto fail1;
		}

		mutex_lock(&nss_log_mutex);
		if (!nss_rbe[nss_id].addr) {
			mutex_unlock(&nss_log_mutex);
			goto fail1;
		}
		if (nss_rbe[nss_id].refcnt > 0) {
			mutex_unlock(&nss_log_mutex);
			nss_warning("%p: Some other thread is contending; opting out\n", nss_ctx);
			goto fail1;
		}

		/*
		 * Save the original DMA buffer state. If we fail down the line,
		 * we restore it; otherwise the old buffer is freed once we get
		 * an ACK from the NSS FW.
		 */
		old_state = true;
		memcpy(&old_rbe, &nss_rbe[nss_id], sizeof(struct nss_ring_buffer_addr));
	}

	nss_rbe[nss_id].addr = addr;
	nss_rbe[nss_id].nentries = nentry;
	nss_rbe[nss_id].refcnt = 1;	/* Block other threads till we are done */
	nss_rbe[nss_id].dma_addr = dma_addr;
	mutex_unlock(&nss_log_mutex);

	memset(&msg, 0, sizeof(struct nss_debug_interface_msg));
	nss_cmn_msg_init(&msg.cm, NSS_DEBUG_INTERFACE, NSS_DEBUG_INTERFACE_TYPE_LOG_BUF_INIT,
		sizeof(struct nss_debug_log_memory_msg), nss_debug_interface_event, NULL);
	dbg = &msg.msg.addr;
	dbg->nentry = nentry;
	dbg->version = NSS_DEBUG_LOG_VERSION;
	dbg->phy_addr = dma_addr;

	msg_event = false;
	status = nss_debug_interface_tx(nss_ctx, &msg);
	if (status != NSS_TX_SUCCESS) {
		nss_warning("%p: Failed to send message to debug interface:%d\n", nss_ctx, status);
		err = true;
	} else {
		int r;

		/*
		 * Wait for 5 seconds since this is a critical operation.
		 */
		r = wait_event_timeout(msg_wq, msg_event == true, 5 * HZ);
		if (r == 0) {
			nss_warning("%p: Timed out sending message to debug interface\n", nss_ctx);
			err = true;
		} else if (msg_response != NSS_CMN_RESPONSE_ACK) {
			nss_warning("%p: Response error for message to debug interface:%d\n", nss_ctx, msg_response);
			err = true;
		}
	}

	/*
	 * If there was a previous ring buffer allocation.
	 */
	if (old_state == true) {
		/*
		 * If we didn't fail, unmap and free the previous DMA buffer.
		 */
		if (err == false) {
			uint32_t old_size;

			old_size = sizeof(struct nss_log_descriptor) +
				(sizeof(struct nss_log_entry) * old_rbe.nentries);
			dma_unmap_single(nss_ctx->dev, old_rbe.dma_addr, old_size, DMA_FROM_DEVICE);
			kfree(old_rbe.addr);
		} else {
			/*
			 * Restore the original DMA buffer since we failed somewhere.
			 */
			mutex_lock(&nss_log_mutex);
			memcpy(&nss_rbe[nss_id], &old_rbe, sizeof(struct nss_ring_buffer_addr));
			mutex_unlock(&nss_log_mutex);
			wake_up(&nss_log_wq);
		}
	} else {
		/*
		 * There was no log buffer previously allocated from the host side.
		 * If there was an error, reset the state. Note that we are still
		 * holding the refcnt.
		 */
		if (err == true) {
			mutex_lock(&nss_log_mutex);
			nss_rbe[nss_id].addr = NULL;
			nss_rbe[nss_id].nentries = 0;
			nss_rbe[nss_id].refcnt = 0;
			nss_rbe[nss_id].dma_addr = 0;
			mutex_unlock(&nss_log_mutex);
			wake_up(&nss_log_wq);
		}
	}

	if (err == false) {
		mutex_lock(&nss_log_mutex);
		nss_rbe[nss_id].refcnt--;	/* we are done */
		mutex_unlock(&nss_log_mutex);
		wake_up(&nss_log_wq);
		return true;
	}

fail1:
	if (addr) {
		dma_unmap_single(nss_ctx->dev, dma_addr, size, DMA_FROM_DEVICE);
	}
fail2:
	kfree(addr);
	wake_up(&nss_log_wq);
	return false;
}

/*
 * nss_logbuffer_handler()
 *	Sysctl handler that enables NSS FW debug log output.
 */
int nss_logbuffer_handler(struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;
	int core_status;
	int i;

	ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
	if (ret) {
		return ret;
	}

	if (!write) {
		return ret;
	}

	if (nss_ctl_logbuf < 32) {
		printk("Invalid NSS FW logbuffer size:%d (must be at least 32)\n", nss_ctl_logbuf);
		nss_ctl_logbuf = 0;
		return ret;
	}

	for (i = 0; i < NSS_MAX_CORES; i++) {
		/*
		 * Register the callback handler and allocate the debug log buffers
		 */
		core_status = nss_core_register_handler(&nss_top_main.nss[i], NSS_DEBUG_INTERFACE, nss_debug_interface_handler, NULL);
		if (core_status != NSS_CORE_STATUS_SUCCESS) {
			nss_warning("NSS logbuffer init failed with register handler:%d\n", core_status);
		}

		if (nss_debug_log_buffer_alloc(i, nss_ctl_logbuf) == false) {
			nss_warning("%d: Failed to set debug log buffer on NSS core", i);
		}
	}

	return ret;
}

/*
 * nss_log_init()
 *	Initializes NSS FW log retrieval through debugfs.
 */
void nss_log_init(void)
{
	int i;

	memset(nss_rbe, 0, sizeof(nss_rbe));
	init_waitqueue_head(&nss_log_wq);
	init_waitqueue_head(&msg_wq);

	/*
	 * Create directory for obtaining NSS FW logs from each core
	 */
	nss_top_main.logs_dentry = debugfs_create_dir("logs", nss_top_main.top_dentry);
	if (unlikely(!nss_top_main.logs_dentry)) {
		nss_warning("Failed to create qca-nss-drv/logs directory in debugfs");
		return;
	}

	for (i = 0; i < NSS_MAX_CORES; i++) {
		char file[10];

		snprintf(file, sizeof(file), "core%d", i);
		nss_top_main.core_log_dentry = debugfs_create_file(file, 0400,
				nss_top_main.logs_dentry, (void *)(nss_ptr_t)i, &nss_logs_core_ops);
		if (unlikely(!nss_top_main.core_log_dentry)) {
			nss_warning("Failed to create qca-nss-drv/logs/%s file in debugfs", file);
			return;
		}
	}

	nss_debug_interface_set_callback(nss_debug_interface_event, NULL);
}