/*
2 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17/*
18 * nss_meminfo.c
19 * NSS meminfo subsystem
20 */
21
22#include <linux/seq_file_net.h>
23#include "nss_tx_rx_common.h"
24#include "nss_core.h"
25#include "nss_arch.h"
26#include "nss_meminfo.h"
27
28/*
29 * Store user configuration
30 */
31static char nss_meminfo_user_config[NSS_MEMINFO_USER_CONFIG_MAXLEN];
32module_param_string(meminfo_user_config, nss_meminfo_user_config,
33 NSS_MEMINFO_USER_CONFIG_MAXLEN, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
34MODULE_PARM_DESC(nss_meminfo_user_config, "meminfo user configuration");
35
/* Set once the meminfo debugfs tree has been created, so it is built only once. */
static bool nss_meminfo_debugfs_exist;

/*
 * Name table of memory type presented to user.
 * Indexed by enum value: entry order must match the memtype enumeration.
 */
char *nss_meminfo_memtype_table[NSS_MEMINFO_MEMTYPE_MAX] = {"IMEM", "SDRAM"};
42
43/*
44 * nss_meminfo_alloc_sdram()
45 * Allocate a SDRAM block.
46 */
47static unsigned long nss_meminfo_alloc_sdram(struct nss_ctx_instance *nss_ctx, uint32_t size)
48{
49 unsigned long addr = 0;
50
51 /*
52 * kmalloc() return cache line aligned buffer.
53 */
54 addr = (unsigned long)kmalloc(size, GFP_KERNEL | __GFP_ZERO);
55 if (!addr)
56 nss_info_always("%p: failed to alloc a sdram block of size %u\n", nss_ctx, size);
57
58 return addr;
59}
60
61/*
62 * nss_meminfo_free_sdram()
63 * Free SDRAM memory.
64 */
65static inline void nss_meminfo_free_sdram(struct nss_ctx_instance *nss_ctx, uint32_t dma_addr,
66 unsigned long kern_addr, uint32_t size)
67{
68 /*
69 * Unmap it since every SDRAM memory had been mapped.
70 */
71 dma_unmap_single(nss_ctx->dev, dma_addr, size, DMA_FROM_DEVICE);
72 kfree((void *)kern_addr);
73}
74
75/*
76 * nss_meminfo_alloc_imem()
77 * Allocate an IMEM block in a sequential way.
78 */
79static uint32_t nss_meminfo_alloc_imem(struct nss_ctx_instance *nss_ctx, uint32_t size, int alignment)
80{
81 struct nss_meminfo_ctx *mem_ctx = &nss_ctx->meminfo_ctx;
82 uint32_t new_tail;
83 uint32_t addr = 0;
84 int mask;
85
86 mask = alignment - 1;
87
88 /*
89 * Alignment has to be a power of 2.
90 */
91 nss_assert(!(alignment & mask));
92
93 new_tail = mem_ctx->imem_tail;
94
95 /*
96 * Align up the address if it not aligned.
97 */
98 if (new_tail & mask)
99 new_tail = (new_tail + mask) & ~mask;
100
101 if (size > (mem_ctx->imem_end - new_tail)) {
102 nss_info_always("%p: failed to alloc an IMEM block of size %u\n", nss_ctx, size);
103 return addr;
104 }
105
106 addr = new_tail;
107 mem_ctx->imem_tail = new_tail + size;
108
109 return addr;
110}
111
/*
 * nss_meminfo_free_imem()
 *	Free an IMEM block. Ignore the padding bytes for alignment requirement.
 *
 * IMEM uses a bump allocator, so "freeing" only moves the tail pointer back
 * by 'size' bytes. 'addr' is intentionally unused: memory is fully reclaimed
 * only when blocks are released in LIFO order, and any alignment padding
 * inserted at allocation time is never recovered.
 */
static void nss_meminfo_free_imem(struct nss_ctx_instance *nss_ctx, uint32_t addr, uint32_t size)
{
	struct nss_meminfo_ctx *mem_ctx = &nss_ctx->meminfo_ctx;
	mem_ctx->imem_tail -= size;
}
121
122/*
123 * nss_meminfo_if_user_overwrite()
124 * Return user configured memory type. Otherwise, return -1.
125 */
126static int nss_meminfo_if_user_overwrite(struct nss_ctx_instance *nss_ctx, const char *name)
127{
128 char *user_config;
129 char **mtype_table;
130 char needle[NSS_MEMINFO_BLOCK_NAME_MAXLEN + 6];
131 char user_choice[NSS_MEMINFO_MEMTYPE_NAME_MAXLEN];
132 int i;
133 char *p;
134
135 user_config = nss_meminfo_user_config;
136 mtype_table = nss_meminfo_memtype_table;
137
138 snprintf(needle, sizeof(needle), "<%1d, %s, ", nss_ctx->id, name);
139
140 p = strstr(user_config, needle);
141 if (!p)
142 return -1;
143
144 p += strlen(needle);
145
146 for (i = 0; i < NSS_MEMINFO_MEMTYPE_NAME_MAXLEN - 1; i++) {
147 /*
148 * Each user config is like <core_id, object_name, memory_type>,
149 * it starts with '<' and ends with '>'.
150 */
151 if (*p == '>' || *p == '\0')
152 break;
153 user_choice[i] = *p;
154 p++;
155 }
156
157 user_choice[i] = '\0';
158
159 for (i = 0; i < NSS_MEMINFO_MEMTYPE_MAX; i++)
160 if (!strcmp(mtype_table[i], user_choice))
161 return i;
162
163 return -1;
164}
165
166/*
167 * nss_meminfo_free_block_lists()
168 * Free block node and memory associated with each each memory object.
169 */
170static void nss_meminfo_free_block_lists(struct nss_ctx_instance *nss_ctx)
171{
172 struct nss_meminfo_ctx *mem_ctx;
173 struct nss_meminfo_block_list *l;
174 int i;
175
176 mem_ctx = &nss_ctx->meminfo_ctx;
177 for (i = 0; i < NSS_MEMINFO_MEMTYPE_MAX; i++) {
178 struct nss_meminfo_block *b;
179 l = &mem_ctx->block_lists[i];
180 b = l->head;
181 while (b) {
182 struct nss_meminfo_block *tmp;
183 /*
184 * Free IMEM/SDRAM memory.
185 */
186 switch (i) {
187 case NSS_MEMINFO_MEMTYPE_IMEM:
188 nss_meminfo_free_imem(nss_ctx, b->dma_addr, b->size);
189 break;
190 case NSS_MEMINFO_MEMTYPE_SDRAM:
191 nss_meminfo_free_sdram(nss_ctx, b->dma_addr, b->kern_addr, b->size);
192 break;
193 }
194
195 /*
196 * Free the struct nss_meminfo_block itself.
197 */
198 tmp = b;
199 b = b->next;
200 kfree(tmp);
201 }
202 }
203}
204
205/*
206 * nss_meminfo_init_block_lists()
207 * Initialize block lists and allocate memory for each block.
208 */
209static bool nss_meminfo_init_block_lists(struct nss_ctx_instance *nss_ctx)
210{
211 struct nss_meminfo_ctx *mem_ctx;
212 struct nss_meminfo_block_list *l;
213 struct nss_meminfo_request *r;
214 struct nss_meminfo_map *map;
215 int mtype;
216 unsigned long kern_addr;
217 uint32_t dma_addr;
218 int i;
219
220 mem_ctx = &nss_ctx->meminfo_ctx;
221
222 /*
223 * Fill memory type for each block list.
224 */
225 for (i = 0; i < NSS_MEMINFO_MEMTYPE_MAX; i++)
226 mem_ctx->block_lists[i].memtype = i;
227
228 map = &mem_ctx->meminfo_map;
229
230 /*
231 * Loop through all meminfo requests by checking the per-request magic.
232 */
233 for (r = map->requests; r->magic == NSS_MEMINFO_REQUEST_MAGIC; r++) {
234 struct nss_meminfo_block *b = (struct nss_meminfo_block *)
235 kmalloc(sizeof(struct nss_meminfo_block), GFP_KERNEL);
236 if (!b) {
237 nss_info_always("%p: failed to allocate meminfo block\n", nss_ctx);
238 goto cleanup;
239 }
240
241 b->index = map->num_requests++;
242 b->size = r->size;
243
244 /*
245 * Look up the user-defined memory type.
246 * Return user-defined memory type if exists. Otherwise, return -1.
247 */
248 mtype = nss_meminfo_if_user_overwrite(nss_ctx, r->name);
249 if (mtype == -1)
250 mtype = r->memtype_default;
251 r->memtype_user = mtype;
252
253 switch (mtype) {
254 case NSS_MEMINFO_MEMTYPE_IMEM:
255 /*
256 * Return SoC real address for IMEM as DMA address.
257 */
258 dma_addr = nss_meminfo_alloc_imem(nss_ctx, r->size, r->alignment);
259 if (!dma_addr) {
260 nss_info_always("%p: failed to alloc IMEM block\n", nss_ctx);
261 goto cleanup;
262 }
263
264 /*
265 * Calulate offset to the kernel address (vmap) where the
266 * whole IMEM is mapped onto instead of calling ioremap().
267 */
268 kern_addr = (unsigned long)nss_ctx->vmap + dma_addr - nss_ctx->vphys;
269 break;
270 case NSS_MEMINFO_MEMTYPE_SDRAM:
271 kern_addr = nss_meminfo_alloc_sdram(nss_ctx, r->size);
272 if (!kern_addr) {
273 nss_info_always("%p: failed to alloc SDRAM block\n", nss_ctx);
274 goto cleanup;
275 }
276
277 dma_addr = dma_map_single(nss_ctx->dev, (void *)kern_addr, r->size, DMA_TO_DEVICE);
278 if (unlikely(dma_mapping_error(nss_ctx->dev, dma_addr))) {
279 nss_info_always("%p: failed to map SDRAM block\n", nss_ctx);
280 goto cleanup;
281 }
282 break;
283 default:
284 nss_info_always("%p: %d unsupported memory type\n", nss_ctx, mtype);
285 goto cleanup;
286 }
287
288 /*
289 * Update the request with DMA address for the memory that only be used by FW.
290 */
291 r->addr = dma_addr;
292
293 /*
294 * nss_if_mem_map settings
295 */
296 if (!strcmp(r->name, "nss_if_mem_map_inst")) {
297 mem_ctx->if_map_memtype = mtype;
298 mem_ctx->if_map_dma = dma_addr;
299 mem_ctx->if_map = (struct nss_if_mem_map *)kern_addr;
300 }
301
Cemil Coskun5f51db52018-05-07 17:15:37 -0700302 if (!strcmp(r->name, "debug_boot_log_desc")) {
303 mem_ctx->logbuffer_memtype = mtype;
304 mem_ctx->logbuffer_dma = dma_addr;
305 mem_ctx->logbuffer = (struct nss_log_descriptor *)kern_addr;
306 }
307
Cemil Coskun3bb20512018-07-24 10:42:25 -0700308 if (!strcmp(r->name, "c2c_descs_if_mem_map")) {
309 mem_ctx->c2c_start_memtype = mtype;
310 mem_ctx->c2c_start_dma = dma_addr;
311 }
312
Casey Chencfa28352018-04-21 01:03:02 -0700313 /*
314 * Flush the updated meminfo request.
315 */
316 NSS_CORE_DMA_CACHE_MAINT(r, sizeof(struct nss_meminfo_request), DMA_TO_DEVICE);
317 NSS_CORE_DSB();
318
319 /*
320 * Update the list
321 */
322 l = &mem_ctx->block_lists[mtype];
323 l->num_blks++;
324 l->total_size += r->size;
325
326 b->next = l->head;
327 l->head = b;
328 }
329
330 /*
331 * Verify memory map end magic
332 */
333 if (*((uint16_t *)r) != NSS_MEMINFO_MAP_END_MAGIC)
334 goto cleanup;
335
336 return true;
337
338cleanup:
339 nss_meminfo_free_block_lists(nss_ctx);
340 return false;
341}
342
343/*
344 * nss_meminfo_init_imem()
345 * Initialize IMEM information.
346 */
347static void nss_meminfo_init_imem(struct nss_ctx_instance *nss_ctx)
348{
349 struct nss_meminfo_ctx *mem_ctx = &nss_ctx->meminfo_ctx;
350
351 mem_ctx->imem_head = NSS_IMEM_START + NSS_IMEM_SIZE * nss_ctx->id;
352 mem_ctx->imem_end = mem_ctx->imem_head + NSS_IMEM_SIZE;
353 mem_ctx->imem_tail = mem_ctx->imem_head;
354
355 nss_info("%p: IMEM init: head: 0x%x end: 0x%x tail: 0x%x\n", nss_ctx,
356 mem_ctx->imem_head, mem_ctx->imem_end, mem_ctx->imem_tail);
357}
358
359
360/*
361 * nss_meminfo_allocate_n2h_h2n_rings()
362 * Allocate N2H/H2N rings.
363 */
364static bool nss_meminfo_allocate_n2h_h2n_rings(struct nss_ctx_instance *nss_ctx,
365 struct nss_meminfo_n2h_h2n_info *info)
366{
367 switch (info->memtype) {
368 case NSS_MEMINFO_MEMTYPE_SDRAM:
369 info->kern_addr = nss_meminfo_alloc_sdram(nss_ctx, info->total_size);
370 if (!info->kern_addr)
371 return false;
372
373 info->dma_addr = dma_map_single(nss_ctx->dev, (void *)info->kern_addr,
374 info->total_size, DMA_TO_DEVICE);
375 if (unlikely(dma_mapping_error(nss_ctx->dev, info->dma_addr))) {
376 kfree((void *)info->kern_addr);
377 return false;
378 }
379 break;
380 case NSS_MEMINFO_MEMTYPE_IMEM:
381 info->dma_addr = nss_meminfo_alloc_imem(nss_ctx, info->total_size, L1_CACHE_BYTES);
382 if (!info->dma_addr)
383 return false;
384
385 info->kern_addr = (unsigned long)(nss_ctx->vmap) + info->dma_addr - nss_ctx->vphys;
386 break;
387 default:
388 return false;
389 }
390
391 return true;
392}
393
/*
 * nss_meminfo_configure_n2h_h2n_rings()
 *	Configure N2H/H2N rings and if_map.
 *
 * Allocates both ring blocks (honoring any user memtype override, SDRAM by
 * default), then publishes ring addresses and sizes into the shared
 * nss_if_mem_map structure that the firmware reads.
 *
 * NOTE(review): this relies on nss_meminfo_init_block_lists() having already
 * populated mem_ctx->if_map; called out of order, if_map would be stale/NULL.
 */
static bool nss_meminfo_configure_n2h_h2n_rings(struct nss_ctx_instance *nss_ctx)
{
	struct nss_meminfo_ctx *mem_ctx = &nss_ctx->meminfo_ctx;
	struct nss_meminfo_n2h_h2n_info *h2n_info;
	struct nss_meminfo_n2h_h2n_info *n2h_info;
	struct nss_if_mem_map *if_map;
	int i;
	int mtype;

	h2n_info = &mem_ctx->h2n_info;
	n2h_info = &mem_ctx->n2h_info;

	/*
	 * Check memory type. SDRAM is the default option.
	 */
	mtype = nss_meminfo_if_user_overwrite(nss_ctx, "h2n_rings");
	if (mtype == -1)
		mtype = NSS_MEMINFO_MEMTYPE_SDRAM;

	h2n_info->memtype = mtype;

	mtype = nss_meminfo_if_user_overwrite(nss_ctx, "n2h_rings");
	if (mtype == -1)
		mtype = NSS_MEMINFO_MEMTYPE_SDRAM;

	n2h_info->memtype = mtype;

	/*
	 * Each ring holds NSS_RING_SIZE descriptors plus 2 extra slots
	 * (presumably reserved per-ring metadata/guard slots — TODO confirm);
	 * the same +2 stride is used in the per-ring address math below.
	 */
	n2h_info->total_size = sizeof(struct n2h_descriptor) * NSS_N2H_RING_COUNT * (NSS_RING_SIZE + 2);
	h2n_info->total_size = sizeof(struct h2n_descriptor) * NSS_H2N_RING_COUNT * (NSS_RING_SIZE + 2);

	/*
	 * N2H ring allocations
	 */
	if (!(nss_meminfo_allocate_n2h_h2n_rings(nss_ctx, n2h_info))) {
		nss_info_always("%p: failed to allocate/map n2h rings\n", nss_ctx);
		return false;
	}

	/*
	 * H2N ring allocations. On failure, only the already-allocated N2H
	 * rings need to be released (see the cleanup label).
	 */
	if (!(nss_meminfo_allocate_n2h_h2n_rings(nss_ctx, h2n_info))) {
		nss_info_always("%p: failed to allocate/map h2n_rings\n", nss_ctx);
		goto cleanup;
	}

	/*
	 * Bring a fresh copy of if_map from memory in order to read it correctly.
	 */
	if_map = mem_ctx->if_map;
	NSS_CORE_DMA_CACHE_MAINT((void *)if_map, sizeof(struct nss_if_mem_map), DMA_FROM_DEVICE);
	NSS_CORE_DSB();

	if_map->n2h_rings = NSS_N2H_RING_COUNT;
	if_map->h2n_rings = NSS_H2N_RING_COUNT;

	/*
	 * N2H ring settings: the host keeps a kernel-virtual descriptor
	 * pointer per ring, while if_map carries the matching DMA address
	 * for the firmware.
	 */
	for (i = 0; i < NSS_N2H_RING_COUNT; i++) {
		struct hlos_n2h_desc_ring *n2h_desc_ring = &nss_ctx->n2h_desc_ring[i];
		n2h_desc_ring->desc_ring.desc = (struct n2h_descriptor *)(n2h_info->kern_addr + i * sizeof(struct n2h_descriptor) * (NSS_RING_SIZE + 2));
		n2h_desc_ring->desc_ring.size = NSS_RING_SIZE;
		n2h_desc_ring->hlos_index = if_map->n2h_hlos_index[i];

		if_map->n2h_desc_if[i].size = NSS_RING_SIZE;
		if_map->n2h_desc_if[i].desc_addr = n2h_info->dma_addr + i * sizeof(struct n2h_descriptor) * (NSS_RING_SIZE + 2);
		nss_info("%p: N2H ring %d, size %d, addr = %x\n", nss_ctx, i, if_map->n2h_desc_if[i].size, if_map->n2h_desc_if[i].desc_addr);
	}

	/*
	 * H2N ring settings: same layout as N2H, plus a per-ring lock for
	 * host-side producers.
	 */
	for (i = 0; i < NSS_H2N_RING_COUNT; i++) {
		struct hlos_h2n_desc_rings *h2n_desc_ring = &nss_ctx->h2n_desc_rings[i];
		h2n_desc_ring->desc_ring.desc = (struct h2n_descriptor *)(h2n_info->kern_addr + i * sizeof(struct h2n_descriptor) * (NSS_RING_SIZE + 2));
		h2n_desc_ring->desc_ring.size = NSS_RING_SIZE;
		h2n_desc_ring->hlos_index = if_map->h2n_hlos_index[i];
		spin_lock_init(&h2n_desc_ring->lock);

		if_map->h2n_desc_if[i].size = NSS_RING_SIZE;
		if_map->h2n_desc_if[i].desc_addr = h2n_info->dma_addr + i * sizeof(struct h2n_descriptor) * (NSS_RING_SIZE + 2);
		nss_info("%p: H2N ring %d, size %d, addr = %x\n", nss_ctx, i, if_map->h2n_desc_if[i].size, if_map->h2n_desc_if[i].desc_addr);
	}

	/*
	 * Flush the updated nss_if_mem_map.
	 */
	NSS_CORE_DMA_CACHE_MAINT((void *)if_map, sizeof(struct nss_if_mem_map), DMA_TO_DEVICE);
	NSS_CORE_DSB();

	return true;

cleanup:
	if (n2h_info->memtype == NSS_MEMINFO_MEMTYPE_SDRAM)
		nss_meminfo_free_sdram(nss_ctx, n2h_info->dma_addr, n2h_info->kern_addr, n2h_info->total_size);
	else
		nss_meminfo_free_imem(nss_ctx, n2h_info->dma_addr, n2h_info->total_size);

	nss_meminfo_free_block_lists(nss_ctx);
	return false;
}
500
501/*
502 * nss_meminfo_config_show()
503 * function to show meinfo configuration per core.
504 */
505static int nss_meminfo_config_show(struct seq_file *seq, void *v)
506{
507 struct nss_ctx_instance *nss_ctx;
508 struct nss_meminfo_ctx *mem_ctx;
509 struct nss_meminfo_n2h_h2n_info *n2h_info;
510 struct nss_meminfo_n2h_h2n_info *h2n_info;
511 struct nss_meminfo_map *map;
512 struct nss_meminfo_request *r;
513 int nss_id;
514 int i;
515
516 /*
517 * i_private is passed to us by debug_fs_create()
518 */
519 nss_id = (int)(nss_ptr_t)seq->private;
Suman Ghosh9f7b3702018-09-21 19:51:40 +0530520 if (nss_id < 0 || nss_id >= nss_top_main.num_nss) {
Casey Chencfa28352018-04-21 01:03:02 -0700521 nss_warning("nss_id: %d is not valid\n", nss_id);
522 return -ENODEV;
523 }
524
525 nss_ctx = &nss_top_main.nss[nss_id];
526 NSS_VERIFY_CTX_MAGIC(nss_ctx);
527
528 mem_ctx = &nss_ctx->meminfo_ctx;
529 map = &mem_ctx->meminfo_map;
530 n2h_info = &mem_ctx->n2h_info;
531 h2n_info = &mem_ctx->h2n_info;
532
533 seq_printf(seq, "%-5s %-32s %-7s %-7s %-10s %-10s\n",
534 "Index", "Name", "Default", "User", "Size", "DMA Addr");
535 seq_printf(seq, "%-5s %-32s %-7s %-7s 0x%-8x 0x%-8x\n",
536 "N/A", "n2h_rings", "SDRAM",
537 nss_meminfo_memtype_table[n2h_info->memtype],
538 n2h_info->total_size, n2h_info->dma_addr);
539 seq_printf(seq, "%-5s %-32s %-7s %-7s 0x%-8x 0x%-8x\n",
540 "N/A", "h2n_rings", "SDRAM",
541 nss_meminfo_memtype_table[h2n_info->memtype],
542 h2n_info->total_size, h2n_info->dma_addr);
543
544 r = map->requests;
545 for (i = 0; i < map->num_requests; i++) {
546 seq_printf(seq, "%-5d %-32s %-7s %-7s 0x%-8x 0x%-8x\n",
547 i, r[i].name,
548 nss_meminfo_memtype_table[r[i].memtype_default],
549 nss_meminfo_memtype_table[r[i].memtype_user],
550 r[i].size, r[i].addr);
551 }
552
553 seq_printf(seq, "Available IMEM: 0x%x\n", mem_ctx->imem_end - mem_ctx->imem_tail);
554 seq_printf(seq, "How to configure? \n");
555 seq_printf(seq, "Overwrite the /etc/modules.d/32-qca-nss-drv with following contents then reboot\n\n");
556 seq_printf(seq, "qca-nss-drv meminfo_user_config=\"<core_id, name, memory_type>, ..\"\n\n");
557 seq_printf(seq, "For example, <1, h2n_rings, IMEM> stands for: h2n_rings of core 1 is on IMEM\n");
558
559 return 0;
560}
561
/*
 * nss_meminfo_debugfs_file_open()
 *	function to open meminfo debugfs.
 *
 * Standard seq_file single_open() wrapper; i_private (the core id set at
 * debugfs_create_file() time) is forwarded to nss_meminfo_config_show()
 * as seq->private.
 */
static int nss_meminfo_debugfs_file_open(struct inode *inode, struct file *file)
{
	return single_open(file, nss_meminfo_config_show, inode->i_private);
}
570
571static struct file_operations nss_meminfo_debugfs_ops = {
572 .owner = THIS_MODULE,
573 .open = nss_meminfo_debugfs_file_open,
574 .read = seq_read,
575 .llseek = seq_lseek,
576 .release = single_release,
577};
578
579/*
580 * nss_meminfo_init_debugfs()
581 * Init meminfo debugfs.
582 */
583static void nss_meminfo_init_debugfs(struct nss_ctx_instance *nss_ctx)
584{
585 int i;
586 struct dentry *meminfo_main_dentry;
587 struct dentry *meminfo_core_dentries[NSS_MAX_CORES];
588
589 if (nss_meminfo_debugfs_exist)
590 return;
591
592 /*
593 * Create directory for showing meminfo configuration of each core.
594 */
595 meminfo_main_dentry = debugfs_create_dir("meminfo", nss_top_main.top_dentry);
596 if (unlikely(!meminfo_main_dentry)) {
597 nss_warning("Failed to create qca-nss-drv/meminfo directory in debugfs\n");
598 return;
599 }
600
Suman Ghosh9f7b3702018-09-21 19:51:40 +0530601 for (i = 0; i < nss_top_main.num_nss; i++) {
Casey Chencfa28352018-04-21 01:03:02 -0700602 char file[10];
603 snprintf(file, sizeof(file), "core%d", i);
604 meminfo_core_dentries[i] = debugfs_create_file(file, 0400, meminfo_main_dentry,
605 (void *)(nss_ptr_t)i, &nss_meminfo_debugfs_ops);
606 if (unlikely(!meminfo_core_dentries[i])) {
607 int j;
608 for (j = 0; j < i; j++)
609 debugfs_remove(meminfo_core_dentries[j]);
610 debugfs_remove(meminfo_main_dentry);
611 nss_warning("Failed to create qca-nss-drv/meminfo/%s file in debugfs", file);
612 return;
613 }
614 }
615
616 nss_meminfo_debugfs_exist = true;
617 nss_info("nss meminfo user config: %s\n", nss_meminfo_user_config);
618}
619
620/*
621 * nss_meminfo_init
622 * Initilization
623 *
624 */
625bool nss_meminfo_init(struct nss_ctx_instance *nss_ctx)
626{
627 struct nss_meminfo_ctx *mem_ctx;
628 uint32_t *meminfo_start;
629 struct nss_meminfo_map *map;
630
631 NSS_VERIFY_CTX_MAGIC(nss_ctx);
632 mem_ctx = &nss_ctx->meminfo_ctx;
633
634 /*
635 * meminfo_start is the label where the start address of meminfo map is stored.
636 */
637 meminfo_start = (uint32_t *)ioremap_nocache(nss_ctx->load + NSS_MEMINFO_MAP_START_OFFSET,
638 NSS_MEMINFO_RESERVE_AREA_SIZE);
639 if (!meminfo_start) {
640 nss_info_always("%p: cannot remap meminfo start\n", nss_ctx);
641 return false;
642 }
643
644 /*
645 * Check meminfo start magic
646 */
647 if ((uint16_t)meminfo_start[0] != NSS_MEMINFO_RESERVE_AREA_MAGIC) {
648 nss_info_always("%p: failed to verify meminfo start magic\n", nss_ctx);
649 return false;
650 }
651
652 map = &mem_ctx->meminfo_map;
653 map->start = (uint32_t *)ioremap_cache(meminfo_start[1], NSS_MEMINFO_MAP_SIZE);
654 if (!map->start) {
655 nss_info_always("%p: failed to remap meminfo map\n", nss_ctx);
656 return false;
657 }
658
659 /*
660 * Check meminfo map magic
661 */
662 if ((uint16_t)map->start[0] != NSS_MEMINFO_MAP_START_MAGIC) {
663 nss_info_always("%p: failed to verify meminfo map magic\n", nss_ctx);
664 return false;
665 }
666
667 /*
668 * Meminfo map settings
669 */
670 map->num_requests = 0;
671 map->requests = (struct nss_meminfo_request *)(map->start + 1);
672
673 /*
674 * Init IMEM
675 */
676 nss_meminfo_init_imem(nss_ctx);
677
678 /*
679 * Init meminfo block lists
680 */
681 if (!nss_meminfo_init_block_lists(nss_ctx)) {
682 nss_info_always("%p: failed to initialize meminfo block lists\n", nss_ctx);
683 return false;
684 }
685
686 /*
687 * Configure N2H/H2N rings and nss_if_mem_map
688 */
689 if (!nss_meminfo_configure_n2h_h2n_rings(nss_ctx))
690 return false;
691
692 nss_meminfo_init_debugfs(nss_ctx);
693
694 nss_info_always("%p: meminfo init succeed\n", nss_ctx);
695 return true;
696}