/*
 * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * nss_meminfo.c
 *	NSS meminfo subsystem
 */

#include <linux/seq_file_net.h>
#include "nss_tx_rx_common.h"
#include "nss_core.h"
#include "nss_arch.h"
#include "nss_meminfo.h"

/*
 * Store user configuration
 */
static char nss_meminfo_user_config[NSS_MEMINFO_USER_CONFIG_MAXLEN];
module_param_string(meminfo_user_config, nss_meminfo_user_config,
		    NSS_MEMINFO_USER_CONFIG_MAXLEN, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
MODULE_PARM_DESC(meminfo_user_config, "meminfo user configuration");
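
/*
 * Example (illustrative only), following the "<core_id, object_name, memory_type>"
 * format parsed by nss_meminfo_if_user_overwrite() and documented in
 * nss_meminfo_config_show() below:
 *
 *	qca-nss-drv meminfo_user_config="<1, h2n_rings, IMEM>, <0, debug_boot_log_desc, SDRAM>"
 */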

static bool nss_meminfo_debugfs_exist;

/*
 * Table of memory type names presented to the user.
 * The order must match the NSS_MEMINFO_MEMTYPE_* enumeration, which indexes it.
 */
char *nss_meminfo_memtype_table[NSS_MEMINFO_MEMTYPE_MAX] = {"IMEM", "SDRAM", "UTCM_SHARED"};

/*
 * nss_meminfo_alloc_sdram()
 *	Allocate a SDRAM block.
 */
static unsigned long nss_meminfo_alloc_sdram(struct nss_ctx_instance *nss_ctx, uint32_t size)
{
	unsigned long addr = 0;

	/*
	 * kmalloc() returns a cache-line-aligned buffer.
	 */
	addr = (unsigned long)kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	if (!addr)
		nss_info_always("%p: failed to alloc a sdram block of size %u\n", nss_ctx, size);

	kmemleak_not_leak((void *)addr);
	return addr;
}

/*
 * nss_meminfo_free_sdram()
 *	Free SDRAM memory.
 */
static inline void nss_meminfo_free_sdram(struct nss_ctx_instance *nss_ctx, uint32_t dma_addr,
					  unsigned long kern_addr, uint32_t size)
{
	/*
	 * Unmap it since every SDRAM block has been mapped.
	 * The direction must match the DMA_TO_DEVICE mapping done at allocation time.
	 */
	dma_unmap_single(nss_ctx->dev, dma_addr, size, DMA_TO_DEVICE);
	kfree((void *)kern_addr);
}

/*
 * nss_meminfo_alloc_imem()
 *	Allocate an IMEM block in a sequential way.
 */
static uint32_t nss_meminfo_alloc_imem(struct nss_ctx_instance *nss_ctx, uint32_t size, int alignment)
{
	struct nss_meminfo_ctx *mem_ctx = &nss_ctx->meminfo_ctx;
	uint32_t new_tail;
	uint32_t addr = 0;
	int mask;

	mask = alignment - 1;

	/*
	 * Alignment has to be a power of 2.
	 */
	nss_assert(!(alignment & mask));

	new_tail = mem_ctx->imem_tail;

	/*
	 * Align up the address if it is not aligned.
	 */
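	/*
	 * Worked example (illustrative): with alignment 16 (mask 0xf),
	 * a tail of 0x1004 rounds up to 0x1010.
	 */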
	if (new_tail & mask)
		new_tail = (new_tail + mask) & ~mask;

	if (size > (mem_ctx->imem_end - new_tail)) {
		nss_info_always("%p: failed to alloc an IMEM block of size %u\n", nss_ctx, size);
		return addr;
	}

	addr = new_tail;
	mem_ctx->imem_tail = new_tail + size;

	return addr;
}

/*
 * nss_meminfo_free_imem()
 *	Free an IMEM block by rewinding the sequential allocator's tail.
 *	Padding bytes added for alignment are ignored.
 */
static void nss_meminfo_free_imem(struct nss_ctx_instance *nss_ctx, uint32_t addr, uint32_t size)
{
	struct nss_meminfo_ctx *mem_ctx = &nss_ctx->meminfo_ctx;

	mem_ctx->imem_tail -= size;
}

/*
 * nss_meminfo_alloc_utcm_shared()
 *	Allocate a UTCM_SHARED block in a sequential way.
 */
static uint32_t nss_meminfo_alloc_utcm_shared(struct nss_ctx_instance *nss_ctx, uint32_t size, int alignment)
{
	struct nss_meminfo_ctx *mem_ctx = &nss_ctx->meminfo_ctx;
	uint32_t new_tail;
	uint32_t addr = 0;
	int mask;

	mask = alignment - 1;

	/*
	 * Alignment has to be a power of 2.
	 */
	nss_assert(!(alignment & mask));

	new_tail = mem_ctx->utcm_shared_tail;

	/*
	 * Align up the address if it is not aligned.
	 */
	if (new_tail & mask)
		new_tail = (new_tail + mask) & ~mask;

	if (size > (mem_ctx->utcm_shared_end - new_tail)) {
		nss_info_always("%p: failed to alloc a UTCM_SHARED block of size %u\n", nss_ctx, size);
		return addr;
	}

	addr = new_tail;
	mem_ctx->utcm_shared_tail = new_tail + size;

	return addr;
}

/*
 * nss_meminfo_free_utcm_shared()
 *	Free a UTCM_SHARED block by rewinding the sequential allocator's tail.
 *	Padding bytes added for alignment are ignored.
 */
static void nss_meminfo_free_utcm_shared(struct nss_ctx_instance *nss_ctx, uint32_t addr, uint32_t size)
{
	struct nss_meminfo_ctx *mem_ctx = &nss_ctx->meminfo_ctx;

	mem_ctx->utcm_shared_tail -= size;
}

/*
 * nss_meminfo_if_user_overwrite()
 *	Return the user-configured memory type. Otherwise, return -1.
 */
static int nss_meminfo_if_user_overwrite(struct nss_ctx_instance *nss_ctx, const char *name)
{
	char *user_config;
	char **mtype_table;
	char needle[NSS_MEMINFO_BLOCK_NAME_MAXLEN + 6];
	char user_choice[NSS_MEMINFO_MEMTYPE_NAME_MAXLEN];
	int i;
	char *p;

	user_config = nss_meminfo_user_config;
	mtype_table = nss_meminfo_memtype_table;

	snprintf(needle, sizeof(needle), "<%1d, %s, ", nss_ctx->id, name);

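	/*
	 * For example (illustrative): for core 1 and name "h2n_rings",
	 * the needle is "<1, h2n_rings, ".
	 */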
	p = strstr(user_config, needle);
	if (!p)
		return -1;

	p += strlen(needle);

	for (i = 0; i < NSS_MEMINFO_MEMTYPE_NAME_MAXLEN - 1; i++) {
		/*
		 * Each user config entry is of the form <core_id, object_name, memory_type>;
		 * it starts with '<' and ends with '>'.
		 */
		if (*p == '>' || *p == '\0')
			break;
		user_choice[i] = *p;
		p++;
	}

	user_choice[i] = '\0';

	for (i = 0; i < NSS_MEMINFO_MEMTYPE_MAX; i++)
		if (!strcmp(mtype_table[i], user_choice))
			return i;

	return -1;
}

/*
 * nss_meminfo_free_block_lists()
 *	Free the block nodes and memory associated with each memory object.
 */
static void nss_meminfo_free_block_lists(struct nss_ctx_instance *nss_ctx)
{
	struct nss_meminfo_ctx *mem_ctx;
	struct nss_meminfo_block_list *l;
	int i;

	mem_ctx = &nss_ctx->meminfo_ctx;
	for (i = 0; i < NSS_MEMINFO_MEMTYPE_MAX; i++) {
		struct nss_meminfo_block *b;
		l = &mem_ctx->block_lists[i];
		b = l->head;
		while (b) {
			struct nss_meminfo_block *tmp;
			/*
			 * Free IMEM/SDRAM/UTCM_SHARED memory.
			 */
			switch (i) {
			case NSS_MEMINFO_MEMTYPE_IMEM:
				nss_meminfo_free_imem(nss_ctx, b->dma_addr, b->size);
				break;
			case NSS_MEMINFO_MEMTYPE_SDRAM:
				nss_meminfo_free_sdram(nss_ctx, b->dma_addr, b->kern_addr, b->size);
				break;
			case NSS_MEMINFO_MEMTYPE_UTCM_SHARED:
				nss_meminfo_free_utcm_shared(nss_ctx, b->dma_addr, b->size);
				break;
			}

			/*
			 * Free the struct nss_meminfo_block itself.
			 */
			tmp = b;
			b = b->next;
			kfree(tmp);
		}
	}
}

/*
 * nss_meminfo_init_block_lists()
 *	Initialize block lists and allocate memory for each block.
 */
static bool nss_meminfo_init_block_lists(struct nss_ctx_instance *nss_ctx)
{
	/*
	 * There is no corresponding mapped address in the kernel for UTCM_SHARED.
	 * UTCM_SHARED access from the kernel is not allowed, so memory objects
	 * requesting UTCM_SHARED are not expected to use any kernel-mapped address.
	 * The poison value was originally set only for UTCM_SHARED, but it is used
	 * here as the default (which also satisfies the KW static-analysis scan).
	 * Thus, NSS_MEMINFO_POISON is the default value for non-mappable memory requests.
	 */
	unsigned long kern_addr = NSS_MEMINFO_POISON;
	uint32_t dma_addr = 0;
	struct nss_meminfo_ctx *mem_ctx;
	struct nss_meminfo_block_list *l;
	struct nss_meminfo_request *r;
	struct nss_meminfo_map *map;
	int mtype;
	int i;

	mem_ctx = &nss_ctx->meminfo_ctx;

	/*
	 * Fill memory type for each block list.
	 */
	for (i = 0; i < NSS_MEMINFO_MEMTYPE_MAX; i++)
		mem_ctx->block_lists[i].memtype = i;

	map = &mem_ctx->meminfo_map;

	/*
	 * Loop through all meminfo requests by checking the per-request magic.
	 */
	for (r = map->requests; r->magic == NSS_MEMINFO_REQUEST_MAGIC; r++) {
		struct nss_meminfo_block *b = (struct nss_meminfo_block *)
						kmalloc(sizeof(struct nss_meminfo_block), GFP_KERNEL);
		if (!b) {
			nss_info_always("%p: failed to allocate meminfo block\n", nss_ctx);
			goto cleanup;
		}

		b->index = map->num_requests++;
		b->size = r->size;

		/*
		 * Look up the user-defined memory type.
		 * Use the user-defined memory type if it exists; otherwise fall back
		 * to the default requested by FW.
		 */
		mtype = nss_meminfo_if_user_overwrite(nss_ctx, r->name);
		if (mtype == -1)
			mtype = r->memtype_default;
		r->memtype_user = mtype;

		switch (mtype) {
		case NSS_MEMINFO_MEMTYPE_IMEM:
			/*
			 * Return the SoC real address for IMEM as the DMA address.
			 */
			dma_addr = nss_meminfo_alloc_imem(nss_ctx, r->size, r->alignment);
			if (!dma_addr) {
				nss_info_always("%p: failed to alloc IMEM block\n", nss_ctx);
				/*
				 * b is not yet on any block list, so free it
				 * explicitly to avoid leaking it.
				 */
				kfree(b);
				goto cleanup;
			}

			/*
			 * Calculate the offset into the kernel address (vmap) where the
			 * whole IMEM is already mapped, instead of calling ioremap().
			 */
			kern_addr = (unsigned long)nss_ctx->vmap + dma_addr - nss_ctx->vphys;
			break;
		case NSS_MEMINFO_MEMTYPE_SDRAM:
			kern_addr = nss_meminfo_alloc_sdram(nss_ctx, r->size);
			if (!kern_addr) {
				nss_info_always("%p: failed to alloc SDRAM block\n", nss_ctx);
				kfree(b);
				goto cleanup;
			}

			dma_addr = dma_map_single(nss_ctx->dev, (void *)kern_addr, r->size, DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(nss_ctx->dev, dma_addr))) {
				nss_info_always("%p: failed to map SDRAM block\n", nss_ctx);
				kfree(b);
				goto cleanup;
			}
			break;
		case NSS_MEMINFO_MEMTYPE_UTCM_SHARED:
			/*
			 * Return the SoC real address for UTCM_SHARED as the DMA address.
			 */
			dma_addr = nss_meminfo_alloc_utcm_shared(nss_ctx, r->size, r->alignment);
			if (!dma_addr) {
				nss_info_always("%p: failed to alloc UTCM_SHARED block\n", nss_ctx);
				kfree(b);
				goto cleanup;
			}
			break;
		case NSS_MEMINFO_MEMTYPE_INFO:
			/*
			 * If FW requests heap_ddr_size, fill it in from DTS values.
			 */
			if (!strcmp(r->name, "heap_ddr_size")) {
				struct nss_mmu_ddr_info coreinfo;
				r->size = nss_core_ddr_info(&coreinfo);

				/*
				 * Split the memory among the number of active cores.
				 */
				r->size /= coreinfo.num_active_cores;
				dma_addr = coreinfo.start_address + nss_ctx->id * r->size;
				nss_info_always("%p: NSS core %d DDR from %x to %x\n", nss_ctx,
						nss_ctx->id, dma_addr, dma_addr + r->size);
			}
			break;
		default:
			nss_info_always("%p: %d unsupported memory type\n", nss_ctx, mtype);
			kfree(b);
			goto cleanup;
		}

		/*
		 * Update the request with the DMA address; this memory is used only by FW.
		 */
		r->addr = dma_addr;

		/*
		 * nss_if_mem_map settings
		 */
		if (!strcmp(r->name, "nss_if_mem_map_inst")) {
			BUG_ON(mtype == NSS_MEMINFO_MEMTYPE_UTCM_SHARED);
			mem_ctx->if_map_memtype = mtype;
			mem_ctx->if_map_dma = dma_addr;
			mem_ctx->if_map = (struct nss_if_mem_map *)kern_addr;
		}

		if (!strcmp(r->name, "debug_boot_log_desc")) {
			BUG_ON(mtype == NSS_MEMINFO_MEMTYPE_UTCM_SHARED);
			mem_ctx->logbuffer_memtype = mtype;
			mem_ctx->logbuffer_dma = dma_addr;
			mem_ctx->logbuffer = (struct nss_log_descriptor *)kern_addr;
		}

		if (!strcmp(r->name, "c2c_descs_if_mem_map")) {
			mem_ctx->c2c_start_memtype = mtype;
			mem_ctx->c2c_start_dma = dma_addr;
		}

		/*
		 * Flush the updated meminfo request.
		 */
		NSS_CORE_DMA_CACHE_MAINT(r, sizeof(struct nss_meminfo_request), DMA_TO_DEVICE);
		NSS_CORE_DSB();

		/*
		 * Update the list
		 */
		l = &mem_ctx->block_lists[mtype];
		l->num_blks++;
		l->total_size += r->size;

		b->next = l->head;
		l->head = b;
	}

	/*
	 * Verify the memory map end magic.
	 */
	if (*((uint16_t *)r) != NSS_MEMINFO_MAP_END_MAGIC)
		goto cleanup;

	return true;

cleanup:
	nss_meminfo_free_block_lists(nss_ctx);
	return false;
}

/*
 * nss_meminfo_allocate_n2h_h2n_rings()
 *	Allocate N2H/H2N rings.
 */
static bool nss_meminfo_allocate_n2h_h2n_rings(struct nss_ctx_instance *nss_ctx,
					       struct nss_meminfo_n2h_h2n_info *info)
{
	switch (info->memtype) {
	case NSS_MEMINFO_MEMTYPE_SDRAM:
		info->kern_addr = nss_meminfo_alloc_sdram(nss_ctx, info->total_size);
		if (!info->kern_addr)
			return false;

		info->dma_addr = dma_map_single(nss_ctx->dev, (void *)info->kern_addr,
						info->total_size, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(nss_ctx->dev, info->dma_addr))) {
			kfree((void *)info->kern_addr);
			return false;
		}
		break;
	case NSS_MEMINFO_MEMTYPE_IMEM:
		info->dma_addr = nss_meminfo_alloc_imem(nss_ctx, info->total_size, L1_CACHE_BYTES);
		if (!info->dma_addr)
			return false;

		info->kern_addr = (unsigned long)(nss_ctx->vmap) + info->dma_addr - nss_ctx->vphys;
		break;
	default:
		return false;
	}

	return true;
}

/*
 * nss_meminfo_configure_n2h_h2n_rings()
 *	Configure N2H/H2N rings and if_map.
 */
static bool nss_meminfo_configure_n2h_h2n_rings(struct nss_ctx_instance *nss_ctx)
{
	struct nss_meminfo_ctx *mem_ctx = &nss_ctx->meminfo_ctx;
	struct nss_meminfo_n2h_h2n_info *h2n_info;
	struct nss_meminfo_n2h_h2n_info *n2h_info;
	struct nss_if_mem_map *if_map;
	int i;
	int mtype;

	h2n_info = &mem_ctx->h2n_info;
	n2h_info = &mem_ctx->n2h_info;

	/*
	 * Check memory type. SDRAM is the default option.
	 */
	mtype = nss_meminfo_if_user_overwrite(nss_ctx, "h2n_rings");
	if (mtype == -1)
		mtype = NSS_MEMINFO_MEMTYPE_SDRAM;

	h2n_info->memtype = mtype;

	mtype = nss_meminfo_if_user_overwrite(nss_ctx, "n2h_rings");
	if (mtype == -1)
		mtype = NSS_MEMINFO_MEMTYPE_SDRAM;

	n2h_info->memtype = mtype;

	n2h_info->total_size = sizeof(struct n2h_descriptor) * NSS_N2H_RING_COUNT * (NSS_RING_SIZE + 2);
	h2n_info->total_size = sizeof(struct h2n_descriptor) * NSS_H2N_RING_COUNT * (NSS_RING_SIZE + 2);

	/*
	 * N2H ring allocations
	 */
	if (!(nss_meminfo_allocate_n2h_h2n_rings(nss_ctx, n2h_info))) {
		nss_info_always("%p: failed to allocate/map n2h rings\n", nss_ctx);
		return false;
	}

	/*
	 * H2N ring allocations
	 */
	if (!(nss_meminfo_allocate_n2h_h2n_rings(nss_ctx, h2n_info))) {
		nss_info_always("%p: failed to allocate/map h2n_rings\n", nss_ctx);
		goto cleanup;
	}

	/*
	 * Bring a fresh copy of if_map from memory in order to read it correctly.
	 */
	if_map = mem_ctx->if_map;
	NSS_CORE_DMA_CACHE_MAINT((void *)if_map, sizeof(struct nss_if_mem_map), DMA_FROM_DEVICE);
	NSS_CORE_DSB();

	if_map->n2h_rings = NSS_N2H_RING_COUNT;
	if_map->h2n_rings = NSS_H2N_RING_COUNT;

	/*
	 * N2H ring settings
	 */
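	/*
	 * Note (derived from the arithmetic below): the rings live in one
	 * contiguous block, and ring i starts at byte offset
	 * i * sizeof(struct n2h_descriptor) * (NSS_RING_SIZE + 2).
	 */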
	for (i = 0; i < NSS_N2H_RING_COUNT; i++) {
		struct hlos_n2h_desc_ring *n2h_desc_ring = &nss_ctx->n2h_desc_ring[i];
		n2h_desc_ring->desc_ring.desc = (struct n2h_descriptor *)(n2h_info->kern_addr + i * sizeof(struct n2h_descriptor) * (NSS_RING_SIZE + 2));
		n2h_desc_ring->desc_ring.size = NSS_RING_SIZE;
		n2h_desc_ring->hlos_index = if_map->n2h_hlos_index[i];

		if_map->n2h_desc_if[i].size = NSS_RING_SIZE;
		if_map->n2h_desc_if[i].desc_addr = n2h_info->dma_addr + i * sizeof(struct n2h_descriptor) * (NSS_RING_SIZE + 2);
		nss_info("%p: N2H ring %d, size %d, addr = %x\n", nss_ctx, i, if_map->n2h_desc_if[i].size, if_map->n2h_desc_if[i].desc_addr);
	}

	/*
	 * H2N ring settings
	 */
	for (i = 0; i < NSS_H2N_RING_COUNT; i++) {
		struct hlos_h2n_desc_rings *h2n_desc_ring = &nss_ctx->h2n_desc_rings[i];
		h2n_desc_ring->desc_ring.desc = (struct h2n_descriptor *)(h2n_info->kern_addr + i * sizeof(struct h2n_descriptor) * (NSS_RING_SIZE + 2));
		h2n_desc_ring->desc_ring.size = NSS_RING_SIZE;
		h2n_desc_ring->hlos_index = if_map->h2n_hlos_index[i];
		spin_lock_init(&h2n_desc_ring->lock);

		if_map->h2n_desc_if[i].size = NSS_RING_SIZE;
		if_map->h2n_desc_if[i].desc_addr = h2n_info->dma_addr + i * sizeof(struct h2n_descriptor) * (NSS_RING_SIZE + 2);
		nss_info("%p: H2N ring %d, size %d, addr = %x\n", nss_ctx, i, if_map->h2n_desc_if[i].size, if_map->h2n_desc_if[i].desc_addr);
	}

	/*
	 * Flush the updated nss_if_mem_map.
	 */
	NSS_CORE_DMA_CACHE_MAINT((void *)if_map, sizeof(struct nss_if_mem_map), DMA_TO_DEVICE);
	NSS_CORE_DSB();

	return true;

cleanup:
	if (n2h_info->memtype == NSS_MEMINFO_MEMTYPE_SDRAM)
		nss_meminfo_free_sdram(nss_ctx, n2h_info->dma_addr, n2h_info->kern_addr, n2h_info->total_size);
	else
		nss_meminfo_free_imem(nss_ctx, n2h_info->dma_addr, n2h_info->total_size);

	nss_meminfo_free_block_lists(nss_ctx);
	return false;
}

/*
 * nss_meminfo_config_show()
 *	Show the meminfo configuration per core.
 */
static int nss_meminfo_config_show(struct seq_file *seq, void *v)
{
	struct nss_ctx_instance *nss_ctx;
	struct nss_meminfo_ctx *mem_ctx;
	struct nss_meminfo_n2h_h2n_info *n2h_info;
	struct nss_meminfo_n2h_h2n_info *h2n_info;
	struct nss_meminfo_map *map;
	struct nss_meminfo_request *r;
	int nss_id;
	int i;

	/*
	 * i_private is passed to us by debugfs_create_file().
	 */
	nss_id = (int)(nss_ptr_t)seq->private;
	if (nss_id < 0 || nss_id >= nss_top_main.num_nss) {
		nss_warning("nss_id: %d is not valid\n", nss_id);
		return -ENODEV;
	}

	nss_ctx = &nss_top_main.nss[nss_id];
	NSS_VERIFY_CTX_MAGIC(nss_ctx);

	mem_ctx = &nss_ctx->meminfo_ctx;
	map = &mem_ctx->meminfo_map;
	n2h_info = &mem_ctx->n2h_info;
	h2n_info = &mem_ctx->h2n_info;

	seq_printf(seq, "%-5s %-32s %-7s %-7s %-10s %-10s\n",
			"Index", "Name", "Default", "User", "Size", "DMA Addr");
	seq_printf(seq, "%-5s %-32s %-7s %-7s 0x%-8x 0x%-8x\n",
			"N/A", "n2h_rings", "SDRAM",
			nss_meminfo_memtype_table[n2h_info->memtype],
			n2h_info->total_size, n2h_info->dma_addr);
	seq_printf(seq, "%-5s %-32s %-7s %-7s 0x%-8x 0x%-8x\n",
			"N/A", "h2n_rings", "SDRAM",
			nss_meminfo_memtype_table[h2n_info->memtype],
			h2n_info->total_size, h2n_info->dma_addr);

	r = map->requests;
	for (i = 0; i < map->num_requests; i++) {
		seq_printf(seq, "%-5d %-32s %-7s %-7s 0x%-8x 0x%-8x\n",
				i, r[i].name,
				nss_meminfo_memtype_table[r[i].memtype_default],
				nss_meminfo_memtype_table[r[i].memtype_user],
				r[i].size, r[i].addr);
	}

	seq_printf(seq, "Available IMEM: 0x%x\n", mem_ctx->imem_end - mem_ctx->imem_tail);
	seq_printf(seq, "How to configure?\n");
	seq_printf(seq, "Overwrite /etc/modules.d/32-qca-nss-drv with the following contents, then reboot:\n\n");
	seq_printf(seq, "qca-nss-drv meminfo_user_config=\"<core_id, name, memory_type>, ..\"\n\n");
	seq_printf(seq, "For example, <1, h2n_rings, IMEM> stands for: h2n_rings of core 1 is on IMEM\n");
	seq_printf(seq, "Note: UTCM_SHARED cannot be used for n2h_rings, h2n_rings and debug_boot_log_desc.\n");

	return 0;
}

/*
 * nss_meminfo_debugfs_file_open()
 *	Open the meminfo debugfs file.
 */
static int nss_meminfo_debugfs_file_open(struct inode *inode, struct file *file)
{
	return single_open(file, nss_meminfo_config_show, inode->i_private);
}

static struct file_operations nss_meminfo_debugfs_ops = {
	.owner = THIS_MODULE,
	.open = nss_meminfo_debugfs_file_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/*
 * nss_meminfo_init_debugfs()
 *	Init meminfo debugfs.
 */
static void nss_meminfo_init_debugfs(struct nss_ctx_instance *nss_ctx)
{
	int i;
	struct dentry *meminfo_main_dentry;
	struct dentry *meminfo_core_dentries[NSS_MAX_CORES];

	if (nss_meminfo_debugfs_exist)
		return;

	/*
	 * Create a directory for showing the meminfo configuration of each core.
	 */
	meminfo_main_dentry = debugfs_create_dir("meminfo", nss_top_main.top_dentry);
	if (unlikely(!meminfo_main_dentry)) {
		nss_warning("Failed to create qca-nss-drv/meminfo directory in debugfs\n");
		return;
	}

	for (i = 0; i < nss_top_main.num_nss; i++) {
		char file[10];
		snprintf(file, sizeof(file), "core%d", i);
		meminfo_core_dentries[i] = debugfs_create_file(file, 0400, meminfo_main_dentry,
								(void *)(nss_ptr_t)i, &nss_meminfo_debugfs_ops);
		if (unlikely(!meminfo_core_dentries[i])) {
			int j;
			for (j = 0; j < i; j++)
				debugfs_remove(meminfo_core_dentries[j]);
			debugfs_remove(meminfo_main_dentry);
			nss_warning("Failed to create qca-nss-drv/meminfo/%s file in debugfs", file);
			return;
		}
	}

	nss_meminfo_debugfs_exist = true;
	nss_info("nss meminfo user config: %s\n", nss_meminfo_user_config);
}

/*
 * nss_meminfo_init()
 *	Initialization
 */
bool nss_meminfo_init(struct nss_ctx_instance *nss_ctx)
{
	struct nss_meminfo_ctx *mem_ctx;
	uint32_t *meminfo_start;
	struct nss_meminfo_map *map;
	struct nss_top_instance *nss_top = &nss_top_main;

	NSS_VERIFY_CTX_MAGIC(nss_ctx);
	mem_ctx = &nss_ctx->meminfo_ctx;

	/*
	 * meminfo_start is the label where the start address of the meminfo map is stored.
	 */
	meminfo_start = (uint32_t *)ioremap_nocache(nss_ctx->load + NSS_MEMINFO_MAP_START_OFFSET,
						    NSS_MEMINFO_RESERVE_AREA_SIZE);
	if (!meminfo_start) {
		nss_info_always("%p: cannot remap meminfo start\n", nss_ctx);
		return false;
	}

	/*
	 * Check the meminfo start magic.
	 */
	if ((uint16_t)meminfo_start[0] != NSS_MEMINFO_RESERVE_AREA_MAGIC) {
		nss_info_always("%p: failed to verify meminfo start magic\n", nss_ctx);
		return false;
	}

	map = &mem_ctx->meminfo_map;
	map->start = (uint32_t *)ioremap_cache(meminfo_start[1], NSS_MEMINFO_MAP_SIZE);
	if (!map->start) {
		nss_info_always("%p: failed to remap meminfo map\n", nss_ctx);
		return false;
	}

	/*
	 * Check the meminfo map magic.
	 */
	if ((uint16_t)map->start[0] != NSS_MEMINFO_MAP_START_MAGIC) {
		nss_info_always("%p: failed to verify meminfo map magic\n", nss_ctx);
		return false;
	}

	/*
	 * Meminfo map settings: the request array starts right after the 32-bit
	 * map-start magic word.
	 */
	map->num_requests = 0;
	map->requests = (struct nss_meminfo_request *)(map->start + 1);

	/*
	 * Init IMEM
	 */
	nss_top->hal_ops->init_imem(nss_ctx);

	/*
	 * Init UTCM_SHARED if supported
	 */
	if (!nss_top->hal_ops->init_utcm_shared(nss_ctx, meminfo_start)) {
		nss_info_always("%p: failed to initialize UTCM_SHARED meminfo\n", nss_ctx);
		return false;
	}

	/*
	 * Init meminfo block lists
	 */
	if (!nss_meminfo_init_block_lists(nss_ctx)) {
		nss_info_always("%p: failed to initialize meminfo block lists\n", nss_ctx);
		return false;
	}

	/*
	 * Configure N2H/H2N rings and nss_if_mem_map
	 */
	if (!nss_meminfo_configure_n2h_h2n_rings(nss_ctx))
		return false;

	nss_meminfo_init_debugfs(nss_ctx);

	nss_info_always("%p: meminfo init succeeded\n", nss_ctx);
	return true;
}