/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * nss_meminfo.c
 *	NSS meminfo subsystem
 */

#include <linux/seq_file_net.h>
#include "nss_tx_rx_common.h"
#include "nss_core.h"
#include "nss_arch.h"
#include "nss_meminfo.h"

/*
 * Store user configuration
 */
static char nss_meminfo_user_config[NSS_MEMINFO_USER_CONFIG_MAXLEN];
module_param_string(meminfo_user_config, nss_meminfo_user_config,
		    NSS_MEMINFO_USER_CONFIG_MAXLEN, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
MODULE_PARM_DESC(meminfo_user_config, "meminfo user configuration");
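
/*
 * Example (illustrative values): booting with
 *	qca-nss-drv meminfo_user_config="<1, h2n_rings, IMEM>"
 * places the h2n_rings of core 1 in IMEM. Each tuple is
 * <core_id, object_name, memory_type>; see nss_meminfo_config_show()
 * for the full format.
 */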

static bool nss_meminfo_debugfs_exist;

/*
 * Table of memory type names presented to the user.
 */
char *nss_meminfo_memtype_table[NSS_MEMINFO_MEMTYPE_MAX] = {"IMEM", "SDRAM", "UTCM_SHARED"};

/*
 * nss_meminfo_alloc_sdram()
 *	Allocate an SDRAM block.
 */
static void *nss_meminfo_alloc_sdram(struct nss_ctx_instance *nss_ctx, uint32_t size)
{
	void *addr = NULL;

	/*
	 * kmalloc() returns a cache line aligned buffer.
	 */
	addr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	if (!addr)
		nss_info_always("%p: failed to allocate an SDRAM block of size %u\n", nss_ctx, size);

	kmemleak_not_leak(addr);
	return addr;
}

/*
 * nss_meminfo_free_sdram()
 *	Free SDRAM memory.
 */
static inline void nss_meminfo_free_sdram(struct nss_ctx_instance *nss_ctx, uint32_t dma_addr,
					  void *kern_addr, uint32_t size)
{
	/*
	 * Unmap it first, since every SDRAM block has been DMA-mapped.
	 */
	dma_unmap_single(nss_ctx->dev, dma_addr, size, DMA_FROM_DEVICE);
	kfree(kern_addr);
}

/*
 * nss_meminfo_alloc_imem()
 *	Allocate an IMEM block in a sequential way.
 */
static uint32_t nss_meminfo_alloc_imem(struct nss_ctx_instance *nss_ctx, uint32_t size, int alignment)
{
	struct nss_meminfo_ctx *mem_ctx = &nss_ctx->meminfo_ctx;
	uint32_t new_tail;
	uint32_t addr = 0;
	int mask;

	mask = alignment - 1;

	/*
	 * Alignment has to be a power of 2.
	 */
	nss_assert(!(alignment & mask));

	new_tail = mem_ctx->imem_tail;

	/*
	 * Align the address up if it is not aligned.
	 */
	if (new_tail & mask)
		new_tail = (new_tail + mask) & ~mask;
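
	/*
	 * For example, with alignment 16 (mask 0xF) a tail of 0x1004
	 * rounds up to (0x1004 + 0xF) & ~0xF = 0x1010.
	 */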

	if (size > (mem_ctx->imem_end - new_tail)) {
		nss_info_always("%p: failed to alloc an IMEM block of size %u\n", nss_ctx, size);
		return addr;
	}

	addr = new_tail;
	mem_ctx->imem_tail = new_tail + size;

	return addr;
}

/*
 * nss_meminfo_free_imem()
 *	Free an IMEM block. Padding bytes added for alignment are ignored.
 */
static void nss_meminfo_free_imem(struct nss_ctx_instance *nss_ctx, uint32_t addr, uint32_t size)
{
	struct nss_meminfo_ctx *mem_ctx = &nss_ctx->meminfo_ctx;
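
	/*
	 * IMEM blocks are carved out sequentially from imem_tail, so a free
	 * simply rewinds the tail by the block size: addr is unused, and
	 * padding bytes added for alignment are not reclaimed.
	 */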
	mem_ctx->imem_tail -= size;
}

/*
 * nss_meminfo_alloc_utcm_shared()
 *	Allocate a UTCM_SHARED block in a sequential way.
 */
static uint32_t nss_meminfo_alloc_utcm_shared(struct nss_ctx_instance *nss_ctx, uint32_t size, int alignment)
{
	struct nss_meminfo_ctx *mem_ctx = &nss_ctx->meminfo_ctx;
	uint32_t new_tail;
	uint32_t addr = 0;
	int mask;

	mask = alignment - 1;

	/*
	 * Alignment has to be a power of 2.
	 */
	nss_assert(!(alignment & mask));

	new_tail = mem_ctx->utcm_shared_tail;

	/*
	 * Align the address up if it is not aligned.
	 */
	if (new_tail & mask)
		new_tail = (new_tail + mask) & ~mask;

	if (size > (mem_ctx->utcm_shared_end - new_tail)) {
		nss_info_always("%p: failed to alloc a UTCM_SHARED block of size %u\n", nss_ctx, size);
		return addr;
	}

	addr = new_tail;
	mem_ctx->utcm_shared_tail = new_tail + size;

	return addr;
}

/*
 * nss_meminfo_free_utcm_shared()
 *	Free a UTCM_SHARED block. Padding bytes added for alignment are ignored.
 */
static void nss_meminfo_free_utcm_shared(struct nss_ctx_instance *nss_ctx, uint32_t addr, uint32_t size)
{
	struct nss_meminfo_ctx *mem_ctx = &nss_ctx->meminfo_ctx;
	mem_ctx->utcm_shared_tail -= size;
}

/*
 * nss_meminfo_if_user_overwrite()
 *	Return the user-configured memory type, or -1 if there is none.
 */
static int nss_meminfo_if_user_overwrite(struct nss_ctx_instance *nss_ctx, const char *name)
{
	char *user_config;
	char **mtype_table;
	char needle[NSS_MEMINFO_BLOCK_NAME_MAXLEN + 6];
	char user_choice[NSS_MEMINFO_MEMTYPE_NAME_MAXLEN];
	int i;
	char *p;

	user_config = nss_meminfo_user_config;
	mtype_table = nss_meminfo_memtype_table;

	snprintf(needle, sizeof(needle), "<%1d, %s, ", nss_ctx->id, name);
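
	/*
	 * For example, with user config "<1, h2n_rings, IMEM>" and needle
	 * "<1, h2n_rings, ", p below ends up pointing at "IMEM>" and
	 * user_choice is parsed as "IMEM". The 6 extra bytes in needle[]
	 * cover the '<', the core id digit and the two ", " separators.
	 */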

	p = strstr(user_config, needle);
	if (!p)
		return -1;

	p += strlen(needle);

	for (i = 0; i < NSS_MEMINFO_MEMTYPE_NAME_MAXLEN - 1; i++) {
		/*
		 * Each user config entry looks like <core_id, object_name, memory_type>;
		 * it starts with '<' and ends with '>'.
		 */
		if (*p == '>' || *p == '\0')
			break;
		user_choice[i] = *p;
		p++;
	}

	user_choice[i] = '\0';

	for (i = 0; i < NSS_MEMINFO_MEMTYPE_MAX; i++)
		if (!strcmp(mtype_table[i], user_choice))
			return i;

	return -1;
}

/*
 * nss_meminfo_free_block_lists()
 *	Free the block nodes and memory associated with each memory object.
 */
static void nss_meminfo_free_block_lists(struct nss_ctx_instance *nss_ctx)
{
	struct nss_meminfo_ctx *mem_ctx;
	struct nss_meminfo_block_list *l;
	int i;

	mem_ctx = &nss_ctx->meminfo_ctx;
	for (i = 0; i < NSS_MEMINFO_MEMTYPE_MAX; i++) {
		struct nss_meminfo_block *b;
		l = &mem_ctx->block_lists[i];
		b = l->head;
		while (b) {
			struct nss_meminfo_block *tmp;
			/*
			 * Free IMEM/SDRAM/UTCM_SHARED memory.
			 */
			switch (i) {
			case NSS_MEMINFO_MEMTYPE_IMEM:
				nss_meminfo_free_imem(nss_ctx, b->dma_addr, b->size);
				break;
			case NSS_MEMINFO_MEMTYPE_SDRAM:
				nss_meminfo_free_sdram(nss_ctx, b->dma_addr, b->kern_addr, b->size);
				break;
			case NSS_MEMINFO_MEMTYPE_UTCM_SHARED:
				nss_meminfo_free_utcm_shared(nss_ctx, b->dma_addr, b->size);
				break;
			}

			/*
			 * Free the struct nss_meminfo_block itself.
			 */
			tmp = b;
			b = b->next;
			kfree(tmp);
		}
	}
}

/*
 * nss_meminfo_init_block_lists()
 *	Initialize block lists and allocate memory for each block.
 */
static bool nss_meminfo_init_block_lists(struct nss_ctx_instance *nss_ctx)
{
	/*
	 * There is no corresponding mapped address in the kernel for UTCM_SHARED.
	 * UTCM_SHARED access from the kernel is not allowed, so memory objects
	 * requesting UTCM_SHARED are not expected to use any kernel mapped address.
	 * This default was introduced for UTCM_SHARED but has been moved here,
	 * notably to satisfy KW (Klocwork) scans. Thus, NSS_MEMINFO_POISON is the
	 * default value for non-mappable memory requests.
	 */
	void *kern_addr = (void *)NSS_MEMINFO_POISON;
	uint32_t dma_addr = 0;
	struct nss_meminfo_ctx *mem_ctx;
	struct nss_meminfo_block_list *l;
	struct nss_meminfo_request *r;
	struct nss_meminfo_map *map;
	int mtype;
	int i;

	mem_ctx = &nss_ctx->meminfo_ctx;

	/*
	 * Fill memory type for each block list.
	 */
	for (i = 0; i < NSS_MEMINFO_MEMTYPE_MAX; i++)
		mem_ctx->block_lists[i].memtype = i;

	map = &mem_ctx->meminfo_map;

	/*
	 * Loop through all meminfo requests by checking the per-request magic.
	 */
	for (r = map->requests; r->magic == NSS_MEMINFO_REQUEST_MAGIC; r++) {
		struct nss_meminfo_block *b = (struct nss_meminfo_block *)
						kmalloc(sizeof(struct nss_meminfo_block), GFP_KERNEL);
		if (!b) {
			nss_info_always("%p: failed to allocate meminfo block\n", nss_ctx);
			goto cleanup;
		}

		b->index = map->num_requests++;
		b->size = r->size;

		/*
		 * Look up the user-defined memory type, which takes effect if it
		 * exists; otherwise fall back to the request's default memory type.
		 */
		mtype = nss_meminfo_if_user_overwrite(nss_ctx, r->name);
		if (mtype == -1)
			mtype = r->memtype_default;
		r->memtype_user = mtype;

		switch (mtype) {
		case NSS_MEMINFO_MEMTYPE_IMEM:
			/*
			 * Return the SoC real address for IMEM as the DMA address.
			 */
			dma_addr = nss_meminfo_alloc_imem(nss_ctx, r->size, r->alignment);
			if (!dma_addr) {
				nss_info_always("%p: failed to alloc IMEM block\n", nss_ctx);
				goto cleanup;
			}

			/*
			 * Calculate the offset from the kernel address (vmap) where the
			 * whole IMEM is mapped, instead of calling ioremap().
			 */
			kern_addr = nss_ctx->vmap + dma_addr - nss_ctx->vphys;
			break;
		case NSS_MEMINFO_MEMTYPE_SDRAM:
			kern_addr = nss_meminfo_alloc_sdram(nss_ctx, r->size);
			if (!kern_addr) {
				nss_info_always("%p: failed to alloc SDRAM block\n", nss_ctx);
				goto cleanup;
			}

			dma_addr = dma_map_single(nss_ctx->dev, kern_addr, r->size, DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(nss_ctx->dev, dma_addr))) {
				nss_info_always("%p: failed to map SDRAM block\n", nss_ctx);
				goto cleanup;
			}
			break;
		case NSS_MEMINFO_MEMTYPE_UTCM_SHARED:
			/*
			 * Return the SoC real address for UTCM_SHARED as the DMA address.
			 */
			dma_addr = nss_meminfo_alloc_utcm_shared(nss_ctx, r->size, r->alignment);
			if (!dma_addr) {
				nss_info_always("%p: failed to alloc UTCM_SHARED block\n", nss_ctx);
				goto cleanup;
			}
			break;
		case NSS_MEMINFO_MEMTYPE_INFO:
			/*
			 * If the FW requests heap_ddr_size, fill it in from DTS values.
			 */
			if (!strcmp(r->name, "heap_ddr_size")) {
				struct nss_mmu_ddr_info coreinfo;
				r->size = nss_core_ddr_info(&coreinfo);

				/*
				 * Split the memory among the number of active cores.
				 */
				r->size /= coreinfo.num_active_cores;
				dma_addr = coreinfo.start_address + nss_ctx->id * r->size;
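
				/*
				 * Illustrative numbers: with 64MB of DDR and 2 active
				 * cores, each core gets a 32MB heap and core 1 starts
				 * at start_address + 32MB.
				 */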
				nss_info_always("%p: NSS core %d DDR from %x to %x\n", nss_ctx,
						nss_ctx->id, dma_addr, dma_addr + r->size);
			}
			break;
		default:
			nss_info_always("%p: unsupported memory type %d\n", nss_ctx, mtype);
			goto cleanup;
		}

		/*
		 * Update the request with the DMA address for memory that will
		 * only be used by the FW.
		 */
		r->addr = dma_addr;

		/*
		 * nss_if_mem_map settings
		 */
		if (!strcmp(r->name, "nss_if_mem_map_inst")) {
			BUG_ON(mtype == NSS_MEMINFO_MEMTYPE_UTCM_SHARED);
			mem_ctx->if_map_memtype = mtype;
			mem_ctx->if_map_dma = dma_addr;
			mem_ctx->if_map = (struct nss_if_mem_map *)kern_addr;
		}

		if (!strcmp(r->name, "debug_boot_log_desc")) {
			BUG_ON(mtype == NSS_MEMINFO_MEMTYPE_UTCM_SHARED);
			mem_ctx->logbuffer_memtype = mtype;
			mem_ctx->logbuffer_dma = dma_addr;
			mem_ctx->logbuffer = (struct nss_log_descriptor *)kern_addr;
		}

		if (!strcmp(r->name, "c2c_descs_if_mem_map")) {
			mem_ctx->c2c_start_memtype = mtype;
			mem_ctx->c2c_start_dma = dma_addr;
		}

		if (!strcmp(r->name, "profile_dma_ctrl")) {
			mem_ctx->sdma_ctrl = kern_addr;
			nss_info_always("%p: set sdma %p\n", nss_ctx, kern_addr);
		}

		/*
		 * Flush the updated meminfo request.
		 */
		NSS_CORE_DMA_CACHE_MAINT(r, sizeof(struct nss_meminfo_request), DMA_TO_DEVICE);
		NSS_CORE_DSB();

		/*
		 * Update the list.
		 */
		l = &mem_ctx->block_lists[mtype];
		l->num_blks++;
		l->total_size += r->size;

		b->next = l->head;
		l->head = b;
	}

	/*
	 * Verify the memory map end magic.
	 */
	if (*((uint16_t *)r) != NSS_MEMINFO_MAP_END_MAGIC)
		goto cleanup;

	return true;

cleanup:
	nss_meminfo_free_block_lists(nss_ctx);
	return false;
}

/*
 * nss_meminfo_allocate_n2h_h2n_rings()
 *	Allocate N2H/H2N rings.
 */
static bool nss_meminfo_allocate_n2h_h2n_rings(struct nss_ctx_instance *nss_ctx,
					       struct nss_meminfo_n2h_h2n_info *info)
{
	switch (info->memtype) {
	case NSS_MEMINFO_MEMTYPE_SDRAM:
		info->kern_addr = nss_meminfo_alloc_sdram(nss_ctx, info->total_size);
		if (!info->kern_addr)
			return false;

		info->dma_addr = dma_map_single(nss_ctx->dev, (void *)info->kern_addr,
						info->total_size, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(nss_ctx->dev, info->dma_addr))) {
			kfree((void *)info->kern_addr);
			return false;
		}
		break;
	case NSS_MEMINFO_MEMTYPE_IMEM:
		info->dma_addr = nss_meminfo_alloc_imem(nss_ctx, info->total_size, L1_CACHE_BYTES);
		if (!info->dma_addr)
			return false;

		info->kern_addr = nss_ctx->vmap + info->dma_addr - nss_ctx->vphys;
		break;
	default:
		return false;
	}

	return true;
}

/*
 * nss_meminfo_configure_n2h_h2n_rings()
 *	Configure N2H/H2N rings and if_map.
 */
static bool nss_meminfo_configure_n2h_h2n_rings(struct nss_ctx_instance *nss_ctx)
{
	struct nss_meminfo_ctx *mem_ctx = &nss_ctx->meminfo_ctx;
	struct nss_meminfo_n2h_h2n_info *h2n_info;
	struct nss_meminfo_n2h_h2n_info *n2h_info;
	struct nss_if_mem_map *if_map;
	int i;
	int mtype;

	h2n_info = &mem_ctx->h2n_info;
	n2h_info = &mem_ctx->n2h_info;

	/*
	 * Check the memory type. SDRAM is the default option.
	 */
	mtype = nss_meminfo_if_user_overwrite(nss_ctx, "h2n_rings");
	if (mtype == -1)
		mtype = NSS_MEMINFO_MEMTYPE_SDRAM;

	h2n_info->memtype = mtype;

	mtype = nss_meminfo_if_user_overwrite(nss_ctx, "n2h_rings");
	if (mtype == -1)
		mtype = NSS_MEMINFO_MEMTYPE_SDRAM;

	n2h_info->memtype = mtype;

	n2h_info->total_size = sizeof(struct n2h_descriptor) * NSS_N2H_RING_COUNT * (NSS_RING_SIZE + 2);
	h2n_info->total_size = sizeof(struct h2n_descriptor) * NSS_H2N_RING_COUNT * (NSS_RING_SIZE + 2);
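
	/*
	 * Each ring reserves (NSS_RING_SIZE + 2) descriptor slots, so ring i
	 * starts at byte offset i * sizeof(descriptor) * (NSS_RING_SIZE + 2)
	 * within its block; the per-ring setup below relies on this layout.
	 */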

	/*
	 * N2H ring allocations
	 */
	if (!(nss_meminfo_allocate_n2h_h2n_rings(nss_ctx, n2h_info))) {
		nss_info_always("%p: failed to allocate/map n2h rings\n", nss_ctx);
		return false;
	}

	/*
	 * H2N ring allocations
	 */
	if (!(nss_meminfo_allocate_n2h_h2n_rings(nss_ctx, h2n_info))) {
		nss_info_always("%p: failed to allocate/map h2n_rings\n", nss_ctx);
		goto cleanup;
	}

	/*
	 * Bring a fresh copy of if_map from memory in order to read it correctly.
	 */
	if_map = mem_ctx->if_map;
	NSS_CORE_DMA_CACHE_MAINT((void *)if_map, sizeof(struct nss_if_mem_map), DMA_FROM_DEVICE);
	NSS_CORE_DSB();

	if_map->n2h_rings = NSS_N2H_RING_COUNT;
	if_map->h2n_rings = NSS_H2N_RING_COUNT;

	/*
	 * N2H ring settings
	 */
	for (i = 0; i < NSS_N2H_RING_COUNT; i++) {
		struct hlos_n2h_desc_ring *n2h_desc_ring = &nss_ctx->n2h_desc_ring[i];
		n2h_desc_ring->desc_ring.desc = (struct n2h_descriptor *)(n2h_info->kern_addr + i * sizeof(struct n2h_descriptor) * (NSS_RING_SIZE + 2));
		n2h_desc_ring->desc_ring.size = NSS_RING_SIZE;
		n2h_desc_ring->hlos_index = if_map->n2h_hlos_index[i];

		if_map->n2h_desc_if[i].size = NSS_RING_SIZE;
		if_map->n2h_desc_if[i].desc_addr = n2h_info->dma_addr + i * sizeof(struct n2h_descriptor) * (NSS_RING_SIZE + 2);
		nss_info("%p: N2H ring %d, size %d, addr = %x\n", nss_ctx, i, if_map->n2h_desc_if[i].size, if_map->n2h_desc_if[i].desc_addr);
	}

	/*
	 * H2N ring settings
	 */
	for (i = 0; i < NSS_H2N_RING_COUNT; i++) {
		struct hlos_h2n_desc_rings *h2n_desc_ring = &nss_ctx->h2n_desc_rings[i];
		h2n_desc_ring->desc_ring.desc = (struct h2n_descriptor *)(h2n_info->kern_addr + i * sizeof(struct h2n_descriptor) * (NSS_RING_SIZE + 2));
		h2n_desc_ring->desc_ring.size = NSS_RING_SIZE;
		h2n_desc_ring->hlos_index = if_map->h2n_hlos_index[i];
		spin_lock_init(&h2n_desc_ring->lock);

		if_map->h2n_desc_if[i].size = NSS_RING_SIZE;
		if_map->h2n_desc_if[i].desc_addr = h2n_info->dma_addr + i * sizeof(struct h2n_descriptor) * (NSS_RING_SIZE + 2);
		nss_info("%p: H2N ring %d, size %d, addr = %x\n", nss_ctx, i, if_map->h2n_desc_if[i].size, if_map->h2n_desc_if[i].desc_addr);
	}

	/*
	 * Flush the updated nss_if_mem_map.
	 */
	NSS_CORE_DMA_CACHE_MAINT((void *)if_map, sizeof(struct nss_if_mem_map), DMA_TO_DEVICE);
	NSS_CORE_DSB();

	return true;

cleanup:
	if (n2h_info->memtype == NSS_MEMINFO_MEMTYPE_SDRAM)
		nss_meminfo_free_sdram(nss_ctx, n2h_info->dma_addr, n2h_info->kern_addr, n2h_info->total_size);
	else
		nss_meminfo_free_imem(nss_ctx, n2h_info->dma_addr, n2h_info->total_size);

	nss_meminfo_free_block_lists(nss_ctx);
	return false;
}

/*
 * nss_meminfo_config_show()
 *	Show the meminfo configuration per core.
 */
static int nss_meminfo_config_show(struct seq_file *seq, void *v)
{
	struct nss_ctx_instance *nss_ctx;
	struct nss_meminfo_ctx *mem_ctx;
	struct nss_meminfo_n2h_h2n_info *n2h_info;
	struct nss_meminfo_n2h_h2n_info *h2n_info;
	struct nss_meminfo_map *map;
	struct nss_meminfo_request *r;
	int nss_id;
	int i;

	/*
	 * i_private is passed to us by debugfs_create_file().
	 */
	nss_id = (int)(nss_ptr_t)seq->private;
	if (nss_id < 0 || nss_id >= nss_top_main.num_nss) {
		nss_warning("nss_id: %d is not valid\n", nss_id);
		return -ENODEV;
	}

	nss_ctx = &nss_top_main.nss[nss_id];
	NSS_VERIFY_CTX_MAGIC(nss_ctx);

	mem_ctx = &nss_ctx->meminfo_ctx;
	map = &mem_ctx->meminfo_map;
	n2h_info = &mem_ctx->n2h_info;
	h2n_info = &mem_ctx->h2n_info;

	seq_printf(seq, "%-5s %-32s %-7s %-7s %-10s %-10s\n",
			"Index", "Name", "Default", "User", "Size", "DMA Addr");
	seq_printf(seq, "%-5s %-32s %-7s %-7s 0x%-8x 0x%-8x\n",
			"N/A", "n2h_rings", "SDRAM",
			nss_meminfo_memtype_table[n2h_info->memtype],
			n2h_info->total_size, n2h_info->dma_addr);
	seq_printf(seq, "%-5s %-32s %-7s %-7s 0x%-8x 0x%-8x\n",
			"N/A", "h2n_rings", "SDRAM",
			nss_meminfo_memtype_table[h2n_info->memtype],
			h2n_info->total_size, h2n_info->dma_addr);

	r = map->requests;
	for (i = 0; i < map->num_requests; i++) {
		seq_printf(seq, "%-5d %-32s %-7s %-7s 0x%-8x 0x%-8x\n",
				i, r[i].name,
				nss_meminfo_memtype_table[r[i].memtype_default],
				nss_meminfo_memtype_table[r[i].memtype_user],
				r[i].size, r[i].addr);
	}

	seq_printf(seq, "Available IMEM: 0x%x\n", mem_ctx->imem_end - mem_ctx->imem_tail);
	seq_printf(seq, "How to configure?\n");
	seq_printf(seq, "Overwrite /etc/modules.d/32-qca-nss-drv with the following contents, then reboot:\n\n");
	seq_printf(seq, "qca-nss-drv meminfo_user_config=\"<core_id, name, memory_type>, ..\"\n\n");
	seq_printf(seq, "For example, <1, h2n_rings, IMEM> means: the h2n_rings of core 1 are in IMEM\n");
	seq_printf(seq, "Note: UTCM_SHARED cannot be used for n2h_rings, h2n_rings and debug_boot_log_desc.\n");

	return 0;
}

/*
 * nss_meminfo_debugfs_file_open()
 *	Open the meminfo debugfs file.
 */
static int nss_meminfo_debugfs_file_open(struct inode *inode, struct file *file)
{
	return single_open(file, nss_meminfo_config_show, inode->i_private);
}

static struct file_operations nss_meminfo_debugfs_ops = {
	.owner = THIS_MODULE,
	.open = nss_meminfo_debugfs_file_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/*
 * nss_meminfo_init_debugfs()
 *	Init meminfo debugfs.
 */
static void nss_meminfo_init_debugfs(struct nss_ctx_instance *nss_ctx)
{
	int i;
	struct dentry *meminfo_main_dentry;
	struct dentry *meminfo_core_dentries[NSS_MAX_CORES];

	if (nss_meminfo_debugfs_exist)
		return;

	/*
	 * Create a directory for showing the meminfo configuration of each core.
	 */
	meminfo_main_dentry = debugfs_create_dir("meminfo", nss_top_main.top_dentry);
	if (unlikely(!meminfo_main_dentry)) {
		nss_warning("Failed to create qca-nss-drv/meminfo directory in debugfs\n");
		return;
	}

	for (i = 0; i < nss_top_main.num_nss; i++) {
		char file[10];
		snprintf(file, sizeof(file), "core%d", i);
		meminfo_core_dentries[i] = debugfs_create_file(file, 0400, meminfo_main_dentry,
								(void *)(nss_ptr_t)i, &nss_meminfo_debugfs_ops);
		if (unlikely(!meminfo_core_dentries[i])) {
			int j;
			for (j = 0; j < i; j++)
				debugfs_remove(meminfo_core_dentries[j]);
			debugfs_remove(meminfo_main_dentry);
			nss_warning("Failed to create qca-nss-drv/meminfo/%s file in debugfs\n", file);
			return;
		}
	}

	nss_meminfo_debugfs_exist = true;
	nss_info("nss meminfo user config: %s\n", nss_meminfo_user_config);
}

/*
 * nss_meminfo_init()
 *	Initialization
 */
bool nss_meminfo_init(struct nss_ctx_instance *nss_ctx)
{
	struct nss_meminfo_ctx *mem_ctx;
	uint32_t *meminfo_start;
	struct nss_meminfo_map *map;
	struct nss_top_instance *nss_top = &nss_top_main;

	NSS_VERIFY_CTX_MAGIC(nss_ctx);
	mem_ctx = &nss_ctx->meminfo_ctx;

	/*
	 * meminfo_start is the label where the start address of the meminfo map is stored.
	 */
	meminfo_start = (uint32_t *)ioremap_nocache(nss_ctx->load + NSS_MEMINFO_MAP_START_OFFSET,
						    NSS_MEMINFO_RESERVE_AREA_SIZE);
	if (!meminfo_start) {
		nss_info_always("%p: cannot remap meminfo start\n", nss_ctx);
		return false;
	}

	/*
	 * Check the meminfo start magic.
	 */
	if ((uint16_t)meminfo_start[0] != NSS_MEMINFO_RESERVE_AREA_MAGIC) {
		nss_info_always("%p: failed to verify meminfo start magic\n", nss_ctx);
		return false;
	}
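
	/*
	 * meminfo_start[0] carries the reserve-area magic in its low 16 bits;
	 * meminfo_start[1] holds the address of the meminfo map itself, which
	 * is remapped below.
	 */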

	map = &mem_ctx->meminfo_map;
	map->start = (uint32_t *)ioremap_cache(meminfo_start[1], NSS_MEMINFO_MAP_SIZE);
	if (!map->start) {
		nss_info_always("%p: failed to remap meminfo map\n", nss_ctx);
		return false;
	}

	/*
	 * Check the meminfo map magic.
	 */
	if ((uint16_t)map->start[0] != NSS_MEMINFO_MAP_START_MAGIC) {
		nss_info_always("%p: failed to verify meminfo map magic\n", nss_ctx);
		return false;
	}

	/*
	 * Meminfo map settings
	 */
	map->num_requests = 0;
	map->requests = (struct nss_meminfo_request *)(map->start + 1);

	/*
	 * Init IMEM
	 */
	nss_top->hal_ops->init_imem(nss_ctx);

	/*
	 * Init UTCM_SHARED if supported
	 */
	if (!nss_top->hal_ops->init_utcm_shared(nss_ctx, meminfo_start)) {
		nss_info_always("%p: failed to initialize UTCM_SHARED meminfo\n", nss_ctx);
		return false;
	}

	/*
	 * Init meminfo block lists
	 */
	if (!nss_meminfo_init_block_lists(nss_ctx)) {
		nss_info_always("%p: failed to initialize meminfo block lists\n", nss_ctx);
		return false;
	}

	/*
	 * Configure the N2H/H2N rings and nss_if_mem_map.
	 */
	if (!nss_meminfo_configure_n2h_h2n_rings(nss_ctx))
		return false;

	nss_meminfo_init_debugfs(nss_ctx);

	nss_info_always("%p: meminfo init succeeded\n", nss_ctx);
	return true;
}