/*
 * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * nss_meminfo.c
 *	NSS meminfo subsystem
 */

#include <linux/seq_file_net.h>
#include "nss_tx_rx_common.h"
#include "nss_core.h"
#include "nss_arch.h"
#include "nss_meminfo.h"

/*
 * Store user configuration
 */
static char nss_meminfo_user_config[NSS_MEMINFO_USER_CONFIG_MAXLEN];
module_param_string(meminfo_user_config, nss_meminfo_user_config,
		    NSS_MEMINFO_USER_CONFIG_MAXLEN, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
MODULE_PARM_DESC(meminfo_user_config, "meminfo user configuration");
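
/*
 * Example (mirroring the help text printed by nss_meminfo_config_show()
 * below): to place core 1's h2n_rings in IMEM, load the driver with
 *
 *	qca-nss-drv meminfo_user_config="<1, h2n_rings, IMEM>"
 *
 * Multiple <core_id, name, memory_type> tuples may be given, separated
 * by commas.
 */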

static bool nss_meminfo_debugfs_exist;

/*
 * Name table of memory types presented to the user.
 */
char *nss_meminfo_memtype_table[NSS_MEMINFO_MEMTYPE_MAX] = {"IMEM", "SDRAM", "UTCM_SHARED"};

/*
 * nss_meminfo_alloc_sdram()
 *	Allocate an SDRAM block.
 */
static unsigned long nss_meminfo_alloc_sdram(struct nss_ctx_instance *nss_ctx, uint32_t size)
{
	unsigned long addr = 0;

	/*
	 * kmalloc() returns a cache-line-aligned buffer, which is required
	 * here because the block is later mapped for DMA.
	 */
	addr = (unsigned long)kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	if (!addr)
		nss_info_always("%p: failed to alloc a sdram block of size %u\n", nss_ctx, size);

	return addr;
}

/*
 * nss_meminfo_free_sdram()
 *	Free SDRAM memory.
 */
static inline void nss_meminfo_free_sdram(struct nss_ctx_instance *nss_ctx, uint32_t dma_addr,
					unsigned long kern_addr, uint32_t size)
{
	/*
	 * Unmap it first, since every SDRAM block has been DMA-mapped; the
	 * direction must match the DMA_TO_DEVICE mapping done at allocation time.
	 */
	dma_unmap_single(nss_ctx->dev, dma_addr, size, DMA_TO_DEVICE);
	kfree((void *)kern_addr);
}

/*
 * nss_meminfo_alloc_imem()
 *	Allocate an IMEM block in a sequential way.
 */
static uint32_t nss_meminfo_alloc_imem(struct nss_ctx_instance *nss_ctx, uint32_t size, int alignment)
{
	struct nss_meminfo_ctx *mem_ctx = &nss_ctx->meminfo_ctx;
	uint32_t new_tail;
	uint32_t addr = 0;
	int mask;

	mask = alignment - 1;

	/*
	 * Alignment has to be a power of 2.
	 */
	nss_assert(!(alignment & mask));

	new_tail = mem_ctx->imem_tail;

	/*
	 * Align the address up if it is not aligned.
	 */
	if (new_tail & mask)
		new_tail = (new_tail + mask) & ~mask;
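
	/*
	 * Worked example for the align-up above (illustrative values): with
	 * alignment == 16 (mask == 0xf) and imem_tail == 0x104, the tail is
	 * rounded up to (0x104 + 0xf) & ~0xf == 0x110 before the block is
	 * carved out.
	 */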

	if (size > (mem_ctx->imem_end - new_tail)) {
		nss_info_always("%p: failed to alloc an IMEM block of size %u\n", nss_ctx, size);
		return addr;
	}

	addr = new_tail;
	mem_ctx->imem_tail = new_tail + size;

	return addr;
}

/*
 * nss_meminfo_free_imem()
 *	Free an IMEM block. Padding bytes added to satisfy the alignment
 *	request are not reclaimed.
 */
static void nss_meminfo_free_imem(struct nss_ctx_instance *nss_ctx, uint32_t addr, uint32_t size)
{
	struct nss_meminfo_ctx *mem_ctx = &nss_ctx->meminfo_ctx;
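	/*
	 * Note: the IMEM allocator is a simple bump allocator, so freeing only
	 * moves the tail back by size; memory is fully reclaimed only when
	 * blocks are released in reverse order of allocation. The addr
	 * argument is unused.
	 */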
	mem_ctx->imem_tail -= size;
}

/*
 * nss_meminfo_alloc_utcm_shared()
 *	Allocate a UTCM_SHARED block in a sequential way.
 */
static uint32_t nss_meminfo_alloc_utcm_shared(struct nss_ctx_instance *nss_ctx, uint32_t size, int alignment)
{
	struct nss_meminfo_ctx *mem_ctx = &nss_ctx->meminfo_ctx;
	uint32_t new_tail;
	uint32_t addr = 0;
	int mask;

	mask = alignment - 1;

	/*
	 * Alignment has to be a power of 2.
	 */
	nss_assert(!(alignment & mask));

	new_tail = mem_ctx->utcm_shared_tail;

	/*
	 * Align the address up if it is not aligned.
	 */
	if (new_tail & mask)
		new_tail = (new_tail + mask) & ~mask;

	if (size > (mem_ctx->utcm_shared_end - new_tail)) {
		nss_info_always("%p: failed to alloc a UTCM_SHARED block of size %u\n", nss_ctx, size);
		return addr;
	}

	addr = new_tail;
	mem_ctx->utcm_shared_tail = new_tail + size;

	return addr;
}

/*
 * nss_meminfo_free_utcm_shared()
 *	Free a UTCM_SHARED block. Padding bytes added to satisfy the
 *	alignment request are not reclaimed.
 */
static void nss_meminfo_free_utcm_shared(struct nss_ctx_instance *nss_ctx, uint32_t addr, uint32_t size)
{
	struct nss_meminfo_ctx *mem_ctx = &nss_ctx->meminfo_ctx;
	mem_ctx->utcm_shared_tail -= size;
}

/*
 * nss_meminfo_if_user_overwrite()
 *	Return the user-configured memory type, or -1 if none was configured.
 */
static int nss_meminfo_if_user_overwrite(struct nss_ctx_instance *nss_ctx, const char *name)
{
	char *user_config;
	char **mtype_table;
	char needle[NSS_MEMINFO_BLOCK_NAME_MAXLEN + 6];
	char user_choice[NSS_MEMINFO_MEMTYPE_NAME_MAXLEN];
	int i;
	char *p;

	user_config = nss_meminfo_user_config;
	mtype_table = nss_meminfo_memtype_table;

	snprintf(needle, sizeof(needle), "<%1d, %s, ", nss_ctx->id, name);
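
	/*
	 * Illustrative example: with nss_ctx->id == 1 and name "h2n_rings",
	 * the needle is "<1, h2n_rings, ". If the user passed
	 * "<1, h2n_rings, IMEM>", the loop below copies "IMEM" into
	 * user_choice and the table lookup returns NSS_MEMINFO_MEMTYPE_IMEM.
	 */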

	p = strstr(user_config, needle);
	if (!p)
		return -1;

	p += strlen(needle);

	for (i = 0; i < NSS_MEMINFO_MEMTYPE_NAME_MAXLEN - 1; i++) {
		/*
		 * Each user config entry looks like <core_id, object_name, memory_type>;
		 * it starts with '<' and ends with '>'.
		 */
		if (*p == '>' || *p == '\0')
			break;
		user_choice[i] = *p;
		p++;
	}

	user_choice[i] = '\0';

	for (i = 0; i < NSS_MEMINFO_MEMTYPE_MAX; i++)
		if (!strcmp(mtype_table[i], user_choice))
			return i;

	return -1;
}

/*
 * nss_meminfo_free_block_lists()
 *	Free the block nodes and memory associated with each memory object.
 */
static void nss_meminfo_free_block_lists(struct nss_ctx_instance *nss_ctx)
{
	struct nss_meminfo_ctx *mem_ctx;
	struct nss_meminfo_block_list *l;
	int i;

	mem_ctx = &nss_ctx->meminfo_ctx;
	for (i = 0; i < NSS_MEMINFO_MEMTYPE_MAX; i++) {
		struct nss_meminfo_block *b;
		l = &mem_ctx->block_lists[i];
		b = l->head;
		while (b) {
			struct nss_meminfo_block *tmp;
			/*
			 * Free IMEM/SDRAM/UTCM_SHARED memory.
			 */
			switch (i) {
			case NSS_MEMINFO_MEMTYPE_IMEM:
				nss_meminfo_free_imem(nss_ctx, b->dma_addr, b->size);
				break;
			case NSS_MEMINFO_MEMTYPE_SDRAM:
				nss_meminfo_free_sdram(nss_ctx, b->dma_addr, b->kern_addr, b->size);
				break;
			case NSS_MEMINFO_MEMTYPE_UTCM_SHARED:
				nss_meminfo_free_utcm_shared(nss_ctx, b->dma_addr, b->size);
				break;
			}

			/*
			 * Free the struct nss_meminfo_block itself.
			 */
			tmp = b;
			b = b->next;
			kfree(tmp);
		}
	}
}

/*
 * nss_meminfo_init_block_lists()
 *	Initialize the block lists and allocate memory for each block.
 */
static bool nss_meminfo_init_block_lists(struct nss_ctx_instance *nss_ctx)
{
	struct nss_meminfo_ctx *mem_ctx;
	struct nss_meminfo_block_list *l;
	struct nss_meminfo_request *r;
	struct nss_meminfo_map *map;
	int mtype;
	unsigned long kern_addr;
	uint32_t dma_addr;
	int i;

	mem_ctx = &nss_ctx->meminfo_ctx;

	/*
	 * Fill in the memory type for each block list.
	 */
	for (i = 0; i < NSS_MEMINFO_MEMTYPE_MAX; i++)
		mem_ctx->block_lists[i].memtype = i;

	map = &mem_ctx->meminfo_map;

	/*
	 * Loop through all meminfo requests by checking the per-request magic.
	 */
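	/*
	 * Map layout, as parsed here: map->start[0] carries the 16-bit start
	 * magic, the request array begins at (map->start + 1), and the array
	 * is terminated by NSS_MEMINFO_MAP_END_MAGIC in place of a request's
	 * magic field (verified after the loop).
	 */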
	for (r = map->requests; r->magic == NSS_MEMINFO_REQUEST_MAGIC; r++) {
		struct nss_meminfo_block *b = (struct nss_meminfo_block *)
						kmalloc(sizeof(struct nss_meminfo_block), GFP_KERNEL);
		if (!b) {
			nss_info_always("%p: failed to allocate meminfo block\n", nss_ctx);
			goto cleanup;
		}

		b->index = map->num_requests++;
		b->size = r->size;

		/*
		 * Look up the user-defined memory type and use it if it
		 * exists; otherwise, fall back to the default memory type.
		 */
		mtype = nss_meminfo_if_user_overwrite(nss_ctx, r->name);
		if (mtype == -1)
			mtype = r->memtype_default;
		r->memtype_user = mtype;

		switch (mtype) {
		case NSS_MEMINFO_MEMTYPE_IMEM:
			/*
			 * Return the SoC real address for IMEM as the DMA address.
			 */
			dma_addr = nss_meminfo_alloc_imem(nss_ctx, r->size, r->alignment);
			if (!dma_addr) {
				nss_info_always("%p: failed to alloc IMEM block\n", nss_ctx);
				goto cleanup;
			}

			/*
			 * Calculate the offset into the kernel address (vmap) where
			 * the whole IMEM is already mapped, instead of calling ioremap().
			 */
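			/*
			 * For instance (illustrative addresses): if vphys is
			 * 0x38000000 and an IMEM block was carved out at
			 * dma_addr 0x38007000, then kern_addr is vmap + 0x7000.
			 */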
			kern_addr = (unsigned long)nss_ctx->vmap + dma_addr - nss_ctx->vphys;
			break;
		case NSS_MEMINFO_MEMTYPE_SDRAM:
			kern_addr = nss_meminfo_alloc_sdram(nss_ctx, r->size);
			if (!kern_addr) {
				nss_info_always("%p: failed to alloc SDRAM block\n", nss_ctx);
				goto cleanup;
			}

			dma_addr = dma_map_single(nss_ctx->dev, (void *)kern_addr, r->size, DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(nss_ctx->dev, dma_addr))) {
				nss_info_always("%p: failed to map SDRAM block\n", nss_ctx);
				goto cleanup;
			}
			break;
		case NSS_MEMINFO_MEMTYPE_UTCM_SHARED:
			/*
			 * Return the SoC real address for UTCM_SHARED as the DMA address.
			 */
			dma_addr = nss_meminfo_alloc_utcm_shared(nss_ctx, r->size, r->alignment);
			if (!dma_addr) {
				nss_info_always("%p: failed to alloc UTCM_SHARED block\n", nss_ctx);
				goto cleanup;
			}
			/*
			 * There is no corresponding mapped address in the kernel:
			 * UTCM_SHARED access from the kernel is not allowed, so memory
			 * objects requesting UTCM_SHARED must not use any kernel-mapped
			 * address.
			 */
			kern_addr = NSS_MEMINFO_POISON;
			break;
		default:
			nss_info_always("%p: unsupported memory type %d\n", nss_ctx, mtype);
			goto cleanup;
		}

		/*
		 * Update the request with the DMA address; this memory will
		 * only be used by the firmware.
		 */
		r->addr = dma_addr;

		/*
		 * nss_if_mem_map settings
		 */
		if (!strcmp(r->name, "nss_if_mem_map_inst")) {
			BUG_ON(mtype == NSS_MEMINFO_MEMTYPE_UTCM_SHARED);
			mem_ctx->if_map_memtype = mtype;
			mem_ctx->if_map_dma = dma_addr;
			mem_ctx->if_map = (struct nss_if_mem_map *)kern_addr;
		}

		if (!strcmp(r->name, "debug_boot_log_desc")) {
			BUG_ON(mtype == NSS_MEMINFO_MEMTYPE_UTCM_SHARED);
			mem_ctx->logbuffer_memtype = mtype;
			mem_ctx->logbuffer_dma = dma_addr;
			mem_ctx->logbuffer = (struct nss_log_descriptor *)kern_addr;
		}

		if (!strcmp(r->name, "c2c_descs_if_mem_map")) {
			mem_ctx->c2c_start_memtype = mtype;
			mem_ctx->c2c_start_dma = dma_addr;
		}

		/*
		 * Flush the updated meminfo request.
		 */
		NSS_CORE_DMA_CACHE_MAINT(r, sizeof(struct nss_meminfo_request), DMA_TO_DEVICE);
		NSS_CORE_DSB();

		/*
		 * Update the list
		 */
		l = &mem_ctx->block_lists[mtype];
		l->num_blks++;
		l->total_size += r->size;

		b->next = l->head;
		l->head = b;
	}

	/*
	 * Verify the memory map end magic.
	 */
	if (*((uint16_t *)r) != NSS_MEMINFO_MAP_END_MAGIC)
		goto cleanup;

	return true;

cleanup:
	nss_meminfo_free_block_lists(nss_ctx);
	return false;
}

/*
 * nss_meminfo_allocate_n2h_h2n_rings()
 *	Allocate N2H/H2N rings.
 */
static bool nss_meminfo_allocate_n2h_h2n_rings(struct nss_ctx_instance *nss_ctx,
						struct nss_meminfo_n2h_h2n_info *info)
{
	switch (info->memtype) {
	case NSS_MEMINFO_MEMTYPE_SDRAM:
		info->kern_addr = nss_meminfo_alloc_sdram(nss_ctx, info->total_size);
		if (!info->kern_addr)
			return false;

		info->dma_addr = dma_map_single(nss_ctx->dev, (void *)info->kern_addr,
						info->total_size, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(nss_ctx->dev, info->dma_addr))) {
			kfree((void *)info->kern_addr);
			return false;
		}
		break;
	case NSS_MEMINFO_MEMTYPE_IMEM:
		info->dma_addr = nss_meminfo_alloc_imem(nss_ctx, info->total_size, L1_CACHE_BYTES);
		if (!info->dma_addr)
			return false;

		info->kern_addr = (unsigned long)(nss_ctx->vmap) + info->dma_addr - nss_ctx->vphys;
		break;
	default:
		return false;
	}

	return true;
}

/*
 * nss_meminfo_configure_n2h_h2n_rings()
 *	Configure the N2H/H2N rings and if_map.
 */
static bool nss_meminfo_configure_n2h_h2n_rings(struct nss_ctx_instance *nss_ctx)
{
	struct nss_meminfo_ctx *mem_ctx = &nss_ctx->meminfo_ctx;
	struct nss_meminfo_n2h_h2n_info *h2n_info;
	struct nss_meminfo_n2h_h2n_info *n2h_info;
	struct nss_if_mem_map *if_map;
	int i;
	int mtype;

	h2n_info = &mem_ctx->h2n_info;
	n2h_info = &mem_ctx->n2h_info;

	/*
	 * Check the memory type. SDRAM is the default option.
	 */
	mtype = nss_meminfo_if_user_overwrite(nss_ctx, "h2n_rings");
	if (mtype == -1)
		mtype = NSS_MEMINFO_MEMTYPE_SDRAM;

	h2n_info->memtype = mtype;

	mtype = nss_meminfo_if_user_overwrite(nss_ctx, "n2h_rings");
	if (mtype == -1)
		mtype = NSS_MEMINFO_MEMTYPE_SDRAM;

	n2h_info->memtype = mtype;

	n2h_info->total_size = sizeof(struct n2h_descriptor) * NSS_N2H_RING_COUNT * (NSS_RING_SIZE + 2);
	h2n_info->total_size = sizeof(struct h2n_descriptor) * NSS_H2N_RING_COUNT * (NSS_RING_SIZE + 2);
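
	/*
	 * Sizing sketch (illustrative numbers): each ring occupies
	 * (NSS_RING_SIZE + 2) descriptor slots. With a ring size of 128 and a
	 * 16-byte n2h_descriptor, one N2H ring would take 16 * 130 = 2080
	 * bytes, and total_size covers NSS_N2H_RING_COUNT such rings laid out
	 * back to back (see the per-ring stride below).
	 */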

	/*
	 * N2H ring allocations
	 */
	if (!(nss_meminfo_allocate_n2h_h2n_rings(nss_ctx, n2h_info))) {
		nss_info_always("%p: failed to allocate/map n2h rings\n", nss_ctx);
		return false;
	}

	/*
	 * H2N ring allocations
	 */
	if (!(nss_meminfo_allocate_n2h_h2n_rings(nss_ctx, h2n_info))) {
		nss_info_always("%p: failed to allocate/map h2n rings\n", nss_ctx);
		goto cleanup;
	}

	/*
	 * Bring a fresh copy of if_map from memory in order to read it correctly.
	 */
	if_map = mem_ctx->if_map;
	NSS_CORE_DMA_CACHE_MAINT((void *)if_map, sizeof(struct nss_if_mem_map), DMA_FROM_DEVICE);
	NSS_CORE_DSB();

	if_map->n2h_rings = NSS_N2H_RING_COUNT;
	if_map->h2n_rings = NSS_H2N_RING_COUNT;

	/*
	 * N2H ring settings
	 */
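	/*
	 * Ring i's descriptors start at byte offset
	 * i * sizeof(struct n2h_descriptor) * (NSS_RING_SIZE + 2) from the
	 * base of the allocation; the same offset is applied to the DMA
	 * address recorded in if_map so the firmware sees an identical layout.
	 */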
	for (i = 0; i < NSS_N2H_RING_COUNT; i++) {
		struct hlos_n2h_desc_ring *n2h_desc_ring = &nss_ctx->n2h_desc_ring[i];
		n2h_desc_ring->desc_ring.desc = (struct n2h_descriptor *)(n2h_info->kern_addr + i * sizeof(struct n2h_descriptor) * (NSS_RING_SIZE + 2));
		n2h_desc_ring->desc_ring.size = NSS_RING_SIZE;
		n2h_desc_ring->hlos_index = if_map->n2h_hlos_index[i];

		if_map->n2h_desc_if[i].size = NSS_RING_SIZE;
		if_map->n2h_desc_if[i].desc_addr = n2h_info->dma_addr + i * sizeof(struct n2h_descriptor) * (NSS_RING_SIZE + 2);
		nss_info("%p: N2H ring %d, size %d, addr = %x\n", nss_ctx, i, if_map->n2h_desc_if[i].size, if_map->n2h_desc_if[i].desc_addr);
	}

	/*
	 * H2N ring settings
	 */
	for (i = 0; i < NSS_H2N_RING_COUNT; i++) {
		struct hlos_h2n_desc_rings *h2n_desc_ring = &nss_ctx->h2n_desc_rings[i];
		h2n_desc_ring->desc_ring.desc = (struct h2n_descriptor *)(h2n_info->kern_addr + i * sizeof(struct h2n_descriptor) * (NSS_RING_SIZE + 2));
		h2n_desc_ring->desc_ring.size = NSS_RING_SIZE;
		h2n_desc_ring->hlos_index = if_map->h2n_hlos_index[i];
		spin_lock_init(&h2n_desc_ring->lock);

		if_map->h2n_desc_if[i].size = NSS_RING_SIZE;
		if_map->h2n_desc_if[i].desc_addr = h2n_info->dma_addr + i * sizeof(struct h2n_descriptor) * (NSS_RING_SIZE + 2);
		nss_info("%p: H2N ring %d, size %d, addr = %x\n", nss_ctx, i, if_map->h2n_desc_if[i].size, if_map->h2n_desc_if[i].desc_addr);
	}

	/*
	 * Flush the updated nss_if_mem_map.
	 */
	NSS_CORE_DMA_CACHE_MAINT((void *)if_map, sizeof(struct nss_if_mem_map), DMA_TO_DEVICE);
	NSS_CORE_DSB();

	return true;

cleanup:
	if (n2h_info->memtype == NSS_MEMINFO_MEMTYPE_SDRAM)
		nss_meminfo_free_sdram(nss_ctx, n2h_info->dma_addr, n2h_info->kern_addr, n2h_info->total_size);
	else
		nss_meminfo_free_imem(nss_ctx, n2h_info->dma_addr, n2h_info->total_size);

	nss_meminfo_free_block_lists(nss_ctx);
	return false;
}

/*
 * nss_meminfo_config_show()
 *	Show the meminfo configuration of a core.
 */
static int nss_meminfo_config_show(struct seq_file *seq, void *v)
{
	struct nss_ctx_instance *nss_ctx;
	struct nss_meminfo_ctx *mem_ctx;
	struct nss_meminfo_n2h_h2n_info *n2h_info;
	struct nss_meminfo_n2h_h2n_info *h2n_info;
	struct nss_meminfo_map *map;
	struct nss_meminfo_request *r;
	int nss_id;
	int i;

	/*
	 * i_private is passed to us by debugfs_create_file().
	 */
	nss_id = (int)(nss_ptr_t)seq->private;
	if (nss_id < 0 || nss_id >= nss_top_main.num_nss) {
		nss_warning("nss_id: %d is not valid\n", nss_id);
		return -ENODEV;
	}

	nss_ctx = &nss_top_main.nss[nss_id];
	NSS_VERIFY_CTX_MAGIC(nss_ctx);

	mem_ctx = &nss_ctx->meminfo_ctx;
	map = &mem_ctx->meminfo_map;
	n2h_info = &mem_ctx->n2h_info;
	h2n_info = &mem_ctx->h2n_info;

	seq_printf(seq, "%-5s %-32s %-7s %-7s %-10s %-10s\n",
			"Index", "Name", "Default", "User", "Size", "DMA Addr");
	seq_printf(seq, "%-5s %-32s %-7s %-7s 0x%-8x 0x%-8x\n",
			"N/A", "n2h_rings", "SDRAM",
			nss_meminfo_memtype_table[n2h_info->memtype],
			n2h_info->total_size, n2h_info->dma_addr);
	seq_printf(seq, "%-5s %-32s %-7s %-7s 0x%-8x 0x%-8x\n",
			"N/A", "h2n_rings", "SDRAM",
			nss_meminfo_memtype_table[h2n_info->memtype],
			h2n_info->total_size, h2n_info->dma_addr);

	r = map->requests;
	for (i = 0; i < map->num_requests; i++) {
		seq_printf(seq, "%-5d %-32s %-7s %-7s 0x%-8x 0x%-8x\n",
				i, r[i].name,
				nss_meminfo_memtype_table[r[i].memtype_default],
				nss_meminfo_memtype_table[r[i].memtype_user],
				r[i].size, r[i].addr);
	}

	seq_printf(seq, "Available IMEM: 0x%x\n", mem_ctx->imem_end - mem_ctx->imem_tail);
	seq_printf(seq, "How to configure?\n");
	seq_printf(seq, "Overwrite /etc/modules.d/32-qca-nss-drv with the following contents, then reboot:\n\n");
	seq_printf(seq, "qca-nss-drv meminfo_user_config=\"<core_id, name, memory_type>, ..\"\n\n");
	seq_printf(seq, "For example, <1, h2n_rings, IMEM> means: h2n_rings of core 1 is in IMEM\n");
	seq_printf(seq, "Note: UTCM_SHARED cannot be used for n2h_rings, h2n_rings and debug_boot_log_desc.\n");

	return 0;
}

/*
 * nss_meminfo_debugfs_file_open()
 *	Open the meminfo debugfs file.
 */
static int nss_meminfo_debugfs_file_open(struct inode *inode, struct file *file)
{
	return single_open(file, nss_meminfo_config_show, inode->i_private);
}

static const struct file_operations nss_meminfo_debugfs_ops = {
	.owner = THIS_MODULE,
	.open = nss_meminfo_debugfs_file_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/*
 * nss_meminfo_init_debugfs()
 *	Init meminfo debugfs.
 */
static void nss_meminfo_init_debugfs(struct nss_ctx_instance *nss_ctx)
{
	int i;
	struct dentry *meminfo_main_dentry;
	struct dentry *meminfo_core_dentries[NSS_MAX_CORES];

	if (nss_meminfo_debugfs_exist)
		return;

	/*
	 * Create a directory for showing the meminfo configuration of each core.
	 */
	meminfo_main_dentry = debugfs_create_dir("meminfo", nss_top_main.top_dentry);
	if (unlikely(!meminfo_main_dentry)) {
		nss_warning("Failed to create qca-nss-drv/meminfo directory in debugfs\n");
		return;
	}

	for (i = 0; i < nss_top_main.num_nss; i++) {
		char file[10];
		snprintf(file, sizeof(file), "core%d", i);
		meminfo_core_dentries[i] = debugfs_create_file(file, 0400, meminfo_main_dentry,
								(void *)(nss_ptr_t)i, &nss_meminfo_debugfs_ops);
		if (unlikely(!meminfo_core_dentries[i])) {
			int j;
			for (j = 0; j < i; j++)
				debugfs_remove(meminfo_core_dentries[j]);
			debugfs_remove(meminfo_main_dentry);
			nss_warning("Failed to create qca-nss-drv/meminfo/%s file in debugfs", file);
			return;
		}
	}

	nss_meminfo_debugfs_exist = true;
	nss_info("nss meminfo user config: %s\n", nss_meminfo_user_config);
}

/*
 * nss_meminfo_init()
 *	Initialization
 */
bool nss_meminfo_init(struct nss_ctx_instance *nss_ctx)
{
	struct nss_meminfo_ctx *mem_ctx;
	uint32_t *meminfo_start;
	struct nss_meminfo_map *map;
	struct nss_top_instance *nss_top = &nss_top_main;

	NSS_VERIFY_CTX_MAGIC(nss_ctx);
	mem_ctx = &nss_ctx->meminfo_ctx;

	/*
	 * meminfo_start is the label where the start address of the meminfo map is stored.
	 */
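	/*
	 * Reserve-area layout as consumed here: the low 16 bits of word 0 hold
	 * NSS_MEMINFO_RESERVE_AREA_MAGIC and word 1 holds the physical address
	 * of the meminfo map, which is remapped below.
	 */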
	meminfo_start = (uint32_t *)ioremap_nocache(nss_ctx->load + NSS_MEMINFO_MAP_START_OFFSET,
						NSS_MEMINFO_RESERVE_AREA_SIZE);
	if (!meminfo_start) {
		nss_info_always("%p: cannot remap meminfo start\n", nss_ctx);
		return false;
	}

	/*
	 * Check the meminfo start magic.
	 */
	if ((uint16_t)meminfo_start[0] != NSS_MEMINFO_RESERVE_AREA_MAGIC) {
		nss_info_always("%p: failed to verify meminfo start magic\n", nss_ctx);
		return false;
	}

	map = &mem_ctx->meminfo_map;
	map->start = (uint32_t *)ioremap_cache(meminfo_start[1], NSS_MEMINFO_MAP_SIZE);
	if (!map->start) {
		nss_info_always("%p: failed to remap meminfo map\n", nss_ctx);
		return false;
	}

	/*
	 * Check the meminfo map magic.
	 */
	if ((uint16_t)map->start[0] != NSS_MEMINFO_MAP_START_MAGIC) {
		nss_info_always("%p: failed to verify meminfo map magic\n", nss_ctx);
		return false;
	}

	/*
	 * Meminfo map settings
	 */
	map->num_requests = 0;
	map->requests = (struct nss_meminfo_request *)(map->start + 1);

	/*
	 * Init IMEM
	 */
	nss_top->hal_ops->init_imem(nss_ctx);

	/*
	 * Init UTCM_SHARED if supported
	 */
	if (!nss_top->hal_ops->init_utcm_shared(nss_ctx, meminfo_start)) {
		nss_info_always("%p: failed to initialize UTCM_SHARED meminfo\n", nss_ctx);
		return false;
	}

	/*
	 * Init meminfo block lists
	 */
	if (!nss_meminfo_init_block_lists(nss_ctx)) {
		nss_info_always("%p: failed to initialize meminfo block lists\n", nss_ctx);
		return false;
	}

	/*
	 * Configure the N2H/H2N rings and nss_if_mem_map.
	 */
	if (!nss_meminfo_configure_n2h_h2n_rings(nss_ctx))
		return false;

	nss_meminfo_init_debugfs(nss_ctx);

	nss_info_always("%p: meminfo init succeeded\n", nss_ctx);
	return true;
}