/*
 * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * nss_meminfo.c
 * NSS meminfo subsystem
 */

#include <linux/seq_file_net.h>
#include "nss_tx_rx_common.h"
#include "nss_core.h"
#include "nss_arch.h"
#include "nss_meminfo.h"

/*
 * Store the user configuration.
 */
static char nss_meminfo_user_config[NSS_MEMINFO_USER_CONFIG_MAXLEN];
module_param_string(meminfo_user_config, nss_meminfo_user_config,
		    NSS_MEMINFO_USER_CONFIG_MAXLEN, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
MODULE_PARM_DESC(meminfo_user_config, "meminfo user configuration");

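/*
 * Example (illustrative): to place core 1's h2n_rings in IMEM and core 0's
 * debug_boot_log_desc in SDRAM, the module would be loaded with:
 *
 *	qca-nss-drv meminfo_user_config="<1, h2n_rings, IMEM>, <0, debug_boot_log_desc, SDRAM>"
 *
 * See nss_meminfo_config_show() below for the authoritative format.
 */
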
static bool nss_meminfo_debugfs_exist;

/*
 * Name table of memory types presented to the user.
 */
char *nss_meminfo_memtype_table[NSS_MEMINFO_MEMTYPE_MAX] = {"IMEM", "SDRAM", "UTCM_SHARED"};

/*
 * nss_meminfo_alloc_sdram()
 * Allocate an SDRAM block.
 */
static unsigned long nss_meminfo_alloc_sdram(struct nss_ctx_instance *nss_ctx, uint32_t size)
{
	unsigned long addr = 0;

	/*
	 * kmalloc() returns a cache-line-aligned buffer.
	 */
	addr = (unsigned long)kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	if (!addr)
		nss_info_always("%p: failed to alloc an SDRAM block of size %u\n", nss_ctx, size);

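	/*
	 * The address is kept as an integer and handed to the firmware,
	 * presumably leaving no pointer reference kmemleak can track, so
	 * tell kmemleak not to report this block as leaked.
	 */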
	kmemleak_not_leak((void *)addr);
	return addr;
}

/*
 * nss_meminfo_free_sdram()
 * Free SDRAM memory.
 */
static inline void nss_meminfo_free_sdram(struct nss_ctx_instance *nss_ctx, uint32_t dma_addr,
					unsigned long kern_addr, uint32_t size)
{
	/*
	 * Unmap it, since every SDRAM block has been DMA-mapped. The
	 * direction must match the DMA_TO_DEVICE used at mapping time.
	 */
	dma_unmap_single(nss_ctx->dev, dma_addr, size, DMA_TO_DEVICE);
	kfree((void *)kern_addr);
}

/*
 * nss_meminfo_alloc_imem()
 * Allocate an IMEM block in a sequential way.
 */
static uint32_t nss_meminfo_alloc_imem(struct nss_ctx_instance *nss_ctx, uint32_t size, int alignment)
{
	struct nss_meminfo_ctx *mem_ctx = &nss_ctx->meminfo_ctx;
	uint32_t new_tail;
	uint32_t addr = 0;
	int mask;

	mask = alignment - 1;

	/*
	 * Alignment has to be a power of 2.
	 */
	nss_assert(!(alignment & mask));

	new_tail = mem_ctx->imem_tail;

	/*
	 * Align the address up if it is not already aligned.
	 */
	if (new_tail & mask)
		new_tail = (new_tail + mask) & ~mask;
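
	/*
	 * Worked example (illustrative): with tail 0x1003 and alignment 16,
	 * mask is 0xf and new_tail becomes (0x1003 + 0xf) & ~0xf = 0x1010.
	 */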

	if (size > (mem_ctx->imem_end - new_tail)) {
		nss_info_always("%p: failed to alloc an IMEM block of size %u\n", nss_ctx, size);
		return addr;
	}

	addr = new_tail;
	mem_ctx->imem_tail = new_tail + size;

	return addr;
}

/*
 * nss_meminfo_free_imem()
 * Free an IMEM block. Padding bytes added for alignment are not reclaimed.
 */
static void nss_meminfo_free_imem(struct nss_ctx_instance *nss_ctx, uint32_t addr, uint32_t size)
{
	struct nss_meminfo_ctx *mem_ctx = &nss_ctx->meminfo_ctx;
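
	/*
	 * This simply rolls the tail allocator back by size bytes, so frees
	 * are only correct in reverse (LIFO) order of allocation; the addr
	 * argument is not consulted.
	 */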
	mem_ctx->imem_tail -= size;
}

/*
 * nss_meminfo_alloc_utcm_shared()
 * Allocate a UTCM_SHARED block in a sequential way.
 */
static uint32_t nss_meminfo_alloc_utcm_shared(struct nss_ctx_instance *nss_ctx, uint32_t size, int alignment)
{
	struct nss_meminfo_ctx *mem_ctx = &nss_ctx->meminfo_ctx;
	uint32_t new_tail;
	uint32_t addr = 0;
	int mask;

	mask = alignment - 1;

	/*
	 * Alignment has to be a power of 2.
	 */
	nss_assert(!(alignment & mask));

	new_tail = mem_ctx->utcm_shared_tail;

	/*
	 * Align the address up if it is not already aligned.
	 */
	if (new_tail & mask)
		new_tail = (new_tail + mask) & ~mask;

	if (size > (mem_ctx->utcm_shared_end - new_tail)) {
		nss_info_always("%p: failed to alloc a UTCM_SHARED block of size %u\n", nss_ctx, size);
		return addr;
	}

	addr = new_tail;
	mem_ctx->utcm_shared_tail = new_tail + size;

	return addr;
}

/*
 * nss_meminfo_free_utcm_shared()
 * Free a UTCM_SHARED block. Padding bytes added for alignment are not reclaimed.
 */
static void nss_meminfo_free_utcm_shared(struct nss_ctx_instance *nss_ctx, uint32_t addr, uint32_t size)
{
	struct nss_meminfo_ctx *mem_ctx = &nss_ctx->meminfo_ctx;
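
	/*
	 * Same LIFO-only tail rollback as nss_meminfo_free_imem(); addr is
	 * not consulted.
	 */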
	mem_ctx->utcm_shared_tail -= size;
}

/*
 * nss_meminfo_if_user_overwrite()
 * Return the user-configured memory type, or -1 if the user did not
 * configure one for this block.
 */
static int nss_meminfo_if_user_overwrite(struct nss_ctx_instance *nss_ctx, const char *name)
{
	char *user_config;
	char **mtype_table;
	char needle[NSS_MEMINFO_BLOCK_NAME_MAXLEN + 6];
	char user_choice[NSS_MEMINFO_MEMTYPE_NAME_MAXLEN];
	int i;
	char *p;

	user_config = nss_meminfo_user_config;
	mtype_table = nss_meminfo_memtype_table;

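	/*
	 * Example: for core 1 and block "h2n_rings" the needle is
	 * "<1, h2n_rings, ", which matches a user entry such as
	 * "<1, h2n_rings, IMEM>".
	 */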
	snprintf(needle, sizeof(needle), "<%1d, %s, ", nss_ctx->id, name);

	p = strstr(user_config, needle);
	if (!p)
		return -1;

	p += strlen(needle);

	for (i = 0; i < NSS_MEMINFO_MEMTYPE_NAME_MAXLEN - 1; i++) {
		/*
		 * Each user config entry looks like <core_id, object_name, memory_type>:
		 * it starts with '<' and ends with '>'.
		 */
		if (*p == '>' || *p == '\0')
			break;
		user_choice[i] = *p;
		p++;
	}

	user_choice[i] = '\0';

	for (i = 0; i < NSS_MEMINFO_MEMTYPE_MAX; i++)
		if (!strcmp(mtype_table[i], user_choice))
			return i;

	return -1;
}

/*
 * nss_meminfo_free_block_lists()
 * Free the block node and memory associated with each memory object.
 */
static void nss_meminfo_free_block_lists(struct nss_ctx_instance *nss_ctx)
{
	struct nss_meminfo_ctx *mem_ctx;
	struct nss_meminfo_block_list *l;
	int i;

	mem_ctx = &nss_ctx->meminfo_ctx;
	for (i = 0; i < NSS_MEMINFO_MEMTYPE_MAX; i++) {
		struct nss_meminfo_block *b;
		l = &mem_ctx->block_lists[i];
		b = l->head;
		while (b) {
			struct nss_meminfo_block *tmp;
			/*
			 * Free IMEM/SDRAM/UTCM_SHARED memory.
			 */
			switch (i) {
			case NSS_MEMINFO_MEMTYPE_IMEM:
				nss_meminfo_free_imem(nss_ctx, b->dma_addr, b->size);
				break;
			case NSS_MEMINFO_MEMTYPE_SDRAM:
				nss_meminfo_free_sdram(nss_ctx, b->dma_addr, b->kern_addr, b->size);
				break;
			case NSS_MEMINFO_MEMTYPE_UTCM_SHARED:
				nss_meminfo_free_utcm_shared(nss_ctx, b->dma_addr, b->size);
				break;
			}

			/*
			 * Free the struct nss_meminfo_block itself.
			 */
			tmp = b;
			b = b->next;
			kfree(tmp);
		}
	}
}

/*
 * nss_meminfo_init_block_lists()
 * Initialize the block lists and allocate memory for each block.
 */
static bool nss_meminfo_init_block_lists(struct nss_ctx_instance *nss_ctx)
{
	struct nss_meminfo_ctx *mem_ctx;
	struct nss_meminfo_block_list *l;
	struct nss_meminfo_request *r;
	struct nss_meminfo_map *map;
	int mtype;
	unsigned long kern_addr;
	uint32_t dma_addr;
	int i;

	mem_ctx = &nss_ctx->meminfo_ctx;

	/*
	 * Fill in the memory type for each block list.
	 */
	for (i = 0; i < NSS_MEMINFO_MEMTYPE_MAX; i++)
		mem_ctx->block_lists[i].memtype = i;

	map = &mem_ctx->meminfo_map;

	/*
	 * Loop through all meminfo requests by checking the per-request magic.
	 */
	for (r = map->requests; r->magic == NSS_MEMINFO_REQUEST_MAGIC; r++) {
		struct nss_meminfo_block *b = (struct nss_meminfo_block *)
						kmalloc(sizeof(struct nss_meminfo_block), GFP_KERNEL);
		if (!b) {
			nss_info_always("%p: failed to allocate meminfo block\n", nss_ctx);
			goto cleanup;
		}

		b->index = map->num_requests++;
		b->size = r->size;

		/*
		 * Look up the user-defined memory type and use it if present.
		 * Otherwise, fall back to the default memory type of the request.
		 */
		mtype = nss_meminfo_if_user_overwrite(nss_ctx, r->name);
		if (mtype == -1)
			mtype = r->memtype_default;
		r->memtype_user = mtype;

		switch (mtype) {
		case NSS_MEMINFO_MEMTYPE_IMEM:
			/*
			 * Return the SoC real address for IMEM as the DMA address.
			 */
			dma_addr = nss_meminfo_alloc_imem(nss_ctx, r->size, r->alignment);
			if (!dma_addr) {
				nss_info_always("%p: failed to alloc IMEM block\n", nss_ctx);
				goto cleanup;
			}

			/*
			 * Calculate the kernel address as an offset from the vmap
			 * base, where the whole IMEM is already mapped, instead of
			 * calling ioremap().
			 */
			kern_addr = (unsigned long)nss_ctx->vmap + dma_addr - nss_ctx->vphys;
			break;
		case NSS_MEMINFO_MEMTYPE_SDRAM:
			kern_addr = nss_meminfo_alloc_sdram(nss_ctx, r->size);
			if (!kern_addr) {
				nss_info_always("%p: failed to alloc SDRAM block\n", nss_ctx);
				goto cleanup;
			}

			dma_addr = dma_map_single(nss_ctx->dev, (void *)kern_addr, r->size, DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(nss_ctx->dev, dma_addr))) {
				nss_info_always("%p: failed to map SDRAM block\n", nss_ctx);
				goto cleanup;
			}
			break;
		case NSS_MEMINFO_MEMTYPE_UTCM_SHARED:
			/*
			 * Return the SoC real address for UTCM_SHARED as the DMA address.
			 */
			dma_addr = nss_meminfo_alloc_utcm_shared(nss_ctx, r->size, r->alignment);
			if (!dma_addr) {
				nss_info_always("%p: failed to alloc UTCM_SHARED block\n", nss_ctx);
				goto cleanup;
			}
			/*
			 * There is no corresponding mapped address in the kernel.
			 * UTCM_SHARED access from the kernel is not allowed; memory
			 * objects requesting UTCM_SHARED are not expected to use any
			 * kernel-mapped address.
			 */
			kern_addr = NSS_MEMINFO_POISON;
			break;
		default:
			nss_info_always("%p: unsupported memory type %d\n", nss_ctx, mtype);
			goto cleanup;
		}

		/*
		 * Update the request with the DMA address; this memory is used
		 * only by the firmware.
		 */
		r->addr = dma_addr;

		/*
		 * nss_if_mem_map settings
		 */
		if (!strcmp(r->name, "nss_if_mem_map_inst")) {
			BUG_ON(mtype == NSS_MEMINFO_MEMTYPE_UTCM_SHARED);
			mem_ctx->if_map_memtype = mtype;
			mem_ctx->if_map_dma = dma_addr;
			mem_ctx->if_map = (struct nss_if_mem_map *)kern_addr;
		}

		if (!strcmp(r->name, "debug_boot_log_desc")) {
			BUG_ON(mtype == NSS_MEMINFO_MEMTYPE_UTCM_SHARED);
			mem_ctx->logbuffer_memtype = mtype;
			mem_ctx->logbuffer_dma = dma_addr;
			mem_ctx->logbuffer = (struct nss_log_descriptor *)kern_addr;
		}

		if (!strcmp(r->name, "c2c_descs_if_mem_map")) {
			mem_ctx->c2c_start_memtype = mtype;
			mem_ctx->c2c_start_dma = dma_addr;
		}

		/*
		 * Flush the updated meminfo request.
		 */
		NSS_CORE_DMA_CACHE_MAINT(r, sizeof(struct nss_meminfo_request), DMA_TO_DEVICE);
		NSS_CORE_DSB();

		/*
		 * Update the list.
		 */
		l = &mem_ctx->block_lists[mtype];
		l->num_blks++;
		l->total_size += r->size;

		b->next = l->head;
		l->head = b;
	}

	/*
	 * Verify the memory map end magic.
	 */
	if (*((uint16_t *)r) != NSS_MEMINFO_MAP_END_MAGIC)
		goto cleanup;

	return true;

cleanup:
	nss_meminfo_free_block_lists(nss_ctx);
	return false;
}

/*
 * nss_meminfo_allocate_n2h_h2n_rings()
 * Allocate the N2H/H2N rings.
 */
static bool nss_meminfo_allocate_n2h_h2n_rings(struct nss_ctx_instance *nss_ctx,
					       struct nss_meminfo_n2h_h2n_info *info)
{
	switch (info->memtype) {
	case NSS_MEMINFO_MEMTYPE_SDRAM:
		info->kern_addr = nss_meminfo_alloc_sdram(nss_ctx, info->total_size);
		if (!info->kern_addr)
			return false;

		info->dma_addr = dma_map_single(nss_ctx->dev, (void *)info->kern_addr,
						info->total_size, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(nss_ctx->dev, info->dma_addr))) {
			kfree((void *)info->kern_addr);
			return false;
		}
		break;
	case NSS_MEMINFO_MEMTYPE_IMEM:
		info->dma_addr = nss_meminfo_alloc_imem(nss_ctx, info->total_size, L1_CACHE_BYTES);
		if (!info->dma_addr)
			return false;

		info->kern_addr = (unsigned long)(nss_ctx->vmap) + info->dma_addr - nss_ctx->vphys;
		break;
	default:
		return false;
	}

	return true;
}

/*
 * nss_meminfo_configure_n2h_h2n_rings()
 * Configure the N2H/H2N rings and if_map.
 */
static bool nss_meminfo_configure_n2h_h2n_rings(struct nss_ctx_instance *nss_ctx)
{
	struct nss_meminfo_ctx *mem_ctx = &nss_ctx->meminfo_ctx;
	struct nss_meminfo_n2h_h2n_info *h2n_info;
	struct nss_meminfo_n2h_h2n_info *n2h_info;
	struct nss_if_mem_map *if_map;
	int i;
	int mtype;

	h2n_info = &mem_ctx->h2n_info;
	n2h_info = &mem_ctx->n2h_info;

	/*
	 * Check the memory type. SDRAM is the default option.
	 */
	mtype = nss_meminfo_if_user_overwrite(nss_ctx, "h2n_rings");
	if (mtype == -1)
		mtype = NSS_MEMINFO_MEMTYPE_SDRAM;

	h2n_info->memtype = mtype;

	mtype = nss_meminfo_if_user_overwrite(nss_ctx, "n2h_rings");
	if (mtype == -1)
		mtype = NSS_MEMINFO_MEMTYPE_SDRAM;

	n2h_info->memtype = mtype;

	n2h_info->total_size = sizeof(struct n2h_descriptor) * NSS_N2H_RING_COUNT * (NSS_RING_SIZE + 2);
	h2n_info->total_size = sizeof(struct h2n_descriptor) * NSS_H2N_RING_COUNT * (NSS_RING_SIZE + 2);
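
	/*
	 * Each ring is sized for NSS_RING_SIZE descriptors plus two extra
	 * descriptor slots. The "+ 2" is dictated by the shared ring layout;
	 * the assumption here is that the extra slots are reserved for ring
	 * bookkeeping rather than usable descriptors.
	 */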

	/*
	 * N2H ring allocations
	 */
	if (!(nss_meminfo_allocate_n2h_h2n_rings(nss_ctx, n2h_info))) {
		nss_info_always("%p: failed to allocate/map n2h rings\n", nss_ctx);
		return false;
	}

	/*
	 * H2N ring allocations
	 */
	if (!(nss_meminfo_allocate_n2h_h2n_rings(nss_ctx, h2n_info))) {
		nss_info_always("%p: failed to allocate/map h2n rings\n", nss_ctx);
		goto cleanup;
	}

	/*
	 * Bring in a fresh copy of if_map from memory so it is read correctly.
	 */
	if_map = mem_ctx->if_map;
	NSS_CORE_DMA_CACHE_MAINT((void *)if_map, sizeof(struct nss_if_mem_map), DMA_FROM_DEVICE);
	NSS_CORE_DSB();

	if_map->n2h_rings = NSS_N2H_RING_COUNT;
	if_map->h2n_rings = NSS_H2N_RING_COUNT;

	/*
	 * N2H ring settings
	 */
	for (i = 0; i < NSS_N2H_RING_COUNT; i++) {
		struct hlos_n2h_desc_ring *n2h_desc_ring = &nss_ctx->n2h_desc_ring[i];
		n2h_desc_ring->desc_ring.desc = (struct n2h_descriptor *)(n2h_info->kern_addr + i * sizeof(struct n2h_descriptor) * (NSS_RING_SIZE + 2));
		n2h_desc_ring->desc_ring.size = NSS_RING_SIZE;
		n2h_desc_ring->hlos_index = if_map->n2h_hlos_index[i];

		if_map->n2h_desc_if[i].size = NSS_RING_SIZE;
		if_map->n2h_desc_if[i].desc_addr = n2h_info->dma_addr + i * sizeof(struct n2h_descriptor) * (NSS_RING_SIZE + 2);
		nss_info("%p: N2H ring %d, size %d, addr = %x\n", nss_ctx, i, if_map->n2h_desc_if[i].size, if_map->n2h_desc_if[i].desc_addr);
	}

	/*
	 * H2N ring settings
	 */
	for (i = 0; i < NSS_H2N_RING_COUNT; i++) {
		struct hlos_h2n_desc_rings *h2n_desc_ring = &nss_ctx->h2n_desc_rings[i];
		h2n_desc_ring->desc_ring.desc = (struct h2n_descriptor *)(h2n_info->kern_addr + i * sizeof(struct h2n_descriptor) * (NSS_RING_SIZE + 2));
		h2n_desc_ring->desc_ring.size = NSS_RING_SIZE;
		h2n_desc_ring->hlos_index = if_map->h2n_hlos_index[i];
		spin_lock_init(&h2n_desc_ring->lock);

		if_map->h2n_desc_if[i].size = NSS_RING_SIZE;
		if_map->h2n_desc_if[i].desc_addr = h2n_info->dma_addr + i * sizeof(struct h2n_descriptor) * (NSS_RING_SIZE + 2);
		nss_info("%p: H2N ring %d, size %d, addr = %x\n", nss_ctx, i, if_map->h2n_desc_if[i].size, if_map->h2n_desc_if[i].desc_addr);
	}

	/*
	 * Flush the updated nss_if_mem_map.
	 */
	NSS_CORE_DMA_CACHE_MAINT((void *)if_map, sizeof(struct nss_if_mem_map), DMA_TO_DEVICE);
	NSS_CORE_DSB();

	return true;

cleanup:
	if (n2h_info->memtype == NSS_MEMINFO_MEMTYPE_SDRAM)
		nss_meminfo_free_sdram(nss_ctx, n2h_info->dma_addr, n2h_info->kern_addr, n2h_info->total_size);
	else
		nss_meminfo_free_imem(nss_ctx, n2h_info->dma_addr, n2h_info->total_size);

	nss_meminfo_free_block_lists(nss_ctx);
	return false;
}

/*
 * nss_meminfo_config_show()
 * Show the meminfo configuration of a core.
 */
static int nss_meminfo_config_show(struct seq_file *seq, void *v)
{
	struct nss_ctx_instance *nss_ctx;
	struct nss_meminfo_ctx *mem_ctx;
	struct nss_meminfo_n2h_h2n_info *n2h_info;
	struct nss_meminfo_n2h_h2n_info *h2n_info;
	struct nss_meminfo_map *map;
	struct nss_meminfo_request *r;
	int nss_id;
	int i;

	/*
	 * i_private is passed to us by debugfs_create_file().
	 */
	nss_id = (int)(nss_ptr_t)seq->private;
	if (nss_id < 0 || nss_id >= nss_top_main.num_nss) {
		nss_warning("nss_id: %d is not valid\n", nss_id);
		return -ENODEV;
	}

	nss_ctx = &nss_top_main.nss[nss_id];
	NSS_VERIFY_CTX_MAGIC(nss_ctx);

	mem_ctx = &nss_ctx->meminfo_ctx;
	map = &mem_ctx->meminfo_map;
	n2h_info = &mem_ctx->n2h_info;
	h2n_info = &mem_ctx->h2n_info;

	seq_printf(seq, "%-5s %-32s %-7s %-7s %-10s %-10s\n",
			"Index", "Name", "Default", "User", "Size", "DMA Addr");
	seq_printf(seq, "%-5s %-32s %-7s %-7s 0x%-8x 0x%-8x\n",
			"N/A", "n2h_rings", "SDRAM",
			nss_meminfo_memtype_table[n2h_info->memtype],
			n2h_info->total_size, n2h_info->dma_addr);
	seq_printf(seq, "%-5s %-32s %-7s %-7s 0x%-8x 0x%-8x\n",
			"N/A", "h2n_rings", "SDRAM",
			nss_meminfo_memtype_table[h2n_info->memtype],
			h2n_info->total_size, h2n_info->dma_addr);

	r = map->requests;
	for (i = 0; i < map->num_requests; i++) {
		seq_printf(seq, "%-5d %-32s %-7s %-7s 0x%-8x 0x%-8x\n",
				i, r[i].name,
				nss_meminfo_memtype_table[r[i].memtype_default],
				nss_meminfo_memtype_table[r[i].memtype_user],
				r[i].size, r[i].addr);
	}

	seq_printf(seq, "Available IMEM: 0x%x\n", mem_ctx->imem_end - mem_ctx->imem_tail);
	seq_printf(seq, "How to configure?\n");
	seq_printf(seq, "Overwrite /etc/modules.d/32-qca-nss-drv with the following contents, then reboot:\n\n");
	seq_printf(seq, "qca-nss-drv meminfo_user_config=\"<core_id, name, memory_type>, ..\"\n\n");
	seq_printf(seq, "For example, <1, h2n_rings, IMEM> means: h2n_rings of core 1 is placed in IMEM\n");
	seq_printf(seq, "Note: UTCM_SHARED cannot be used for n2h_rings, h2n_rings and debug_boot_log_desc.\n");

	return 0;
}
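
/*
 * Sample output of the debugfs file (illustrative values only):
 *
 * Index Name                             Default User    Size       DMA Addr
 * N/A   n2h_rings                        SDRAM   SDRAM   0x20900    0x46b00000
 * N/A   h2n_rings                        SDRAM   IMEM    0x20900    0x38000000
 * 0     nss_if_mem_map_inst              IMEM    IMEM    0x1000     0x38021000
 * Available IMEM: 0x5000
 */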

/*
 * nss_meminfo_debugfs_file_open()
 * Open the meminfo debugfs file.
 */
static int nss_meminfo_debugfs_file_open(struct inode *inode, struct file *file)
{
	return single_open(file, nss_meminfo_config_show, inode->i_private);
}

static const struct file_operations nss_meminfo_debugfs_ops = {
	.owner = THIS_MODULE,
	.open = nss_meminfo_debugfs_file_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/*
 * nss_meminfo_init_debugfs()
 * Initialize the meminfo debugfs.
 */
static void nss_meminfo_init_debugfs(struct nss_ctx_instance *nss_ctx)
{
	int i;
	struct dentry *meminfo_main_dentry;
	struct dentry *meminfo_core_dentries[NSS_MAX_CORES];

	if (nss_meminfo_debugfs_exist)
		return;

	/*
	 * Create a directory for showing the meminfo configuration of each core.
	 */
	meminfo_main_dentry = debugfs_create_dir("meminfo", nss_top_main.top_dentry);
	if (unlikely(!meminfo_main_dentry)) {
		nss_warning("Failed to create qca-nss-drv/meminfo directory in debugfs\n");
		return;
	}

	for (i = 0; i < nss_top_main.num_nss; i++) {
		char file[10];
		snprintf(file, sizeof(file), "core%d", i);
		meminfo_core_dentries[i] = debugfs_create_file(file, 0400, meminfo_main_dentry,
								(void *)(nss_ptr_t)i, &nss_meminfo_debugfs_ops);
		if (unlikely(!meminfo_core_dentries[i])) {
			int j;
			for (j = 0; j < i; j++)
				debugfs_remove(meminfo_core_dentries[j]);
			debugfs_remove(meminfo_main_dentry);
			nss_warning("Failed to create qca-nss-drv/meminfo/%s file in debugfs", file);
			return;
		}
	}

	nss_meminfo_debugfs_exist = true;
	nss_info("nss meminfo user config: %s\n", nss_meminfo_user_config);
}

/*
 * nss_meminfo_init()
 * Initialization
 */
bool nss_meminfo_init(struct nss_ctx_instance *nss_ctx)
{
	struct nss_meminfo_ctx *mem_ctx;
	uint32_t *meminfo_start;
	struct nss_meminfo_map *map;
	struct nss_top_instance *nss_top = &nss_top_main;

	NSS_VERIFY_CTX_MAGIC(nss_ctx);
	mem_ctx = &nss_ctx->meminfo_ctx;

	/*
	 * meminfo_start is the label where the start address of the meminfo map is stored.
	 */
	meminfo_start = (uint32_t *)ioremap_nocache(nss_ctx->load + NSS_MEMINFO_MAP_START_OFFSET,
						    NSS_MEMINFO_RESERVE_AREA_SIZE);
	if (!meminfo_start) {
		nss_info_always("%p: cannot remap meminfo start\n", nss_ctx);
		return false;
	}

	/*
	 * Check the meminfo start magic.
	 */
	if ((uint16_t)meminfo_start[0] != NSS_MEMINFO_RESERVE_AREA_MAGIC) {
		nss_info_always("%p: failed to verify meminfo start magic\n", nss_ctx);
		return false;
	}

	map = &mem_ctx->meminfo_map;
	map->start = (uint32_t *)ioremap_cache(meminfo_start[1], NSS_MEMINFO_MAP_SIZE);
	if (!map->start) {
		nss_info_always("%p: failed to remap meminfo map\n", nss_ctx);
		return false;
	}

	/*
	 * Check the meminfo map magic.
	 */
	if ((uint16_t)map->start[0] != NSS_MEMINFO_MAP_START_MAGIC) {
		nss_info_always("%p: failed to verify meminfo map magic\n", nss_ctx);
		return false;
	}

	/*
	 * Meminfo map settings
	 */
	map->num_requests = 0;
	map->requests = (struct nss_meminfo_request *)(map->start + 1);
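
	/*
	 * Layout, as implied by the checks above (a sketch, not an
	 * authoritative spec): the reserve area at
	 * load + NSS_MEMINFO_MAP_START_OFFSET carries
	 * NSS_MEMINFO_RESERVE_AREA_MAGIC in the low 16 bits of its first
	 * word, followed by the physical address of the map. The map itself
	 * starts with NSS_MEMINFO_MAP_START_MAGIC in one 32-bit word, then
	 * an array of struct nss_meminfo_request entries, each tagged with
	 * NSS_MEMINFO_REQUEST_MAGIC and terminated by
	 * NSS_MEMINFO_MAP_END_MAGIC.
	 */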

	/*
	 * Initialize IMEM.
	 */
	nss_top->hal_ops->init_imem(nss_ctx);

	/*
	 * Initialize UTCM_SHARED if supported.
	 */
	if (!nss_top->hal_ops->init_utcm_shared(nss_ctx, meminfo_start)) {
		nss_info_always("%p: failed to initialize UTCM_SHARED meminfo\n", nss_ctx);
		return false;
	}

	/*
	 * Initialize the meminfo block lists.
	 */
	if (!nss_meminfo_init_block_lists(nss_ctx)) {
		nss_info_always("%p: failed to initialize meminfo block lists\n", nss_ctx);
		return false;
	}

	/*
	 * Configure the N2H/H2N rings and nss_if_mem_map.
	 */
	if (!nss_meminfo_configure_n2h_h2n_rings(nss_ctx))
		return false;

	nss_meminfo_init_debugfs(nss_ctx);

	nss_info_always("%p: meminfo init succeeded\n", nss_ctx);
	return true;
}