blob: f19e4c3787ce9f7af3d26827ade0a3f15d60d75f [file] [log] [blame]
Sundarajan Srinivasan1b03fe22014-12-02 13:20:56 -08001/*
2 **************************************************************************
Guojun Jin487c84f2019-11-05 14:56:39 -08003 * Copyright (c) 2014,2016,2018, 2020 The Linux Foundation. All rights reserved.
Sundarajan Srinivasan1b03fe22014-12-02 13:20:56 -08004 * Permission to use, copy, modify, and/or distribute this software for
5 * any purpose with or without fee is hereby granted, provided that the
6 * above copyright notice and this permission notice appear in all copies.
7 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
8 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
9 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
10 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
11 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
12 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
13 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
14 **************************************************************************
15 */
16
17/*
18 * qsdk/qca/src/qca-nss-drv/profiler/profile.c
19 *
20 * Implementation for NetAP Profiler
21 */
22
23#include <linux/platform_device.h>
Murat Sezgin3441e772015-10-26 11:55:57 -070024#include <linux/of.h>
Sundarajan Srinivasan1b03fe22014-12-02 13:20:56 -080025#include <linux/export.h>
26#include <linux/module.h>
27#include <linux/seq_file.h>
28#include <linux/proc_fs.h>
29#include <linux/mm.h>
30#include <linux/mmzone.h>
31#include <linux/fs.h>
32#include <linux/page-flags.h>
33#include <linux/sched.h>
34#include <asm/uaccess.h>
35#include <asm/page.h>
36#include <asm/thread_info.h>
Guojun Jin3deae8c2016-08-23 15:51:21 -070037#include <linux/ctype.h>
Sundarajan Srinivasand09d7dd2014-12-10 16:24:21 -080038#include <nss_api_if.h>
Sundarajan Srinivasan1b03fe22014-12-02 13:20:56 -080039
40#include "profilenode.h"
41#include "profpkt.h"
42
43/*
44 * This is the driver for the NetAP Core profiler. The system interface to the driver is
45 * profile_register_performance_counter(), defined in <asm/profile.>
46 * a set of proc files (proc/profile/<*>), used by the profiler daemon
47 *
48 * communication between the profiler components is described in a set of header files.
49 * There are multiple versions of these files that must be kept synchronized:
50 * in nss/source/pkg/profile
51 * in tools/profiler
52 * in qsdk/qca/src/qca-nss-drv/profiler
53 *
54 * profilesample.h specifies the sample format used by pkg/profile, profile driver, and ip3kprof (two versions)
55 * profilenode.h specifies the driver node communication between NetAP and the profile driver. (two versions)
56 * profpkt.h specifies the network packet format between the profile driver, profile daemon, and ip3kprof (two versions)
57 *
58 *
59 * NSS profile sampler:
60 * pkg/profile/src/profile.c
61 * pkg/profile/include/profilenode.h
62 * pkg/profile/include/profilesample.h
63 *
64 * profile driver: this code
65 * qsdk/qca/src/qca-nss-drv/profiler
66 *
67 * profilerd: the user daemon that sends data to the tool
68 * qsdk/qca/feeds/qca/utils/profilerd
69 *
70 * ubicom32-prof: the Windows tool
71 * tools/profiler/src/(many files)
72 *
73 */
74
75#ifdef PROFILE_DEBUG
Guojun Jin487c84f2019-11-05 14:56:39 -080076#define profileDebug(s, ...) pr_debug("%s[%d]: " s, __func__, __LINE__, ##__VA_ARGS__)
77#define profileInfo(s, ...) pr_info("%s[%d]: " s, __func__, __LINE__, ##__VA_ARGS__)
Sundarajan Srinivasan1b03fe22014-12-02 13:20:56 -080078#else
79#define profileDebug(s, ...)
80#define profileInfo(s, ...)
Sundarajan Srinivasan1b03fe22014-12-02 13:20:56 -080081#endif
82
Guojun Jin487c84f2019-11-05 14:56:39 -080083#define profileWarn(s, ...) pr_warn("%s[%d]: " s, __func__, __LINE__, ##__VA_ARGS__)
84
Sundarajan Srinivasan1b03fe22014-12-02 13:20:56 -080085static void profiler_handle_reply(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm);
86
87/*
88 * LINUX and Ultra counters must all fit in one packet
89 */
/*
 * Registration storage for Linux-side performance counters.
 * LINUX and Ultra counters must all fit in one packet.
 */
#define PROFILE_LINUX_MAX_COUNTERS 40		/* max Linux counters reportable in one stats packet */
#define PROFILE_STS_EVENT_COUNTERS 8		/* number of FW stat event counter slots */
#define PROFILE_STS_EVENT_THREAD_BITS 5		/* bits used to encode a thread ID in the event word */

/* number of registered counters (indices 0..profile_num_counters-1 are valid) */
static int profile_num_counters = 0;
/* registered counter addresses; volatile because other code updates them asynchronously */
static volatile unsigned int *profile_counter[PROFILE_LINUX_MAX_COUNTERS];
/* registered counter names, parallel to profile_counter[] */
static char profile_name[PROFILE_LINUX_MAX_COUNTERS][PROFILE_COUNTER_NAME_LENGTH];
97
98/*
99 * internal function to check if @name has been registered before
100 * return the found index, or -1 otherwise
101 */
102static int __profile_find_entry(char *name)
103{
104 int i;
105
106 for (i = 0; i < profile_num_counters; i++) {
107 if (!strncasecmp(name, profile_name[i], PROFILE_COUNTER_NAME_LENGTH)) {
108 return i;
109 }
110 }
111 return -1;
112}
113
114/*
115 * profile_register_performance_counter - register @counter into profile tracking list by key @name
116 * @counter: pointer of the counter variable
117 * @name: identifier of this counter
118 *
119 * Returns zero if total entries exceeding PROFILE_LINUX_MAX_COUNTERS
120 * non-zero otherwise.
121 *
122 * Each @name gives unique entry for @counter, by allocating a new array slot or just use existing one.
123 * No need of de-registration API, since a loadable module's new insmod, will replace the
124 * @counter's * new address at the same profile_counter[] slot.
125 */
126int profile_register_performance_counter(volatile unsigned int *counter, char *name)
127{
128 int i;
129
130 if (profile_num_counters >= PROFILE_LINUX_MAX_COUNTERS) {
131 return 0;
132 }
133
134 i = __profile_find_entry(name);
135 if (i < 0) {
136 i = profile_num_counters++;
137 }
138
139 profile_counter[i] = counter;
Guojun Jind3328392016-01-22 14:14:17 -0800140 strlcpy(profile_name[i], name, PROFILE_COUNTER_NAME_LENGTH);
Sundarajan Srinivasan1b03fe22014-12-02 13:20:56 -0800141 profile_name[i][PROFILE_COUNTER_NAME_LENGTH - 1] = 0;
142
143 return 1;
144}
145
146/*
Guojun Jina09b8b02018-01-25 16:34:43 -0800147 * profile_make_data_packet
148 * Make a packet full of sample data
Sundarajan Srinivasan1b03fe22014-12-02 13:20:56 -0800149 */
/*
 * profile_make_data_packet()
 *	Build one sample-data packet into the user buffer @buf (at most @blen bytes).
 *
 * Copies a profile_header followed by as many struct nss_profile_sample
 * entries as fit. When the current ping-pong buffer is drained it advances
 * to the next full buffer in the circular list (pn->ccl). When not all
 * samples fit, whole "sets" are split off via the 4-bit fields of
 * ex_hd.sets_map and the header bookkeeping is temporarily modified, then
 * restored at the end.
 *
 * Returns the number of bytes produced, 0 when nothing to send,
 * -EINVAL for a too-small buffer, -EAGAIN when no full buffer is ready,
 * or -EFAULT on copy_to_user failure.
 */
static int profile_make_data_packet(char *buf, int blen, struct profile_io *pn)
{
	int sp_samples = 0;	/* samples separated (deferred) into later packets, if any */
	int ns;			/* number of samples to emit in this packet */
	struct profile_header ph;
	struct nss_profile_sample_ctrl *psc_hd = &pn->pnc.pn2h->psc_header;

	/* must hold at least the header plus one sample */
	if (blen < sizeof(ph) + sizeof(struct nss_profile_sample)) {
		return -EINVAL;
	}

	profileInfo("%p stat %x cnt %d %p\n", pn->pnc.pn2h, pn->pnc.pn2h->mh.md_type, psc_hd->ps_count, pn->ccl);

	/*
	 * Current buffer exhausted: advance to the next entry of the circular
	 * list, provided the producer has marked it FULL.
	 */
	if (pn->pnc.pn2h->mh.md_type == PINGPONG_EMPTY || psc_hd->ps_count < 1) {
		struct nss_profile_n2h_sample_buf *nsb;
		ns = (pn->ccl_read + 1) & (CCL_SIZE-1);	/* CCL_SIZE is a power of two */
		nsb = pn->ccl + ns;
		if (ns == pn->ccl_write || nsb->mh.md_type != PINGPONG_FULL) {
			profileInfo("waiting more data %x %p : ns %d rd %d wr %d\n", nsb->mh.md_type, nsb, ns, pn->ccl_read, pn->ccl_write);
			return -EAGAIN;
		}
		pn->ccl_read = ns;
		profileInfo("sp %p => %p rd %d %p\n", pn->pnc.samples, nsb->samples, ns, nsb);
		psc_hd = &nsb->psc_header;
		pn->pnc.pn2h = nsb;
		pn->pnc.samples = nsb->samples;
		pn->pnc.cur = 0;
	}
	pn->pnc.pn2h->mh.md_type = PINGPONG_INUSE;	/* claim the buffer while we drain it */

	/*
	 * fill in the packet header
	 */
	memset(&ph, 0, sizeof(ph));
	ph.pph.magic = htons(PROF_MAGIC + PROFILE_VERSION);
	ph.pph.header_size = sizeof(ph);
	ph.pph.profile_instructions = 0;
	ph.pph.clock_freq = pn->pnc.un.cpu_freq;
	ph.pph.ddr_freq = pn->pnc.un.ddr_freq;
	ph.pph.cpu_id = pn->pnc.un.cpu_id;
	ph.pph.seq_num = htonl(pn->profile_sequence_num);
	ph.pph.sample_stack_words = NSS_PROFILE_STACK_WORDS;

	/* how many samples the caller's buffer can carry */
	ns = (blen - sizeof(ph)) / sizeof(struct nss_profile_sample);
	profileInfo("%X: blen %d ns = %d psc_hd count %d ssets %d phs %zu pss %zu\n",
		pn->profile_sequence_num, blen, ns, psc_hd->ps_count,
		psc_hd->ex_hd.sample_sets, sizeof(ph), sizeof(struct nss_profile_sample));
	if (ns > psc_hd->ps_count)
		ns = psc_hd->ps_count;
	if (ns == 0) {
		printk("NS should not be 0: rlen %d hd cnt %d\n", blen, psc_hd->ps_count);
		return 0;
	}

	/*
	 * if buf cannot hold all samples, then samples must be separated by set.
	 */
	if (ns < psc_hd->ps_count) {
		ph.exh.sets_map = psc_hd->ex_hd.sets_map;	/* save for separating sets */
		do {
			/* each 4-bit nibble of sets_map is one set's sample count */
			sp_samples += psc_hd->ex_hd.sets_map & 0x0F;
			psc_hd->ex_hd.sets_map >>= 4;	/* remove the last set */
			psc_hd->ex_hd.sample_sets--;
			ph.exh.sample_sets++;	/* save for restore later */
		} while ((psc_hd->ps_count - sp_samples) > ns);
		ns = psc_hd->ps_count - sp_samples;
	}
	ph.pph.sample_count = ns;
	if (copy_to_user(buf, &ph.pph, sizeof(ph.pph)) != 0) {
		return -EFAULT;
	}
	buf += sizeof(ph.pph);

	/*
	 * ph.exh is unused dummy; and psc_hd->ex_hd is used directly to avoid double mem copy
	 */
	if (copy_to_user(buf, &psc_hd->ex_hd, sizeof(psc_hd->ex_hd)) != 0) {
		return -EFAULT;
	}
	buf += sizeof(psc_hd->ex_hd);

	blen = ns * sizeof(struct nss_profile_sample);	/* blen reused: payload byte count */
	profileDebug("-profile_make_data_packet %p slen %d cur %d dcped %zd + %zd\n",
		pn->pnc.samples, blen, pn->pnc.cur, sizeof(ph.pph), sizeof(psc_hd->ex_hd));
	if (copy_to_user(buf, &pn->pnc.samples[pn->pnc.cur], blen) != 0) {
		return -EFAULT;
	}
	pn->pnc.cur += ns;
	psc_hd->ps_count -= ns;
	if (psc_hd->ps_count < 1)
		pn->pnc.pn2h->mh.md_type = PINGPONG_EMPTY;	/* hand the buffer back to the producer */

	/*
	 * restore left over sample counts; 0s for no one
	 */
	if (sp_samples) {
		profileDebug("%d sps %d %d: sets %d : %d map %x <> %x\n", psc_hd->ps_count, ns, sp_samples, psc_hd->ex_hd.sample_sets, ph.exh.sample_sets, psc_hd->ex_hd.sets_map, ph.exh.sets_map);
		psc_hd->ex_hd.sample_sets = ph.exh.sample_sets;
		psc_hd->ex_hd.sets_map = ph.exh.sets_map;
	}

	pn->profile_sequence_num++;
	blen += sizeof(ph);	/* total bytes written = header + payload */
	profileDebug("+profile_make_data_packet %d phd len %zd nsp %p rd %d cnt %d\n", blen, sizeof(ph), pn->pnc.pn2h, pn->ccl_read, psc_hd->ps_count);
	return blen;
}
256
Guojun Jin487c84f2019-11-05 14:56:39 -0800257static void *profiler_get_dma(struct nss_ctx_instance *nss_ctx, struct profile_io *pn)
258{
259 struct nss_profile_sdma_producer *dma;
260 void *kaddr = nss_profiler_alloc_dma(nss_ctx, &dma);
261 pn->pnc.un.sram_start = dma->desc_ring;
262 return kaddr;
263}
264
Sundarajan Srinivasan1b03fe22014-12-02 13:20:56 -0800265/*
266 * This is no longer needed due to NetAP and Linux use different CPUs, and profile is NetAP only.
267 * All related code will be removed after corresponging code in visual tool is corrected; otherwise
268 * visual tool will mis-behave
269 */
/*
 * Legacy placeholder counters retained only so the remote visual tool keeps
 * parsing packets correctly; both values are always reported as zero
 * (see profile_make_stats_packet()).
 */
struct profile_counter profile_builtin_stats[] =
{
	{
		"Free memory(KB)", 0
	},
	{
		"Max free Block(KB)", 0
	}
};
279
280/*
Guojun Jina09b8b02018-01-25 16:34:43 -0800281 * profile_make_stats_packet
282 * make a packet full of performance counters (software)
Sundarajan Srinivasan1b03fe22014-12-02 13:20:56 -0800283 */
284static int profile_make_stats_packet(char *buf, int bytes, struct profile_io *pn)
285{
Guojun Jinf7f90f82018-08-16 18:09:23 -0700286 static char prof_pkt[NSS_PROFILE_MAX_PACKET_SIZE];
Sundarajan Srinivasan1b03fe22014-12-02 13:20:56 -0800287
288 char *ptr;
289 int n;
290 struct profile_counter *counter_ptr;
291 struct profile_header_counters *hdr = (struct profile_header_counters *)prof_pkt;
Guojun Jinf7f90f82018-08-16 18:09:23 -0700292 struct nss_profile_sample_ctrl *psc_hd = &pn->pnc.pn2h->psc_header;
Sundarajan Srinivasan1b03fe22014-12-02 13:20:56 -0800293
Guojun Jinf7f90f82018-08-16 18:09:23 -0700294 if (bytes > NSS_PROFILE_MAX_PACKET_SIZE) {
295 bytes = NSS_PROFILE_MAX_PACKET_SIZE;
Sundarajan Srinivasan1b03fe22014-12-02 13:20:56 -0800296 }
297 n = sizeof(profile_builtin_stats) + (pn->pnc.un.num_counters + profile_num_counters) * sizeof(*counter_ptr);
298
299 if ((bytes - sizeof(hdr)) < n) {
300 profileWarn("room too small %d for cnts %d\n", bytes, n);
301 return 0;
302 }
303
304 hdr->magic = htons(PROF_MAGIC_COUNTERS);
305 hdr->ultra_count = htons(pn->pnc.un.num_counters);
306 hdr->linux_count = htonl(profile_num_counters + sizeof(profile_builtin_stats) / sizeof(*counter_ptr));
Guojun Jinf7f90f82018-08-16 18:09:23 -0700307 hdr->ultra_sample_time = psc_hd->ex_hd.clocks;
308 hdr->linux_sample_time = psc_hd->ex_hd.clocks; /* QSDK has no time func */
Sundarajan Srinivasan1b03fe22014-12-02 13:20:56 -0800309
310 n = pn->pnc.un.num_counters; /* copy NSS counters */
311 n *= sizeof(pn->pnc.un.counters[0]);
312 ptr = (char*) (hdr + 1);
313 memcpy(ptr, (void *)(pn->pnc.un.counters), n);
314 ptr += n;
315
316 counter_ptr = (struct profile_counter *)ptr;
317 for (n = 0; n < profile_num_counters; ++n) {
318 counter_ptr->value = htonl(*profile_counter[n]);
Guojun Jind3328392016-01-22 14:14:17 -0800319 strlcpy(counter_ptr->name, profile_name[n],
320 PROFILE_COUNTER_NAME_LENGTH);
Sundarajan Srinivasan1b03fe22014-12-02 13:20:56 -0800321 counter_ptr++;
322 }
323 ptr = (char*)counter_ptr;
324
325 /*
326 * built in statistics
327 profile_get_memory_stats(&total_free, &max_free);
328 */
329 profile_builtin_stats[0].value = 0;
330 profile_builtin_stats[1].value = 0;
331 memcpy(ptr, (void *)profile_builtin_stats, sizeof(profile_builtin_stats));
332 ptr += sizeof(profile_builtin_stats);
333
334 n = ptr - prof_pkt;
335 if (copy_to_user(buf, prof_pkt, n) != 0) {
336 return -EFAULT;
337 }
338 return n;
339}
340
/*
 * Per-core profiler state, one entry per NSS core.
 * Space for all memory blocks so we can hold locks for short time when
 * walking tables; entries are looked up by core number in profile_open().
 */
static struct profile_io *node[NSS_MAX_CORES];
345
Guojun Jina09b8b02018-01-25 16:34:43 -0800346/*
347 * profile_open
348 * open function of system call
349 */
/*
 * profile_open()
 *	open function of system call
 *
 * Selects the per-core state from the LAST digit of the device/file name
 * (e.g. "...0" -> core 0); any non-digit or out-of-range value falls back
 * to core 0. When the core is initialized and not already profiling, the
 * profiler is armed and — for readable opens only — a START message is sent
 * to the NSS firmware.
 *
 * Returns 0 on success, -ENOENT when the core slot is unset, -EBUSY when
 * already enabled or firmware is not initialized.
 */
static int profile_open(struct inode *inode, struct file *filp)
{
	int n;
	struct profile_io *pn;

	if (filp->private_data)
		profileWarn("%s: %p\n", filp->f_path.dentry->d_iname, filp->private_data);

	/* core number = trailing digit of the dentry name */
	n = filp->f_path.dentry->d_iname[strlen(filp->f_path.dentry->d_iname) - 1] - '0';
	if (n < 0 || n >= NSS_MAX_CORES)
		n = 0;
	pn = node[n];
	if (!pn) {
		return -ENOENT;
	}

	profileInfo("_open: mode %x flag %x\n", filp->f_mode, filp->f_flags);
	if (!pn->pnc.enabled && nss_get_state(pn->ctx) == NSS_STATE_INITIALIZED) {
		/*
		 * sw_ksp_ptr is used as event flag. NULL means normal I/O
		 */
		pn->sw_ksp_ptr = NULL;
		pn->pnc.enabled = 1;
		pn->profile_first_packet = 1;

		/*
		 * If profiler is opened in read only mode, it is done by START_MSG
		 * via debug interface (IF), which reads NSS-FW all registered NSS
		 * variables.
		 * Do not start engine (no sampling required) for debug IF.
		 */
		if (FMODE_READ & filp->f_mode) {
			nss_tx_status_t ret;

			pn->pnc.un.hd_magic = NSS_PROFILE_HD_MAGIC | NSS_PROFILER_START_MSG;
			ret = nss_profiler_if_tx_buf(pn->ctx, &pn->pnc.un,
				sizeof(pn->pnc.un), profiler_handle_reply, pn);
			profileInfo("%s: %d -- %p: ccl %p sp %p\n", __func__, ret,
				pn, pn->ccl, pn->pnc.samples);
		}
		filp->private_data = pn;
		return 0;
	}

	profileWarn("profile ena %d nss stat %x\n", pn->pnc.enabled,
		nss_get_state(pn->ctx));
	return -EBUSY;
}
398
399/*
Guojun Jina09b8b02018-01-25 16:34:43 -0800400 * profile_read
401 * read syscall
402 *
Sundarajan Srinivasan1b03fe22014-12-02 13:20:56 -0800403 * return a udp packet ready to send to the profiler tool
404 * when there are no packets left to make, return 0
405 */
/*
 * profile_read()
 *	read syscall
 *
 * return a udp packet ready to send to the profiler tool
 * when there are no packets left to make, return 0
 *
 * Three modes:
 *  - sw_ksp_ptr set: return the cached FW stat-event data (debug path);
 *  - first read after open/drain: prepend a stats packet;
 *  - otherwise: append a sample-data packet.
 *
 * NOTE(review): when the stats packet succeeds (slen > 0) but
 * profile_make_data_packet() then returns a negative errno, the final
 * "result + slen" mixes the errno into the byte count — presumably callers
 * tolerate this; verify.
 */
static ssize_t profile_read(struct file *filp, char *buf, size_t count, loff_t *f_pos)
{
	int result = 0;
	int slen = 0;
	struct profile_io *pn = (struct profile_io *)filp->private_data;
	if (!pn) {
		return -ENOENT;
	}

	if (!pn->pnc.enabled) {
		return -EPERM;
	}
	/* debug path: hand back the stat-event counters saved by the reply handler */
	if (pn->sw_ksp_ptr) {
		struct debug_box *db = (struct debug_box *) pn->sw_ksp_ptr;

		/* the handler stores pdb == &pn->pnc; anything else means data not in yet */
		if ((void*)db != (void*)pn) {
			profileWarn("%p: hwe data not ready %p\n", pn, db);
			return -EAGAIN;
		}

		profileWarn("dbda %p: %x %x %x %x %x\n", db->data,
			db->data[0], db->data[2], db->data[4], db->data[6], db->data[7]);

		/* +1 carries the trailing thread-ID end mark along with the counters */
		slen = (PROFILE_STS_EVENT_COUNTERS + 1) * sizeof(db->data[0]);
		if (copy_to_user(buf, db->data, slen))
			return -EFAULT;
		profileInfo("%p: sw_ksp_ptr %p slen %d\n", pn, pn->sw_ksp_ptr, slen);
		return slen;
	}

	if (!pn->pnc.samples) {
		profileWarn("DEBUG %p: NULL samples\n", pn);
		return -ENOMEM;
	}

	/* stats (counters) packet leads the stream after each (re)start */
	if (pn->profile_first_packet) {
		result = profile_make_stats_packet(buf, count, pn);
		pn->profile_first_packet = 0;
		profileInfo("%d profile_make_stats_packet %zd\n", result, count);

#ifdef PROFILE_SEP_STAT
		/*
		 * currectly, stat and sample data are combined in one pkt for efficient;
		 * but this is harder to debug and required remote tool to handle
		 * packet in all-in-one method instead of individual handler.
		 */
		return result;
#endif
	}

	/* append sample data after the stats packet, if one was produced */
	if (result > 0) {
		buf += result;
		count -= result;
		slen = result;
	}
	result = profile_make_data_packet(buf, count, pn);
	if (result == 0) {
		pn->profile_first_packet = 1;	/* drained: next read restarts with stats */
	}
	profileInfo("%d: profile_make_data_packet %zd %d\n", result, count, slen);

	profileInfo("%d: read\n", pn->pnc.enabled);
	/* enabled < 0 flags a pending restart requested elsewhere; re-arm firmware */
	if (pn->pnc.enabled < 0) {
		nss_tx_status_t ret;
		pn->pnc.enabled = 1;
		pn->pnc.un.hd_magic = NSS_PROFILE_HD_MAGIC | NSS_PROFILER_START_MSG;
		ret = nss_profiler_if_tx_buf(pn->ctx, &pn->pnc.un, sizeof(pn->pnc.un),
				profiler_handle_reply, pn);
		profileWarn("%s: restart %d -- %p: ccl %p sp %p\n", __func__,
			ret, pn, pn->ccl, pn->pnc.samples);
	}

	return result + slen;
}
480
481/*
Guojun Jina09b8b02018-01-25 16:34:43 -0800482 * profile_release
483 * the close syscall paired with profiler_open
Sundarajan Srinivasan1b03fe22014-12-02 13:20:56 -0800484 */
485static int profile_release(struct inode *inode, struct file *filp)
486{
487 struct profile_io *pn = (struct profile_io *)filp->private_data;
488 if (!pn) {
489 return -ENOENT;
490 }
491
492 if (pn->pnc.enabled) {
493 nss_tx_status_t ret;
Guojun Jin3deae8c2016-08-23 15:51:21 -0700494 pn->sw_ksp_ptr = NULL;
Sundarajan Srinivasan1b03fe22014-12-02 13:20:56 -0800495 pn->pnc.enabled = 0;
Guojun Jinf7f90f82018-08-16 18:09:23 -0700496 pn->pnc.un.hd_magic = NSS_PROFILE_HD_MAGIC | NSS_PROFILER_STOP_MSG;
Guojun Jin3deae8c2016-08-23 15:51:21 -0700497 ret = nss_profiler_if_tx_buf(pn->ctx, &pn->pnc.un,
498 sizeof(pn->pnc.un), profiler_handle_reply, pn);
Sundarajan Srinivasan1b03fe22014-12-02 13:20:56 -0800499 profileInfo("%s: %p %d\n", __func__, pn, ret);
500 return 0;
501 }
502 profileWarn("%s: attempt closing non-open dev %p\n", __func__, pn);
503 pn->profile_first_packet = 1;
504 return -EBADF;
505}
506
Guojun Jina09b8b02018-01-25 16:34:43 -0800507#ifndef __aarch64__
508/*
509 * counter_rate_by_uint32
510 * helper function for handling 64-bit calculation in 32-bit mode
511 *
512 * 32-bit kernel does not have 64-bit div function;
513 * to avoid overflow and underflow, use if branch
514 * to overcome this problem: slower bur more accurate.
515 */
/*
 * counter_rate_by_uint32()
 *	helper function for handling 64-bit calculation in 32-bit mode
 *
 * 32-bit kernel does not have 64-bit div function; to avoid overflow and
 * underflow, use if branch to overcome this problem: slower but more
 * accurate. Prints each counter's value and its per-second rate.
 *
 * NOTE(review): the fast path divides by (uclk / 1000000) and the slow
 * path by (uclk / 1000) — either is 0 when uclk < 1000000 (resp. < 1000),
 * i.e. a very short poll interval would divide by zero; presumably polling
 * intervals guarantee uclk >= 1M ticks — confirm.
 */
static void counter_rate_by_uint32(struct nss_profile_common *pnc)
{
	static uint32_t prev_cnts[32];	/* last-seen values, for deltas across calls */
	static uint32_t last_uclk;	/* last-seen firmware tick counter */
	uint32_t mclk, uclk, ubi32_freq;
	int n = pnc->un.num_counters;

	/* cpu_freq arrives in network byte order; convert then scale to MHz */
	ubi32_freq = htonl(pnc->un.cpu_freq) / 1000000;
	uclk = pnc->un.rate - last_uclk;
	last_uclk = pnc->un.rate;
	printk("%d nss counters: clk dif %u freq %u\n", n, uclk, ubi32_freq);

	/*
	 * exactly 4G? make it maximum
	 */
	if (!uclk)
		uclk--;
	while (n--) {
		uint32_t v_dif;
		uint32_t v = ntohl(pnc->un.counters[n].value);
		uint32_t pv = prev_cnts[n];

		prev_cnts[n] = v;
		v_dif = v - pv;	/* unsigned wrap gives the correct delta */

		/*
		 * threshold = MAX_UINT32 / MAX_Ubi32CPU_CLK (MHz)
		 * if counter diff is less then this threshold,
		 * 32-bit calculation can be directly applied w/o o/u flow;
		 * otherwise, tick diff (uclk) adjust needs to be done before
		 * calculating the rate to avoid over/under flow.
		 */
		if (v_dif < (UINT_MAX / ubi32_freq)) {
			v_dif = (v_dif * ubi32_freq) / (uclk / 1000000);
		} else {
			/*
			 * assume fast polling is 200ms, @ 500MHz, the minimum
			 * uclk value is 0.5M * 200 = 10M, so reduce by 1M
			 * it will still have value in 10, not zero (0).
			 * in 2.3GHz and 1 sec interval, the residual is 2300.
			 * The maximum polling interval is 2 sec for 2.3GHz,
			 * and 3 sec for 1.7GHz.
			 */
			if (uclk > 1000000) {
				mclk = uclk / 1000000;
				v_dif = (v_dif / mclk) * ubi32_freq;
			} else {
				mclk = uclk / 1000;
				v_dif = (v_dif / mclk) * ubi32_freq * 1000;
			}
		}
		printk("%-32s 0x%08X %10u : %u/s\n",
			pnc->un.counters[n].name, v, v, v_dif);
	}
}
571#endif
572
573/*
574 * profiler_handle_counter_event_reply()
575 * get reply from firmware for current FW stat event counter configurations
576 *
577 * Based on firmware CPU clock (cpu_freq), calculate the counter change rate in
578 * second and print both counter value and its rate.
579 */
/*
 * profiler_handle_counter_event_reply()
 *	get reply from firmware for current FW stat event counter configurations
 *
 * Based on firmware CPU clock (cpu_freq), calculate the counter change rate in
 * second and print both counter value and its rate. On 32-bit builds the
 * division is delegated to counter_rate_by_uint32(); 64-bit builds can use
 * native 64-bit arithmetic inline.
 */
static void profiler_handle_counter_event_reply(struct nss_ctx_instance *nss_ctx,
		struct nss_cmn_msg *ncm)
{
	struct profile_io *pio = (struct profile_io *) ncm->app_data;
	struct nss_profile_common *pnc = &pio->pnc;

#ifndef __aarch64__
	counter_rate_by_uint32(pnc);
#else
	/* deltas persist across replies so rates are per polling interval */
	static uint32_t prev_cnts[32];
	static uint32_t last_uclk;
	uint32_t ubi32_freq;
	uint32_t uclk;
	int n = pnc->un.num_counters;

	/* cpu_freq arrives in network byte order */
	ubi32_freq = htonl(pnc->un.cpu_freq);
	uclk = pnc->un.rate - last_uclk;
	last_uclk = pnc->un.rate;
	printk("%d nss counters: clk dif %u freq %u\n", n, uclk, ubi32_freq);
	while (n--) {
		uint32_t v = ntohl(pnc->un.counters[n].value);
		uint32_t pv = prev_cnts[n];

		prev_cnts[n] = v;

		/* widen before multiply to avoid 32-bit overflow */
		printk("%-32s 0x%08X %10u : %llu/s\n",
			pnc->un.counters[n].name, v, v,
			(uint64_t)(v - pv) * ubi32_freq / uclk);
	}
#endif
}
611
612/*
613 * parseDbgCmd()
614 * process debugging command(s).
615 *
616 * Currently supported command:
617 * "=show-nss-counter" display all values of nss variables registered
618 * by profile_register_performance_counter(&v, name)
619 */
620#define SHOW_COUNTER_CMD "show-nss-counter"
static int parseDbgCmd(const char *buf, size_t count,
			struct debug_box *db, struct profile_io *pio)
{
	int result;

	/* only "show-nss-counter" is recognized; bound the compare by input length */
	if (strncmp(buf, SHOW_COUNTER_CMD, min(sizeof(SHOW_COUNTER_CMD)-1, count))) {
		printk(KERN_ERR "%s: unsupported cmd %s %zu\n",
			__func__, buf, strlen(buf));
		return -EINVAL;
	}

	/*
	 * Request the registered counter values from firmware; the reply is
	 * printed asynchronously by profiler_handle_counter_event_reply().
	 * NOTE(review): db is written but pio->pnc.un is transmitted —
	 * presumably db aliases into pio->pnc; confirm at the caller.
	 */
	db->hd_magic = NSS_PROFILE_HD_MAGIC | NSS_PROFILER_COUNTERS_MSG;
	result = nss_profiler_if_tx_buf(pio->ctx, &pio->pnc.un,
			sizeof(pio->pnc.un),
			profiler_handle_counter_event_reply, pio);
	profileInfo("%s: %d\n", __func__, result);
	return result == NSS_TX_SUCCESS ? count : -EFAULT;
}
639
Guojun Jin3deae8c2016-08-23 15:51:21 -0700640/*
641 * profiler_handle_stat_event_reply()
642 * print current FW stat event counter configurations
643 */
/*
 * profiler_handle_stat_event_reply()
 *	print current FW stat event counter configurations
 *
 * Also caches the reply (counters plus the trailing thread-ID end mark)
 * into the local debug box and sets sw_ksp_ptr so the next profile_read()
 * can return the data to user space.
 */
static void profiler_handle_stat_event_reply(struct nss_ctx_instance *nss_ctx,
		struct nss_cmn_msg *ncm)
{
	struct profile_io *pio = (struct profile_io *) ncm->app_data;
	struct debug_box *pdb = (struct debug_box *) &pio->pnc;	/* local cache for read() */
	struct debug_box *db = (struct debug_box *) &ncm[1];	/* payload follows the message header */
	int i, thrds;

	for (i = 0; i < db->dlen; i++)
		printk("stat counter %d: %x\n", i, db->data[i]);

	/* word after the counters packs two thread IDs, PROFILE_STS_EVENT_THREAD_BITS each */
	thrds = db->data[i];
	i = (1 << PROFILE_STS_EVENT_THREAD_BITS) - 1;	/* reuse i as the thread-ID mask */
	profileInfo("%d: event end mark %x, ThrA %d ThrB %d\n",
		ncm->len, thrds, (thrds & i) + 1,
		((thrds >> PROFILE_STS_EVENT_THREAD_BITS) & i) + 1);

	/*
	 * save data for read()
	 */
	memcpy(pdb->data, db->data, (db->dlen + 1) * sizeof(db->data[0]));
	pio->sw_ksp_ptr = (uint32_t *)pdb;
}
667
668/*
669 * parse_sys_stat_event_req()
670 * process FW stat events request: event#1 index#1 event#2 index#2 ...
671 */
/*
 * parse_sys_stat_event_req()
 *	process FW stat events request: event#1 index#1 event#2 index#2 ...
 *
 * "get-sys-stat-events" queries firmware; "set-sys-stat-events" is followed
 * by space-separated decimal (event, index) pairs, index in [0..7].
 * The reply in both cases is handled by profiler_handle_stat_event_reply().
 *
 * NOTE(review): the tokenizer writes NUL bytes through pointers derived from
 * the const @buf — the caller must pass a writable buffer; confirm.
 */
static int parse_sys_stat_event_req(const char *buf, size_t count,
				struct debug_box *db, struct profile_io *pio)
{
	char *cp;
	int result;

	printk("%zd cmd buf %s\n", count, buf);
	if (count < 19)	/* minimum data for sys_stat_event request */
		return -EINVAL;

	if (strncmp(buf, "get-sys-stat-events", 19) == 0) {
		db->hd_magic = NSS_PROFILE_HD_MAGIC | NSS_PROFILER_GET_SYS_STAT_EVENT;
		result = nss_profiler_if_tx_buf(pio->ctx, &pio->pnc.un,
				sizeof(pio->pnc.un),
				profiler_handle_stat_event_reply, pio);
		profileInfo("get_sys_stat_events: %d\n", result);
		return result == NSS_TX_SUCCESS ? count : -EFAULT;
	}

	if (strncmp(buf, "set-sys-stat-events", 19)) {
		printk("unknow event: %s\n", buf);
		return -EINVAL;
	}

	/* reset the event table before filling from the request */
	db->dlen = sizeof(pio->pnc.un);
	memset(db->data, 0, PROFILE_STS_EVENT_COUNTERS * sizeof(db->data[0]));

	cp = strchr(buf, ' ');
	if (!cp) {
		printk("no enough paramters %s\n", buf);
		return -EINVAL;
	}

	/* parse (event, index) pairs; each token is NUL-terminated in place */
	do {
		unsigned long idx;
		int event, e5x;
		char *kstrp;

		while (isspace(*cp))
			cp++;
		kstrp = strchr(cp, ' ');
		if (!kstrp) {
			printk(KERN_ERR "%p missing index %p %s\n", buf, cp, cp);
			return -EINVAL;
		}
		kstrp[0] = 0;	/* terminate the event token for kstrtoul */

		/*
		 * kstrtoul bugs:
		 * it does not use white space for delimiter.
		 * it cannot use base 0, thus base 10 only.
		 */
		event = kstrtoul(cp, 10, &idx);
		if (event) {
			printk(KERN_ERR "kstrtoul %d: %s\n", event, cp);
			return -EINVAL;
		}
		event = idx;

		/*
		 * Processing thread specific events, which requires hex values.
		 * Because kstrtoul cannot use base 0, it makes this task harder
		 * in user space. Users need to convert hex value to decimal, then
		 * pass them in userland command event-counter.
		 */
		e5x = event >> 16;	/* upper bits carry the thread ID, if any */
		if (e5x) {
			if ((event & 0x1FF) < 50) {
				printk(KERN_INFO "thr ID (%d) ignored for event %d\n",
					e5x, event & 0x1FF);
			} else if (e5x > 12) {
				if ((e5x >>= 5) > 12) {
					printk(KERN_INFO "tID %d too big [1..12]\n", e5x);
					return -E2BIG;
				}
			}
		}

		/* advance to the index token; a missing trailing space ends the loop */
		cp = kstrp + 1;
		while (isspace(*cp))
			cp++;
		kstrp = strchr(cp, ' ');
		if (kstrp) {
			kstrp[0] = 0;
			kstrp++;
		}

		if (kstrtoul(cp, 10, &idx) || idx < 0 || idx > 7) {
			printk(KERN_ERR "bad index %ld [0..7]\n", idx);
			return -ERANGE;
		}
		printk(KERN_INFO "%p: e %d i %ld\n", db, event, idx);
		db->data[idx] = event;
		cp = kstrp;
	} while (cp);
	db->hd_magic = NSS_PROFILE_HD_MAGIC | NSS_PROFILER_SET_SYS_STAT_EVENT;
	result = nss_profiler_if_tx_buf(pio->ctx, &pio->pnc.un, sizeof(pio->pnc.un),
			profiler_handle_stat_event_reply, pio);
	profileInfo("%p: %zd send cmd %x to FW ret %d\n",
		db, count, db->hd_magic, result);
	return count;
}
774
775/*
776 * parseDbgData()
777 * parsing debug requests: base_address [options] cmd length
778 *
779 * cmd is either read or write
780 * option is one of mio, moveio, h [heap security verify], etc.
781 */
static int parseDbgData(const char *buf, size_t count, struct debug_box *db)
{
	char *cp;
	int n;

	printk("%p %p: buf (%s) cnt %zd\n", db, buf, buf, count);

	/* first token must be the hex base address */
	if (sscanf(buf, "%x", (uint32_t *)&db->base_addr) != 1) {
		printk("%s: cannot get base addr\n", __func__);
		return -EINVAL;
	}

	/* advance past the base address token */
	cp = strchr(buf, ' ');
	if (!cp) {
noea:		printk("%s: no enough arguments\n", __func__);
		return -EFAULT;
	}

	while (isspace(*cp)) cp++;

	/*
	 * optional "mio"/"moveio" flag before the command word;
	 * it only sets DEBUG_OPT_MOVEIO and is then skipped
	 */
	if (!strncmp(cp, "mio", 3) || !strncmp(cp, "moveio", 6)) {
		printk("%p: cp (%s)\n", cp, cp);
		cp = strchr(cp, ' ');
		if (!cp) {
			goto noea;
		}
		db->opts |= DEBUG_OPT_MOVEIO;
	}

	while (isspace(*cp)) cp++;
	printk("base addr %X -- %s", db->base_addr, cp);

	/*
	 * read command: optional hex length follows; dlen is left
	 * untouched when no length token is present
	 */
	if (!strncmp(cp, "read", 4)) {
		cp = strchr(cp, ' ');
		if (cp) {
			while (isspace(*cp)) cp++;
			sscanf(cp, "%x", &db->dlen);
		}
		return 0;
	}

	/*
	 * otherwise treat the remaining tokens as hex words to write,
	 * at most MAX_DB_WR of them; returns the number parsed (>0
	 * distinguishes a write request from a read at the caller)
	 */
	n = 0;
	do {
		while (isspace(*cp)) cp++;
		if (sscanf(cp, "%x", db->data+n) != 1) {
			printk("n %d : %s\n", n, cp);
			break;
		}
		printk("write %x to off %zx\n", db->data[n], n * sizeof(db->data[0]));
		n++;
		cp = strchr(cp, ' ');
	} while (cp && n < MAX_DB_WR);
	return n;
}
834
835/*
Guojun Jina09b8b02018-01-25 16:34:43 -0800836 * debug_if_show
837 * display memory content read from Phy addr
Sundarajan Srinivasan1b03fe22014-12-02 13:20:56 -0800838 */
839static void debug_if_show(struct debug_box *db, int buf_len)
840{
841 int i;
842
843 for (i=0; i < db->dlen; i++) {
844 if ((i & 3) == 0)
Guojun Jina09b8b02018-01-25 16:34:43 -0800845 printk("\n%zX: ", db->base_addr + i * sizeof(db->base_addr));
Sundarajan Srinivasan1b03fe22014-12-02 13:20:56 -0800846 printk("%9x", db->data[i]);
847 }
848 printk("\ndumped %d (extra 1) blen %d\n", db->dlen, buf_len);
849}
850
851/*
Guojun Jina09b8b02018-01-25 16:34:43 -0800852 * profiler_handle_debug_reply
853 * show debug message we requested from NSS
Sundarajan Srinivasan1b03fe22014-12-02 13:20:56 -0800854 */
855static void profiler_handle_debug_reply(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm)
856{
857 debug_if_show((struct debug_box*)&ncm[1], ncm->len);
858}
859
860/*
Guojun Jina09b8b02018-01-25 16:34:43 -0800861 * debug_if
862 * a generic Krait <--> NSS debug interface
Sundarajan Srinivasan1b03fe22014-12-02 13:20:56 -0800863 */
Guojun Jine4fec2f2017-05-16 17:14:23 -0700864static ssize_t debug_if(struct file *filp,
865 const char __user *ubuf, size_t count, loff_t *f_pos)
Sundarajan Srinivasan1b03fe22014-12-02 13:20:56 -0800866{
Guojun Jine4fec2f2017-05-16 17:14:23 -0700867 char *buf;
Sundarajan Srinivasan1b03fe22014-12-02 13:20:56 -0800868 int result;
869 struct debug_box *db;
Guojun Jin3deae8c2016-08-23 15:51:21 -0700870 struct profile_io *pio = (struct profile_io *)filp->private_data;
Sundarajan Srinivasan1b03fe22014-12-02 13:20:56 -0800871
872 if (!pio) {
873 return -ENOENT;
874 }
875
876 if (!pio->pnc.enabled) {
877 return -EPERM;
878 }
879
Guojun Jine4fec2f2017-05-16 17:14:23 -0700880 buf = kmalloc(count, GFP_KERNEL);
881 if (!buf)
882 return -ENOMEM;
883
884 if (copy_from_user(buf, ubuf, count)) {
885 kfree(buf);
886 printk(KERN_ERR "copy_from_user\n");
887 return -EIO;
888 }
Guojun Jina09b8b02018-01-25 16:34:43 -0800889 buf[count-1] = 0;
Guojun Jine4fec2f2017-05-16 17:14:23 -0700890
Sundarajan Srinivasan1b03fe22014-12-02 13:20:56 -0800891 db = (struct debug_box *) &pio->pnc;
892 db->dlen = db->opts = 0;
Guojun Jin3deae8c2016-08-23 15:51:21 -0700893
Guojun Jina09b8b02018-01-25 16:34:43 -0800894 /*
895 * process possible commands
896 */
897 if (buf[0] == '=') {
898 result = parseDbgCmd(buf+1, count, db, pio);
899 kfree(buf);
900 return result;
901 }
902
903 /*
904 * process stat_event request: display/change
905 */
Guojun Jin3deae8c2016-08-23 15:51:21 -0700906 if (!isdigit(buf[0])) {
907 result = parse_sys_stat_event_req(buf, count, db, pio);
Guojun Jine4fec2f2017-05-16 17:14:23 -0700908 kfree(buf);
Guojun Jin3deae8c2016-08-23 15:51:21 -0700909
910 if ((result > 0) && (filp->f_flags & O_RDWR)) {
911 /*
912 * set flag so event-counter can read the data from FW
913 */
Guojun Jin35c027c2020-02-12 15:42:01 -0800914 pio->sw_ksp_ptr = db->data;
Guojun Jin3deae8c2016-08-23 15:51:21 -0700915 }
916 return result;
917 }
918
Guojun Jina09b8b02018-01-25 16:34:43 -0800919 /*
920 * process memory I/O for debug
921 */
Sundarajan Srinivasan1b03fe22014-12-02 13:20:56 -0800922 result = parseDbgData(buf, count, db);
Guojun Jine4fec2f2017-05-16 17:14:23 -0700923 kfree(buf);
Sundarajan Srinivasan1b03fe22014-12-02 13:20:56 -0800924 if (result < 0) {
925 return result;
926 }
927
928 if (!result) {
Guojun Jinf7f90f82018-08-16 18:09:23 -0700929 db->hd_magic = NSS_PROFILE_HD_MAGIC | NSS_PROFILER_DEBUG_RD_MSG;
Sundarajan Srinivasan1b03fe22014-12-02 13:20:56 -0800930 } else {
Guojun Jinf7f90f82018-08-16 18:09:23 -0700931 db->hd_magic = NSS_PROFILE_HD_MAGIC | NSS_PROFILER_DEBUG_WR_MSG;
Sundarajan Srinivasan1b03fe22014-12-02 13:20:56 -0800932 db->dlen = result;
933 }
Guojun Jin3deae8c2016-08-23 15:51:21 -0700934 result = nss_profiler_if_tx_buf(pio->ctx, &pio->pnc.un,
935 sizeof(pio->pnc.un), profiler_handle_debug_reply, pio);
Sundarajan Srinivasan1b03fe22014-12-02 13:20:56 -0800936 printk("dbg res %d dlen = %d opt %x\n", result, db->dlen, db->opts);
937 return count;
938}
939
/*
 * /proc/profile/data and /proc/profile/data1 operations:
 * read streams profiler samples; write is the debug/command interface.
 */
static const struct file_operations profile_fops = {
	.open = profile_open,
	.read = profile_read,
	.release = profile_release,
	.write = debug_if,
};
946
947/*
948 * showing sample status on Linux console
949 */
950static int profile_rate_show(struct seq_file *m, void *v)
951{
952 struct profile_io *pn = node[0];
953 if (pn) {
Guojun Jinf7f90f82018-08-16 18:09:23 -0700954 struct nss_profile_sample_ctrl *psc_hd = &pn->pnc.pn2h->psc_header;
Sundarajan Srinivasan1b03fe22014-12-02 13:20:56 -0800955 seq_printf(m, "%d samples per second. %d ultra, %d linux virtual counters. %d dropped samples. %d queued of %d max sampels. %d sent packets.\n",
Guojun Jinf7f90f82018-08-16 18:09:23 -0700956 pn->pnc.un.rate, pn->pnc.un.num_counters, profile_num_counters, psc_hd->ps_dropped, psc_hd->ps_count, psc_hd->ps_max_samples, pn->profile_sequence_num);
Sundarajan Srinivasan1b03fe22014-12-02 13:20:56 -0800957 } else {
958 seq_printf(m, "Profiler is not initialized.\n");
959 }
960 return 0;
961}
962
/* open handler for /proc/profile/rate: hook up the seq_file show routine */
static int profile_rate_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, profile_rate_show, NULL);
}
967
Guojun Jin76cf1392017-05-02 12:02:31 -0700968static ssize_t profile_rate_write(struct file *filp, const char *buf, size_t len, loff_t *off)
Sundarajan Srinivasan1b03fe22014-12-02 13:20:56 -0800969{
970 *off = 0;
971 return 0;
972}
973
/*
 * /proc/profile/rate operations: seq_file based read-only status,
 * writes are accepted but ignored.
 */
static const struct file_operations profile_rate_fops = {
	.open = profile_rate_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = profile_rate_write,
};
981
982/*
Guojun Jina09b8b02018-01-25 16:34:43 -0800983 * hexdump
984 * hex dump for debug
Sundarajan Srinivasan1b03fe22014-12-02 13:20:56 -0800985 */
986static void kxdump(void *buf, int len, const char *who)
987{
Guojun Jina09b8b02018-01-25 16:34:43 -0800988 int32_t *ip = (int32_t *) buf;
Sundarajan Srinivasan1b03fe22014-12-02 13:20:56 -0800989 int lns = len >> 5; /* 32-B each line */
Guojun Jina09b8b02018-01-25 16:34:43 -0800990 if (lns > 8)
991 lns = 8;
Sundarajan Srinivasan1b03fe22014-12-02 13:20:56 -0800992 printk("%p: kxdump %s: len %d\n", buf, who, len);
993 do {
994 printk("%x %x %x %x %x %x %x %x\n", ip[0], ip[1], ip[2], ip[3], ip[4], ip[5], ip[6], ip[7]);
995 ip += 8;
996 } while (lns--);
997}
998
999/*
Guojun Jina09b8b02018-01-25 16:34:43 -08001000 * profiler_magic_verify
1001 * check magic # and detect Endian.
1002 *
 * negative return means failure.
1004 * return 1 means need to ntoh swap.
1005 */
static int profiler_magic_verify(struct nss_profile_sample_ctrl *psc_hd, int buf_len)
{
	int swap = 0;
	/* magic matches in host order: nothing to do, swap stays 0 */
	if ((psc_hd->psc_magic & NSS_PROFILE_HD_MMASK) != NSS_PROFILE_HD_MAGIC) {
		/* magic matches only byte-reversed: peer has opposite endianness */
		if ((psc_hd->psc_magic & NSS_PROFILE_HD_MMASK_REV) != NSS_PROFILE_HD_MAGIC_REV) {
			kxdump(psc_hd, buf_len, "bad profile packet");
			printk("bad profile HD magic 0x%x : %d\n",
				psc_hd->psc_magic, buf_len);
			return -1;
		}
		profileDebug("Profile data in different Endian type %x\n", psc_hd->psc_magic);
		swap = 1;
		/* normalize the magic in place; caller swaps remaining header fields */
		psc_hd->psc_magic = ntohl(psc_hd->psc_magic);
	}
	return swap;
}
1022
1023/*
Guojun Jina09b8b02018-01-25 16:34:43 -08001024 * profile_handle_nss_data
1025 * process profile sample data from NSS
Sundarajan Srinivasan1b03fe22014-12-02 13:20:56 -08001026 */
static void profile_handle_nss_data(void *arg, struct nss_profiler_msg *npm)
{
	int buf_len = npm->cm.len;
	void *buf = &npm->payload;
	struct profile_io *pn;
	struct nss_profile_n2h_sample_buf *nsb;
	struct nss_profile_sample_ctrl *psc_hd = (struct nss_profile_sample_ctrl *)buf;
	int ret, wr;
	int swap = 0;	/* only for header and info data, not samples */

	/* payload must at least hold a session header (minus optional app counters) */
	if (buf_len < (sizeof(struct nss_profile_session) - sizeof(struct profile_counter) * (PROFILE_MAX_APP_COUNTERS))) {
		profileWarn("%p: profile data packet is too small to be useful %d %x psc_hd %p\n",
			npm, buf_len, npm->cm.interface, psc_hd);
		return;
	}

	/* <0: corrupt packet; 1: peer uses opposite byte order */
	swap = profiler_magic_verify(psc_hd, buf_len);
	if (swap < 0) {
		return;
	}

	pn = (struct profile_io *)arg;
	profileDebug("PN %p CM msg %d len %d\n", pn, npm->cm.type, buf_len);
	profileInfo("%s: dlen %d swap %d cmd %x - %d\n", __func__, buf_len, swap, npm->cm.type, (pn->ccl_read - pn->ccl_write) & (CCL_SIZE-1));
	//kxdump(buf, buf_len, "process profile packet");

	/*
	 * FIXED_INFO: session parameters (rate, cpu/ddr freq, counter list),
	 * not sample data; cache them and mark the stream as (re)starting.
	 */
	if (npm->cm.type == NSS_PROFILER_FIXED_INFO_MSG) {
		struct nss_profile_session *pTx = (struct nss_profile_session *)buf;
		if (swap) {
			pn->pnc.un.rate = ntohl(pTx->rate);
			pn->pnc.un.cpu_id = ntohl(pTx->cpu_id);
			pn->pnc.un.cpu_freq = ntohl(pTx->cpu_freq);
			pn->pnc.un.ddr_freq = ntohl(pTx->ddr_freq);
			pn->pnc.un.num_counters = pTx->num_counters;
		} else {
			pn->pnc.un = *pTx;
		}
		memcpy(pn->pnc.un.counters, pTx->counters, pn->pnc.un.num_counters * sizeof(pn->pnc.un.counters[0]));
		pn->profile_first_packet = 1;
		return;
	}

	/*
	 * claim the next slot in the circular sample list; 'swap' is reused
	 * here as the read-write distance for flow control (PROFILER_FLOWCTRL)
	 */
	wr = (pn->ccl_write + 1) & (CCL_SIZE-1);
	nsb = pn->ccl + wr;
	swap = (pn->ccl_read - wr) & (CCL_SIZE-1);	/* PROFILER_FLOWCTRL */
	if (nsb->mh.md_type != PINGPONG_EMPTY || (swap && swap < 5)) {
		/* ring nearly full: ask FW to pause sampling once (enabled -> -1) */
		if (pn->pnc.enabled > 0) {
			pn->pnc.enabled = -1;
			pn->pnc.un.hd_magic = NSS_PROFILE_HD_MAGIC | NSS_PROFILER_STOP_MSG;
			ret = nss_profiler_if_tx_buf(pn->ctx,
				&pn->pnc.un, sizeof(pn->pnc.un),
				profiler_handle_reply, pn);
			profileWarn("%d temp stop sampling engine %d\n", swap, ret);
		}
		/* fewer than 3 free slots: drop this packet */
		if (swap < 3) {
			profileWarn("w%p.%d: %d no room for new profile samples r%p.%d\n", nsb, wr, swap, pn->ccl+pn->ccl_read, pn->ccl_read);
			return;	/* -EMSGSIZE */
		}
	}
	pn->ccl_write = wr;

	/*
	 * sampling data -- hdr NBO swap is done at NSS side via SWAPB.
	 */
	memcpy(&nsb->psc_header, buf, buf_len);	/* pn->pnc.pn2h->psc_header = *psc_hd; maybe faster, but take more memory */

	nsb->mh.md_type = PINGPONG_FULL;

	/*
	 * ask for perf_counters (software counters) update every 32 samples
	 */
	if (!wr) {
		pn->pnc.un.hd_magic = NSS_PROFILE_HD_MAGIC | NSS_PROFILER_COUNTERS_MSG;
		ret = nss_profiler_if_tx_buf(pn->ctx, &pn->pnc.un,
			sizeof(pn->pnc.un), profiler_handle_reply, pn);
		if (ret == NSS_TX_FAILURE)
			printk("req counters Cmd failed %d %d\n", ret, wr);
	}
	profileInfo("filled %p %p wr %d\n", nsb, nsb->samples, pn->ccl_write);
}
1107
1108/*
Guojun Jin487c84f2019-11-05 14:56:39 -08001109 * profiler_dma_handler
1110 * Handle DMA interrupt, and map DMA to N2H to minimize changes
1111 * in profile_handle_nss_data.
1112 */
static void profiler_dma_handler(void *arg)
{
	int cri, idx, widx;
	struct nss_profiler_msg *npm;
	struct nss_profile_sdma_consumer *cbc;
	struct nss_profile_sdma_producer *dma;
	struct profile_io *pn = (struct profile_io *)arg;
	struct nss_profile_sdma_ctrl *ctrl = nss_profile_dma_get_ctrl(pn->ctx);

	if (!ctrl) {
		profileWarn("%p: cannot get dma ctrl block\n", pn->ctx);
		return;
	}

	/* snapshot ring state: cidx = consumer (our) index, pidx = producer (FW) index */
	dma = ctrl->producer;
	cbc = ctrl->consumer;
	cri = ctrl->cur_ring;
	idx = ctrl->cidx[cri];
	widx = ctrl->pidx[cri];

	if (idx == widx) {
		/* ring empty: nothing produced since last interrupt */
		profileInfo("%p: dma[%d]%d %p sz %d no more profile data %p (%zd)\n",
			ctrl, cri, idx, dma, dma->buf_size,
			cbc->ring.kp + idx * dma->buf_size, sizeof(*ctrl));
		return;
	}

	/*
	 * drain every filled buffer; each is invalidated from the data cache
	 * before reading (header first so cm.len is valid, then the payload),
	 * with barriers so the reads cannot be hoisted past the invalidate
	 */
	do {
		npm = cbc->ring.kp + idx * dma->buf_size;

		dmac_inv_range(npm, &npm->payload);
		dsb(sy);

		dmac_inv_range(&npm->payload, (void *)&npm->payload + npm->cm.len);
		dsb(sy);

		/* feed each DMA buffer through the normal N2H handler */
		profile_handle_nss_data(pn, npm);
		idx = (idx + 1) & (dma->num_bufs - 1);	/* num_bufs is a power of two */
	} while (idx != widx);

	/* publish the new consumer index and flush it so the FW side sees it */
	ctrl->cidx[cri] = idx;
	profileInfo("flush %p %p r %d w %d(%d)\n", cbc, cbc->ring.kp, idx, widx, ctrl->pidx[cri]);
	dmac_clean_range(ctrl->cidx + cri, ctrl->cidx + cri + 1);
	dsb(sy);
}
1158
1159/*
Guojun Jina09b8b02018-01-25 16:34:43 -08001160 * profiler_handle_reply
Guojun Jin487c84f2019-11-05 14:56:39 -08001161 * process N2H reply for message we sent to NSS
Sundarajan Srinivasan1b03fe22014-12-02 13:20:56 -08001162 */
static void profiler_handle_reply(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm)
{
	switch (ncm->response) {
	default:
		/*
		 * NO_DMA error: FW lost (or never had) its sample DMA ring;
		 * find the node owning this context, re-allocate the ring and
		 * re-issue START.  Success returns here; any failure falls
		 * into the generic warning below.
		 */
		if (ncm->error == PROFILE_ERROR_NO_DMA) {
			struct nss_profile_sdma_consumer *cbc;
			struct nss_profile_sdma_ctrl *ctrl;
			struct profile_io *pn = node[0];

			/* match the reply's context to core 0 or core 1 */
			if (!pn || pn->ctx != nss_ctx) {
				pn = node[1];
				if (!pn || pn->ctx != nss_ctx)
					return;
			}

			ctrl = nss_profile_dma_get_ctrl(nss_ctx);
			if (!ctrl) {
				profileWarn("%p: profiler can't get DMA\n", nss_ctx);
				return;
			}

			cbc = ctrl->consumer;
			cbc->ring.kp = profiler_get_dma(nss_ctx, pn);
			if (cbc->ring.kp) {
				pn->pnc.un.hd_magic = NSS_PROFILE_HD_MAGIC | NSS_PROFILER_START_MSG;
				if (nss_profiler_if_tx_buf(pn->ctx, &pn->pnc.un,
					sizeof(pn->pnc.un), profiler_handle_reply, pn)
					== NSS_TX_SUCCESS)
					return;
			}
		}

		profileWarn("%p: profiler had error response %d\n", nss_ctx, ncm->response);
		/*
		 * fail through -- no plan to do anything yet
		 */
	case NSS_CMN_RESPONSE_ACK:
		return;
	}
}
1203
1204/*
Guojun Jin487c84f2019-11-05 14:56:39 -08001205 * profile_prepare_dma()
1206 * Allocate DMA for profile if no DMA allocated; then register
1207 * callback to handle interrupt for reading samples.
1208 */
1209static bool profile_prepare_dma(struct profile_io *node)
1210{
1211 struct nss_profile_sdma_ctrl *ctrl = nss_profile_dma_get_ctrl(node->ctx);
1212
1213 if (!ctrl)
1214 return 0;
1215
1216 if (!ctrl->consumer[0].ring.kp)
1217 ctrl->consumer[0].ring.kp = profiler_get_dma(node->ctx, node);
1218
1219 /*
1220 * register_noncd_cb will not fail since (!ctrl) has been checked above.
1221 * The ctrl is allocated in driver side, so even in impossible event to
1222 * cause register_noncd_cb failed, no need to release dma since calling
1223 * this function will not allocate another DMA if a DMA alerady exists.
1224 */
1225 return (bool)nss_profile_dma_register_cb(node->ctx, 0, profiler_dma_handler, (void*)node);
1226}
1227
1228/*
Guojun Jina09b8b02018-01-25 16:34:43 -08001229 * profile_init
1230 * initialize basic profile data structure
Sundarajan Srinivasan1b03fe22014-12-02 13:20:56 -08001231 */
1232static void profile_init(struct profile_io *node)
1233{
1234 int n;
1235
1236 memset(&node->pnc, 0, sizeof(node->pnc));
1237 node->ccl_read = 0;
1238 node->ccl_write = -1;
1239 node->pnc.pn2h = node->ccl;
1240 node->pnc.samples = node->ccl->samples;
1241
1242 for (n = 0; n < CCL_SIZE; n++) {
1243 node->ccl[n].mh.md_type = PINGPONG_EMPTY;
Guojun Jinf7f90f82018-08-16 18:09:23 -07001244 node->ccl[n].psc_header.ps_count = 0;
Sundarajan Srinivasan1b03fe22014-12-02 13:20:56 -08001245 }
1246
1247 /*
Guojun Jin3deae8c2016-08-23 15:51:21 -07001248 * sw_ksp is an array of pointers to struct thread_info,
1249 * the current task executing for each linux virtual processor
Sundarajan Srinivasan1b03fe22014-12-02 13:20:56 -08001250 node->sw_ksp_ptr = sw_ksp;
1251 */
Guojun Jin3deae8c2016-08-23 15:51:21 -07001252 node->sw_ksp_ptr = NULL;
Guojun Jinf7f90f82018-08-16 18:09:23 -07001253 /*
1254 * Old profile info: unused by now
1255 * node->task_offset = offsetof(struct thread_info, task);
1256 * node->pid_offset = offsetof(struct task_struct, tgid);
1257 */
Sundarajan Srinivasan1b03fe22014-12-02 13:20:56 -08001258}
1259
1260static struct proc_dir_entry *pdir;
1261
1262/*
Guojun Jina09b8b02018-01-25 16:34:43 -08001263 * netap_profile_release_resource
1264 * init_module cannot call exit_MODULE, so use this wrapper
Sundarajan Srinivasan1b03fe22014-12-02 13:20:56 -08001265 */
1266void netap_profile_release_resource(void)
1267{
1268 if (pdir) {
1269 remove_proc_entry("rate", pdir);
1270 remove_proc_entry("data", pdir);
1271 remove_proc_entry("data1", pdir);
1272 }
Guojun Jin487c84f2019-11-05 14:56:39 -08001273 nss_profile_dma_deregister_cb(node[0]->ctx, 0);
1274 nss_profile_dma_deregister_cb(node[1]->ctx, 0);
1275 nss_profiler_release_dma(node[1]->ctx);
1276 nss_profiler_release_dma(node[0]->ctx);
Sundarajan Srinivasan1b03fe22014-12-02 13:20:56 -08001277 kfree(node[0]->ccl);
1278 kfree(node[0]);
1279 node[0] = NULL;
Guojun Jin487c84f2019-11-05 14:56:39 -08001280
Sundarajan Srinivasan1b03fe22014-12-02 13:20:56 -08001281}
1282
1283/*
Guojun Jina09b8b02018-01-25 16:34:43 -08001284 * netap_profile_init_module
1285 * kernel module entry
Sundarajan Srinivasan1b03fe22014-12-02 13:20:56 -08001286 */
1287int __init netap_profile_init_module(void)
1288{
Murat Sezgin3441e772015-10-26 11:55:57 -07001289#ifdef CONFIG_OF
1290 /*
1291 * If the node is not compatible, don't do anything.
1292 */
1293 if (!of_find_node_by_name(NULL, "nss-common")) {
1294 return 0;
1295 }
1296#endif
Sundarajan Srinivasan1b03fe22014-12-02 13:20:56 -08001297 /*
1298 * we need N nodes, not one node + N ctx, for N cores
1299 */
1300 node[0] = kmalloc(sizeof(*node[0]) * NSS_MAX_CORES, GFP_KERNEL);
1301 if (!node[0]) {
Guojun Jin487c84f2019-11-05 14:56:39 -08001302 profileWarn("Profiler CTRL kmalloc failed.\n");
Sundarajan Srinivasan1b03fe22014-12-02 13:20:56 -08001303 return -ENOMEM;
1304 }
1305
1306 node[0]->ccl = kmalloc(sizeof(*node[0]->ccl) * CCL_SIZE * NSS_MAX_CORES, GFP_KERNEL);
1307 if (!node[0]->ccl) {
Guojun Jin487c84f2019-11-05 14:56:39 -08001308 profileWarn("Profiler n2h_sample_buf kmalloc failed.\n");
Sundarajan Srinivasan1b03fe22014-12-02 13:20:56 -08001309 kfree(node[0]);
1310 node[0] = NULL;
1311 return -ENOMEM;
1312 }
1313
1314 /*
1315 * connect to the file system
1316 */
1317 pdir = proc_mkdir("profile", NULL);
1318 if (!pdir ||
1319 !proc_create("data", 0, pdir, &profile_fops) ||
1320 !proc_create("data1", 0, pdir, &profile_fops) ||
1321 !proc_create("rate", 0, pdir, &profile_rate_fops)) {
1322 netap_profile_release_resource();
1323 return -ENOMEM;
1324 }
1325
1326 profile_init(node[0]);
1327
1328 /*
1329 * attatch the device callback to N2H channel for CPU 0
1330 */
1331 node[0]->ctx = nss_profiler_notify_register(NSS_CORE_0, profile_handle_nss_data, node[0]);
Guojun Jin487c84f2019-11-05 14:56:39 -08001332 if (!node[0]->ctx) {
1333 netap_profile_release_resource();
1334 return -ENXIO;
1335 }
1336 profile_prepare_dma(node[0]);
1337
Sundarajan Srinivasan1b03fe22014-12-02 13:20:56 -08001338#if NSS_MAX_CORES > 1
1339 node[1] = node[0] + 1;
1340 node[1]->ccl = node[0]->ccl + CCL_SIZE;
1341
1342 profile_init(node[1]);
1343 node[1]->ctx = nss_profiler_notify_register(NSS_CORE_1, profile_handle_nss_data, node[1]);
Guojun Jin487c84f2019-11-05 14:56:39 -08001344 profile_prepare_dma(node[1]);
1345
Sundarajan Srinivasan1b03fe22014-12-02 13:20:56 -08001346 profile_register_performance_counter(&node[1]->profile_sequence_num, "Profile1 DRV data packets");
1347#endif
1348
1349 profile_register_performance_counter(&node[0]->profile_sequence_num, "Profile0 DRV data packets");
1350 return 0;
1351}
1352
1353/*
Guojun Jina09b8b02018-01-25 16:34:43 -08001354 * netap_profile_exit_module
1355 * kernel module exit
Sundarajan Srinivasan1b03fe22014-12-02 13:20:56 -08001356 */
void __exit netap_profile_exit_module(void)
{
#ifdef CONFIG_OF
	/*
	 * If the node is not compatible, don't do anything.
	 * (Mirrors the guard in init_module: nothing was allocated.)
	 */
	if (!of_find_node_by_name(NULL, "nss-common")) {
		return;
	}
#endif
	/* stop N2H notifications before tearing down proc entries and memory */
	nss_profiler_notify_unregister(NSS_CORE_0);
#if NSS_MAX_CORES > 1
	nss_profiler_notify_unregister(NSS_CORE_1);
#endif
	netap_profile_release_resource();
}
1373
1374module_init(netap_profile_init_module);
1375module_exit(netap_profile_exit_module);
1376
1377MODULE_LICENSE("Dual BSD/GPL");