/*
 **************************************************************************
 * Copyright (c) 2013-2020, The Linux Foundation. All rights reserved.
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all copies.
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 **************************************************************************
 */

/*
 * nss_profiler.c
 *	NSS profiler APIs
 */

#include "nss_tx_rx_common.h"

/*
 * nss_profiler_rx_msg_handler()
 *	Handle profiler information.
 */
static void nss_profiler_rx_msg_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, void *app)
{
	struct nss_profiler_msg *pm = (struct nss_profiler_msg *)ncm;
	void *ctx = nss_ctx->nss_top->profiler_ctx[nss_ctx->id];
	nss_profiler_callback_t cb = nss_ctx->nss_top->profiler_callback[nss_ctx->id];

	if (ncm->type >= NSS_PROFILER_MAX_MSG_TYPES) {
		nss_warning("%px: message type out of range: %d", nss_ctx, ncm->type);
		return;
	}

	/*
	 * Validate the message length against the payload expected for this
	 * message type range.
	 */
	if (ncm->type <= NSS_PROFILER_FLOWCTRL_MSG) {
		if (ncm->len > sizeof(pm->payload.pcmdp)) {
			nss_warning("%px: reply for cmd %d size is wrong %d : %d\n", nss_ctx, ncm->type, ncm->len, ncm->interface);
			return;
		}
	} else if (ncm->type <= NSS_PROFILER_DEBUG_REPLY_MSG) {
		if (ncm->len > sizeof(pm->payload.pdm)) {
			nss_warning("%px: reply for debug %d is too big %d\n", nss_ctx, ncm->type, ncm->len);
			return;
		}
	} else if (ncm->type <= NSS_PROFILER_COUNTERS_MSG) {
		if (ncm->len < (sizeof(pm->payload.pcmdp) - (PROFILE_MAX_APP_COUNTERS - pm->payload.pcmdp.num_counters) * sizeof(pm->payload.pcmdp.counters[0])) || ncm->len > sizeof(pm->payload.pcmdp)) {
			nss_warning("%px: %d params data is too big %d : %d\n", nss_ctx, ncm->type, ncm->len, ncm->interface);
			return;
		}
	}

	/*
	 * Per-request status callback: a reply to an earlier request carries
	 * its own callback, which overrides the registered one.
	 */
	if (ncm->response != NSS_CMN_RESPONSE_NOTIFY && ncm->cb) {
		nss_info("%px: reply CB %px for %d %d\n", nss_ctx, (void *)ncm->cb, ncm->type, ncm->response);
		cb = (nss_profiler_callback_t)ncm->cb;
	}

	/*
	 * Sample-related callback registered via nss_profiler_notify_register().
	 */
	if (!cb || !ctx) {
		nss_warning("%px: Event received for profiler interface before registration", nss_ctx);
		return;
	}

	cb(ctx, (struct nss_profiler_msg *)ncm);
}

/*
 * nss_profiler_if_tx_buf()
 *	NSS profiler Tx API
 */
nss_tx_status_t nss_profiler_if_tx_buf(void *ctx, void *buf, uint32_t len,
					void *cb, void *app_data)
{
	struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *)ctx;
	struct nss_profiler_msg *npm;
	struct nss_profiler_data_msg *pdm = (struct nss_profiler_data_msg *)buf;
	nss_tx_status_t ret;

	nss_trace("%px: Profiler If Tx, buf=%px", nss_ctx, buf);

	if (sizeof(npm->payload) < len) {
		nss_warning("%px: (%u)Bad message length(%u)", nss_ctx, NSS_PROFILER_INTERFACE, len);
		return NSS_TX_FAILURE_TOO_LARGE;
	}

	if (NSS_NBUF_PAYLOAD_SIZE < (len + sizeof(npm->cm))) {
		nss_warning("%px: (%u)Message length(%u) is larger than payload size (%u)",
			nss_ctx, NSS_PROFILER_INTERFACE, (uint32_t)(len + sizeof(npm->cm)), NSS_NBUF_PAYLOAD_SIZE);
		return NSS_TX_FAILURE_TOO_LARGE;
	}

	npm = kzalloc(sizeof(*npm), GFP_KERNEL);
	if (!npm) {
		nss_warning("%px: Failed to allocate memory for message\n", nss_ctx);
		return NSS_TX_FAILURE;
	}

	memcpy(&npm->payload, pdm, len);
	nss_profiler_msg_init(npm, NSS_PROFILER_INTERFACE, pdm->hd_magic & 0xFF, len,
				cb, app_data);

	ret = nss_core_send_cmd(nss_ctx, npm, sizeof(npm->cm) + len, NSS_NBUF_PAYLOAD_SIZE);
	kfree(npm);
	return ret;
}
EXPORT_SYMBOL(nss_profiler_if_tx_buf);
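
/*
 * Illustrative usage sketch for nss_profiler_if_tx_buf(); a comment-only
 * example, not part of the driver. The caller obtains the NSS context from
 * nss_profiler_notify_register() (defined below) and passes a buffer that
 * begins with struct nss_profiler_data_msg, whose hd_magic low byte selects
 * the message type. The names profiler_cb, reply_cb, my_ctx and pdm are
 * hypothetical, and NSS_CORE_0 is assumed to be a valid nss_core_id_t value.
 *
 *	void *nctx = nss_profiler_notify_register(NSS_CORE_0, profiler_cb, my_ctx);
 *
 *	if (nctx)
 *		nss_profiler_if_tx_buf(nctx, &pdm, sizeof(pdm), reply_cb, NULL);
 */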

/*
 * nss_profiler_alloc_dma()
 *	Allocate and map a DMA buffer ring for the profiler.
 */
void *nss_profiler_alloc_dma(struct nss_ctx_instance *nss_ctx, struct nss_profile_sdma_producer **dma_p)
{
	int size;
	void *kaddr;
	struct nss_profile_sdma_producer *dma;
	struct nss_profile_sdma_ctrl *ctrl = (struct nss_profile_sdma_ctrl *)nss_ctx->meminfo_ctx.sdma_ctrl;

	if (!ctrl)
		return NULL;

	dma = ctrl->producer;
	*dma_p = dma;
	size = dma->num_bufs * dma->buf_size;
	kaddr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);

	if (kaddr) {
		dma->desc_ring = dma_map_single(nss_ctx->dev, kaddr, size, DMA_FROM_DEVICE);
		NSS_CORE_DSB();
	}
	ctrl->consumer[0].ring.kp = kaddr;
	return kaddr;
}
EXPORT_SYMBOL(nss_profiler_alloc_dma);

/*
 * nss_profiler_release_dma()
 *	Free the profiler DMA buffer.
 */
void nss_profiler_release_dma(struct nss_ctx_instance *nss_ctx)
{
	struct nss_profile_sdma_ctrl *ctrl;

	if (!nss_ctx)
		return;

	ctrl = nss_ctx->meminfo_ctx.sdma_ctrl;

	if (ctrl && ctrl->consumer[0].ring.kp) {
		kfree(ctrl->consumer[0].ring.kp);
		ctrl->consumer[0].ring.kp = NULL;
	}
}
EXPORT_SYMBOL(nss_profiler_release_dma);
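
/*
 * A minimal sketch of how the two DMA helpers above are expected to pair up
 * (comment only, not compiled): a profiler client allocates the buffer ring
 * at attach time, keeps the returned producer descriptor, and releases the
 * ring on detach. The dma_p and ring names are hypothetical.
 *
 *	struct nss_profile_sdma_producer *dma_p;
 *	void *ring = nss_profiler_alloc_dma(nss_ctx, &dma_p);
 *
 *	if (!ring)
 *		return -ENOMEM;
 *
 *	nss_profiler_release_dma(nss_ctx);
 */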

/*
 * nss_profile_dma_register_cb()
 *	Register a dispatch handler for a profile DMA consumer ring.
 */
bool nss_profile_dma_register_cb(struct nss_ctx_instance *nss_ctx, int id,
				void (*cb)(void*), void *arg)
{
	struct nss_profile_sdma_ctrl *ctrl = (struct nss_profile_sdma_ctrl *)nss_ctx->meminfo_ctx.sdma_ctrl;

	nss_info("%px dma_register_cb %d: %px %px\n", ctrl, id, cb, arg);
	if (!ctrl)
		return false;

	ctrl->consumer[id].dispatch.fp = cb;
	ctrl->consumer[id].arg.kp = arg;
	return true;
}
EXPORT_SYMBOL(nss_profile_dma_register_cb);

/*
 * nss_profile_dma_deregister_cb()
 *	Deregister the dispatch handler for a profile DMA consumer ring.
 */
bool nss_profile_dma_deregister_cb(struct nss_ctx_instance *nss_ctx, int id)
{
	struct nss_profile_sdma_ctrl *ctrl = (struct nss_profile_sdma_ctrl *)nss_ctx->meminfo_ctx.sdma_ctrl;

	if (!ctrl)
		return false;

	ctrl->consumer[id].dispatch.fp = NULL;
	return true;
}
EXPORT_SYMBOL(nss_profile_dma_deregister_cb);

/*
 * nss_profile_dma_get_ctrl()
 *	Wrapper to get the profile DMA control structure; invalidates the CPU-cached copy first.
 */
struct nss_profile_sdma_ctrl *nss_profile_dma_get_ctrl(struct nss_ctx_instance *nss_ctx)
{
	struct nss_profile_sdma_ctrl *ctrl = nss_ctx->meminfo_ctx.sdma_ctrl;

	if (!ctrl) {
		return ctrl;
	}

	dmac_inv_range(ctrl, &ctrl->cidx);
	dsb(sy);
	return ctrl;
}
EXPORT_SYMBOL(nss_profile_dma_get_ctrl);
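
/*
 * Sketch of a hypothetical consumer of the DMA dispatch API above (comment
 * only, not compiled). A handler is registered for one consumer ring,
 * nss_profile_dma_get_ctrl() is used to re-read the control structure with
 * its cache lines freshly invalidated, and the handler is removed on
 * teardown. The names my_dispatch and my_state are hypothetical, and
 * consumer index 0 is assumed to be valid.
 *
 *	if (!nss_profile_dma_register_cb(nss_ctx, 0, my_dispatch, my_state))
 *		return false;
 *
 *	ctrl = nss_profile_dma_get_ctrl(nss_ctx);
 *
 *	nss_profile_dma_deregister_cb(nss_ctx, 0);
 */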

/*
 * nss_profiler_notify_register()
 *	Register a profiler callback and the message handler for the given NSS core.
 */
void *nss_profiler_notify_register(nss_core_id_t core_id, nss_profiler_callback_t profiler_callback, void *ctx)
{
	nss_assert(core_id < NSS_CORE_MAX);

	if (NSS_CORE_STATUS_SUCCESS !=
		nss_core_register_handler(&nss_top_main.nss[core_id], NSS_PROFILER_INTERFACE, nss_profiler_rx_msg_handler, NULL)) {
		nss_warning("Message handler FAILED to be registered for profiler");
		return NULL;
	}

	nss_top_main.profiler_ctx[core_id] = ctx;
	nss_top_main.profiler_callback[core_id] = profiler_callback;

	return (void *)&nss_top_main.nss[core_id];
}
EXPORT_SYMBOL(nss_profiler_notify_register);

/*
 * nss_profiler_notify_unregister()
 *	Unregister the profiler callback for the given NSS core.
 */
void nss_profiler_notify_unregister(nss_core_id_t core_id)
{
	nss_assert(core_id < NSS_CORE_MAX);

	nss_core_unregister_handler(&nss_top_main.nss[core_id], NSS_PROFILER_INTERFACE);
	nss_top_main.profiler_callback[core_id] = NULL;
	nss_top_main.profiler_ctx[core_id] = NULL;
}
EXPORT_SYMBOL(nss_profiler_notify_unregister);
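
/*
 * Sketch of the registration lifecycle (comment only, not compiled). The
 * registered callback receives the context pointer supplied here plus the
 * received profiler message, matching the dispatch in
 * nss_profiler_rx_msg_handler() above; the exact nss_profiler_callback_t
 * prototype lives in the profiler header. The names profiler_cb and my_ctx
 * are hypothetical.
 *
 *	static void profiler_cb(void *app_ctx, struct nss_profiler_msg *npm);
 *
 *	void *nctx = nss_profiler_notify_register(NSS_CORE_0, profiler_cb, my_ctx);
 *	if (!nctx)
 *		return;
 *
 *	nss_profiler_notify_unregister(NSS_CORE_0);
 */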

/*
 * nss_profiler_msg_init()
 *	Initialize profiler message.
 */
void nss_profiler_msg_init(struct nss_profiler_msg *npm, uint16_t if_num, uint32_t type, uint32_t len,
				nss_profiler_callback_t cb, void *app_data)
{
	nss_cmn_msg_init(&npm->cm, if_num, type, len, (void *)cb, app_data);
}
EXPORT_SYMBOL(nss_profiler_msg_init);