/*
 **************************************************************************
 * Copyright (c) 2017, The Linux Foundation. All rights reserved.
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all copies.
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 **************************************************************************
 */
16
17#include "nss_tx_rx_common.h"
18
19#define NSS_GRE_TX_TIMEOUT 3000 /* 3 Seconds */
20
/*
 * Private data structure
 *
 * Holds the state for one in-flight synchronous GRE message
 * (see nss_gre_tx_msg_sync()/nss_gre_callback()).
 */
static struct {
	struct semaphore sem;		/* Serializes callers: one outstanding sync message at a time */
	struct completion complete;	/* Completed by nss_gre_callback() when the response arrives */
	int response;			/* Result of the last sync message: NSS_TX_SUCCESS or NSS_TX_FAILURE */
	void *cb;			/* Caller's original callback, saved across the round trip */
	void *app_data;			/* Caller's original app_data, saved across the round trip */
} nss_gre_pvt;
31
/*
 * Data structures to store GRE nss debug stats
 */
static DEFINE_SPINLOCK(nss_gre_stats_lock);	/* Protects the two stats objects below (taken _bh) */
static struct nss_stats_gre_session_debug session_debug_stats[NSS_GRE_MAX_DEBUG_SESSION_STATS];	/* One slot per registered GRE session */
static struct nss_stats_gre_base_debug base_debug_stats;	/* Aggregate stats for the GRE base node */
38
39/*
40 * nss_gre_session_debug_stats_sync()
41 * debug statistics sync for GRE session.
42 */
43static void nss_gre_session_debug_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_gre_session_stats_msg *sstats, uint16_t if_num)
44{
45 int i, j;
46 spin_lock_bh(&nss_gre_stats_lock);
47 for (i = 0; i < NSS_GRE_MAX_DEBUG_SESSION_STATS; i++) {
48 if (session_debug_stats[i].if_num == if_num) {
49 for (j = 0; j < NSS_STATS_GRE_SESSION_DEBUG_MAX; j++) {
50 session_debug_stats[i].stats[j] += sstats->stats[j];
51 }
52 break;
53 }
54 }
55 spin_unlock_bh(&nss_gre_stats_lock);
56}
57
58/*
59 * nss_gre_session_debug_stats_get()
60 * Get GRE session debug statistics.
61 */
62void nss_gre_session_debug_stats_get(void *stats_mem, int size)
63{
64 struct nss_stats_gre_session_debug *stats = (struct nss_stats_gre_session_debug *)stats_mem;
65 int i;
66
67 if (!stats || (size < (sizeof(struct nss_stats_gre_session_debug) * NSS_STATS_GRE_SESSION_DEBUG_MAX))) {
68 nss_warning("No memory to copy gre stats");
69 return;
70 }
71
72 spin_lock_bh(&nss_gre_stats_lock);
73 for (i = 0; i < NSS_GRE_MAX_DEBUG_SESSION_STATS; i++) {
74 if (session_debug_stats[i].valid) {
75 memcpy(stats, &session_debug_stats[i], sizeof(struct nss_stats_gre_session_debug));
76 stats++;
77 }
78 }
79 spin_unlock_bh(&nss_gre_stats_lock);
80}
81
82/*
83 * nss_gre_base_debug_stats_sync()
84 * Debug statistics sync for GRE base node.
85 */
86void nss_gre_base_debug_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_gre_base_stats_msg *bstats)
87{
88 int i;
89 spin_lock_bh(&nss_gre_stats_lock);
90 for (i = 0; i < NSS_STATS_GRE_BASE_DEBUG_MAX; i++) {
91 base_debug_stats.stats[i] += bstats->stats[i];
92 }
93 spin_unlock_bh(&nss_gre_stats_lock);
94}
95
96/*
97 * nss_gre_base_debug_stats_get()
98 * Get GRE debug base statistics.
99 */
100void nss_gre_base_debug_stats_get(void *stats_mem, int size)
101{
102 struct nss_stats_gre_base_debug *stats = (struct nss_stats_gre_base_debug *)stats_mem;
103
104 if (!stats) {
105 nss_warning("No memory to copy GRE base stats\n");
106 return;
107 }
108
109 if (size < sizeof(struct nss_stats_gre_base_debug)) {
110 nss_warning("Not enough memory to copy GRE base stats\n");
111 return;
112 }
113
114 spin_lock_bh(&nss_gre_stats_lock);
115 memcpy(stats, &base_debug_stats, sizeof(struct nss_stats_gre_base_debug));
116 spin_unlock_bh(&nss_gre_stats_lock);
117}
118
/*
 * nss_gre_msg_handler()
 *	Handle NSS -> HLOS messages for GRE
 *
 * Validates the incoming common-message header, folds stats messages into
 * the local debug counters, then dispatches the message to the registered
 * callback (the per-message callback, or the global notify callback for
 * unsolicited messages).
 */
static void nss_gre_msg_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, __attribute__((unused))void *app_data)
{
	struct nss_gre_msg *ntm = (struct nss_gre_msg *)ncm;
	void *ctx;

	nss_gre_msg_callback_t cb;

	NSS_VERIFY_CTX_MAGIC(nss_ctx);
	/* Only dynamic GRE interfaces or the static GRE interface may reach here */
	BUG_ON(!(nss_is_dynamic_interface(ncm->interface) || ncm->interface == NSS_GRE_INTERFACE));

	/*
	 * Is this a valid request/response packet?
	 */
	if (ncm->type >= NSS_GRE_MSG_MAX) {
		nss_warning("%p: received invalid message %d for GRE STD interface", nss_ctx, ncm->type);
		return;
	}

	/*
	 * Reject messages whose declared length exceeds a GRE message.
	 * NOTE(review): the warning text says "tx request for another
	 * interface" but the condition checks length — looks copy-pasted;
	 * confirm before relying on this log line.
	 */
	if (nss_cmn_get_msg_len(ncm) > sizeof(struct nss_gre_msg)) {
		nss_warning("%p: tx request for another interface: %d", nss_ctx, ncm->interface);
		return;
	}

	/*
	 * Fold firmware statistics messages into local debug stats before
	 * delivering the message to the callback.
	 */
	switch (ntm->cm.type) {
	case NSS_GRE_MSG_SESSION_STATS:
		/*
		 * debug stats embedded in stats msg
		 */
		nss_gre_session_debug_stats_sync(nss_ctx, &ntm->msg.sstats, ncm->interface);
		break;

	case NSS_GRE_MSG_BASE_STATS:
		nss_gre_base_debug_stats_sync(nss_ctx, &ntm->msg.bstats);
		break;

	default:
		break;

	}

	/*
	 * Update the callback and app_data for NOTIFY messages, gre sends all notify messages
	 * to the same callback/app_data.
	 */
	if (ncm->response == NSS_CMM_RESPONSE_NOTIFY) {
		ncm->cb = (nss_ptr_t)nss_ctx->nss_top->gre_msg_callback;
		ncm->app_data = (nss_ptr_t)nss_ctx->subsys_dp_register[ncm->interface].app_data;
	}

	/*
	 * Log failures
	 */
	nss_core_log_msg_failures(nss_ctx, ncm);

	/*
	 * callback
	 */
	cb = (nss_gre_msg_callback_t)ncm->cb;
	ctx = (void *)ncm->app_data;

	/*
	 * call gre-std callback
	 */
	if (!cb) {
		nss_warning("%p: No callback for gre-std interface %d",
			nss_ctx, ncm->interface);
		return;
	}

	cb(ctx, ntm);
}
194
195/*
196 * nss_gre_callback()
197 * Callback to handle the completion of HLOS-->NSS messages.
198 */
199static void nss_gre_callback(void *app_data, struct nss_gre_msg *nim)
200{
201 nss_gre_msg_callback_t callback = (nss_gre_msg_callback_t)nss_gre_pvt.cb;
202 void *data = nss_gre_pvt.app_data;
203
204 nss_gre_pvt.cb = NULL;
205 nss_gre_pvt.app_data = NULL;
206
207 if (nim->cm.response != NSS_CMN_RESPONSE_ACK) {
208 nss_warning("gre Error response %d\n", nim->cm.response);
209 nss_gre_pvt.response = NSS_TX_FAILURE;
210 } else {
211 nss_gre_pvt.response = NSS_TX_SUCCESS;
212 }
213
214 if (callback) {
215 callback(data, nim);
216 }
217
218 complete(&nss_gre_pvt.complete);
219}
220
221/*
222 * nss_gre_tx_msg()
223 * Transmit a GRE message to NSS firmware
224 */
225nss_tx_status_t nss_gre_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_gre_msg *msg)
226{
227 struct nss_gre_msg *nm;
228 struct nss_cmn_msg *ncm = &msg->cm;
229 struct sk_buff *nbuf;
230 int32_t status;
231
232 NSS_VERIFY_CTX_MAGIC(nss_ctx);
233 if (unlikely(nss_ctx->state != NSS_CORE_STATE_INITIALIZED)) {
234 nss_warning("%p: gre msg dropped as core not ready", nss_ctx);
235 return NSS_TX_FAILURE_NOT_READY;
236 }
237
238 /*
239 * Sanity check the message
240 */
241 if (!nss_is_dynamic_interface(ncm->interface)) {
242 nss_warning("%p: tx request for non dynamic interface: %d", nss_ctx, ncm->interface);
243 return NSS_TX_FAILURE;
244 }
245
246 if (ncm->type > NSS_GRE_MSG_MAX) {
247 nss_warning("%p: message type out of range: %d", nss_ctx, ncm->type);
248 return NSS_TX_FAILURE;
249 }
250
251 if (nss_cmn_get_msg_len(ncm) > sizeof(struct nss_gre_msg)) {
252 nss_warning("%p: message length is invalid: %d", nss_ctx, nss_cmn_get_msg_len(ncm));
253 return NSS_TX_FAILURE;
254 }
255
256 nbuf = dev_alloc_skb(NSS_NBUF_PAYLOAD_SIZE);
257 if (unlikely(!nbuf)) {
258 NSS_PKT_STATS_INCREMENT(nss_ctx, &nss_ctx->nss_top->stats_drv[NSS_STATS_DRV_NBUF_ALLOC_FAILS]);
259 nss_warning("%p: msg dropped as command allocation failed", nss_ctx);
260 return NSS_TX_FAILURE;
261 }
262
263 /*
264 * Copy the message to our skb
265 */
266 nm = (struct nss_gre_msg *)skb_put(nbuf, sizeof(struct nss_gre_msg));
267 memcpy(nm, msg, sizeof(struct nss_gre_msg));
268
269 status = nss_core_send_buffer(nss_ctx, 0, nbuf, NSS_IF_CMD_QUEUE, H2N_BUFFER_CTRL, 0);
270 if (status != NSS_CORE_STATUS_SUCCESS) {
271 dev_kfree_skb_any(nbuf);
272 nss_warning("%p: Unable to enqueue 'gre message'\n", nss_ctx);
273 if (status == NSS_CORE_STATUS_FAILURE_QUEUE) {
274 return NSS_TX_FAILURE_QUEUE;
275 }
276 return NSS_TX_FAILURE;
277 }
278
279 nss_hal_send_interrupt(nss_ctx, NSS_H2N_INTR_DATA_COMMAND_QUEUE);
280
281 NSS_PKT_STATS_INCREMENT(nss_ctx, &nss_ctx->nss_top->stats_drv[NSS_STATS_DRV_TX_CMD_REQ]);
282 return NSS_TX_SUCCESS;
283}
284EXPORT_SYMBOL(nss_gre_tx_msg);
285
286/*
287 * nss_gre_tx_msg_sync()
288 * Transmit a GRE message to NSS firmware synchronously.
289 */
290nss_tx_status_t nss_gre_tx_msg_sync(struct nss_ctx_instance *nss_ctx, struct nss_gre_msg *msg)
291{
292 nss_tx_status_t status;
293 int ret = 0;
294
295 down(&nss_gre_pvt.sem);
296 nss_gre_pvt.cb = (void *)msg->cm.cb;
297 nss_gre_pvt.app_data = (void *)msg->cm.app_data;
298
299 msg->cm.cb = (nss_ptr_t)nss_gre_callback;
300 msg->cm.app_data = (nss_ptr_t)NULL;
301
302 status = nss_gre_tx_msg(nss_ctx, msg);
303 if (status != NSS_TX_SUCCESS) {
304 nss_warning("%p: gre_tx_msg failed\n", nss_ctx);
305 up(&nss_gre_pvt.sem);
306 return status;
307 }
308 ret = wait_for_completion_timeout(&nss_gre_pvt.complete, msecs_to_jiffies(NSS_GRE_TX_TIMEOUT));
309
310 if (!ret) {
311 nss_warning("%p: GRE STD tx sync failed due to timeout\n", nss_ctx);
312 nss_gre_pvt.response = NSS_TX_FAILURE;
313 }
314
315 status = nss_gre_pvt.response;
316 up(&nss_gre_pvt.sem);
317 return status;
318}
319EXPORT_SYMBOL(nss_gre_tx_msg_sync);
320
321/*
322 * nss_gre_tx_buf()
323 * Send packet to GRE interface owned by NSS
324 */
325nss_tx_status_t nss_gre_tx_buf(struct nss_ctx_instance *nss_ctx, uint32_t if_num, struct sk_buff *skb)
326{
327 int32_t status;
328
329 NSS_VERIFY_CTX_MAGIC(nss_ctx);
330 if (unlikely(nss_ctx->state != NSS_CORE_STATE_INITIALIZED)) {
331 nss_warning("%p: GRE std packet dropped as core not ready", nss_ctx);
332 return NSS_TX_FAILURE_NOT_READY;
333 }
334
335 status = nss_core_send_buffer(nss_ctx, if_num, skb, NSS_IF_DATA_QUEUE_0, H2N_BUFFER_PACKET, H2N_BIT_FLAG_VIRTUAL_BUFFER);
336 if (unlikely(status != NSS_CORE_STATUS_SUCCESS)) {
337 nss_warning("%p: Unable to enqueue GRE std packet\n", nss_ctx);
338 return NSS_TX_FAILURE_QUEUE;
339 }
340
341 /*
342 * Kick the NSS awake so it can process our new entry.
343 */
344 nss_hal_send_interrupt(nss_ctx, NSS_H2N_INTR_DATA_COMMAND_QUEUE);
345
346 NSS_PKT_STATS_INCREMENT(nss_ctx, &nss_ctx->nss_top->stats_drv[NSS_STATS_DRV_TX_PACKET]);
347 return NSS_TX_SUCCESS;
348
349}
350EXPORT_SYMBOL(nss_gre_tx_buf);
351
352/*
353 ***********************************
354 * Register/Unregister/Miscellaneous APIs
355 ***********************************
356 */
357
358/*
359 * nss_gre_register_if()
360 * Register data and message handlers for GRE.
361 */
362struct nss_ctx_instance *nss_gre_register_if(uint32_t if_num, nss_gre_data_callback_t gre_callback,
363 nss_gre_msg_callback_t event_callback, struct net_device *netdev, uint32_t features)
364{
365 struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.gre_handler_id];
366 int i = 0;
367
368 nss_assert(nss_ctx);
369 nss_assert(nss_is_dynamic_interface(if_num));
370
371 nss_ctx->subsys_dp_register[if_num].ndev = netdev;
372 nss_ctx->subsys_dp_register[if_num].cb = gre_callback;
373 nss_ctx->subsys_dp_register[if_num].app_data = netdev;
374 nss_ctx->subsys_dp_register[if_num].features = features;
375
376 nss_top_main.gre_msg_callback = event_callback;
377
378 nss_core_register_handler(if_num, nss_gre_msg_handler, NULL);
379
380 spin_lock_bh(&nss_gre_stats_lock);
381 for (i = 0; i < NSS_GRE_MAX_DEBUG_SESSION_STATS; i++) {
382 if (!session_debug_stats[i].valid) {
383 session_debug_stats[i].valid = true;
384 session_debug_stats[i].if_num = if_num;
385 session_debug_stats[i].if_index = netdev->ifindex;
386 break;
387 }
388 }
389 spin_unlock_bh(&nss_gre_stats_lock);
390
391 return nss_ctx;
392}
393EXPORT_SYMBOL(nss_gre_register_if);
394
395/*
396 * nss_gre_unregister_if()
397 * Unregister data and message handler.
398 */
399void nss_gre_unregister_if(uint32_t if_num)
400{
401 struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.gre_handler_id];
402 int i;
403
404 nss_assert(nss_ctx);
405 nss_assert(nss_is_dynamic_interface(if_num));
406
407 nss_ctx->subsys_dp_register[if_num].ndev = NULL;
408 nss_ctx->subsys_dp_register[if_num].cb = NULL;
409 nss_ctx->subsys_dp_register[if_num].app_data = NULL;
410 nss_ctx->subsys_dp_register[if_num].features = 0;
411
412 nss_top_main.gre_msg_callback = NULL;
413
414 nss_core_unregister_handler(if_num);
415
416 spin_lock_bh(&nss_gre_stats_lock);
417 for (i = 0; i < NSS_GRE_MAX_DEBUG_SESSION_STATS; i++) {
418 if (session_debug_stats[i].if_num == if_num) {
419 memset(&session_debug_stats[i], 0, sizeof(struct nss_stats_gre_session_debug));
420 break;
421 }
422 }
423 spin_unlock_bh(&nss_gre_stats_lock);
424}
425EXPORT_SYMBOL(nss_gre_unregister_if);
426
427/*
428 * nss_get_gre_context()
429 */
430struct nss_ctx_instance *nss_gre_get_context()
431{
432 return (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.gre_handler_id];
433}
434EXPORT_SYMBOL(nss_gre_get_context);
435
/*
 * nss_gre_msg_init()
 *	Initialize nss_gre msg.
 *
 * Thin wrapper over nss_cmn_msg_init(): fills the common header of the GRE
 * message (destination interface, message type, payload length, completion
 * callback and its app_data).
 */
void nss_gre_msg_init(struct nss_gre_msg *ncm, uint16_t if_num, uint32_t type, uint32_t len, void *cb, void *app_data)
{
	nss_cmn_msg_init(&ncm->cm, if_num, type, len, cb, app_data);
}
EXPORT_SYMBOL(nss_gre_msg_init);
445
446/*
447 * nss_gre_register_handler()
448 * debugfs stats msg handler received on static gre interface
449 */
450void nss_gre_register_handler(void)
451{
452 nss_info("nss_gre_register_handler");
453 sema_init(&nss_gre_pvt.sem, 1);
454 init_completion(&nss_gre_pvt.complete);
455 nss_core_register_handler(NSS_GRE_INTERFACE, nss_gre_msg_handler, NULL);
456}