blob: 78704ef5f063464d534d55e2e383b0aa516083dd [file] [log] [blame]
ratheesh kannotheb2a0a82017-05-04 09:20:17 +05301/*
2 **************************************************************************
3 * Copyright (c) 2017, The Linux Foundation. All rights reserved.
4 * Permission to use, copy, modify, and/or distribute this software for
5 * any purpose with or without fee is hereby granted, provided that the
6 * above copyright notice and this permission notice appear in all copies.
7 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
8 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
9 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
10 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
11 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
12 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
13 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
14 **************************************************************************
15 */
16
17#include "nss_tx_rx_common.h"
18
19#define NSS_GRE_TX_TIMEOUT 3000 /* 3 Seconds */
20
/*
 * Private data structure
 *
 * State for the one-at-a-time synchronous message path
 * (nss_gre_tx_msg_sync() / nss_gre_callback()).
 */
static struct {
	struct semaphore sem;		/* Serializes sync senders: one outstanding message at a time */
	struct completion complete;	/* Completed by nss_gre_callback() when the response arrives */
	int response;			/* NSS_TX_SUCCESS or NSS_TX_FAILURE for the last sync message */
	void *cb;			/* Caller's parked message callback, invoked from nss_gre_callback() */
	void *app_data;			/* Caller's parked app_data for the callback above */
} nss_gre_pvt;
31
/*
 * Data structures to store GRE nss debug stats
 */
static DEFINE_SPINLOCK(nss_gre_stats_lock);	/* Protects the two stats aggregates below */
static struct nss_stats_gre_session_debug session_debug_stats[NSS_GRE_MAX_DEBUG_SESSION_STATS];	/* Per-session debug counters, one slot per registered interface */
static struct nss_stats_gre_base_debug base_debug_stats;	/* GRE base-node debug counters */

/* Registered raw-packet callback stored as an address so the rx path can read it locklessly */
static atomic64_t pkt_cb_addr = ATOMIC64_INIT(0);
41/*
42 * nss_gre_rx_handler()
43 * GRE rx handler.
44 */
45static void nss_gre_rx_handler(struct net_device *dev, struct sk_buff *skb,
46 __attribute__((unused)) struct napi_struct *napi)
47{
48 nss_gre_data_callback_t cb;
49
50 nss_gre_pkt_callback_t scb = (nss_gre_pkt_callback_t)(unsigned long)atomic64_read(&pkt_cb_addr);
51 if (unlikely(scb)) {
52 struct nss_gre_info *info = (struct nss_gre_info *)netdev_priv(dev);
53 if (likely(info->next_dev)) {
54 scb(info->next_dev, skb);
55 }
56 }
57
58 cb = nss_top_main.gre_data_callback;
59 cb(dev, skb, 0);
60}
61
ratheesh kannotheb2a0a82017-05-04 09:20:17 +053062/*
63 * nss_gre_session_debug_stats_sync()
64 * debug statistics sync for GRE session.
65 */
66static void nss_gre_session_debug_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_gre_session_stats_msg *sstats, uint16_t if_num)
67{
68 int i, j;
69 spin_lock_bh(&nss_gre_stats_lock);
70 for (i = 0; i < NSS_GRE_MAX_DEBUG_SESSION_STATS; i++) {
71 if (session_debug_stats[i].if_num == if_num) {
72 for (j = 0; j < NSS_STATS_GRE_SESSION_DEBUG_MAX; j++) {
73 session_debug_stats[i].stats[j] += sstats->stats[j];
74 }
75 break;
76 }
77 }
78 spin_unlock_bh(&nss_gre_stats_lock);
79}
80
81/*
ratheesh kannotheb2a0a82017-05-04 09:20:17 +053082 * nss_gre_base_debug_stats_sync()
83 * Debug statistics sync for GRE base node.
84 */
ratheesh kannoth7746db22017-05-16 14:16:43 +053085static void nss_gre_base_debug_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_gre_base_stats_msg *bstats)
ratheesh kannotheb2a0a82017-05-04 09:20:17 +053086{
87 int i;
88 spin_lock_bh(&nss_gre_stats_lock);
89 for (i = 0; i < NSS_STATS_GRE_BASE_DEBUG_MAX; i++) {
90 base_debug_stats.stats[i] += bstats->stats[i];
91 }
92 spin_unlock_bh(&nss_gre_stats_lock);
93}
94
/*
 * nss_gre_msg_handler()
 *	Handle NSS -> HLOS messages for GRE
 *
 * Validates the incoming common message, folds stats messages into the
 * local debug aggregates, then delivers the message to the appropriate
 * callback (the per-message callback, or the global notify callback for
 * unsolicited messages).
 */
static void nss_gre_msg_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, __attribute__((unused))void *app_data)
{
	struct nss_gre_msg *ntm = (struct nss_gre_msg *)ncm;
	void *ctx;

	nss_gre_msg_callback_t cb;

	NSS_VERIFY_CTX_MAGIC(nss_ctx);
	/* Only dynamic GRE interfaces or the static GRE interface may reach this handler. */
	BUG_ON(!(nss_is_dynamic_interface(ncm->interface) || ncm->interface == NSS_GRE_INTERFACE));

	/*
	 * Is this a valid request/response packet?
	 */
	if (ncm->type >= NSS_GRE_MSG_MAX) {
		nss_warning("%p: received invalid message %d for GRE STD interface", nss_ctx, ncm->type);
		return;
	}

	/* Reject messages whose declared length exceeds the GRE message size. */
	if (nss_cmn_get_msg_len(ncm) > sizeof(struct nss_gre_msg)) {
		nss_warning("%p: tx request for another interface: %d", nss_ctx, ncm->interface);
		return;
	}

	switch (ntm->cm.type) {
	case NSS_GRE_MSG_SESSION_STATS:
		/*
		 * debug stats embedded in stats msg
		 */
		nss_gre_session_debug_stats_sync(nss_ctx, &ntm->msg.sstats, ncm->interface);
		break;

	case NSS_GRE_MSG_BASE_STATS:
		nss_gre_base_debug_stats_sync(nss_ctx, &ntm->msg.bstats);
		break;

	default:
		/* Non-stats messages need no local processing; fall through to delivery. */
		break;

	}

	/*
	 * Update the callback and app_data for NOTIFY messages, gre sends all notify messages
	 * to the same callback/app_data.
	 */
	if (ncm->response == NSS_CMM_RESPONSE_NOTIFY) {
		ncm->cb = (nss_ptr_t)nss_ctx->nss_top->gre_msg_callback;
		ncm->app_data = (nss_ptr_t)nss_ctx->subsys_dp_register[ncm->interface].app_data;
	}

	/*
	 * Log failures
	 */
	nss_core_log_msg_failures(nss_ctx, ncm);

	/*
	 * callback
	 */
	cb = (nss_gre_msg_callback_t)ncm->cb;
	ctx = (void *)ncm->app_data;

	/*
	 * call gre-std callback
	 */
	if (!cb) {
		nss_warning("%p: No callback for gre-std interface %d",
			nss_ctx, ncm->interface);
		return;
	}

	cb(ctx, ntm);
}
170
171/*
172 * nss_gre_callback()
173 * Callback to handle the completion of HLOS-->NSS messages.
174 */
175static void nss_gre_callback(void *app_data, struct nss_gre_msg *nim)
176{
177 nss_gre_msg_callback_t callback = (nss_gre_msg_callback_t)nss_gre_pvt.cb;
178 void *data = nss_gre_pvt.app_data;
179
180 nss_gre_pvt.cb = NULL;
181 nss_gre_pvt.app_data = NULL;
182
183 if (nim->cm.response != NSS_CMN_RESPONSE_ACK) {
184 nss_warning("gre Error response %d\n", nim->cm.response);
185 nss_gre_pvt.response = NSS_TX_FAILURE;
186 } else {
187 nss_gre_pvt.response = NSS_TX_SUCCESS;
188 }
189
190 if (callback) {
191 callback(data, nim);
192 }
193
194 complete(&nss_gre_pvt.complete);
195}
196
197/*
ratheesh kannoth7746db22017-05-16 14:16:43 +0530198 * nss_gre_session_debug_stats_get()
199 * Get GRE session debug statistics.
200 */
201void nss_gre_session_debug_stats_get(void *stats_mem, int size)
202{
203 struct nss_stats_gre_session_debug *stats = (struct nss_stats_gre_session_debug *)stats_mem;
204 int i;
205
206 if (!stats || (size < (sizeof(struct nss_stats_gre_session_debug) * NSS_STATS_GRE_SESSION_DEBUG_MAX))) {
207 nss_warning("No memory to copy gre stats");
208 return;
209 }
210
211 spin_lock_bh(&nss_gre_stats_lock);
212 for (i = 0; i < NSS_GRE_MAX_DEBUG_SESSION_STATS; i++) {
213 if (session_debug_stats[i].valid) {
214 memcpy(stats, &session_debug_stats[i], sizeof(struct nss_stats_gre_session_debug));
215 stats++;
216 }
217 }
218 spin_unlock_bh(&nss_gre_stats_lock);
219}
220
221/*
222 * nss_gre_base_debug_stats_get()
223 * Get GRE debug base statistics.
224 */
225void nss_gre_base_debug_stats_get(void *stats_mem, int size)
226{
227 struct nss_stats_gre_base_debug *stats = (struct nss_stats_gre_base_debug *)stats_mem;
228
229 if (!stats) {
230 nss_warning("No memory to copy GRE base stats\n");
231 return;
232 }
233
234 if (size < sizeof(struct nss_stats_gre_base_debug)) {
235 nss_warning("Not enough memory to copy GRE base stats\n");
236 return;
237 }
238
239 spin_lock_bh(&nss_gre_stats_lock);
240 memcpy(stats, &base_debug_stats, sizeof(struct nss_stats_gre_base_debug));
241 spin_unlock_bh(&nss_gre_stats_lock);
242}
243
/*
 * nss_gre_register_pkt_callback()
 *	Register for data callback.
 *
 * Stores the callback's address atomically so nss_gre_rx_handler() can read
 * it without a lock; a non-zero value enables the raw-packet hook.
 */
void nss_gre_register_pkt_callback(nss_gre_pkt_callback_t cb)
{
	atomic64_set(&pkt_cb_addr, (unsigned long)cb);
}
EXPORT_SYMBOL(nss_gre_register_pkt_callback);
253
254/*
255 * nss_gre_unregister_pkt_callback()
256 * Unregister for data callback.
257 */
258void nss_gre_unregister_pkt_callback()
259{
260 atomic64_set(&pkt_cb_addr, 0);
261}
262EXPORT_SYMBOL(nss_gre_unregister_pkt_callback);
263
264/*
ratheesh kannotheb2a0a82017-05-04 09:20:17 +0530265 * nss_gre_tx_msg()
266 * Transmit a GRE message to NSS firmware
267 */
268nss_tx_status_t nss_gre_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_gre_msg *msg)
269{
270 struct nss_gre_msg *nm;
271 struct nss_cmn_msg *ncm = &msg->cm;
272 struct sk_buff *nbuf;
273 int32_t status;
274
275 NSS_VERIFY_CTX_MAGIC(nss_ctx);
276 if (unlikely(nss_ctx->state != NSS_CORE_STATE_INITIALIZED)) {
277 nss_warning("%p: gre msg dropped as core not ready", nss_ctx);
278 return NSS_TX_FAILURE_NOT_READY;
279 }
280
281 /*
282 * Sanity check the message
283 */
284 if (!nss_is_dynamic_interface(ncm->interface)) {
285 nss_warning("%p: tx request for non dynamic interface: %d", nss_ctx, ncm->interface);
286 return NSS_TX_FAILURE;
287 }
288
289 if (ncm->type > NSS_GRE_MSG_MAX) {
290 nss_warning("%p: message type out of range: %d", nss_ctx, ncm->type);
291 return NSS_TX_FAILURE;
292 }
293
294 if (nss_cmn_get_msg_len(ncm) > sizeof(struct nss_gre_msg)) {
295 nss_warning("%p: message length is invalid: %d", nss_ctx, nss_cmn_get_msg_len(ncm));
296 return NSS_TX_FAILURE;
297 }
298
299 nbuf = dev_alloc_skb(NSS_NBUF_PAYLOAD_SIZE);
300 if (unlikely(!nbuf)) {
301 NSS_PKT_STATS_INCREMENT(nss_ctx, &nss_ctx->nss_top->stats_drv[NSS_STATS_DRV_NBUF_ALLOC_FAILS]);
302 nss_warning("%p: msg dropped as command allocation failed", nss_ctx);
303 return NSS_TX_FAILURE;
304 }
305
306 /*
307 * Copy the message to our skb
308 */
309 nm = (struct nss_gre_msg *)skb_put(nbuf, sizeof(struct nss_gre_msg));
310 memcpy(nm, msg, sizeof(struct nss_gre_msg));
311
312 status = nss_core_send_buffer(nss_ctx, 0, nbuf, NSS_IF_CMD_QUEUE, H2N_BUFFER_CTRL, 0);
313 if (status != NSS_CORE_STATUS_SUCCESS) {
314 dev_kfree_skb_any(nbuf);
315 nss_warning("%p: Unable to enqueue 'gre message'\n", nss_ctx);
316 if (status == NSS_CORE_STATUS_FAILURE_QUEUE) {
317 return NSS_TX_FAILURE_QUEUE;
318 }
319 return NSS_TX_FAILURE;
320 }
321
322 nss_hal_send_interrupt(nss_ctx, NSS_H2N_INTR_DATA_COMMAND_QUEUE);
323
324 NSS_PKT_STATS_INCREMENT(nss_ctx, &nss_ctx->nss_top->stats_drv[NSS_STATS_DRV_TX_CMD_REQ]);
325 return NSS_TX_SUCCESS;
326}
327EXPORT_SYMBOL(nss_gre_tx_msg);
328
/*
 * nss_gre_tx_msg_sync()
 *	Transmit a GRE message to NSS firmware synchronously.
 *
 * Serialized by nss_gre_pvt.sem so only one synchronous message is in flight
 * at a time. The caller's cm.cb/cm.app_data are parked in nss_gre_pvt and
 * replaced with nss_gre_callback(), which records the result and completes
 * nss_gre_pvt.complete.
 *
 * Returns NSS_TX_SUCCESS, the enqueue failure code from nss_gre_tx_msg(),
 * or NSS_TX_FAILURE on timeout (NSS_GRE_TX_TIMEOUT ms).
 *
 * NOTE(review): the completion is never re-initialized before sending; if a
 * previous call timed out and its complete() arrived late, this wait could
 * be satisfied early by stale state — confirm whether reinit_completion()
 * is needed here.
 */
nss_tx_status_t nss_gre_tx_msg_sync(struct nss_ctx_instance *nss_ctx, struct nss_gre_msg *msg)
{
	nss_tx_status_t status;
	int ret = 0;

	down(&nss_gre_pvt.sem);
	/* Park the caller's callback; nss_gre_callback() will invoke it on response. */
	nss_gre_pvt.cb = (void *)msg->cm.cb;
	nss_gre_pvt.app_data = (void *)msg->cm.app_data;

	/* Redirect the response to our own completion callback. */
	msg->cm.cb = (nss_ptr_t)nss_gre_callback;
	msg->cm.app_data = (nss_ptr_t)NULL;

	status = nss_gre_tx_msg(nss_ctx, msg);
	if (status != NSS_TX_SUCCESS) {
		nss_warning("%p: gre_tx_msg failed\n", nss_ctx);
		up(&nss_gre_pvt.sem);
		return status;
	}
	ret = wait_for_completion_timeout(&nss_gre_pvt.complete, msecs_to_jiffies(NSS_GRE_TX_TIMEOUT));

	if (!ret) {
		nss_warning("%p: GRE STD tx sync failed due to timeout\n", nss_ctx);
		nss_gre_pvt.response = NSS_TX_FAILURE;
	}

	/* Result was set by nss_gre_callback(), or forced to failure on timeout above. */
	status = nss_gre_pvt.response;
	up(&nss_gre_pvt.sem);
	return status;
}
EXPORT_SYMBOL(nss_gre_tx_msg_sync);
363
364/*
365 * nss_gre_tx_buf()
366 * Send packet to GRE interface owned by NSS
367 */
368nss_tx_status_t nss_gre_tx_buf(struct nss_ctx_instance *nss_ctx, uint32_t if_num, struct sk_buff *skb)
369{
370 int32_t status;
371
372 NSS_VERIFY_CTX_MAGIC(nss_ctx);
373 if (unlikely(nss_ctx->state != NSS_CORE_STATE_INITIALIZED)) {
374 nss_warning("%p: GRE std packet dropped as core not ready", nss_ctx);
375 return NSS_TX_FAILURE_NOT_READY;
376 }
377
378 status = nss_core_send_buffer(nss_ctx, if_num, skb, NSS_IF_DATA_QUEUE_0, H2N_BUFFER_PACKET, H2N_BIT_FLAG_VIRTUAL_BUFFER);
379 if (unlikely(status != NSS_CORE_STATUS_SUCCESS)) {
380 nss_warning("%p: Unable to enqueue GRE std packet\n", nss_ctx);
381 return NSS_TX_FAILURE_QUEUE;
382 }
383
384 /*
385 * Kick the NSS awake so it can process our new entry.
386 */
387 nss_hal_send_interrupt(nss_ctx, NSS_H2N_INTR_DATA_COMMAND_QUEUE);
388
389 NSS_PKT_STATS_INCREMENT(nss_ctx, &nss_ctx->nss_top->stats_drv[NSS_STATS_DRV_TX_PACKET]);
390 return NSS_TX_SUCCESS;
391
392}
393EXPORT_SYMBOL(nss_gre_tx_buf);
394
395/*
396 ***********************************
397 * Register/Unregister/Miscellaneous APIs
398 ***********************************
399 */
400
/*
 * nss_gre_register_if()
 *	Register data and message handlers for GRE.
 *
 * Binds netdev and its callbacks to dynamic interface if_num on the GRE
 * handling core, installs the per-interface message handler, and claims a
 * free slot in the session debug stats table.
 *
 * NOTE(review): the return value of nss_core_register_handler() is not
 * checked, so a failed registration would go unnoticed — confirm intended.
 */
struct nss_ctx_instance *nss_gre_register_if(uint32_t if_num, nss_gre_data_callback_t data_callback,
			nss_gre_msg_callback_t event_callback, struct net_device *netdev, uint32_t features)
{
	struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.gre_handler_id];
	int i = 0;

	nss_assert(nss_ctx);
	nss_assert(nss_is_dynamic_interface(if_num));

	/*
	 * Hook the data path: packets for if_num go to nss_gre_rx_handler()
	 * with netdev as the app_data context.
	 */
	nss_ctx->subsys_dp_register[if_num].ndev = netdev;
	nss_ctx->subsys_dp_register[if_num].cb = nss_gre_rx_handler;
	nss_ctx->subsys_dp_register[if_num].app_data = netdev;
	nss_ctx->subsys_dp_register[if_num].features = features;

	/*
	 * Global callbacks shared by all GRE interfaces: last registration wins.
	 */
	nss_top_main.gre_msg_callback = event_callback;
	nss_top_main.gre_data_callback = data_callback;

	nss_core_register_handler(if_num, nss_gre_msg_handler, NULL);

	/*
	 * Claim the first free per-session debug stats slot for this interface.
	 */
	spin_lock_bh(&nss_gre_stats_lock);
	for (i = 0; i < NSS_GRE_MAX_DEBUG_SESSION_STATS; i++) {
		if (!session_debug_stats[i].valid) {
			session_debug_stats[i].valid = true;
			session_debug_stats[i].if_num = if_num;
			session_debug_stats[i].if_index = netdev->ifindex;
			break;
		}
	}
	spin_unlock_bh(&nss_gre_stats_lock);

	return nss_ctx;
}
EXPORT_SYMBOL(nss_gre_register_if);
438
439/*
440 * nss_gre_unregister_if()
441 * Unregister data and message handler.
442 */
443void nss_gre_unregister_if(uint32_t if_num)
444{
445 struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.gre_handler_id];
446 int i;
447
448 nss_assert(nss_ctx);
449 nss_assert(nss_is_dynamic_interface(if_num));
450
451 nss_ctx->subsys_dp_register[if_num].ndev = NULL;
452 nss_ctx->subsys_dp_register[if_num].cb = NULL;
453 nss_ctx->subsys_dp_register[if_num].app_data = NULL;
454 nss_ctx->subsys_dp_register[if_num].features = 0;
455
456 nss_top_main.gre_msg_callback = NULL;
457
458 nss_core_unregister_handler(if_num);
459
460 spin_lock_bh(&nss_gre_stats_lock);
461 for (i = 0; i < NSS_GRE_MAX_DEBUG_SESSION_STATS; i++) {
462 if (session_debug_stats[i].if_num == if_num) {
463 memset(&session_debug_stats[i], 0, sizeof(struct nss_stats_gre_session_debug));
464 break;
465 }
466 }
467 spin_unlock_bh(&nss_gre_stats_lock);
468}
469EXPORT_SYMBOL(nss_gre_unregister_if);
470
471/*
472 * nss_get_gre_context()
473 */
474struct nss_ctx_instance *nss_gre_get_context()
475{
476 return (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.gre_handler_id];
477}
478EXPORT_SYMBOL(nss_gre_get_context);
479
/*
 * nss_gre_msg_init()
 *	Initialize nss_gre msg.
 *
 * Thin wrapper that fills the embedded common message header: interface
 * number, message type, payload length, completion callback and app_data.
 */
void nss_gre_msg_init(struct nss_gre_msg *ncm, uint16_t if_num, uint32_t type, uint32_t len, void *cb, void *app_data)
{
	nss_cmn_msg_init(&ncm->cm, if_num, type, len, cb, app_data);
}
EXPORT_SYMBOL(nss_gre_msg_init);
489
490/*
491 * nss_gre_register_handler()
492 * debugfs stats msg handler received on static gre interface
493 */
494void nss_gre_register_handler(void)
495{
496 nss_info("nss_gre_register_handler");
497 sema_init(&nss_gre_pvt.sem, 1);
498 init_completion(&nss_gre_pvt.complete);
499 nss_core_register_handler(NSS_GRE_INTERFACE, nss_gre_msg_handler, NULL);
500}