/*
 * sfe.c
 *	API for shortcut forwarding engine.
 *
 * Copyright (c) 2015,2016, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021,2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/module.h>
#include <linux/version.h>
#include <linux/sysfs.h>
#include <linux/skbuff.h>
#include <net/addrconf.h>
#include <linux/inetdevice.h>
#include <net/pkt_sched.h>

#include "sfe_debug.h"
#include "sfe_api.h"
#include "sfe.h"
#include "sfe_pppoe.h"

extern int max_ipv4_conn;
extern int max_ipv6_conn;

#define SFE_MESSAGE_VERSION 0x1

/*
 * Note the argument order: source first, destination second.
 */
#define sfe_ipv6_addr_copy(src, dest) memcpy((void *)(dest), (void *)(src), 16)

#define sfe_ipv4_stopped(CTX) (rcu_dereference((CTX)->ipv4_stats_sync_cb) == NULL)
#define sfe_ipv6_stopped(CTX) (rcu_dereference((CTX)->ipv6_stats_sync_cb) == NULL)

typedef enum sfe_exception {
	SFE_EXCEPTION_IPV4_MSG_UNKNOW,
	SFE_EXCEPTION_IPV6_MSG_UNKNOW,
	SFE_EXCEPTION_CONNECTION_INVALID,
	SFE_EXCEPTION_NOT_SUPPORT_BRIDGE,
	SFE_EXCEPTION_TCP_INVALID,
	SFE_EXCEPTION_PROTOCOL_NOT_SUPPORT,
	SFE_EXCEPTION_SRC_DEV_NOT_L3,
	SFE_EXCEPTION_DEST_DEV_NOT_L3,
	SFE_EXCEPTION_CREATE_FAILED,
	SFE_EXCEPTION_ENQUEUE_FAILED,
	SFE_EXCEPTION_NOT_SUPPORT_6RD,
	SFE_EXCEPTION_NO_SYNC_CB,
	SFE_EXCEPTION_MAX
} sfe_exception_t;

static char *sfe_exception_events_string[SFE_EXCEPTION_MAX] = {
	"IPV4_MSG_UNKNOW",
	"IPV6_MSG_UNKNOW",
	"CONNECTION_INVALID",
	"NOT_SUPPORT_BRIDGE",
	"TCP_INVALID",
	"PROTOCOL_NOT_SUPPORT",
	"SRC_DEV_NOT_L3",
	"DEST_DEV_NOT_L3",
	"CREATE_FAILED",
	"ENQUEUE_FAILED",
	"NOT_SUPPORT_6RD",
	"NO_SYNC_CB"
};

/*
 * Message type of a queued response message
 */
typedef enum {
	SFE_MSG_TYPE_IPV4,
	SFE_MSG_TYPE_IPV6
} sfe_msg_types_t;

/*
 * Queued response message; sent back to the caller from a workqueue
 */
struct sfe_response_msg {
	struct list_head node;
	sfe_msg_types_t type;
	void *msg[0];			/* Embedded response message (flexible array) */
};

/*
 * SFE context instance, private to SFE
 */
struct sfe_ctx_instance_internal {
	struct sfe_ctx_instance base;	/* Exported SFE context, public to users of SFE */

	/*
	 * Control state.
	 */
	struct kobject *sys_sfe;	/* Sysfs linkage */

	struct list_head msg_queue;	/* Response message queue */
	spinlock_t lock;		/* Lock to protect the message queue */

	struct work_struct work;	/* Work to send response messages back to callers */

	sfe_ipv4_msg_callback_t __rcu ipv4_stats_sync_cb;	/* Callback to sync ipv4 statistics */
	void *ipv4_stats_sync_data;	/* Argument for ipv4_stats_sync_cb */

	sfe_ipv6_msg_callback_t __rcu ipv6_stats_sync_cb;	/* Callback to sync ipv6 statistics */
	void *ipv6_stats_sync_data;	/* Argument for ipv6_stats_sync_cb */

	u32 exceptions[SFE_EXCEPTION_MAX];	/* Exception counters */
};

static struct sfe_ctx_instance_internal __sfe_ctx;

/*
 * Convert public SFE context to internal context
 */
#define SFE_CTX_TO_PRIVATE(base) (struct sfe_ctx_instance_internal *)(base)
/*
 * Convert internal SFE context to public context
 */
#define SFE_CTX_TO_PUBLIC(intrv) (struct sfe_ctx_instance *)(intrv)

/*
 * sfe_incr_exceptions()
 *	Increase an exception counter.
 *
 * TODO: Merge sfe_ctx stats to ipv4 and ipv6 percpu stats.
 */
static inline void sfe_incr_exceptions(sfe_exception_t except)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);
	sfe_ctx->exceptions[except]++;
	spin_unlock_bh(&sfe_ctx->lock);
}

/*
 * sfe_dev_is_layer_3_interface()
 *	Check whether a network device is an IPv4 or IPv6 layer-3 interface
 *
 * @param dev network device to check
 * @param check_v4 true to check for an IPv4 layer-3 interface (one with an
 *	IPv4 address), false to check for an IPv6 layer-3 interface (one with
 *	an IPv6 address)
 */
inline bool sfe_dev_is_layer_3_interface(struct net_device *dev, bool check_v4)
{
	struct in_device *in4_dev;
	struct inet6_dev *in6_dev;

	BUG_ON(!dev);

	if (likely(check_v4)) {
		/*
		 * Does our input device support IPv4 processing?
		 */
		in4_dev = (struct in_device *)dev->ip_ptr;
		if (unlikely(!in4_dev)) {
			return false;
		}

		/*
		 * Does it have an IPv4 address? If it doesn't then we can't do anything
		 * interesting here!
		 */
		if (unlikely(!in4_dev->ifa_list)) {
			return false;
		}

		return true;
	}

	/*
	 * Does our input device support IPv6 processing?
	 */
	in6_dev = (struct inet6_dev *)dev->ip6_ptr;
	if (unlikely(!in6_dev)) {
		return false;
	}

	/*
	 * Does it have an IPv6 address? If it doesn't then we can't do anything
	 * interesting here!
	 */
	if (unlikely(list_empty(&in6_dev->addr_list))) {
		return false;
	}

	return true;
}

/*
 * sfe_clean_response_msg_by_type()
 *	Clean response messages from the queue when ECM exits
 *
 * @param sfe_ctx SFE context
 * @param msg_type message type, ipv4 or ipv6
 */
static void sfe_clean_response_msg_by_type(struct sfe_ctx_instance_internal *sfe_ctx, sfe_msg_types_t msg_type)
{
	struct sfe_response_msg *response, *tmp;

	if (!sfe_ctx) {
		return;
	}

	spin_lock_bh(&sfe_ctx->lock);
	list_for_each_entry_safe(response, tmp, &sfe_ctx->msg_queue, node) {
		if (response->type == msg_type) {
			list_del(&response->node);
			/*
			 * Free response message
			 */
			kfree(response);
		}
	}
	spin_unlock_bh(&sfe_ctx->lock);
}

/*
 * sfe_process_response_msg()
 *	Send all pending response messages to ECM by calling the callback function included in each message
 *
 * @param work work structure
 */
static void sfe_process_response_msg(struct work_struct *work)
{
	struct sfe_ctx_instance_internal *sfe_ctx = container_of(work, struct sfe_ctx_instance_internal, work);
	struct sfe_response_msg *response;

	spin_lock_bh(&sfe_ctx->lock);
	while ((response = list_first_entry_or_null(&sfe_ctx->msg_queue, struct sfe_response_msg, node))) {
		list_del(&response->node);
		spin_unlock_bh(&sfe_ctx->lock);
		rcu_read_lock();

		/*
		 * Send response message back to caller
		 */
		if ((response->type == SFE_MSG_TYPE_IPV4) && !sfe_ipv4_stopped(sfe_ctx)) {
			struct sfe_ipv4_msg *msg = (struct sfe_ipv4_msg *)response->msg;
			sfe_ipv4_msg_callback_t callback = (sfe_ipv4_msg_callback_t)msg->cm.cb;
			if (callback) {
				callback((void *)msg->cm.app_data, msg);
			}
		} else if ((response->type == SFE_MSG_TYPE_IPV6) && !sfe_ipv6_stopped(sfe_ctx)) {
			struct sfe_ipv6_msg *msg = (struct sfe_ipv6_msg *)response->msg;
			sfe_ipv6_msg_callback_t callback = (sfe_ipv6_msg_callback_t)msg->cm.cb;
			if (callback) {
				callback((void *)msg->cm.app_data, msg);
			}
		}

		rcu_read_unlock();
		/*
		 * Free response message
		 */
		kfree(response);
		spin_lock_bh(&sfe_ctx->lock);
	}
	spin_unlock_bh(&sfe_ctx->lock);
}

/*
 * sfe_alloc_response_msg()
 *	Allocate and construct a new response message
 *
 * @param type message type
 * @param msg used to construct the response message if not NULL
 *
 * @return the new response message on success, NULL on failure
 */
static struct sfe_response_msg *
sfe_alloc_response_msg(sfe_msg_types_t type, void *msg)
{
	struct sfe_response_msg *response;
	int size;

	switch (type) {
	case SFE_MSG_TYPE_IPV4:
		size = sizeof(struct sfe_ipv4_msg);
		break;
	case SFE_MSG_TYPE_IPV6:
		size = sizeof(struct sfe_ipv6_msg);
		break;
	default:
		DEBUG_ERROR("message type %d not supported\n", type);
		return NULL;
	}

	response = kzalloc(sizeof(struct sfe_response_msg) + size, GFP_ATOMIC);
	if (!response) {
		DEBUG_ERROR("allocate memory failed\n");
		return NULL;
	}

	response->type = type;

	if (msg) {
		memcpy(response->msg, msg, size);
	}

	return response;
}

/*
 * sfe_enqueue_msg()
 *	Queue a response message and kick the response worker
 *
 * @param sfe_ctx SFE context
 * @param response response message to be queued
 */
static inline void sfe_enqueue_msg(struct sfe_ctx_instance_internal *sfe_ctx, struct sfe_response_msg *response)
{
	spin_lock_bh(&sfe_ctx->lock);
	list_add_tail(&response->node, &sfe_ctx->msg_queue);
	spin_unlock_bh(&sfe_ctx->lock);

	schedule_work(&sfe_ctx->work);
}

/*
 * sfe_cmn_msg_init()
 *	Initialize the common message structure.
 *
 * @param ncm message to init
 * @param if_num interface number related with this message
 * @param type message type
 * @param len message length
 * @param cb callback function to process the response to this message
 * @param app_data argument for the above callback function
 */
static void sfe_cmn_msg_init(struct sfe_cmn_msg *ncm, u16 if_num, u32 type, u32 len, void *cb, void *app_data)
{
	ncm->interface = if_num;
	ncm->version = SFE_MESSAGE_VERSION;
	ncm->type = type;
	ncm->len = len;
	ncm->cb = (sfe_ptr_t)cb;
	ncm->app_data = (sfe_ptr_t)app_data;
}
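
/*
 * Example (illustrative sketch only, not part of the driver): callers do not
 * use sfe_cmn_msg_init() directly; the exported wrappers below fill the same
 * common header. Hypothetical names (my_response_cb, my_app_data) are shown
 * for a rule-destroy request:
 *
 *	struct sfe_ipv4_msg nim;
 *
 *	sfe_ipv4_msg_init(&nim, 0, SFE_TX_DESTROY_RULE_MSG,
 *			  sizeof(nim.msg.rule_destroy),
 *			  my_response_cb, my_app_data);
 *
 * After this call nim.cm.version == SFE_MESSAGE_VERSION and the callback and
 * its argument are stashed in the common header for the response path.
 */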

/*
 * sfe_ipv4_stats_sync_callback()
 *	Synchronize a connection's state.
 *
 * @param sis SFE statistics from the SFE core engine
 */
static void sfe_ipv4_stats_sync_callback(struct sfe_connection_sync *sis)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
	struct sfe_ipv4_msg msg;
	struct sfe_ipv4_conn_sync *sync_msg;
	sfe_ipv4_msg_callback_t sync_cb;

	rcu_read_lock();
	sync_cb = rcu_dereference(sfe_ctx->ipv4_stats_sync_cb);
	if (!sync_cb) {
		rcu_read_unlock();
		sfe_incr_exceptions(SFE_EXCEPTION_NO_SYNC_CB);
		return;
	}

	sync_msg = &msg.msg.conn_stats;

	memset(&msg, 0, sizeof(msg));
	sfe_cmn_msg_init(&msg.cm, 0, SFE_RX_CONN_STATS_SYNC_MSG,
			sizeof(struct sfe_ipv4_conn_sync), NULL, NULL);

	/*
	 * Fill connection specific information
	 */
	sync_msg->protocol = (u8)sis->protocol;
	sync_msg->flow_ip = sis->src_ip.ip;
	sync_msg->flow_ip_xlate = sis->src_ip_xlate.ip;
	sync_msg->flow_ident = sis->src_port;
	sync_msg->flow_ident_xlate = sis->src_port_xlate;

	sync_msg->return_ip = sis->dest_ip.ip;
	sync_msg->return_ip_xlate = sis->dest_ip_xlate.ip;
	sync_msg->return_ident = sis->dest_port;
	sync_msg->return_ident_xlate = sis->dest_port_xlate;

	/*
	 * Fill TCP protocol specific information
	 */
	if (sis->protocol == IPPROTO_TCP) {
		sync_msg->flow_max_window = sis->src_td_max_window;
		sync_msg->flow_end = sis->src_td_end;
		sync_msg->flow_max_end = sis->src_td_max_end;

		sync_msg->return_max_window = sis->dest_td_max_window;
		sync_msg->return_end = sis->dest_td_end;
		sync_msg->return_max_end = sis->dest_td_max_end;
	}

	/*
	 * Fill statistics information
	 */
	sync_msg->flow_rx_packet_count = sis->src_new_packet_count;
	sync_msg->flow_rx_byte_count = sis->src_new_byte_count;
	sync_msg->flow_tx_packet_count = sis->dest_new_packet_count;
	sync_msg->flow_tx_byte_count = sis->dest_new_byte_count;

	sync_msg->return_rx_packet_count = sis->dest_new_packet_count;
	sync_msg->return_rx_byte_count = sis->dest_new_byte_count;
	sync_msg->return_tx_packet_count = sis->src_new_packet_count;
	sync_msg->return_tx_byte_count = sis->src_new_byte_count;

	/*
	 * Fill expiration time to extend, in units of msec
	 */
	sync_msg->inc_ticks = (((u32)sis->delta_jiffies) * MSEC_PER_SEC)/HZ;

	/*
	 * Fill other information
	 */
	switch (sis->reason) {
	case SFE_SYNC_REASON_DESTROY:
		sync_msg->reason = SFE_RULE_SYNC_REASON_DESTROY;
		break;
	case SFE_SYNC_REASON_FLUSH:
		sync_msg->reason = SFE_RULE_SYNC_REASON_FLUSH;
		break;
	default:
		sync_msg->reason = SFE_RULE_SYNC_REASON_STATS;
		break;
	}

	/*
	 * The SFE sync call is executed in a timer context, so we can redirect it to ECM directly.
	 */
	sync_cb(sfe_ctx->ipv4_stats_sync_data, &msg);
	rcu_read_unlock();
}
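
/*
 * Example (illustrative sketch): the shape of a client-side callback that can
 * be registered via sfe_ipv4_notify_register() to consume these sync messages.
 * The "my_" names are hypothetical and not part of this driver.
 *
 *	static void my_ipv4_sync_cb(void *app_data, struct sfe_ipv4_msg *msg)
 *	{
 *		struct sfe_ipv4_conn_sync *sync = &msg->msg.conn_stats;
 *
 *		// Refresh the client's connection state and timeout using
 *		// sync->flow_rx_packet_count, sync->inc_ticks, sync->reason, ...
 *	}
 */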

/*
 * sfe_recv_parse_l2()
 *	Parse L2 headers
 *
 * Returns true if the packet is parsed and false otherwise.
 */
static bool sfe_recv_parse_l2(struct net_device *dev, struct sk_buff *skb, struct sfe_l2_info *l2_info)
{
	/*
	 * l2_hdr_offset will not change as we parse more L2.5 headers
	 * TODO: Move from storing offsets to storing pointers
	 */
	sfe_l2_hdr_offset_set(l2_info, ((skb->data - ETH_HLEN) - skb->head));

	/*
	 * TODO: Add VLAN parsing here.
	 * Add VLAN fields to the l2_info structure and update l2_hdr_size.
	 * In case of exception, use l2_hdr_size to move the data pointer back.
	 */

	/*
	 * PPPoE parsing
	 */
	if (unlikely(htons(ETH_P_PPP_SES) != skb->protocol)) {
		return false;
	}

	/*
	 * Parse only PPPoE session packets.
	 * skb->data is pointing to the PPPoE header here.
	 */
	if (!sfe_pppoe_validate_hdr(skb, l2_info)) {
		/*
		 * For an exception from PPPoE, return from here without modifying skb->data.
		 * This includes non-IPv4/v6 cases as well.
		 */
		return false;
	}

	sfe_l2_parse_flag_set(l2_info, SFE_L2_PARSE_FLAGS_PPPOE_INGRESS);
	sfe_l2_pppoe_hdr_offset_set(l2_info, (skb->data - skb->head));
	sfe_l2_hdr_size_set(l2_info, SFE_PPPOE_HEADER_SIZE);

	/*
	 * Pull by the L2 header size, accounting for all L2.5 headers
	 */
	__skb_pull(skb, sfe_l2_hdr_size_get(l2_info));
	return true;
}

/*
 * sfe_create_ipv4_rule_msg()
 *	Convert a create-rule message from ECM format to SFE format and process it
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv4 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_create_ipv4_rule_msg(struct sfe_ctx_instance_internal *sfe_ctx, struct sfe_ipv4_msg *msg)
{
	struct net_device *src_dev = NULL;
	struct net_device *dest_dev = NULL;
	struct sfe_response_msg *response;
	enum sfe_cmn_response ret = SFE_CMN_RESPONSE_ACK;

	response = sfe_alloc_response_msg(SFE_MSG_TYPE_IPV4, msg);
	if (!response) {
		sfe_incr_exceptions(SFE_EXCEPTION_ENQUEUE_FAILED);
		return SFE_TX_FAILURE_QUEUE;
	}

	if (!(msg->msg.rule_create.valid_flags & SFE_RULE_CREATE_CONN_VALID)) {
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CONNECTION_INVALID);
		goto failed_ret;
	}

	switch (msg->msg.rule_create.tuple.protocol) {
	case IPPROTO_TCP:
		if (!(msg->msg.rule_create.valid_flags & SFE_RULE_CREATE_TCP_VALID)) {
			ret = SFE_CMN_RESPONSE_EMSG;
			sfe_incr_exceptions(SFE_EXCEPTION_TCP_INVALID);
			goto failed_ret;
		}
		/* fall through */
	case IPPROTO_UDP:
		break;

	default:
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_PROTOCOL_NOT_SUPPORT);
		goto failed_ret;
	}

	/*
	 * Not supporting bridged flows for now
	 */
	if (msg->msg.rule_create.rule_flags & SFE_RULE_CREATE_FLAG_BRIDGE_FLOW) {
		ret = SFE_CMN_RESPONSE_EINTERFACE;
		sfe_incr_exceptions(SFE_EXCEPTION_NOT_SUPPORT_BRIDGE);
		goto failed_ret;
	}

	/*
	 * Does our input device support IP processing?
	 */
	src_dev = dev_get_by_index(&init_net, msg->msg.rule_create.conn_rule.flow_top_interface_num);
	if (!src_dev || !sfe_dev_is_layer_3_interface(src_dev, true)) {
		ret = SFE_CMN_RESPONSE_EINTERFACE;
		sfe_incr_exceptions(SFE_EXCEPTION_SRC_DEV_NOT_L3);
		goto failed_ret;
	}

	/*
	 * Does our output device support IP processing?
	 */
	dest_dev = dev_get_by_index(&init_net, msg->msg.rule_create.conn_rule.return_top_interface_num);
	if (!dest_dev || !sfe_dev_is_layer_3_interface(dest_dev, true)) {
		ret = SFE_CMN_RESPONSE_EINTERFACE;
		sfe_incr_exceptions(SFE_EXCEPTION_DEST_DEV_NOT_L3);
		goto failed_ret;
	}

	if (!sfe_ipv4_create_rule(&msg->msg.rule_create)) {
		/* Success */
		ret = SFE_CMN_RESPONSE_ACK;
	} else {
		/* Failed */
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CREATE_FAILED);
	}

	/*
	 * Fall through
	 */
failed_ret:
	if (src_dev) {
		dev_put(src_dev);
	}

	if (dest_dev) {
		dev_put(dest_dev);
	}

	/*
	 * Try to queue the response message
	 */
	((struct sfe_ipv4_msg *)response->msg)->cm.response = msg->cm.response = ret;
	sfe_enqueue_msg(sfe_ctx, response);

	return SFE_TX_SUCCESS;
}

/*
 * sfe_destroy_ipv4_rule_msg()
 *	Convert a destroy-rule message from ECM format to SFE format and process it
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv4 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_destroy_ipv4_rule_msg(struct sfe_ctx_instance_internal *sfe_ctx, struct sfe_ipv4_msg *msg)
{
	struct sfe_response_msg *response;

	response = sfe_alloc_response_msg(SFE_MSG_TYPE_IPV4, msg);
	if (!response) {
		sfe_incr_exceptions(SFE_EXCEPTION_ENQUEUE_FAILED);
		return SFE_TX_FAILURE_QUEUE;
	}

	sfe_ipv4_destroy_rule(&msg->msg.rule_destroy);

	/*
	 * Try to queue the response message
	 */
	((struct sfe_ipv4_msg *)response->msg)->cm.response = msg->cm.response = SFE_CMN_RESPONSE_ACK;
	sfe_enqueue_msg(sfe_ctx, response);

	return SFE_TX_SUCCESS;
}

/*
 * sfe_ipv4_tx()
 *	Transmit an IPv4 message to the SFE
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv4 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_ipv4_tx(struct sfe_ctx_instance *sfe_ctx, struct sfe_ipv4_msg *msg)
{
	switch (msg->cm.type) {
	case SFE_TX_CREATE_RULE_MSG:
		return sfe_create_ipv4_rule_msg(SFE_CTX_TO_PRIVATE(sfe_ctx), msg);
	case SFE_TX_DESTROY_RULE_MSG:
		return sfe_destroy_ipv4_rule_msg(SFE_CTX_TO_PRIVATE(sfe_ctx), msg);
	default:
		sfe_incr_exceptions(SFE_EXCEPTION_IPV4_MSG_UNKNOW);
		return SFE_TX_FAILURE_NOT_ENABLED;
	}
}
EXPORT_SYMBOL(sfe_ipv4_tx);

/*
 * sfe_ipv4_msg_init()
 *	Initialize an IPv4 message.
 */
void sfe_ipv4_msg_init(struct sfe_ipv4_msg *nim, u16 if_num, u32 type, u32 len,
		       sfe_ipv4_msg_callback_t cb, void *app_data)
{
	sfe_cmn_msg_init(&nim->cm, if_num, type, len, (void *)cb, app_data);
}
EXPORT_SYMBOL(sfe_ipv4_msg_init);
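
/*
 * Example (illustrative sketch, hypothetical values): how a client such as
 * ECM might build and submit an IPv4 create-rule request. Only the fields
 * checked in sfe_create_ipv4_rule_msg() are shown; a real rule carries many
 * more. Here ctx is the pointer returned by sfe_ipv4_notify_register(), and
 * my_response_cb, my_app_data, wan_ifindex and lan_ifindex are hypothetical.
 *
 *	struct sfe_ipv4_msg nim;
 *
 *	sfe_ipv4_msg_init(&nim, 0, SFE_TX_CREATE_RULE_MSG,
 *			  sizeof(nim.msg.rule_create),
 *			  my_response_cb, my_app_data);
 *	nim.msg.rule_create.valid_flags |= SFE_RULE_CREATE_CONN_VALID;
 *	nim.msg.rule_create.tuple.protocol = IPPROTO_UDP;
 *	nim.msg.rule_create.conn_rule.flow_top_interface_num = wan_ifindex;
 *	nim.msg.rule_create.conn_rule.return_top_interface_num = lan_ifindex;
 *
 *	if (sfe_ipv4_tx(ctx, &nim) != SFE_TX_SUCCESS) {
 *		// Queueing failed; otherwise the response arrives
 *		// asynchronously through my_response_cb.
 *	}
 */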

/*
 * sfe_ipv4_max_conn_count()
 *	Return the maximum number of IPv4 connections that SFE supports
 */
int sfe_ipv4_max_conn_count(void)
{
	return max_ipv4_conn;
}
EXPORT_SYMBOL(sfe_ipv4_max_conn_count);

/*
 * sfe_ipv4_notify_register()
 *	Register a notifier callback for IPv4 messages from SFE
 *
 * @param cb The callback pointer
 * @param app_data The application context for this message
 *
 * @return struct sfe_ctx_instance * The SFE context
 */
struct sfe_ctx_instance *sfe_ipv4_notify_register(sfe_ipv4_msg_callback_t cb, void *app_data)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);
	/*
	 * Hook the shortcut sync callback.
	 */
	if (cb && !sfe_ctx->ipv4_stats_sync_cb) {
		sfe_ipv4_register_sync_rule_callback(sfe_ipv4_stats_sync_callback);
	}

	rcu_assign_pointer(sfe_ctx->ipv4_stats_sync_cb, cb);
	sfe_ctx->ipv4_stats_sync_data = app_data;

	spin_unlock_bh(&sfe_ctx->lock);

	return SFE_CTX_TO_PUBLIC(sfe_ctx);
}
EXPORT_SYMBOL(sfe_ipv4_notify_register);
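
/*
 * Example (illustrative sketch): typical registration pairing. A callback of
 * the required shape is sketched after sfe_ipv4_stats_sync_callback() above;
 * the names are hypothetical.
 *
 *	struct sfe_ctx_instance *ctx;
 *
 *	ctx = sfe_ipv4_notify_register(my_ipv4_sync_cb, my_app_data);
 *	...
 *	sfe_ipv4_notify_unregister();
 */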

/*
 * sfe_ipv4_notify_unregister()
 *	Unregister the notifier callback for IPv4 messages from SFE
 */
void sfe_ipv4_notify_unregister(void)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);
	/*
	 * Unregister our sync callback.
	 */
	if (sfe_ctx->ipv4_stats_sync_cb) {
		sfe_ipv4_register_sync_rule_callback(NULL);
		rcu_assign_pointer(sfe_ctx->ipv4_stats_sync_cb, NULL);
		sfe_ctx->ipv4_stats_sync_data = NULL;
	}
	spin_unlock_bh(&sfe_ctx->lock);

	sfe_clean_response_msg_by_type(sfe_ctx, SFE_MSG_TYPE_IPV4);
}
EXPORT_SYMBOL(sfe_ipv4_notify_unregister);

/*
 * sfe_ipv6_stats_sync_callback()
 *	Synchronize a connection's state.
 */
static void sfe_ipv6_stats_sync_callback(struct sfe_connection_sync *sis)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
	struct sfe_ipv6_msg msg;
	struct sfe_ipv6_conn_sync *sync_msg;
	sfe_ipv6_msg_callback_t sync_cb;

	rcu_read_lock();
	sync_cb = rcu_dereference(sfe_ctx->ipv6_stats_sync_cb);
	if (!sync_cb) {
		rcu_read_unlock();
		sfe_incr_exceptions(SFE_EXCEPTION_NO_SYNC_CB);
		return;
	}

	sync_msg = &msg.msg.conn_stats;

	memset(&msg, 0, sizeof(msg));
	sfe_cmn_msg_init(&msg.cm, 0, SFE_RX_CONN_STATS_SYNC_MSG,
			sizeof(struct sfe_ipv6_conn_sync), NULL, NULL);

	/*
	 * Fill connection specific information
	 */
	sync_msg->protocol = (u8)sis->protocol;
	sfe_ipv6_addr_copy(sis->src_ip.ip6, sync_msg->flow_ip);
	sync_msg->flow_ident = sis->src_port;

	sfe_ipv6_addr_copy(sis->dest_ip.ip6, sync_msg->return_ip);
	sync_msg->return_ident = sis->dest_port;

	/*
	 * Fill TCP protocol specific information
	 */
	if (sis->protocol == IPPROTO_TCP) {
		sync_msg->flow_max_window = sis->src_td_max_window;
		sync_msg->flow_end = sis->src_td_end;
		sync_msg->flow_max_end = sis->src_td_max_end;

		sync_msg->return_max_window = sis->dest_td_max_window;
		sync_msg->return_end = sis->dest_td_end;
		sync_msg->return_max_end = sis->dest_td_max_end;
	}

	/*
	 * Fill statistics information
	 */
	sync_msg->flow_rx_packet_count = sis->src_new_packet_count;
	sync_msg->flow_rx_byte_count = sis->src_new_byte_count;
	sync_msg->flow_tx_packet_count = sis->dest_new_packet_count;
	sync_msg->flow_tx_byte_count = sis->dest_new_byte_count;

	sync_msg->return_rx_packet_count = sis->dest_new_packet_count;
	sync_msg->return_rx_byte_count = sis->dest_new_byte_count;
	sync_msg->return_tx_packet_count = sis->src_new_packet_count;
	sync_msg->return_tx_byte_count = sis->src_new_byte_count;

	/*
	 * Fill expiration time to extend, in units of msec
	 */
	sync_msg->inc_ticks = (((u32)sis->delta_jiffies) * MSEC_PER_SEC)/HZ;

	/*
	 * Fill other information
	 */
	switch (sis->reason) {
	case SFE_SYNC_REASON_DESTROY:
		sync_msg->reason = SFE_RULE_SYNC_REASON_DESTROY;
		break;
	case SFE_SYNC_REASON_FLUSH:
		sync_msg->reason = SFE_RULE_SYNC_REASON_FLUSH;
		break;
	default:
		sync_msg->reason = SFE_RULE_SYNC_REASON_STATS;
		break;
	}

	/*
	 * The SFE sync call is executed in a timer context, so we can redirect it to ECM directly.
	 */
	sync_cb(sfe_ctx->ipv6_stats_sync_data, &msg);
	rcu_read_unlock();
}

/*
 * sfe_create_ipv6_rule_msg()
 *	Convert a create-rule message from ECM format to SFE format and process it
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv6 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_create_ipv6_rule_msg(struct sfe_ctx_instance_internal *sfe_ctx, struct sfe_ipv6_msg *msg)
{
	struct net_device *src_dev = NULL;
	struct net_device *dest_dev = NULL;
	struct sfe_response_msg *response;
	enum sfe_cmn_response ret = SFE_CMN_RESPONSE_ACK;

	response = sfe_alloc_response_msg(SFE_MSG_TYPE_IPV6, msg);
	if (!response) {
		sfe_incr_exceptions(SFE_EXCEPTION_ENQUEUE_FAILED);
		return SFE_TX_FAILURE_QUEUE;
	}

	if (!(msg->msg.rule_create.valid_flags & SFE_RULE_CREATE_CONN_VALID)) {
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CONNECTION_INVALID);
		goto failed_ret;
	}

	/*
	 * Not supporting bridged flows for now
	 */
	if (msg->msg.rule_create.rule_flags & SFE_RULE_CREATE_FLAG_BRIDGE_FLOW) {
		ret = SFE_CMN_RESPONSE_EINTERFACE;
		sfe_incr_exceptions(SFE_EXCEPTION_NOT_SUPPORT_BRIDGE);
		goto failed_ret;
	}

	switch (msg->msg.rule_create.tuple.protocol) {
	case IPPROTO_TCP:
		if (!(msg->msg.rule_create.valid_flags & SFE_RULE_CREATE_TCP_VALID)) {
			ret = SFE_CMN_RESPONSE_EMSG;
			sfe_incr_exceptions(SFE_EXCEPTION_TCP_INVALID);
			goto failed_ret;
		}
		break;

	case IPPROTO_UDP:
		break;

	default:
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_PROTOCOL_NOT_SUPPORT);
		goto failed_ret;
	}

	/*
	 * Does our input device support IP processing?
	 */
	src_dev = dev_get_by_index(&init_net, msg->msg.rule_create.conn_rule.flow_top_interface_num);
	if (!src_dev || !sfe_dev_is_layer_3_interface(src_dev, false)) {
		ret = SFE_CMN_RESPONSE_EINTERFACE;
		sfe_incr_exceptions(SFE_EXCEPTION_SRC_DEV_NOT_L3);
		goto failed_ret;
	}

	/*
	 * Does our output device support IP processing?
	 */
	dest_dev = dev_get_by_index(&init_net, msg->msg.rule_create.conn_rule.return_top_interface_num);
	if (!dest_dev || !sfe_dev_is_layer_3_interface(dest_dev, false)) {
		ret = SFE_CMN_RESPONSE_EINTERFACE;
		sfe_incr_exceptions(SFE_EXCEPTION_DEST_DEV_NOT_L3);
		goto failed_ret;
	}

	if (!sfe_ipv6_create_rule(&msg->msg.rule_create)) {
		/* Success */
		ret = SFE_CMN_RESPONSE_ACK;
	} else {
		/* Failed */
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CREATE_FAILED);
	}

	/*
	 * Fall through
	 */
failed_ret:
	if (src_dev) {
		dev_put(src_dev);
	}

	if (dest_dev) {
		dev_put(dest_dev);
	}

	/*
	 * Try to queue the response message
	 */
	((struct sfe_ipv6_msg *)response->msg)->cm.response = msg->cm.response = ret;
	sfe_enqueue_msg(sfe_ctx, response);

	return SFE_TX_SUCCESS;
}

/*
 * sfe_destroy_ipv6_rule_msg()
 *	Convert a destroy-rule message from ECM format to SFE format and process it
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv6 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_destroy_ipv6_rule_msg(struct sfe_ctx_instance_internal *sfe_ctx, struct sfe_ipv6_msg *msg)
{
	struct sfe_response_msg *response;

	response = sfe_alloc_response_msg(SFE_MSG_TYPE_IPV6, msg);
	if (!response) {
		sfe_incr_exceptions(SFE_EXCEPTION_ENQUEUE_FAILED);
		return SFE_TX_FAILURE_QUEUE;
	}

	sfe_ipv6_destroy_rule(&msg->msg.rule_destroy);

	/*
	 * Try to queue the response message
	 */
	((struct sfe_ipv6_msg *)response->msg)->cm.response = msg->cm.response = SFE_CMN_RESPONSE_ACK;
	sfe_enqueue_msg(sfe_ctx, response);

	return SFE_TX_SUCCESS;
}

/*
 * sfe_ipv6_tx()
 *	Transmit an IPv6 message to the SFE
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv6 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_ipv6_tx(struct sfe_ctx_instance *sfe_ctx, struct sfe_ipv6_msg *msg)
{
	switch (msg->cm.type) {
	case SFE_TX_CREATE_RULE_MSG:
		return sfe_create_ipv6_rule_msg(SFE_CTX_TO_PRIVATE(sfe_ctx), msg);
	case SFE_TX_DESTROY_RULE_MSG:
		return sfe_destroy_ipv6_rule_msg(SFE_CTX_TO_PRIVATE(sfe_ctx), msg);
	default:
		sfe_incr_exceptions(SFE_EXCEPTION_IPV6_MSG_UNKNOW);
		return SFE_TX_FAILURE_NOT_ENABLED;
	}
}
EXPORT_SYMBOL(sfe_ipv6_tx);

/*
 * sfe_ipv6_msg_init()
 *	Initialize an IPv6 message.
 */
void sfe_ipv6_msg_init(struct sfe_ipv6_msg *nim, u16 if_num, u32 type, u32 len,
		       sfe_ipv6_msg_callback_t cb, void *app_data)
{
	sfe_cmn_msg_init(&nim->cm, if_num, type, len, (void *)cb, app_data);
}
EXPORT_SYMBOL(sfe_ipv6_msg_init);

/*
 * sfe_ipv6_max_conn_count()
 *	Return the maximum number of IPv6 connections that SFE supports
 */
int sfe_ipv6_max_conn_count(void)
{
	return max_ipv6_conn;
}
EXPORT_SYMBOL(sfe_ipv6_max_conn_count);

/*
 * sfe_ipv6_notify_register()
 *	Register a notifier callback for IPv6 messages from SFE
 *
 * @param cb The callback pointer
 * @param app_data The application context for this message
 *
 * @return struct sfe_ctx_instance * The SFE context
 */
struct sfe_ctx_instance *sfe_ipv6_notify_register(sfe_ipv6_msg_callback_t cb, void *app_data)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);
	/*
	 * Hook the shortcut sync callback.
	 */
	if (cb && !sfe_ctx->ipv6_stats_sync_cb) {
		sfe_ipv6_register_sync_rule_callback(sfe_ipv6_stats_sync_callback);
	}

	rcu_assign_pointer(sfe_ctx->ipv6_stats_sync_cb, cb);
	sfe_ctx->ipv6_stats_sync_data = app_data;

	spin_unlock_bh(&sfe_ctx->lock);

	return SFE_CTX_TO_PUBLIC(sfe_ctx);
}
EXPORT_SYMBOL(sfe_ipv6_notify_register);

/*
 * sfe_ipv6_notify_unregister()
 *	Unregister the notifier callback for IPv6 messages from SFE
 */
void sfe_ipv6_notify_unregister(void)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);
	/*
	 * Unregister our sync callback.
	 */
	if (sfe_ctx->ipv6_stats_sync_cb) {
		sfe_ipv6_register_sync_rule_callback(NULL);
		rcu_assign_pointer(sfe_ctx->ipv6_stats_sync_cb, NULL);
		sfe_ctx->ipv6_stats_sync_data = NULL;
	}
	spin_unlock_bh(&sfe_ctx->lock);

	sfe_clean_response_msg_by_type(sfe_ctx, SFE_MSG_TYPE_IPV6);
}
EXPORT_SYMBOL(sfe_ipv6_notify_unregister);

/*
 * sfe_tun6rd_tx()
 *	Transmit a tun6rd message to the SFE engine (not supported)
 */
sfe_tx_status_t sfe_tun6rd_tx(struct sfe_ctx_instance *sfe_ctx, struct sfe_tun6rd_msg *msg)
{
	sfe_incr_exceptions(SFE_EXCEPTION_NOT_SUPPORT_6RD);
	return SFE_TX_FAILURE_NOT_ENABLED;
}
EXPORT_SYMBOL(sfe_tun6rd_tx);

/*
 * sfe_tun6rd_msg_init()
 *	Initialize a sfe_tun6rd msg.
 */
void sfe_tun6rd_msg_init(struct sfe_tun6rd_msg *ncm, u16 if_num, u32 type, u32 len, void *cb, void *app_data)
{
	sfe_cmn_msg_init(&ncm->cm, if_num, type, len, cb, app_data);
}
EXPORT_SYMBOL(sfe_tun6rd_msg_init);

/*
 * sfe_recv()
 *	Handle packet receives.
 *
 * Returns 1 if the packet is forwarded or 0 if it isn't.
 */
int sfe_recv(struct sk_buff *skb)
{
	struct net_device *dev;
	struct sfe_l2_info l2_info;
	int ret;

	/*
	 * We know that for the vast majority of packets we need the transport
	 * layer header so we may as well start to fetch it now!
	 */
	prefetch(skb->data + 32);
	barrier();

	dev = skb->dev;

#ifdef CONFIG_NET_CLS_ACT
	/*
	 * If an ingress Qdisc is configured and the packet has not yet been
	 * processed by it, we cannot accelerate this packet.
	 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0))
	if (dev->ingress_queue && !(skb->tc_verd & TC_NCLS)) {
		return 0;
	}
#else
	if (rcu_access_pointer(dev->miniq_ingress) && !skb->tc_skip_classify) {
		return 0;
	}
#endif
#endif

	/*
	 * We're only interested in IPv4 and IPv6 packets.
	 */
	switch (ntohs(skb->protocol)) {
	case ETH_P_IP:
		return sfe_ipv4_recv(dev, skb, NULL, false);

	case ETH_P_IPV6:
		return sfe_ipv6_recv(dev, skb, NULL, false);

	default:
		break;
	}

	/*
	 * Parse the L2 headers to find the L3 protocol and the L2 header offset
	 */
	if (unlikely(!sfe_recv_parse_l2(dev, skb, &l2_info))) {
		DEBUG_TRACE("%px: Invalid L2.5 header format\n", skb);
		return 0;
	}

	/*
	 * l2_info.protocol is set by the L2 parser; for PPPoE it is filled in
	 * by sfe_pppoe_validate_hdr().
	 */
	if (likely(l2_info.protocol == ETH_P_IP)) {
		ret = sfe_ipv4_recv(dev, skb, &l2_info, false);
		if (unlikely(!ret)) {
			goto send_to_linux;
		}
		return ret;
	}

	if (likely(l2_info.protocol == ETH_P_IPV6)) {
		ret = sfe_ipv6_recv(dev, skb, &l2_info, false);
		if (likely(ret)) {
			return ret;
		}
	}

send_to_linux:
	/*
	 * Push the data pointer back before handing the packet to Linux if:
	 * a. there is an exception from IPv4/v6, or
	 * b. the next protocol is neither IPv4 nor IPv6.
	 */
	__skb_push(skb, sfe_l2_hdr_size_get(&l2_info));

	return 0;
}

/*
 * sfe_get_exceptions()
 *	Dump exception counters
 */
static ssize_t sfe_get_exceptions(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	int idx, len;
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);
	for (len = 0, idx = 0; idx < SFE_EXCEPTION_MAX; idx++) {
		if (sfe_ctx->exceptions[idx]) {
			len += snprintf(buf + len, (ssize_t)(PAGE_SIZE - len), "%s = %d\n", sfe_exception_events_string[idx], sfe_ctx->exceptions[idx]);
		}
	}
	spin_unlock_bh(&sfe_ctx->lock);

	return len;
}

/*
 * sysfs attributes.
 */
static const struct device_attribute sfe_exceptions_attr =
	__ATTR(exceptions, S_IRUGO, sfe_get_exceptions, NULL);
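
/*
 * Reading the counters (illustrative): each non-zero counter is printed on
 * its own line under /sys/sfe/exceptions in "<NAME> = <count>" form, e.g.
 * "CREATE_FAILED = 3" after three failed rule creations.
 */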

/*
 * sfe_init_if()
 *	Initialize the SFE interface: sysfs nodes, message queue and receive hook.
 */
int sfe_init_if(void)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
	int result = -1;

	/*
	 * Create /sys/sfe
	 */
	sfe_ctx->sys_sfe = kobject_create_and_add("sfe", NULL);
	if (!sfe_ctx->sys_sfe) {
		DEBUG_ERROR("failed to register sfe\n");
		goto exit1;
	}

	/*
	 * Create /sys/sfe/exceptions
	 */
	result = sysfs_create_file(sfe_ctx->sys_sfe, &sfe_exceptions_attr.attr);
	if (result) {
		DEBUG_ERROR("failed to register exceptions file: %d\n", result);
		goto exit2;
	}

	spin_lock_init(&sfe_ctx->lock);

	INIT_LIST_HEAD(&sfe_ctx->msg_queue);
	INIT_WORK(&sfe_ctx->work, sfe_process_response_msg);

	/*
	 * Hook the receive path in the network stack.
	 */
	BUG_ON(athrs_fast_nat_recv);
	RCU_INIT_POINTER(athrs_fast_nat_recv, sfe_recv);

	return 0;
exit2:
	kobject_put(sfe_ctx->sys_sfe);
exit1:
	return result;
}
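
/*
 * Example (illustrative sketch): the module glue that calls the init/exit pair
 * in this file lives outside it; assuming nothing else needs wiring up, a
 * minimal version could look like:
 *
 *	static int __init sfe_init(void)
 *	{
 *		return sfe_init_if();
 *	}
 *
 *	static void __exit sfe_exit(void)
 *	{
 *		sfe_exit_if();
 *	}
 *
 *	module_init(sfe_init)
 *	module_exit(sfe_exit)
 */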

/*
 * sfe_exit_if()
 *	Tear down the SFE interface and flush all pending work.
 */
void sfe_exit_if(void)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	/*
	 * Unregister our receive callback.
	 */
	RCU_INIT_POINTER(athrs_fast_nat_recv, NULL);

	/*
	 * Wait for all callbacks to complete.
	 */
	rcu_barrier();

	/*
	 * Destroy all connections.
	 */
	sfe_ipv4_destroy_all_rules_for_dev(NULL);
	sfe_ipv6_destroy_all_rules_for_dev(NULL);

	/*
	 * Stop the work queue and flush all pending messages in the queue.
	 */
	cancel_work_sync(&sfe_ctx->work);
	sfe_process_response_msg(&sfe_ctx->work);

	/*
	 * Unregister our sync callbacks.
	 */
	sfe_ipv4_notify_unregister();
	sfe_ipv6_notify_unregister();

	kobject_put(sfe_ctx->sys_sfe);
}