/*
 * sfe.c
 *	API for shortcut forwarding engine.
 *
 * Copyright (c) 2015,2016, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/module.h>
#include <linux/version.h>
#include <linux/sysfs.h>
#include <linux/skbuff.h>
#include <net/addrconf.h>
#include <linux/inetdevice.h>
#include <net/pkt_sched.h>
#include <net/vxlan.h>
#include <net/gre.h>

#include "sfe_debug.h"
#include "sfe_api.h"
#include "sfe.h"
#include "sfe_pppoe.h"
#include "sfe_vlan.h"
#include "sfe_ipv4.h"
#include "sfe_ipv6.h"

extern int max_ipv4_conn;
extern int max_ipv6_conn;

#define SFE_MESSAGE_VERSION 0x1
#define sfe_ipv6_addr_copy(src, dest) memcpy((void *)(dest), (void *)(src), 16)
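
/*
 * The *_stopped() macros below are true once ECM has unregistered the
 * corresponding stats-sync callback; they are evaluated under rcu_read_lock()
 * in the response workqueue (see sfe_process_response_msg()).
 */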
#define sfe_ipv4_stopped(CTX) (rcu_dereference((CTX)->ipv4_stats_sync_cb) == NULL)
#define sfe_ipv6_stopped(CTX) (rcu_dereference((CTX)->ipv6_stats_sync_cb) == NULL)

typedef enum sfe_exception {
	SFE_EXCEPTION_IPV4_MSG_UNKNOW,
	SFE_EXCEPTION_IPV6_MSG_UNKNOW,
	SFE_EXCEPTION_CONNECTION_INVALID,
	SFE_EXCEPTION_NOT_SUPPORT_BRIDGE,
	SFE_EXCEPTION_TCP_INVALID,
	SFE_EXCEPTION_PROTOCOL_NOT_SUPPORT,
	SFE_EXCEPTION_SRC_DEV_NOT_L3,
	SFE_EXCEPTION_DEST_DEV_NOT_L3,
	SFE_EXCEPTION_CFG_ERR,
	SFE_EXCEPTION_CREATE_FAILED,
	SFE_EXCEPTION_ENQUEUE_FAILED,
	SFE_EXCEPTION_NOT_SUPPORT_6RD,
	SFE_EXCEPTION_NO_SYNC_CB,
	SFE_EXCEPTION_MAX
} sfe_exception_t;

static char *sfe_exception_events_string[SFE_EXCEPTION_MAX] = {
	"IPV4_MSG_UNKNOW",
	"IPV6_MSG_UNKNOW",
	"CONNECTION_INVALID",
	"NOT_SUPPORT_BRIDGE",
	"TCP_INVALID",
	"PROTOCOL_NOT_SUPPORT",
	"SRC_DEV_NOT_L3",
	"DEST_DEV_NOT_L3",
	"CONFIG_ERROR",
	"CREATE_FAILED",
	"ENQUEUE_FAILED",
	"NOT_SUPPORT_6RD",
	"NO_SYNC_CB"
};

/*
 * Message type of queued response message
 */
typedef enum {
	SFE_MSG_TYPE_IPV4,
	SFE_MSG_TYPE_IPV6
} sfe_msg_types_t;

/*
 * Queued response message,
 * will be sent back to the caller in a workqueue
 */
struct sfe_response_msg {
	struct list_head node;
	sfe_msg_types_t type;
	void *msg[0];		/* Response message body, sized at allocation time */
};

/*
 * SFE context instance, private to SFE
 */
struct sfe_ctx_instance_internal {
	struct sfe_ctx_instance base;	/* Exported SFE context, public to users of SFE */

	/*
	 * Control state.
	 */
	struct kobject *sys_sfe;	/* Sysfs linkage */

	struct list_head msg_queue;	/* Response message queue */
	spinlock_t lock;		/* Lock to protect message queue */

	struct work_struct work;	/* Work to send response messages back to the caller */

	sfe_ipv4_msg_callback_t __rcu ipv4_stats_sync_cb;	/* Callback to call to sync ipv4 statistics */
	void *ipv4_stats_sync_data;	/* Argument for above callback: ipv4_stats_sync_cb */

	sfe_ipv6_msg_callback_t __rcu ipv6_stats_sync_cb;	/* Callback to call to sync ipv6 statistics */
	void *ipv6_stats_sync_data;	/* Argument for above callback: ipv6_stats_sync_cb */

	u32 exceptions[SFE_EXCEPTION_MAX];	/* Statistics for exceptions */

	int32_t l2_feature_support;	/* L2 feature support */

};

static struct sfe_ctx_instance_internal __sfe_ctx;

/*
 * Convert public SFE context to internal context
 */
#define SFE_CTX_TO_PRIVATE(base) (struct sfe_ctx_instance_internal *)(base)
/*
 * Convert internal SFE context to public context
 */
#define SFE_CTX_TO_PUBLIC(intrv) (struct sfe_ctx_instance *)(intrv)

/*
 * sfe_incr_exceptions()
 *	Increase an exception counter.
 *
 * TODO: Merge sfe_ctx stats to ipv4 and ipv6 percpu stats.
 */
static inline void sfe_incr_exceptions(sfe_exception_t except)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);
	sfe_ctx->exceptions[except]++;
	spin_unlock_bh(&sfe_ctx->lock);
}

/*
 * sfe_dev_is_layer_3_interface()
 *	Check if a network device is an IPv4 or IPv6 layer-3 interface
 *
 * @param dev network device to check
 * @param check_v4 check for an IPv4 layer-3 interface (which has an IPv4 address) or an IPv6 layer-3 interface (which has an IPv6 address)
 */
inline bool sfe_dev_is_layer_3_interface(struct net_device *dev, bool check_v4)
{
	struct in_device *in4_dev;
	struct inet6_dev *in6_dev;

	BUG_ON(!dev);

	if (likely(check_v4)) {
		/*
		 * Does our input device support IPv4 processing?
		 */
		in4_dev = (struct in_device *)dev->ip_ptr;
		if (unlikely(!in4_dev)) {
			return false;
		}

		/*
		 * Does it have an IPv4 address? If it doesn't then it could be a MAP-T
		 * (or DS-Lite) interface, else we can't do anything interesting here!
		 */
		if (likely(in4_dev->ifa_list || (dev->priv_flags_ext & IFF_EXT_MAPT)
			   || (dev->rtnl_link_ops
			       && !strcmp(dev->rtnl_link_ops->kind, "ip6tnl")))) {
			return true;
		}
		return false;
	}

	/*
	 * Does our input device support IPv6 processing?
	 */
	in6_dev = (struct inet6_dev *)dev->ip6_ptr;
	if (unlikely(!in6_dev)) {
		return false;
	}

	/*
	 * Does it have an IPv6 address? If it doesn't then it could be a MAP-T interface,
	 * else we can't do anything interesting here!
	 */
	if (likely(!list_empty(&in6_dev->addr_list) || (dev->priv_flags_ext & IFF_EXT_MAPT))) {
		return true;
	}

	return false;
}

/*
 * sfe_routed_dev_allow()
 *	Check whether routed acceleration is allowed
 */
static bool sfe_routed_dev_allow(struct net_device *dev, bool is_routed, bool check_v4)
{
	if (!is_routed) {
		return true;
	}

	if (sfe_dev_is_layer_3_interface(dev, check_v4)) {
		return true;
	}

	/*
	 * In the case of GRE / VxLAN, these devices do not have an IP address,
	 * so the L3 interface check will fail. Allow rule creation between a
	 * GRE / VxLAN device and the WAN device for routed flows.
	 */
	if (netif_is_vxlan(dev)) {
		return true;
	}

#ifdef SFE_GRE_TUN_ENABLE
	if (netif_is_gretap(dev) || netif_is_gre(dev)) {
		return true;
	}

	if (netif_is_ip6gre(dev) || netif_is_ip6gretap(dev)) {
		return true;
	}
#endif

	return false;
}

/*
 * sfe_dev_has_hw_csum()
 *	Check whether the device supports hardware checksum offload
 */
bool sfe_dev_has_hw_csum(struct net_device *dev)
{
	if (netif_is_vxlan(dev)) {
		return false;
	}

#ifdef SFE_GRE_TUN_ENABLE
	if (netif_is_gre(dev) || netif_is_gretap(dev)) {
		return false;
	}

	if (netif_is_ip6gre(dev) || netif_is_ip6gretap(dev)) {
		return false;
	}
#endif
	/*
	 * MAP-E/DS-Lite and Tun6rd tunnels share the same routing netlink operator
	 * (rtnl_link_ops) whose kind is "ip6tnl". HW checksum offload for these
	 * tunnel devices should be disabled.
	 */
	if (dev->rtnl_link_ops && !strcmp(dev->rtnl_link_ops->kind, "ip6tnl")) {
		return false;
	}

	return true;
}

/*
 * sfe_clean_response_msg_by_type()
 *	Clean response messages in the queue when ECM exits
 *
 * @param sfe_ctx SFE context
 * @param msg_type message type, ipv4 or ipv6
 */
static void sfe_clean_response_msg_by_type(struct sfe_ctx_instance_internal *sfe_ctx, sfe_msg_types_t msg_type)
{
	struct sfe_response_msg *response, *tmp;

	if (!sfe_ctx) {
		return;
	}

	spin_lock_bh(&sfe_ctx->lock);
	list_for_each_entry_safe(response, tmp, &sfe_ctx->msg_queue, node) {
		if (response->type == msg_type) {
			list_del(&response->node);
			/*
			 * Free response message
			 */
			kfree(response);
		}
	}
	spin_unlock_bh(&sfe_ctx->lock);

}

/*
 * sfe_process_response_msg()
 *	Send all pending response messages to ECM by calling the callback function included in each message
 *
 * @param work work structure
 */
static void sfe_process_response_msg(struct work_struct *work)
{
	struct sfe_ctx_instance_internal *sfe_ctx = container_of(work, struct sfe_ctx_instance_internal, work);
	struct sfe_response_msg *response;

	spin_lock_bh(&sfe_ctx->lock);
	while ((response = list_first_entry_or_null(&sfe_ctx->msg_queue, struct sfe_response_msg, node))) {
		list_del(&response->node);
		spin_unlock_bh(&sfe_ctx->lock);
		rcu_read_lock();

		/*
		 * Send response message back to caller
		 */
		if ((response->type == SFE_MSG_TYPE_IPV4) && !sfe_ipv4_stopped(sfe_ctx)) {
			struct sfe_ipv4_msg *msg = (struct sfe_ipv4_msg *)response->msg;
			sfe_ipv4_msg_callback_t callback = (sfe_ipv4_msg_callback_t)msg->cm.cb;
			if (callback) {
				callback((void *)msg->cm.app_data, msg);
			}
		} else if ((response->type == SFE_MSG_TYPE_IPV6) && !sfe_ipv6_stopped(sfe_ctx)) {
			struct sfe_ipv6_msg *msg = (struct sfe_ipv6_msg *)response->msg;
			sfe_ipv6_msg_callback_t callback = (sfe_ipv6_msg_callback_t)msg->cm.cb;
			if (callback) {
				callback((void *)msg->cm.app_data, msg);
			}
		}

		rcu_read_unlock();
		/*
		 * Free response message
		 */
		kfree(response);
		spin_lock_bh(&sfe_ctx->lock);
	}
	spin_unlock_bh(&sfe_ctx->lock);
}

/*
 * sfe_alloc_response_msg()
 *	Allocate and construct a new response message
 *
 * @param type message type
 * @param msg used to construct response message if not NULL
 *
 * @return !NULL, success; NULL, failed
 */
static struct sfe_response_msg *
sfe_alloc_response_msg(sfe_msg_types_t type, void *msg)
{
	struct sfe_response_msg *response;
	int size;

	switch (type) {
	case SFE_MSG_TYPE_IPV4:
		size = sizeof(struct sfe_ipv4_msg);
		break;
	case SFE_MSG_TYPE_IPV6:
		size = sizeof(struct sfe_ipv6_msg);
		break;
	default:
		DEBUG_ERROR("message type %d not supported\n", type);
		return NULL;
	}

	response = (struct sfe_response_msg *)kzalloc(sizeof(struct sfe_response_msg) + size, GFP_ATOMIC);
	if (!response) {
		DEBUG_ERROR("allocate memory failed\n");
		return NULL;
	}

	response->type = type;

	if (msg) {
		memcpy(response->msg, msg, size);
	}

	return response;
}

/*
 * sfe_fast_xmit_check()
 *	Check the fast transmit feasibility.
 *
 * This checks the per-direction attributes that prevent fast transmit:
 * xfrm packets, packets coming from a local socket, or packets that need
 * sk validation on the skb.
 */
bool sfe_fast_xmit_check(struct sk_buff *skb, netdev_features_t features)
{

#ifdef CONFIG_SOCK_VALIDATE_XMIT
	if (skb->sk && sk_fullsock(skb->sk) && skb->sk->sk_validate_xmit_skb) {
		DEBUG_INFO("%px:need sk validation\n", skb);
		return false;
#ifdef CONFIG_TLS_DEVICE
	} else if (skb->decrypted) {
		DEBUG_INFO("%px:SK or decrypted\n", skb);
		return false;
#endif
	}
#endif
	if (skb_vlan_tag_present(skb)) {
		DEBUG_INFO("%px:Vlan is present\n", skb);
		return false;
	}

	if (netif_needs_gso(skb, features)) {
		DEBUG_INFO("%px:Need to be gso\n", skb);
		return false;
	}

	if (skb_sec_path(skb)) {
		DEBUG_INFO("%px:XFRM is present\n", skb);
		return false;
	}

	return true;
}

/*
 * sfe_enqueue_msg()
 *	Queue response message
 *
 * @param sfe_ctx SFE context
 * @param response response message to be queued
 */
static inline void sfe_enqueue_msg(struct sfe_ctx_instance_internal *sfe_ctx, struct sfe_response_msg *response)
{
	spin_lock_bh(&sfe_ctx->lock);
	list_add_tail(&response->node, &sfe_ctx->msg_queue);
	spin_unlock_bh(&sfe_ctx->lock);

	schedule_work(&sfe_ctx->work);
}

/*
 * sfe_cmn_msg_init()
 *	Initialize the common message structure.
 *
 * @param ncm message to init
 * @param if_num interface number related with this message
 * @param type message type
 * @param cb callback function to process the response of this message
 * @param app_data argument for above callback function
 */
static void sfe_cmn_msg_init(struct sfe_cmn_msg *ncm, u16 if_num, u32 type, u32 len, void *cb, void *app_data)
{
	ncm->interface = if_num;
	ncm->version = SFE_MESSAGE_VERSION;
	ncm->type = type;
	ncm->len = len;
	ncm->cb = (sfe_ptr_t)cb;
	ncm->app_data = (sfe_ptr_t)app_data;
}

/*
 * sfe_ipv4_stats_sync_callback()
 *	Synchronize a connection's state.
 *
 * @param sis SFE statistics from the SFE core engine
 */
static void sfe_ipv4_stats_sync_callback(struct sfe_connection_sync *sis)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
	struct sfe_ipv4_msg msg;
	struct sfe_ipv4_conn_sync *sync_msg;
	sfe_ipv4_msg_callback_t sync_cb;

	rcu_read_lock();
	sync_cb = rcu_dereference(sfe_ctx->ipv4_stats_sync_cb);
	if (!sync_cb) {
		rcu_read_unlock();
		sfe_incr_exceptions(SFE_EXCEPTION_NO_SYNC_CB);
		return;
	}

	sync_msg = &msg.msg.conn_stats;

	memset(&msg, 0, sizeof(msg));
	sfe_cmn_msg_init(&msg.cm, 0, SFE_RX_CONN_STATS_SYNC_MSG,
			 sizeof(struct sfe_ipv4_conn_sync), NULL, NULL);

	/*
	 * Fill connection specific information
	 */
	sync_msg->protocol = (u8)sis->protocol;
	sync_msg->flow_ip = sis->src_ip.ip;
	sync_msg->flow_ip_xlate = sis->src_ip_xlate.ip;
	sync_msg->flow_ident = sis->src_port;
	sync_msg->flow_ident_xlate = sis->src_port_xlate;

	sync_msg->return_ip = sis->dest_ip.ip;
	sync_msg->return_ip_xlate = sis->dest_ip_xlate.ip;
	sync_msg->return_ident = sis->dest_port;
	sync_msg->return_ident_xlate = sis->dest_port_xlate;

	/*
	 * Fill TCP protocol specific information
	 */
	if (sis->protocol == IPPROTO_TCP) {
		sync_msg->flow_max_window = sis->src_td_max_window;
		sync_msg->flow_end = sis->src_td_end;
		sync_msg->flow_max_end = sis->src_td_max_end;

		sync_msg->return_max_window = sis->dest_td_max_window;
		sync_msg->return_end = sis->dest_td_end;
		sync_msg->return_max_end = sis->dest_td_max_end;
	}

	/*
	 * Fill statistics information
	 */
	sync_msg->flow_rx_packet_count = sis->src_new_packet_count;
	sync_msg->flow_rx_byte_count = sis->src_new_byte_count;
	sync_msg->flow_tx_packet_count = sis->dest_new_packet_count;
	sync_msg->flow_tx_byte_count = sis->dest_new_byte_count;

	sync_msg->return_rx_packet_count = sis->dest_new_packet_count;
	sync_msg->return_rx_byte_count = sis->dest_new_byte_count;
	sync_msg->return_tx_packet_count = sis->src_new_packet_count;
	sync_msg->return_tx_byte_count = sis->src_new_byte_count;

	/*
	 * Fill expiration time to extend, in units of msec
	 */
	sync_msg->inc_ticks = (((u32)sis->delta_jiffies) * MSEC_PER_SEC)/HZ;

	/*
	 * Fill other information
	 */
	switch (sis->reason) {
	case SFE_SYNC_REASON_DESTROY:
		sync_msg->reason = SFE_RULE_SYNC_REASON_DESTROY;
		break;
	case SFE_SYNC_REASON_FLUSH:
		sync_msg->reason = SFE_RULE_SYNC_REASON_FLUSH;
		break;
	default:
		sync_msg->reason = SFE_RULE_SYNC_REASON_STATS;
		break;
	}

	/*
	 * The SFE sync call is executed from a timer, so we can redirect it to ECM directly.
	 */
	sync_cb(sfe_ctx->ipv4_stats_sync_data, &msg);
	rcu_read_unlock();
}

/*
 * sfe_recv_parse_l2()
 *	Parse L2 headers
 *
 * Returns true if the packet is parsed and false otherwise.
 */
static bool sfe_recv_parse_l2(struct net_device *dev, struct sk_buff *skb, struct sfe_l2_info *l2_info)
{
	/*
	 * VLAN parsing
	 */
	if (unlikely(!sfe_vlan_check_and_parse_tag(skb, l2_info))) {
		return false;
	}

	/*
	 * Parse only PPPoE session packets
	 */
	if (htons(ETH_P_PPP_SES) == skb->protocol) {
		if (!sfe_pppoe_parse_hdr(skb, l2_info)) {

			/*
			 * For exceptions from PPPoE, return from here without modifying skb->data.
			 * This also includes non-IPv4/v6 cases.
			 */
			return false;
		}
	}
	return true;
}

/*
 * sfe_recv_undo_parse_l2()
 */
static void sfe_recv_undo_parse_l2(struct net_device *dev, struct sk_buff *skb, struct sfe_l2_info *l2_info)
{
	/*
	 * PPPoE undo
	 */
	sfe_pppoe_undo_parse(skb, l2_info);

	/*
	 * VLAN undo
	 */
	sfe_vlan_undo_parse(skb, l2_info);

	/*
	 * The packet is not handled by SFE, so reset the network header
	 */
	skb_reset_network_header(skb);
}

/*
 * sfe_create_ipv4_rule_msg()
 *	Convert the create message format from ECM to SFE
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv4 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_create_ipv4_rule_msg(struct sfe_ctx_instance_internal *sfe_ctx, struct sfe_ipv4_msg *msg)
{
	struct net_device *src_dev = NULL;
	struct net_device *dest_dev = NULL;
	struct sfe_response_msg *response;
	enum sfe_cmn_response ret = SFE_TX_SUCCESS;
	bool is_routed = true;
	bool cfg_err;

	response = sfe_alloc_response_msg(SFE_MSG_TYPE_IPV4, msg);
	if (!response) {
		sfe_incr_exceptions(SFE_EXCEPTION_ENQUEUE_FAILED);
		return SFE_TX_FAILURE_QUEUE;
	}

	if (!(msg->msg.rule_create.valid_flags & SFE_RULE_CREATE_CONN_VALID)) {
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CONNECTION_INVALID);
		goto failed_ret;
	}

	switch (msg->msg.rule_create.tuple.protocol) {
	case IPPROTO_TCP:
		if (!(msg->msg.rule_create.valid_flags & SFE_RULE_CREATE_TCP_VALID)) {
			ret = SFE_CMN_RESPONSE_EMSG;
			sfe_incr_exceptions(SFE_EXCEPTION_TCP_INVALID);
			goto failed_ret;
		}
		/* Intentional fall through */

	case IPPROTO_UDP:
		break;

	case IPPROTO_GRE:
		break;

	default:
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_PROTOCOL_NOT_SUPPORT);
		goto failed_ret;
	}

	/*
	 * Bridge flows are accelerated if the L2 feature is enabled.
	 */
	if (msg->msg.rule_create.rule_flags & SFE_RULE_CREATE_FLAG_BRIDGE_FLOW) {
		if (!sfe_is_l2_feature_enabled()) {
			ret = SFE_CMN_RESPONSE_EINTERFACE;
			sfe_incr_exceptions(SFE_EXCEPTION_NOT_SUPPORT_BRIDGE);
			goto failed_ret;
		}

		is_routed = false;
	}

	/*
	 * Does our input device support IP processing?
	 */
	src_dev = dev_get_by_index(&init_net, msg->msg.rule_create.conn_rule.flow_top_interface_num);
	if (!src_dev || !sfe_routed_dev_allow(src_dev, is_routed, true)) {
		ret = SFE_CMN_RESPONSE_EINTERFACE;
		sfe_incr_exceptions(SFE_EXCEPTION_SRC_DEV_NOT_L3);
		goto failed_ret;
	}

	/*
	 * Check whether the L2 feature is disabled and the rule flag is configured to use the bottom interface
	 */
	cfg_err = (msg->msg.rule_create.rule_flags & SFE_RULE_CREATE_FLAG_USE_FLOW_BOTTOM_INTERFACE) && !sfe_is_l2_feature_enabled();
	if (cfg_err) {
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CFG_ERR);
		goto failed_ret;
	}

	/*
	 * Does our output device support IP processing?
	 */
	dest_dev = dev_get_by_index(&init_net, msg->msg.rule_create.conn_rule.return_top_interface_num);
	if (!dest_dev || !sfe_routed_dev_allow(dest_dev, is_routed, true)) {
		ret = SFE_CMN_RESPONSE_EINTERFACE;
		sfe_incr_exceptions(SFE_EXCEPTION_DEST_DEV_NOT_L3);
		goto failed_ret;
	}

	/*
	 * Check whether the L2 feature is disabled and the rule flag is configured to use the bottom interface
	 */
	cfg_err = (msg->msg.rule_create.rule_flags & SFE_RULE_CREATE_FLAG_USE_RETURN_BOTTOM_INTERFACE) && !sfe_is_l2_feature_enabled();
	if (cfg_err) {
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CFG_ERR);
		goto failed_ret;
	}

	if (!sfe_ipv4_create_rule(&msg->msg.rule_create)) {
		/* success */
		ret = SFE_CMN_RESPONSE_ACK;
	} else {
		/* Failed */
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CREATE_FAILED);
	}

	/*
	 * Fall through
	 */
failed_ret:
	if (src_dev) {
		dev_put(src_dev);
	}

	if (dest_dev) {
		dev_put(dest_dev);
	}

	/*
	 * Try to queue response message
	 */
	((struct sfe_ipv4_msg *)response->msg)->cm.response = msg->cm.response = ret;
	sfe_enqueue_msg(sfe_ctx, response);

	return SFE_TX_SUCCESS;
}

/*
 * sfe_destroy_ipv4_rule_msg()
 *	Convert the destroy message format from ECM to SFE
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv4 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_destroy_ipv4_rule_msg(struct sfe_ctx_instance_internal *sfe_ctx, struct sfe_ipv4_msg *msg)
{
	struct sfe_response_msg *response;

	response = sfe_alloc_response_msg(SFE_MSG_TYPE_IPV4, msg);
	if (!response) {
		sfe_incr_exceptions(SFE_EXCEPTION_ENQUEUE_FAILED);
		return SFE_TX_FAILURE_QUEUE;
	}

	sfe_ipv4_destroy_rule(&msg->msg.rule_destroy);

	/*
	 * Try to queue response message
	 */
	((struct sfe_ipv4_msg *)response->msg)->cm.response = msg->cm.response = SFE_CMN_RESPONSE_ACK;
	sfe_enqueue_msg(sfe_ctx, response);

	return SFE_TX_SUCCESS;
}

/*
 * sfe_ipv4_tx()
 *	Transmit an IPv4 message to the SFE
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv4 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_ipv4_tx(struct sfe_ctx_instance *sfe_ctx, struct sfe_ipv4_msg *msg)
{
	switch (msg->cm.type) {
	case SFE_TX_CREATE_RULE_MSG:
		return sfe_create_ipv4_rule_msg(SFE_CTX_TO_PRIVATE(sfe_ctx), msg);
	case SFE_TX_DESTROY_RULE_MSG:
		return sfe_destroy_ipv4_rule_msg(SFE_CTX_TO_PRIVATE(sfe_ctx), msg);
	default:
		sfe_incr_exceptions(SFE_EXCEPTION_IPV4_MSG_UNKNOW);
		return SFE_TX_FAILURE_NOT_ENABLED;
	}
}
EXPORT_SYMBOL(sfe_ipv4_tx);

/*
 * sfe_ipv4_msg_init()
 *	Initialize IPv4 message.
 */
void sfe_ipv4_msg_init(struct sfe_ipv4_msg *nim, u16 if_num, u32 type, u32 len,
		       sfe_ipv4_msg_callback_t cb, void *app_data)
{
	sfe_cmn_msg_init(&nim->cm, if_num, type, len, (void *)cb, app_data);
}
EXPORT_SYMBOL(sfe_ipv4_msg_init);
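
/*
 * Example (sketch only): a caller such as ECM would typically build and
 * transmit an IPv4 rule create message along these lines, where my_resp_cb
 * and my_app_data are placeholder names for the caller's own response
 * callback and context, and sfe_ctx is the pointer returned by
 * sfe_ipv4_notify_register():
 *
 *	struct sfe_ipv4_msg nim;
 *
 *	sfe_ipv4_msg_init(&nim, 0, SFE_TX_CREATE_RULE_MSG,
 *			  sizeof(nim.msg.rule_create), my_resp_cb, my_app_data);
 *	... fill nim.msg.rule_create (tuple, conn_rule, valid_flags, ...) ...
 *	sfe_ipv4_tx(sfe_ctx, &nim);
 *
 * The response is delivered asynchronously to the callback from the
 * workqueue in sfe_process_response_msg().
 */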

/*
 * sfe_ipv4_max_conn_count()
 *	Return the maximum number of entries SFE supports
 */
int sfe_ipv4_max_conn_count(void)
{
	return max_ipv4_conn;
}
EXPORT_SYMBOL(sfe_ipv4_max_conn_count);

/*
 * sfe_ipv4_notify_register()
 *	Register a notifier callback for IPv4 messages from SFE
 *
 * @param cb The callback pointer
 * @param app_data The application context for this message
 *
 * @return struct sfe_ctx_instance * The SFE context
 */
struct sfe_ctx_instance *sfe_ipv4_notify_register(sfe_ipv4_msg_callback_t cb, void *app_data)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);
	/*
	 * Hook the shortcut sync callback.
	 */
	if (cb && !sfe_ctx->ipv4_stats_sync_cb) {
		sfe_ipv4_register_sync_rule_callback(sfe_ipv4_stats_sync_callback);
	}

	rcu_assign_pointer(sfe_ctx->ipv4_stats_sync_cb, cb);
	sfe_ctx->ipv4_stats_sync_data = app_data;

	spin_unlock_bh(&sfe_ctx->lock);

	return SFE_CTX_TO_PUBLIC(sfe_ctx);
}
EXPORT_SYMBOL(sfe_ipv4_notify_register);

/*
 * sfe_ipv4_notify_unregister()
 *	Unregister a notifier callback for IPv4 messages from SFE
 */
void sfe_ipv4_notify_unregister(void)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);
	/*
	 * Unregister our sync callback.
	 */
	if (sfe_ctx->ipv4_stats_sync_cb) {
		sfe_ipv4_register_sync_rule_callback(NULL);
		rcu_assign_pointer(sfe_ctx->ipv4_stats_sync_cb, NULL);
		sfe_ctx->ipv4_stats_sync_data = NULL;
	}
	spin_unlock_bh(&sfe_ctx->lock);

	sfe_clean_response_msg_by_type(sfe_ctx, SFE_MSG_TYPE_IPV4);

	return;
}
EXPORT_SYMBOL(sfe_ipv4_notify_unregister);

/*
 * sfe_ipv6_stats_sync_callback()
 *	Synchronize a connection's state.
 */
static void sfe_ipv6_stats_sync_callback(struct sfe_connection_sync *sis)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
	struct sfe_ipv6_msg msg;
	struct sfe_ipv6_conn_sync *sync_msg;
	sfe_ipv6_msg_callback_t sync_cb;

	rcu_read_lock();
	sync_cb = rcu_dereference(sfe_ctx->ipv6_stats_sync_cb);
	if (!sync_cb) {
		rcu_read_unlock();
		sfe_incr_exceptions(SFE_EXCEPTION_NO_SYNC_CB);
		return;
	}

	sync_msg = &msg.msg.conn_stats;

	memset(&msg, 0, sizeof(msg));
	sfe_cmn_msg_init(&msg.cm, 0, SFE_RX_CONN_STATS_SYNC_MSG,
			 sizeof(struct sfe_ipv6_conn_sync), NULL, NULL);

	/*
	 * Fill connection specific information
	 */
	sync_msg->protocol = (u8)sis->protocol;
	sfe_ipv6_addr_copy(sis->src_ip.ip6, sync_msg->flow_ip);
	sync_msg->flow_ident = sis->src_port;

	sfe_ipv6_addr_copy(sis->dest_ip.ip6, sync_msg->return_ip);
	sync_msg->return_ident = sis->dest_port;

	/*
	 * Fill TCP protocol specific information
	 */
	if (sis->protocol == IPPROTO_TCP) {
		sync_msg->flow_max_window = sis->src_td_max_window;
		sync_msg->flow_end = sis->src_td_end;
		sync_msg->flow_max_end = sis->src_td_max_end;

		sync_msg->return_max_window = sis->dest_td_max_window;
		sync_msg->return_end = sis->dest_td_end;
		sync_msg->return_max_end = sis->dest_td_max_end;
	}

	/*
	 * Fill statistics information
	 */
	sync_msg->flow_rx_packet_count = sis->src_new_packet_count;
	sync_msg->flow_rx_byte_count = sis->src_new_byte_count;
	sync_msg->flow_tx_packet_count = sis->dest_new_packet_count;
	sync_msg->flow_tx_byte_count = sis->dest_new_byte_count;

	sync_msg->return_rx_packet_count = sis->dest_new_packet_count;
	sync_msg->return_rx_byte_count = sis->dest_new_byte_count;
	sync_msg->return_tx_packet_count = sis->src_new_packet_count;
	sync_msg->return_tx_byte_count = sis->src_new_byte_count;

	/*
	 * Fill expiration time to extend, in units of msec
	 */
	sync_msg->inc_ticks = (((u32)sis->delta_jiffies) * MSEC_PER_SEC)/HZ;

	/*
	 * Fill other information
	 */
	switch (sis->reason) {
	case SFE_SYNC_REASON_DESTROY:
		sync_msg->reason = SFE_RULE_SYNC_REASON_DESTROY;
		break;
	case SFE_SYNC_REASON_FLUSH:
		sync_msg->reason = SFE_RULE_SYNC_REASON_FLUSH;
		break;
	default:
		sync_msg->reason = SFE_RULE_SYNC_REASON_STATS;
		break;
	}

	/*
	 * The SFE sync call is executed from a timer, so we can redirect it to ECM directly.
	 */
	sync_cb(sfe_ctx->ipv6_stats_sync_data, &msg);
	rcu_read_unlock();
}

/*
 * sfe_create_ipv6_rule_msg()
 *	Convert the create message format from ECM to SFE
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv6 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_create_ipv6_rule_msg(struct sfe_ctx_instance_internal *sfe_ctx, struct sfe_ipv6_msg *msg)
{
	struct net_device *src_dev = NULL;
	struct net_device *dest_dev = NULL;
	struct sfe_response_msg *response;
	enum sfe_cmn_response ret = SFE_TX_SUCCESS;
	bool is_routed = true;
	bool cfg_err;

	response = sfe_alloc_response_msg(SFE_MSG_TYPE_IPV6, msg);
	if (!response) {
		sfe_incr_exceptions(SFE_EXCEPTION_ENQUEUE_FAILED);
		return SFE_TX_FAILURE_QUEUE;
	}

	if (!(msg->msg.rule_create.valid_flags & SFE_RULE_CREATE_CONN_VALID)) {
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CONNECTION_INVALID);
		goto failed_ret;
	}

	/*
	 * Bridge flows are accelerated if the L2 feature is enabled.
	 */
	if (msg->msg.rule_create.rule_flags & SFE_RULE_CREATE_FLAG_BRIDGE_FLOW) {
		if (!sfe_is_l2_feature_enabled()) {
			ret = SFE_CMN_RESPONSE_EINTERFACE;
			sfe_incr_exceptions(SFE_EXCEPTION_NOT_SUPPORT_BRIDGE);
			goto failed_ret;
		}
		is_routed = false;
	}

	switch (msg->msg.rule_create.tuple.protocol) {

	case IPPROTO_TCP:
		if (!(msg->msg.rule_create.valid_flags & SFE_RULE_CREATE_TCP_VALID)) {
			ret = SFE_CMN_RESPONSE_EMSG;
			sfe_incr_exceptions(SFE_EXCEPTION_TCP_INVALID);
			goto failed_ret;
		}

		break;

	case IPPROTO_UDP:
		break;

	case IPPROTO_IPIP:
		break;

	case IPPROTO_GRE:
		break;

	default:
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_PROTOCOL_NOT_SUPPORT);
		goto failed_ret;
	}

	/*
	 * Does our input device support IP processing?
	 */
	src_dev = dev_get_by_index(&init_net, msg->msg.rule_create.conn_rule.flow_top_interface_num);
	if (!src_dev || !sfe_routed_dev_allow(src_dev, is_routed, false)) {
		ret = SFE_CMN_RESPONSE_EINTERFACE;
		sfe_incr_exceptions(SFE_EXCEPTION_SRC_DEV_NOT_L3);
		goto failed_ret;
	}

	/*
	 * Check whether the L2 feature is disabled and the rule flag is configured to use the bottom interface
	 */
	cfg_err = (msg->msg.rule_create.rule_flags & SFE_RULE_CREATE_FLAG_USE_FLOW_BOTTOM_INTERFACE) && !sfe_is_l2_feature_enabled();
	if (cfg_err) {
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CFG_ERR);
		goto failed_ret;
	}

	/*
	 * Does our output device support IP processing?
	 */
	dest_dev = dev_get_by_index(&init_net, msg->msg.rule_create.conn_rule.return_top_interface_num);
	if (!dest_dev || !sfe_routed_dev_allow(dest_dev, is_routed, false)) {
		ret = SFE_CMN_RESPONSE_EINTERFACE;
		sfe_incr_exceptions(SFE_EXCEPTION_DEST_DEV_NOT_L3);
		goto failed_ret;
	}

	/*
	 * Check whether the L2 feature is disabled and the rule flag is configured to use the bottom interface
	 */
	cfg_err = (msg->msg.rule_create.rule_flags & SFE_RULE_CREATE_FLAG_USE_RETURN_BOTTOM_INTERFACE) && !sfe_is_l2_feature_enabled();
	if (cfg_err) {
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CFG_ERR);
		goto failed_ret;
	}

	if (!sfe_ipv6_create_rule(&msg->msg.rule_create)) {
		/* success */
		ret = SFE_CMN_RESPONSE_ACK;
	} else {
		/* Failed */
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CREATE_FAILED);
	}

	/*
	 * Fall through
	 */
failed_ret:
	if (src_dev) {
		dev_put(src_dev);
	}

	if (dest_dev) {
		dev_put(dest_dev);
	}

	/*
	 * Try to queue response message
	 */
	((struct sfe_ipv6_msg *)response->msg)->cm.response = msg->cm.response = ret;
	sfe_enqueue_msg(sfe_ctx, response);

	return SFE_TX_SUCCESS;
}

/*
 * sfe_destroy_ipv6_rule_msg()
 *	Convert the destroy message format from ECM to SFE
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv6 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_destroy_ipv6_rule_msg(struct sfe_ctx_instance_internal *sfe_ctx, struct sfe_ipv6_msg *msg)
{
	struct sfe_response_msg *response;

	response = sfe_alloc_response_msg(SFE_MSG_TYPE_IPV6, msg);
	if (!response) {
		sfe_incr_exceptions(SFE_EXCEPTION_ENQUEUE_FAILED);
		return SFE_TX_FAILURE_QUEUE;
	}

	sfe_ipv6_destroy_rule(&msg->msg.rule_destroy);

	/*
	 * Try to queue response message
	 */
	((struct sfe_ipv6_msg *)response->msg)->cm.response = msg->cm.response = SFE_CMN_RESPONSE_ACK;
	sfe_enqueue_msg(sfe_ctx, response);

	return SFE_TX_SUCCESS;
}

/*
 * sfe_ipv6_tx()
 *	Transmit an IPv6 message to the SFE
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv6 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_ipv6_tx(struct sfe_ctx_instance *sfe_ctx, struct sfe_ipv6_msg *msg)
{
	switch (msg->cm.type) {
	case SFE_TX_CREATE_RULE_MSG:
		return sfe_create_ipv6_rule_msg(SFE_CTX_TO_PRIVATE(sfe_ctx), msg);
	case SFE_TX_DESTROY_RULE_MSG:
		return sfe_destroy_ipv6_rule_msg(SFE_CTX_TO_PRIVATE(sfe_ctx), msg);
	default:
		sfe_incr_exceptions(SFE_EXCEPTION_IPV6_MSG_UNKNOW);
		return SFE_TX_FAILURE_NOT_ENABLED;
	}
}
EXPORT_SYMBOL(sfe_ipv6_tx);

/*
 * sfe_ipv6_msg_init()
 *	Initialize IPv6 message.
 */
void sfe_ipv6_msg_init(struct sfe_ipv6_msg *nim, u16 if_num, u32 type, u32 len,
		       sfe_ipv6_msg_callback_t cb, void *app_data)
{
	sfe_cmn_msg_init(&nim->cm, if_num, type, len, (void *)cb, app_data);
}
EXPORT_SYMBOL(sfe_ipv6_msg_init);

/*
 * sfe_ipv6_max_conn_count()
 *	Return the maximum number of entries SFE supports
 */
int sfe_ipv6_max_conn_count(void)
{
	return max_ipv6_conn;
}
EXPORT_SYMBOL(sfe_ipv6_max_conn_count);

/*
 * sfe_ipv6_notify_register()
 *	Register a notifier callback for IPv6 messages from SFE
 *
 * @param cb The callback pointer
 * @param app_data The application context for this message
 *
 * @return struct sfe_ctx_instance * The SFE context
 */
struct sfe_ctx_instance *sfe_ipv6_notify_register(sfe_ipv6_msg_callback_t cb, void *app_data)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);
	/*
	 * Hook the shortcut sync callback.
	 */
	if (cb && !sfe_ctx->ipv6_stats_sync_cb) {
		sfe_ipv6_register_sync_rule_callback(sfe_ipv6_stats_sync_callback);
	}

	rcu_assign_pointer(sfe_ctx->ipv6_stats_sync_cb, cb);
	sfe_ctx->ipv6_stats_sync_data = app_data;

	spin_unlock_bh(&sfe_ctx->lock);

	return SFE_CTX_TO_PUBLIC(sfe_ctx);
}
EXPORT_SYMBOL(sfe_ipv6_notify_register);

/*
 * sfe_ipv6_notify_unregister()
 *	Unregister a notifier callback for IPv6 messages from SFE
 */
void sfe_ipv6_notify_unregister(void)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);
	/*
	 * Unregister our sync callback.
	 */
	if (sfe_ctx->ipv6_stats_sync_cb) {
		sfe_ipv6_register_sync_rule_callback(NULL);
		rcu_assign_pointer(sfe_ctx->ipv6_stats_sync_cb, NULL);
		sfe_ctx->ipv6_stats_sync_data = NULL;
	}
	spin_unlock_bh(&sfe_ctx->lock);

	sfe_clean_response_msg_by_type(sfe_ctx, SFE_MSG_TYPE_IPV6);

	return;
}
EXPORT_SYMBOL(sfe_ipv6_notify_unregister);

/*
 * sfe_tun6rd_tx()
 *	Transmit a tun6rd message to the SFE engine
 */
sfe_tx_status_t sfe_tun6rd_tx(struct sfe_ctx_instance *sfe_ctx, struct sfe_tun6rd_msg *msg)
{
	sfe_incr_exceptions(SFE_EXCEPTION_NOT_SUPPORT_6RD);
	return SFE_TX_FAILURE_NOT_ENABLED;
}
EXPORT_SYMBOL(sfe_tun6rd_tx);

/*
 * sfe_tun6rd_msg_init()
 *	Initialize sfe_tun6rd msg.
 */
void sfe_tun6rd_msg_init(struct sfe_tun6rd_msg *ncm, u16 if_num, u32 type, u32 len, void *cb, void *app_data)
{
	sfe_cmn_msg_init(&ncm->cm, if_num, type, len, cb, app_data);
}
EXPORT_SYMBOL(sfe_tun6rd_msg_init);

/*
 * sfe_recv()
 *	Handle packet receives.
 *
 * Returns 1 if the packet is forwarded or 0 if it isn't.
 */
int sfe_recv(struct sk_buff *skb)
{
	struct net_device *dev;
	struct sfe_l2_info l2_info;
	int ret;

	/*
	 * We know that for the vast majority of packets we need the transport
	 * layer header so we may as well start to fetch it now!
	 */
	prefetch(skb->data + 32);
	barrier();

	dev = skb->dev;

	/*
	 * Set parse flags to 0 since l2_info is passed for the non-L2.5 header case as well
	 */
	l2_info.parse_flags = 0;
	l2_info.vlan_hdr_cnt = 0;

#ifdef CONFIG_NET_CLS_ACT
	/*
	 * If an ingress Qdisc is configured and the packet has not been processed by it yet,
	 * we can not accelerate this packet.
	 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0))
	if (dev->ingress_queue && !(skb->tc_verd & TC_NCLS)) {
		return 0;
	}
#else
	if (rcu_access_pointer(dev->miniq_ingress) && !skb->tc_skip_classify) {
		return 0;
	}
#endif
#endif

	/*
	 * If l2_feature is enabled, we need not check that the source device is an L3 interface, since bridge flow offload is supported.
	 * If l2_feature is disabled, we make sure the source device is an L3 interface to avoid the cost of rule lookups for L2 flows.
	 */
	switch (ntohs(skb->protocol)) {
	case ETH_P_IP:
		if (likely(sfe_is_l2_feature_enabled()) || sfe_dev_is_layer_3_interface(dev, true)) {
			return sfe_ipv4_recv(dev, skb, &l2_info, false);
		}

		DEBUG_TRACE("No IPv4 address for device: %s skb=%px\n", dev->name, skb);
		return 0;

	case ETH_P_IPV6:
		if (likely(sfe_is_l2_feature_enabled()) || sfe_dev_is_layer_3_interface(dev, false)) {
			return sfe_ipv6_recv(dev, skb, &l2_info, false);
		}

		DEBUG_TRACE("No IPv6 address for device: %s skb=%px\n", dev->name, skb);
		return 0;

	default:
		break;
	}

	/*
	 * Stop L2 processing if the L2 feature is disabled.
	 */
	if (!sfe_is_l2_feature_enabled()) {
		DEBUG_TRACE("Unsupported protocol %#x %s (L2 feature is disabled) skb=%px\n",
			    ntohs(skb->protocol), dev->name, skb);
		return 0;
	}

	/*
	 * Parse the L2 headers to find the L3 protocol and the L2 header offset
	 */
	if (unlikely(!sfe_recv_parse_l2(dev, skb, &l2_info))) {
		DEBUG_TRACE("%px: Invalid L2.5 header format with protocol : %x\n", skb, ntohs(skb->protocol));
		goto send_to_linux;
	}

	/*
	 * The protocol in l2_info is expected to be in host byte order.
	 * PPPoE does this in sfe_pppoe_parse_hdr().
	 */
	if (likely(l2_info.protocol == ETH_P_IP)) {
		ret = sfe_ipv4_recv(dev, skb, &l2_info, false);
		if (unlikely(!ret)) {
			goto send_to_linux;
		}
		return ret;
	}

	if (likely(l2_info.protocol == ETH_P_IPV6)) {
		ret = sfe_ipv6_recv(dev, skb, &l2_info, false);
		if (unlikely(!ret)) {
			goto send_to_linux;
		}
		return ret;
	}

	DEBUG_TRACE("Non-IP(%x) %s skb=%px skb_vlan:%x/%x/%x skb_proto=%x\n",
		    l2_info.protocol, dev->name, skb,
		    ntohs(skb->vlan_proto), skb->vlan_tci, skb_vlan_tag_present(skb),
		    htons(skb->protocol));

send_to_linux:
	/*
	 * Push the data back before sending to Linux if:
	 * a. There is any exception from IPv4/v6
	 * b. The next protocol is neither IPv4 nor IPv6
	 */
	sfe_recv_undo_parse_l2(dev, skb, &l2_info);

	return 0;
}

/*
 * sfe_get_exceptions()
 *	Dump exception counters
 */
static ssize_t sfe_get_exceptions(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	int idx, len;
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);
	for (len = 0, idx = 0; idx < SFE_EXCEPTION_MAX; idx++) {
		if (sfe_ctx->exceptions[idx]) {
			len += snprintf(buf + len, (ssize_t)(PAGE_SIZE - len), "%s = %d\n", sfe_exception_events_string[idx], sfe_ctx->exceptions[idx]);
		}
	}
	spin_unlock_bh(&sfe_ctx->lock);

	return len;
}

/*
 * sysfs attributes.
 */
static const struct device_attribute sfe_exceptions_attr =
	__ATTR(exceptions, S_IRUGO, sfe_get_exceptions, NULL);
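
/*
 * The exception counters are exported read-only through the "sfe" kobject
 * created in sfe_init_if(); assuming sysfs is mounted at /sys, they can be
 * read with, for example:
 *
 *	cat /sys/sfe/exceptions
 *
 * Only non-zero counters are printed.
 */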

/*
 * sfe_service_class_stats_get()
 *	Collects ipv4 and ipv6 service class statistics and aggregates them.
 */
bool sfe_service_class_stats_get(uint8_t sid, uint64_t *bytes, uint64_t *packets)
{
	*bytes = 0;
	*packets = 0;

	if (!sfe_ipv4_service_class_stats_get(sid, bytes, packets)) {
		return false;
	}

	if (!sfe_ipv6_service_class_stats_get(sid, bytes, packets)) {
		return false;
	}

	return true;
}
EXPORT_SYMBOL(sfe_service_class_stats_get);

/*
 * sfe_is_l2_feature_enabled()
 *	Check whether the L2 features flag is enabled (VLAN, PPPoE, bridge and tunnels).
 *
 * A 32-bit read is atomic. No need for locks.
 */
bool sfe_is_l2_feature_enabled()
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
	return (sfe_ctx->l2_feature_support == 1);
}
EXPORT_SYMBOL(sfe_is_l2_feature_enabled);

/*
 * sfe_get_l2_feature()
 *	Show whether the L2 feature is enabled or disabled
 */
ssize_t sfe_get_l2_feature(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
	ssize_t len;

	spin_lock_bh(&sfe_ctx->lock);
	len = snprintf(buf, (ssize_t)(PAGE_SIZE), "L2 feature is %s\n", sfe_ctx->l2_feature_support ? "enabled" : "disabled");
	spin_unlock_bh(&sfe_ctx->lock);
	return len;
}

/*
 * sfe_set_l2_feature()
 *	Enable or disable the L2 features flag.
 */
ssize_t sfe_set_l2_feature(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	unsigned long val;
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
	int ret;
	ret = sscanf(buf, "%lu", &val);

	if (ret != 1) {
		pr_err("Wrong input, %s\n", buf);
		return -EINVAL;
	}

	if (val != 1 && val != 0) {
		pr_err("Input should be either 1 or 0, (%s)\n", buf);
		return -EINVAL;
	}

	spin_lock_bh(&sfe_ctx->lock);

	if (sfe_ctx->l2_feature_support && val) {
		spin_unlock_bh(&sfe_ctx->lock);
		pr_err("L2 feature is already enabled\n");
		return -EINVAL;
	}

	if (!sfe_ctx->l2_feature_support && !val) {
		spin_unlock_bh(&sfe_ctx->lock);
		pr_err("L2 feature is already disabled\n");
		return -EINVAL;
	}

	sfe_ctx->l2_feature_support = val;
	spin_unlock_bh(&sfe_ctx->lock);

	return count;
}

static const struct device_attribute sfe_l2_feature_attr =
	__ATTR(l2_feature, 0644, sfe_get_l2_feature, sfe_set_l2_feature);
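
/*
 * The l2_feature flag can be queried and toggled through the same "sfe"
 * kobject; assuming sysfs is mounted at /sys, for example:
 *
 *	cat /sys/sfe/l2_feature
 *	echo 0 > /sys/sfe/l2_feature
 *
 * Writing the value that is already set is rejected with -EINVAL by
 * sfe_set_l2_feature().
 */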

/*
 * sfe_init_if()
 */
int sfe_init_if(void)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
	int result = -1;

	/*
	 * L2 feature is enabled by default
	 */
	sfe_ctx->l2_feature_support = 1;

	/*
	 * Create sys/sfe
	 */
	sfe_ctx->sys_sfe = kobject_create_and_add("sfe", NULL);
	if (!sfe_ctx->sys_sfe) {
		DEBUG_ERROR("failed to register sfe\n");
		goto exit1;
	}

	/*
	 * Create sys/sfe/exceptions
	 */
	result = sysfs_create_file(sfe_ctx->sys_sfe, &sfe_exceptions_attr.attr);
	if (result) {
		DEBUG_ERROR("failed to register exceptions file: %d\n", result);
		goto exit2;
	}

	result = sysfs_create_file(sfe_ctx->sys_sfe, &sfe_l2_feature_attr.attr);
	if (result) {
		DEBUG_ERROR("failed to register L2 feature flag sysfs file: %d\n", result);
		goto exit2;
	}

	spin_lock_init(&sfe_ctx->lock);

	INIT_LIST_HEAD(&sfe_ctx->msg_queue);
	INIT_WORK(&sfe_ctx->work, sfe_process_response_msg);

	/*
	 * Hook the receive path in the network stack.
	 */
	BUG_ON(athrs_fast_nat_recv);
	RCU_INIT_POINTER(athrs_fast_nat_recv, sfe_recv);

	return 0;
exit2:
	kobject_put(sfe_ctx->sys_sfe);
exit1:
	return result;
}

/*
 * sfe_exit_if()
 */
void sfe_exit_if(void)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	/*
	 * Unregister our receive callback.
	 */
	RCU_INIT_POINTER(athrs_fast_nat_recv, NULL);

	/*
	 * Wait for all callbacks to complete.
	 */
	rcu_barrier();

	/*
	 * Destroy all connections.
	 */
	sfe_ipv4_destroy_all_rules_for_dev(NULL);
	sfe_ipv6_destroy_all_rules_for_dev(NULL);

	/*
	 * Stop the work queue and flush all pending messages in the queue
	 */
	cancel_work_sync(&sfe_ctx->work);
	sfe_process_response_msg(&sfe_ctx->work);

	/*
	 * Unregister our sync callback.
	 */
	sfe_ipv4_notify_unregister();
	sfe_ipv6_notify_unregister();

	kobject_put(sfe_ctx->sys_sfe);

	return;
}