/*
 * sfe.c
 *	API for shortcut forwarding engine.
 *
 * Copyright (c) 2015,2016, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/module.h>
#include <linux/version.h>
#include <linux/sysfs.h>
#include <linux/skbuff.h>
#include <net/addrconf.h>
#include <linux/inetdevice.h>
#include <net/pkt_sched.h>
#include <net/vxlan.h>
#include <net/gre.h>

#include "sfe_debug.h"
#include "sfe_api.h"
#include "sfe.h"
#include "sfe_pppoe.h"
#include "sfe_vlan.h"

extern int max_ipv4_conn;
extern int max_ipv6_conn;

#define SFE_MESSAGE_VERSION 0x1
#define sfe_ipv6_addr_copy(src, dest) memcpy((void *)(dest), (void *)(src), 16)
#define sfe_ipv4_stopped(CTX) (rcu_dereference((CTX)->ipv4_stats_sync_cb) == NULL)
#define sfe_ipv6_stopped(CTX) (rcu_dereference((CTX)->ipv6_stats_sync_cb) == NULL)
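
/*
 * Note: the "stopped" macros above treat a NULL stats-sync callback as the
 * signal that the corresponding IPv4/IPv6 notifier (e.g. ECM) has
 * unregistered; queued response messages for that family are then freed
 * without being delivered (see sfe_process_response_msg()).
 */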

typedef enum sfe_exception {
	SFE_EXCEPTION_IPV4_MSG_UNKNOW,
	SFE_EXCEPTION_IPV6_MSG_UNKNOW,
	SFE_EXCEPTION_CONNECTION_INVALID,
	SFE_EXCEPTION_NOT_SUPPORT_BRIDGE,
	SFE_EXCEPTION_TCP_INVALID,
	SFE_EXCEPTION_PROTOCOL_NOT_SUPPORT,
	SFE_EXCEPTION_SRC_DEV_NOT_L3,
	SFE_EXCEPTION_DEST_DEV_NOT_L3,
	SFE_EXCEPTION_CFG_ERR,
	SFE_EXCEPTION_CREATE_FAILED,
	SFE_EXCEPTION_ENQUEUE_FAILED,
	SFE_EXCEPTION_NOT_SUPPORT_6RD,
	SFE_EXCEPTION_NO_SYNC_CB,
	SFE_EXCEPTION_MAX
} sfe_exception_t;

static char *sfe_exception_events_string[SFE_EXCEPTION_MAX] = {
	"IPV4_MSG_UNKNOW",
	"IPV6_MSG_UNKNOW",
	"CONNECTION_INVALID",
	"NOT_SUPPORT_BRIDGE",
	"TCP_INVALID",
	"PROTOCOL_NOT_SUPPORT",
	"SRC_DEV_NOT_L3",
	"DEST_DEV_NOT_L3",
	"CONFIG_ERROR",
	"CREATE_FAILED",
	"ENQUEUE_FAILED",
	"NOT_SUPPORT_6RD",
	"NO_SYNC_CB"
};
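
/*
 * Note: the strings above must stay in the same order as sfe_exception_t,
 * since sfe_get_exceptions() indexes this array with the enum value.
 */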

/*
 * Message type of queued response message
 */
typedef enum {
	SFE_MSG_TYPE_IPV4,
	SFE_MSG_TYPE_IPV6
} sfe_msg_types_t;

/*
 * Queued response message,
 *	will be sent back to caller in workqueue
 */
struct sfe_response_msg {
	struct list_head node;
	sfe_msg_types_t type;
	void *msg[0];
};

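/*
 * Response message lifecycle, sketched for reference: the create/destroy rule
 * handlers allocate a response with sfe_alloc_response_msg(), queue it with
 * sfe_enqueue_msg(), and the work item sfe_process_response_msg() later
 * delivers it to the registered ECM callback and frees it.
 */
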
/*
 * SFE context instance, private for SFE
 */
struct sfe_ctx_instance_internal {
	struct sfe_ctx_instance base;	/* Exported SFE context, is public to user of SFE */

	/*
	 * Control state.
	 */
	struct kobject *sys_sfe;	/* Sysfs linkage */

	struct list_head msg_queue;	/* Response message queue */
	spinlock_t lock;		/* Lock to protect message queue */

	struct work_struct work;	/* Work to send response message back to caller */

	sfe_ipv4_msg_callback_t __rcu ipv4_stats_sync_cb;	/* Callback to call to sync ipv4 statistics */
	void *ipv4_stats_sync_data;	/* Argument for above callback: ipv4_stats_sync_cb */

	sfe_ipv6_msg_callback_t __rcu ipv6_stats_sync_cb;	/* Callback to call to sync ipv6 statistics */
	void *ipv6_stats_sync_data;	/* Argument for above callback: ipv6_stats_sync_cb */

	u32 exceptions[SFE_EXCEPTION_MAX];	/* Statistics for exceptions */

	int32_t l2_feature_support;	/* L2 feature support */

};

static struct sfe_ctx_instance_internal __sfe_ctx;

/*
 * Convert public SFE context to internal context
 */
#define SFE_CTX_TO_PRIVATE(base) (struct sfe_ctx_instance_internal *)(base)
/*
 * Convert internal SFE context to public context
 */
#define SFE_CTX_TO_PUBLIC(intrv) (struct sfe_ctx_instance *)(intrv)

/*
 * sfe_incr_exceptions()
 *	Increase an exception counter.
 *
 * TODO: Merge sfe_ctx stats to ipv4 and ipv6 percpu stats.
 */
static inline void sfe_incr_exceptions(sfe_exception_t except)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);
	sfe_ctx->exceptions[except]++;
	spin_unlock_bh(&sfe_ctx->lock);
}

/*
 * sfe_dev_is_layer_3_interface()
 *	Check if a network device is an IPv4 or IPv6 layer-3 interface
 *
 * @param dev network device to check
 * @param check_v4 check for an IPv4 layer-3 interface (which has an IPv4 address) or an IPv6 layer-3 interface (which has an IPv6 address)
 */
inline bool sfe_dev_is_layer_3_interface(struct net_device *dev, bool check_v4)
{
	struct in_device *in4_dev;
	struct inet6_dev *in6_dev;

	BUG_ON(!dev);

	if (likely(check_v4)) {
		/*
		 * Does our input device support IPv4 processing?
		 */
		in4_dev = (struct in_device *)dev->ip_ptr;
		if (unlikely(!in4_dev)) {
			return false;
		}

		/*
		 * Does it have an IPv4 address?  If it doesn't, it could still be a MAP-T interface,
		 * otherwise we can't do anything interesting here!
		 */
		if (likely(in4_dev->ifa_list || (dev->priv_flags_ext & IFF_EXT_MAPT))) {
			return true;
		}
		return false;
	}

	/*
	 * Does our input device support IPv6 processing?
	 */
	in6_dev = (struct inet6_dev *)dev->ip6_ptr;
	if (unlikely(!in6_dev)) {
		return false;
	}

	/*
	 * Does it have an IPv6 address?  If it doesn't, it could still be a MAP-T interface,
	 * otherwise we can't do anything interesting here!
	 */
	if (likely(!list_empty(&in6_dev->addr_list) || (dev->priv_flags_ext & IFF_EXT_MAPT))) {
		return true;
	}

	return false;
}

/*
 * sfe_routed_dev_allow()
 *	Check whether routed acceleration is allowed on this device
 */
static bool sfe_routed_dev_allow(struct net_device *dev, bool is_routed, bool check_v4)
{
	if (!is_routed) {
		return true;
	}

	if (sfe_dev_is_layer_3_interface(dev, check_v4)) {
		return true;
	}

	/*
	 * GRE / VxLAN devices do not have an IP address, so the L3 interface
	 * check above will fail. Allow rule creation between a GRE / VxLAN
	 * device and the WAN device for routed flows.
	 */
	if (netif_is_vxlan(dev)) {
		return true;
	}

#ifdef SFE_GRE_TUN_ENABLE
	if (netif_is_gretap(dev) || netif_is_gre(dev)) {
		return true;
	}

	if (netif_is_ip6gre(dev) || netif_is_ip6gretap(dev)) {
		return true;
	}
#endif

	return false;
}

/* sfe_dev_has_hw_csum()
 *	Check whether the device supports hardware checksum offload
 */
bool sfe_dev_has_hw_csum(struct net_device *dev)
{
	if (netif_is_vxlan(dev)) {
		return false;
	}

#ifdef SFE_GRE_TUN_ENABLE
	if (netif_is_gre(dev) || netif_is_gretap(dev)) {
		return false;
	}

	if (netif_is_ip6gre(dev) || netif_is_ip6gretap(dev)) {
		return false;
	}
#endif
	/*
	 * MAP-E/DS-LITE and Tun6rd tunnels share the same routing netlink operations,
	 * whose kind is "ip6tnl". HW csum should be disabled for these tunnel devices.
	 */
	if (dev->rtnl_link_ops && !strcmp(dev->rtnl_link_ops->kind, "ip6tnl")) {
		return false;
	}

	return true;
}

/*
 * sfe_clean_response_msg_by_type()
 *	Clean response messages in the queue when ECM exits
 *
 * @param sfe_ctx SFE context
 * @param msg_type message type, ipv4 or ipv6
 */
static void sfe_clean_response_msg_by_type(struct sfe_ctx_instance_internal *sfe_ctx, sfe_msg_types_t msg_type)
{
	struct sfe_response_msg *response, *tmp;

	if (!sfe_ctx) {
		return;
	}

	spin_lock_bh(&sfe_ctx->lock);
	list_for_each_entry_safe(response, tmp, &sfe_ctx->msg_queue, node) {
		if (response->type == msg_type) {
			list_del(&response->node);
			/*
			 * Free response message
			 */
			kfree(response);
		}
	}
	spin_unlock_bh(&sfe_ctx->lock);

}

/*
 * sfe_process_response_msg()
 *	Send all pending response messages to ECM by calling the callback function included in each message
 *
 * @param work work structure
 */
static void sfe_process_response_msg(struct work_struct *work)
{
	struct sfe_ctx_instance_internal *sfe_ctx = container_of(work, struct sfe_ctx_instance_internal, work);
	struct sfe_response_msg *response;

	spin_lock_bh(&sfe_ctx->lock);
	while ((response = list_first_entry_or_null(&sfe_ctx->msg_queue, struct sfe_response_msg, node))) {
		list_del(&response->node);
		spin_unlock_bh(&sfe_ctx->lock);
		rcu_read_lock();

		/*
		 * Send response message back to caller
		 */
		if ((response->type == SFE_MSG_TYPE_IPV4) && !sfe_ipv4_stopped(sfe_ctx)) {
			struct sfe_ipv4_msg *msg = (struct sfe_ipv4_msg *)response->msg;
			sfe_ipv4_msg_callback_t callback = (sfe_ipv4_msg_callback_t)msg->cm.cb;
			if (callback) {
				callback((void *)msg->cm.app_data, msg);
			}
		} else if ((response->type == SFE_MSG_TYPE_IPV6) && !sfe_ipv6_stopped(sfe_ctx)) {
			struct sfe_ipv6_msg *msg = (struct sfe_ipv6_msg *)response->msg;
			sfe_ipv6_msg_callback_t callback = (sfe_ipv6_msg_callback_t)msg->cm.cb;
			if (callback) {
				callback((void *)msg->cm.app_data, msg);
			}
		}

		rcu_read_unlock();
		/*
		 * Free response message
		 */
		kfree(response);
		spin_lock_bh(&sfe_ctx->lock);
	}
	spin_unlock_bh(&sfe_ctx->lock);
}

/*
 * sfe_alloc_response_msg()
 *	Alloc and construct a new response message
 *
 * @param type message type
 * @param msg used to construct response message if not NULL
 *
 * @return !NULL, success; NULL, failed
 */
static struct sfe_response_msg *
sfe_alloc_response_msg(sfe_msg_types_t type, void *msg)
{
	struct sfe_response_msg *response;
	int size;

	switch (type) {
	case SFE_MSG_TYPE_IPV4:
		size = sizeof(struct sfe_ipv4_msg);
		break;
	case SFE_MSG_TYPE_IPV6:
		size = sizeof(struct sfe_ipv6_msg);
		break;
	default:
		DEBUG_ERROR("message type %d not supported\n", type);
		return NULL;
	}

	response = (struct sfe_response_msg *)kzalloc(sizeof(struct sfe_response_msg) + size, GFP_ATOMIC);
	if (!response) {
		DEBUG_ERROR("allocate memory failed\n");
		return NULL;
	}

	response->type = type;

	if (msg) {
		memcpy(response->msg, msg, size);
	}

	return response;
}

/*
 * sfe_fast_xmit_check()
 *	Check the fast transmit feasibility.
 *
 * This checks the per-direction attributes that prevent fast transmit:
 * xfrm packets, packets that come from a local socket or that need sk
 * validation on the skb.
 */
bool sfe_fast_xmit_check(struct sk_buff *skb, netdev_features_t features)
{

#ifdef CONFIG_SOCK_VALIDATE_XMIT
	if (skb->sk && sk_fullsock(skb->sk) && skb->sk->sk_validate_xmit_skb) {
		DEBUG_INFO("%px:need sk validation\n", skb);
		return false;
#ifdef CONFIG_TLS_DEVICE
	} else if (skb->decrypted) {
		DEBUG_INFO("%px:SK or decrypted\n", skb);
		return false;
#endif
	}
#endif
	if (skb_vlan_tag_present(skb)) {
		DEBUG_INFO("%px:Vlan is present\n", skb);
		return false;
	}

	if (netif_needs_gso(skb, features)) {
		DEBUG_INFO("%px:Need to be gso\n", skb);
		return false;
	}

	if (skb_sec_path(skb)) {
		DEBUG_INFO("%px:XFRM is present\n", skb);
		return false;
	}

	return true;
}

/*
 * sfe_enqueue_msg()
 *	Queue response message
 *
 * @param sfe_ctx SFE context
 * @param response response message to be queued
 */
static inline void sfe_enqueue_msg(struct sfe_ctx_instance_internal *sfe_ctx, struct sfe_response_msg *response)
{
	spin_lock_bh(&sfe_ctx->lock);
	list_add_tail(&response->node, &sfe_ctx->msg_queue);
	spin_unlock_bh(&sfe_ctx->lock);

	schedule_work(&sfe_ctx->work);
}

/*
 * sfe_cmn_msg_init()
 *	Initialize the common message structure.
 *
 * @param ncm message to init
 * @param if_num interface number related with this message
 * @param type message type
 * @param len message length
 * @param cb callback function to process the response of this message
 * @param app_data argument for above callback function
 */
static void sfe_cmn_msg_init(struct sfe_cmn_msg *ncm, u16 if_num, u32 type, u32 len, void *cb, void *app_data)
{
	ncm->interface = if_num;
	ncm->version = SFE_MESSAGE_VERSION;
	ncm->type = type;
	ncm->len = len;
	ncm->cb = (sfe_ptr_t)cb;
	ncm->app_data = (sfe_ptr_t)app_data;
}

/*
 * sfe_ipv4_stats_sync_callback()
 *	Synchronize a connection's state.
 *
 * @param sis SFE statistics from SFE core engine
 */
static void sfe_ipv4_stats_sync_callback(struct sfe_connection_sync *sis)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
	struct sfe_ipv4_msg msg;
	struct sfe_ipv4_conn_sync *sync_msg;
	sfe_ipv4_msg_callback_t sync_cb;

	rcu_read_lock();
	sync_cb = rcu_dereference(sfe_ctx->ipv4_stats_sync_cb);
	if (!sync_cb) {
		rcu_read_unlock();
		sfe_incr_exceptions(SFE_EXCEPTION_NO_SYNC_CB);
		return;
	}

	sync_msg = &msg.msg.conn_stats;

	memset(&msg, 0, sizeof(msg));
	sfe_cmn_msg_init(&msg.cm, 0, SFE_RX_CONN_STATS_SYNC_MSG,
			sizeof(struct sfe_ipv4_conn_sync), NULL, NULL);

	/*
	 * Fill connection specific information
	 */
	sync_msg->protocol = (u8)sis->protocol;
	sync_msg->flow_ip = sis->src_ip.ip;
	sync_msg->flow_ip_xlate = sis->src_ip_xlate.ip;
	sync_msg->flow_ident = sis->src_port;
	sync_msg->flow_ident_xlate = sis->src_port_xlate;

	sync_msg->return_ip = sis->dest_ip.ip;
	sync_msg->return_ip_xlate = sis->dest_ip_xlate.ip;
	sync_msg->return_ident = sis->dest_port;
	sync_msg->return_ident_xlate = sis->dest_port_xlate;

	/*
	 * Fill TCP protocol specific information
	 */
	if (sis->protocol == IPPROTO_TCP) {
		sync_msg->flow_max_window = sis->src_td_max_window;
		sync_msg->flow_end = sis->src_td_end;
		sync_msg->flow_max_end = sis->src_td_max_end;

		sync_msg->return_max_window = sis->dest_td_max_window;
		sync_msg->return_end = sis->dest_td_end;
		sync_msg->return_max_end = sis->dest_td_max_end;
	}

	/*
	 * Fill statistics information
	 */
	sync_msg->flow_rx_packet_count = sis->src_new_packet_count;
	sync_msg->flow_rx_byte_count = sis->src_new_byte_count;
	sync_msg->flow_tx_packet_count = sis->dest_new_packet_count;
	sync_msg->flow_tx_byte_count = sis->dest_new_byte_count;

	sync_msg->return_rx_packet_count = sis->dest_new_packet_count;
	sync_msg->return_rx_byte_count = sis->dest_new_byte_count;
	sync_msg->return_tx_packet_count = sis->src_new_packet_count;
	sync_msg->return_tx_byte_count = sis->src_new_byte_count;

	/*
	 * Fill expiration time to extend, in unit of msec
	 */
	sync_msg->inc_ticks = (((u32)sis->delta_jiffies) * MSEC_PER_SEC)/HZ;

	/*
	 * Fill other information
	 */
	switch (sis->reason) {
	case SFE_SYNC_REASON_DESTROY:
		sync_msg->reason = SFE_RULE_SYNC_REASON_DESTROY;
		break;
	case SFE_SYNC_REASON_FLUSH:
		sync_msg->reason = SFE_RULE_SYNC_REASON_FLUSH;
		break;
	default:
		sync_msg->reason = SFE_RULE_SYNC_REASON_STATS;
		break;
	}

	/*
	 * SFE sync calling is executed in a timer, so we can redirect it to ECM directly.
	 */
	sync_cb(sfe_ctx->ipv4_stats_sync_data, &msg);
	rcu_read_unlock();
}

/*
 * sfe_recv_parse_l2()
 *	Parse L2 headers
 *
 * Returns true if the packet is parsed and false otherwise.
 */
static bool sfe_recv_parse_l2(struct net_device *dev, struct sk_buff *skb, struct sfe_l2_info *l2_info)
{
	/*
	 * l2_hdr_offset will not change as we parse more L2.5 headers
	 * TODO: Move from storing offsets to storing pointers
	 */
	sfe_l2_hdr_offset_set(l2_info, ((skb->data - ETH_HLEN) - skb->head));

	/*
	 * VLAN parsing
	 */
	if (unlikely(!sfe_vlan_check_and_parse_tag(skb, l2_info))) {
		return false;
	}

	/*
	 * PPPoE parsing
	 */
	if (htons(ETH_P_PPP_SES) == skb->protocol) {
		/*
		 * Parse only PPPoE session packets
		 * skb->data is pointing to PPPoE hdr
		 */
		if (!sfe_pppoe_parse_hdr(skb, l2_info)) {

			/*
			 * For exception from PPPoE return from here without modifying the skb->data
			 * This includes non-IPv4/v6 cases also
			 */
			return false;
		}

		/*
		 * Pull by L2 header size
		 */
		__skb_pull(skb, sfe_l2_hdr_size_get(l2_info));
	}
	return true;
}

/*
 * sfe_recv_undo_parse_l2()
 */
static void sfe_recv_undo_parse_l2(struct net_device *dev, struct sk_buff *skb, struct sfe_l2_info *l2_info)
{
	/*
	 * PPPoE undo
	 */
	__skb_push(skb, sfe_l2_hdr_size_get(l2_info));

	/*
	 * VLAN undo
	 */
	sfe_vlan_undo_parse(skb, l2_info);
}

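/*
 * Minimal usage sketch of the parse/undo pair above (see sfe_recv() below for
 * the real flow): the L2.5 headers are stripped before the IPv4/IPv6 handlers
 * run, and pushed back if the packet has to be returned to the Linux stack.
 *
 *	if (sfe_recv_parse_l2(dev, skb, &l2_info)) {
 *		ret = sfe_ipv4_recv(dev, skb, &l2_info, false);
 *		if (!ret) {
 *			sfe_recv_undo_parse_l2(dev, skb, &l2_info);
 *		}
 *	}
 */
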
/*
 * sfe_create_ipv4_rule_msg()
 *	Convert create message format from ecm to sfe
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv4 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_create_ipv4_rule_msg(struct sfe_ctx_instance_internal *sfe_ctx, struct sfe_ipv4_msg *msg)
{
	struct net_device *src_dev = NULL;
	struct net_device *dest_dev = NULL;
	struct sfe_response_msg *response;
	enum sfe_cmn_response ret = SFE_TX_SUCCESS;
	bool is_routed = true;
	bool cfg_err;

	response = sfe_alloc_response_msg(SFE_MSG_TYPE_IPV4, msg);
	if (!response) {
		sfe_incr_exceptions(SFE_EXCEPTION_ENQUEUE_FAILED);
		return SFE_TX_FAILURE_QUEUE;
	}

	if (!(msg->msg.rule_create.valid_flags & SFE_RULE_CREATE_CONN_VALID)) {
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CONNECTION_INVALID);
		goto failed_ret;
	}

	switch (msg->msg.rule_create.tuple.protocol) {
	case IPPROTO_TCP:
		if (!(msg->msg.rule_create.valid_flags & SFE_RULE_CREATE_TCP_VALID)) {
			ret = SFE_CMN_RESPONSE_EMSG;
			sfe_incr_exceptions(SFE_EXCEPTION_TCP_INVALID);
			goto failed_ret;
		}

	case IPPROTO_UDP:
		break;

	case IPPROTO_GRE:
		break;

	default:
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_PROTOCOL_NOT_SUPPORT);
		goto failed_ret;
	}

	/*
	 * Bridge flows are accelerated if L2 feature is enabled.
	 */
	if (msg->msg.rule_create.rule_flags & SFE_RULE_CREATE_FLAG_BRIDGE_FLOW) {
		if (!sfe_is_l2_feature_enabled()) {
			ret = SFE_CMN_RESPONSE_EINTERFACE;
			sfe_incr_exceptions(SFE_EXCEPTION_NOT_SUPPORT_BRIDGE);
			goto failed_ret;
		}

		is_routed = false;
	}

	/*
	 * Does our input device support IP processing?
	 */
	src_dev = dev_get_by_index(&init_net, msg->msg.rule_create.conn_rule.flow_top_interface_num);
	if (!src_dev || !sfe_routed_dev_allow(src_dev, is_routed, true)) {
		ret = SFE_CMN_RESPONSE_EINTERFACE;
		sfe_incr_exceptions(SFE_EXCEPTION_SRC_DEV_NOT_L3);
		goto failed_ret;
	}

	/*
	 * Check whether L2 feature is disabled and rule flag is configured to use bottom interface
	 */
	cfg_err = (msg->msg.rule_create.rule_flags & SFE_RULE_CREATE_FLAG_USE_FLOW_BOTTOM_INTERFACE) && !sfe_is_l2_feature_enabled();
	if (cfg_err) {
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CFG_ERR);
		goto failed_ret;
	}

	/*
	 * Does our output device support IP processing?
	 */
	dest_dev = dev_get_by_index(&init_net, msg->msg.rule_create.conn_rule.return_top_interface_num);
	if (!dest_dev || !sfe_routed_dev_allow(dest_dev, is_routed, true)) {
		ret = SFE_CMN_RESPONSE_EINTERFACE;
		sfe_incr_exceptions(SFE_EXCEPTION_DEST_DEV_NOT_L3);
		goto failed_ret;
	}

	/*
	 * Check whether L2 feature is disabled and rule flag is configured to use bottom interface
	 */
	cfg_err = (msg->msg.rule_create.rule_flags & SFE_RULE_CREATE_FLAG_USE_RETURN_BOTTOM_INTERFACE) && !sfe_is_l2_feature_enabled();
	if (cfg_err) {
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CFG_ERR);
		goto failed_ret;
	}

	if (!sfe_ipv4_create_rule(&msg->msg.rule_create)) {
		/* success */
		ret = SFE_CMN_RESPONSE_ACK;
	} else {
		/* Failed */
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CREATE_FAILED);
	}

	/*
	 * Fall through
	 */
failed_ret:
	if (src_dev) {
		dev_put(src_dev);
	}

	if (dest_dev) {
		dev_put(dest_dev);
	}

	/*
	 * Try to queue response message
	 */
	((struct sfe_ipv4_msg *)response->msg)->cm.response = msg->cm.response = ret;
	sfe_enqueue_msg(sfe_ctx, response);

	return SFE_TX_SUCCESS;
}

/*
 * sfe_destroy_ipv4_rule_msg()
 *	Convert destroy message format from ecm to sfe
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv4 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_destroy_ipv4_rule_msg(struct sfe_ctx_instance_internal *sfe_ctx, struct sfe_ipv4_msg *msg)
{
	struct sfe_response_msg *response;

	response = sfe_alloc_response_msg(SFE_MSG_TYPE_IPV4, msg);
	if (!response) {
		sfe_incr_exceptions(SFE_EXCEPTION_ENQUEUE_FAILED);
		return SFE_TX_FAILURE_QUEUE;
	}

	sfe_ipv4_destroy_rule(&msg->msg.rule_destroy);

	/*
	 * Try to queue response message
	 */
	((struct sfe_ipv4_msg *)response->msg)->cm.response = msg->cm.response = SFE_CMN_RESPONSE_ACK;
	sfe_enqueue_msg(sfe_ctx, response);

	return SFE_TX_SUCCESS;
}

/*
 * sfe_ipv4_tx()
 *	Transmit an IPv4 message to the sfe
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv4 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_ipv4_tx(struct sfe_ctx_instance *sfe_ctx, struct sfe_ipv4_msg *msg)
{
	switch (msg->cm.type) {
	case SFE_TX_CREATE_RULE_MSG:
		return sfe_create_ipv4_rule_msg(SFE_CTX_TO_PRIVATE(sfe_ctx), msg);
	case SFE_TX_DESTROY_RULE_MSG:
		return sfe_destroy_ipv4_rule_msg(SFE_CTX_TO_PRIVATE(sfe_ctx), msg);
	default:
		sfe_incr_exceptions(SFE_EXCEPTION_IPV4_MSG_UNKNOW);
		return SFE_TX_FAILURE_NOT_ENABLED;
	}
}
EXPORT_SYMBOL(sfe_ipv4_tx);

/*
 * sfe_ipv4_msg_init()
 *	Initialize IPv4 message.
 */
void sfe_ipv4_msg_init(struct sfe_ipv4_msg *nim, u16 if_num, u32 type, u32 len,
			sfe_ipv4_msg_callback_t cb, void *app_data)
{
	sfe_cmn_msg_init(&nim->cm, if_num, type, len, (void *)cb, app_data);
}
EXPORT_SYMBOL(sfe_ipv4_msg_init);
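
/*
 * Usage sketch, for reference only: a caller pushing a create-rule message
 * would typically initialize it with sfe_ipv4_msg_init() and transmit it with
 * sfe_ipv4_tx(). The rule-create structure name and the "my_cb"/"my_data"/"ctx"
 * identifiers below are assumptions following sfe_api.h, not symbols defined
 * in this file:
 *
 *	struct sfe_ipv4_msg nim;
 *
 *	sfe_ipv4_msg_init(&nim, 0, SFE_TX_CREATE_RULE_MSG,
 *			  sizeof(struct sfe_ipv4_rule_create_msg), my_cb, my_data);
 *	sfe_ipv4_tx(ctx, &nim);
 */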

/*
 * sfe_ipv4_max_conn_count()
 *	Return the maximum number of IPv4 connections that SFE supports
 */
int sfe_ipv4_max_conn_count(void)
{
	return max_ipv4_conn;
}
EXPORT_SYMBOL(sfe_ipv4_max_conn_count);

/*
 * sfe_ipv4_notify_register()
 *	Register a notifier callback for IPv4 messages from SFE
 *
 * @param cb The callback pointer
 * @param app_data The application context for this message
 *
 * @return struct sfe_ctx_instance * The SFE context
 */
struct sfe_ctx_instance *sfe_ipv4_notify_register(sfe_ipv4_msg_callback_t cb, void *app_data)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);
	/*
	 * Hook the shortcut sync callback.
	 */
	if (cb && !sfe_ctx->ipv4_stats_sync_cb) {
		sfe_ipv4_register_sync_rule_callback(sfe_ipv4_stats_sync_callback);
	}

	rcu_assign_pointer(sfe_ctx->ipv4_stats_sync_cb, cb);
	sfe_ctx->ipv4_stats_sync_data = app_data;

	spin_unlock_bh(&sfe_ctx->lock);

	return SFE_CTX_TO_PUBLIC(sfe_ctx);
}
EXPORT_SYMBOL(sfe_ipv4_notify_register);
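
/*
 * Registration sketch, for reference only: a client such as ECM provides a
 * callback matching sfe_ipv4_msg_callback_t; "my_ipv4_sync" and "my_data"
 * below are placeholders, not symbols defined in SFE.
 *
 *	static void my_ipv4_sync(void *app_data, struct sfe_ipv4_msg *msg);
 *	...
 *	struct sfe_ctx_instance *ctx = sfe_ipv4_notify_register(my_ipv4_sync, my_data);
 */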

/*
 * sfe_ipv4_notify_unregister()
 *	Un-register a notifier callback for IPv4 messages from SFE
 */
void sfe_ipv4_notify_unregister(void)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);
	/*
	 * Unregister our sync callback.
	 */
	if (sfe_ctx->ipv4_stats_sync_cb) {
		sfe_ipv4_register_sync_rule_callback(NULL);
		rcu_assign_pointer(sfe_ctx->ipv4_stats_sync_cb, NULL);
		sfe_ctx->ipv4_stats_sync_data = NULL;
	}
	spin_unlock_bh(&sfe_ctx->lock);

	sfe_clean_response_msg_by_type(sfe_ctx, SFE_MSG_TYPE_IPV4);

	return;
}
EXPORT_SYMBOL(sfe_ipv4_notify_unregister);

/*
 * sfe_ipv6_stats_sync_callback()
 *	Synchronize a connection's state.
 */
static void sfe_ipv6_stats_sync_callback(struct sfe_connection_sync *sis)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
	struct sfe_ipv6_msg msg;
	struct sfe_ipv6_conn_sync *sync_msg;
	sfe_ipv6_msg_callback_t sync_cb;

	rcu_read_lock();
	sync_cb = rcu_dereference(sfe_ctx->ipv6_stats_sync_cb);
	if (!sync_cb) {
		rcu_read_unlock();
		sfe_incr_exceptions(SFE_EXCEPTION_NO_SYNC_CB);
		return;
	}

	sync_msg = &msg.msg.conn_stats;

	memset(&msg, 0, sizeof(msg));
	sfe_cmn_msg_init(&msg.cm, 0, SFE_RX_CONN_STATS_SYNC_MSG,
			sizeof(struct sfe_ipv6_conn_sync), NULL, NULL);

	/*
	 * Fill connection specific information
	 */
	sync_msg->protocol = (u8)sis->protocol;
	sfe_ipv6_addr_copy(sis->src_ip.ip6, sync_msg->flow_ip);
	sync_msg->flow_ident = sis->src_port;

	sfe_ipv6_addr_copy(sis->dest_ip.ip6, sync_msg->return_ip);
	sync_msg->return_ident = sis->dest_port;

	/*
	 * Fill TCP protocol specific information
	 */
	if (sis->protocol == IPPROTO_TCP) {
		sync_msg->flow_max_window = sis->src_td_max_window;
		sync_msg->flow_end = sis->src_td_end;
		sync_msg->flow_max_end = sis->src_td_max_end;

		sync_msg->return_max_window = sis->dest_td_max_window;
		sync_msg->return_end = sis->dest_td_end;
		sync_msg->return_max_end = sis->dest_td_max_end;
	}

	/*
	 * Fill statistics information
	 */
	sync_msg->flow_rx_packet_count = sis->src_new_packet_count;
	sync_msg->flow_rx_byte_count = sis->src_new_byte_count;
	sync_msg->flow_tx_packet_count = sis->dest_new_packet_count;
	sync_msg->flow_tx_byte_count = sis->dest_new_byte_count;

	sync_msg->return_rx_packet_count = sis->dest_new_packet_count;
	sync_msg->return_rx_byte_count = sis->dest_new_byte_count;
	sync_msg->return_tx_packet_count = sis->src_new_packet_count;
	sync_msg->return_tx_byte_count = sis->src_new_byte_count;

	/*
	 * Fill expiration time to extend, in unit of msec
	 */
	sync_msg->inc_ticks = (((u32)sis->delta_jiffies) * MSEC_PER_SEC)/HZ;

	/*
	 * Fill other information
	 */
	switch (sis->reason) {
	case SFE_SYNC_REASON_DESTROY:
		sync_msg->reason = SFE_RULE_SYNC_REASON_DESTROY;
		break;
	case SFE_SYNC_REASON_FLUSH:
		sync_msg->reason = SFE_RULE_SYNC_REASON_FLUSH;
		break;
	default:
		sync_msg->reason = SFE_RULE_SYNC_REASON_STATS;
		break;
	}

	/*
	 * SFE sync calling is executed in a timer, so we can redirect it to ECM directly.
	 */
	sync_cb(sfe_ctx->ipv6_stats_sync_data, &msg);
	rcu_read_unlock();
}

/*
 * sfe_create_ipv6_rule_msg()
 *	Convert create message format from ecm to sfe
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv6 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_create_ipv6_rule_msg(struct sfe_ctx_instance_internal *sfe_ctx, struct sfe_ipv6_msg *msg)
{
	struct net_device *src_dev = NULL;
	struct net_device *dest_dev = NULL;
	struct sfe_response_msg *response;
	enum sfe_cmn_response ret = SFE_TX_SUCCESS;
	bool is_routed = true;
	bool cfg_err;

	response = sfe_alloc_response_msg(SFE_MSG_TYPE_IPV6, msg);
	if (!response) {
		sfe_incr_exceptions(SFE_EXCEPTION_ENQUEUE_FAILED);
		return SFE_TX_FAILURE_QUEUE;
	}

	if (!(msg->msg.rule_create.valid_flags & SFE_RULE_CREATE_CONN_VALID)) {
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CONNECTION_INVALID);
		goto failed_ret;
	}

	/*
	 * Bridge flows are accelerated if L2 feature is enabled.
	 */
	if (msg->msg.rule_create.rule_flags & SFE_RULE_CREATE_FLAG_BRIDGE_FLOW) {
		if (!sfe_is_l2_feature_enabled()) {
			ret = SFE_CMN_RESPONSE_EINTERFACE;
			sfe_incr_exceptions(SFE_EXCEPTION_NOT_SUPPORT_BRIDGE);
			goto failed_ret;
		}
		is_routed = false;
	}

	switch (msg->msg.rule_create.tuple.protocol) {

	case IPPROTO_TCP:
		if (!(msg->msg.rule_create.valid_flags & SFE_RULE_CREATE_TCP_VALID)) {
			ret = SFE_CMN_RESPONSE_EMSG;
			sfe_incr_exceptions(SFE_EXCEPTION_TCP_INVALID);
			goto failed_ret;
		}

		break;

	case IPPROTO_UDP:
		break;

	case IPPROTO_IPIP:
		break;

	case IPPROTO_GRE:
		break;

	default:
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_PROTOCOL_NOT_SUPPORT);
		goto failed_ret;
	}

	/*
	 * Does our input device support IP processing?
	 */
	src_dev = dev_get_by_index(&init_net, msg->msg.rule_create.conn_rule.flow_top_interface_num);
	if (!src_dev || !sfe_routed_dev_allow(src_dev, is_routed, false)) {
		ret = SFE_CMN_RESPONSE_EINTERFACE;
		sfe_incr_exceptions(SFE_EXCEPTION_SRC_DEV_NOT_L3);
		goto failed_ret;
	}

	/*
	 * Check whether L2 feature is disabled and rule flag is configured to use bottom interface
	 */
	cfg_err = (msg->msg.rule_create.rule_flags & SFE_RULE_CREATE_FLAG_USE_FLOW_BOTTOM_INTERFACE) && !sfe_is_l2_feature_enabled();
	if (cfg_err) {
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CFG_ERR);
		goto failed_ret;
	}

	/*
	 * Does our output device support IP processing?
	 */
	dest_dev = dev_get_by_index(&init_net, msg->msg.rule_create.conn_rule.return_top_interface_num);
	if (!dest_dev || !sfe_routed_dev_allow(dest_dev, is_routed, false)) {
		ret = SFE_CMN_RESPONSE_EINTERFACE;
		sfe_incr_exceptions(SFE_EXCEPTION_DEST_DEV_NOT_L3);
		goto failed_ret;
	}

	/*
	 * Check whether L2 feature is disabled and rule flag is configured to use bottom interface
	 */
	cfg_err = (msg->msg.rule_create.rule_flags & SFE_RULE_CREATE_FLAG_USE_RETURN_BOTTOM_INTERFACE) && !sfe_is_l2_feature_enabled();
	if (cfg_err) {
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CFG_ERR);
		goto failed_ret;
	}

	if (!sfe_ipv6_create_rule(&msg->msg.rule_create)) {
		/* success */
		ret = SFE_CMN_RESPONSE_ACK;
	} else {
		/* Failed */
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CREATE_FAILED);
	}

	/*
	 * Fall through
	 */
failed_ret:
	if (src_dev) {
		dev_put(src_dev);
	}

	if (dest_dev) {
		dev_put(dest_dev);
	}

	/*
	 * Try to queue response message
	 */
	((struct sfe_ipv6_msg *)response->msg)->cm.response = msg->cm.response = ret;
	sfe_enqueue_msg(sfe_ctx, response);

	return SFE_TX_SUCCESS;
}

/*
 * sfe_destroy_ipv6_rule_msg()
 *	Convert destroy message format from ecm to sfe
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv6 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_destroy_ipv6_rule_msg(struct sfe_ctx_instance_internal *sfe_ctx, struct sfe_ipv6_msg *msg)
{
	struct sfe_response_msg *response;

	response = sfe_alloc_response_msg(SFE_MSG_TYPE_IPV6, msg);
	if (!response) {
		sfe_incr_exceptions(SFE_EXCEPTION_ENQUEUE_FAILED);
		return SFE_TX_FAILURE_QUEUE;
	}

	sfe_ipv6_destroy_rule(&msg->msg.rule_destroy);

	/*
	 * Try to queue response message
	 */
	((struct sfe_ipv6_msg *)response->msg)->cm.response = msg->cm.response = SFE_CMN_RESPONSE_ACK;
	sfe_enqueue_msg(sfe_ctx, response);

	return SFE_TX_SUCCESS;
}

/*
 * sfe_ipv6_tx()
 *	Transmit an IPv6 message to the sfe
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv6 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_ipv6_tx(struct sfe_ctx_instance *sfe_ctx, struct sfe_ipv6_msg *msg)
{
	switch (msg->cm.type) {
	case SFE_TX_CREATE_RULE_MSG:
		return sfe_create_ipv6_rule_msg(SFE_CTX_TO_PRIVATE(sfe_ctx), msg);
	case SFE_TX_DESTROY_RULE_MSG:
		return sfe_destroy_ipv6_rule_msg(SFE_CTX_TO_PRIVATE(sfe_ctx), msg);
	default:
		sfe_incr_exceptions(SFE_EXCEPTION_IPV6_MSG_UNKNOW);
		return SFE_TX_FAILURE_NOT_ENABLED;
	}
}
EXPORT_SYMBOL(sfe_ipv6_tx);

/*
 * sfe_ipv6_msg_init()
 *	Initialize IPv6 message.
 */
void sfe_ipv6_msg_init(struct sfe_ipv6_msg *nim, u16 if_num, u32 type, u32 len,
			sfe_ipv6_msg_callback_t cb, void *app_data)
{
	sfe_cmn_msg_init(&nim->cm, if_num, type, len, (void *)cb, app_data);
}
EXPORT_SYMBOL(sfe_ipv6_msg_init);

/*
 * sfe_ipv6_max_conn_count()
 *	Return the maximum number of IPv6 connections that SFE supports
 */
int sfe_ipv6_max_conn_count(void)
{
	return max_ipv6_conn;
}
EXPORT_SYMBOL(sfe_ipv6_max_conn_count);

/*
 * sfe_ipv6_notify_register()
 *	Register a notifier callback for IPv6 messages from SFE
 *
 * @param cb The callback pointer
 * @param app_data The application context for this message
 *
 * @return struct sfe_ctx_instance * The SFE context
 */
struct sfe_ctx_instance *sfe_ipv6_notify_register(sfe_ipv6_msg_callback_t cb, void *app_data)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);
	/*
	 * Hook the shortcut sync callback.
	 */
	if (cb && !sfe_ctx->ipv6_stats_sync_cb) {
		sfe_ipv6_register_sync_rule_callback(sfe_ipv6_stats_sync_callback);
	}

	rcu_assign_pointer(sfe_ctx->ipv6_stats_sync_cb, cb);
	sfe_ctx->ipv6_stats_sync_data = app_data;

	spin_unlock_bh(&sfe_ctx->lock);

	return SFE_CTX_TO_PUBLIC(sfe_ctx);
}
EXPORT_SYMBOL(sfe_ipv6_notify_register);

/*
 * sfe_ipv6_notify_unregister()
 *	Un-register a notifier callback for IPv6 messages from SFE
 */
void sfe_ipv6_notify_unregister(void)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);
	/*
	 * Unregister our sync callback.
	 */
	if (sfe_ctx->ipv6_stats_sync_cb) {
		sfe_ipv6_register_sync_rule_callback(NULL);
		rcu_assign_pointer(sfe_ctx->ipv6_stats_sync_cb, NULL);
		sfe_ctx->ipv6_stats_sync_data = NULL;
	}
	spin_unlock_bh(&sfe_ctx->lock);

	sfe_clean_response_msg_by_type(sfe_ctx, SFE_MSG_TYPE_IPV6);

	return;
}
EXPORT_SYMBOL(sfe_ipv6_notify_unregister);

/*
 * sfe_tun6rd_tx()
 *	Transmit a tun6rd message to the sfe engine
 */
sfe_tx_status_t sfe_tun6rd_tx(struct sfe_ctx_instance *sfe_ctx, struct sfe_tun6rd_msg *msg)
{
	sfe_incr_exceptions(SFE_EXCEPTION_NOT_SUPPORT_6RD);
	return SFE_TX_FAILURE_NOT_ENABLED;
}
EXPORT_SYMBOL(sfe_tun6rd_tx);

/*
 * sfe_tun6rd_msg_init()
 *	Initialize sfe_tun6rd msg.
 */
void sfe_tun6rd_msg_init(struct sfe_tun6rd_msg *ncm, u16 if_num, u32 type, u32 len, void *cb, void *app_data)
{
	sfe_cmn_msg_init(&ncm->cm, if_num, type, len, cb, app_data);
}
EXPORT_SYMBOL(sfe_tun6rd_msg_init);

/*
 * sfe_recv()
 *	Handle packet receives.
 *
 * Returns 1 if the packet is forwarded or 0 if it isn't.
 */
int sfe_recv(struct sk_buff *skb)
{
	struct net_device *dev;
	struct sfe_l2_info l2_info;
	int ret;

	/*
	 * We know that for the vast majority of packets we need the transport
	 * layer header so we may as well start to fetch it now!
	 */
	prefetch(skb->data + 32);
	barrier();

	dev = skb->dev;

	/*
	 * Setting parse flags to 0 since l2_info is passed for non L2.5 header case as well
	 */
	l2_info.parse_flags = 0;
	l2_info.l2_hdr_size = 0;
	l2_info.vlan_hdr_cnt = 0;

#ifdef CONFIG_NET_CLS_ACT
	/*
	 * If an ingress Qdisc is configured and the packet has not been processed by it yet,
	 * we can not accelerate this packet.
	 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0))
	if (dev->ingress_queue && !(skb->tc_verd & TC_NCLS)) {
		return 0;
	}
#else
	if (rcu_access_pointer(dev->miniq_ingress) && !skb->tc_skip_classify) {
		return 0;
	}
#endif
#endif

	/*
	 * If l2_feature is enabled, we need not check if src dev is L3 interface since bridge flow offload is supported.
	 * If l2_feature is disabled, then we make sure src dev is L3 interface to avoid cost of rule lookup for L2 flows
	 */
	switch (ntohs(skb->protocol)) {
	case ETH_P_IP:
		if (likely(sfe_is_l2_feature_enabled()) || sfe_dev_is_layer_3_interface(dev, true)) {
			return sfe_ipv4_recv(dev, skb, &l2_info, false);
		}

		DEBUG_TRACE("No IPv4 address for device: %s skb=%px\n", dev->name, skb);
		return 0;

	case ETH_P_IPV6:
		if (likely(sfe_is_l2_feature_enabled()) || sfe_dev_is_layer_3_interface(dev, false)) {
			return sfe_ipv6_recv(dev, skb, &l2_info, false);
		}

		DEBUG_TRACE("No IPv6 address for device: %s skb=%px\n", dev->name, skb);
		return 0;

	default:
		break;
	}

	/*
	 * Stop L2 processing if L2 feature is disabled.
	 */
	if (!sfe_is_l2_feature_enabled()) {
		DEBUG_TRACE("Unsupported protocol %#x %s (L2 feature is disabled) skb=%px\n",
				ntohs(skb->protocol), dev->name, skb);
		return 0;
	}

	/*
	 * Parse the L2 headers to find the L3 protocol and the L2 header offset
	 */
	if (unlikely(!sfe_recv_parse_l2(dev, skb, &l2_info))) {
		DEBUG_TRACE("%px: Invalid L2.5 header format with protocol : %x\n", skb, ntohs(skb->protocol));
		goto send_to_linux;
	}

	/*
	 * Protocol in l2_info is expected to be in host byte order.
	 * PPPoE is doing it in the sfe_pppoe_parse_hdr()
	 */
	if (likely(l2_info.protocol == ETH_P_IP)) {
		ret = sfe_ipv4_recv(dev, skb, &l2_info, false);
		if (unlikely(!ret)) {
			goto send_to_linux;
		}
		return ret;
	}

	if (likely(l2_info.protocol == ETH_P_IPV6)) {
		ret = sfe_ipv6_recv(dev, skb, &l2_info, false);
		if (unlikely(!ret)) {
			goto send_to_linux;
		}
		return ret;
	}

	DEBUG_TRACE("Non-IP(%x) %s skb=%px skb_vlan:%x/%x/%x skb_proto=%x\n",
			l2_info.protocol, dev->name, skb,
			ntohs(skb->vlan_proto), skb->vlan_tci, skb_vlan_tag_present(skb),
			htons(skb->protocol));

send_to_linux:
	/*
	 * Push the data back before sending to linux if -
	 * a. There is any exception from IPV4/V6
	 * b. If the next protocol is neither IPV4 nor IPV6
	 */
	sfe_recv_undo_parse_l2(dev, skb, &l2_info);

	return 0;
}

/*
 * sfe_get_exceptions()
 *	Dump exception counters
 */
static ssize_t sfe_get_exceptions(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	int idx, len;
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);
	for (len = 0, idx = 0; idx < SFE_EXCEPTION_MAX; idx++) {
		if (sfe_ctx->exceptions[idx]) {
			len += snprintf(buf + len, (ssize_t)(PAGE_SIZE - len), "%s = %d\n", sfe_exception_events_string[idx], sfe_ctx->exceptions[idx]);
		}
	}
	spin_unlock_bh(&sfe_ctx->lock);

	return len;
}

/*
 * sysfs attributes.
 */
static const struct device_attribute sfe_exceptions_attr =
	__ATTR(exceptions, S_IRUGO, sfe_get_exceptions, NULL);
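
/*
 * The non-zero exception counters can be read from user space once
 * sfe_init_if() has created the "sfe" kobject, e.g.:
 *
 *	cat /sys/sfe/exceptions
 */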

/*
 * sfe_is_l2_feature_enabled()
 *	Check whether the L2 feature flag (VLAN, PPPoE, BRIDGE and tunnels) is enabled or not.
 *
 * 32bit read is atomic. No need of locks.
 */
bool sfe_is_l2_feature_enabled()
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
	return (sfe_ctx->l2_feature_support == 1);
}
EXPORT_SYMBOL(sfe_is_l2_feature_enabled);

/*
 * sfe_get_l2_feature()
 *	Report whether the L2 feature is enabled or disabled
 */
ssize_t sfe_get_l2_feature(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
	ssize_t len;

	spin_lock_bh(&sfe_ctx->lock);
	len = snprintf(buf, (ssize_t)(PAGE_SIZE), "L2 feature is %s\n", sfe_ctx->l2_feature_support ? "enabled" : "disabled");
	spin_unlock_bh(&sfe_ctx->lock);
	return len;
}

/*
 * sfe_set_l2_feature()
 *	Enable or disable the L2 feature flag.
 */
ssize_t sfe_set_l2_feature(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	unsigned long val;
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
	int ret;

	ret = sscanf(buf, "%lu", &val);
	if (ret != 1) {
		pr_err("Wrong input, %s\n", buf);
		return -EINVAL;
	}

	if (val != 1 && val != 0) {
		pr_err("Input should be either 1 or 0, (%s)\n", buf);
		return -EINVAL;
	}

	spin_lock_bh(&sfe_ctx->lock);

	if (sfe_ctx->l2_feature_support && val) {
		spin_unlock_bh(&sfe_ctx->lock);
		pr_err("L2 feature is already enabled\n");
		return -EINVAL;
	}

	if (!sfe_ctx->l2_feature_support && !val) {
		spin_unlock_bh(&sfe_ctx->lock);
		pr_err("L2 feature is already disabled\n");
		return -EINVAL;
	}

	sfe_ctx->l2_feature_support = val;
	spin_unlock_bh(&sfe_ctx->lock);

	return count;
}

static const struct device_attribute sfe_l2_feature_attr =
	__ATTR(l2_feature, 0644, sfe_get_l2_feature, sfe_set_l2_feature);
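
/*
 * The flag can be toggled and queried from user space, e.g.:
 *
 *	echo 1 > /sys/sfe/l2_feature
 *	cat /sys/sfe/l2_feature
 *
 * Note that writing the value that is already set is rejected with -EINVAL
 * by sfe_set_l2_feature() above.
 */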

/*
 * sfe_init_if()
 */
int sfe_init_if(void)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
	int result = -1;

	/*
	 * L2 feature is enabled by default
	 */
	sfe_ctx->l2_feature_support = 1;

	/*
	 * Create sys/sfe
	 */
	sfe_ctx->sys_sfe = kobject_create_and_add("sfe", NULL);
	if (!sfe_ctx->sys_sfe) {
		DEBUG_ERROR("failed to register sfe\n");
		goto exit1;
	}

	/*
	 * Create sys/sfe/exceptions
	 */
	result = sysfs_create_file(sfe_ctx->sys_sfe, &sfe_exceptions_attr.attr);
	if (result) {
		DEBUG_ERROR("failed to register exceptions file: %d\n", result);
		goto exit2;
	}

	result = sysfs_create_file(sfe_ctx->sys_sfe, &sfe_l2_feature_attr.attr);
	if (result) {
		DEBUG_ERROR("failed to register L2 feature flag sysfs file: %d\n", result);
		goto exit2;
	}

	spin_lock_init(&sfe_ctx->lock);

	INIT_LIST_HEAD(&sfe_ctx->msg_queue);
	INIT_WORK(&sfe_ctx->work, sfe_process_response_msg);

	/*
	 * Hook the receive path in the network stack.
	 */
	BUG_ON(athrs_fast_nat_recv);
	RCU_INIT_POINTER(athrs_fast_nat_recv, sfe_recv);

	return 0;
exit2:
	kobject_put(sfe_ctx->sys_sfe);
exit1:
	return result;
}

/*
 * sfe_exit_if()
 */
void sfe_exit_if(void)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	/*
	 * Unregister our receive callback.
	 */
	RCU_INIT_POINTER(athrs_fast_nat_recv, NULL);

	/*
	 * Wait for all callbacks to complete.
	 */
	rcu_barrier();

	/*
	 * Destroy all connections.
	 */
	sfe_ipv4_destroy_all_rules_for_dev(NULL);
	sfe_ipv6_destroy_all_rules_for_dev(NULL);

	/*
	 * Stop the work queue and flush all pending messages in the queue
	 */
	cancel_work_sync(&sfe_ctx->work);
	sfe_process_response_msg(&sfe_ctx->work);

	/*
	 * Unregister our sync callback.
	 */
	sfe_ipv4_notify_unregister();
	sfe_ipv6_notify_unregister();

	kobject_put(sfe_ctx->sys_sfe);

	return;
}