/*
 * sfe.c
 *	API for shortcut forwarding engine.
 *
 * Copyright (c) 2015,2016, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/module.h>
#include <linux/version.h>
#include <linux/sysfs.h>
#include <linux/skbuff.h>
#include <net/addrconf.h>
#include <linux/inetdevice.h>
#include <net/pkt_sched.h>

#include "sfe_debug.h"
#include "sfe_api.h"
#include "sfe.h"

extern int max_ipv4_conn;
extern int max_ipv6_conn;

#define SFE_MESSAGE_VERSION 0x1
#define sfe_ipv6_addr_copy(src, dest) memcpy((void *)(dest), (void *)(src), 16)
#define sfe_ipv4_stopped(CTX) (rcu_dereference((CTX)->ipv4_stats_sync_cb) == NULL)
#define sfe_ipv6_stopped(CTX) (rcu_dereference((CTX)->ipv6_stats_sync_cb) == NULL)

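/*
 * Note: sfe_ipv4_stopped() and sfe_ipv6_stopped() use rcu_dereference(), so
 * they must only be evaluated inside an RCU read-side critical section, as
 * sfe_process_response_msg() does below.
 */
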
typedef enum sfe_exception {
	SFE_EXCEPTION_IPV4_MSG_UNKNOW,
	SFE_EXCEPTION_IPV6_MSG_UNKNOW,
	SFE_EXCEPTION_CONNECTION_INVALID,
	SFE_EXCEPTION_NOT_SUPPORT_BRIDGE,
	SFE_EXCEPTION_TCP_INVALID,
	SFE_EXCEPTION_PROTOCOL_NOT_SUPPORT,
	SFE_EXCEPTION_SRC_DEV_NOT_L3,
	SFE_EXCEPTION_DEST_DEV_NOT_L3,
	SFE_EXCEPTION_CREATE_FAILED,
	SFE_EXCEPTION_ENQUEUE_FAILED,
	SFE_EXCEPTION_NOT_SUPPORT_6RD,
	SFE_EXCEPTION_NO_SYNC_CB,
	SFE_EXCEPTION_MAX
} sfe_exception_t;

static char *sfe_exception_events_string[SFE_EXCEPTION_MAX] = {
	"IPV4_MSG_UNKNOW",
	"IPV6_MSG_UNKNOW",
	"CONNECTION_INVALID",
	"NOT_SUPPORT_BRIDGE",
	"TCP_INVALID",
	"PROTOCOL_NOT_SUPPORT",
	"SRC_DEV_NOT_L3",
	"DEST_DEV_NOT_L3",
	"CREATE_FAILED",
	"ENQUEUE_FAILED",
	"NOT_SUPPORT_6RD",
	"NO_SYNC_CB"
};

/*
 * Message type of queued response message
 */
typedef enum {
	SFE_MSG_TYPE_IPV4,
	SFE_MSG_TYPE_IPV6
} sfe_msg_types_t;

/*
 * Queued response message,
 * sent back to the caller from a workqueue
 */
struct sfe_response_msg {
	struct list_head node;
	sfe_msg_types_t type;
	void *msg[0];
};
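
/*
 * msg[0] above is a zero-length array member: the IPv4 or IPv6 message
 * payload is allocated contiguously after this header. A sketch of the
 * allocation performed by sfe_alloc_response_msg() below:
 *
 *	response = kzalloc(sizeof(struct sfe_response_msg) +
 *			   sizeof(struct sfe_ipv4_msg), GFP_ATOMIC);
 *	memcpy(response->msg, msg, sizeof(struct sfe_ipv4_msg));
 */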

/*
 * SFE context instance, private to SFE
 */
struct sfe_ctx_instance_internal {
	struct sfe_ctx_instance base;	/* Exported SFE context, public to users of SFE */

	/*
	 * Control state.
	 */
	struct kobject *sys_sfe;	/* Sysfs linkage */

	struct list_head msg_queue;	/* Response message queue */
	spinlock_t lock;		/* Lock to protect message queue */

	struct work_struct work;	/* Work to send response messages back to callers */

	sfe_ipv4_msg_callback_t __rcu ipv4_stats_sync_cb;	/* Callback to call to sync ipv4 statistics */
	void *ipv4_stats_sync_data;	/* Argument for above callback: ipv4_stats_sync_cb */

	sfe_ipv6_msg_callback_t __rcu ipv6_stats_sync_cb;	/* Callback to call to sync ipv6 statistics */
	void *ipv6_stats_sync_data;	/* Argument for above callback: ipv6_stats_sync_cb */

	u32 exceptions[SFE_EXCEPTION_MAX];	/* Exception statistics */
};

static struct sfe_ctx_instance_internal __sfe_ctx;

/*
 * Convert public SFE context to internal context
 */
#define SFE_CTX_TO_PRIVATE(base) (struct sfe_ctx_instance_internal *)(base)
/*
 * Convert internal SFE context to public context
 */
#define SFE_CTX_TO_PUBLIC(intrv) (struct sfe_ctx_instance *)(intrv)

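/*
 * These casts are valid only because 'base' is the first member of
 * struct sfe_ctx_instance_internal, so the public and internal structures
 * share the same starting address.
 */
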
/*
 * sfe_incr_exceptions()
 *	Increment an exception counter.
 *
 * TODO: Merge sfe_ctx stats to ipv4 and ipv6 percpu stats.
 */
static inline void sfe_incr_exceptions(sfe_exception_t except)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);
	sfe_ctx->exceptions[except]++;
	spin_unlock_bh(&sfe_ctx->lock);
}

/*
 * sfe_dev_is_layer_3_interface()
 *	Check whether a network device is an IPv4 or IPv6 layer-3 interface
 *
 * @param dev network device to check
 * @param check_v4 true to check for an IPv4 layer-3 interface (one with an
 *	IPv4 address); false to check for an IPv6 layer-3 interface (one with
 *	an IPv6 address)
 */
inline bool sfe_dev_is_layer_3_interface(struct net_device *dev, bool check_v4)
{
	struct in_device *in4_dev;
	struct inet6_dev *in6_dev;

	BUG_ON(!dev);

	if (likely(check_v4)) {
		/*
		 * Does our input device support IPv4 processing?
		 */
		in4_dev = (struct in_device *)dev->ip_ptr;
		if (unlikely(!in4_dev)) {
			return false;
		}

		/*
		 * Does it have an IPv4 address? If it doesn't then we can't do anything
		 * interesting here!
		 */
		if (unlikely(!in4_dev->ifa_list)) {
			return false;
		}

		return true;
	}

	/*
	 * Does our input device support IPv6 processing?
	 */
	in6_dev = (struct inet6_dev *)dev->ip6_ptr;
	if (unlikely(!in6_dev)) {
		return false;
	}

	/*
	 * Does it have an IPv6 address? If it doesn't then we can't do anything
	 * interesting here!
	 */
	if (unlikely(list_empty(&in6_dev->addr_list))) {
		return false;
	}

	return true;
}

/*
 * sfe_clean_response_msg_by_type()
 *	Clean response messages in the queue when ECM exits
 *
 * @param sfe_ctx SFE context
 * @param msg_type message type, ipv4 or ipv6
 */
static void sfe_clean_response_msg_by_type(struct sfe_ctx_instance_internal *sfe_ctx, sfe_msg_types_t msg_type)
{
	struct sfe_response_msg *response, *tmp;

	if (!sfe_ctx) {
		return;
	}

	spin_lock_bh(&sfe_ctx->lock);
	list_for_each_entry_safe(response, tmp, &sfe_ctx->msg_queue, node) {
		if (response->type == msg_type) {
			list_del(&response->node);
			/*
			 * Free response message
			 */
			kfree(response);
		}
	}
	spin_unlock_bh(&sfe_ctx->lock);
}

/*
 * sfe_process_response_msg()
 *	Send all pending response messages to ECM by calling the callback function included in each message
 *
 * @param work work structure
 */
static void sfe_process_response_msg(struct work_struct *work)
{
	struct sfe_ctx_instance_internal *sfe_ctx = container_of(work, struct sfe_ctx_instance_internal, work);
	struct sfe_response_msg *response;

	spin_lock_bh(&sfe_ctx->lock);
	while ((response = list_first_entry_or_null(&sfe_ctx->msg_queue, struct sfe_response_msg, node))) {
		list_del(&response->node);
		spin_unlock_bh(&sfe_ctx->lock);
		rcu_read_lock();

		/*
		 * Send response message back to caller
		 */
		if ((response->type == SFE_MSG_TYPE_IPV4) && !sfe_ipv4_stopped(sfe_ctx)) {
			struct sfe_ipv4_msg *msg = (struct sfe_ipv4_msg *)response->msg;
			sfe_ipv4_msg_callback_t callback = (sfe_ipv4_msg_callback_t)msg->cm.cb;
			if (callback) {
				callback((void *)msg->cm.app_data, msg);
			}
		} else if ((response->type == SFE_MSG_TYPE_IPV6) && !sfe_ipv6_stopped(sfe_ctx)) {
			struct sfe_ipv6_msg *msg = (struct sfe_ipv6_msg *)response->msg;
			sfe_ipv6_msg_callback_t callback = (sfe_ipv6_msg_callback_t)msg->cm.cb;
			if (callback) {
				callback((void *)msg->cm.app_data, msg);
			}
		}

		rcu_read_unlock();
		/*
		 * Free response message
		 */
		kfree(response);
		spin_lock_bh(&sfe_ctx->lock);
	}
	spin_unlock_bh(&sfe_ctx->lock);
}

/*
 * sfe_alloc_response_msg()
 *	Allocate and construct a new response message
 *
 * @param type message type
 * @param msg used to construct the response message if not NULL
 *
 * @return the new message on success; NULL on failure
 */
static struct sfe_response_msg *
sfe_alloc_response_msg(sfe_msg_types_t type, void *msg)
{
	struct sfe_response_msg *response;
	int size;

	switch (type) {
	case SFE_MSG_TYPE_IPV4:
		size = sizeof(struct sfe_ipv4_msg);
		break;
	case SFE_MSG_TYPE_IPV6:
		size = sizeof(struct sfe_ipv6_msg);
		break;
	default:
		DEBUG_ERROR("message type %d not supported\n", type);
		return NULL;
	}

	response = (struct sfe_response_msg *)kzalloc(sizeof(struct sfe_response_msg) + size, GFP_ATOMIC);
	if (!response) {
		DEBUG_ERROR("allocate memory failed\n");
		return NULL;
	}

	response->type = type;

	if (msg) {
		memcpy(response->msg, msg, size);
	}

	return response;
}

/*
 * sfe_enqueue_msg()
 *	Queue a response message
 *
 * @param sfe_ctx SFE context
 * @param response response message to be queued
 */
static inline void sfe_enqueue_msg(struct sfe_ctx_instance_internal *sfe_ctx, struct sfe_response_msg *response)
{
	spin_lock_bh(&sfe_ctx->lock);
	list_add_tail(&response->node, &sfe_ctx->msg_queue);
	spin_unlock_bh(&sfe_ctx->lock);

	schedule_work(&sfe_ctx->work);
}

/*
 * sfe_cmn_msg_init()
 *	Initialize the common message structure.
 *
 * @param ncm message to init
 * @param if_num interface number related with this message
 * @param type message type
 * @param len message length
 * @param cb callback function to process the response of this message
 * @param app_data argument for above callback function
 */
static void sfe_cmn_msg_init(struct sfe_cmn_msg *ncm, u16 if_num, u32 type, u32 len, void *cb, void *app_data)
{
	ncm->interface = if_num;
	ncm->version = SFE_MESSAGE_VERSION;
	ncm->type = type;
	ncm->len = len;
	ncm->cb = (sfe_ptr_t)cb;
	ncm->app_data = (sfe_ptr_t)app_data;
}

/*
 * sfe_ipv4_stats_sync_callback()
 *	Synchronize a connection's state.
 *
 * @param sis SFE statistics from the SFE core engine
 */
static void sfe_ipv4_stats_sync_callback(struct sfe_connection_sync *sis)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
	struct sfe_ipv4_msg msg;
	struct sfe_ipv4_conn_sync *sync_msg;
	sfe_ipv4_msg_callback_t sync_cb;

	rcu_read_lock();
	sync_cb = rcu_dereference(sfe_ctx->ipv4_stats_sync_cb);
	if (!sync_cb) {
		rcu_read_unlock();
		sfe_incr_exceptions(SFE_EXCEPTION_NO_SYNC_CB);
		return;
	}

	sync_msg = &msg.msg.conn_stats;

	memset(&msg, 0, sizeof(msg));
	sfe_cmn_msg_init(&msg.cm, 0, SFE_RX_CONN_STATS_SYNC_MSG,
			 sizeof(struct sfe_ipv4_conn_sync), NULL, NULL);

	/*
	 * Fill connection specific information
	 */
	sync_msg->protocol = (u8)sis->protocol;
	sync_msg->flow_ip = sis->src_ip.ip;
	sync_msg->flow_ip_xlate = sis->src_ip_xlate.ip;
	sync_msg->flow_ident = sis->src_port;
	sync_msg->flow_ident_xlate = sis->src_port_xlate;

	sync_msg->return_ip = sis->dest_ip.ip;
	sync_msg->return_ip_xlate = sis->dest_ip_xlate.ip;
	sync_msg->return_ident = sis->dest_port;
	sync_msg->return_ident_xlate = sis->dest_port_xlate;

	/*
	 * Fill TCP protocol specific information
	 */
	if (sis->protocol == IPPROTO_TCP) {
		sync_msg->flow_max_window = sis->src_td_max_window;
		sync_msg->flow_end = sis->src_td_end;
		sync_msg->flow_max_end = sis->src_td_max_end;

		sync_msg->return_max_window = sis->dest_td_max_window;
		sync_msg->return_end = sis->dest_td_end;
		sync_msg->return_max_end = sis->dest_td_max_end;
	}

	/*
	 * Fill statistics information
	 */
	sync_msg->flow_rx_packet_count = sis->src_new_packet_count;
	sync_msg->flow_rx_byte_count = sis->src_new_byte_count;
	sync_msg->flow_tx_packet_count = sis->dest_new_packet_count;
	sync_msg->flow_tx_byte_count = sis->dest_new_byte_count;

	sync_msg->return_rx_packet_count = sis->dest_new_packet_count;
	sync_msg->return_rx_byte_count = sis->dest_new_byte_count;
	sync_msg->return_tx_packet_count = sis->src_new_packet_count;
	sync_msg->return_tx_byte_count = sis->src_new_byte_count;

	/*
	 * Fill expiration time to extend, in units of msec
	 */
	sync_msg->inc_ticks = (((u32)sis->delta_jiffies) * MSEC_PER_SEC)/HZ;

	/*
	 * Fill other information
	 */
	switch (sis->reason) {
	case SFE_SYNC_REASON_DESTROY:
		sync_msg->reason = SFE_RULE_SYNC_REASON_DESTROY;
		break;
	case SFE_SYNC_REASON_FLUSH:
		sync_msg->reason = SFE_RULE_SYNC_REASON_FLUSH;
		break;
	default:
		sync_msg->reason = SFE_RULE_SYNC_REASON_STATS;
		break;
	}

	/*
	 * The SFE sync is executed from a timer, so we can call back into ECM directly.
	 */
	sync_cb(sfe_ctx->ipv4_stats_sync_data, &msg);
	rcu_read_unlock();
}

/*
 * sfe_create_ipv4_rule_msg()
 *	Convert the create-rule message format from ECM to SFE
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv4 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_create_ipv4_rule_msg(struct sfe_ctx_instance_internal *sfe_ctx, struct sfe_ipv4_msg *msg)
{
	struct net_device *src_dev = NULL;
	struct net_device *dest_dev = NULL;
	struct sfe_response_msg *response;
	enum sfe_cmn_response ret = SFE_CMN_RESPONSE_ACK;

	response = sfe_alloc_response_msg(SFE_MSG_TYPE_IPV4, msg);
	if (!response) {
		sfe_incr_exceptions(SFE_EXCEPTION_ENQUEUE_FAILED);
		return SFE_TX_FAILURE_QUEUE;
	}

	if (!(msg->msg.rule_create.valid_flags & SFE_RULE_CREATE_CONN_VALID)) {
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CONNECTION_INVALID);
		goto failed_ret;
	}

	switch (msg->msg.rule_create.tuple.protocol) {
	case IPPROTO_TCP:
		if (!(msg->msg.rule_create.valid_flags & SFE_RULE_CREATE_TCP_VALID)) {
			ret = SFE_CMN_RESPONSE_EMSG;
			sfe_incr_exceptions(SFE_EXCEPTION_TCP_INVALID);
			goto failed_ret;
		}

		/* fall through: TCP needs the extra validation above, then is handled like UDP */
	case IPPROTO_UDP:
		break;

	default:
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_PROTOCOL_NOT_SUPPORT);
		goto failed_ret;
	}

	/*
	 * Not supporting bridged flows now
	 */
	if (msg->msg.rule_create.rule_flags & SFE_RULE_CREATE_FLAG_BRIDGE_FLOW) {
		ret = SFE_CMN_RESPONSE_EINTERFACE;
		sfe_incr_exceptions(SFE_EXCEPTION_NOT_SUPPORT_BRIDGE);
		goto failed_ret;
	}

	/*
	 * Does our input device support IP processing?
	 */
	src_dev = dev_get_by_index(&init_net, msg->msg.rule_create.conn_rule.flow_top_interface_num);
	if (!src_dev || !sfe_dev_is_layer_3_interface(src_dev, true)) {
		ret = SFE_CMN_RESPONSE_EINTERFACE;
		sfe_incr_exceptions(SFE_EXCEPTION_SRC_DEV_NOT_L3);
		goto failed_ret;
	}

	/*
	 * Does our output device support IP processing?
	 */
	dest_dev = dev_get_by_index(&init_net, msg->msg.rule_create.conn_rule.return_top_interface_num);
	if (!dest_dev || !sfe_dev_is_layer_3_interface(dest_dev, true)) {
		ret = SFE_CMN_RESPONSE_EINTERFACE;
		sfe_incr_exceptions(SFE_EXCEPTION_DEST_DEV_NOT_L3);
		goto failed_ret;
	}

	if (!sfe_ipv4_create_rule(&msg->msg.rule_create)) {
		/* Success */
		ret = SFE_CMN_RESPONSE_ACK;
	} else {
		/* Failed */
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CREATE_FAILED);
	}

	/*
	 * Fall through
	 */
failed_ret:
	if (src_dev) {
		dev_put(src_dev);
	}

	if (dest_dev) {
		dev_put(dest_dev);
	}

	/*
	 * Try to queue response message
	 */
	((struct sfe_ipv4_msg *)response->msg)->cm.response = msg->cm.response = ret;
	sfe_enqueue_msg(sfe_ctx, response);

	return SFE_TX_SUCCESS;
}

/*
 * sfe_destroy_ipv4_rule_msg()
 *	Convert the destroy-rule message format from ECM to SFE
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv4 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_destroy_ipv4_rule_msg(struct sfe_ctx_instance_internal *sfe_ctx, struct sfe_ipv4_msg *msg)
{
	struct sfe_response_msg *response;

	response = sfe_alloc_response_msg(SFE_MSG_TYPE_IPV4, msg);
	if (!response) {
		sfe_incr_exceptions(SFE_EXCEPTION_ENQUEUE_FAILED);
		return SFE_TX_FAILURE_QUEUE;
	}

	sfe_ipv4_destroy_rule(&msg->msg.rule_destroy);

	/*
	 * Try to queue response message
	 */
	((struct sfe_ipv4_msg *)response->msg)->cm.response = msg->cm.response = SFE_CMN_RESPONSE_ACK;
	sfe_enqueue_msg(sfe_ctx, response);

	return SFE_TX_SUCCESS;
}

/*
 * sfe_ipv4_tx()
 *	Transmit an IPv4 message to SFE
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv4 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_ipv4_tx(struct sfe_ctx_instance *sfe_ctx, struct sfe_ipv4_msg *msg)
{
	switch (msg->cm.type) {
	case SFE_TX_CREATE_RULE_MSG:
		return sfe_create_ipv4_rule_msg(SFE_CTX_TO_PRIVATE(sfe_ctx), msg);
	case SFE_TX_DESTROY_RULE_MSG:
		return sfe_destroy_ipv4_rule_msg(SFE_CTX_TO_PRIVATE(sfe_ctx), msg);
	default:
		sfe_incr_exceptions(SFE_EXCEPTION_IPV4_MSG_UNKNOW);
		return SFE_TX_FAILURE_NOT_ENABLED;
	}
}
EXPORT_SYMBOL(sfe_ipv4_tx);

/*
 * sfe_ipv4_msg_init()
 *	Initialize IPv4 message.
 */
void sfe_ipv4_msg_init(struct sfe_ipv4_msg *nim, u16 if_num, u32 type, u32 len,
		       sfe_ipv4_msg_callback_t cb, void *app_data)
{
	sfe_cmn_msg_init(&nim->cm, if_num, type, len, (void *)cb, app_data);
}
EXPORT_SYMBOL(sfe_ipv4_msg_init);
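
/*
 * Example of the Tx path from a client such as ECM (an illustrative sketch
 * only: 'ctx', 'if_num', 'my_resp_cb' and 'my_app_data' are hypothetical,
 * and the rule fields must be filled in by the caller):
 *
 *	struct sfe_ipv4_msg nim;
 *
 *	sfe_ipv4_msg_init(&nim, if_num, SFE_TX_CREATE_RULE_MSG,
 *			  sizeof(nim.msg.rule_create), my_resp_cb, my_app_data);
 *	... populate nim.msg.rule_create ...
 *	if (sfe_ipv4_tx(ctx, &nim) != SFE_TX_SUCCESS) {
 *		... the message was not queued; handle the failure ...
 *	}
 *
 * The response then arrives asynchronously via my_resp_cb from the workqueue.
 */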

/*
 * sfe_ipv4_max_conn_count()
 *	Return the maximum number of connection entries that SFE supports
 */
int sfe_ipv4_max_conn_count(void)
{
	return max_ipv4_conn;
}
EXPORT_SYMBOL(sfe_ipv4_max_conn_count);

/*
 * sfe_ipv4_notify_register()
 *	Register a notifier callback for IPv4 messages from SFE
 *
 * @param cb The callback pointer
 * @param app_data The application context for this message
 *
 * @return struct sfe_ctx_instance * The SFE context
 */
struct sfe_ctx_instance *sfe_ipv4_notify_register(sfe_ipv4_msg_callback_t cb, void *app_data)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);
	/*
	 * Hook the shortcut sync callback.
	 */
	if (cb && !sfe_ctx->ipv4_stats_sync_cb) {
		sfe_ipv4_register_sync_rule_callback(sfe_ipv4_stats_sync_callback);
	}

	rcu_assign_pointer(sfe_ctx->ipv4_stats_sync_cb, cb);
	sfe_ctx->ipv4_stats_sync_data = app_data;

	spin_unlock_bh(&sfe_ctx->lock);

	return SFE_CTX_TO_PUBLIC(sfe_ctx);
}
EXPORT_SYMBOL(sfe_ipv4_notify_register);
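
/*
 * Typical client usage (a sketch; the callback name and app_data are
 * hypothetical, not taken from ECM):
 *
 *	static void my_ipv4_msg_cb(void *app_data, struct sfe_ipv4_msg *msg)
 *	{
 *		... handle rule responses and connection stats sync ...
 *	}
 *
 *	struct sfe_ctx_instance *ctx;
 *
 *	ctx = sfe_ipv4_notify_register(my_ipv4_msg_cb, my_app_data);
 *	...
 *	sfe_ipv4_notify_unregister();
 */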

/*
 * sfe_ipv4_notify_unregister()
 *	Unregister a notifier callback for IPv4 messages from SFE
 */
void sfe_ipv4_notify_unregister(void)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);
	/*
	 * Unregister our sync callback.
	 */
	if (sfe_ctx->ipv4_stats_sync_cb) {
		sfe_ipv4_register_sync_rule_callback(NULL);
		rcu_assign_pointer(sfe_ctx->ipv4_stats_sync_cb, NULL);
		sfe_ctx->ipv4_stats_sync_data = NULL;
	}
	spin_unlock_bh(&sfe_ctx->lock);

	sfe_clean_response_msg_by_type(sfe_ctx, SFE_MSG_TYPE_IPV4);

	return;
}
EXPORT_SYMBOL(sfe_ipv4_notify_unregister);

/*
 * sfe_ipv6_stats_sync_callback()
 *	Synchronize a connection's state.
 */
static void sfe_ipv6_stats_sync_callback(struct sfe_connection_sync *sis)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
	struct sfe_ipv6_msg msg;
	struct sfe_ipv6_conn_sync *sync_msg;
	sfe_ipv6_msg_callback_t sync_cb;

	rcu_read_lock();
	sync_cb = rcu_dereference(sfe_ctx->ipv6_stats_sync_cb);
	if (!sync_cb) {
		rcu_read_unlock();
		sfe_incr_exceptions(SFE_EXCEPTION_NO_SYNC_CB);
		return;
	}

	sync_msg = &msg.msg.conn_stats;

	memset(&msg, 0, sizeof(msg));
	sfe_cmn_msg_init(&msg.cm, 0, SFE_RX_CONN_STATS_SYNC_MSG,
			 sizeof(struct sfe_ipv6_conn_sync), NULL, NULL);

	/*
	 * Fill connection specific information
	 */
	sync_msg->protocol = (u8)sis->protocol;
	sfe_ipv6_addr_copy(sis->src_ip.ip6, sync_msg->flow_ip);
	sync_msg->flow_ident = sis->src_port;

	sfe_ipv6_addr_copy(sis->dest_ip.ip6, sync_msg->return_ip);
	sync_msg->return_ident = sis->dest_port;

	/*
	 * Fill TCP protocol specific information
	 */
	if (sis->protocol == IPPROTO_TCP) {
		sync_msg->flow_max_window = sis->src_td_max_window;
		sync_msg->flow_end = sis->src_td_end;
		sync_msg->flow_max_end = sis->src_td_max_end;

		sync_msg->return_max_window = sis->dest_td_max_window;
		sync_msg->return_end = sis->dest_td_end;
		sync_msg->return_max_end = sis->dest_td_max_end;
	}

	/*
	 * Fill statistics information
	 */
	sync_msg->flow_rx_packet_count = sis->src_new_packet_count;
	sync_msg->flow_rx_byte_count = sis->src_new_byte_count;
	sync_msg->flow_tx_packet_count = sis->dest_new_packet_count;
	sync_msg->flow_tx_byte_count = sis->dest_new_byte_count;

	sync_msg->return_rx_packet_count = sis->dest_new_packet_count;
	sync_msg->return_rx_byte_count = sis->dest_new_byte_count;
	sync_msg->return_tx_packet_count = sis->src_new_packet_count;
	sync_msg->return_tx_byte_count = sis->src_new_byte_count;

	/*
	 * Fill expiration time to extend, in units of msec
	 */
	sync_msg->inc_ticks = (((u32)sis->delta_jiffies) * MSEC_PER_SEC)/HZ;

	/*
	 * Fill other information
	 */
	switch (sis->reason) {
	case SFE_SYNC_REASON_DESTROY:
		sync_msg->reason = SFE_RULE_SYNC_REASON_DESTROY;
		break;
	case SFE_SYNC_REASON_FLUSH:
		sync_msg->reason = SFE_RULE_SYNC_REASON_FLUSH;
		break;
	default:
		sync_msg->reason = SFE_RULE_SYNC_REASON_STATS;
		break;
	}

	/*
	 * The SFE sync is executed from a timer, so we can call back into ECM directly.
	 */
	sync_cb(sfe_ctx->ipv6_stats_sync_data, &msg);
	rcu_read_unlock();
}

/*
 * sfe_create_ipv6_rule_msg()
 *	Convert the create-rule message format from ECM to SFE
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv6 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_create_ipv6_rule_msg(struct sfe_ctx_instance_internal *sfe_ctx, struct sfe_ipv6_msg *msg)
{
	struct net_device *src_dev = NULL;
	struct net_device *dest_dev = NULL;
	struct sfe_response_msg *response;
	enum sfe_cmn_response ret = SFE_CMN_RESPONSE_ACK;

	response = sfe_alloc_response_msg(SFE_MSG_TYPE_IPV6, msg);
	if (!response) {
		sfe_incr_exceptions(SFE_EXCEPTION_ENQUEUE_FAILED);
		return SFE_TX_FAILURE_QUEUE;
	}

	if (!(msg->msg.rule_create.valid_flags & SFE_RULE_CREATE_CONN_VALID)) {
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CONNECTION_INVALID);
		goto failed_ret;
	}

	/*
	 * Not supporting bridged flows now
	 */
	if (msg->msg.rule_create.rule_flags & SFE_RULE_CREATE_FLAG_BRIDGE_FLOW) {
		ret = SFE_CMN_RESPONSE_EINTERFACE;
		sfe_incr_exceptions(SFE_EXCEPTION_NOT_SUPPORT_BRIDGE);
		goto failed_ret;
	}

	switch (msg->msg.rule_create.tuple.protocol) {
	case IPPROTO_TCP:
		if (!(msg->msg.rule_create.valid_flags & SFE_RULE_CREATE_TCP_VALID)) {
			ret = SFE_CMN_RESPONSE_EMSG;
			sfe_incr_exceptions(SFE_EXCEPTION_TCP_INVALID);
			goto failed_ret;
		}

		break;

	case IPPROTO_UDP:
		break;

	default:
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_PROTOCOL_NOT_SUPPORT);
		goto failed_ret;
	}

	/*
	 * Does our input device support IP processing?
	 */
	src_dev = dev_get_by_index(&init_net, msg->msg.rule_create.conn_rule.flow_top_interface_num);
	if (!src_dev || !sfe_dev_is_layer_3_interface(src_dev, false)) {
		ret = SFE_CMN_RESPONSE_EINTERFACE;
		sfe_incr_exceptions(SFE_EXCEPTION_SRC_DEV_NOT_L3);
		goto failed_ret;
	}

	/*
	 * Does our output device support IP processing?
	 */
	dest_dev = dev_get_by_index(&init_net, msg->msg.rule_create.conn_rule.return_top_interface_num);
	if (!dest_dev || !sfe_dev_is_layer_3_interface(dest_dev, false)) {
		ret = SFE_CMN_RESPONSE_EINTERFACE;
		sfe_incr_exceptions(SFE_EXCEPTION_DEST_DEV_NOT_L3);
		goto failed_ret;
	}

	if (!sfe_ipv6_create_rule(&msg->msg.rule_create)) {
		/* Success */
		ret = SFE_CMN_RESPONSE_ACK;
	} else {
		/* Failed */
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CREATE_FAILED);
	}

	/*
	 * Fall through
	 */
failed_ret:
	if (src_dev) {
		dev_put(src_dev);
	}

	if (dest_dev) {
		dev_put(dest_dev);
	}

	/*
	 * Try to queue response message
	 */
	((struct sfe_ipv6_msg *)response->msg)->cm.response = msg->cm.response = ret;
	sfe_enqueue_msg(sfe_ctx, response);

	return SFE_TX_SUCCESS;
}

/*
 * sfe_destroy_ipv6_rule_msg()
 *	Convert the destroy-rule message format from ECM to SFE
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv6 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_destroy_ipv6_rule_msg(struct sfe_ctx_instance_internal *sfe_ctx, struct sfe_ipv6_msg *msg)
{
	struct sfe_response_msg *response;

	response = sfe_alloc_response_msg(SFE_MSG_TYPE_IPV6, msg);
	if (!response) {
		sfe_incr_exceptions(SFE_EXCEPTION_ENQUEUE_FAILED);
		return SFE_TX_FAILURE_QUEUE;
	}

	sfe_ipv6_destroy_rule(&msg->msg.rule_destroy);

	/*
	 * Try to queue response message
	 */
	((struct sfe_ipv6_msg *)response->msg)->cm.response = msg->cm.response = SFE_CMN_RESPONSE_ACK;
	sfe_enqueue_msg(sfe_ctx, response);

	return SFE_TX_SUCCESS;
}

/*
 * sfe_ipv6_tx()
 *	Transmit an IPv6 message to SFE
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv6 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_ipv6_tx(struct sfe_ctx_instance *sfe_ctx, struct sfe_ipv6_msg *msg)
{
	switch (msg->cm.type) {
	case SFE_TX_CREATE_RULE_MSG:
		return sfe_create_ipv6_rule_msg(SFE_CTX_TO_PRIVATE(sfe_ctx), msg);
	case SFE_TX_DESTROY_RULE_MSG:
		return sfe_destroy_ipv6_rule_msg(SFE_CTX_TO_PRIVATE(sfe_ctx), msg);
	default:
		sfe_incr_exceptions(SFE_EXCEPTION_IPV6_MSG_UNKNOW);
		return SFE_TX_FAILURE_NOT_ENABLED;
	}
}
EXPORT_SYMBOL(sfe_ipv6_tx);

/*
 * sfe_ipv6_msg_init()
 *	Initialize IPv6 message.
 */
void sfe_ipv6_msg_init(struct sfe_ipv6_msg *nim, u16 if_num, u32 type, u32 len,
		       sfe_ipv6_msg_callback_t cb, void *app_data)
{
	sfe_cmn_msg_init(&nim->cm, if_num, type, len, (void *)cb, app_data);
}
EXPORT_SYMBOL(sfe_ipv6_msg_init);

/*
 * sfe_ipv6_max_conn_count()
 *	Return the maximum number of connection entries that SFE supports
 */
int sfe_ipv6_max_conn_count(void)
{
	return max_ipv6_conn;
}
EXPORT_SYMBOL(sfe_ipv6_max_conn_count);

/*
 * sfe_ipv6_notify_register()
 *	Register a notifier callback for IPv6 messages from SFE
 *
 * @param cb The callback pointer
 * @param app_data The application context for this message
 *
 * @return struct sfe_ctx_instance * The SFE context
 */
struct sfe_ctx_instance *sfe_ipv6_notify_register(sfe_ipv6_msg_callback_t cb, void *app_data)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);
	/*
	 * Hook the shortcut sync callback.
	 */
	if (cb && !sfe_ctx->ipv6_stats_sync_cb) {
		sfe_ipv6_register_sync_rule_callback(sfe_ipv6_stats_sync_callback);
	}

	rcu_assign_pointer(sfe_ctx->ipv6_stats_sync_cb, cb);
	sfe_ctx->ipv6_stats_sync_data = app_data;

	spin_unlock_bh(&sfe_ctx->lock);

	return SFE_CTX_TO_PUBLIC(sfe_ctx);
}
EXPORT_SYMBOL(sfe_ipv6_notify_register);

/*
 * sfe_ipv6_notify_unregister()
 *	Unregister a notifier callback for IPv6 messages from SFE
 */
void sfe_ipv6_notify_unregister(void)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);
	/*
	 * Unregister our sync callback.
	 */
	if (sfe_ctx->ipv6_stats_sync_cb) {
		sfe_ipv6_register_sync_rule_callback(NULL);
		rcu_assign_pointer(sfe_ctx->ipv6_stats_sync_cb, NULL);
		sfe_ctx->ipv6_stats_sync_data = NULL;
	}
	spin_unlock_bh(&sfe_ctx->lock);

	sfe_clean_response_msg_by_type(sfe_ctx, SFE_MSG_TYPE_IPV6);

	return;
}
EXPORT_SYMBOL(sfe_ipv6_notify_unregister);

/*
 * sfe_tun6rd_tx()
 *	Transmit a tun6rd message to the SFE engine (6rd is not supported, so this always fails)
 */
sfe_tx_status_t sfe_tun6rd_tx(struct sfe_ctx_instance *sfe_ctx, struct sfe_tun6rd_msg *msg)
{
	sfe_incr_exceptions(SFE_EXCEPTION_NOT_SUPPORT_6RD);
	return SFE_TX_FAILURE_NOT_ENABLED;
}
EXPORT_SYMBOL(sfe_tun6rd_tx);

/*
 * sfe_tun6rd_msg_init()
 *	Initialize sfe_tun6rd msg.
 */
void sfe_tun6rd_msg_init(struct sfe_tun6rd_msg *ncm, u16 if_num, u32 type, u32 len, void *cb, void *app_data)
{
	sfe_cmn_msg_init(&ncm->cm, if_num, type, len, cb, app_data);
}
EXPORT_SYMBOL(sfe_tun6rd_msg_init);

/*
 * sfe_recv()
 *	Handle packet receives.
 *
 * Returns 1 if the packet is forwarded or 0 if it isn't.
 */
int sfe_recv(struct sk_buff *skb)
{
	struct net_device *dev;

	/*
	 * We know that for the vast majority of packets we need the transport
	 * layer header so we may as well start to fetch it now!
	 */
	prefetch(skb->data + 32);
	barrier();

	dev = skb->dev;

#ifdef CONFIG_NET_CLS_ACT
	/*
	 * If an ingress Qdisc is configured and the packet has not been
	 * processed by it yet, we cannot accelerate this packet.
	 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0))
	if (dev->ingress_queue && !(skb->tc_verd & TC_NCLS)) {
		return 0;
	}
#else
	if (rcu_access_pointer(dev->miniq_ingress) && !skb->tc_skip_classify) {
		return 0;
	}
#endif
#endif

	/*
	 * We're only interested in IPv4 and IPv6 packets.
	 */
	if (likely(htons(ETH_P_IP) == skb->protocol)) {
		if (sfe_dev_is_layer_3_interface(dev, true)) {
			return sfe_ipv4_recv(dev, skb);
		} else {
			DEBUG_TRACE("no IPv4 address for device: %s\n", dev->name);
			return 0;
		}
	}

	if (likely(htons(ETH_P_IPV6) == skb->protocol)) {
		if (sfe_dev_is_layer_3_interface(dev, false)) {
			return sfe_ipv6_recv(dev, skb);
		} else {
			DEBUG_TRACE("no IPv6 address for device: %s\n", dev->name);
			return 0;
		}
	}

	DEBUG_TRACE("not an IP packet\n");
	return 0;
}
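
/*
 * sfe_recv() is not called directly by the stack: sfe_init_if() below
 * installs it as the athrs_fast_nat_recv hook, from which the network
 * receive path invokes it for each incoming packet.
 */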

/*
 * sfe_get_exceptions()
 *	Dump exception counters
 */
static ssize_t sfe_get_exceptions(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	int idx, len;
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);
	for (len = 0, idx = 0; idx < SFE_EXCEPTION_MAX; idx++) {
		if (sfe_ctx->exceptions[idx]) {
			len += snprintf(buf + len, (ssize_t)(PAGE_SIZE - len), "%s = %d\n", sfe_exception_events_string[idx], sfe_ctx->exceptions[idx]);
		}
	}
	spin_unlock_bh(&sfe_ctx->lock);

	return len;
}
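
/*
 * The counters are exposed through the 'sfe' kobject that sfe_init_if()
 * creates at the sysfs root, so any non-zero counters can be read with, e.g.:
 *
 *	$ cat /sys/sfe/exceptions
 *	CONNECTION_INVALID = 3
 *	CREATE_FAILED = 1
 *
 * (output format per the snprintf() above; the values shown are illustrative)
 */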

/*
 * sysfs attributes.
 */
static const struct device_attribute sfe_exceptions_attr =
	__ATTR(exceptions, S_IRUGO, sfe_get_exceptions, NULL);

/*
 * sfe_init_if()
 */
int sfe_init_if(void)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
	int result = -1;

	/*
	 * Create /sys/sfe
	 */
	sfe_ctx->sys_sfe = kobject_create_and_add("sfe", NULL);
	if (!sfe_ctx->sys_sfe) {
		DEBUG_ERROR("failed to register sfe\n");
		goto exit1;
	}

	/*
	 * Create /sys/sfe/exceptions
	 */
	result = sysfs_create_file(sfe_ctx->sys_sfe, &sfe_exceptions_attr.attr);
	if (result) {
		DEBUG_ERROR("failed to register exceptions file: %d\n", result);
		goto exit2;
	}

	spin_lock_init(&sfe_ctx->lock);

	INIT_LIST_HEAD(&sfe_ctx->msg_queue);
	INIT_WORK(&sfe_ctx->work, sfe_process_response_msg);

	/*
	 * Hook the receive path in the network stack.
	 */
	BUG_ON(athrs_fast_nat_recv);
	RCU_INIT_POINTER(athrs_fast_nat_recv, sfe_recv);

	return 0;
exit2:
	kobject_put(sfe_ctx->sys_sfe);
exit1:
	return result;
}

/*
 * sfe_exit_if()
 */
void sfe_exit_if(void)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	/*
	 * Unregister our receive callback.
	 */
	RCU_INIT_POINTER(athrs_fast_nat_recv, NULL);

	/*
	 * Wait for all callbacks to complete.
	 */
	rcu_barrier();

	/*
	 * Destroy all connections.
	 */
	sfe_ipv4_destroy_all_rules_for_dev(NULL);
	sfe_ipv6_destroy_all_rules_for_dev(NULL);

	/*
	 * Stop the work queue and flush all pending messages in the queue.
	 */
	cancel_work_sync(&sfe_ctx->work);
	sfe_process_response_msg(&sfe_ctx->work);

	/*
	 * Unregister our sync callbacks.
	 */
	sfe_ipv4_notify_unregister();
	sfe_ipv6_notify_unregister();

	kobject_put(sfe_ctx->sys_sfe);

	return;
}