/*
 * sfe.c
 *	API for shortcut forwarding engine.
 *
 * Copyright (c) 2015,2016, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/module.h>
#include <linux/version.h>
#include <linux/sysfs.h>
#include <linux/skbuff.h>
#include <net/addrconf.h>
#include <linux/inetdevice.h>
#include <net/pkt_sched.h>

#include "sfe_debug.h"
#include "sfe.h"
#include "sfe_api.h"

#define SFE_MESSAGE_VERSION 0x1
#define SFE_MAX_CONNECTION_NUM 65535
#define sfe_ipv6_addr_copy(src, dest) memcpy((void *)(dest), (void *)(src), 16)
#define sfe_ipv4_stopped(CTX) (rcu_dereference((CTX)->ipv4_stats_sync_cb) == NULL)
#define sfe_ipv6_stopped(CTX) (rcu_dereference((CTX)->ipv6_stats_sync_cb) == NULL)

typedef enum sfe_exception {
	SFE_EXCEPTION_IPV4_MSG_UNKNOW,
	SFE_EXCEPTION_IPV6_MSG_UNKNOW,
	SFE_EXCEPTION_CONNECTION_INVALID,
	SFE_EXCEPTION_NOT_SUPPORT_BRIDGE,
	SFE_EXCEPTION_TCP_INVALID,
	SFE_EXCEPTION_PROTOCOL_NOT_SUPPORT,
	SFE_EXCEPTION_SRC_DEV_NOT_L3,
	SFE_EXCEPTION_DEST_DEV_NOT_L3,
	SFE_EXCEPTION_CREATE_FAILED,
	SFE_EXCEPTION_ENQUEUE_FAILED,
	SFE_EXCEPTION_NOT_SUPPORT_6RD,
	SFE_EXCEPTION_NO_SYNC_CB,
	SFE_EXCEPTION_MAX
} sfe_exception_t;

static char *sfe_exception_events_string[SFE_EXCEPTION_MAX] = {
	"IPV4_MSG_UNKNOW",
	"IPV6_MSG_UNKNOW",
	"CONNECTION_INVALID",
	"NOT_SUPPORT_BRIDGE",
	"TCP_INVALID",
	"PROTOCOL_NOT_SUPPORT",
	"SRC_DEV_NOT_L3",
	"DEST_DEV_NOT_L3",
	"CREATE_FAILED",
	"ENQUEUE_FAILED",
	"NOT_SUPPORT_6RD",
	"NO_SYNC_CB"
};

/*
 * Message type of queued response message
 */
typedef enum {
	SFE_MSG_TYPE_IPV4,
	SFE_MSG_TYPE_IPV6
} sfe_msg_types_t;
77
78/*
79 * Queued response message,
80 * will be sent back to caller in workqueue
81 */
82struct sfe_response_msg {
83 struct list_head node;
84 sfe_msg_types_t type;
85 void *msg[0];
86};
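
/*
 * The zero-length msg[] member lets a single allocation hold both the
 * bookkeeping fields and the copied message payload. A minimal sketch of
 * the layout (illustrative only; src_msg is a placeholder for the caller's
 * message, and sfe_alloc_response_msg() below is the real constructor):
 *
 *	struct sfe_response_msg *r =
 *		kzalloc(sizeof(*r) + sizeof(struct sfe_ipv4_msg), GFP_ATOMIC);
 *	memcpy(r->msg, src_msg, sizeof(struct sfe_ipv4_msg));
 */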

/*
 * SFE context instance, private to SFE
 */
struct sfe_ctx_instance_internal {
	struct sfe_ctx_instance base;	/* Exported SFE context, public to users of SFE */

	/*
	 * Control state.
	 */
	struct kobject *sys_sfe;	/* Sysfs linkage */

	struct list_head msg_queue;	/* Response message queue */
	spinlock_t lock;		/* Lock to protect message queue */

	struct work_struct work;	/* Work to send response messages back to callers */

	sfe_ipv4_msg_callback_t __rcu ipv4_stats_sync_cb;	/* Callback to call to sync IPv4 statistics */
	void *ipv4_stats_sync_data;	/* Argument for the above callback: ipv4_stats_sync_cb */

	sfe_ipv6_msg_callback_t __rcu ipv6_stats_sync_cb;	/* Callback to call to sync IPv6 statistics */
	void *ipv6_stats_sync_data;	/* Argument for the above callback: ipv6_stats_sync_cb */

	u32 exceptions[SFE_EXCEPTION_MAX];	/* Exception statistics */
};

static struct sfe_ctx_instance_internal __sfe_ctx;

/*
 * Convert public SFE context to internal context
 */
#define SFE_CTX_TO_PRIVATE(base) (struct sfe_ctx_instance_internal *)(base)
/*
 * Convert internal SFE context to public context
 */
#define SFE_CTX_TO_PUBLIC(intrv) (struct sfe_ctx_instance *)(intrv)

/*
 * sfe_incr_exceptions()
 *	Increase an exception counter.
 */
static inline void sfe_incr_exceptions(sfe_exception_t except)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);
	sfe_ctx->exceptions[except]++;
	spin_unlock_bh(&sfe_ctx->lock);
}

/*
 * sfe_dev_is_layer_3_interface()
 *	Check whether a network device is an IPv4 or IPv6 layer-3 interface
 *
 * @param dev network device to check
 * @param check_v4 true to check for an IPv4 layer-3 interface (one that has an IPv4 address); false to check for an IPv6 layer-3 interface (one that has an IPv6 address)
 */
inline bool sfe_dev_is_layer_3_interface(struct net_device *dev, bool check_v4)
{
	struct in_device *in4_dev;
	struct inet6_dev *in6_dev;

	BUG_ON(!dev);

	if (likely(check_v4)) {
		/*
		 * Does our input device support IPv4 processing?
		 */
		in4_dev = (struct in_device *)dev->ip_ptr;
		if (unlikely(!in4_dev)) {
			return false;
		}

		/*
		 * Does it have an IPv4 address? If it doesn't then we can't do anything
		 * interesting here!
		 */
		if (unlikely(!in4_dev->ifa_list)) {
			return false;
		}

		return true;
	}

	/*
	 * Does our input device support IPv6 processing?
	 */
	in6_dev = (struct inet6_dev *)dev->ip6_ptr;
	if (unlikely(!in6_dev)) {
		return false;
	}

	/*
	 * Does it have an IPv6 address? If it doesn't then we can't do anything
	 * interesting here!
	 */
	if (unlikely(list_empty(&in6_dev->addr_list))) {
		return false;
	}

	return true;
}

/*
 * sfe_clean_response_msg_by_type()
 *	Clean response messages of the given type from the queue when ECM exits
 *
 * @param sfe_ctx SFE context
 * @param msg_type message type, IPv4 or IPv6
 */
static void sfe_clean_response_msg_by_type(struct sfe_ctx_instance_internal *sfe_ctx, sfe_msg_types_t msg_type)
{
	struct sfe_response_msg *response, *tmp;

	if (!sfe_ctx) {
		return;
	}

	spin_lock_bh(&sfe_ctx->lock);
	list_for_each_entry_safe(response, tmp, &sfe_ctx->msg_queue, node) {
		if (response->type == msg_type) {
			list_del(&response->node);
			/*
			 * Free response message
			 */
			kfree(response);
		}
	}
	spin_unlock_bh(&sfe_ctx->lock);
}

/*
 * sfe_process_response_msg()
 *	Send all pending response messages back to ECM by calling the callback function included in each message
 *
 * @param work work structure
 */
static void sfe_process_response_msg(struct work_struct *work)
{
	struct sfe_ctx_instance_internal *sfe_ctx = container_of(work, struct sfe_ctx_instance_internal, work);
	struct sfe_response_msg *response;

	spin_lock_bh(&sfe_ctx->lock);
	while ((response = list_first_entry_or_null(&sfe_ctx->msg_queue, struct sfe_response_msg, node))) {
		list_del(&response->node);
		spin_unlock_bh(&sfe_ctx->lock);
		rcu_read_lock();

		/*
		 * Send the response message back to the caller
		 */
		if ((response->type == SFE_MSG_TYPE_IPV4) && !sfe_ipv4_stopped(sfe_ctx)) {
			struct sfe_ipv4_msg *msg = (struct sfe_ipv4_msg *)response->msg;
			sfe_ipv4_msg_callback_t callback = (sfe_ipv4_msg_callback_t)msg->cm.cb;
			if (callback) {
				callback((void *)msg->cm.app_data, msg);
			}
		} else if ((response->type == SFE_MSG_TYPE_IPV6) && !sfe_ipv6_stopped(sfe_ctx)) {
			struct sfe_ipv6_msg *msg = (struct sfe_ipv6_msg *)response->msg;
			sfe_ipv6_msg_callback_t callback = (sfe_ipv6_msg_callback_t)msg->cm.cb;
			if (callback) {
				callback((void *)msg->cm.app_data, msg);
			}
		}

		rcu_read_unlock();
		/*
		 * Free response message
		 */
		kfree(response);
		spin_lock_bh(&sfe_ctx->lock);
	}
	spin_unlock_bh(&sfe_ctx->lock);
}

/*
 * sfe_alloc_response_msg()
 *	Allocate and construct a new response message
 *
 * @param type message type
 * @param msg used to construct the response message if not NULL
 *
 * @return non-NULL on success; NULL on failure
 */
static struct sfe_response_msg *
sfe_alloc_response_msg(sfe_msg_types_t type, void *msg)
{
	struct sfe_response_msg *response;
	int size;

	switch (type) {
	case SFE_MSG_TYPE_IPV4:
		size = sizeof(struct sfe_ipv4_msg);
		break;
	case SFE_MSG_TYPE_IPV6:
		size = sizeof(struct sfe_ipv6_msg);
		break;
	default:
		DEBUG_ERROR("message type %d not supported\n", type);
		return NULL;
	}

	response = (struct sfe_response_msg *)kzalloc(sizeof(struct sfe_response_msg) + size, GFP_ATOMIC);
	if (!response) {
		DEBUG_ERROR("allocate memory failed\n");
		return NULL;
	}

	response->type = type;

	if (msg) {
		memcpy(response->msg, msg, size);
	}

	return response;
}

/*
 * sfe_enqueue_msg()
 *	Queue a response message
 *
 * @param sfe_ctx SFE context
 * @param response response message to be queued
 */
static inline void sfe_enqueue_msg(struct sfe_ctx_instance_internal *sfe_ctx, struct sfe_response_msg *response)
{
	spin_lock_bh(&sfe_ctx->lock);
	list_add_tail(&response->node, &sfe_ctx->msg_queue);
	spin_unlock_bh(&sfe_ctx->lock);

	schedule_work(&sfe_ctx->work);
}
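
/*
 * Typical internal flow (a sketch mirroring the rule-message handlers
 * below): allocate a response copy of the request, stamp the response
 * code, then queue it so the workqueue can invoke the caller's callback.
 *
 *	response = sfe_alloc_response_msg(SFE_MSG_TYPE_IPV4, msg);
 *	if (!response)
 *		return SFE_TX_FAILURE_QUEUE;
 *	((struct sfe_ipv4_msg *)response->msg)->cm.response = SFE_CMN_RESPONSE_ACK;
 *	sfe_enqueue_msg(sfe_ctx, response);
 */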

/*
 * sfe_cmn_msg_init()
 *	Initialize the common message structure.
 *
 * @param ncm message to init
 * @param if_num interface number related to this message
 * @param type message type
 * @param len message length
 * @param cb callback function to process the response to this message
 * @param app_data argument for the above callback function
 */
static void sfe_cmn_msg_init(struct sfe_cmn_msg *ncm, u16 if_num, u32 type, u32 len, void *cb, void *app_data)
{
	ncm->interface = if_num;
	ncm->version = SFE_MESSAGE_VERSION;
	ncm->type = type;
	ncm->len = len;
	ncm->cb = (sfe_ptr_t)cb;
	ncm->app_data = (sfe_ptr_t)app_data;
}
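
/*
 * Example (a sketch of how this is used below): the stats-sync callbacks
 * build their message header as
 *
 *	sfe_cmn_msg_init(&msg.cm, 0, SFE_RX_CONN_STATS_SYNC_MSG,
 *			 sizeof(struct sfe_ipv4_conn_sync), NULL, NULL);
 *
 * which stamps version = SFE_MESSAGE_VERSION and interface = 0, and leaves
 * cb/app_data empty because the sync is delivered synchronously.
 */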

/*
 * sfe_ipv4_stats_sync_callback()
 *	Synchronize a connection's state.
 *
 * @param sis SFE statistics from the SFE core engine
 */
static void sfe_ipv4_stats_sync_callback(struct sfe_connection_sync *sis)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
	struct sfe_ipv4_msg msg;
	struct sfe_ipv4_conn_sync *sync_msg;
	sfe_ipv4_msg_callback_t sync_cb;

	rcu_read_lock();
	sync_cb = rcu_dereference(sfe_ctx->ipv4_stats_sync_cb);
	if (!sync_cb) {
		rcu_read_unlock();
		sfe_incr_exceptions(SFE_EXCEPTION_NO_SYNC_CB);
		return;
	}

	sync_msg = &msg.msg.conn_stats;

	memset(&msg, 0, sizeof(msg));
	sfe_cmn_msg_init(&msg.cm, 0, SFE_RX_CONN_STATS_SYNC_MSG,
			 sizeof(struct sfe_ipv4_conn_sync), NULL, NULL);

	/*
	 * Fill connection specific information
	 */
	sync_msg->protocol = (u8)sis->protocol;
	sync_msg->flow_ip = sis->src_ip.ip;
	sync_msg->flow_ip_xlate = sis->src_ip_xlate.ip;
	sync_msg->flow_ident = sis->src_port;
	sync_msg->flow_ident_xlate = sis->src_port_xlate;

	sync_msg->return_ip = sis->dest_ip.ip;
	sync_msg->return_ip_xlate = sis->dest_ip_xlate.ip;
	sync_msg->return_ident = sis->dest_port;
	sync_msg->return_ident_xlate = sis->dest_port_xlate;

	/*
	 * Fill TCP protocol specific information
	 */
	if (sis->protocol == IPPROTO_TCP) {
		sync_msg->flow_max_window = sis->src_td_max_window;
		sync_msg->flow_end = sis->src_td_end;
		sync_msg->flow_max_end = sis->src_td_max_end;

		sync_msg->return_max_window = sis->dest_td_max_window;
		sync_msg->return_end = sis->dest_td_end;
		sync_msg->return_max_end = sis->dest_td_max_end;
	}

	/*
	 * Fill statistics information
	 */
	sync_msg->flow_rx_packet_count = sis->src_new_packet_count;
	sync_msg->flow_rx_byte_count = sis->src_new_byte_count;
	sync_msg->flow_tx_packet_count = sis->dest_new_packet_count;
	sync_msg->flow_tx_byte_count = sis->dest_new_byte_count;

	sync_msg->return_rx_packet_count = sis->dest_new_packet_count;
	sync_msg->return_rx_byte_count = sis->dest_new_byte_count;
	sync_msg->return_tx_packet_count = sis->src_new_packet_count;
	sync_msg->return_tx_byte_count = sis->src_new_byte_count;

	/*
	 * Fill the expiration time to extend, in milliseconds
	 */
	sync_msg->inc_ticks = (((u32)sis->delta_jiffies) * MSEC_PER_SEC)/HZ;

	/*
	 * Fill other information
	 */
	switch (sis->reason) {
	case SFE_SYNC_REASON_DESTROY:
		sync_msg->reason = SFE_RULE_SYNC_REASON_DESTROY;
		break;
	case SFE_SYNC_REASON_FLUSH:
		sync_msg->reason = SFE_RULE_SYNC_REASON_FLUSH;
		break;
	default:
		sync_msg->reason = SFE_RULE_SYNC_REASON_STATS;
		break;
	}

	/*
	 * The SFE sync call is executed in a timer, so we can redirect it to ECM directly.
	 */
	sync_cb(sfe_ctx->ipv4_stats_sync_data, &msg);
	rcu_read_unlock();
}

/*
 * sfe_create_ipv4_rule_msg()
 *	Convert a create-rule message from ECM format to SFE format
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv4 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
static sfe_tx_status_t sfe_create_ipv4_rule_msg(struct sfe_ctx_instance_internal *sfe_ctx, struct sfe_ipv4_msg *msg)
{
	struct sfe_connection_create sic;
	struct net_device *src_dev = NULL;
	struct net_device *dest_dev = NULL;
	struct sfe_response_msg *response;
	enum sfe_cmn_response ret;

	response = sfe_alloc_response_msg(SFE_MSG_TYPE_IPV4, msg);
	if (!response) {
		sfe_incr_exceptions(SFE_EXCEPTION_ENQUEUE_FAILED);
		return SFE_TX_FAILURE_QUEUE;
	}

	if (!(msg->msg.rule_create.valid_flags & SFE_RULE_CREATE_CONN_VALID)) {
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CONNECTION_INVALID);
		goto failed_ret;
	}

	/*
	 * Bridged flows are not supported yet
	 */
	if (msg->msg.rule_create.rule_flags & SFE_RULE_CREATE_FLAG_BRIDGE_FLOW) {
		ret = SFE_CMN_RESPONSE_EINTERFACE;
		sfe_incr_exceptions(SFE_EXCEPTION_NOT_SUPPORT_BRIDGE);
		goto failed_ret;
	}

	sic.protocol = msg->msg.rule_create.tuple.protocol;
	sic.src_ip.ip = msg->msg.rule_create.tuple.flow_ip;
	sic.dest_ip.ip = msg->msg.rule_create.tuple.return_ip;
	sic.src_ip_xlate.ip = msg->msg.rule_create.conn_rule.flow_ip_xlate;
	sic.dest_ip_xlate.ip = msg->msg.rule_create.conn_rule.return_ip_xlate;

	sic.flags = 0;
	switch (sic.protocol) {
	case IPPROTO_TCP:
		if (!(msg->msg.rule_create.valid_flags & SFE_RULE_CREATE_TCP_VALID)) {
			ret = SFE_CMN_RESPONSE_EMSG;
			sfe_incr_exceptions(SFE_EXCEPTION_TCP_INVALID);
			goto failed_ret;
		}

		sic.src_port = msg->msg.rule_create.tuple.flow_ident;
		sic.dest_port = msg->msg.rule_create.tuple.return_ident;
		sic.src_port_xlate = msg->msg.rule_create.conn_rule.flow_ident_xlate;
		sic.dest_port_xlate = msg->msg.rule_create.conn_rule.return_ident_xlate;
		sic.src_td_window_scale = msg->msg.rule_create.tcp_rule.flow_window_scale;
		sic.src_td_max_window = msg->msg.rule_create.tcp_rule.flow_max_window;
		sic.src_td_end = msg->msg.rule_create.tcp_rule.flow_end;
		sic.src_td_max_end = msg->msg.rule_create.tcp_rule.flow_max_end;
		sic.dest_td_window_scale = msg->msg.rule_create.tcp_rule.return_window_scale;
		sic.dest_td_max_window = msg->msg.rule_create.tcp_rule.return_max_window;
		sic.dest_td_end = msg->msg.rule_create.tcp_rule.return_end;
		sic.dest_td_max_end = msg->msg.rule_create.tcp_rule.return_max_end;
		if (msg->msg.rule_create.rule_flags & SFE_RULE_CREATE_FLAG_NO_SEQ_CHECK) {
			sic.flags |= SFE_CREATE_FLAG_NO_SEQ_CHECK;
		}
		break;

	case IPPROTO_UDP:
		sic.src_port = msg->msg.rule_create.tuple.flow_ident;
		sic.dest_port = msg->msg.rule_create.tuple.return_ident;
		sic.src_port_xlate = msg->msg.rule_create.conn_rule.flow_ident_xlate;
		sic.dest_port_xlate = msg->msg.rule_create.conn_rule.return_ident_xlate;
		break;

	default:
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_PROTOCOL_NOT_SUPPORT);
		goto failed_ret;
	}

	memcpy(sic.src_mac, msg->msg.rule_create.conn_rule.flow_mac, ETH_ALEN);
	memset(sic.src_mac_xlate, 0, ETH_ALEN);
	memset(sic.dest_mac, 0, ETH_ALEN);
	memcpy(sic.dest_mac_xlate, msg->msg.rule_create.conn_rule.return_mac, ETH_ALEN);

	/*
	 * Does our input device support IP processing?
	 */
	src_dev = dev_get_by_index(&init_net, msg->msg.rule_create.conn_rule.flow_top_interface_num);
	if (!src_dev || !sfe_dev_is_layer_3_interface(src_dev, true)) {
		ret = SFE_CMN_RESPONSE_EINTERFACE;
		sfe_incr_exceptions(SFE_EXCEPTION_SRC_DEV_NOT_L3);
		goto failed_ret;
	}

	/*
	 * Does our output device support IP processing?
	 */
	dest_dev = dev_get_by_index(&init_net, msg->msg.rule_create.conn_rule.return_top_interface_num);
	if (!dest_dev || !sfe_dev_is_layer_3_interface(dest_dev, true)) {
		ret = SFE_CMN_RESPONSE_EINTERFACE;
		sfe_incr_exceptions(SFE_EXCEPTION_DEST_DEV_NOT_L3);
		goto failed_ret;
	}

	sic.src_dev = src_dev;
	sic.dest_dev = dest_dev;

	sic.src_mtu = msg->msg.rule_create.conn_rule.flow_mtu;
	sic.dest_mtu = msg->msg.rule_create.conn_rule.return_mtu;

	if (msg->msg.rule_create.valid_flags & SFE_RULE_CREATE_QOS_VALID) {
		sic.src_priority = msg->msg.rule_create.qos_rule.flow_qos_tag;
		sic.dest_priority = msg->msg.rule_create.qos_rule.return_qos_tag;
		sic.flags |= SFE_CREATE_FLAG_REMARK_PRIORITY;
	}

	if (msg->msg.rule_create.valid_flags & SFE_RULE_CREATE_DSCP_MARKING_VALID) {
		sic.src_dscp = msg->msg.rule_create.dscp_rule.flow_dscp;
		sic.dest_dscp = msg->msg.rule_create.dscp_rule.return_dscp;
		sic.flags |= SFE_CREATE_FLAG_REMARK_DSCP;
	}

#ifdef CONFIG_XFRM
	if (msg->msg.rule_create.valid_flags & SFE_RULE_CREATE_DIRECTION_VALID) {
		sic.original_accel = msg->msg.rule_create.direction_rule.flow_accel;
		sic.reply_accel = msg->msg.rule_create.direction_rule.return_accel;
	} else {
		sic.original_accel = sic.reply_accel = 1;
	}
#endif

	if (!sfe_ipv4_create_rule(&sic)) {
		/* Success */
		ret = SFE_CMN_RESPONSE_ACK;
	} else {
		/* Failed */
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CREATE_FAILED);
	}

	/*
	 * Fall through
	 */
failed_ret:
	if (src_dev) {
		dev_put(src_dev);
	}

	if (dest_dev) {
		dev_put(dest_dev);
	}

	/*
	 * Try to queue the response message
	 */
	((struct sfe_ipv4_msg *)response->msg)->cm.response = msg->cm.response = ret;
	sfe_enqueue_msg(sfe_ctx, response);

	return SFE_TX_SUCCESS;
}

/*
 * sfe_destroy_ipv4_rule_msg()
 *	Convert a destroy-rule message from ECM format to SFE format
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv4 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
static sfe_tx_status_t sfe_destroy_ipv4_rule_msg(struct sfe_ctx_instance_internal *sfe_ctx, struct sfe_ipv4_msg *msg)
{
	struct sfe_connection_destroy sid;
	struct sfe_response_msg *response;

	response = sfe_alloc_response_msg(SFE_MSG_TYPE_IPV4, msg);
	if (!response) {
		sfe_incr_exceptions(SFE_EXCEPTION_ENQUEUE_FAILED);
		return SFE_TX_FAILURE_QUEUE;
	}

	sid.protocol = msg->msg.rule_destroy.tuple.protocol;
	sid.src_ip.ip = msg->msg.rule_destroy.tuple.flow_ip;
	sid.dest_ip.ip = msg->msg.rule_destroy.tuple.return_ip;
	sid.src_port = msg->msg.rule_destroy.tuple.flow_ident;
	sid.dest_port = msg->msg.rule_destroy.tuple.return_ident;

	sfe_ipv4_destroy_rule(&sid);

	/*
	 * Try to queue the response message
	 */
	((struct sfe_ipv4_msg *)response->msg)->cm.response = msg->cm.response = SFE_CMN_RESPONSE_ACK;
	sfe_enqueue_msg(sfe_ctx, response);

	return SFE_TX_SUCCESS;
}

/*
 * sfe_ipv4_tx()
 *	Transmit an IPv4 message to SFE
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv4 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_ipv4_tx(struct sfe_ctx_instance *sfe_ctx, struct sfe_ipv4_msg *msg)
{
	switch (msg->cm.type) {
	case SFE_TX_CREATE_RULE_MSG:
		return sfe_create_ipv4_rule_msg(SFE_CTX_TO_PRIVATE(sfe_ctx), msg);
	case SFE_TX_DESTROY_RULE_MSG:
		return sfe_destroy_ipv4_rule_msg(SFE_CTX_TO_PRIVATE(sfe_ctx), msg);
	default:
		sfe_incr_exceptions(SFE_EXCEPTION_IPV4_MSG_UNKNOW);
		return SFE_TX_FAILURE_NOT_ENABLED;
	}
}
EXPORT_SYMBOL(sfe_ipv4_tx);

/*
 * sfe_ipv4_msg_init()
 *	Initialize an IPv4 message.
 */
void sfe_ipv4_msg_init(struct sfe_ipv4_msg *nim, u16 if_num, u32 type, u32 len,
		       sfe_ipv4_msg_callback_t cb, void *app_data)
{
	sfe_cmn_msg_init(&nim->cm, if_num, type, len, (void *)cb, app_data);
}
EXPORT_SYMBOL(sfe_ipv4_msg_init);
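
/*
 * Example (illustrative only; my_ipv4_response_cb and my_app_data are
 * hypothetical client names): a client such as ECM would build and send
 * a destroy request roughly like this, using the context returned by
 * sfe_ipv4_notify_register():
 *
 *	struct sfe_ipv4_msg nim;
 *
 *	sfe_ipv4_msg_init(&nim, 0, SFE_TX_DESTROY_RULE_MSG,
 *			  sizeof(nim.msg.rule_destroy),
 *			  my_ipv4_response_cb, my_app_data);
 *	nim.msg.rule_destroy.tuple.protocol = IPPROTO_UDP;
 *	... fill in the remaining tuple fields ...
 *	sfe_ipv4_tx(ctx, &nim);
 *
 * The response is delivered later to my_ipv4_response_cb() from the
 * response workqueue (see sfe_process_response_msg() above).
 */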

/*
 * sfe_ipv4_max_conn_count()
 *	Return the maximum number of connections SFE supports
 */
int sfe_ipv4_max_conn_count(void)
{
	return SFE_MAX_CONNECTION_NUM;
}
EXPORT_SYMBOL(sfe_ipv4_max_conn_count);

/*
 * sfe_ipv4_notify_register()
 *	Register a notifier callback for IPv4 messages from SFE
 *
 * @param cb The callback pointer
 * @param app_data The application context for this message
 *
 * @return struct sfe_ctx_instance * The SFE context
 */
struct sfe_ctx_instance *sfe_ipv4_notify_register(sfe_ipv4_msg_callback_t cb, void *app_data)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);
	/*
	 * Hook the shortcut sync callback.
	 */
	if (cb && !sfe_ctx->ipv4_stats_sync_cb) {
		sfe_ipv4_register_sync_rule_callback(sfe_ipv4_stats_sync_callback);
	}

	rcu_assign_pointer(sfe_ctx->ipv4_stats_sync_cb, cb);
	sfe_ctx->ipv4_stats_sync_data = app_data;

	spin_unlock_bh(&sfe_ctx->lock);

	return SFE_CTX_TO_PUBLIC(sfe_ctx);
}
EXPORT_SYMBOL(sfe_ipv4_notify_register);
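
/*
 * Example (a sketch; the names are hypothetical): registering for IPv4
 * stats-sync notifications and keeping the returned context for later
 * sfe_ipv4_tx() calls:
 *
 *	static void my_ipv4_sync_cb(void *app_data, struct sfe_ipv4_msg *msg)
 *	{
 *		... consume msg->msg.conn_stats ...
 *	}
 *
 *	struct sfe_ctx_instance *ctx =
 *		sfe_ipv4_notify_register(my_ipv4_sync_cb, my_app_data);
 *
 * The client should call sfe_ipv4_notify_unregister() on teardown.
 */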

/*
 * sfe_ipv4_notify_unregister()
 *	Unregister the notifier callback for IPv4 messages from SFE
 */
void sfe_ipv4_notify_unregister(void)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);
	/*
	 * Unregister our sync callback.
	 */
	if (sfe_ctx->ipv4_stats_sync_cb) {
		sfe_ipv4_register_sync_rule_callback(NULL);
		rcu_assign_pointer(sfe_ctx->ipv4_stats_sync_cb, NULL);
		sfe_ctx->ipv4_stats_sync_data = NULL;
	}
	spin_unlock_bh(&sfe_ctx->lock);

	sfe_clean_response_msg_by_type(sfe_ctx, SFE_MSG_TYPE_IPV4);

	return;
}
EXPORT_SYMBOL(sfe_ipv4_notify_unregister);

/*
 * sfe_ipv6_stats_sync_callback()
 *	Synchronize a connection's state.
 */
static void sfe_ipv6_stats_sync_callback(struct sfe_connection_sync *sis)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
	struct sfe_ipv6_msg msg;
	struct sfe_ipv6_conn_sync *sync_msg;
	sfe_ipv6_msg_callback_t sync_cb;

	rcu_read_lock();
	sync_cb = rcu_dereference(sfe_ctx->ipv6_stats_sync_cb);
	if (!sync_cb) {
		rcu_read_unlock();
		sfe_incr_exceptions(SFE_EXCEPTION_NO_SYNC_CB);
		return;
	}

	sync_msg = &msg.msg.conn_stats;

	memset(&msg, 0, sizeof(msg));
	sfe_cmn_msg_init(&msg.cm, 0, SFE_RX_CONN_STATS_SYNC_MSG,
			 sizeof(struct sfe_ipv6_conn_sync), NULL, NULL);

	/*
	 * Fill connection specific information
	 */
	sync_msg->protocol = (u8)sis->protocol;
	sfe_ipv6_addr_copy(sis->src_ip.ip6, sync_msg->flow_ip);
	sync_msg->flow_ident = sis->src_port;

	sfe_ipv6_addr_copy(sis->dest_ip.ip6, sync_msg->return_ip);
	sync_msg->return_ident = sis->dest_port;

	/*
	 * Fill TCP protocol specific information
	 */
	if (sis->protocol == IPPROTO_TCP) {
		sync_msg->flow_max_window = sis->src_td_max_window;
		sync_msg->flow_end = sis->src_td_end;
		sync_msg->flow_max_end = sis->src_td_max_end;

		sync_msg->return_max_window = sis->dest_td_max_window;
		sync_msg->return_end = sis->dest_td_end;
		sync_msg->return_max_end = sis->dest_td_max_end;
	}

	/*
	 * Fill statistics information
	 */
	sync_msg->flow_rx_packet_count = sis->src_new_packet_count;
	sync_msg->flow_rx_byte_count = sis->src_new_byte_count;
	sync_msg->flow_tx_packet_count = sis->dest_new_packet_count;
	sync_msg->flow_tx_byte_count = sis->dest_new_byte_count;

	sync_msg->return_rx_packet_count = sis->dest_new_packet_count;
	sync_msg->return_rx_byte_count = sis->dest_new_byte_count;
	sync_msg->return_tx_packet_count = sis->src_new_packet_count;
	sync_msg->return_tx_byte_count = sis->src_new_byte_count;

	/*
	 * Fill the expiration time to extend, in milliseconds
	 */
	sync_msg->inc_ticks = (((u32)sis->delta_jiffies) * MSEC_PER_SEC)/HZ;

	/*
	 * Fill other information
	 */
	switch (sis->reason) {
	case SFE_SYNC_REASON_DESTROY:
		sync_msg->reason = SFE_RULE_SYNC_REASON_DESTROY;
		break;
	case SFE_SYNC_REASON_FLUSH:
		sync_msg->reason = SFE_RULE_SYNC_REASON_FLUSH;
		break;
	default:
		sync_msg->reason = SFE_RULE_SYNC_REASON_STATS;
		break;
	}

	/*
	 * The SFE sync call is executed in a timer, so we can redirect it to ECM directly.
	 */
	sync_cb(sfe_ctx->ipv6_stats_sync_data, &msg);
	rcu_read_unlock();
}

/*
 * sfe_create_ipv6_rule_msg()
 *	Convert a create-rule message from ECM format to SFE format
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv6 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
static sfe_tx_status_t sfe_create_ipv6_rule_msg(struct sfe_ctx_instance_internal *sfe_ctx, struct sfe_ipv6_msg *msg)
{
	struct sfe_connection_create sic;
	struct net_device *src_dev = NULL;
	struct net_device *dest_dev = NULL;
	struct sfe_response_msg *response;
	enum sfe_cmn_response ret;

	response = sfe_alloc_response_msg(SFE_MSG_TYPE_IPV6, msg);
	if (!response) {
		sfe_incr_exceptions(SFE_EXCEPTION_ENQUEUE_FAILED);
		return SFE_TX_FAILURE_QUEUE;
	}

	if (!(msg->msg.rule_create.valid_flags & SFE_RULE_CREATE_CONN_VALID)) {
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CONNECTION_INVALID);
		goto failed_ret;
	}

	/*
	 * Bridged flows are not supported yet
	 */
	if (msg->msg.rule_create.rule_flags & SFE_RULE_CREATE_FLAG_BRIDGE_FLOW) {
		ret = SFE_CMN_RESPONSE_EINTERFACE;
		sfe_incr_exceptions(SFE_EXCEPTION_NOT_SUPPORT_BRIDGE);
		goto failed_ret;
	}

	sic.protocol = msg->msg.rule_create.tuple.protocol;
	sfe_ipv6_addr_copy(msg->msg.rule_create.tuple.flow_ip, sic.src_ip.ip6);
	sfe_ipv6_addr_copy(msg->msg.rule_create.tuple.return_ip, sic.dest_ip.ip6);
	sfe_ipv6_addr_copy(msg->msg.rule_create.tuple.flow_ip, sic.src_ip_xlate.ip6);
	sfe_ipv6_addr_copy(msg->msg.rule_create.tuple.return_ip, sic.dest_ip_xlate.ip6);

	sic.flags = 0;
	switch (sic.protocol) {
	case IPPROTO_TCP:
		if (!(msg->msg.rule_create.valid_flags & SFE_RULE_CREATE_TCP_VALID)) {
			ret = SFE_CMN_RESPONSE_EMSG;
			sfe_incr_exceptions(SFE_EXCEPTION_TCP_INVALID);
			goto failed_ret;
		}

		sic.src_port = msg->msg.rule_create.tuple.flow_ident;
		sic.dest_port = msg->msg.rule_create.tuple.return_ident;
		sic.src_port_xlate = msg->msg.rule_create.tuple.flow_ident;
		sic.dest_port_xlate = msg->msg.rule_create.tuple.return_ident;
		sic.src_td_window_scale = msg->msg.rule_create.tcp_rule.flow_window_scale;
		sic.src_td_max_window = msg->msg.rule_create.tcp_rule.flow_max_window;
		sic.src_td_end = msg->msg.rule_create.tcp_rule.flow_end;
		sic.src_td_max_end = msg->msg.rule_create.tcp_rule.flow_max_end;
		sic.dest_td_window_scale = msg->msg.rule_create.tcp_rule.return_window_scale;
		sic.dest_td_max_window = msg->msg.rule_create.tcp_rule.return_max_window;
		sic.dest_td_end = msg->msg.rule_create.tcp_rule.return_end;
		sic.dest_td_max_end = msg->msg.rule_create.tcp_rule.return_max_end;
		if (msg->msg.rule_create.rule_flags & SFE_RULE_CREATE_FLAG_NO_SEQ_CHECK) {
			sic.flags |= SFE_CREATE_FLAG_NO_SEQ_CHECK;
		}
		break;

	case IPPROTO_UDP:
		sic.src_port = msg->msg.rule_create.tuple.flow_ident;
		sic.dest_port = msg->msg.rule_create.tuple.return_ident;
		sic.src_port_xlate = msg->msg.rule_create.tuple.flow_ident;
		sic.dest_port_xlate = msg->msg.rule_create.tuple.return_ident;
		break;

	default:
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_PROTOCOL_NOT_SUPPORT);
		goto failed_ret;
	}

	memcpy(sic.src_mac, msg->msg.rule_create.conn_rule.flow_mac, ETH_ALEN);
	memset(sic.src_mac_xlate, 0, ETH_ALEN);
	memset(sic.dest_mac, 0, ETH_ALEN);
	memcpy(sic.dest_mac_xlate, msg->msg.rule_create.conn_rule.return_mac, ETH_ALEN);

	/*
	 * Does our input device support IP processing?
	 */
	src_dev = dev_get_by_index(&init_net, msg->msg.rule_create.conn_rule.flow_top_interface_num);
	if (!src_dev || !sfe_dev_is_layer_3_interface(src_dev, false)) {
		ret = SFE_CMN_RESPONSE_EINTERFACE;
		sfe_incr_exceptions(SFE_EXCEPTION_SRC_DEV_NOT_L3);
		goto failed_ret;
	}

	/*
	 * Does our output device support IP processing?
	 */
	dest_dev = dev_get_by_index(&init_net, msg->msg.rule_create.conn_rule.return_top_interface_num);
	if (!dest_dev || !sfe_dev_is_layer_3_interface(dest_dev, false)) {
		ret = SFE_CMN_RESPONSE_EINTERFACE;
		sfe_incr_exceptions(SFE_EXCEPTION_DEST_DEV_NOT_L3);
		goto failed_ret;
	}

	sic.src_dev = src_dev;
	sic.dest_dev = dest_dev;

	sic.src_mtu = msg->msg.rule_create.conn_rule.flow_mtu;
	sic.dest_mtu = msg->msg.rule_create.conn_rule.return_mtu;

	if (msg->msg.rule_create.valid_flags & SFE_RULE_CREATE_QOS_VALID) {
		sic.src_priority = msg->msg.rule_create.qos_rule.flow_qos_tag;
		sic.dest_priority = msg->msg.rule_create.qos_rule.return_qos_tag;
		sic.flags |= SFE_CREATE_FLAG_REMARK_PRIORITY;
	}

	if (msg->msg.rule_create.valid_flags & SFE_RULE_CREATE_DSCP_MARKING_VALID) {
		sic.src_dscp = msg->msg.rule_create.dscp_rule.flow_dscp;
		sic.dest_dscp = msg->msg.rule_create.dscp_rule.return_dscp;
		sic.flags |= SFE_CREATE_FLAG_REMARK_DSCP;
	}

#ifdef CONFIG_XFRM
	if (msg->msg.rule_create.valid_flags & SFE_RULE_CREATE_DIRECTION_VALID) {
		sic.original_accel = msg->msg.rule_create.direction_rule.flow_accel;
		sic.reply_accel = msg->msg.rule_create.direction_rule.return_accel;
	} else {
		sic.original_accel = sic.reply_accel = 1;
	}
#endif

	if (!sfe_ipv6_create_rule(&sic)) {
		/* Success */
		ret = SFE_CMN_RESPONSE_ACK;
	} else {
		/* Failed */
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CREATE_FAILED);
	}

	/*
	 * Fall through
	 */
failed_ret:
	if (src_dev) {
		dev_put(src_dev);
	}

	if (dest_dev) {
		dev_put(dest_dev);
	}

	/*
	 * Try to queue the response message
	 */
	((struct sfe_ipv6_msg *)response->msg)->cm.response = msg->cm.response = ret;
	sfe_enqueue_msg(sfe_ctx, response);

	return SFE_TX_SUCCESS;
}

/*
 * sfe_destroy_ipv6_rule_msg()
 *	Convert a destroy-rule message from ECM format to SFE format
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv6 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
static sfe_tx_status_t sfe_destroy_ipv6_rule_msg(struct sfe_ctx_instance_internal *sfe_ctx, struct sfe_ipv6_msg *msg)
{
	struct sfe_connection_destroy sid;
	struct sfe_response_msg *response;

	response = sfe_alloc_response_msg(SFE_MSG_TYPE_IPV6, msg);
	if (!response) {
		sfe_incr_exceptions(SFE_EXCEPTION_ENQUEUE_FAILED);
		return SFE_TX_FAILURE_QUEUE;
	}

	sid.protocol = msg->msg.rule_destroy.tuple.protocol;
	sfe_ipv6_addr_copy(msg->msg.rule_destroy.tuple.flow_ip, sid.src_ip.ip6);
	sfe_ipv6_addr_copy(msg->msg.rule_destroy.tuple.return_ip, sid.dest_ip.ip6);
	sid.src_port = msg->msg.rule_destroy.tuple.flow_ident;
	sid.dest_port = msg->msg.rule_destroy.tuple.return_ident;

	sfe_ipv6_destroy_rule(&sid);

	/*
	 * Try to queue the response message
	 */
	((struct sfe_ipv6_msg *)response->msg)->cm.response = msg->cm.response = SFE_CMN_RESPONSE_ACK;
	sfe_enqueue_msg(sfe_ctx, response);

	return SFE_TX_SUCCESS;
}

/*
 * sfe_ipv6_tx()
 *	Transmit an IPv6 message to SFE
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv6 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_ipv6_tx(struct sfe_ctx_instance *sfe_ctx, struct sfe_ipv6_msg *msg)
{
	switch (msg->cm.type) {
	case SFE_TX_CREATE_RULE_MSG:
		return sfe_create_ipv6_rule_msg(SFE_CTX_TO_PRIVATE(sfe_ctx), msg);
	case SFE_TX_DESTROY_RULE_MSG:
		return sfe_destroy_ipv6_rule_msg(SFE_CTX_TO_PRIVATE(sfe_ctx), msg);
	default:
		sfe_incr_exceptions(SFE_EXCEPTION_IPV6_MSG_UNKNOW);
		return SFE_TX_FAILURE_NOT_ENABLED;
	}
}
EXPORT_SYMBOL(sfe_ipv6_tx);

/*
 * sfe_ipv6_msg_init()
 *	Initialize an IPv6 message.
 */
void sfe_ipv6_msg_init(struct sfe_ipv6_msg *nim, u16 if_num, u32 type, u32 len,
		       sfe_ipv6_msg_callback_t cb, void *app_data)
{
	sfe_cmn_msg_init(&nim->cm, if_num, type, len, (void *)cb, app_data);
}
EXPORT_SYMBOL(sfe_ipv6_msg_init);

/*
 * sfe_ipv6_max_conn_count()
 *	Return the maximum number of connections SFE supports
 */
int sfe_ipv6_max_conn_count(void)
{
	return SFE_MAX_CONNECTION_NUM;
}
EXPORT_SYMBOL(sfe_ipv6_max_conn_count);

/*
 * sfe_ipv6_notify_register()
 *	Register a notifier callback for IPv6 messages from SFE
 *
 * @param cb The callback pointer
 * @param app_data The application context for this message
 *
 * @return struct sfe_ctx_instance * The SFE context
 */
struct sfe_ctx_instance *sfe_ipv6_notify_register(sfe_ipv6_msg_callback_t cb, void *app_data)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);
	/*
	 * Hook the shortcut sync callback.
	 */
	if (cb && !sfe_ctx->ipv6_stats_sync_cb) {
		sfe_ipv6_register_sync_rule_callback(sfe_ipv6_stats_sync_callback);
	}

	rcu_assign_pointer(sfe_ctx->ipv6_stats_sync_cb, cb);
	sfe_ctx->ipv6_stats_sync_data = app_data;

	spin_unlock_bh(&sfe_ctx->lock);

	return SFE_CTX_TO_PUBLIC(sfe_ctx);
}
EXPORT_SYMBOL(sfe_ipv6_notify_register);

/*
 * sfe_ipv6_notify_unregister()
 *	Unregister the notifier callback for IPv6 messages from SFE
 */
void sfe_ipv6_notify_unregister(void)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);
	/*
	 * Unregister our sync callback.
	 */
	if (sfe_ctx->ipv6_stats_sync_cb) {
		sfe_ipv6_register_sync_rule_callback(NULL);
		rcu_assign_pointer(sfe_ctx->ipv6_stats_sync_cb, NULL);
		sfe_ctx->ipv6_stats_sync_data = NULL;
	}
	spin_unlock_bh(&sfe_ctx->lock);

	sfe_clean_response_msg_by_type(sfe_ctx, SFE_MSG_TYPE_IPV6);

	return;
}
EXPORT_SYMBOL(sfe_ipv6_notify_unregister);

/*
 * sfe_tun6rd_tx()
 *	Transmit a tun6rd message to the SFE engine
 */
sfe_tx_status_t sfe_tun6rd_tx(struct sfe_ctx_instance *sfe_ctx, struct sfe_tun6rd_msg *msg)
{
	sfe_incr_exceptions(SFE_EXCEPTION_NOT_SUPPORT_6RD);
	return SFE_TX_FAILURE_NOT_ENABLED;
}
EXPORT_SYMBOL(sfe_tun6rd_tx);

/*
 * sfe_tun6rd_msg_init()
 *	Initialize a sfe_tun6rd message.
 */
void sfe_tun6rd_msg_init(struct sfe_tun6rd_msg *ncm, u16 if_num, u32 type, u32 len, void *cb, void *app_data)
{
	sfe_cmn_msg_init(&ncm->cm, if_num, type, len, cb, app_data);
}
EXPORT_SYMBOL(sfe_tun6rd_msg_init);

/*
 * sfe_recv()
 *	Handle packet receives.
 *
 * Returns 1 if the packet is forwarded or 0 if it isn't.
 */
int sfe_recv(struct sk_buff *skb)
{
	struct net_device *dev;

	/*
	 * We know that for the vast majority of packets we need the transport
	 * layer header so we may as well start to fetch it now!
	 */
	prefetch(skb->data + 32);
	barrier();

	dev = skb->dev;

#ifdef CONFIG_NET_CLS_ACT
	/*
	 * If an ingress Qdisc is configured and the packet has not been
	 * processed by it yet, we cannot accelerate this packet.
	 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0))
	if (dev->ingress_queue && !(skb->tc_verd & TC_NCLS)) {
		return 0;
	}
#else
	if (rcu_access_pointer(dev->miniq_ingress) && !skb->tc_skip_classify) {
		return 0;
	}
#endif
#endif

	/*
	 * We're only interested in IPv4 and IPv6 packets.
	 */
	if (likely(htons(ETH_P_IP) == skb->protocol)) {
		if (sfe_dev_is_layer_3_interface(dev, true)) {
			return sfe_ipv4_recv(dev, skb);
		} else {
			DEBUG_TRACE("no IPv4 address for device: %s\n", dev->name);
			return 0;
		}
	}

	if (likely(htons(ETH_P_IPV6) == skb->protocol)) {
		if (sfe_dev_is_layer_3_interface(dev, false)) {
			return sfe_ipv6_recv(dev, skb);
		} else {
			DEBUG_TRACE("no IPv6 address for device: %s\n", dev->name);
			return 0;
		}
	}

	DEBUG_TRACE("not IP packet\n");
	return 0;
}

/*
 * sfe_get_exceptions()
 *	Dump exception counters
 */
static ssize_t sfe_get_exceptions(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	int idx, len;
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);
	for (len = 0, idx = 0; idx < SFE_EXCEPTION_MAX; idx++) {
		if (sfe_ctx->exceptions[idx]) {
			len += snprintf(buf + len, (ssize_t)(PAGE_SIZE - len), "%s = %d\n", sfe_exception_events_string[idx], sfe_ctx->exceptions[idx]);
		}
	}
	spin_unlock_bh(&sfe_ctx->lock);

	return len;
}

/*
 * sysfs attributes.
 */
static const struct device_attribute sfe_exceptions_attr =
	__ATTR(exceptions, S_IRUGO, sfe_get_exceptions, NULL);
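
/*
 * Once the module is initialized (see sfe_init_if() below), the counters
 * can be read from userspace via the sysfs node this attribute creates,
 * e.g. "cat /sys/sfe/exceptions" prints one "NAME = count" line per
 * non-zero counter.
 */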

/*
 * sfe_init_if()
 */
int sfe_init_if(void)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
	int result = -1;

	/*
	 * Create /sys/sfe
	 */
	sfe_ctx->sys_sfe = kobject_create_and_add("sfe", NULL);
	if (!sfe_ctx->sys_sfe) {
		DEBUG_ERROR("failed to register sfe\n");
		goto exit1;
	}

	/*
	 * Create /sys/sfe/exceptions
	 */
	result = sysfs_create_file(sfe_ctx->sys_sfe, &sfe_exceptions_attr.attr);
	if (result) {
		DEBUG_ERROR("failed to register exceptions file: %d\n", result);
		goto exit2;
	}

	spin_lock_init(&sfe_ctx->lock);

	INIT_LIST_HEAD(&sfe_ctx->msg_queue);
	INIT_WORK(&sfe_ctx->work, sfe_process_response_msg);

	/*
	 * Hook the receive path in the network stack.
	 */
	BUG_ON(athrs_fast_nat_recv);
	RCU_INIT_POINTER(athrs_fast_nat_recv, sfe_recv);

	return 0;
exit2:
	kobject_put(sfe_ctx->sys_sfe);
exit1:
	return result;
}

/*
 * sfe_exit_if()
 */
void sfe_exit_if(void)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	/*
	 * Unregister our receive callback.
	 */
	RCU_INIT_POINTER(athrs_fast_nat_recv, NULL);

	/*
	 * Wait for all callbacks to complete.
	 */
	rcu_barrier();

	/*
	 * Destroy all connections.
	 */
	sfe_ipv4_destroy_all_rules_for_dev(NULL);
	sfe_ipv6_destroy_all_rules_for_dev(NULL);

	/*
	 * Stop the work queue and flush all pending messages in the queue
	 */
	cancel_work_sync(&sfe_ctx->work);
	sfe_process_response_msg(&sfe_ctx->work);

	/*
	 * Unregister our sync callback.
	 */
	sfe_ipv4_notify_unregister();
	sfe_ipv6_notify_unregister();

	kobject_put(sfe_ctx->sys_sfe);

	return;
}