/*
 * sfe.c
 *	API for shortcut forwarding engine.
 *
 * Copyright (c) 2015,2016, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/module.h>
#include <linux/version.h>
#include <linux/sysfs.h>
#include <linux/skbuff.h>
#include <net/addrconf.h>
#include <linux/inetdevice.h>
#include <net/pkt_sched.h>
#include <net/vxlan.h>
#include <net/gre.h>

#include "sfe_debug.h"
#include "sfe_api.h"
#include "sfe.h"
#include "sfe_pppoe.h"
#include "sfe_vlan.h"

extern int max_ipv4_conn;
extern int max_ipv6_conn;

#define SFE_MESSAGE_VERSION 0x1
#define sfe_ipv6_addr_copy(src, dest) memcpy((void *)(dest), (void *)(src), 16)
#define sfe_ipv4_stopped(CTX) (rcu_dereference((CTX)->ipv4_stats_sync_cb) == NULL)
#define sfe_ipv6_stopped(CTX) (rcu_dereference((CTX)->ipv6_stats_sync_cb) == NULL)

typedef enum sfe_exception {
	SFE_EXCEPTION_IPV4_MSG_UNKNOW,
	SFE_EXCEPTION_IPV6_MSG_UNKNOW,
	SFE_EXCEPTION_CONNECTION_INVALID,
	SFE_EXCEPTION_NOT_SUPPORT_BRIDGE,
	SFE_EXCEPTION_TCP_INVALID,
	SFE_EXCEPTION_PROTOCOL_NOT_SUPPORT,
	SFE_EXCEPTION_SRC_DEV_NOT_L3,
	SFE_EXCEPTION_DEST_DEV_NOT_L3,
	SFE_EXCEPTION_CFG_ERR,
	SFE_EXCEPTION_CREATE_FAILED,
	SFE_EXCEPTION_ENQUEUE_FAILED,
	SFE_EXCEPTION_NOT_SUPPORT_6RD,
	SFE_EXCEPTION_NO_SYNC_CB,
	SFE_EXCEPTION_MAX
} sfe_exception_t;

static char *sfe_exception_events_string[SFE_EXCEPTION_MAX] = {
	"IPV4_MSG_UNKNOW",
	"IPV6_MSG_UNKNOW",
	"CONNECTION_INVALID",
	"NOT_SUPPORT_BRIDGE",
	"TCP_INVALID",
	"PROTOCOL_NOT_SUPPORT",
	"SRC_DEV_NOT_L3",
	"DEST_DEV_NOT_L3",
	"CONFIG_ERROR",
	"CREATE_FAILED",
	"ENQUEUE_FAILED",
	"NOT_SUPPORT_6RD",
	"NO_SYNC_CB"
};

/*
 * Message type of queued response message
 */
typedef enum {
	SFE_MSG_TYPE_IPV4,
	SFE_MSG_TYPE_IPV6
} sfe_msg_types_t;

/*
 * Queued response message,
 * will be sent back to caller in workqueue
 */
struct sfe_response_msg {
	struct list_head node;
	sfe_msg_types_t type;
	void *msg[0];
};

/*
 * SFE context instance, private to SFE
 */
struct sfe_ctx_instance_internal {
	struct sfe_ctx_instance base;	/* Exported SFE context, public to users of SFE */

	/*
	 * Control state.
	 */
	struct kobject *sys_sfe;	/* Sysfs linkage */

	struct list_head msg_queue;	/* Response message queue */
	spinlock_t lock;		/* Lock to protect message queue */

	struct work_struct work;	/* Work to send response messages back to callers */

	sfe_ipv4_msg_callback_t __rcu ipv4_stats_sync_cb;	/* Callback to call to sync ipv4 statistics */
	sfe_ipv4_msg_callback_t __rcu ipv4_stats_sync_many_cb;	/* Callback to call to sync many ipv4 statistics */
	void *ipv4_stats_sync_data;	/* Argument for above callback: ipv4_stats_sync_cb */

	sfe_ipv6_msg_callback_t __rcu ipv6_stats_sync_cb;	/* Callback to call to sync ipv6 statistics */
	sfe_ipv6_msg_callback_t __rcu ipv6_stats_sync_many_cb;	/* Callback to call to sync many ipv6 statistics */
	void *ipv6_stats_sync_data;	/* Argument for above callback: ipv6_stats_sync_cb */

	u32 exceptions[SFE_EXCEPTION_MAX];	/* Statistics for exceptions */

	int32_t l2_feature_support;	/* L2 feature support */
};

static struct sfe_ctx_instance_internal __sfe_ctx;

/*
 * Convert public SFE context to internal context
 */
#define SFE_CTX_TO_PRIVATE(base) (struct sfe_ctx_instance_internal *)(base)

/*
 * Convert internal SFE context to public context
 */
#define SFE_CTX_TO_PUBLIC(intrv) (struct sfe_ctx_instance *)(intrv)

/*
 * sfe_incr_exceptions()
 *	Increase an exception counter.
 *
 * TODO: Merge sfe_ctx stats to ipv4 and ipv6 percpu stats.
 */
static inline void sfe_incr_exceptions(sfe_exception_t except)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);
	sfe_ctx->exceptions[except]++;
	spin_unlock_bh(&sfe_ctx->lock);
}

/*
 * sfe_dev_is_layer_3_interface()
 *	Check if a network device is an IPv4 or IPv6 layer-3 interface.
 *
 * @param dev network device to check
 * @param check_v4 true to check for an IPv4 layer-3 interface (one that has
 *	an IPv4 address); false to check for an IPv6 layer-3 interface (one
 *	that has an IPv6 address)
 */
inline bool sfe_dev_is_layer_3_interface(struct net_device *dev, bool check_v4)
{
	struct in_device *in4_dev;
	struct inet6_dev *in6_dev;

	BUG_ON(!dev);

	if (likely(check_v4)) {
		/*
		 * Does our input device support IPv4 processing?
		 */
		in4_dev = (struct in_device *)dev->ip_ptr;
		if (unlikely(!in4_dev)) {
			return false;
		}

		/*
		 * Does it have an IPv4 address? If it doesn't then it could be
		 * a MAP-T (or DS-Lite) interface, else we can't do anything
		 * interesting here!
		 */
		if (likely(in4_dev->ifa_list || (dev->priv_flags_ext & IFF_EXT_MAPT)
			   || (dev->rtnl_link_ops
			       && !strcmp(dev->rtnl_link_ops->kind, "ip6tnl")))) {
			return true;
		}
		return false;
	}

	/*
	 * Does our input device support IPv6 processing?
	 */
	in6_dev = (struct inet6_dev *)dev->ip6_ptr;
	if (unlikely(!in6_dev)) {
		return false;
	}

	/*
	 * Does it have an IPv6 address? If it doesn't then it could be a
	 * MAP-T interface, else we can't do anything interesting here!
	 */
	if (likely(!list_empty(&in6_dev->addr_list) || (dev->priv_flags_ext & IFF_EXT_MAPT))) {
		return true;
	}

	return false;
}

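/*
 * Illustrative usage sketch (not part of this file's API): a caller that
 * wants to know whether routed IPv4 flows can terminate on a device might
 * do the following. The device name "eth0" is hypothetical.
 *
 *	struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *	if (dev) {
 *		if (sfe_dev_is_layer_3_interface(dev, true)) {
 *			... dev has an IPv4 address (or is MAP-T/ip6tnl) ...
 *		}
 *		dev_put(dev);
 *	}
 */
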
/*
 * sfe_routed_dev_allow()
 *	Check whether routed acceleration is allowed on a device.
 */
static bool sfe_routed_dev_allow(struct net_device *dev, bool is_routed, bool check_v4)
{
	if (!is_routed) {
		return true;
	}

	if (sfe_dev_is_layer_3_interface(dev, check_v4)) {
		return true;
	}

	/*
	 * GRE and VxLAN devices do not have an IP address, so the L3
	 * interface check above fails for them. Allow rule creation between
	 * a GRE/VxLAN device and the WAN device for routed flows.
	 */
	if (netif_is_vxlan(dev)) {
		return true;
	}

#ifdef SFE_GRE_TUN_ENABLE
	if (netif_is_gretap(dev) || netif_is_gre(dev)) {
		return true;
	}

	if (netif_is_ip6gre(dev) || netif_is_ip6gretap(dev)) {
		return true;
	}
#endif

	return false;
}

/*
 * sfe_dev_has_hw_csum()
 *	Check whether the device supports hardware checksum offload.
 */
bool sfe_dev_has_hw_csum(struct net_device *dev)
{
	if (netif_is_vxlan(dev)) {
		return false;
	}

#ifdef SFE_GRE_TUN_ENABLE
	if (netif_is_gre(dev) || netif_is_gretap(dev)) {
		return false;
	}

	if (netif_is_ip6gre(dev) || netif_is_ip6gretap(dev)) {
		return false;
	}
#endif

	/*
	 * The MAP-E/DS-Lite and Tun6rd tunnels share the same rtnl_link_ops,
	 * whose kind is "ip6tnl". HW csum for these tunnel devices should be
	 * disabled.
	 */
	if (dev->rtnl_link_ops && !strcmp(dev->rtnl_link_ops->kind, "ip6tnl")) {
		return false;
	}

	return true;
}

/*
 * sfe_clean_response_msg_by_type()
 *	Clean queued response messages of the given type when ECM exits.
 *
 * @param sfe_ctx SFE context
 * @param msg_type message type, ipv4 or ipv6
 */
static void sfe_clean_response_msg_by_type(struct sfe_ctx_instance_internal *sfe_ctx, sfe_msg_types_t msg_type)
{
	struct sfe_response_msg *response, *tmp;

	if (!sfe_ctx) {
		return;
	}

	spin_lock_bh(&sfe_ctx->lock);
	list_for_each_entry_safe(response, tmp, &sfe_ctx->msg_queue, node) {
		if (response->type == msg_type) {
			list_del(&response->node);
			/*
			 * Free response message
			 */
			kfree(response);
		}
	}
	spin_unlock_bh(&sfe_ctx->lock);
}

/*
 * sfe_process_response_msg()
 *	Send all pending response messages to ECM by calling the callback function included in each message.
 *
 * @param work work structure
 */
static void sfe_process_response_msg(struct work_struct *work)
{
	struct sfe_ctx_instance_internal *sfe_ctx = container_of(work, struct sfe_ctx_instance_internal, work);
	struct sfe_response_msg *response;

	spin_lock_bh(&sfe_ctx->lock);
	while ((response = list_first_entry_or_null(&sfe_ctx->msg_queue, struct sfe_response_msg, node))) {
		list_del(&response->node);
		spin_unlock_bh(&sfe_ctx->lock);
		rcu_read_lock();

		/*
		 * Send response message back to caller
		 */
		if ((response->type == SFE_MSG_TYPE_IPV4) && !sfe_ipv4_stopped(sfe_ctx)) {
			struct sfe_ipv4_msg *msg = (struct sfe_ipv4_msg *)response->msg;
			sfe_ipv4_msg_callback_t callback = (sfe_ipv4_msg_callback_t)msg->cm.cb;
			if (callback) {
				callback((void *)msg->cm.app_data, msg);
			}
		} else if ((response->type == SFE_MSG_TYPE_IPV6) && !sfe_ipv6_stopped(sfe_ctx)) {
			struct sfe_ipv6_msg *msg = (struct sfe_ipv6_msg *)response->msg;
			sfe_ipv6_msg_callback_t callback = (sfe_ipv6_msg_callback_t)msg->cm.cb;
			if (callback) {
				callback((void *)msg->cm.app_data, msg);
			}
		}

		rcu_read_unlock();
		/*
		 * Free response message
		 */
		kfree(response);
		spin_lock_bh(&sfe_ctx->lock);
	}
	spin_unlock_bh(&sfe_ctx->lock);
}

/*
 * sfe_alloc_response_msg()
 *	Allocate and construct a new response message.
 *
 * @param type message type
 * @param msg used to construct the response message if not NULL
 *
 * @return !NULL, success; NULL, failed
 */
static struct sfe_response_msg *
sfe_alloc_response_msg(sfe_msg_types_t type, void *msg)
{
	struct sfe_response_msg *response;
	int size;

	switch (type) {
	case SFE_MSG_TYPE_IPV4:
		size = sizeof(struct sfe_ipv4_msg);
		break;
	case SFE_MSG_TYPE_IPV6:
		size = sizeof(struct sfe_ipv6_msg);
		break;
	default:
		DEBUG_ERROR("message type %d not supported\n", type);
		return NULL;
	}

	response = (struct sfe_response_msg *)kzalloc(sizeof(struct sfe_response_msg) + size, GFP_ATOMIC);
	if (!response) {
		DEBUG_ERROR("allocate memory failed\n");
		return NULL;
	}

	response->type = type;

	if (msg) {
		memcpy(response->msg, msg, size);
	}

	return response;
}

/*
 * sfe_fast_xmit_check()
 *	Check the fast transmit feasibility.
 *
 * This checks the per-direction attributes that rule out fast transmit:
 * xfrm packets, packets that come from a local socket, and skbs that need
 * socket validation.
 */
bool sfe_fast_xmit_check(struct sk_buff *skb, netdev_features_t features)
{

#ifdef CONFIG_SOCK_VALIDATE_XMIT
	if (skb->sk && sk_fullsock(skb->sk) && skb->sk->sk_validate_xmit_skb) {
		DEBUG_INFO("%px:need sk validation\n", skb);
		return false;
#ifdef CONFIG_TLS_DEVICE
	} else if (skb->decrypted) {
		DEBUG_INFO("%px:SK or decrypted\n", skb);
		return false;
#endif
	}
#endif
	if (skb_vlan_tag_present(skb)) {
		DEBUG_INFO("%px:Vlan is present\n", skb);
		return false;
	}

	if (netif_needs_gso(skb, features)) {
		DEBUG_INFO("%px:Need to be gso\n", skb);
		return false;
	}

	if (skb_sec_path(skb)) {
		DEBUG_INFO("%px:XFRM is present\n", skb);
		return false;
	}

	return true;
}

/*
 * sfe_enqueue_msg()
 *	Queue a response message.
 *
 * @param sfe_ctx SFE context
 * @param response response message to be queued
 */
static inline void sfe_enqueue_msg(struct sfe_ctx_instance_internal *sfe_ctx, struct sfe_response_msg *response)
{
	spin_lock_bh(&sfe_ctx->lock);
	list_add_tail(&response->node, &sfe_ctx->msg_queue);
	spin_unlock_bh(&sfe_ctx->lock);

	schedule_work(&sfe_ctx->work);
}

/*
 * sfe_cmn_msg_init()
 *	Initialize the common message structure.
 *
 * @param ncm message to init
 * @param if_num interface number related with this message
 * @param type message type
 * @param cb callback function to process the response of this message
 * @param app_data argument for above callback function
 */
static void sfe_cmn_msg_init(struct sfe_cmn_msg *ncm, u16 if_num, u32 type, u32 len, void *cb, void *app_data)
{
	ncm->interface = if_num;
	ncm->version = SFE_MESSAGE_VERSION;
	ncm->type = type;
	ncm->len = len;
	ncm->cb = (sfe_ptr_t)cb;
	ncm->app_data = (sfe_ptr_t)app_data;
}

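/*
 * Minimal usage sketch (illustrative, not called anywhere in this file):
 * the common header is filled in first, then the type-specific payload.
 * my_response_cb and my_app_data are hypothetical caller-side names, and
 * the len value is assumed to follow the same pattern as the
 * SFE_RX_CONN_STATS_SYNC_MSG initialization below.
 *
 *	struct sfe_ipv4_msg nim;
 *
 *	memset(&nim, 0, sizeof(nim));
 *	sfe_cmn_msg_init(&nim.cm, 0, SFE_TX_DESTROY_RULE_MSG,
 *			 sizeof(struct sfe_ipv4_rule_destroy_msg),
 *			 my_response_cb, my_app_data);
 */
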
/*
 * sfe_ipv4_stats_many_sync_callback()
 *	Synchronize many connections' state.
 *
 * @param msg SFE statistics from the SFE core engine
 */
static void sfe_ipv4_stats_many_sync_callback(struct sfe_ipv4_msg *msg)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
	sfe_ipv4_msg_callback_t sync_cb;

	rcu_read_lock();
	sync_cb = rcu_dereference(sfe_ctx->ipv4_stats_sync_many_cb);
	rcu_read_unlock();
	if (!sync_cb) {
		sfe_incr_exceptions(SFE_EXCEPTION_NO_SYNC_CB);
		return;
	}
	sync_cb(sfe_ctx->ipv4_stats_sync_data, msg);
}

/*
 * sfe_ipv4_stats_convert()
 *	Convert the internal message format to the ECM format.
 *
 * @param sync_msg stats msg to ECM
 * @param sis SFE statistics from the SFE core engine
 */
void sfe_ipv4_stats_convert(struct sfe_ipv4_conn_sync *sync_msg, struct sfe_connection_sync *sis)
{
	/*
	 * Fill connection specific information
	 */
	sync_msg->protocol = (u8)sis->protocol;
	sync_msg->flow_ip = sis->src_ip.ip;
	sync_msg->flow_ip_xlate = sis->src_ip_xlate.ip;
	sync_msg->flow_ident = sis->src_port;
	sync_msg->flow_ident_xlate = sis->src_port_xlate;

	sync_msg->return_ip = sis->dest_ip.ip;
	sync_msg->return_ip_xlate = sis->dest_ip_xlate.ip;
	sync_msg->return_ident = sis->dest_port;
	sync_msg->return_ident_xlate = sis->dest_port_xlate;

	/*
	 * Fill TCP protocol specific information
	 */
	if (sis->protocol == IPPROTO_TCP) {
		sync_msg->flow_max_window = sis->src_td_max_window;
		sync_msg->flow_end = sis->src_td_end;
		sync_msg->flow_max_end = sis->src_td_max_end;

		sync_msg->return_max_window = sis->dest_td_max_window;
		sync_msg->return_end = sis->dest_td_end;
		sync_msg->return_max_end = sis->dest_td_max_end;
	}

	/*
	 * Fill statistics information
	 */
	sync_msg->flow_rx_packet_count = sis->src_new_packet_count;
	sync_msg->flow_rx_byte_count = sis->src_new_byte_count;
	sync_msg->flow_tx_packet_count = sis->dest_new_packet_count;
	sync_msg->flow_tx_byte_count = sis->dest_new_byte_count;

	sync_msg->return_rx_packet_count = sis->dest_new_packet_count;
	sync_msg->return_rx_byte_count = sis->dest_new_byte_count;
	sync_msg->return_tx_packet_count = sis->src_new_packet_count;
	sync_msg->return_tx_byte_count = sis->src_new_byte_count;

	/*
	 * Fill expiration time to extend, in unit of msec
	 */
	sync_msg->inc_ticks = (((u32)sis->delta_jiffies) * MSEC_PER_SEC)/HZ;

	/*
	 * Fill other information
	 */
	switch (sis->reason) {
	case SFE_SYNC_REASON_DESTROY:
		sync_msg->reason = SFE_RULE_SYNC_REASON_DESTROY;
		break;
	case SFE_SYNC_REASON_FLUSH:
		sync_msg->reason = SFE_RULE_SYNC_REASON_FLUSH;
		break;
	default:
		sync_msg->reason = SFE_RULE_SYNC_REASON_STATS;
		break;
	}
}

/*
 * sfe_ipv4_stats_one_sync_callback()
 *	Synchronize a connection's state.
 *
 * @param sis SFE statistics from the SFE core engine
 */
static void sfe_ipv4_stats_one_sync_callback(struct sfe_connection_sync *sis)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
	struct sfe_ipv4_msg msg;
	struct sfe_ipv4_conn_sync *sync_msg;
	sfe_ipv4_msg_callback_t sync_cb;

	rcu_read_lock();
	sync_cb = rcu_dereference(sfe_ctx->ipv4_stats_sync_cb);
	rcu_read_unlock();
	if (!sync_cb) {
		sfe_incr_exceptions(SFE_EXCEPTION_NO_SYNC_CB);
		return;
	}

	sync_msg = &msg.msg.conn_stats;

	memset(&msg, 0, sizeof(msg));
	sfe_cmn_msg_init(&msg.cm, 0, SFE_RX_CONN_STATS_SYNC_MSG,
			 sizeof(struct sfe_ipv4_conn_sync), NULL, NULL);

	sfe_ipv4_stats_convert(sync_msg, sis);

	/*
	 * The SFE sync callback is executed from a timer, so we can redirect it to ECM directly.
	 */
	sync_cb(sfe_ctx->ipv4_stats_sync_data, &msg);
}

/*
 * sfe_recv_parse_l2()
 *	Parse L2 headers
 *
 * Returns true if the packet is parsed and false otherwise.
 */
static bool sfe_recv_parse_l2(struct net_device *dev, struct sk_buff *skb, struct sfe_l2_info *l2_info)
{
	/*
	 * VLAN parsing
	 */
	if (unlikely(!sfe_vlan_check_and_parse_tag(skb, l2_info))) {
		return false;
	}

	/*
	 * Parse only PPPoE session packets
	 */
	if (htons(ETH_P_PPP_SES) == skb->protocol) {
		if (!sfe_pppoe_parse_hdr(skb, l2_info)) {

			/*
			 * For exceptions from PPPoE, return from here without modifying skb->data.
			 * This includes non-IPv4/v6 cases as well.
			 */
			return false;
		}
	}
	return true;
}

/*
 * sfe_recv_undo_parse_l2()
 */
static void sfe_recv_undo_parse_l2(struct net_device *dev, struct sk_buff *skb, struct sfe_l2_info *l2_info)
{
	/*
	 * PPPoE undo
	 */
	sfe_pppoe_undo_parse(skb, l2_info);

	/*
	 * VLAN undo
	 */
	sfe_vlan_undo_parse(skb, l2_info);

	/*
	 * The packet is not handled by SFE, so reset the network header.
	 */
	skb_reset_network_header(skb);
}

/*
 * sfe_create_ipv4_rule_msg()
 *	Convert a create message from the ECM format to the SFE format.
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv4 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_create_ipv4_rule_msg(struct sfe_ctx_instance_internal *sfe_ctx, struct sfe_ipv4_msg *msg)
{
	struct net_device *src_dev = NULL;
	struct net_device *dest_dev = NULL;
	struct sfe_response_msg *response;
	enum sfe_cmn_response ret = SFE_TX_SUCCESS;
	bool is_routed = true;
	bool cfg_err;

	response = sfe_alloc_response_msg(SFE_MSG_TYPE_IPV4, msg);
	if (!response) {
		sfe_incr_exceptions(SFE_EXCEPTION_ENQUEUE_FAILED);
		return SFE_TX_FAILURE_QUEUE;
	}

	if (!(msg->msg.rule_create.valid_flags & SFE_RULE_CREATE_CONN_VALID)) {
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CONNECTION_INVALID);
		goto failed_ret;
	}

	switch (msg->msg.rule_create.tuple.protocol) {
	case IPPROTO_TCP:
		if (!(msg->msg.rule_create.valid_flags & SFE_RULE_CREATE_TCP_VALID)) {
			ret = SFE_CMN_RESPONSE_EMSG;
			sfe_incr_exceptions(SFE_EXCEPTION_TCP_INVALID);
			goto failed_ret;
		}
		/* fall through */
	case IPPROTO_UDP:
		break;

	case IPPROTO_GRE:
		break;

	default:
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_PROTOCOL_NOT_SUPPORT);
		goto failed_ret;
	}

	/*
	 * Bridge flows are accelerated if the L2 feature is enabled.
	 */
	if (msg->msg.rule_create.rule_flags & SFE_RULE_CREATE_FLAG_BRIDGE_FLOW) {
		if (!sfe_is_l2_feature_enabled()) {
			ret = SFE_CMN_RESPONSE_EINTERFACE;
			sfe_incr_exceptions(SFE_EXCEPTION_NOT_SUPPORT_BRIDGE);
			goto failed_ret;
		}

		is_routed = false;
	}

	/*
	 * Does our input device support IP processing?
	 */
	src_dev = dev_get_by_index(&init_net, msg->msg.rule_create.conn_rule.flow_top_interface_num);
	if (!src_dev || !sfe_routed_dev_allow(src_dev, is_routed, true)) {
		ret = SFE_CMN_RESPONSE_EINTERFACE;
		sfe_incr_exceptions(SFE_EXCEPTION_SRC_DEV_NOT_L3);
		goto failed_ret;
	}

	/*
	 * Check whether the L2 feature is disabled while the rule flag is configured to use the bottom interface
	 */
	cfg_err = (msg->msg.rule_create.rule_flags & SFE_RULE_CREATE_FLAG_USE_FLOW_BOTTOM_INTERFACE) && !sfe_is_l2_feature_enabled();
	if (cfg_err) {
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CFG_ERR);
		goto failed_ret;
	}

	/*
	 * Does our output device support IP processing?
	 */
	dest_dev = dev_get_by_index(&init_net, msg->msg.rule_create.conn_rule.return_top_interface_num);
	if (!dest_dev || !sfe_routed_dev_allow(dest_dev, is_routed, true)) {
		ret = SFE_CMN_RESPONSE_EINTERFACE;
		sfe_incr_exceptions(SFE_EXCEPTION_DEST_DEV_NOT_L3);
		goto failed_ret;
	}

	/*
	 * Check whether the L2 feature is disabled while the rule flag is configured to use the bottom interface
	 */
	cfg_err = (msg->msg.rule_create.rule_flags & SFE_RULE_CREATE_FLAG_USE_RETURN_BOTTOM_INTERFACE) && !sfe_is_l2_feature_enabled();
	if (cfg_err) {
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CFG_ERR);
		goto failed_ret;
	}

	if (!sfe_ipv4_create_rule(&msg->msg.rule_create)) {
		/* success */
		ret = SFE_CMN_RESPONSE_ACK;
	} else {
		/* failed */
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CREATE_FAILED);
	}

	/*
	 * Fall through
	 */
failed_ret:
	if (src_dev) {
		dev_put(src_dev);
	}

	if (dest_dev) {
		dev_put(dest_dev);
	}

	/*
	 * Try to queue response message
	 */
	((struct sfe_ipv4_msg *)response->msg)->cm.response = msg->cm.response = ret;
	sfe_enqueue_msg(sfe_ctx, response);

	return SFE_TX_SUCCESS;
}

/*
 * sfe_destroy_ipv4_rule_msg()
 *	Convert a destroy message from the ECM format to the SFE format.
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv4 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_destroy_ipv4_rule_msg(struct sfe_ctx_instance_internal *sfe_ctx, struct sfe_ipv4_msg *msg)
{
	struct sfe_response_msg *response;

	response = sfe_alloc_response_msg(SFE_MSG_TYPE_IPV4, msg);
	if (!response) {
		sfe_incr_exceptions(SFE_EXCEPTION_ENQUEUE_FAILED);
		return SFE_TX_FAILURE_QUEUE;
	}

	sfe_ipv4_destroy_rule(&msg->msg.rule_destroy);

	/*
	 * Try to queue response message
	 */
	((struct sfe_ipv4_msg *)response->msg)->cm.response = msg->cm.response = SFE_CMN_RESPONSE_ACK;
	sfe_enqueue_msg(sfe_ctx, response);

	return SFE_TX_SUCCESS;
}

/*
 * sfe_sync_ipv4_stats_many_msg()
 *	Handle a connection-stats-sync-many message from ECM.
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv4 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_sync_ipv4_stats_many_msg(struct sfe_ctx_instance_internal *sfe_ctx, struct sfe_ipv4_msg *msg)
{
	struct sfe_ipv4_conn_sync_many_msg *nicsm;
	nicsm = &(msg->msg.conn_stats_many);

	if (sfe_ipv4_sync_invoke(nicsm->index)) {
		return SFE_TX_SUCCESS;
	}
	return SFE_TX_FAILURE;
}

/*
 * sfe_ipv4_tx()
 *	Transmit an IPv4 message to the SFE
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv4 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_ipv4_tx(struct sfe_ctx_instance *sfe_ctx, struct sfe_ipv4_msg *msg)
{
	switch (msg->cm.type) {
	case SFE_TX_CREATE_RULE_MSG:
		return sfe_create_ipv4_rule_msg(SFE_CTX_TO_PRIVATE(sfe_ctx), msg);
	case SFE_TX_DESTROY_RULE_MSG:
		return sfe_destroy_ipv4_rule_msg(SFE_CTX_TO_PRIVATE(sfe_ctx), msg);
	case SFE_TX_CONN_STATS_SYNC_MANY_MSG:
		return sfe_sync_ipv4_stats_many_msg(SFE_CTX_TO_PRIVATE(sfe_ctx), msg);
	default:
		sfe_incr_exceptions(SFE_EXCEPTION_IPV4_MSG_UNKNOW);
		return SFE_TX_FAILURE_NOT_ENABLED;
	}
}
EXPORT_SYMBOL(sfe_ipv4_tx);

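/*
 * Caller-side sketch (illustrative; my_response_cb, my_app_data and the
 * tuple layout of the destroy message are assumptions based on how the
 * create path above uses msg.rule_create.tuple): build a destroy request
 * with sfe_ipv4_msg_init() (below) and hand it to sfe_ipv4_tx(). The
 * response is delivered asynchronously through the callback set in the
 * common header, dispatched from sfe_process_response_msg() above.
 *
 *	struct sfe_ipv4_msg nim;
 *
 *	sfe_ipv4_msg_init(&nim, 0, SFE_TX_DESTROY_RULE_MSG,
 *			  sizeof(struct sfe_ipv4_rule_destroy_msg),
 *			  my_response_cb, my_app_data);
 *	nim.msg.rule_destroy.tuple.protocol = IPPROTO_UDP;
 *	... fill the remaining 5-tuple fields ...
 *	if (sfe_ipv4_tx(ctx, &nim) != SFE_TX_SUCCESS) {
 *		... handle failure ...
 *	}
 */
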
/*
 * sfe_ipv4_msg_init()
 *	Initialize an IPv4 message.
 */
void sfe_ipv4_msg_init(struct sfe_ipv4_msg *nim, u16 if_num, u32 type, u32 len,
		       sfe_ipv4_msg_callback_t cb, void *app_data)
{
	sfe_cmn_msg_init(&nim->cm, if_num, type, len, (void *)cb, app_data);
}
EXPORT_SYMBOL(sfe_ipv4_msg_init);

/*
 * sfe_ipv4_max_conn_count()
 *	Return the maximum number of connection entries that SFE supports.
 */
int sfe_ipv4_max_conn_count(void)
{
	return max_ipv4_conn;
}
EXPORT_SYMBOL(sfe_ipv4_max_conn_count);

/*
 * sfe_ipv4_notify_register()
 *	Register notifier callbacks for IPv4 messages from SFE
 *
 * @param one_rule_cb The callback pointer for single-rule sync
 * @param many_rules_cb The callback pointer for many-rules sync
 * @param app_data The application context for these messages
 *
 * @return struct sfe_ctx_instance * The SFE context
 */
struct sfe_ctx_instance *sfe_ipv4_notify_register(sfe_ipv4_msg_callback_t one_rule_cb,
						  sfe_ipv4_msg_callback_t many_rules_cb, void *app_data)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);
	/*
	 * Hook the shortcut sync callback.
	 */
	if (one_rule_cb && !sfe_ctx->ipv4_stats_sync_cb) {
		sfe_ipv4_register_sync_rule_callback(sfe_ipv4_stats_one_sync_callback);
	}
	rcu_assign_pointer(sfe_ctx->ipv4_stats_sync_cb, one_rule_cb);

	if (many_rules_cb && !sfe_ctx->ipv4_stats_sync_many_cb) {
		sfe_ipv4_register_many_sync_callback(sfe_ipv4_stats_many_sync_callback);
	}
	rcu_assign_pointer(sfe_ctx->ipv4_stats_sync_many_cb, many_rules_cb);

	sfe_ctx->ipv4_stats_sync_data = app_data;

	spin_unlock_bh(&sfe_ctx->lock);

	return SFE_CTX_TO_PUBLIC(sfe_ctx);
}
EXPORT_SYMBOL(sfe_ipv4_notify_register);

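/*
 * Registration sketch (illustrative; the callback names are hypothetical):
 * both the per-connection and the bulk sync callbacks are hooked in one
 * call, and the returned public context is what later sfe_ipv4_tx() calls
 * take.
 *
 *	struct sfe_ctx_instance *ctx;
 *
 *	ctx = sfe_ipv4_notify_register(my_one_rule_sync_cb,
 *				       my_many_rules_sync_cb, my_app_data);
 */
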
/*
 * sfe_ipv4_notify_unregister()
 *	Unregister the notifier callbacks for IPv4 messages from SFE
 */
void sfe_ipv4_notify_unregister(void)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);

	/*
	 * Unregister our single rule msg sync callback.
	 */
	if (sfe_ctx->ipv4_stats_sync_cb) {
		sfe_ipv4_register_sync_rule_callback(NULL);
		rcu_assign_pointer(sfe_ctx->ipv4_stats_sync_cb, NULL);
	}

	/*
	 * Unregister our many rule msg sync callback.
	 */
	if (sfe_ctx->ipv4_stats_sync_many_cb) {
		sfe_ipv4_register_many_sync_callback(NULL);
		rcu_assign_pointer(sfe_ctx->ipv4_stats_sync_many_cb, NULL);
	}

	sfe_ctx->ipv4_stats_sync_data = NULL;

	spin_unlock_bh(&sfe_ctx->lock);

	sfe_clean_response_msg_by_type(sfe_ctx, SFE_MSG_TYPE_IPV4);
	return;
}
EXPORT_SYMBOL(sfe_ipv4_notify_unregister);

/*
 * sfe_ipv6_many_stats_sync_callback()
 *	Synchronize many connections' state.
 */
static void sfe_ipv6_many_stats_sync_callback(struct sfe_ipv6_msg *msg)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
	sfe_ipv6_msg_callback_t sync_cb;

	rcu_read_lock();
	sync_cb = rcu_dereference(sfe_ctx->ipv6_stats_sync_many_cb);
	rcu_read_unlock();
	if (!sync_cb) {
		sfe_incr_exceptions(SFE_EXCEPTION_NO_SYNC_CB);
		return;
	}

	sync_cb(sfe_ctx->ipv6_stats_sync_data, msg);
}

/*
 * sfe_ipv6_stats_convert()
 *	Convert the internal message format to the ECM format.
 *
 * @param sync_msg stats msg to ECM
 * @param sis SFE statistics from the SFE core engine
 */
void sfe_ipv6_stats_convert(struct sfe_ipv6_conn_sync *sync_msg, struct sfe_connection_sync *sis)
{
	/*
	 * Fill connection specific information
	 */
	sync_msg->protocol = (u8)sis->protocol;
	sfe_ipv6_addr_copy(sis->src_ip.ip6, sync_msg->flow_ip);
	sync_msg->flow_ident = sis->src_port;

	sfe_ipv6_addr_copy(sis->dest_ip.ip6, sync_msg->return_ip);
	sync_msg->return_ident = sis->dest_port;

	/*
	 * Fill TCP protocol specific information
	 */
	if (sis->protocol == IPPROTO_TCP) {
		sync_msg->flow_max_window = sis->src_td_max_window;
		sync_msg->flow_end = sis->src_td_end;
		sync_msg->flow_max_end = sis->src_td_max_end;

		sync_msg->return_max_window = sis->dest_td_max_window;
		sync_msg->return_end = sis->dest_td_end;
		sync_msg->return_max_end = sis->dest_td_max_end;
	}

	/*
	 * Fill statistics information
	 */
	sync_msg->flow_rx_packet_count = sis->src_new_packet_count;
	sync_msg->flow_rx_byte_count = sis->src_new_byte_count;
	sync_msg->flow_tx_packet_count = sis->dest_new_packet_count;
	sync_msg->flow_tx_byte_count = sis->dest_new_byte_count;

	sync_msg->return_rx_packet_count = sis->dest_new_packet_count;
	sync_msg->return_rx_byte_count = sis->dest_new_byte_count;
	sync_msg->return_tx_packet_count = sis->src_new_packet_count;
	sync_msg->return_tx_byte_count = sis->src_new_byte_count;

	/*
	 * Fill expiration time to extend, in unit of msec
	 */
	sync_msg->inc_ticks = (((u32)sis->delta_jiffies) * MSEC_PER_SEC)/HZ;

	/*
	 * Fill other information
	 */
	switch (sis->reason) {
	case SFE_SYNC_REASON_DESTROY:
		sync_msg->reason = SFE_RULE_SYNC_REASON_DESTROY;
		break;
	case SFE_SYNC_REASON_FLUSH:
		sync_msg->reason = SFE_RULE_SYNC_REASON_FLUSH;
		break;
	default:
		sync_msg->reason = SFE_RULE_SYNC_REASON_STATS;
		break;
	}
}

/*
 * sfe_ipv6_stats_sync_callback()
 *	Synchronize a connection's state.
 */
static void sfe_ipv6_stats_sync_callback(struct sfe_connection_sync *sis)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
	struct sfe_ipv6_msg msg;
	struct sfe_ipv6_conn_sync *sync_msg;
	sfe_ipv6_msg_callback_t sync_cb;

	rcu_read_lock();
	sync_cb = rcu_dereference(sfe_ctx->ipv6_stats_sync_cb);
	rcu_read_unlock();
	if (!sync_cb) {
		sfe_incr_exceptions(SFE_EXCEPTION_NO_SYNC_CB);
		return;
	}

	sync_msg = &msg.msg.conn_stats;

	memset(&msg, 0, sizeof(msg));
	sfe_cmn_msg_init(&msg.cm, 0, SFE_RX_CONN_STATS_SYNC_MSG,
			 sizeof(struct sfe_ipv6_conn_sync), NULL, NULL);

	sfe_ipv6_stats_convert(sync_msg, sis);

	/*
	 * The SFE sync callback is executed from a timer, so we can redirect it to ECM directly.
	 */
	sync_cb(sfe_ctx->ipv6_stats_sync_data, &msg);
}

/*
 * sfe_create_ipv6_rule_msg()
 *	Convert a create message from the ECM format to the SFE format.
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv6 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_create_ipv6_rule_msg(struct sfe_ctx_instance_internal *sfe_ctx, struct sfe_ipv6_msg *msg)
{
	struct net_device *src_dev = NULL;
	struct net_device *dest_dev = NULL;
	struct sfe_response_msg *response;
	enum sfe_cmn_response ret = SFE_TX_SUCCESS;
	bool is_routed = true;
	bool cfg_err;

	response = sfe_alloc_response_msg(SFE_MSG_TYPE_IPV6, msg);
	if (!response) {
		sfe_incr_exceptions(SFE_EXCEPTION_ENQUEUE_FAILED);
		return SFE_TX_FAILURE_QUEUE;
	}

	if (!(msg->msg.rule_create.valid_flags & SFE_RULE_CREATE_CONN_VALID)) {
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CONNECTION_INVALID);
		goto failed_ret;
	}

	/*
	 * Bridge flows are accelerated if the L2 feature is enabled.
	 */
	if (msg->msg.rule_create.rule_flags & SFE_RULE_CREATE_FLAG_BRIDGE_FLOW) {
		if (!sfe_is_l2_feature_enabled()) {
			ret = SFE_CMN_RESPONSE_EINTERFACE;
			sfe_incr_exceptions(SFE_EXCEPTION_NOT_SUPPORT_BRIDGE);
			goto failed_ret;
		}
		is_routed = false;
	}

	switch (msg->msg.rule_create.tuple.protocol) {

	case IPPROTO_TCP:
		if (!(msg->msg.rule_create.valid_flags & SFE_RULE_CREATE_TCP_VALID)) {
			ret = SFE_CMN_RESPONSE_EMSG;
			sfe_incr_exceptions(SFE_EXCEPTION_TCP_INVALID);
			goto failed_ret;
		}

		break;

	case IPPROTO_UDP:
		break;

	case IPPROTO_IPIP:
		break;

	case IPPROTO_GRE:
		break;

	default:
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_PROTOCOL_NOT_SUPPORT);
		goto failed_ret;
	}

	/*
	 * Does our input device support IP processing?
	 */
	src_dev = dev_get_by_index(&init_net, msg->msg.rule_create.conn_rule.flow_top_interface_num);
	if (!src_dev || !sfe_routed_dev_allow(src_dev, is_routed, false)) {
		ret = SFE_CMN_RESPONSE_EINTERFACE;
		sfe_incr_exceptions(SFE_EXCEPTION_SRC_DEV_NOT_L3);
		goto failed_ret;
	}

	/*
	 * Check whether the L2 feature is disabled while the rule flag is configured to use the bottom interface
	 */
	cfg_err = (msg->msg.rule_create.rule_flags & SFE_RULE_CREATE_FLAG_USE_FLOW_BOTTOM_INTERFACE) && !sfe_is_l2_feature_enabled();
	if (cfg_err) {
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CFG_ERR);
		goto failed_ret;
	}

	/*
	 * Does our output device support IP processing?
	 */
	dest_dev = dev_get_by_index(&init_net, msg->msg.rule_create.conn_rule.return_top_interface_num);
	if (!dest_dev || !sfe_routed_dev_allow(dest_dev, is_routed, false)) {
		ret = SFE_CMN_RESPONSE_EINTERFACE;
		sfe_incr_exceptions(SFE_EXCEPTION_DEST_DEV_NOT_L3);
		goto failed_ret;
	}

	/*
	 * Check whether the L2 feature is disabled while the rule flag is configured to use the bottom interface
	 */
	cfg_err = (msg->msg.rule_create.rule_flags & SFE_RULE_CREATE_FLAG_USE_RETURN_BOTTOM_INTERFACE) && !sfe_is_l2_feature_enabled();
	if (cfg_err) {
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CFG_ERR);
		goto failed_ret;
	}

	if (!sfe_ipv6_create_rule(&msg->msg.rule_create)) {
		/* success */
		ret = SFE_CMN_RESPONSE_ACK;
	} else {
		/* failed */
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CREATE_FAILED);
	}

	/*
	 * Fall through
	 */
failed_ret:
	if (src_dev) {
		dev_put(src_dev);
	}

	if (dest_dev) {
		dev_put(dest_dev);
	}

	/*
	 * Try to queue response message
	 */
	((struct sfe_ipv6_msg *)response->msg)->cm.response = msg->cm.response = ret;
	sfe_enqueue_msg(sfe_ctx, response);

	return SFE_TX_SUCCESS;
}

/*
 * sfe_destroy_ipv6_rule_msg()
 *	Convert a destroy message from the ECM format to the SFE format.
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv6 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_destroy_ipv6_rule_msg(struct sfe_ctx_instance_internal *sfe_ctx, struct sfe_ipv6_msg *msg)
{
	struct sfe_response_msg *response;

	response = sfe_alloc_response_msg(SFE_MSG_TYPE_IPV6, msg);
	if (!response) {
		sfe_incr_exceptions(SFE_EXCEPTION_ENQUEUE_FAILED);
		return SFE_TX_FAILURE_QUEUE;
	}

	sfe_ipv6_destroy_rule(&msg->msg.rule_destroy);

	/*
	 * Try to queue response message
	 */
	((struct sfe_ipv6_msg *)response->msg)->cm.response = msg->cm.response = SFE_CMN_RESPONSE_ACK;
	sfe_enqueue_msg(sfe_ctx, response);

	return SFE_TX_SUCCESS;
}

/*
 * sfe_sync_ipv6_stats_many_msg()
 *	Handle a connection-stats-sync-many message from ECM.
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv6 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_sync_ipv6_stats_many_msg(struct sfe_ctx_instance_internal *sfe_ctx, struct sfe_ipv6_msg *msg)
{
	struct sfe_ipv6_conn_sync_many_msg *nicsm;
	nicsm = &(msg->msg.conn_stats_many);

	if (sfe_ipv6_sync_invoke(nicsm->index)) {
		return SFE_TX_SUCCESS;
	}
	return SFE_TX_FAILURE;
}

/*
 * sfe_ipv6_tx()
 *	Transmit an IPv6 message to the SFE
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv6 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_ipv6_tx(struct sfe_ctx_instance *sfe_ctx, struct sfe_ipv6_msg *msg)
{
	switch (msg->cm.type) {
	case SFE_TX_CREATE_RULE_MSG:
		return sfe_create_ipv6_rule_msg(SFE_CTX_TO_PRIVATE(sfe_ctx), msg);
	case SFE_TX_DESTROY_RULE_MSG:
		return sfe_destroy_ipv6_rule_msg(SFE_CTX_TO_PRIVATE(sfe_ctx), msg);
	case SFE_TX_CONN_STATS_SYNC_MANY_MSG:
		return sfe_sync_ipv6_stats_many_msg(SFE_CTX_TO_PRIVATE(sfe_ctx), msg);
	default:
		sfe_incr_exceptions(SFE_EXCEPTION_IPV6_MSG_UNKNOW);
		return SFE_TX_FAILURE_NOT_ENABLED;
	}
}
EXPORT_SYMBOL(sfe_ipv6_tx);

/*
 * sfe_ipv6_msg_init()
 *	Initialize an IPv6 message.
 */
void sfe_ipv6_msg_init(struct sfe_ipv6_msg *nim, u16 if_num, u32 type, u32 len,
		       sfe_ipv6_msg_callback_t cb, void *app_data)
{
	sfe_cmn_msg_init(&nim->cm, if_num, type, len, (void *)cb, app_data);
}
EXPORT_SYMBOL(sfe_ipv6_msg_init);

/*
 * sfe_ipv6_max_conn_count()
 *	Return the maximum number of connection entries that SFE supports.
 */
int sfe_ipv6_max_conn_count(void)
{
	return max_ipv6_conn;
}
EXPORT_SYMBOL(sfe_ipv6_max_conn_count);

/*
 * sfe_ipv6_notify_register()
 *	Register notifier callbacks for IPv6 messages from SFE
 *
 * @param one_rule_cb The callback pointer for single-rule sync
 * @param many_rule_cb The callback pointer for many-rules sync
 * @param app_data The application context for these messages
 *
 * @return struct sfe_ctx_instance * The SFE context
 */
struct sfe_ctx_instance *sfe_ipv6_notify_register(sfe_ipv6_msg_callback_t one_rule_cb,
						  sfe_ipv6_msg_callback_t many_rule_cb, void *app_data)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);
	/*
	 * Hook the shortcut sync callback.
	 */
	if (one_rule_cb && !sfe_ctx->ipv6_stats_sync_cb) {
		sfe_ipv6_register_sync_rule_callback(sfe_ipv6_stats_sync_callback);
	}
	rcu_assign_pointer(sfe_ctx->ipv6_stats_sync_cb, one_rule_cb);

	if (many_rule_cb && !sfe_ctx->ipv6_stats_sync_many_cb) {
		sfe_ipv6_register_many_sync_callback(sfe_ipv6_many_stats_sync_callback);
	}
	rcu_assign_pointer(sfe_ctx->ipv6_stats_sync_many_cb, many_rule_cb);

	sfe_ctx->ipv6_stats_sync_data = app_data;

	spin_unlock_bh(&sfe_ctx->lock);

	return SFE_CTX_TO_PUBLIC(sfe_ctx);
}
EXPORT_SYMBOL(sfe_ipv6_notify_register);

/*
 * sfe_ipv6_notify_unregister()
 *	Unregister the notifier callbacks for IPv6 messages from SFE
 */
void sfe_ipv6_notify_unregister(void)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);
	/*
	 * Unregister our sync callbacks.
	 */
	if (sfe_ctx->ipv6_stats_sync_cb) {
		sfe_ipv6_register_sync_rule_callback(NULL);
		rcu_assign_pointer(sfe_ctx->ipv6_stats_sync_cb, NULL);
	}

	if (sfe_ctx->ipv6_stats_sync_many_cb) {
		sfe_ipv6_register_many_sync_callback(NULL);
		rcu_assign_pointer(sfe_ctx->ipv6_stats_sync_many_cb, NULL);
	}

	sfe_ctx->ipv6_stats_sync_data = NULL;
	spin_unlock_bh(&sfe_ctx->lock);

	sfe_clean_response_msg_by_type(sfe_ctx, SFE_MSG_TYPE_IPV6);
	return;
}
EXPORT_SYMBOL(sfe_ipv6_notify_unregister);

/*
 * sfe_tun6rd_tx()
 *	Transmit a tun6rd message to the SFE engine
 */
sfe_tx_status_t sfe_tun6rd_tx(struct sfe_ctx_instance *sfe_ctx, struct sfe_tun6rd_msg *msg)
{
	sfe_incr_exceptions(SFE_EXCEPTION_NOT_SUPPORT_6RD);
	return SFE_TX_FAILURE_NOT_ENABLED;
}
EXPORT_SYMBOL(sfe_tun6rd_tx);

/*
 * sfe_tun6rd_msg_init()
 *	Initialize a sfe_tun6rd msg.
 */
void sfe_tun6rd_msg_init(struct sfe_tun6rd_msg *ncm, u16 if_num, u32 type, u32 len, void *cb, void *app_data)
{
	sfe_cmn_msg_init(&ncm->cm, if_num, type, len, cb, app_data);
}
EXPORT_SYMBOL(sfe_tun6rd_msg_init);

/*
 * sfe_recv()
 *	Handle packet receives.
 *
 * Returns 1 if the packet is forwarded or 0 if it isn't.
 */
int sfe_recv(struct sk_buff *skb)
{
	struct net_device *dev;
	struct sfe_l2_info l2_info;
	int ret;

	/*
	 * We know that for the vast majority of packets we need the transport
	 * layer header so we may as well start to fetch it now!
	 */
	prefetch(skb->data + 32);
	barrier();

	dev = skb->dev;

	/*
	 * Setting parse flags to 0 since l2_info is passed for the non-L2.5 header case as well
	 */
	l2_info.parse_flags = 0;
	l2_info.vlan_hdr_cnt = 0;

#ifdef CONFIG_NET_CLS_ACT
	/*
	 * If an ingress Qdisc is configured and the packet has not been
	 * processed by it yet, we cannot accelerate this packet.
	 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0))
	if (dev->ingress_queue && !(skb->tc_verd & TC_NCLS)) {
		return 0;
	}
#else
	if (rcu_access_pointer(dev->miniq_ingress) && !skb->tc_skip_classify) {
		return 0;
	}
#endif
#endif

	/*
	 * If l2_feature is enabled, we need not check whether the src dev is an L3 interface since bridge flow offload is supported.
	 * If l2_feature is disabled, we make sure the src dev is an L3 interface to avoid the cost of rule lookup for L2 flows.
	 */
	switch (ntohs(skb->protocol)) {
	case ETH_P_IP:
		if (likely(sfe_is_l2_feature_enabled()) || sfe_dev_is_layer_3_interface(dev, true)) {
			return sfe_ipv4_recv(dev, skb, &l2_info, false);
		}

		DEBUG_TRACE("No IPv4 address for device: %s skb=%px\n", dev->name, skb);
		return 0;

	case ETH_P_IPV6:
		if (likely(sfe_is_l2_feature_enabled()) || sfe_dev_is_layer_3_interface(dev, false)) {
			return sfe_ipv6_recv(dev, skb, &l2_info, false);
		}

		DEBUG_TRACE("No IPv6 address for device: %s skb=%px\n", dev->name, skb);
		return 0;

	default:
		break;
	}

	/*
	 * Stop L2 processing if the L2 feature is disabled.
	 */
	if (!sfe_is_l2_feature_enabled()) {
		DEBUG_TRACE("Unsupported protocol %#x %s (L2 feature is disabled) skb=%px\n",
			    ntohs(skb->protocol), dev->name, skb);
		return 0;
	}

	/*
	 * Parse the L2 headers to find the L3 protocol and the L2 header offset
	 */
	if (unlikely(!sfe_recv_parse_l2(dev, skb, &l2_info))) {
		DEBUG_TRACE("%px: Invalid L2.5 header format with protocol : %x\n", skb, ntohs(skb->protocol));
		goto send_to_linux;
	}

	/*
	 * The protocol in l2_info is expected to be in host byte order.
	 * PPPoE does this in sfe_pppoe_parse_hdr().
	 */
	if (likely(l2_info.protocol == ETH_P_IP)) {
		ret = sfe_ipv4_recv(dev, skb, &l2_info, false);
		if (unlikely(!ret)) {
			goto send_to_linux;
		}
		return ret;
	}

	if (likely(l2_info.protocol == ETH_P_IPV6)) {
		ret = sfe_ipv6_recv(dev, skb, &l2_info, false);
		if (unlikely(!ret)) {
			goto send_to_linux;
		}
		return ret;
	}

	DEBUG_TRACE("Non-IP(%x) %s skb=%px skb_vlan:%x/%x/%x skb_proto=%x\n",
		    l2_info.protocol, dev->name, skb,
		    ntohs(skb->vlan_proto), skb->vlan_tci, skb_vlan_tag_present(skb),
		    htons(skb->protocol));

send_to_linux:
	/*
	 * Push the data back before sending to linux if -
	 * a. There is any exception from IPV4/V6
	 * b. The next protocol is neither IPV4 nor IPV6
	 */
	sfe_recv_undo_parse_l2(dev, skb, &l2_info);

	return 0;
}

/*
 * sfe_get_exceptions()
 *	Dump exception counters
 */
static ssize_t sfe_get_exceptions(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	int idx, len;
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);
	for (len = 0, idx = 0; idx < SFE_EXCEPTION_MAX; idx++) {
		if (sfe_ctx->exceptions[idx]) {
			len += snprintf(buf + len, (ssize_t)(PAGE_SIZE - len), "%s = %d\n", sfe_exception_events_string[idx], sfe_ctx->exceptions[idx]);
		}
	}
	spin_unlock_bh(&sfe_ctx->lock);

	return len;
}

/*
 * sysfs attributes.
 */
static const struct device_attribute sfe_exceptions_attr =
	__ATTR(exceptions, S_IRUGO, sfe_get_exceptions, NULL);

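/*
 * The counters can be read from user space; only non-zero counters are
 * printed. The sample output below is hypothetical:
 *
 *	$ cat /sys/sfe/exceptions
 *	CREATE_FAILED = 3
 *	NO_SYNC_CB = 1
 */
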
/*
 * sfe_is_l2_feature_enabled()
 *	Check whether the L2 feature flag is enabled (VLAN, PPPoE, bridge and tunnels).
 *
 * A 32-bit read is atomic. No need for locks.
 */
bool sfe_is_l2_feature_enabled(void)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
	return (sfe_ctx->l2_feature_support == 1);
}
EXPORT_SYMBOL(sfe_is_l2_feature_enabled);

/*
 * sfe_get_l2_feature()
 *	Show whether the L2 feature is enabled or disabled.
 */
ssize_t sfe_get_l2_feature(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
	ssize_t len;

	spin_lock_bh(&sfe_ctx->lock);
	len = snprintf(buf, (ssize_t)(PAGE_SIZE), "L2 feature is %s\n", sfe_ctx->l2_feature_support ? "enabled" : "disabled");
	spin_unlock_bh(&sfe_ctx->lock);
	return len;
}

/*
 * sfe_set_l2_feature()
 *	Enable or disable the L2 feature flag.
 */
ssize_t sfe_set_l2_feature(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	unsigned long val;
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
	int ret;

	ret = sscanf(buf, "%lu", &val);
	if (ret != 1) {
		pr_err("Wrong input, %s\n", buf);
		return -EINVAL;
	}

	if (val != 1 && val != 0) {
		pr_err("Input should be either 1 or 0, (%s)\n", buf);
		return -EINVAL;
	}

	spin_lock_bh(&sfe_ctx->lock);

	if (sfe_ctx->l2_feature_support && val) {
		spin_unlock_bh(&sfe_ctx->lock);
		pr_err("L2 feature is already enabled\n");
		return -EINVAL;
	}

	if (!sfe_ctx->l2_feature_support && !val) {
		spin_unlock_bh(&sfe_ctx->lock);
		pr_err("L2 feature is already disabled\n");
		return -EINVAL;
	}

	sfe_ctx->l2_feature_support = val;
	spin_unlock_bh(&sfe_ctx->lock);

	return count;
}

static const struct device_attribute sfe_l2_feature_attr =
	__ATTR(l2_feature, 0644, sfe_get_l2_feature, sfe_set_l2_feature);

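/*
 * User-space usage sketch for the l2_feature knob (illustrative):
 *
 *	echo 0 > /sys/sfe/l2_feature	(disable VLAN/PPPoE/bridge/tunnel handling)
 *	cat /sys/sfe/l2_feature		(prints "L2 feature is disabled")
 *
 * Writing the value that is already set returns -EINVAL, as enforced in
 * sfe_set_l2_feature() above.
 */
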
/*
 * sfe_init_if()
 */
int sfe_init_if(void)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
	int result = -1;

	/*
	 * The L2 feature is enabled by default.
	 */
	sfe_ctx->l2_feature_support = 1;

	/*
	 * Create sys/sfe
	 */
	sfe_ctx->sys_sfe = kobject_create_and_add("sfe", NULL);
	if (!sfe_ctx->sys_sfe) {
		DEBUG_ERROR("failed to register sfe\n");
		goto exit1;
	}

	/*
	 * Create sys/sfe/exceptions
	 */
	result = sysfs_create_file(sfe_ctx->sys_sfe, &sfe_exceptions_attr.attr);
	if (result) {
		DEBUG_ERROR("failed to register exceptions file: %d\n", result);
		goto exit2;
	}

	result = sysfs_create_file(sfe_ctx->sys_sfe, &sfe_l2_feature_attr.attr);
	if (result) {
		DEBUG_ERROR("failed to register L2 feature flag sysfs file: %d\n", result);
		goto exit2;
	}

	spin_lock_init(&sfe_ctx->lock);

	INIT_LIST_HEAD(&sfe_ctx->msg_queue);
	INIT_WORK(&sfe_ctx->work, sfe_process_response_msg);

	/*
	 * Hook the receive path in the network stack.
	 */
	BUG_ON(athrs_fast_nat_recv);
	RCU_INIT_POINTER(athrs_fast_nat_recv, sfe_recv);

	return 0;
exit2:
	kobject_put(sfe_ctx->sys_sfe);
exit1:
	return result;
}

/*
 * sfe_exit_if()
 */
void sfe_exit_if(void)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	/*
	 * Unregister our receive callback.
	 */
	RCU_INIT_POINTER(athrs_fast_nat_recv, NULL);

	/*
	 * Wait for all callbacks to complete.
	 */
	rcu_barrier();

	/*
	 * Destroy all connections.
	 */
	sfe_ipv4_destroy_all_rules_for_dev(NULL);
	sfe_ipv6_destroy_all_rules_for_dev(NULL);

	/*
	 * Stop the work queue and flush all pending messages in the queue.
	 */
	cancel_work_sync(&sfe_ctx->work);
	sfe_process_response_msg(&sfe_ctx->work);

	/*
	 * Unregister our sync callbacks.
	 */
	sfe_ipv4_notify_unregister();
	sfe_ipv6_notify_unregister();

	kobject_put(sfe_ctx->sys_sfe);

	return;
}