/*
 * sfe.c
 *	API for shortcut forwarding engine.
 *
 * Copyright (c) 2015,2016, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/module.h>
#include <linux/version.h>
#include <linux/sysfs.h>
#include <linux/skbuff.h>
#include <net/addrconf.h>
#include <linux/inetdevice.h>
#include <net/pkt_sched.h>
#include <net/vxlan.h>
#include <net/gre.h>

#include "sfe_debug.h"
#include "sfe_api.h"
#include "sfe.h"
#include "sfe_pppoe.h"
#include "sfe_vlan.h"
#include "sfe_ipv4.h"
#include "sfe_ipv6.h"

extern int max_ipv4_conn;
extern int max_ipv6_conn;

#define SFE_MESSAGE_VERSION 0x1
#define sfe_ipv6_addr_copy(src, dest) memcpy((void *)(dest), (void *)(src), 16)
#define sfe_ipv4_stopped(CTX) (rcu_dereference((CTX)->ipv4_stats_sync_cb) == NULL)
#define sfe_ipv6_stopped(CTX) (rcu_dereference((CTX)->ipv6_stats_sync_cb) == NULL)

typedef enum sfe_exception {
	SFE_EXCEPTION_IPV4_MSG_UNKNOW,
	SFE_EXCEPTION_IPV6_MSG_UNKNOW,
	SFE_EXCEPTION_CONNECTION_INVALID,
	SFE_EXCEPTION_NOT_SUPPORT_BRIDGE,
	SFE_EXCEPTION_TCP_INVALID,
	SFE_EXCEPTION_PROTOCOL_NOT_SUPPORT,
	SFE_EXCEPTION_SRC_DEV_NOT_L3,
	SFE_EXCEPTION_DEST_DEV_NOT_L3,
	SFE_EXCEPTION_CFG_ERR,
	SFE_EXCEPTION_CREATE_FAILED,
	SFE_EXCEPTION_ENQUEUE_FAILED,
	SFE_EXCEPTION_NOT_SUPPORT_6RD,
	SFE_EXCEPTION_NO_SYNC_CB,
	SFE_EXCEPTION_MAX
} sfe_exception_t;

static char *sfe_exception_events_string[SFE_EXCEPTION_MAX] = {
	"IPV4_MSG_UNKNOW",
	"IPV6_MSG_UNKNOW",
	"CONNECTION_INVALID",
	"NOT_SUPPORT_BRIDGE",
	"TCP_INVALID",
	"PROTOCOL_NOT_SUPPORT",
	"SRC_DEV_NOT_L3",
	"DEST_DEV_NOT_L3",
	"CONFIG_ERROR",
	"CREATE_FAILED",
	"ENQUEUE_FAILED",
	"NOT_SUPPORT_6RD",
	"NO_SYNC_CB"
};

/*
 * Message type of queued response message
 */
typedef enum {
	SFE_MSG_TYPE_IPV4,
	SFE_MSG_TYPE_IPV6
} sfe_msg_types_t;

/*
 * Queued response message,
 * will be sent back to caller in workqueue
 */
struct sfe_response_msg {
	struct list_head node;
	sfe_msg_types_t type;
	void *msg[0];
};

/*
 * SFE context instance, private for SFE
 */
struct sfe_ctx_instance_internal {
	struct sfe_ctx_instance base;	/* Exported SFE context, is public to user of SFE */

	/*
	 * Control state.
	 */
	struct kobject *sys_sfe;	/* Sysfs linkage */

	struct list_head msg_queue;	/* Response message queue */
	spinlock_t lock;		/* Lock to protect message queue */

	struct work_struct work;	/* Work to send response message back to caller */

	sfe_ipv4_msg_callback_t __rcu ipv4_stats_sync_cb;	/* Callback to call to sync ipv4 statistics */
	sfe_ipv4_msg_callback_t __rcu ipv4_stats_sync_many_cb;	/* Callback to call to sync many ipv4 statistics */
	void *ipv4_stats_sync_data;	/* Argument for above callback: ipv4_stats_sync_cb */

	sfe_ipv6_msg_callback_t __rcu ipv6_stats_sync_cb;	/* Callback to call to sync ipv6 statistics */
	sfe_ipv6_msg_callback_t __rcu ipv6_stats_sync_many_cb;	/* Callback to call to sync many ipv6 statistics */
	void *ipv6_stats_sync_data;	/* Argument for above callback: ipv6_stats_sync_cb */

	u32 exceptions[SFE_EXCEPTION_MAX];	/* Statistics for exception */

	int32_t l2_feature_support;	/* L2 feature support */
};

static struct sfe_ctx_instance_internal __sfe_ctx;

/*
 * Convert public SFE context to internal context
 */
#define SFE_CTX_TO_PRIVATE(base) (struct sfe_ctx_instance_internal *)(base)
/*
 * Convert internal SFE context to public context
 */
#define SFE_CTX_TO_PUBLIC(intrv) (struct sfe_ctx_instance *)(intrv)

/*
 * sfe_incr_exceptions()
 *	Increase an exception counter.
 *
 * TODO: Merge sfe_ctx stats to ipv4 and ipv6 percpu stats.
 */
static inline void sfe_incr_exceptions(sfe_exception_t except)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);
	sfe_ctx->exceptions[except]++;
	spin_unlock_bh(&sfe_ctx->lock);
}

/*
 * sfe_dev_is_layer_3_interface()
 *	Check if a network device is an IPv4 or IPv6 layer 3 interface
 *
 * @param dev network device to check
 * @param check_v4 true to check for an IPv4 layer 3 interface (one that has an IPv4 address),
 *	false to check for an IPv6 layer 3 interface (one that has an IPv6 address)
 */
inline bool sfe_dev_is_layer_3_interface(struct net_device *dev, bool check_v4)
{
	struct in_device *in4_dev;
	struct inet6_dev *in6_dev;

	BUG_ON(!dev);

	if (likely(check_v4)) {
		/*
		 * Does our input device support IPv4 processing?
		 */
		in4_dev = (struct in_device *)dev->ip_ptr;
		if (unlikely(!in4_dev)) {
			return false;
		}

		/*
		 * Does it have an IPv4 address? If it doesn't then it
		 * could be a MAP-T, DS-Lite or tun6rd interface, otherwise we
		 * can't do anything interesting here!
		 */
		if (likely(in4_dev->ifa_list || (dev->priv_flags_ext & IFF_EXT_MAPT)
				|| (dev->rtnl_link_ops
				&& (!strcmp(dev->rtnl_link_ops->kind, "ip6tnl")
				|| !strcmp(dev->rtnl_link_ops->kind, "sit"))))) {
			return true;
		}
		return false;
	}

	/*
	 * Does our input device support IPv6 processing?
	 */
	in6_dev = (struct inet6_dev *)dev->ip6_ptr;
	if (unlikely(!in6_dev)) {
		return false;
	}

	/*
	 * Does it have an IPv6 address? If it doesn't then it could be a MAP-T interface,
	 * else we can't do anything interesting here!
	 */
	if (likely(!list_empty(&in6_dev->addr_list) || (dev->priv_flags_ext & IFF_EXT_MAPT))) {
		return true;
	}

	return false;
}

/*
 * sfe_routed_dev_allow()
 *	Check whether routed acceleration is allowed on this device
 */
static bool sfe_routed_dev_allow(struct net_device *dev, bool is_routed, bool check_v4)
{
	if (!is_routed) {
		return true;
	}

	if (sfe_dev_is_layer_3_interface(dev, check_v4)) {
		return true;
	}

	/*
	 * In the case of GRE / VxLAN, these devices do not have an IP address,
	 * so the L3 interface check will fail. Allow rule creation between a
	 * GRE / VxLAN device and a WAN device for routed flows.
	 */
	if (netif_is_vxlan(dev)) {
		return true;
	}

#ifdef SFE_GRE_TUN_ENABLE
	if (netif_is_gretap(dev) || netif_is_gre(dev)) {
		return true;
	}

	if (netif_is_ip6gre(dev) || netif_is_ip6gretap(dev)) {
		return true;
	}
#endif

	return false;
}

/*
 * sfe_dev_has_hw_csum()
 *	Check whether the device supports hardware checksum offload
 */
bool sfe_dev_has_hw_csum(struct net_device *dev)
{
	if (netif_is_vxlan(dev)) {
		return false;
	}

#ifdef SFE_GRE_TUN_ENABLE
	if (netif_is_gre(dev) || netif_is_gretap(dev)) {
		return false;
	}

	if (netif_is_ip6gre(dev) || netif_is_ip6gretap(dev)) {
		return false;
	}
#endif

	/*
	 * The MAP-E/DS-Lite and tun6rd tunnels share the same rtnl_link_ops,
	 * whose kind is "ip6tnl". HW csum for these tunnel devices should be disabled.
	 */
	if (dev->rtnl_link_ops && !strcmp(dev->rtnl_link_ops->kind, "ip6tnl")) {
		return false;
	}

	return true;
}

/*
 * sfe_clean_response_msg_by_type()
 *	Clean response messages in the queue when ECM exits
 *
 * @param sfe_ctx SFE context
 * @param msg_type message type, ipv4 or ipv6
 */
static void sfe_clean_response_msg_by_type(struct sfe_ctx_instance_internal *sfe_ctx, sfe_msg_types_t msg_type)
{
	struct sfe_response_msg *response, *tmp;

	if (!sfe_ctx) {
		return;
	}

	spin_lock_bh(&sfe_ctx->lock);
	list_for_each_entry_safe(response, tmp, &sfe_ctx->msg_queue, node) {
		if (response->type == msg_type) {
			list_del(&response->node);
			/*
			 * Free response message
			 */
			kfree(response);
		}
	}
	spin_unlock_bh(&sfe_ctx->lock);
}

/*
 * sfe_process_response_msg()
 *	Send all pending response messages to ECM by calling the callback function included in the message
 *
 * @param work work structure
 */
static void sfe_process_response_msg(struct work_struct *work)
{
	struct sfe_ctx_instance_internal *sfe_ctx = container_of(work, struct sfe_ctx_instance_internal, work);
	struct sfe_response_msg *response;

	spin_lock_bh(&sfe_ctx->lock);
	while ((response = list_first_entry_or_null(&sfe_ctx->msg_queue, struct sfe_response_msg, node))) {
		list_del(&response->node);

		/*
		 * Drop the lock while calling back to the client so that new
		 * responses can be queued concurrently.
		 */
		spin_unlock_bh(&sfe_ctx->lock);
		rcu_read_lock();

		/*
		 * Send response message back to caller
		 */
		if ((response->type == SFE_MSG_TYPE_IPV4) && !sfe_ipv4_stopped(sfe_ctx)) {
			struct sfe_ipv4_msg *msg = (struct sfe_ipv4_msg *)response->msg;
			sfe_ipv4_msg_callback_t callback = (sfe_ipv4_msg_callback_t)msg->cm.cb;
			if (callback) {
				callback((void *)msg->cm.app_data, msg);
			}
		} else if ((response->type == SFE_MSG_TYPE_IPV6) && !sfe_ipv6_stopped(sfe_ctx)) {
			struct sfe_ipv6_msg *msg = (struct sfe_ipv6_msg *)response->msg;
			sfe_ipv6_msg_callback_t callback = (sfe_ipv6_msg_callback_t)msg->cm.cb;
			if (callback) {
				callback((void *)msg->cm.app_data, msg);
			}
		}

		rcu_read_unlock();
		/*
		 * Free response message
		 */
		kfree(response);
		spin_lock_bh(&sfe_ctx->lock);
	}
	spin_unlock_bh(&sfe_ctx->lock);
}

/*
 * sfe_alloc_response_msg()
 *	Alloc and construct new response message
 *
 * @param type message type
 * @param msg used to construct response message if not NULL
 *
 * @return !NULL, success; NULL, failed
 */
static struct sfe_response_msg *
sfe_alloc_response_msg(sfe_msg_types_t type, void *msg)
{
	struct sfe_response_msg *response;
	int size;

	switch (type) {
	case SFE_MSG_TYPE_IPV4:
		size = sizeof(struct sfe_ipv4_msg);
		break;
	case SFE_MSG_TYPE_IPV6:
		size = sizeof(struct sfe_ipv6_msg);
		break;
	default:
		DEBUG_ERROR("message type %d not supported\n", type);
		return NULL;
	}

	response = (struct sfe_response_msg *)kzalloc(sizeof(struct sfe_response_msg) + size, GFP_ATOMIC);
	if (!response) {
		DEBUG_ERROR("allocate memory failed\n");
		return NULL;
	}

	response->type = type;

	if (msg) {
		memcpy(response->msg, msg, size);
	}

	return response;
}
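
/*
 * The allocation above uses the classic flexible-array idiom: one kzalloc()
 * sized as header plus payload, with the payload copied straight after the
 * struct so a single kfree() releases both. A minimal stand-alone sketch of
 * the same pattern (illustrative only, not part of this driver; the names
 * example_hdr/example_alloc are hypothetical):
 *
 *	struct example_hdr {
 *		struct list_head node;
 *		size_t len;
 *		u8 payload[];
 *	};
 *
 *	static struct example_hdr *example_alloc(const void *data, size_t len)
 *	{
 *		struct example_hdr *h = kzalloc(sizeof(*h) + len, GFP_ATOMIC);
 *		if (!h) {
 *			return NULL;
 *		}
 *		h->len = len;
 *		memcpy(h->payload, data, len);
 *		return h;
 *	}
 */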

/*
 * sfe_fast_xmit_check()
 *	Check the fast transmit feasibility.
 *
 * This checks the per-direction attributes that prevent fast transmit:
 * xfrm packets, packets that come from a local socket, or skbs that need
 * sk validation.
 */
bool sfe_fast_xmit_check(struct sk_buff *skb, netdev_features_t features)
{

#ifdef CONFIG_SOCK_VALIDATE_XMIT
	if (skb->sk && sk_fullsock(skb->sk) && skb->sk->sk_validate_xmit_skb) {
		DEBUG_INFO("%px:need sk validation\n", skb);
		return false;
#ifdef CONFIG_TLS_DEVICE
	} else if (skb->decrypted) {
		DEBUG_INFO("%px:SK or decrypted\n", skb);
		return false;
#endif
	}
#endif
	if (skb_vlan_tag_present(skb)) {
		DEBUG_INFO("%px:Vlan is present\n", skb);
		return false;
	}

	if (netif_needs_gso(skb, features)) {
		DEBUG_INFO("%px:Need to be gso\n", skb);
		return false;
	}

	if (skb_sec_path(skb)) {
		DEBUG_INFO("%px:XFRM is present\n", skb);
		return false;
	}

	return true;
}

/*
 * sfe_enqueue_msg()
 *	Queue a response message
 *
 * @param sfe_ctx SFE context
 * @param response response message to be queued
 */
static inline void sfe_enqueue_msg(struct sfe_ctx_instance_internal *sfe_ctx, struct sfe_response_msg *response)
{
	spin_lock_bh(&sfe_ctx->lock);
	list_add_tail(&response->node, &sfe_ctx->msg_queue);
	spin_unlock_bh(&sfe_ctx->lock);

	schedule_work(&sfe_ctx->work);
}

/*
 * sfe_cmn_msg_init()
 *	Initialize the common message structure.
 *
 * @param ncm message to init
 * @param if_num interface number related with this message
 * @param type message type
 * @param len message length
 * @param cb callback function to process the response of this message
 * @param app_data argument for above callback function
 */
static void sfe_cmn_msg_init(struct sfe_cmn_msg *ncm, u16 if_num, u32 type, u32 len, void *cb, void *app_data)
{
	ncm->interface = if_num;
	ncm->version = SFE_MESSAGE_VERSION;
	ncm->type = type;
	ncm->len = len;
	ncm->cb = (sfe_ptr_t)cb;
	ncm->app_data = (sfe_ptr_t)app_data;
}
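
/*
 * Note: the cb/app_data stored here are round-tripped back to the sender.
 * sfe_process_response_msg() reads them out of the queued copy of the message
 * (msg->cm.cb / msg->cm.app_data) and invokes the callback, so a NULL cb
 * simply means the sender does not want a response.
 */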

/*
 * sfe_ipv4_stats_many_sync_callback()
 *	Synchronize the state of many connections.
 *
 * @param msg SFE statistics from the SFE core engine
 */
static void sfe_ipv4_stats_many_sync_callback(struct sfe_ipv4_msg *msg)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
	sfe_ipv4_msg_callback_t sync_cb;

	rcu_read_lock();
	sync_cb = rcu_dereference(sfe_ctx->ipv4_stats_sync_many_cb);
	rcu_read_unlock();
	if (!sync_cb) {
		sfe_incr_exceptions(SFE_EXCEPTION_NO_SYNC_CB);
		return;
	}
	sync_cb(sfe_ctx->ipv4_stats_sync_data, msg);
}

/*
 * sfe_ipv4_stats_convert()
 *	Convert the internal message format to the ECM format.
 *
 * @param sync_msg stats msg to ECM
 * @param sis SFE statistics from the SFE core engine
 */
void sfe_ipv4_stats_convert(struct sfe_ipv4_conn_sync *sync_msg, struct sfe_connection_sync *sis)
{
	/*
	 * Fill connection specific information
	 */
	sync_msg->protocol = (u8)sis->protocol;
	sync_msg->flow_ip = sis->src_ip.ip;
	sync_msg->flow_ip_xlate = sis->src_ip_xlate.ip;
	sync_msg->flow_ident = sis->src_port;
	sync_msg->flow_ident_xlate = sis->src_port_xlate;

	sync_msg->return_ip = sis->dest_ip.ip;
	sync_msg->return_ip_xlate = sis->dest_ip_xlate.ip;
	sync_msg->return_ident = sis->dest_port;
	sync_msg->return_ident_xlate = sis->dest_port_xlate;

	/*
	 * Fill TCP protocol specific information
	 */
	if (sis->protocol == IPPROTO_TCP) {
		sync_msg->flow_max_window = sis->src_td_max_window;
		sync_msg->flow_end = sis->src_td_end;
		sync_msg->flow_max_end = sis->src_td_max_end;

		sync_msg->return_max_window = sis->dest_td_max_window;
		sync_msg->return_end = sis->dest_td_end;
		sync_msg->return_max_end = sis->dest_td_max_end;
	}

	/*
	 * Fill statistics information
	 */
	sync_msg->flow_rx_packet_count = sis->src_new_packet_count;
	sync_msg->flow_rx_byte_count = sis->src_new_byte_count;
	sync_msg->flow_tx_packet_count = sis->dest_new_packet_count;
	sync_msg->flow_tx_byte_count = sis->dest_new_byte_count;

	sync_msg->return_rx_packet_count = sis->dest_new_packet_count;
	sync_msg->return_rx_byte_count = sis->dest_new_byte_count;
	sync_msg->return_tx_packet_count = sis->src_new_packet_count;
	sync_msg->return_tx_byte_count = sis->src_new_byte_count;

	/*
	 * Fill expiration time to extend, in unit of msec
	 */
	sync_msg->inc_ticks = (((u32)sis->delta_jiffies) * MSEC_PER_SEC)/HZ;

	/*
	 * Fill other information
	 */
	switch (sis->reason) {
	case SFE_SYNC_REASON_DESTROY:
		sync_msg->reason = SFE_RULE_SYNC_REASON_DESTROY;
		break;
	case SFE_SYNC_REASON_FLUSH:
		sync_msg->reason = SFE_RULE_SYNC_REASON_FLUSH;
		break;
	default:
		sync_msg->reason = SFE_RULE_SYNC_REASON_STATS;
		break;
	}
	return;
}
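
/*
 * A quick worked example of the inc_ticks conversion above: with a kernel
 * HZ of 100 and delta_jiffies of 250, inc_ticks = (250 * 1000) / 100 = 2500,
 * i.e. the connection's expiration is extended by 2.5 seconds. (HZ = 100 is
 * just an assumed configuration for illustration.)
 */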

/*
 * sfe_ipv4_stats_one_sync_callback()
 *	Synchronize a connection's state.
 *
 * @param sis SFE statistics from the SFE core engine
 */
static void sfe_ipv4_stats_one_sync_callback(struct sfe_connection_sync *sis)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
	struct sfe_ipv4_msg msg;
	struct sfe_ipv4_conn_sync *sync_msg;
	sfe_ipv4_msg_callback_t sync_cb;

	rcu_read_lock();
	sync_cb = rcu_dereference(sfe_ctx->ipv4_stats_sync_cb);
	rcu_read_unlock();
	if (!sync_cb) {
		sfe_incr_exceptions(SFE_EXCEPTION_NO_SYNC_CB);
		return;
	}

	sync_msg = &msg.msg.conn_stats;

	memset(&msg, 0, sizeof(msg));
	sfe_cmn_msg_init(&msg.cm, 0, SFE_RX_CONN_STATS_SYNC_MSG,
			 sizeof(struct sfe_ipv4_conn_sync), NULL, NULL);

	sfe_ipv4_stats_convert(sync_msg, sis);

	/*
	 * The SFE sync call is executed in a timer, so we can redirect it to ECM directly.
	 */
	sync_cb(sfe_ctx->ipv4_stats_sync_data, &msg);
}

/*
 * sfe_recv_parse_l2()
 *	Parse L2 headers
 *
 * Returns true if the packet is parsed and false otherwise.
 */
static bool sfe_recv_parse_l2(struct net_device *dev, struct sk_buff *skb, struct sfe_l2_info *l2_info)
{
	/*
	 * VLAN parsing
	 */
	if (unlikely(!sfe_vlan_check_and_parse_tag(skb, l2_info))) {
		return false;
	}

	/*
	 * Parse only PPPoE session packets
	 */
	if (htons(ETH_P_PPP_SES) == skb->protocol) {
		if (!sfe_pppoe_parse_hdr(skb, l2_info)) {

			/*
			 * For an exception from PPPoE, return from here without modifying skb->data.
			 * This includes non-IPv4/v6 cases as well.
			 */
			return false;
		}
	}
	return true;
}

/*
 * sfe_recv_undo_parse_l2()
 */
static void sfe_recv_undo_parse_l2(struct net_device *dev, struct sk_buff *skb, struct sfe_l2_info *l2_info)
{
	/*
	 * PPPoE undo
	 */
	sfe_pppoe_undo_parse(skb, l2_info);

	/*
	 * VLAN undo
	 */
	sfe_vlan_undo_parse(skb, l2_info);

	/*
	 * The packet is not handled by SFE, so reset the network header.
	 */
	skb_reset_network_header(skb);
}

/*
 * sfe_create_ipv4_rule_msg()
 *	Convert a create message from the ECM format to the SFE format
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv4 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_create_ipv4_rule_msg(struct sfe_ctx_instance_internal *sfe_ctx, struct sfe_ipv4_msg *msg)
{
	struct net_device *src_dev = NULL;
	struct net_device *dest_dev = NULL;
	struct sfe_response_msg *response;
	enum sfe_cmn_response ret = SFE_TX_SUCCESS;
	bool is_routed = true;
	bool cfg_err;

	response = sfe_alloc_response_msg(SFE_MSG_TYPE_IPV4, msg);
	if (!response) {
		sfe_incr_exceptions(SFE_EXCEPTION_ENQUEUE_FAILED);
		return SFE_TX_FAILURE_QUEUE;
	}

	if (!(msg->msg.rule_create.valid_flags & SFE_RULE_CREATE_CONN_VALID)) {
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CONNECTION_INVALID);
		goto failed_ret;
	}

	switch (msg->msg.rule_create.tuple.protocol) {
	case IPPROTO_TCP:
		if (!(msg->msg.rule_create.valid_flags & SFE_RULE_CREATE_TCP_VALID)) {
			ret = SFE_CMN_RESPONSE_EMSG;
			sfe_incr_exceptions(SFE_EXCEPTION_TCP_INVALID);
			goto failed_ret;
		}

		/* fall through */
	case IPPROTO_UDP:
		break;

	case IPPROTO_GRE:
		break;

	case IPPROTO_IPV6:
		break;

	default:
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_PROTOCOL_NOT_SUPPORT);
		goto failed_ret;
	}

	/*
	 * Bridge flows are accelerated if the L2 feature is enabled.
	 */
	if (msg->msg.rule_create.rule_flags & SFE_RULE_CREATE_FLAG_BRIDGE_FLOW) {
		if (!sfe_is_l2_feature_enabled()) {
			ret = SFE_CMN_RESPONSE_EINTERFACE;
			sfe_incr_exceptions(SFE_EXCEPTION_NOT_SUPPORT_BRIDGE);
			goto failed_ret;
		}

		is_routed = false;
	}

	/*
	 * Does our input device support IP processing?
	 */
	src_dev = dev_get_by_index(&init_net, msg->msg.rule_create.conn_rule.flow_top_interface_num);
	if (!src_dev || !sfe_routed_dev_allow(src_dev, is_routed, true)) {
		ret = SFE_CMN_RESPONSE_EINTERFACE;
		sfe_incr_exceptions(SFE_EXCEPTION_SRC_DEV_NOT_L3);
		goto failed_ret;
	}

	/*
	 * Check whether the L2 feature is disabled while the rule flag is configured to use the bottom interface
	 */
	cfg_err = (msg->msg.rule_create.rule_flags & SFE_RULE_CREATE_FLAG_USE_FLOW_BOTTOM_INTERFACE) && !sfe_is_l2_feature_enabled();
	if (cfg_err) {
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CFG_ERR);
		goto failed_ret;
	}

	/*
	 * Does our output device support IP processing?
	 */
	dest_dev = dev_get_by_index(&init_net, msg->msg.rule_create.conn_rule.return_top_interface_num);
	if (!dest_dev || !sfe_routed_dev_allow(dest_dev, is_routed, true)) {
		ret = SFE_CMN_RESPONSE_EINTERFACE;
		sfe_incr_exceptions(SFE_EXCEPTION_DEST_DEV_NOT_L3);
		goto failed_ret;
	}

	/*
	 * Check whether the L2 feature is disabled while the rule flag is configured to use the bottom interface
	 */
	cfg_err = (msg->msg.rule_create.rule_flags & SFE_RULE_CREATE_FLAG_USE_RETURN_BOTTOM_INTERFACE) && !sfe_is_l2_feature_enabled();
	if (cfg_err) {
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CFG_ERR);
		goto failed_ret;
	}

	if (!sfe_ipv4_create_rule(&msg->msg.rule_create)) {
		/* success */
		ret = SFE_CMN_RESPONSE_ACK;
	} else {
		/* Failed */
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CREATE_FAILED);
	}

	/*
	 * Fall through
	 */
failed_ret:
	if (src_dev) {
		dev_put(src_dev);
	}

	if (dest_dev) {
		dev_put(dest_dev);
	}

	/*
	 * Try to queue the response message
	 */
	((struct sfe_ipv4_msg *)response->msg)->cm.response = msg->cm.response = ret;
	sfe_enqueue_msg(sfe_ctx, response);

	return SFE_TX_SUCCESS;
}

/*
 * sfe_destroy_ipv4_rule_msg()
 *	Convert a destroy message from the ECM format to the SFE format
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv4 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_destroy_ipv4_rule_msg(struct sfe_ctx_instance_internal *sfe_ctx, struct sfe_ipv4_msg *msg)
{
	struct sfe_response_msg *response;

	response = sfe_alloc_response_msg(SFE_MSG_TYPE_IPV4, msg);
	if (!response) {
		sfe_incr_exceptions(SFE_EXCEPTION_ENQUEUE_FAILED);
		return SFE_TX_FAILURE_QUEUE;
	}

	sfe_ipv4_destroy_rule(&msg->msg.rule_destroy);

	/*
	 * Try to queue the response message
	 */
	((struct sfe_ipv4_msg *)response->msg)->cm.response = msg->cm.response = SFE_CMN_RESPONSE_ACK;
	sfe_enqueue_msg(sfe_ctx, response);

	return SFE_TX_SUCCESS;
}

/*
 * sfe_sync_ipv4_stats_many_msg()
 *	Sync connection stats msg from the ECM
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv4 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_sync_ipv4_stats_many_msg(struct sfe_ctx_instance_internal *sfe_ctx, struct sfe_ipv4_msg *msg)
{
	struct sfe_ipv4_conn_sync_many_msg *nicsm;
	nicsm = &(msg->msg.conn_stats_many);

	if (sfe_ipv4_sync_invoke(nicsm->index)) {
		return SFE_TX_SUCCESS;
	}
	return SFE_TX_FAILURE;
}

/*
 * sfe_ipv4_tx()
 *	Transmit an IPv4 message to the sfe
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv4 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_ipv4_tx(struct sfe_ctx_instance *sfe_ctx, struct sfe_ipv4_msg *msg)
{
	switch (msg->cm.type) {
	case SFE_TX_CREATE_RULE_MSG:
		return sfe_create_ipv4_rule_msg(SFE_CTX_TO_PRIVATE(sfe_ctx), msg);
	case SFE_TX_DESTROY_RULE_MSG:
		return sfe_destroy_ipv4_rule_msg(SFE_CTX_TO_PRIVATE(sfe_ctx), msg);
	case SFE_TX_CONN_STATS_SYNC_MANY_MSG:
		return sfe_sync_ipv4_stats_many_msg(SFE_CTX_TO_PRIVATE(sfe_ctx), msg);
	default:
		sfe_incr_exceptions(SFE_EXCEPTION_IPV4_MSG_UNKNOW);
		return SFE_TX_FAILURE_NOT_ENABLED;
	}
}
EXPORT_SYMBOL(sfe_ipv4_tx);
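
/*
 * For reference, a minimal sketch of how a client such as ECM might push a
 * create-rule message through this entry point (illustrative only; my_rule,
 * my_response_cb, my_app_data and ctx are hypothetical names, and ctx is the
 * pointer returned by sfe_ipv4_notify_register()):
 *
 *	struct sfe_ipv4_msg nim;
 *
 *	sfe_ipv4_msg_init(&nim, 0, SFE_TX_CREATE_RULE_MSG,
 *			  sizeof(struct sfe_ipv4_rule_create_msg),
 *			  my_response_cb, my_app_data);
 *	nim.msg.rule_create = my_rule;
 *	if (sfe_ipv4_tx(ctx, &nim) != SFE_TX_SUCCESS) {
 *		... handle the failure, no response will be queued ...
 *	}
 *
 * On success the ACK/NACK arrives asynchronously via my_response_cb, called
 * from the response workqueue (sfe_process_response_msg()).
 */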

/*
 * sfe_ipv4_msg_init()
 *	Initialize IPv4 message.
 */
void sfe_ipv4_msg_init(struct sfe_ipv4_msg *nim, u16 if_num, u32 type, u32 len,
		       sfe_ipv4_msg_callback_t cb, void *app_data)
{
	sfe_cmn_msg_init(&nim->cm, if_num, type, len, (void *)cb, app_data);
}
EXPORT_SYMBOL(sfe_ipv4_msg_init);

/*
 * sfe_ipv4_max_conn_count()
 *	Return the maximum number of connections SFE supports
 */
int sfe_ipv4_max_conn_count(void)
{
	return max_ipv4_conn;
}
EXPORT_SYMBOL(sfe_ipv4_max_conn_count);

/*
 * sfe_ipv4_notify_register()
 *	Register notifier callbacks for IPv4 messages from SFE
 *
 * @param one_rule_cb The callback pointer for single-rule sync messages
 * @param many_rules_cb The callback pointer for many-rule sync messages
 * @param app_data The application context for these messages
 *
 * @return struct sfe_ctx_instance * The SFE context
 */
struct sfe_ctx_instance *sfe_ipv4_notify_register(sfe_ipv4_msg_callback_t one_rule_cb,
						  sfe_ipv4_msg_callback_t many_rules_cb, void *app_data)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);

	/*
	 * Hook the shortcut sync callback.
	 */
	if (one_rule_cb && !sfe_ctx->ipv4_stats_sync_cb) {
		sfe_ipv4_register_sync_rule_callback(sfe_ipv4_stats_one_sync_callback);
	}
	rcu_assign_pointer(sfe_ctx->ipv4_stats_sync_cb, one_rule_cb);

	if (many_rules_cb && !sfe_ctx->ipv4_stats_sync_many_cb) {
		sfe_ipv4_register_many_sync_callback(sfe_ipv4_stats_many_sync_callback);
	}
	rcu_assign_pointer(sfe_ctx->ipv4_stats_sync_many_cb, many_rules_cb);

	sfe_ctx->ipv4_stats_sync_data = app_data;

	spin_unlock_bh(&sfe_ctx->lock);

	return SFE_CTX_TO_PUBLIC(sfe_ctx);
}
EXPORT_SYMBOL(sfe_ipv4_notify_register);
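
/*
 * A minimal registration sketch (illustrative only; the callback bodies and
 * names are hypothetical):
 *
 *	static void my_one_sync(void *app_data, struct sfe_ipv4_msg *msg)
 *	{
 *		... consume msg->msg.conn_stats ...
 *	}
 *
 *	static void my_many_sync(void *app_data, struct sfe_ipv4_msg *msg)
 *	{
 *		... consume msg->msg.conn_stats_many ...
 *	}
 *
 *	ctx = sfe_ipv4_notify_register(my_one_sync, my_many_sync, my_data);
 *	...
 *	sfe_ipv4_notify_unregister();
 *
 * Passing NULL for either callback leaves that sync path unhooked.
 */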

/*
 * sfe_ipv4_notify_unregister()
 *	Unregister the notifier callbacks for IPv4 messages from SFE
 */
void sfe_ipv4_notify_unregister(void)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);

	/*
	 * Unregister our single rule msg sync callback.
	 */
	if (sfe_ctx->ipv4_stats_sync_cb) {
		sfe_ipv4_register_sync_rule_callback(NULL);
		rcu_assign_pointer(sfe_ctx->ipv4_stats_sync_cb, NULL);
	}

	/*
	 * Unregister our many rule msg sync callback.
	 */
	if (sfe_ctx->ipv4_stats_sync_many_cb) {
		sfe_ipv4_register_many_sync_callback(NULL);
		rcu_assign_pointer(sfe_ctx->ipv4_stats_sync_many_cb, NULL);
	}

	sfe_ctx->ipv4_stats_sync_data = NULL;

	spin_unlock_bh(&sfe_ctx->lock);

	sfe_clean_response_msg_by_type(sfe_ctx, SFE_MSG_TYPE_IPV4);
	return;
}
EXPORT_SYMBOL(sfe_ipv4_notify_unregister);

/*
 * sfe_ipv6_many_stats_sync_callback()
 *	Synchronize the state of many connections.
 */
static void sfe_ipv6_many_stats_sync_callback(struct sfe_ipv6_msg *msg)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
	sfe_ipv6_msg_callback_t sync_cb;

	rcu_read_lock();
	sync_cb = rcu_dereference(sfe_ctx->ipv6_stats_sync_many_cb);
	rcu_read_unlock();
	if (!sync_cb) {
		sfe_incr_exceptions(SFE_EXCEPTION_NO_SYNC_CB);
		return;
	}

	sync_cb(sfe_ctx->ipv6_stats_sync_data, msg);
}

/*
 * sfe_ipv6_stats_convert()
 *	Convert the internal message format to the ECM format.
 *
 * @param sync_msg stats msg to ECM
 * @param sis SFE statistics from the SFE core engine
 */
void sfe_ipv6_stats_convert(struct sfe_ipv6_conn_sync *sync_msg, struct sfe_connection_sync *sis)
{
	/*
	 * Fill connection specific information
	 */
	sync_msg->protocol = (u8)sis->protocol;
	sfe_ipv6_addr_copy(sis->src_ip.ip6, sync_msg->flow_ip);
	sync_msg->flow_ident = sis->src_port;

	sfe_ipv6_addr_copy(sis->dest_ip.ip6, sync_msg->return_ip);
	sync_msg->return_ident = sis->dest_port;

	/*
	 * Fill TCP protocol specific information
	 */
	if (sis->protocol == IPPROTO_TCP) {
		sync_msg->flow_max_window = sis->src_td_max_window;
		sync_msg->flow_end = sis->src_td_end;
		sync_msg->flow_max_end = sis->src_td_max_end;

		sync_msg->return_max_window = sis->dest_td_max_window;
		sync_msg->return_end = sis->dest_td_end;
		sync_msg->return_max_end = sis->dest_td_max_end;
	}

	/*
	 * Fill statistics information
	 */
	sync_msg->flow_rx_packet_count = sis->src_new_packet_count;
	sync_msg->flow_rx_byte_count = sis->src_new_byte_count;
	sync_msg->flow_tx_packet_count = sis->dest_new_packet_count;
	sync_msg->flow_tx_byte_count = sis->dest_new_byte_count;

	sync_msg->return_rx_packet_count = sis->dest_new_packet_count;
	sync_msg->return_rx_byte_count = sis->dest_new_byte_count;
	sync_msg->return_tx_packet_count = sis->src_new_packet_count;
	sync_msg->return_tx_byte_count = sis->src_new_byte_count;

	/*
	 * Fill expiration time to extend, in unit of msec
	 */
	sync_msg->inc_ticks = (((u32)sis->delta_jiffies) * MSEC_PER_SEC)/HZ;

	/*
	 * Fill other information
	 */
	switch (sis->reason) {
	case SFE_SYNC_REASON_DESTROY:
		sync_msg->reason = SFE_RULE_SYNC_REASON_DESTROY;
		break;
	case SFE_SYNC_REASON_FLUSH:
		sync_msg->reason = SFE_RULE_SYNC_REASON_FLUSH;
		break;
	default:
		sync_msg->reason = SFE_RULE_SYNC_REASON_STATS;
		break;
	}

	return;
}

/*
 * sfe_ipv6_stats_sync_callback()
 *	Synchronize a connection's state.
 */
static void sfe_ipv6_stats_sync_callback(struct sfe_connection_sync *sis)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
	struct sfe_ipv6_msg msg;
	struct sfe_ipv6_conn_sync *sync_msg;
	sfe_ipv6_msg_callback_t sync_cb;

	rcu_read_lock();
	sync_cb = rcu_dereference(sfe_ctx->ipv6_stats_sync_cb);
	rcu_read_unlock();
	if (!sync_cb) {
		sfe_incr_exceptions(SFE_EXCEPTION_NO_SYNC_CB);
		return;
	}

	sync_msg = &msg.msg.conn_stats;

	memset(&msg, 0, sizeof(msg));
	sfe_cmn_msg_init(&msg.cm, 0, SFE_RX_CONN_STATS_SYNC_MSG,
			 sizeof(struct sfe_ipv6_conn_sync), NULL, NULL);

	sfe_ipv6_stats_convert(sync_msg, sis);

	/*
	 * The SFE sync call is executed in a timer, so we can redirect it to ECM directly.
	 */
	sync_cb(sfe_ctx->ipv6_stats_sync_data, &msg);
}

/*
 * sfe_create_ipv6_rule_msg()
 *	Convert a create message from the ECM format to the SFE format
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv6 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_create_ipv6_rule_msg(struct sfe_ctx_instance_internal *sfe_ctx, struct sfe_ipv6_msg *msg)
{
	struct net_device *src_dev = NULL;
	struct net_device *dest_dev = NULL;
	struct sfe_response_msg *response;
	enum sfe_cmn_response ret = SFE_TX_SUCCESS;
	bool is_routed = true;
	bool cfg_err;

	response = sfe_alloc_response_msg(SFE_MSG_TYPE_IPV6, msg);
	if (!response) {
		sfe_incr_exceptions(SFE_EXCEPTION_ENQUEUE_FAILED);
		return SFE_TX_FAILURE_QUEUE;
	}

	if (!(msg->msg.rule_create.valid_flags & SFE_RULE_CREATE_CONN_VALID)) {
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CONNECTION_INVALID);
		goto failed_ret;
	}

	/*
	 * Bridge flows are accelerated if the L2 feature is enabled.
	 */
	if (msg->msg.rule_create.rule_flags & SFE_RULE_CREATE_FLAG_BRIDGE_FLOW) {
		if (!sfe_is_l2_feature_enabled()) {
			ret = SFE_CMN_RESPONSE_EINTERFACE;
			sfe_incr_exceptions(SFE_EXCEPTION_NOT_SUPPORT_BRIDGE);
			goto failed_ret;
		}
		is_routed = false;
	}

	switch (msg->msg.rule_create.tuple.protocol) {

	case IPPROTO_TCP:
		if (!(msg->msg.rule_create.valid_flags & SFE_RULE_CREATE_TCP_VALID)) {
			ret = SFE_CMN_RESPONSE_EMSG;
			sfe_incr_exceptions(SFE_EXCEPTION_TCP_INVALID);
			goto failed_ret;
		}

		break;

	case IPPROTO_UDP:
		break;

	case IPPROTO_IPIP:
		break;

	case IPPROTO_GRE:
		break;

	default:
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_PROTOCOL_NOT_SUPPORT);
		goto failed_ret;
	}

	/*
	 * Does our input device support IP processing?
	 */
	src_dev = dev_get_by_index(&init_net, msg->msg.rule_create.conn_rule.flow_top_interface_num);
	if (!src_dev || !sfe_routed_dev_allow(src_dev, is_routed, false)) {
		ret = SFE_CMN_RESPONSE_EINTERFACE;
		sfe_incr_exceptions(SFE_EXCEPTION_SRC_DEV_NOT_L3);
		goto failed_ret;
	}

	/*
	 * Check whether the L2 feature is disabled while the rule flag is configured to use the bottom interface
	 */
	cfg_err = (msg->msg.rule_create.rule_flags & SFE_RULE_CREATE_FLAG_USE_FLOW_BOTTOM_INTERFACE) && !sfe_is_l2_feature_enabled();
	if (cfg_err) {
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CFG_ERR);
		goto failed_ret;
	}

	/*
	 * Does our output device support IP processing?
	 */
	dest_dev = dev_get_by_index(&init_net, msg->msg.rule_create.conn_rule.return_top_interface_num);
	if (!dest_dev || !sfe_routed_dev_allow(dest_dev, is_routed, false)) {
		ret = SFE_CMN_RESPONSE_EINTERFACE;
		sfe_incr_exceptions(SFE_EXCEPTION_DEST_DEV_NOT_L3);
		goto failed_ret;
	}

	/*
	 * Check whether the L2 feature is disabled while the rule flag is configured to use the bottom interface
	 */
	cfg_err = (msg->msg.rule_create.rule_flags & SFE_RULE_CREATE_FLAG_USE_RETURN_BOTTOM_INTERFACE) && !sfe_is_l2_feature_enabled();
	if (cfg_err) {
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CFG_ERR);
		goto failed_ret;
	}

	if (!sfe_ipv6_create_rule(&msg->msg.rule_create)) {
		/* success */
		ret = SFE_CMN_RESPONSE_ACK;
	} else {
		/* Failed */
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CREATE_FAILED);
	}

	/*
	 * Fall through
	 */
failed_ret:
	if (src_dev) {
		dev_put(src_dev);
	}

	if (dest_dev) {
		dev_put(dest_dev);
	}

	/*
	 * Try to queue the response message
	 */
	((struct sfe_ipv6_msg *)response->msg)->cm.response = msg->cm.response = ret;
	sfe_enqueue_msg(sfe_ctx, response);

	return SFE_TX_SUCCESS;
}

/*
 * sfe_destroy_ipv6_rule_msg()
 *	Convert a destroy message from the ECM format to the SFE format
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv6 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_destroy_ipv6_rule_msg(struct sfe_ctx_instance_internal *sfe_ctx, struct sfe_ipv6_msg *msg)
{
	struct sfe_response_msg *response;

	response = sfe_alloc_response_msg(SFE_MSG_TYPE_IPV6, msg);
	if (!response) {
		sfe_incr_exceptions(SFE_EXCEPTION_ENQUEUE_FAILED);
		return SFE_TX_FAILURE_QUEUE;
	}

	sfe_ipv6_destroy_rule(&msg->msg.rule_destroy);

	/*
	 * Try to queue the response message
	 */
	((struct sfe_ipv6_msg *)response->msg)->cm.response = msg->cm.response = SFE_CMN_RESPONSE_ACK;
	sfe_enqueue_msg(sfe_ctx, response);

	return SFE_TX_SUCCESS;
}

/*
 * sfe_sync_ipv6_stats_many_msg()
 *	Sync connection stats msg from the ECM
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv6 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_sync_ipv6_stats_many_msg(struct sfe_ctx_instance_internal *sfe_ctx, struct sfe_ipv6_msg *msg)
{
	struct sfe_ipv6_conn_sync_many_msg *nicsm;
	nicsm = &(msg->msg.conn_stats_many);

	if (sfe_ipv6_sync_invoke(nicsm->index)) {
		return SFE_TX_SUCCESS;
	}
	return SFE_TX_FAILURE;
}

/*
 * sfe_ipv6_tx()
 *	Transmit an IPv6 message to the sfe
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv6 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_ipv6_tx(struct sfe_ctx_instance *sfe_ctx, struct sfe_ipv6_msg *msg)
{
	switch (msg->cm.type) {
	case SFE_TX_CREATE_RULE_MSG:
		return sfe_create_ipv6_rule_msg(SFE_CTX_TO_PRIVATE(sfe_ctx), msg);
	case SFE_TX_DESTROY_RULE_MSG:
		return sfe_destroy_ipv6_rule_msg(SFE_CTX_TO_PRIVATE(sfe_ctx), msg);
	case SFE_TX_CONN_STATS_SYNC_MANY_MSG:
		return sfe_sync_ipv6_stats_many_msg(SFE_CTX_TO_PRIVATE(sfe_ctx), msg);
	default:
		sfe_incr_exceptions(SFE_EXCEPTION_IPV6_MSG_UNKNOW);
		return SFE_TX_FAILURE_NOT_ENABLED;
	}
}
EXPORT_SYMBOL(sfe_ipv6_tx);

/*
 * sfe_ipv6_msg_init()
 *	Initialize IPv6 message.
 */
void sfe_ipv6_msg_init(struct sfe_ipv6_msg *nim, u16 if_num, u32 type, u32 len,
		       sfe_ipv6_msg_callback_t cb, void *app_data)
{
	sfe_cmn_msg_init(&nim->cm, if_num, type, len, (void *)cb, app_data);
}
EXPORT_SYMBOL(sfe_ipv6_msg_init);

/*
 * sfe_ipv6_max_conn_count()
 *	Return the maximum number of connections SFE supports
 */
int sfe_ipv6_max_conn_count(void)
{
	return max_ipv6_conn;
}
EXPORT_SYMBOL(sfe_ipv6_max_conn_count);

/*
 * sfe_ipv6_notify_register()
 *	Register notifier callbacks for IPv6 messages from SFE
 *
 * @param one_rule_cb The callback pointer for single-rule sync messages
 * @param many_rule_cb The callback pointer for many-rule sync messages
 * @param app_data The application context for these messages
 *
 * @return struct sfe_ctx_instance * The SFE context
 */
struct sfe_ctx_instance *sfe_ipv6_notify_register(sfe_ipv6_msg_callback_t one_rule_cb,
						  sfe_ipv6_msg_callback_t many_rule_cb, void *app_data)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);

	/*
	 * Hook the shortcut sync callback.
	 */
	if (one_rule_cb && !sfe_ctx->ipv6_stats_sync_cb) {
		sfe_ipv6_register_sync_rule_callback(sfe_ipv6_stats_sync_callback);
	}
	rcu_assign_pointer(sfe_ctx->ipv6_stats_sync_cb, one_rule_cb);

	if (many_rule_cb && !sfe_ctx->ipv6_stats_sync_many_cb) {
		sfe_ipv6_register_many_sync_callback(sfe_ipv6_many_stats_sync_callback);
	}
	rcu_assign_pointer(sfe_ctx->ipv6_stats_sync_many_cb, many_rule_cb);

	sfe_ctx->ipv6_stats_sync_data = app_data;

	spin_unlock_bh(&sfe_ctx->lock);

	return SFE_CTX_TO_PUBLIC(sfe_ctx);
}
EXPORT_SYMBOL(sfe_ipv6_notify_register);

/*
 * sfe_ipv6_notify_unregister()
 *	Unregister the notifier callbacks for IPv6 messages from SFE
 */
void sfe_ipv6_notify_unregister(void)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);

	/*
	 * Unregister our sync callbacks.
	 */
	if (sfe_ctx->ipv6_stats_sync_cb) {
		sfe_ipv6_register_sync_rule_callback(NULL);
		rcu_assign_pointer(sfe_ctx->ipv6_stats_sync_cb, NULL);
	}

	if (sfe_ctx->ipv6_stats_sync_many_cb) {
		sfe_ipv6_register_many_sync_callback(NULL);
		rcu_assign_pointer(sfe_ctx->ipv6_stats_sync_many_cb, NULL);
	}

	sfe_ctx->ipv6_stats_sync_data = NULL;

	spin_unlock_bh(&sfe_ctx->lock);

	sfe_clean_response_msg_by_type(sfe_ctx, SFE_MSG_TYPE_IPV6);
	return;
}
EXPORT_SYMBOL(sfe_ipv6_notify_unregister);

/*
 * sfe_tun6rd_tx()
 *	Transmit a tun6rd message to the SFE engine
 */
sfe_tx_status_t sfe_tun6rd_tx(struct sfe_ctx_instance *sfe_ctx, struct sfe_tun6rd_msg *msg)
{
	sfe_incr_exceptions(SFE_EXCEPTION_NOT_SUPPORT_6RD);
	return SFE_TX_FAILURE_NOT_ENABLED;
}
EXPORT_SYMBOL(sfe_tun6rd_tx);

/*
 * sfe_tun6rd_msg_init()
 *	Initialize sfe_tun6rd msg.
 */
void sfe_tun6rd_msg_init(struct sfe_tun6rd_msg *ncm, u16 if_num, u32 type, u32 len, void *cb, void *app_data)
{
	sfe_cmn_msg_init(&ncm->cm, if_num, type, len, cb, app_data);
}
EXPORT_SYMBOL(sfe_tun6rd_msg_init);

/*
 * sfe_recv()
 *	Handle packet receives.
 *
 * Returns 1 if the packet is forwarded or 0 if it isn't.
 */
int sfe_recv(struct sk_buff *skb)
{
	struct net_device *dev;
	struct sfe_l2_info l2_info;
	int ret;

	dev = skb->dev;

	/*
	 * Setting parse flags to 0 since l2_info is passed for the non-L2.5 header case as well
	 */
	l2_info.parse_flags = 0;
	l2_info.vlan_hdr_cnt = 0;

#ifdef CONFIG_NET_CLS_ACT
	/*
	 * If an ingress Qdisc is configured and the packet has not been processed by it yet,
	 * we can not accelerate this packet.
	 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0))
	if (dev->ingress_queue && !(skb->tc_verd & TC_NCLS)) {
		return 0;
	}
#else
	if (rcu_access_pointer(dev->miniq_ingress) && !skb->tc_skip_classify) {
		return 0;
	}
#endif
#endif

	/*
	 * If l2_feature is enabled, we need not check whether the src dev is an L3 interface,
	 * since bridge flow offload is supported.
	 * If l2_feature is disabled, then we make sure the src dev is an L3 interface to
	 * avoid the cost of rule lookup for L2 flows.
	 */
	switch (ntohs(skb->protocol)) {
	case ETH_P_IP:
		if (likely(sfe_is_l2_feature_enabled()) || sfe_dev_is_layer_3_interface(dev, true)) {
			return sfe_ipv4_recv(dev, skb, &l2_info, false);
		}

		DEBUG_TRACE("No IPv4 address for device: %s skb=%px\n", dev->name, skb);
		return 0;

	case ETH_P_IPV6:
		if (likely(sfe_is_l2_feature_enabled()) || sfe_dev_is_layer_3_interface(dev, false)) {
			return sfe_ipv6_recv(dev, skb, &l2_info, false);
		}

		DEBUG_TRACE("No IPv6 address for device: %s skb=%px\n", dev->name, skb);
		return 0;

	default:
		break;
	}

	/*
	 * Stop L2 processing if the L2 feature is disabled.
	 */
	if (!sfe_is_l2_feature_enabled()) {
		DEBUG_TRACE("Unsupported protocol %#x %s (L2 feature is disabled) skb=%px\n",
			    ntohs(skb->protocol), dev->name, skb);
		return 0;
	}

	/*
	 * Parse the L2 headers to find the L3 protocol and the L2 header offset
	 */
	if (unlikely(!sfe_recv_parse_l2(dev, skb, &l2_info))) {
		DEBUG_TRACE("%px: Invalid L2.5 header format with protocol : %x\n", skb, ntohs(skb->protocol));
		goto send_to_linux;
	}

	/*
	 * The protocol in l2_info is expected to be in host byte order.
	 * PPPoE does this in sfe_pppoe_parse_hdr().
	 */
	if (likely(l2_info.protocol == ETH_P_IP)) {
		ret = sfe_ipv4_recv(dev, skb, &l2_info, false);
		if (unlikely(!ret)) {
			goto send_to_linux;
		}
		return ret;
	}

	if (likely(l2_info.protocol == ETH_P_IPV6)) {
		ret = sfe_ipv6_recv(dev, skb, &l2_info, false);
		if (unlikely(!ret)) {
			goto send_to_linux;
		}
		return ret;
	}

	DEBUG_TRACE("Non-IP(%x) %s skb=%px skb_vlan:%x/%x/%x skb_proto=%x\n",
		    l2_info.protocol, dev->name, skb,
		    ntohs(skb->vlan_proto), skb->vlan_tci, skb_vlan_tag_present(skb),
		    htons(skb->protocol));

send_to_linux:
	/*
	 * Push the data back before sending to linux if -
	 * a. There is any exception from IPV4/V6
	 * b. The next protocol is neither IPV4 nor IPV6
	 */
	sfe_recv_undo_parse_l2(dev, skb, &l2_info);

	return 0;
}
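
/*
 * sfe_recv() is not called directly by drivers; sfe_init_if() below plants it
 * in the athrs_fast_nat_recv hook. A rough sketch of the call site one would
 * expect in the network stack's receive path (illustrative only; the exact
 * patch differs by kernel):
 *
 *	int (*fast_recv)(struct sk_buff *skb);
 *
 *	fast_recv = rcu_dereference(athrs_fast_nat_recv);
 *	if (fast_recv && fast_recv(skb)) {
 *		return;		// consumed by SFE
 *	}
 *	... continue normal netif_receive_skb() processing ...
 */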

/*
 * sfe_get_exceptions()
 *	Dump exception counters
 */
static ssize_t sfe_get_exceptions(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	int idx, len;
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);
	for (len = 0, idx = 0; idx < SFE_EXCEPTION_MAX; idx++) {
		if (sfe_ctx->exceptions[idx]) {
			len += snprintf(buf + len, (ssize_t)(PAGE_SIZE - len), "%s = %d\n", sfe_exception_events_string[idx], sfe_ctx->exceptions[idx]);
		}
	}
	spin_unlock_bh(&sfe_ctx->lock);

	return len;
}

/*
 * sysfs attributes.
 */
static const struct device_attribute sfe_exceptions_attr =
	__ATTR(exceptions, S_IRUGO, sfe_get_exceptions, NULL);
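
/*
 * With the kobject created in sfe_init_if(), the counters land under /sys/sfe.
 * Example read from a shell (the sample output values are made up):
 *
 *	$ cat /sys/sfe/exceptions
 *	CONNECTION_INVALID = 3
 *	CREATE_FAILED = 1
 *
 * Only non-zero counters are printed.
 */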

/*
 * sfe_service_class_stats_get()
 *	Collects ipv4 and ipv6 service class statistics and aggregates them.
 */
bool sfe_service_class_stats_get(uint8_t sid, uint64_t *bytes, uint64_t *packets)
{
	*bytes = 0;
	*packets = 0;

	if (!sfe_ipv4_service_class_stats_get(sid, bytes, packets)) {
		return false;
	}

	if (!sfe_ipv6_service_class_stats_get(sid, bytes, packets)) {
		return false;
	}

	return true;
}
EXPORT_SYMBOL(sfe_service_class_stats_get);
1554
Ratheesh Kannoth24fb1db2021-10-20 07:28:06 +05301555/*
Ratheesh Kannothdd382ed2021-11-23 09:36:30 +05301556 * sfe_is_l2_feature_enabled()
1557 * Check if l2 features flag feature is enabled or not. (VLAN, PPPOE, BRIDGE and tunnels)
1558 *
1559 * 32bit read is atomic. No need of locks.
1560 */
1561bool sfe_is_l2_feature_enabled()
1562{
1563 struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
1564 return (sfe_ctx->l2_feature_support == 1);
1565}
1566EXPORT_SYMBOL(sfe_is_l2_feature_enabled);
1567
1568/*
1569 * sfe_get_l2_feature()
1570 * L2 feature is enabled/disabled
1571 */
1572ssize_t sfe_get_l2_feature(struct device *dev,
1573 struct device_attribute *attr,
1574 char *buf)
1575{
1576 struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
1577 ssize_t len;
1578
1579 spin_lock_bh(&sfe_ctx->lock);
1580 len = snprintf(buf, (ssize_t)(PAGE_SIZE), "L2 feature is %s\n", sfe_ctx->l2_feature_support ? "enabled" : "disabled");
1581 spin_unlock_bh(&sfe_ctx->lock);
1582 return len;
1583}
1584
1585/*
1586 * sfe_set_l2_feature()
1587 * Enable or disable l2 features flag.
1588 */
1589ssize_t sfe_set_l2_feature(struct device *dev, struct device_attribute *attr,
1590 const char *buf, size_t count)
1591{
1592 unsigned long val;
1593 struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
1594 int ret;
1595 ret = sscanf(buf, "%lu", &val);
1596
1597 if (ret != 1) {
1598 pr_err("Wrong input, %s\n", buf);
1599 return -EINVAL;
1600 }
1601
1602 if (val != 1 && val != 0) {
1603 pr_err("Input should be either 1 or 0, (%s)\n", buf);
1604 return -EINVAL;
1605 }
1606
1607 spin_lock_bh(&sfe_ctx->lock);
1608
1609 if (sfe_ctx->l2_feature_support && val) {
1610 spin_unlock_bh(&sfe_ctx->lock);
1611 pr_err("L2 feature is already enabled\n");
1612 return -EINVAL;
1613 }
1614
1615 if (!sfe_ctx->l2_feature_support && !val) {
1616 spin_unlock_bh(&sfe_ctx->lock);
1617 pr_err("L2 feature is already disabled\n");
1618 return -EINVAL;
1619 }
1620
1621 sfe_ctx->l2_feature_support = val;
1622 spin_unlock_bh(&sfe_ctx->lock);
1623
1624 return count;
1625}
1626
1627static const struct device_attribute sfe_l2_feature_attr =
1628 __ATTR(l2_feature, 0644, sfe_get_l2_feature, sfe_set_l2_feature);
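
/*
 * Toggling the flag from user space goes through the attribute above, e.g.
 * (paths as created by sfe_init_if(), which registers the "sfe" kobject at
 * the sysfs root):
 *
 *	$ cat /sys/sfe/l2_feature
 *	L2 feature is enabled
 *	$ echo 0 > /sys/sfe/l2_feature
 *
 * Writing the value it already holds returns -EINVAL by design.
 */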

/*
 * sfe_init_if()
 */
int sfe_init_if(void)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
	int result = -1;

	/*
	 * L2 feature is enabled by default
	 */
	sfe_ctx->l2_feature_support = 1;

	/*
	 * Create sys/sfe
	 */
	sfe_ctx->sys_sfe = kobject_create_and_add("sfe", NULL);
	if (!sfe_ctx->sys_sfe) {
		DEBUG_ERROR("failed to register sfe\n");
		goto exit1;
	}

	/*
	 * Create sys/sfe/exceptions
	 */
	result = sysfs_create_file(sfe_ctx->sys_sfe, &sfe_exceptions_attr.attr);
	if (result) {
		DEBUG_ERROR("failed to register exceptions file: %d\n", result);
		goto exit2;
	}

	result = sysfs_create_file(sfe_ctx->sys_sfe, &sfe_l2_feature_attr.attr);
	if (result) {
		DEBUG_ERROR("failed to register L2 feature flag sysfs file: %d\n", result);
		goto exit2;
	}

	spin_lock_init(&sfe_ctx->lock);

	INIT_LIST_HEAD(&sfe_ctx->msg_queue);
	INIT_WORK(&sfe_ctx->work, sfe_process_response_msg);

	/*
	 * Hook the receive path in the network stack.
	 */
	BUG_ON(athrs_fast_nat_recv);
	RCU_INIT_POINTER(athrs_fast_nat_recv, sfe_recv);

	return 0;
exit2:
	kobject_put(sfe_ctx->sys_sfe);
exit1:
	return result;
}

/*
 * sfe_exit_if()
 */
void sfe_exit_if(void)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	/*
	 * Unregister our receive callback.
	 */
	RCU_INIT_POINTER(athrs_fast_nat_recv, NULL);

	/*
	 * Wait for all callbacks to complete.
	 */
	rcu_barrier();

	/*
	 * Destroy all connections.
	 */
	sfe_ipv4_destroy_all_rules_for_dev(NULL);
	sfe_ipv6_destroy_all_rules_for_dev(NULL);

	/*
	 * Stop the work queue, and flush all pending messages in the queue
	 */
	cancel_work_sync(&sfe_ctx->work);
	sfe_process_response_msg(&sfe_ctx->work);

	/*
	 * Unregister our sync callbacks.
	 */
	sfe_ipv4_notify_unregister();
	sfe_ipv6_notify_unregister();

	kobject_put(sfe_ctx->sys_sfe);

	return;
}