/*
 * sfe.c
 *	API for shortcut forwarding engine.
 *
 * Copyright (c) 2015,2016, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/module.h>
#include <linux/version.h>
#include <linux/sysfs.h>
#include <linux/skbuff.h>
#include <net/addrconf.h>
#include <linux/inetdevice.h>
#include <net/pkt_sched.h>
#include <net/vxlan.h>
#include <net/gre.h>

#include "sfe_debug.h"
#include "sfe_api.h"
#include "sfe.h"
#include "sfe_pppoe.h"
#include "sfe_vlan.h"

extern int max_ipv4_conn;
extern int max_ipv6_conn;

#define SFE_MESSAGE_VERSION 0x1
#define sfe_ipv6_addr_copy(src, dest) memcpy((void *)(dest), (void *)(src), 16)
#define sfe_ipv4_stopped(CTX) (rcu_dereference((CTX)->ipv4_stats_sync_cb) == NULL)
#define sfe_ipv6_stopped(CTX) (rcu_dereference((CTX)->ipv6_stats_sync_cb) == NULL)

typedef enum sfe_exception {
	SFE_EXCEPTION_IPV4_MSG_UNKNOW,
	SFE_EXCEPTION_IPV6_MSG_UNKNOW,
	SFE_EXCEPTION_CONNECTION_INVALID,
	SFE_EXCEPTION_NOT_SUPPORT_BRIDGE,
	SFE_EXCEPTION_TCP_INVALID,
	SFE_EXCEPTION_PROTOCOL_NOT_SUPPORT,
	SFE_EXCEPTION_SRC_DEV_NOT_L3,
	SFE_EXCEPTION_DEST_DEV_NOT_L3,
	SFE_EXCEPTION_CFG_ERR,
	SFE_EXCEPTION_CREATE_FAILED,
	SFE_EXCEPTION_ENQUEUE_FAILED,
	SFE_EXCEPTION_NOT_SUPPORT_6RD,
	SFE_EXCEPTION_NO_SYNC_CB,
	SFE_EXCEPTION_MAX
} sfe_exception_t;

static char *sfe_exception_events_string[SFE_EXCEPTION_MAX] = {
	"IPV4_MSG_UNKNOW",
	"IPV6_MSG_UNKNOW",
	"CONNECTION_INVALID",
	"NOT_SUPPORT_BRIDGE",
	"TCP_INVALID",
	"PROTOCOL_NOT_SUPPORT",
	"SRC_DEV_NOT_L3",
	"DEST_DEV_NOT_L3",
	"CONFIG_ERROR",
	"CREATE_FAILED",
	"ENQUEUE_FAILED",
	"NOT_SUPPORT_6RD",
	"NO_SYNC_CB"
};

/*
 * Message type of queued response message
 */
typedef enum {
	SFE_MSG_TYPE_IPV4,
	SFE_MSG_TYPE_IPV6
} sfe_msg_types_t;

/*
 * Queued response message,
 *	will be sent back to caller in workqueue
 */
struct sfe_response_msg {
	struct list_head node;
	sfe_msg_types_t type;
	void *msg[0];
};

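/*
 * Note: msg[0] above is a zero-length array; the actual IPv4/IPv6 message is
 * copied into the memory allocated immediately behind struct sfe_response_msg
 * by sfe_alloc_response_msg(), so a single kzalloc() holds both the list node
 * and the queued message payload.
 */
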
/*
 * SFE context instance, private for SFE
 */
struct sfe_ctx_instance_internal {
	struct sfe_ctx_instance base;	/* Exported SFE context, is public to user of SFE */

	/*
	 * Control state.
	 */
	struct kobject *sys_sfe;	/* Sysfs linkage */

	struct list_head msg_queue;	/* Response message queue */
	spinlock_t lock;		/* Lock to protect message queue */

	struct work_struct work;	/* Work to send response message back to caller */

	sfe_ipv4_msg_callback_t __rcu ipv4_stats_sync_cb;	/* Callback to call to sync ipv4 statistics */
	void *ipv4_stats_sync_data;	/* Argument for above callback: ipv4_stats_sync_cb */

	sfe_ipv6_msg_callback_t __rcu ipv6_stats_sync_cb;	/* Callback to call to sync ipv6 statistics */
	void *ipv6_stats_sync_data;	/* Argument for above callback: ipv6_stats_sync_cb */

	u32 exceptions[SFE_EXCEPTION_MAX];	/* Statistics for exception */

	int32_t l2_feature_support;	/* L2 feature support */

};

static struct sfe_ctx_instance_internal __sfe_ctx;

/*
 * Convert public SFE context to internal context
 */
#define SFE_CTX_TO_PRIVATE(base) (struct sfe_ctx_instance_internal *)(base)
/*
 * Convert internal SFE context to public context
 */
#define SFE_CTX_TO_PUBLIC(intrv) (struct sfe_ctx_instance *)(intrv)

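/*
 * The two casts above are only valid because 'base' is the first member of
 * struct sfe_ctx_instance_internal, so the public and internal context
 * structures share the same starting address.
 */
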
/*
 * sfe_incr_exceptions()
 *	Increase an exception counter.
 *
 * TODO: Merge sfe_ctx stats to ipv4 and ipv6 percpu stats.
 */
static inline void sfe_incr_exceptions(sfe_exception_t except)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);
	sfe_ctx->exceptions[except]++;
	spin_unlock_bh(&sfe_ctx->lock);
}

/*
 * sfe_dev_is_layer_3_interface()
 *	Check if a network device is an IPv4 or IPv6 layer 3 interface
 *
 * @param dev network device to check
 * @param check_v4 when true, check for an IPv4 layer 3 interface (which has an IPv4 address); otherwise check for an IPv6 layer 3 interface (which has an IPv6 address)
 */
inline bool sfe_dev_is_layer_3_interface(struct net_device *dev, bool check_v4)
{
	struct in_device *in4_dev;
	struct inet6_dev *in6_dev;

	BUG_ON(!dev);

	if (likely(check_v4)) {
		/*
		 * Does our input device support IPv4 processing?
		 */
		in4_dev = (struct in_device *)dev->ip_ptr;
		if (unlikely(!in4_dev)) {
			return false;
		}

		/*
		 * Does it have an IPv4 address? If it doesn't, it could still be a MAP-T
		 * interface; otherwise we can't do anything interesting here!
		 */
		if (likely(in4_dev->ifa_list || (dev->priv_flags_ext & IFF_EXT_MAPT))) {
			return true;
		}
		return false;
	}

	/*
	 * Does our input device support IPv6 processing?
	 */
	in6_dev = (struct inet6_dev *)dev->ip6_ptr;
	if (unlikely(!in6_dev)) {
		return false;
	}

	/*
	 * Does it have an IPv6 address? If it doesn't, it could still be a MAP-T
	 * interface; otherwise we can't do anything interesting here!
	 */
	if (likely(!list_empty(&in6_dev->addr_list) || (dev->priv_flags_ext & IFF_EXT_MAPT))) {
		return true;
	}

	return false;
}

/*
 * sfe_routed_dev_allow()
 *	Check whether routed acceleration is allowed on this device
 */
static bool sfe_routed_dev_allow(struct net_device *dev, bool is_routed, bool check_v4)
{
	if (!is_routed) {
		return true;
	}

	if (sfe_dev_is_layer_3_interface(dev, check_v4)) {
		return true;
	}

	/*
	 * GRE / VxLAN devices do not have an IP address, so the L3 interface
	 * check will fail. Allow rule creation between a GRE / VxLAN device
	 * and the WAN device for routed flows.
	 */
	if (netif_is_vxlan(dev)) {
		return true;
	}

#ifdef SFE_GRE_TUN_ENABLE
	if (netif_is_gretap(dev) || netif_is_gre(dev)) {
		return true;
	}

	if (netif_is_ip6gre(dev) || netif_is_ip6gretap(dev)) {
		return true;
	}
#endif

	return false;
}

/* sfe_dev_has_hw_csum()
 *	Check whether the device supports hardware checksum offload
 */
bool sfe_dev_has_hw_csum(struct net_device *dev)
{
	if (netif_is_vxlan(dev)) {
		return false;
	}

#ifdef SFE_GRE_TUN_ENABLE
	if (netif_is_gre(dev) || netif_is_gretap(dev)) {
		return false;
	}

	if (netif_is_ip6gre(dev) || netif_is_ip6gretap(dev)) {
		return false;
	}
#endif

	return true;
}

/*
 * sfe_clean_response_msg_by_type()
 *	Clean queued response messages of the given type when ECM exits
 *
 * @param sfe_ctx SFE context
 * @param msg_type message type, ipv4 or ipv6
 */
static void sfe_clean_response_msg_by_type(struct sfe_ctx_instance_internal *sfe_ctx, sfe_msg_types_t msg_type)
{
	struct sfe_response_msg *response, *tmp;

	if (!sfe_ctx) {
		return;
	}

	spin_lock_bh(&sfe_ctx->lock);
	list_for_each_entry_safe(response, tmp, &sfe_ctx->msg_queue, node) {
		if (response->type == msg_type) {
			list_del(&response->node);
			/*
			 * Free response message
			 */
			kfree(response);
		}
	}
	spin_unlock_bh(&sfe_ctx->lock);

}

/*
 * sfe_process_response_msg()
 *	Send all pending response messages back to ECM by calling the callback included in each message
 *
 * @param work work structure
 */
296{
297 struct sfe_ctx_instance_internal *sfe_ctx = container_of(work, struct sfe_ctx_instance_internal, work);
298 struct sfe_response_msg *response;
299
300 spin_lock_bh(&sfe_ctx->lock);
301 while ((response = list_first_entry_or_null(&sfe_ctx->msg_queue, struct sfe_response_msg, node))) {
302 list_del(&response->node);
303 spin_unlock_bh(&sfe_ctx->lock);
304 rcu_read_lock();
305
306 /*
307 * Send response message back to caller
308 */
309 if ((response->type == SFE_MSG_TYPE_IPV4) && !sfe_ipv4_stopped(sfe_ctx)) {
310 struct sfe_ipv4_msg *msg = (struct sfe_ipv4_msg *)response->msg;
311 sfe_ipv4_msg_callback_t callback = (sfe_ipv4_msg_callback_t)msg->cm.cb;
312 if (callback) {
313 callback((void *)msg->cm.app_data, msg);
314 }
315 } else if ((response->type == SFE_MSG_TYPE_IPV6) && !sfe_ipv6_stopped(sfe_ctx)) {
316 struct sfe_ipv6_msg *msg = (struct sfe_ipv6_msg *)response->msg;
317 sfe_ipv6_msg_callback_t callback = (sfe_ipv6_msg_callback_t)msg->cm.cb;
318 if (callback) {
319 callback((void *)msg->cm.app_data, msg);
320 }
321 }
322
323 rcu_read_unlock();
324 /*
325 * Free response message
326 */
327 kfree(response);
328 spin_lock_bh(&sfe_ctx->lock);
329 }
330 spin_unlock_bh(&sfe_ctx->lock);
331}
332
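/*
 * Note on locking in sfe_process_response_msg(): the message queue lock is
 * released before each callback is invoked and re-taken afterwards, so the
 * registered ECM callbacks run outside sfe_ctx->lock and may themselves call
 * back into SFE APIs that take the same lock without deadlocking.
 */
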
/*
 * sfe_alloc_response_msg()
 *	Alloc and construct new response message
 *
 * @param type message type
 * @param msg used to construct response message if not NULL
 *
 * @return !NULL, success; NULL, failed
 */
static struct sfe_response_msg *
sfe_alloc_response_msg(sfe_msg_types_t type, void *msg)
{
	struct sfe_response_msg *response;
	int size;

	switch (type) {
	case SFE_MSG_TYPE_IPV4:
		size = sizeof(struct sfe_ipv4_msg);
		break;
	case SFE_MSG_TYPE_IPV6:
		size = sizeof(struct sfe_ipv6_msg);
		break;
	default:
		DEBUG_ERROR("message type %d not supported\n", type);
		return NULL;
	}

	response = (struct sfe_response_msg *)kzalloc(sizeof(struct sfe_response_msg) + size, GFP_ATOMIC);
	if (!response) {
		DEBUG_ERROR("allocate memory failed\n");
		return NULL;
	}

	response->type = type;

	if (msg) {
		memcpy(response->msg, msg, size);
	}

	return response;
}

/*
 * sfe_enqueue_msg()
 *	Queue response message
 *
 * @param sfe_ctx SFE context
 * @param response response message to be queued
 */
static inline void sfe_enqueue_msg(struct sfe_ctx_instance_internal *sfe_ctx, struct sfe_response_msg *response)
{
	spin_lock_bh(&sfe_ctx->lock);
	list_add_tail(&response->node, &sfe_ctx->msg_queue);
	spin_unlock_bh(&sfe_ctx->lock);

	schedule_work(&sfe_ctx->work);
}

/*
 * sfe_cmn_msg_init()
 *	Initialize the common message structure.
 *
 * @param ncm message to init
 * @param if_num interface number related with this message
 * @param type message type
 * @param len message length
 * @param cb callback function to process response of this message
 * @param app_data argument for above callback function
 */
static void sfe_cmn_msg_init(struct sfe_cmn_msg *ncm, u16 if_num, u32 type, u32 len, void *cb, void *app_data)
{
	ncm->interface = if_num;
	ncm->version = SFE_MESSAGE_VERSION;
	ncm->type = type;
	ncm->len = len;
	ncm->cb = (sfe_ptr_t)cb;
	ncm->app_data = (sfe_ptr_t)app_data;
}

/*
 * sfe_ipv4_stats_sync_callback()
 *	Synchronize a connection's state.
 *
 * @param sis SFE statistics from SFE core engine
 */
static void sfe_ipv4_stats_sync_callback(struct sfe_connection_sync *sis)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
	struct sfe_ipv4_msg msg;
	struct sfe_ipv4_conn_sync *sync_msg;
	sfe_ipv4_msg_callback_t sync_cb;

	rcu_read_lock();
	sync_cb = rcu_dereference(sfe_ctx->ipv4_stats_sync_cb);
	if (!sync_cb) {
		rcu_read_unlock();
		sfe_incr_exceptions(SFE_EXCEPTION_NO_SYNC_CB);
		return;
	}

	sync_msg = &msg.msg.conn_stats;

	memset(&msg, 0, sizeof(msg));
	sfe_cmn_msg_init(&msg.cm, 0, SFE_RX_CONN_STATS_SYNC_MSG,
			sizeof(struct sfe_ipv4_conn_sync), NULL, NULL);

	/*
	 * Fill connection specific information
	 */
	sync_msg->protocol = (u8)sis->protocol;
	sync_msg->flow_ip = sis->src_ip.ip;
	sync_msg->flow_ip_xlate = sis->src_ip_xlate.ip;
	sync_msg->flow_ident = sis->src_port;
	sync_msg->flow_ident_xlate = sis->src_port_xlate;

	sync_msg->return_ip = sis->dest_ip.ip;
	sync_msg->return_ip_xlate = sis->dest_ip_xlate.ip;
	sync_msg->return_ident = sis->dest_port;
	sync_msg->return_ident_xlate = sis->dest_port_xlate;

	/*
	 * Fill TCP protocol specific information
	 */
	if (sis->protocol == IPPROTO_TCP) {
		sync_msg->flow_max_window = sis->src_td_max_window;
		sync_msg->flow_end = sis->src_td_end;
		sync_msg->flow_max_end = sis->src_td_max_end;

		sync_msg->return_max_window = sis->dest_td_max_window;
		sync_msg->return_end = sis->dest_td_end;
		sync_msg->return_max_end = sis->dest_td_max_end;
	}

	/*
	 * Fill statistics information
	 */
	sync_msg->flow_rx_packet_count = sis->src_new_packet_count;
	sync_msg->flow_rx_byte_count = sis->src_new_byte_count;
	sync_msg->flow_tx_packet_count = sis->dest_new_packet_count;
	sync_msg->flow_tx_byte_count = sis->dest_new_byte_count;

	sync_msg->return_rx_packet_count = sis->dest_new_packet_count;
	sync_msg->return_rx_byte_count = sis->dest_new_byte_count;
	sync_msg->return_tx_packet_count = sis->src_new_packet_count;
	sync_msg->return_tx_byte_count = sis->src_new_byte_count;

	/*
	 * Fill expiration time to extend, in unit of msec
	 */
	sync_msg->inc_ticks = (((u32)sis->delta_jiffies) * MSEC_PER_SEC)/HZ;

	/*
	 * Fill other information
	 */
	switch (sis->reason) {
	case SFE_SYNC_REASON_DESTROY:
		sync_msg->reason = SFE_RULE_SYNC_REASON_DESTROY;
		break;
	case SFE_SYNC_REASON_FLUSH:
		sync_msg->reason = SFE_RULE_SYNC_REASON_FLUSH;
		break;
	default:
		sync_msg->reason = SFE_RULE_SYNC_REASON_STATS;
		break;
	}

	/*
	 * SFE sync calling is executed in a timer, so we can redirect it to ECM directly.
	 */
	sync_cb(sfe_ctx->ipv4_stats_sync_data, &msg);
	rcu_read_unlock();
}

/*
 * sfe_recv_parse_l2()
 *	Parse L2 headers
 *
 * Returns true if the packet is parsed and false otherwise.
 */
static bool sfe_recv_parse_l2(struct net_device *dev, struct sk_buff *skb, struct sfe_l2_info *l2_info)
{
	/*
	 * l2_hdr_offset will not change as we parse more L2.5 headers
	 * TODO: Move from storing offsets to storing pointers
	 */
	sfe_l2_hdr_offset_set(l2_info, ((skb->data - ETH_HLEN) - skb->head));

	/*
	 * VLAN parsing
	 */
	if (unlikely(!sfe_vlan_check_and_parse_tag(skb, l2_info))) {
		return false;
	}

	/*
	 * PPPoE parsing
	 */
	if (htons(ETH_P_PPP_SES) == skb->protocol) {
		/*
		 * Parse only PPPoE session packets
		 * skb->data is pointing to PPPoE hdr
		 */
		if (!sfe_pppoe_parse_hdr(skb, l2_info)) {

			/*
			 * For exception from PPPoE return from here without modifying the skb->data
			 * This includes non-IPv4/v6 cases also
			 */
			return false;
		}

		/*
		 * Pull by L2 header size
		 */
		__skb_pull(skb, sfe_l2_hdr_size_get(l2_info));
	}
	return true;
}

/*
 * sfe_recv_undo_parse_l2()
 */
static void sfe_recv_undo_parse_l2(struct net_device *dev, struct sk_buff *skb, struct sfe_l2_info *l2_info)
{
	/*
	 * PPPoE undo
	 */
	__skb_push(skb, sfe_l2_hdr_size_get(l2_info));

	/*
	 * VLAN undo
	 */
	sfe_vlan_undo_parse(skb, l2_info);
}
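
/*
 * Note: sfe_recv_undo_parse_l2() reverses sfe_recv_parse_l2() in the opposite
 * order: the PPPoE pull is undone first (the skb data pointer is pushed back),
 * then the VLAN parse state is undone, so an exception packet reaches Linux
 * with its original L2 headers in place.
 */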

/*
 * sfe_create_ipv4_rule_msg()
 *	Convert create message format from ecm to sfe
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv4 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_create_ipv4_rule_msg(struct sfe_ctx_instance_internal *sfe_ctx, struct sfe_ipv4_msg *msg)
{
	struct net_device *src_dev = NULL;
	struct net_device *dest_dev = NULL;
	struct sfe_response_msg *response;
	enum sfe_cmn_response ret = SFE_TX_SUCCESS;
	bool is_routed = true;
	bool cfg_err;

	response = sfe_alloc_response_msg(SFE_MSG_TYPE_IPV4, msg);
	if (!response) {
		sfe_incr_exceptions(SFE_EXCEPTION_ENQUEUE_FAILED);
		return SFE_TX_FAILURE_QUEUE;
	}

	if (!(msg->msg.rule_create.valid_flags & SFE_RULE_CREATE_CONN_VALID)) {
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CONNECTION_INVALID);
		goto failed_ret;
	}

	switch (msg->msg.rule_create.tuple.protocol) {
	case IPPROTO_TCP:
		if (!(msg->msg.rule_create.valid_flags & SFE_RULE_CREATE_TCP_VALID)) {
			ret = SFE_CMN_RESPONSE_EMSG;
			sfe_incr_exceptions(SFE_EXCEPTION_TCP_INVALID);
			goto failed_ret;
		}

		/* Fall through to IPPROTO_UDP. */
	case IPPROTO_UDP:
		break;

	case IPPROTO_GRE:
		break;

	default:
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_PROTOCOL_NOT_SUPPORT);
		goto failed_ret;
	}

	/*
	 * Bridge flows are accelerated if L2 feature is enabled.
	 */
	if (msg->msg.rule_create.rule_flags & SFE_RULE_CREATE_FLAG_BRIDGE_FLOW) {
		if (!sfe_is_l2_feature_enabled()) {
			ret = SFE_CMN_RESPONSE_EINTERFACE;
			sfe_incr_exceptions(SFE_EXCEPTION_NOT_SUPPORT_BRIDGE);
			goto failed_ret;
		}

		is_routed = false;
	}

	/*
	 * Does our input device support IP processing?
	 */
	src_dev = dev_get_by_index(&init_net, msg->msg.rule_create.conn_rule.flow_top_interface_num);
	if (!src_dev || !sfe_routed_dev_allow(src_dev, is_routed, true)) {
		ret = SFE_CMN_RESPONSE_EINTERFACE;
		sfe_incr_exceptions(SFE_EXCEPTION_SRC_DEV_NOT_L3);
		goto failed_ret;
	}

	/*
	 * Check whether L2 feature is disabled and rule flag is configured to use bottom interface
	 */
	cfg_err = (msg->msg.rule_create.rule_flags & SFE_RULE_CREATE_FLAG_USE_FLOW_BOTTOM_INTERFACE) && !sfe_is_l2_feature_enabled();
	if (cfg_err) {
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CFG_ERR);
		goto failed_ret;
	}

	/*
	 * Does our output device support IP processing?
	 */
	dest_dev = dev_get_by_index(&init_net, msg->msg.rule_create.conn_rule.return_top_interface_num);
	if (!dest_dev || !sfe_routed_dev_allow(dest_dev, is_routed, true)) {
		ret = SFE_CMN_RESPONSE_EINTERFACE;
		sfe_incr_exceptions(SFE_EXCEPTION_DEST_DEV_NOT_L3);
		goto failed_ret;
	}

	/*
	 * Check whether L2 feature is disabled and rule flag is configured to use bottom interface
	 */
	cfg_err = (msg->msg.rule_create.rule_flags & SFE_RULE_CREATE_FLAG_USE_RETURN_BOTTOM_INTERFACE) && !sfe_is_l2_feature_enabled();
	if (cfg_err) {
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CFG_ERR);
		goto failed_ret;
	}

	if (!sfe_ipv4_create_rule(&msg->msg.rule_create)) {
		/* success */
		ret = SFE_CMN_RESPONSE_ACK;
	} else {
		/* Failed */
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CREATE_FAILED);
	}

	/*
	 * Fall through
	 */
failed_ret:
	if (src_dev) {
		dev_put(src_dev);
	}

	if (dest_dev) {
		dev_put(dest_dev);
	}

	/*
	 * Try to queue response message
	 */
	((struct sfe_ipv4_msg *)response->msg)->cm.response = msg->cm.response = ret;
	sfe_enqueue_msg(sfe_ctx, response);

	return SFE_TX_SUCCESS;
}

/*
 * sfe_destroy_ipv4_rule_msg()
 *	Convert destroy message format from ecm to sfe
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv4 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_destroy_ipv4_rule_msg(struct sfe_ctx_instance_internal *sfe_ctx, struct sfe_ipv4_msg *msg)
{
	struct sfe_response_msg *response;

	response = sfe_alloc_response_msg(SFE_MSG_TYPE_IPV4, msg);
	if (!response) {
		sfe_incr_exceptions(SFE_EXCEPTION_ENQUEUE_FAILED);
		return SFE_TX_FAILURE_QUEUE;
	}

	sfe_ipv4_destroy_rule(&msg->msg.rule_destroy);

	/*
	 * Try to queue response message
	 */
	((struct sfe_ipv4_msg *)response->msg)->cm.response = msg->cm.response = SFE_CMN_RESPONSE_ACK;
	sfe_enqueue_msg(sfe_ctx, response);

	return SFE_TX_SUCCESS;
}

/*
 * sfe_ipv4_tx()
 *	Transmit an IPv4 message to the sfe
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv4 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_ipv4_tx(struct sfe_ctx_instance *sfe_ctx, struct sfe_ipv4_msg *msg)
{
	switch (msg->cm.type) {
	case SFE_TX_CREATE_RULE_MSG:
		return sfe_create_ipv4_rule_msg(SFE_CTX_TO_PRIVATE(sfe_ctx), msg);
	case SFE_TX_DESTROY_RULE_MSG:
		return sfe_destroy_ipv4_rule_msg(SFE_CTX_TO_PRIVATE(sfe_ctx), msg);
	default:
		sfe_incr_exceptions(SFE_EXCEPTION_IPV4_MSG_UNKNOW);
		return SFE_TX_FAILURE_NOT_ENABLED;
	}
}
EXPORT_SYMBOL(sfe_ipv4_tx);

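/*
 * Usage sketch (illustrative only; my_ipv4_cb and my_app_data are placeholder
 * names, not part of this file): a client such as ECM typically registers a
 * notifier via sfe_ipv4_notify_register() (below) and then pushes rules
 * through the Tx entry point above, for example:
 *
 *	struct sfe_ctx_instance *ctx;
 *	struct sfe_ipv4_msg nim;
 *
 *	ctx = sfe_ipv4_notify_register(my_ipv4_cb, my_app_data);
 *	sfe_ipv4_msg_init(&nim, 0, SFE_TX_CREATE_RULE_MSG,
 *			  sizeof(nim.msg.rule_create), my_ipv4_cb, my_app_data);
 *	(fill nim.msg.rule_create: tuple, conn_rule, valid_flags, rule_flags)
 *	sfe_ipv4_tx(ctx, &nim);
 *
 * sfe_ipv4_tx() returns SFE_TX_SUCCESS once the request has been accepted;
 * the actual ACK or error response is delivered asynchronously through the
 * queued-response work, which invokes the callback stored in the message.
 */
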
/*
 * sfe_ipv4_msg_init()
 *	Initialize IPv4 message.
 */
void sfe_ipv4_msg_init(struct sfe_ipv4_msg *nim, u16 if_num, u32 type, u32 len,
		       sfe_ipv4_msg_callback_t cb, void *app_data)
{
	sfe_cmn_msg_init(&nim->cm, if_num, type, len, (void *)cb, app_data);
}
EXPORT_SYMBOL(sfe_ipv4_msg_init);

/*
 * sfe_ipv4_max_conn_count()
 *	Return maximum number of entries SFE supported
 */
int sfe_ipv4_max_conn_count(void)
{
	return max_ipv4_conn;
}
EXPORT_SYMBOL(sfe_ipv4_max_conn_count);

/*
 * sfe_ipv4_notify_register()
 *	Register a notifier callback for IPv4 messages from SFE
 *
 * @param cb The callback pointer
 * @param app_data The application context for this message
 *
 * @return struct sfe_ctx_instance * The SFE context
 */
struct sfe_ctx_instance *sfe_ipv4_notify_register(sfe_ipv4_msg_callback_t cb, void *app_data)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);
	/*
	 * Hook the shortcut sync callback.
	 */
	if (cb && !sfe_ctx->ipv4_stats_sync_cb) {
		sfe_ipv4_register_sync_rule_callback(sfe_ipv4_stats_sync_callback);
	}

	rcu_assign_pointer(sfe_ctx->ipv4_stats_sync_cb, cb);
	sfe_ctx->ipv4_stats_sync_data = app_data;

	spin_unlock_bh(&sfe_ctx->lock);

	return SFE_CTX_TO_PUBLIC(sfe_ctx);
}
EXPORT_SYMBOL(sfe_ipv4_notify_register);

/*
 * sfe_ipv4_notify_unregister()
 *	Unregister a notifier callback for IPv4 messages from SFE
 */
void sfe_ipv4_notify_unregister(void)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);
	/*
	 * Unregister our sync callback.
	 */
	if (sfe_ctx->ipv4_stats_sync_cb) {
		sfe_ipv4_register_sync_rule_callback(NULL);
		rcu_assign_pointer(sfe_ctx->ipv4_stats_sync_cb, NULL);
		sfe_ctx->ipv4_stats_sync_data = NULL;
	}
	spin_unlock_bh(&sfe_ctx->lock);

	sfe_clean_response_msg_by_type(sfe_ctx, SFE_MSG_TYPE_IPV4);

	return;
}
EXPORT_SYMBOL(sfe_ipv4_notify_unregister);

/*
 * sfe_ipv6_stats_sync_callback()
 *	Synchronize a connection's state.
 */
static void sfe_ipv6_stats_sync_callback(struct sfe_connection_sync *sis)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
	struct sfe_ipv6_msg msg;
	struct sfe_ipv6_conn_sync *sync_msg;
	sfe_ipv6_msg_callback_t sync_cb;

	rcu_read_lock();
	sync_cb = rcu_dereference(sfe_ctx->ipv6_stats_sync_cb);
	if (!sync_cb) {
		rcu_read_unlock();
		sfe_incr_exceptions(SFE_EXCEPTION_NO_SYNC_CB);
		return;
	}

	sync_msg = &msg.msg.conn_stats;

	memset(&msg, 0, sizeof(msg));
	sfe_cmn_msg_init(&msg.cm, 0, SFE_RX_CONN_STATS_SYNC_MSG,
			sizeof(struct sfe_ipv6_conn_sync), NULL, NULL);

	/*
	 * Fill connection specific information
	 */
	sync_msg->protocol = (u8)sis->protocol;
	sfe_ipv6_addr_copy(sis->src_ip.ip6, sync_msg->flow_ip);
	sync_msg->flow_ident = sis->src_port;

	sfe_ipv6_addr_copy(sis->dest_ip.ip6, sync_msg->return_ip);
	sync_msg->return_ident = sis->dest_port;

	/*
	 * Fill TCP protocol specific information
	 */
	if (sis->protocol == IPPROTO_TCP) {
		sync_msg->flow_max_window = sis->src_td_max_window;
		sync_msg->flow_end = sis->src_td_end;
		sync_msg->flow_max_end = sis->src_td_max_end;

		sync_msg->return_max_window = sis->dest_td_max_window;
		sync_msg->return_end = sis->dest_td_end;
		sync_msg->return_max_end = sis->dest_td_max_end;
	}

	/*
	 * Fill statistics information
	 */
	sync_msg->flow_rx_packet_count = sis->src_new_packet_count;
	sync_msg->flow_rx_byte_count = sis->src_new_byte_count;
	sync_msg->flow_tx_packet_count = sis->dest_new_packet_count;
	sync_msg->flow_tx_byte_count = sis->dest_new_byte_count;

	sync_msg->return_rx_packet_count = sis->dest_new_packet_count;
	sync_msg->return_rx_byte_count = sis->dest_new_byte_count;
	sync_msg->return_tx_packet_count = sis->src_new_packet_count;
	sync_msg->return_tx_byte_count = sis->src_new_byte_count;

	/*
	 * Fill expiration time to extend, in unit of msec
	 */
	sync_msg->inc_ticks = (((u32)sis->delta_jiffies) * MSEC_PER_SEC)/HZ;

	/*
	 * Fill other information
	 */
	switch (sis->reason) {
	case SFE_SYNC_REASON_DESTROY:
		sync_msg->reason = SFE_RULE_SYNC_REASON_DESTROY;
		break;
	case SFE_SYNC_REASON_FLUSH:
		sync_msg->reason = SFE_RULE_SYNC_REASON_FLUSH;
		break;
	default:
		sync_msg->reason = SFE_RULE_SYNC_REASON_STATS;
		break;
	}

	/*
	 * SFE sync calling is executed in a timer, so we can redirect it to ECM directly.
	 */
	sync_cb(sfe_ctx->ipv6_stats_sync_data, &msg);
	rcu_read_unlock();
}

/*
 * sfe_create_ipv6_rule_msg()
 *	Convert create message format from ecm to sfe
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv6 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_create_ipv6_rule_msg(struct sfe_ctx_instance_internal *sfe_ctx, struct sfe_ipv6_msg *msg)
{
	struct net_device *src_dev = NULL;
	struct net_device *dest_dev = NULL;
	struct sfe_response_msg *response;
	enum sfe_cmn_response ret = SFE_TX_SUCCESS;
	bool is_routed = true;
	bool cfg_err;

	response = sfe_alloc_response_msg(SFE_MSG_TYPE_IPV6, msg);
	if (!response) {
		sfe_incr_exceptions(SFE_EXCEPTION_ENQUEUE_FAILED);
		return SFE_TX_FAILURE_QUEUE;
	}

	if (!(msg->msg.rule_create.valid_flags & SFE_RULE_CREATE_CONN_VALID)) {
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CONNECTION_INVALID);
		goto failed_ret;
	}

	/*
	 * Bridge flows are accelerated if L2 feature is enabled.
	 */
	if (msg->msg.rule_create.rule_flags & SFE_RULE_CREATE_FLAG_BRIDGE_FLOW) {
		if (!sfe_is_l2_feature_enabled()) {
			ret = SFE_CMN_RESPONSE_EINTERFACE;
			sfe_incr_exceptions(SFE_EXCEPTION_NOT_SUPPORT_BRIDGE);
			goto failed_ret;
		}
		is_routed = false;
	}

	switch (msg->msg.rule_create.tuple.protocol) {

	case IPPROTO_TCP:
		if (!(msg->msg.rule_create.valid_flags & SFE_RULE_CREATE_TCP_VALID)) {
			ret = SFE_CMN_RESPONSE_EMSG;
			sfe_incr_exceptions(SFE_EXCEPTION_TCP_INVALID);
			goto failed_ret;
		}

		break;

	case IPPROTO_UDP:
		break;

	case IPPROTO_IPIP:
		break;

	case IPPROTO_GRE:
		break;

	default:
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_PROTOCOL_NOT_SUPPORT);
		goto failed_ret;
	}

	/*
	 * Does our input device support IP processing?
	 */
	src_dev = dev_get_by_index(&init_net, msg->msg.rule_create.conn_rule.flow_top_interface_num);
	if (!src_dev || !sfe_routed_dev_allow(src_dev, is_routed, false)) {
		ret = SFE_CMN_RESPONSE_EINTERFACE;
		sfe_incr_exceptions(SFE_EXCEPTION_SRC_DEV_NOT_L3);
		goto failed_ret;
	}

	/*
	 * Check whether L2 feature is disabled and rule flag is configured to use bottom interface
	 */
	cfg_err = (msg->msg.rule_create.rule_flags & SFE_RULE_CREATE_FLAG_USE_FLOW_BOTTOM_INTERFACE) && !sfe_is_l2_feature_enabled();
	if (cfg_err) {
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CFG_ERR);
		goto failed_ret;
	}

	/*
	 * Does our output device support IP processing?
	 */
	dest_dev = dev_get_by_index(&init_net, msg->msg.rule_create.conn_rule.return_top_interface_num);
	if (!dest_dev || !sfe_routed_dev_allow(dest_dev, is_routed, false)) {
		ret = SFE_CMN_RESPONSE_EINTERFACE;
		sfe_incr_exceptions(SFE_EXCEPTION_DEST_DEV_NOT_L3);
		goto failed_ret;
	}

	/*
	 * Check whether L2 feature is disabled and rule flag is configured to use bottom interface
	 */
	cfg_err = (msg->msg.rule_create.rule_flags & SFE_RULE_CREATE_FLAG_USE_RETURN_BOTTOM_INTERFACE) && !sfe_is_l2_feature_enabled();
	if (cfg_err) {
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CFG_ERR);
		goto failed_ret;
	}

	if (!sfe_ipv6_create_rule(&msg->msg.rule_create)) {
		/* success */
		ret = SFE_CMN_RESPONSE_ACK;
	} else {
		/* Failed */
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CREATE_FAILED);
	}

	/*
	 * Fall through
	 */
failed_ret:
	if (src_dev) {
		dev_put(src_dev);
	}

	if (dest_dev) {
		dev_put(dest_dev);
	}

	/*
	 * Try to queue response message
	 */
	((struct sfe_ipv6_msg *)response->msg)->cm.response = msg->cm.response = ret;
	sfe_enqueue_msg(sfe_ctx, response);

	return SFE_TX_SUCCESS;
}

/*
 * sfe_destroy_ipv6_rule_msg()
 *	Convert destroy message format from ecm to sfe
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv6 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_destroy_ipv6_rule_msg(struct sfe_ctx_instance_internal *sfe_ctx, struct sfe_ipv6_msg *msg)
{
	struct sfe_response_msg *response;

	response = sfe_alloc_response_msg(SFE_MSG_TYPE_IPV6, msg);
	if (!response) {
		sfe_incr_exceptions(SFE_EXCEPTION_ENQUEUE_FAILED);
		return SFE_TX_FAILURE_QUEUE;
	}

	sfe_ipv6_destroy_rule(&msg->msg.rule_destroy);

	/*
	 * Try to queue response message
	 */
	((struct sfe_ipv6_msg *)response->msg)->cm.response = msg->cm.response = SFE_CMN_RESPONSE_ACK;
	sfe_enqueue_msg(sfe_ctx, response);

	return SFE_TX_SUCCESS;
}

/*
 * sfe_ipv6_tx()
 *	Transmit an IPv6 message to the sfe
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv6 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_ipv6_tx(struct sfe_ctx_instance *sfe_ctx, struct sfe_ipv6_msg *msg)
{
	switch (msg->cm.type) {
	case SFE_TX_CREATE_RULE_MSG:
		return sfe_create_ipv6_rule_msg(SFE_CTX_TO_PRIVATE(sfe_ctx), msg);
	case SFE_TX_DESTROY_RULE_MSG:
		return sfe_destroy_ipv6_rule_msg(SFE_CTX_TO_PRIVATE(sfe_ctx), msg);
	default:
		sfe_incr_exceptions(SFE_EXCEPTION_IPV6_MSG_UNKNOW);
		return SFE_TX_FAILURE_NOT_ENABLED;
	}
}
EXPORT_SYMBOL(sfe_ipv6_tx);

/*
 * sfe_ipv6_msg_init()
 *	Initialize IPv6 message.
 */
void sfe_ipv6_msg_init(struct sfe_ipv6_msg *nim, u16 if_num, u32 type, u32 len,
		       sfe_ipv6_msg_callback_t cb, void *app_data)
{
	sfe_cmn_msg_init(&nim->cm, if_num, type, len, (void *)cb, app_data);
}
EXPORT_SYMBOL(sfe_ipv6_msg_init);

/*
 * sfe_ipv6_max_conn_count()
 *	Return maximum number of entries SFE supported
 */
int sfe_ipv6_max_conn_count(void)
{
	return max_ipv6_conn;
}
EXPORT_SYMBOL(sfe_ipv6_max_conn_count);

/*
 * sfe_ipv6_notify_register()
 *	Register a notifier callback for IPv6 messages from SFE
 *
 * @param cb The callback pointer
 * @param app_data The application context for this message
 *
 * @return struct sfe_ctx_instance * The SFE context
 */
struct sfe_ctx_instance *sfe_ipv6_notify_register(sfe_ipv6_msg_callback_t cb, void *app_data)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);
	/*
	 * Hook the shortcut sync callback.
	 */
	if (cb && !sfe_ctx->ipv6_stats_sync_cb) {
		sfe_ipv6_register_sync_rule_callback(sfe_ipv6_stats_sync_callback);
	}

	rcu_assign_pointer(sfe_ctx->ipv6_stats_sync_cb, cb);
	sfe_ctx->ipv6_stats_sync_data = app_data;

	spin_unlock_bh(&sfe_ctx->lock);

	return SFE_CTX_TO_PUBLIC(sfe_ctx);
}
EXPORT_SYMBOL(sfe_ipv6_notify_register);

/*
 * sfe_ipv6_notify_unregister()
 *	Unregister a notifier callback for IPv6 messages from SFE
 */
void sfe_ipv6_notify_unregister(void)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);
	/*
	 * Unregister our sync callback.
	 */
	if (sfe_ctx->ipv6_stats_sync_cb) {
		sfe_ipv6_register_sync_rule_callback(NULL);
		rcu_assign_pointer(sfe_ctx->ipv6_stats_sync_cb, NULL);
		sfe_ctx->ipv6_stats_sync_data = NULL;
	}
	spin_unlock_bh(&sfe_ctx->lock);

	sfe_clean_response_msg_by_type(sfe_ctx, SFE_MSG_TYPE_IPV6);

	return;
}
EXPORT_SYMBOL(sfe_ipv6_notify_unregister);

/*
 * sfe_tun6rd_tx()
 *	Transmit a tun6rd message to sfe engine
 */
sfe_tx_status_t sfe_tun6rd_tx(struct sfe_ctx_instance *sfe_ctx, struct sfe_tun6rd_msg *msg)
{
	sfe_incr_exceptions(SFE_EXCEPTION_NOT_SUPPORT_6RD);
	return SFE_TX_FAILURE_NOT_ENABLED;
}
EXPORT_SYMBOL(sfe_tun6rd_tx);

/*
 * sfe_tun6rd_msg_init()
 *	Initialize sfe_tun6rd msg.
 */
void sfe_tun6rd_msg_init(struct sfe_tun6rd_msg *ncm, u16 if_num, u32 type, u32 len, void *cb, void *app_data)
{
	sfe_cmn_msg_init(&ncm->cm, if_num, type, len, cb, app_data);
}
EXPORT_SYMBOL(sfe_tun6rd_msg_init);

/*
 * sfe_recv()
 *	Handle packet receives.
 *
 * Returns 1 if the packet is forwarded or 0 if it isn't.
 */
int sfe_recv(struct sk_buff *skb)
{
	struct net_device *dev;
	struct sfe_l2_info l2_info;
	int ret;

	/*
	 * We know that for the vast majority of packets we need the transport
	 * layer header so we may as well start to fetch it now!
	 */
	prefetch(skb->data + 32);
	barrier();

	dev = skb->dev;

	/*
	 * Setting parse flags to 0 since l2_info is passed for non L2.5 header case as well
	 */
	l2_info.parse_flags = 0;
	l2_info.l2_hdr_size = 0;
	l2_info.vlan_hdr_cnt = 0;

#ifdef CONFIG_NET_CLS_ACT
	/*
	 * If an ingress Qdisc is configured and the packet has not been processed
	 * by it yet, we can not accelerate this packet.
	 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0))
	if (dev->ingress_queue && !(skb->tc_verd & TC_NCLS)) {
		return 0;
	}
#else
	if (rcu_access_pointer(dev->miniq_ingress) && !skb->tc_skip_classify) {
		return 0;
	}
#endif
#endif

	/*
	 * If l2_feature is enabled, we need not check if src dev is L3 interface since bridge flow offload is supported.
	 * If l2_feature is disabled, then we make sure src dev is L3 interface to avoid cost of rule lookup for L2 flows
	 */
	switch (ntohs(skb->protocol)) {
	case ETH_P_IP:
		if (likely(sfe_is_l2_feature_enabled()) || sfe_dev_is_layer_3_interface(dev, true)) {
			return sfe_ipv4_recv(dev, skb, &l2_info, false);
		}

		DEBUG_TRACE("No IPv4 address for device: %s skb=%px\n", dev->name, skb);
		return 0;

	case ETH_P_IPV6:
		if (likely(sfe_is_l2_feature_enabled()) || sfe_dev_is_layer_3_interface(dev, false)) {
			return sfe_ipv6_recv(dev, skb, &l2_info, false);
		}

		DEBUG_TRACE("No IPv6 address for device: %s skb=%px\n", dev->name, skb);
		return 0;

	default:
		break;
	}

	/*
	 * Stop L2 processing if L2 feature is disabled.
	 */
	if (!sfe_is_l2_feature_enabled()) {
		DEBUG_TRACE("Unsupported protocol %#x %s (L2 feature is disabled) skb=%px\n",
				ntohs(skb->protocol), dev->name, skb);
		return 0;
	}

	/*
	 * Parse the L2 headers to find the L3 protocol and the L2 header offset
	 */
	if (unlikely(!sfe_recv_parse_l2(dev, skb, &l2_info))) {
		DEBUG_TRACE("%px: Invalid L2.5 header format with protocol : %x\n", skb, ntohs(skb->protocol));
		goto send_to_linux;
	}

	/*
	 * Protocol in l2_info is expected to be in host byte order.
	 * PPPoE is doing it in the sfe_pppoe_parse_hdr()
	 */
	if (likely(l2_info.protocol == ETH_P_IP)) {
		ret = sfe_ipv4_recv(dev, skb, &l2_info, false);
		if (unlikely(!ret)) {
			goto send_to_linux;
		}
		return ret;
	}

	if (likely(l2_info.protocol == ETH_P_IPV6)) {
		ret = sfe_ipv6_recv(dev, skb, &l2_info, false);
		if (unlikely(!ret)) {
			goto send_to_linux;
		}
		return ret;
	}

	DEBUG_TRACE("Non-IP(%x) %s skb=%px skb_vlan:%x/%x/%x skb_proto=%x\n",
			l2_info.protocol, dev->name, skb,
			ntohs(skb->vlan_proto), skb->vlan_tci, skb_vlan_tag_present(skb),
			htons(skb->protocol));

send_to_linux:
	/*
	 * Push the data back before sending to linux if -
	 * a. There is any exception from IPV4/V6
	 * b. If the next protocol is neither IPV4 nor IPV6
	 */
	sfe_recv_undo_parse_l2(dev, skb, &l2_info);

	return 0;
}

/*
 * sfe_get_exceptions()
 *	Dump exception counters
 */
static ssize_t sfe_get_exceptions(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	int idx, len;
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);
	for (len = 0, idx = 0; idx < SFE_EXCEPTION_MAX; idx++) {
		if (sfe_ctx->exceptions[idx]) {
			len += snprintf(buf + len, (ssize_t)(PAGE_SIZE - len), "%s = %d\n", sfe_exception_events_string[idx], sfe_ctx->exceptions[idx]);
		}
	}
	spin_unlock_bh(&sfe_ctx->lock);

	return len;
}

/*
 * sysfs attributes.
 */
static const struct device_attribute sfe_exceptions_attr =
	__ATTR(exceptions, S_IRUGO, sfe_get_exceptions, NULL);

/*
 * sfe_is_l2_feature_enabled()
 *	Check whether the L2 features flag (VLAN, PPPoE, bridge and tunnels) is enabled.
 *
 * 32bit read is atomic. No need of locks.
 */
bool sfe_is_l2_feature_enabled()
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
	return (sfe_ctx->l2_feature_support == 1);
}
EXPORT_SYMBOL(sfe_is_l2_feature_enabled);

/*
 * sfe_get_l2_feature()
 *	Report whether the L2 feature is enabled or disabled
 */
ssize_t sfe_get_l2_feature(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
	ssize_t len;

	spin_lock_bh(&sfe_ctx->lock);
	len = snprintf(buf, (ssize_t)(PAGE_SIZE), "L2 feature is %s\n", sfe_ctx->l2_feature_support ? "enabled" : "disabled");
	spin_unlock_bh(&sfe_ctx->lock);
	return len;
}

/*
 * sfe_set_l2_feature()
 *	Enable or disable l2 features flag.
 */
ssize_t sfe_set_l2_feature(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	unsigned long val;
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
	int ret;
	ret = sscanf(buf, "%lu", &val);

	if (ret != 1) {
		pr_err("Wrong input, %s\n", buf);
		return -EINVAL;
	}

	if (val != 1 && val != 0) {
		pr_err("Input should be either 1 or 0, (%s)\n", buf);
		return -EINVAL;
	}

	spin_lock_bh(&sfe_ctx->lock);

	if (sfe_ctx->l2_feature_support && val) {
		spin_unlock_bh(&sfe_ctx->lock);
		pr_err("L2 feature is already enabled\n");
		return -EINVAL;
	}

	if (!sfe_ctx->l2_feature_support && !val) {
		spin_unlock_bh(&sfe_ctx->lock);
		pr_err("L2 feature is already disabled\n");
		return -EINVAL;
	}

	sfe_ctx->l2_feature_support = val;
	spin_unlock_bh(&sfe_ctx->lock);

	return count;
}

static const struct device_attribute sfe_l2_feature_attr =
	__ATTR(l2_feature, 0644, sfe_get_l2_feature, sfe_set_l2_feature);
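
/*
 * The two attributes above are created on the "sfe" kobject registered in
 * sfe_init_if() with a NULL parent, so they normally appear directly under
 * the sysfs root, e.g. (assuming the usual sysfs mount at /sys):
 *
 *	cat /sys/sfe/exceptions
 *	echo 1 > /sys/sfe/l2_feature	(enable L2/bridge, VLAN, PPPoE handling)
 *	echo 0 > /sys/sfe/l2_feature	(disable it again)
 *
 * Writing the value the flag already holds is rejected with -EINVAL.
 */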

/*
 * sfe_init_if()
 */
int sfe_init_if(void)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
	int result = -1;

	/*
	 * L2 feature is disabled by default
	 */
	sfe_ctx->l2_feature_support = 0;

	/*
	 * Create sys/sfe
	 */
	sfe_ctx->sys_sfe = kobject_create_and_add("sfe", NULL);
	if (!sfe_ctx->sys_sfe) {
		DEBUG_ERROR("failed to register sfe\n");
		goto exit1;
	}

	/*
	 * Create sys/sfe/exceptions
	 */
	result = sysfs_create_file(sfe_ctx->sys_sfe, &sfe_exceptions_attr.attr);
	if (result) {
		DEBUG_ERROR("failed to register exceptions file: %d\n", result);
		goto exit2;
	}

	result = sysfs_create_file(sfe_ctx->sys_sfe, &sfe_l2_feature_attr.attr);
	if (result) {
		DEBUG_ERROR("failed to register L2 feature flag sysfs file: %d\n", result);
		goto exit2;
	}

	spin_lock_init(&sfe_ctx->lock);

	INIT_LIST_HEAD(&sfe_ctx->msg_queue);
	INIT_WORK(&sfe_ctx->work, sfe_process_response_msg);

	/*
	 * Hook the receive path in the network stack.
	 */
	BUG_ON(athrs_fast_nat_recv);
	RCU_INIT_POINTER(athrs_fast_nat_recv, sfe_recv);

	return 0;
exit2:
	kobject_put(sfe_ctx->sys_sfe);
exit1:
	return result;
}

/*
 * sfe_exit_if()
 */
void sfe_exit_if(void)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	/*
	 * Unregister our receive callback.
	 */
	RCU_INIT_POINTER(athrs_fast_nat_recv, NULL);

	/*
	 * Wait for all callbacks to complete.
	 */
	rcu_barrier();

	/*
	 * Destroy all connections.
	 */
	sfe_ipv4_destroy_all_rules_for_dev(NULL);
	sfe_ipv6_destroy_all_rules_for_dev(NULL);

	/*
	 * Stop the work queue and flush all pending messages in the queue
	 */
	cancel_work_sync(&sfe_ctx->work);
	sfe_process_response_msg(&sfe_ctx->work);

	/*
	 * Unregister our sync callback.
	 */
	sfe_ipv4_notify_unregister();
	sfe_ipv6_notify_unregister();

	kobject_put(sfe_ctx->sys_sfe);

	return;
}