/*
 * sfe.c
 *	API for shortcut forwarding engine.
 *
 * Copyright (c) 2015,2016, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/module.h>
#include <linux/version.h>
#include <linux/sysfs.h>
#include <linux/skbuff.h>
#include <net/addrconf.h>
#include <linux/inetdevice.h>
#include <net/pkt_sched.h>
#include <net/vxlan.h>
#include <net/gre.h>

#include "sfe_debug.h"
#include "sfe_api.h"
#include "sfe.h"
#include "sfe_pppoe.h"
#include "sfe_vlan.h"

extern int max_ipv4_conn;
extern int max_ipv6_conn;

#define SFE_MESSAGE_VERSION 0x1
#define sfe_ipv6_addr_copy(src, dest) memcpy((void *)(dest), (void *)(src), 16)
#define sfe_ipv4_stopped(CTX) (rcu_dereference((CTX)->ipv4_stats_sync_cb) == NULL)
#define sfe_ipv6_stopped(CTX) (rcu_dereference((CTX)->ipv6_stats_sync_cb) == NULL)

typedef enum sfe_exception {
	SFE_EXCEPTION_IPV4_MSG_UNKNOW,
	SFE_EXCEPTION_IPV6_MSG_UNKNOW,
	SFE_EXCEPTION_CONNECTION_INVALID,
	SFE_EXCEPTION_NOT_SUPPORT_BRIDGE,
	SFE_EXCEPTION_TCP_INVALID,
	SFE_EXCEPTION_PROTOCOL_NOT_SUPPORT,
	SFE_EXCEPTION_SRC_DEV_NOT_L3,
	SFE_EXCEPTION_DEST_DEV_NOT_L3,
	SFE_EXCEPTION_CFG_ERR,
	SFE_EXCEPTION_CREATE_FAILED,
	SFE_EXCEPTION_ENQUEUE_FAILED,
	SFE_EXCEPTION_NOT_SUPPORT_6RD,
	SFE_EXCEPTION_NO_SYNC_CB,
	SFE_EXCEPTION_MAX
} sfe_exception_t;

static char *sfe_exception_events_string[SFE_EXCEPTION_MAX] = {
	"IPV4_MSG_UNKNOW",
	"IPV6_MSG_UNKNOW",
	"CONNECTION_INVALID",
	"NOT_SUPPORT_BRIDGE",
	"TCP_INVALID",
	"PROTOCOL_NOT_SUPPORT",
	"SRC_DEV_NOT_L3",
	"DEST_DEV_NOT_L3",
	"CONFIG_ERROR",
	"CREATE_FAILED",
	"ENQUEUE_FAILED",
	"NOT_SUPPORT_6RD",
	"NO_SYNC_CB"
};

/*
 * Message type of queued response message
 */
typedef enum {
	SFE_MSG_TYPE_IPV4,
	SFE_MSG_TYPE_IPV6
} sfe_msg_types_t;

/*
 * Queued response message;
 * it will be sent back to the caller from the workqueue.
 */
struct sfe_response_msg {
	struct list_head node;
	sfe_msg_types_t type;
	void *msg[0];
};
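
/*
 * Lifecycle note: a response is allocated by sfe_alloc_response_msg(),
 * queued from the rule create/destroy paths via sfe_enqueue_msg(),
 * delivered to the registered callback from the sfe_process_response_msg()
 * work item and then freed with kfree().
 */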

/*
 * SFE context instance, private to SFE
 */
struct sfe_ctx_instance_internal {
	struct sfe_ctx_instance base;	/* Exported SFE context, public to users of SFE */

	/*
	 * Control state.
	 */
	struct kobject *sys_sfe;	/* Sysfs linkage */

	struct list_head msg_queue;	/* Response message queue */
	spinlock_t lock;		/* Lock to protect message queue */

	struct work_struct work;	/* Work to send response message back to caller */

	sfe_ipv4_msg_callback_t __rcu ipv4_stats_sync_cb;	/* Callback to call to sync ipv4 statistics */
	void *ipv4_stats_sync_data;	/* Argument for above callback: ipv4_stats_sync_cb */

	sfe_ipv6_msg_callback_t __rcu ipv6_stats_sync_cb;	/* Callback to call to sync ipv6 statistics */
	void *ipv6_stats_sync_data;	/* Argument for above callback: ipv6_stats_sync_cb */

	u32 exceptions[SFE_EXCEPTION_MAX];	/* Statistics for exceptions */

	int32_t l2_feature_support;	/* L2 feature support */

};

static struct sfe_ctx_instance_internal __sfe_ctx;

/*
 * Convert public SFE context to internal context
 */
#define SFE_CTX_TO_PRIVATE(base) (struct sfe_ctx_instance_internal *)(base)
/*
 * Convert internal SFE context to public context
 */
#define SFE_CTX_TO_PUBLIC(intrv) (struct sfe_ctx_instance *)(intrv)

/*
 * sfe_incr_exceptions()
 *	Increase an exception counter.
 *
 * TODO: Merge sfe_ctx stats to ipv4 and ipv6 percpu stats.
 */
static inline void sfe_incr_exceptions(sfe_exception_t except)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);
	sfe_ctx->exceptions[except]++;
	spin_unlock_bh(&sfe_ctx->lock);
}

/*
 * sfe_dev_is_layer_3_interface()
 *	Check whether a network device is an IPv4 or IPv6 layer-3 interface
 *
 * @param dev network device to check
 * @param check_v4 when true, check for an IPv4 layer-3 interface (one with an IPv4 address);
 *	otherwise check for an IPv6 layer-3 interface (one with an IPv6 address)
 */
inline bool sfe_dev_is_layer_3_interface(struct net_device *dev, bool check_v4)
{
	struct in_device *in4_dev;
	struct inet6_dev *in6_dev;

	BUG_ON(!dev);

	if (likely(check_v4)) {
		/*
		 * Does our input device support IPv4 processing?
		 */
		in4_dev = (struct in_device *)dev->ip_ptr;
		if (unlikely(!in4_dev)) {
			return false;
		}

		/*
		 * Does it have an IPv4 address? If it doesn't, it could still be a MAP-T
		 * interface; otherwise we can't do anything interesting here!
		 */
		if (likely(in4_dev->ifa_list || (dev->priv_flags_ext & IFF_EXT_MAPT))) {
			return true;
		}
		return false;
	}

	/*
	 * Does our input device support IPv6 processing?
	 */
	in6_dev = (struct inet6_dev *)dev->ip6_ptr;
	if (unlikely(!in6_dev)) {
		return false;
	}

	/*
	 * Does it have an IPv6 address? If it doesn't, it could still be a MAP-T
	 * interface; otherwise we can't do anything interesting here!
	 */
	if (likely(!list_empty(&in6_dev->addr_list) || (dev->priv_flags_ext & IFF_EXT_MAPT))) {
		return true;
	}

	return false;
}

/*
 * sfe_routed_dev_allow()
 *	Check whether routed acceleration is allowed on this device.
 */
static bool sfe_routed_dev_allow(struct net_device *dev, bool is_routed, bool check_v4)
{
	if (!is_routed) {
		return true;
	}

	if (sfe_dev_is_layer_3_interface(dev, check_v4)) {
		return true;
	}

	/*
	 * GRE and VxLAN devices do not have an IP address, so the L3 interface check
	 * above fails for them. Still allow rule creation between a GRE/VxLAN device
	 * and the WAN device for routed flows.
	 */
	if (netif_is_vxlan(dev)) {
		return true;
	}

#ifdef SFE_GRE_TUN_ENABLE
	if (netif_is_gretap(dev) || netif_is_gre(dev)) {
		return true;
	}

	if (netif_is_ip6gre(dev) || netif_is_ip6gretap(dev)) {
		return true;
	}
#endif

	return false;
}

/*
 * sfe_dev_has_hw_csum()
 *	Check whether the device supports hardware checksum offload
 */
bool sfe_dev_has_hw_csum(struct net_device *dev)
{
	if (netif_is_vxlan(dev)) {
		return false;
	}

#ifdef SFE_GRE_TUN_ENABLE
	if (netif_is_gre(dev) || netif_is_gretap(dev)) {
		return false;
	}

	if (netif_is_ip6gre(dev) || netif_is_ip6gretap(dev)) {
		return false;
	}
#endif
	/*
	 * MAP-E/DS-Lite and tun6rd tunnels share the same rtnl_link_ops, whose kind is
	 * "ip6tnl". HW checksum offload should be disabled for these tunnel devices.
	 */
	if (dev->rtnl_link_ops && !strcmp(dev->rtnl_link_ops->kind, "ip6tnl")) {
		return false;
	}

	return true;
}

/*
 * sfe_clean_response_msg_by_type()
 *	Clean up queued response messages of the given type when ECM exits
 *
 * @param sfe_ctx SFE context
 * @param msg_type message type, ipv4 or ipv6
 */
static void sfe_clean_response_msg_by_type(struct sfe_ctx_instance_internal *sfe_ctx, sfe_msg_types_t msg_type)
{
	struct sfe_response_msg *response, *tmp;

	if (!sfe_ctx) {
		return;
	}

	spin_lock_bh(&sfe_ctx->lock);
	list_for_each_entry_safe(response, tmp, &sfe_ctx->msg_queue, node) {
		if (response->type == msg_type) {
			list_del(&response->node);
			/*
			 * Free response message
			 */
			kfree(response);
		}
	}
	spin_unlock_bh(&sfe_ctx->lock);

}

/*
 * sfe_process_response_msg()
 *	Send all pending response messages to ECM by calling the callback function included in each message
 *
 * @param work work structure
 */
static void sfe_process_response_msg(struct work_struct *work)
{
	struct sfe_ctx_instance_internal *sfe_ctx = container_of(work, struct sfe_ctx_instance_internal, work);
	struct sfe_response_msg *response;

	spin_lock_bh(&sfe_ctx->lock);
	while ((response = list_first_entry_or_null(&sfe_ctx->msg_queue, struct sfe_response_msg, node))) {
		list_del(&response->node);
		spin_unlock_bh(&sfe_ctx->lock);
		rcu_read_lock();

		/*
		 * Send response message back to caller
		 */
		if ((response->type == SFE_MSG_TYPE_IPV4) && !sfe_ipv4_stopped(sfe_ctx)) {
			struct sfe_ipv4_msg *msg = (struct sfe_ipv4_msg *)response->msg;
			sfe_ipv4_msg_callback_t callback = (sfe_ipv4_msg_callback_t)msg->cm.cb;
			if (callback) {
				callback((void *)msg->cm.app_data, msg);
			}
		} else if ((response->type == SFE_MSG_TYPE_IPV6) && !sfe_ipv6_stopped(sfe_ctx)) {
			struct sfe_ipv6_msg *msg = (struct sfe_ipv6_msg *)response->msg;
			sfe_ipv6_msg_callback_t callback = (sfe_ipv6_msg_callback_t)msg->cm.cb;
			if (callback) {
				callback((void *)msg->cm.app_data, msg);
			}
		}

		rcu_read_unlock();
		/*
		 * Free response message
		 */
		kfree(response);
		spin_lock_bh(&sfe_ctx->lock);
	}
	spin_unlock_bh(&sfe_ctx->lock);
}

/*
 * sfe_alloc_response_msg()
 *	Allocate and construct a new response message
 *
 * @param type message type
 * @param msg used to construct response message if not NULL
 *
 * @return !NULL, success; NULL, failed
 */
static struct sfe_response_msg *
sfe_alloc_response_msg(sfe_msg_types_t type, void *msg)
{
	struct sfe_response_msg *response;
	int size;

	switch (type) {
	case SFE_MSG_TYPE_IPV4:
		size = sizeof(struct sfe_ipv4_msg);
		break;
	case SFE_MSG_TYPE_IPV6:
		size = sizeof(struct sfe_ipv6_msg);
		break;
	default:
		DEBUG_ERROR("message type %d not supported\n", type);
		return NULL;
	}

	response = (struct sfe_response_msg *)kzalloc(sizeof(struct sfe_response_msg) + size, GFP_ATOMIC);
	if (!response) {
		DEBUG_ERROR("allocate memory failed\n");
		return NULL;
	}

	response->type = type;

	if (msg) {
		memcpy(response->msg, msg, size);
	}

	return response;
}

/*
 * sfe_enqueue_msg()
 *	Queue response message
 *
 * @param sfe_ctx SFE context
 * @param response response message to be queued
 */
static inline void sfe_enqueue_msg(struct sfe_ctx_instance_internal *sfe_ctx, struct sfe_response_msg *response)
{
	spin_lock_bh(&sfe_ctx->lock);
	list_add_tail(&response->node, &sfe_ctx->msg_queue);
	spin_unlock_bh(&sfe_ctx->lock);

	schedule_work(&sfe_ctx->work);
}

/*
 * sfe_cmn_msg_init()
 *	Initialize the common message structure.
 *
 * @param ncm message to init
 * @param if_num interface number related with this message
 * @param type message type
 * @param cb callback function to process the response of this message
 * @param app_data argument for above callback function
 */
static void sfe_cmn_msg_init(struct sfe_cmn_msg *ncm, u16 if_num, u32 type, u32 len, void *cb, void *app_data)
{
	ncm->interface = if_num;
	ncm->version = SFE_MESSAGE_VERSION;
	ncm->type = type;
	ncm->len = len;
	ncm->cb = (sfe_ptr_t)cb;
	ncm->app_data = (sfe_ptr_t)app_data;
}

/*
 * sfe_ipv4_stats_sync_callback()
 *	Synchronize a connection's state.
 *
 * @param sis SFE statistics from SFE core engine
 */
static void sfe_ipv4_stats_sync_callback(struct sfe_connection_sync *sis)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
	struct sfe_ipv4_msg msg;
	struct sfe_ipv4_conn_sync *sync_msg;
	sfe_ipv4_msg_callback_t sync_cb;

	rcu_read_lock();
	sync_cb = rcu_dereference(sfe_ctx->ipv4_stats_sync_cb);
	if (!sync_cb) {
		rcu_read_unlock();
		sfe_incr_exceptions(SFE_EXCEPTION_NO_SYNC_CB);
		return;
	}

	sync_msg = &msg.msg.conn_stats;

	memset(&msg, 0, sizeof(msg));
	sfe_cmn_msg_init(&msg.cm, 0, SFE_RX_CONN_STATS_SYNC_MSG,
			sizeof(struct sfe_ipv4_conn_sync), NULL, NULL);

	/*
	 * Fill connection specific information
	 */
	sync_msg->protocol = (u8)sis->protocol;
	sync_msg->flow_ip = sis->src_ip.ip;
	sync_msg->flow_ip_xlate = sis->src_ip_xlate.ip;
	sync_msg->flow_ident = sis->src_port;
	sync_msg->flow_ident_xlate = sis->src_port_xlate;

	sync_msg->return_ip = sis->dest_ip.ip;
	sync_msg->return_ip_xlate = sis->dest_ip_xlate.ip;
	sync_msg->return_ident = sis->dest_port;
	sync_msg->return_ident_xlate = sis->dest_port_xlate;

	/*
	 * Fill TCP protocol specific information
	 */
	if (sis->protocol == IPPROTO_TCP) {
		sync_msg->flow_max_window = sis->src_td_max_window;
		sync_msg->flow_end = sis->src_td_end;
		sync_msg->flow_max_end = sis->src_td_max_end;

		sync_msg->return_max_window = sis->dest_td_max_window;
		sync_msg->return_end = sis->dest_td_end;
		sync_msg->return_max_end = sis->dest_td_max_end;
	}

	/*
	 * Fill statistics information
	 */
	sync_msg->flow_rx_packet_count = sis->src_new_packet_count;
	sync_msg->flow_rx_byte_count = sis->src_new_byte_count;
	sync_msg->flow_tx_packet_count = sis->dest_new_packet_count;
	sync_msg->flow_tx_byte_count = sis->dest_new_byte_count;

	sync_msg->return_rx_packet_count = sis->dest_new_packet_count;
	sync_msg->return_rx_byte_count = sis->dest_new_byte_count;
	sync_msg->return_tx_packet_count = sis->src_new_packet_count;
	sync_msg->return_tx_byte_count = sis->src_new_byte_count;

	/*
	 * Fill expiration time to extend, in unit of msec
	 */
	sync_msg->inc_ticks = (((u32)sis->delta_jiffies) * MSEC_PER_SEC)/HZ;

	/*
	 * Fill other information
	 */
	switch (sis->reason) {
	case SFE_SYNC_REASON_DESTROY:
		sync_msg->reason = SFE_RULE_SYNC_REASON_DESTROY;
		break;
	case SFE_SYNC_REASON_FLUSH:
		sync_msg->reason = SFE_RULE_SYNC_REASON_FLUSH;
		break;
	default:
		sync_msg->reason = SFE_RULE_SYNC_REASON_STATS;
		break;
	}

	/*
	 * The SFE sync call is executed from a timer, so we can redirect it to ECM directly.
	 */
	sync_cb(sfe_ctx->ipv4_stats_sync_data, &msg);
	rcu_read_unlock();
}

/*
 * sfe_recv_parse_l2()
 *	Parse L2 headers
 *
 * Returns true if the packet is parsed and false otherwise.
 */
static bool sfe_recv_parse_l2(struct net_device *dev, struct sk_buff *skb, struct sfe_l2_info *l2_info)
{
	/*
	 * l2_hdr_offset will not change as we parse more L2.5 headers
	 * TODO: Move from storing offsets to storing pointers
	 */
	sfe_l2_hdr_offset_set(l2_info, ((skb->data - ETH_HLEN) - skb->head));

	/*
	 * VLAN parsing
	 */
	if (unlikely(!sfe_vlan_check_and_parse_tag(skb, l2_info))) {
		return false;
	}

	/*
	 * PPPoE parsing
	 */
	if (htons(ETH_P_PPP_SES) == skb->protocol) {
		/*
		 * Parse only PPPoE session packets;
		 * skb->data is pointing to the PPPoE header.
		 */
		if (!sfe_pppoe_parse_hdr(skb, l2_info)) {

			/*
			 * For an exception from PPPoE, return from here without modifying skb->data.
			 * This includes non-IPv4/v6 cases as well.
			 */
			return false;
		}

		/*
		 * Pull by L2 header size
		 */
		__skb_pull(skb, sfe_l2_hdr_size_get(l2_info));
	}
	return true;
}
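
/*
 * Note: on success the parsed L2.5 headers are recorded in l2_info and the
 * PPPoE header, if present, has been pulled from skb->data;
 * sfe_recv_undo_parse_l2() below reverses this before a packet is handed
 * back to the Linux stack.
 */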

/*
 * sfe_recv_undo_parse_l2()
 */
static void sfe_recv_undo_parse_l2(struct net_device *dev, struct sk_buff *skb, struct sfe_l2_info *l2_info)
{
	/*
	 * PPPoE undo
	 */
	__skb_push(skb, sfe_l2_hdr_size_get(l2_info));

	/*
	 * VLAN undo
	 */
	sfe_vlan_undo_parse(skb, l2_info);
}

/*
 * sfe_create_ipv4_rule_msg()
 *	Convert the create message format from ECM to SFE
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv4 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_create_ipv4_rule_msg(struct sfe_ctx_instance_internal *sfe_ctx, struct sfe_ipv4_msg *msg)
{
	struct net_device *src_dev = NULL;
	struct net_device *dest_dev = NULL;
	struct sfe_response_msg *response;
	enum sfe_cmn_response ret = SFE_TX_SUCCESS;
	bool is_routed = true;
	bool cfg_err;

	response = sfe_alloc_response_msg(SFE_MSG_TYPE_IPV4, msg);
	if (!response) {
		sfe_incr_exceptions(SFE_EXCEPTION_ENQUEUE_FAILED);
		return SFE_TX_FAILURE_QUEUE;
	}

	if (!(msg->msg.rule_create.valid_flags & SFE_RULE_CREATE_CONN_VALID)) {
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CONNECTION_INVALID);
		goto failed_ret;
	}

	switch (msg->msg.rule_create.tuple.protocol) {
	case IPPROTO_TCP:
		if (!(msg->msg.rule_create.valid_flags & SFE_RULE_CREATE_TCP_VALID)) {
			ret = SFE_CMN_RESPONSE_EMSG;
			sfe_incr_exceptions(SFE_EXCEPTION_TCP_INVALID);
			goto failed_ret;
		}

	case IPPROTO_UDP:
		break;

	case IPPROTO_GRE:
		break;

	default:
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_PROTOCOL_NOT_SUPPORT);
		goto failed_ret;
	}

	/*
	 * Bridge flows are accelerated if L2 feature is enabled.
	 */
	if (msg->msg.rule_create.rule_flags & SFE_RULE_CREATE_FLAG_BRIDGE_FLOW) {
		if (!sfe_is_l2_feature_enabled()) {
			ret = SFE_CMN_RESPONSE_EINTERFACE;
			sfe_incr_exceptions(SFE_EXCEPTION_NOT_SUPPORT_BRIDGE);
			goto failed_ret;
		}

		is_routed = false;
	}

	/*
	 * Does our input device support IP processing?
	 */
	src_dev = dev_get_by_index(&init_net, msg->msg.rule_create.conn_rule.flow_top_interface_num);
	if (!src_dev || !sfe_routed_dev_allow(src_dev, is_routed, true)) {
		ret = SFE_CMN_RESPONSE_EINTERFACE;
		sfe_incr_exceptions(SFE_EXCEPTION_SRC_DEV_NOT_L3);
		goto failed_ret;
	}

	/*
	 * Check whether L2 feature is disabled and rule flag is configured to use bottom interface
	 */
	cfg_err = (msg->msg.rule_create.rule_flags & SFE_RULE_CREATE_FLAG_USE_FLOW_BOTTOM_INTERFACE) && !sfe_is_l2_feature_enabled();
	if (cfg_err) {
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CFG_ERR);
		goto failed_ret;
	}

	/*
	 * Does our output device support IP processing?
	 */
	dest_dev = dev_get_by_index(&init_net, msg->msg.rule_create.conn_rule.return_top_interface_num);
	if (!dest_dev || !sfe_routed_dev_allow(dest_dev, is_routed, true)) {
		ret = SFE_CMN_RESPONSE_EINTERFACE;
		sfe_incr_exceptions(SFE_EXCEPTION_DEST_DEV_NOT_L3);
		goto failed_ret;
	}

	/*
	 * Check whether L2 feature is disabled and rule flag is configured to use bottom interface
	 */
	cfg_err = (msg->msg.rule_create.rule_flags & SFE_RULE_CREATE_FLAG_USE_RETURN_BOTTOM_INTERFACE) && !sfe_is_l2_feature_enabled();
	if (cfg_err) {
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CFG_ERR);
		goto failed_ret;
	}

	if (!sfe_ipv4_create_rule(&msg->msg.rule_create)) {
		/* success */
		ret = SFE_CMN_RESPONSE_ACK;
	} else {
		/* Failed */
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CREATE_FAILED);
	}

	/*
	 * Fall through
	 */
failed_ret:
	if (src_dev) {
		dev_put(src_dev);
	}

	if (dest_dev) {
		dev_put(dest_dev);
	}

	/*
	 * Try to queue response message
	 */
	((struct sfe_ipv4_msg *)response->msg)->cm.response = msg->cm.response = ret;
	sfe_enqueue_msg(sfe_ctx, response);

	return SFE_TX_SUCCESS;
}

/*
 * sfe_destroy_ipv4_rule_msg()
 *	Convert the destroy message format from ECM to SFE
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv4 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_destroy_ipv4_rule_msg(struct sfe_ctx_instance_internal *sfe_ctx, struct sfe_ipv4_msg *msg)
{
	struct sfe_response_msg *response;

	response = sfe_alloc_response_msg(SFE_MSG_TYPE_IPV4, msg);
	if (!response) {
		sfe_incr_exceptions(SFE_EXCEPTION_ENQUEUE_FAILED);
		return SFE_TX_FAILURE_QUEUE;
	}

	sfe_ipv4_destroy_rule(&msg->msg.rule_destroy);

	/*
	 * Try to queue response message
	 */
	((struct sfe_ipv4_msg *)response->msg)->cm.response = msg->cm.response = SFE_CMN_RESPONSE_ACK;
	sfe_enqueue_msg(sfe_ctx, response);

	return SFE_TX_SUCCESS;
}

/*
 * sfe_ipv4_tx()
 *	Transmit an IPv4 message to the sfe
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv4 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_ipv4_tx(struct sfe_ctx_instance *sfe_ctx, struct sfe_ipv4_msg *msg)
{
	switch (msg->cm.type) {
	case SFE_TX_CREATE_RULE_MSG:
		return sfe_create_ipv4_rule_msg(SFE_CTX_TO_PRIVATE(sfe_ctx), msg);
	case SFE_TX_DESTROY_RULE_MSG:
		return sfe_destroy_ipv4_rule_msg(SFE_CTX_TO_PRIVATE(sfe_ctx), msg);
	default:
		sfe_incr_exceptions(SFE_EXCEPTION_IPV4_MSG_UNKNOW);
		return SFE_TX_FAILURE_NOT_ENABLED;
	}
}
EXPORT_SYMBOL(sfe_ipv4_tx);

/*
 * sfe_ipv4_msg_init()
 *	Initialize IPv4 message.
 */
void sfe_ipv4_msg_init(struct sfe_ipv4_msg *nim, u16 if_num, u32 type, u32 len,
		       sfe_ipv4_msg_callback_t cb, void *app_data)
{
	sfe_cmn_msg_init(&nim->cm, if_num, type, len, (void *)cb, app_data);
}
EXPORT_SYMBOL(sfe_ipv4_msg_init);

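/*
 * Illustrative (non-normative) caller sketch: a client such as ECM would
 * typically initialise a message with sfe_ipv4_msg_init() and push it through
 * sfe_ipv4_tx(), e.g.
 *
 *	struct sfe_ipv4_msg nim;
 *
 *	sfe_ipv4_msg_init(&nim, if_num, SFE_TX_CREATE_RULE_MSG,
 *			  sizeof(nim.msg.rule_create), my_cb, my_app_data);
 *	(fill in nim.msg.rule_create here)
 *	sfe_ipv4_tx(ctx, &nim);
 *
 * where ctx is the handle returned by sfe_ipv4_notify_register(), and if_num,
 * my_cb and my_app_data are caller-supplied placeholders. The response is
 * delivered asynchronously through the queued response message path above.
 */
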
/*
 * sfe_ipv4_max_conn_count()
 *	Return the maximum number of connection entries that SFE supports
 */
int sfe_ipv4_max_conn_count(void)
{
	return max_ipv4_conn;
}
EXPORT_SYMBOL(sfe_ipv4_max_conn_count);

/*
 * sfe_ipv4_notify_register()
 *	Register a notifier callback for IPv4 messages from SFE
 *
 * @param cb The callback pointer
 * @param app_data The application context for this message
 *
 * @return struct sfe_ctx_instance * The SFE context
 */
struct sfe_ctx_instance *sfe_ipv4_notify_register(sfe_ipv4_msg_callback_t cb, void *app_data)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);
	/*
	 * Hook the shortcut sync callback.
	 */
	if (cb && !sfe_ctx->ipv4_stats_sync_cb) {
		sfe_ipv4_register_sync_rule_callback(sfe_ipv4_stats_sync_callback);
	}

	rcu_assign_pointer(sfe_ctx->ipv4_stats_sync_cb, cb);
	sfe_ctx->ipv4_stats_sync_data = app_data;

	spin_unlock_bh(&sfe_ctx->lock);

	return SFE_CTX_TO_PUBLIC(sfe_ctx);
}
EXPORT_SYMBOL(sfe_ipv4_notify_register);

/*
 * sfe_ipv4_notify_unregister()
 *	Unregister the notifier callback for IPv4 messages from SFE
 */
void sfe_ipv4_notify_unregister(void)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);
	/*
	 * Unregister our sync callback.
	 */
	if (sfe_ctx->ipv4_stats_sync_cb) {
		sfe_ipv4_register_sync_rule_callback(NULL);
		rcu_assign_pointer(sfe_ctx->ipv4_stats_sync_cb, NULL);
		sfe_ctx->ipv4_stats_sync_data = NULL;
	}
	spin_unlock_bh(&sfe_ctx->lock);

	sfe_clean_response_msg_by_type(sfe_ctx, SFE_MSG_TYPE_IPV4);

	return;
}
EXPORT_SYMBOL(sfe_ipv4_notify_unregister);

/*
 * sfe_ipv6_stats_sync_callback()
 *	Synchronize a connection's state.
 */
static void sfe_ipv6_stats_sync_callback(struct sfe_connection_sync *sis)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
	struct sfe_ipv6_msg msg;
	struct sfe_ipv6_conn_sync *sync_msg;
	sfe_ipv6_msg_callback_t sync_cb;

	rcu_read_lock();
	sync_cb = rcu_dereference(sfe_ctx->ipv6_stats_sync_cb);
	if (!sync_cb) {
		rcu_read_unlock();
		sfe_incr_exceptions(SFE_EXCEPTION_NO_SYNC_CB);
		return;
	}

	sync_msg = &msg.msg.conn_stats;

	memset(&msg, 0, sizeof(msg));
	sfe_cmn_msg_init(&msg.cm, 0, SFE_RX_CONN_STATS_SYNC_MSG,
			sizeof(struct sfe_ipv6_conn_sync), NULL, NULL);

	/*
	 * Fill connection specific information
	 */
	sync_msg->protocol = (u8)sis->protocol;
	sfe_ipv6_addr_copy(sis->src_ip.ip6, sync_msg->flow_ip);
	sync_msg->flow_ident = sis->src_port;

	sfe_ipv6_addr_copy(sis->dest_ip.ip6, sync_msg->return_ip);
	sync_msg->return_ident = sis->dest_port;

	/*
	 * Fill TCP protocol specific information
	 */
	if (sis->protocol == IPPROTO_TCP) {
		sync_msg->flow_max_window = sis->src_td_max_window;
		sync_msg->flow_end = sis->src_td_end;
		sync_msg->flow_max_end = sis->src_td_max_end;

		sync_msg->return_max_window = sis->dest_td_max_window;
		sync_msg->return_end = sis->dest_td_end;
		sync_msg->return_max_end = sis->dest_td_max_end;
	}

	/*
	 * Fill statistics information
	 */
	sync_msg->flow_rx_packet_count = sis->src_new_packet_count;
	sync_msg->flow_rx_byte_count = sis->src_new_byte_count;
	sync_msg->flow_tx_packet_count = sis->dest_new_packet_count;
	sync_msg->flow_tx_byte_count = sis->dest_new_byte_count;

	sync_msg->return_rx_packet_count = sis->dest_new_packet_count;
	sync_msg->return_rx_byte_count = sis->dest_new_byte_count;
	sync_msg->return_tx_packet_count = sis->src_new_packet_count;
	sync_msg->return_tx_byte_count = sis->src_new_byte_count;

	/*
	 * Fill expiration time to extend, in unit of msec
	 */
	sync_msg->inc_ticks = (((u32)sis->delta_jiffies) * MSEC_PER_SEC)/HZ;

	/*
	 * Fill other information
	 */
	switch (sis->reason) {
	case SFE_SYNC_REASON_DESTROY:
		sync_msg->reason = SFE_RULE_SYNC_REASON_DESTROY;
		break;
	case SFE_SYNC_REASON_FLUSH:
		sync_msg->reason = SFE_RULE_SYNC_REASON_FLUSH;
		break;
	default:
		sync_msg->reason = SFE_RULE_SYNC_REASON_STATS;
		break;
	}

	/*
	 * The SFE sync call is executed from a timer, so we can redirect it to ECM directly.
	 */
	sync_cb(sfe_ctx->ipv6_stats_sync_data, &msg);
	rcu_read_unlock();
}

/*
 * sfe_create_ipv6_rule_msg()
 *	Convert the create message format from ECM to SFE
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv6 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_create_ipv6_rule_msg(struct sfe_ctx_instance_internal *sfe_ctx, struct sfe_ipv6_msg *msg)
{
	struct net_device *src_dev = NULL;
	struct net_device *dest_dev = NULL;
	struct sfe_response_msg *response;
	enum sfe_cmn_response ret = SFE_TX_SUCCESS;
	bool is_routed = true;
	bool cfg_err;

	response = sfe_alloc_response_msg(SFE_MSG_TYPE_IPV6, msg);
	if (!response) {
		sfe_incr_exceptions(SFE_EXCEPTION_ENQUEUE_FAILED);
		return SFE_TX_FAILURE_QUEUE;
	}

	if (!(msg->msg.rule_create.valid_flags & SFE_RULE_CREATE_CONN_VALID)) {
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CONNECTION_INVALID);
		goto failed_ret;
	}

	/*
	 * Bridge flows are accelerated if L2 feature is enabled.
	 */
	if (msg->msg.rule_create.rule_flags & SFE_RULE_CREATE_FLAG_BRIDGE_FLOW) {
		if (!sfe_is_l2_feature_enabled()) {
			ret = SFE_CMN_RESPONSE_EINTERFACE;
			sfe_incr_exceptions(SFE_EXCEPTION_NOT_SUPPORT_BRIDGE);
			goto failed_ret;
		}
		is_routed = false;
	}

	switch (msg->msg.rule_create.tuple.protocol) {

	case IPPROTO_TCP:
		if (!(msg->msg.rule_create.valid_flags & SFE_RULE_CREATE_TCP_VALID)) {
			ret = SFE_CMN_RESPONSE_EMSG;
			sfe_incr_exceptions(SFE_EXCEPTION_TCP_INVALID);
			goto failed_ret;
		}

		break;

	case IPPROTO_UDP:
		break;

	case IPPROTO_IPIP:
		break;

	case IPPROTO_GRE:
		break;

	default:
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_PROTOCOL_NOT_SUPPORT);
		goto failed_ret;
	}

	/*
	 * Does our input device support IP processing?
	 */
	src_dev = dev_get_by_index(&init_net, msg->msg.rule_create.conn_rule.flow_top_interface_num);
	if (!src_dev || !sfe_routed_dev_allow(src_dev, is_routed, false)) {
		ret = SFE_CMN_RESPONSE_EINTERFACE;
		sfe_incr_exceptions(SFE_EXCEPTION_SRC_DEV_NOT_L3);
		goto failed_ret;
	}

	/*
	 * Check whether L2 feature is disabled and rule flag is configured to use bottom interface
	 */
	cfg_err = (msg->msg.rule_create.rule_flags & SFE_RULE_CREATE_FLAG_USE_FLOW_BOTTOM_INTERFACE) && !sfe_is_l2_feature_enabled();
	if (cfg_err) {
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CFG_ERR);
		goto failed_ret;
	}

	/*
	 * Does our output device support IP processing?
	 */
	dest_dev = dev_get_by_index(&init_net, msg->msg.rule_create.conn_rule.return_top_interface_num);
	if (!dest_dev || !sfe_routed_dev_allow(dest_dev, is_routed, false)) {
		ret = SFE_CMN_RESPONSE_EINTERFACE;
		sfe_incr_exceptions(SFE_EXCEPTION_DEST_DEV_NOT_L3);
		goto failed_ret;
	}

	/*
	 * Check whether L2 feature is disabled and rule flag is configured to use bottom interface
	 */
	cfg_err = (msg->msg.rule_create.rule_flags & SFE_RULE_CREATE_FLAG_USE_RETURN_BOTTOM_INTERFACE) && !sfe_is_l2_feature_enabled();
	if (cfg_err) {
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CFG_ERR);
		goto failed_ret;
	}

	if (!sfe_ipv6_create_rule(&msg->msg.rule_create)) {
		/* success */
		ret = SFE_CMN_RESPONSE_ACK;
	} else {
		/* Failed */
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CREATE_FAILED);
	}

	/*
	 * Fall through
	 */
failed_ret:
	if (src_dev) {
		dev_put(src_dev);
	}

	if (dest_dev) {
		dev_put(dest_dev);
	}

	/*
	 * Try to queue response message
	 */
	((struct sfe_ipv6_msg *)response->msg)->cm.response = msg->cm.response = ret;
	sfe_enqueue_msg(sfe_ctx, response);

	return SFE_TX_SUCCESS;
}

/*
 * sfe_destroy_ipv6_rule_msg()
 *	Convert the destroy message format from ECM to SFE
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv6 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_destroy_ipv6_rule_msg(struct sfe_ctx_instance_internal *sfe_ctx, struct sfe_ipv6_msg *msg)
{
	struct sfe_response_msg *response;

	response = sfe_alloc_response_msg(SFE_MSG_TYPE_IPV6, msg);
	if (!response) {
		sfe_incr_exceptions(SFE_EXCEPTION_ENQUEUE_FAILED);
		return SFE_TX_FAILURE_QUEUE;
	}

	sfe_ipv6_destroy_rule(&msg->msg.rule_destroy);

	/*
	 * Try to queue response message
	 */
	((struct sfe_ipv6_msg *)response->msg)->cm.response = msg->cm.response = SFE_CMN_RESPONSE_ACK;
	sfe_enqueue_msg(sfe_ctx, response);

	return SFE_TX_SUCCESS;
}

/*
 * sfe_ipv6_tx()
 *	Transmit an IPv6 message to the sfe
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv6 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_ipv6_tx(struct sfe_ctx_instance *sfe_ctx, struct sfe_ipv6_msg *msg)
{
	switch (msg->cm.type) {
	case SFE_TX_CREATE_RULE_MSG:
		return sfe_create_ipv6_rule_msg(SFE_CTX_TO_PRIVATE(sfe_ctx), msg);
	case SFE_TX_DESTROY_RULE_MSG:
		return sfe_destroy_ipv6_rule_msg(SFE_CTX_TO_PRIVATE(sfe_ctx), msg);
	default:
		sfe_incr_exceptions(SFE_EXCEPTION_IPV6_MSG_UNKNOW);
		return SFE_TX_FAILURE_NOT_ENABLED;
	}
}
EXPORT_SYMBOL(sfe_ipv6_tx);

/*
 * sfe_ipv6_msg_init()
 *	Initialize IPv6 message.
 */
void sfe_ipv6_msg_init(struct sfe_ipv6_msg *nim, u16 if_num, u32 type, u32 len,
		       sfe_ipv6_msg_callback_t cb, void *app_data)
{
	sfe_cmn_msg_init(&nim->cm, if_num, type, len, (void *)cb, app_data);
}
EXPORT_SYMBOL(sfe_ipv6_msg_init);

/*
 * sfe_ipv6_max_conn_count()
 *	Return the maximum number of connection entries that SFE supports
 */
int sfe_ipv6_max_conn_count(void)
{
	return max_ipv6_conn;
}
EXPORT_SYMBOL(sfe_ipv6_max_conn_count);

/*
 * sfe_ipv6_notify_register()
 *	Register a notifier callback for IPv6 messages from SFE
 *
 * @param cb The callback pointer
 * @param app_data The application context for this message
 *
 * @return struct sfe_ctx_instance * The SFE context
 */
struct sfe_ctx_instance *sfe_ipv6_notify_register(sfe_ipv6_msg_callback_t cb, void *app_data)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);
	/*
	 * Hook the shortcut sync callback.
	 */
	if (cb && !sfe_ctx->ipv6_stats_sync_cb) {
		sfe_ipv6_register_sync_rule_callback(sfe_ipv6_stats_sync_callback);
	}

	rcu_assign_pointer(sfe_ctx->ipv6_stats_sync_cb, cb);
	sfe_ctx->ipv6_stats_sync_data = app_data;

	spin_unlock_bh(&sfe_ctx->lock);

	return SFE_CTX_TO_PUBLIC(sfe_ctx);
}
EXPORT_SYMBOL(sfe_ipv6_notify_register);

/*
 * sfe_ipv6_notify_unregister()
 *	Unregister the notifier callback for IPv6 messages from SFE
 */
void sfe_ipv6_notify_unregister(void)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);
	/*
	 * Unregister our sync callback.
	 */
	if (sfe_ctx->ipv6_stats_sync_cb) {
		sfe_ipv6_register_sync_rule_callback(NULL);
		rcu_assign_pointer(sfe_ctx->ipv6_stats_sync_cb, NULL);
		sfe_ctx->ipv6_stats_sync_data = NULL;
	}
	spin_unlock_bh(&sfe_ctx->lock);

	sfe_clean_response_msg_by_type(sfe_ctx, SFE_MSG_TYPE_IPV6);

	return;
}
EXPORT_SYMBOL(sfe_ipv6_notify_unregister);

/*
 * sfe_tun6rd_tx()
 *	Transmit a tun6rd message to sfe engine
 */
sfe_tx_status_t sfe_tun6rd_tx(struct sfe_ctx_instance *sfe_ctx, struct sfe_tun6rd_msg *msg)
{
	sfe_incr_exceptions(SFE_EXCEPTION_NOT_SUPPORT_6RD);
	return SFE_TX_FAILURE_NOT_ENABLED;
}
EXPORT_SYMBOL(sfe_tun6rd_tx);

/*
 * sfe_tun6rd_msg_init()
 *	Initialize sfe_tun6rd msg.
 */
void sfe_tun6rd_msg_init(struct sfe_tun6rd_msg *ncm, u16 if_num, u32 type, u32 len, void *cb, void *app_data)
{
	sfe_cmn_msg_init(&ncm->cm, if_num, type, len, cb, app_data);
}
EXPORT_SYMBOL(sfe_tun6rd_msg_init);

/*
 * sfe_recv()
 *	Handle packet receives.
 *
 * Returns 1 if the packet is forwarded or 0 if it isn't.
 */
int sfe_recv(struct sk_buff *skb)
{
	struct net_device *dev;
	struct sfe_l2_info l2_info;
	int ret;

	/*
	 * We know that for the vast majority of packets we need the transport
	 * layer header so we may as well start to fetch it now!
	 */
	prefetch(skb->data + 32);
	barrier();

	dev = skb->dev;

	/*
	 * Set the parse flags to 0 since l2_info is also passed for the non-L2.5 header case.
	 */
	l2_info.parse_flags = 0;
	l2_info.l2_hdr_size = 0;
	l2_info.vlan_hdr_cnt = 0;

#ifdef CONFIG_NET_CLS_ACT
	/*
	 * If an ingress Qdisc is configured and the packet has not been processed by it yet,
	 * we cannot accelerate this packet.
	 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0))
	if (dev->ingress_queue && !(skb->tc_verd & TC_NCLS)) {
		return 0;
	}
#else
	if (rcu_access_pointer(dev->miniq_ingress) && !skb->tc_skip_classify) {
		return 0;
	}
#endif
#endif

	/*
	 * If the L2 feature is enabled, we need not check whether the source device is an L3
	 * interface, since bridge flow offload is supported.
	 * If the L2 feature is disabled, make sure the source device is an L3 interface to
	 * avoid the cost of a rule lookup for L2 flows.
	 */
	switch (ntohs(skb->protocol)) {
	case ETH_P_IP:
		if (likely(sfe_is_l2_feature_enabled()) || sfe_dev_is_layer_3_interface(dev, true)) {
			return sfe_ipv4_recv(dev, skb, &l2_info, false);
		}

		DEBUG_TRACE("No IPv4 address for device: %s skb=%px\n", dev->name, skb);
		return 0;

	case ETH_P_IPV6:
		if (likely(sfe_is_l2_feature_enabled()) || sfe_dev_is_layer_3_interface(dev, false)) {
			return sfe_ipv6_recv(dev, skb, &l2_info, false);
		}

		DEBUG_TRACE("No IPv6 address for device: %s skb=%px\n", dev->name, skb);
		return 0;

	default:
		break;
	}

	/*
	 * Stop L2 processing if L2 feature is disabled.
	 */
	if (!sfe_is_l2_feature_enabled()) {
		DEBUG_TRACE("Unsupported protocol %#x %s (L2 feature is disabled) skb=%px\n",
			    ntohs(skb->protocol), dev->name, skb);
		return 0;
	}

	/*
	 * Parse the L2 headers to find the L3 protocol and the L2 header offset
	 */
	if (unlikely(!sfe_recv_parse_l2(dev, skb, &l2_info))) {
		DEBUG_TRACE("%px: Invalid L2.5 header format with protocol : %x\n", skb, ntohs(skb->protocol));
		goto send_to_linux;
	}

	/*
	 * The protocol in l2_info is expected to be in host byte order;
	 * PPPoE does this in sfe_pppoe_parse_hdr().
	 */
	if (likely(l2_info.protocol == ETH_P_IP)) {
		ret = sfe_ipv4_recv(dev, skb, &l2_info, false);
		if (unlikely(!ret)) {
			goto send_to_linux;
		}
		return ret;
	}

	if (likely(l2_info.protocol == ETH_P_IPV6)) {
		ret = sfe_ipv6_recv(dev, skb, &l2_info, false);
		if (unlikely(!ret)) {
			goto send_to_linux;
		}
		return ret;
	}

	DEBUG_TRACE("Non-IP(%x) %s skb=%px skb_vlan:%x/%x/%x skb_proto=%x\n",
		    l2_info.protocol, dev->name, skb,
		    ntohs(skb->vlan_proto), skb->vlan_tci, skb_vlan_tag_present(skb),
		    htons(skb->protocol));

send_to_linux:
	/*
	 * Push the data back before sending to Linux if:
	 * a. there is any exception from IPv4/v6, or
	 * b. the next protocol is neither IPv4 nor IPv6.
	 */
	sfe_recv_undo_parse_l2(dev, skb, &l2_info);

	return 0;
}

/*
 * sfe_get_exceptions()
 *	Dump exception counters
 */
static ssize_t sfe_get_exceptions(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	int idx, len;
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);
	for (len = 0, idx = 0; idx < SFE_EXCEPTION_MAX; idx++) {
		if (sfe_ctx->exceptions[idx]) {
			len += snprintf(buf + len, (ssize_t)(PAGE_SIZE - len), "%s = %d\n", sfe_exception_events_string[idx], sfe_ctx->exceptions[idx]);
		}
	}
	spin_unlock_bh(&sfe_ctx->lock);

	return len;
}

/*
 * sysfs attributes.
 */
static const struct device_attribute sfe_exceptions_attr =
	__ATTR(exceptions, S_IRUGO, sfe_get_exceptions, NULL);

/*
 * sfe_is_l2_feature_enabled()
 *	Check whether the L2 feature flag (VLAN, PPPoE, bridge and tunnels) is enabled.
 *
 * A 32-bit read is atomic, so no locks are needed.
 */
bool sfe_is_l2_feature_enabled()
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	return (sfe_ctx->l2_feature_support == 1);
}
EXPORT_SYMBOL(sfe_is_l2_feature_enabled);

/*
 * sfe_get_l2_feature()
 *	Show whether the L2 feature is enabled or disabled.
 */
ssize_t sfe_get_l2_feature(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
	ssize_t len;

	spin_lock_bh(&sfe_ctx->lock);
	len = snprintf(buf, (ssize_t)(PAGE_SIZE), "L2 feature is %s\n", sfe_ctx->l2_feature_support ? "enabled" : "disabled");
	spin_unlock_bh(&sfe_ctx->lock);
	return len;
}

/*
 * sfe_set_l2_feature()
 *	Enable or disable the L2 feature flag.
 */
ssize_t sfe_set_l2_feature(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	unsigned long val;
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
	int ret;

	ret = sscanf(buf, "%lu", &val);
	if (ret != 1) {
		pr_err("Wrong input, %s\n", buf);
		return -EINVAL;
	}

	if (val != 1 && val != 0) {
		pr_err("Input should be either 1 or 0, (%s)\n", buf);
		return -EINVAL;
	}

	spin_lock_bh(&sfe_ctx->lock);

	if (sfe_ctx->l2_feature_support && val) {
		spin_unlock_bh(&sfe_ctx->lock);
		pr_err("L2 feature is already enabled\n");
		return -EINVAL;
	}

	if (!sfe_ctx->l2_feature_support && !val) {
		spin_unlock_bh(&sfe_ctx->lock);
		pr_err("L2 feature is already disabled\n");
		return -EINVAL;
	}

	sfe_ctx->l2_feature_support = val;
	spin_unlock_bh(&sfe_ctx->lock);

	return count;
}

static const struct device_attribute sfe_l2_feature_attr =
	__ATTR(l2_feature, 0644, sfe_get_l2_feature, sfe_set_l2_feature);
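
/*
 * Illustrative usage from user space (assuming the default "sfe" kobject
 * created in sfe_init_if(), i.e. /sys/sfe):
 *
 *	cat /sys/sfe/exceptions
 *	echo 1 > /sys/sfe/l2_feature
 */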

/*
 * sfe_init_if()
 */
int sfe_init_if(void)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
	int result = -1;

	/*
	 * L2 feature is enabled by default
	 */
	sfe_ctx->l2_feature_support = 1;

	/*
	 * Create sys/sfe
	 */
	sfe_ctx->sys_sfe = kobject_create_and_add("sfe", NULL);
	if (!sfe_ctx->sys_sfe) {
		DEBUG_ERROR("failed to register sfe\n");
		goto exit1;
	}

	/*
	 * Create sys/sfe/exceptions
	 */
	result = sysfs_create_file(sfe_ctx->sys_sfe, &sfe_exceptions_attr.attr);
	if (result) {
		DEBUG_ERROR("failed to register exceptions file: %d\n", result);
		goto exit2;
	}

	result = sysfs_create_file(sfe_ctx->sys_sfe, &sfe_l2_feature_attr.attr);
	if (result) {
		DEBUG_ERROR("failed to register L2 feature flag sysfs file: %d\n", result);
		goto exit2;
	}

	spin_lock_init(&sfe_ctx->lock);

	INIT_LIST_HEAD(&sfe_ctx->msg_queue);
	INIT_WORK(&sfe_ctx->work, sfe_process_response_msg);

	/*
	 * Hook the receive path in the network stack.
	 */
	BUG_ON(athrs_fast_nat_recv);
	RCU_INIT_POINTER(athrs_fast_nat_recv, sfe_recv);

	return 0;
exit2:
	kobject_put(sfe_ctx->sys_sfe);
exit1:
	return result;
}

/*
 * sfe_exit_if()
 */
void sfe_exit_if(void)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	/*
	 * Unregister our receive callback.
	 */
	RCU_INIT_POINTER(athrs_fast_nat_recv, NULL);

	/*
	 * Wait for all callbacks to complete.
	 */
	rcu_barrier();

	/*
	 * Destroy all connections.
	 */
	sfe_ipv4_destroy_all_rules_for_dev(NULL);
	sfe_ipv6_destroy_all_rules_for_dev(NULL);

	/*
	 * Stop the work queue and flush all pending messages in the queue.
	 */
	cancel_work_sync(&sfe_ctx->work);
	sfe_process_response_msg(&sfe_ctx->work);

	/*
	 * Unregister our sync callback.
	 */
	sfe_ipv4_notify_unregister();
	sfe_ipv6_notify_unregister();

	kobject_put(sfe_ctx->sys_sfe);

	return;
}