/*
 * sfe.c
 *	API for shortcut forwarding engine.
 *
 * Copyright (c) 2015,2016, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/module.h>
#include <linux/version.h>
#include <linux/sysfs.h>
#include <linux/skbuff.h>
#include <net/addrconf.h>
#include <linux/inetdevice.h>
#include <net/pkt_sched.h>
#include <net/vxlan.h>

#include "sfe_debug.h"
#include "sfe_api.h"
#include "sfe.h"
#include "sfe_pppoe.h"
#include "sfe_vlan.h"

extern int max_ipv4_conn;
extern int max_ipv6_conn;

#define SFE_MESSAGE_VERSION 0x1
#define sfe_ipv6_addr_copy(src, dest) memcpy((void *)(dest), (void *)(src), 16)
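/*
 * "Stopped" here means that no stats sync callback is currently registered
 * for that address family (the RCU-protected callback pointer is NULL).
 */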
#define sfe_ipv4_stopped(CTX) (rcu_dereference((CTX)->ipv4_stats_sync_cb) == NULL)
#define sfe_ipv6_stopped(CTX) (rcu_dereference((CTX)->ipv6_stats_sync_cb) == NULL)

typedef enum sfe_exception {
	SFE_EXCEPTION_IPV4_MSG_UNKNOW,
	SFE_EXCEPTION_IPV6_MSG_UNKNOW,
	SFE_EXCEPTION_CONNECTION_INVALID,
	SFE_EXCEPTION_NOT_SUPPORT_BRIDGE,
	SFE_EXCEPTION_TCP_INVALID,
	SFE_EXCEPTION_PROTOCOL_NOT_SUPPORT,
	SFE_EXCEPTION_SRC_DEV_NOT_L3,
	SFE_EXCEPTION_DEST_DEV_NOT_L3,
	SFE_EXCEPTION_CFG_ERR,
	SFE_EXCEPTION_CREATE_FAILED,
	SFE_EXCEPTION_ENQUEUE_FAILED,
	SFE_EXCEPTION_NOT_SUPPORT_6RD,
	SFE_EXCEPTION_NO_SYNC_CB,
	SFE_EXCEPTION_MAX
} sfe_exception_t;

static char *sfe_exception_events_string[SFE_EXCEPTION_MAX] = {
	"IPV4_MSG_UNKNOW",
	"IPV6_MSG_UNKNOW",
	"CONNECTION_INVALID",
	"NOT_SUPPORT_BRIDGE",
	"TCP_INVALID",
	"PROTOCOL_NOT_SUPPORT",
	"SRC_DEV_NOT_L3",
	"DEST_DEV_NOT_L3",
	"CONFIG_ERROR",
	"CREATE_FAILED",
	"ENQUEUE_FAILED",
	"NOT_SUPPORT_6RD",
	"NO_SYNC_CB"
};

/*
 * Message type of queued response message
 */
typedef enum {
	SFE_MSG_TYPE_IPV4,
	SFE_MSG_TYPE_IPV6
} sfe_msg_types_t;

/*
 * Queued response message,
 * will be sent back to the caller via a workqueue
 */
struct sfe_response_msg {
	struct list_head node;
	sfe_msg_types_t type;
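	/*
	 * Flexible payload area: a copy of the queued sfe_ipv4_msg or
	 * sfe_ipv6_msg is stored here by sfe_alloc_response_msg().
	 */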
	void *msg[0];
};

/*
 * SFE context instance, private for SFE
 */
struct sfe_ctx_instance_internal {
	struct sfe_ctx_instance base;	/* Exported SFE context, public to users of SFE */

	/*
	 * Control state.
	 */
	struct kobject *sys_sfe;	/* Sysfs linkage */

	struct list_head msg_queue;	/* Response message queue */
	spinlock_t lock;		/* Lock to protect message queue */

	struct work_struct work;	/* Work to send response messages back to the caller */

	sfe_ipv4_msg_callback_t __rcu ipv4_stats_sync_cb;	/* Callback to call to sync ipv4 statistics */
	void *ipv4_stats_sync_data;	/* Argument for above callback: ipv4_stats_sync_cb */

	sfe_ipv6_msg_callback_t __rcu ipv6_stats_sync_cb;	/* Callback to call to sync ipv6 statistics */
	void *ipv6_stats_sync_data;	/* Argument for above callback: ipv6_stats_sync_cb */

	u32 exceptions[SFE_EXCEPTION_MAX];	/* Statistics for exceptions */

	int32_t l2_feature_support;	/* L2 feature support */

};

static struct sfe_ctx_instance_internal __sfe_ctx;

/*
 * Convert public SFE context to internal context
 */
#define SFE_CTX_TO_PRIVATE(base) (struct sfe_ctx_instance_internal *)(base)
/*
 * Convert internal SFE context to public context
 */
#define SFE_CTX_TO_PUBLIC(intrv) (struct sfe_ctx_instance *)(intrv)

/*
 * sfe_incr_exceptions()
 *	Increase an exception counter.
 *
 * TODO: Merge sfe_ctx stats to ipv4 and ipv6 percpu stats.
 */
static inline void sfe_incr_exceptions(sfe_exception_t except)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);
	sfe_ctx->exceptions[except]++;
	spin_unlock_bh(&sfe_ctx->lock);
}

/*
 * sfe_dev_is_layer_3_interface()
 *	Check if a network device is an IPv4 or IPv6 layer-3 interface
 *
 * @param dev network device to check
 * @param check_v4 when true, check for an IPv4 layer-3 interface (one with an IPv4 address); otherwise check for an IPv6 layer-3 interface (one with an IPv6 address)
 */
inline bool sfe_dev_is_layer_3_interface(struct net_device *dev, bool check_v4)
{
	struct in_device *in4_dev;
	struct inet6_dev *in6_dev;

	BUG_ON(!dev);

	if (likely(check_v4)) {
		/*
		 * Does our input device support IPv4 processing?
		 */
		in4_dev = (struct in_device *)dev->ip_ptr;
		if (unlikely(!in4_dev)) {
			return false;
		}

		/*
		 * Does it have an IPv4 address? If it doesn't, it may still be a MAP-T interface;
		 * otherwise we can't do anything interesting here!
		 */
		if (likely(in4_dev->ifa_list || (dev->priv_flags_ext & IFF_EXT_MAPT))) {
			return true;
		}
		return false;
	}

	/*
	 * Does our input device support IPv6 processing?
	 */
	in6_dev = (struct inet6_dev *)dev->ip6_ptr;
	if (unlikely(!in6_dev)) {
		return false;
	}

	/*
	 * Does it have an IPv6 address? If it doesn't, it may still be a MAP-T interface;
	 * otherwise we can't do anything interesting here!
	 */
	if (likely(!list_empty(&in6_dev->addr_list) || (dev->priv_flags_ext & IFF_EXT_MAPT))) {
		return true;
	}

	return false;
}

/*
 * sfe_clean_response_msg_by_type()
 *	Clean response messages in the queue when ECM exits
 *
 * @param sfe_ctx SFE context
 * @param msg_type message type, ipv4 or ipv6
 */
static void sfe_clean_response_msg_by_type(struct sfe_ctx_instance_internal *sfe_ctx, sfe_msg_types_t msg_type)
{
	struct sfe_response_msg *response, *tmp;

	if (!sfe_ctx) {
		return;
	}

	spin_lock_bh(&sfe_ctx->lock);
	list_for_each_entry_safe(response, tmp, &sfe_ctx->msg_queue, node) {
		if (response->type == msg_type) {
			list_del(&response->node);
			/*
			 * Free response message
			 */
			kfree(response);
		}
	}
	spin_unlock_bh(&sfe_ctx->lock);

}

/*
 * sfe_process_response_msg()
 *	Send all pending response messages to ECM by calling the callback function included in each message
 *
 * @param work work structure
 */
static void sfe_process_response_msg(struct work_struct *work)
{
	struct sfe_ctx_instance_internal *sfe_ctx = container_of(work, struct sfe_ctx_instance_internal, work);
	struct sfe_response_msg *response;

	spin_lock_bh(&sfe_ctx->lock);
	while ((response = list_first_entry_or_null(&sfe_ctx->msg_queue, struct sfe_response_msg, node))) {
		list_del(&response->node);
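		/*
		 * Drop the queue lock while the callback runs; it is re-taken
		 * before the next queue entry is examined.
		 */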
		spin_unlock_bh(&sfe_ctx->lock);
		rcu_read_lock();

		/*
		 * Send response message back to caller
		 */
		if ((response->type == SFE_MSG_TYPE_IPV4) && !sfe_ipv4_stopped(sfe_ctx)) {
			struct sfe_ipv4_msg *msg = (struct sfe_ipv4_msg *)response->msg;
			sfe_ipv4_msg_callback_t callback = (sfe_ipv4_msg_callback_t)msg->cm.cb;
			if (callback) {
				callback((void *)msg->cm.app_data, msg);
			}
		} else if ((response->type == SFE_MSG_TYPE_IPV6) && !sfe_ipv6_stopped(sfe_ctx)) {
			struct sfe_ipv6_msg *msg = (struct sfe_ipv6_msg *)response->msg;
			sfe_ipv6_msg_callback_t callback = (sfe_ipv6_msg_callback_t)msg->cm.cb;
			if (callback) {
				callback((void *)msg->cm.app_data, msg);
			}
		}

		rcu_read_unlock();
		/*
		 * Free response message
		 */
		kfree(response);
		spin_lock_bh(&sfe_ctx->lock);
	}
	spin_unlock_bh(&sfe_ctx->lock);
}

/*
 * sfe_alloc_response_msg()
 *	Alloc and construct new response message
 *
 * @param type message type
 * @param msg used to construct response message if not NULL
 *
 * @return !NULL, success; NULL, failed
 */
static struct sfe_response_msg *
sfe_alloc_response_msg(sfe_msg_types_t type, void *msg)
{
	struct sfe_response_msg *response;
	int size;

	switch (type) {
	case SFE_MSG_TYPE_IPV4:
		size = sizeof(struct sfe_ipv4_msg);
		break;
	case SFE_MSG_TYPE_IPV6:
		size = sizeof(struct sfe_ipv6_msg);
		break;
	default:
		DEBUG_ERROR("message type %d not supported\n", type);
		return NULL;
	}

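	/*
	 * GFP_ATOMIC: this allocation path must not sleep.
	 */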
	response = (struct sfe_response_msg *)kzalloc(sizeof(struct sfe_response_msg) + size, GFP_ATOMIC);
	if (!response) {
		DEBUG_ERROR("allocate memory failed\n");
		return NULL;
	}

	response->type = type;

	if (msg) {
		memcpy(response->msg, msg, size);
	}

	return response;
}

/*
 * sfe_enqueue_msg()
 *	Queue response message
 *
 * @param sfe_ctx SFE context
 * @param response response message to be queued
 */
static inline void sfe_enqueue_msg(struct sfe_ctx_instance_internal *sfe_ctx, struct sfe_response_msg *response)
{
	spin_lock_bh(&sfe_ctx->lock);
	list_add_tail(&response->node, &sfe_ctx->msg_queue);
	spin_unlock_bh(&sfe_ctx->lock);

	schedule_work(&sfe_ctx->work);
}

/*
 * sfe_cmn_msg_init()
 *	Initialize the common message structure.
 *
 * @param ncm message to init
 * @param if_num interface number related with this message
 * @param type message type
 * @param cb callback function to process response of this message
 * @param app_data argument for above callback function
 */
static void sfe_cmn_msg_init(struct sfe_cmn_msg *ncm, u16 if_num, u32 type, u32 len, void *cb, void *app_data)
{
	ncm->interface = if_num;
	ncm->version = SFE_MESSAGE_VERSION;
	ncm->type = type;
	ncm->len = len;
	ncm->cb = (sfe_ptr_t)cb;
	ncm->app_data = (sfe_ptr_t)app_data;
}

/*
 * sfe_ipv4_stats_sync_callback()
 *	Synchronize a connection's state.
 *
 * @param sis SFE statistics from SFE core engine
 */
static void sfe_ipv4_stats_sync_callback(struct sfe_connection_sync *sis)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
	struct sfe_ipv4_msg msg;
	struct sfe_ipv4_conn_sync *sync_msg;
	sfe_ipv4_msg_callback_t sync_cb;

	rcu_read_lock();
	sync_cb = rcu_dereference(sfe_ctx->ipv4_stats_sync_cb);
	if (!sync_cb) {
		rcu_read_unlock();
		sfe_incr_exceptions(SFE_EXCEPTION_NO_SYNC_CB);
		return;
	}

	sync_msg = &msg.msg.conn_stats;

	memset(&msg, 0, sizeof(msg));
	sfe_cmn_msg_init(&msg.cm, 0, SFE_RX_CONN_STATS_SYNC_MSG,
			sizeof(struct sfe_ipv4_conn_sync), NULL, NULL);

	/*
	 * Fill connection specific information
	 */
	sync_msg->protocol = (u8)sis->protocol;
	sync_msg->flow_ip = sis->src_ip.ip;
	sync_msg->flow_ip_xlate = sis->src_ip_xlate.ip;
	sync_msg->flow_ident = sis->src_port;
	sync_msg->flow_ident_xlate = sis->src_port_xlate;

	sync_msg->return_ip = sis->dest_ip.ip;
	sync_msg->return_ip_xlate = sis->dest_ip_xlate.ip;
	sync_msg->return_ident = sis->dest_port;
	sync_msg->return_ident_xlate = sis->dest_port_xlate;

	/*
	 * Fill TCP protocol specific information
	 */
	if (sis->protocol == IPPROTO_TCP) {
		sync_msg->flow_max_window = sis->src_td_max_window;
		sync_msg->flow_end = sis->src_td_end;
		sync_msg->flow_max_end = sis->src_td_max_end;

		sync_msg->return_max_window = sis->dest_td_max_window;
		sync_msg->return_end = sis->dest_td_end;
		sync_msg->return_max_end = sis->dest_td_max_end;
	}

	/*
	 * Fill statistics information
	 */
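	/*
	 * Note: the flow side's Rx counters mirror the return side's Tx
	 * counters, and vice versa.
	 */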
	sync_msg->flow_rx_packet_count = sis->src_new_packet_count;
	sync_msg->flow_rx_byte_count = sis->src_new_byte_count;
	sync_msg->flow_tx_packet_count = sis->dest_new_packet_count;
	sync_msg->flow_tx_byte_count = sis->dest_new_byte_count;

	sync_msg->return_rx_packet_count = sis->dest_new_packet_count;
	sync_msg->return_rx_byte_count = sis->dest_new_byte_count;
	sync_msg->return_tx_packet_count = sis->src_new_packet_count;
	sync_msg->return_tx_byte_count = sis->src_new_byte_count;

	/*
	 * Fill expiration time to extend, in units of msec
	 */
	sync_msg->inc_ticks = (((u32)sis->delta_jiffies) * MSEC_PER_SEC)/HZ;

	/*
	 * Fill other information
	 */
	switch (sis->reason) {
	case SFE_SYNC_REASON_DESTROY:
		sync_msg->reason = SFE_RULE_SYNC_REASON_DESTROY;
		break;
	case SFE_SYNC_REASON_FLUSH:
		sync_msg->reason = SFE_RULE_SYNC_REASON_FLUSH;
		break;
	default:
		sync_msg->reason = SFE_RULE_SYNC_REASON_STATS;
		break;
	}

	/*
	 * SFE sync is executed in a timer context, so we can redirect it to ECM directly.
	 */
	sync_cb(sfe_ctx->ipv4_stats_sync_data, &msg);
	rcu_read_unlock();
}

/*
 * sfe_recv_parse_l2()
 *	Parse L2 headers
 *
 * Returns true if the packet is parsed and false otherwise.
 */
static bool sfe_recv_parse_l2(struct net_device *dev, struct sk_buff *skb, struct sfe_l2_info *l2_info)
{
	/*
	 * l2_hdr_offset will not change as we parse more L2.5 headers
	 * TODO: Move from storing offsets to storing pointers
	 */
	sfe_l2_hdr_offset_set(l2_info, ((skb->data - ETH_HLEN) - skb->head));

	/*
	 * VLAN parsing
	 */
	if (unlikely(!sfe_vlan_check_and_parse_tag(skb, l2_info))) {
		return false;
	}

	/*
	 * PPPoE parsing
	 */
	if (htons(ETH_P_PPP_SES) == skb->protocol) {
		/*
		 * Parse only PPPoE session packets
		 * skb->data is pointing to PPPoE hdr
		 */
		if (!sfe_pppoe_parse_hdr(skb, l2_info)) {

			/*
			 * On a PPPoE exception, return from here without modifying skb->data.
			 * This includes non-IPv4/v6 cases also
			 */
			return false;
		}

		/*
		 * Pull by L2 header size
		 */
		__skb_pull(skb, sfe_l2_hdr_size_get(l2_info));
	}
	return true;
}

/*
 * sfe_recv_undo_parse_l2()
 */
static void sfe_recv_undo_parse_l2(struct net_device *dev, struct sk_buff *skb, struct sfe_l2_info *l2_info)
{
	/*
	 * PPPoE undo
	 */
	__skb_push(skb, sfe_l2_hdr_size_get(l2_info));

	/*
	 * VLAN undo
	 */
	sfe_vlan_undo_parse(skb, l2_info);
}

/*
 * sfe_create_ipv4_rule_msg()
 *	Convert create message format from ecm to sfe
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv4 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_create_ipv4_rule_msg(struct sfe_ctx_instance_internal *sfe_ctx, struct sfe_ipv4_msg *msg)
{
	struct net_device *src_dev = NULL;
	struct net_device *dest_dev = NULL;
	struct sfe_response_msg *response;
	enum sfe_cmn_response ret = SFE_TX_SUCCESS;
	bool is_routed = true;
	bool cfg_err;

	response = sfe_alloc_response_msg(SFE_MSG_TYPE_IPV4, msg);
	if (!response) {
		sfe_incr_exceptions(SFE_EXCEPTION_ENQUEUE_FAILED);
		return SFE_TX_FAILURE_QUEUE;
	}

	if (!(msg->msg.rule_create.valid_flags & SFE_RULE_CREATE_CONN_VALID)) {
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CONNECTION_INVALID);
		goto failed_ret;
	}

	switch (msg->msg.rule_create.tuple.protocol) {
	case IPPROTO_TCP:
		if (!(msg->msg.rule_create.valid_flags & SFE_RULE_CREATE_TCP_VALID)) {
			ret = SFE_CMN_RESPONSE_EMSG;
			sfe_incr_exceptions(SFE_EXCEPTION_TCP_INVALID);
			goto failed_ret;
		}

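		/*
		 * Note: deliberate fall-through; after validating the TCP
		 * flags, TCP shares the UDP case's break below.
		 */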
	case IPPROTO_UDP:
		break;

	default:
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_PROTOCOL_NOT_SUPPORT);
		goto failed_ret;
	}

	/*
	 * Bridge flows are accelerated if L2 feature is enabled.
	 */
	if (msg->msg.rule_create.rule_flags & SFE_RULE_CREATE_FLAG_BRIDGE_FLOW) {
		if (!sfe_is_l2_feature_enabled()) {
			ret = SFE_CMN_RESPONSE_EINTERFACE;
			sfe_incr_exceptions(SFE_EXCEPTION_NOT_SUPPORT_BRIDGE);
			goto failed_ret;
		}

		is_routed = false;
	}

	/*
	 * Does our input device support IP processing?
	 */
	src_dev = dev_get_by_index(&init_net, msg->msg.rule_create.conn_rule.flow_top_interface_num);
	if (!src_dev || (is_routed && !sfe_dev_is_layer_3_interface(src_dev, true) && !netif_is_vxlan(src_dev))) {
		ret = SFE_CMN_RESPONSE_EINTERFACE;
		sfe_incr_exceptions(SFE_EXCEPTION_SRC_DEV_NOT_L3);
		goto failed_ret;
	}

	/*
	 * Check whether L2 feature is disabled and rule flag is configured to use bottom interface
	 */
	cfg_err = (msg->msg.rule_create.rule_flags & SFE_RULE_CREATE_FLAG_USE_FLOW_BOTTOM_INTERFACE) && !sfe_is_l2_feature_enabled();
	if (cfg_err) {
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CFG_ERR);
		goto failed_ret;
	}

	/*
	 * Does our output device support IP processing?
	 */
	dest_dev = dev_get_by_index(&init_net, msg->msg.rule_create.conn_rule.return_top_interface_num);
	if (!dest_dev || (is_routed && !sfe_dev_is_layer_3_interface(dest_dev, true) && !netif_is_vxlan(dest_dev))) {
		ret = SFE_CMN_RESPONSE_EINTERFACE;
		sfe_incr_exceptions(SFE_EXCEPTION_DEST_DEV_NOT_L3);
		goto failed_ret;
	}

	/*
	 * Check whether L2 feature is disabled and rule flag is configured to use bottom interface
	 */
	cfg_err = (msg->msg.rule_create.rule_flags & SFE_RULE_CREATE_FLAG_USE_RETURN_BOTTOM_INTERFACE) && !sfe_is_l2_feature_enabled();
	if (cfg_err) {
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CFG_ERR);
		goto failed_ret;
	}

	if (!sfe_ipv4_create_rule(&msg->msg.rule_create)) {
		/* success */
		ret = SFE_CMN_RESPONSE_ACK;
	} else {
		/* Failed */
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CREATE_FAILED);
	}

	/*
	 * Fall through
	 */
failed_ret:
	if (src_dev) {
		dev_put(src_dev);
	}

	if (dest_dev) {
		dev_put(dest_dev);
	}

	/*
	 * Try to queue response message
	 */
	((struct sfe_ipv4_msg *)response->msg)->cm.response = msg->cm.response = ret;
	sfe_enqueue_msg(sfe_ctx, response);

	return SFE_TX_SUCCESS;
}

/*
 * sfe_destroy_ipv4_rule_msg()
 *	Convert destroy message format from ecm to sfe
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv4 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_destroy_ipv4_rule_msg(struct sfe_ctx_instance_internal *sfe_ctx, struct sfe_ipv4_msg *msg)
{
	struct sfe_response_msg *response;

	response = sfe_alloc_response_msg(SFE_MSG_TYPE_IPV4, msg);
	if (!response) {
		sfe_incr_exceptions(SFE_EXCEPTION_ENQUEUE_FAILED);
		return SFE_TX_FAILURE_QUEUE;
	}

	sfe_ipv4_destroy_rule(&msg->msg.rule_destroy);

	/*
	 * Try to queue response message
	 */
	((struct sfe_ipv4_msg *)response->msg)->cm.response = msg->cm.response = SFE_CMN_RESPONSE_ACK;
	sfe_enqueue_msg(sfe_ctx, response);

	return SFE_TX_SUCCESS;
}

/*
 * sfe_ipv4_tx()
 *	Transmit an IPv4 message to the sfe
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv4 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_ipv4_tx(struct sfe_ctx_instance *sfe_ctx, struct sfe_ipv4_msg *msg)
{
	switch (msg->cm.type) {
	case SFE_TX_CREATE_RULE_MSG:
		return sfe_create_ipv4_rule_msg(SFE_CTX_TO_PRIVATE(sfe_ctx), msg);
	case SFE_TX_DESTROY_RULE_MSG:
		return sfe_destroy_ipv4_rule_msg(SFE_CTX_TO_PRIVATE(sfe_ctx), msg);
	default:
		sfe_incr_exceptions(SFE_EXCEPTION_IPV4_MSG_UNKNOW);
		return SFE_TX_FAILURE_NOT_ENABLED;
	}
}
EXPORT_SYMBOL(sfe_ipv4_tx);

/*
 * sfe_ipv4_msg_init()
 *	Initialize IPv4 message.
 */
void sfe_ipv4_msg_init(struct sfe_ipv4_msg *nim, u16 if_num, u32 type, u32 len,
		       sfe_ipv4_msg_callback_t cb, void *app_data)
{
	sfe_cmn_msg_init(&nim->cm, if_num, type, len, (void *)cb, app_data);
}
EXPORT_SYMBOL(sfe_ipv4_msg_init);

/*
 * sfe_ipv4_max_conn_count()
 *	Return the maximum number of connection entries that SFE supports
 */
int sfe_ipv4_max_conn_count(void)
{
	return max_ipv4_conn;
}
EXPORT_SYMBOL(sfe_ipv4_max_conn_count);

/*
 * sfe_ipv4_notify_register()
 *	Register a notifier callback for IPv4 messages from SFE
 *
 * @param cb The callback pointer
 * @param app_data The application context for this message
 *
 * @return struct sfe_ctx_instance * The SFE context
 */
struct sfe_ctx_instance *sfe_ipv4_notify_register(sfe_ipv4_msg_callback_t cb, void *app_data)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);
	/*
	 * Hook the shortcut sync callback.
	 */
	if (cb && !sfe_ctx->ipv4_stats_sync_cb) {
		sfe_ipv4_register_sync_rule_callback(sfe_ipv4_stats_sync_callback);
	}

	rcu_assign_pointer(sfe_ctx->ipv4_stats_sync_cb, cb);
	sfe_ctx->ipv4_stats_sync_data = app_data;

	spin_unlock_bh(&sfe_ctx->lock);

	return SFE_CTX_TO_PUBLIC(sfe_ctx);
}
EXPORT_SYMBOL(sfe_ipv4_notify_register);

/*
 * sfe_ipv4_notify_unregister()
 *	Unregister a notifier callback for IPv4 messages from SFE
 */
void sfe_ipv4_notify_unregister(void)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);
	/*
	 * Unregister our sync callback.
	 */
	if (sfe_ctx->ipv4_stats_sync_cb) {
		sfe_ipv4_register_sync_rule_callback(NULL);
		rcu_assign_pointer(sfe_ctx->ipv4_stats_sync_cb, NULL);
		sfe_ctx->ipv4_stats_sync_data = NULL;
	}
	spin_unlock_bh(&sfe_ctx->lock);

	sfe_clean_response_msg_by_type(sfe_ctx, SFE_MSG_TYPE_IPV4);

	return;
}
EXPORT_SYMBOL(sfe_ipv4_notify_unregister);

/*
 * sfe_ipv6_stats_sync_callback()
 *	Synchronize a connection's state.
 */
static void sfe_ipv6_stats_sync_callback(struct sfe_connection_sync *sis)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
	struct sfe_ipv6_msg msg;
	struct sfe_ipv6_conn_sync *sync_msg;
	sfe_ipv6_msg_callback_t sync_cb;

	rcu_read_lock();
	sync_cb = rcu_dereference(sfe_ctx->ipv6_stats_sync_cb);
	if (!sync_cb) {
		rcu_read_unlock();
		sfe_incr_exceptions(SFE_EXCEPTION_NO_SYNC_CB);
		return;
	}

	sync_msg = &msg.msg.conn_stats;

	memset(&msg, 0, sizeof(msg));
	sfe_cmn_msg_init(&msg.cm, 0, SFE_RX_CONN_STATS_SYNC_MSG,
			sizeof(struct sfe_ipv6_conn_sync), NULL, NULL);

	/*
	 * Fill connection specific information
	 */
	sync_msg->protocol = (u8)sis->protocol;
	sfe_ipv6_addr_copy(sis->src_ip.ip6, sync_msg->flow_ip);
	sync_msg->flow_ident = sis->src_port;

	sfe_ipv6_addr_copy(sis->dest_ip.ip6, sync_msg->return_ip);
	sync_msg->return_ident = sis->dest_port;

	/*
	 * Fill TCP protocol specific information
	 */
	if (sis->protocol == IPPROTO_TCP) {
		sync_msg->flow_max_window = sis->src_td_max_window;
		sync_msg->flow_end = sis->src_td_end;
		sync_msg->flow_max_end = sis->src_td_max_end;

		sync_msg->return_max_window = sis->dest_td_max_window;
		sync_msg->return_end = sis->dest_td_end;
		sync_msg->return_max_end = sis->dest_td_max_end;
	}

	/*
	 * Fill statistics information
	 */
	sync_msg->flow_rx_packet_count = sis->src_new_packet_count;
	sync_msg->flow_rx_byte_count = sis->src_new_byte_count;
	sync_msg->flow_tx_packet_count = sis->dest_new_packet_count;
	sync_msg->flow_tx_byte_count = sis->dest_new_byte_count;

	sync_msg->return_rx_packet_count = sis->dest_new_packet_count;
	sync_msg->return_rx_byte_count = sis->dest_new_byte_count;
	sync_msg->return_tx_packet_count = sis->src_new_packet_count;
	sync_msg->return_tx_byte_count = sis->src_new_byte_count;

	/*
	 * Fill expiration time to extend, in units of msec
	 */
	sync_msg->inc_ticks = (((u32)sis->delta_jiffies) * MSEC_PER_SEC)/HZ;

	/*
	 * Fill other information
	 */
	switch (sis->reason) {
	case SFE_SYNC_REASON_DESTROY:
		sync_msg->reason = SFE_RULE_SYNC_REASON_DESTROY;
		break;
	case SFE_SYNC_REASON_FLUSH:
		sync_msg->reason = SFE_RULE_SYNC_REASON_FLUSH;
		break;
	default:
		sync_msg->reason = SFE_RULE_SYNC_REASON_STATS;
		break;
	}

	/*
	 * SFE sync is executed in a timer context, so we can redirect it to ECM directly.
	 */
	sync_cb(sfe_ctx->ipv6_stats_sync_data, &msg);
	rcu_read_unlock();
}

/*
 * sfe_create_ipv6_rule_msg()
 *	Convert create message format from ecm to sfe
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv6 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_create_ipv6_rule_msg(struct sfe_ctx_instance_internal *sfe_ctx, struct sfe_ipv6_msg *msg)
{
	struct net_device *src_dev = NULL;
	struct net_device *dest_dev = NULL;
	struct sfe_response_msg *response;
	enum sfe_cmn_response ret = SFE_TX_SUCCESS;
	bool is_routed = true;
	bool cfg_err;

	response = sfe_alloc_response_msg(SFE_MSG_TYPE_IPV6, msg);
	if (!response) {
		sfe_incr_exceptions(SFE_EXCEPTION_ENQUEUE_FAILED);
		return SFE_TX_FAILURE_QUEUE;
	}

	if (!(msg->msg.rule_create.valid_flags & SFE_RULE_CREATE_CONN_VALID)) {
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CONNECTION_INVALID);
		goto failed_ret;
	}

	/*
	 * Bridge flows are accelerated if L2 feature is enabled.
	 */
	if (msg->msg.rule_create.rule_flags & SFE_RULE_CREATE_FLAG_BRIDGE_FLOW) {
		if (!sfe_is_l2_feature_enabled()) {
			ret = SFE_CMN_RESPONSE_EINTERFACE;
			sfe_incr_exceptions(SFE_EXCEPTION_NOT_SUPPORT_BRIDGE);
			goto failed_ret;
		}
		is_routed = false;
	}

	switch (msg->msg.rule_create.tuple.protocol) {

	case IPPROTO_TCP:
		if (!(msg->msg.rule_create.valid_flags & SFE_RULE_CREATE_TCP_VALID)) {
			ret = SFE_CMN_RESPONSE_EMSG;
			sfe_incr_exceptions(SFE_EXCEPTION_TCP_INVALID);
			goto failed_ret;
		}

		break;

	case IPPROTO_UDP:
		break;

	default:
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_PROTOCOL_NOT_SUPPORT);
		goto failed_ret;
	}

	/*
	 * Does our input device support IP processing?
	 */
	src_dev = dev_get_by_index(&init_net, msg->msg.rule_create.conn_rule.flow_top_interface_num);
	if (!src_dev || (is_routed && !sfe_dev_is_layer_3_interface(src_dev, false) && !netif_is_vxlan(src_dev))) {
		ret = SFE_CMN_RESPONSE_EINTERFACE;
		sfe_incr_exceptions(SFE_EXCEPTION_SRC_DEV_NOT_L3);
		goto failed_ret;
	}

	/*
	 * Check whether L2 feature is disabled and rule flag is configured to use bottom interface
	 */
	cfg_err = (msg->msg.rule_create.rule_flags & SFE_RULE_CREATE_FLAG_USE_FLOW_BOTTOM_INTERFACE) && !sfe_is_l2_feature_enabled();
	if (cfg_err) {
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CFG_ERR);
		goto failed_ret;
	}

	/*
	 * Does our output device support IP processing?
	 */
	dest_dev = dev_get_by_index(&init_net, msg->msg.rule_create.conn_rule.return_top_interface_num);
	if (!dest_dev || (is_routed && !sfe_dev_is_layer_3_interface(dest_dev, false) && !netif_is_vxlan(dest_dev))) {
		ret = SFE_CMN_RESPONSE_EINTERFACE;
		sfe_incr_exceptions(SFE_EXCEPTION_DEST_DEV_NOT_L3);
		goto failed_ret;
	}

	/*
	 * Check whether L2 feature is disabled and rule flag is configured to use bottom interface
	 */
	cfg_err = (msg->msg.rule_create.rule_flags & SFE_RULE_CREATE_FLAG_USE_RETURN_BOTTOM_INTERFACE) && !sfe_is_l2_feature_enabled();
	if (cfg_err) {
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CFG_ERR);
		goto failed_ret;
	}

	if (!sfe_ipv6_create_rule(&msg->msg.rule_create)) {
		/* success */
		ret = SFE_CMN_RESPONSE_ACK;
	} else {
		/* Failed */
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CREATE_FAILED);
	}

	/*
	 * Fall through
	 */
failed_ret:
	if (src_dev) {
		dev_put(src_dev);
	}

	if (dest_dev) {
		dev_put(dest_dev);
	}

	/*
	 * Try to queue response message
	 */
	((struct sfe_ipv6_msg *)response->msg)->cm.response = msg->cm.response = ret;
	sfe_enqueue_msg(sfe_ctx, response);

	return SFE_TX_SUCCESS;
}

/*
 * sfe_destroy_ipv6_rule_msg()
 *	Convert destroy message format from ecm to sfe
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv6 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_destroy_ipv6_rule_msg(struct sfe_ctx_instance_internal *sfe_ctx, struct sfe_ipv6_msg *msg)
{
	struct sfe_response_msg *response;

	response = sfe_alloc_response_msg(SFE_MSG_TYPE_IPV6, msg);
	if (!response) {
		sfe_incr_exceptions(SFE_EXCEPTION_ENQUEUE_FAILED);
		return SFE_TX_FAILURE_QUEUE;
	}

	sfe_ipv6_destroy_rule(&msg->msg.rule_destroy);

	/*
	 * Try to queue response message
	 */
	((struct sfe_ipv6_msg *)response->msg)->cm.response = msg->cm.response = SFE_CMN_RESPONSE_ACK;
	sfe_enqueue_msg(sfe_ctx, response);

	return SFE_TX_SUCCESS;
}

/*
 * sfe_ipv6_tx()
 *	Transmit an IPv6 message to the sfe
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv6 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_ipv6_tx(struct sfe_ctx_instance *sfe_ctx, struct sfe_ipv6_msg *msg)
{
	switch (msg->cm.type) {
	case SFE_TX_CREATE_RULE_MSG:
		return sfe_create_ipv6_rule_msg(SFE_CTX_TO_PRIVATE(sfe_ctx), msg);
	case SFE_TX_DESTROY_RULE_MSG:
		return sfe_destroy_ipv6_rule_msg(SFE_CTX_TO_PRIVATE(sfe_ctx), msg);
	default:
		sfe_incr_exceptions(SFE_EXCEPTION_IPV6_MSG_UNKNOW);
		return SFE_TX_FAILURE_NOT_ENABLED;
	}
}
EXPORT_SYMBOL(sfe_ipv6_tx);

/*
 * sfe_ipv6_msg_init()
 *	Initialize IPv6 message.
 */
void sfe_ipv6_msg_init(struct sfe_ipv6_msg *nim, u16 if_num, u32 type, u32 len,
		       sfe_ipv6_msg_callback_t cb, void *app_data)
{
	sfe_cmn_msg_init(&nim->cm, if_num, type, len, (void *)cb, app_data);
}
EXPORT_SYMBOL(sfe_ipv6_msg_init);

/*
 * sfe_ipv6_max_conn_count()
 *	Return the maximum number of connection entries that SFE supports
 */
int sfe_ipv6_max_conn_count(void)
{
	return max_ipv6_conn;
}
EXPORT_SYMBOL(sfe_ipv6_max_conn_count);

/*
 * sfe_ipv6_notify_register()
 *	Register a notifier callback for IPv6 messages from SFE
 *
 * @param cb The callback pointer
 * @param app_data The application context for this message
 *
 * @return struct sfe_ctx_instance * The SFE context
 */
struct sfe_ctx_instance *sfe_ipv6_notify_register(sfe_ipv6_msg_callback_t cb, void *app_data)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);
	/*
	 * Hook the shortcut sync callback.
	 */
	if (cb && !sfe_ctx->ipv6_stats_sync_cb) {
		sfe_ipv6_register_sync_rule_callback(sfe_ipv6_stats_sync_callback);
	}

	rcu_assign_pointer(sfe_ctx->ipv6_stats_sync_cb, cb);
	sfe_ctx->ipv6_stats_sync_data = app_data;

	spin_unlock_bh(&sfe_ctx->lock);

	return SFE_CTX_TO_PUBLIC(sfe_ctx);
}
EXPORT_SYMBOL(sfe_ipv6_notify_register);

/*
 * sfe_ipv6_notify_unregister()
 *	Unregister a notifier callback for IPv6 messages from SFE
 */
void sfe_ipv6_notify_unregister(void)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);
	/*
	 * Unregister our sync callback.
	 */
	if (sfe_ctx->ipv6_stats_sync_cb) {
		sfe_ipv6_register_sync_rule_callback(NULL);
		rcu_assign_pointer(sfe_ctx->ipv6_stats_sync_cb, NULL);
		sfe_ctx->ipv6_stats_sync_data = NULL;
	}
	spin_unlock_bh(&sfe_ctx->lock);

	sfe_clean_response_msg_by_type(sfe_ctx, SFE_MSG_TYPE_IPV6);

	return;
}
EXPORT_SYMBOL(sfe_ipv6_notify_unregister);

/*
 * sfe_tun6rd_tx()
 *	Transmit a tun6rd message to sfe engine
 */
sfe_tx_status_t sfe_tun6rd_tx(struct sfe_ctx_instance *sfe_ctx, struct sfe_tun6rd_msg *msg)
{
	sfe_incr_exceptions(SFE_EXCEPTION_NOT_SUPPORT_6RD);
	return SFE_TX_FAILURE_NOT_ENABLED;
}
EXPORT_SYMBOL(sfe_tun6rd_tx);

/*
 * sfe_tun6rd_msg_init()
 *	Initialize sfe_tun6rd msg.
 */
void sfe_tun6rd_msg_init(struct sfe_tun6rd_msg *ncm, u16 if_num, u32 type, u32 len, void *cb, void *app_data)
{
	sfe_cmn_msg_init(&ncm->cm, if_num, type, len, cb, app_data);
}
EXPORT_SYMBOL(sfe_tun6rd_msg_init);

/*
 * sfe_recv()
 *	Handle packet receives.
 *
 * Returns 1 if the packet is forwarded or 0 if it isn't.
 */
int sfe_recv(struct sk_buff *skb)
{
	struct net_device *dev;
	struct sfe_l2_info l2_info;
	int ret;

	/*
	 * We know that for the vast majority of packets we need the transport
	 * layer header so we may as well start to fetch it now!
	 */
	prefetch(skb->data + 32);
	barrier();

	dev = skb->dev;

	/*
	 * Setting parse flags to 0 since l2_info is passed for the non-L2.5 header case as well
	 */
	l2_info.parse_flags = 0;
	l2_info.l2_hdr_size = 0;
	l2_info.vlan_hdr_cnt = 0;

#ifdef CONFIG_NET_CLS_ACT
	/*
	 * If an ingress Qdisc is configured and the packet has not been processed by it yet,
	 * we cannot accelerate this packet.
	 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0))
	if (dev->ingress_queue && !(skb->tc_verd & TC_NCLS)) {
		return 0;
	}
#else
	if (rcu_access_pointer(dev->miniq_ingress) && !skb->tc_skip_classify) {
		return 0;
	}
#endif
#endif

	/*
	 * If l2_feature is enabled, we need not check whether the src dev is an L3 interface, since bridge flow offload is supported.
	 * If l2_feature is disabled, we make sure the src dev is an L3 interface to avoid the cost of rule lookup for L2 flows.
	 */
	switch (ntohs(skb->protocol)) {
	case ETH_P_IP:
		if (likely(sfe_is_l2_feature_enabled()) || sfe_dev_is_layer_3_interface(dev, true)) {
			return sfe_ipv4_recv(dev, skb, &l2_info, false);
		}

		DEBUG_TRACE("No IPv4 address for device: %s skb=%px\n", dev->name, skb);
		return 0;

	case ETH_P_IPV6:
		if (likely(sfe_is_l2_feature_enabled()) || sfe_dev_is_layer_3_interface(dev, false)) {
			return sfe_ipv6_recv(dev, skb, &l2_info, false);
		}

		DEBUG_TRACE("No IPv6 address for device: %s skb=%px\n", dev->name, skb);
		return 0;

	default:
		break;
	}

	/*
	 * Stop L2 processing if L2 feature is disabled.
	 */
	if (!sfe_is_l2_feature_enabled()) {
		DEBUG_TRACE("Unsupported protocol %#x %s (L2 feature is disabled) skb=%px\n",
				ntohs(skb->protocol), dev->name, skb);
		return 0;
	}

	/*
	 * Parse the L2 headers to find the L3 protocol and the L2 header offset
	 */
	if (unlikely(!sfe_recv_parse_l2(dev, skb, &l2_info))) {
		DEBUG_TRACE("%px: Invalid L2.5 header format with protocol : %x\n", skb, ntohs(skb->protocol));
		goto send_to_linux;
	}

	/*
	 * Protocol in l2_info is expected to be in host byte order.
	 * PPPoE is doing it in the sfe_pppoe_parse_hdr()
	 */
	if (likely(l2_info.protocol == ETH_P_IP)) {
		ret = sfe_ipv4_recv(dev, skb, &l2_info, false);
		if (unlikely(!ret)) {
			goto send_to_linux;
		}
		return ret;
	}

	if (likely(l2_info.protocol == ETH_P_IPV6)) {
		ret = sfe_ipv6_recv(dev, skb, &l2_info, false);
		if (unlikely(!ret)) {
			goto send_to_linux;
		}
		return ret;
	}

	DEBUG_TRACE("Non-IP(%x) %s skb=%px skb_vlan:%x/%x/%x skb_proto=%x\n",
			l2_info.protocol, dev->name, skb,
			ntohs(skb->vlan_proto), skb->vlan_tci, skb_vlan_tag_present(skb),
			htons(skb->protocol));

send_to_linux:
	/*
	 * Push the data back before sending to linux if -
	 * a. There is any exception from IPV4/V6
	 * b. If the next protocol is neither IPV4 nor IPV6
	 */
	sfe_recv_undo_parse_l2(dev, skb, &l2_info);

	return 0;
}

/*
 * sfe_get_exceptions()
 *	Dump exception counters
 */
static ssize_t sfe_get_exceptions(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	int idx, len;
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);
	for (len = 0, idx = 0; idx < SFE_EXCEPTION_MAX; idx++) {
		if (sfe_ctx->exceptions[idx]) {
			len += snprintf(buf + len, (ssize_t)(PAGE_SIZE - len), "%s = %d\n", sfe_exception_events_string[idx], sfe_ctx->exceptions[idx]);
		}
	}
	spin_unlock_bh(&sfe_ctx->lock);

	return len;
}

/*
 * sysfs attributes.
 */
static const struct device_attribute sfe_exceptions_attr =
	__ATTR(exceptions, S_IRUGO, sfe_get_exceptions, NULL);

/*
 * sfe_is_l2_feature_enabled()
 *	Check whether the L2 feature flag is enabled or not (VLAN, PPPoE, bridge and tunnels)
 *
 * A 32-bit read is atomic. No need of locks.
 */
bool sfe_is_l2_feature_enabled()
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
	return (sfe_ctx->l2_feature_support == 1);
}
EXPORT_SYMBOL(sfe_is_l2_feature_enabled);

/*
 * sfe_get_l2_feature()
 *	Report whether the L2 feature is enabled or disabled
 */
ssize_t sfe_get_l2_feature(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
	ssize_t len;

	spin_lock_bh(&sfe_ctx->lock);
	len = snprintf(buf, (ssize_t)(PAGE_SIZE), "L2 feature is %s\n", sfe_ctx->l2_feature_support ? "enabled" : "disabled");
	spin_unlock_bh(&sfe_ctx->lock);
	return len;
}

/*
 * sfe_set_l2_feature()
 *	Enable or disable the L2 feature flag.
 */
ssize_t sfe_set_l2_feature(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	unsigned long val;
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
	int ret;
	ret = sscanf(buf, "%lu", &val);

	if (ret != 1) {
		pr_err("Wrong input, %s\n", buf);
		return -EINVAL;
	}

	if (val != 1 && val != 0) {
		pr_err("Input should be either 1 or 0, (%s)\n", buf);
		return -EINVAL;
	}

	spin_lock_bh(&sfe_ctx->lock);

	if (sfe_ctx->l2_feature_support && val) {
		spin_unlock_bh(&sfe_ctx->lock);
		pr_err("L2 feature is already enabled\n");
		return -EINVAL;
	}

	if (!sfe_ctx->l2_feature_support && !val) {
		spin_unlock_bh(&sfe_ctx->lock);
		pr_err("L2 feature is already disabled\n");
		return -EINVAL;
	}

	sfe_ctx->l2_feature_support = val;
	spin_unlock_bh(&sfe_ctx->lock);

	return count;
}

static const struct device_attribute sfe_l2_feature_attr =
	__ATTR(l2_feature, 0644, sfe_get_l2_feature, sfe_set_l2_feature);

/*
 * sfe_init_if()
 */
int sfe_init_if(void)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
	int result = -1;

	/*
	 * L2 feature is disabled by default
	 */
	sfe_ctx->l2_feature_support = 0;

	/*
	 * Create sys/sfe
	 */
	sfe_ctx->sys_sfe = kobject_create_and_add("sfe", NULL);
	if (!sfe_ctx->sys_sfe) {
		DEBUG_ERROR("failed to register sfe\n");
		goto exit1;
	}

	/*
	 * Create sys/sfe/exceptions
	 */
	result = sysfs_create_file(sfe_ctx->sys_sfe, &sfe_exceptions_attr.attr);
	if (result) {
		DEBUG_ERROR("failed to register exceptions file: %d\n", result);
		goto exit2;
	}

	result = sysfs_create_file(sfe_ctx->sys_sfe, &sfe_l2_feature_attr.attr);
	if (result) {
		DEBUG_ERROR("failed to register L2 feature flag sysfs file: %d\n", result);
		goto exit2;
	}

	spin_lock_init(&sfe_ctx->lock);

	INIT_LIST_HEAD(&sfe_ctx->msg_queue);
	INIT_WORK(&sfe_ctx->work, sfe_process_response_msg);

	/*
	 * Hook the receive path in the network stack.
	 */
	BUG_ON(athrs_fast_nat_recv);
	RCU_INIT_POINTER(athrs_fast_nat_recv, sfe_recv);

	return 0;
exit2:
	kobject_put(sfe_ctx->sys_sfe);
exit1:
	return result;
}

/*
 * sfe_exit_if()
 */
void sfe_exit_if(void)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	/*
	 * Unregister our receive callback.
	 */
	RCU_INIT_POINTER(athrs_fast_nat_recv, NULL);

	/*
	 * Wait for all callbacks to complete.
	 */
	rcu_barrier();

	/*
	 * Destroy all connections.
	 */
	sfe_ipv4_destroy_all_rules_for_dev(NULL);
	sfe_ipv6_destroy_all_rules_for_dev(NULL);

	/*
	 * Stop the work queue and flush all pending messages in the queue
	 */
	cancel_work_sync(&sfe_ctx->work);
	sfe_process_response_msg(&sfe_ctx->work);

	/*
	 * Unregister our sync callbacks.
	 */
	sfe_ipv4_notify_unregister();
	sfe_ipv6_notify_unregister();

	kobject_put(sfe_ctx->sys_sfe);

	return;
}