/*
 * sfe.c
 *	API for shortcut forwarding engine.
 *
 * Copyright (c) 2015,2016, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/module.h>
#include <linux/version.h>
#include <linux/sysfs.h>
#include <linux/skbuff.h>
#include <net/addrconf.h>
#include <linux/inetdevice.h>
#include <net/pkt_sched.h>
#include <net/vxlan.h>

#include "sfe_debug.h"
#include "sfe_api.h"
#include "sfe.h"
#include "sfe_pppoe.h"

extern int max_ipv4_conn;
extern int max_ipv6_conn;

#define SFE_MESSAGE_VERSION 0x1
#define sfe_ipv6_addr_copy(src, dest) memcpy((void *)(dest), (void *)(src), 16)
#define sfe_ipv4_stopped(CTX) (rcu_dereference((CTX)->ipv4_stats_sync_cb) == NULL)
#define sfe_ipv6_stopped(CTX) (rcu_dereference((CTX)->ipv6_stats_sync_cb) == NULL)

typedef enum sfe_exception {
	SFE_EXCEPTION_IPV4_MSG_UNKNOW,
	SFE_EXCEPTION_IPV6_MSG_UNKNOW,
	SFE_EXCEPTION_CONNECTION_INVALID,
	SFE_EXCEPTION_NOT_SUPPORT_BRIDGE,
	SFE_EXCEPTION_TCP_INVALID,
	SFE_EXCEPTION_PROTOCOL_NOT_SUPPORT,
	SFE_EXCEPTION_SRC_DEV_NOT_L3,
	SFE_EXCEPTION_DEST_DEV_NOT_L3,
	SFE_EXCEPTION_CFG_ERR,
	SFE_EXCEPTION_CREATE_FAILED,
	SFE_EXCEPTION_ENQUEUE_FAILED,
	SFE_EXCEPTION_NOT_SUPPORT_6RD,
	SFE_EXCEPTION_NO_SYNC_CB,
	SFE_EXCEPTION_MAX
} sfe_exception_t;

static char *sfe_exception_events_string[SFE_EXCEPTION_MAX] = {
	"IPV4_MSG_UNKNOW",
	"IPV6_MSG_UNKNOW",
	"CONNECTION_INVALID",
	"NOT_SUPPORT_BRIDGE",
	"TCP_INVALID",
	"PROTOCOL_NOT_SUPPORT",
	"SRC_DEV_NOT_L3",
	"DEST_DEV_NOT_L3",
	"CONFIG_ERROR",
	"CREATE_FAILED",
	"ENQUEUE_FAILED",
	"NOT_SUPPORT_6RD",
	"NO_SYNC_CB"
};

/*
 * Message type of queued response message
 */
typedef enum {
	SFE_MSG_TYPE_IPV4,
	SFE_MSG_TYPE_IPV6
} sfe_msg_types_t;

/*
 * Queued response message,
 * will be sent back to caller in workqueue
 */
struct sfe_response_msg {
	struct list_head node;
	sfe_msg_types_t type;
	void *msg[0];
};
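/*
 * Note on the layout above: msg[] is a zero-length array. The response
 * payload (a full struct sfe_ipv4_msg or struct sfe_ipv6_msg) is copied into
 * the memory allocated immediately after this header by
 * sfe_alloc_response_msg().
 */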

/*
 * SFE context instance, private for SFE
 */
struct sfe_ctx_instance_internal {
	struct sfe_ctx_instance base;	/* Exported SFE context, public to users of SFE */

	/*
	 * Control state.
	 */
	struct kobject *sys_sfe;	/* Sysfs linkage */

	struct list_head msg_queue;	/* Response message queue */
	spinlock_t lock;		/* Lock to protect message queue */

	struct work_struct work;	/* Work to send response messages back to callers */

	sfe_ipv4_msg_callback_t __rcu ipv4_stats_sync_cb;	/* Callback to call to sync ipv4 statistics */
	void *ipv4_stats_sync_data;	/* Argument for above callback: ipv4_stats_sync_cb */

	sfe_ipv6_msg_callback_t __rcu ipv6_stats_sync_cb;	/* Callback to call to sync ipv6 statistics */
	void *ipv6_stats_sync_data;	/* Argument for above callback: ipv6_stats_sync_cb */

	u32 exceptions[SFE_EXCEPTION_MAX];	/* Statistics for exceptions */

	int32_t l2_feature_support;	/* L2 feature support */

};

static struct sfe_ctx_instance_internal __sfe_ctx;

/*
 * Convert public SFE context to internal context
 */
#define SFE_CTX_TO_PRIVATE(base) (struct sfe_ctx_instance_internal *)(base)
/*
 * Convert internal SFE context to public context
 */
#define SFE_CTX_TO_PUBLIC(intrv) (struct sfe_ctx_instance *)(intrv)

/*
 * sfe_incr_exceptions()
 *	Increase an exception counter.
 *
 * TODO: Merge sfe_ctx stats to ipv4 and ipv6 percpu stats.
 */
static inline void sfe_incr_exceptions(sfe_exception_t except)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);
	sfe_ctx->exceptions[except]++;
	spin_unlock_bh(&sfe_ctx->lock);
}

/*
 * sfe_dev_is_layer_3_interface()
 *	Check if a network device is an IPv4 or IPv6 layer-3 interface
 *
 * @param dev network device to check
 * @param check_v4 check for an IPv4 layer-3 interface (which has an IPv4 address) or an IPv6 layer-3 interface (which has an IPv6 address)
 */
inline bool sfe_dev_is_layer_3_interface(struct net_device *dev, bool check_v4)
{
	struct in_device *in4_dev;
	struct inet6_dev *in6_dev;

	BUG_ON(!dev);

	if (likely(check_v4)) {
		/*
		 * Does our input device support IPv4 processing?
		 */
		in4_dev = (struct in_device *)dev->ip_ptr;
		if (unlikely(!in4_dev)) {
			return false;
		}

		/*
		 * Does it have an IPv4 address? If it doesn't then it could be a MAP-T interface,
		 * else we can't do anything interesting here!
		 */
		if (likely(in4_dev->ifa_list || (dev->priv_flags_ext & IFF_EXT_MAPT))) {
			return true;
		}
		return false;
	}

	/*
	 * Does our input device support IPv6 processing?
	 */
	in6_dev = (struct inet6_dev *)dev->ip6_ptr;
	if (unlikely(!in6_dev)) {
		return false;
	}

	/*
	 * Does it have an IPv6 address? If it doesn't then it could be a MAP-T interface,
	 * else we can't do anything interesting here!
	 */
	if (likely(!list_empty(&in6_dev->addr_list) || (dev->priv_flags_ext & IFF_EXT_MAPT))) {
		return true;
	}

	return false;
}

/*
 * sfe_clean_response_msg_by_type()
 *	Clean response messages in the queue when ECM exits
 *
 * @param sfe_ctx SFE context
 * @param msg_type message type, ipv4 or ipv6
 */
static void sfe_clean_response_msg_by_type(struct sfe_ctx_instance_internal *sfe_ctx, sfe_msg_types_t msg_type)
{
	struct sfe_response_msg *response, *tmp;

	if (!sfe_ctx) {
		return;
	}

	spin_lock_bh(&sfe_ctx->lock);
	list_for_each_entry_safe(response, tmp, &sfe_ctx->msg_queue, node) {
		if (response->type == msg_type) {
			list_del(&response->node);
			/*
			 * Free response message
			 */
			kfree(response);
		}
	}
	spin_unlock_bh(&sfe_ctx->lock);

}

/*
 * sfe_process_response_msg()
 *	Send all pending response messages to ECM by calling the callback function included in each message
 *
 * @param work work structure
 */
static void sfe_process_response_msg(struct work_struct *work)
{
	struct sfe_ctx_instance_internal *sfe_ctx = container_of(work, struct sfe_ctx_instance_internal, work);
	struct sfe_response_msg *response;

	spin_lock_bh(&sfe_ctx->lock);
	while ((response = list_first_entry_or_null(&sfe_ctx->msg_queue, struct sfe_response_msg, node))) {
		list_del(&response->node);
		spin_unlock_bh(&sfe_ctx->lock);
		rcu_read_lock();

		/*
		 * Send response message back to caller
		 */
		if ((response->type == SFE_MSG_TYPE_IPV4) && !sfe_ipv4_stopped(sfe_ctx)) {
			struct sfe_ipv4_msg *msg = (struct sfe_ipv4_msg *)response->msg;
			sfe_ipv4_msg_callback_t callback = (sfe_ipv4_msg_callback_t)msg->cm.cb;
			if (callback) {
				callback((void *)msg->cm.app_data, msg);
			}
		} else if ((response->type == SFE_MSG_TYPE_IPV6) && !sfe_ipv6_stopped(sfe_ctx)) {
			struct sfe_ipv6_msg *msg = (struct sfe_ipv6_msg *)response->msg;
			sfe_ipv6_msg_callback_t callback = (sfe_ipv6_msg_callback_t)msg->cm.cb;
			if (callback) {
				callback((void *)msg->cm.app_data, msg);
			}
		}

		rcu_read_unlock();
		/*
		 * Free response message
		 */
		kfree(response);
		spin_lock_bh(&sfe_ctx->lock);
	}
	spin_unlock_bh(&sfe_ctx->lock);
}

/*
 * sfe_alloc_response_msg()
 *	Alloc and construct new response message
 *
 * @param type message type
 * @param msg used to construct response message if not NULL
 *
 * @return !NULL, success; NULL, failed
 */
static struct sfe_response_msg *
sfe_alloc_response_msg(sfe_msg_types_t type, void *msg)
{
	struct sfe_response_msg *response;
	int size;

	switch (type) {
	case SFE_MSG_TYPE_IPV4:
		size = sizeof(struct sfe_ipv4_msg);
		break;
	case SFE_MSG_TYPE_IPV6:
		size = sizeof(struct sfe_ipv6_msg);
		break;
	default:
		DEBUG_ERROR("message type %d not supported\n", type);
		return NULL;
	}

	response = (struct sfe_response_msg *)kzalloc(sizeof(struct sfe_response_msg) + size, GFP_ATOMIC);
	if (!response) {
		DEBUG_ERROR("allocate memory failed\n");
		return NULL;
	}

	response->type = type;

	if (msg) {
		memcpy(response->msg, msg, size);
	}

	return response;
}

/*
 * sfe_enqueue_msg()
 *	Queue a response message
 *
 * @param sfe_ctx SFE context
 * @param response response message to be queued
 */
static inline void sfe_enqueue_msg(struct sfe_ctx_instance_internal *sfe_ctx, struct sfe_response_msg *response)
{
	spin_lock_bh(&sfe_ctx->lock);
	list_add_tail(&response->node, &sfe_ctx->msg_queue);
	spin_unlock_bh(&sfe_ctx->lock);

	schedule_work(&sfe_ctx->work);
}

/*
 * sfe_cmn_msg_init()
 *	Initialize the common message structure.
 *
 * @param ncm message to init
 * @param if_num interface number related with this message
 * @param type message type
 * @param cb callback function to process response of this message
 * @param app_data argument for above callback function
 */
static void sfe_cmn_msg_init(struct sfe_cmn_msg *ncm, u16 if_num, u32 type, u32 len, void *cb, void *app_data)
{
	ncm->interface = if_num;
	ncm->version = SFE_MESSAGE_VERSION;
	ncm->type = type;
	ncm->len = len;
	ncm->cb = (sfe_ptr_t)cb;
	ncm->app_data = (sfe_ptr_t)app_data;
}

/*
 * sfe_ipv4_stats_sync_callback()
 *	Synchronize a connection's state.
 *
 * @param sis SFE statistics from SFE core engine
 */
static void sfe_ipv4_stats_sync_callback(struct sfe_connection_sync *sis)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
	struct sfe_ipv4_msg msg;
	struct sfe_ipv4_conn_sync *sync_msg;
	sfe_ipv4_msg_callback_t sync_cb;

	rcu_read_lock();
	sync_cb = rcu_dereference(sfe_ctx->ipv4_stats_sync_cb);
	if (!sync_cb) {
		rcu_read_unlock();
		sfe_incr_exceptions(SFE_EXCEPTION_NO_SYNC_CB);
		return;
	}

	sync_msg = &msg.msg.conn_stats;

	memset(&msg, 0, sizeof(msg));
	sfe_cmn_msg_init(&msg.cm, 0, SFE_RX_CONN_STATS_SYNC_MSG,
			sizeof(struct sfe_ipv4_conn_sync), NULL, NULL);

	/*
	 * Fill connection specific information
	 */
	sync_msg->protocol = (u8)sis->protocol;
	sync_msg->flow_ip = sis->src_ip.ip;
	sync_msg->flow_ip_xlate = sis->src_ip_xlate.ip;
	sync_msg->flow_ident = sis->src_port;
	sync_msg->flow_ident_xlate = sis->src_port_xlate;

	sync_msg->return_ip = sis->dest_ip.ip;
	sync_msg->return_ip_xlate = sis->dest_ip_xlate.ip;
	sync_msg->return_ident = sis->dest_port;
	sync_msg->return_ident_xlate = sis->dest_port_xlate;

	/*
	 * Fill TCP protocol specific information
	 */
	if (sis->protocol == IPPROTO_TCP) {
		sync_msg->flow_max_window = sis->src_td_max_window;
		sync_msg->flow_end = sis->src_td_end;
		sync_msg->flow_max_end = sis->src_td_max_end;

		sync_msg->return_max_window = sis->dest_td_max_window;
		sync_msg->return_end = sis->dest_td_end;
		sync_msg->return_max_end = sis->dest_td_max_end;
	}

	/*
	 * Fill statistics information
	 */
	sync_msg->flow_rx_packet_count = sis->src_new_packet_count;
	sync_msg->flow_rx_byte_count = sis->src_new_byte_count;
	sync_msg->flow_tx_packet_count = sis->dest_new_packet_count;
	sync_msg->flow_tx_byte_count = sis->dest_new_byte_count;

	sync_msg->return_rx_packet_count = sis->dest_new_packet_count;
	sync_msg->return_rx_byte_count = sis->dest_new_byte_count;
	sync_msg->return_tx_packet_count = sis->src_new_packet_count;
	sync_msg->return_tx_byte_count = sis->src_new_byte_count;

	/*
	 * Fill expiration time to extend, in unit of msec
	 */
	sync_msg->inc_ticks = (((u32)sis->delta_jiffies) * MSEC_PER_SEC)/HZ;

	/*
	 * Fill other information
	 */
	switch (sis->reason) {
	case SFE_SYNC_REASON_DESTROY:
		sync_msg->reason = SFE_RULE_SYNC_REASON_DESTROY;
		break;
	case SFE_SYNC_REASON_FLUSH:
		sync_msg->reason = SFE_RULE_SYNC_REASON_FLUSH;
		break;
	default:
		sync_msg->reason = SFE_RULE_SYNC_REASON_STATS;
		break;
	}

	/*
	 * SFE sync calling is executed in a timer, so we can redirect it to ECM directly.
	 */
	sync_cb(sfe_ctx->ipv4_stats_sync_data, &msg);
	rcu_read_unlock();
}

/*
 * sfe_recv_parse_l2()
 *	Parse L2 headers
 *
 * Returns true if the packet is parsed and false otherwise.
 */
static bool sfe_recv_parse_l2(struct net_device *dev, struct sk_buff *skb, struct sfe_l2_info *l2_info)
{
	/*
	 * l2_hdr_offset will not change as we parse more L2.5 headers
	 * TODO: Move from storing offsets to storing pointers
	 */
	sfe_l2_hdr_offset_set(l2_info, ((skb->data - ETH_HLEN) - skb->head));

	/*
	 * TODO: Add VLAN parsing here.
	 * Add VLAN fields to l2_info structure and update l2_hdr_size
	 * In case of exception, use l2_hdr_size to move the data pointer back
	 */

	/*
	 * PPPoE parsing
	 */
	if (unlikely(htons(ETH_P_PPP_SES) != skb->protocol)) {
		return false;
	}

	/*
	 * Parse only PPPoE session packets
	 * skb->data is pointing to PPPoE hdr
	 */
	if (!sfe_pppoe_parse_hdr(skb, l2_info)) {

		/*
		 * For exception from PPPoE return from here without modifying the skb->data
		 * This includes non-IPv4/v6 cases also
		 */
		return false;
	}

	/*
	 * Pull by L2 header size considering all L2.5 headers
	 */
	__skb_pull(skb, sfe_l2_hdr_size_get(l2_info));
	return true;
}

/*
 * sfe_create_ipv4_rule_msg()
 *	Convert create message format from ecm to sfe
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv4 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_create_ipv4_rule_msg(struct sfe_ctx_instance_internal *sfe_ctx, struct sfe_ipv4_msg *msg)
{
	struct net_device *src_dev = NULL;
	struct net_device *dest_dev = NULL;
	struct sfe_response_msg *response;
	enum sfe_cmn_response ret = SFE_TX_SUCCESS;
	bool is_routed = true;
	bool cfg_err;

	response = sfe_alloc_response_msg(SFE_MSG_TYPE_IPV4, msg);
	if (!response) {
		sfe_incr_exceptions(SFE_EXCEPTION_ENQUEUE_FAILED);
		return SFE_TX_FAILURE_QUEUE;
	}

	if (!(msg->msg.rule_create.valid_flags & SFE_RULE_CREATE_CONN_VALID)) {
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CONNECTION_INVALID);
		goto failed_ret;
	}

	switch (msg->msg.rule_create.tuple.protocol) {
	case IPPROTO_TCP:
		if (!(msg->msg.rule_create.valid_flags & SFE_RULE_CREATE_TCP_VALID)) {
			ret = SFE_CMN_RESPONSE_EMSG;
			sfe_incr_exceptions(SFE_EXCEPTION_TCP_INVALID);
			goto failed_ret;
		}

	case IPPROTO_UDP:
		break;

	default:
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_PROTOCOL_NOT_SUPPORT);
		goto failed_ret;
	}

	/*
	 * Bridge flows are accelerated if L2 feature is enabled.
	 */
	if (msg->msg.rule_create.rule_flags & SFE_RULE_CREATE_FLAG_BRIDGE_FLOW) {
		if (!sfe_is_l2_feature_enabled()) {
			ret = SFE_CMN_RESPONSE_EINTERFACE;
			sfe_incr_exceptions(SFE_EXCEPTION_NOT_SUPPORT_BRIDGE);
			goto failed_ret;
		}

		is_routed = false;
	}

	/*
	 * Does our input device support IP processing?
	 */
	src_dev = dev_get_by_index(&init_net, msg->msg.rule_create.conn_rule.flow_top_interface_num);
	if (!src_dev || (is_routed && !sfe_dev_is_layer_3_interface(src_dev, true) && !netif_is_vxlan(src_dev))) {
		ret = SFE_CMN_RESPONSE_EINTERFACE;
		sfe_incr_exceptions(SFE_EXCEPTION_SRC_DEV_NOT_L3);
		goto failed_ret;
	}

	/*
	 * Check whether L2 feature is disabled and rule flag is configured to use bottom interface
	 */
	cfg_err = (msg->msg.rule_create.rule_flags & SFE_RULE_CREATE_FLAG_USE_FLOW_BOTTOM_INTERFACE) && !sfe_is_l2_feature_enabled();
	if (cfg_err) {
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CFG_ERR);
		goto failed_ret;
	}

	/*
	 * Does our output device support IP processing?
	 */
	dest_dev = dev_get_by_index(&init_net, msg->msg.rule_create.conn_rule.return_top_interface_num);
	if (!dest_dev || (is_routed && !sfe_dev_is_layer_3_interface(dest_dev, true) && !netif_is_vxlan(dest_dev))) {
		ret = SFE_CMN_RESPONSE_EINTERFACE;
		sfe_incr_exceptions(SFE_EXCEPTION_DEST_DEV_NOT_L3);
		goto failed_ret;
	}

	/*
	 * Check whether L2 feature is disabled and rule flag is configured to use bottom interface
	 */
	cfg_err = (msg->msg.rule_create.rule_flags & SFE_RULE_CREATE_FLAG_USE_RETURN_BOTTOM_INTERFACE) && !sfe_is_l2_feature_enabled();
	if (cfg_err) {
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CFG_ERR);
		goto failed_ret;
	}

	if (!sfe_ipv4_create_rule(&msg->msg.rule_create)) {
		/* success */
		ret = SFE_CMN_RESPONSE_ACK;
	} else {
		/* Failed */
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CREATE_FAILED);
	}

	/*
	 * Fall through
	 */
failed_ret:
	if (src_dev) {
		dev_put(src_dev);
	}

	if (dest_dev) {
		dev_put(dest_dev);
	}

	/*
	 * Try to queue response message
	 */
	((struct sfe_ipv4_msg *)response->msg)->cm.response = msg->cm.response = ret;
	sfe_enqueue_msg(sfe_ctx, response);

	return SFE_TX_SUCCESS;
}

/*
 * sfe_destroy_ipv4_rule_msg()
 *	Convert destroy message format from ecm to sfe
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv4 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_destroy_ipv4_rule_msg(struct sfe_ctx_instance_internal *sfe_ctx, struct sfe_ipv4_msg *msg)
{
	struct sfe_response_msg *response;

	response = sfe_alloc_response_msg(SFE_MSG_TYPE_IPV4, msg);
	if (!response) {
		sfe_incr_exceptions(SFE_EXCEPTION_ENQUEUE_FAILED);
		return SFE_TX_FAILURE_QUEUE;
	}

	sfe_ipv4_destroy_rule(&msg->msg.rule_destroy);

	/*
	 * Try to queue response message
	 */
	((struct sfe_ipv4_msg *)response->msg)->cm.response = msg->cm.response = SFE_CMN_RESPONSE_ACK;
	sfe_enqueue_msg(sfe_ctx, response);

	return SFE_TX_SUCCESS;
}

/*
 * sfe_ipv4_tx()
 *	Transmit an IPv4 message to the sfe
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv4 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_ipv4_tx(struct sfe_ctx_instance *sfe_ctx, struct sfe_ipv4_msg *msg)
{
	switch (msg->cm.type) {
	case SFE_TX_CREATE_RULE_MSG:
		return sfe_create_ipv4_rule_msg(SFE_CTX_TO_PRIVATE(sfe_ctx), msg);
	case SFE_TX_DESTROY_RULE_MSG:
		return sfe_destroy_ipv4_rule_msg(SFE_CTX_TO_PRIVATE(sfe_ctx), msg);
	default:
		sfe_incr_exceptions(SFE_EXCEPTION_IPV4_MSG_UNKNOW);
		return SFE_TX_FAILURE_NOT_ENABLED;
	}
}
EXPORT_SYMBOL(sfe_ipv4_tx);

/*
 * sfe_ipv4_msg_init()
 *	Initialize IPv4 message.
 */
void sfe_ipv4_msg_init(struct sfe_ipv4_msg *nim, u16 if_num, u32 type, u32 len,
			sfe_ipv4_msg_callback_t cb, void *app_data)
{
	sfe_cmn_msg_init(&nim->cm, if_num, type, len, (void *)cb, app_data);
}
EXPORT_SYMBOL(sfe_ipv4_msg_init);
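/*
 * Illustrative sketch (not taken from this file): how a client such as ECM
 * might build and send a create-rule request with the exported API above.
 * my_cb and my_app_data are hypothetical placeholder names; the actual rule
 * contents come from the caller's connection data.
 *
 *	struct sfe_ipv4_msg nim;
 *
 *	sfe_ipv4_msg_init(&nim, 0, SFE_TX_CREATE_RULE_MSG,
 *			  sizeof(nim.msg.rule_create), my_cb, my_app_data);
 *	// Fill nim.msg.rule_create: valid_flags, tuple, conn_rule, rule_flags, ...
 *	if (sfe_ipv4_tx(sfe_ctx, &nim) != SFE_TX_SUCCESS) {
 *		// The message was not queued; handle the error.
 *	}
 *
 * The response (ACK or error) is delivered asynchronously through my_cb by
 * the workqueue in sfe_process_response_msg().
 */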

/*
 * sfe_ipv4_max_conn_count()
 *	Return the maximum number of entries SFE supports
 */
int sfe_ipv4_max_conn_count(void)
{
	return max_ipv4_conn;
}
EXPORT_SYMBOL(sfe_ipv4_max_conn_count);

/*
 * sfe_ipv4_notify_register()
 *	Register a notifier callback for IPv4 messages from SFE
 *
 * @param cb The callback pointer
 * @param app_data The application context for this message
 *
 * @return struct sfe_ctx_instance * The SFE context
 */
struct sfe_ctx_instance *sfe_ipv4_notify_register(sfe_ipv4_msg_callback_t cb, void *app_data)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);
	/*
	 * Hook the shortcut sync callback.
	 */
	if (cb && !sfe_ctx->ipv4_stats_sync_cb) {
		sfe_ipv4_register_sync_rule_callback(sfe_ipv4_stats_sync_callback);
	}

	rcu_assign_pointer(sfe_ctx->ipv4_stats_sync_cb, cb);
	sfe_ctx->ipv4_stats_sync_data = app_data;

	spin_unlock_bh(&sfe_ctx->lock);

	return SFE_CTX_TO_PUBLIC(sfe_ctx);
}
EXPORT_SYMBOL(sfe_ipv4_notify_register);
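/*
 * Illustrative sketch (not taken from this file): the expected register /
 * unregister pairing for the IPv4 notifier. my_ipv4_cb and my_data are
 * hypothetical names for the client's callback and context.
 *
 *	struct sfe_ctx_instance *ctx;
 *
 *	ctx = sfe_ipv4_notify_register(my_ipv4_cb, my_data);
 *	// ctx is then passed to sfe_ipv4_tx(); periodic connection stats sync
 *	// messages arrive through my_ipv4_cb, while per-message responses use
 *	// the callback given to sfe_ipv4_msg_init().
 *	sfe_ipv4_notify_unregister();
 */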

/*
 * sfe_ipv4_notify_unregister()
 *	Unregister a notifier callback for IPv4 messages from SFE
 */
void sfe_ipv4_notify_unregister(void)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);
	/*
	 * Unregister our sync callback.
	 */
	if (sfe_ctx->ipv4_stats_sync_cb) {
		sfe_ipv4_register_sync_rule_callback(NULL);
		rcu_assign_pointer(sfe_ctx->ipv4_stats_sync_cb, NULL);
		sfe_ctx->ipv4_stats_sync_data = NULL;
	}
	spin_unlock_bh(&sfe_ctx->lock);

	sfe_clean_response_msg_by_type(sfe_ctx, SFE_MSG_TYPE_IPV4);

	return;
}
EXPORT_SYMBOL(sfe_ipv4_notify_unregister);

/*
 * sfe_ipv6_stats_sync_callback()
 *	Synchronize a connection's state.
 */
static void sfe_ipv6_stats_sync_callback(struct sfe_connection_sync *sis)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
	struct sfe_ipv6_msg msg;
	struct sfe_ipv6_conn_sync *sync_msg;
	sfe_ipv6_msg_callback_t sync_cb;

	rcu_read_lock();
	sync_cb = rcu_dereference(sfe_ctx->ipv6_stats_sync_cb);
	if (!sync_cb) {
		rcu_read_unlock();
		sfe_incr_exceptions(SFE_EXCEPTION_NO_SYNC_CB);
		return;
	}

	sync_msg = &msg.msg.conn_stats;

	memset(&msg, 0, sizeof(msg));
	sfe_cmn_msg_init(&msg.cm, 0, SFE_RX_CONN_STATS_SYNC_MSG,
			sizeof(struct sfe_ipv6_conn_sync), NULL, NULL);

	/*
	 * Fill connection specific information
	 */
	sync_msg->protocol = (u8)sis->protocol;
	sfe_ipv6_addr_copy(sis->src_ip.ip6, sync_msg->flow_ip);
	sync_msg->flow_ident = sis->src_port;

	sfe_ipv6_addr_copy(sis->dest_ip.ip6, sync_msg->return_ip);
	sync_msg->return_ident = sis->dest_port;

	/*
	 * Fill TCP protocol specific information
	 */
	if (sis->protocol == IPPROTO_TCP) {
		sync_msg->flow_max_window = sis->src_td_max_window;
		sync_msg->flow_end = sis->src_td_end;
		sync_msg->flow_max_end = sis->src_td_max_end;

		sync_msg->return_max_window = sis->dest_td_max_window;
		sync_msg->return_end = sis->dest_td_end;
		sync_msg->return_max_end = sis->dest_td_max_end;
	}

	/*
	 * Fill statistics information
	 */
	sync_msg->flow_rx_packet_count = sis->src_new_packet_count;
	sync_msg->flow_rx_byte_count = sis->src_new_byte_count;
	sync_msg->flow_tx_packet_count = sis->dest_new_packet_count;
	sync_msg->flow_tx_byte_count = sis->dest_new_byte_count;

	sync_msg->return_rx_packet_count = sis->dest_new_packet_count;
	sync_msg->return_rx_byte_count = sis->dest_new_byte_count;
	sync_msg->return_tx_packet_count = sis->src_new_packet_count;
	sync_msg->return_tx_byte_count = sis->src_new_byte_count;

	/*
	 * Fill expiration time to extend, in unit of msec
	 */
	sync_msg->inc_ticks = (((u32)sis->delta_jiffies) * MSEC_PER_SEC)/HZ;

	/*
	 * Fill other information
	 */
	switch (sis->reason) {
	case SFE_SYNC_REASON_DESTROY:
		sync_msg->reason = SFE_RULE_SYNC_REASON_DESTROY;
		break;
	case SFE_SYNC_REASON_FLUSH:
		sync_msg->reason = SFE_RULE_SYNC_REASON_FLUSH;
		break;
	default:
		sync_msg->reason = SFE_RULE_SYNC_REASON_STATS;
		break;
	}

	/*
	 * SFE sync calling is executed in a timer, so we can redirect it to ECM directly.
	 */
	sync_cb(sfe_ctx->ipv6_stats_sync_data, &msg);
	rcu_read_unlock();
}

/*
 * sfe_create_ipv6_rule_msg()
 *	Convert create message format from ecm to sfe
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv6 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_create_ipv6_rule_msg(struct sfe_ctx_instance_internal *sfe_ctx, struct sfe_ipv6_msg *msg)
{
	struct net_device *src_dev = NULL;
	struct net_device *dest_dev = NULL;
	struct sfe_response_msg *response;
	enum sfe_cmn_response ret = SFE_TX_SUCCESS;
	bool is_routed = true;
	bool cfg_err;

	response = sfe_alloc_response_msg(SFE_MSG_TYPE_IPV6, msg);
	if (!response) {
		sfe_incr_exceptions(SFE_EXCEPTION_ENQUEUE_FAILED);
		return SFE_TX_FAILURE_QUEUE;
	}

	if (!(msg->msg.rule_create.valid_flags & SFE_RULE_CREATE_CONN_VALID)) {
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CONNECTION_INVALID);
		goto failed_ret;
	}

	/*
	 * Bridge flows are accelerated if L2 feature is enabled.
	 */
	if (msg->msg.rule_create.rule_flags & SFE_RULE_CREATE_FLAG_BRIDGE_FLOW) {
		if (!sfe_is_l2_feature_enabled()) {
			ret = SFE_CMN_RESPONSE_EINTERFACE;
			sfe_incr_exceptions(SFE_EXCEPTION_NOT_SUPPORT_BRIDGE);
			goto failed_ret;
		}
		is_routed = false;
	}

	switch(msg->msg.rule_create.tuple.protocol) {

	case IPPROTO_TCP:
		if (!(msg->msg.rule_create.valid_flags & SFE_RULE_CREATE_TCP_VALID)) {
			ret = SFE_CMN_RESPONSE_EMSG;
			sfe_incr_exceptions(SFE_EXCEPTION_TCP_INVALID);
			goto failed_ret;
		}

		break;

	case IPPROTO_UDP:
		break;

	default:
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_PROTOCOL_NOT_SUPPORT);
		goto failed_ret;
	}

	/*
	 * Does our input device support IP processing?
	 */
	src_dev = dev_get_by_index(&init_net, msg->msg.rule_create.conn_rule.flow_top_interface_num);
	if (!src_dev || (is_routed && !sfe_dev_is_layer_3_interface(src_dev, false) && !netif_is_vxlan(src_dev))) {
		ret = SFE_CMN_RESPONSE_EINTERFACE;
		sfe_incr_exceptions(SFE_EXCEPTION_SRC_DEV_NOT_L3);
		goto failed_ret;
	}

	/*
	 * Check whether L2 feature is disabled and rule flag is configured to use bottom interface
	 */
	cfg_err = (msg->msg.rule_create.rule_flags & SFE_RULE_CREATE_FLAG_USE_FLOW_BOTTOM_INTERFACE) && !sfe_is_l2_feature_enabled();
	if (cfg_err) {
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CFG_ERR);
		goto failed_ret;
	}

	/*
	 * Does our output device support IP processing?
	 */
	dest_dev = dev_get_by_index(&init_net, msg->msg.rule_create.conn_rule.return_top_interface_num);
	if (!dest_dev || (is_routed && !sfe_dev_is_layer_3_interface(dest_dev, false) && !netif_is_vxlan(dest_dev))) {
		ret = SFE_CMN_RESPONSE_EINTERFACE;
		sfe_incr_exceptions(SFE_EXCEPTION_DEST_DEV_NOT_L3);
		goto failed_ret;
	}

	/*
	 * Check whether L2 feature is disabled and rule flag is configured to use bottom interface
	 */
	cfg_err = (msg->msg.rule_create.rule_flags & SFE_RULE_CREATE_FLAG_USE_RETURN_BOTTOM_INTERFACE) && !sfe_is_l2_feature_enabled();
	if (cfg_err) {
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CFG_ERR);
		goto failed_ret;
	}

	if (!sfe_ipv6_create_rule(&msg->msg.rule_create)) {
		/* success */
		ret = SFE_CMN_RESPONSE_ACK;
	} else {
		/* Failed */
		ret = SFE_CMN_RESPONSE_EMSG;
		sfe_incr_exceptions(SFE_EXCEPTION_CREATE_FAILED);
	}

	/*
	 * Fall through
	 */
failed_ret:
	if (src_dev) {
		dev_put(src_dev);
	}

	if (dest_dev) {
		dev_put(dest_dev);
	}

	/*
	 * Try to queue response message
	 */
	((struct sfe_ipv6_msg *)response->msg)->cm.response = msg->cm.response = ret;
	sfe_enqueue_msg(sfe_ctx, response);

	return SFE_TX_SUCCESS;
}

/*
 * sfe_destroy_ipv6_rule_msg()
 *	Convert destroy message format from ecm to sfe
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv6 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_destroy_ipv6_rule_msg(struct sfe_ctx_instance_internal *sfe_ctx, struct sfe_ipv6_msg *msg)
{
	struct sfe_response_msg *response;

	response = sfe_alloc_response_msg(SFE_MSG_TYPE_IPV6, msg);
	if (!response) {
		sfe_incr_exceptions(SFE_EXCEPTION_ENQUEUE_FAILED);
		return SFE_TX_FAILURE_QUEUE;
	}

	sfe_ipv6_destroy_rule(&msg->msg.rule_destroy);

	/*
	 * Try to queue response message
	 */
	((struct sfe_ipv6_msg *)response->msg)->cm.response = msg->cm.response = SFE_CMN_RESPONSE_ACK;
	sfe_enqueue_msg(sfe_ctx, response);

	return SFE_TX_SUCCESS;
}

/*
 * sfe_ipv6_tx()
 *	Transmit an IPv6 message to the sfe
 *
 * @param sfe_ctx SFE context
 * @param msg The IPv6 message
 *
 * @return sfe_tx_status_t The status of the Tx operation
 */
sfe_tx_status_t sfe_ipv6_tx(struct sfe_ctx_instance *sfe_ctx, struct sfe_ipv6_msg *msg)
{
	switch (msg->cm.type) {
	case SFE_TX_CREATE_RULE_MSG:
		return sfe_create_ipv6_rule_msg(SFE_CTX_TO_PRIVATE(sfe_ctx), msg);
	case SFE_TX_DESTROY_RULE_MSG:
		return sfe_destroy_ipv6_rule_msg(SFE_CTX_TO_PRIVATE(sfe_ctx), msg);
	default:
		sfe_incr_exceptions(SFE_EXCEPTION_IPV6_MSG_UNKNOW);
		return SFE_TX_FAILURE_NOT_ENABLED;
	}
}
EXPORT_SYMBOL(sfe_ipv6_tx);

/*
 * sfe_ipv6_msg_init()
 *	Initialize IPv6 message.
 */
void sfe_ipv6_msg_init(struct sfe_ipv6_msg *nim, u16 if_num, u32 type, u32 len,
			sfe_ipv6_msg_callback_t cb, void *app_data)
{
	sfe_cmn_msg_init(&nim->cm, if_num, type, len, (void *)cb, app_data);
}
EXPORT_SYMBOL(sfe_ipv6_msg_init);

/*
 * sfe_ipv6_max_conn_count()
 *	Return the maximum number of entries SFE supports
 */
int sfe_ipv6_max_conn_count(void)
{
	return max_ipv6_conn;
}
EXPORT_SYMBOL(sfe_ipv6_max_conn_count);

/*
 * sfe_ipv6_notify_register()
 *	Register a notifier callback for IPv6 messages from SFE
 *
 * @param cb The callback pointer
 * @param app_data The application context for this message
 *
 * @return struct sfe_ctx_instance * The SFE context
 */
struct sfe_ctx_instance *sfe_ipv6_notify_register(sfe_ipv6_msg_callback_t cb, void *app_data)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);
	/*
	 * Hook the shortcut sync callback.
	 */
	if (cb && !sfe_ctx->ipv6_stats_sync_cb) {
		sfe_ipv6_register_sync_rule_callback(sfe_ipv6_stats_sync_callback);
	}

	rcu_assign_pointer(sfe_ctx->ipv6_stats_sync_cb, cb);
	sfe_ctx->ipv6_stats_sync_data = app_data;

	spin_unlock_bh(&sfe_ctx->lock);

	return SFE_CTX_TO_PUBLIC(sfe_ctx);
}
EXPORT_SYMBOL(sfe_ipv6_notify_register);

/*
 * sfe_ipv6_notify_unregister()
 *	Unregister a notifier callback for IPv6 messages from SFE
 */
void sfe_ipv6_notify_unregister(void)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);
	/*
	 * Unregister our sync callback.
	 */
	if (sfe_ctx->ipv6_stats_sync_cb) {
		sfe_ipv6_register_sync_rule_callback(NULL);
		rcu_assign_pointer(sfe_ctx->ipv6_stats_sync_cb, NULL);
		sfe_ctx->ipv6_stats_sync_data = NULL;
	}
	spin_unlock_bh(&sfe_ctx->lock);

	sfe_clean_response_msg_by_type(sfe_ctx, SFE_MSG_TYPE_IPV6);

	return;
}
EXPORT_SYMBOL(sfe_ipv6_notify_unregister);

/*
 * sfe_tun6rd_tx()
 *	Transmit a tun6rd message to sfe engine
 */
sfe_tx_status_t sfe_tun6rd_tx(struct sfe_ctx_instance *sfe_ctx, struct sfe_tun6rd_msg *msg)
{
	sfe_incr_exceptions(SFE_EXCEPTION_NOT_SUPPORT_6RD);
	return SFE_TX_FAILURE_NOT_ENABLED;
}
EXPORT_SYMBOL(sfe_tun6rd_tx);

/*
 * sfe_tun6rd_msg_init()
 *	Initialize sfe_tun6rd msg.
 */
void sfe_tun6rd_msg_init(struct sfe_tun6rd_msg *ncm, u16 if_num, u32 type, u32 len, void *cb, void *app_data)
{
	sfe_cmn_msg_init(&ncm->cm, if_num, type, len, cb, app_data);
}
EXPORT_SYMBOL(sfe_tun6rd_msg_init);

/*
 * sfe_recv()
 *	Handle packet receives.
 *
 * Returns 1 if the packet is forwarded or 0 if it isn't.
 */
int sfe_recv(struct sk_buff *skb)
{
	struct net_device *dev;
	struct sfe_l2_info l2_info;
	int ret;

	/*
	 * We know that for the vast majority of packets we need the transport
	 * layer header so we may as well start to fetch it now!
	 */
	prefetch(skb->data + 32);
	barrier();

	dev = skb->dev;

	/*
	 * Setting parse flags to 0 since l2_info is passed for the non L2.5 header case as well
	 */
	l2_info.parse_flags = 0;

#ifdef CONFIG_NET_CLS_ACT
	/*
	 * If an ingress Qdisc is configured and the packet has not been processed by it yet,
	 * we cannot accelerate this packet.
	 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0))
	if (dev->ingress_queue && !(skb->tc_verd & TC_NCLS)) {
		return 0;
	}
#else
	if (rcu_access_pointer(dev->miniq_ingress) && !skb->tc_skip_classify) {
		return 0;
	}
#endif
#endif

	/*
	 * If l2_feature is enabled, we need not check if src dev is an L3 interface since bridge flow offload is supported.
	 * If l2_feature is disabled, then we make sure src dev is an L3 interface to avoid the cost of rule lookup for L2 flows.
	 */
	switch (ntohs(skb->protocol)) {
	case ETH_P_IP:
		if (likely(sfe_is_l2_feature_enabled()) || sfe_dev_is_layer_3_interface(dev, true)) {
			return sfe_ipv4_recv(dev, skb, &l2_info, false);
		}

		DEBUG_TRACE("No IPv4 address for device: %s\n", dev->name);
		return 0;

	case ETH_P_IPV6:
		if (likely(sfe_is_l2_feature_enabled()) || sfe_dev_is_layer_3_interface(dev, false)) {
			return sfe_ipv6_recv(dev, skb, &l2_info, false);
		}

		DEBUG_TRACE("No IPv6 address for device: %s\n", dev->name);
		return 0;

	default:
		break;
	}

	/*
	 * Stop L2 processing if L2 feature is disabled.
	 */
	if (!sfe_is_l2_feature_enabled()) {
		DEBUG_TRACE("Unsupported protocol %d (L2 feature is disabled)\n", ntohs(skb->protocol));
		return 0;
	}

	/*
	 * Parse the L2 headers to find the L3 protocol and the L2 header offset
	 */
	if (unlikely(!sfe_recv_parse_l2(dev, skb, &l2_info))) {
		DEBUG_TRACE("%px: Invalid L2.5 header format with protocol : %d\n", skb, ntohs(skb->protocol));
		return 0;
	}

	/*
	 * Protocol in l2_info is expected to be in host byte order.
	 * PPPoE is doing it in the sfe_pppoe_parse_hdr()
	 */
	if (likely(l2_info.protocol == ETH_P_IP)) {
		ret = sfe_ipv4_recv(dev, skb, &l2_info, false);
		if (unlikely(!ret)) {
			goto send_to_linux;
		}
		return ret;
	}

	if (likely(l2_info.protocol == ETH_P_IPV6)) {
		ret = sfe_ipv6_recv(dev, skb, &l2_info, false);
		if (likely(ret)) {
			return ret;
		}
	}

send_to_linux:
	/*
	 * Push the data back before sending to linux if -
	 * a. There is any exception from IPV4/V6
	 * b. If the next protocol is neither IPV4 nor IPV6
	 */
	__skb_push(skb, sfe_l2_hdr_size_get(&l2_info));

	return 0;
}

/*
 * sfe_get_exceptions()
 *	Dump exception counters
 */
static ssize_t sfe_get_exceptions(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	int idx, len;
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	spin_lock_bh(&sfe_ctx->lock);
	for (len = 0, idx = 0; idx < SFE_EXCEPTION_MAX; idx++) {
		if (sfe_ctx->exceptions[idx]) {
			len += snprintf(buf + len, (ssize_t)(PAGE_SIZE - len), "%s = %d\n", sfe_exception_events_string[idx], sfe_ctx->exceptions[idx]);
		}
	}
	spin_unlock_bh(&sfe_ctx->lock);

	return len;
}

/*
 * sysfs attributes.
 */
static const struct device_attribute sfe_exceptions_attr =
	__ATTR(exceptions, S_IRUGO, sfe_get_exceptions, NULL);
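/*
 * Usage note (illustrative, based on the "sfe" kobject created in
 * sfe_init_if()): the exception counters are exported read-only under
 * /sys/sfe and can be inspected from user space with, for example:
 *
 *	cat /sys/sfe/exceptions
 *
 * Only counters with a non-zero value are printed.
 */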

/*
 * sfe_is_l2_feature_enabled()
 *	Check if the L2 features flag is enabled or not (VLAN, PPPoE, bridge and tunnels).
 *
 * 32bit read is atomic. No need of locks.
 */
bool sfe_is_l2_feature_enabled()
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
	return (sfe_ctx->l2_feature_support == 1);
}
EXPORT_SYMBOL(sfe_is_l2_feature_enabled);

/*
 * sfe_get_l2_feature()
 *	Show whether the L2 feature is enabled or disabled
 */
ssize_t sfe_get_l2_feature(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
	ssize_t len;

	spin_lock_bh(&sfe_ctx->lock);
	len = snprintf(buf, (ssize_t)(PAGE_SIZE), "L2 feature is %s\n", sfe_ctx->l2_feature_support ? "enabled" : "disabled");
	spin_unlock_bh(&sfe_ctx->lock);
	return len;
}

/*
 * sfe_set_l2_feature()
 *	Enable or disable L2 features flag.
 */
ssize_t sfe_set_l2_feature(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	unsigned long val;
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
	int ret;
	ret = sscanf(buf, "%lu", &val);

	if (ret != 1) {
		pr_err("Wrong input, %s\n", buf);
		return -EINVAL;
	}

	if (val != 1 && val != 0) {
		pr_err("Input should be either 1 or 0, (%s)\n", buf);
		return -EINVAL;
	}

	spin_lock_bh(&sfe_ctx->lock);

	if (sfe_ctx->l2_feature_support && val) {
		spin_unlock_bh(&sfe_ctx->lock);
		pr_err("L2 feature is already enabled\n");
		return -EINVAL;
	}

	if (!sfe_ctx->l2_feature_support && !val) {
		spin_unlock_bh(&sfe_ctx->lock);
		pr_err("L2 feature is already disabled\n");
		return -EINVAL;
	}

	sfe_ctx->l2_feature_support = val;
	spin_unlock_bh(&sfe_ctx->lock);

	return count;
}

static const struct device_attribute sfe_l2_feature_attr =
	__ATTR(l2_feature, 0644, sfe_get_l2_feature, sfe_set_l2_feature);
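/*
 * Usage note (illustrative, based on the sysfs attribute above and the "sfe"
 * kobject created in sfe_init_if()): the L2 feature (VLAN/PPPoE/bridge
 * acceleration) is toggled at runtime from user space, for example:
 *
 *	echo 1 > /sys/sfe/l2_feature	(enable)
 *	echo 0 > /sys/sfe/l2_feature	(disable)
 *	cat /sys/sfe/l2_feature		(show current state)
 *
 * Writing the value that is already set is rejected with -EINVAL.
 */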

/*
 * sfe_init_if()
 */
int sfe_init_if(void)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
	int result = -1;

	/*
	 * L2 feature is disabled by default
	 */
	sfe_ctx->l2_feature_support = 0;

	/*
	 * Create sys/sfe
	 */
	sfe_ctx->sys_sfe = kobject_create_and_add("sfe", NULL);
	if (!sfe_ctx->sys_sfe) {
		DEBUG_ERROR("failed to register sfe\n");
		goto exit1;
	}

	/*
	 * Create sys/sfe/exceptions
	 */
	result = sysfs_create_file(sfe_ctx->sys_sfe, &sfe_exceptions_attr.attr);
	if (result) {
		DEBUG_ERROR("failed to register exceptions file: %d\n", result);
		goto exit2;
	}

	result = sysfs_create_file(sfe_ctx->sys_sfe, &sfe_l2_feature_attr.attr);
	if (result) {
		DEBUG_ERROR("failed to register L2 feature flag sysfs file: %d\n", result);
		goto exit2;
	}

	spin_lock_init(&sfe_ctx->lock);

	INIT_LIST_HEAD(&sfe_ctx->msg_queue);
	INIT_WORK(&sfe_ctx->work, sfe_process_response_msg);

	/*
	 * Hook the receive path in the network stack.
	 */
	BUG_ON(athrs_fast_nat_recv);
	RCU_INIT_POINTER(athrs_fast_nat_recv, sfe_recv);

	return 0;
exit2:
	kobject_put(sfe_ctx->sys_sfe);
exit1:
	return result;
}

/*
 * sfe_exit_if()
 */
void sfe_exit_if(void)
{
	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;

	/*
	 * Unregister our receive callback.
	 */
	RCU_INIT_POINTER(athrs_fast_nat_recv, NULL);

	/*
	 * Wait for all callbacks to complete.
	 */
	rcu_barrier();

	/*
	 * Destroy all connections.
	 */
	sfe_ipv4_destroy_all_rules_for_dev(NULL);
	sfe_ipv6_destroy_all_rules_for_dev(NULL);

	/*
	 * Stop the work queue and flush all pending messages in the queue
	 */
	cancel_work_sync(&sfe_ctx->work);
	sfe_process_response_msg(&sfe_ctx->work);

	/*
	 * Unregister our sync callbacks.
	 */
	sfe_ipv4_notify_unregister();
	sfe_ipv6_notify_unregister();

	kobject_put(sfe_ctx->sys_sfe);

	return;
}