/*
 * sfe_ipv4.c
 *	Shortcut forwarding engine - IPv4 edition.
 *
 * Copyright (c) 2013-2016, 2019-2020, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/module.h>
#include <linux/sysfs.h>
#include <linux/skbuff.h>
#include <linux/icmp.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/vxlan.h>
#include <linux/etherdevice.h>
#include <linux/version.h>
#include <linux/lockdep.h>
#include <linux/refcount.h>
#include <linux/netfilter.h>
#include <linux/inetdevice.h>
#include <linux/netfilter_ipv4.h>
#include <linux/seqlock.h>
#include <net/protocol.h>
#include <net/gre.h>

#include "sfe_debug.h"
#include "sfe_api.h"
#include "sfe.h"
#include "sfe_flow_cookie.h"
#include "sfe_ipv4.h"
#include "sfe_ipv4_udp.h"
#include "sfe_ipv4_tcp.h"
#include "sfe_ipv4_icmp.h"
#include "sfe_pppoe.h"
#include "sfe_pppoe_mgr.h"
#include "sfe_ipv4_pppoe_br.h"
#include "sfe_ipv4_gre.h"
#include "sfe_ipv4_tun6rd.h"
#include "sfe_ipv4_esp.h"

static char *sfe_ipv4_exception_events_string[SFE_IPV4_EXCEPTION_EVENT_LAST] = {
	"UDP_HEADER_INCOMPLETE",
	"UDP_NO_CONNECTION",
	"UDP_IP_OPTIONS_OR_INITIAL_FRAGMENT",
	"UDP_SMALL_TTL",
	"UDP_NEEDS_FRAGMENTATION",
	"TCP_HEADER_INCOMPLETE",
	"TCP_NO_CONNECTION_SLOW_FLAGS",
	"TCP_NO_CONNECTION_FAST_FLAGS",
	"TCP_IP_OPTIONS_OR_INITIAL_FRAGMENT",
	"TCP_SMALL_TTL",
	"TCP_NEEDS_FRAGMENTATION",
	"TCP_FLAGS",
	"TCP_SEQ_EXCEEDS_RIGHT_EDGE",
	"TCP_SMALL_DATA_OFFS",
	"TCP_BAD_SACK",
	"TCP_BIG_DATA_OFFS",
	"TCP_SEQ_BEFORE_LEFT_EDGE",
	"TCP_ACK_EXCEEDS_RIGHT_EDGE",
	"TCP_ACK_BEFORE_LEFT_EDGE",
	"ICMP_HEADER_INCOMPLETE",
	"ICMP_UNHANDLED_TYPE",
	"ICMP_IPV4_HEADER_INCOMPLETE",
	"ICMP_IPV4_NON_V4",
	"ICMP_IPV4_IP_OPTIONS_INCOMPLETE",
	"ICMP_IPV4_UDP_HEADER_INCOMPLETE",
	"ICMP_IPV4_TCP_HEADER_INCOMPLETE",
	"ICMP_IPV4_UNHANDLED_PROTOCOL",
	"ICMP_NO_CONNECTION",
	"ICMP_FLUSHED_CONNECTION",
	"HEADER_INCOMPLETE",
	"HEADER_CSUM_BAD",
	"BAD_TOTAL_LENGTH",
	"NON_V4",
	"NON_INITIAL_FRAGMENT",
	"DATAGRAM_INCOMPLETE",
	"IP_OPTIONS_INCOMPLETE",
	"UNHANDLED_PROTOCOL",
	"NO_HEADROOM",
	"INVALID_PPPOE_SESSION",
	"INCORRECT_PPPOE_PARSING",
	"PPPOE_NOT_SET_IN_CME",
	"PPPOE_BR_NOT_IN_CME",
	"INGRESS_VLAN_TAG_MISMATCH",
	"INVALID_SOURCE_INTERFACE",
	"TUN6RD_NO_CONNECTION",
	"TUN6RD_NEEDS_FRAGMENTATION",
	"TUN6RD_SYNC_ON_FIND",
	"GRE_HEADER_INCOMPLETE",
	"GRE_NO_CONNECTION",
	"GRE_IP_OPTIONS_OR_INITIAL_FRAGMENT",
	"GRE_SMALL_TTL",
	"GRE_NEEDS_FRAGMENTATION",
	"ESP_NO_CONNECTION",
	"ESP_IP_OPTIONS_OR_INITIAL_FRAGMENT",
	"ESP_NEEDS_FRAGMENTATION",
	"ESP_SMALL_TTL"
};

static struct sfe_ipv4 __si;
struct sfe_ipv4_msg *sfe_ipv4_sync_many_msg;
uint32_t sfe_ipv4_sync_max_number;

/*
 * sfe_ipv4_gen_ip_csum()
 *	Generate the IP checksum for an IPv4 header.
 *
 * Note that this function assumes that we have only 20 bytes of IP header.
 */
u16 sfe_ipv4_gen_ip_csum(struct iphdr *iph)
{
	u32 sum;
	u16 *i = (u16 *)iph;

	iph->check = 0;

	/*
	 * Generate the sum.
	 */
	sum = i[0] + i[1] + i[2] + i[3] + i[4] + i[5] + i[6] + i[7] + i[8] + i[9];

	/*
	 * Fold it to ones-complement form.
	 */
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);

	return (u16)sum ^ 0xffff;
}
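
/*
 * Worked example (added for clarity; not part of the original sources): the
 * sum of ten 16-bit words is at most 10 * 0xffff = 0x9fff6, so a single fold
 * of the form (sum & 0xffff) + (sum >> 16) can still leave a carry. For
 * sum = 0x2ffff the first fold gives 0xffff + 0x2 = 0x10001 and the second
 * fold gives 0x0002; only then does the final XOR with 0xffff produce the
 * ones-complement checksum.
 */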
143
144/*
145 * sfe_ipv4_get_connection_match_hash()
146 * Generate the hash used in connection match lookups.
147 */
Xiaoping Fan6a1672f2016-08-17 19:58:12 -0700148static inline unsigned int sfe_ipv4_get_connection_match_hash(struct net_device *dev, u8 protocol,
Dave Hudson87973cd2013-10-22 16:00:04 +0100149 __be32 src_ip, __be16 src_port,
150 __be32 dest_ip, __be16 dest_port)
Dave Hudsonaaf97ca2013-06-13 17:52:29 +0100151{
Ratheesh Kannoth5dee3772022-01-18 11:27:14 +0530152 u32 hash = ntohl(src_ip ^ dest_ip) ^ protocol ^ ntohs(src_port ^ dest_port);
Dave Hudsonaaf97ca2013-06-13 17:52:29 +0100153 return ((hash >> SFE_IPV4_CONNECTION_HASH_SHIFT) ^ hash) & SFE_IPV4_CONNECTION_HASH_MASK;
154}
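
/*
 * Note (added): SFE_IPV4_CONNECTION_HASH_SHIFT/MASK are assumed to come from
 * sfe_ipv4.h. The fold XORs the bits above the shift back into the low bits
 * before masking, so 5-tuples that differ only in their upper hash bits still
 * spread across buckets. The dev argument is accepted to keep the signature
 * uniform with the lookup helpers but does not contribute to the hash value.
 */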

/*
 * sfe_ipv4_find_connection_match_rcu()
 *	Get the IPv4 flow match info that corresponds to a particular 5-tuple.
 *
 * On entry we must be holding the RCU read lock that protects the hash table.
 */
struct sfe_ipv4_connection_match *
sfe_ipv4_find_connection_match_rcu(struct sfe_ipv4 *si, struct net_device *dev, u8 protocol,
				   __be32 src_ip, __be16 src_port,
				   __be32 dest_ip, __be16 dest_port)
{
	struct sfe_ipv4_connection_match *cm = NULL;
	unsigned int conn_match_idx;
	struct hlist_head *lhead;

	WARN_ON_ONCE(!rcu_read_lock_held());

	conn_match_idx = sfe_ipv4_get_connection_match_hash(dev, protocol, src_ip, src_port, dest_ip, dest_port);

	lhead = &si->hlist_conn_match_hash_head[conn_match_idx];

	hlist_for_each_entry_rcu(cm, lhead, hnode) {
		if (cm->match_src_port != src_port
		    || cm->match_dest_port != dest_port
		    || cm->match_src_ip != src_ip
		    || cm->match_dest_ip != dest_ip
		    || cm->match_protocol != protocol) {
			continue;
		}

		this_cpu_inc(si->stats_pcpu->connection_match_hash_hits64);

		break;
	}

	return cm;
}
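
/*
 * Typical caller pattern (an added sketch; the real callers are the
 * per-protocol receive paths such as the UDP/TCP handlers):
 *
 *	rcu_read_lock();
 *	cm = sfe_ipv4_find_connection_match_rcu(si, dev, IPPROTO_UDP,
 *						iph->saddr, udph->source,
 *						iph->daddr, udph->dest);
 *	if (unlikely(!cm)) {
 *		rcu_read_unlock();
 *		return 0;	(exception the packet to the slow path)
 *	}
 *	(forward the packet using the match entry)
 *	rcu_read_unlock();
 */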

/*
 * sfe_ipv4_connection_match_update_summary_stats()
 *	Update the summary stats for a connection match entry.
 *
 * Stats are incremented atomically, so use atomic subtraction to update the
 * summary stats.
 */
static inline void sfe_ipv4_connection_match_update_summary_stats(struct sfe_ipv4_connection_match *cm,
								  u32 *packets, u32 *bytes)
{
	u32 packet_count, byte_count;

	packet_count = atomic_read(&cm->rx_packet_count);
	cm->rx_packet_count64 += packet_count;
	atomic_sub(packet_count, &cm->rx_packet_count);

	byte_count = atomic_read(&cm->rx_byte_count);
	cm->rx_byte_count64 += byte_count;
	atomic_sub(byte_count, &cm->rx_byte_count);

	*packets = packet_count;
	*bytes = byte_count;
}
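
/*
 * Note (added): the counters are drained with atomic_sub() of the value just
 * read rather than reset with atomic_set(..., 0), so any increments made by
 * the forwarding path between the atomic_read() and the reset are preserved
 * for the next summary update instead of being lost.
 */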

/*
 * sfe_ipv4_connection_match_compute_translations()
 *	Compute port and address translations for a connection match entry.
 */
static void sfe_ipv4_connection_match_compute_translations(struct sfe_ipv4_connection_match *cm)
{
	/*
	 * Before we insert the entry look to see if this is tagged as doing address
	 * translations. If it is then work out the adjustment that we need to apply
	 * to the transport checksum.
	 */
	if (cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_XLATE_SRC) {
		/*
		 * Precompute an incremental checksum adjustment so we can
		 * edit packets in this stream very quickly. The algorithm is from RFC1624.
		 */
		u16 src_ip_hi = cm->match_src_ip >> 16;
		u16 src_ip_lo = cm->match_src_ip & 0xffff;
		u32 xlate_src_ip = ~cm->xlate_src_ip;
		u16 xlate_src_ip_hi = xlate_src_ip >> 16;
		u16 xlate_src_ip_lo = xlate_src_ip & 0xffff;
		u16 xlate_src_port = ~cm->xlate_src_port;
		u32 adj;

		/*
		 * When we compute this fold it down to a 16-bit offset
		 * as that way we can avoid having to do a double
		 * folding of the twos-complement result because the
		 * addition of 2 16-bit values cannot cause a double
		 * wrap-around!
		 */
		adj = src_ip_hi + src_ip_lo + cm->match_src_port
		      + xlate_src_ip_hi + xlate_src_ip_lo + xlate_src_port;
		adj = (adj & 0xffff) + (adj >> 16);
		adj = (adj & 0xffff) + (adj >> 16);
		cm->xlate_src_csum_adjustment = (u16)adj;
	}

	if (cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_XLATE_DEST) {
		/*
		 * Precompute an incremental checksum adjustment so we can
		 * edit packets in this stream very quickly. The algorithm is from RFC1624.
		 */
		u16 dest_ip_hi = cm->match_dest_ip >> 16;
		u16 dest_ip_lo = cm->match_dest_ip & 0xffff;
		u32 xlate_dest_ip = ~cm->xlate_dest_ip;
		u16 xlate_dest_ip_hi = xlate_dest_ip >> 16;
		u16 xlate_dest_ip_lo = xlate_dest_ip & 0xffff;
		u16 xlate_dest_port = ~cm->xlate_dest_port;
		u32 adj;

		/*
		 * When we compute this fold it down to a 16-bit offset
		 * as that way we can avoid having to do a double
		 * folding of the twos-complement result because the
		 * addition of 2 16-bit values cannot cause a double
		 * wrap-around!
		 */
		adj = dest_ip_hi + dest_ip_lo + cm->match_dest_port
		      + xlate_dest_ip_hi + xlate_dest_ip_lo + xlate_dest_port;
		adj = (adj & 0xffff) + (adj >> 16);
		adj = (adj & 0xffff) + (adj >> 16);
		cm->xlate_dest_csum_adjustment = (u16)adj;
	}

	if (cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_XLATE_SRC) {
		u32 adj = ~cm->match_src_ip + cm->xlate_src_ip;
		if (adj < cm->xlate_src_ip) {
			adj++;
		}

		adj = (adj & 0xffff) + (adj >> 16);
		adj = (adj & 0xffff) + (adj >> 16);
		cm->xlate_src_partial_csum_adjustment = (u16)adj;
	}

	if (cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_XLATE_DEST) {
		u32 adj = ~cm->match_dest_ip + cm->xlate_dest_ip;
		if (adj < cm->xlate_dest_ip) {
			adj++;
		}

		adj = (adj & 0xffff) + (adj >> 16);
		adj = (adj & 0xffff) + (adj >> 16);
		cm->xlate_dest_partial_csum_adjustment = (u16)adj;
	}
}
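
/*
 * Illustrative sketch (added; the real users are the per-protocol transmit
 * paths, e.g. the UDP/TCP forwarding code): a precomputed RFC1624 adjustment
 * is applied to a transport checksum with a single fold, because the sum of
 * two 16-bit quantities cannot wrap around twice:
 *
 *	u32 sum = udph->check + cm->xlate_src_csum_adjustment;
 *	sum = (sum & 0xffff) + (sum >> 16);
 *	udph->check = (u16)sum;
 *
 * The *_partial_csum_adjustment values are assumed to be the variant applied
 * when the packet's checksum is still in CHECKSUM_PARTIAL (offload) form.
 */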

/*
 * sfe_ipv4_update_summary_stats()
 *	Update the summary stats.
 */
static void sfe_ipv4_update_summary_stats(struct sfe_ipv4 *si, struct sfe_ipv4_stats *stats)
{
	int i = 0;

	memset(stats, 0, sizeof(*stats));

	for_each_possible_cpu(i) {
		const struct sfe_ipv4_stats *s = per_cpu_ptr(si->stats_pcpu, i);

		stats->connection_create_requests64 += s->connection_create_requests64;
		stats->connection_create_collisions64 += s->connection_create_collisions64;
		stats->connection_create_failures64 += s->connection_create_failures64;
		stats->connection_destroy_requests64 += s->connection_destroy_requests64;
		stats->connection_destroy_misses64 += s->connection_destroy_misses64;
		stats->connection_match_hash_hits64 += s->connection_match_hash_hits64;
		stats->connection_match_hash_reorders64 += s->connection_match_hash_reorders64;
		stats->connection_flushes64 += s->connection_flushes64;
		stats->packets_dropped64 += s->packets_dropped64;
		stats->packets_forwarded64 += s->packets_forwarded64;
		stats->packets_fast_xmited64 += s->packets_fast_xmited64;
		stats->packets_not_forwarded64 += s->packets_not_forwarded64;
		stats->pppoe_encap_packets_forwarded64 += s->pppoe_encap_packets_forwarded64;
		stats->pppoe_decap_packets_forwarded64 += s->pppoe_decap_packets_forwarded64;
		stats->pppoe_bridge_packets_forwarded64 += s->pppoe_bridge_packets_forwarded64;
		stats->pppoe_bridge_packets_3tuple_forwarded64 += s->pppoe_bridge_packets_3tuple_forwarded64;
	}
}

/*
 * sfe_ipv4_insert_connection_match()
 *	Insert a connection match into the hash.
 */
static inline void sfe_ipv4_insert_connection_match(struct sfe_ipv4 *si,
						    struct sfe_ipv4_connection_match *cm)
{
	unsigned int conn_match_idx
		= sfe_ipv4_get_connection_match_hash(cm->match_dev, cm->match_protocol,
						     cm->match_src_ip, cm->match_src_port,
						     cm->match_dest_ip, cm->match_dest_port);

	lockdep_assert_held(&si->lock);

	hlist_add_head_rcu(&cm->hnode, &si->hlist_conn_match_hash_head[conn_match_idx]);
#ifdef CONFIG_NF_FLOW_COOKIE
	if (!si->flow_cookie_enable)
		return;

	/*
	 * Configure hardware to put a flow cookie in packets of this flow,
	 * so we can accelerate the lookup process when we receive them.
	 */
	for (conn_match_idx = 1; conn_match_idx < SFE_FLOW_COOKIE_SIZE; conn_match_idx++) {
		struct sfe_flow_cookie_entry *entry = &si->sfe_flow_cookie_table[conn_match_idx];

		if ((NULL == entry->match) && time_is_before_jiffies(entry->last_clean_time + HZ)) {
			flow_cookie_set_func_t func;

			rcu_read_lock();
			func = rcu_dereference(si->flow_cookie_set_func);
			if (func) {
				if (!func(cm->match_protocol, cm->match_src_ip, cm->match_src_port,
					  cm->match_dest_ip, cm->match_dest_port, conn_match_idx)) {
					entry->match = cm;
					cm->flow_cookie = conn_match_idx;
				}
			}
			rcu_read_unlock();

			break;
		}
	}
#endif
}

/*
 * sfe_ipv4_remove_connection_match()
 *	Remove a connection match object from the hash.
 */
static inline void sfe_ipv4_remove_connection_match(struct sfe_ipv4 *si, struct sfe_ipv4_connection_match *cm)
{
	lockdep_assert_held(&si->lock);

#ifdef CONFIG_NF_FLOW_COOKIE
	if (si->flow_cookie_enable) {
		/*
		 * Tell hardware that we no longer need a flow cookie in packets of this flow.
		 */
		unsigned int conn_match_idx;

		for (conn_match_idx = 1; conn_match_idx < SFE_FLOW_COOKIE_SIZE; conn_match_idx++) {
			struct sfe_flow_cookie_entry *entry = &si->sfe_flow_cookie_table[conn_match_idx];

			if (cm == entry->match) {
				flow_cookie_set_func_t func;

				rcu_read_lock();
				func = rcu_dereference(si->flow_cookie_set_func);
				if (func) {
					func(cm->match_protocol, cm->match_src_ip, cm->match_src_port,
					     cm->match_dest_ip, cm->match_dest_port, 0);
				}
				rcu_read_unlock();

				cm->flow_cookie = 0;
				entry->match = NULL;
				entry->last_clean_time = jiffies;
				break;
			}
		}
	}
#endif

	hlist_del_init_rcu(&cm->hnode);
}

/*
 * sfe_ipv4_get_connection_hash()
 *	Generate the hash used in connection lookups.
 */
static inline unsigned int sfe_ipv4_get_connection_hash(u8 protocol, __be32 src_ip, __be16 src_port,
							__be32 dest_ip, __be16 dest_port)
{
	u32 hash = ntohl(src_ip ^ dest_ip) ^ protocol ^ ntohs(src_port) ^ dest_port;
	return ((hash >> SFE_IPV4_CONNECTION_HASH_SHIFT) ^ hash) & SFE_IPV4_CONNECTION_HASH_MASK;
}

/*
 * sfe_ipv4_find_connection()
 *	Get the IPv4 connection info that corresponds to a particular 5-tuple.
 *
 * On entry we must be holding the lock that protects the hash table.
 */
static inline struct sfe_ipv4_connection *sfe_ipv4_find_connection(struct sfe_ipv4 *si, u32 protocol,
								   __be32 src_ip, __be16 src_port,
								   __be32 dest_ip, __be16 dest_port)
{
	struct sfe_ipv4_connection *c;
	unsigned int conn_idx = sfe_ipv4_get_connection_hash(protocol, src_ip, src_port, dest_ip, dest_port);

	lockdep_assert_held(&si->lock);

	c = si->conn_hash[conn_idx];

	/*
	 * The connection entry will be needed again for subsequent create/destroy
	 * requests, so there is no need to re-order the entry for these lookups.
	 */
	while (c) {
		if ((c->src_port == src_port)
		    && (c->dest_port == dest_port)
		    && (c->src_ip == src_ip)
		    && (c->dest_ip == dest_ip)
		    && (c->protocol == protocol)) {
			return c;
		}

		c = c->next;
	}

	return NULL;
}

/*
 * sfe_ipv4_insert_connection()
 *	Insert a connection into the hash.
 *
 * On entry we must be holding the lock that protects the hash table.
 */
static void sfe_ipv4_insert_connection(struct sfe_ipv4 *si, struct sfe_ipv4_connection *c)
{
	struct sfe_ipv4_connection **hash_head;
	struct sfe_ipv4_connection *prev_head;
	unsigned int conn_idx;

	lockdep_assert_held(&si->lock);

	/*
	 * Insert entry into the connection hash.
	 */
	conn_idx = sfe_ipv4_get_connection_hash(c->protocol, c->src_ip, c->src_port,
						c->dest_ip, c->dest_port);
	hash_head = &si->conn_hash[conn_idx];
	prev_head = *hash_head;
	c->prev = NULL;
	if (prev_head) {
		prev_head->prev = c;
	}

	c->next = prev_head;
	*hash_head = c;

	/*
	 * Insert entry into the "all connections" list.
	 */
	if (si->all_connections_tail) {
		c->all_connections_prev = si->all_connections_tail;
		si->all_connections_tail->all_connections_next = c;
	} else {
		c->all_connections_prev = NULL;
		si->all_connections_head = c;
	}

	si->all_connections_tail = c;
	c->all_connections_next = NULL;
	si->num_connections++;

	/*
	 * Insert the connection match objects too.
	 */
	sfe_ipv4_insert_connection_match(si, c->original_match);
	sfe_ipv4_insert_connection_match(si, c->reply_match);
}

/*
 * sfe_ipv4_remove_connection()
 *	Remove a sfe_ipv4_connection object from the hash.
 */
bool sfe_ipv4_remove_connection(struct sfe_ipv4 *si, struct sfe_ipv4_connection *c)
{
	lockdep_assert_held(&si->lock);

	if (c->removed) {
		DEBUG_ERROR("%px: Connection has been removed already\n", c);
		return false;
	}

	/*
	 * Drop the reference taken on the decap-direction top_interface_dev.
	 */
	if (c->reply_match->top_interface_dev) {
		dev_put(c->reply_match->top_interface_dev);
	}

	/*
	 * Remove the connection match objects.
	 */
	sfe_ipv4_remove_connection_match(si, c->reply_match);
	sfe_ipv4_remove_connection_match(si, c->original_match);

	/*
	 * Unlink the connection.
	 */
	if (c->prev) {
		c->prev->next = c->next;
	} else {
		unsigned int conn_idx = sfe_ipv4_get_connection_hash(c->protocol, c->src_ip, c->src_port,
								     c->dest_ip, c->dest_port);
		si->conn_hash[conn_idx] = c->next;
	}

	if (c->next) {
		c->next->prev = c->prev;
	}

	/*
	 * Unlink connection from the all_connections list.
	 */
	if (c->all_connections_prev) {
		c->all_connections_prev->all_connections_next = c->all_connections_next;
	} else {
		si->all_connections_head = c->all_connections_next;
	}

	if (c->all_connections_next) {
		c->all_connections_next->all_connections_prev = c->all_connections_prev;
	} else {
		si->all_connections_tail = c->all_connections_prev;
	}

	/*
	 * If I am the next sync connection, move the sync to my next or head.
	 */
	if (unlikely(si->wc_next == c)) {
		si->wc_next = c->all_connections_next;
	}

	c->removed = true;
	si->num_connections--;
	return true;
}

/*
 * sfe_ipv4_gen_sync_connection()
 *	Sync a connection.
 *
 * On entry to this function we expect that the lock for the connection is either
 * already held (when called from sfe_ipv4_periodic_sync()) or isn't required
 * (when called from sfe_ipv4_flush_connection()).
 */
static void sfe_ipv4_gen_sync_connection(struct sfe_ipv4 *si, struct sfe_ipv4_connection *c,
					 struct sfe_connection_sync *sis, sfe_sync_reason_t reason,
					 u64 now_jiffies)
{
	struct sfe_ipv4_connection_match *original_cm;
	struct sfe_ipv4_connection_match *reply_cm;
	u32 packet_count, byte_count;

	/*
	 * Fill in the update message.
	 */
	sis->is_v6 = 0;
	sis->protocol = c->protocol;
	sis->src_ip.ip = c->src_ip;
	sis->src_ip_xlate.ip = c->src_ip_xlate;
	sis->dest_ip.ip = c->dest_ip;
	sis->dest_ip_xlate.ip = c->dest_ip_xlate;
	sis->src_port = c->src_port;
	sis->src_port_xlate = c->src_port_xlate;
	sis->dest_port = c->dest_port;
	sis->dest_port_xlate = c->dest_port_xlate;

	original_cm = c->original_match;
	reply_cm = c->reply_match;
	sis->src_td_max_window = original_cm->protocol_state.tcp.max_win;
	sis->src_td_end = original_cm->protocol_state.tcp.end;
	sis->src_td_max_end = original_cm->protocol_state.tcp.max_end;
	sis->dest_td_max_window = reply_cm->protocol_state.tcp.max_win;
	sis->dest_td_end = reply_cm->protocol_state.tcp.end;
	sis->dest_td_max_end = reply_cm->protocol_state.tcp.max_end;

	sfe_ipv4_connection_match_update_summary_stats(original_cm, &packet_count, &byte_count);
	sis->src_new_packet_count = packet_count;
	sis->src_new_byte_count = byte_count;

	sfe_ipv4_connection_match_update_summary_stats(reply_cm, &packet_count, &byte_count);
	sis->dest_new_packet_count = packet_count;
	sis->dest_new_byte_count = byte_count;

	sis->src_dev = original_cm->match_dev;
	sis->src_packet_count = original_cm->rx_packet_count64;
	sis->src_byte_count = original_cm->rx_byte_count64;

	sis->dest_dev = reply_cm->match_dev;
	sis->dest_packet_count = reply_cm->rx_packet_count64;
	sis->dest_byte_count = reply_cm->rx_byte_count64;

	sis->reason = reason;

	/*
	 * Get the time increment since our last sync.
	 */
	sis->delta_jiffies = now_jiffies - c->last_sync_jiffies;
	c->last_sync_jiffies = now_jiffies;
}

/*
 * sfe_ipv4_free_connection_rcu()
 *	Called at an RCU quiescent state to free the connection object.
 */
static void sfe_ipv4_free_connection_rcu(struct rcu_head *head)
{
	struct sfe_ipv4_connection *c;
	struct udp_sock *up;
	struct sock *sk;

	/*
	 * We don't need the spin lock as the connection has already been
	 * removed from the linked list.
	 */
	c = container_of(head, struct sfe_ipv4_connection, rcu);

	BUG_ON(!c->removed);

	DEBUG_TRACE("%px: connection has been deleted\n", c);

	/*
	 * Decrease the refcount taken in sfe_ipv4_create_rule() during the
	 * call to __udp4_lib_lookup().
	 */
	up = c->reply_match->up;
	if (up) {
		sk = (struct sock *)up;
		sock_put(sk);
	}

	/*
	 * Release our hold of the source and dest devices and free the memory
	 * for our connection objects.
	 */
	dev_put(c->original_dev);
	dev_put(c->reply_dev);
	kfree(c->original_match);
	kfree(c->reply_match);
	kfree(c);
}

/*
 * sfe_ipv4_sync_status()
 *	Update a connection's status to its connection manager.
 *
 * si: the ipv4 context
 * c: which connection to be notified
 * reason: what kind of notification: flush, stats or destroy
 */
void sfe_ipv4_sync_status(struct sfe_ipv4 *si,
			  struct sfe_ipv4_connection *c,
			  sfe_sync_reason_t reason)
{
	struct sfe_connection_sync sis;
	u64 now_jiffies;
	sfe_sync_rule_callback_t sync_rule_callback;

	rcu_read_lock();
	sync_rule_callback = rcu_dereference(si->sync_rule_callback);
	rcu_read_unlock();
	if (!sync_rule_callback) {
		return;
	}

	/*
	 * Generate a sync message and then sync.
	 */
	now_jiffies = get_jiffies_64();
	sfe_ipv4_gen_sync_connection(si, c, &sis, reason, now_jiffies);
	sync_rule_callback(&sis);
}

/*
 * sfe_ipv4_flush_connection()
 *	Flush a connection and free all associated resources.
 *
 * We need to be called with bottom halves disabled locally as we need to acquire
 * the connection hash lock and release it again. In general we're actually called
 * from within a BH and so we're fine, but we're also called when connections are
 * torn down.
 */
void sfe_ipv4_flush_connection(struct sfe_ipv4 *si,
			       struct sfe_ipv4_connection *c,
			       sfe_sync_reason_t reason)
{
	BUG_ON(!c->removed);

	this_cpu_inc(si->stats_pcpu->connection_flushes64);
	sfe_ipv4_sync_status(si, c, reason);

	/*
	 * Release our hold of the source and dest devices and free the memory
	 * for our connection objects.
	 */
	call_rcu(&c->rcu, sfe_ipv4_free_connection_rcu);
}

/*
 * sfe_ipv4_service_class_stats_inc()
 *	Increment per-CPU per-service-class stats.
 */
void sfe_ipv4_service_class_stats_inc(struct sfe_ipv4 *si, uint8_t sid, uint64_t bytes)
{
	struct sfe_ipv4_service_class_stats_db *sc_stats_db = this_cpu_ptr(si->stats_pcpu_psc);
	struct sfe_ipv4_per_service_class_stats *sc_stats = &sc_stats_db->psc_stats[sid];

	write_seqcount_begin(&sc_stats->seq);
	sc_stats->tx_bytes += bytes;
	sc_stats->tx_packets++;
	write_seqcount_end(&sc_stats->seq);
}
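
/*
 * Note (added): the write_seqcount_begin()/write_seqcount_end() pair lets
 * sfe_ipv4_service_class_stats_pcpu_get() further below read tx_bytes and
 * tx_packets without taking a lock: a reader that races with this writer sees
 * the sequence count change and retries. Writers update only their own CPU's
 * copy (this_cpu_ptr), so they do not race with one another.
 */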

/*
 * sfe_ipv4_exception_stats_inc()
 *	Increment exception stats.
 */
void sfe_ipv4_exception_stats_inc(struct sfe_ipv4 *si, enum sfe_ipv4_exception_events reason)
{
	struct sfe_ipv4_stats *stats = this_cpu_ptr(si->stats_pcpu);

	stats->exception_events64[reason]++;
	stats->packets_not_forwarded64++;
}

/*
 * sfe_ipv4_is_local_ip()
 *	Returns true if the IP address is local; returns false otherwise.
 */
static bool sfe_ipv4_is_local_ip(struct sfe_ipv4 *si, __be32 ip_addr)
{
	struct net_device *dev;

	dev = ip_dev_find(&init_net, ip_addr);
	if (dev) {
		dev_put(dev);
		return true;
	}

	return false;
}

/*
 * sfe_ipv4_recv()
 *	Handle packet receives and forwarding.
 *
 * Returns 1 if the packet is forwarded or 0 if it isn't.
 */
int sfe_ipv4_recv(struct net_device *dev, struct sk_buff *skb, struct sfe_l2_info *l2_info, bool tun_outer)
{
	struct sfe_ipv4 *si = &__si;
	unsigned int len;
	unsigned int tot_len;
	unsigned int frag_off;
	unsigned int ihl;
	bool sync_on_find;
	bool ip_options;
	struct iphdr *iph;
	u32 protocol;

	/*
	 * Check that we have space for an IP header here.
	 */
	len = skb->len;
	if (unlikely(!pskb_may_pull(skb, sizeof(struct iphdr)))) {
		sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_HEADER_INCOMPLETE);
		DEBUG_TRACE("len: %u is too short\n", len);
		return 0;
	}

	/*
	 * Validate the IP csum if necessary. If ip_summed is set to CHECKSUM_UNNECESSARY, it is assumed
	 * that the L3 checksum is validated by the Rx interface or the tunnel interface that has
	 * generated the packet.
	 */
	iph = (struct iphdr *)skb->data;
	if (unlikely(skb->ip_summed != CHECKSUM_UNNECESSARY) && (ip_fast_csum((u8 *)iph, iph->ihl))) {
		sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_HEADER_CSUM_BAD);

		DEBUG_TRACE("Bad IPv4 header csum: 0x%x\n", iph->check);
		return 0;
	}

	/*
	 * Check that our "total length" is large enough for an IP header.
	 */
	tot_len = ntohs(iph->tot_len);
	if (unlikely(tot_len < sizeof(struct iphdr))) {
		sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_BAD_TOTAL_LENGTH);
		DEBUG_TRACE("tot_len: %u is too short\n", tot_len);
		return 0;
	}

	/*
	 * Is our IP version wrong?
	 */
	if (unlikely(iph->version != 4)) {
		sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_NON_V4);
		DEBUG_TRACE("IP version: %u\n", iph->version);
		return 0;
	}

	/*
	 * Does our datagram fit inside the skb?
	 */
	if (unlikely(tot_len > len)) {
		DEBUG_TRACE("tot_len: %u, exceeds len: %u\n", tot_len, len);
		sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_DATAGRAM_INCOMPLETE);
		return 0;
	}

	/*
	 * Do we have a non-initial fragment?
	 */
	frag_off = ntohs(iph->frag_off);
	if (unlikely(frag_off & IP_OFFSET)) {
		sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_NON_INITIAL_FRAGMENT);
		DEBUG_TRACE("non-initial fragment\n");
		return 0;
	}

	/*
	 * If we have a (first) fragment then mark it to cause any connection to flush.
	 */
	sync_on_find = unlikely(frag_off & IP_MF) ? true : false;

	/*
	 * Do we have any IP options? That's definitely a slow path! If we do have IP
	 * options we need to recheck our header size.
	 */
	ihl = iph->ihl << 2;
	ip_options = unlikely(ihl != sizeof(struct iphdr)) ? true : false;
	if (unlikely(ip_options)) {
		if (unlikely(len < ihl)) {
			DEBUG_TRACE("len: %u is too short for header of size: %u\n", len, ihl);
			sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_IP_OPTIONS_INCOMPLETE);
			return 0;
		}

		sync_on_find = true;
	}

	/*
	 * Handle PPPoE bridge packets using 3-tuple acceleration if SFE_PPPOE_BR_ACCEL_MODE_EN_3T
	 */
	if (unlikely(sfe_l2_parse_flag_check(l2_info, SFE_L2_PARSE_FLAGS_PPPOE_INGRESS)) &&
	    unlikely(sfe_pppoe_get_br_accel_mode() == SFE_PPPOE_BR_ACCEL_MODE_EN_3T)) {
		struct ethhdr *eth = eth_hdr(skb);

		if (!sfe_pppoe_mgr_find_session(l2_info->pppoe_session_id, eth->h_source)) {
			return sfe_ipv4_recv_pppoe_bridge(si, skb, dev, len, iph, ihl, l2_info);
		}
	}

	protocol = iph->protocol;
	if (IPPROTO_UDP == protocol) {
		return sfe_ipv4_recv_udp(si, skb, dev, len, iph, ihl, sync_on_find, l2_info, tun_outer);
	}

	if (IPPROTO_TCP == protocol) {
		return sfe_ipv4_recv_tcp(si, skb, dev, len, iph, ihl, sync_on_find, l2_info);
	}

	if (IPPROTO_ESP == protocol) {
		return sfe_ipv4_recv_esp(si, skb, dev, len, iph, ihl, sync_on_find, tun_outer);
	}

	if (IPPROTO_ICMP == protocol) {
		return sfe_ipv4_recv_icmp(si, skb, dev, len, iph, ihl);
	}

#ifdef SFE_GRE_TUN_ENABLE
	if (IPPROTO_GRE == protocol) {
		return sfe_ipv4_recv_gre(si, skb, dev, len, iph, ihl, sync_on_find, l2_info, tun_outer);
	}
#endif
	if (IPPROTO_IPV6 == protocol) {
		return sfe_ipv4_recv_tun6rd(si, skb, dev, len, iph, ihl, sync_on_find, l2_info, true);
	}

	sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_UNHANDLED_PROTOCOL);

	DEBUG_TRACE("not UDP, TCP or ICMP: %u\n", protocol);
	return 0;
}
943
Nicolas Costa436926b2014-01-14 10:36:22 -0600944static void
945sfe_ipv4_update_tcp_state(struct sfe_ipv4_connection *c,
Ratheesh Kannoth89302a72021-10-20 08:10:37 +0530946 struct sfe_ipv4_rule_create_msg *msg)
Nicolas Costa436926b2014-01-14 10:36:22 -0600947{
948 struct sfe_ipv4_connection_match *orig_cm;
949 struct sfe_ipv4_connection_match *repl_cm;
950 struct sfe_ipv4_tcp_connection_match *orig_tcp;
951 struct sfe_ipv4_tcp_connection_match *repl_tcp;
952
953 orig_cm = c->original_match;
954 repl_cm = c->reply_match;
955 orig_tcp = &orig_cm->protocol_state.tcp;
956 repl_tcp = &repl_cm->protocol_state.tcp;
957
958 /* update orig */
Ratheesh Kannoth89302a72021-10-20 08:10:37 +0530959 if (orig_tcp->max_win < msg->tcp_rule.flow_max_window) {
960 orig_tcp->max_win = msg->tcp_rule.flow_max_window;
Nicolas Costa436926b2014-01-14 10:36:22 -0600961 }
Ratheesh Kannoth89302a72021-10-20 08:10:37 +0530962 if ((s32)(orig_tcp->end - msg->tcp_rule.flow_end) < 0) {
963 orig_tcp->end = msg->tcp_rule.flow_end;
Nicolas Costa436926b2014-01-14 10:36:22 -0600964 }
Ratheesh Kannoth89302a72021-10-20 08:10:37 +0530965 if ((s32)(orig_tcp->max_end - msg->tcp_rule.flow_max_end) < 0) {
966 orig_tcp->max_end = msg->tcp_rule.flow_max_end;
Nicolas Costa436926b2014-01-14 10:36:22 -0600967 }
968
969 /* update reply */
Ratheesh Kannoth89302a72021-10-20 08:10:37 +0530970 if (repl_tcp->max_win < msg->tcp_rule.return_max_window) {
971 repl_tcp->max_win = msg->tcp_rule.return_max_window;
Nicolas Costa436926b2014-01-14 10:36:22 -0600972 }
Ratheesh Kannoth89302a72021-10-20 08:10:37 +0530973 if ((s32)(repl_tcp->end - msg->tcp_rule.return_end) < 0) {
974 repl_tcp->end = msg->tcp_rule.return_end;
Nicolas Costa436926b2014-01-14 10:36:22 -0600975 }
Ratheesh Kannoth89302a72021-10-20 08:10:37 +0530976 if ((s32)(repl_tcp->max_end - msg->tcp_rule.return_max_end) < 0) {
977 repl_tcp->max_end = msg->tcp_rule.return_max_end;
Nicolas Costa436926b2014-01-14 10:36:22 -0600978 }
979
980 /* update match flags */
981 orig_cm->flags &= ~SFE_IPV4_CONNECTION_MATCH_FLAG_NO_SEQ_CHECK;
982 repl_cm->flags &= ~SFE_IPV4_CONNECTION_MATCH_FLAG_NO_SEQ_CHECK;
Ratheesh Kannoth89302a72021-10-20 08:10:37 +0530983 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_NO_SEQ_CHECK) {
984
Nicolas Costa436926b2014-01-14 10:36:22 -0600985 orig_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_NO_SEQ_CHECK;
986 repl_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_NO_SEQ_CHECK;
987 }
988}
989
990static void
991sfe_ipv4_update_protocol_state(struct sfe_ipv4_connection *c,
Ratheesh Kannoth89302a72021-10-20 08:10:37 +0530992 struct sfe_ipv4_rule_create_msg *msg)
Nicolas Costa436926b2014-01-14 10:36:22 -0600993{
Ratheesh Kannoth89302a72021-10-20 08:10:37 +0530994 switch (msg->tuple.protocol) {
Nicolas Costa436926b2014-01-14 10:36:22 -0600995 case IPPROTO_TCP:
Ratheesh Kannoth89302a72021-10-20 08:10:37 +0530996 sfe_ipv4_update_tcp_state(c, msg);
Nicolas Costa436926b2014-01-14 10:36:22 -0600997 break;
998 }
999}
1000
Wayne Tanbb7f1782021-12-13 11:16:04 -08001001/*
1002 * sfe_ipv4_match_entry_set_vlan()
1003 */
1004static void sfe_ipv4_match_entry_set_vlan(
1005 struct sfe_ipv4_connection_match *cm,
1006 u32 primary_ingress_vlan_tag,
1007 u32 primary_egress_vlan_tag,
1008 u32 secondary_ingress_vlan_tag,
1009 u32 secondary_egress_vlan_tag)
1010{
1011 u16 tpid;
1012 /*
1013 * Prevent stacking header counts when updating.
1014 */
1015 cm->ingress_vlan_hdr_cnt = 0;
1016 cm->egress_vlan_hdr_cnt = 0;
1017 memset(cm->ingress_vlan_hdr, 0, sizeof(cm->ingress_vlan_hdr));
1018 memset(cm->egress_vlan_hdr, 0, sizeof(cm->egress_vlan_hdr));
1019
1020 /*
1021 * vlan_hdr[0] corresponds to outer tag
1022 * vlan_hdr[1] corresponds to inner tag
1023 * Extract the vlan information (tpid and tci) from rule message
1024 */
1025 if ((primary_ingress_vlan_tag & VLAN_VID_MASK) != SFE_VLAN_ID_NOT_CONFIGURED) {
1026 tpid = (u16)(primary_ingress_vlan_tag >> 16);
1027 cm->ingress_vlan_hdr[0].tpid = ntohs(tpid);
1028 cm->ingress_vlan_hdr[0].tci = (u16)primary_ingress_vlan_tag;
1029 cm->ingress_vlan_hdr_cnt++;
1030 }
1031
1032 if ((secondary_ingress_vlan_tag & VLAN_VID_MASK) != SFE_VLAN_ID_NOT_CONFIGURED) {
1033 tpid = (u16)(secondary_ingress_vlan_tag >> 16);
1034 cm->ingress_vlan_hdr[1].tpid = ntohs(tpid);
1035 cm->ingress_vlan_hdr[1].tci = (u16)secondary_ingress_vlan_tag;
1036 cm->ingress_vlan_hdr_cnt++;
1037 }
1038
1039 if ((primary_egress_vlan_tag & VLAN_VID_MASK) != SFE_VLAN_ID_NOT_CONFIGURED) {
1040 tpid = (u16)(primary_egress_vlan_tag >> 16);
1041 cm->egress_vlan_hdr[0].tpid = ntohs(tpid);
1042 cm->egress_vlan_hdr[0].tci = (u16)primary_egress_vlan_tag;
1043 cm->egress_vlan_hdr_cnt++;
1044 }
1045
1046 if ((secondary_egress_vlan_tag & VLAN_VID_MASK) != SFE_VLAN_ID_NOT_CONFIGURED) {
1047 tpid = (u16)(secondary_egress_vlan_tag >> 16);
1048 cm->egress_vlan_hdr[1].tpid = ntohs(tpid);
1049 cm->egress_vlan_hdr[1].tci = (u16)secondary_egress_vlan_tag;
1050 cm->egress_vlan_hdr_cnt++;
1051 }
1052}
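
/*
 * Example (added for illustration): each VLAN tag arrives as a u32 with the
 * TPID in the upper 16 bits and the TCI in the lower 16 bits. A primary
 * ingress tag of 0x81000065 therefore describes an 802.1Q header
 * (TPID 0x8100) carrying VLAN ID 101 and populates ingress_vlan_hdr[0] as
 * the outer tag, while an unconfigured tag carries SFE_VLAN_ID_NOT_CONFIGURED
 * in its VID bits and is skipped.
 */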

void sfe_ipv4_update_rule(struct sfe_ipv4_rule_create_msg *msg)
{
	struct sfe_ipv4_connection *c;
	struct sfe_ipv4 *si = &__si;

	spin_lock_bh(&si->lock);

	c = sfe_ipv4_find_connection(si,
				     msg->tuple.protocol,
				     msg->tuple.flow_ip,
				     msg->tuple.flow_ident,
				     msg->tuple.return_ip,
				     msg->tuple.return_ident);
	if (c != NULL) {
		sfe_ipv4_update_protocol_state(c, msg);
	}

	spin_unlock_bh(&si->lock);
}

/*
 * sfe_ipv4_mark_rule_update()
 *	Updates the mark values of match entries.
 */
void sfe_ipv4_mark_rule_update(struct sfe_connection_mark *mark)
{
	struct sfe_ipv4_connection *c;
	struct sfe_ipv4 *si = &__si;

	spin_lock_bh(&si->lock);
	c = sfe_ipv4_find_connection(si, mark->protocol,
				     mark->src_ip[0],
				     mark->src_port,
				     mark->dest_ip[0],
				     mark->dest_port);
	if (!c) {
		spin_unlock_bh(&si->lock);
		DEBUG_WARN("%px: connection not found for mark update\n", mark);
		return;
	}

	c->original_match->mark = mark->mark;
	c->reply_match->mark = mark->mark;
	spin_unlock_bh(&si->lock);
	DEBUG_TRACE("%px: connection mark updated with %d\n", mark, mark->mark);
}
EXPORT_SYMBOL(sfe_ipv4_mark_rule_update);

/*
 * sfe_ipv4_xmit_eth_type_check()
 *	Checking if MAC header has to be written.
 */
static inline bool sfe_ipv4_xmit_eth_type_check(struct net_device *dev, u32 cm_flags)
{
	if (!(dev->flags & IFF_NOARP)) {
		return true;
	}

	/*
	 * For PPPoE, since we are now supporting PPPoE encapsulation, we are writing L2 header.
	 */
	if (unlikely(cm_flags & SFE_IPV4_CONNECTION_MATCH_FLAG_PPPOE_ENCAP)) {
		return true;
	}

	return false;
}

/*
 * sfe_ipv4_service_class_stats_pcpu_get()
 *	Gets one CPU's service class statistics.
 */
static inline bool sfe_ipv4_service_class_stats_pcpu_get(struct sfe_ipv4_per_service_class_stats *sc_stats, uint64_t *bytes, uint64_t *packets)
{
	uint32_t retries = 0;
	uint32_t seq;
	uint64_t bytes_tmp, packets_tmp;

	do {
		seq = read_seqcount_begin(&sc_stats->seq);
		bytes_tmp = sc_stats->tx_bytes;
		packets_tmp = sc_stats->tx_packets;
	} while (read_seqcount_retry(&sc_stats->seq, seq) && ++retries < SFE_SERVICE_CLASS_STATS_MAX_RETRY);

	*bytes += bytes_tmp;
	*packets += packets_tmp;

	return retries < SFE_SERVICE_CLASS_STATS_MAX_RETRY;
}

/*
 * sfe_ipv4_service_class_stats_get()
 *	Copy the ipv4 statistics for the given service class.
 */
bool sfe_ipv4_service_class_stats_get(uint8_t sid, uint64_t *bytes, uint64_t *packets)
{
	struct sfe_ipv4 *si = &__si;
	uint32_t cpu = 0;

	for_each_possible_cpu(cpu) {
		struct sfe_ipv4_service_class_stats_db *stats_db = per_cpu_ptr(si->stats_pcpu_psc, cpu);
		struct sfe_ipv4_per_service_class_stats *sc_stats = &stats_db->psc_stats[sid];

		if (!sfe_ipv4_service_class_stats_pcpu_get(sc_stats, bytes, packets)) {
			return false;
		}
	}

	return true;
}
1163
1164/*
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001165 * sfe_ipv4_create_rule()
1166 * Create a forwarding rule.
1167 */
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301168int sfe_ipv4_create_rule(struct sfe_ipv4_rule_create_msg *msg)
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001169{
Dave Hudsondcd08fb2013-11-22 09:25:16 -06001170 struct sfe_ipv4 *si = &__si;
Ratheesh Kannoth94fc5b82021-10-20 07:45:06 +05301171 struct sfe_ipv4_connection *c, *c_old;
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001172 struct sfe_ipv4_connection_match *original_cm;
1173 struct sfe_ipv4_connection_match *reply_cm;
Matthew McClintockdb5ac512014-01-16 17:01:40 -06001174 struct net_device *dest_dev;
1175 struct net_device *src_dev;
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301176 struct sfe_ipv4_5tuple *tuple = &msg->tuple;
Suruchi Sumanc1a4a612021-10-21 14:50:23 +05301177 s32 flow_interface_num = msg->conn_rule.flow_top_interface_num;
1178 s32 return_interface_num = msg->conn_rule.return_top_interface_num;
Amitesh Anand63be37d2021-12-24 20:51:48 +05301179 struct net *net;
1180 struct sock *sk;
1181 unsigned int src_if_idx;
Parikshit Guned31a8202022-01-05 22:15:04 +05301182 u32 flow_sawf_tag;
1183 u32 return_sawf_tag;
Matthew McClintockdb5ac512014-01-16 17:01:40 -06001184
Suruchi Sumanc1a4a612021-10-21 14:50:23 +05301185 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_USE_FLOW_BOTTOM_INTERFACE) {
1186 flow_interface_num = msg->conn_rule.flow_interface_num;
1187 }
1188
1189 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_USE_RETURN_BOTTOM_INTERFACE) {
1190 return_interface_num = msg->conn_rule.return_interface_num;
1191 }
1192
1193 src_dev = dev_get_by_index(&init_net, flow_interface_num);
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301194 if (!src_dev) {
1195 DEBUG_WARN("%px: Unable to find src_dev corresponding to %d\n", msg,
Suruchi Sumanc1a4a612021-10-21 14:50:23 +05301196 flow_interface_num);
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301197 this_cpu_inc(si->stats_pcpu->connection_create_failures64);
1198 return -EINVAL;
1199 }
1200
Suruchi Sumanc1a4a612021-10-21 14:50:23 +05301201 dest_dev = dev_get_by_index(&init_net, return_interface_num);
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301202 if (!dest_dev) {
1203 DEBUG_WARN("%px: Unable to find dest_dev corresponding to %d\n", msg,
Suruchi Sumanc1a4a612021-10-21 14:50:23 +05301204 return_interface_num);
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301205 this_cpu_inc(si->stats_pcpu->connection_create_failures64);
1206 dev_put(src_dev);
1207 return -EINVAL;
1208 }
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001209
Matthew McClintock389b42a2014-09-24 14:05:51 -05001210 if (unlikely((dest_dev->reg_state != NETREG_REGISTERED) ||
1211 (src_dev->reg_state != NETREG_REGISTERED))) {
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301212 dev_put(src_dev);
1213 dev_put(dest_dev);
1214 DEBUG_WARN("%px: src_dev=%s and dest_dev=%s are unregistered\n", msg,
1215 src_dev->name, dest_dev->name);
1216 this_cpu_inc(si->stats_pcpu->connection_create_failures64);
Matthew McClintock389b42a2014-09-24 14:05:51 -05001217 return -EINVAL;
1218 }
1219
Ratheesh Kannoth94fc5b82021-10-20 07:45:06 +05301220 /*
1221 * Allocate the various connection tracking objects.
1222 */
Parikshit Guneef1664c2022-03-24 14:15:42 +05301223 c = (struct sfe_ipv4_connection *)kzalloc(sizeof(struct sfe_ipv4_connection), GFP_ATOMIC);
Ratheesh Kannoth94fc5b82021-10-20 07:45:06 +05301224 if (unlikely(!c)) {
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301225 DEBUG_WARN("%px: memory allocation of connection entry failed\n", msg);
1226 this_cpu_inc(si->stats_pcpu->connection_create_failures64);
1227 dev_put(src_dev);
1228 dev_put(dest_dev);
Ratheesh Kannoth94fc5b82021-10-20 07:45:06 +05301229 return -ENOMEM;
1230 }
1231
Parikshit Guneef1664c2022-03-24 14:15:42 +05301232 original_cm = (struct sfe_ipv4_connection_match *)kzalloc(sizeof(struct sfe_ipv4_connection_match), GFP_ATOMIC);
Ratheesh Kannoth94fc5b82021-10-20 07:45:06 +05301233 if (unlikely(!original_cm)) {
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301234 DEBUG_WARN("%px: memory allocation of connection match entry failed\n", msg);
1235 this_cpu_inc(si->stats_pcpu->connection_create_failures64);
Ratheesh Kannoth94fc5b82021-10-20 07:45:06 +05301236 kfree(c);
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301237 dev_put(src_dev);
1238 dev_put(dest_dev);
Ratheesh Kannoth94fc5b82021-10-20 07:45:06 +05301239 return -ENOMEM;
1240 }
1241
Parikshit Guneef1664c2022-03-24 14:15:42 +05301242 reply_cm = (struct sfe_ipv4_connection_match *)kzalloc(sizeof(struct sfe_ipv4_connection_match), GFP_ATOMIC);
Ratheesh Kannoth94fc5b82021-10-20 07:45:06 +05301243 if (unlikely(!reply_cm)) {
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301244 DEBUG_WARN("%px: memory allocation of connection match entry failed\n", msg);
1245 this_cpu_inc(si->stats_pcpu->connection_create_failures64);
Ratheesh Kannoth94fc5b82021-10-20 07:45:06 +05301246 kfree(original_cm);
1247 kfree(c);
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301248 dev_put(src_dev);
1249 dev_put(dest_dev);
Ratheesh Kannoth94fc5b82021-10-20 07:45:06 +05301250 return -ENOMEM;
1251 }
1252
Ratheesh Kannoth3aeb2892021-10-20 07:57:15 +05301253 this_cpu_inc(si->stats_pcpu->connection_create_requests64);
1254
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001255 spin_lock_bh(&si->lock);
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001256
1257 /*
Nicolas Costa436926b2014-01-14 10:36:22 -06001258 * Check to see if there is already a flow that matches the rule we're
1259 * trying to create. If there is then we can't create a new one.
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001260 */
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05301261 c_old = sfe_ipv4_find_connection(si,
Wayne Tanbb7f1782021-12-13 11:16:04 -08001262 msg->tuple.protocol,
1263 msg->tuple.flow_ip,
1264 msg->tuple.flow_ident,
1265 msg->tuple.return_ip,
1266 msg->tuple.return_ident);
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301267
Ratheesh Kannoth94fc5b82021-10-20 07:45:06 +05301268 if (c_old != NULL) {
Ratheesh Kannoth3aeb2892021-10-20 07:57:15 +05301269 this_cpu_inc(si->stats_pcpu->connection_create_collisions64);
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001270
1271 /*
Nicolas Costa436926b2014-01-14 10:36:22 -06001272 * If we already have the flow then it's likely that this
1273 * request to create the connection rule contains more
1274 * up-to-date information. Check and update accordingly.
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001275 */
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301276		sfe_ipv4_update_protocol_state(c_old, msg);
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001277 spin_unlock_bh(&si->lock);
1278
Ratheesh Kannoth94fc5b82021-10-20 07:45:06 +05301279 kfree(reply_cm);
1280 kfree(original_cm);
1281 kfree(c);
1282
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301283 dev_put(src_dev);
1284 dev_put(dest_dev);
1285
Amitesh Anand63be37d2021-12-24 20:51:48 +05301286 DEBUG_TRACE("%px: connection already exists - p:%d\n"
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301287 " s: %s:%pM:%pI4:%u, d: %s:%pM:%pI4:%u\n",
Amitesh Anand63be37d2021-12-24 20:51:48 +05301288 msg, tuple->protocol,
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301289 src_dev->name, msg->conn_rule.flow_mac, &tuple->flow_ip, ntohs(tuple->flow_ident),
1290 dest_dev->name, msg->conn_rule.return_mac, &tuple->return_ip, ntohs(tuple->return_ident));
1291
Nicolas Costa514fde02014-01-13 15:50:29 -06001292 return -EADDRINUSE;
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001293 }
1294
1295 /*
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001296 * Fill in the "original" direction connection matching object.
1297 * Note that the transmit MAC address is "dest_mac_xlate" because
1298 * we always know both ends of a connection by their translated
1299 * addresses and not their public addresses.
1300 */
Matthew McClintockdb5ac512014-01-16 17:01:40 -06001301 original_cm->match_dev = src_dev;
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301302 original_cm->match_protocol = tuple->protocol;
1303 original_cm->match_src_ip = tuple->flow_ip;
Suruchi Suman66609a72022-01-20 02:34:25 +05301304 original_cm->match_src_port = netif_is_vxlan(src_dev) ? 0 : tuple->flow_ident;
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301305 original_cm->match_dest_ip = tuple->return_ip;
1306 original_cm->match_dest_port = tuple->return_ident;
1307
1308 original_cm->xlate_src_ip = msg->conn_rule.flow_ip_xlate;
1309 original_cm->xlate_src_port = msg->conn_rule.flow_ident_xlate;
1310 original_cm->xlate_dest_ip = msg->conn_rule.return_ip_xlate;
Nitin Shettye6ed5b52021-12-27 14:50:11 +05301311 original_cm->xlate_dest_port = msg->conn_rule.return_ident_xlate;
1312
1313 if (tuple->protocol == IPPROTO_GRE) {
1314 /*
1315		 * The PPTP lookup is a 4-tuple lookup.
1316		 * During the rule lookup, the destination call ID from the packet
1317		 * is matched against the destination port in the cm.
1318 */
1319 original_cm->match_src_port = 0;
1320 original_cm->xlate_src_port = 0;
1321 }
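	/*
	 * A hedged sketch of what this buys (an illustration, not code from
	 * this file): with the source ident zeroed, the GRE/PPTP lookup key
	 * effectively degenerates from the usual 5-tuple to a 4-tuple,
	 *
	 *	{ IPPROTO_GRE, flow_ip, return_ip, dest_call_id }
	 *
	 * so a packet matches regardless of the call ID it carries in the
	 * source slot.
	 */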
Wayne Tanbb7f1782021-12-13 11:16:04 -08001322
Matthew McClintockdb5ac512014-01-16 17:01:40 -06001323 original_cm->xmit_dev = dest_dev;
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301324 original_cm->xmit_dev_mtu = msg->conn_rule.return_mtu;
1325
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001326 original_cm->connection = c;
1327 original_cm->counter_match = reply_cm;
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301328
Amitesh Anand63be37d2021-12-24 20:51:48 +05301329 /*
1330	 * The UDP socket is valid only in the decap direction.
1331 */
1332 RCU_INIT_POINTER(original_cm->up, NULL);
1333
Ken Zhu37040ea2021-09-09 21:11:15 -07001334 if (msg->valid_flags & SFE_RULE_CREATE_MARK_VALID) {
1335 original_cm->mark = msg->mark_rule.flow_mark;
1336 original_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_MARK;
1337 }
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301338 if (msg->valid_flags & SFE_RULE_CREATE_QOS_VALID) {
1339 original_cm->priority = msg->qos_rule.flow_qos_tag;
Xiaoping Fane1963d42015-08-25 17:06:19 -07001340 original_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_PRIORITY_REMARK;
1341 }
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301342 if (msg->valid_flags & SFE_RULE_CREATE_DSCP_MARKING_VALID) {
1343 original_cm->dscp = msg->dscp_rule.flow_dscp << SFE_IPV4_DSCP_SHIFT;
Xiaoping Fane1963d42015-08-25 17:06:19 -07001344 original_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_DSCP_REMARK;
1345 }
Ratheesh Kannoth71fc51e2022-01-05 10:02:47 +05301346 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_BRIDGE_FLOW) {
1347 original_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_BRIDGE_FLOW;
1348 }
Ken Zhu7e38d1a2021-11-30 17:31:46 -08001349 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_FLOW_TRANSMIT_FAST) {
1350 original_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_FAST_XMIT_DEV_ADMISSION;
1351 }
Ratheesh Kannoth71fc51e2022-01-05 10:02:47 +05301352
Wayne Tanbb7f1782021-12-13 11:16:04 -08001353 /*
Parikshit Guned31a8202022-01-05 22:15:04 +05301354	 * Mark SAWF metadata if the SAWF tag is valid and set.
1355 */
1356 original_cm->sawf_valid = false;
1357 flow_sawf_tag = SFE_GET_SAWF_TAG(msg->sawf_rule.flow_mark);
1358 if (likely(SFE_SAWF_TAG_IS_VALID(flow_sawf_tag))) {
1359 original_cm->mark = msg->sawf_rule.flow_mark;
1360 original_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_MARK;
1361 original_cm->sawf_valid = true;
1362 }
1363
1364 /*
Wayne Tanbb7f1782021-12-13 11:16:04 -08001365 * Add VLAN rule to original_cm
1366 */
1367 if (msg->valid_flags & SFE_RULE_CREATE_VLAN_VALID) {
1368 struct sfe_vlan_rule *vlan_primary_rule = &msg->vlan_primary_rule;
1369 struct sfe_vlan_rule *vlan_secondary_rule = &msg->vlan_secondary_rule;
1370 sfe_ipv4_match_entry_set_vlan(original_cm,
1371 vlan_primary_rule->ingress_vlan_tag,
1372 vlan_primary_rule->egress_vlan_tag,
1373 vlan_secondary_rule->ingress_vlan_tag,
1374 vlan_secondary_rule->egress_vlan_tag);
1375
1376 if ((msg->rule_flags & SFE_RULE_CREATE_FLAG_USE_RETURN_BOTTOM_INTERFACE) &&
1377 original_cm->egress_vlan_hdr_cnt > 0) {
1378 original_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_INSERT_EGRESS_VLAN_TAG;
1379 original_cm->l2_hdr_size += original_cm->egress_vlan_hdr_cnt * VLAN_HLEN;
1380 }
1381 }
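	/*
	 * Note on the argument order above: the reply direction (set up
	 * further below) passes the same tags with ingress and egress
	 * swapped. As an illustration, assuming a single rule with
	 * ingress_vlan_tag=100 and egress_vlan_tag=200:
	 *
	 *	original_cm: match VLAN 100 on receive, insert VLAN 200 on xmit
	 *	reply_cm:    match VLAN 200 on receive, insert VLAN 100 on xmit
	 */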
1382
Suhas N Bhargav592e64c2021-11-12 16:53:08 +05301383 if (((IPPROTO_GRE == tuple->protocol) || (IPPROTO_ESP == tuple->protocol)) &&
1384 !sfe_ipv4_is_local_ip(si, original_cm->match_dest_ip)) {
Nitin Shettye6ed5b52021-12-27 14:50:11 +05301385 original_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_PASSTHROUGH;
1386 }
1387
Xiaoping Fand1dc7b22015-01-23 00:43:56 -08001388#ifdef CONFIG_NF_FLOW_COOKIE
1389 original_cm->flow_cookie = 0;
1390#endif
Zhi Chen8748eb32015-06-18 12:58:48 -07001391#ifdef CONFIG_XFRM
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301392 if (msg->valid_flags & SFE_RULE_CREATE_DIRECTION_VALID) {
1393 original_cm->flow_accel = msg->direction_rule.flow_accel;
1394 } else {
1395 original_cm->flow_accel = 1;
1396 }
Zhi Chen8748eb32015-06-18 12:58:48 -07001397#endif
Tian Yangd98d91b2022-03-09 14:50:12 -08001398
Ratheesh Kannotha3cf0e02021-12-09 09:44:10 +05301399 /*
1400	 * If l2_features are disabled and the flow uses L2 features such as macvlan/bridge/PPPoE/VLAN,
1401	 * bottom interfaces are expected to be disabled in the flow rule and the top interfaces are
1402	 * always used. In such cases, do not use HW csum offload; csum offload is used only when we
1403	 * are sending directly to a destination interface that supports it.
1404 */
Nitin Shettye6ed5b52021-12-27 14:50:11 +05301405 if (likely(dest_dev->features & NETIF_F_HW_CSUM) && sfe_dev_has_hw_csum(dest_dev)) {
Ratheesh Kannotha3cf0e02021-12-09 09:44:10 +05301406 if ((msg->conn_rule.return_top_interface_num == msg->conn_rule.return_interface_num) ||
1407 (msg->rule_flags & SFE_RULE_CREATE_FLAG_USE_RETURN_BOTTOM_INTERFACE)) {
Ratheesh Kannoth48445532022-02-07 16:19:00 +05301408
1409 /*
1410			 * Don't enable CSUM offload
1411 */
1412#if 0
Suruchi Sumanf2077182022-01-13 21:35:23 +05301413 original_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_CSUM_OFFLOAD;
Ratheesh Kannoth48445532022-02-07 16:19:00 +05301414#endif
Ratheesh Kannotha3cf0e02021-12-09 09:44:10 +05301415 }
1416 }
Matthew McClintockdb5ac512014-01-16 17:01:40 -06001417
Murat Sezgin9c538972022-05-17 13:33:17 -07001418 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_FLOW_SRC_INTERFACE_CHECK) {
Ratheesh Kannoth5dee3772022-01-18 11:27:14 +05301419 original_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK;
1420 }
1421
Murat Sezgin9c538972022-05-17 13:33:17 -07001422 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_FLOW_SRC_INTERFACE_CHECK_NO_FLUSH) {
1423 original_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK_NO_FLUSH;
1424 }
1425
Matthew McClintockdb5ac512014-01-16 17:01:40 -06001426 /*
Guduri Prathyushaeb31c902021-11-10 20:18:50 +05301427	 * Add PPPoE parameters to the original and reply entries based on the direction in which
1428	 * the PPPoE header is valid in the ECM rule.
1429 *
1430 * If PPPoE is valid in flow direction (from interface is PPPoE), then
1431 * original cm will have PPPoE at ingress (strip PPPoE header)
1432 * reply cm will have PPPoE at egress (add PPPoE header)
1433 *
1434 * If PPPoE is valid in return direction (to interface is PPPoE), then
1435 * original cm will have PPPoE at egress (add PPPoE header)
1436 * reply cm will have PPPoE at ingress (strip PPPoE header)
1437 */
1438 if (msg->valid_flags & SFE_RULE_CREATE_PPPOE_DECAP_VALID) {
1439 original_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_PPPOE_DECAP;
1440 original_cm->pppoe_session_id = msg->pppoe_rule.flow_pppoe_session_id;
1441 ether_addr_copy(original_cm->pppoe_remote_mac, msg->pppoe_rule.flow_pppoe_remote_mac);
1442
1443 reply_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_PPPOE_ENCAP;
Wayne Tan1cabbf12022-05-01 13:01:45 -07001444 reply_cm->l2_hdr_size += PPPOE_SES_HLEN;
Guduri Prathyushaeb31c902021-11-10 20:18:50 +05301445 reply_cm->pppoe_session_id = msg->pppoe_rule.flow_pppoe_session_id;
1446 ether_addr_copy(reply_cm->pppoe_remote_mac, msg->pppoe_rule.flow_pppoe_remote_mac);
1447 }
1448
1449 if (msg->valid_flags & SFE_RULE_CREATE_PPPOE_ENCAP_VALID) {
1450 original_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_PPPOE_ENCAP;
Wayne Tan1cabbf12022-05-01 13:01:45 -07001451 original_cm->l2_hdr_size += PPPOE_SES_HLEN;
Guduri Prathyushaeb31c902021-11-10 20:18:50 +05301452 original_cm->pppoe_session_id = msg->pppoe_rule.return_pppoe_session_id;
1453 ether_addr_copy(original_cm->pppoe_remote_mac, msg->pppoe_rule.return_pppoe_remote_mac);
1454
1455 reply_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_PPPOE_DECAP;
1456 reply_cm->pppoe_session_id = msg->pppoe_rule.return_pppoe_session_id;
1457 ether_addr_copy(reply_cm->pppoe_remote_mac, msg->pppoe_rule.return_pppoe_remote_mac);
1458 }
1459
Murat Sezgin9c538972022-05-17 13:33:17 -07001460 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_RETURN_SRC_INTERFACE_CHECK) {
Ratheesh Kannoth5dee3772022-01-18 11:27:14 +05301461 reply_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK;
1462 }
1463
Murat Sezgin9c538972022-05-17 13:33:17 -07001464 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_RETURN_SRC_INTERFACE_CHECK_NO_FLUSH) {
1465 reply_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK_NO_FLUSH;
1466 }
1467
Guduri Prathyushaeb31c902021-11-10 20:18:50 +05301468 /*
Ken Zhubbf49652021-09-12 15:33:09 -07001469	 * For non-ARP interfaces, we don't write the L2 header.
Matthew McClintockdb5ac512014-01-16 17:01:40 -06001470 */
Guduri Prathyusha79a5fee2021-11-11 17:59:10 +05301471 if (sfe_ipv4_xmit_eth_type_check(dest_dev, original_cm->flags)) {
Ratheesh Kannoth29140aa2021-10-20 08:25:02 +05301472
1473 /*
1474 * Check whether the rule has configured a specific source MAC address to use.
1475 * This is needed when virtual L3 interfaces such as br-lan, macvlan, vlan are used during egress
1476 */
Ratheesh Kannoth71fc51e2022-01-05 10:02:47 +05301477
1478 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_BRIDGE_FLOW) {
1479 ether_addr_copy((u8 *)original_cm->xmit_src_mac, (u8 *)msg->conn_rule.flow_mac);
Ratheesh Kannoth29140aa2021-10-20 08:25:02 +05301480 } else {
Ratheesh Kannoth71fc51e2022-01-05 10:02:47 +05301481 if ((msg->valid_flags & SFE_RULE_CREATE_SRC_MAC_VALID) &&
1482 (msg->src_mac_rule.mac_valid_flags & SFE_SRC_MAC_RETURN_VALID)) {
1483 ether_addr_copy((u8 *)original_cm->xmit_src_mac, (u8 *)msg->src_mac_rule.return_src_mac);
1484 } else {
1485 ether_addr_copy((u8 *)original_cm->xmit_src_mac, (u8 *)dest_dev->dev_addr);
1486 }
Ratheesh Kannoth29140aa2021-10-20 08:25:02 +05301487 }
1488
1489 ether_addr_copy((u8 *)original_cm->xmit_dest_mac, (u8 *)msg->conn_rule.return_mac);
1490
Matthew McClintockdb5ac512014-01-16 17:01:40 -06001491 original_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_WRITE_L2_HDR;
Wayne Tanbb7f1782021-12-13 11:16:04 -08001492 original_cm->l2_hdr_size += ETH_HLEN;
Matthew McClintockdb5ac512014-01-16 17:01:40 -06001493
1494 /*
1495 * If our dev writes Ethernet headers then we can write a really fast
1496 * version.
1497 */
1498 if (dest_dev->header_ops) {
1499 if (dest_dev->header_ops->create == eth_header) {
1500 original_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_WRITE_FAST_ETH_HDR;
1501 }
1502 }
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001503 }
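	/*
	 * A minimal sketch of the fast header write that the flag above
	 * enables (illustrative assumption; the real transmit path lives
	 * elsewhere in this driver):
	 *
	 *	struct ethhdr *eth = (struct ethhdr *)__skb_push(skb, ETH_HLEN);
	 *
	 *	ether_addr_copy(eth->h_dest, cm->xmit_dest_mac);
	 *	ether_addr_copy(eth->h_source, cm->xmit_src_mac);
	 *	eth->h_proto = htons(ETH_P_IP);
	 *
	 * This is only safe because header_ops->create == eth_header tells
	 * us the device expects a plain Ethernet header.
	 */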
1504
1505 /*
1506 * Fill in the "reply" direction connection matching object.
1507 */
Matthew McClintockdb5ac512014-01-16 17:01:40 -06001508 reply_cm->match_dev = dest_dev;
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301509 reply_cm->match_protocol = tuple->protocol;
1510 reply_cm->match_src_ip = msg->conn_rule.return_ip_xlate;
Amitesh Anand63be37d2021-12-24 20:51:48 +05301511
1512 /*
1513 * Keep source port as 0 for VxLAN tunnels.
1514 */
1515 if (netif_is_vxlan(src_dev) || netif_is_vxlan(dest_dev)) {
1516 reply_cm->match_src_port = 0;
1517 } else {
1518 reply_cm->match_src_port = msg->conn_rule.return_ident_xlate;
1519 }
1520
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301521 reply_cm->match_dest_ip = msg->conn_rule.flow_ip_xlate;
1522 reply_cm->match_dest_port = msg->conn_rule.flow_ident_xlate;
1523
1524 reply_cm->xlate_src_ip = tuple->return_ip;
1525 reply_cm->xlate_src_port = tuple->return_ident;
1526 reply_cm->xlate_dest_ip = tuple->flow_ip;
Nitin Shettye6ed5b52021-12-27 14:50:11 +05301527 reply_cm->xlate_dest_port = tuple->flow_ident;
1528
1529 if (tuple->protocol == IPPROTO_GRE) {
1530 /*
1531		 * The PPTP lookup is a 4-tuple lookup.
1532		 * During the rule lookup, the destination call ID from the packet
1533		 * is matched against the destination port in the cm.
1534 */
1535 reply_cm->match_src_port = 0;
1536 reply_cm->xlate_src_port = 0;
1537 }
Ratheesh Kannoth94fc5b82021-10-20 07:45:06 +05301538
Matthew McClintockdb5ac512014-01-16 17:01:40 -06001539 reply_cm->xmit_dev = src_dev;
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301540 reply_cm->xmit_dev_mtu = msg->conn_rule.flow_mtu;
Ratheesh Kannoth29140aa2021-10-20 08:25:02 +05301541
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001542 reply_cm->connection = c;
1543 reply_cm->counter_match = original_cm;
Ken Zhu37040ea2021-09-09 21:11:15 -07001544
Ken Zhu37040ea2021-09-09 21:11:15 -07001545 if (msg->valid_flags & SFE_RULE_CREATE_MARK_VALID) {
1546 reply_cm->mark = msg->mark_rule.return_mark;
1547 reply_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_MARK;
1548 }
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301549 if (msg->valid_flags & SFE_RULE_CREATE_QOS_VALID) {
1550 reply_cm->priority = msg->qos_rule.return_qos_tag;
Xiaoping Fane1963d42015-08-25 17:06:19 -07001551 reply_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_PRIORITY_REMARK;
1552 }
Wayne Tanbb7f1782021-12-13 11:16:04 -08001553
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301554 if (msg->valid_flags & SFE_RULE_CREATE_DSCP_MARKING_VALID) {
1555 reply_cm->dscp = msg->dscp_rule.return_dscp << SFE_IPV4_DSCP_SHIFT;
Xiaoping Fane1963d42015-08-25 17:06:19 -07001556 reply_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_DSCP_REMARK;
1557 }
Ratheesh Kannoth71fc51e2022-01-05 10:02:47 +05301558 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_BRIDGE_FLOW) {
1559 reply_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_BRIDGE_FLOW;
1560 }
Ken Zhu7e38d1a2021-11-30 17:31:46 -08001561 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_RETURN_TRANSMIT_FAST) {
1562 reply_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_FAST_XMIT_DEV_ADMISSION;
1563 }
Ratheesh Kannoth71fc51e2022-01-05 10:02:47 +05301564
Suhas N Bhargav592e64c2021-11-12 16:53:08 +05301565 if (((IPPROTO_GRE == tuple->protocol) || (IPPROTO_ESP == tuple->protocol)) &&
1566 !sfe_ipv4_is_local_ip(si, reply_cm->match_dest_ip)) {
Nitin Shettye6ed5b52021-12-27 14:50:11 +05301567 reply_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_PASSTHROUGH;
1568 }
1569
Amitesh Anand63be37d2021-12-24 20:51:48 +05301570 /*
Parikshit Guned31a8202022-01-05 22:15:04 +05301571	 * Mark SAWF metadata in the reply match if the SAWF tag is valid.
1572 */
1573 reply_cm->sawf_valid = false;
1574 return_sawf_tag = SFE_GET_SAWF_TAG(msg->sawf_rule.return_mark);
1575 if (likely(SFE_SAWF_TAG_IS_VALID(return_sawf_tag))) {
1576 reply_cm->mark = msg->sawf_rule.return_mark;
1577 reply_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_MARK;
1578 reply_cm->sawf_valid = true;
1579 }
1580
1581 /*
Amitesh Anand63be37d2021-12-24 20:51:48 +05301582	 * Set up the UDP socket if it is found to be valid for decap.
1583 */
1584 RCU_INIT_POINTER(reply_cm->up, NULL);
1585 net = dev_net(reply_cm->match_dev);
1586 src_if_idx = src_dev->ifindex;
1587
1588 rcu_read_lock();
1589
1590 /*
1591 * Look for the associated sock object.
1592 * __udp4_lib_lookup() holds a reference for this sock object,
1593 * which will be released in sfe_ipv4_free_connection_rcu()
1594 */
1595#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0))
Hardik S. Panchalb171d082022-06-27 13:50:03 +05301596 sk = __udp4_lib_lookup(net, reply_cm->xlate_src_ip, reply_cm->xlate_src_port,
1597 reply_cm->match_dest_ip, reply_cm->match_dest_port, src_if_idx, &udp_table);
Amitesh Anand63be37d2021-12-24 20:51:48 +05301598#else
Hardik S. Panchalb171d082022-06-27 13:50:03 +05301599 sk = __udp4_lib_lookup(net, reply_cm->xlate_src_ip, reply_cm->xlate_src_port,
1600 reply_cm->match_dest_ip, reply_cm->match_dest_port, src_if_idx, 0, &udp_table, NULL);
Amitesh Anand63be37d2021-12-24 20:51:48 +05301601#endif
1602
1603 rcu_read_unlock();
1604
1605 /*
1606 * We set the UDP sock pointer as valid only for decap direction.
1607 */
1608 if (sk && udp_sk(sk)->encap_type) {
1609#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0))
1610 if (!atomic_add_unless(&sk->sk_refcnt, 1, 0)) {
1611#else
1612 if (!refcount_inc_not_zero(&sk->sk_refcnt)) {
1613#endif
Nitin Shetty9ab15622022-04-11 08:04:06 +05301614 this_cpu_inc(si->stats_pcpu->connection_create_failures64);
Wayne Tanbb7f1782021-12-13 11:16:04 -08001615 spin_unlock_bh(&si->lock);
Amitesh Anand63be37d2021-12-24 20:51:48 +05301616 kfree(reply_cm);
1617 kfree(original_cm);
1618 kfree(c);
1619
1620 DEBUG_TRACE("%px: sfe: unable to take reference for socket(%px) p:%d\n"
1621 " s: %s:%pM:%pI4:%u, d: %s:%pM:%pI4:%u\n",
1622 msg, sk, tuple->protocol,
1623 src_dev->name, msg->conn_rule.flow_mac, &tuple->flow_ip, ntohs(tuple->flow_ident),
1624 dest_dev->name, msg->conn_rule.return_mac, &tuple->return_ip, ntohs(tuple->return_ident));
1625
1626 dev_put(src_dev);
1627 dev_put(dest_dev);
1628
1629 return -ESHUTDOWN;
1630 }
1631
1632 rcu_assign_pointer(reply_cm->up, udp_sk(sk));
1633
1634 DEBUG_INFO("%px: Sock(%px) lookup success with reply_cm direction\n", msg, sk);
1635 DEBUG_INFO("%px: SFE connection -\n"
1636 " s: %s:%pI4(%pI4):%u(%u)\n"
1637 " d: %s:%pI4(%pI4):%u(%u)\n",
1638 msg, reply_cm->match_dev->name, &reply_cm->match_src_ip, &reply_cm->xlate_src_ip,
1639 ntohs(reply_cm->match_src_port), ntohs(reply_cm->xlate_src_port),
1640 reply_cm->xmit_dev->name, &reply_cm->match_dest_ip, &reply_cm->xlate_dest_ip,
1641 ntohs(reply_cm->match_dest_port), ntohs(reply_cm->xlate_dest_port));
1642 }
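	/*
	 * Sketch of how the cached socket is expected to be consumed on the
	 * UDP receive fast path (an assumption based on the decap-only note
	 * above; encap_rcv is the standard struct udp_sock member):
	 *
	 *	struct udp_sock *up = rcu_dereference(cm->up);
	 *	int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
	 *
	 *	encap_rcv = READ_ONCE(up->encap_rcv);
	 *	if (encap_rcv) {
	 *		ret = encap_rcv((struct sock *)up, skb);
	 *	}
	 */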
1643
Wayne Tanbb7f1782021-12-13 11:16:04 -08001644 /*
1645 * Add VLAN rule to reply_cm
1646 */
1647 if (msg->valid_flags & SFE_RULE_CREATE_VLAN_VALID) {
1648 struct sfe_vlan_rule *vlan_primary_rule = &msg->vlan_primary_rule;
1649 struct sfe_vlan_rule *vlan_secondary_rule = &msg->vlan_secondary_rule;
1650 sfe_ipv4_match_entry_set_vlan(reply_cm,
1651 vlan_primary_rule->egress_vlan_tag,
1652 vlan_primary_rule->ingress_vlan_tag,
1653 vlan_secondary_rule->egress_vlan_tag,
1654 vlan_secondary_rule->ingress_vlan_tag);
1655
1656 if ((msg->rule_flags & SFE_RULE_CREATE_FLAG_USE_FLOW_BOTTOM_INTERFACE) &&
1657 reply_cm->egress_vlan_hdr_cnt > 0) {
1658 reply_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_INSERT_EGRESS_VLAN_TAG;
1659 reply_cm->l2_hdr_size += reply_cm->egress_vlan_hdr_cnt * VLAN_HLEN;
1660 }
1661 }
1662
Nitin Shettye6ed5b52021-12-27 14:50:11 +05301663 /*
1664	 * The net_protocol handler is used only in the decap path
1665	 * for the non-passthrough case.
1666 */
1667 original_cm->proto = NULL;
1668 reply_cm->proto = NULL;
Tian Yang46d6eb02022-03-31 10:26:16 -07001669 original_cm->top_interface_dev = NULL;
1670 reply_cm->top_interface_dev = NULL;
Nitin Shettye6ed5b52021-12-27 14:50:11 +05301671
1672#ifdef SFE_GRE_TUN_ENABLE
1673 if ((IPPROTO_GRE == tuple->protocol) && !(reply_cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_PASSTHROUGH)) {
1674 rcu_read_lock();
1675 reply_cm->proto = rcu_dereference(inet_protos[IPPROTO_GRE]);
1676 rcu_read_unlock();
1677
1678 if (unlikely(!reply_cm->proto)) {
Nitin Shetty9ab15622022-04-11 08:04:06 +05301679 this_cpu_inc(si->stats_pcpu->connection_create_failures64);
1680 spin_unlock_bh(&si->lock);
Nitin Shettye6ed5b52021-12-27 14:50:11 +05301681 kfree(reply_cm);
1682 kfree(original_cm);
1683 kfree(c);
1684 dev_put(src_dev);
1685 dev_put(dest_dev);
1686 DEBUG_WARN("sfe: GRE proto handler is not registered\n");
1687 return -EPERM;
1688 }
1689 }
1690#endif
1691
Tian Yangd98d91b2022-03-09 14:50:12 -08001692 if (IPPROTO_IPV6 == tuple->protocol) {
1693 original_cm->proto = NULL;
1694 rcu_read_lock();
1695 reply_cm->proto = rcu_dereference(inet_protos[IPPROTO_IPV6]);
1696 rcu_read_unlock();
Tian Yang46d6eb02022-03-31 10:26:16 -07001697 reply_cm->top_interface_dev = dev_get_by_index(&init_net, msg->conn_rule.return_top_interface_num);
1698
1699 if (unlikely(!reply_cm->top_interface_dev)) {
1700 DEBUG_WARN("%px: Unable to find top_interface_dev corresponding to %d\n", msg,
1701 msg->conn_rule.return_top_interface_num);
1702 this_cpu_inc(si->stats_pcpu->connection_create_failures64);
1703 spin_unlock_bh(&si->lock);
1704 kfree(reply_cm);
1705 kfree(original_cm);
1706 kfree(c);
1707 dev_put(src_dev);
1708 dev_put(dest_dev);
1709 return -EINVAL;
1710 }
Tian Yangd98d91b2022-03-09 14:50:12 -08001711 }
1712
Suhas N Bhargav592e64c2021-11-12 16:53:08 +05301713 if ((IPPROTO_ESP == tuple->protocol) && !(reply_cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_PASSTHROUGH)) {
1714 rcu_read_lock();
1715 reply_cm->proto = rcu_dereference(inet_protos[IPPROTO_ESP]);
1716 rcu_read_unlock();
1717
1718		if (unlikely(!reply_cm->proto)) {
1719			this_cpu_inc(si->stats_pcpu->connection_create_failures64);
1720			spin_unlock_bh(&si->lock);
1721			kfree(reply_cm);
1720 kfree(original_cm);
1721 kfree(c);
1722 dev_put(src_dev);
1723 dev_put(dest_dev);
1724 DEBUG_WARN("sfe: ESP proto handler is not registered\n");
1725 return -EPERM;
1726 }
1727 }
1728
Xiaoping Fand1dc7b22015-01-23 00:43:56 -08001729#ifdef CONFIG_NF_FLOW_COOKIE
1730 reply_cm->flow_cookie = 0;
1731#endif
Zhi Chen8748eb32015-06-18 12:58:48 -07001732#ifdef CONFIG_XFRM
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301733 if (msg->valid_flags & SFE_RULE_CREATE_DIRECTION_VALID) {
1734 reply_cm->flow_accel = msg->direction_rule.return_accel;
1735 } else {
1736 reply_cm->flow_accel = 1;
1737 }
1738
Zhi Chen8748eb32015-06-18 12:58:48 -07001739#endif
Ratheesh Kannotha3cf0e02021-12-09 09:44:10 +05301740 /*
1741	 * If l2_features are disabled and the flow uses L2 features such as macvlan/bridge/PPPoE/VLAN,
1742	 * bottom interfaces are expected to be disabled in the flow rule and the top interfaces are
1743	 * always used. In such cases, do not use HW csum offload; csum offload is used only when we
1744	 * are sending directly to a destination interface that supports it.
1745 */
Nitin Shettye6ed5b52021-12-27 14:50:11 +05301746 if (likely(src_dev->features & NETIF_F_HW_CSUM) && sfe_dev_has_hw_csum(src_dev)) {
Ratheesh Kannotha3cf0e02021-12-09 09:44:10 +05301747 if ((msg->conn_rule.flow_top_interface_num == msg->conn_rule.flow_interface_num) ||
1748 (msg->rule_flags & SFE_RULE_CREATE_FLAG_USE_FLOW_BOTTOM_INTERFACE)) {
Ratheesh Kannoth48445532022-02-07 16:19:00 +05301749 /*
1750			 * Don't enable CSUM offload
1751 */
1752#if 0
Ratheesh Kannotha3cf0e02021-12-09 09:44:10 +05301753 reply_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_CSUM_OFFLOAD;
Ratheesh Kannoth48445532022-02-07 16:19:00 +05301754#endif
Ratheesh Kannotha3cf0e02021-12-09 09:44:10 +05301755 }
1756 }
Matthew McClintockdb5ac512014-01-16 17:01:40 -06001757
1758 /*
Ken Zhubbf49652021-09-12 15:33:09 -07001759	 * For non-ARP interfaces, we don't write the L2 header.
Matthew McClintockdb5ac512014-01-16 17:01:40 -06001760 */
Guduri Prathyusha79a5fee2021-11-11 17:59:10 +05301761 if (sfe_ipv4_xmit_eth_type_check(src_dev, reply_cm->flags)) {
Ratheesh Kannoth29140aa2021-10-20 08:25:02 +05301762
1763 /*
1764 * Check whether the rule has configured a specific source MAC address to use.
1765 * This is needed when virtual L3 interfaces such as br-lan, macvlan, vlan are used during egress
1766 */
Ratheesh Kannoth71fc51e2022-01-05 10:02:47 +05301767
1768 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_BRIDGE_FLOW) {
1769 ether_addr_copy((u8 *)reply_cm->xmit_src_mac, (u8 *)msg->conn_rule.return_mac);
Ratheesh Kannoth29140aa2021-10-20 08:25:02 +05301770 } else {
Ratheesh Kannoth71fc51e2022-01-05 10:02:47 +05301771 if ((msg->valid_flags & SFE_RULE_CREATE_SRC_MAC_VALID) &&
1772 (msg->src_mac_rule.mac_valid_flags & SFE_SRC_MAC_FLOW_VALID)) {
1773 ether_addr_copy((u8 *)reply_cm->xmit_src_mac, (u8 *)msg->src_mac_rule.flow_src_mac);
1774 } else {
1775 ether_addr_copy((u8 *)reply_cm->xmit_src_mac, (u8 *)src_dev->dev_addr);
1776 }
Ratheesh Kannoth29140aa2021-10-20 08:25:02 +05301777 }
Ratheesh Kannoth71fc51e2022-01-05 10:02:47 +05301778
Ratheesh Kannoth29140aa2021-10-20 08:25:02 +05301779 ether_addr_copy((u8 *)reply_cm->xmit_dest_mac, (u8 *)msg->conn_rule.flow_mac);
1780
Matthew McClintockdb5ac512014-01-16 17:01:40 -06001781 reply_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_WRITE_L2_HDR;
Wayne Tanbb7f1782021-12-13 11:16:04 -08001782 reply_cm->l2_hdr_size += ETH_HLEN;
Matthew McClintockdb5ac512014-01-16 17:01:40 -06001783
1784 /*
1785 * If our dev writes Ethernet headers then we can write a really fast
1786 * version.
1787 */
1788 if (src_dev->header_ops) {
1789 if (src_dev->header_ops->create == eth_header) {
1790 reply_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_WRITE_FAST_ETH_HDR;
1791 }
1792 }
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001793 }
1794
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301795 if ((tuple->return_ip != msg->conn_rule.return_ip_xlate) ||
1796 (tuple->return_ident != msg->conn_rule.return_ident_xlate)) {
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001797 original_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_XLATE_DEST;
1798 reply_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_XLATE_SRC;
1799 }
1800
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301801 if ((tuple->flow_ip != msg->conn_rule.flow_ip_xlate) ||
1802 (tuple->flow_ident != msg->conn_rule.flow_ident_xlate)) {
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001803 original_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_XLATE_SRC;
1804 reply_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_XLATE_DEST;
1805 }
1806
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001807 /*
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001808 * Initialize the protocol-specific information that we track.
1809 */
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301810 switch (tuple->protocol) {
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001811 case IPPROTO_TCP:
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301812 original_cm->protocol_state.tcp.win_scale = msg->tcp_rule.flow_window_scale;
1813 original_cm->protocol_state.tcp.max_win = msg->tcp_rule.flow_max_window ? msg->tcp_rule.flow_max_window : 1;
1814 original_cm->protocol_state.tcp.end = msg->tcp_rule.flow_end;
1815 original_cm->protocol_state.tcp.max_end = msg->tcp_rule.flow_max_end;
1816
1817 reply_cm->protocol_state.tcp.win_scale = msg->tcp_rule.return_window_scale;
1818 reply_cm->protocol_state.tcp.max_win = msg->tcp_rule.return_max_window ? msg->tcp_rule.return_max_window : 1;
1819 reply_cm->protocol_state.tcp.end = msg->tcp_rule.return_end;
1820 reply_cm->protocol_state.tcp.max_end = msg->tcp_rule.return_max_end;
1821
1822 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_NO_SEQ_CHECK) {
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001823 original_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_NO_SEQ_CHECK;
1824 reply_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_NO_SEQ_CHECK;
1825 }
1826 break;
Wayne Tan1cabbf12022-05-01 13:01:45 -07001827
1828 case IPPROTO_RAW:
1829 /*
1830 * Set src_port to 0 to avoid hash collision in connection match lookups.
1831 */
1832 original_cm->match_src_port = 0;
1833 original_cm->xlate_src_port = 0;
1834 reply_cm->match_src_port = 0;
1835 reply_cm->xlate_src_port = 0;
1836 break;
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001837 }
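	/*
	 * A reading of the IPPROTO_RAW handling above: raw packets carry no
	 * transport header, so the receive-path lookup has no source ident
	 * to parse and effectively looks up with 0. Zeroing the source
	 * ports on both cm entries keeps the stored keys consistent with
	 * that, mirroring the GRE handling earlier in this function.
	 */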
1838
Wayne Tanbb7f1782021-12-13 11:16:04 -08001839 /*
1840 * Fill in the ipv4_connection object.
1841 */
1842 c->protocol = tuple->protocol;
1843 c->src_ip = tuple->flow_ip;
1844 c->src_ip_xlate = msg->conn_rule.flow_ip_xlate;
1845 c->src_port = tuple->flow_ident;
1846 c->src_port_xlate = msg->conn_rule.flow_ident_xlate;
1847 c->original_dev = src_dev;
1848 c->original_match = original_cm;
1849 c->dest_ip = tuple->return_ip;
1850 c->dest_ip_xlate = msg->conn_rule.return_ip_xlate;
1851 c->dest_port = tuple->return_ident;
1852 c->dest_port_xlate = msg->conn_rule.return_ident_xlate;
1853 c->reply_dev = dest_dev;
1854 c->reply_match = reply_cm;
1855 c->debug_read_seq = 0;
1856 c->last_sync_jiffies = get_jiffies_64();
1857 c->removed = false;
1858
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001859 sfe_ipv4_connection_match_compute_translations(original_cm);
1860 sfe_ipv4_connection_match_compute_translations(reply_cm);
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05301861 sfe_ipv4_insert_connection(si, c);
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001862
1863 spin_unlock_bh(&si->lock);
1864
1865 /*
1866 * We have everything we need!
1867 */
Wayne Tan1cabbf12022-05-01 13:01:45 -07001868 DEBUG_INFO("%px: NEW connection - p: %d\n"
Wayne Tanbb7f1782021-12-13 11:16:04 -08001869 "original_cm: match_dev=src_dev: %s %d %pM\n"
1870 " xmit_dev=dest_dev: %s %d %pM\n"
1871 " xmit_src_mac: %pM\n"
1872 " xmit_dest_mac: %pM\n"
1873 " flags: %x l2_hdr: %u\n"
1874 "flow_ip: %pI4:%u\n"
1875 "flow_ip_xlate: %pI4:%u\n"
1876 "flow_mac: %pM\n"
1877 "reply_cm: match_dev=dest_dev: %s %d %pM\n"
1878 " xmit_dev=src_dev: %s %d %pM\n"
1879 " xmit_src_mac: %pM\n"
1880 " xmit_dest_mac: %pM\n"
1881 " flags: %x l2_hdr: %u\n"
1882 "return_ip: %pI4:%u\n"
1883 "return_ip_xlate: %pI4:%u\n"
1884 "return_mac: %pM\n"
1885 "flags: valid=%x src_mac_valid=%x\n",
Wayne Tan1cabbf12022-05-01 13:01:45 -07001886 c, tuple->protocol,
Wayne Tanbb7f1782021-12-13 11:16:04 -08001887 original_cm->match_dev->name, original_cm->match_dev->ifindex, original_cm->match_dev->dev_addr,
1888 original_cm->xmit_dev->name, original_cm->xmit_dev->ifindex, original_cm->xmit_dev->dev_addr,
1889 original_cm->xmit_src_mac, original_cm->xmit_dest_mac, original_cm->flags, original_cm->l2_hdr_size,
1890 &tuple->flow_ip, ntohs(tuple->flow_ident),
1891 &msg->conn_rule.flow_ip_xlate, ntohs(msg->conn_rule.flow_ident_xlate),
1892 msg->conn_rule.flow_mac,
1893 reply_cm->match_dev->name, reply_cm->match_dev->ifindex, reply_cm->match_dev->dev_addr,
1894 reply_cm->xmit_dev->name, reply_cm->xmit_dev->ifindex, reply_cm->xmit_dev->dev_addr,
1895 reply_cm->xmit_src_mac, reply_cm->xmit_dest_mac, reply_cm->flags, reply_cm->l2_hdr_size,
1896 &tuple->return_ip, ntohs(tuple->return_ident),
1897 &msg->conn_rule.return_ip_xlate, ntohs(msg->conn_rule.return_ident_xlate),
1898 msg->conn_rule.return_mac,
1899 msg->valid_flags, msg->src_mac_rule.mac_valid_flags);
Nicolas Costa514fde02014-01-13 15:50:29 -06001900
1901 return 0;
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001902}
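/*
 * A minimal caller sketch for the create path (illustrative only: the
 * entry-point signature is assumed to mirror sfe_ipv4_destroy_rule()
 * below, the pre_nat_* variables and ifindex values are placeholders,
 * and only a few of the fields read above are shown):
 *
 *	struct sfe_ipv4_rule_create_msg msg = {0};
 *
 *	msg.tuple.protocol = IPPROTO_TCP;
 *	msg.tuple.flow_ip = pre_nat_src_ip;
 *	msg.tuple.flow_ident = pre_nat_src_port;
 *	msg.tuple.return_ip = pre_nat_dest_ip;
 *	msg.tuple.return_ident = pre_nat_dest_port;
 *	msg.conn_rule.flow_interface_num = src_ifindex;
 *	msg.conn_rule.return_interface_num = dest_ifindex;
 *	...
 *	ret = sfe_ipv4_create_rule(&msg);
 */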
1903
1904/*
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001905 * sfe_ipv4_destroy_rule()
1906 * Destroy a forwarding rule.
1907 */
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301908void sfe_ipv4_destroy_rule(struct sfe_ipv4_rule_destroy_msg *msg)
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001909{
Dave Hudsondcd08fb2013-11-22 09:25:16 -06001910 struct sfe_ipv4 *si = &__si;
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001911 struct sfe_ipv4_connection *c;
Ratheesh Kannoth94fc5b82021-10-20 07:45:06 +05301912 bool ret;
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301913 struct sfe_ipv4_5tuple *tuple = &msg->tuple;
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001914
Ratheesh Kannoth3aeb2892021-10-20 07:57:15 +05301915 this_cpu_inc(si->stats_pcpu->connection_destroy_requests64);
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001916 spin_lock_bh(&si->lock);
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001917
1918 /*
1919 * Check to see if we have a flow that matches the rule we're trying
1920 * to destroy. If there isn't then we can't destroy it.
1921 */
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05301922 c = sfe_ipv4_find_connection(si, tuple->protocol, tuple->flow_ip, tuple->flow_ident,
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301923 tuple->return_ip, tuple->return_ident);
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001924 if (!c) {
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001925 spin_unlock_bh(&si->lock);
Ratheesh Kannoth3aeb2892021-10-20 07:57:15 +05301926 this_cpu_inc(si->stats_pcpu->connection_destroy_misses64);
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001927
1928 DEBUG_TRACE("connection does not exist - p: %d, s: %pI4:%u, d: %pI4:%u\n",
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301929 tuple->protocol, &tuple->flow_ip, ntohs(tuple->flow_ident),
1930 &tuple->return_ip, ntohs(tuple->return_ident));
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001931 return;
1932 }
1933
1934 /*
1935 * Remove our connection details from the hash tables.
1936 */
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05301937 ret = sfe_ipv4_remove_connection(si, c);
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001938 spin_unlock_bh(&si->lock);
1939
Ratheesh Kannoth94fc5b82021-10-20 07:45:06 +05301940 if (ret) {
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05301941 sfe_ipv4_flush_connection(si, c, SFE_SYNC_REASON_DESTROY);
Ratheesh Kannoth94fc5b82021-10-20 07:45:06 +05301942 }
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001943
1944 DEBUG_INFO("connection destroyed - p: %d, s: %pI4:%u, d: %pI4:%u\n",
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301945 tuple->protocol, &tuple->flow_ip, ntohs(tuple->flow_ident),
1946 &tuple->return_ip, ntohs(tuple->return_ident));
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001947}
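/*
 * Usage sketch (illustrative; pre_nat_* are placeholders): tear down with
 * the same pre-NAT 5-tuple that was used at create time.
 *
 *	struct sfe_ipv4_rule_destroy_msg dmsg = {0};
 *
 *	dmsg.tuple.protocol = IPPROTO_TCP;
 *	dmsg.tuple.flow_ip = pre_nat_src_ip;
 *	dmsg.tuple.flow_ident = pre_nat_src_port;
 *	dmsg.tuple.return_ip = pre_nat_dest_ip;
 *	dmsg.tuple.return_ident = pre_nat_dest_port;
 *	sfe_ipv4_destroy_rule(&dmsg);
 */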
1948
1949/*
Ken Zhu7a43d882022-01-04 10:51:44 -08001950 * sfe_ipv4_sync_invoke()
1951 *	Schedule a many-connection stats sync.
1952 */
1953bool sfe_ipv4_sync_invoke(uint16_t index)
1954{
1955 struct sfe_ipv4 *si = &__si;
1956 DEBUG_INFO("Request for a sync with index[%d]\n", index);
1957 return schedule_delayed_work_on(si->work_cpu, &(si->sync_dwork), 0);
1958}
1959
1960/*
1961 * sfe_ipv4_register_many_sync_callback()
1962 *	Register a callback for many-connection rule synchronization.
1963 */
1964void sfe_ipv4_register_many_sync_callback(sfe_ipv4_many_sync_callback_t cb)
1965{
1966 struct sfe_ipv4 *si = &__si;
1967
1968 spin_lock_bh(&si->lock);
1969 rcu_assign_pointer(si->many_sync_callback, cb);
1970 spin_unlock_bh(&si->lock);
1971}
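/*
 * Registration sketch (illustrative: the message layout is inferred from
 * sfe_ipv4_periodic_sync() below, the struct type names are assumptions,
 * and consume_stats() is a placeholder consumer):
 *
 *	static void my_many_sync_cb(struct sfe_ipv4_msg *msg)
 *	{
 *		struct sfe_ipv4_conn_sync_many_msg *nicsm = &msg->msg.conn_stats_many;
 *
 *		consume_stats(nicsm->conn_sync, nicsm->count);
 *		if (nicsm->next) {
 *			sfe_ipv4_sync_invoke(nicsm->next);
 *		}
 *	}
 *
 *	sfe_ipv4_register_many_sync_callback(my_many_sync_cb);
 */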
1972
1973/*
Dave Hudsondcd08fb2013-11-22 09:25:16 -06001974 * sfe_ipv4_register_sync_rule_callback()
1975 * Register a callback for rule synchronization.
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001976 */
Xiaoping Fand44a5b42015-05-26 17:37:37 -07001977void sfe_ipv4_register_sync_rule_callback(sfe_sync_rule_callback_t sync_rule_callback)
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001978{
1979 struct sfe_ipv4 *si = &__si;
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001980
1981 spin_lock_bh(&si->lock);
Dave Hudsondcd08fb2013-11-22 09:25:16 -06001982 rcu_assign_pointer(si->sync_rule_callback, sync_rule_callback);
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001983 spin_unlock_bh(&si->lock);
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001984}
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001985/*
1986 * sfe_ipv4_get_debug_dev()
1987 */
1988static ssize_t sfe_ipv4_get_debug_dev(struct device *dev,
1989 struct device_attribute *attr,
1990 char *buf)
1991{
1992 struct sfe_ipv4 *si = &__si;
1993 ssize_t count;
1994 int num;
1995
1996 spin_lock_bh(&si->lock);
1997 num = si->debug_dev;
1998 spin_unlock_bh(&si->lock);
1999
2000 count = snprintf(buf, (ssize_t)PAGE_SIZE, "%d\n", num);
2001 return count;
2002}
2003
2004/*
Dave Hudsondcd08fb2013-11-22 09:25:16 -06002005 * sysfs attributes.
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002006 */
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002007static const struct device_attribute sfe_ipv4_debug_dev_attr =
Xiaoping Fane70da412016-02-26 16:47:57 -08002008 __ATTR(debug_dev, S_IWUSR | S_IRUGO, sfe_ipv4_get_debug_dev, NULL);
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002009
2010/*
Dave Hudsondcd08fb2013-11-22 09:25:16 -06002011 * sfe_ipv4_destroy_all_rules_for_dev()
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002012 * Destroy all connections that match a particular device.
2013 *
2014 * If we pass dev as NULL then this destroys all connections.
2015 */
Dave Hudsondcd08fb2013-11-22 09:25:16 -06002016void sfe_ipv4_destroy_all_rules_for_dev(struct net_device *dev)
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002017{
Dave Hudsondcd08fb2013-11-22 09:25:16 -06002018 struct sfe_ipv4 *si = &__si;
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002019 struct sfe_ipv4_connection *c;
Ratheesh Kannoth94fc5b82021-10-20 07:45:06 +05302020 bool ret;
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002021
Xiaoping Fan34586472015-07-03 02:20:35 -07002022another_round:
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002023 spin_lock_bh(&si->lock);
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002024
Xiaoping Fan34586472015-07-03 02:20:35 -07002025 for (c = si->all_connections_head; c; c = c->all_connections_next) {
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002026 /*
Xiaoping Fan34586472015-07-03 02:20:35 -07002027 * Does this connection relate to the device we are destroying?
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002028 */
2029 if (!dev
2030 || (dev == c->original_dev)
2031 || (dev == c->reply_dev)) {
Xiaoping Fan34586472015-07-03 02:20:35 -07002032 break;
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002033 }
Xiaoping Fan34586472015-07-03 02:20:35 -07002034 }
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002035
Xiaoping Fan34586472015-07-03 02:20:35 -07002036 if (c) {
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302037 ret = sfe_ipv4_remove_connection(si, c);
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002038 }
2039
2040 spin_unlock_bh(&si->lock);
Xiaoping Fan34586472015-07-03 02:20:35 -07002041
2042 if (c) {
Ratheesh Kannoth94fc5b82021-10-20 07:45:06 +05302043 if (ret) {
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302044 sfe_ipv4_flush_connection(si, c, SFE_SYNC_REASON_DESTROY);
Ratheesh Kannoth94fc5b82021-10-20 07:45:06 +05302045 }
Xiaoping Fan34586472015-07-03 02:20:35 -07002046 goto another_round;
2047 }
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002048}
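/*
 * A typical consumer (assumed, based on the NULL-dev semantics above) is
 * a netdevice notifier that flushes an interface on the way down:
 *
 *	case NETDEV_DOWN:
 *		sfe_ipv4_destroy_all_rules_for_dev(dev);
 *		break;
 */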
2049
2050/*
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002051 * sfe_ipv4_periodic_sync()
2052 */
Ken Zhu137722d2021-09-23 17:57:36 -07002053static void sfe_ipv4_periodic_sync(struct work_struct *work)
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002054{
Ken Zhu137722d2021-09-23 17:57:36 -07002055 struct sfe_ipv4 *si = container_of((struct delayed_work *)work, struct sfe_ipv4, sync_dwork);
Xiaoping Fan6a1672f2016-08-17 19:58:12 -07002056 u64 now_jiffies;
Ken Zhu7a43d882022-01-04 10:51:44 -08002057	int quota, count;
2058 sfe_ipv4_many_sync_callback_t sync_rule_callback;
Ken Zhudc423672021-09-02 18:27:01 -07002059 struct sfe_ipv4_connection *c;
Ken Zhu7a43d882022-01-04 10:51:44 -08002060 struct sfe_ipv4_conn_sync *conn_sync;
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002061
2062 now_jiffies = get_jiffies_64();
2063
Dave Hudsondcd08fb2013-11-22 09:25:16 -06002064 rcu_read_lock();
Ken Zhu7a43d882022-01-04 10:51:44 -08002065 sync_rule_callback = rcu_dereference(si->many_sync_callback);
2066 rcu_read_unlock();
Dave Hudsondcd08fb2013-11-22 09:25:16 -06002067 if (!sync_rule_callback) {
Ken Zhu7a43d882022-01-04 10:51:44 -08002068 return;
Dave Hudsondcd08fb2013-11-22 09:25:16 -06002069 }
2070
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002071 spin_lock_bh(&si->lock);
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002072
2073 /*
Ken Zhudc423672021-09-02 18:27:01 -07002074 * If we have reached the end of the connection list, walk from
2075 * the connection head.
2076 */
2077 c = si->wc_next;
2078 if (unlikely(!c)) {
2079 c = si->all_connections_head;
2080 }
2081
2082 /*
Ken Zhu7a43d882022-01-04 10:51:44 -08002083 * Get the max number of connections to be put in this sync msg.
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002084 */
Ken Zhu7a43d882022-01-04 10:51:44 -08002085 quota = sfe_ipv4_sync_max_number;
2086 conn_sync = sfe_ipv4_sync_many_msg->msg.conn_stats_many.conn_sync;
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002087
2088 /*
Ken Zhudc423672021-09-02 18:27:01 -07002089 * Walk the "all connection" list and sync the connection state.
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002090 */
Ken Zhudc423672021-09-02 18:27:01 -07002091 while (likely(c && quota)) {
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002092 struct sfe_ipv4_connection_match *cm;
2093 struct sfe_ipv4_connection_match *counter_cm;
Xiaoping Fand44a5b42015-05-26 17:37:37 -07002094 struct sfe_connection_sync sis;
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002095
Ken Zhudc423672021-09-02 18:27:01 -07002096 cm = c->original_match;
2097 counter_cm = c->reply_match;
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002098
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002099 /*
Ken Zhudc423672021-09-02 18:27:01 -07002100 * Didn't receive packets in the original direction or reply
2101 * direction, move to the next connection.
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002102 */
Ken Zhudc423672021-09-02 18:27:01 -07002103 if ((!atomic_read(&cm->rx_packet_count)) && !(atomic_read(&counter_cm->rx_packet_count))) {
2104 c = c->all_connections_next;
2105 continue;
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002106 }
2107
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302108 sfe_ipv4_gen_sync_connection(si, c, &sis, SFE_SYNC_REASON_STATS, now_jiffies);
Ken Zhu7a43d882022-01-04 10:51:44 -08002109 sfe_ipv4_stats_convert(conn_sync, &sis);
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002110
Ken Zhu7a43d882022-01-04 10:51:44 -08002111 quota--;
2112 conn_sync++;
2113 c = c->all_connections_next;
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002114 }
2115
Ken Zhudc423672021-09-02 18:27:01 -07002116 /*
2117 * At the end of the sync, put the wc_next to the connection we left.
2118 */
2119 si->wc_next = c;
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002120 spin_unlock_bh(&si->lock);
2121
Ken Zhu7a43d882022-01-04 10:51:44 -08002122 count = sfe_ipv4_sync_max_number - quota;
2123 /*
2124	 * Tell ECM the sync round is done if we are at the end of all connections;
2125	 * otherwise report the number of connections in the msg.
2126 */
2127 if (c == NULL) {
2128 DEBUG_INFO("Synced all connections.\n");
2129 sfe_ipv4_sync_many_msg->msg.conn_stats_many.next = 0;
2130 } else {
2131 DEBUG_INFO("Some connections left.\n");
2132 sfe_ipv4_sync_many_msg->msg.conn_stats_many.next = count;
2133 }
2134 DEBUG_INFO("Sync %d connections\n", count);
2135 sfe_ipv4_sync_many_msg->msg.conn_stats_many.count = count;
2136 sfe_ipv4_sync_many_msg->cm.response = SFE_CMN_RESPONSE_ACK;
2137
2138 sync_rule_callback(sfe_ipv4_sync_many_msg);
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002139}
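/*
 * Note: this work item does not re-arm itself. Judging by the 'next'
 * bookkeeping above, the registered callback is expected to request the
 * following batch (e.g. via sfe_ipv4_sync_invoke()) once it has consumed
 * this one.
 */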
2140
2141#define CHAR_DEV_MSG_SIZE 768
2142
2143/*
2144 * sfe_ipv4_debug_dev_read_start()
2145 * Generate part of the XML output.
2146 */
2147static bool sfe_ipv4_debug_dev_read_start(struct sfe_ipv4 *si, char *buffer, char *msg, size_t *length,
Ken Zhu7a43d882022-01-04 10:51:44 -08002148 int *total_read, struct sfe_ipv4_debug_xml_write_state *ws)
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002149{
2150 int bytes_read;
2151
Xiaoping Fan34586472015-07-03 02:20:35 -07002152 si->debug_read_seq++;
2153
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002154 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE, "<sfe_ipv4>\n");
2155 if (copy_to_user(buffer + *total_read, msg, CHAR_DEV_MSG_SIZE)) {
2156 return false;
2157 }
2158
2159 *length -= bytes_read;
2160 *total_read += bytes_read;
2161
2162 ws->state++;
2163 return true;
2164}
2165
2166/*
2167 * sfe_ipv4_debug_dev_read_connections_start()
2168 * Generate part of the XML output.
2169 */
2170static bool sfe_ipv4_debug_dev_read_connections_start(struct sfe_ipv4 *si, char *buffer, char *msg, size_t *length,
2171 int *total_read, struct sfe_ipv4_debug_xml_write_state *ws)
2172{
2173 int bytes_read;
2174
2175 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE, "\t<connections>\n");
2176 if (copy_to_user(buffer + *total_read, msg, CHAR_DEV_MSG_SIZE)) {
2177 return false;
2178 }
2179
2180 *length -= bytes_read;
2181 *total_read += bytes_read;
2182
2183 ws->state++;
2184 return true;
2185}
2186
2187/*
2188 * sfe_ipv4_debug_dev_read_connections_connection()
2189 * Generate part of the XML output.
2190 */
2191static bool sfe_ipv4_debug_dev_read_connections_connection(struct sfe_ipv4 *si, char *buffer, char *msg, size_t *length,
2192 int *total_read, struct sfe_ipv4_debug_xml_write_state *ws)
2193{
2194 struct sfe_ipv4_connection *c;
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002195 struct sfe_ipv4_connection_match *original_cm;
2196 struct sfe_ipv4_connection_match *reply_cm;
2197 int bytes_read;
2198 int protocol;
2199 struct net_device *src_dev;
Dave Hudson87973cd2013-10-22 16:00:04 +01002200 __be32 src_ip;
2201 __be32 src_ip_xlate;
2202 __be16 src_port;
2203 __be16 src_port_xlate;
Xiaoping Fan6a1672f2016-08-17 19:58:12 -07002204 u64 src_rx_packets;
2205 u64 src_rx_bytes;
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002206 struct net_device *dest_dev;
Dave Hudson87973cd2013-10-22 16:00:04 +01002207 __be32 dest_ip;
2208 __be32 dest_ip_xlate;
2209 __be16 dest_port;
2210 __be16 dest_port_xlate;
Xiaoping Fan6a1672f2016-08-17 19:58:12 -07002211 u64 dest_rx_packets;
2212 u64 dest_rx_bytes;
2213 u64 last_sync_jiffies;
Ken Zhu37040ea2021-09-09 21:11:15 -07002214 u32 src_mark, dest_mark, src_priority, dest_priority, src_dscp, dest_dscp;
Parikshit Guned31a8202022-01-05 22:15:04 +05302215 bool original_cm_sawf_valid, reply_cm_sawf_valid;
2216 u32 flow_service_class, return_service_class;
2217 u32 flow_msduq, return_msduq;
Guduri Prathyushaeb31c902021-11-10 20:18:50 +05302218 u32 packet, byte, original_cm_flags;
2219 u16 pppoe_session_id;
2220 u8 pppoe_remote_mac[ETH_ALEN];
Ken Zhu7e38d1a2021-11-30 17:31:46 -08002221 u32 original_fast_xmit, reply_fast_xmit;
Xiaoping Fand1dc7b22015-01-23 00:43:56 -08002222#ifdef CONFIG_NF_FLOW_COOKIE
2223 int src_flow_cookie, dst_flow_cookie;
2224#endif
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002225
2226 spin_lock_bh(&si->lock);
Xiaoping Fan34586472015-07-03 02:20:35 -07002227
2228 for (c = si->all_connections_head; c; c = c->all_connections_next) {
2229 if (c->debug_read_seq < si->debug_read_seq) {
2230 c->debug_read_seq = si->debug_read_seq;
2231 break;
2232 }
2233 }
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002234
2235 /*
Xiaoping Fan34586472015-07-03 02:20:35 -07002236 * If there were no connections then move to the next state.
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002237 */
Ratheesh Kannoth94fc5b82021-10-20 07:45:06 +05302238 if (!c || c->removed) {
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002239 spin_unlock_bh(&si->lock);
Xiaoping Fan34586472015-07-03 02:20:35 -07002240 ws->state++;
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002241 return true;
2242 }
2243
2244 original_cm = c->original_match;
2245 reply_cm = c->reply_match;
2246
2247 protocol = c->protocol;
2248 src_dev = c->original_dev;
2249 src_ip = c->src_ip;
2250 src_ip_xlate = c->src_ip_xlate;
2251 src_port = c->src_port;
2252 src_port_xlate = c->src_port_xlate;
Xiaoping Fane1963d42015-08-25 17:06:19 -07002253 src_priority = original_cm->priority;
2254 src_dscp = original_cm->dscp >> SFE_IPV4_DSCP_SHIFT;
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002255
Ratheesh Kannoth94fc5b82021-10-20 07:45:06 +05302256 sfe_ipv4_connection_match_update_summary_stats(original_cm, &packet, &byte);
2257 sfe_ipv4_connection_match_update_summary_stats(reply_cm, &packet, &byte);
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002258
2259 src_rx_packets = original_cm->rx_packet_count64;
2260 src_rx_bytes = original_cm->rx_byte_count64;
Ken Zhu37040ea2021-09-09 21:11:15 -07002261 src_mark = original_cm->mark;
Ken Zhu7e38d1a2021-11-30 17:31:46 -08002262 original_fast_xmit = (original_cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_FAST_XMIT);
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002263 dest_dev = c->reply_dev;
2264 dest_ip = c->dest_ip;
2265 dest_ip_xlate = c->dest_ip_xlate;
2266 dest_port = c->dest_port;
2267 dest_port_xlate = c->dest_port_xlate;
Xiaoping Fane1963d42015-08-25 17:06:19 -07002268 dest_priority = reply_cm->priority;
2269 dest_dscp = reply_cm->dscp >> SFE_IPV4_DSCP_SHIFT;
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002270 dest_rx_packets = reply_cm->rx_packet_count64;
2271 dest_rx_bytes = reply_cm->rx_byte_count64;
Ken Zhu37040ea2021-09-09 21:11:15 -07002272 dest_mark = reply_cm->mark;
Ken Zhu7e38d1a2021-11-30 17:31:46 -08002273 reply_fast_xmit = (reply_cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_FAST_XMIT);
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002274 last_sync_jiffies = get_jiffies_64() - c->last_sync_jiffies;
Guduri Prathyushaeb31c902021-11-10 20:18:50 +05302275 original_cm_flags = original_cm->flags;
2276 pppoe_session_id = original_cm->pppoe_session_id;
2277 ether_addr_copy(pppoe_remote_mac, original_cm->pppoe_remote_mac);
Parikshit Guned31a8202022-01-05 22:15:04 +05302278 original_cm_sawf_valid = original_cm->sawf_valid;
2279 reply_cm_sawf_valid = reply_cm->sawf_valid;
2280 flow_service_class = SFE_GET_SAWF_SERVICE_CLASS(original_cm->mark);
2281 flow_msduq = SFE_GET_SAWF_MSDUQ(original_cm->mark);
2282 return_service_class = SFE_GET_SAWF_SERVICE_CLASS(reply_cm->mark);
2283 return_msduq = SFE_GET_SAWF_MSDUQ(reply_cm->mark);
Xiaoping Fand1dc7b22015-01-23 00:43:56 -08002284#ifdef CONFIG_NF_FLOW_COOKIE
2285 src_flow_cookie = original_cm->flow_cookie;
2286 dst_flow_cookie = reply_cm->flow_cookie;
2287#endif
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002288 spin_unlock_bh(&si->lock);
2289
2290 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE, "\t\t<connection "
2291 "protocol=\"%u\" "
2292 "src_dev=\"%s\" "
2293 "src_ip=\"%pI4\" src_ip_xlate=\"%pI4\" "
2294 "src_port=\"%u\" src_port_xlate=\"%u\" "
Xiaoping Fane1963d42015-08-25 17:06:19 -07002295 "src_priority=\"%u\" src_dscp=\"%u\" "
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002296 "src_rx_pkts=\"%llu\" src_rx_bytes=\"%llu\" "
Ken Zhu37040ea2021-09-09 21:11:15 -07002297 "src_mark=\"%08x\" "
Ken Zhu7e38d1a2021-11-30 17:31:46 -08002298 "src_fast_xmit=\"%s\" "
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002299 "dest_dev=\"%s\" "
2300 "dest_ip=\"%pI4\" dest_ip_xlate=\"%pI4\" "
2301 "dest_port=\"%u\" dest_port_xlate=\"%u\" "
Xiaoping Fane1963d42015-08-25 17:06:19 -07002302 "dest_priority=\"%u\" dest_dscp=\"%u\" "
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002303 "dest_rx_pkts=\"%llu\" dest_rx_bytes=\"%llu\" "
Ken Zhu37040ea2021-09-09 21:11:15 -07002304 "dest_mark=\"%08x\" "
Ken Zhu7e38d1a2021-11-30 17:31:46 -08002305 "reply_fast_xmit=\"%s\" "
Xiaoping Fand1dc7b22015-01-23 00:43:56 -08002306#ifdef CONFIG_NF_FLOW_COOKIE
2307 "src_flow_cookie=\"%d\" dst_flow_cookie=\"%d\" "
2308#endif
Ken Zhu37040ea2021-09-09 21:11:15 -07002309 "last_sync=\"%llu\" ",
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002310 protocol,
2311 src_dev->name,
2312 &src_ip, &src_ip_xlate,
Dave Hudson87973cd2013-10-22 16:00:04 +01002313 ntohs(src_port), ntohs(src_port_xlate),
Xiaoping Fane1963d42015-08-25 17:06:19 -07002314 src_priority, src_dscp,
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002315 src_rx_packets, src_rx_bytes,
Ken Zhu37040ea2021-09-09 21:11:15 -07002316 src_mark,
Ken Zhu7e38d1a2021-11-30 17:31:46 -08002317 original_fast_xmit ? "Yes" : "No",
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002318 dest_dev->name,
2319 &dest_ip, &dest_ip_xlate,
Dave Hudson87973cd2013-10-22 16:00:04 +01002320 ntohs(dest_port), ntohs(dest_port_xlate),
Xiaoping Fane1963d42015-08-25 17:06:19 -07002321 dest_priority, dest_dscp,
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002322 dest_rx_packets, dest_rx_bytes,
Ken Zhu37040ea2021-09-09 21:11:15 -07002323 dest_mark,
Ken Zhu7e38d1a2021-11-30 17:31:46 -08002324 reply_fast_xmit ? "Yes" : "No",
Xiaoping Fand1dc7b22015-01-23 00:43:56 -08002325#ifdef CONFIG_NF_FLOW_COOKIE
2326 src_flow_cookie, dst_flow_cookie,
2327#endif
Ken Zhu37040ea2021-09-09 21:11:15 -07002328 last_sync_jiffies);
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002329
Guduri Prathyushaeb31c902021-11-10 20:18:50 +05302330	if (original_cm_flags & (SFE_IPV4_CONNECTION_MATCH_FLAG_PPPOE_DECAP | SFE_IPV4_CONNECTION_MATCH_FLAG_PPPOE_ENCAP)) {
Guduri Prathyusha79a5fee2021-11-11 17:59:10 +05302331 bytes_read += snprintf(msg + bytes_read, CHAR_DEV_MSG_SIZE, "pppoe_session_id=\"%u\" pppoe_server MAC=\"%pM\" ",
Guduri Prathyushaeb31c902021-11-10 20:18:50 +05302332 pppoe_session_id, pppoe_remote_mac);
2333 }
2334
Parikshit Guned31a8202022-01-05 22:15:04 +05302335 if (original_cm_sawf_valid) {
Parikshit Gunefdd98652022-03-14 17:33:01 +05302336 bytes_read += snprintf(msg + bytes_read, CHAR_DEV_MSG_SIZE, "flow_service_class=\"%d\" flow_msduq = \"0x%x\" ",
Parikshit Guned31a8202022-01-05 22:15:04 +05302337 flow_service_class, flow_msduq);
2338 }
2339
2340 if (reply_cm_sawf_valid) {
Parikshit Gunefdd98652022-03-14 17:33:01 +05302341 bytes_read += snprintf(msg + bytes_read, CHAR_DEV_MSG_SIZE, "return_service_class=\"%d\" return_msduq = \"0x%x\" ",
Parikshit Guned31a8202022-01-05 22:15:04 +05302342 return_service_class, return_msduq);
2343 }
2344
Guduri Prathyushaeb31c902021-11-10 20:18:50 +05302345 bytes_read += snprintf(msg + bytes_read, CHAR_DEV_MSG_SIZE, "/>\n");
2346
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002347 if (copy_to_user(buffer + *total_read, msg, CHAR_DEV_MSG_SIZE)) {
2348 return false;
2349 }
2350
2351 *length -= bytes_read;
2352 *total_read += bytes_read;
2353
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002354 return true;
2355}
2356
2357/*
2358 * sfe_ipv4_debug_dev_read_connections_end()
2359 * Generate part of the XML output.
2360 */
2361static bool sfe_ipv4_debug_dev_read_connections_end(struct sfe_ipv4 *si, char *buffer, char *msg, size_t *length,
2362 int *total_read, struct sfe_ipv4_debug_xml_write_state *ws)
2363{
2364 int bytes_read;
2365
2366 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE, "\t</connections>\n");
2367 if (copy_to_user(buffer + *total_read, msg, CHAR_DEV_MSG_SIZE)) {
2368 return false;
2369 }
2370
2371 *length -= bytes_read;
2372 *total_read += bytes_read;
2373
2374 ws->state++;
2375 return true;
2376}
2377
2378/*
2379 * sfe_ipv4_debug_dev_read_exceptions_start()
2380 * Generate part of the XML output.
2381 */
2382static bool sfe_ipv4_debug_dev_read_exceptions_start(struct sfe_ipv4 *si, char *buffer, char *msg, size_t *length,
2383 int *total_read, struct sfe_ipv4_debug_xml_write_state *ws)
2384{
2385 int bytes_read;
2386
2387 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE, "\t<exceptions>\n");
2388 if (copy_to_user(buffer + *total_read, msg, CHAR_DEV_MSG_SIZE)) {
2389 return false;
2390 }
2391
2392 *length -= bytes_read;
2393 *total_read += bytes_read;
2394
2395 ws->state++;
2396 return true;
2397}
2398
2399/*
2400 * sfe_ipv4_debug_dev_read_exceptions_exception()
2401 * Generate part of the XML output.
2402 */
2403static bool sfe_ipv4_debug_dev_read_exceptions_exception(struct sfe_ipv4 *si, char *buffer, char *msg, size_t *length,
2404 int *total_read, struct sfe_ipv4_debug_xml_write_state *ws)
2405{
Ratheesh Kannoth3aeb2892021-10-20 07:57:15 +05302406 int i;
2407 u64 val = 0;
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002408
Ratheesh Kannoth3aeb2892021-10-20 07:57:15 +05302409 for_each_possible_cpu(i) {
2410 const struct sfe_ipv4_stats *s = per_cpu_ptr(si->stats_pcpu, i);
2411 val += s->exception_events64[ws->iter_exception];
2412 }
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002413
Ratheesh Kannoth3aeb2892021-10-20 07:57:15 +05302414 if (val) {
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002415 int bytes_read;
2416
2417 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE,
2418 "\t\t<exception name=\"%s\" count=\"%llu\" />\n",
2419 sfe_ipv4_exception_events_string[ws->iter_exception],
Ratheesh Kannoth3aeb2892021-10-20 07:57:15 +05302420 val);
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002421		if (copy_to_user(buffer + *total_read, msg, bytes_read)) {
2422 return false;
2423 }
2424
2425 *length -= bytes_read;
2426 *total_read += bytes_read;
2427 }
2428
2429 ws->iter_exception++;
2430 if (ws->iter_exception >= SFE_IPV4_EXCEPTION_EVENT_LAST) {
2431 ws->iter_exception = 0;
2432 ws->state++;
2433 }
2434
2435 return true;
2436}
2437
2438/*
2439 * sfe_ipv4_debug_dev_read_exceptions_end()
2440 * Generate part of the XML output.
2441 */
2442static bool sfe_ipv4_debug_dev_read_exceptions_end(struct sfe_ipv4 *si, char *buffer, char *msg, size_t *length,
2443 int *total_read, struct sfe_ipv4_debug_xml_write_state *ws)
2444{
2445 int bytes_read;
2446
2447 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE, "\t</exceptions>\n");
2448	if (copy_to_user(buffer + *total_read, msg, bytes_read)) {
2449 return false;
2450 }
2451
2452 *length -= bytes_read;
2453 *total_read += bytes_read;
2454
2455 ws->state++;
2456 return true;
2457}
2458
2459/*
2460 * sfe_ipv4_debug_dev_read_stats()
2461 * Generate part of the XML output.
2462 */
2463static bool sfe_ipv4_debug_dev_read_stats(struct sfe_ipv4 *si, char *buffer, char *msg, size_t *length,
2464 int *total_read, struct sfe_ipv4_debug_xml_write_state *ws)
2465{
2466 int bytes_read;
Ratheesh Kannoth3aeb2892021-10-20 07:57:15 +05302467 struct sfe_ipv4_stats stats;
2468 unsigned int num_conn;
2469
2470 sfe_ipv4_update_summary_stats(si, &stats);
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002471
2472 spin_lock_bh(&si->lock);
Ratheesh Kannoth3aeb2892021-10-20 07:57:15 +05302473 num_conn = si->num_connections;
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002474 spin_unlock_bh(&si->lock);
2475
2476 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE, "\t<stats "
2477 "num_connections=\"%u\" "
Amitesh Anand63be37d2021-12-24 20:51:48 +05302478 "pkts_dropped=\"%llu\" "
Ken Zhu7e38d1a2021-11-30 17:31:46 -08002479 "pkts_fast_xmited=\"%llu\" "
Xiaoping Fan59176422015-05-22 15:58:10 -07002480 "pkts_forwarded=\"%llu\" pkts_not_forwarded=\"%llu\" "
2481 "create_requests=\"%llu\" create_collisions=\"%llu\" "
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05302482 "create_failures=\"%llu\" "
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002483 "destroy_requests=\"%llu\" destroy_misses=\"%llu\" "
2484 "flushes=\"%llu\" "
Guduri Prathyusha647fe3e2021-11-22 19:17:51 +05302485 "hash_hits=\"%llu\" hash_reorders=\"%llu\" "
2486 "pppoe_encap_pkts_fwded=\"%llu\" "
Guduri Prathyusha034d6352022-01-12 16:49:04 +05302487 "pppoe_decap_pkts_fwded=\"%llu\" "
Wayne Tan1cabbf12022-05-01 13:01:45 -07002488 "pppoe_bridge_pkts_fwded=\"%llu\" "
2489 "pppoe_bridge_pkts_3tuple_fwded=\"%llu\" />\n",
Ratheesh Kannoth3aeb2892021-10-20 07:57:15 +05302490 num_conn,
Amitesh Anand63be37d2021-12-24 20:51:48 +05302491 stats.packets_dropped64,
Ken Zhu7e38d1a2021-11-30 17:31:46 -08002492 stats.packets_fast_xmited64,
Ratheesh Kannoth3aeb2892021-10-20 07:57:15 +05302493 stats.packets_forwarded64,
2494 stats.packets_not_forwarded64,
2495 stats.connection_create_requests64,
2496 stats.connection_create_collisions64,
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05302497 stats.connection_create_failures64,
Ratheesh Kannoth3aeb2892021-10-20 07:57:15 +05302498 stats.connection_destroy_requests64,
2499 stats.connection_destroy_misses64,
2500 stats.connection_flushes64,
2501 stats.connection_match_hash_hits64,
Guduri Prathyusha647fe3e2021-11-22 19:17:51 +05302502 stats.connection_match_hash_reorders64,
2503 stats.pppoe_encap_packets_forwarded64,
Guduri Prathyusha034d6352022-01-12 16:49:04 +05302504 stats.pppoe_decap_packets_forwarded64,
Wayne Tan1cabbf12022-05-01 13:01:45 -07002505 stats.pppoe_bridge_packets_forwarded64,
2506 stats.pppoe_bridge_packets_3tuple_forwarded64);
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002507	if (copy_to_user(buffer + *total_read, msg, bytes_read)) {
2508 return false;
2509 }
2510
2511 *length -= bytes_read;
2512 *total_read += bytes_read;
2513
2514 ws->state++;
2515 return true;
2516}
2517
2518/*
2519 * sfe_ipv4_debug_dev_read_end()
2520 * Generate part of the XML output.
2521 */
2522static bool sfe_ipv4_debug_dev_read_end(struct sfe_ipv4 *si, char *buffer, char *msg, size_t *length,
2523 int *total_read, struct sfe_ipv4_debug_xml_write_state *ws)
2524{
2525 int bytes_read;
2526
2527 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE, "</sfe_ipv4>\n");
2528	if (copy_to_user(buffer + *total_read, msg, bytes_read)) {
2529 return false;
2530 }
2531
2532 *length -= bytes_read;
2533 *total_read += bytes_read;
2534
2535 ws->state++;
2536 return true;
2537}
2538
2539/*
2540 * Array of write functions that write various XML elements that correspond to
2541 * our XML output state machine.
2542 */
Xiaoping Fan6a1672f2016-08-17 19:58:12 -07002543static sfe_ipv4_debug_xml_write_method_t sfe_ipv4_debug_xml_write_methods[SFE_IPV4_DEBUG_XML_STATE_DONE] = {
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002544 sfe_ipv4_debug_dev_read_start,
2545 sfe_ipv4_debug_dev_read_connections_start,
2546 sfe_ipv4_debug_dev_read_connections_connection,
2547 sfe_ipv4_debug_dev_read_connections_end,
2548 sfe_ipv4_debug_dev_read_exceptions_start,
2549 sfe_ipv4_debug_dev_read_exceptions_exception,
2550 sfe_ipv4_debug_dev_read_exceptions_end,
2551 sfe_ipv4_debug_dev_read_stats,
2552 sfe_ipv4_debug_dev_read_end,
2553};
2554
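/*
 * For reference, a sketch of the overall XML document that the state handlers
 * above emit. Attribute lists are abbreviated, and the per-connection element
 * name is inferred from the surrounding handlers rather than shown verbatim:
 *
 *	<sfe_ipv4>
 *		<connections>
 *			<connection ... src_mark="..." ... dest_mark="..." ... />
 *		</connections>
 *		<exceptions>
 *			<exception name="..." count="..." />
 *		</exceptions>
 *		<stats num_connections="..." ... />
 *	</sfe_ipv4>
 */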
2555/*
2556 * sfe_ipv4_debug_dev_read()
2557 *	Send info to userspace in response to a read request from the user.
2558 */
2559static ssize_t sfe_ipv4_debug_dev_read(struct file *filp, char *buffer, size_t length, loff_t *offset)
2560{
2561 char msg[CHAR_DEV_MSG_SIZE];
2562 int total_read = 0;
2563 struct sfe_ipv4_debug_xml_write_state *ws;
2564 struct sfe_ipv4 *si = &__si;
2565
2566 ws = (struct sfe_ipv4_debug_xml_write_state *)filp->private_data;
2567 while ((ws->state != SFE_IPV4_DEBUG_XML_STATE_DONE) && (length > CHAR_DEV_MSG_SIZE)) {
2568		if (!(sfe_ipv4_debug_xml_write_methods[ws->state])(si, buffer, msg, &length, &total_read, ws)) {
2569			break;
2570		}
2571 }
2572
2573 return total_read;
2574}
2575
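/*
 * Hypothetical userspace usage: the major number is allocated dynamically by
 * register_chrdev() in sfe_ipv4_init() and can be looked up in /proc/devices;
 * 245 below is only an example:
 *
 *	mknod /dev/sfe_ipv4 c 245 0
 *	cat /dev/sfe_ipv4
 *
 * Each read() call runs the state machine while the user buffer still has more
 * than CHAR_DEV_MSG_SIZE bytes of space; once SFE_IPV4_DEBUG_XML_STATE_DONE is
 * reached, subsequent reads return 0 (EOF).
 */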
2576/*
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002577 * sfe_ipv4_debug_dev_open()
2578 */
2579static int sfe_ipv4_debug_dev_open(struct inode *inode, struct file *file)
2580{
2581 struct sfe_ipv4_debug_xml_write_state *ws;
2582
2583 ws = (struct sfe_ipv4_debug_xml_write_state *)file->private_data;
2584 if (!ws) {
2585 ws = kzalloc(sizeof(struct sfe_ipv4_debug_xml_write_state), GFP_KERNEL);
2586 if (!ws) {
2587 return -ENOMEM;
2588 }
2589
2590 ws->state = SFE_IPV4_DEBUG_XML_STATE_START;
2591 file->private_data = ws;
2592 }
2593
2594 return 0;
2595}
2596
2597/*
2598 * sfe_ipv4_debug_dev_release()
2599 */
2600static int sfe_ipv4_debug_dev_release(struct inode *inode, struct file *file)
2601{
2602 struct sfe_ipv4_debug_xml_write_state *ws;
2603
2604 ws = (struct sfe_ipv4_debug_xml_write_state *)file->private_data;
2605 if (ws) {
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002606 /*
2607 * We've finished with our output so free the write state.
2608 */
2609 kfree(ws);
Ratheesh Kannoth94fc5b82021-10-20 07:45:06 +05302610 file->private_data = NULL;
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002611 }
2612
2613 return 0;
2614}
2615
2616/*
2617 * File operations used in the debug char device
2618 */
2619static const struct file_operations sfe_ipv4_debug_dev_fops = {
	.owner = THIS_MODULE,
2620	.read = sfe_ipv4_debug_dev_read,
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002621	.open = sfe_ipv4_debug_dev_open,
2622	.release = sfe_ipv4_debug_dev_release,
2623};
2624
Xiaoping Fand1dc7b22015-01-23 00:43:56 -08002625#ifdef CONFIG_NF_FLOW_COOKIE
2626/*
2627 * sfe_register_flow_cookie_cb
2628 *	Register a callback that SFE uses to configure the flow cookie for a flow.
2629 *
2630 * A hardware driver that supports flow cookies should register a callback function with SFE.
2631 * SFE then uses that function to configure the flow cookie for each flow.
2632 * return: 0, success; !=0, fail
2633 */
2634int sfe_register_flow_cookie_cb(flow_cookie_set_func_t cb)
2635{
2636 struct sfe_ipv4 *si = &__si;
2637
2638 BUG_ON(!cb);
2639
2640 if (si->flow_cookie_set_func) {
2641 return -1;
2642 }
2643
2644 rcu_assign_pointer(si->flow_cookie_set_func, cb);
2645 return 0;
2646}
2647
2648/*
2649 * sfe_unregister_flow_cookie_cb
2650 *	Unregister the callback that was used to configure the flow cookie for a flow.
2651 *
2652 * return: 0, success; !=0, fail
2653 */
2654int sfe_unregister_flow_cookie_cb(flow_cookie_set_func_t cb)
2655{
2656 struct sfe_ipv4 *si = &__si;
2657
2658 RCU_INIT_POINTER(si->flow_cookie_set_func, NULL);
2659 return 0;
2660}
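/*
 * A minimal sketch of how a hardware driver might use the callback API above.
 * The callback name and body are illustrative; the parameter list is assumed
 * to follow the flow_cookie_set_func_t typedef in the flow cookie header.
 *
 *	static int my_nic_flow_cookie_set(u32 protocol, __be32 src_ip, __be16 src_port,
 *					  __be32 dst_ip, __be16 dst_port, u16 flow_cookie)
 *	{
 *		// Program the NIC flow table with the cookie for this 5-tuple.
 *		return 0;
 *	}
 *
 *	if (sfe_register_flow_cookie_cb(my_nic_flow_cookie_set)) {
 *		// A callback is already registered.
 *	}
 *	...
 *	sfe_unregister_flow_cookie_cb(my_nic_flow_cookie_set);
 */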
Xiaoping Fan640faf42015-08-28 15:50:55 -07002661
2662/*
2663 * sfe_ipv4_get_flow_cookie()
2664 */
2665static ssize_t sfe_ipv4_get_flow_cookie(struct device *dev,
2666 struct device_attribute *attr,
2667 char *buf)
2668{
2669 struct sfe_ipv4 *si = &__si;
Xiaoping Fan01c67cc2015-11-09 11:31:57 -08002670 return snprintf(buf, (ssize_t)PAGE_SIZE, "%d\n", si->flow_cookie_enable);
Xiaoping Fan640faf42015-08-28 15:50:55 -07002671}
2672
2673/*
2674 * sfe_ipv4_set_flow_cookie()
2675 */
2676static ssize_t sfe_ipv4_set_flow_cookie(struct device *dev,
2677 struct device_attribute *attr,
2678 const char *buf, size_t size)
2679{
2680 struct sfe_ipv4 *si = &__si;
Ken Zhu137722d2021-09-23 17:57:36 -07002681 si->flow_cookie_enable = simple_strtol(buf, NULL, 0);
Xiaoping Fan640faf42015-08-28 15:50:55 -07002682
2683 return size;
2684}
2685
2686/*
2687 * sysfs attributes.
2688 */
2689static const struct device_attribute sfe_ipv4_flow_cookie_attr =
Xiaoping Fane70da412016-02-26 16:47:57 -08002690 __ATTR(flow_cookie_enable, S_IWUSR | S_IRUGO, sfe_ipv4_get_flow_cookie, sfe_ipv4_set_flow_cookie);
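/*
 * With the "sfe_ipv4" kobject created at the top level of sysfs in
 * sfe_ipv4_init(), flow cookie support can be toggled from userspace, e.g.:
 *
 *	echo 1 > /sys/sfe_ipv4/flow_cookie_enable
 *	cat /sys/sfe_ipv4/flow_cookie_enable
 */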
Xiaoping Fand1dc7b22015-01-23 00:43:56 -08002691#endif /*CONFIG_NF_FLOW_COOKIE*/
2692
Ken Zhu137722d2021-09-23 17:57:36 -07002693/*
2694 * sfe_ipv4_get_cpu()
2695 */
2696static ssize_t sfe_ipv4_get_cpu(struct device *dev,
2697 struct device_attribute *attr,
2698 char *buf)
2699{
2700 struct sfe_ipv4 *si = &__si;
2701 return snprintf(buf, (ssize_t)PAGE_SIZE, "%d\n", si->work_cpu);
2702}
2703
2704/*
2705 * sfe_ipv4_set_cpu()
2706 */
2707static ssize_t sfe_ipv4_set_cpu(struct device *dev,
2708 struct device_attribute *attr,
2709 const char *buf, size_t size)
2710{
2711 struct sfe_ipv4 *si = &__si;
2712 int work_cpu;
2713 work_cpu = simple_strtol(buf, NULL, 0);
2714 if ((work_cpu >= 0) && (work_cpu <= NR_CPUS)) {
2715 si->work_cpu = work_cpu;
2716 } else {
2717		dev_err(dev, "%s is not in valid range [0,%d]\n", buf, NR_CPUS);
2718 }
2719 return size;
2720}
Ken Zhu7e38d1a2021-11-30 17:31:46 -08002721
Ken Zhu137722d2021-09-23 17:57:36 -07002722/*
2723 * sysfs attributes.
2724 */
2725static const struct device_attribute sfe_ipv4_cpu_attr =
2726 __ATTR(stats_work_cpu, S_IWUSR | S_IRUGO, sfe_ipv4_get_cpu, sfe_ipv4_set_cpu);
2727
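/*
 * Example: pin the stats sync work to CPU 2, or read back the current
 * setting (path assumes the "sfe_ipv4" kobject created in sfe_ipv4_init()):
 *
 *	echo 2 > /sys/sfe_ipv4/stats_work_cpu
 *	cat /sys/sfe_ipv4/stats_work_cpu
 */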
Ken Zhu7e38d1a2021-11-30 17:31:46 -08002728/*
Ratheesh Kannoth94fc5b82021-10-20 07:45:06 +05302729 * sfe_ipv4_conn_match_hash_init()
2730 * Initialize conn match hash lists
2731 */
2732static void sfe_ipv4_conn_match_hash_init(struct sfe_ipv4 *si, int len)
2733{
2734 struct hlist_head *hash_list = si->hlist_conn_match_hash_head;
2735 int i;
2736
2737 for (i = 0; i < len; i++) {
2738 INIT_HLIST_HEAD(&hash_list[i]);
2739 }
2740}
2741
Amitesh Anand63be37d2021-12-24 20:51:48 +05302742#ifdef SFE_PROCESS_LOCAL_OUT
2743/*
2744 * sfe_ipv4_local_out()
2745 * Called for packets from ip_local_out() - post encapsulation & other packets
2746 */
2747static unsigned int sfe_ipv4_local_out(void *priv, struct sk_buff *skb, const struct nf_hook_state *nhs)
2748{
Nitin Shettyc28f8172022-02-04 16:23:46 +05302749 struct sfe_l2_info l2_info = {0};
Guduri Prathyusha5f27e232022-01-06 14:39:04 +05302750
Amitesh Anand63be37d2021-12-24 20:51:48 +05302751 DEBUG_TRACE("%px: sfe: sfe_ipv4_local_out hook called.\n", skb);
2752
2753 if (likely(skb->skb_iif)) {
Guduri Prathyusha5f27e232022-01-06 14:39:04 +05302754 return sfe_ipv4_recv(skb->dev, skb, &l2_info, true) ? NF_STOLEN : NF_ACCEPT;
Amitesh Anand63be37d2021-12-24 20:51:48 +05302755 }
2756
2757 return NF_ACCEPT;
2758}
2759
2760/*
2761 * struct nf_hook_ops sfe_ipv4_ops_local_out[]
2762 * Hooks into netfilter local out packet monitoring points.
2763 */
2764static struct nf_hook_ops sfe_ipv4_ops_local_out[] __read_mostly = {
2765
2766 /*
2767 * Local out routing hook is used to monitor packets.
2768 */
2769 {
2770 .hook = sfe_ipv4_local_out,
2771 .pf = PF_INET,
2772 .hooknum = NF_INET_LOCAL_OUT,
2773 .priority = NF_IP_PRI_FIRST,
2774 },
2775};
2776#endif
2777
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002778/*
Dave Hudson87973cd2013-10-22 16:00:04 +01002779 * sfe_ipv4_init()
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002780 */
Ratheesh Kannoth24fb1db2021-10-20 07:28:06 +05302781int sfe_ipv4_init(void)
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002782{
2783 struct sfe_ipv4 *si = &__si;
2784 int result = -1;
2785
Dave Hudsondcd08fb2013-11-22 09:25:16 -06002786 DEBUG_INFO("SFE IPv4 init\n");
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002787
Ratheesh Kannoth94fc5b82021-10-20 07:45:06 +05302788 sfe_ipv4_conn_match_hash_init(si, ARRAY_SIZE(si->hlist_conn_match_hash_head));
2789
Ratheesh Kannoth3aeb2892021-10-20 07:57:15 +05302790 si->stats_pcpu = alloc_percpu_gfp(struct sfe_ipv4_stats, GFP_KERNEL | __GFP_ZERO);
2791 if (!si->stats_pcpu) {
2792 DEBUG_ERROR("failed to allocate stats memory for sfe_ipv4\n");
2793 goto exit0;
2794 }
2795
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002796 /*
Parikshit Guned31a8202022-01-05 22:15:04 +05302797 * Allocate per cpu per service class memory.
2798 */
2799 si->stats_pcpu_psc = alloc_percpu_gfp(struct sfe_ipv4_service_class_stats_db,
2800 GFP_KERNEL | __GFP_ZERO);
2801 if (!si->stats_pcpu_psc) {
2802 DEBUG_ERROR("failed to allocate per cpu per service clas stats memory\n");
2803 goto exit1;
2804 }
2805
2806 /*
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002807 * Create sys/sfe_ipv4
2808 */
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302809 si->sys_ipv4 = kobject_create_and_add("sfe_ipv4", NULL);
2810 if (!si->sys_ipv4) {
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002811 DEBUG_ERROR("failed to register sfe_ipv4\n");
Parikshit Guned31a8202022-01-05 22:15:04 +05302812 goto exit2;
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002813 }
2814
2815 /*
2816 * Create files, one for each parameter supported by this module.
2817 */
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302818 result = sysfs_create_file(si->sys_ipv4, &sfe_ipv4_debug_dev_attr.attr);
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002819 if (result) {
2820 DEBUG_ERROR("failed to register debug dev file: %d\n", result);
Parikshit Guned31a8202022-01-05 22:15:04 +05302821 goto exit3;
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002822 }
2823
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302824 result = sysfs_create_file(si->sys_ipv4, &sfe_ipv4_cpu_attr.attr);
Ken Zhu137722d2021-09-23 17:57:36 -07002825 if (result) {
2826		DEBUG_ERROR("failed to register stats work cpu file: %d\n", result);
Parikshit Guned31a8202022-01-05 22:15:04 +05302827 goto exit4;
Ken Zhu137722d2021-09-23 17:57:36 -07002828 }
2829
Xiaoping Fan640faf42015-08-28 15:50:55 -07002830#ifdef CONFIG_NF_FLOW_COOKIE
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302831 result = sysfs_create_file(si->sys_ipv4, &sfe_ipv4_flow_cookie_attr.attr);
Xiaoping Fan640faf42015-08-28 15:50:55 -07002832 if (result) {
2833 DEBUG_ERROR("failed to register flow cookie enable file: %d\n", result);
Parikshit Guned31a8202022-01-05 22:15:04 +05302834 goto exit5;
Xiaoping Fan640faf42015-08-28 15:50:55 -07002835 }
2836#endif /* CONFIG_NF_FLOW_COOKIE */
2837
Amitesh Anand63be37d2021-12-24 20:51:48 +05302838#ifdef SFE_PROCESS_LOCAL_OUT
2839#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0))
2840 result = nf_register_hooks(sfe_ipv4_ops_local_out, ARRAY_SIZE(sfe_ipv4_ops_local_out));
2841#else
2842 result = nf_register_net_hooks(&init_net, sfe_ipv4_ops_local_out, ARRAY_SIZE(sfe_ipv4_ops_local_out));
2843#endif
2844 if (result < 0) {
2845 DEBUG_ERROR("can't register nf local out hook: %d\n", result);
Parikshit Guned31a8202022-01-05 22:15:04 +05302846 goto exit6;
Amitesh Anand63be37d2021-12-24 20:51:48 +05302847 }
2848	DEBUG_INFO("Registered nf local out hook successfully: %d\n", result);
2849#endif
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002850 /*
2851 * Register our debug char device.
2852 */
2853 result = register_chrdev(0, "sfe_ipv4", &sfe_ipv4_debug_dev_fops);
2854 if (result < 0) {
2855 DEBUG_ERROR("Failed to register chrdev: %d\n", result);
Parikshit Guned31a8202022-01-05 22:15:04 +05302856 goto exit7;
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002857 }
2858
2859 si->debug_dev = result;
Ken Zhu137722d2021-09-23 17:57:36 -07002860 si->work_cpu = WORK_CPU_UNBOUND;
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002861
2862 /*
Ken Zhu7a43d882022-01-04 10:51:44 -08002863	 * Create a delayed work item to handle pull messages from ECM.
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002864 */
Ken Zhu137722d2021-09-23 17:57:36 -07002865 INIT_DELAYED_WORK(&(si->sync_dwork), sfe_ipv4_periodic_sync);
Ken Zhu7a43d882022-01-04 10:51:44 -08002866 /*
2867	 * Allocate a message for the stats sync-many operation.
2868 */
2869 sfe_ipv4_sync_many_msg = kzalloc(PAGE_SIZE, GFP_KERNEL);
2870	if (!sfe_ipv4_sync_many_msg) {
		result = -ENOMEM;
2871		goto exit8;
2872 }
2873
2874 sfe_ipv4_msg_init(sfe_ipv4_sync_many_msg, SFE_SPECIAL_INTERFACE_IPV4,
2875 SFE_TX_CONN_STATS_SYNC_MANY_MSG,
2876 sizeof(struct sfe_ipv4_conn_sync_many_msg),
2877 NULL,
2878 NULL);
2879 sfe_ipv4_sync_max_number = (PAGE_SIZE - sizeof(struct sfe_ipv4_msg)) / sizeof(struct sfe_ipv4_conn_sync);
Ken Zhu137722d2021-09-23 17:57:36 -07002880
Dave Hudson87973cd2013-10-22 16:00:04 +01002881 spin_lock_init(&si->lock);
Dave Hudson87973cd2013-10-22 16:00:04 +01002882 return 0;
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002883
Ken Zhu7a43d882022-01-04 10:51:44 -08002884exit8:
2885 unregister_chrdev(si->debug_dev, "sfe_ipv4");
2886
Parikshit Guned31a8202022-01-05 22:15:04 +05302887exit7:
Amitesh Anand63be37d2021-12-24 20:51:48 +05302888#ifdef SFE_PROCESS_LOCAL_OUT
2889 DEBUG_TRACE("sfe: Unregister local out hook\n");
2890#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0))
2891 nf_unregister_hooks(sfe_ipv4_ops_local_out, ARRAY_SIZE(sfe_ipv4_ops_local_out));
2892#else
2893 nf_unregister_net_hooks(&init_net, sfe_ipv4_ops_local_out, ARRAY_SIZE(sfe_ipv4_ops_local_out));
2894#endif
Parikshit Guned31a8202022-01-05 22:15:04 +05302895exit6:
Amitesh Anand63be37d2021-12-24 20:51:48 +05302896#endif
Xiaoping Fan640faf42015-08-28 15:50:55 -07002897#ifdef CONFIG_NF_FLOW_COOKIE
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302898 sysfs_remove_file(si->sys_ipv4, &sfe_ipv4_flow_cookie_attr.attr);
Xiaoping Fan640faf42015-08-28 15:50:55 -07002899
Parikshit Guned31a8202022-01-05 22:15:04 +05302900exit5:
Xiaoping Fan640faf42015-08-28 15:50:55 -07002901#endif /* CONFIG_NF_FLOW_COOKIE */
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302902 sysfs_remove_file(si->sys_ipv4, &sfe_ipv4_cpu_attr.attr);
Parikshit Guned31a8202022-01-05 22:15:04 +05302903exit4:
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302904 sysfs_remove_file(si->sys_ipv4, &sfe_ipv4_debug_dev_attr.attr);
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002905
Parikshit Guned31a8202022-01-05 22:15:04 +05302906exit3:
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302907 kobject_put(si->sys_ipv4);
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002908
Parikshit Guned31a8202022-01-05 22:15:04 +05302909exit2:
2910 free_percpu(si->stats_pcpu_psc);
2911
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002912exit1:
Ratheesh Kannoth3aeb2892021-10-20 07:57:15 +05302913 free_percpu(si->stats_pcpu);
2914
2915exit0:
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002916 return result;
2917}
2918
2919/*
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002920 * sfe_ipv4_exit()
2921 */
Ratheesh Kannoth24fb1db2021-10-20 07:28:06 +05302922void sfe_ipv4_exit(void)
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002923{
Dave Hudson87973cd2013-10-22 16:00:04 +01002924 struct sfe_ipv4 *si = &__si;
2925
Dave Hudsondcd08fb2013-11-22 09:25:16 -06002926 DEBUG_INFO("SFE IPv4 exit\n");
Dave Hudson87973cd2013-10-22 16:00:04 +01002927 /*
2928 * Destroy all connections.
2929 */
Dave Hudsondcd08fb2013-11-22 09:25:16 -06002930 sfe_ipv4_destroy_all_rules_for_dev(NULL);
Dave Hudson87973cd2013-10-22 16:00:04 +01002931
Ken Zhu137722d2021-09-23 17:57:36 -07002932 cancel_delayed_work_sync(&si->sync_dwork);
Dave Hudson87973cd2013-10-22 16:00:04 +01002933
Dave Hudson87973cd2013-10-22 16:00:04 +01002934 unregister_chrdev(si->debug_dev, "sfe_ipv4");
2935
Amitesh Anand63be37d2021-12-24 20:51:48 +05302936#ifdef SFE_PROCESS_LOCAL_OUT
2937 DEBUG_TRACE("sfe: Unregister local out hook\n");
2938#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0))
2939 nf_unregister_hooks(sfe_ipv4_ops_local_out, ARRAY_SIZE(sfe_ipv4_ops_local_out));
2940#else
2941 nf_unregister_net_hooks(&init_net, sfe_ipv4_ops_local_out, ARRAY_SIZE(sfe_ipv4_ops_local_out));
2942#endif
2943#endif
2944
Xiaoping Fan640faf42015-08-28 15:50:55 -07002945#ifdef CONFIG_NF_FLOW_COOKIE
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302946 sysfs_remove_file(si->sys_ipv4, &sfe_ipv4_flow_cookie_attr.attr);
Xiaoping Fan640faf42015-08-28 15:50:55 -07002947#endif /* CONFIG_NF_FLOW_COOKIE */
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302948 sysfs_remove_file(si->sys_ipv4, &sfe_ipv4_debug_dev_attr.attr);
Ken Zhu7e38d1a2021-11-30 17:31:46 -08002949
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302950 sysfs_remove_file(si->sys_ipv4, &sfe_ipv4_cpu_attr.attr);
Dave Hudson87973cd2013-10-22 16:00:04 +01002951
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302952 kobject_put(si->sys_ipv4);
Dave Hudson87973cd2013-10-22 16:00:04 +01002953
Ratheesh Kannoth3aeb2892021-10-20 07:57:15 +05302954 free_percpu(si->stats_pcpu);
Parikshit Guned31a8202022-01-05 22:15:04 +05302955 free_percpu(si->stats_pcpu_psc);
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002956}
2957
Xiaoping Fand1dc7b22015-01-23 00:43:56 -08002958#ifdef CONFIG_NF_FLOW_COOKIE
2959EXPORT_SYMBOL(sfe_register_flow_cookie_cb);
2960EXPORT_SYMBOL(sfe_unregister_flow_cookie_cb);
2961#endif