/*
 * sfe_ipv4.c
 *	Shortcut forwarding engine - IPv4 edition.
 *
 * Copyright (c) 2013-2016, 2019-2020, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/module.h>
#include <linux/sysfs.h>
#include <linux/skbuff.h>
#include <linux/icmp.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/vxlan.h>
#include <linux/etherdevice.h>
#include <linux/version.h>
#include <linux/lockdep.h>
#include <linux/refcount.h>
#include <linux/netfilter.h>
#include <linux/inetdevice.h>
#include <linux/netfilter_ipv4.h>
#include <net/protocol.h>
#include <net/gre.h>

#include "sfe_debug.h"
#include "sfe_api.h"
#include "sfe.h"
#include "sfe_flow_cookie.h"
#include "sfe_ipv4.h"
#include "sfe_ipv4_udp.h"
#include "sfe_ipv4_tcp.h"
#include "sfe_ipv4_icmp.h"
#include "sfe_pppoe.h"
#include "sfe_ipv4_gre.h"

static char *sfe_ipv4_exception_events_string[SFE_IPV4_EXCEPTION_EVENT_LAST] = {
	"UDP_HEADER_INCOMPLETE",
	"UDP_NO_CONNECTION",
	"UDP_IP_OPTIONS_OR_INITIAL_FRAGMENT",
	"UDP_SMALL_TTL",
	"UDP_NEEDS_FRAGMENTATION",
	"TCP_HEADER_INCOMPLETE",
	"TCP_NO_CONNECTION_SLOW_FLAGS",
	"TCP_NO_CONNECTION_FAST_FLAGS",
	"TCP_IP_OPTIONS_OR_INITIAL_FRAGMENT",
	"TCP_SMALL_TTL",
	"TCP_NEEDS_FRAGMENTATION",
	"TCP_FLAGS",
	"TCP_SEQ_EXCEEDS_RIGHT_EDGE",
	"TCP_SMALL_DATA_OFFS",
	"TCP_BAD_SACK",
	"TCP_BIG_DATA_OFFS",
	"TCP_SEQ_BEFORE_LEFT_EDGE",
	"TCP_ACK_EXCEEDS_RIGHT_EDGE",
	"TCP_ACK_BEFORE_LEFT_EDGE",
	"ICMP_HEADER_INCOMPLETE",
	"ICMP_UNHANDLED_TYPE",
	"ICMP_IPV4_HEADER_INCOMPLETE",
	"ICMP_IPV4_NON_V4",
	"ICMP_IPV4_IP_OPTIONS_INCOMPLETE",
	"ICMP_IPV4_UDP_HEADER_INCOMPLETE",
	"ICMP_IPV4_TCP_HEADER_INCOMPLETE",
	"ICMP_IPV4_UNHANDLED_PROTOCOL",
	"ICMP_NO_CONNECTION",
	"ICMP_FLUSHED_CONNECTION",
	"HEADER_INCOMPLETE",
	"HEADER_CSUM_BAD",
	"BAD_TOTAL_LENGTH",
	"NON_V4",
	"NON_INITIAL_FRAGMENT",
	"DATAGRAM_INCOMPLETE",
	"IP_OPTIONS_INCOMPLETE",
	"UNHANDLED_PROTOCOL",
	"NO_HEADROOM",
	"INVALID_PPPOE_SESSION",
	"INCORRECT_PPPOE_PARSING",
	"PPPOE_NOT_SET_IN_CME",
	"INGRESS_VLAN_TAG_MISMATCH",
	"INVALID_SOURCE_INTERFACE",
	"GRE_HEADER_INCOMPLETE",
	"GRE_NO_CONNECTION",
	"GRE_IP_OPTIONS_OR_INITIAL_FRAGMENT",
	"GRE_SMALL_TTL",
	"GRE_NEEDS_FRAGMENTATION"
};

static struct sfe_ipv4 __si;

/*
 * sfe_ipv4_gen_ip_csum()
 *	Generate the IP checksum for an IPv4 header.
 *
 * Note that this function assumes that we have only 20 bytes of IP header.
 */
u16 sfe_ipv4_gen_ip_csum(struct iphdr *iph)
{
	u32 sum;
	u16 *i = (u16 *)iph;

	iph->check = 0;

	/*
	 * Generate the sum.
	 */
	sum = i[0] + i[1] + i[2] + i[3] + i[4] + i[5] + i[6] + i[7] + i[8] + i[9];

	/*
	 * Fold it to ones-complement form.
	 */
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);

	return (u16)sum ^ 0xffff;
}

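/*
 * A worked example of the fold above (illustrative only, not part of the
 * driver): if the ten 16-bit header words sum to 0x2ab34, the first fold
 * gives (0xab34 + 0x2) = 0xab36 and the second fold is a no-op, so the
 * value written back is 0xab36 ^ 0xffff = 0x54c9.  The second fold is
 * still needed in general because the first addition can itself carry
 * out of bit 15.
 */
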
/*
 * sfe_ipv4_get_connection_match_hash()
 *	Generate the hash used in connection match lookups.
 */
static inline unsigned int sfe_ipv4_get_connection_match_hash(struct net_device *dev, u8 protocol,
							      __be32 src_ip, __be16 src_port,
							      __be32 dest_ip, __be16 dest_port)
{
	u32 hash = ntohl(src_ip ^ dest_ip) ^ protocol ^ ntohs(src_port ^ dest_port);

	return ((hash >> SFE_IPV4_CONNECTION_HASH_SHIFT) ^ hash) & SFE_IPV4_CONNECTION_HASH_MASK;
}

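/*
 * Note that the XOR mix above is symmetric: swapping (src_ip, src_port)
 * with (dest_ip, dest_port) yields the same hash, e.g.
 * hash(p, a, 1024, b, 80) == hash(p, b, 80, a, 1024), so both directions
 * of a non-NATed flow land in the same bucket.  The per-field compares in
 * the lookup below are what tell the two directions apart.
 */
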
/*
 * sfe_ipv4_find_connection_match_rcu()
 *	Get the IPv4 flow match info that corresponds to a particular 5-tuple.
 *
 * On entry we must be within an RCU read-side critical section.
 */
struct sfe_ipv4_connection_match *
sfe_ipv4_find_connection_match_rcu(struct sfe_ipv4 *si, struct net_device *dev, u8 protocol,
				   __be32 src_ip, __be16 src_port,
				   __be32 dest_ip, __be16 dest_port)
{
	struct sfe_ipv4_connection_match *cm = NULL;
	unsigned int conn_match_idx;
	struct hlist_head *lhead;

	WARN_ON_ONCE(!rcu_read_lock_held());

	conn_match_idx = sfe_ipv4_get_connection_match_hash(dev, protocol, src_ip, src_port, dest_ip, dest_port);

	lhead = &si->hlist_conn_match_hash_head[conn_match_idx];

	hlist_for_each_entry_rcu(cm, lhead, hnode) {
		if (cm->match_src_port != src_port
		    || cm->match_dest_port != dest_port
		    || cm->match_src_ip != src_ip
		    || cm->match_dest_ip != dest_ip
		    || cm->match_protocol != protocol) {
			continue;
		}

		this_cpu_inc(si->stats_pcpu->connection_match_hash_hits64);

		break;
	}

	return cm;
}

/*
 * sfe_ipv4_connection_match_update_summary_stats()
 *	Update the summary stats for a connection match entry.
 *
 * Stats are incremented atomically, so use atomic subtraction to update the
 * summary stats.
 */
static inline void sfe_ipv4_connection_match_update_summary_stats(struct sfe_ipv4_connection_match *cm,
								  u32 *packets, u32 *bytes)
{
	u32 packet_count, byte_count;

	packet_count = atomic_read(&cm->rx_packet_count);
	cm->rx_packet_count64 += packet_count;
	atomic_sub(packet_count, &cm->rx_packet_count);

	byte_count = atomic_read(&cm->rx_byte_count);
	cm->rx_byte_count64 += byte_count;
	atomic_sub(byte_count, &cm->rx_byte_count);

	*packets = packet_count;
	*bytes = byte_count;
}

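/*
 * Why the read-then-subtract pattern above is safe: the fast path only ever
 * increments rx_packet_count/rx_byte_count.  If, say, 5 packets land between
 * the atomic_read() and the atomic_sub(), those 5 simply remain in the
 * atomic counter and are folded into the 64-bit totals on the next sync;
 * nothing is lost or double counted.
 */
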
/*
 * sfe_ipv4_connection_match_compute_translations()
 *	Compute port and address translations for a connection match entry.
 */
static void sfe_ipv4_connection_match_compute_translations(struct sfe_ipv4_connection_match *cm)
{
	/*
	 * Before we insert the entry look to see if this is tagged as doing address
	 * translations.  If it is then work out the adjustment that we need to apply
	 * to the transport checksum.
	 */
	if (cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_XLATE_SRC) {
		/*
		 * Precompute an incremental checksum adjustment so we can
		 * edit packets in this stream very quickly.  The algorithm is from RFC1624.
		 */
		u16 src_ip_hi = cm->match_src_ip >> 16;
		u16 src_ip_lo = cm->match_src_ip & 0xffff;
		u32 xlate_src_ip = ~cm->xlate_src_ip;
		u16 xlate_src_ip_hi = xlate_src_ip >> 16;
		u16 xlate_src_ip_lo = xlate_src_ip & 0xffff;
		u16 xlate_src_port = ~cm->xlate_src_port;
		u32 adj;

		/*
		 * When we compute this fold it down to a 16-bit offset
		 * as that way we can avoid having to do a double
		 * folding of the twos-complement result because the
		 * addition of 2 16-bit values cannot cause a double
		 * wrap-around!
		 */
		adj = src_ip_hi + src_ip_lo + cm->match_src_port
		      + xlate_src_ip_hi + xlate_src_ip_lo + xlate_src_port;
		adj = (adj & 0xffff) + (adj >> 16);
		adj = (adj & 0xffff) + (adj >> 16);
		cm->xlate_src_csum_adjustment = (u16)adj;
	}

	if (cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_XLATE_DEST) {
		/*
		 * Precompute an incremental checksum adjustment so we can
		 * edit packets in this stream very quickly.  The algorithm is from RFC1624.
		 */
		u16 dest_ip_hi = cm->match_dest_ip >> 16;
		u16 dest_ip_lo = cm->match_dest_ip & 0xffff;
		u32 xlate_dest_ip = ~cm->xlate_dest_ip;
		u16 xlate_dest_ip_hi = xlate_dest_ip >> 16;
		u16 xlate_dest_ip_lo = xlate_dest_ip & 0xffff;
		u16 xlate_dest_port = ~cm->xlate_dest_port;
		u32 adj;

		/*
		 * When we compute this fold it down to a 16-bit offset
		 * as that way we can avoid having to do a double
		 * folding of the twos-complement result because the
		 * addition of 2 16-bit values cannot cause a double
		 * wrap-around!
		 */
		adj = dest_ip_hi + dest_ip_lo + cm->match_dest_port
		      + xlate_dest_ip_hi + xlate_dest_ip_lo + xlate_dest_port;
		adj = (adj & 0xffff) + (adj >> 16);
		adj = (adj & 0xffff) + (adj >> 16);
		cm->xlate_dest_csum_adjustment = (u16)adj;
	}

	if (cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_XLATE_SRC) {
		u32 adj = ~cm->match_src_ip + cm->xlate_src_ip;
		if (adj < cm->xlate_src_ip) {
			adj++;
		}

		adj = (adj & 0xffff) + (adj >> 16);
		adj = (adj & 0xffff) + (adj >> 16);
		cm->xlate_src_partial_csum_adjustment = (u16)adj;
	}

	if (cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_XLATE_DEST) {
		u32 adj = ~cm->match_dest_ip + cm->xlate_dest_ip;
		if (adj < cm->xlate_dest_ip) {
			adj++;
		}

		adj = (adj & 0xffff) + (adj >> 16);
		adj = (adj & 0xffff) + (adj >> 16);
		cm->xlate_dest_partial_csum_adjustment = (u16)adj;
	}
}

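/*
 * A minimal sketch of how a fast path is expected to consume the RFC1624
 * adjustment computed above (illustrative assumption only; the real
 * consumers live in the per-protocol files such as sfe_ipv4_udp.c, and the
 * helper name below is hypothetical):
 */
#if 0
static inline void sfe_example_apply_csum_adjustment(struct udphdr *udph,
						     struct sfe_ipv4_connection_match *cm)
{
	u32 sum = (u32)udph->check + (u32)cm->xlate_src_csum_adjustment;

	/*
	 * One fold is enough here: adding two 16-bit values cannot produce
	 * more than one carry out of the low 16 bits.
	 */
	sum = (sum & 0xffff) + (sum >> 16);
	udph->check = (u16)sum;
}
#endif
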
/*
 * sfe_ipv4_update_summary_stats()
 *	Update the summary stats.
 */
static void sfe_ipv4_update_summary_stats(struct sfe_ipv4 *si, struct sfe_ipv4_stats *stats)
{
	int i = 0;

	memset(stats, 0, sizeof(*stats));

	for_each_possible_cpu(i) {
		const struct sfe_ipv4_stats *s = per_cpu_ptr(si->stats_pcpu, i);

		stats->connection_create_requests64 += s->connection_create_requests64;
		stats->connection_create_collisions64 += s->connection_create_collisions64;
		stats->connection_create_failures64 += s->connection_create_failures64;
		stats->connection_destroy_requests64 += s->connection_destroy_requests64;
		stats->connection_destroy_misses64 += s->connection_destroy_misses64;
		stats->connection_match_hash_hits64 += s->connection_match_hash_hits64;
		stats->connection_match_hash_reorders64 += s->connection_match_hash_reorders64;
		stats->connection_flushes64 += s->connection_flushes64;
		stats->packets_dropped64 += s->packets_dropped64;
		stats->packets_forwarded64 += s->packets_forwarded64;
		stats->packets_fast_xmited64 += s->packets_fast_xmited64;
		stats->packets_not_forwarded64 += s->packets_not_forwarded64;
		stats->pppoe_encap_packets_forwarded64 += s->pppoe_encap_packets_forwarded64;
		stats->pppoe_decap_packets_forwarded64 += s->pppoe_decap_packets_forwarded64;
		stats->pppoe_bridge_packets_forwarded64 += s->pppoe_bridge_packets_forwarded64;
	}
}

/*
 * sfe_ipv4_insert_connection_match()
 *	Insert a connection match into the hash.
 */
static inline void sfe_ipv4_insert_connection_match(struct sfe_ipv4 *si,
						    struct sfe_ipv4_connection_match *cm)
{
	unsigned int conn_match_idx
		= sfe_ipv4_get_connection_match_hash(cm->match_dev, cm->match_protocol,
						     cm->match_src_ip, cm->match_src_port,
						     cm->match_dest_ip, cm->match_dest_port);

	lockdep_assert_held(&si->lock);

	hlist_add_head_rcu(&cm->hnode, &si->hlist_conn_match_hash_head[conn_match_idx]);
#ifdef CONFIG_NF_FLOW_COOKIE
	if (!si->flow_cookie_enable)
		return;

	/*
	 * Configure hardware to put a flow cookie in packets of this flow,
	 * then we can accelerate the lookup process when we receive such packets.
	 */
	for (conn_match_idx = 1; conn_match_idx < SFE_FLOW_COOKIE_SIZE; conn_match_idx++) {
		struct sfe_flow_cookie_entry *entry = &si->sfe_flow_cookie_table[conn_match_idx];

		if ((NULL == entry->match) && time_is_before_jiffies(entry->last_clean_time + HZ)) {
			flow_cookie_set_func_t func;

			rcu_read_lock();
			func = rcu_dereference(si->flow_cookie_set_func);
			if (func) {
				if (!func(cm->match_protocol, cm->match_src_ip, cm->match_src_port,
					  cm->match_dest_ip, cm->match_dest_port, conn_match_idx)) {
					entry->match = cm;
					cm->flow_cookie = conn_match_idx;
				}
			}
			rcu_read_unlock();

			break;
		}
	}
#endif
}

/*
 * sfe_ipv4_remove_connection_match()
 *	Remove a connection match object from the hash.
 */
static inline void sfe_ipv4_remove_connection_match(struct sfe_ipv4 *si, struct sfe_ipv4_connection_match *cm)
{
	lockdep_assert_held(&si->lock);

#ifdef CONFIG_NF_FLOW_COOKIE
	if (si->flow_cookie_enable) {
		/*
		 * Tell hardware that we no longer need a flow cookie in packets of this flow.
		 */
		unsigned int conn_match_idx;

		for (conn_match_idx = 1; conn_match_idx < SFE_FLOW_COOKIE_SIZE; conn_match_idx++) {
			struct sfe_flow_cookie_entry *entry = &si->sfe_flow_cookie_table[conn_match_idx];

			if (cm == entry->match) {
				flow_cookie_set_func_t func;

				rcu_read_lock();
				func = rcu_dereference(si->flow_cookie_set_func);
				if (func) {
					func(cm->match_protocol, cm->match_src_ip, cm->match_src_port,
					     cm->match_dest_ip, cm->match_dest_port, 0);
				}
				rcu_read_unlock();

				cm->flow_cookie = 0;
				entry->match = NULL;
				entry->last_clean_time = jiffies;
				break;
			}
		}
	}
#endif

	hlist_del_init_rcu(&cm->hnode);
}

/*
 * sfe_ipv4_get_connection_hash()
 *	Generate the hash used in connection lookups.
 */
static inline unsigned int sfe_ipv4_get_connection_hash(u8 protocol, __be32 src_ip, __be16 src_port,
							__be32 dest_ip, __be16 dest_port)
{
	u32 hash = ntohl(src_ip ^ dest_ip) ^ protocol ^ ntohs(src_port ^ dest_port);

	return ((hash >> SFE_IPV4_CONNECTION_HASH_SHIFT) ^ hash) & SFE_IPV4_CONNECTION_HASH_MASK;
}

/*
 * sfe_ipv4_find_connection()
 *	Get the IPv4 connection info that corresponds to a particular 5-tuple.
 *
 * On entry we must be holding the lock that protects the hash table.
 */
static inline struct sfe_ipv4_connection *sfe_ipv4_find_connection(struct sfe_ipv4 *si, u32 protocol,
								   __be32 src_ip, __be16 src_port,
								   __be32 dest_ip, __be16 dest_port)
{
	struct sfe_ipv4_connection *c;
	unsigned int conn_idx = sfe_ipv4_get_connection_hash(protocol, src_ip, src_port, dest_ip, dest_port);

	lockdep_assert_held(&si->lock);

	c = si->conn_hash[conn_idx];

	/*
	 * We will need the connection entry for the next create/destroy metadata,
	 * so there is no need to re-order the entry for these requests.
	 */
	while (c) {
		if ((c->src_port == src_port)
		    && (c->dest_port == dest_port)
		    && (c->src_ip == src_ip)
		    && (c->dest_ip == dest_ip)
		    && (c->protocol == protocol)) {
			return c;
		}

		c = c->next;
	}

	return NULL;
}

/*
 * sfe_ipv4_insert_connection()
 *	Insert a connection into the hash.
 *
 * On entry we must be holding the lock that protects the hash table.
 */
static void sfe_ipv4_insert_connection(struct sfe_ipv4 *si, struct sfe_ipv4_connection *c)
{
	struct sfe_ipv4_connection **hash_head;
	struct sfe_ipv4_connection *prev_head;
	unsigned int conn_idx;

	lockdep_assert_held(&si->lock);

	/*
	 * Insert entry into the connection hash.
	 */
	conn_idx = sfe_ipv4_get_connection_hash(c->protocol, c->src_ip, c->src_port,
						c->dest_ip, c->dest_port);
	hash_head = &si->conn_hash[conn_idx];
	prev_head = *hash_head;
	c->prev = NULL;
	if (prev_head) {
		prev_head->prev = c;
	}

	c->next = prev_head;
	*hash_head = c;

	/*
	 * Insert entry into the "all connections" list.
	 */
	if (si->all_connections_tail) {
		c->all_connections_prev = si->all_connections_tail;
		si->all_connections_tail->all_connections_next = c;
	} else {
		c->all_connections_prev = NULL;
		si->all_connections_head = c;
	}

	si->all_connections_tail = c;
	c->all_connections_next = NULL;
	si->num_connections++;

	/*
	 * Insert the connection match objects too.
	 */
	sfe_ipv4_insert_connection_match(si, c->original_match);
	sfe_ipv4_insert_connection_match(si, c->reply_match);
}

/*
 * sfe_ipv4_remove_connection()
 *	Remove a sfe_ipv4_connection object from the hash.
 */
bool sfe_ipv4_remove_connection(struct sfe_ipv4 *si, struct sfe_ipv4_connection *c)
{
	lockdep_assert_held(&si->lock);

	if (c->removed) {
		DEBUG_ERROR("%px: Connection has been removed already\n", c);
		return false;
	}

	/*
	 * Remove the connection match objects.
	 */
	sfe_ipv4_remove_connection_match(si, c->reply_match);
	sfe_ipv4_remove_connection_match(si, c->original_match);

	/*
	 * Unlink the connection.
	 */
	if (c->prev) {
		c->prev->next = c->next;
	} else {
		unsigned int conn_idx = sfe_ipv4_get_connection_hash(c->protocol, c->src_ip, c->src_port,
								     c->dest_ip, c->dest_port);
		si->conn_hash[conn_idx] = c->next;
	}

	if (c->next) {
		c->next->prev = c->prev;
	}

	/*
	 * Unlink the connection from the all_connections list.
	 */
	if (c->all_connections_prev) {
		c->all_connections_prev->all_connections_next = c->all_connections_next;
	} else {
		si->all_connections_head = c->all_connections_next;
	}

	if (c->all_connections_next) {
		c->all_connections_next->all_connections_prev = c->all_connections_prev;
	} else {
		si->all_connections_tail = c->all_connections_prev;
	}

	/*
	 * If I am the next sync connection, move the sync to my next or head.
	 */
	if (unlikely(si->wc_next == c)) {
		si->wc_next = c->all_connections_next;
	}

	c->removed = true;
	si->num_connections--;
	return true;
}

/*
 * sfe_ipv4_gen_sync_connection()
 *	Sync a connection.
 *
 * On entry to this function we expect that the lock for the connection is either
 * already held (while called from sfe_ipv4_periodic_sync()) or isn't required
 * (while called from sfe_ipv4_flush_connection()).
 */
static void sfe_ipv4_gen_sync_connection(struct sfe_ipv4 *si, struct sfe_ipv4_connection *c,
					 struct sfe_connection_sync *sis, sfe_sync_reason_t reason,
					 u64 now_jiffies)
{
	struct sfe_ipv4_connection_match *original_cm;
	struct sfe_ipv4_connection_match *reply_cm;
	u32 packet_count, byte_count;

	/*
	 * Fill in the update message.
	 */
	sis->is_v6 = 0;
	sis->protocol = c->protocol;
	sis->src_ip.ip = c->src_ip;
	sis->src_ip_xlate.ip = c->src_ip_xlate;
	sis->dest_ip.ip = c->dest_ip;
	sis->dest_ip_xlate.ip = c->dest_ip_xlate;
	sis->src_port = c->src_port;
	sis->src_port_xlate = c->src_port_xlate;
	sis->dest_port = c->dest_port;
	sis->dest_port_xlate = c->dest_port_xlate;

	original_cm = c->original_match;
	reply_cm = c->reply_match;
	sis->src_td_max_window = original_cm->protocol_state.tcp.max_win;
	sis->src_td_end = original_cm->protocol_state.tcp.end;
	sis->src_td_max_end = original_cm->protocol_state.tcp.max_end;
	sis->dest_td_max_window = reply_cm->protocol_state.tcp.max_win;
	sis->dest_td_end = reply_cm->protocol_state.tcp.end;
	sis->dest_td_max_end = reply_cm->protocol_state.tcp.max_end;

	sfe_ipv4_connection_match_update_summary_stats(original_cm, &packet_count, &byte_count);
	sis->src_new_packet_count = packet_count;
	sis->src_new_byte_count = byte_count;

	sfe_ipv4_connection_match_update_summary_stats(reply_cm, &packet_count, &byte_count);
	sis->dest_new_packet_count = packet_count;
	sis->dest_new_byte_count = byte_count;

	sis->src_dev = original_cm->match_dev;
	sis->src_packet_count = original_cm->rx_packet_count64;
	sis->src_byte_count = original_cm->rx_byte_count64;

	sis->dest_dev = reply_cm->match_dev;
	sis->dest_packet_count = reply_cm->rx_packet_count64;
	sis->dest_byte_count = reply_cm->rx_byte_count64;

	sis->reason = reason;

	/*
	 * Get the time increment since our last sync.
	 */
	sis->delta_jiffies = now_jiffies - c->last_sync_jiffies;
	c->last_sync_jiffies = now_jiffies;
}

/*
 * sfe_ipv4_free_connection_rcu()
 *	Called at RCU qs state to free the connection object.
 */
static void sfe_ipv4_free_connection_rcu(struct rcu_head *head)
{
	struct sfe_ipv4_connection *c;
	struct udp_sock *up;
	struct sock *sk;

	/*
	 * We don't need the spin lock as the connection is already removed from the linked list.
	 */
	c = container_of(head, struct sfe_ipv4_connection, rcu);

	BUG_ON(!c->removed);

	DEBUG_TRACE("%px: connection has been deleted\n", c);

	/*
	 * Decrease the refcount taken in sfe_ipv4_create_rule() during the call
	 * to __udp4_lib_lookup().
	 */
	up = c->reply_match->up;
	if (up) {
		sk = (struct sock *)up;
		sock_put(sk);
	}

	/*
	 * Release our hold of the source and dest devices and free the memory
	 * for our connection objects.
	 */
	dev_put(c->original_dev);
	dev_put(c->reply_dev);
	kfree(c->original_match);
	kfree(c->reply_match);
	kfree(c);
}

/*
 * sfe_ipv4_sync_status()
 *	Update a connection status to its connection manager.
 *
 * si: the ipv4 context
 * c: which connection to be notified
 * reason: what kind of notification: flush, stats or destroy
 */
void sfe_ipv4_sync_status(struct sfe_ipv4 *si,
			  struct sfe_ipv4_connection *c,
			  sfe_sync_reason_t reason)
{
	struct sfe_connection_sync sis;
	u64 now_jiffies;
	sfe_sync_rule_callback_t sync_rule_callback;

	rcu_read_lock();
	sync_rule_callback = rcu_dereference(si->sync_rule_callback);
	if (!sync_rule_callback) {
		rcu_read_unlock();
		return;
	}

	/*
	 * Generate a sync message and then sync.
	 */
	now_jiffies = get_jiffies_64();
	sfe_ipv4_gen_sync_connection(si, c, &sis, reason, now_jiffies);
	sync_rule_callback(&sis);

	rcu_read_unlock();
}

/*
 * sfe_ipv4_flush_connection()
 *	Flush a connection and free all associated resources.
 *
 * We need to be called with bottom halves disabled locally as we need to acquire
 * the connection hash lock and release it again.  In general we're actually called
 * from within a BH and so we're fine, but we're also called when connections are
 * torn down.
 */
void sfe_ipv4_flush_connection(struct sfe_ipv4 *si,
			       struct sfe_ipv4_connection *c,
			       sfe_sync_reason_t reason)
{
	BUG_ON(!c->removed);

	this_cpu_inc(si->stats_pcpu->connection_flushes64);
	sfe_ipv4_sync_status(si, c, reason);

	/*
	 * Release our hold of the source and dest devices and free the memory
	 * for our connection objects.
	 */
	call_rcu(&c->rcu, sfe_ipv4_free_connection_rcu);
}

/*
 * sfe_ipv4_exception_stats_inc()
 *	Increment exception stats.
 */
void sfe_ipv4_exception_stats_inc(struct sfe_ipv4 *si, enum sfe_ipv4_exception_events reason)
{
	struct sfe_ipv4_stats *stats = this_cpu_ptr(si->stats_pcpu);

	stats->exception_events64[reason]++;
	stats->packets_not_forwarded64++;
}

/*
 * sfe_ipv4_is_local_ip()
 *	Returns true if IP is local; returns false otherwise.
 */
static bool sfe_ipv4_is_local_ip(struct sfe_ipv4 *si, __be32 ip_addr)
{
	struct net_device *dev;

	dev = ip_dev_find(&init_net, ip_addr);
	if (dev) {
		dev_put(dev);
		return true;
	}

	return false;
}

/*
 * sfe_ipv4_recv()
 *	Handle packet receives and forwarding.
 *
 * Returns 1 if the packet is forwarded or 0 if it isn't.
 */
int sfe_ipv4_recv(struct net_device *dev, struct sk_buff *skb, struct sfe_l2_info *l2_info, bool tun_outer)
{
	struct sfe_ipv4 *si = &__si;
	unsigned int len;
	unsigned int tot_len;
	unsigned int frag_off;
	unsigned int ihl;
	bool sync_on_find;
	bool ip_options;
	struct iphdr *iph;
	u32 protocol;

	/*
	 * Check that we have space for an IP header here.
	 */
	len = skb->len;
	if (unlikely(!pskb_may_pull(skb, sizeof(struct iphdr)))) {
		sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_HEADER_INCOMPLETE);
		DEBUG_TRACE("len: %u is too short\n", len);
		return 0;
	}

	/*
	 * Validate the IP checksum if necessary.  If ip_summed is set to CHECKSUM_UNNECESSARY,
	 * it is assumed that the L3 checksum has been validated by the Rx interface or the
	 * tunnel interface that generated the packet.
	 */
	iph = (struct iphdr *)skb->data;
	if (unlikely(skb->ip_summed != CHECKSUM_UNNECESSARY) && (ip_fast_csum((u8 *)iph, iph->ihl))) {
		sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_HEADER_CSUM_BAD);

		DEBUG_TRACE("Bad IPv4 header csum: 0x%x\n", iph->check);
		return 0;
	}

	/*
	 * Check that our "total length" is large enough for an IP header.
	 */
	tot_len = ntohs(iph->tot_len);
	if (unlikely(tot_len < sizeof(struct iphdr))) {
		sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_BAD_TOTAL_LENGTH);
		DEBUG_TRACE("tot_len: %u is too short\n", tot_len);
		return 0;
	}

	/*
	 * Is our IP version wrong?
	 */
	if (unlikely(iph->version != 4)) {
		sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_NON_V4);
		DEBUG_TRACE("IP version: %u\n", iph->version);
		return 0;
	}

	/*
	 * Does our datagram fit inside the skb?
	 */
	if (unlikely(tot_len > len)) {
		DEBUG_TRACE("tot_len: %u, exceeds len: %u\n", tot_len, len);
		sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_DATAGRAM_INCOMPLETE);
		return 0;
	}

	/*
	 * Do we have a non-initial fragment?
	 */
	frag_off = ntohs(iph->frag_off);
	if (unlikely(frag_off & IP_OFFSET)) {
		sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_NON_INITIAL_FRAGMENT);
		DEBUG_TRACE("non-initial fragment\n");
		return 0;
	}

	/*
	 * If we have a (first) fragment then mark it to cause any connection to flush.
	 */
	sync_on_find = unlikely(frag_off & IP_MF) ? true : false;

	/*
	 * Do we have any IP options?  That's definitely a slow path!  If we do have IP
	 * options we need to recheck our header size.
	 */
	ihl = iph->ihl << 2;
	ip_options = unlikely(ihl != sizeof(struct iphdr)) ? true : false;
	if (unlikely(ip_options)) {
		if (unlikely(len < ihl)) {
			DEBUG_TRACE("len: %u is too short for header of size: %u\n", len, ihl);
			sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_IP_OPTIONS_INCOMPLETE);
			return 0;
		}

		sync_on_find = true;
	}

	protocol = iph->protocol;
	if (IPPROTO_UDP == protocol) {
		return sfe_ipv4_recv_udp(si, skb, dev, len, iph, ihl, sync_on_find, l2_info, tun_outer);
	}

	if (IPPROTO_TCP == protocol) {
		return sfe_ipv4_recv_tcp(si, skb, dev, len, iph, ihl, sync_on_find, l2_info);
	}

	if (IPPROTO_ICMP == protocol) {
		return sfe_ipv4_recv_icmp(si, skb, dev, len, iph, ihl);
	}

#ifdef SFE_GRE_TUN_ENABLE
	if (IPPROTO_GRE == protocol) {
		return sfe_ipv4_recv_gre(si, skb, dev, len, iph, ihl, sync_on_find, l2_info, tun_outer);
	}
#endif

	sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_UNHANDLED_PROTOCOL);

	DEBUG_TRACE("not UDP, TCP or ICMP: %u\n", protocol);
	return 0;
}

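/*
 * A minimal sketch of a caller of sfe_ipv4_recv() (illustrative assumption
 * only; the real Rx hook lives outside this file and the function name
 * below is hypothetical):
 */
#if 0
static int sfe_example_ipv4_rx(struct net_device *dev, struct sk_buff *skb)
{
	struct sfe_l2_info l2_info;

	memset(&l2_info, 0, sizeof(l2_info));

	/*
	 * A return of 1 means the engine consumed and forwarded the skb;
	 * 0 means the packet must be handed back to the regular stack.
	 */
	if (sfe_ipv4_recv(dev, skb, &l2_info, false)) {
		return NET_RX_SUCCESS;
	}

	return netif_receive_skb(skb);
}
#endif
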
static void
sfe_ipv4_update_tcp_state(struct sfe_ipv4_connection *c,
			  struct sfe_ipv4_rule_create_msg *msg)
{
	struct sfe_ipv4_connection_match *orig_cm;
	struct sfe_ipv4_connection_match *repl_cm;
	struct sfe_ipv4_tcp_connection_match *orig_tcp;
	struct sfe_ipv4_tcp_connection_match *repl_tcp;

	orig_cm = c->original_match;
	repl_cm = c->reply_match;
	orig_tcp = &orig_cm->protocol_state.tcp;
	repl_tcp = &repl_cm->protocol_state.tcp;

	/* update orig */
	if (orig_tcp->max_win < msg->tcp_rule.flow_max_window) {
		orig_tcp->max_win = msg->tcp_rule.flow_max_window;
	}
	if ((s32)(orig_tcp->end - msg->tcp_rule.flow_end) < 0) {
		orig_tcp->end = msg->tcp_rule.flow_end;
	}
	if ((s32)(orig_tcp->max_end - msg->tcp_rule.flow_max_end) < 0) {
		orig_tcp->max_end = msg->tcp_rule.flow_max_end;
	}

	/* update reply */
	if (repl_tcp->max_win < msg->tcp_rule.return_max_window) {
		repl_tcp->max_win = msg->tcp_rule.return_max_window;
	}
	if ((s32)(repl_tcp->end - msg->tcp_rule.return_end) < 0) {
		repl_tcp->end = msg->tcp_rule.return_end;
	}
	if ((s32)(repl_tcp->max_end - msg->tcp_rule.return_max_end) < 0) {
		repl_tcp->max_end = msg->tcp_rule.return_max_end;
	}

	/* update match flags */
	orig_cm->flags &= ~SFE_IPV4_CONNECTION_MATCH_FLAG_NO_SEQ_CHECK;
	repl_cm->flags &= ~SFE_IPV4_CONNECTION_MATCH_FLAG_NO_SEQ_CHECK;
	if (msg->rule_flags & SFE_RULE_CREATE_FLAG_NO_SEQ_CHECK) {
		orig_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_NO_SEQ_CHECK;
		repl_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_NO_SEQ_CHECK;
	}
}

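/*
 * The (s32)(a - b) < 0 comparisons above are wrap-safe "a is before b"
 * tests on the 32-bit TCP sequence space (the same trick as the kernel's
 * before()/after() helpers).  For example, with end = 0xfffffff0 and
 * flow_end = 0x00000010 the difference is 0xffffffe0, i.e. -32 as an s32,
 * so end is correctly treated as older even though the counter wrapped.
 */
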
static void
sfe_ipv4_update_protocol_state(struct sfe_ipv4_connection *c,
			       struct sfe_ipv4_rule_create_msg *msg)
{
	switch (msg->tuple.protocol) {
	case IPPROTO_TCP:
		sfe_ipv4_update_tcp_state(c, msg);
		break;
	}
}

/*
 * sfe_ipv4_match_entry_set_vlan()
 */
static void sfe_ipv4_match_entry_set_vlan(
	struct sfe_ipv4_connection_match *cm,
	u32 primary_ingress_vlan_tag,
	u32 primary_egress_vlan_tag,
	u32 secondary_ingress_vlan_tag,
	u32 secondary_egress_vlan_tag)
{
	u16 tpid;

	/*
	 * Prevent stacking header counts when updating.
	 */
	cm->ingress_vlan_hdr_cnt = 0;
	cm->egress_vlan_hdr_cnt = 0;
	memset(cm->ingress_vlan_hdr, 0, sizeof(cm->ingress_vlan_hdr));
	memset(cm->egress_vlan_hdr, 0, sizeof(cm->egress_vlan_hdr));

	/*
	 * vlan_hdr[0] corresponds to the outer tag,
	 * vlan_hdr[1] corresponds to the inner tag.
	 * Extract the vlan information (tpid and tci) from the rule message.
	 */
	if ((primary_ingress_vlan_tag & VLAN_VID_MASK) != SFE_VLAN_ID_NOT_CONFIGURED) {
		tpid = (u16)(primary_ingress_vlan_tag >> 16);
		cm->ingress_vlan_hdr[0].tpid = ntohs(tpid);
		cm->ingress_vlan_hdr[0].tci = (u16)primary_ingress_vlan_tag;
		cm->ingress_vlan_hdr_cnt++;
	}

	if ((secondary_ingress_vlan_tag & VLAN_VID_MASK) != SFE_VLAN_ID_NOT_CONFIGURED) {
		tpid = (u16)(secondary_ingress_vlan_tag >> 16);
		cm->ingress_vlan_hdr[1].tpid = ntohs(tpid);
		cm->ingress_vlan_hdr[1].tci = (u16)secondary_ingress_vlan_tag;
		cm->ingress_vlan_hdr_cnt++;
	}

	if ((primary_egress_vlan_tag & VLAN_VID_MASK) != SFE_VLAN_ID_NOT_CONFIGURED) {
		tpid = (u16)(primary_egress_vlan_tag >> 16);
		cm->egress_vlan_hdr[0].tpid = ntohs(tpid);
		cm->egress_vlan_hdr[0].tci = (u16)primary_egress_vlan_tag;
		cm->egress_vlan_hdr_cnt++;
	}

	if ((secondary_egress_vlan_tag & VLAN_VID_MASK) != SFE_VLAN_ID_NOT_CONFIGURED) {
		tpid = (u16)(secondary_egress_vlan_tag >> 16);
		cm->egress_vlan_hdr[1].tpid = ntohs(tpid);
		cm->egress_vlan_hdr[1].tci = (u16)secondary_egress_vlan_tag;
		cm->egress_vlan_hdr_cnt++;
	}
}

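/*
 * For reference, each u32 tag above carries the TPID in its upper 16 bits
 * and the TCI in its lower 16 bits.  For example (hypothetical values), a
 * tag of 0x8100c01e means TPID 0x8100 (802.1Q) and TCI 0xc01e, i.e. PCP 6,
 * DEI 0, VID 30; a VID field equal to SFE_VLAN_ID_NOT_CONFIGURED marks the
 * slot as unused.
 */
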
void sfe_ipv4_update_rule(struct sfe_ipv4_rule_create_msg *msg)
{
	struct sfe_ipv4_connection *c;
	struct sfe_ipv4 *si = &__si;

	spin_lock_bh(&si->lock);

	c = sfe_ipv4_find_connection(si,
				     msg->tuple.protocol,
				     msg->tuple.flow_ip,
				     msg->tuple.flow_ident,
				     msg->tuple.return_ip,
				     msg->tuple.return_ident);
	if (c != NULL) {
		sfe_ipv4_update_protocol_state(c, msg);
	}

	spin_unlock_bh(&si->lock);
}

/*
 * sfe_ipv4_xmit_eth_type_check()
 *	Checking if a MAC header has to be written.
 */
static inline bool sfe_ipv4_xmit_eth_type_check(struct net_device *dev, u32 cm_flags)
{
	if (!(dev->flags & IFF_NOARP)) {
		return true;
	}

	/*
	 * For PPPoE, since we are now supporting PPPoE encapsulation, we are writing the L2 header.
	 */
	if (unlikely(cm_flags & SFE_IPV4_CONNECTION_MATCH_FLAG_PPPOE_ENCAP)) {
		return true;
	}

	return false;
}

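/*
 * IFF_NOARP above is the usual marker for point-to-point or tunnel style
 * devices (e.g. PPP and GRE netdevs set it) that carry no Ethernet header,
 * so for those we skip writing a MAC header unless PPPoE encapsulation
 * forces one.
 */
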
1040/*
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001041 * sfe_ipv4_create_rule()
1042 * Create a forwarding rule.
1043 */
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301044int sfe_ipv4_create_rule(struct sfe_ipv4_rule_create_msg *msg)
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001045{
Dave Hudsondcd08fb2013-11-22 09:25:16 -06001046 struct sfe_ipv4 *si = &__si;
Ratheesh Kannoth94fc5b82021-10-20 07:45:06 +05301047 struct sfe_ipv4_connection *c, *c_old;
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001048 struct sfe_ipv4_connection_match *original_cm;
1049 struct sfe_ipv4_connection_match *reply_cm;
Matthew McClintockdb5ac512014-01-16 17:01:40 -06001050 struct net_device *dest_dev;
1051 struct net_device *src_dev;
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301052 struct sfe_ipv4_5tuple *tuple = &msg->tuple;
Suruchi Sumanc1a4a612021-10-21 14:50:23 +05301053 s32 flow_interface_num = msg->conn_rule.flow_top_interface_num;
1054 s32 return_interface_num = msg->conn_rule.return_top_interface_num;
Amitesh Anand63be37d2021-12-24 20:51:48 +05301055 struct net *net;
1056 struct sock *sk;
1057 unsigned int src_if_idx;
Matthew McClintockdb5ac512014-01-16 17:01:40 -06001058
Suruchi Sumanc1a4a612021-10-21 14:50:23 +05301059 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_USE_FLOW_BOTTOM_INTERFACE) {
1060 flow_interface_num = msg->conn_rule.flow_interface_num;
1061 }
1062
1063 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_USE_RETURN_BOTTOM_INTERFACE) {
1064 return_interface_num = msg->conn_rule.return_interface_num;
1065 }
1066
1067 src_dev = dev_get_by_index(&init_net, flow_interface_num);
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301068 if (!src_dev) {
1069 DEBUG_WARN("%px: Unable to find src_dev corresponding to %d\n", msg,
Suruchi Sumanc1a4a612021-10-21 14:50:23 +05301070 flow_interface_num);
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301071 this_cpu_inc(si->stats_pcpu->connection_create_failures64);
1072 return -EINVAL;
1073 }
1074
Suruchi Sumanc1a4a612021-10-21 14:50:23 +05301075 dest_dev = dev_get_by_index(&init_net, return_interface_num);
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301076 if (!dest_dev) {
1077 DEBUG_WARN("%px: Unable to find dest_dev corresponding to %d\n", msg,
Suruchi Sumanc1a4a612021-10-21 14:50:23 +05301078 return_interface_num);
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301079 this_cpu_inc(si->stats_pcpu->connection_create_failures64);
1080 dev_put(src_dev);
1081 return -EINVAL;
1082 }
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001083
Matthew McClintock389b42a2014-09-24 14:05:51 -05001084 if (unlikely((dest_dev->reg_state != NETREG_REGISTERED) ||
1085 (src_dev->reg_state != NETREG_REGISTERED))) {
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301086 dev_put(src_dev);
1087 dev_put(dest_dev);
1088 DEBUG_WARN("%px: src_dev=%s and dest_dev=%s are unregistered\n", msg,
1089 src_dev->name, dest_dev->name);
1090 this_cpu_inc(si->stats_pcpu->connection_create_failures64);
Matthew McClintock389b42a2014-09-24 14:05:51 -05001091 return -EINVAL;
1092 }
1093
Ratheesh Kannoth94fc5b82021-10-20 07:45:06 +05301094 /*
1095 * Allocate the various connection tracking objects.
1096 */
1097 c = (struct sfe_ipv4_connection *)kmalloc(sizeof(struct sfe_ipv4_connection), GFP_ATOMIC);
1098 if (unlikely(!c)) {
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301099 DEBUG_WARN("%px: memory allocation of connection entry failed\n", msg);
1100 this_cpu_inc(si->stats_pcpu->connection_create_failures64);
1101 dev_put(src_dev);
1102 dev_put(dest_dev);
Ratheesh Kannoth94fc5b82021-10-20 07:45:06 +05301103 return -ENOMEM;
1104 }
1105
1106 original_cm = (struct sfe_ipv4_connection_match *)kmalloc(sizeof(struct sfe_ipv4_connection_match), GFP_ATOMIC);
1107 if (unlikely(!original_cm)) {
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301108 DEBUG_WARN("%px: memory allocation of connection match entry failed\n", msg);
1109 this_cpu_inc(si->stats_pcpu->connection_create_failures64);
Ratheesh Kannoth94fc5b82021-10-20 07:45:06 +05301110 kfree(c);
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301111 dev_put(src_dev);
1112 dev_put(dest_dev);
Ratheesh Kannoth94fc5b82021-10-20 07:45:06 +05301113 return -ENOMEM;
1114 }
1115
1116 reply_cm = (struct sfe_ipv4_connection_match *)kmalloc(sizeof(struct sfe_ipv4_connection_match), GFP_ATOMIC);
1117 if (unlikely(!reply_cm)) {
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301118 DEBUG_WARN("%px: memory allocation of connection match entry failed\n", msg);
1119 this_cpu_inc(si->stats_pcpu->connection_create_failures64);
Ratheesh Kannoth94fc5b82021-10-20 07:45:06 +05301120 kfree(original_cm);
1121 kfree(c);
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301122 dev_put(src_dev);
1123 dev_put(dest_dev);
Ratheesh Kannoth94fc5b82021-10-20 07:45:06 +05301124 return -ENOMEM;
1125 }
1126
Ratheesh Kannoth3aeb2892021-10-20 07:57:15 +05301127 this_cpu_inc(si->stats_pcpu->connection_create_requests64);
1128
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001129 spin_lock_bh(&si->lock);
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001130
1131 /*
Nicolas Costa436926b2014-01-14 10:36:22 -06001132 * Check to see if there is already a flow that matches the rule we're
1133 * trying to create. If there is then we can't create a new one.
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001134 */
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05301135 c_old = sfe_ipv4_find_connection(si,
Wayne Tanbb7f1782021-12-13 11:16:04 -08001136 msg->tuple.protocol,
1137 msg->tuple.flow_ip,
1138 msg->tuple.flow_ident,
1139 msg->tuple.return_ip,
1140 msg->tuple.return_ident);
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301141
Ratheesh Kannoth94fc5b82021-10-20 07:45:06 +05301142 if (c_old != NULL) {
Ratheesh Kannoth3aeb2892021-10-20 07:57:15 +05301143 this_cpu_inc(si->stats_pcpu->connection_create_collisions64);
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001144
1145 /*
Nicolas Costa436926b2014-01-14 10:36:22 -06001146 * If we already have the flow then it's likely that this
1147 * request to create the connection rule contains more
1148 * up-to-date information. Check and update accordingly.
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001149 */
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301150 sfe_ipv4_update_protocol_state(c, msg);
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001151 spin_unlock_bh(&si->lock);
1152
Ratheesh Kannoth94fc5b82021-10-20 07:45:06 +05301153 kfree(reply_cm);
1154 kfree(original_cm);
1155 kfree(c);
1156
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301157 dev_put(src_dev);
1158 dev_put(dest_dev);
1159
Amitesh Anand63be37d2021-12-24 20:51:48 +05301160 DEBUG_TRACE("%px: connection already exists - p:%d\n"
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301161 " s: %s:%pM:%pI4:%u, d: %s:%pM:%pI4:%u\n",
Amitesh Anand63be37d2021-12-24 20:51:48 +05301162 msg, tuple->protocol,
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301163 src_dev->name, msg->conn_rule.flow_mac, &tuple->flow_ip, ntohs(tuple->flow_ident),
1164 dest_dev->name, msg->conn_rule.return_mac, &tuple->return_ip, ntohs(tuple->return_ident));
1165
Nicolas Costa514fde02014-01-13 15:50:29 -06001166 return -EADDRINUSE;
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001167 }
1168
1169 /*
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001170 * Fill in the "original" direction connection matching object.
1171 * Note that the transmit MAC address is "dest_mac_xlate" because
1172 * we always know both ends of a connection by their translated
1173 * addresses and not their public addresses.
1174 */
Matthew McClintockdb5ac512014-01-16 17:01:40 -06001175 original_cm->match_dev = src_dev;
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301176 original_cm->match_protocol = tuple->protocol;
1177 original_cm->match_src_ip = tuple->flow_ip;
Suruchi Suman66609a72022-01-20 02:34:25 +05301178 original_cm->match_src_port = netif_is_vxlan(src_dev) ? 0 : tuple->flow_ident;
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301179 original_cm->match_dest_ip = tuple->return_ip;
1180 original_cm->match_dest_port = tuple->return_ident;
1181
1182 original_cm->xlate_src_ip = msg->conn_rule.flow_ip_xlate;
1183 original_cm->xlate_src_port = msg->conn_rule.flow_ident_xlate;
1184 original_cm->xlate_dest_ip = msg->conn_rule.return_ip_xlate;
Nitin Shettye6ed5b52021-12-27 14:50:11 +05301185 original_cm->xlate_dest_port = msg->conn_rule.return_ident_xlate;
1186
1187 if (tuple->protocol == IPPROTO_GRE) {
1188 /*
1189 * the PPTP is 4 tuple lookup.
1190 * During th rule lookup destination call id from packet
1191 * is matched against destination port in cm.
1192 */
1193 original_cm->match_src_port = 0;
1194 original_cm->xlate_src_port = 0;
1195 }
Wayne Tanbb7f1782021-12-13 11:16:04 -08001196
Ratheesh Kannoth94fc5b82021-10-20 07:45:06 +05301197 atomic_set(&original_cm->rx_packet_count, 0);
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001198 original_cm->rx_packet_count64 = 0;
Ratheesh Kannoth94fc5b82021-10-20 07:45:06 +05301199 atomic_set(&original_cm->rx_byte_count, 0);
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001200 original_cm->rx_byte_count64 = 0;
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301201
Matthew McClintockdb5ac512014-01-16 17:01:40 -06001202 original_cm->xmit_dev = dest_dev;
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301203 original_cm->xmit_dev_mtu = msg->conn_rule.return_mtu;
1204
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001205 original_cm->connection = c;
1206 original_cm->counter_match = reply_cm;
Wayne Tanbb7f1782021-12-13 11:16:04 -08001207 original_cm->l2_hdr_size = 0;
1208 original_cm->flags = 0;
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301209
Amitesh Anand63be37d2021-12-24 20:51:48 +05301210 /*
1211 * UDP Socket is valid only in decap direction.
1212 */
1213 RCU_INIT_POINTER(original_cm->up, NULL);
1214
Ken Zhu37040ea2021-09-09 21:11:15 -07001215 if (msg->valid_flags & SFE_RULE_CREATE_MARK_VALID) {
1216 original_cm->mark = msg->mark_rule.flow_mark;
1217 original_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_MARK;
1218 }
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301219 if (msg->valid_flags & SFE_RULE_CREATE_QOS_VALID) {
1220 original_cm->priority = msg->qos_rule.flow_qos_tag;
Xiaoping Fane1963d42015-08-25 17:06:19 -07001221 original_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_PRIORITY_REMARK;
1222 }
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301223 if (msg->valid_flags & SFE_RULE_CREATE_DSCP_MARKING_VALID) {
1224 original_cm->dscp = msg->dscp_rule.flow_dscp << SFE_IPV4_DSCP_SHIFT;
Xiaoping Fane1963d42015-08-25 17:06:19 -07001225 original_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_DSCP_REMARK;
1226 }
Ratheesh Kannoth71fc51e2022-01-05 10:02:47 +05301227 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_BRIDGE_FLOW) {
1228 original_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_BRIDGE_FLOW;
1229 }
Ken Zhu7e38d1a2021-11-30 17:31:46 -08001230 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_FLOW_TRANSMIT_FAST) {
1231 original_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_FAST_XMIT_DEV_ADMISSION;
1232 }
Ratheesh Kannoth71fc51e2022-01-05 10:02:47 +05301233
Wayne Tanbb7f1782021-12-13 11:16:04 -08001234 /*
1235 * Add VLAN rule to original_cm
1236 */
1237 if (msg->valid_flags & SFE_RULE_CREATE_VLAN_VALID) {
1238 struct sfe_vlan_rule *vlan_primary_rule = &msg->vlan_primary_rule;
1239 struct sfe_vlan_rule *vlan_secondary_rule = &msg->vlan_secondary_rule;
1240 sfe_ipv4_match_entry_set_vlan(original_cm,
1241 vlan_primary_rule->ingress_vlan_tag,
1242 vlan_primary_rule->egress_vlan_tag,
1243 vlan_secondary_rule->ingress_vlan_tag,
1244 vlan_secondary_rule->egress_vlan_tag);
1245
1246 if ((msg->rule_flags & SFE_RULE_CREATE_FLAG_USE_RETURN_BOTTOM_INTERFACE) &&
1247 original_cm->egress_vlan_hdr_cnt > 0) {
1248 original_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_INSERT_EGRESS_VLAN_TAG;
1249 original_cm->l2_hdr_size += original_cm->egress_vlan_hdr_cnt * VLAN_HLEN;
1250 }
1251 }
1252
Nitin Shettye6ed5b52021-12-27 14:50:11 +05301253 if ((IPPROTO_GRE == tuple->protocol) && !sfe_ipv4_is_local_ip(si, original_cm->match_dest_ip)) {
1254 original_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_PASSTHROUGH;
1255 }
1256
Xiaoping Fand1dc7b22015-01-23 00:43:56 -08001257#ifdef CONFIG_NF_FLOW_COOKIE
1258 original_cm->flow_cookie = 0;
1259#endif
Zhi Chen8748eb32015-06-18 12:58:48 -07001260#ifdef CONFIG_XFRM
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301261 if (msg->valid_flags & SFE_RULE_CREATE_DIRECTION_VALID) {
1262 original_cm->flow_accel = msg->direction_rule.flow_accel;
1263 } else {
1264 original_cm->flow_accel = 1;
1265 }
Zhi Chen8748eb32015-06-18 12:58:48 -07001266#endif
Ratheesh Kannotha3cf0e02021-12-09 09:44:10 +05301267 /*
1268	 * If l2_features are disabled and the flow uses L2 features such as macvlan/bridge/PPPoE/VLAN,
1269	 * the bottom interfaces are expected to be disabled in the flow rule and only the top
1270	 * interfaces are used. In such cases, do not use HW csum offload; csum offload is used only
1271	 * when we are sending directly to a destination interface that supports it.
1272 */
Nitin Shettye6ed5b52021-12-27 14:50:11 +05301273 if (likely(dest_dev->features & NETIF_F_HW_CSUM) && sfe_dev_has_hw_csum(dest_dev)) {
Ratheesh Kannotha3cf0e02021-12-09 09:44:10 +05301274 if ((msg->conn_rule.return_top_interface_num == msg->conn_rule.return_interface_num) ||
1275 (msg->rule_flags & SFE_RULE_CREATE_FLAG_USE_RETURN_BOTTOM_INTERFACE)) {
Ratheesh Kannoth48445532022-02-07 16:19:00 +05301276
1277 /*
1278	 * Don't enable CSUM offload
1279 */
1280#if 0
Suruchi Sumanf2077182022-01-13 21:35:23 +05301281 original_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_CSUM_OFFLOAD;
Ratheesh Kannoth48445532022-02-07 16:19:00 +05301282#endif
Ratheesh Kannotha3cf0e02021-12-09 09:44:10 +05301283 }
1284 }
Matthew McClintockdb5ac512014-01-16 17:01:40 -06001285
Wayne Tanbb7f1782021-12-13 11:16:04 -08001286 reply_cm->l2_hdr_size = 0;
Ratheesh Kannoth5dee3772022-01-18 11:27:14 +05301287 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_SRC_INTERFACE_CHECK) {
1288 original_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK;
1289 }
1290
Guduri Prathyusha647fe3e2021-11-22 19:17:51 +05301291 reply_cm->flags = 0;
1292
Matthew McClintockdb5ac512014-01-16 17:01:40 -06001293 /*
Guduri Prathyushaeb31c902021-11-10 20:18:50 +05301294	 * Add PPPoE parameters to the original and reply entries based on the direction in which
1295	 * the PPPoE header is valid in the ECM rule.
1296 *
1297 * If PPPoE is valid in flow direction (from interface is PPPoE), then
1298 * original cm will have PPPoE at ingress (strip PPPoE header)
1299 * reply cm will have PPPoE at egress (add PPPoE header)
1300 *
1301 * If PPPoE is valid in return direction (to interface is PPPoE), then
1302 * original cm will have PPPoE at egress (add PPPoE header)
1303 * reply cm will have PPPoE at ingress (strip PPPoE header)
1304 */
1305 if (msg->valid_flags & SFE_RULE_CREATE_PPPOE_DECAP_VALID) {
1306 original_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_PPPOE_DECAP;
1307 original_cm->pppoe_session_id = msg->pppoe_rule.flow_pppoe_session_id;
1308 ether_addr_copy(original_cm->pppoe_remote_mac, msg->pppoe_rule.flow_pppoe_remote_mac);
1309
1310 reply_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_PPPOE_ENCAP;
Wayne Tanbb7f1782021-12-13 11:16:04 -08001311 reply_cm->l2_hdr_size += SFE_PPPOE_SESSION_HEADER_SIZE;
Guduri Prathyushaeb31c902021-11-10 20:18:50 +05301312 reply_cm->pppoe_session_id = msg->pppoe_rule.flow_pppoe_session_id;
1313 ether_addr_copy(reply_cm->pppoe_remote_mac, msg->pppoe_rule.flow_pppoe_remote_mac);
1314 }
1315
1316 if (msg->valid_flags & SFE_RULE_CREATE_PPPOE_ENCAP_VALID) {
1317 original_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_PPPOE_ENCAP;
Wayne Tanbb7f1782021-12-13 11:16:04 -08001318 original_cm->l2_hdr_size += SFE_PPPOE_SESSION_HEADER_SIZE;
Guduri Prathyushaeb31c902021-11-10 20:18:50 +05301319 original_cm->pppoe_session_id = msg->pppoe_rule.return_pppoe_session_id;
1320 ether_addr_copy(original_cm->pppoe_remote_mac, msg->pppoe_rule.return_pppoe_remote_mac);
1321
1322 reply_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_PPPOE_DECAP;
1323 reply_cm->pppoe_session_id = msg->pppoe_rule.return_pppoe_session_id;
1324 ether_addr_copy(reply_cm->pppoe_remote_mac, msg->pppoe_rule.return_pppoe_remote_mac);
1325 }
1326
Ratheesh Kannoth5dee3772022-01-18 11:27:14 +05301327 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_SRC_INTERFACE_CHECK) {
1328 reply_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK;
1329 }
1330
Guduri Prathyushaeb31c902021-11-10 20:18:50 +05301331 /*
Ken Zhubbf49652021-09-12 15:33:09 -07001332	 * For non-ARP interfaces, we don't write an L2 header.
Matthew McClintockdb5ac512014-01-16 17:01:40 -06001333 */
Guduri Prathyusha79a5fee2021-11-11 17:59:10 +05301334 if (sfe_ipv4_xmit_eth_type_check(dest_dev, original_cm->flags)) {
Ratheesh Kannoth29140aa2021-10-20 08:25:02 +05301335
1336 /*
1337 * Check whether the rule has configured a specific source MAC address to use.
1338	 * This is needed when virtual L3 interfaces such as br-lan, macvlan, or vlan are used during egress.
1339 */
Ratheesh Kannoth71fc51e2022-01-05 10:02:47 +05301340
1341 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_BRIDGE_FLOW) {
1342 ether_addr_copy((u8 *)original_cm->xmit_src_mac, (u8 *)msg->conn_rule.flow_mac);
Ratheesh Kannoth29140aa2021-10-20 08:25:02 +05301343 } else {
Ratheesh Kannoth71fc51e2022-01-05 10:02:47 +05301344 if ((msg->valid_flags & SFE_RULE_CREATE_SRC_MAC_VALID) &&
1345 (msg->src_mac_rule.mac_valid_flags & SFE_SRC_MAC_RETURN_VALID)) {
1346 ether_addr_copy((u8 *)original_cm->xmit_src_mac, (u8 *)msg->src_mac_rule.return_src_mac);
1347 } else {
1348 ether_addr_copy((u8 *)original_cm->xmit_src_mac, (u8 *)dest_dev->dev_addr);
1349 }
Ratheesh Kannoth29140aa2021-10-20 08:25:02 +05301350 }
1351
1352 ether_addr_copy((u8 *)original_cm->xmit_dest_mac, (u8 *)msg->conn_rule.return_mac);
1353
Matthew McClintockdb5ac512014-01-16 17:01:40 -06001354 original_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_WRITE_L2_HDR;
Wayne Tanbb7f1782021-12-13 11:16:04 -08001355 original_cm->l2_hdr_size += ETH_HLEN;
Matthew McClintockdb5ac512014-01-16 17:01:40 -06001356
1357 /*
1358 * If our dev writes Ethernet headers then we can write a really fast
1359 * version.
1360 */
1361 if (dest_dev->header_ops) {
1362 if (dest_dev->header_ops->create == eth_header) {
1363 original_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_WRITE_FAST_ETH_HDR;
1364 }
1365 }
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001366 }
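	/*
	 * A sketch of how the transmit path presumably consumes these flags
	 * (based on the header write in the SFE receive handlers; the names
	 * below are illustrative, not the exact fast-path code):
	 *
	 *	if (cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_WRITE_FAST_ETH_HDR) {
	 *		struct ethhdr *eth = (struct ethhdr *)__skb_push(skb, ETH_HLEN);
	 *
	 *		eth->h_proto = htons(ETH_P_IP);
	 *		ether_addr_copy(eth->h_dest, cm->xmit_dest_mac);
	 *		ether_addr_copy(eth->h_source, cm->xmit_src_mac);
	 *	} else {
	 *		dev_hard_header(skb, cm->xmit_dev, ETH_P_IP,
	 *				cm->xmit_dest_mac, cm->xmit_src_mac, skb->len);
	 *	}
	 */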
1367
1368 /*
1369 * Fill in the "reply" direction connection matching object.
1370 */
Matthew McClintockdb5ac512014-01-16 17:01:40 -06001371 reply_cm->match_dev = dest_dev;
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301372 reply_cm->match_protocol = tuple->protocol;
1373 reply_cm->match_src_ip = msg->conn_rule.return_ip_xlate;
Amitesh Anand63be37d2021-12-24 20:51:48 +05301374
1375 /*
1376 * Keep source port as 0 for VxLAN tunnels.
1377 */
1378 if (netif_is_vxlan(src_dev) || netif_is_vxlan(dest_dev)) {
1379 reply_cm->match_src_port = 0;
1380 } else {
1381 reply_cm->match_src_port = msg->conn_rule.return_ident_xlate;
1382 }
1383
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301384 reply_cm->match_dest_ip = msg->conn_rule.flow_ip_xlate;
1385 reply_cm->match_dest_port = msg->conn_rule.flow_ident_xlate;
1386
1387 reply_cm->xlate_src_ip = tuple->return_ip;
1388 reply_cm->xlate_src_port = tuple->return_ident;
1389 reply_cm->xlate_dest_ip = tuple->flow_ip;
Nitin Shettye6ed5b52021-12-27 14:50:11 +05301390 reply_cm->xlate_dest_port = tuple->flow_ident;
1391
1392 if (tuple->protocol == IPPROTO_GRE) {
1393 /*
1394	 * PPTP uses a 4-tuple lookup: during the rule lookup, the
1395	 * destination call ID from the packet is matched against the
1396	 * destination port in the cm, so the source port is zeroed.
1397 */
1398 reply_cm->match_src_port = 0;
1399 reply_cm->xlate_src_port = 0;
1400 }
Ratheesh Kannoth94fc5b82021-10-20 07:45:06 +05301401
1402 atomic_set(&reply_cm->rx_packet_count, 0);
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001403 reply_cm->rx_packet_count64 = 0;
Ratheesh Kannoth94fc5b82021-10-20 07:45:06 +05301404 atomic_set(&reply_cm->rx_byte_count, 0);
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001405 reply_cm->rx_byte_count64 = 0;
Ratheesh Kannoth94fc5b82021-10-20 07:45:06 +05301406
Matthew McClintockdb5ac512014-01-16 17:01:40 -06001407 reply_cm->xmit_dev = src_dev;
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301408 reply_cm->xmit_dev_mtu = msg->conn_rule.flow_mtu;
Ratheesh Kannoth29140aa2021-10-20 08:25:02 +05301409
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001410 reply_cm->connection = c;
1411 reply_cm->counter_match = original_cm;
Ken Zhu37040ea2021-09-09 21:11:15 -07001412
Ken Zhu37040ea2021-09-09 21:11:15 -07001413 if (msg->valid_flags & SFE_RULE_CREATE_MARK_VALID) {
1414 reply_cm->mark = msg->mark_rule.return_mark;
1415 reply_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_MARK;
1416 }
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301417 if (msg->valid_flags & SFE_RULE_CREATE_QOS_VALID) {
1418 reply_cm->priority = msg->qos_rule.return_qos_tag;
Xiaoping Fane1963d42015-08-25 17:06:19 -07001419 reply_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_PRIORITY_REMARK;
1420 }
Wayne Tanbb7f1782021-12-13 11:16:04 -08001421
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301422 if (msg->valid_flags & SFE_RULE_CREATE_DSCP_MARKING_VALID) {
1423 reply_cm->dscp = msg->dscp_rule.return_dscp << SFE_IPV4_DSCP_SHIFT;
Xiaoping Fane1963d42015-08-25 17:06:19 -07001424 reply_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_DSCP_REMARK;
1425 }
Ratheesh Kannoth71fc51e2022-01-05 10:02:47 +05301426 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_BRIDGE_FLOW) {
1427 reply_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_BRIDGE_FLOW;
1428 }
Ken Zhu7e38d1a2021-11-30 17:31:46 -08001429 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_RETURN_TRANSMIT_FAST) {
1430 reply_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_FAST_XMIT_DEV_ADMISSION;
1431 }
Ratheesh Kannoth71fc51e2022-01-05 10:02:47 +05301432
Nitin Shettye6ed5b52021-12-27 14:50:11 +05301433 if ((IPPROTO_GRE == tuple->protocol) && !sfe_ipv4_is_local_ip(si, reply_cm->match_dest_ip)) {
1434 reply_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_PASSTHROUGH;
1435 }
1436
Amitesh Anand63be37d2021-12-24 20:51:48 +05301437 /*
1438	 * Set up the UDP socket if it is found to be valid for decap.
1439 */
1440 RCU_INIT_POINTER(reply_cm->up, NULL);
1441 net = dev_net(reply_cm->match_dev);
1442 src_if_idx = src_dev->ifindex;
1443
1444 rcu_read_lock();
1445
1446 /*
1447 * Look for the associated sock object.
1448 * __udp4_lib_lookup() holds a reference for this sock object,
1449 * which will be released in sfe_ipv4_free_connection_rcu()
1450 */
1451#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0))
1452 sk = __udp4_lib_lookup(net, reply_cm->match_dest_ip, reply_cm->match_dest_port,
1453 reply_cm->xlate_src_ip, reply_cm->xlate_src_port, src_if_idx, &udp_table);
1454#else
1455 sk = __udp4_lib_lookup(net, reply_cm->match_dest_ip, reply_cm->match_dest_port,
1456 reply_cm->xlate_src_ip, reply_cm->xlate_src_port, src_if_idx, 0, &udp_table, NULL);
1457#endif
1458
1459 rcu_read_unlock();
1460
1461 /*
1462	 * We set the UDP sock pointer as valid only for the decap direction.
1463 */
1464 if (sk && udp_sk(sk)->encap_type) {
1465#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0))
1466 if (!atomic_add_unless(&sk->sk_refcnt, 1, 0)) {
1467#else
1468 if (!refcount_inc_not_zero(&sk->sk_refcnt)) {
1469#endif
Wayne Tanbb7f1782021-12-13 11:16:04 -08001470 spin_unlock_bh(&si->lock);
Amitesh Anand63be37d2021-12-24 20:51:48 +05301471 kfree(reply_cm);
1472 kfree(original_cm);
1473 kfree(c);
1474
1475 DEBUG_TRACE("%px: sfe: unable to take reference for socket(%px) p:%d\n"
1476 " s: %s:%pM:%pI4:%u, d: %s:%pM:%pI4:%u\n",
1477 msg, sk, tuple->protocol,
1478 src_dev->name, msg->conn_rule.flow_mac, &tuple->flow_ip, ntohs(tuple->flow_ident),
1479 dest_dev->name, msg->conn_rule.return_mac, &tuple->return_ip, ntohs(tuple->return_ident));
1480
1481 dev_put(src_dev);
1482 dev_put(dest_dev);
1483
1484 return -ESHUTDOWN;
1485 }
1486
1487 rcu_assign_pointer(reply_cm->up, udp_sk(sk));
1488
1489 DEBUG_INFO("%px: Sock(%px) lookup success with reply_cm direction\n", msg, sk);
1490 DEBUG_INFO("%px: SFE connection -\n"
1491 " s: %s:%pI4(%pI4):%u(%u)\n"
1492 " d: %s:%pI4(%pI4):%u(%u)\n",
1493 msg, reply_cm->match_dev->name, &reply_cm->match_src_ip, &reply_cm->xlate_src_ip,
1494 ntohs(reply_cm->match_src_port), ntohs(reply_cm->xlate_src_port),
1495 reply_cm->xmit_dev->name, &reply_cm->match_dest_ip, &reply_cm->xlate_dest_ip,
1496 ntohs(reply_cm->match_dest_port), ntohs(reply_cm->xlate_dest_port));
1497 }
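	/*
	 * A minimal sketch of how the cached socket is presumably consumed on
	 * the UDP decap fast path (see sfe_ipv4_udp.c; names are illustrative):
	 *
	 *	struct udp_sock *up = rcu_dereference(cm->up);
	 *
	 *	if (up && up->encap_rcv) {
	 *		up->encap_rcv((struct sock *)up, skb);
	 *	}
	 *
	 * The sock reference taken above keeps that dereference safe until
	 * sfe_ipv4_free_connection_rcu() releases it.
	 */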
1498
Wayne Tanbb7f1782021-12-13 11:16:04 -08001499 /*
1500 * Add VLAN rule to reply_cm
1501 */
1502 if (msg->valid_flags & SFE_RULE_CREATE_VLAN_VALID) {
1503 struct sfe_vlan_rule *vlan_primary_rule = &msg->vlan_primary_rule;
1504 struct sfe_vlan_rule *vlan_secondary_rule = &msg->vlan_secondary_rule;
1505 sfe_ipv4_match_entry_set_vlan(reply_cm,
1506 vlan_primary_rule->egress_vlan_tag,
1507 vlan_primary_rule->ingress_vlan_tag,
1508 vlan_secondary_rule->egress_vlan_tag,
1509 vlan_secondary_rule->ingress_vlan_tag);
1510
1511 if ((msg->rule_flags & SFE_RULE_CREATE_FLAG_USE_FLOW_BOTTOM_INTERFACE) &&
1512 reply_cm->egress_vlan_hdr_cnt > 0) {
1513 reply_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_INSERT_EGRESS_VLAN_TAG;
1514 reply_cm->l2_hdr_size += reply_cm->egress_vlan_hdr_cnt * VLAN_HLEN;
1515 }
1516 }
1517
Nitin Shettye6ed5b52021-12-27 14:50:11 +05301518 /*
1519	 * The net_protocol handler is used only in the decap path
1520	 * for the non-passthrough case.
1521 */
1522 original_cm->proto = NULL;
1523 reply_cm->proto = NULL;
1524
1525#ifdef SFE_GRE_TUN_ENABLE
1526 if ((IPPROTO_GRE == tuple->protocol) && !(reply_cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_PASSTHROUGH)) {
1527 rcu_read_lock();
1528 reply_cm->proto = rcu_dereference(inet_protos[IPPROTO_GRE]);
1529 rcu_read_unlock();
1530
1531 if (unlikely(!reply_cm->proto)) {
1532 kfree(reply_cm);
1533 kfree(original_cm);
1534 kfree(c);
1535 dev_put(src_dev);
1536 dev_put(dest_dev);
1537 DEBUG_WARN("sfe: GRE proto handler is not registered\n");
1538 return -EPERM;
1539 }
1540 }
1541#endif
1542
Xiaoping Fand1dc7b22015-01-23 00:43:56 -08001543#ifdef CONFIG_NF_FLOW_COOKIE
1544 reply_cm->flow_cookie = 0;
1545#endif
Zhi Chen8748eb32015-06-18 12:58:48 -07001546#ifdef CONFIG_XFRM
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301547 if (msg->valid_flags & SFE_RULE_CREATE_DIRECTION_VALID) {
1548 reply_cm->flow_accel = msg->direction_rule.return_accel;
1549 } else {
1550 reply_cm->flow_accel = 1;
1551 }
1552
Zhi Chen8748eb32015-06-18 12:58:48 -07001553#endif
Ratheesh Kannotha3cf0e02021-12-09 09:44:10 +05301554 /*
1555	 * If l2_features are disabled and the flow uses L2 features such as macvlan/bridge/PPPoE/VLAN,
1556	 * the bottom interfaces are expected to be disabled in the flow rule and only the top
1557	 * interfaces are used. In such cases, do not use HW csum offload; csum offload is used only
1558	 * when we are sending directly to a destination interface that supports it.
1559 */
Nitin Shettye6ed5b52021-12-27 14:50:11 +05301560 if (likely(src_dev->features & NETIF_F_HW_CSUM) && sfe_dev_has_hw_csum(src_dev)) {
Ratheesh Kannotha3cf0e02021-12-09 09:44:10 +05301561 if ((msg->conn_rule.flow_top_interface_num == msg->conn_rule.flow_interface_num) ||
1562 (msg->rule_flags & SFE_RULE_CREATE_FLAG_USE_FLOW_BOTTOM_INTERFACE)) {
Ratheesh Kannoth48445532022-02-07 16:19:00 +05301563 /*
1564	 * Don't enable CSUM offload
1565 */
1566#if 0
Ratheesh Kannotha3cf0e02021-12-09 09:44:10 +05301567 reply_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_CSUM_OFFLOAD;
Ratheesh Kannoth48445532022-02-07 16:19:00 +05301568#endif
Ratheesh Kannotha3cf0e02021-12-09 09:44:10 +05301569 }
1570 }
Matthew McClintockdb5ac512014-01-16 17:01:40 -06001571
1572 /*
Ken Zhubbf49652021-09-12 15:33:09 -07001573	 * For non-ARP interfaces, we don't write an L2 header.
Matthew McClintockdb5ac512014-01-16 17:01:40 -06001574 */
Guduri Prathyusha79a5fee2021-11-11 17:59:10 +05301575 if (sfe_ipv4_xmit_eth_type_check(src_dev, reply_cm->flags)) {
Ratheesh Kannoth29140aa2021-10-20 08:25:02 +05301576
1577 /*
1578 * Check whether the rule has configured a specific source MAC address to use.
1579	 * This is needed when virtual L3 interfaces such as br-lan, macvlan, or vlan are used during egress.
1580 */
Ratheesh Kannoth71fc51e2022-01-05 10:02:47 +05301581
1582 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_BRIDGE_FLOW) {
1583 ether_addr_copy((u8 *)reply_cm->xmit_src_mac, (u8 *)msg->conn_rule.return_mac);
Ratheesh Kannoth29140aa2021-10-20 08:25:02 +05301584 } else {
Ratheesh Kannoth71fc51e2022-01-05 10:02:47 +05301585 if ((msg->valid_flags & SFE_RULE_CREATE_SRC_MAC_VALID) &&
1586 (msg->src_mac_rule.mac_valid_flags & SFE_SRC_MAC_FLOW_VALID)) {
1587 ether_addr_copy((u8 *)reply_cm->xmit_src_mac, (u8 *)msg->src_mac_rule.flow_src_mac);
1588 } else {
1589 ether_addr_copy((u8 *)reply_cm->xmit_src_mac, (u8 *)src_dev->dev_addr);
1590 }
Ratheesh Kannoth29140aa2021-10-20 08:25:02 +05301591 }
Ratheesh Kannoth71fc51e2022-01-05 10:02:47 +05301592
Ratheesh Kannoth29140aa2021-10-20 08:25:02 +05301593 ether_addr_copy((u8 *)reply_cm->xmit_dest_mac, (u8 *)msg->conn_rule.flow_mac);
1594
Matthew McClintockdb5ac512014-01-16 17:01:40 -06001595 reply_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_WRITE_L2_HDR;
Wayne Tanbb7f1782021-12-13 11:16:04 -08001596 reply_cm->l2_hdr_size += ETH_HLEN;
Matthew McClintockdb5ac512014-01-16 17:01:40 -06001597
1598 /*
1599 * If our dev writes Ethernet headers then we can write a really fast
1600 * version.
1601 */
1602 if (src_dev->header_ops) {
1603 if (src_dev->header_ops->create == eth_header) {
1604 reply_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_WRITE_FAST_ETH_HDR;
1605 }
1606 }
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001607 }
1608
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301609 if ((tuple->return_ip != msg->conn_rule.return_ip_xlate) ||
1610 (tuple->return_ident != msg->conn_rule.return_ident_xlate)) {
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001611 original_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_XLATE_DEST;
1612 reply_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_XLATE_SRC;
1613 }
1614
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301615 if ((tuple->flow_ip != msg->conn_rule.flow_ip_xlate) ||
1616 (tuple->flow_ident != msg->conn_rule.flow_ident_xlate)) {
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001617 original_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_XLATE_SRC;
1618 reply_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_XLATE_DEST;
1619 }
1620
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001621 /*
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001622 * Initialize the protocol-specific information that we track.
1623 */
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301624 switch (tuple->protocol) {
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001625 case IPPROTO_TCP:
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301626 original_cm->protocol_state.tcp.win_scale = msg->tcp_rule.flow_window_scale;
1627 original_cm->protocol_state.tcp.max_win = msg->tcp_rule.flow_max_window ? msg->tcp_rule.flow_max_window : 1;
1628 original_cm->protocol_state.tcp.end = msg->tcp_rule.flow_end;
1629 original_cm->protocol_state.tcp.max_end = msg->tcp_rule.flow_max_end;
1630
1631 reply_cm->protocol_state.tcp.win_scale = msg->tcp_rule.return_window_scale;
1632 reply_cm->protocol_state.tcp.max_win = msg->tcp_rule.return_max_window ? msg->tcp_rule.return_max_window : 1;
1633 reply_cm->protocol_state.tcp.end = msg->tcp_rule.return_end;
1634 reply_cm->protocol_state.tcp.max_end = msg->tcp_rule.return_max_end;
1635
1636 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_NO_SEQ_CHECK) {
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001637 original_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_NO_SEQ_CHECK;
1638 reply_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_NO_SEQ_CHECK;
1639 }
1640 break;
1641 }
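	/*
	 * The values seeded above drive the per-packet TCP sequence-space
	 * checks in sfe_ipv4_tcp.c: segments that fall outside the tracked
	 * left/right window edges are exceptioned back to the slow path
	 * unless SFE_RULE_CREATE_FLAG_NO_SEQ_CHECK was requested.
	 */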
1642
Wayne Tanbb7f1782021-12-13 11:16:04 -08001643 /*
1644 * Fill in the ipv4_connection object.
1645 */
1646 c->protocol = tuple->protocol;
1647 c->src_ip = tuple->flow_ip;
1648 c->src_ip_xlate = msg->conn_rule.flow_ip_xlate;
1649 c->src_port = tuple->flow_ident;
1650 c->src_port_xlate = msg->conn_rule.flow_ident_xlate;
1651 c->original_dev = src_dev;
1652 c->original_match = original_cm;
1653 c->dest_ip = tuple->return_ip;
1654 c->dest_ip_xlate = msg->conn_rule.return_ip_xlate;
1655 c->dest_port = tuple->return_ident;
1656 c->dest_port_xlate = msg->conn_rule.return_ident_xlate;
1657 c->reply_dev = dest_dev;
1658 c->reply_match = reply_cm;
1659 c->debug_read_seq = 0;
1660 c->last_sync_jiffies = get_jiffies_64();
1661 c->removed = false;
1662
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001663 sfe_ipv4_connection_match_compute_translations(original_cm);
1664 sfe_ipv4_connection_match_compute_translations(reply_cm);
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05301665 sfe_ipv4_insert_connection(si, c);
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001666
1667 spin_unlock_bh(&si->lock);
1668
1669 /*
1670 * We have everything we need!
1671 */
Wayne Tanbb7f1782021-12-13 11:16:04 -08001672 DEBUG_INFO("NEW connection - p: %d\n"
1673 "original_cm: match_dev=src_dev: %s %d %pM\n"
1674 " xmit_dev=dest_dev: %s %d %pM\n"
1675 " xmit_src_mac: %pM\n"
1676 " xmit_dest_mac: %pM\n"
1677 " flags: %x l2_hdr: %u\n"
1678 "flow_ip: %pI4:%u\n"
1679 "flow_ip_xlate: %pI4:%u\n"
1680 "flow_mac: %pM\n"
1681 "reply_cm: match_dev=dest_dev: %s %d %pM\n"
1682 " xmit_dev=src_dev: %s %d %pM\n"
1683 " xmit_src_mac: %pM\n"
1684 " xmit_dest_mac: %pM\n"
1685 " flags: %x l2_hdr: %u\n"
1686 "return_ip: %pI4:%u\n"
1687 "return_ip_xlate: %pI4:%u\n"
1688 "return_mac: %pM\n"
1689 "flags: valid=%x src_mac_valid=%x\n",
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301690 tuple->protocol,
Wayne Tanbb7f1782021-12-13 11:16:04 -08001691 original_cm->match_dev->name, original_cm->match_dev->ifindex, original_cm->match_dev->dev_addr,
1692 original_cm->xmit_dev->name, original_cm->xmit_dev->ifindex, original_cm->xmit_dev->dev_addr,
1693 original_cm->xmit_src_mac, original_cm->xmit_dest_mac, original_cm->flags, original_cm->l2_hdr_size,
1694 &tuple->flow_ip, ntohs(tuple->flow_ident),
1695 &msg->conn_rule.flow_ip_xlate, ntohs(msg->conn_rule.flow_ident_xlate),
1696 msg->conn_rule.flow_mac,
1697 reply_cm->match_dev->name, reply_cm->match_dev->ifindex, reply_cm->match_dev->dev_addr,
1698 reply_cm->xmit_dev->name, reply_cm->xmit_dev->ifindex, reply_cm->xmit_dev->dev_addr,
1699 reply_cm->xmit_src_mac, reply_cm->xmit_dest_mac, reply_cm->flags, reply_cm->l2_hdr_size,
1700 &tuple->return_ip, ntohs(tuple->return_ident),
1701 &msg->conn_rule.return_ip_xlate, ntohs(msg->conn_rule.return_ident_xlate),
1702 msg->conn_rule.return_mac,
1703 msg->valid_flags, msg->src_mac_rule.mac_valid_flags);
Nicolas Costa514fde02014-01-13 15:50:29 -06001704
1705 return 0;
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001706}
1707
1708/*
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001709 * sfe_ipv4_destroy_rule()
1710 * Destroy a forwarding rule.
1711 */
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301712void sfe_ipv4_destroy_rule(struct sfe_ipv4_rule_destroy_msg *msg)
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001713{
Dave Hudsondcd08fb2013-11-22 09:25:16 -06001714 struct sfe_ipv4 *si = &__si;
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001715 struct sfe_ipv4_connection *c;
Ratheesh Kannoth94fc5b82021-10-20 07:45:06 +05301716 bool ret;
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301717 struct sfe_ipv4_5tuple *tuple = &msg->tuple;
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001718
Ratheesh Kannoth3aeb2892021-10-20 07:57:15 +05301719 this_cpu_inc(si->stats_pcpu->connection_destroy_requests64);
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001720 spin_lock_bh(&si->lock);
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001721
1722 /*
1723 * Check to see if we have a flow that matches the rule we're trying
1724 * to destroy. If there isn't then we can't destroy it.
1725 */
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05301726 c = sfe_ipv4_find_connection(si, tuple->protocol, tuple->flow_ip, tuple->flow_ident,
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301727 tuple->return_ip, tuple->return_ident);
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001728 if (!c) {
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001729 spin_unlock_bh(&si->lock);
Ratheesh Kannoth3aeb2892021-10-20 07:57:15 +05301730 this_cpu_inc(si->stats_pcpu->connection_destroy_misses64);
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001731
1732 DEBUG_TRACE("connection does not exist - p: %d, s: %pI4:%u, d: %pI4:%u\n",
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301733 tuple->protocol, &tuple->flow_ip, ntohs(tuple->flow_ident),
1734 &tuple->return_ip, ntohs(tuple->return_ident));
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001735 return;
1736 }
1737
1738 /*
1739 * Remove our connection details from the hash tables.
1740 */
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05301741 ret = sfe_ipv4_remove_connection(si, c);
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001742 spin_unlock_bh(&si->lock);
1743
Ratheesh Kannoth94fc5b82021-10-20 07:45:06 +05301744 if (ret) {
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05301745 sfe_ipv4_flush_connection(si, c, SFE_SYNC_REASON_DESTROY);
Ratheesh Kannoth94fc5b82021-10-20 07:45:06 +05301746 }
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001747
1748 DEBUG_INFO("connection destroyed - p: %d, s: %pI4:%u, d: %pI4:%u\n",
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301749 tuple->protocol, &tuple->flow_ip, ntohs(tuple->flow_ident),
1750 &tuple->return_ip, ntohs(tuple->return_ident));
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001751}
1752
1753/*
Dave Hudsondcd08fb2013-11-22 09:25:16 -06001754 * sfe_ipv4_register_sync_rule_callback()
1755 * Register a callback for rule synchronization.
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001756 */
Xiaoping Fand44a5b42015-05-26 17:37:37 -07001757void sfe_ipv4_register_sync_rule_callback(sfe_sync_rule_callback_t sync_rule_callback)
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001758{
1759 struct sfe_ipv4 *si = &__si;
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001760
1761 spin_lock_bh(&si->lock);
Dave Hudsondcd08fb2013-11-22 09:25:16 -06001762 rcu_assign_pointer(si->sync_rule_callback, sync_rule_callback);
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001763 spin_unlock_bh(&si->lock);
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001764}
1765
1766/*
1767 * sfe_ipv4_get_debug_dev()
1768 */
1769static ssize_t sfe_ipv4_get_debug_dev(struct device *dev,
1770 struct device_attribute *attr,
1771 char *buf)
1772{
1773 struct sfe_ipv4 *si = &__si;
1774 ssize_t count;
1775 int num;
1776
1777 spin_lock_bh(&si->lock);
1778 num = si->debug_dev;
1779 spin_unlock_bh(&si->lock);
1780
1781 count = snprintf(buf, (ssize_t)PAGE_SIZE, "%d\n", num);
1782 return count;
1783}
1784
1785/*
Dave Hudsondcd08fb2013-11-22 09:25:16 -06001786 * sysfs attributes.
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001787 */
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001788static const struct device_attribute sfe_ipv4_debug_dev_attr =
Xiaoping Fane70da412016-02-26 16:47:57 -08001789 __ATTR(debug_dev, S_IWUSR | S_IRUGO, sfe_ipv4_get_debug_dev, NULL);
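/*
 * Illustrative userspace usage (assuming the sfe_ipv4 kobject created at init
 * under /sys/sfe_ipv4, and the char device number this attribute reports):
 *
 *	major=$(cat /sys/sfe_ipv4/debug_dev)
 *	mknod /dev/sfe_ipv4 c "$major" 0
 *	cat /dev/sfe_ipv4	# dumps connection/exception/stats state as XML
 */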
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001790
1791/*
Dave Hudsondcd08fb2013-11-22 09:25:16 -06001792 * sfe_ipv4_destroy_all_rules_for_dev()
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001793 * Destroy all connections that match a particular device.
1794 *
1795 * If we pass dev as NULL then this destroys all connections.
1796 */
Dave Hudsondcd08fb2013-11-22 09:25:16 -06001797void sfe_ipv4_destroy_all_rules_for_dev(struct net_device *dev)
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001798{
Dave Hudsondcd08fb2013-11-22 09:25:16 -06001799 struct sfe_ipv4 *si = &__si;
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001800 struct sfe_ipv4_connection *c;
Ratheesh Kannoth94fc5b82021-10-20 07:45:06 +05301801 bool ret;
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001802
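	/*
	 * Each round flushes at most one connection: the lock must be dropped
	 * to call sfe_ipv4_flush_connection(), and the list can change while
	 * the lock is not held, so the walk restarts from the head each time.
	 */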
Xiaoping Fan34586472015-07-03 02:20:35 -07001803another_round:
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001804 spin_lock_bh(&si->lock);
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001805
Xiaoping Fan34586472015-07-03 02:20:35 -07001806 for (c = si->all_connections_head; c; c = c->all_connections_next) {
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001807 /*
Xiaoping Fan34586472015-07-03 02:20:35 -07001808 * Does this connection relate to the device we are destroying?
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001809 */
1810 if (!dev
1811 || (dev == c->original_dev)
1812 || (dev == c->reply_dev)) {
Xiaoping Fan34586472015-07-03 02:20:35 -07001813 break;
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001814 }
Xiaoping Fan34586472015-07-03 02:20:35 -07001815 }
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001816
Xiaoping Fan34586472015-07-03 02:20:35 -07001817 if (c) {
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05301818 ret = sfe_ipv4_remove_connection(si, c);
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001819 }
1820
1821 spin_unlock_bh(&si->lock);
Xiaoping Fan34586472015-07-03 02:20:35 -07001822
1823 if (c) {
Ratheesh Kannoth94fc5b82021-10-20 07:45:06 +05301824 if (ret) {
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05301825 sfe_ipv4_flush_connection(si, c, SFE_SYNC_REASON_DESTROY);
Ratheesh Kannoth94fc5b82021-10-20 07:45:06 +05301826 }
Xiaoping Fan34586472015-07-03 02:20:35 -07001827 goto another_round;
1828 }
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001829}
1830
1831/*
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001832 * sfe_ipv4_periodic_sync()
1833 */
Ken Zhu137722d2021-09-23 17:57:36 -07001834static void sfe_ipv4_periodic_sync(struct work_struct *work)
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001835{
Ken Zhu137722d2021-09-23 17:57:36 -07001836 struct sfe_ipv4 *si = container_of((struct delayed_work *)work, struct sfe_ipv4, sync_dwork);
Xiaoping Fan6a1672f2016-08-17 19:58:12 -07001837 u64 now_jiffies;
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001838 int quota;
Xiaoping Fand44a5b42015-05-26 17:37:37 -07001839 sfe_sync_rule_callback_t sync_rule_callback;
Ken Zhudc423672021-09-02 18:27:01 -07001840 struct sfe_ipv4_connection *c;
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001841
1842 now_jiffies = get_jiffies_64();
1843
Dave Hudsondcd08fb2013-11-22 09:25:16 -06001844 rcu_read_lock();
1845 sync_rule_callback = rcu_dereference(si->sync_rule_callback);
1846 if (!sync_rule_callback) {
1847 rcu_read_unlock();
1848 goto done;
1849 }
1850
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001851 spin_lock_bh(&si->lock);
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001852
1853 /*
Ken Zhudc423672021-09-02 18:27:01 -07001854 * If we have reached the end of the connection list, walk from
1855 * the connection head.
1856 */
1857 c = si->wc_next;
1858 if (unlikely(!c)) {
1859 c = si->all_connections_head;
1860 }
1861
1862 /*
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001863	 * Get an estimate of the number of connections to parse in this sync:
	 * ceil(num_connections / 64), so a full sweep takes roughly 64 sync periods.
1864 */
1865 quota = (si->num_connections + 63) / 64;
1866
1867 /*
Ken Zhudc423672021-09-02 18:27:01 -07001868 * Walk the "all connection" list and sync the connection state.
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001869 */
Ken Zhudc423672021-09-02 18:27:01 -07001870 while (likely(c && quota)) {
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001871 struct sfe_ipv4_connection_match *cm;
1872 struct sfe_ipv4_connection_match *counter_cm;
Xiaoping Fand44a5b42015-05-26 17:37:37 -07001873 struct sfe_connection_sync sis;
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001874
Ken Zhudc423672021-09-02 18:27:01 -07001875 cm = c->original_match;
1876 counter_cm = c->reply_match;
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001877
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001878 /*
Ken Zhudc423672021-09-02 18:27:01 -07001879 * Didn't receive packets in the original direction or reply
1880 * direction, move to the next connection.
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001881 */
Ken Zhudc423672021-09-02 18:27:01 -07001882 if ((!atomic_read(&cm->rx_packet_count)) && !(atomic_read(&counter_cm->rx_packet_count))) {
1883 c = c->all_connections_next;
1884 continue;
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001885 }
1886
Ken Zhudc423672021-09-02 18:27:01 -07001887 quota--;
Matthew McClintockaf48f1e2014-01-23 15:29:19 -06001888
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05301889 sfe_ipv4_gen_sync_connection(si, c, &sis, SFE_SYNC_REASON_STATS, now_jiffies);
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001890
Ken Zhudc423672021-09-02 18:27:01 -07001891 si->wc_next = c->all_connections_next;
1892
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001893 /*
1894 * We don't want to be holding the lock when we sync!
1895 */
1896 spin_unlock_bh(&si->lock);
Dave Hudsondcd08fb2013-11-22 09:25:16 -06001897 sync_rule_callback(&sis);
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001898 spin_lock_bh(&si->lock);
Ken Zhudc423672021-09-02 18:27:01 -07001899
1900 /*
1901	 * c must be re-read and used within the same lock/unlock window:
1902	 * the connection could be removed while the lock is not held, so
1903	 * pick up wc_next only after the callback returns and we relock.
1904 */
1905 c = si->wc_next;
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001906 }
1907
Ken Zhudc423672021-09-02 18:27:01 -07001908 /*
1909 * At the end of the sync, put the wc_next to the connection we left.
1910 */
1911 si->wc_next = c;
1912
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001913 spin_unlock_bh(&si->lock);
Dave Hudsondcd08fb2013-11-22 09:25:16 -06001914 rcu_read_unlock();
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001915
Dave Hudsondcd08fb2013-11-22 09:25:16 -06001916done:
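	/*
	 * Re-arm the stats sync work on the configured CPU roughly every
	 * 10ms: (HZ + 99) / 100 is ceil(HZ / 100) jiffies.
	 */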
Ken Zhu137722d2021-09-23 17:57:36 -07001917 schedule_delayed_work_on(si->work_cpu, (struct delayed_work *)work, ((HZ + 99) / 100));
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001918}
1919
1920#define CHAR_DEV_MSG_SIZE 768
1921
1922/*
1923 * sfe_ipv4_debug_dev_read_start()
1924 * Generate part of the XML output.
1925 */
1926static bool sfe_ipv4_debug_dev_read_start(struct sfe_ipv4 *si, char *buffer, char *msg, size_t *length,
1927 int *total_read, struct sfe_ipv4_debug_xml_write_state *ws)
1928{
1929 int bytes_read;
1930
Xiaoping Fan34586472015-07-03 02:20:35 -07001931 si->debug_read_seq++;
1932
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001933 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE, "<sfe_ipv4>\n");
1934 if (copy_to_user(buffer + *total_read, msg, CHAR_DEV_MSG_SIZE)) {
1935 return false;
1936 }
1937
1938 *length -= bytes_read;
1939 *total_read += bytes_read;
1940
1941 ws->state++;
1942 return true;
1943}
1944
1945/*
1946 * sfe_ipv4_debug_dev_read_connections_start()
1947 * Generate part of the XML output.
1948 */
1949static bool sfe_ipv4_debug_dev_read_connections_start(struct sfe_ipv4 *si, char *buffer, char *msg, size_t *length,
1950 int *total_read, struct sfe_ipv4_debug_xml_write_state *ws)
1951{
1952 int bytes_read;
1953
1954 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE, "\t<connections>\n");
1955 if (copy_to_user(buffer + *total_read, msg, CHAR_DEV_MSG_SIZE)) {
1956 return false;
1957 }
1958
1959 *length -= bytes_read;
1960 *total_read += bytes_read;
1961
1962 ws->state++;
1963 return true;
1964}
1965
1966/*
1967 * sfe_ipv4_debug_dev_read_connections_connection()
1968 * Generate part of the XML output.
1969 */
1970static bool sfe_ipv4_debug_dev_read_connections_connection(struct sfe_ipv4 *si, char *buffer, char *msg, size_t *length,
1971 int *total_read, struct sfe_ipv4_debug_xml_write_state *ws)
1972{
1973 struct sfe_ipv4_connection *c;
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001974 struct sfe_ipv4_connection_match *original_cm;
1975 struct sfe_ipv4_connection_match *reply_cm;
1976 int bytes_read;
1977 int protocol;
1978 struct net_device *src_dev;
Dave Hudson87973cd2013-10-22 16:00:04 +01001979 __be32 src_ip;
1980 __be32 src_ip_xlate;
1981 __be16 src_port;
1982 __be16 src_port_xlate;
Xiaoping Fan6a1672f2016-08-17 19:58:12 -07001983 u64 src_rx_packets;
1984 u64 src_rx_bytes;
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01001985 struct net_device *dest_dev;
Dave Hudson87973cd2013-10-22 16:00:04 +01001986 __be32 dest_ip;
1987 __be32 dest_ip_xlate;
1988 __be16 dest_port;
1989 __be16 dest_port_xlate;
Xiaoping Fan6a1672f2016-08-17 19:58:12 -07001990 u64 dest_rx_packets;
1991 u64 dest_rx_bytes;
1992 u64 last_sync_jiffies;
Ken Zhu37040ea2021-09-09 21:11:15 -07001993 u32 src_mark, dest_mark, src_priority, dest_priority, src_dscp, dest_dscp;
Guduri Prathyushaeb31c902021-11-10 20:18:50 +05301994 u32 packet, byte, original_cm_flags;
1995 u16 pppoe_session_id;
1996 u8 pppoe_remote_mac[ETH_ALEN];
Ken Zhu7e38d1a2021-11-30 17:31:46 -08001997 u32 original_fast_xmit, reply_fast_xmit;
Xiaoping Fand1dc7b22015-01-23 00:43:56 -08001998#ifdef CONFIG_NF_FLOW_COOKIE
1999 int src_flow_cookie, dst_flow_cookie;
2000#endif
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002001
2002 spin_lock_bh(&si->lock);
Xiaoping Fan34586472015-07-03 02:20:35 -07002003
2004 for (c = si->all_connections_head; c; c = c->all_connections_next) {
2005 if (c->debug_read_seq < si->debug_read_seq) {
2006 c->debug_read_seq = si->debug_read_seq;
2007 break;
2008 }
2009 }
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002010
2011 /*
Xiaoping Fan34586472015-07-03 02:20:35 -07002012 * If there were no connections then move to the next state.
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002013 */
Ratheesh Kannoth94fc5b82021-10-20 07:45:06 +05302014 if (!c || c->removed) {
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002015 spin_unlock_bh(&si->lock);
Xiaoping Fan34586472015-07-03 02:20:35 -07002016 ws->state++;
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002017 return true;
2018 }
2019
2020 original_cm = c->original_match;
2021 reply_cm = c->reply_match;
2022
2023 protocol = c->protocol;
2024 src_dev = c->original_dev;
2025 src_ip = c->src_ip;
2026 src_ip_xlate = c->src_ip_xlate;
2027 src_port = c->src_port;
2028 src_port_xlate = c->src_port_xlate;
Xiaoping Fane1963d42015-08-25 17:06:19 -07002029 src_priority = original_cm->priority;
2030 src_dscp = original_cm->dscp >> SFE_IPV4_DSCP_SHIFT;
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002031
Ratheesh Kannoth94fc5b82021-10-20 07:45:06 +05302032 sfe_ipv4_connection_match_update_summary_stats(original_cm, &packet, &byte);
2033 sfe_ipv4_connection_match_update_summary_stats(reply_cm, &packet, &byte);
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002034
2035 src_rx_packets = original_cm->rx_packet_count64;
2036 src_rx_bytes = original_cm->rx_byte_count64;
Ken Zhu37040ea2021-09-09 21:11:15 -07002037 src_mark = original_cm->mark;
Ken Zhu7e38d1a2021-11-30 17:31:46 -08002038 original_fast_xmit = (original_cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_FAST_XMIT);
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002039 dest_dev = c->reply_dev;
2040 dest_ip = c->dest_ip;
2041 dest_ip_xlate = c->dest_ip_xlate;
2042 dest_port = c->dest_port;
2043 dest_port_xlate = c->dest_port_xlate;
Xiaoping Fane1963d42015-08-25 17:06:19 -07002044 dest_priority = reply_cm->priority;
2045 dest_dscp = reply_cm->dscp >> SFE_IPV4_DSCP_SHIFT;
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002046 dest_rx_packets = reply_cm->rx_packet_count64;
2047 dest_rx_bytes = reply_cm->rx_byte_count64;
Ken Zhu37040ea2021-09-09 21:11:15 -07002048 dest_mark = reply_cm->mark;
Ken Zhu7e38d1a2021-11-30 17:31:46 -08002049 reply_fast_xmit = (reply_cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_FAST_XMIT);
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002050 last_sync_jiffies = get_jiffies_64() - c->last_sync_jiffies;
Guduri Prathyushaeb31c902021-11-10 20:18:50 +05302051 original_cm_flags = original_cm->flags;
2052 pppoe_session_id = original_cm->pppoe_session_id;
2053 ether_addr_copy(pppoe_remote_mac, original_cm->pppoe_remote_mac);
2054
Xiaoping Fand1dc7b22015-01-23 00:43:56 -08002055#ifdef CONFIG_NF_FLOW_COOKIE
2056 src_flow_cookie = original_cm->flow_cookie;
2057 dst_flow_cookie = reply_cm->flow_cookie;
2058#endif
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002059 spin_unlock_bh(&si->lock);
2060
2061 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE, "\t\t<connection "
2062 "protocol=\"%u\" "
2063 "src_dev=\"%s\" "
2064 "src_ip=\"%pI4\" src_ip_xlate=\"%pI4\" "
2065 "src_port=\"%u\" src_port_xlate=\"%u\" "
Xiaoping Fane1963d42015-08-25 17:06:19 -07002066 "src_priority=\"%u\" src_dscp=\"%u\" "
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002067 "src_rx_pkts=\"%llu\" src_rx_bytes=\"%llu\" "
Ken Zhu37040ea2021-09-09 21:11:15 -07002068 "src_mark=\"%08x\" "
Ken Zhu7e38d1a2021-11-30 17:31:46 -08002069 "src_fast_xmit=\"%s\" "
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002070 "dest_dev=\"%s\" "
2071 "dest_ip=\"%pI4\" dest_ip_xlate=\"%pI4\" "
2072 "dest_port=\"%u\" dest_port_xlate=\"%u\" "
Xiaoping Fane1963d42015-08-25 17:06:19 -07002073 "dest_priority=\"%u\" dest_dscp=\"%u\" "
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002074 "dest_rx_pkts=\"%llu\" dest_rx_bytes=\"%llu\" "
Ken Zhu37040ea2021-09-09 21:11:15 -07002075 "dest_mark=\"%08x\" "
Ken Zhu7e38d1a2021-11-30 17:31:46 -08002076 "reply_fast_xmit=\"%s\" "
Xiaoping Fand1dc7b22015-01-23 00:43:56 -08002077#ifdef CONFIG_NF_FLOW_COOKIE
2078 "src_flow_cookie=\"%d\" dst_flow_cookie=\"%d\" "
2079#endif
Ken Zhu37040ea2021-09-09 21:11:15 -07002080 "last_sync=\"%llu\" ",
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002081 protocol,
2082 src_dev->name,
2083 &src_ip, &src_ip_xlate,
Dave Hudson87973cd2013-10-22 16:00:04 +01002084 ntohs(src_port), ntohs(src_port_xlate),
Xiaoping Fane1963d42015-08-25 17:06:19 -07002085 src_priority, src_dscp,
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002086 src_rx_packets, src_rx_bytes,
Ken Zhu37040ea2021-09-09 21:11:15 -07002087 src_mark,
Ken Zhu7e38d1a2021-11-30 17:31:46 -08002088 original_fast_xmit ? "Yes" : "No",
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002089 dest_dev->name,
2090 &dest_ip, &dest_ip_xlate,
Dave Hudson87973cd2013-10-22 16:00:04 +01002091 ntohs(dest_port), ntohs(dest_port_xlate),
Xiaoping Fane1963d42015-08-25 17:06:19 -07002092 dest_priority, dest_dscp,
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002093 dest_rx_packets, dest_rx_bytes,
Ken Zhu37040ea2021-09-09 21:11:15 -07002094 dest_mark,
Ken Zhu7e38d1a2021-11-30 17:31:46 -08002095 reply_fast_xmit ? "Yes" : "No",
Xiaoping Fand1dc7b22015-01-23 00:43:56 -08002096#ifdef CONFIG_NF_FLOW_COOKIE
2097 src_flow_cookie, dst_flow_cookie,
2098#endif
Ken Zhu37040ea2021-09-09 21:11:15 -07002099 last_sync_jiffies);
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002100
Guduri Prathyushaeb31c902021-11-10 20:18:50 +05302101	if (original_cm_flags & (SFE_IPV4_CONNECTION_MATCH_FLAG_PPPOE_DECAP | SFE_IPV4_CONNECTION_MATCH_FLAG_PPPOE_ENCAP)) {
Guduri Prathyusha79a5fee2021-11-11 17:59:10 +05302102		bytes_read += snprintf(msg + bytes_read, CHAR_DEV_MSG_SIZE - bytes_read, "pppoe_session_id=\"%u\" pppoe_server_mac=\"%pM\" ",
Guduri Prathyushaeb31c902021-11-10 20:18:50 +05302103 pppoe_session_id, pppoe_remote_mac);
2104 }
2105
2106	bytes_read += snprintf(msg + bytes_read, CHAR_DEV_MSG_SIZE - bytes_read, "/>\n");
2107
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002108 if (copy_to_user(buffer + *total_read, msg, CHAR_DEV_MSG_SIZE)) {
2109 return false;
2110 }
2111
2112 *length -= bytes_read;
2113 *total_read += bytes_read;
2114
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002115 return true;
2116}
2117
2118/*
2119 * sfe_ipv4_debug_dev_read_connections_end()
2120 * Generate part of the XML output.
2121 */
2122static bool sfe_ipv4_debug_dev_read_connections_end(struct sfe_ipv4 *si, char *buffer, char *msg, size_t *length,
2123 int *total_read, struct sfe_ipv4_debug_xml_write_state *ws)
2124{
2125 int bytes_read;
2126
2127 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE, "\t</connections>\n");
2128 if (copy_to_user(buffer + *total_read, msg, CHAR_DEV_MSG_SIZE)) {
2129 return false;
2130 }
2131
2132 *length -= bytes_read;
2133 *total_read += bytes_read;
2134
2135 ws->state++;
2136 return true;
2137}
2138
2139/*
2140 * sfe_ipv4_debug_dev_read_exceptions_start()
2141 * Generate part of the XML output.
2142 */
2143static bool sfe_ipv4_debug_dev_read_exceptions_start(struct sfe_ipv4 *si, char *buffer, char *msg, size_t *length,
2144 int *total_read, struct sfe_ipv4_debug_xml_write_state *ws)
2145{
2146 int bytes_read;
2147
2148 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE, "\t<exceptions>\n");
2149 if (copy_to_user(buffer + *total_read, msg, CHAR_DEV_MSG_SIZE)) {
2150 return false;
2151 }
2152
2153 *length -= bytes_read;
2154 *total_read += bytes_read;
2155
2156 ws->state++;
2157 return true;
2158}
2159
2160/*
2161 * sfe_ipv4_debug_dev_read_exceptions_exception()
2162 * Generate part of the XML output.
2163 */
2164static bool sfe_ipv4_debug_dev_read_exceptions_exception(struct sfe_ipv4 *si, char *buffer, char *msg, size_t *length,
2165 int *total_read, struct sfe_ipv4_debug_xml_write_state *ws)
2166{
Ratheesh Kannoth3aeb2892021-10-20 07:57:15 +05302167 int i;
2168 u64 val = 0;
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002169
Ratheesh Kannoth3aeb2892021-10-20 07:57:15 +05302170 for_each_possible_cpu(i) {
2171 const struct sfe_ipv4_stats *s = per_cpu_ptr(si->stats_pcpu, i);
2172 val += s->exception_events64[ws->iter_exception];
2173 }
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002174
Ratheesh Kannoth3aeb2892021-10-20 07:57:15 +05302175 if (val) {
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002176 int bytes_read;
2177
2178 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE,
2179 "\t\t<exception name=\"%s\" count=\"%llu\" />\n",
2180 sfe_ipv4_exception_events_string[ws->iter_exception],
Ratheesh Kannoth3aeb2892021-10-20 07:57:15 +05302181 val);
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002182 if (copy_to_user(buffer + *total_read, msg, CHAR_DEV_MSG_SIZE)) {
2183 return false;
2184 }
2185
2186 *length -= bytes_read;
2187 *total_read += bytes_read;
2188 }
2189
2190 ws->iter_exception++;
2191 if (ws->iter_exception >= SFE_IPV4_EXCEPTION_EVENT_LAST) {
2192 ws->iter_exception = 0;
2193 ws->state++;
2194 }
2195
2196 return true;
2197}
2198
2199/*
2200 * sfe_ipv4_debug_dev_read_exceptions_end()
2201 * Generate part of the XML output.
2202 */
2203static bool sfe_ipv4_debug_dev_read_exceptions_end(struct sfe_ipv4 *si, char *buffer, char *msg, size_t *length,
2204 int *total_read, struct sfe_ipv4_debug_xml_write_state *ws)
2205{
2206 int bytes_read;
2207
2208 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE, "\t</exceptions>\n");
2209 if (copy_to_user(buffer + *total_read, msg, CHAR_DEV_MSG_SIZE)) {
2210 return false;
2211 }
2212
2213 *length -= bytes_read;
2214 *total_read += bytes_read;
2215
2216 ws->state++;
2217 return true;
2218}
2219
2220/*
2221 * sfe_ipv4_debug_dev_read_stats()
2222 * Generate part of the XML output.
2223 */
2224static bool sfe_ipv4_debug_dev_read_stats(struct sfe_ipv4 *si, char *buffer, char *msg, size_t *length,
2225 int *total_read, struct sfe_ipv4_debug_xml_write_state *ws)
2226{
2227 int bytes_read;
Ratheesh Kannoth3aeb2892021-10-20 07:57:15 +05302228 struct sfe_ipv4_stats stats;
2229 unsigned int num_conn;
2230
2231 sfe_ipv4_update_summary_stats(si, &stats);
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002232
2233 spin_lock_bh(&si->lock);
Ratheesh Kannoth3aeb2892021-10-20 07:57:15 +05302234 num_conn = si->num_connections;
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002235 spin_unlock_bh(&si->lock);
2236
2237 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE, "\t<stats "
2238 "num_connections=\"%u\" "
Amitesh Anand63be37d2021-12-24 20:51:48 +05302239 "pkts_dropped=\"%llu\" "
Ken Zhu7e38d1a2021-11-30 17:31:46 -08002240 "pkts_fast_xmited=\"%llu\" "
Xiaoping Fan59176422015-05-22 15:58:10 -07002241 "pkts_forwarded=\"%llu\" pkts_not_forwarded=\"%llu\" "
2242 "create_requests=\"%llu\" create_collisions=\"%llu\" "
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05302243 "create_failures=\"%llu\" "
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002244 "destroy_requests=\"%llu\" destroy_misses=\"%llu\" "
2245 "flushes=\"%llu\" "
Guduri Prathyusha647fe3e2021-11-22 19:17:51 +05302246 "hash_hits=\"%llu\" hash_reorders=\"%llu\" "
2247 "pppoe_encap_pkts_fwded=\"%llu\" "
Guduri Prathyusha034d6352022-01-12 16:49:04 +05302248 "pppoe_decap_pkts_fwded=\"%llu\" "
2249 "pppoe_bridge_pkts_fwded=\"%llu\" />\n",
Ratheesh Kannoth3aeb2892021-10-20 07:57:15 +05302250 num_conn,
Amitesh Anand63be37d2021-12-24 20:51:48 +05302251 stats.packets_dropped64,
Ken Zhu7e38d1a2021-11-30 17:31:46 -08002252 stats.packets_fast_xmited64,
Ratheesh Kannoth3aeb2892021-10-20 07:57:15 +05302253 stats.packets_forwarded64,
2254 stats.packets_not_forwarded64,
2255 stats.connection_create_requests64,
2256 stats.connection_create_collisions64,
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05302257 stats.connection_create_failures64,
Ratheesh Kannoth3aeb2892021-10-20 07:57:15 +05302258 stats.connection_destroy_requests64,
2259 stats.connection_destroy_misses64,
2260 stats.connection_flushes64,
2261 stats.connection_match_hash_hits64,
Guduri Prathyusha647fe3e2021-11-22 19:17:51 +05302262 stats.connection_match_hash_reorders64,
2263 stats.pppoe_encap_packets_forwarded64,
Guduri Prathyusha034d6352022-01-12 16:49:04 +05302264 stats.pppoe_decap_packets_forwarded64,
2265 stats.pppoe_bridge_packets_forwarded64);
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002266 if (copy_to_user(buffer + *total_read, msg, CHAR_DEV_MSG_SIZE)) {
2267 return false;
2268 }
2269
2270 *length -= bytes_read;
2271 *total_read += bytes_read;
2272
2273 ws->state++;
2274 return true;
2275}
2276
2277/*
2278 * sfe_ipv4_debug_dev_read_end()
2279 * Generate part of the XML output.
2280 */
2281static bool sfe_ipv4_debug_dev_read_end(struct sfe_ipv4 *si, char *buffer, char *msg, size_t *length,
2282 int *total_read, struct sfe_ipv4_debug_xml_write_state *ws)
2283{
2284 int bytes_read;
2285
2286 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE, "</sfe_ipv4>\n");
2287 if (copy_to_user(buffer + *total_read, msg, CHAR_DEV_MSG_SIZE)) {
2288 return false;
2289 }
2290
2291 *length -= bytes_read;
2292 *total_read += bytes_read;
2293
2294 ws->state++;
2295 return true;
2296}
2297
2298/*
2299 * Array of write functions that write various XML elements that correspond to
2300 * our XML output state machine.
2301 */
Xiaoping Fan6a1672f2016-08-17 19:58:12 -07002302static sfe_ipv4_debug_xml_write_method_t sfe_ipv4_debug_xml_write_methods[SFE_IPV4_DEBUG_XML_STATE_DONE] = {
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002303 sfe_ipv4_debug_dev_read_start,
2304 sfe_ipv4_debug_dev_read_connections_start,
2305 sfe_ipv4_debug_dev_read_connections_connection,
2306 sfe_ipv4_debug_dev_read_connections_end,
2307 sfe_ipv4_debug_dev_read_exceptions_start,
2308 sfe_ipv4_debug_dev_read_exceptions_exception,
2309 sfe_ipv4_debug_dev_read_exceptions_end,
2310 sfe_ipv4_debug_dev_read_stats,
2311 sfe_ipv4_debug_dev_read_end,
2312};
2313
2314/*
2315 * sfe_ipv4_debug_dev_read()
2316 * Send info to userspace upon read request from user
2317 */
2318static ssize_t sfe_ipv4_debug_dev_read(struct file *filp, char *buffer, size_t length, loff_t *offset)
2319{
2320 char msg[CHAR_DEV_MSG_SIZE];
2321 int total_read = 0;
2322 struct sfe_ipv4_debug_xml_write_state *ws;
2323 struct sfe_ipv4 *si = &__si;
2324
2325 ws = (struct sfe_ipv4_debug_xml_write_state *)filp->private_data;
2326 while ((ws->state != SFE_IPV4_DEBUG_XML_STATE_DONE) && (length > CHAR_DEV_MSG_SIZE)) {
2327		if (!(sfe_ipv4_debug_xml_write_methods[ws->state])(si, buffer, msg, &length, &total_read, ws)) {
2328			break;
2329		}
2330 }
2331
2332 return total_read;
2333}
2334
2335/*
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002336 * sfe_ipv4_debug_dev_open()
2337 */
2338static int sfe_ipv4_debug_dev_open(struct inode *inode, struct file *file)
2339{
2340 struct sfe_ipv4_debug_xml_write_state *ws;
2341
2342 ws = (struct sfe_ipv4_debug_xml_write_state *)file->private_data;
2343 if (!ws) {
2344 ws = kzalloc(sizeof(struct sfe_ipv4_debug_xml_write_state), GFP_KERNEL);
2345 if (!ws) {
2346 return -ENOMEM;
2347 }
2348
2349 ws->state = SFE_IPV4_DEBUG_XML_STATE_START;
2350 file->private_data = ws;
2351 }
2352
2353 return 0;
2354}
2355
2356/*
2357 * sfe_ipv4_debug_dev_release()
2358 */
2359static int sfe_ipv4_debug_dev_release(struct inode *inode, struct file *file)
2360{
2361 struct sfe_ipv4_debug_xml_write_state *ws;
2362
2363 ws = (struct sfe_ipv4_debug_xml_write_state *)file->private_data;
2364 if (ws) {
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002365 /*
2366 * We've finished with our output so free the write state.
2367 */
2368 kfree(ws);
Ratheesh Kannoth94fc5b82021-10-20 07:45:06 +05302369 file->private_data = NULL;
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002370 }
2371
2372 return 0;
2373}
2374
2375/*
2376 * File operations used in the debug char device
2377 */
2378static struct file_operations sfe_ipv4_debug_dev_fops = {
2379 .read = sfe_ipv4_debug_dev_read,
Dave Hudsonaaf97ca2013-06-13 17:52:29 +01002380 .open = sfe_ipv4_debug_dev_open,
2381 .release = sfe_ipv4_debug_dev_release
2382};
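/*
 * Each read() call above emits at most CHAR_DEV_MSG_SIZE bytes and only makes
 * progress while the caller offers more than CHAR_DEV_MSG_SIZE bytes of buffer,
 * so userspace should read with a buffer larger than that.
 */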
2383
Xiaoping Fand1dc7b22015-01-23 00:43:56 -08002384#ifdef CONFIG_NF_FLOW_COOKIE
2385/*
2386 * sfe_register_flow_cookie_cb
2387 *	Register a function with SFE that SFE can use to configure the flow cookie for a flow.
2388 *
2389 *	A hardware driver that supports flow cookies should register a callback function with SFE;
2390 *	SFE then uses this function to configure the flow cookie for a flow.
2391 * return: 0, success; !=0, fail
2392 */
2393int sfe_register_flow_cookie_cb(flow_cookie_set_func_t cb)
2394{
2395 struct sfe_ipv4 *si = &__si;
2396
2397 BUG_ON(!cb);
2398
2399 if (si->flow_cookie_set_func) {
2400 return -1;
2401 }
2402
2403 rcu_assign_pointer(si->flow_cookie_set_func, cb);
2404 return 0;
2405}
2406
2407/*
2408 * sfe_unregister_flow_cookie_cb
2409 *	Unregister the function that is used to configure the flow cookie for a flow.
2410 *
2411 * return: 0, success; !=0, fail
2412 */
2413int sfe_unregister_flow_cookie_cb(flow_cookie_set_func_t cb)
2414{
2415 struct sfe_ipv4 *si = &__si;
2416
2417 RCU_INIT_POINTER(si->flow_cookie_set_func, NULL);
2418 return 0;
2419}
Xiaoping Fan640faf42015-08-28 15:50:55 -07002420
2421/*
2422 * sfe_ipv4_get_flow_cookie()
2423 */
2424static ssize_t sfe_ipv4_get_flow_cookie(struct device *dev,
2425 struct device_attribute *attr,
2426 char *buf)
2427{
2428 struct sfe_ipv4 *si = &__si;
Xiaoping Fan01c67cc2015-11-09 11:31:57 -08002429 return snprintf(buf, (ssize_t)PAGE_SIZE, "%d\n", si->flow_cookie_enable);
Xiaoping Fan640faf42015-08-28 15:50:55 -07002430}
2431
2432/*
2433 * sfe_ipv4_set_flow_cookie()
2434 */
2435static ssize_t sfe_ipv4_set_flow_cookie(struct device *dev,
2436 struct device_attribute *attr,
2437 const char *buf, size_t size)
2438{
2439 struct sfe_ipv4 *si = &__si;
Ken Zhu137722d2021-09-23 17:57:36 -07002440 si->flow_cookie_enable = simple_strtol(buf, NULL, 0);
Xiaoping Fan640faf42015-08-28 15:50:55 -07002441
2442 return size;
2443}
2444
2445/*
2446 * sysfs attributes.
2447 */
2448static const struct device_attribute sfe_ipv4_flow_cookie_attr =
Xiaoping Fane70da412016-02-26 16:47:57 -08002449 __ATTR(flow_cookie_enable, S_IWUSR | S_IRUGO, sfe_ipv4_get_flow_cookie, sfe_ipv4_set_flow_cookie);
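/*
 * Illustrative usage (assuming the sfe_ipv4 kobject at /sys/sfe_ipv4):
 *
 *	echo 1 > /sys/sfe_ipv4/flow_cookie_enable
 *	cat /sys/sfe_ipv4/flow_cookie_enable
 */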
#endif /*CONFIG_NF_FLOW_COOKIE*/

/*
 * sfe_ipv4_get_cpu()
 */
static ssize_t sfe_ipv4_get_cpu(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct sfe_ipv4 *si = &__si;

	return snprintf(buf, (ssize_t)PAGE_SIZE, "%d\n", si->work_cpu);
}

/*
 * sfe_ipv4_set_cpu()
 */
static ssize_t sfe_ipv4_set_cpu(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	struct sfe_ipv4 *si = &__si;
	int work_cpu;

	work_cpu = simple_strtol(buf, NULL, 0);

	/*
	 * NR_CPUS is accepted in addition to 0..NR_CPUS-1 because it equals
	 * WORK_CPU_UNBOUND, which leaves the stats sync work unbound.
	 */
	if ((work_cpu >= 0) && (work_cpu <= NR_CPUS)) {
		si->work_cpu = work_cpu;
	} else {
		dev_err(dev, "%s is not in valid range [0,%d]\n", buf, NR_CPUS);
	}

	return size;
}

/*
 * sysfs attributes.
 */
static const struct device_attribute sfe_ipv4_cpu_attr =
	__ATTR(stats_work_cpu, S_IWUSR | S_IRUGO, sfe_ipv4_get_cpu, sfe_ipv4_set_cpu);
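
/*
 * Illustrative shell usage for the knob above (the CPU number is an example;
 * the path follows from the kobject created in sfe_ipv4_init()):
 *
 *	echo 2 > /sys/sfe_ipv4/stats_work_cpu
 *	cat /sys/sfe_ipv4/stats_work_cpu
 */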

/*
 * sfe_ipv4_conn_match_hash_init()
 *	Initialize conn match hash lists
 */
static void sfe_ipv4_conn_match_hash_init(struct sfe_ipv4 *si, int len)
{
	struct hlist_head *hash_list = si->hlist_conn_match_hash_head;
	int i;

	for (i = 0; i < len; i++) {
		INIT_HLIST_HEAD(&hash_list[i]);
	}
}

#ifdef SFE_PROCESS_LOCAL_OUT
/*
 * sfe_ipv4_local_out()
 *	Called for packets from ip_local_out() - post encapsulation & other packets.
 */
static unsigned int sfe_ipv4_local_out(void *priv, struct sk_buff *skb, const struct nf_hook_state *nhs)
{
	struct sfe_l2_info l2_info = {0};

	DEBUG_TRACE("%px: sfe: sfe_ipv4_local_out hook called.\n", skb);

	/*
	 * Only packets that arrived on an interface (skb_iif != 0) are
	 * fast-path candidates; purely locally generated packets are left
	 * to the normal stack.
	 */
	if (likely(skb->skb_iif)) {
		return sfe_ipv4_recv(skb->dev, skb, &l2_info, true) ? NF_STOLEN : NF_ACCEPT;
	}

	return NF_ACCEPT;
}

/*
 * struct nf_hook_ops sfe_ipv4_ops_local_out[]
 *	Hooks into netfilter local out packet monitoring points.
 */
static struct nf_hook_ops sfe_ipv4_ops_local_out[] __read_mostly = {

	/*
	 * Local out routing hook is used to monitor packets.
	 */
	{
		.hook = sfe_ipv4_local_out,
		.pf = PF_INET,
		.hooknum = NF_INET_LOCAL_OUT,
		.priority = NF_IP_PRI_FIRST,
	},
};
#endif

/*
 * sfe_ipv4_init()
 */
int sfe_ipv4_init(void)
{
	struct sfe_ipv4 *si = &__si;
	int result = -1;

	DEBUG_INFO("SFE IPv4 init\n");

	sfe_ipv4_conn_match_hash_init(si, ARRAY_SIZE(si->hlist_conn_match_hash_head));

	si->stats_pcpu = alloc_percpu_gfp(struct sfe_ipv4_stats, GFP_KERNEL | __GFP_ZERO);
	if (!si->stats_pcpu) {
		DEBUG_ERROR("failed to allocate stats memory for sfe_ipv4\n");
		goto exit0;
	}

	/*
	 * Create sys/sfe_ipv4
	 */
	si->sys_ipv4 = kobject_create_and_add("sfe_ipv4", NULL);
	if (!si->sys_ipv4) {
		DEBUG_ERROR("failed to register sfe_ipv4\n");
		goto exit1;
	}

	/*
	 * Create files, one for each parameter supported by this module.
	 */
	result = sysfs_create_file(si->sys_ipv4, &sfe_ipv4_debug_dev_attr.attr);
	if (result) {
		DEBUG_ERROR("failed to register debug dev file: %d\n", result);
		goto exit2;
	}

	result = sysfs_create_file(si->sys_ipv4, &sfe_ipv4_cpu_attr.attr);
	if (result) {
		DEBUG_ERROR("failed to register stats work cpu file: %d\n", result);
		goto exit3;
	}

#ifdef CONFIG_NF_FLOW_COOKIE
	result = sysfs_create_file(si->sys_ipv4, &sfe_ipv4_flow_cookie_attr.attr);
	if (result) {
		DEBUG_ERROR("failed to register flow cookie enable file: %d\n", result);
		goto exit4;
	}
#endif /* CONFIG_NF_FLOW_COOKIE */

#ifdef SFE_PROCESS_LOCAL_OUT
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0))
	result = nf_register_hooks(sfe_ipv4_ops_local_out, ARRAY_SIZE(sfe_ipv4_ops_local_out));
#else
	result = nf_register_net_hooks(&init_net, sfe_ipv4_ops_local_out, ARRAY_SIZE(sfe_ipv4_ops_local_out));
#endif
	if (result < 0) {
		DEBUG_ERROR("can't register nf local out hook: %d\n", result);
		goto exit5;
	}
	DEBUG_INFO("Registered nf local out hook successfully: %d\n", result);
#endif

	/*
	 * Register our debug char device.
	 */
	result = register_chrdev(0, "sfe_ipv4", &sfe_ipv4_debug_dev_fops);
	if (result < 0) {
		DEBUG_ERROR("Failed to register chrdev: %d\n", result);
		goto exit6;
	}

	si->debug_dev = result;
	si->work_cpu = WORK_CPU_UNBOUND;

	/*
	 * Create a work to handle periodic statistics; the first run is
	 * roughly 10ms out (HZ/100 jiffies, rounded up so the delay is
	 * never zero).
	 */
	INIT_DELAYED_WORK(&(si->sync_dwork), sfe_ipv4_periodic_sync);
	schedule_delayed_work_on(si->work_cpu, &(si->sync_dwork), ((HZ + 99) / 100));

	spin_lock_init(&si->lock);
	return 0;

exit6:
#ifdef SFE_PROCESS_LOCAL_OUT
	DEBUG_TRACE("sfe: Unregister local out hook\n");
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0))
	nf_unregister_hooks(sfe_ipv4_ops_local_out, ARRAY_SIZE(sfe_ipv4_ops_local_out));
#else
	nf_unregister_net_hooks(&init_net, sfe_ipv4_ops_local_out, ARRAY_SIZE(sfe_ipv4_ops_local_out));
#endif
exit5:
#endif
#ifdef CONFIG_NF_FLOW_COOKIE
	sysfs_remove_file(si->sys_ipv4, &sfe_ipv4_flow_cookie_attr.attr);

exit4:
#endif /* CONFIG_NF_FLOW_COOKIE */
	sysfs_remove_file(si->sys_ipv4, &sfe_ipv4_cpu_attr.attr);
exit3:
	sysfs_remove_file(si->sys_ipv4, &sfe_ipv4_debug_dev_attr.attr);

exit2:
	kobject_put(si->sys_ipv4);

exit1:
	free_percpu(si->stats_pcpu);

exit0:
	return result;
}

/*
 * sfe_ipv4_exit()
 */
void sfe_ipv4_exit(void)
{
	struct sfe_ipv4 *si = &__si;

	DEBUG_INFO("SFE IPv4 exit\n");

	/*
	 * Destroy all connections.
	 */
	sfe_ipv4_destroy_all_rules_for_dev(NULL);

	cancel_delayed_work_sync(&si->sync_dwork);

	unregister_chrdev(si->debug_dev, "sfe_ipv4");

#ifdef SFE_PROCESS_LOCAL_OUT
	DEBUG_TRACE("sfe: Unregister local out hook\n");
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0))
	nf_unregister_hooks(sfe_ipv4_ops_local_out, ARRAY_SIZE(sfe_ipv4_ops_local_out));
#else
	nf_unregister_net_hooks(&init_net, sfe_ipv4_ops_local_out, ARRAY_SIZE(sfe_ipv4_ops_local_out));
#endif
#endif

#ifdef CONFIG_NF_FLOW_COOKIE
	sysfs_remove_file(si->sys_ipv4, &sfe_ipv4_flow_cookie_attr.attr);
#endif /* CONFIG_NF_FLOW_COOKIE */
	sysfs_remove_file(si->sys_ipv4, &sfe_ipv4_debug_dev_attr.attr);

	sysfs_remove_file(si->sys_ipv4, &sfe_ipv4_cpu_attr.attr);

	kobject_put(si->sys_ipv4);

	free_percpu(si->stats_pcpu);
}

#ifdef CONFIG_NF_FLOW_COOKIE
EXPORT_SYMBOL(sfe_register_flow_cookie_cb);
EXPORT_SYMBOL(sfe_unregister_flow_cookie_cb);
#endif