blob: 5776fe272f35fad0c9835b721859b9c7e059e6ba [file] [log] [blame]
Xiaoping Fan978b3772015-05-27 14:15:18 -07001/*
2 * sfe_ipv6.c
3 * Shortcut forwarding engine - IPv6 support.
4 *
Ratheesh Kannoth24fb1db2021-10-20 07:28:06 +05305 * Copyright (c) 2015-2016, 2019-2020, The Linux Foundation. All rights reserved.
Guduri Prathyusha5f27e232022-01-06 14:39:04 +05306 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
Ratheesh Kannoth24fb1db2021-10-20 07:28:06 +05307 *
8 * Permission to use, copy, modify, and/or distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
Xiaoping Fana42c68b2015-08-07 18:00:39 -070012 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
Ratheesh Kannoth24fb1db2021-10-20 07:28:06 +053017 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
Xiaoping Fan978b3772015-05-27 14:15:18 -070019 */
20
21#include <linux/module.h>
22#include <linux/sysfs.h>
23#include <linux/skbuff.h>
24#include <linux/icmp.h>
25#include <net/tcp.h>
26#include <linux/etherdevice.h>
Tian Yang45f39c82020-10-06 14:07:47 -070027#include <linux/version.h>
Suruchi Suman23a279d2021-11-16 15:13:09 +053028#include <net/udp.h>
29#include <net/vxlan.h>
30#include <linux/refcount.h>
31#include <linux/netfilter.h>
32#include <linux/inetdevice.h>
33#include <linux/netfilter_ipv6.h>
Parikshit Guned31a8202022-01-05 22:15:04 +053034#include <linux/seqlock.h>
Tian Yangafb03452022-01-13 18:53:13 -080035#include <net/protocol.h>
Nitin Shettye6ed5b52021-12-27 14:50:11 +053036#include <net/addrconf.h>
37#include <net/gre.h>
38
Ratheesh Kannoth24fb1db2021-10-20 07:28:06 +053039#include "sfe_debug.h"
Ratheesh Kannoth89302a72021-10-20 08:10:37 +053040#include "sfe_api.h"
Xiaoping Fan978b3772015-05-27 14:15:18 -070041#include "sfe.h"
Ratheesh Kannoth24fb1db2021-10-20 07:28:06 +053042#include "sfe_flow_cookie.h"
43#include "sfe_ipv6.h"
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +053044#include "sfe_ipv6_udp.h"
45#include "sfe_ipv6_tcp.h"
46#include "sfe_ipv6_icmp.h"
Wayne Tanbb7f1782021-12-13 11:16:04 -080047#include "sfe_pppoe.h"
Tian Yangafb03452022-01-13 18:53:13 -080048#include "sfe_ipv6_tunipip6.h"
Nitin Shettye6ed5b52021-12-27 14:50:11 +053049#include "sfe_ipv6_gre.h"
Xiaoping Fan978b3772015-05-27 14:15:18 -070050
Ratheesh Kannoth89302a72021-10-20 08:10:37 +053051#define sfe_ipv6_addr_copy(src, dest) memcpy((void *)(dest), (void *)(src), 16)
52
Xiaoping Fan978b3772015-05-27 14:15:18 -070053static char *sfe_ipv6_exception_events_string[SFE_IPV6_EXCEPTION_EVENT_LAST] = {
54 "UDP_HEADER_INCOMPLETE",
55 "UDP_NO_CONNECTION",
56 "UDP_IP_OPTIONS_OR_INITIAL_FRAGMENT",
57 "UDP_SMALL_TTL",
58 "UDP_NEEDS_FRAGMENTATION",
59 "TCP_HEADER_INCOMPLETE",
60 "TCP_NO_CONNECTION_SLOW_FLAGS",
61 "TCP_NO_CONNECTION_FAST_FLAGS",
62 "TCP_IP_OPTIONS_OR_INITIAL_FRAGMENT",
63 "TCP_SMALL_TTL",
64 "TCP_NEEDS_FRAGMENTATION",
65 "TCP_FLAGS",
66 "TCP_SEQ_EXCEEDS_RIGHT_EDGE",
67 "TCP_SMALL_DATA_OFFS",
68 "TCP_BAD_SACK",
69 "TCP_BIG_DATA_OFFS",
70 "TCP_SEQ_BEFORE_LEFT_EDGE",
71 "TCP_ACK_EXCEEDS_RIGHT_EDGE",
72 "TCP_ACK_BEFORE_LEFT_EDGE",
73 "ICMP_HEADER_INCOMPLETE",
74 "ICMP_UNHANDLED_TYPE",
75 "ICMP_IPV6_HEADER_INCOMPLETE",
76 "ICMP_IPV6_NON_V6",
77 "ICMP_IPV6_IP_OPTIONS_INCOMPLETE",
78 "ICMP_IPV6_UDP_HEADER_INCOMPLETE",
79 "ICMP_IPV6_TCP_HEADER_INCOMPLETE",
80 "ICMP_IPV6_UNHANDLED_PROTOCOL",
81 "ICMP_NO_CONNECTION",
82 "ICMP_FLUSHED_CONNECTION",
83 "HEADER_INCOMPLETE",
84 "BAD_TOTAL_LENGTH",
85 "NON_V6",
86 "NON_INITIAL_FRAGMENT",
87 "DATAGRAM_INCOMPLETE",
88 "IP_OPTIONS_INCOMPLETE",
89 "UNHANDLED_PROTOCOL",
Ratheesh Kannoth5dee3772022-01-18 11:27:14 +053090 "FLOW_COOKIE_ADD_FAIL",
Nitin Shetty16ab38d2022-02-09 01:26:19 +053091 "NO_HEADROOM",
92 "INVALID_PPPOE_SESSION",
93 "INCORRECT_PPPOE_PARSING",
94 "PPPOE_NOT_SET_IN_CME",
95 "INGRESS_VLAN_TAG_MISMATCH",
Ratheesh Kannoth5dee3772022-01-18 11:27:14 +053096 "INVALID_SOURCE_INTERFACE",
Tian Yangafb03452022-01-13 18:53:13 -080097 "TUNIPIP6_HEADER_INCOMPLETE",
98 "TUNIPIP6_NO_CONNECTION",
99 "TUNIPIP6_IP_OPTIONS_OR_INITIAL_FRAGMENT",
100 "TUNIPIP6_SMALL_TTL",
101 "TUNIPIP6_NEEDS_FRAGMENTATION",
Nitin Shetty16ab38d2022-02-09 01:26:19 +0530102 "TUNIPIP6_SYNC_ON_FIND",
Nitin Shettye6ed5b52021-12-27 14:50:11 +0530103 "GRE_HEADER_INCOMPLETE",
104 "GRE_NO_CONNECTION",
105 "GRE_IP_OPTIONS_OR_INITIAL_FRAGMENT",
106 "GRE_SMALL_TTL",
107 "GRE_NEEDS_FRAGMENTATION"
Xiaoping Fan978b3772015-05-27 14:15:18 -0700108};
109
Xiaoping Fan6a1672f2016-08-17 19:58:12 -0700110static struct sfe_ipv6 __si6;
Ken Zhu7a43d882022-01-04 10:51:44 -0800111struct sfe_ipv6_msg *sfe_ipv6_sync_many_msg;
112uint32_t sfe_ipv6_sync_max_number;
Xiaoping Fan978b3772015-05-27 14:15:18 -0700113
114/*
115 * sfe_ipv6_get_debug_dev()
116 */
117static ssize_t sfe_ipv6_get_debug_dev(struct device *dev, struct device_attribute *attr, char *buf);
118
119/*
120 * sysfs attributes.
121 */
122static const struct device_attribute sfe_ipv6_debug_dev_attr =
Xiaoping Fane70da412016-02-26 16:47:57 -0800123 __ATTR(debug_dev, S_IWUSR | S_IRUGO, sfe_ipv6_get_debug_dev, NULL);
Xiaoping Fan978b3772015-05-27 14:15:18 -0700124
125/*
Xiaoping Fan978b3772015-05-27 14:15:18 -0700126 * sfe_ipv6_get_connection_match_hash()
127 * Generate the hash used in connection match lookups.
128 */
Xiaoping Fan6a1672f2016-08-17 19:58:12 -0700129static inline unsigned int sfe_ipv6_get_connection_match_hash(struct net_device *dev, u8 protocol,
Xiaoping Fan978b3772015-05-27 14:15:18 -0700130 struct sfe_ipv6_addr *src_ip, __be16 src_port,
131 struct sfe_ipv6_addr *dest_ip, __be16 dest_port)
132{
Xiaoping Fan6a1672f2016-08-17 19:58:12 -0700133 u32 idx, hash = 0;
Xiaoping Fan978b3772015-05-27 14:15:18 -0700134
135 for (idx = 0; idx < 4; idx++) {
136 hash ^= src_ip->addr[idx] ^ dest_ip->addr[idx];
137 }
Ratheesh Kannoth5dee3772022-01-18 11:27:14 +0530138 hash = hash ^ protocol ^ ntohs(src_port ^ dest_port);
Xiaoping Fan978b3772015-05-27 14:15:18 -0700139 return ((hash >> SFE_IPV6_CONNECTION_HASH_SHIFT) ^ hash) & SFE_IPV6_CONNECTION_HASH_MASK;
140}
141
142/*
Ratheesh Kannotha212fc52021-10-20 07:50:32 +0530143 * sfe_ipv6_find_connection_match_rcu()
Xiaoping Fan978b3772015-05-27 14:15:18 -0700144 * Get the IPv6 flow match info that corresponds to a particular 5-tuple.
Xiaoping Fan978b3772015-05-27 14:15:18 -0700145 */
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +0530146struct sfe_ipv6_connection_match *
Ratheesh Kannotha212fc52021-10-20 07:50:32 +0530147sfe_ipv6_find_connection_match_rcu(struct sfe_ipv6 *si, struct net_device *dev, u8 protocol,
Xiaoping Fan978b3772015-05-27 14:15:18 -0700148 struct sfe_ipv6_addr *src_ip, __be16 src_port,
149 struct sfe_ipv6_addr *dest_ip, __be16 dest_port)
150{
Ratheesh Kannotha212fc52021-10-20 07:50:32 +0530151 struct sfe_ipv6_connection_match *cm = NULL;
Xiaoping Fan978b3772015-05-27 14:15:18 -0700152 unsigned int conn_match_idx;
Ratheesh Kannotha212fc52021-10-20 07:50:32 +0530153 struct hlist_head *lhead;
154 WARN_ON_ONCE(!rcu_read_lock_held());
Xiaoping Fan978b3772015-05-27 14:15:18 -0700155
156 conn_match_idx = sfe_ipv6_get_connection_match_hash(dev, protocol, src_ip, src_port, dest_ip, dest_port);
Xiaoping Fan978b3772015-05-27 14:15:18 -0700157
Ratheesh Kannotha212fc52021-10-20 07:50:32 +0530158 lhead = &si->hlist_conn_match_hash_head[conn_match_idx];
Xiaoping Fan978b3772015-05-27 14:15:18 -0700159
160 /*
161 * Hopefully the first entry is the one we want.
162 */
Ratheesh Kannotha212fc52021-10-20 07:50:32 +0530163 hlist_for_each_entry_rcu(cm, lhead, hnode) {
164 if ((cm->match_dest_port != dest_port) ||
165 (!sfe_ipv6_addr_equal(cm->match_src_ip, src_ip)) ||
166 (!sfe_ipv6_addr_equal(cm->match_dest_ip, dest_ip)) ||
167 (cm->match_protocol != protocol) ||
168 (cm->match_dev != dev)) {
169 continue;
170 }
171
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +0530172 this_cpu_inc(si->stats_pcpu->connection_match_hash_hits64);
Xiaoping Fan978b3772015-05-27 14:15:18 -0700173
Ratheesh Kannotha212fc52021-10-20 07:50:32 +0530174 break;
Xiaoping Fan978b3772015-05-27 14:15:18 -0700175
Xiaoping Fan978b3772015-05-27 14:15:18 -0700176 }
177
Xiaoping Fan978b3772015-05-27 14:15:18 -0700178 return cm;
179}
180
181/*
182 * sfe_ipv6_connection_match_update_summary_stats()
183 * Update the summary stats for a connection match entry.
184 */
Ratheesh Kannotha212fc52021-10-20 07:50:32 +0530185static inline void sfe_ipv6_connection_match_update_summary_stats(struct sfe_ipv6_connection_match *cm,
186 u32 *packets, u32 *bytes)
187
Xiaoping Fan978b3772015-05-27 14:15:18 -0700188{
Ratheesh Kannotha212fc52021-10-20 07:50:32 +0530189 u32 packet_count, byte_count;
190
191 packet_count = atomic_read(&cm->rx_packet_count);
192 cm->rx_packet_count64 += packet_count;
193 atomic_sub(packet_count, &cm->rx_packet_count);
194
195 byte_count = atomic_read(&cm->rx_byte_count);
196 cm->rx_byte_count64 += byte_count;
197 atomic_sub(byte_count, &cm->rx_byte_count);
198
199 *packets = packet_count;
200 *bytes = byte_count;
Xiaoping Fan978b3772015-05-27 14:15:18 -0700201}
202
203/*
204 * sfe_ipv6_connection_match_compute_translations()
205 * Compute port and address translations for a connection match entry.
206 */
207static void sfe_ipv6_connection_match_compute_translations(struct sfe_ipv6_connection_match *cm)
208{
Xiaoping Fan6a1672f2016-08-17 19:58:12 -0700209 u32 diff[9];
210 u32 *idx_32;
211 u16 *idx_16;
Xiaoping Fan978b3772015-05-27 14:15:18 -0700212
213 /*
214 * Before we insert the entry look to see if this is tagged as doing address
215 * translations. If it is then work out the adjustment that we need to apply
216 * to the transport checksum.
217 */
218 if (cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_XLATE_SRC) {
Xiaoping Fan6a1672f2016-08-17 19:58:12 -0700219 u32 adj = 0;
220 u32 carry = 0;
Xiaoping Fan978b3772015-05-27 14:15:18 -0700221
222 /*
223 * Precompute an incremental checksum adjustment so we can
224 * edit packets in this stream very quickly. The algorithm is from RFC1624.
225 */
226 idx_32 = diff;
Ratheesh Kannoth741f7992021-10-20 07:39:52 +0530227 *(idx_32++) = cm->match_src_ip[0].addr[0];
228 *(idx_32++) = cm->match_src_ip[0].addr[1];
229 *(idx_32++) = cm->match_src_ip[0].addr[2];
230 *(idx_32++) = cm->match_src_ip[0].addr[3];
Xiaoping Fan978b3772015-05-27 14:15:18 -0700231
Xiaoping Fan6a1672f2016-08-17 19:58:12 -0700232 idx_16 = (u16 *)idx_32;
Xiaoping Fan978b3772015-05-27 14:15:18 -0700233 *(idx_16++) = cm->match_src_port;
234 *(idx_16++) = ~cm->xlate_src_port;
Xiaoping Fan6a1672f2016-08-17 19:58:12 -0700235 idx_32 = (u32 *)idx_16;
Xiaoping Fan978b3772015-05-27 14:15:18 -0700236
Ratheesh Kannoth741f7992021-10-20 07:39:52 +0530237 *(idx_32++) = ~cm->xlate_src_ip[0].addr[0];
238 *(idx_32++) = ~cm->xlate_src_ip[0].addr[1];
239 *(idx_32++) = ~cm->xlate_src_ip[0].addr[2];
240 *(idx_32++) = ~cm->xlate_src_ip[0].addr[3];
Xiaoping Fan978b3772015-05-27 14:15:18 -0700241
242 /*
243 * When we compute this fold it down to a 16-bit offset
244 * as that way we can avoid having to do a double
245 * folding of the twos-complement result because the
246 * addition of 2 16-bit values cannot cause a double
247 * wrap-around!
248 */
249 for (idx_32 = diff; idx_32 < diff + 9; idx_32++) {
Xiaoping Fan6a1672f2016-08-17 19:58:12 -0700250 u32 w = *idx_32;
Xiaoping Fan978b3772015-05-27 14:15:18 -0700251 adj += carry;
252 adj += w;
253 carry = (w > adj);
254 }
255 adj += carry;
256 adj = (adj & 0xffff) + (adj >> 16);
257 adj = (adj & 0xffff) + (adj >> 16);
Xiaoping Fan6a1672f2016-08-17 19:58:12 -0700258 cm->xlate_src_csum_adjustment = (u16)adj;
Xiaoping Fan978b3772015-05-27 14:15:18 -0700259 }
260
261 if (cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_XLATE_DEST) {
Xiaoping Fan6a1672f2016-08-17 19:58:12 -0700262 u32 adj = 0;
263 u32 carry = 0;
Xiaoping Fan978b3772015-05-27 14:15:18 -0700264
265 /*
266 * Precompute an incremental checksum adjustment so we can
267 * edit packets in this stream very quickly. The algorithm is from RFC1624.
268 */
269 idx_32 = diff;
Ratheesh Kannoth741f7992021-10-20 07:39:52 +0530270 *(idx_32++) = cm->match_dest_ip[0].addr[0];
271 *(idx_32++) = cm->match_dest_ip[0].addr[1];
272 *(idx_32++) = cm->match_dest_ip[0].addr[2];
273 *(idx_32++) = cm->match_dest_ip[0].addr[3];
Xiaoping Fan978b3772015-05-27 14:15:18 -0700274
Xiaoping Fan6a1672f2016-08-17 19:58:12 -0700275 idx_16 = (u16 *)idx_32;
Xiaoping Fan978b3772015-05-27 14:15:18 -0700276 *(idx_16++) = cm->match_dest_port;
277 *(idx_16++) = ~cm->xlate_dest_port;
Xiaoping Fan6a1672f2016-08-17 19:58:12 -0700278 idx_32 = (u32 *)idx_16;
Xiaoping Fan978b3772015-05-27 14:15:18 -0700279
Ratheesh Kannoth741f7992021-10-20 07:39:52 +0530280 *(idx_32++) = ~cm->xlate_dest_ip[0].addr[0];
281 *(idx_32++) = ~cm->xlate_dest_ip[0].addr[1];
282 *(idx_32++) = ~cm->xlate_dest_ip[0].addr[2];
283 *(idx_32++) = ~cm->xlate_dest_ip[0].addr[3];
Xiaoping Fan978b3772015-05-27 14:15:18 -0700284
285 /*
286 * When we compute this fold it down to a 16-bit offset
287 * as that way we can avoid having to do a double
288 * folding of the twos-complement result because the
289 * addition of 2 16-bit values cannot cause a double
290 * wrap-around!
291 */
292 for (idx_32 = diff; idx_32 < diff + 9; idx_32++) {
Xiaoping Fan6a1672f2016-08-17 19:58:12 -0700293 u32 w = *idx_32;
Xiaoping Fan978b3772015-05-27 14:15:18 -0700294 adj += carry;
295 adj += w;
296 carry = (w > adj);
297 }
298 adj += carry;
299 adj = (adj & 0xffff) + (adj >> 16);
300 adj = (adj & 0xffff) + (adj >> 16);
Xiaoping Fan6a1672f2016-08-17 19:58:12 -0700301 cm->xlate_dest_csum_adjustment = (u16)adj;
Xiaoping Fan978b3772015-05-27 14:15:18 -0700302 }
303}
304
305/*
306 * sfe_ipv6_update_summary_stats()
307 * Update the summary stats.
308 */
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +0530309static void sfe_ipv6_update_summary_stats(struct sfe_ipv6 *si, struct sfe_ipv6_stats *stats)
Xiaoping Fan978b3772015-05-27 14:15:18 -0700310{
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +0530311 int i = 0;
Xiaoping Fan978b3772015-05-27 14:15:18 -0700312
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +0530313 memset(stats, 0, sizeof(*stats));
Xiaoping Fan978b3772015-05-27 14:15:18 -0700314
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +0530315 for_each_possible_cpu(i) {
316 const struct sfe_ipv6_stats *s = per_cpu_ptr(si->stats_pcpu, i);
317
318 stats->connection_create_requests64 += s->connection_create_requests64;
319 stats->connection_create_collisions64 += s->connection_create_collisions64;
Ratheesh Kannoth89302a72021-10-20 08:10:37 +0530320 stats->connection_create_failures64 += s->connection_create_failures64;
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +0530321 stats->connection_destroy_requests64 += s->connection_destroy_requests64;
322 stats->connection_destroy_misses64 += s->connection_destroy_misses64;
323 stats->connection_match_hash_hits64 += s->connection_match_hash_hits64;
324 stats->connection_match_hash_reorders64 += s->connection_match_hash_reorders64;
325 stats->connection_flushes64 += s->connection_flushes64;
Suruchi Suman23a279d2021-11-16 15:13:09 +0530326 stats->packets_dropped64 += s->packets_dropped64;
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +0530327 stats->packets_forwarded64 += s->packets_forwarded64;
Ken Zhu7e38d1a2021-11-30 17:31:46 -0800328 stats->packets_fast_xmited64 += s->packets_fast_xmited64;
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +0530329 stats->packets_not_forwarded64 += s->packets_not_forwarded64;
Guduri Prathyusha647fe3e2021-11-22 19:17:51 +0530330 stats->pppoe_encap_packets_forwarded64 += s->pppoe_encap_packets_forwarded64;
331 stats->pppoe_decap_packets_forwarded64 += s->pppoe_decap_packets_forwarded64;
Guduri Prathyusha034d6352022-01-12 16:49:04 +0530332 stats->pppoe_bridge_packets_forwarded64 += s->pppoe_bridge_packets_forwarded64;
Xiaoping Fan978b3772015-05-27 14:15:18 -0700333 }
334}
335
336/*
337 * sfe_ipv6_insert_connection_match()
338 * Insert a connection match into the hash.
339 *
340 * On entry we must be holding the lock that protects the hash table.
341 */
Xiaoping Fan6a1672f2016-08-17 19:58:12 -0700342static inline void sfe_ipv6_insert_connection_match(struct sfe_ipv6 *si,
343 struct sfe_ipv6_connection_match *cm)
Xiaoping Fan978b3772015-05-27 14:15:18 -0700344{
Xiaoping Fan978b3772015-05-27 14:15:18 -0700345 unsigned int conn_match_idx
346 = sfe_ipv6_get_connection_match_hash(cm->match_dev, cm->match_protocol,
347 cm->match_src_ip, cm->match_src_port,
348 cm->match_dest_ip, cm->match_dest_port);
Xiaoping Fan6a1672f2016-08-17 19:58:12 -0700349
Ratheesh Kannotha212fc52021-10-20 07:50:32 +0530350 lockdep_assert_held(&si->lock);
Xiaoping Fan978b3772015-05-27 14:15:18 -0700351
Ratheesh Kannotha212fc52021-10-20 07:50:32 +0530352 hlist_add_head_rcu(&cm->hnode, &si->hlist_conn_match_hash_head[conn_match_idx]);
Xiaoping Fan978b3772015-05-27 14:15:18 -0700353#ifdef CONFIG_NF_FLOW_COOKIE
Xiaoping Fan640faf42015-08-28 15:50:55 -0700354 if (!si->flow_cookie_enable || !(cm->flags & (SFE_IPV6_CONNECTION_MATCH_FLAG_XLATE_SRC | SFE_IPV6_CONNECTION_MATCH_FLAG_XLATE_DEST)))
Xiaoping Fan978b3772015-05-27 14:15:18 -0700355 return;
356
357 /*
358 * Configure hardware to put a flow cookie in packet of this flow,
359 * then we can accelerate the lookup process when we received this packet.
360 */
361 for (conn_match_idx = 1; conn_match_idx < SFE_FLOW_COOKIE_SIZE; conn_match_idx++) {
362 struct sfe_ipv6_flow_cookie_entry *entry = &si->sfe_flow_cookie_table[conn_match_idx];
363
364 if ((NULL == entry->match) && time_is_before_jiffies(entry->last_clean_time + HZ)) {
365 sfe_ipv6_flow_cookie_set_func_t func;
366
367 rcu_read_lock();
368 func = rcu_dereference(si->flow_cookie_set_func);
369 if (func) {
370 if (!func(cm->match_protocol, cm->match_src_ip->addr, cm->match_src_port,
371 cm->match_dest_ip->addr, cm->match_dest_port, conn_match_idx)) {
372 entry->match = cm;
373 cm->flow_cookie = conn_match_idx;
374 } else {
375 si->exception_events[SFE_IPV6_EXCEPTION_EVENT_FLOW_COOKIE_ADD_FAIL]++;
376 }
377 }
378 rcu_read_unlock();
379
380 break;
381 }
382 }
383#endif
Xiaoping Fan978b3772015-05-27 14:15:18 -0700384}
385
386/*
387 * sfe_ipv6_remove_connection_match()
388 * Remove a connection match object from the hash.
Xiaoping Fan978b3772015-05-27 14:15:18 -0700389 */
390static inline void sfe_ipv6_remove_connection_match(struct sfe_ipv6 *si, struct sfe_ipv6_connection_match *cm)
391{
Ratheesh Kannotha212fc52021-10-20 07:50:32 +0530392
393 lockdep_assert_held(&si->lock);
Xiaoping Fan978b3772015-05-27 14:15:18 -0700394#ifdef CONFIG_NF_FLOW_COOKIE
Xiaoping Fan640faf42015-08-28 15:50:55 -0700395 if (si->flow_cookie_enable) {
396 /*
397 * Tell hardware that we no longer need a flow cookie in packet of this flow
398 */
399 unsigned int conn_match_idx;
Xiaoping Fan978b3772015-05-27 14:15:18 -0700400
Xiaoping Fan640faf42015-08-28 15:50:55 -0700401 for (conn_match_idx = 1; conn_match_idx < SFE_FLOW_COOKIE_SIZE; conn_match_idx++) {
402 struct sfe_ipv6_flow_cookie_entry *entry = &si->sfe_flow_cookie_table[conn_match_idx];
Xiaoping Fan978b3772015-05-27 14:15:18 -0700403
Xiaoping Fan640faf42015-08-28 15:50:55 -0700404 if (cm == entry->match) {
405 sfe_ipv6_flow_cookie_set_func_t func;
Xiaoping Fan978b3772015-05-27 14:15:18 -0700406
Xiaoping Fan640faf42015-08-28 15:50:55 -0700407 rcu_read_lock();
408 func = rcu_dereference(si->flow_cookie_set_func);
409 if (func) {
410 func(cm->match_protocol, cm->match_src_ip->addr, cm->match_src_port,
411 cm->match_dest_ip->addr, cm->match_dest_port, 0);
412 }
413 rcu_read_unlock();
414
415 cm->flow_cookie = 0;
416 entry->match = NULL;
417 entry->last_clean_time = jiffies;
418 break;
Xiaoping Fan978b3772015-05-27 14:15:18 -0700419 }
Xiaoping Fan978b3772015-05-27 14:15:18 -0700420 }
421 }
422#endif
Ratheesh Kannotha212fc52021-10-20 07:50:32 +0530423 hlist_del_init_rcu(&cm->hnode);
Xiaoping Fan978b3772015-05-27 14:15:18 -0700424
Xiaoping Fan978b3772015-05-27 14:15:18 -0700425}
426
427/*
428 * sfe_ipv6_get_connection_hash()
429 * Generate the hash used in connection lookups.
430 */
Xiaoping Fan6a1672f2016-08-17 19:58:12 -0700431static inline unsigned int sfe_ipv6_get_connection_hash(u8 protocol, struct sfe_ipv6_addr *src_ip, __be16 src_port,
Xiaoping Fan978b3772015-05-27 14:15:18 -0700432 struct sfe_ipv6_addr *dest_ip, __be16 dest_port)
433{
Xiaoping Fan6a1672f2016-08-17 19:58:12 -0700434 u32 idx, hash = 0;
Xiaoping Fan978b3772015-05-27 14:15:18 -0700435
436 for (idx = 0; idx < 4; idx++) {
437 hash ^= src_ip->addr[idx] ^ dest_ip->addr[idx];
438 }
439 hash = hash ^ protocol ^ ntohs(src_port ^ dest_port);
440 return ((hash >> SFE_IPV6_CONNECTION_HASH_SHIFT) ^ hash) & SFE_IPV6_CONNECTION_HASH_MASK;
441}
442
443/*
444 * sfe_ipv6_find_connection()
445 * Get the IPv6 connection info that corresponds to a particular 5-tuple.
446 *
447 * On entry we must be holding the lock that protects the hash table.
448 */
Xiaoping Fan6a1672f2016-08-17 19:58:12 -0700449static inline struct sfe_ipv6_connection *sfe_ipv6_find_connection(struct sfe_ipv6 *si, u32 protocol,
Xiaoping Fan978b3772015-05-27 14:15:18 -0700450 struct sfe_ipv6_addr *src_ip, __be16 src_port,
451 struct sfe_ipv6_addr *dest_ip, __be16 dest_port)
452{
453 struct sfe_ipv6_connection *c;
Ratheesh Kannotha212fc52021-10-20 07:50:32 +0530454
Xiaoping Fan978b3772015-05-27 14:15:18 -0700455 unsigned int conn_idx = sfe_ipv6_get_connection_hash(protocol, src_ip, src_port, dest_ip, dest_port);
Ratheesh Kannotha212fc52021-10-20 07:50:32 +0530456
457 lockdep_assert_held(&si->lock);
Xiaoping Fan978b3772015-05-27 14:15:18 -0700458 c = si->conn_hash[conn_idx];
459
Ratheesh Kannotha212fc52021-10-20 07:50:32 +0530460 while (c) {
461 if ((c->src_port == src_port)
462 && (c->dest_port == dest_port)
463 && (sfe_ipv6_addr_equal(c->src_ip, src_ip))
464 && (sfe_ipv6_addr_equal(c->dest_ip, dest_ip))
465 && (c->protocol == protocol)) {
466 return c;
467 }
Xiaoping Fan978b3772015-05-27 14:15:18 -0700468 c = c->next;
Ratheesh Kannotha212fc52021-10-20 07:50:32 +0530469 }
Xiaoping Fan978b3772015-05-27 14:15:18 -0700470
Ratheesh Kannotha212fc52021-10-20 07:50:32 +0530471 return NULL;
Xiaoping Fan978b3772015-05-27 14:15:18 -0700472}
473
474/*
Xiaoping Fan978b3772015-05-27 14:15:18 -0700475 * sfe_ipv6_insert_connection()
476 * Insert a connection into the hash.
477 *
478 * On entry we must be holding the lock that protects the hash table.
479 */
480static void sfe_ipv6_insert_connection(struct sfe_ipv6 *si, struct sfe_ipv6_connection *c)
481{
482 struct sfe_ipv6_connection **hash_head;
483 struct sfe_ipv6_connection *prev_head;
484 unsigned int conn_idx;
485
Ratheesh Kannotha212fc52021-10-20 07:50:32 +0530486 lockdep_assert_held(&si->lock);
487
Xiaoping Fan978b3772015-05-27 14:15:18 -0700488 /*
489 * Insert entry into the connection hash.
490 */
491 conn_idx = sfe_ipv6_get_connection_hash(c->protocol, c->src_ip, c->src_port,
492 c->dest_ip, c->dest_port);
493 hash_head = &si->conn_hash[conn_idx];
494 prev_head = *hash_head;
495 c->prev = NULL;
496 if (prev_head) {
497 prev_head->prev = c;
498 }
499
500 c->next = prev_head;
501 *hash_head = c;
502
503 /*
504 * Insert entry into the "all connections" list.
505 */
506 if (si->all_connections_tail) {
507 c->all_connections_prev = si->all_connections_tail;
508 si->all_connections_tail->all_connections_next = c;
509 } else {
510 c->all_connections_prev = NULL;
511 si->all_connections_head = c;
512 }
513
514 si->all_connections_tail = c;
515 c->all_connections_next = NULL;
516 si->num_connections++;
517
518 /*
519 * Insert the connection match objects too.
520 */
521 sfe_ipv6_insert_connection_match(si, c->original_match);
522 sfe_ipv6_insert_connection_match(si, c->reply_match);
523}
524
525/*
526 * sfe_ipv6_remove_connection()
527 * Remove a sfe_ipv6_connection object from the hash.
528 *
529 * On entry we must be holding the lock that protects the hash table.
530 */
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +0530531bool sfe_ipv6_remove_connection(struct sfe_ipv6 *si, struct sfe_ipv6_connection *c)
Xiaoping Fan978b3772015-05-27 14:15:18 -0700532{
Ratheesh Kannotha212fc52021-10-20 07:50:32 +0530533
534 lockdep_assert_held(&si->lock);
535 if (c->removed) {
536 DEBUG_ERROR("%px: Connection has been removed already\n", c);
537 return false;
538 }
539
Xiaoping Fan978b3772015-05-27 14:15:18 -0700540 /*
Tian Yang435afc42022-02-02 12:47:32 -0800541 * dereference the decap direction top_interface_dev
542 */
543 if (c->reply_match->top_interface_dev) {
544 dev_put(c->reply_match->top_interface_dev);
545 }
546 /*
Xiaoping Fan978b3772015-05-27 14:15:18 -0700547 * Remove the connection match objects.
548 */
549 sfe_ipv6_remove_connection_match(si, c->reply_match);
550 sfe_ipv6_remove_connection_match(si, c->original_match);
551
552 /*
553 * Unlink the connection.
554 */
555 if (c->prev) {
556 c->prev->next = c->next;
557 } else {
558 unsigned int conn_idx = sfe_ipv6_get_connection_hash(c->protocol, c->src_ip, c->src_port,
559 c->dest_ip, c->dest_port);
560 si->conn_hash[conn_idx] = c->next;
561 }
562
563 if (c->next) {
564 c->next->prev = c->prev;
565 }
Xiaoping Fan34586472015-07-03 02:20:35 -0700566
567 /*
568 * Unlink connection from all_connections list
569 */
570 if (c->all_connections_prev) {
571 c->all_connections_prev->all_connections_next = c->all_connections_next;
572 } else {
573 si->all_connections_head = c->all_connections_next;
574 }
575
576 if (c->all_connections_next) {
577 c->all_connections_next->all_connections_prev = c->all_connections_prev;
578 } else {
579 si->all_connections_tail = c->all_connections_prev;
580 }
581
Ken Zhu32b95392021-09-03 13:52:04 -0700582 /*
583 * If I am the next sync connection, move the sync to my next or head.
584 */
585 if (unlikely(si->wc_next == c)) {
586 si->wc_next = c->all_connections_next;
587 }
588
Ratheesh Kannotha212fc52021-10-20 07:50:32 +0530589 c->removed = true;
Xiaoping Fan34586472015-07-03 02:20:35 -0700590 si->num_connections--;
Ratheesh Kannotha212fc52021-10-20 07:50:32 +0530591 return true;
Xiaoping Fan978b3772015-05-27 14:15:18 -0700592}
593
594/*
595 * sfe_ipv6_gen_sync_connection()
596 * Sync a connection.
597 *
598 * On entry to this function we expect that the lock for the connection is either
Ratheesh Kannotha212fc52021-10-20 07:50:32 +0530599 * already held (while called from sfe_ipv6_periodic_sync() or isn't required
600 * (while called from sfe_ipv6_flush_sfe_ipv6_connection())
Xiaoping Fan978b3772015-05-27 14:15:18 -0700601 */
602static void sfe_ipv6_gen_sync_connection(struct sfe_ipv6 *si, struct sfe_ipv6_connection *c,
Xiaoping Fan99cb4c12015-08-21 19:07:32 -0700603 struct sfe_connection_sync *sis, sfe_sync_reason_t reason,
Xiaoping Fan6a1672f2016-08-17 19:58:12 -0700604 u64 now_jiffies)
Xiaoping Fan978b3772015-05-27 14:15:18 -0700605{
606 struct sfe_ipv6_connection_match *original_cm;
607 struct sfe_ipv6_connection_match *reply_cm;
Ratheesh Kannotha212fc52021-10-20 07:50:32 +0530608 u32 packet_count, byte_count;
Xiaoping Fan978b3772015-05-27 14:15:18 -0700609
610 /*
611 * Fill in the update message.
612 */
Murat Sezgin53509a12016-12-27 16:57:34 -0800613 sis->is_v6 = 1;
Xiaoping Fan978b3772015-05-27 14:15:18 -0700614 sis->protocol = c->protocol;
615 sis->src_ip.ip6[0] = c->src_ip[0];
Xiaoping Fan99cb4c12015-08-21 19:07:32 -0700616 sis->src_ip_xlate.ip6[0] = c->src_ip_xlate[0];
Xiaoping Fan978b3772015-05-27 14:15:18 -0700617 sis->dest_ip.ip6[0] = c->dest_ip[0];
Xiaoping Fan99cb4c12015-08-21 19:07:32 -0700618 sis->dest_ip_xlate.ip6[0] = c->dest_ip_xlate[0];
Xiaoping Fan978b3772015-05-27 14:15:18 -0700619 sis->src_port = c->src_port;
Xiaoping Fan99cb4c12015-08-21 19:07:32 -0700620 sis->src_port_xlate = c->src_port_xlate;
Xiaoping Fan978b3772015-05-27 14:15:18 -0700621 sis->dest_port = c->dest_port;
Xiaoping Fan99cb4c12015-08-21 19:07:32 -0700622 sis->dest_port_xlate = c->dest_port_xlate;
Xiaoping Fan978b3772015-05-27 14:15:18 -0700623
624 original_cm = c->original_match;
625 reply_cm = c->reply_match;
626 sis->src_td_max_window = original_cm->protocol_state.tcp.max_win;
627 sis->src_td_end = original_cm->protocol_state.tcp.end;
628 sis->src_td_max_end = original_cm->protocol_state.tcp.max_end;
629 sis->dest_td_max_window = reply_cm->protocol_state.tcp.max_win;
630 sis->dest_td_end = reply_cm->protocol_state.tcp.end;
631 sis->dest_td_max_end = reply_cm->protocol_state.tcp.max_end;
632
Ratheesh Kannotha212fc52021-10-20 07:50:32 +0530633 sfe_ipv6_connection_match_update_summary_stats(original_cm, &packet_count, &byte_count);
634 sis->src_new_packet_count = packet_count;
635 sis->src_new_byte_count = byte_count;
Xiaoping Fan978b3772015-05-27 14:15:18 -0700636
Ratheesh Kannotha212fc52021-10-20 07:50:32 +0530637 sfe_ipv6_connection_match_update_summary_stats(reply_cm, &packet_count, &byte_count);
638 sis->dest_new_packet_count = packet_count;
639 sis->dest_new_byte_count = byte_count;
Xiaoping Fan978b3772015-05-27 14:15:18 -0700640
641 sis->src_dev = original_cm->match_dev;
642 sis->src_packet_count = original_cm->rx_packet_count64;
643 sis->src_byte_count = original_cm->rx_byte_count64;
644
645 sis->dest_dev = reply_cm->match_dev;
646 sis->dest_packet_count = reply_cm->rx_packet_count64;
647 sis->dest_byte_count = reply_cm->rx_byte_count64;
648
Xiaoping Fan99cb4c12015-08-21 19:07:32 -0700649 sis->reason = reason;
650
Xiaoping Fan978b3772015-05-27 14:15:18 -0700651 /*
652 * Get the time increment since our last sync.
653 */
654 sis->delta_jiffies = now_jiffies - c->last_sync_jiffies;
655 c->last_sync_jiffies = now_jiffies;
656}
657
658/*
Ratheesh Kannotha212fc52021-10-20 07:50:32 +0530659 * sfe_ipv6_free_sfe_ipv6_connection_rcu()
660 * Called at RCU qs state to free the connection object.
661 */
662static void sfe_ipv6_free_sfe_ipv6_connection_rcu(struct rcu_head *head)
663{
664 struct sfe_ipv6_connection *c;
Suruchi Suman23a279d2021-11-16 15:13:09 +0530665 struct udp_sock *up;
666 struct sock *sk;
Ratheesh Kannotha212fc52021-10-20 07:50:32 +0530667
668 /*
669 * We dont need spin lock as the connection is already removed from link list
670 */
671 c = container_of(head, struct sfe_ipv6_connection, rcu);
672 BUG_ON(!c->removed);
673
674 DEBUG_TRACE("%px: connecton has been deleted\n", c);
675
676 /*
Suruchi Suman23a279d2021-11-16 15:13:09 +0530677 * Decrease the refcount taken in function sfe_ipv6_create_rule()
678 * during call of __udp6_lib_lookup()
679 */
680 up = c->reply_match->up;
681 if (up) {
682 sk = (struct sock *)up;
683 sock_put(sk);
684 }
685
686 /*
Ratheesh Kannotha212fc52021-10-20 07:50:32 +0530687 * Release our hold of the source and dest devices and free the memory
688 * for our connection objects.
689 */
690 dev_put(c->original_dev);
691 dev_put(c->reply_dev);
692 kfree(c->original_match);
693 kfree(c->reply_match);
694 kfree(c);
695}
696
697/*
Ken Zhu88c58152021-12-09 15:12:06 -0800698 * sfe_ipv6_sync_status()
699 * update a connection status to its connection manager.
700 *
701 * si: the ipv6 context
702 * c: which connection to be notified
703 * reason: what kind of reason: flush, or destroy
704 */
705void sfe_ipv6_sync_status(struct sfe_ipv6 *si,
706 struct sfe_ipv6_connection *c,
707 sfe_sync_reason_t reason)
708{
709 struct sfe_connection_sync sis;
710 u64 now_jiffies;
711 sfe_sync_rule_callback_t sync_rule_callback;
712
713 rcu_read_lock();
714 sync_rule_callback = rcu_dereference(si->sync_rule_callback);
Ken Zhu7a43d882022-01-04 10:51:44 -0800715 rcu_read_unlock();
Ken Zhu88c58152021-12-09 15:12:06 -0800716 if (unlikely(!sync_rule_callback)) {
Ken Zhu88c58152021-12-09 15:12:06 -0800717 return;
718 }
719
720 /*
721 * Generate a sync message and then sync.
722 */
723 now_jiffies = get_jiffies_64();
724 sfe_ipv6_gen_sync_connection(si, c, &sis, reason, now_jiffies);
725 sync_rule_callback(&sis);
Ken Zhu88c58152021-12-09 15:12:06 -0800726}
727
728/*
Xiaoping Fan978b3772015-05-27 14:15:18 -0700729 * sfe_ipv6_flush_connection()
730 * Flush a connection and free all associated resources.
731 *
732 * We need to be called with bottom halves disabled locally as we need to acquire
733 * the connection hash lock and release it again. In general we're actually called
734 * from within a BH and so we're fine, but we're also called when connections are
735 * torn down.
736 */
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +0530737void sfe_ipv6_flush_connection(struct sfe_ipv6 *si,
Xiaoping Fan6a1672f2016-08-17 19:58:12 -0700738 struct sfe_ipv6_connection *c,
739 sfe_sync_reason_t reason)
Xiaoping Fan978b3772015-05-27 14:15:18 -0700740{
Ratheesh Kannotha212fc52021-10-20 07:50:32 +0530741 BUG_ON(!c->removed);
742
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +0530743 this_cpu_inc(si->stats_pcpu->connection_flushes64);
Ken Zhu88c58152021-12-09 15:12:06 -0800744 sfe_ipv6_sync_status(si, c, reason);
Ratheesh Kannotha212fc52021-10-20 07:50:32 +0530745
746 /*
Ken Zhu88c58152021-12-09 15:12:06 -0800747 * Release our hold of the source and dest devices and free the memory
748 * for our connection objects.
Ratheesh Kannotha212fc52021-10-20 07:50:32 +0530749 */
Ratheesh Kannotha212fc52021-10-20 07:50:32 +0530750 call_rcu(&c->rcu, sfe_ipv6_free_sfe_ipv6_connection_rcu);
Xiaoping Fan978b3772015-05-27 14:15:18 -0700751}
752
Parikshit Guned31a8202022-01-05 22:15:04 +0530753/*
Jackson Bockus3fafbf32022-02-13 17:15:26 -0800754 * sfe_ipv4_service_class_stats_pcpu_get()
755 * Gets one CPU's service class statistics.
756 */
757static inline bool sfe_ipv6_service_class_stats_pcpu_get(struct sfe_ipv6_per_service_class_stats *sc_stats, uint64_t *bytes, uint64_t *packets)
758{
759 uint32_t retries = 0;
760 uint32_t seq;
761 uint64_t bytes_tmp, packets_tmp;
762
763 do {
764 seq = read_seqcount_begin(&sc_stats->seq);
765 bytes_tmp = sc_stats->tx_bytes;
766 packets_tmp = sc_stats->tx_packets;
767 } while (read_seqcount_retry(&sc_stats->seq, seq) && ++retries < SFE_SERVICE_CLASS_STATS_MAX_RETRY);
768
769 *bytes += bytes_tmp;
770 *packets += packets_tmp;
771
772 return retries < SFE_SERVICE_CLASS_STATS_MAX_RETRY;
773}
774
775/*
776 * sfe_ipv4_service_class_stats_get()
777 * Copy the ipv4 statistics for the given service class.
778 */
779bool sfe_ipv6_service_class_stats_get(uint8_t sid, uint64_t *bytes, uint64_t *packets)
780{
781 struct sfe_ipv6 *si = &__si6;
782 uint32_t cpu = 0;
783
784 for_each_possible_cpu(cpu) {
785 struct sfe_ipv6_service_class_stats_db *stats_db = per_cpu_ptr(si->stats_pcpu_psc, cpu);
786 struct sfe_ipv6_per_service_class_stats *sc_stats = &stats_db->psc_stats[sid];
787
788 if (!sfe_ipv6_service_class_stats_pcpu_get(sc_stats, bytes, packets)) {
789 return false;
790 }
791 }
792
793 return true;
794}
795
796/*
Parikshit Guned31a8202022-01-05 22:15:04 +0530797 * sfe_ipv6_service_class_stats_inc()
798 * Increment per cpu per service class stats.
799 */
800void sfe_ipv6_service_class_stats_inc(struct sfe_ipv6 *si, uint8_t sid, uint64_t bytes)
801{
802 struct sfe_ipv6_service_class_stats_db *sc_stats_db = this_cpu_ptr(si->stats_pcpu_psc);
803 struct sfe_ipv6_per_service_class_stats *sc_stats = &sc_stats_db->psc_stats[sid];
804
805 write_seqcount_begin(&sc_stats->seq);
806 sc_stats->tx_bytes += bytes;
807 sc_stats->tx_packets++;
808 write_seqcount_end(&sc_stats->seq);
809}
810
811/*
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +0530812 * sfe_ipv6_exception_stats_inc()
813 * Increment exception stats.
814 */
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +0530815void sfe_ipv6_exception_stats_inc(struct sfe_ipv6 *si, enum sfe_ipv6_exception_events reason)
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +0530816{
817 struct sfe_ipv6_stats *stats = this_cpu_ptr(si->stats_pcpu);
818
819 stats->exception_events64[reason]++;
820 stats->packets_not_forwarded64++;
821}
822
Xiaoping Fan978b3772015-05-27 14:15:18 -0700823/*
Nitin Shettye6ed5b52021-12-27 14:50:11 +0530824 * sfe_ipv6_is_local_ip()
825 * return true if it is local ip otherwise return false
826 */
827static bool sfe_ipv6_is_local_ip(struct sfe_ipv6 *si, uint8_t *addr)
828{
829 struct net_device *dev;
830 struct in6_addr ip_addr;
831 memcpy(ip_addr.s6_addr, addr, 16);
832
833 dev = ipv6_dev_find(&init_net, &ip_addr, 1);
834 if (dev) {
835 dev_put(dev);
836 return true;
837 }
838
839 return false;
840}
841
842/*
Xiaoping Fan978b3772015-05-27 14:15:18 -0700843 * sfe_ipv6_recv()
844 * Handle packet receives and forwaring.
845 *
846 * Returns 1 if the packet is forwarded or 0 if it isn't.
847 */
Suruchi Suman23a279d2021-11-16 15:13:09 +0530848int sfe_ipv6_recv(struct net_device *dev, struct sk_buff *skb, struct sfe_l2_info *l2_info, bool tun_outer)
Xiaoping Fan978b3772015-05-27 14:15:18 -0700849{
850 struct sfe_ipv6 *si = &__si6;
851 unsigned int len;
852 unsigned int payload_len;
Ratheesh Kannoth741f7992021-10-20 07:39:52 +0530853 unsigned int ihl = sizeof(struct ipv6hdr);
Ken Zhu88c58152021-12-09 15:12:06 -0800854 bool sync_on_find = false;
Ratheesh Kannoth741f7992021-10-20 07:39:52 +0530855 struct ipv6hdr *iph;
Xiaoping Fan6a1672f2016-08-17 19:58:12 -0700856 u8 next_hdr;
Xiaoping Fan978b3772015-05-27 14:15:18 -0700857
858 /*
859 * Check that we have space for an IP header and an uplayer header here.
860 */
861 len = skb->len;
862 if (!pskb_may_pull(skb, ihl + sizeof(struct sfe_ipv6_ext_hdr))) {
Xiaoping Fan978b3772015-05-27 14:15:18 -0700863
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +0530864 sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_HEADER_INCOMPLETE);
Xiaoping Fan978b3772015-05-27 14:15:18 -0700865 DEBUG_TRACE("len: %u is too short\n", len);
866 return 0;
867 }
868
869 /*
870 * Is our IP version wrong?
871 */
Ratheesh Kannoth741f7992021-10-20 07:39:52 +0530872 iph = (struct ipv6hdr *)skb->data;
Xiaoping Fan978b3772015-05-27 14:15:18 -0700873 if (unlikely(iph->version != 6)) {
Xiaoping Fan978b3772015-05-27 14:15:18 -0700874
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +0530875 sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_NON_V6);
Xiaoping Fan978b3772015-05-27 14:15:18 -0700876 DEBUG_TRACE("IP version: %u\n", iph->version);
877 return 0;
878 }
879
880 /*
881 * Does our datagram fit inside the skb?
882 */
883 payload_len = ntohs(iph->payload_len);
884 if (unlikely(payload_len > (len - ihl))) {
Xiaoping Fan978b3772015-05-27 14:15:18 -0700885
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +0530886 sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_DATAGRAM_INCOMPLETE);
Ratheesh Kannoth741f7992021-10-20 07:39:52 +0530887 DEBUG_TRACE("payload_len: %u, exceeds len: %u\n", payload_len, (len - (unsigned int)sizeof(struct ipv6hdr)));
Xiaoping Fan978b3772015-05-27 14:15:18 -0700888 return 0;
889 }
890
891 next_hdr = iph->nexthdr;
892 while (unlikely(sfe_ipv6_is_ext_hdr(next_hdr))) {
893 struct sfe_ipv6_ext_hdr *ext_hdr;
894 unsigned int ext_hdr_len;
895
896 ext_hdr = (struct sfe_ipv6_ext_hdr *)(skb->data + ihl);
Xiaoping Fan978b3772015-05-27 14:15:18 -0700897
898 ext_hdr_len = ext_hdr->hdr_len;
899 ext_hdr_len <<= 3;
900 ext_hdr_len += sizeof(struct sfe_ipv6_ext_hdr);
901 ihl += ext_hdr_len;
902 if (!pskb_may_pull(skb, ihl + sizeof(struct sfe_ipv6_ext_hdr))) {
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +0530903 sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_HEADER_INCOMPLETE);
Xiaoping Fan978b3772015-05-27 14:15:18 -0700904
905 DEBUG_TRACE("extension header %d not completed\n", next_hdr);
906 return 0;
907 }
Ken Zhu88c58152021-12-09 15:12:06 -0800908 /*
909 * Any packets have extend hdr, won't be handled in the fast
910 * path,sync its status and exception to the kernel.
911 */
912 sync_on_find = true;
Xiaoping Fan978b3772015-05-27 14:15:18 -0700913 next_hdr = ext_hdr->next_hdr;
914 }
915
916 if (IPPROTO_UDP == next_hdr) {
Ken Zhu88c58152021-12-09 15:12:06 -0800917 return sfe_ipv6_recv_udp(si, skb, dev, len, iph, ihl, sync_on_find, l2_info, tun_outer);
Xiaoping Fan978b3772015-05-27 14:15:18 -0700918 }
919
920 if (IPPROTO_TCP == next_hdr) {
Ken Zhu88c58152021-12-09 15:12:06 -0800921 return sfe_ipv6_recv_tcp(si, skb, dev, len, iph, ihl, sync_on_find, l2_info);
Xiaoping Fan978b3772015-05-27 14:15:18 -0700922 }
923
924 if (IPPROTO_ICMPV6 == next_hdr) {
925 return sfe_ipv6_recv_icmp(si, skb, dev, len, iph, ihl);
926 }
927
Tian Yangafb03452022-01-13 18:53:13 -0800928 if (IPPROTO_IPIP == next_hdr) {
929 return sfe_ipv6_recv_tunipip6(si, skb, dev, len, iph, ihl, sync_on_find, l2_info, true);
930 }
931
Nitin Shettye6ed5b52021-12-27 14:50:11 +0530932#ifdef SFE_GRE_TUN_ENABLE
933 if (IPPROTO_GRE == next_hdr) {
Nitin Shetty2114a892022-01-28 20:03:56 +0530934 return sfe_ipv6_recv_gre(si, skb, dev, len, iph, ihl, sync_on_find, l2_info, tun_outer);
Nitin Shettye6ed5b52021-12-27 14:50:11 +0530935 }
936#endif
937
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +0530938 sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_UNHANDLED_PROTOCOL);
Xiaoping Fan978b3772015-05-27 14:15:18 -0700939 DEBUG_TRACE("not UDP, TCP or ICMP: %u\n", next_hdr);
940 return 0;
941}
942
943/*
944 * sfe_ipv6_update_tcp_state()
945 * update TCP window variables.
946 */
947static void
948sfe_ipv6_update_tcp_state(struct sfe_ipv6_connection *c,
Ratheesh Kannoth89302a72021-10-20 08:10:37 +0530949 struct sfe_ipv6_rule_create_msg *msg)
Xiaoping Fan978b3772015-05-27 14:15:18 -0700950{
951 struct sfe_ipv6_connection_match *orig_cm;
952 struct sfe_ipv6_connection_match *repl_cm;
953 struct sfe_ipv6_tcp_connection_match *orig_tcp;
954 struct sfe_ipv6_tcp_connection_match *repl_tcp;
955
956 orig_cm = c->original_match;
957 repl_cm = c->reply_match;
958 orig_tcp = &orig_cm->protocol_state.tcp;
959 repl_tcp = &repl_cm->protocol_state.tcp;
960
961 /* update orig */
Ratheesh Kannoth89302a72021-10-20 08:10:37 +0530962 if (orig_tcp->max_win < msg->tcp_rule.flow_max_window) {
963 orig_tcp->max_win = msg->tcp_rule.flow_max_window;
Xiaoping Fan978b3772015-05-27 14:15:18 -0700964 }
Ratheesh Kannoth89302a72021-10-20 08:10:37 +0530965 if ((s32)(orig_tcp->end - msg->tcp_rule.flow_end) < 0) {
966 orig_tcp->end = msg->tcp_rule.flow_end;
Xiaoping Fan978b3772015-05-27 14:15:18 -0700967 }
Ratheesh Kannoth89302a72021-10-20 08:10:37 +0530968 if ((s32)(orig_tcp->max_end - msg->tcp_rule.flow_max_end) < 0) {
969 orig_tcp->max_end = msg->tcp_rule.flow_max_end;
Xiaoping Fan978b3772015-05-27 14:15:18 -0700970 }
971
972 /* update reply */
Ratheesh Kannoth89302a72021-10-20 08:10:37 +0530973 if (repl_tcp->max_win < msg->tcp_rule.return_max_window) {
974 repl_tcp->max_win = msg->tcp_rule.return_max_window;
Xiaoping Fan978b3772015-05-27 14:15:18 -0700975 }
Ratheesh Kannoth89302a72021-10-20 08:10:37 +0530976 if ((s32)(repl_tcp->end - msg->tcp_rule.return_end) < 0) {
977 repl_tcp->end = msg->tcp_rule.return_end;
Xiaoping Fan978b3772015-05-27 14:15:18 -0700978 }
Ratheesh Kannoth89302a72021-10-20 08:10:37 +0530979 if ((s32)(repl_tcp->max_end - msg->tcp_rule.return_max_end) < 0) {
980 repl_tcp->max_end = msg->tcp_rule.return_max_end;
Xiaoping Fan978b3772015-05-27 14:15:18 -0700981 }
982
983 /* update match flags */
984 orig_cm->flags &= ~SFE_IPV6_CONNECTION_MATCH_FLAG_NO_SEQ_CHECK;
985 repl_cm->flags &= ~SFE_IPV6_CONNECTION_MATCH_FLAG_NO_SEQ_CHECK;
Ratheesh Kannoth89302a72021-10-20 08:10:37 +0530986 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_NO_SEQ_CHECK) {
Xiaoping Fan978b3772015-05-27 14:15:18 -0700987 orig_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_NO_SEQ_CHECK;
988 repl_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_NO_SEQ_CHECK;
989 }
990}
991
992/*
993 * sfe_ipv6_update_protocol_state()
994 * update protocol specified state machine.
995 */
996static void
997sfe_ipv6_update_protocol_state(struct sfe_ipv6_connection *c,
Ratheesh Kannoth89302a72021-10-20 08:10:37 +0530998 struct sfe_ipv6_rule_create_msg *msg)
Xiaoping Fan978b3772015-05-27 14:15:18 -0700999{
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301000 switch (msg->tuple.protocol) {
Xiaoping Fan978b3772015-05-27 14:15:18 -07001001 case IPPROTO_TCP:
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301002 sfe_ipv6_update_tcp_state(c, msg);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001003 break;
1004 }
1005}
1006
1007/*
Wayne Tanbb7f1782021-12-13 11:16:04 -08001008 * sfe_ipv6_match_entry_set_vlan()
1009 */
1010static void sfe_ipv6_match_entry_set_vlan(
1011 struct sfe_ipv6_connection_match *cm,
1012 u32 primary_ingress_vlan_tag,
1013 u32 primary_egress_vlan_tag,
1014 u32 secondary_ingress_vlan_tag,
1015 u32 secondary_egress_vlan_tag)
1016{
1017 u16 tpid;
1018 /*
1019 * Prevent stacking header counts when updating.
1020 */
1021 cm->ingress_vlan_hdr_cnt = 0;
1022 cm->egress_vlan_hdr_cnt = 0;
1023 memset(cm->ingress_vlan_hdr, 0, sizeof(cm->ingress_vlan_hdr));
1024 memset(cm->egress_vlan_hdr, 0, sizeof(cm->egress_vlan_hdr));
1025
1026 /*
1027 * vlan_hdr[0] corresponds to outer tag
1028 * vlan_hdr[1] corresponds to inner tag
1029 * Extract the vlan information (tpid and tci) from rule message
1030 */
1031 if ((primary_ingress_vlan_tag & VLAN_VID_MASK) != SFE_VLAN_ID_NOT_CONFIGURED) {
1032 tpid = (u16)(primary_ingress_vlan_tag >> 16);
1033 cm->ingress_vlan_hdr[0].tpid = ntohs(tpid);
1034 cm->ingress_vlan_hdr[0].tci = (u16)primary_ingress_vlan_tag;
1035 cm->ingress_vlan_hdr_cnt++;
1036 }
1037
1038 if ((secondary_ingress_vlan_tag & VLAN_VID_MASK) != SFE_VLAN_ID_NOT_CONFIGURED) {
1039 tpid = (u16)(secondary_ingress_vlan_tag >> 16);
1040 cm->ingress_vlan_hdr[1].tpid = ntohs(tpid);
1041 cm->ingress_vlan_hdr[1].tci = (u16)secondary_ingress_vlan_tag;
1042 cm->ingress_vlan_hdr_cnt++;
1043 }
1044
1045 if ((primary_egress_vlan_tag & VLAN_VID_MASK) != SFE_VLAN_ID_NOT_CONFIGURED) {
1046 tpid = (u16)(primary_egress_vlan_tag >> 16);
1047 cm->egress_vlan_hdr[0].tpid = ntohs(tpid);
1048 cm->egress_vlan_hdr[0].tci = (u16)primary_egress_vlan_tag;
1049 cm->egress_vlan_hdr_cnt++;
1050 }
1051
1052 if ((secondary_egress_vlan_tag & VLAN_VID_MASK) != SFE_VLAN_ID_NOT_CONFIGURED) {
1053 tpid = (u16)(secondary_egress_vlan_tag >> 16);
1054 cm->egress_vlan_hdr[1].tpid = ntohs(tpid);
1055 cm->egress_vlan_hdr[1].tci = (u16)secondary_egress_vlan_tag;
1056 cm->egress_vlan_hdr_cnt++;
1057 }
1058}
1059
1060/*
Xiaoping Fan978b3772015-05-27 14:15:18 -07001061 * sfe_ipv6_update_rule()
1062 * update forwarding rule after rule is created.
1063 */
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301064void sfe_ipv6_update_rule(struct sfe_ipv6_rule_create_msg *msg)
1065
Xiaoping Fan978b3772015-05-27 14:15:18 -07001066{
1067 struct sfe_ipv6_connection *c;
1068 struct sfe_ipv6 *si = &__si6;
1069
1070 spin_lock_bh(&si->lock);
1071
1072 c = sfe_ipv6_find_connection(si,
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301073 msg->tuple.protocol,
1074 (struct sfe_ipv6_addr *)msg->tuple.flow_ip,
1075 msg->tuple.flow_ident,
1076 (struct sfe_ipv6_addr *)msg->tuple.return_ip,
1077 msg->tuple.return_ident);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001078 if (c != NULL) {
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301079 sfe_ipv6_update_protocol_state(c, msg);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001080 }
1081
1082 spin_unlock_bh(&si->lock);
1083}
1084
1085/*
Guduri Prathyusha79a5fee2021-11-11 17:59:10 +05301086 * sfe_ipv6_xmit_eth_type_check
1087 * Checking if MAC header has to be written.
1088 */
1089static inline bool sfe_ipv6_xmit_eth_type_check(struct net_device *dev, u32 cm_flags)
1090{
1091 if (!(dev->flags & IFF_NOARP)) {
1092 return true;
1093 }
1094
1095 /*
1096 * For PPPoE, since we are now supporting PPPoE encapsulation, we are writing L2 header.
1097 */
1098 if (cm_flags & SFE_IPV6_CONNECTION_MATCH_FLAG_PPPOE_ENCAP) {
1099 return true;
1100 }
1101
1102 return false;
1103}
1104
1105/*
Xiaoping Fan978b3772015-05-27 14:15:18 -07001106 * sfe_ipv6_create_rule()
1107 * Create a forwarding rule.
1108 */
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301109int sfe_ipv6_create_rule(struct sfe_ipv6_rule_create_msg *msg)
Xiaoping Fan978b3772015-05-27 14:15:18 -07001110{
1111 struct sfe_ipv6 *si = &__si6;
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301112 struct sfe_ipv6_connection *c, *old_c;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001113 struct sfe_ipv6_connection_match *original_cm;
1114 struct sfe_ipv6_connection_match *reply_cm;
1115 struct net_device *dest_dev;
1116 struct net_device *src_dev;
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301117 struct sfe_ipv6_5tuple *tuple = &msg->tuple;
Suruchi Suman23a279d2021-11-16 15:13:09 +05301118 struct sock *sk;
1119 struct net *net;
1120 unsigned int src_if_idx;
1121
Suruchi Sumanc1a4a612021-10-21 14:50:23 +05301122 s32 flow_interface_num = msg->conn_rule.flow_top_interface_num;
1123 s32 return_interface_num = msg->conn_rule.return_top_interface_num;
Parikshit Guned31a8202022-01-05 22:15:04 +05301124 u32 flow_sawf_tag;
1125 u32 return_sawf_tag;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001126
Suruchi Sumanc1a4a612021-10-21 14:50:23 +05301127 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_USE_FLOW_BOTTOM_INTERFACE) {
1128 flow_interface_num = msg->conn_rule.flow_interface_num;
1129 }
1130
1131 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_USE_RETURN_BOTTOM_INTERFACE) {
1132 return_interface_num = msg->conn_rule.return_interface_num;
1133 }
1134
1135 src_dev = dev_get_by_index(&init_net, flow_interface_num);
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301136 if (!src_dev) {
1137 DEBUG_WARN("%px: Unable to find src_dev corresponding to %d\n", msg,
Suruchi Sumanc1a4a612021-10-21 14:50:23 +05301138 flow_interface_num);
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301139 this_cpu_inc(si->stats_pcpu->connection_create_failures64);
1140 return -EINVAL;
1141 }
1142
Suruchi Sumanc1a4a612021-10-21 14:50:23 +05301143 dest_dev = dev_get_by_index(&init_net, return_interface_num);
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301144 if (!dest_dev) {
1145 DEBUG_WARN("%px: Unable to find dest_dev corresponding to %d\n", msg,
Suruchi Sumanc1a4a612021-10-21 14:50:23 +05301146 return_interface_num);
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301147 this_cpu_inc(si->stats_pcpu->connection_create_failures64);
1148 dev_put(src_dev);
1149 return -EINVAL;
1150 }
Xiaoping Fan978b3772015-05-27 14:15:18 -07001151
1152 if (unlikely((dest_dev->reg_state != NETREG_REGISTERED) ||
1153 (src_dev->reg_state != NETREG_REGISTERED))) {
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301154 DEBUG_WARN("%px: src_dev=%s and dest_dev=%s are unregistered\n", msg,
1155 src_dev->name, dest_dev->name);
1156 this_cpu_inc(si->stats_pcpu->connection_create_failures64);
1157 dev_put(src_dev);
1158 dev_put(dest_dev);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001159 return -EINVAL;
1160 }
1161
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301162 /*
1163 * Allocate the various connection tracking objects.
1164 */
Parikshit Guneef1664c2022-03-24 14:15:42 +05301165 c = (struct sfe_ipv6_connection *)kzalloc(sizeof(struct sfe_ipv6_connection), GFP_ATOMIC);
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301166 if (unlikely(!c)) {
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301167 DEBUG_WARN("%px: memory allocation of connection entry failed\n", msg);
1168 this_cpu_inc(si->stats_pcpu->connection_create_failures64);
1169 dev_put(src_dev);
1170 dev_put(dest_dev);
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301171 return -ENOMEM;
1172 }
1173
Parikshit Guneef1664c2022-03-24 14:15:42 +05301174 original_cm = (struct sfe_ipv6_connection_match *)kzalloc(sizeof(struct sfe_ipv6_connection_match), GFP_ATOMIC);
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301175 if (unlikely(!original_cm)) {
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301176 this_cpu_inc(si->stats_pcpu->connection_create_failures64);
1177 DEBUG_WARN("%px: memory allocation of connection match entry failed\n", msg);
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301178 kfree(c);
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301179 dev_put(src_dev);
1180 dev_put(dest_dev);
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301181 return -ENOMEM;
1182 }
1183
Parikshit Guneef1664c2022-03-24 14:15:42 +05301184 reply_cm = (struct sfe_ipv6_connection_match *)kzalloc(sizeof(struct sfe_ipv6_connection_match), GFP_ATOMIC);
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301185 if (unlikely(!reply_cm)) {
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301186 this_cpu_inc(si->stats_pcpu->connection_create_failures64);
1187 DEBUG_WARN("%px: memory allocation of connection match entry failed\n", msg);
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301188 kfree(original_cm);
1189 kfree(c);
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301190 dev_put(src_dev);
1191 dev_put(dest_dev);
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301192 return -ENOMEM;
1193 }
1194
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05301195 this_cpu_inc(si->stats_pcpu->connection_create_requests64);
1196
Xiaoping Fan978b3772015-05-27 14:15:18 -07001197 spin_lock_bh(&si->lock);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001198
1199 /*
1200 * Check to see if there is already a flow that matches the rule we're
1201 * trying to create. If there is then we can't create a new one.
1202 */
Wayne Tanbb7f1782021-12-13 11:16:04 -08001203 old_c = sfe_ipv6_find_connection(si,
1204 tuple->protocol,
1205 (struct sfe_ipv6_addr *)tuple->flow_ip,
1206 tuple->flow_ident,
1207 (struct sfe_ipv6_addr *)tuple->return_ip,
1208 tuple->return_ident);
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301209
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301210 if (old_c != NULL) {
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05301211 this_cpu_inc(si->stats_pcpu->connection_create_collisions64);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001212
1213 /*
1214 * If we already have the flow then it's likely that this
1215 * request to create the connection rule contains more
1216 * up-to-date information. Check and update accordingly.
1217 */
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301218 sfe_ipv6_update_protocol_state(old_c, msg);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001219 spin_unlock_bh(&si->lock);
1220
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301221 kfree(reply_cm);
1222 kfree(original_cm);
1223 kfree(c);
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301224 dev_put(src_dev);
1225 dev_put(dest_dev);
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301226
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301227 DEBUG_TRACE("connection already exists - p: %d\n"
Tian Yang45f39c82020-10-06 14:07:47 -07001228 " s: %s:%pxM:%pI6:%u, d: %s:%pxM:%pI6:%u\n",
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301229 tuple->protocol,
1230 src_dev->name, msg->conn_rule.flow_mac, tuple->flow_ip, ntohs(tuple->flow_ident),
1231 dest_dev->name, msg->conn_rule.return_mac, tuple->return_ip, ntohs(tuple->return_ident));
Xiaoping Fan978b3772015-05-27 14:15:18 -07001232 return -EADDRINUSE;
1233 }
1234
1235 /*
Xiaoping Fan978b3772015-05-27 14:15:18 -07001236 * Fill in the "original" direction connection matching object.
1237 * Note that the transmit MAC address is "dest_mac_xlate" because
1238 * we always know both ends of a connection by their translated
1239 * addresses and not their public addresses.
1240 */
1241 original_cm->match_dev = src_dev;
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301242 original_cm->match_protocol = tuple->protocol;
1243 original_cm->match_src_ip[0] = *(struct sfe_ipv6_addr *)tuple->flow_ip;
Suruchi Suman66609a72022-01-20 02:34:25 +05301244 original_cm->match_src_port = netif_is_vxlan(src_dev) ? 0 : tuple->flow_ident;
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301245 original_cm->match_dest_ip[0] = *(struct sfe_ipv6_addr *)tuple->return_ip;
1246 original_cm->match_dest_port = tuple->return_ident;
1247
1248 original_cm->xlate_src_ip[0] = *(struct sfe_ipv6_addr *)tuple->flow_ip;
1249 original_cm->xlate_src_port = tuple->flow_ident;
1250 original_cm->xlate_dest_ip[0] = *(struct sfe_ipv6_addr *)tuple->return_ip;
1251 original_cm->xlate_dest_port = tuple->return_ident;
1252
Xiaoping Fan978b3772015-05-27 14:15:18 -07001253 original_cm->xmit_dev = dest_dev;
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301254
1255 original_cm->xmit_dev_mtu = msg->conn_rule.return_mtu;
Ratheesh Kannoth29140aa2021-10-20 08:25:02 +05301256
Xiaoping Fan978b3772015-05-27 14:15:18 -07001257 original_cm->connection = c;
1258 original_cm->counter_match = reply_cm;
Suruchi Suman23a279d2021-11-16 15:13:09 +05301259
1260 /*
1261 * Valid in decap direction only
1262 */
1263 RCU_INIT_POINTER(original_cm->up, NULL);
1264
Ken Zhu37040ea2021-09-09 21:11:15 -07001265 if (msg->valid_flags & SFE_RULE_CREATE_MARK_VALID) {
1266 original_cm->mark = msg->mark_rule.flow_mark;
1267 original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_MARK;
1268 }
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301269 if (msg->valid_flags & SFE_RULE_CREATE_QOS_VALID) {
1270 original_cm->priority = msg->qos_rule.flow_qos_tag;
Xiaoping Fane1963d42015-08-25 17:06:19 -07001271 original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_PRIORITY_REMARK;
1272 }
Wayne Tanbb7f1782021-12-13 11:16:04 -08001273
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301274 if (msg->valid_flags & SFE_RULE_CREATE_DSCP_MARKING_VALID) {
1275 original_cm->dscp = msg->dscp_rule.flow_dscp << SFE_IPV6_DSCP_SHIFT;
Xiaoping Fane1963d42015-08-25 17:06:19 -07001276 original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_DSCP_REMARK;
1277 }
Ratheesh Kannoth71fc51e2022-01-05 10:02:47 +05301278 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_BRIDGE_FLOW) {
1279 original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_BRIDGE_FLOW;
1280 }
Ken Zhu7e38d1a2021-11-30 17:31:46 -08001281 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_FLOW_TRANSMIT_FAST) {
1282 original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_FAST_XMIT_DEV_ADMISSION;
1283 }
1284
Parikshit Guned31a8202022-01-05 22:15:04 +05301285 /*
1286 * Mark SAWF metadata if the sawf tag is valid.
1287 */
1288 original_cm->sawf_valid = false;
1289 flow_sawf_tag = SFE_GET_SAWF_TAG(msg->sawf_rule.flow_mark);
1290 if (likely(SFE_SAWF_TAG_IS_VALID(flow_sawf_tag))) {
1291 original_cm->mark = msg->sawf_rule.flow_mark;
1292 original_cm->sawf_valid = true;
1293 original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_MARK;
1294 }
Ratheesh Kannoth71fc51e2022-01-05 10:02:47 +05301295
Wayne Tanbb7f1782021-12-13 11:16:04 -08001296 /*
1297 * Add VLAN rule to original_cm
1298 */
1299 if (msg->valid_flags & SFE_RULE_CREATE_VLAN_VALID) {
1300 struct sfe_vlan_rule *vlan_primary_rule = &msg->vlan_primary_rule;
1301 struct sfe_vlan_rule *vlan_secondary_rule = &msg->vlan_secondary_rule;
1302 sfe_ipv6_match_entry_set_vlan(original_cm,
1303 vlan_primary_rule->ingress_vlan_tag,
1304 vlan_primary_rule->egress_vlan_tag,
1305 vlan_secondary_rule->ingress_vlan_tag,
1306 vlan_secondary_rule->egress_vlan_tag);
1307
1308 if ((msg->rule_flags & SFE_RULE_CREATE_FLAG_USE_RETURN_BOTTOM_INTERFACE) &&
1309 original_cm->egress_vlan_hdr_cnt > 0) {
1310 original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_INSERT_EGRESS_VLAN_TAG;
1311 original_cm->l2_hdr_size += original_cm->egress_vlan_hdr_cnt * VLAN_HLEN;
1312 }
1313 }
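	/*
	 * Note: the reply direction applies the same VLAN rules (primary and
	 * secondary) with the ingress/egress tags swapped; see the reply_cm
	 * VLAN setup further below.
	 */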
1314
Nitin Shettye6ed5b52021-12-27 14:50:11 +05301315 if ((IPPROTO_GRE == tuple->protocol) && !sfe_ipv6_is_local_ip(si, (uint8_t *)original_cm->match_dest_ip)) {
1316 original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_PASSTHROUGH;
1317 }
1318
Xiaoping Fan978b3772015-05-27 14:15:18 -07001319#ifdef CONFIG_NF_FLOW_COOKIE
1320 original_cm->flow_cookie = 0;
1321#endif
Zhi Chen8748eb32015-06-18 12:58:48 -07001322#ifdef CONFIG_XFRM
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301323 if (msg->valid_flags & SFE_RULE_CREATE_DIRECTION_VALID) {
1324 original_cm->flow_accel = msg->direction_rule.flow_accel;
1325 } else {
1326 original_cm->flow_accel = 1;
1327 }
Zhi Chen8748eb32015-06-18 12:58:48 -07001328#endif
Ratheesh Kannotha3cf0e02021-12-09 09:44:10 +05301329 /*
1330	 * If l2_features are disabled and the flow uses L2 features such as macvlan/bridge/PPPoE/VLAN,
1331	 * bottom interfaces are expected to be disabled in the flow rule and only top interfaces are
1332	 * used. In such cases, do not use HW csum offload; csum offload is used only when we are
1333	 * sending directly to the destination interface that supports it.
1334 */
Nitin Shettye6ed5b52021-12-27 14:50:11 +05301335 if (likely(dest_dev->features & NETIF_F_HW_CSUM) && sfe_dev_has_hw_csum(dest_dev)) {
Ratheesh Kannotha3cf0e02021-12-09 09:44:10 +05301336 if ((msg->conn_rule.return_top_interface_num == msg->conn_rule.return_interface_num) ||
1337 (msg->rule_flags & SFE_RULE_CREATE_FLAG_USE_RETURN_BOTTOM_INTERFACE)) {
Ratheesh Kannoth48445532022-02-07 16:19:00 +05301338 /*
1339				 * Don't enable CSUM offload
1340 */
1341#if 0
Ratheesh Kannotha3cf0e02021-12-09 09:44:10 +05301342 original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_CSUM_OFFLOAD;
Ratheesh Kannoth48445532022-02-07 16:19:00 +05301343#endif
Ratheesh Kannotha3cf0e02021-12-09 09:44:10 +05301344 }
1345 }
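	/*
	 * Note: the CSUM_OFFLOAD flag assignment above is compiled out (#if 0),
	 * so hardware checksum offload currently stays disabled for this
	 * direction even when the conditions are met. The reply direction
	 * further below is handled the same way.
	 */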
Xiaoping Fan978b3772015-05-27 14:15:18 -07001346
Guduri Prathyusha647fe3e2021-11-22 19:17:51 +05301347 /*
1348 * Adding PPPoE parameters to original and reply entries based on the direction where
1349	 * Add PPPoE parameters to the original and reply entries based on the direction in which
1350	 * the PPPoE header is valid in the ECM rule.
1351 * If PPPoE is valid in flow direction (from interface is PPPoE), then
1352 * original cm will have PPPoE at ingress (strip PPPoE header)
1353 * reply cm will have PPPoE at egress (add PPPoE header)
1354 *
1355 * If PPPoE is valid in return direction (to interface is PPPoE), then
1356 * original cm will have PPPoE at egress (add PPPoE header)
1357 * reply cm will have PPPoE at ingress (strip PPPoE header)
1358 */
1359 if (msg->valid_flags & SFE_RULE_CREATE_PPPOE_DECAP_VALID) {
1360 original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_PPPOE_DECAP;
1361 original_cm->pppoe_session_id = msg->pppoe_rule.flow_pppoe_session_id;
1362 ether_addr_copy(original_cm->pppoe_remote_mac, msg->pppoe_rule.flow_pppoe_remote_mac);
1363
1364 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_PPPOE_ENCAP;
Wayne Tanbb7f1782021-12-13 11:16:04 -08001365 reply_cm->l2_hdr_size += SFE_PPPOE_SESSION_HEADER_SIZE;
Guduri Prathyusha647fe3e2021-11-22 19:17:51 +05301366 reply_cm->pppoe_session_id = msg->pppoe_rule.flow_pppoe_session_id;
1367 ether_addr_copy(reply_cm->pppoe_remote_mac, msg->pppoe_rule.flow_pppoe_remote_mac);
1368 }
1369
1370 if (msg->valid_flags & SFE_RULE_CREATE_PPPOE_ENCAP_VALID) {
1371 original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_PPPOE_ENCAP;
Wayne Tanbb7f1782021-12-13 11:16:04 -08001372 original_cm->l2_hdr_size += SFE_PPPOE_SESSION_HEADER_SIZE;
Guduri Prathyusha647fe3e2021-11-22 19:17:51 +05301373 original_cm->pppoe_session_id = msg->pppoe_rule.return_pppoe_session_id;
1374 ether_addr_copy(original_cm->pppoe_remote_mac, msg->pppoe_rule.return_pppoe_remote_mac);
1375
1376 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_PPPOE_DECAP;
1377 reply_cm->pppoe_session_id = msg->pppoe_rule.return_pppoe_session_id;
1378 ether_addr_copy(reply_cm->pppoe_remote_mac, msg->pppoe_rule.return_pppoe_remote_mac);
1379 }
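	/*
	 * Illustrative example (interface roles are assumptions): if the rule's
	 * "to" interface is a PPPoE session, ECM sets
	 * SFE_RULE_CREATE_PPPOE_ENCAP_VALID; original_cm then adds the PPPoE
	 * session header on transmit (growing l2_hdr_size by
	 * SFE_PPPOE_SESSION_HEADER_SIZE) while reply_cm strips it on receive,
	 * mirroring the DECAP case handled above.
	 */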
1380
Ratheesh Kannoth5dee3772022-01-18 11:27:14 +05301381 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_SRC_INTERFACE_CHECK) {
1382 original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK;
1383 }
1384
Xiaoping Fan978b3772015-05-27 14:15:18 -07001385 /*
Ken Zhubbf49652021-09-12 15:33:09 -07001386	 * For non-ARP interfaces, we don't write an L2 header.
Guduri Prathyusha79a5fee2021-11-11 17:59:10 +05301387	 * PPPoE is excluded from this, since we now support PPPoE encap/decap.
Xiaoping Fan978b3772015-05-27 14:15:18 -07001388 */
Guduri Prathyusha79a5fee2021-11-11 17:59:10 +05301389 if (sfe_ipv6_xmit_eth_type_check(dest_dev, original_cm->flags)) {
Ratheesh Kannoth29140aa2021-10-20 08:25:02 +05301390
1391 /*
1392 * Check whether the rule has configured a specific source MAC address to use.
1393 * This is needed when virtual L3 interfaces such as br-lan, macvlan, vlan are used during egress
1394 */
Ratheesh Kannoth71fc51e2022-01-05 10:02:47 +05301395 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_BRIDGE_FLOW) {
1396 ether_addr_copy((u8 *)original_cm->xmit_src_mac, (u8 *)msg->conn_rule.flow_mac);
Ratheesh Kannoth29140aa2021-10-20 08:25:02 +05301397 } else {
Ratheesh Kannoth71fc51e2022-01-05 10:02:47 +05301398 if ((msg->valid_flags & SFE_RULE_CREATE_SRC_MAC_VALID) &&
1399 (msg->src_mac_rule.mac_valid_flags & SFE_SRC_MAC_RETURN_VALID)) {
1400 ether_addr_copy((u8 *)original_cm->xmit_src_mac, (u8 *)msg->src_mac_rule.return_src_mac);
1401 } else {
1402 ether_addr_copy((u8 *)original_cm->xmit_src_mac, (u8 *)dest_dev->dev_addr);
1403 }
Ratheesh Kannoth29140aa2021-10-20 08:25:02 +05301404 }
1405 ether_addr_copy((u8 *)original_cm->xmit_dest_mac, (u8 *)msg->conn_rule.return_mac);
1406
Xiaoping Fan978b3772015-05-27 14:15:18 -07001407 original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_WRITE_L2_HDR;
Wayne Tanbb7f1782021-12-13 11:16:04 -08001408 original_cm->l2_hdr_size += ETH_HLEN;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001409
1410 /*
1411 * If our dev writes Ethernet headers then we can write a really fast
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301412	 * version.
Xiaoping Fan978b3772015-05-27 14:15:18 -07001413 */
1414 if (dest_dev->header_ops) {
1415 if (dest_dev->header_ops->create == eth_header) {
1416 original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_WRITE_FAST_ETH_HDR;
1417 }
1418 }
1419 }
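	/*
	 * With WRITE_FAST_ETH_HDR set, the per-protocol transmit handlers are
	 * expected to write the Ethernet header directly from
	 * xmit_src_mac/xmit_dest_mac rather than calling into header_ops, which
	 * is safe because the device uses the standard eth_header() layout.
	 */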
1420
1421 /*
1422 * Fill in the "reply" direction connection matching object.
1423 */
1424 reply_cm->match_dev = dest_dev;
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301425 reply_cm->match_protocol = tuple->protocol;
1426 reply_cm->match_src_ip[0] = *(struct sfe_ipv6_addr *)tuple->return_ip;
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301427 reply_cm->match_dest_ip[0] = *(struct sfe_ipv6_addr *)tuple->flow_ip;
1428 reply_cm->match_dest_port = tuple->flow_ident;
1429 reply_cm->xlate_src_ip[0] = *(struct sfe_ipv6_addr *)tuple->return_ip;
1430 reply_cm->xlate_src_port = tuple->return_ident;
1431 reply_cm->xlate_dest_ip[0] = *(struct sfe_ipv6_addr *)tuple->flow_ip;
1432 reply_cm->xlate_dest_port = tuple->flow_ident;
1433
Suruchi Suman23a279d2021-11-16 15:13:09 +05301434 /*
1435 * Keep source port as 0 for VxLAN tunnels.
1436 */
1437 if (netif_is_vxlan(src_dev) || netif_is_vxlan(dest_dev)) {
1438 reply_cm->match_src_port = 0;
1439 } else {
1440 reply_cm->match_src_port = tuple->return_ident;
1441 }
1442
Xiaoping Fan978b3772015-05-27 14:15:18 -07001443 reply_cm->xmit_dev = src_dev;
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301444 reply_cm->xmit_dev_mtu = msg->conn_rule.flow_mtu;
Ratheesh Kannoth29140aa2021-10-20 08:25:02 +05301445
Xiaoping Fan978b3772015-05-27 14:15:18 -07001446 reply_cm->connection = c;
1447 reply_cm->counter_match = original_cm;
Suruchi Suman23a279d2021-11-16 15:13:09 +05301448
Ken Zhu37040ea2021-09-09 21:11:15 -07001449 if (msg->valid_flags & SFE_RULE_CREATE_MARK_VALID) {
1450 reply_cm->mark = msg->mark_rule.return_mark;
1451 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_MARK;
1452 }
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301453 if (msg->valid_flags & SFE_RULE_CREATE_QOS_VALID) {
1454 reply_cm->priority = msg->qos_rule.return_qos_tag;
Xiaoping Fane1963d42015-08-25 17:06:19 -07001455 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_PRIORITY_REMARK;
1456 }
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301457 if (msg->valid_flags & SFE_RULE_CREATE_DSCP_MARKING_VALID) {
1458 reply_cm->dscp = msg->dscp_rule.return_dscp << SFE_IPV6_DSCP_SHIFT;
Xiaoping Fane1963d42015-08-25 17:06:19 -07001459 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_DSCP_REMARK;
1460 }
Ratheesh Kannoth71fc51e2022-01-05 10:02:47 +05301461 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_BRIDGE_FLOW) {
1462 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_BRIDGE_FLOW;
1463 }
Ken Zhu7e38d1a2021-11-30 17:31:46 -08001464 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_RETURN_TRANSMIT_FAST) {
1465 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_FAST_XMIT_DEV_ADMISSION;
1466 }
Ratheesh Kannoth71fc51e2022-01-05 10:02:47 +05301467
Nitin Shettye6ed5b52021-12-27 14:50:11 +05301468 if ((IPPROTO_GRE == tuple->protocol) && !sfe_ipv6_is_local_ip(si, (uint8_t *)reply_cm->match_dest_ip)) {
1469 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_PASSTHROUGH;
1470 }
1471
Suruchi Suman23a279d2021-11-16 15:13:09 +05301472 /*
Parikshit Guned31a8202022-01-05 22:15:04 +05301473 * Mark return SAWF metadata if the sawf tag is valid.
1474 */
1475 reply_cm->sawf_valid = false;
1476 return_sawf_tag = SFE_GET_SAWF_TAG(msg->sawf_rule.return_mark);
1477 if (likely(SFE_SAWF_TAG_IS_VALID(return_sawf_tag))) {
1478 reply_cm->mark = msg->sawf_rule.return_mark;
1479 reply_cm->sawf_valid = true;
1480 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_MARK;
1481 }
1482
1483 /*
Suruchi Suman23a279d2021-11-16 15:13:09 +05301484 * Setup UDP Socket if found to be valid for decap.
1485	 * Set up the UDP socket if it is found to be valid for decap.
1486 RCU_INIT_POINTER(reply_cm->up, NULL);
1487 net = dev_net(reply_cm->match_dev);
1488 src_if_idx = src_dev->ifindex;
1489
1490 rcu_read_lock();
1491
1492 /*
1493 * Look for the associated sock object.
1494 * __udp6_lib_lookup() holds a reference for this sock object,
1495 * which will be released in sfe_ipv6_flush_connection()
1496 */
1497#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0))
1498 sk = __udp6_lib_lookup(net, (const struct in6_addr *)reply_cm->match_dest_ip,
1499 reply_cm->match_dest_port, (const struct in6_addr *)reply_cm->xlate_src_ip,
1500 reply_cm->xlate_src_port, src_if_idx, &udp_table);
1501#else
1502 sk = __udp6_lib_lookup(net, (const struct in6_addr *)reply_cm->match_dest_ip,
1503 reply_cm->match_dest_port, (const struct in6_addr *)reply_cm->xlate_src_ip,
1504 reply_cm->xlate_src_port, src_if_idx, 0, &udp_table, NULL);
1505#endif
1506 rcu_read_unlock();
1507
1508 /*
1509 * We set the UDP sock pointer as valid only for decap direction.
1510 */
1511 if (sk && udp_sk(sk)->encap_type) {
1512#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0))
1513 if (!atomic_add_unless(&sk->sk_refcnt, 1, 0)) {
1514#else
1515 if (!refcount_inc_not_zero(&sk->sk_refcnt)) {
1516#endif
Tian Yang435afc42022-02-02 12:47:32 -08001517 this_cpu_inc(si->stats_pcpu->connection_create_failures64);
Wayne Tanbb7f1782021-12-13 11:16:04 -08001518 spin_unlock_bh(&si->lock);
Suruchi Suman23a279d2021-11-16 15:13:09 +05301519 kfree(reply_cm);
1520 kfree(original_cm);
1521 kfree(c);
1522
1523 DEBUG_INFO("sfe: unable to take reference for socket p:%d\n", tuple->protocol);
1524 DEBUG_INFO("SK: connection - \n"
1525 " s: %s:%pI6(%pI6):%u(%u)\n"
1526 " d: %s:%pI6(%pI6):%u(%u)\n",
1527 reply_cm->match_dev->name, &reply_cm->match_src_ip, &reply_cm->xlate_src_ip,
1528 ntohs(reply_cm->match_src_port), ntohs(reply_cm->xlate_src_port),
1529 reply_cm->xmit_dev->name, &reply_cm->match_dest_ip, &reply_cm->xlate_dest_ip,
1530 ntohs(reply_cm->match_dest_port), ntohs(reply_cm->xlate_dest_port));
1531
1532 dev_put(src_dev);
1533 dev_put(dest_dev);
1534
1535 return -ESHUTDOWN;
1536 }
1537
1538 rcu_assign_pointer(reply_cm->up, udp_sk(sk));
1539 DEBUG_INFO("Sock lookup success with reply_cm direction(%p)\n", sk);
1540 DEBUG_INFO("SK: connection - \n"
1541 " s: %s:%pI6(%pI6):%u(%u)\n"
1542 " d: %s:%pI6(%pI6):%u(%u)\n",
1543 reply_cm->match_dev->name, &reply_cm->match_src_ip, &reply_cm->xlate_src_ip,
1544 ntohs(reply_cm->match_src_port), ntohs(reply_cm->xlate_src_port),
1545 reply_cm->xmit_dev->name, &reply_cm->match_dest_ip, &reply_cm->xlate_dest_ip,
1546 ntohs(reply_cm->match_dest_port), ntohs(reply_cm->xlate_dest_port));
1547 }
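	/*
	 * Only sockets with an encapsulation handler (udp_sk(sk)->encap_type
	 * != 0, e.g. a VxLAN tunnel socket) have their reference stored in
	 * reply_cm->up; for plain UDP sockets reply_cm->up remains NULL.
	 */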
1548
Wayne Tanbb7f1782021-12-13 11:16:04 -08001549 /*
1550 * Add VLAN rule to reply_cm
1551 */
1552 if (msg->valid_flags & SFE_RULE_CREATE_VLAN_VALID) {
1553 struct sfe_vlan_rule *vlan_primary_rule = &msg->vlan_primary_rule;
1554 struct sfe_vlan_rule *vlan_secondary_rule = &msg->vlan_secondary_rule;
1555 sfe_ipv6_match_entry_set_vlan(reply_cm,
1556 vlan_primary_rule->egress_vlan_tag,
1557 vlan_primary_rule->ingress_vlan_tag,
1558 vlan_secondary_rule->egress_vlan_tag,
1559 vlan_secondary_rule->ingress_vlan_tag);
1560
1561 if ((msg->rule_flags & SFE_RULE_CREATE_FLAG_USE_FLOW_BOTTOM_INTERFACE) &&
1562 reply_cm->egress_vlan_hdr_cnt > 0) {
1563 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_INSERT_EGRESS_VLAN_TAG;
1564 reply_cm->l2_hdr_size += reply_cm->egress_vlan_hdr_cnt * VLAN_HLEN;
1565 }
1566 }
1567
Xiaoping Fan978b3772015-05-27 14:15:18 -07001568#ifdef CONFIG_NF_FLOW_COOKIE
1569 reply_cm->flow_cookie = 0;
1570#endif
Zhi Chen8748eb32015-06-18 12:58:48 -07001571#ifdef CONFIG_XFRM
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301572 if (msg->valid_flags & SFE_RULE_CREATE_DIRECTION_VALID) {
1573 reply_cm->flow_accel = msg->direction_rule.return_accel;
1574 } else {
1575 reply_cm->flow_accel = 1;
1576 }
Zhi Chen8748eb32015-06-18 12:58:48 -07001577#endif
Nitin Shettye6ed5b52021-12-27 14:50:11 +05301578
1579 /*
1580	 * The inet6_protocol handler will be used only in the decap path
1581	 * for the non-passthrough case.
1582 */
1583 original_cm->proto = NULL;
1584 reply_cm->proto = NULL;
Tian Yang435afc42022-02-02 12:47:32 -08001585 original_cm->top_interface_dev = NULL;
1586 reply_cm->top_interface_dev = NULL;
Nitin Shettye6ed5b52021-12-27 14:50:11 +05301587
1588#ifdef SFE_GRE_TUN_ENABLE
1589 if (!(reply_cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_PASSTHROUGH)) {
1590 rcu_read_lock();
1591 reply_cm->proto = rcu_dereference(inet6_protos[tuple->protocol]);
1592 rcu_read_unlock();
1593
1594 if (unlikely(!reply_cm->proto)) {
Tian Yang435afc42022-02-02 12:47:32 -08001595 this_cpu_inc(si->stats_pcpu->connection_create_failures64);
1596 spin_unlock_bh(&si->lock);
Nitin Shettye6ed5b52021-12-27 14:50:11 +05301597 kfree(reply_cm);
1598 kfree(original_cm);
1599 kfree(c);
1600 dev_put(src_dev);
1601 dev_put(dest_dev);
1602 DEBUG_WARN("sfe: GRE proto handler is not registered\n");
1603 return -EPERM;
1604 }
1605 }
1606#endif
1607
Xiaoping Fan978b3772015-05-27 14:15:18 -07001608 /*
Tian Yangafb03452022-01-13 18:53:13 -08001609	 * The decapsulation path has proto set.
1610	 * This is used to differentiate decap from encap and to call the protocol-specific handler.
1611 */
1612 if (IPPROTO_IPIP == tuple->protocol) {
1613 original_cm->proto = NULL;
1614 rcu_read_lock();
1615 reply_cm->proto = rcu_dereference(inet6_protos[tuple->protocol]);
1616 rcu_read_unlock();
Tian Yang435afc42022-02-02 12:47:32 -08001617 reply_cm->top_interface_dev = dev_get_by_index(&init_net, msg->conn_rule.return_top_interface_num);
1618
1619 if (unlikely(!reply_cm->top_interface_dev)) {
1620 DEBUG_WARN("%px: Unable to find top_interface_dev corresponding to %d\n", msg,
1621 msg->conn_rule.return_top_interface_num);
1622 this_cpu_inc(si->stats_pcpu->connection_create_failures64);
1623 spin_unlock_bh(&si->lock);
1624 kfree(reply_cm);
1625 kfree(original_cm);
1626 kfree(c);
1627 dev_put(src_dev);
1628 dev_put(dest_dev);
1629 return -EINVAL;
1630 }
Tian Yangafb03452022-01-13 18:53:13 -08001631 }
1632 /*
Ratheesh Kannotha3cf0e02021-12-09 09:44:10 +05301633	 * If l2_features are disabled and the flow uses L2 features such as macvlan/bridge/PPPoE/VLAN,
1634	 * bottom interfaces are expected to be disabled in the flow rule and only top interfaces are
1635	 * used. In such cases, do not use HW csum offload; csum offload is used only when we are
1636	 * sending directly to the destination interface that supports it.
1637 */
Nitin Shettye6ed5b52021-12-27 14:50:11 +05301638 if (likely(src_dev->features & NETIF_F_HW_CSUM) && sfe_dev_has_hw_csum(src_dev)) {
Ratheesh Kannotha3cf0e02021-12-09 09:44:10 +05301639 if ((msg->conn_rule.flow_top_interface_num == msg->conn_rule.flow_interface_num) ||
1640 (msg->rule_flags & SFE_RULE_CREATE_FLAG_USE_FLOW_BOTTOM_INTERFACE)) {
Ratheesh Kannoth48445532022-02-07 16:19:00 +05301641 /*
1642				 * Don't enable CSUM offload
1643 */
1644#if 0
Ratheesh Kannotha3cf0e02021-12-09 09:44:10 +05301645 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_CSUM_OFFLOAD;
Ratheesh Kannoth48445532022-02-07 16:19:00 +05301646#endif
Ratheesh Kannotha3cf0e02021-12-09 09:44:10 +05301647 }
1648 }
1649
Ratheesh Kannoth5dee3772022-01-18 11:27:14 +05301650 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_SRC_INTERFACE_CHECK) {
1651 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK;
1652 }
1653
Ratheesh Kannotha3cf0e02021-12-09 09:44:10 +05301654 /*
Ken Zhubbf49652021-09-12 15:33:09 -07001655	 * For non-ARP interfaces, we don't write an L2 header.
Guduri Prathyusha79a5fee2021-11-11 17:59:10 +05301656	 * PPPoE is excluded from this, since we now support PPPoE encap/decap.
Xiaoping Fan978b3772015-05-27 14:15:18 -07001657 */
Guduri Prathyusha79a5fee2021-11-11 17:59:10 +05301658 if (sfe_ipv6_xmit_eth_type_check(src_dev, reply_cm->flags)) {
Ratheesh Kannoth29140aa2021-10-20 08:25:02 +05301659
1660 /*
1661 * Check whether the rule has configured a specific source MAC address to use.
1662 * This is needed when virtual L3 interfaces such as br-lan, macvlan, vlan are used during egress
1663 */
Ratheesh Kannoth71fc51e2022-01-05 10:02:47 +05301664 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_BRIDGE_FLOW) {
1665 ether_addr_copy((u8 *)reply_cm->xmit_src_mac, (u8 *)msg->conn_rule.return_mac);
Ratheesh Kannoth29140aa2021-10-20 08:25:02 +05301666 } else {
Ratheesh Kannoth71fc51e2022-01-05 10:02:47 +05301667 if ((msg->valid_flags & SFE_RULE_CREATE_SRC_MAC_VALID) &&
1668 (msg->src_mac_rule.mac_valid_flags & SFE_SRC_MAC_FLOW_VALID)) {
1669 ether_addr_copy((u8 *)reply_cm->xmit_src_mac, (u8 *)msg->src_mac_rule.flow_src_mac);
1670 } else {
1671 ether_addr_copy((u8 *)reply_cm->xmit_src_mac, (u8 *)src_dev->dev_addr);
1672 }
Ratheesh Kannoth29140aa2021-10-20 08:25:02 +05301673 }
1674
1675 ether_addr_copy((u8 *)reply_cm->xmit_dest_mac, (u8 *)msg->conn_rule.flow_mac);
1676
Xiaoping Fan978b3772015-05-27 14:15:18 -07001677 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_WRITE_L2_HDR;
Wayne Tanbb7f1782021-12-13 11:16:04 -08001678 reply_cm->l2_hdr_size += ETH_HLEN;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001679
1680 /*
1681 * If our dev writes Ethernet headers then we can write a really fast
1682 * version.
1683 */
1684 if (src_dev->header_ops) {
1685 if (src_dev->header_ops->create == eth_header) {
1686 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_WRITE_FAST_ETH_HDR;
1687 }
1688 }
1689 }
1690
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301691 /*
1692 * No support for NAT in ipv6
1693 */
Xiaoping Fan978b3772015-05-27 14:15:18 -07001694
Xiaoping Fan978b3772015-05-27 14:15:18 -07001695 /*
Xiaoping Fan978b3772015-05-27 14:15:18 -07001696 * Initialize the protocol-specific information that we track.
1697 */
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301698 switch (tuple->protocol) {
Xiaoping Fan978b3772015-05-27 14:15:18 -07001699 case IPPROTO_TCP:
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301700 original_cm->protocol_state.tcp.win_scale = msg->tcp_rule.flow_window_scale;
1701 original_cm->protocol_state.tcp.max_win = msg->tcp_rule.flow_max_window ? msg->tcp_rule.flow_max_window : 1;
1702 original_cm->protocol_state.tcp.end = msg->tcp_rule.flow_end;
1703 original_cm->protocol_state.tcp.max_end = msg->tcp_rule.flow_max_end;
1704 reply_cm->protocol_state.tcp.win_scale = msg->tcp_rule.return_window_scale;
1705 reply_cm->protocol_state.tcp.max_win = msg->tcp_rule.return_max_window ? msg->tcp_rule.return_max_window : 1;
1706 reply_cm->protocol_state.tcp.end = msg->tcp_rule.return_end;
1707 reply_cm->protocol_state.tcp.max_end = msg->tcp_rule.return_max_end;
1708 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_NO_SEQ_CHECK) {
Xiaoping Fan978b3772015-05-27 14:15:18 -07001709 original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_NO_SEQ_CHECK;
1710 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_NO_SEQ_CHECK;
1711 }
1712 break;
1713 }
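	/*
	 * These window-scale/max-window/end values seed the TCP sequence-space
	 * sanity checks performed on the fast path; the checks are skipped
	 * entirely when the NO_SEQ_CHECK flag is set.
	 */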
1714
Wayne Tanbb7f1782021-12-13 11:16:04 -08001715 /*
1716 * Fill in the ipv6_connection object.
1717 */
1718 c->protocol = tuple->protocol;
1719 c->src_ip[0] = *(struct sfe_ipv6_addr *)tuple->flow_ip;
1720 c->src_ip_xlate[0] = *(struct sfe_ipv6_addr *)tuple->flow_ip;
1721 c->src_port = tuple->flow_ident;
1722 c->src_port_xlate = tuple->flow_ident;
1723 c->original_dev = src_dev;
1724 c->original_match = original_cm;
1725
1726 c->dest_ip[0] = *(struct sfe_ipv6_addr *)tuple->return_ip;
1727 c->dest_ip_xlate[0] = *(struct sfe_ipv6_addr *)tuple->return_ip;
1728 c->dest_port = tuple->return_ident;
1729 c->dest_port_xlate = tuple->return_ident;
1730
1731 c->reply_dev = dest_dev;
1732 c->reply_match = reply_cm;
1733 c->debug_read_seq = 0;
1734 c->last_sync_jiffies = get_jiffies_64();
1735 c->removed = false;
1736
Xiaoping Fan978b3772015-05-27 14:15:18 -07001737 sfe_ipv6_connection_match_compute_translations(original_cm);
1738 sfe_ipv6_connection_match_compute_translations(reply_cm);
1739 sfe_ipv6_insert_connection(si, c);
1740
1741 spin_unlock_bh(&si->lock);
1742
1743 /*
1744 * We have everything we need!
1745 */
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301746 DEBUG_INFO("new connection - p: %d\n"
Tian Yang45f39c82020-10-06 14:07:47 -07001747 " s: %s:%pxM(%pxM):%pI6(%pI6):%u(%u)\n"
1748 " d: %s:%pxM(%pxM):%pI6(%pI6):%u(%u)\n",
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301749 tuple->protocol,
1750 src_dev->name, msg->conn_rule.flow_mac, NULL,
1751 (void *)tuple->flow_ip, (void *)tuple->flow_ip, ntohs(tuple->flow_ident), ntohs(tuple->flow_ident),
1752 dest_dev->name, NULL, msg->conn_rule.return_mac,
1753 (void *)tuple->return_ip, (void *)tuple->return_ip, ntohs(tuple->return_ident), ntohs(tuple->return_ident));
Xiaoping Fan978b3772015-05-27 14:15:18 -07001754
1755 return 0;
1756}
1757
1758/*
1759 * sfe_ipv6_destroy_rule()
1760 * Destroy a forwarding rule.
1761 */
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301762void sfe_ipv6_destroy_rule(struct sfe_ipv6_rule_destroy_msg *msg)
Xiaoping Fan978b3772015-05-27 14:15:18 -07001763{
1764 struct sfe_ipv6 *si = &__si6;
1765 struct sfe_ipv6_connection *c;
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301766 bool ret;
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301767 struct sfe_ipv6_5tuple *tuple = &msg->tuple;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001768
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05301769 this_cpu_inc(si->stats_pcpu->connection_destroy_requests64);
1770
Xiaoping Fan978b3772015-05-27 14:15:18 -07001771 spin_lock_bh(&si->lock);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001772
1773 /*
1774 * Check to see if we have a flow that matches the rule we're trying
1775 * to destroy. If there isn't then we can't destroy it.
1776 */
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301777 c = sfe_ipv6_find_connection(si, tuple->protocol, (struct sfe_ipv6_addr *)tuple->flow_ip, tuple->flow_ident,
1778 (struct sfe_ipv6_addr *)tuple->return_ip, tuple->return_ident);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001779 if (!c) {
Xiaoping Fan978b3772015-05-27 14:15:18 -07001780 spin_unlock_bh(&si->lock);
1781
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05301782 this_cpu_inc(si->stats_pcpu->connection_destroy_misses64);
1783
Xiaoping Fan978b3772015-05-27 14:15:18 -07001784 DEBUG_TRACE("connection does not exist - p: %d, s: %pI6:%u, d: %pI6:%u\n",
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301785 tuple->protocol, tuple->flow_ip, ntohs(tuple->flow_ident),
1786 tuple->return_ip, ntohs(tuple->return_ident));
Xiaoping Fan978b3772015-05-27 14:15:18 -07001787 return;
1788 }
1789
1790 /*
1791 * Remove our connection details from the hash tables.
1792 */
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301793 ret = sfe_ipv6_remove_connection(si, c);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001794 spin_unlock_bh(&si->lock);
1795
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301796 if (ret) {
1797 sfe_ipv6_flush_connection(si, c, SFE_SYNC_REASON_DESTROY);
1798 }
Xiaoping Fan978b3772015-05-27 14:15:18 -07001799
1800 DEBUG_INFO("connection destroyed - p: %d, s: %pI6:%u, d: %pI6:%u\n",
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301801 tuple->protocol, tuple->flow_ip, ntohs(tuple->flow_ident),
1802 tuple->return_ip, ntohs(tuple->return_ident));
Xiaoping Fan978b3772015-05-27 14:15:18 -07001803}
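
/*
 * Illustrative sketch only (not part of the driver; local variable names are
 * assumptions): a caller is expected to fill the 5-tuple before invoking
 * sfe_ipv6_destroy_rule(), along these lines:
 *
 *	struct sfe_ipv6_rule_destroy_msg nidm = {0};
 *
 *	nidm.tuple.protocol = IPPROTO_UDP;
 *	memcpy(nidm.tuple.flow_ip, flow_ip, sizeof(nidm.tuple.flow_ip));
 *	memcpy(nidm.tuple.return_ip, return_ip, sizeof(nidm.tuple.return_ip));
 *	nidm.tuple.flow_ident = flow_port;		(network byte order)
 *	nidm.tuple.return_ident = return_port;		(network byte order)
 *	sfe_ipv6_destroy_rule(&nidm);
 */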
1804
1805/*
Ken Zhu7a43d882022-01-04 10:51:44 -08001806 * sfe_ipv6_sync_invoke()
1807 *	Schedule the many-connection stats sync work.
1808 */
1809bool sfe_ipv6_sync_invoke(uint16_t index)
1810{
1811 struct sfe_ipv6 *si = &__si6;
1812 return schedule_delayed_work_on(si->work_cpu, &(si->sync_dwork), 0);
1813}
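
/*
 * Note on usage (based on the code in this file): sfe_ipv6_periodic_sync()
 * fills at most sfe_ipv6_sync_max_number connection-stats entries per run and
 * records in conn_stats_many.next whether connections remain. The consumer of
 * the many-sync message is expected to call sfe_ipv6_sync_invoke() again to
 * schedule the next batch; the delayed work does not re-arm itself here.
 */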
1814
1815/*
Xiaoping Fan978b3772015-05-27 14:15:18 -07001816 * sfe_ipv6_register_sync_rule_callback()
1817 * Register a callback for rule synchronization.
1818 */
1819void sfe_ipv6_register_sync_rule_callback(sfe_sync_rule_callback_t sync_rule_callback)
1820{
1821 struct sfe_ipv6 *si = &__si6;
1822
1823 spin_lock_bh(&si->lock);
1824 rcu_assign_pointer(si->sync_rule_callback, sync_rule_callback);
1825 spin_unlock_bh(&si->lock);
1826}
1827
1828/*
Ken Zhu7a43d882022-01-04 10:51:44 -08001829 * sfe_ipv6_register_many_sync_callback()
1830 *	Register a callback for many-connection stats synchronization.
1831 */
1832void sfe_ipv6_register_many_sync_callback(sfe_ipv6_many_sync_callback_t cb)
1833{
1834 struct sfe_ipv6 *si = &__si6;
1835
1836 spin_lock_bh(&si->lock);
1837 rcu_assign_pointer(si->many_sync_callback, cb);
1838 spin_unlock_bh(&si->lock);
1839}
1840
1841/*
Xiaoping Fan978b3772015-05-27 14:15:18 -07001842 * sfe_ipv6_get_debug_dev()
1843 */
1844static ssize_t sfe_ipv6_get_debug_dev(struct device *dev,
1845 struct device_attribute *attr,
1846 char *buf)
1847{
1848 struct sfe_ipv6 *si = &__si6;
1849 ssize_t count;
1850 int num;
1851
1852 spin_lock_bh(&si->lock);
1853 num = si->debug_dev;
1854 spin_unlock_bh(&si->lock);
1855
1856 count = snprintf(buf, (ssize_t)PAGE_SIZE, "%d\n", num);
1857 return count;
1858}
1859
1860/*
1861 * sfe_ipv6_destroy_all_rules_for_dev()
1862 * Destroy all connections that match a particular device.
1863 *
1864 * If we pass dev as NULL then this destroys all connections.
1865 */
1866void sfe_ipv6_destroy_all_rules_for_dev(struct net_device *dev)
1867{
1868 struct sfe_ipv6 *si = &__si6;
1869 struct sfe_ipv6_connection *c;
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301870 bool ret;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001871
Xiaoping Fan34586472015-07-03 02:20:35 -07001872another_round:
Xiaoping Fan978b3772015-05-27 14:15:18 -07001873 spin_lock_bh(&si->lock);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001874
Xiaoping Fan34586472015-07-03 02:20:35 -07001875 for (c = si->all_connections_head; c; c = c->all_connections_next) {
Xiaoping Fan978b3772015-05-27 14:15:18 -07001876 /*
Xiaoping Fan34586472015-07-03 02:20:35 -07001877 * Does this connection relate to the device we are destroying?
Xiaoping Fan978b3772015-05-27 14:15:18 -07001878 */
1879 if (!dev
1880 || (dev == c->original_dev)
1881 || (dev == c->reply_dev)) {
Xiaoping Fan34586472015-07-03 02:20:35 -07001882 break;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001883 }
Xiaoping Fan34586472015-07-03 02:20:35 -07001884 }
Xiaoping Fan978b3772015-05-27 14:15:18 -07001885
Xiaoping Fan34586472015-07-03 02:20:35 -07001886 if (c) {
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301887 ret = sfe_ipv6_remove_connection(si, c);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001888 }
1889
1890 spin_unlock_bh(&si->lock);
Xiaoping Fan34586472015-07-03 02:20:35 -07001891
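	/*
	 * Only one matching connection is unlinked per pass; the flush below and
	 * the jump back to another_round run with si->lock released, and the walk
	 * restarts from the head because the list may have changed meanwhile.
	 */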
1892 if (c) {
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301893 if (ret) {
1894 sfe_ipv6_flush_connection(si, c, SFE_SYNC_REASON_DESTROY);
1895 }
Xiaoping Fan34586472015-07-03 02:20:35 -07001896 goto another_round;
1897 }
Xiaoping Fan978b3772015-05-27 14:15:18 -07001898}
1899
1900/*
1901 * sfe_ipv6_periodic_sync()
1902 */
Ken Zhu137722d2021-09-23 17:57:36 -07001903static void sfe_ipv6_periodic_sync(struct work_struct *work)
Xiaoping Fan978b3772015-05-27 14:15:18 -07001904{
Ken Zhu137722d2021-09-23 17:57:36 -07001905 struct sfe_ipv6 *si = container_of((struct delayed_work *)work, struct sfe_ipv6, sync_dwork);
Xiaoping Fan6a1672f2016-08-17 19:58:12 -07001906 u64 now_jiffies;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001907 int quota;
Ken Zhu7a43d882022-01-04 10:51:44 -08001908 sfe_ipv6_many_sync_callback_t sync_rule_callback;
Ken Zhu32b95392021-09-03 13:52:04 -07001909 struct sfe_ipv6_connection *c;
Ken Zhu7a43d882022-01-04 10:51:44 -08001910 struct sfe_ipv6_conn_sync *conn_sync;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001911
1912 now_jiffies = get_jiffies_64();
1913
1914 rcu_read_lock();
Ken Zhu7a43d882022-01-04 10:51:44 -08001915 sync_rule_callback = rcu_dereference(si->many_sync_callback);
1916 rcu_read_unlock();
Xiaoping Fan978b3772015-05-27 14:15:18 -07001917 if (!sync_rule_callback) {
Ken Zhu7a43d882022-01-04 10:51:44 -08001918 return;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001919 }
1920
1921 spin_lock_bh(&si->lock);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001922
1923 /*
Ken Zhu32b95392021-09-03 13:52:04 -07001924 * If we have reached the end of the connection list, walk from
1925 * the connection head.
1926 */
1927 c = si->wc_next;
1928 if (unlikely(!c)) {
1929 c = si->all_connections_head;
1930 }
Ken Zhu7a43d882022-01-04 10:51:44 -08001931
Ken Zhu32b95392021-09-03 13:52:04 -07001932 /*
Ken Zhu7a43d882022-01-04 10:51:44 -08001933 * Get the max number of connections to be put in this sync msg.
Xiaoping Fan978b3772015-05-27 14:15:18 -07001934 */
Ken Zhu7a43d882022-01-04 10:51:44 -08001935 quota = sfe_ipv6_sync_max_number;
1936 conn_sync = sfe_ipv6_sync_many_msg->msg.conn_stats_many.conn_sync;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001937
1938 /*
Ken Zhu32b95392021-09-03 13:52:04 -07001939 * Walk the "all connection" list and sync the connection state.
Xiaoping Fan978b3772015-05-27 14:15:18 -07001940 */
Ken Zhu32b95392021-09-03 13:52:04 -07001941 while (likely(c && quota)) {
Xiaoping Fan978b3772015-05-27 14:15:18 -07001942 struct sfe_ipv6_connection_match *cm;
1943 struct sfe_ipv6_connection_match *counter_cm;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001944 struct sfe_connection_sync sis;
1945
Ken Zhu32b95392021-09-03 13:52:04 -07001946 cm = c->original_match;
1947 counter_cm = c->reply_match;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001948
1949 /*
Ken Zhu32b95392021-09-03 13:52:04 -07001950		 * If we didn't receive packets in the original direction or the reply
1951		 * direction, move to the next connection.
Xiaoping Fan978b3772015-05-27 14:15:18 -07001952 */
Ken Zhu32b95392021-09-03 13:52:04 -07001953 if (!atomic_read(&cm->rx_packet_count) && !atomic_read(&counter_cm->rx_packet_count)) {
1954 c = c->all_connections_next;
1955 continue;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001956 }
1957
Xiaoping Fan978b3772015-05-27 14:15:18 -07001958 /*
1959 * Sync the connection state.
1960 */
Xiaoping Fan99cb4c12015-08-21 19:07:32 -07001961 sfe_ipv6_gen_sync_connection(si, c, &sis, SFE_SYNC_REASON_STATS, now_jiffies);
Ken Zhu7a43d882022-01-04 10:51:44 -08001962 sfe_ipv6_stats_convert(conn_sync, &sis);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001963
Ken Zhu7a43d882022-01-04 10:51:44 -08001964 quota--;
1965 conn_sync++;
1966 c = c->all_connections_next;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001967 }
1968
Ken Zhu32b95392021-09-03 13:52:04 -07001969 /*
1970	 * At the end of the loop, set wc_next to the connection where we left off.
1971 */
1972 si->wc_next = c;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001973 spin_unlock_bh(&si->lock);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001974
Ken Zhu7a43d882022-01-04 10:51:44 -08001975 if (c == NULL) {
1976 DEBUG_INFO("Synced all connections\n");
1977 sfe_ipv6_sync_many_msg->msg.conn_stats_many.next = 0;
1978 } else {
1979 DEBUG_INFO("Some connections left\n");
1980 sfe_ipv6_sync_many_msg->msg.conn_stats_many.next = sfe_ipv6_sync_max_number - quota;
1981 }
1982 DEBUG_INFO("Synced [%d] connections\n", (sfe_ipv6_sync_max_number - quota));
1983
1984 sfe_ipv6_sync_many_msg->msg.conn_stats_many.count = sfe_ipv6_sync_max_number - quota;
1985 sfe_ipv6_sync_many_msg->cm.response = SFE_CMN_RESPONSE_ACK;
1986
1987 sync_rule_callback(sfe_ipv6_sync_many_msg);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001988}
1989
1990/*
1991 * sfe_ipv6_debug_dev_read_start()
1992 * Generate part of the XML output.
1993 */
1994static bool sfe_ipv6_debug_dev_read_start(struct sfe_ipv6 *si, char *buffer, char *msg, size_t *length,
1995 int *total_read, struct sfe_ipv6_debug_xml_write_state *ws)
1996{
1997 int bytes_read;
1998
Xiaoping Fan34586472015-07-03 02:20:35 -07001999 si->debug_read_seq++;
2000
Xiaoping Fan978b3772015-05-27 14:15:18 -07002001 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE, "<sfe_ipv6>\n");
2002 if (copy_to_user(buffer + *total_read, msg, CHAR_DEV_MSG_SIZE)) {
2003 return false;
2004 }
2005
2006 *length -= bytes_read;
2007 *total_read += bytes_read;
2008
2009 ws->state++;
2010 return true;
2011}
2012
2013/*
2014 * sfe_ipv6_debug_dev_read_connections_start()
2015 * Generate part of the XML output.
2016 */
2017static bool sfe_ipv6_debug_dev_read_connections_start(struct sfe_ipv6 *si, char *buffer, char *msg, size_t *length,
2018 int *total_read, struct sfe_ipv6_debug_xml_write_state *ws)
2019{
2020 int bytes_read;
2021
2022 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE, "\t<connections>\n");
2023 if (copy_to_user(buffer + *total_read, msg, CHAR_DEV_MSG_SIZE)) {
2024 return false;
2025 }
2026
2027 *length -= bytes_read;
2028 *total_read += bytes_read;
2029
2030 ws->state++;
2031 return true;
2032}
2033
2034/*
2035 * sfe_ipv6_debug_dev_read_connections_connection()
2036 * Generate part of the XML output.
2037 */
2038static bool sfe_ipv6_debug_dev_read_connections_connection(struct sfe_ipv6 *si, char *buffer, char *msg, size_t *length,
2039 int *total_read, struct sfe_ipv6_debug_xml_write_state *ws)
2040{
2041 struct sfe_ipv6_connection *c;
Xiaoping Fan978b3772015-05-27 14:15:18 -07002042 struct sfe_ipv6_connection_match *original_cm;
2043 struct sfe_ipv6_connection_match *reply_cm;
2044 int bytes_read;
2045 int protocol;
2046 struct net_device *src_dev;
2047 struct sfe_ipv6_addr src_ip;
2048 struct sfe_ipv6_addr src_ip_xlate;
2049 __be16 src_port;
2050 __be16 src_port_xlate;
Xiaoping Fan6a1672f2016-08-17 19:58:12 -07002051 u64 src_rx_packets;
2052 u64 src_rx_bytes;
Xiaoping Fan978b3772015-05-27 14:15:18 -07002053 struct net_device *dest_dev;
2054 struct sfe_ipv6_addr dest_ip;
2055 struct sfe_ipv6_addr dest_ip_xlate;
2056 __be16 dest_port;
2057 __be16 dest_port_xlate;
Xiaoping Fan6a1672f2016-08-17 19:58:12 -07002058 u64 dest_rx_packets;
2059 u64 dest_rx_bytes;
2060 u64 last_sync_jiffies;
Ken Zhu37040ea2021-09-09 21:11:15 -07002061 u32 src_mark, dest_mark, src_priority, dest_priority, src_dscp, dest_dscp;
Parikshit Guned31a8202022-01-05 22:15:04 +05302062 bool original_cm_sawf_valid, reply_cm_sawf_valid;
2063 u32 flow_service_class, return_service_class;
2064 u32 flow_msduq, return_msduq;
Guduri Prathyushaeb31c902021-11-10 20:18:50 +05302065 u32 packet, byte, original_cm_flags;
2066 u16 pppoe_session_id;
2067 u8 pppoe_remote_mac[ETH_ALEN];
Ken Zhu7e38d1a2021-11-30 17:31:46 -08002068 u32 original_fast_xmit, reply_fast_xmit;
Xiaoping Fan978b3772015-05-27 14:15:18 -07002069#ifdef CONFIG_NF_FLOW_COOKIE
2070 int src_flow_cookie, dst_flow_cookie;
2071#endif
2072
2073 spin_lock_bh(&si->lock);
Xiaoping Fan34586472015-07-03 02:20:35 -07002074
2075 for (c = si->all_connections_head; c; c = c->all_connections_next) {
2076 if (c->debug_read_seq < si->debug_read_seq) {
2077 c->debug_read_seq = si->debug_read_seq;
2078 break;
2079 }
2080 }
Xiaoping Fan978b3772015-05-27 14:15:18 -07002081
2082 /*
Xiaoping Fan34586472015-07-03 02:20:35 -07002083 * If there were no connections then move to the next state.
Xiaoping Fan978b3772015-05-27 14:15:18 -07002084 */
2085 if (!c) {
Xiaoping Fan978b3772015-05-27 14:15:18 -07002086 spin_unlock_bh(&si->lock);
Xiaoping Fan34586472015-07-03 02:20:35 -07002087 ws->state++;
Xiaoping Fan978b3772015-05-27 14:15:18 -07002088 return true;
2089 }
2090
2091 original_cm = c->original_match;
2092 reply_cm = c->reply_match;
2093
2094 protocol = c->protocol;
2095 src_dev = c->original_dev;
2096 src_ip = c->src_ip[0];
2097 src_ip_xlate = c->src_ip_xlate[0];
2098 src_port = c->src_port;
2099 src_port_xlate = c->src_port_xlate;
Xiaoping Fane1963d42015-08-25 17:06:19 -07002100 src_priority = original_cm->priority;
2101 src_dscp = original_cm->dscp >> SFE_IPV6_DSCP_SHIFT;
Xiaoping Fan978b3772015-05-27 14:15:18 -07002102
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05302103 sfe_ipv6_connection_match_update_summary_stats(original_cm, &packet, &byte);
2104 sfe_ipv6_connection_match_update_summary_stats(reply_cm, &packet, &byte);
Xiaoping Fan978b3772015-05-27 14:15:18 -07002105
2106 src_rx_packets = original_cm->rx_packet_count64;
2107 src_rx_bytes = original_cm->rx_byte_count64;
Ken Zhu37040ea2021-09-09 21:11:15 -07002108 src_mark = original_cm->mark;
Ken Zhu7e38d1a2021-11-30 17:31:46 -08002109 original_fast_xmit = original_cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_FAST_XMIT;
Xiaoping Fan978b3772015-05-27 14:15:18 -07002110 dest_dev = c->reply_dev;
2111 dest_ip = c->dest_ip[0];
2112 dest_ip_xlate = c->dest_ip_xlate[0];
2113 dest_port = c->dest_port;
2114 dest_port_xlate = c->dest_port_xlate;
Xiaoping Fane1963d42015-08-25 17:06:19 -07002115 dest_priority = reply_cm->priority;
2116 dest_dscp = reply_cm->dscp >> SFE_IPV6_DSCP_SHIFT;
Xiaoping Fan978b3772015-05-27 14:15:18 -07002117 dest_rx_packets = reply_cm->rx_packet_count64;
2118 dest_rx_bytes = reply_cm->rx_byte_count64;
2119 last_sync_jiffies = get_jiffies_64() - c->last_sync_jiffies;
Guduri Prathyushaeb31c902021-11-10 20:18:50 +05302120 original_cm_flags = original_cm->flags;
2121 pppoe_session_id = original_cm->pppoe_session_id;
2122 ether_addr_copy(pppoe_remote_mac, original_cm->pppoe_remote_mac);
Ken Zhu37040ea2021-09-09 21:11:15 -07002123 dest_mark = reply_cm->mark;
Ken Zhu7e38d1a2021-11-30 17:31:46 -08002124 reply_fast_xmit = reply_cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_FAST_XMIT;
Parikshit Guned31a8202022-01-05 22:15:04 +05302125 original_cm_sawf_valid = original_cm->sawf_valid;
2126 reply_cm_sawf_valid = reply_cm->sawf_valid;
2127 flow_service_class = SFE_GET_SAWF_SERVICE_CLASS(original_cm->mark);
2128 flow_msduq = SFE_GET_SAWF_MSDUQ(original_cm->mark);
2129 return_service_class = SFE_GET_SAWF_SERVICE_CLASS(reply_cm->mark);
2130 return_msduq = SFE_GET_SAWF_MSDUQ(reply_cm->mark);
2131
Xiaoping Fan978b3772015-05-27 14:15:18 -07002132#ifdef CONFIG_NF_FLOW_COOKIE
2133 src_flow_cookie = original_cm->flow_cookie;
2134 dst_flow_cookie = reply_cm->flow_cookie;
2135#endif
2136 spin_unlock_bh(&si->lock);
2137
2138 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE, "\t\t<connection "
2139 "protocol=\"%u\" "
2140 "src_dev=\"%s\" "
2141 "src_ip=\"%pI6\" src_ip_xlate=\"%pI6\" "
2142 "src_port=\"%u\" src_port_xlate=\"%u\" "
Xiaoping Fane1963d42015-08-25 17:06:19 -07002143 "src_priority=\"%u\" src_dscp=\"%u\" "
Xiaoping Fan978b3772015-05-27 14:15:18 -07002144 "src_rx_pkts=\"%llu\" src_rx_bytes=\"%llu\" "
Ken Zhu37040ea2021-09-09 21:11:15 -07002145 "src_mark=\"%08x\" "
Ken Zhu7e38d1a2021-11-30 17:31:46 -08002146 "src_fast_xmit=\"%s\" "
Xiaoping Fan978b3772015-05-27 14:15:18 -07002147 "dest_dev=\"%s\" "
2148 "dest_ip=\"%pI6\" dest_ip_xlate=\"%pI6\" "
2149 "dest_port=\"%u\" dest_port_xlate=\"%u\" "
Xiaoping Fane1963d42015-08-25 17:06:19 -07002150 "dest_priority=\"%u\" dest_dscp=\"%u\" "
Xiaoping Fan978b3772015-05-27 14:15:18 -07002151 "dest_rx_pkts=\"%llu\" dest_rx_bytes=\"%llu\" "
Ken Zhu37040ea2021-09-09 21:11:15 -07002152 "dest_mark=\"%08x\" "
Ken Zhu7e38d1a2021-11-30 17:31:46 -08002153 "reply_fast_xmit=\"%s\" "
Xiaoping Fan978b3772015-05-27 14:15:18 -07002154#ifdef CONFIG_NF_FLOW_COOKIE
2155 "src_flow_cookie=\"%d\" dst_flow_cookie=\"%d\" "
2156#endif
Ken Zhu37040ea2021-09-09 21:11:15 -07002157 "last_sync=\"%llu\" ",
Xiaoping Fan978b3772015-05-27 14:15:18 -07002158 protocol,
2159 src_dev->name,
2160 &src_ip, &src_ip_xlate,
2161 ntohs(src_port), ntohs(src_port_xlate),
Xiaoping Fane1963d42015-08-25 17:06:19 -07002162 src_priority, src_dscp,
Xiaoping Fan978b3772015-05-27 14:15:18 -07002163 src_rx_packets, src_rx_bytes,
Ken Zhu37040ea2021-09-09 21:11:15 -07002164 src_mark,
Ken Zhu7e38d1a2021-11-30 17:31:46 -08002165 original_fast_xmit ? "Yes" : "No",
Xiaoping Fan978b3772015-05-27 14:15:18 -07002166 dest_dev->name,
2167 &dest_ip, &dest_ip_xlate,
2168 ntohs(dest_port), ntohs(dest_port_xlate),
Xiaoping Fane1963d42015-08-25 17:06:19 -07002169 dest_priority, dest_dscp,
Xiaoping Fan978b3772015-05-27 14:15:18 -07002170 dest_rx_packets, dest_rx_bytes,
Ken Zhu37040ea2021-09-09 21:11:15 -07002171 dest_mark,
Ken Zhu7e38d1a2021-11-30 17:31:46 -08002172 reply_fast_xmit ? "Yes" : "No",
Xiaoping Fan978b3772015-05-27 14:15:18 -07002173#ifdef CONFIG_NF_FLOW_COOKIE
2174 src_flow_cookie, dst_flow_cookie,
2175#endif
Ken Zhu37040ea2021-09-09 21:11:15 -07002176 last_sync_jiffies);
Xiaoping Fan978b3772015-05-27 14:15:18 -07002177
Guduri Prathyushaeb31c902021-11-10 20:18:50 +05302178	if (original_cm_flags & (SFE_IPV6_CONNECTION_MATCH_FLAG_PPPOE_DECAP | SFE_IPV6_CONNECTION_MATCH_FLAG_PPPOE_ENCAP)) {
Guduri Prathyusha79a5fee2021-11-11 17:59:10 +05302179 bytes_read += snprintf(msg + bytes_read, CHAR_DEV_MSG_SIZE, "pppoe_session_id=\"%u\" pppoe_server_MAC=\"%pM\" ",
Guduri Prathyushaeb31c902021-11-10 20:18:50 +05302180 pppoe_session_id, pppoe_remote_mac);
2181 }
2182
Parikshit Guned31a8202022-01-05 22:15:04 +05302183 if (original_cm_sawf_valid) {
Parikshit Gunefdd98652022-03-14 17:33:01 +05302184		bytes_read += snprintf(msg + bytes_read, CHAR_DEV_MSG_SIZE, "flow_service_class=\"%d\" flow_msduq=\"0x%x\" ",
Parikshit Guned31a8202022-01-05 22:15:04 +05302185 flow_service_class, flow_msduq);
2186 }
2187
2188 if (reply_cm_sawf_valid) {
Parikshit Gunefdd98652022-03-14 17:33:01 +05302189		bytes_read += snprintf(msg + bytes_read, CHAR_DEV_MSG_SIZE, "return_service_class=\"%d\" return_msduq=\"0x%x\" ",
Parikshit Guned31a8202022-01-05 22:15:04 +05302190 return_service_class, return_msduq);
2191 }
2192
Guduri Prathyushaeb31c902021-11-10 20:18:50 +05302193	bytes_read += snprintf(msg + bytes_read, CHAR_DEV_MSG_SIZE, "/>\n");
2194
Xiaoping Fan978b3772015-05-27 14:15:18 -07002195 if (copy_to_user(buffer + *total_read, msg, CHAR_DEV_MSG_SIZE)) {
2196 return false;
2197 }
2198
2199 *length -= bytes_read;
2200 *total_read += bytes_read;
2201
Xiaoping Fan978b3772015-05-27 14:15:18 -07002202 return true;
2203}
2204
2205/*
2206 * sfe_ipv6_debug_dev_read_connections_end()
2207 * Generate part of the XML output.
2208 */
2209static bool sfe_ipv6_debug_dev_read_connections_end(struct sfe_ipv6 *si, char *buffer, char *msg, size_t *length,
2210 int *total_read, struct sfe_ipv6_debug_xml_write_state *ws)
2211{
2212 int bytes_read;
2213
2214 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE, "\t</connections>\n");
2215 if (copy_to_user(buffer + *total_read, msg, CHAR_DEV_MSG_SIZE)) {
2216 return false;
2217 }
2218
2219 *length -= bytes_read;
2220 *total_read += bytes_read;
2221
2222 ws->state++;
2223 return true;
2224}
2225
2226/*
2227 * sfe_ipv6_debug_dev_read_exceptions_start()
2228 * Generate part of the XML output.
2229 */
2230static bool sfe_ipv6_debug_dev_read_exceptions_start(struct sfe_ipv6 *si, char *buffer, char *msg, size_t *length,
2231 int *total_read, struct sfe_ipv6_debug_xml_write_state *ws)
2232{
2233 int bytes_read;
2234
2235 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE, "\t<exceptions>\n");
2236 if (copy_to_user(buffer + *total_read, msg, CHAR_DEV_MSG_SIZE)) {
2237 return false;
2238 }
2239
2240 *length -= bytes_read;
2241 *total_read += bytes_read;
2242
2243 ws->state++;
2244 return true;
2245}
2246
2247/*
2248 * sfe_ipv6_debug_dev_read_exceptions_exception()
2249 * Generate part of the XML output.
2250 */
2251static bool sfe_ipv6_debug_dev_read_exceptions_exception(struct sfe_ipv6 *si, char *buffer, char *msg, size_t *length,
2252 int *total_read, struct sfe_ipv6_debug_xml_write_state *ws)
2253{
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05302254 int i;
2255 u64 val = 0;
Xiaoping Fan978b3772015-05-27 14:15:18 -07002256
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05302257 for_each_possible_cpu(i) {
2258 const struct sfe_ipv6_stats *s = per_cpu_ptr(si->stats_pcpu, i);
2259 val += s->exception_events64[ws->iter_exception];
2260 }
Xiaoping Fan978b3772015-05-27 14:15:18 -07002261
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05302262 if (val) {
Xiaoping Fan978b3772015-05-27 14:15:18 -07002263 int bytes_read;
2264
2265 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE,
2266 "\t\t<exception name=\"%s\" count=\"%llu\" />\n",
2267 sfe_ipv6_exception_events_string[ws->iter_exception],
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05302268 val);
2269
Xiaoping Fan978b3772015-05-27 14:15:18 -07002270 if (copy_to_user(buffer + *total_read, msg, CHAR_DEV_MSG_SIZE)) {
2271 return false;
2272 }
2273
2274 *length -= bytes_read;
2275 *total_read += bytes_read;
2276 }
2277
2278 ws->iter_exception++;
2279 if (ws->iter_exception >= SFE_IPV6_EXCEPTION_EVENT_LAST) {
2280 ws->iter_exception = 0;
2281 ws->state++;
2282 }
2283
2284 return true;
2285}
2286
2287/*
2288 * sfe_ipv6_debug_dev_read_exceptions_end()
2289 * Generate part of the XML output.
2290 */
2291static bool sfe_ipv6_debug_dev_read_exceptions_end(struct sfe_ipv6 *si, char *buffer, char *msg, size_t *length,
2292 int *total_read, struct sfe_ipv6_debug_xml_write_state *ws)
2293{
2294 int bytes_read;
2295
2296 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE, "\t</exceptions>\n");
2297 if (copy_to_user(buffer + *total_read, msg, CHAR_DEV_MSG_SIZE)) {
2298 return false;
2299 }
2300
2301 *length -= bytes_read;
2302 *total_read += bytes_read;
2303
2304 ws->state++;
2305 return true;
2306}
2307
2308/*
2309 * sfe_ipv6_debug_dev_read_stats()
2310 * Generate part of the XML output.
2311 */
2312static bool sfe_ipv6_debug_dev_read_stats(struct sfe_ipv6 *si, char *buffer, char *msg, size_t *length,
2313 int *total_read, struct sfe_ipv6_debug_xml_write_state *ws)
2314{
2315 int bytes_read;
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05302316 struct sfe_ipv6_stats stats;
2317 unsigned int num_conn;
2318
2319 sfe_ipv6_update_summary_stats(si, &stats);
Xiaoping Fan978b3772015-05-27 14:15:18 -07002320
2321 spin_lock_bh(&si->lock);
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05302322 num_conn = si->num_connections;
Xiaoping Fan978b3772015-05-27 14:15:18 -07002323 spin_unlock_bh(&si->lock);
2324
2325 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE, "\t<stats "
2326 "num_connections=\"%u\" "
Suruchi Suman23a279d2021-11-16 15:13:09 +05302327 "pkts_dropped=\"%llu\" "
Ken Zhu7e38d1a2021-11-30 17:31:46 -08002328 "pkts_fast_xmited=\"%llu\" "
Xiaoping Fan978b3772015-05-27 14:15:18 -07002329 "pkts_forwarded=\"%llu\" pkts_not_forwarded=\"%llu\" "
2330 "create_requests=\"%llu\" create_collisions=\"%llu\" "
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05302331 "create_failures=\"%llu\" "
Xiaoping Fan978b3772015-05-27 14:15:18 -07002332 "destroy_requests=\"%llu\" destroy_misses=\"%llu\" "
2333 "flushes=\"%llu\" "
Guduri Prathyusha647fe3e2021-11-22 19:17:51 +05302334 "hash_hits=\"%llu\" hash_reorders=\"%llu\" "
2335 "pppoe_encap_pkts_fwded=\"%llu\" "
Guduri Prathyusha034d6352022-01-12 16:49:04 +05302336 "pppoe_decap_pkts_fwded=\"%llu\" "
2337 "pppoe_bridge_pkts_fwded=\"%llu\" />\n",
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05302338
2339 num_conn,
Suruchi Suman23a279d2021-11-16 15:13:09 +05302340 stats.packets_dropped64,
Ken Zhu7e38d1a2021-11-30 17:31:46 -08002341 stats.packets_fast_xmited64,
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05302342 stats.packets_forwarded64,
2343 stats.packets_not_forwarded64,
2344 stats.connection_create_requests64,
2345 stats.connection_create_collisions64,
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05302346 stats.connection_create_failures64,
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05302347 stats.connection_destroy_requests64,
2348 stats.connection_destroy_misses64,
2349 stats.connection_flushes64,
2350 stats.connection_match_hash_hits64,
Guduri Prathyusha647fe3e2021-11-22 19:17:51 +05302351 stats.connection_match_hash_reorders64,
2352 stats.pppoe_encap_packets_forwarded64,
Guduri Prathyusha034d6352022-01-12 16:49:04 +05302353 stats.pppoe_decap_packets_forwarded64,
2354 stats.pppoe_bridge_packets_forwarded64);
Xiaoping Fan978b3772015-05-27 14:15:18 -07002355 if (copy_to_user(buffer + *total_read, msg, CHAR_DEV_MSG_SIZE)) {
2356 return false;
2357 }
2358
2359 *length -= bytes_read;
2360 *total_read += bytes_read;
2361
2362 ws->state++;
2363 return true;
2364}
2365
2366/*
2367 * sfe_ipv6_debug_dev_read_end()
2368 * Generate part of the XML output.
2369 */
2370static bool sfe_ipv6_debug_dev_read_end(struct sfe_ipv6 *si, char *buffer, char *msg, size_t *length,
2371 int *total_read, struct sfe_ipv6_debug_xml_write_state *ws)
2372{
2373 int bytes_read;
2374
2375 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE, "</sfe_ipv6>\n");
2376 if (copy_to_user(buffer + *total_read, msg, CHAR_DEV_MSG_SIZE)) {
2377 return false;
2378 }
2379
2380 *length -= bytes_read;
2381 *total_read += bytes_read;
2382
2383 ws->state++;
2384 return true;
2385}
2386
2387/*
2388 * Array of write functions that write various XML elements that correspond to
2389 * our XML output state machine.
2390 */
2391static sfe_ipv6_debug_xml_write_method_t sfe_ipv6_debug_xml_write_methods[SFE_IPV6_DEBUG_XML_STATE_DONE] = {
2392 sfe_ipv6_debug_dev_read_start,
2393 sfe_ipv6_debug_dev_read_connections_start,
2394 sfe_ipv6_debug_dev_read_connections_connection,
2395 sfe_ipv6_debug_dev_read_connections_end,
2396 sfe_ipv6_debug_dev_read_exceptions_start,
2397 sfe_ipv6_debug_dev_read_exceptions_exception,
2398 sfe_ipv6_debug_dev_read_exceptions_end,
2399 sfe_ipv6_debug_dev_read_stats,
2400 sfe_ipv6_debug_dev_read_end,
2401};
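/*
 * The order of the handlers above must match the SFE_IPV6_DEBUG_XML_STATE_*
 * enumeration: sfe_ipv6_debug_dev_read() indexes this array directly with
 * ws->state, and each handler advances ws->state once its output is complete.
 */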
2402
2403/*
2404 * sfe_ipv6_debug_dev_read()
2405 * Send info to userspace upon read request from user
2406 */
2407static ssize_t sfe_ipv6_debug_dev_read(struct file *filp, char *buffer, size_t length, loff_t *offset)
2408{
2409 char msg[CHAR_DEV_MSG_SIZE];
2410 int total_read = 0;
2411 struct sfe_ipv6_debug_xml_write_state *ws;
2412 struct sfe_ipv6 *si = &__si6;
2413
2414 ws = (struct sfe_ipv6_debug_xml_write_state *)filp->private_data;
2415 while ((ws->state != SFE_IPV6_DEBUG_XML_STATE_DONE) && (length > CHAR_DEV_MSG_SIZE)) {
2416 if ((sfe_ipv6_debug_xml_write_methods[ws->state])(si, buffer, msg, &length, &total_read, ws)) {
2417 continue;
2418 }
2419 }
Xiaoping Fan978b3772015-05-27 14:15:18 -07002420 return total_read;
2421}
2422
2423/*
Xiaoping Fan978b3772015-05-27 14:15:18 -07002424 * sfe_ipv6_debug_dev_open()
2425 */
2426static int sfe_ipv6_debug_dev_open(struct inode *inode, struct file *file)
2427{
2428 struct sfe_ipv6_debug_xml_write_state *ws;
2429
2430 ws = (struct sfe_ipv6_debug_xml_write_state *)file->private_data;
2431 if (ws) {
2432 return 0;
2433 }
2434
2435 ws = kzalloc(sizeof(struct sfe_ipv6_debug_xml_write_state), GFP_KERNEL);
2436 if (!ws) {
2437 return -ENOMEM;
2438 }
2439
2440 ws->state = SFE_IPV6_DEBUG_XML_STATE_START;
2441 file->private_data = ws;
2442
2443 return 0;
2444}
2445
2446/*
2447 * sfe_ipv6_debug_dev_release()
2448 */
2449static int sfe_ipv6_debug_dev_release(struct inode *inode, struct file *file)
2450{
2451 struct sfe_ipv6_debug_xml_write_state *ws;
Xiaoping Fan978b3772015-05-27 14:15:18 -07002452
2453 ws = (struct sfe_ipv6_debug_xml_write_state *)file->private_data;
Xiaoping Fan34586472015-07-03 02:20:35 -07002454 if (ws) {
2455 /*
2456 * We've finished with our output so free the write state.
2457 */
2458 kfree(ws);
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05302459 file->private_data = NULL;
Xiaoping Fan978b3772015-05-27 14:15:18 -07002460 }
2461
Xiaoping Fan978b3772015-05-27 14:15:18 -07002462 return 0;
2463}
2464
2465/*
2466 * File operations used in the debug char device
2467 */
2468static struct file_operations sfe_ipv6_debug_dev_fops = {
2469 .read = sfe_ipv6_debug_dev_read,
Xiaoping Fan978b3772015-05-27 14:15:18 -07002470 .open = sfe_ipv6_debug_dev_open,
2471 .release = sfe_ipv6_debug_dev_release
2472};
2473
2474#ifdef CONFIG_NF_FLOW_COOKIE
2475/*
2476 * sfe_ipv6_register_flow_cookie_cb
2477 *	Register a function with SFE so that SFE can use it to configure a flow cookie for a flow.
2478 *
2479 * A hardware driver that supports flow cookies should register a callback function with SFE. SFE
2480 * can then use this function to configure a flow cookie for a flow.
2481 * return: 0, success; !=0, fail
2482 */
2483int sfe_ipv6_register_flow_cookie_cb(sfe_ipv6_flow_cookie_set_func_t cb)
2484{
2485 struct sfe_ipv6 *si = &__si6;
2486
2487 BUG_ON(!cb);
2488
2489 if (si->flow_cookie_set_func) {
2490 return -1;
2491 }
2492
2493 rcu_assign_pointer(si->flow_cookie_set_func, cb);
2494 return 0;
2495}
2496
2497/*
2498 * sfe_ipv6_unregister_flow_cookie_cb
2499 *	Unregister the function used to configure a flow cookie for a flow.
2500 *
2501 * return: 0, success; !=0, fail
2502 */
2503int sfe_ipv6_unregister_flow_cookie_cb(sfe_ipv6_flow_cookie_set_func_t cb)
2504{
2505 struct sfe_ipv6 *si = &__si6;
2506
2507 RCU_INIT_POINTER(si->flow_cookie_set_func, NULL);
2508 return 0;
2509}
Xiaoping Fan640faf42015-08-28 15:50:55 -07002510
2511/*
2512 * sfe_ipv6_get_flow_cookie()
2513 */
2514static ssize_t sfe_ipv6_get_flow_cookie(struct device *dev,
2515 struct device_attribute *attr,
2516 char *buf)
2517{
2518 struct sfe_ipv6 *si = &__si6;
Xiaoping Fan01c67cc2015-11-09 11:31:57 -08002519 return snprintf(buf, (ssize_t)PAGE_SIZE, "%d\n", si->flow_cookie_enable);
Xiaoping Fan640faf42015-08-28 15:50:55 -07002520}
2521
2522/*
2523 * sfe_ipv6_set_flow_cookie()
2524 */
2525static ssize_t sfe_ipv6_set_flow_cookie(struct device *dev,
2526 struct device_attribute *attr,
2527 const char *buf, size_t size)
2528{
2529 struct sfe_ipv6 *si = &__si6;
Ken Zhu137722d2021-09-23 17:57:36 -07002530	si->flow_cookie_enable = simple_strtol(buf, NULL, 0);
Xiaoping Fan640faf42015-08-28 15:50:55 -07002531
2532 return size;
2533}
2534
2535/*
2536 * sysfs attributes.
2537 */
2538static const struct device_attribute sfe_ipv6_flow_cookie_attr =
Xiaoping Fane70da412016-02-26 16:47:57 -08002539 __ATTR(flow_cookie_enable, S_IWUSR | S_IRUGO, sfe_ipv6_get_flow_cookie, sfe_ipv6_set_flow_cookie);
Xiaoping Fan978b3772015-05-27 14:15:18 -07002540#endif /*CONFIG_NF_FLOW_COOKIE*/
2541
Ken Zhu137722d2021-09-23 17:57:36 -07002542/*
2543 * sfe_ipv6_get_cpu()
2544 */
2545static ssize_t sfe_ipv6_get_cpu(struct device *dev,
2546 struct device_attribute *attr,
2547 char *buf)
2548{
2549 struct sfe_ipv6 *si = &__si6;
2550 return snprintf(buf, (ssize_t)PAGE_SIZE, "%d\n", si->work_cpu);
2551}
2552
2553/*
Wayne Tanbb7f1782021-12-13 11:16:04 -08002554 * sfe_ipv6_set_cpu()
Ken Zhu137722d2021-09-23 17:57:36 -07002555 */
2556static ssize_t sfe_ipv6_set_cpu(struct device *dev,
2557 struct device_attribute *attr,
2558 const char *buf, size_t size)
2559{
2560 struct sfe_ipv6 *si = &__si6;
2561 int work_cpu;
2562
2563 work_cpu = simple_strtol(buf, NULL, 0);
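	/*
	 * 0..NR_CPUS-1 pins the stats work to that CPU; NR_CPUS itself is also
	 * accepted because it equals WORK_CPU_UNBOUND, i.e. no pinning.
	 */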
2564 if ((work_cpu >= 0) && (work_cpu <= NR_CPUS)) {
2565 si->work_cpu = work_cpu;
2566 } else {
		dev_err(dev, "%s is not in the valid range [0,%d]\n", buf, NR_CPUS);
2568 }
2569
2570 return size;
2571}

/*
2573 * sysfs attributes.
2574 */
2575static const struct device_attribute sfe_ipv6_cpu_attr =
2576 __ATTR(stat_work_cpu, S_IWUSR | S_IRUGO, sfe_ipv6_get_cpu, sfe_ipv6_set_cpu);
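
/*
 * Usage note (illustrative shell commands): once sfe_ipv6_init() has created
 * the "sfe_ipv6" kobject below, these knobs appear under /sys/sfe_ipv6/, e.g.
 *
 *	echo 2 > /sys/sfe_ipv6/stat_work_cpu		(pin the periodic stats sync to CPU 2)
 *	cat /sys/sfe_ipv6/stat_work_cpu
 *	echo 1 > /sys/sfe_ipv6/flow_cookie_enable	(only with CONFIG_NF_FLOW_COOKIE)
 */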
2577
/*
 * sfe_ipv6_conn_match_hash_init()
2580 * Initialize conn match hash lists
2581 */
2582static void sfe_ipv6_conn_match_hash_init(struct sfe_ipv6 *si, int len)
2583{
2584 struct hlist_head *hash_list = si->hlist_conn_match_hash_head;
2585 int i;
2586
2587 for (i = 0; i < len; i++) {
2588 INIT_HLIST_HEAD(&hash_list[i]);
2589 }
2590}
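
/*
 * Sketch (not the module's actual code): the usual add/lookup pattern for a
 * table initialised this way. The entry type, its fields and the hash
 * computation are hypothetical stand-ins for the real connection-match
 * helpers elsewhere in this file.
 *
 *	struct example_entry {
 *		struct hlist_node node;
 *		u32 hash;
 *	};
 *
 *	Insert, under si->lock:
 *		hlist_add_head_rcu(&entry->node,
 *				   &si->hlist_conn_match_hash_head[entry->hash % ARRAY_SIZE(si->hlist_conn_match_hash_head)]);
 *
 *	Lockless lookup, under rcu_read_lock():
 *		hlist_for_each_entry_rcu(entry,
 *					 &si->hlist_conn_match_hash_head[hash % ARRAY_SIZE(si->hlist_conn_match_hash_head)], node) {
 *			...
 *		}
 */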
2591
Suruchi Suman23a279d2021-11-16 15:13:09 +05302592#ifdef SFE_PROCESS_LOCAL_OUT
2593/*
2594 * sfe_ipv6_local_out()
2595 * Called for packets from ip_local_out() - post encapsulation & other packets
2596 */
2597static unsigned int sfe_ipv6_local_out(void *priv,
2598 struct sk_buff *skb,
2599 const struct nf_hook_state *nhs)
2600{
Nitin Shettyc28f8172022-02-04 16:23:46 +05302601 struct sfe_l2_info l2_info = {0};
2602
Suruchi Suman23a279d2021-11-16 15:13:09 +05302603 DEBUG_TRACE("sfe: sfe_ipv6_local_out hook called.\n");
2604
2605 if (likely(skb->skb_iif)) {
Nitin Shettyc28f8172022-02-04 16:23:46 +05302606 return sfe_ipv6_recv(skb->dev, skb, &l2_info, true) ? NF_STOLEN : NF_ACCEPT;
Suruchi Suman23a279d2021-11-16 15:13:09 +05302607 }
2608
2609 return NF_ACCEPT;
2610}
2611
2612/*
2613 * struct nf_hook_ops sfe_ipv6_ops_local_out[]
2614 * Hooks into netfilter local out packet monitoring points.
2615 */
2616static struct nf_hook_ops sfe_ipv6_ops_local_out[] __read_mostly = {
2617
2618 /*
2619 * Local out routing hook is used to monitor packets.
2620 */
2621 {
2622 .hook = sfe_ipv6_local_out,
2623 .pf = PF_INET6,
2624 .hooknum = NF_INET_LOCAL_OUT,
2625 .priority = NF_IP6_PRI_FIRST,
2626 },
2627};
2628#endif
2629
Xiaoping Fan978b3772015-05-27 14:15:18 -07002630/*
2631 * sfe_ipv6_init()
2632 */
Ratheesh Kannoth24fb1db2021-10-20 07:28:06 +05302633int sfe_ipv6_init(void)
Xiaoping Fan978b3772015-05-27 14:15:18 -07002634{
2635 struct sfe_ipv6 *si = &__si6;
2636 int result = -1;
2637
2638 DEBUG_INFO("SFE IPv6 init\n");
2639
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05302640 sfe_ipv6_conn_match_hash_init(si, ARRAY_SIZE(si->hlist_conn_match_hash_head));
2641
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05302642 si->stats_pcpu = alloc_percpu_gfp(struct sfe_ipv6_stats, GFP_KERNEL | __GFP_ZERO);
2643 if (!si->stats_pcpu) {
2644 DEBUG_ERROR("failed to allocate stats memory for sfe_ipv6\n");
2645 goto exit0;
2646 }
2647
Xiaoping Fan978b3772015-05-27 14:15:18 -07002648 /*
	 * Allocate per-CPU, per-service-class stats memory.
2650 */
2651 si->stats_pcpu_psc = alloc_percpu_gfp(struct sfe_ipv6_service_class_stats_db,
2652 GFP_KERNEL | __GFP_ZERO);
2653 if (!si->stats_pcpu_psc) {
2654 DEBUG_ERROR("failed to allocate per cpu per service clas stats memory\n");
2655 goto exit1;
2656 }
2657
2658 /*
Xiaoping Fan978b3772015-05-27 14:15:18 -07002659 * Create sys/sfe_ipv6
2660 */
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302661 si->sys_ipv6 = kobject_create_and_add("sfe_ipv6", NULL);
2662 if (!si->sys_ipv6) {
Xiaoping Fan978b3772015-05-27 14:15:18 -07002663 DEBUG_ERROR("failed to register sfe_ipv6\n");
Parikshit Guned31a8202022-01-05 22:15:04 +05302664 goto exit2;
Xiaoping Fan978b3772015-05-27 14:15:18 -07002665 }
2666
2667 /*
2668 * Create files, one for each parameter supported by this module.
2669 */
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302670 result = sysfs_create_file(si->sys_ipv6, &sfe_ipv6_debug_dev_attr.attr);
Xiaoping Fan978b3772015-05-27 14:15:18 -07002671 if (result) {
2672 DEBUG_ERROR("failed to register debug dev file: %d\n", result);
Parikshit Guned31a8202022-01-05 22:15:04 +05302673 goto exit3;
Xiaoping Fan978b3772015-05-27 14:15:18 -07002674 }
2675
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302676 result = sysfs_create_file(si->sys_ipv6, &sfe_ipv6_cpu_attr.attr);
Ken Zhu137722d2021-09-23 17:57:36 -07002677 if (result) {
2678 DEBUG_ERROR("failed to register debug dev file: %d\n", result);
Parikshit Guned31a8202022-01-05 22:15:04 +05302679 goto exit4;
Ken Zhu137722d2021-09-23 17:57:36 -07002680 }
2681
Xiaoping Fan640faf42015-08-28 15:50:55 -07002682#ifdef CONFIG_NF_FLOW_COOKIE
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302683 result = sysfs_create_file(si->sys_ipv6, &sfe_ipv6_flow_cookie_attr.attr);
Xiaoping Fan640faf42015-08-28 15:50:55 -07002684 if (result) {
2685 DEBUG_ERROR("failed to register flow cookie enable file: %d\n", result);
Parikshit Guned31a8202022-01-05 22:15:04 +05302686 goto exit5;
Xiaoping Fan640faf42015-08-28 15:50:55 -07002687 }
2688#endif /* CONFIG_NF_FLOW_COOKIE */
2689
Suruchi Suman23a279d2021-11-16 15:13:09 +05302690#ifdef SFE_PROCESS_LOCAL_OUT
2691#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0))
2692 result = nf_register_hooks(sfe_ipv6_ops_local_out, ARRAY_SIZE(sfe_ipv6_ops_local_out));
2693#else
2694 result = nf_register_net_hooks(&init_net, sfe_ipv6_ops_local_out, ARRAY_SIZE(sfe_ipv6_ops_local_out));
2695#endif
Suruchi Suman23a279d2021-11-16 15:13:09 +05302696 if (result < 0) {
2697 DEBUG_ERROR("can't register nf local out hook: %d\n", result);
Parikshit Guned31a8202022-01-05 22:15:04 +05302698 goto exit6;
Suruchi Suman23a279d2021-11-16 15:13:09 +05302699 }
	DEBUG_INFO("Registered nf local out hook successfully: %d\n", result);
2701#endif
Suruchi Suman23a279d2021-11-16 15:13:09 +05302702
Xiaoping Fan978b3772015-05-27 14:15:18 -07002703 /*
2704 * Register our debug char device.
2705 */
2706 result = register_chrdev(0, "sfe_ipv6", &sfe_ipv6_debug_dev_fops);
2707 if (result < 0) {
2708 DEBUG_ERROR("Failed to register chrdev: %d\n", result);
Parikshit Guned31a8202022-01-05 22:15:04 +05302709 goto exit7;
Xiaoping Fan978b3772015-05-27 14:15:18 -07002710 }
2711
2712 si->debug_dev = result;
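	/*
	 * Leave the periodic stats work unbound to any CPU until one is chosen
	 * via the stat_work_cpu sysfs file.
	 */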
Ken Zhu137722d2021-09-23 17:57:36 -07002713 si->work_cpu = WORK_CPU_UNBOUND;
Xiaoping Fan978b3772015-05-27 14:15:18 -07002714
2715 /*
Ken Zhu137722d2021-09-23 17:57:36 -07002716 * Create work to handle periodic statistics.
Xiaoping Fan978b3772015-05-27 14:15:18 -07002717 */
Ken Zhu137722d2021-09-23 17:57:36 -07002718 INIT_DELAYED_WORK(&(si->sync_dwork), sfe_ipv6_periodic_sync);
Xiaoping Fan978b3772015-05-27 14:15:18 -07002719
Ken Zhu7a43d882022-01-04 10:51:44 -08002720 sfe_ipv6_sync_many_msg = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!sfe_ipv6_sync_many_msg) {
		DEBUG_ERROR("failed to allocate the connection sync many message\n");
		result = -ENOMEM;
		goto exit8;
	}
2724
2725 sfe_ipv6_msg_init(sfe_ipv6_sync_many_msg, SFE_SPECIAL_INTERFACE_IPV6,
2726 SFE_TX_CONN_STATS_SYNC_MANY_MSG,
			  sizeof(struct sfe_ipv6_conn_sync_many_msg),
2728 NULL,
2729 NULL);
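
	/*
	 * Cap the number of connection sync records per message: the PAGE_SIZE
	 * buffer holds one struct sfe_ipv6_msg plus however many
	 * sfe_ipv6_conn_sync entries fit in the remaining space.
	 */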
2730 sfe_ipv6_sync_max_number = (PAGE_SIZE - sizeof(struct sfe_ipv6_msg)) / sizeof(struct sfe_ipv6_conn_sync);
2731
2732 spin_lock_init(&si->lock);
Xiaoping Fan978b3772015-05-27 14:15:18 -07002733 return 0;
2734
Ken Zhu7a43d882022-01-04 10:51:44 -08002735exit8:
2736 unregister_chrdev(si->debug_dev, "sfe_ipv6");
2737
Parikshit Guned31a8202022-01-05 22:15:04 +05302738exit7:
Suruchi Suman23a279d2021-11-16 15:13:09 +05302739#ifdef SFE_PROCESS_LOCAL_OUT
2740#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0))
2741 DEBUG_TRACE("sfe: Unregister local out hook\n");
2742 nf_unregister_hooks(sfe_ipv6_ops_local_out, ARRAY_SIZE(sfe_ipv6_ops_local_out));
2743#else
2744 DEBUG_TRACE("sfe: Unregister local out hook\n");
2745 nf_unregister_net_hooks(&init_net, sfe_ipv6_ops_local_out, ARRAY_SIZE(sfe_ipv6_ops_local_out));
2746#endif
Parikshit Guned31a8202022-01-05 22:15:04 +05302747exit6:
Murat Sezginc09b1322022-03-16 10:15:38 -07002748#endif
Xiaoping Fan640faf42015-08-28 15:50:55 -07002749#ifdef CONFIG_NF_FLOW_COOKIE
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302750 sysfs_remove_file(si->sys_ipv6, &sfe_ipv6_flow_cookie_attr.attr);
Xiaoping Fan640faf42015-08-28 15:50:55 -07002751
Parikshit Guned31a8202022-01-05 22:15:04 +05302752exit5:
Xiaoping Fan640faf42015-08-28 15:50:55 -07002753#endif /* CONFIG_NF_FLOW_COOKIE */
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302754 sysfs_remove_file(si->sys_ipv6, &sfe_ipv6_cpu_attr.attr);
Suruchi Suman23a279d2021-11-16 15:13:09 +05302755
Parikshit Guned31a8202022-01-05 22:15:04 +05302756exit4:
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302757 sysfs_remove_file(si->sys_ipv6, &sfe_ipv6_debug_dev_attr.attr);
Xiaoping Fan978b3772015-05-27 14:15:18 -07002758
Parikshit Guned31a8202022-01-05 22:15:04 +05302759exit3:
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302760 kobject_put(si->sys_ipv6);
Xiaoping Fan978b3772015-05-27 14:15:18 -07002761
Parikshit Guned31a8202022-01-05 22:15:04 +05302762exit2:
2763 free_percpu(si->stats_pcpu_psc);
2764
Xiaoping Fan978b3772015-05-27 14:15:18 -07002765exit1:
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05302766 free_percpu(si->stats_pcpu);
2767
2768exit0:
Xiaoping Fan978b3772015-05-27 14:15:18 -07002769 return result;
2770}
2771
2772/*
2773 * sfe_ipv6_exit()
2774 */
Ratheesh Kannoth24fb1db2021-10-20 07:28:06 +05302775void sfe_ipv6_exit(void)
Xiaoping Fan978b3772015-05-27 14:15:18 -07002776{
2777 struct sfe_ipv6 *si = &__si6;
2778
2779 DEBUG_INFO("SFE IPv6 exit\n");
2780
2781 /*
2782 * Destroy all connections.
2783 */
2784 sfe_ipv6_destroy_all_rules_for_dev(NULL);
2785
	cancel_delayed_work_sync(&si->sync_dwork);
Xiaoping Fan978b3772015-05-27 14:15:18 -07002787
2788 unregister_chrdev(si->debug_dev, "sfe_ipv6");
2789
#ifdef SFE_PROCESS_LOCAL_OUT
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0))
	DEBUG_TRACE("sfe: Unregister local out hook\n");
	nf_unregister_hooks(sfe_ipv6_ops_local_out, ARRAY_SIZE(sfe_ipv6_ops_local_out));
#else
	DEBUG_TRACE("sfe: Unregister local out hook\n");
	nf_unregister_net_hooks(&init_net, sfe_ipv6_ops_local_out, ARRAY_SIZE(sfe_ipv6_ops_local_out));
#endif
#endif

	/*
	 * Free the per-CPU stats only after the local out hook can no longer run.
	 */
	free_percpu(si->stats_pcpu);
	free_percpu(si->stats_pcpu_psc);
2802
Xiaoping Fan640faf42015-08-28 15:50:55 -07002803#ifdef CONFIG_NF_FLOW_COOKIE
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302804 sysfs_remove_file(si->sys_ipv6, &sfe_ipv6_flow_cookie_attr.attr);
Xiaoping Fan640faf42015-08-28 15:50:55 -07002805#endif /* CONFIG_NF_FLOW_COOKIE */
Ken Zhu137722d2021-09-23 17:57:36 -07002806
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302807 sysfs_remove_file(si->sys_ipv6, &sfe_ipv6_cpu_attr.attr);
Ken Zhu137722d2021-09-23 17:57:36 -07002808
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302809 sysfs_remove_file(si->sys_ipv6, &sfe_ipv6_debug_dev_attr.attr);
Xiaoping Fan978b3772015-05-27 14:15:18 -07002810
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302811 kobject_put(si->sys_ipv6);
Xiaoping Fan978b3772015-05-27 14:15:18 -07002812}
2813
Xiaoping Fan978b3772015-05-27 14:15:18 -07002814#ifdef CONFIG_NF_FLOW_COOKIE
2815EXPORT_SYMBOL(sfe_ipv6_register_flow_cookie_cb);
2816EXPORT_SYMBOL(sfe_ipv6_unregister_flow_cookie_cb);
2817#endif