/*
 * sfe_ipv6.c
 *	Shortcut forwarding engine - IPv6 support.
 *
 * Copyright (c) 2015-2016, 2019-2020, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/module.h>
#include <linux/sysfs.h>
#include <linux/skbuff.h>
#include <linux/icmp.h>
#include <net/tcp.h>
#include <linux/etherdevice.h>
#include <linux/version.h>
#include <net/udp.h>
#include <net/vxlan.h>
#include <linux/refcount.h>
#include <linux/netfilter.h>
#include <linux/inetdevice.h>
#include <linux/netfilter_ipv6.h>

#include "sfe_debug.h"
#include "sfe_api.h"
#include "sfe.h"
#include "sfe_flow_cookie.h"
#include "sfe_ipv6.h"
#include "sfe_ipv6_udp.h"
#include "sfe_ipv6_tcp.h"
#include "sfe_ipv6_icmp.h"
#include "sfe_pppoe.h"

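/*
 * sfe_ipv6_addr_copy()
 *	Copy one 128-bit IPv6 address (four 32-bit words) into another buffer.
 */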
#define sfe_ipv6_addr_copy(src, dest) memcpy((void *)(dest), (void *)(src), 16)

static char *sfe_ipv6_exception_events_string[SFE_IPV6_EXCEPTION_EVENT_LAST] = {
	"UDP_HEADER_INCOMPLETE",
	"UDP_NO_CONNECTION",
	"UDP_IP_OPTIONS_OR_INITIAL_FRAGMENT",
	"UDP_SMALL_TTL",
	"UDP_NEEDS_FRAGMENTATION",
	"TCP_HEADER_INCOMPLETE",
	"TCP_NO_CONNECTION_SLOW_FLAGS",
	"TCP_NO_CONNECTION_FAST_FLAGS",
	"TCP_IP_OPTIONS_OR_INITIAL_FRAGMENT",
	"TCP_SMALL_TTL",
	"TCP_NEEDS_FRAGMENTATION",
	"TCP_FLAGS",
	"TCP_SEQ_EXCEEDS_RIGHT_EDGE",
	"TCP_SMALL_DATA_OFFS",
	"TCP_BAD_SACK",
	"TCP_BIG_DATA_OFFS",
	"TCP_SEQ_BEFORE_LEFT_EDGE",
	"TCP_ACK_EXCEEDS_RIGHT_EDGE",
	"TCP_ACK_BEFORE_LEFT_EDGE",
	"ICMP_HEADER_INCOMPLETE",
	"ICMP_UNHANDLED_TYPE",
	"ICMP_IPV6_HEADER_INCOMPLETE",
	"ICMP_IPV6_NON_V6",
	"ICMP_IPV6_IP_OPTIONS_INCOMPLETE",
	"ICMP_IPV6_UDP_HEADER_INCOMPLETE",
	"ICMP_IPV6_TCP_HEADER_INCOMPLETE",
	"ICMP_IPV6_UNHANDLED_PROTOCOL",
	"ICMP_NO_CONNECTION",
	"ICMP_FLUSHED_CONNECTION",
	"HEADER_INCOMPLETE",
	"BAD_TOTAL_LENGTH",
	"NON_V6",
	"NON_INITIAL_FRAGMENT",
	"DATAGRAM_INCOMPLETE",
	"IP_OPTIONS_INCOMPLETE",
	"UNHANDLED_PROTOCOL",
	"FLOW_COOKIE_ADD_FAIL"
};

static struct sfe_ipv6 __si6;

/*
 * sfe_ipv6_get_debug_dev()
 */
static ssize_t sfe_ipv6_get_debug_dev(struct device *dev, struct device_attribute *attr, char *buf);

/*
 * sysfs attributes.
 */
static const struct device_attribute sfe_ipv6_debug_dev_attr =
	__ATTR(debug_dev, S_IWUSR | S_IRUGO, sfe_ipv6_get_debug_dev, NULL);

/*
 * sfe_ipv6_get_connection_match_hash()
 *	Generate the hash used in connection match lookups.
 */
static inline unsigned int sfe_ipv6_get_connection_match_hash(struct net_device *dev, u8 protocol,
						struct sfe_ipv6_addr *src_ip, __be16 src_port,
						struct sfe_ipv6_addr *dest_ip, __be16 dest_port)
{
	u32 idx, hash = 0;
	size_t dev_addr = (size_t)dev;

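	/*
	 * Fold all four 32-bit words of both addresses together, then mix in
	 * the device pointer, protocol and ports before reducing the result
	 * to the hash table index range.
	 */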
	for (idx = 0; idx < 4; idx++) {
		hash ^= src_ip->addr[idx] ^ dest_ip->addr[idx];
	}
	hash = ((u32)dev_addr) ^ hash ^ protocol ^ ntohs(src_port ^ dest_port);
	return ((hash >> SFE_IPV6_CONNECTION_HASH_SHIFT) ^ hash) & SFE_IPV6_CONNECTION_HASH_MASK;
}

/*
 * sfe_ipv6_find_connection_match_rcu()
 *	Get the IPv6 flow match info that corresponds to a particular 5-tuple.
 */
struct sfe_ipv6_connection_match *
sfe_ipv6_find_connection_match_rcu(struct sfe_ipv6 *si, struct net_device *dev, u8 protocol,
					struct sfe_ipv6_addr *src_ip, __be16 src_port,
					struct sfe_ipv6_addr *dest_ip, __be16 dest_port)
{
	struct sfe_ipv6_connection_match *cm = NULL;
	unsigned int conn_match_idx;
	struct hlist_head *lhead;
	WARN_ON_ONCE(!rcu_read_lock_held());

	conn_match_idx = sfe_ipv6_get_connection_match_hash(dev, protocol, src_ip, src_port, dest_ip, dest_port);

	lhead = &si->hlist_conn_match_hash_head[conn_match_idx];

	/*
	 * Hopefully the first entry is the one we want.
	 */
	hlist_for_each_entry_rcu(cm, lhead, hnode) {
		if ((cm->match_dest_port != dest_port) ||
		    (!sfe_ipv6_addr_equal(cm->match_src_ip, src_ip)) ||
		    (!sfe_ipv6_addr_equal(cm->match_dest_ip, dest_ip)) ||
		    (cm->match_protocol != protocol) ||
		    (cm->match_dev != dev)) {
			continue;
		}

		this_cpu_inc(si->stats_pcpu->connection_match_hash_hits64);

		break;
	}

	return cm;
}

/*
 * sfe_ipv6_connection_match_update_summary_stats()
 *	Update the summary stats for a connection match entry.
 */
static inline void sfe_ipv6_connection_match_update_summary_stats(struct sfe_ipv6_connection_match *cm,
						u32 *packets, u32 *bytes)
{
	u32 packet_count, byte_count;

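	/*
	 * The Rx counters are atomics bumped on the forwarding fast path.
	 * Snapshot them, fold the snapshot into the 64-bit totals and then
	 * subtract exactly what was consumed so that increments which land
	 * concurrently are not lost.
	 */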
	packet_count = atomic_read(&cm->rx_packet_count);
	cm->rx_packet_count64 += packet_count;
	atomic_sub(packet_count, &cm->rx_packet_count);

	byte_count = atomic_read(&cm->rx_byte_count);
	cm->rx_byte_count64 += byte_count;
	atomic_sub(byte_count, &cm->rx_byte_count);

	*packets = packet_count;
	*bytes = byte_count;
}

/*
 * sfe_ipv6_connection_match_compute_translations()
 *	Compute port and address translations for a connection match entry.
 */
static void sfe_ipv6_connection_match_compute_translations(struct sfe_ipv6_connection_match *cm)
{
	u32 diff[9];
	u32 *idx_32;
	u16 *idx_16;

	/*
	 * Before we insert the entry look to see if this is tagged as doing address
	 * translations. If it is then work out the adjustment that we need to apply
	 * to the transport checksum.
	 */
	if (cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_XLATE_SRC) {
		u32 adj = 0;
		u32 carry = 0;

		/*
		 * Precompute an incremental checksum adjustment so we can
		 * edit packets in this stream very quickly. The algorithm is from RFC1624.
		 */
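		/*
		 * The diff[] scratch array is filled with nine 32-bit words: the four
		 * words of the matched source address, one word packing the old source
		 * port with the one's complement of the translated port, and the four
		 * words of the one's complement of the translated source address.
		 * Summing these and folding the result to 16 bits yields the adjustment
		 * to apply to the transport checksum.
		 */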
		idx_32 = diff;
		*(idx_32++) = cm->match_src_ip[0].addr[0];
		*(idx_32++) = cm->match_src_ip[0].addr[1];
		*(idx_32++) = cm->match_src_ip[0].addr[2];
		*(idx_32++) = cm->match_src_ip[0].addr[3];

		idx_16 = (u16 *)idx_32;
		*(idx_16++) = cm->match_src_port;
		*(idx_16++) = ~cm->xlate_src_port;
		idx_32 = (u32 *)idx_16;

		*(idx_32++) = ~cm->xlate_src_ip[0].addr[0];
		*(idx_32++) = ~cm->xlate_src_ip[0].addr[1];
		*(idx_32++) = ~cm->xlate_src_ip[0].addr[2];
		*(idx_32++) = ~cm->xlate_src_ip[0].addr[3];

		/*
		 * When we compute this fold it down to a 16-bit offset
		 * as that way we can avoid having to do a double
		 * folding of the twos-complement result because the
		 * addition of 2 16-bit values cannot cause a double
		 * wrap-around!
		 */
		for (idx_32 = diff; idx_32 < diff + 9; idx_32++) {
			u32 w = *idx_32;
			adj += carry;
			adj += w;
			carry = (w > adj);
		}
		adj += carry;
		adj = (adj & 0xffff) + (adj >> 16);
		adj = (adj & 0xffff) + (adj >> 16);
		cm->xlate_src_csum_adjustment = (u16)adj;
	}

	if (cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_XLATE_DEST) {
		u32 adj = 0;
		u32 carry = 0;

		/*
		 * Precompute an incremental checksum adjustment so we can
		 * edit packets in this stream very quickly. The algorithm is from RFC1624.
		 */
		idx_32 = diff;
		*(idx_32++) = cm->match_dest_ip[0].addr[0];
		*(idx_32++) = cm->match_dest_ip[0].addr[1];
		*(idx_32++) = cm->match_dest_ip[0].addr[2];
		*(idx_32++) = cm->match_dest_ip[0].addr[3];

		idx_16 = (u16 *)idx_32;
		*(idx_16++) = cm->match_dest_port;
		*(idx_16++) = ~cm->xlate_dest_port;
		idx_32 = (u32 *)idx_16;

		*(idx_32++) = ~cm->xlate_dest_ip[0].addr[0];
		*(idx_32++) = ~cm->xlate_dest_ip[0].addr[1];
		*(idx_32++) = ~cm->xlate_dest_ip[0].addr[2];
		*(idx_32++) = ~cm->xlate_dest_ip[0].addr[3];

		/*
		 * When we compute this fold it down to a 16-bit offset
		 * as that way we can avoid having to do a double
		 * folding of the twos-complement result because the
		 * addition of 2 16-bit values cannot cause a double
		 * wrap-around!
		 */
		for (idx_32 = diff; idx_32 < diff + 9; idx_32++) {
			u32 w = *idx_32;
			adj += carry;
			adj += w;
			carry = (w > adj);
		}
		adj += carry;
		adj = (adj & 0xffff) + (adj >> 16);
		adj = (adj & 0xffff) + (adj >> 16);
		cm->xlate_dest_csum_adjustment = (u16)adj;
	}
}

/*
 * sfe_ipv6_update_summary_stats()
 *	Update the summary stats.
 */
static void sfe_ipv6_update_summary_stats(struct sfe_ipv6 *si, struct sfe_ipv6_stats *stats)
{
	int i = 0;

	memset(stats, 0, sizeof(*stats));

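	/*
	 * Sum the per-CPU 64-bit counters into the caller's summary block.
	 */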
	for_each_possible_cpu(i) {
		const struct sfe_ipv6_stats *s = per_cpu_ptr(si->stats_pcpu, i);

		stats->connection_create_requests64 += s->connection_create_requests64;
		stats->connection_create_collisions64 += s->connection_create_collisions64;
		stats->connection_create_failures64 += s->connection_create_failures64;
		stats->connection_destroy_requests64 += s->connection_destroy_requests64;
		stats->connection_destroy_misses64 += s->connection_destroy_misses64;
		stats->connection_match_hash_hits64 += s->connection_match_hash_hits64;
		stats->connection_match_hash_reorders64 += s->connection_match_hash_reorders64;
		stats->connection_flushes64 += s->connection_flushes64;
		stats->packets_dropped64 += s->packets_dropped64;
		stats->packets_forwarded64 += s->packets_forwarded64;
		stats->packets_not_forwarded64 += s->packets_not_forwarded64;
		stats->pppoe_encap_packets_forwarded64 += s->pppoe_encap_packets_forwarded64;
		stats->pppoe_decap_packets_forwarded64 += s->pppoe_decap_packets_forwarded64;
	}
}

/*
 * sfe_ipv6_insert_connection_match()
 *	Insert a connection match into the hash.
 *
 * On entry we must be holding the lock that protects the hash table.
 */
static inline void sfe_ipv6_insert_connection_match(struct sfe_ipv6 *si,
						    struct sfe_ipv6_connection_match *cm)
{
	unsigned int conn_match_idx
		= sfe_ipv6_get_connection_match_hash(cm->match_dev, cm->match_protocol,
						     cm->match_src_ip, cm->match_src_port,
						     cm->match_dest_ip, cm->match_dest_port);

	lockdep_assert_held(&si->lock);

	hlist_add_head_rcu(&cm->hnode, &si->hlist_conn_match_hash_head[conn_match_idx]);
#ifdef CONFIG_NF_FLOW_COOKIE
	if (!si->flow_cookie_enable || !(cm->flags & (SFE_IPV6_CONNECTION_MATCH_FLAG_XLATE_SRC | SFE_IPV6_CONNECTION_MATCH_FLAG_XLATE_DEST)))
		return;

	/*
	 * Configure hardware to put a flow cookie in packet of this flow,
	 * then we can accelerate the lookup process when we received this packet.
	 */
	for (conn_match_idx = 1; conn_match_idx < SFE_FLOW_COOKIE_SIZE; conn_match_idx++) {
		struct sfe_ipv6_flow_cookie_entry *entry = &si->sfe_flow_cookie_table[conn_match_idx];

		if ((NULL == entry->match) && time_is_before_jiffies(entry->last_clean_time + HZ)) {
			sfe_ipv6_flow_cookie_set_func_t func;

			rcu_read_lock();
			func = rcu_dereference(si->flow_cookie_set_func);
			if (func) {
				if (!func(cm->match_protocol, cm->match_src_ip->addr, cm->match_src_port,
					  cm->match_dest_ip->addr, cm->match_dest_port, conn_match_idx)) {
					entry->match = cm;
					cm->flow_cookie = conn_match_idx;
				} else {
					si->exception_events[SFE_IPV6_EXCEPTION_EVENT_FLOW_COOKIE_ADD_FAIL]++;
				}
			}
			rcu_read_unlock();

			break;
		}
	}
#endif
}

/*
 * sfe_ipv6_remove_connection_match()
 *	Remove a connection match object from the hash.
 */
static inline void sfe_ipv6_remove_connection_match(struct sfe_ipv6 *si, struct sfe_ipv6_connection_match *cm)
{
	lockdep_assert_held(&si->lock);

#ifdef CONFIG_NF_FLOW_COOKIE
	if (si->flow_cookie_enable) {
		/*
		 * Tell hardware that we no longer need a flow cookie in packet of this flow
		 */
		unsigned int conn_match_idx;

		for (conn_match_idx = 1; conn_match_idx < SFE_FLOW_COOKIE_SIZE; conn_match_idx++) {
			struct sfe_ipv6_flow_cookie_entry *entry = &si->sfe_flow_cookie_table[conn_match_idx];

			if (cm == entry->match) {
				sfe_ipv6_flow_cookie_set_func_t func;

				rcu_read_lock();
				func = rcu_dereference(si->flow_cookie_set_func);
				if (func) {
					func(cm->match_protocol, cm->match_src_ip->addr, cm->match_src_port,
					     cm->match_dest_ip->addr, cm->match_dest_port, 0);
				}
				rcu_read_unlock();

				cm->flow_cookie = 0;
				entry->match = NULL;
				entry->last_clean_time = jiffies;
				break;
			}
		}
	}
#endif
	hlist_del_init_rcu(&cm->hnode);
}

/*
 * sfe_ipv6_get_connection_hash()
 *	Generate the hash used in connection lookups.
 */
static inline unsigned int sfe_ipv6_get_connection_hash(u8 protocol, struct sfe_ipv6_addr *src_ip, __be16 src_port,
							struct sfe_ipv6_addr *dest_ip, __be16 dest_port)
{
	u32 idx, hash = 0;

	for (idx = 0; idx < 4; idx++) {
		hash ^= src_ip->addr[idx] ^ dest_ip->addr[idx];
	}
	hash = hash ^ protocol ^ ntohs(src_port ^ dest_port);
	return ((hash >> SFE_IPV6_CONNECTION_HASH_SHIFT) ^ hash) & SFE_IPV6_CONNECTION_HASH_MASK;
}

/*
 * sfe_ipv6_find_connection()
 *	Get the IPv6 connection info that corresponds to a particular 5-tuple.
 *
 * On entry we must be holding the lock that protects the hash table.
 */
static inline struct sfe_ipv6_connection *sfe_ipv6_find_connection(struct sfe_ipv6 *si, u32 protocol,
							struct sfe_ipv6_addr *src_ip, __be16 src_port,
							struct sfe_ipv6_addr *dest_ip, __be16 dest_port)
{
	struct sfe_ipv6_connection *c;
	unsigned int conn_idx = sfe_ipv6_get_connection_hash(protocol, src_ip, src_port, dest_ip, dest_port);

	lockdep_assert_held(&si->lock);

	c = si->conn_hash[conn_idx];
	while (c) {
		if ((c->src_port == src_port)
		    && (c->dest_port == dest_port)
		    && (sfe_ipv6_addr_equal(c->src_ip, src_ip))
		    && (sfe_ipv6_addr_equal(c->dest_ip, dest_ip))
		    && (c->protocol == protocol)) {
			return c;
		}
		c = c->next;
	}

	return NULL;
}

/*
 * sfe_ipv6_insert_connection()
 *	Insert a connection into the hash.
 *
 * On entry we must be holding the lock that protects the hash table.
 */
static void sfe_ipv6_insert_connection(struct sfe_ipv6 *si, struct sfe_ipv6_connection *c)
{
	struct sfe_ipv6_connection **hash_head;
	struct sfe_ipv6_connection *prev_head;
	unsigned int conn_idx;

	lockdep_assert_held(&si->lock);

	/*
	 * Insert entry into the connection hash.
	 */
	conn_idx = sfe_ipv6_get_connection_hash(c->protocol, c->src_ip, c->src_port,
						c->dest_ip, c->dest_port);
	hash_head = &si->conn_hash[conn_idx];
	prev_head = *hash_head;
	c->prev = NULL;
	if (prev_head) {
		prev_head->prev = c;
	}

	c->next = prev_head;
	*hash_head = c;

	/*
	 * Insert entry into the "all connections" list.
	 */
	if (si->all_connections_tail) {
		c->all_connections_prev = si->all_connections_tail;
		si->all_connections_tail->all_connections_next = c;
	} else {
		c->all_connections_prev = NULL;
		si->all_connections_head = c;
	}

	si->all_connections_tail = c;
	c->all_connections_next = NULL;
	si->num_connections++;

	/*
	 * Insert the connection match objects too.
	 */
	sfe_ipv6_insert_connection_match(si, c->original_match);
	sfe_ipv6_insert_connection_match(si, c->reply_match);
}

/*
 * sfe_ipv6_remove_connection()
 *	Remove a sfe_ipv6_connection object from the hash.
 *
 * On entry we must be holding the lock that protects the hash table.
 */
bool sfe_ipv6_remove_connection(struct sfe_ipv6 *si, struct sfe_ipv6_connection *c)
{
	lockdep_assert_held(&si->lock);

	if (c->removed) {
		DEBUG_ERROR("%px: Connection has been removed already\n", c);
		return false;
	}

	/*
	 * Remove the connection match objects.
	 */
	sfe_ipv6_remove_connection_match(si, c->reply_match);
	sfe_ipv6_remove_connection_match(si, c->original_match);

	/*
	 * Unlink the connection.
	 */
	if (c->prev) {
		c->prev->next = c->next;
	} else {
		unsigned int conn_idx = sfe_ipv6_get_connection_hash(c->protocol, c->src_ip, c->src_port,
								     c->dest_ip, c->dest_port);
		si->conn_hash[conn_idx] = c->next;
	}

	if (c->next) {
		c->next->prev = c->prev;
	}

	/*
	 * Unlink connection from all_connections list
	 */
	if (c->all_connections_prev) {
		c->all_connections_prev->all_connections_next = c->all_connections_next;
	} else {
		si->all_connections_head = c->all_connections_next;
	}

	if (c->all_connections_next) {
		c->all_connections_next->all_connections_prev = c->all_connections_prev;
	} else {
		si->all_connections_tail = c->all_connections_prev;
	}

	/*
	 * If I am the next sync connection, move the sync to my next or head.
	 */
	if (unlikely(si->wc_next == c)) {
		si->wc_next = c->all_connections_next;
	}

	c->removed = true;
	si->num_connections--;
	return true;
}

/*
 * sfe_ipv6_gen_sync_connection()
 *	Sync a connection.
 *
 * On entry to this function we expect that the lock for the connection is either
 * already held (while called from sfe_ipv6_periodic_sync()) or isn't required
 * (while called from sfe_ipv6_flush_sfe_ipv6_connection()).
 */
static void sfe_ipv6_gen_sync_connection(struct sfe_ipv6 *si, struct sfe_ipv6_connection *c,
					 struct sfe_connection_sync *sis, sfe_sync_reason_t reason,
					 u64 now_jiffies)
{
	struct sfe_ipv6_connection_match *original_cm;
	struct sfe_ipv6_connection_match *reply_cm;
	u32 packet_count, byte_count;

	/*
	 * Fill in the update message.
	 */
	sis->is_v6 = 1;
	sis->protocol = c->protocol;
	sis->src_ip.ip6[0] = c->src_ip[0];
	sis->src_ip_xlate.ip6[0] = c->src_ip_xlate[0];
	sis->dest_ip.ip6[0] = c->dest_ip[0];
	sis->dest_ip_xlate.ip6[0] = c->dest_ip_xlate[0];
	sis->src_port = c->src_port;
	sis->src_port_xlate = c->src_port_xlate;
	sis->dest_port = c->dest_port;
	sis->dest_port_xlate = c->dest_port_xlate;

	original_cm = c->original_match;
	reply_cm = c->reply_match;
	sis->src_td_max_window = original_cm->protocol_state.tcp.max_win;
	sis->src_td_end = original_cm->protocol_state.tcp.end;
	sis->src_td_max_end = original_cm->protocol_state.tcp.max_end;
	sis->dest_td_max_window = reply_cm->protocol_state.tcp.max_win;
	sis->dest_td_end = reply_cm->protocol_state.tcp.end;
	sis->dest_td_max_end = reply_cm->protocol_state.tcp.max_end;

	sfe_ipv6_connection_match_update_summary_stats(original_cm, &packet_count, &byte_count);
	sis->src_new_packet_count = packet_count;
	sis->src_new_byte_count = byte_count;

	sfe_ipv6_connection_match_update_summary_stats(reply_cm, &packet_count, &byte_count);
	sis->dest_new_packet_count = packet_count;
	sis->dest_new_byte_count = byte_count;

	sis->src_dev = original_cm->match_dev;
	sis->src_packet_count = original_cm->rx_packet_count64;
	sis->src_byte_count = original_cm->rx_byte_count64;

	sis->dest_dev = reply_cm->match_dev;
	sis->dest_packet_count = reply_cm->rx_packet_count64;
	sis->dest_byte_count = reply_cm->rx_byte_count64;

	sis->reason = reason;

	/*
	 * Get the time increment since our last sync.
	 */
	sis->delta_jiffies = now_jiffies - c->last_sync_jiffies;
	c->last_sync_jiffies = now_jiffies;
}

/*
 * sfe_ipv6_free_sfe_ipv6_connection_rcu()
 *	Called at RCU qs state to free the connection object.
 */
static void sfe_ipv6_free_sfe_ipv6_connection_rcu(struct rcu_head *head)
{
	struct sfe_ipv6_connection *c;
	struct udp_sock *up;
	struct sock *sk;

	/*
	 * We don't need the spin lock as the connection has already been removed from the lists.
	 */
	c = container_of(head, struct sfe_ipv6_connection, rcu);
	BUG_ON(!c->removed);

	DEBUG_TRACE("%px: connection has been deleted\n", c);

	/*
	 * Decrease the refcount taken in function sfe_ipv6_create_rule()
	 * during call of __udp6_lib_lookup()
	 */
	up = c->reply_match->up;
	if (up) {
		sk = (struct sock *)up;
		sock_put(sk);
	}

	/*
	 * Release our hold of the source and dest devices and free the memory
	 * for our connection objects.
	 */
	dev_put(c->original_dev);
	dev_put(c->reply_dev);
	kfree(c->original_match);
	kfree(c->reply_match);
	kfree(c);
}

/*
 * sfe_ipv6_sync_status()
 *	Update a connection's status to its connection manager.
 *
 * si: the ipv6 context
 * c: the connection to be notified
 * reason: the kind of notification: flush or destroy
 */
void sfe_ipv6_sync_status(struct sfe_ipv6 *si,
			  struct sfe_ipv6_connection *c,
			  sfe_sync_reason_t reason)
{
	struct sfe_connection_sync sis;
	u64 now_jiffies;
	sfe_sync_rule_callback_t sync_rule_callback;

	rcu_read_lock();
	sync_rule_callback = rcu_dereference(si->sync_rule_callback);

	if (unlikely(!sync_rule_callback)) {
		rcu_read_unlock();
		return;
	}

	/*
	 * Generate a sync message and then sync.
	 */
	now_jiffies = get_jiffies_64();
	sfe_ipv6_gen_sync_connection(si, c, &sis, reason, now_jiffies);
	sync_rule_callback(&sis);

	rcu_read_unlock();
}

/*
 * sfe_ipv6_flush_connection()
 *	Flush a connection and free all associated resources.
 *
 * We need to be called with bottom halves disabled locally as we need to acquire
 * the connection hash lock and release it again. In general we're actually called
 * from within a BH and so we're fine, but we're also called when connections are
 * torn down.
 */
void sfe_ipv6_flush_connection(struct sfe_ipv6 *si,
			       struct sfe_ipv6_connection *c,
			       sfe_sync_reason_t reason)
{
	BUG_ON(!c->removed);

	this_cpu_inc(si->stats_pcpu->connection_flushes64);
	sfe_ipv6_sync_status(si, c, reason);

	/*
	 * Release our hold of the source and dest devices and free the memory
	 * for our connection objects; this happens in the RCU callback once all
	 * readers have finished with the connection.
	 */
	call_rcu(&c->rcu, sfe_ipv6_free_sfe_ipv6_connection_rcu);
}

/*
 * sfe_ipv6_exception_stats_inc()
 *	Increment exception stats.
 */
void sfe_ipv6_exception_stats_inc(struct sfe_ipv6 *si, enum sfe_ipv6_exception_events reason)
{
	struct sfe_ipv6_stats *stats = this_cpu_ptr(si->stats_pcpu);

	stats->exception_events64[reason]++;
	stats->packets_not_forwarded64++;
}

/*
 * sfe_ipv6_recv()
 *	Handle packet receives and forwarding.
 *
 * Returns 1 if the packet is forwarded or 0 if it isn't.
 */
int sfe_ipv6_recv(struct net_device *dev, struct sk_buff *skb, struct sfe_l2_info *l2_info, bool tun_outer)
{
	struct sfe_ipv6 *si = &__si6;
	unsigned int len;
	unsigned int payload_len;
	unsigned int ihl = sizeof(struct ipv6hdr);
	bool sync_on_find = false;
	struct ipv6hdr *iph;
	u8 next_hdr;

	/*
	 * Check that we have space for an IP header and an upper-layer header here.
	 */
	len = skb->len;
	if (!pskb_may_pull(skb, ihl + sizeof(struct sfe_ipv6_ext_hdr))) {
		sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_HEADER_INCOMPLETE);
		DEBUG_TRACE("len: %u is too short\n", len);
		return 0;
	}

	/*
	 * Is our IP version wrong?
	 */
	iph = (struct ipv6hdr *)skb->data;
	if (unlikely(iph->version != 6)) {
		sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_NON_V6);
		DEBUG_TRACE("IP version: %u\n", iph->version);
		return 0;
	}

	/*
	 * Does our datagram fit inside the skb?
	 */
	payload_len = ntohs(iph->payload_len);
	if (unlikely(payload_len > (len - ihl))) {
		sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_DATAGRAM_INCOMPLETE);
		DEBUG_TRACE("payload_len: %u, exceeds len: %u\n", payload_len, (len - (unsigned int)sizeof(struct ipv6hdr)));
		return 0;
	}

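	/*
	 * Walk past any IPv6 extension headers. The hdr_len field counts 8-octet
	 * units beyond the first sizeof(struct sfe_ipv6_ext_hdr) octets, hence
	 * the shift and add below.
	 */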
	next_hdr = iph->nexthdr;
	while (unlikely(sfe_ipv6_is_ext_hdr(next_hdr))) {
		struct sfe_ipv6_ext_hdr *ext_hdr;
		unsigned int ext_hdr_len;

		ext_hdr = (struct sfe_ipv6_ext_hdr *)(skb->data + ihl);

		ext_hdr_len = ext_hdr->hdr_len;
		ext_hdr_len <<= 3;
		ext_hdr_len += sizeof(struct sfe_ipv6_ext_hdr);
		ihl += ext_hdr_len;
		if (!pskb_may_pull(skb, ihl + sizeof(struct sfe_ipv6_ext_hdr))) {
			sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_HEADER_INCOMPLETE);
			DEBUG_TRACE("extension header %d not completed\n", next_hdr);
			return 0;
		}

		/*
		 * Any packet with extension headers won't be handled in the fast
		 * path; sync its status and exception it to the kernel.
		 */
		sync_on_find = true;
		next_hdr = ext_hdr->next_hdr;
	}

	if (IPPROTO_UDP == next_hdr) {
		return sfe_ipv6_recv_udp(si, skb, dev, len, iph, ihl, sync_on_find, l2_info, tun_outer);
	}

	if (IPPROTO_TCP == next_hdr) {
		return sfe_ipv6_recv_tcp(si, skb, dev, len, iph, ihl, sync_on_find, l2_info);
	}

	if (IPPROTO_ICMPV6 == next_hdr) {
		return sfe_ipv6_recv_icmp(si, skb, dev, len, iph, ihl);
	}

	sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_UNHANDLED_PROTOCOL);
	DEBUG_TRACE("not UDP, TCP or ICMP: %u\n", next_hdr);
	return 0;
}

/*
 * sfe_ipv6_update_tcp_state()
 *	Update TCP window variables.
 */
static void
sfe_ipv6_update_tcp_state(struct sfe_ipv6_connection *c,
			  struct sfe_ipv6_rule_create_msg *msg)
{
	struct sfe_ipv6_connection_match *orig_cm;
	struct sfe_ipv6_connection_match *repl_cm;
	struct sfe_ipv6_tcp_connection_match *orig_tcp;
	struct sfe_ipv6_tcp_connection_match *repl_tcp;

	orig_cm = c->original_match;
	repl_cm = c->reply_match;
	orig_tcp = &orig_cm->protocol_state.tcp;
	repl_tcp = &repl_cm->protocol_state.tcp;

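	/*
	 * Window limits and sequence space edges only move forward; the signed
	 * 32-bit subtractions below are wrap-around safe sequence comparisons.
	 */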
	/* update orig */
	if (orig_tcp->max_win < msg->tcp_rule.flow_max_window) {
		orig_tcp->max_win = msg->tcp_rule.flow_max_window;
	}
	if ((s32)(orig_tcp->end - msg->tcp_rule.flow_end) < 0) {
		orig_tcp->end = msg->tcp_rule.flow_end;
	}
	if ((s32)(orig_tcp->max_end - msg->tcp_rule.flow_max_end) < 0) {
		orig_tcp->max_end = msg->tcp_rule.flow_max_end;
	}

	/* update reply */
	if (repl_tcp->max_win < msg->tcp_rule.return_max_window) {
		repl_tcp->max_win = msg->tcp_rule.return_max_window;
	}
	if ((s32)(repl_tcp->end - msg->tcp_rule.return_end) < 0) {
		repl_tcp->end = msg->tcp_rule.return_end;
	}
	if ((s32)(repl_tcp->max_end - msg->tcp_rule.return_max_end) < 0) {
		repl_tcp->max_end = msg->tcp_rule.return_max_end;
	}

	/* update match flags */
	orig_cm->flags &= ~SFE_IPV6_CONNECTION_MATCH_FLAG_NO_SEQ_CHECK;
	repl_cm->flags &= ~SFE_IPV6_CONNECTION_MATCH_FLAG_NO_SEQ_CHECK;
	if (msg->rule_flags & SFE_RULE_CREATE_FLAG_NO_SEQ_CHECK) {
		orig_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_NO_SEQ_CHECK;
		repl_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_NO_SEQ_CHECK;
	}
}

/*
 * sfe_ipv6_update_protocol_state()
 *	Update protocol-specific state machine.
 */
static void
sfe_ipv6_update_protocol_state(struct sfe_ipv6_connection *c,
			       struct sfe_ipv6_rule_create_msg *msg)
{
	switch (msg->tuple.protocol) {
	case IPPROTO_TCP:
		sfe_ipv6_update_tcp_state(c, msg);
		break;
	}
}

/*
 * sfe_ipv6_match_entry_set_vlan()
 */
static void sfe_ipv6_match_entry_set_vlan(
	struct sfe_ipv6_connection_match *cm,
	u32 primary_ingress_vlan_tag,
	u32 primary_egress_vlan_tag,
	u32 secondary_ingress_vlan_tag,
	u32 secondary_egress_vlan_tag)
{
	u16 tpid;

	/*
	 * Prevent stacking header counts when updating.
	 */
	cm->ingress_vlan_hdr_cnt = 0;
	cm->egress_vlan_hdr_cnt = 0;
	memset(cm->ingress_vlan_hdr, 0, sizeof(cm->ingress_vlan_hdr));
	memset(cm->egress_vlan_hdr, 0, sizeof(cm->egress_vlan_hdr));

	/*
	 * vlan_hdr[0] corresponds to outer tag
	 * vlan_hdr[1] corresponds to inner tag
	 * Extract the vlan information (tpid and tci) from rule message
	 */
	if ((primary_ingress_vlan_tag & VLAN_VID_MASK) != SFE_VLAN_ID_NOT_CONFIGURED) {
		tpid = (u16)(primary_ingress_vlan_tag >> 16);
		cm->ingress_vlan_hdr[0].tpid = ntohs(tpid);
		cm->ingress_vlan_hdr[0].tci = (u16)primary_ingress_vlan_tag;
		cm->ingress_vlan_hdr_cnt++;
	}

	if ((secondary_ingress_vlan_tag & VLAN_VID_MASK) != SFE_VLAN_ID_NOT_CONFIGURED) {
		tpid = (u16)(secondary_ingress_vlan_tag >> 16);
		cm->ingress_vlan_hdr[1].tpid = ntohs(tpid);
		cm->ingress_vlan_hdr[1].tci = (u16)secondary_ingress_vlan_tag;
		cm->ingress_vlan_hdr_cnt++;
	}

	if ((primary_egress_vlan_tag & VLAN_VID_MASK) != SFE_VLAN_ID_NOT_CONFIGURED) {
		tpid = (u16)(primary_egress_vlan_tag >> 16);
		cm->egress_vlan_hdr[0].tpid = ntohs(tpid);
		cm->egress_vlan_hdr[0].tci = (u16)primary_egress_vlan_tag;
		cm->egress_vlan_hdr_cnt++;
	}

	if ((secondary_egress_vlan_tag & VLAN_VID_MASK) != SFE_VLAN_ID_NOT_CONFIGURED) {
		tpid = (u16)(secondary_egress_vlan_tag >> 16);
		cm->egress_vlan_hdr[1].tpid = ntohs(tpid);
		cm->egress_vlan_hdr[1].tci = (u16)secondary_egress_vlan_tag;
		cm->egress_vlan_hdr_cnt++;
	}
}

/*
 * sfe_ipv6_update_rule()
 *	Update forwarding rule after rule is created.
 */
void sfe_ipv6_update_rule(struct sfe_ipv6_rule_create_msg *msg)
{
	struct sfe_ipv6_connection *c;
	struct sfe_ipv6 *si = &__si6;

	spin_lock_bh(&si->lock);

	c = sfe_ipv6_find_connection(si,
				     msg->tuple.protocol,
				     (struct sfe_ipv6_addr *)msg->tuple.flow_ip,
				     msg->tuple.flow_ident,
				     (struct sfe_ipv6_addr *)msg->tuple.return_ip,
				     msg->tuple.return_ident);
	if (c != NULL) {
		sfe_ipv6_update_protocol_state(c, msg);
	}

	spin_unlock_bh(&si->lock);
}

/*
 * sfe_ipv6_xmit_eth_type_check()
 *	Check whether a MAC header has to be written.
 */
static inline bool sfe_ipv6_xmit_eth_type_check(struct net_device *dev, u32 cm_flags)
{
	if (!(dev->flags & IFF_NOARP)) {
		return true;
	}

	/*
	 * For PPPoE, since we are now supporting PPPoE encapsulation, we are writing L2 header.
	 */
	if (cm_flags & SFE_IPV6_CONNECTION_MATCH_FLAG_PPPOE_ENCAP) {
		return true;
	}

	return false;
}
988
989/*
Xiaoping Fan978b3772015-05-27 14:15:18 -0700990 * sfe_ipv6_create_rule()
991 * Create a forwarding rule.
992 */
Ratheesh Kannoth89302a72021-10-20 08:10:37 +0530993int sfe_ipv6_create_rule(struct sfe_ipv6_rule_create_msg *msg)
Xiaoping Fan978b3772015-05-27 14:15:18 -0700994{
995 struct sfe_ipv6 *si = &__si6;
Ratheesh Kannotha212fc52021-10-20 07:50:32 +0530996 struct sfe_ipv6_connection *c, *old_c;
Xiaoping Fan978b3772015-05-27 14:15:18 -0700997 struct sfe_ipv6_connection_match *original_cm;
998 struct sfe_ipv6_connection_match *reply_cm;
999 struct net_device *dest_dev;
1000 struct net_device *src_dev;
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301001 struct sfe_ipv6_5tuple *tuple = &msg->tuple;
Suruchi Suman23a279d2021-11-16 15:13:09 +05301002 struct sock *sk;
1003 struct net *net;
1004 unsigned int src_if_idx;
1005
Suruchi Sumanc1a4a612021-10-21 14:50:23 +05301006 s32 flow_interface_num = msg->conn_rule.flow_top_interface_num;
1007 s32 return_interface_num = msg->conn_rule.return_top_interface_num;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001008
Suruchi Sumanc1a4a612021-10-21 14:50:23 +05301009 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_USE_FLOW_BOTTOM_INTERFACE) {
1010 flow_interface_num = msg->conn_rule.flow_interface_num;
1011 }
1012
1013 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_USE_RETURN_BOTTOM_INTERFACE) {
1014 return_interface_num = msg->conn_rule.return_interface_num;
1015 }
1016
1017 src_dev = dev_get_by_index(&init_net, flow_interface_num);
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301018 if (!src_dev) {
1019 DEBUG_WARN("%px: Unable to find src_dev corresponding to %d\n", msg,
Suruchi Sumanc1a4a612021-10-21 14:50:23 +05301020 flow_interface_num);
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301021 this_cpu_inc(si->stats_pcpu->connection_create_failures64);
1022 return -EINVAL;
1023 }
1024
Suruchi Sumanc1a4a612021-10-21 14:50:23 +05301025 dest_dev = dev_get_by_index(&init_net, return_interface_num);
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301026 if (!dest_dev) {
1027 DEBUG_WARN("%px: Unable to find dest_dev corresponding to %d\n", msg,
Suruchi Sumanc1a4a612021-10-21 14:50:23 +05301028 return_interface_num);
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301029 this_cpu_inc(si->stats_pcpu->connection_create_failures64);
1030 dev_put(src_dev);
1031 return -EINVAL;
1032 }
Xiaoping Fan978b3772015-05-27 14:15:18 -07001033
1034 if (unlikely((dest_dev->reg_state != NETREG_REGISTERED) ||
1035 (src_dev->reg_state != NETREG_REGISTERED))) {
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301036 DEBUG_WARN("%px: src_dev=%s and dest_dev=%s are unregistered\n", msg,
1037 src_dev->name, dest_dev->name);
1038 this_cpu_inc(si->stats_pcpu->connection_create_failures64);
1039 dev_put(src_dev);
1040 dev_put(dest_dev);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001041 return -EINVAL;
1042 }
1043
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301044 /*
1045 * Allocate the various connection tracking objects.
1046 */
1047 c = (struct sfe_ipv6_connection *)kmalloc(sizeof(struct sfe_ipv6_connection), GFP_ATOMIC);
1048 if (unlikely(!c)) {
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301049 DEBUG_WARN("%px: memory allocation of connection entry failed\n", msg);
1050 this_cpu_inc(si->stats_pcpu->connection_create_failures64);
1051 dev_put(src_dev);
1052 dev_put(dest_dev);
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301053 return -ENOMEM;
1054 }
1055
1056 original_cm = (struct sfe_ipv6_connection_match *)kmalloc(sizeof(struct sfe_ipv6_connection_match), GFP_ATOMIC);
1057 if (unlikely(!original_cm)) {
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301058 this_cpu_inc(si->stats_pcpu->connection_create_failures64);
1059 DEBUG_WARN("%px: memory allocation of connection match entry failed\n", msg);
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301060 kfree(c);
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301061 dev_put(src_dev);
1062 dev_put(dest_dev);
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301063 return -ENOMEM;
1064 }
1065
1066 reply_cm = (struct sfe_ipv6_connection_match *)kmalloc(sizeof(struct sfe_ipv6_connection_match), GFP_ATOMIC);
1067 if (unlikely(!reply_cm)) {
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301068 this_cpu_inc(si->stats_pcpu->connection_create_failures64);
1069 DEBUG_WARN("%px: memory allocation of connection match entry failed\n", msg);
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301070 kfree(original_cm);
1071 kfree(c);
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301072 dev_put(src_dev);
1073 dev_put(dest_dev);
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301074 return -ENOMEM;
1075 }
1076
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05301077 this_cpu_inc(si->stats_pcpu->connection_create_requests64);
1078
Xiaoping Fan978b3772015-05-27 14:15:18 -07001079 spin_lock_bh(&si->lock);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001080
1081 /*
1082 * Check to see if there is already a flow that matches the rule we're
1083 * trying to create. If there is then we can't create a new one.
1084 */
Wayne Tanbb7f1782021-12-13 11:16:04 -08001085 old_c = sfe_ipv6_find_connection(si,
1086 tuple->protocol,
1087 (struct sfe_ipv6_addr *)tuple->flow_ip,
1088 tuple->flow_ident,
1089 (struct sfe_ipv6_addr *)tuple->return_ip,
1090 tuple->return_ident);
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301091
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301092 if (old_c != NULL) {
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05301093 this_cpu_inc(si->stats_pcpu->connection_create_collisions64);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001094
1095 /*
1096 * If we already have the flow then it's likely that this
1097 * request to create the connection rule contains more
1098 * up-to-date information. Check and update accordingly.
1099 */
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301100 sfe_ipv6_update_protocol_state(old_c, msg);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001101 spin_unlock_bh(&si->lock);
1102
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301103 kfree(reply_cm);
1104 kfree(original_cm);
1105 kfree(c);
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301106 dev_put(src_dev);
1107 dev_put(dest_dev);
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301108
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301109 DEBUG_TRACE("connection already exists - p: %d\n"
Tian Yang45f39c82020-10-06 14:07:47 -07001110 " s: %s:%pxM:%pI6:%u, d: %s:%pxM:%pI6:%u\n",
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301111 tuple->protocol,
1112 src_dev->name, msg->conn_rule.flow_mac, tuple->flow_ip, ntohs(tuple->flow_ident),
1113 dest_dev->name, msg->conn_rule.return_mac, tuple->return_ip, ntohs(tuple->return_ident));
Xiaoping Fan978b3772015-05-27 14:15:18 -07001114 return -EADDRINUSE;
1115 }
1116
1117 /*
Xiaoping Fan978b3772015-05-27 14:15:18 -07001118 * Fill in the "original" direction connection matching object.
1119 * Note that the transmit MAC address is "dest_mac_xlate" because
1120 * we always know both ends of a connection by their translated
1121 * addresses and not their public addresses.
1122 */
1123 original_cm->match_dev = src_dev;
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301124 original_cm->match_protocol = tuple->protocol;
1125 original_cm->match_src_ip[0] = *(struct sfe_ipv6_addr *)tuple->flow_ip;
Suruchi Suman66609a72022-01-20 02:34:25 +05301126 original_cm->match_src_port = netif_is_vxlan(src_dev) ? 0 : tuple->flow_ident;
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301127 original_cm->match_dest_ip[0] = *(struct sfe_ipv6_addr *)tuple->return_ip;
1128 original_cm->match_dest_port = tuple->return_ident;
1129
1130 original_cm->xlate_src_ip[0] = *(struct sfe_ipv6_addr *)tuple->flow_ip;
1131 original_cm->xlate_src_port = tuple->flow_ident;
1132 original_cm->xlate_dest_ip[0] = *(struct sfe_ipv6_addr *)tuple->return_ip;
1133 original_cm->xlate_dest_port = tuple->return_ident;
1134
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301135 atomic_set(&original_cm->rx_packet_count, 0);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001136 original_cm->rx_packet_count64 = 0;
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301137 atomic_set(&original_cm->rx_byte_count, 0);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001138 original_cm->rx_byte_count64 = 0;
1139 original_cm->xmit_dev = dest_dev;
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301140
1141 original_cm->xmit_dev_mtu = msg->conn_rule.return_mtu;
Ratheesh Kannoth29140aa2021-10-20 08:25:02 +05301142
Xiaoping Fan978b3772015-05-27 14:15:18 -07001143 original_cm->connection = c;
1144 original_cm->counter_match = reply_cm;
Wayne Tanbb7f1782021-12-13 11:16:04 -08001145 original_cm->l2_hdr_size = 0;
1146 original_cm->flags = 0;
Suruchi Suman23a279d2021-11-16 15:13:09 +05301147
1148 /*
1149 * Valid in decap direction only
1150 */
1151 RCU_INIT_POINTER(original_cm->up, NULL);
1152
Ken Zhu37040ea2021-09-09 21:11:15 -07001153 if (msg->valid_flags & SFE_RULE_CREATE_MARK_VALID) {
1154 original_cm->mark = msg->mark_rule.flow_mark;
1155 original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_MARK;
1156 }
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301157 if (msg->valid_flags & SFE_RULE_CREATE_QOS_VALID) {
1158 original_cm->priority = msg->qos_rule.flow_qos_tag;
Xiaoping Fane1963d42015-08-25 17:06:19 -07001159 original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_PRIORITY_REMARK;
1160 }
Wayne Tanbb7f1782021-12-13 11:16:04 -08001161
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301162 if (msg->valid_flags & SFE_RULE_CREATE_DSCP_MARKING_VALID) {
1163 original_cm->dscp = msg->dscp_rule.flow_dscp << SFE_IPV6_DSCP_SHIFT;
Xiaoping Fane1963d42015-08-25 17:06:19 -07001164 original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_DSCP_REMARK;
1165 }
Ratheesh Kannoth71fc51e2022-01-05 10:02:47 +05301166 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_BRIDGE_FLOW) {
1167 original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_BRIDGE_FLOW;
1168 }
1169
Wayne Tanbb7f1782021-12-13 11:16:04 -08001170 /*
1171 * Add VLAN rule to original_cm
1172 */
1173 if (msg->valid_flags & SFE_RULE_CREATE_VLAN_VALID) {
1174 struct sfe_vlan_rule *vlan_primary_rule = &msg->vlan_primary_rule;
1175 struct sfe_vlan_rule *vlan_secondary_rule = &msg->vlan_secondary_rule;
1176 sfe_ipv6_match_entry_set_vlan(original_cm,
1177 vlan_primary_rule->ingress_vlan_tag,
1178 vlan_primary_rule->egress_vlan_tag,
1179 vlan_secondary_rule->ingress_vlan_tag,
1180 vlan_secondary_rule->egress_vlan_tag);
1181
1182 if ((msg->rule_flags & SFE_RULE_CREATE_FLAG_USE_RETURN_BOTTOM_INTERFACE) &&
1183 original_cm->egress_vlan_hdr_cnt > 0) {
1184 original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_INSERT_EGRESS_VLAN_TAG;
1185 original_cm->l2_hdr_size += original_cm->egress_vlan_hdr_cnt * VLAN_HLEN;
1186 }
1187 }
1188
Xiaoping Fan978b3772015-05-27 14:15:18 -07001189#ifdef CONFIG_NF_FLOW_COOKIE
1190 original_cm->flow_cookie = 0;
1191#endif
Zhi Chen8748eb32015-06-18 12:58:48 -07001192#ifdef CONFIG_XFRM
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301193 if (msg->valid_flags & SFE_RULE_CREATE_DIRECTION_VALID) {
1194 original_cm->flow_accel = msg->direction_rule.flow_accel;
1195 } else {
1196 original_cm->flow_accel = 1;
1197 }
Zhi Chen8748eb32015-06-18 12:58:48 -07001198#endif
Ratheesh Kannotha3cf0e02021-12-09 09:44:10 +05301199 /*
1200 * If l2_features are disabled and flow uses l2 features such as macvlan/bridge/pppoe/vlan,
1201 * bottom interfaces are expected to be disabled in the flow rule and always top interfaces
1202 * are used. In such cases, do not use HW csum offload. csum offload is used only when we
1203 * are sending directly to the destination interface that supports it.
1204 */
Suruchi Sumanf2077182022-01-13 21:35:23 +05301205 if (likely(dest_dev->features & NETIF_F_HW_CSUM) && !netif_is_vxlan(dest_dev)) {
Ratheesh Kannotha3cf0e02021-12-09 09:44:10 +05301206 if ((msg->conn_rule.return_top_interface_num == msg->conn_rule.return_interface_num) ||
1207 (msg->rule_flags & SFE_RULE_CREATE_FLAG_USE_RETURN_BOTTOM_INTERFACE)) {
1208 original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_CSUM_OFFLOAD;
1209 }
1210 }
Xiaoping Fan978b3772015-05-27 14:15:18 -07001211
Wayne Tanbb7f1782021-12-13 11:16:04 -08001212 reply_cm->l2_hdr_size = 0;
Guduri Prathyusha647fe3e2021-11-22 19:17:51 +05301213 reply_cm->flags = 0;
1214
1215 /*
1216 * Adding PPPoE parameters to original and reply entries based on the direction where
1217 * PPPoE header is valid in ECM rule.
1218 *
1219 * If PPPoE is valid in flow direction (from interface is PPPoE), then
1220 * original cm will have PPPoE at ingress (strip PPPoE header)
1221 * reply cm will have PPPoE at egress (add PPPoE header)
1222 *
1223 * If PPPoE is valid in return direction (to interface is PPPoE), then
1224 * original cm will have PPPoE at egress (add PPPoE header)
1225 * reply cm will have PPPoE at ingress (strip PPPoE header)
1226 */
1227 if (msg->valid_flags & SFE_RULE_CREATE_PPPOE_DECAP_VALID) {
1228 original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_PPPOE_DECAP;
1229 original_cm->pppoe_session_id = msg->pppoe_rule.flow_pppoe_session_id;
1230 ether_addr_copy(original_cm->pppoe_remote_mac, msg->pppoe_rule.flow_pppoe_remote_mac);
1231
1232 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_PPPOE_ENCAP;
Wayne Tanbb7f1782021-12-13 11:16:04 -08001233 reply_cm->l2_hdr_size += SFE_PPPOE_SESSION_HEADER_SIZE;
Guduri Prathyusha647fe3e2021-11-22 19:17:51 +05301234 reply_cm->pppoe_session_id = msg->pppoe_rule.flow_pppoe_session_id;
1235 ether_addr_copy(reply_cm->pppoe_remote_mac, msg->pppoe_rule.flow_pppoe_remote_mac);
1236 }
1237
1238 if (msg->valid_flags & SFE_RULE_CREATE_PPPOE_ENCAP_VALID) {
1239 original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_PPPOE_ENCAP;
Wayne Tanbb7f1782021-12-13 11:16:04 -08001240 original_cm->l2_hdr_size += SFE_PPPOE_SESSION_HEADER_SIZE;
Guduri Prathyusha647fe3e2021-11-22 19:17:51 +05301241 original_cm->pppoe_session_id = msg->pppoe_rule.return_pppoe_session_id;
1242 ether_addr_copy(original_cm->pppoe_remote_mac, msg->pppoe_rule.return_pppoe_remote_mac);
1243
1244 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_PPPOE_DECAP;
1245 reply_cm->pppoe_session_id = msg->pppoe_rule.return_pppoe_session_id;
1246 ether_addr_copy(reply_cm->pppoe_remote_mac, msg->pppoe_rule.return_pppoe_remote_mac);
1247 }
1248
Xiaoping Fan978b3772015-05-27 14:15:18 -07001249 /*
Ken Zhubbf49652021-09-12 15:33:09 -07001250	 * For non-ARP interfaces we do not write an L2 header.
Guduri Prathyusha79a5fee2021-11-11 17:59:10 +05301251	 * PPPoE is excluded from this, since PPPoE encap/decap is now supported.
Xiaoping Fan978b3772015-05-27 14:15:18 -07001252 */
Guduri Prathyusha79a5fee2021-11-11 17:59:10 +05301253 if (sfe_ipv6_xmit_eth_type_check(dest_dev, original_cm->flags)) {
Ratheesh Kannoth29140aa2021-10-20 08:25:02 +05301254
1255 /*
1256 * Check whether the rule has configured a specific source MAC address to use.
1257 * This is needed when virtual L3 interfaces such as br-lan, macvlan, vlan are used during egress
1258 */
Ratheesh Kannoth71fc51e2022-01-05 10:02:47 +05301259 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_BRIDGE_FLOW) {
1260 ether_addr_copy((u8 *)original_cm->xmit_src_mac, (u8 *)msg->conn_rule.flow_mac);
Ratheesh Kannoth29140aa2021-10-20 08:25:02 +05301261 } else {
Ratheesh Kannoth71fc51e2022-01-05 10:02:47 +05301262 if ((msg->valid_flags & SFE_RULE_CREATE_SRC_MAC_VALID) &&
1263 (msg->src_mac_rule.mac_valid_flags & SFE_SRC_MAC_RETURN_VALID)) {
1264 ether_addr_copy((u8 *)original_cm->xmit_src_mac, (u8 *)msg->src_mac_rule.return_src_mac);
1265 } else {
1266 ether_addr_copy((u8 *)original_cm->xmit_src_mac, (u8 *)dest_dev->dev_addr);
1267 }
Ratheesh Kannoth29140aa2021-10-20 08:25:02 +05301268 }
1269 ether_addr_copy((u8 *)original_cm->xmit_dest_mac, (u8 *)msg->conn_rule.return_mac);
1270
Xiaoping Fan978b3772015-05-27 14:15:18 -07001271 original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_WRITE_L2_HDR;
Wayne Tanbb7f1782021-12-13 11:16:04 -08001272 original_cm->l2_hdr_size += ETH_HLEN;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001273
1274 /*
1275 * If our dev writes Ethernet headers then we can write a really fast
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301276	 * version.
Xiaoping Fan978b3772015-05-27 14:15:18 -07001277 */
1278 if (dest_dev->header_ops) {
1279 if (dest_dev->header_ops->create == eth_header) {
1280 original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_WRITE_FAST_ETH_HDR;
1281 }
1282 }
1283 }
1284
1285 /*
1286 * Fill in the "reply" direction connection matching object.
1287 */
1288 reply_cm->match_dev = dest_dev;
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301289 reply_cm->match_protocol = tuple->protocol;
1290 reply_cm->match_src_ip[0] = *(struct sfe_ipv6_addr *)tuple->return_ip;
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301291 reply_cm->match_dest_ip[0] = *(struct sfe_ipv6_addr *)tuple->flow_ip;
1292 reply_cm->match_dest_port = tuple->flow_ident;
1293 reply_cm->xlate_src_ip[0] = *(struct sfe_ipv6_addr *)tuple->return_ip;
1294 reply_cm->xlate_src_port = tuple->return_ident;
1295 reply_cm->xlate_dest_ip[0] = *(struct sfe_ipv6_addr *)tuple->flow_ip;
1296 reply_cm->xlate_dest_port = tuple->flow_ident;
1297
Suruchi Suman23a279d2021-11-16 15:13:09 +05301298 /*
1299 * Keep source port as 0 for VxLAN tunnels.
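	 * The outer UDP source port of a VxLAN tunnel is typically derived from a hash of the
	 * inner flow and is not known when the rule is created, so it is left as a wildcard (0)
	 * rather than being matched.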
1300 */
1301 if (netif_is_vxlan(src_dev) || netif_is_vxlan(dest_dev)) {
1302 reply_cm->match_src_port = 0;
1303 } else {
1304 reply_cm->match_src_port = tuple->return_ident;
1305 }
1306
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301307 atomic_set(&original_cm->rx_byte_count, 0);
	atomic_set(&reply_cm->rx_packet_count, 0);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001308	reply_cm->rx_packet_count64 = 0;
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301309 atomic_set(&reply_cm->rx_byte_count, 0);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001310 reply_cm->rx_byte_count64 = 0;
1311 reply_cm->xmit_dev = src_dev;
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301312 reply_cm->xmit_dev_mtu = msg->conn_rule.flow_mtu;
Ratheesh Kannoth29140aa2021-10-20 08:25:02 +05301313
Xiaoping Fan978b3772015-05-27 14:15:18 -07001314 reply_cm->connection = c;
1315 reply_cm->counter_match = original_cm;
Suruchi Suman23a279d2021-11-16 15:13:09 +05301316
1318 if (msg->valid_flags & SFE_RULE_CREATE_MARK_VALID) {
1319 reply_cm->mark = msg->mark_rule.return_mark;
1320 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_MARK;
1321 }
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301322 if (msg->valid_flags & SFE_RULE_CREATE_QOS_VALID) {
1323 reply_cm->priority = msg->qos_rule.return_qos_tag;
Xiaoping Fane1963d42015-08-25 17:06:19 -07001324 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_PRIORITY_REMARK;
1325 }
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301326 if (msg->valid_flags & SFE_RULE_CREATE_DSCP_MARKING_VALID) {
1327 reply_cm->dscp = msg->dscp_rule.return_dscp << SFE_IPV6_DSCP_SHIFT;
Xiaoping Fane1963d42015-08-25 17:06:19 -07001328 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_DSCP_REMARK;
1329 }
Guduri Prathyushaeb31c902021-11-10 20:18:50 +05301330
Ratheesh Kannoth71fc51e2022-01-05 10:02:47 +05301331 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_BRIDGE_FLOW) {
1332 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_BRIDGE_FLOW;
1333 }
1334
Suruchi Suman23a279d2021-11-16 15:13:09 +05301335 /*
1336 * Setup UDP Socket if found to be valid for decap.
1337 */
1338 RCU_INIT_POINTER(reply_cm->up, NULL);
1339 net = dev_net(reply_cm->match_dev);
1340 src_if_idx = src_dev->ifindex;
1341
1342 rcu_read_lock();
1343
1344 /*
1345 * Look for the associated sock object.
1346 * __udp6_lib_lookup() holds a reference for this sock object,
1347 * which will be released in sfe_ipv6_flush_connection()
1348 */
1349#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0))
1350 sk = __udp6_lib_lookup(net, (const struct in6_addr *)reply_cm->match_dest_ip,
1351 reply_cm->match_dest_port, (const struct in6_addr *)reply_cm->xlate_src_ip,
1352 reply_cm->xlate_src_port, src_if_idx, &udp_table);
1353#else
1354 sk = __udp6_lib_lookup(net, (const struct in6_addr *)reply_cm->match_dest_ip,
1355 reply_cm->match_dest_port, (const struct in6_addr *)reply_cm->xlate_src_ip,
1356 reply_cm->xlate_src_port, src_if_idx, 0, &udp_table, NULL);
1357#endif
1358 rcu_read_unlock();
1359
1360 /*
1361 * We set the UDP sock pointer as valid only for decap direction.
1362 */
1363 if (sk && udp_sk(sk)->encap_type) {
1364#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0))
1365 if (!atomic_add_unless(&sk->sk_refcnt, 1, 0)) {
1366#else
1367 if (!refcount_inc_not_zero(&sk->sk_refcnt)) {
1368#endif
Wayne Tanbb7f1782021-12-13 11:16:04 -08001369 spin_unlock_bh(&si->lock);
Suruchi Suman23a279d2021-11-16 15:13:09 +05301370 kfree(reply_cm);
1371 kfree(original_cm);
1372 kfree(c);
1373
1374 DEBUG_INFO("sfe: unable to take reference for socket p:%d\n", tuple->protocol);
1375 DEBUG_INFO("SK: connection - \n"
1376 " s: %s:%pI6(%pI6):%u(%u)\n"
1377 " d: %s:%pI6(%pI6):%u(%u)\n",
1378 reply_cm->match_dev->name, &reply_cm->match_src_ip, &reply_cm->xlate_src_ip,
1379 ntohs(reply_cm->match_src_port), ntohs(reply_cm->xlate_src_port),
1380 reply_cm->xmit_dev->name, &reply_cm->match_dest_ip, &reply_cm->xlate_dest_ip,
1381 ntohs(reply_cm->match_dest_port), ntohs(reply_cm->xlate_dest_port));
1382
1383 dev_put(src_dev);
1384 dev_put(dest_dev);
1385
1386 return -ESHUTDOWN;
1387 }
1388
1389 rcu_assign_pointer(reply_cm->up, udp_sk(sk));
1390 DEBUG_INFO("Sock lookup success with reply_cm direction(%p)\n", sk);
1391 DEBUG_INFO("SK: connection - \n"
1392 " s: %s:%pI6(%pI6):%u(%u)\n"
1393 " d: %s:%pI6(%pI6):%u(%u)\n",
1394 reply_cm->match_dev->name, &reply_cm->match_src_ip, &reply_cm->xlate_src_ip,
1395 ntohs(reply_cm->match_src_port), ntohs(reply_cm->xlate_src_port),
1396 reply_cm->xmit_dev->name, &reply_cm->match_dest_ip, &reply_cm->xlate_dest_ip,
1397 ntohs(reply_cm->match_dest_port), ntohs(reply_cm->xlate_dest_port));
1398 }
1399
Wayne Tanbb7f1782021-12-13 11:16:04 -08001400 /*
1401 * Add VLAN rule to reply_cm
1402 */
1403 if (msg->valid_flags & SFE_RULE_CREATE_VLAN_VALID) {
1404 struct sfe_vlan_rule *vlan_primary_rule = &msg->vlan_primary_rule;
1405 struct sfe_vlan_rule *vlan_secondary_rule = &msg->vlan_secondary_rule;
1406 sfe_ipv6_match_entry_set_vlan(reply_cm,
1407 vlan_primary_rule->egress_vlan_tag,
1408 vlan_primary_rule->ingress_vlan_tag,
1409 vlan_secondary_rule->egress_vlan_tag,
1410 vlan_secondary_rule->ingress_vlan_tag);
1411
1412 if ((msg->rule_flags & SFE_RULE_CREATE_FLAG_USE_FLOW_BOTTOM_INTERFACE) &&
1413 reply_cm->egress_vlan_hdr_cnt > 0) {
1414 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_INSERT_EGRESS_VLAN_TAG;
1415 reply_cm->l2_hdr_size += reply_cm->egress_vlan_hdr_cnt * VLAN_HLEN;
1416 }
1417 }
1418
Xiaoping Fan978b3772015-05-27 14:15:18 -07001419#ifdef CONFIG_NF_FLOW_COOKIE
1420 reply_cm->flow_cookie = 0;
1421#endif
Zhi Chen8748eb32015-06-18 12:58:48 -07001422#ifdef CONFIG_XFRM
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301423 if (msg->valid_flags & SFE_RULE_CREATE_DIRECTION_VALID) {
1424 reply_cm->flow_accel = msg->direction_rule.return_accel;
1425 } else {
1426 reply_cm->flow_accel = 1;
1427 }
Zhi Chen8748eb32015-06-18 12:58:48 -07001428#endif
Xiaoping Fan978b3772015-05-27 14:15:18 -07001429 /*
Ratheesh Kannotha3cf0e02021-12-09 09:44:10 +05301430	 * If l2_features are disabled and the flow uses L2 features such as macvlan/bridge/PPPoE/VLAN,
 1431	 * the bottom interfaces are expected to be disabled in the flow rule and only the top interfaces
 1432	 * are used. In such cases, do not use HW checksum offload; checksum offload is used only when we
 1433	 * are sending directly to a destination interface that supports it.
1434 */
Suruchi Sumanf2077182022-01-13 21:35:23 +05301435 if (likely(src_dev->features & NETIF_F_HW_CSUM) && !(netif_is_vxlan(src_dev) || netif_is_vxlan(dest_dev))) {
Ratheesh Kannotha3cf0e02021-12-09 09:44:10 +05301436 if ((msg->conn_rule.flow_top_interface_num == msg->conn_rule.flow_interface_num) ||
1437 (msg->rule_flags & SFE_RULE_CREATE_FLAG_USE_FLOW_BOTTOM_INTERFACE)) {
1438 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_CSUM_OFFLOAD;
1439 }
1440 }
1441
1442 /*
Ken Zhubbf49652021-09-12 15:33:09 -07001443	 * For non-ARP interfaces we do not write an L2 header.
Guduri Prathyusha79a5fee2021-11-11 17:59:10 +05301444	 * PPPoE is excluded from this, since PPPoE encap/decap is now supported.
Xiaoping Fan978b3772015-05-27 14:15:18 -07001445 */
Guduri Prathyusha79a5fee2021-11-11 17:59:10 +05301446 if (sfe_ipv6_xmit_eth_type_check(src_dev, reply_cm->flags)) {
Ratheesh Kannoth29140aa2021-10-20 08:25:02 +05301447
1448 /*
1449 * Check whether the rule has configured a specific source MAC address to use.
1450 * This is needed when virtual L3 interfaces such as br-lan, macvlan, vlan are used during egress
1451 */
Ratheesh Kannoth71fc51e2022-01-05 10:02:47 +05301452 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_BRIDGE_FLOW) {
1453 ether_addr_copy((u8 *)reply_cm->xmit_src_mac, (u8 *)msg->conn_rule.return_mac);
Ratheesh Kannoth29140aa2021-10-20 08:25:02 +05301454 } else {
Ratheesh Kannoth71fc51e2022-01-05 10:02:47 +05301455 if ((msg->valid_flags & SFE_RULE_CREATE_SRC_MAC_VALID) &&
1456 (msg->src_mac_rule.mac_valid_flags & SFE_SRC_MAC_FLOW_VALID)) {
1457 ether_addr_copy((u8 *)reply_cm->xmit_src_mac, (u8 *)msg->src_mac_rule.flow_src_mac);
1458 } else {
1459 ether_addr_copy((u8 *)reply_cm->xmit_src_mac, (u8 *)src_dev->dev_addr);
1460 }
Ratheesh Kannoth29140aa2021-10-20 08:25:02 +05301461 }
1462
1463 ether_addr_copy((u8 *)reply_cm->xmit_dest_mac, (u8 *)msg->conn_rule.flow_mac);
1464
Xiaoping Fan978b3772015-05-27 14:15:18 -07001465 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_WRITE_L2_HDR;
Wayne Tanbb7f1782021-12-13 11:16:04 -08001466 reply_cm->l2_hdr_size += ETH_HLEN;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001467
1468 /*
1469 * If our dev writes Ethernet headers then we can write a really fast
1470 * version.
1471 */
1472 if (src_dev->header_ops) {
1473 if (src_dev->header_ops->create == eth_header) {
1474 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_WRITE_FAST_ETH_HDR;
1475 }
1476 }
1477 }
1478
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301479 /*
1480 * No support for NAT in ipv6
1481 */
Xiaoping Fan978b3772015-05-27 14:15:18 -07001482
Xiaoping Fan978b3772015-05-27 14:15:18 -07001483 /*
Xiaoping Fan978b3772015-05-27 14:15:18 -07001484 * Initialize the protocol-specific information that we track.
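	 * Only TCP carries extra per-connection state here (window scaling and the
	 * sequence-space limits used by the TCP sanity checks); other protocols need none.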
1485 */
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301486 switch (tuple->protocol) {
Xiaoping Fan978b3772015-05-27 14:15:18 -07001487 case IPPROTO_TCP:
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301488 original_cm->protocol_state.tcp.win_scale = msg->tcp_rule.flow_window_scale;
1489 original_cm->protocol_state.tcp.max_win = msg->tcp_rule.flow_max_window ? msg->tcp_rule.flow_max_window : 1;
1490 original_cm->protocol_state.tcp.end = msg->tcp_rule.flow_end;
1491 original_cm->protocol_state.tcp.max_end = msg->tcp_rule.flow_max_end;
1492 reply_cm->protocol_state.tcp.win_scale = msg->tcp_rule.return_window_scale;
1493 reply_cm->protocol_state.tcp.max_win = msg->tcp_rule.return_max_window ? msg->tcp_rule.return_max_window : 1;
1494 reply_cm->protocol_state.tcp.end = msg->tcp_rule.return_end;
1495 reply_cm->protocol_state.tcp.max_end = msg->tcp_rule.return_max_end;
1496 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_NO_SEQ_CHECK) {
Xiaoping Fan978b3772015-05-27 14:15:18 -07001497 original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_NO_SEQ_CHECK;
1498 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_NO_SEQ_CHECK;
1499 }
1500 break;
1501 }
1502
Wayne Tanbb7f1782021-12-13 11:16:04 -08001503 /*
1504 * Fill in the ipv6_connection object.
1505 */
1506 c->protocol = tuple->protocol;
1507 c->src_ip[0] = *(struct sfe_ipv6_addr *)tuple->flow_ip;
1508 c->src_ip_xlate[0] = *(struct sfe_ipv6_addr *)tuple->flow_ip;
1509 c->src_port = tuple->flow_ident;
1510 c->src_port_xlate = tuple->flow_ident;
1511 c->original_dev = src_dev;
1512 c->original_match = original_cm;
1513
1514 c->dest_ip[0] = *(struct sfe_ipv6_addr *)tuple->return_ip;
1515 c->dest_ip_xlate[0] = *(struct sfe_ipv6_addr *)tuple->return_ip;
1516 c->dest_port = tuple->return_ident;
1517 c->dest_port_xlate = tuple->return_ident;
1518
1519 c->reply_dev = dest_dev;
1520 c->reply_match = reply_cm;
1521 c->debug_read_seq = 0;
1522 c->last_sync_jiffies = get_jiffies_64();
1523 c->removed = false;
1524
Xiaoping Fan978b3772015-05-27 14:15:18 -07001525 sfe_ipv6_connection_match_compute_translations(original_cm);
1526 sfe_ipv6_connection_match_compute_translations(reply_cm);
1527 sfe_ipv6_insert_connection(si, c);
1528
1529 spin_unlock_bh(&si->lock);
1530
1531 /*
1532 * We have everything we need!
1533 */
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301534 DEBUG_INFO("new connection - p: %d\n"
Tian Yang45f39c82020-10-06 14:07:47 -07001535 " s: %s:%pxM(%pxM):%pI6(%pI6):%u(%u)\n"
1536 " d: %s:%pxM(%pxM):%pI6(%pI6):%u(%u)\n",
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301537 tuple->protocol,
1538 src_dev->name, msg->conn_rule.flow_mac, NULL,
1539 (void *)tuple->flow_ip, (void *)tuple->flow_ip, ntohs(tuple->flow_ident), ntohs(tuple->flow_ident),
1540 dest_dev->name, NULL, msg->conn_rule.return_mac,
1541 (void *)tuple->return_ip, (void *)tuple->return_ip, ntohs(tuple->return_ident), ntohs(tuple->return_ident));
Xiaoping Fan978b3772015-05-27 14:15:18 -07001542
1543 return 0;
1544}
1545
1546/*
1547 * sfe_ipv6_destroy_rule()
1548 * Destroy a forwarding rule.
1549 */
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301550void sfe_ipv6_destroy_rule(struct sfe_ipv6_rule_destroy_msg *msg)
Xiaoping Fan978b3772015-05-27 14:15:18 -07001551{
1552 struct sfe_ipv6 *si = &__si6;
1553 struct sfe_ipv6_connection *c;
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301554 bool ret;
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301555 struct sfe_ipv6_5tuple *tuple = &msg->tuple;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001556
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05301557 this_cpu_inc(si->stats_pcpu->connection_destroy_requests64);
1558
Xiaoping Fan978b3772015-05-27 14:15:18 -07001559 spin_lock_bh(&si->lock);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001560
1561 /*
1562 * Check to see if we have a flow that matches the rule we're trying
1563 * to destroy. If there isn't then we can't destroy it.
1564 */
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301565 c = sfe_ipv6_find_connection(si, tuple->protocol, (struct sfe_ipv6_addr *)tuple->flow_ip, tuple->flow_ident,
1566 (struct sfe_ipv6_addr *)tuple->return_ip, tuple->return_ident);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001567 if (!c) {
Xiaoping Fan978b3772015-05-27 14:15:18 -07001568 spin_unlock_bh(&si->lock);
1569
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05301570 this_cpu_inc(si->stats_pcpu->connection_destroy_misses64);
1571
Xiaoping Fan978b3772015-05-27 14:15:18 -07001572 DEBUG_TRACE("connection does not exist - p: %d, s: %pI6:%u, d: %pI6:%u\n",
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301573 tuple->protocol, tuple->flow_ip, ntohs(tuple->flow_ident),
1574 tuple->return_ip, ntohs(tuple->return_ident));
Xiaoping Fan978b3772015-05-27 14:15:18 -07001575 return;
1576 }
1577
1578 /*
1579 * Remove our connection details from the hash tables.
1580 */
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301581 ret = sfe_ipv6_remove_connection(si, c);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001582 spin_unlock_bh(&si->lock);
1583
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301584 if (ret) {
1585 sfe_ipv6_flush_connection(si, c, SFE_SYNC_REASON_DESTROY);
1586 }
Xiaoping Fan978b3772015-05-27 14:15:18 -07001587
1588 DEBUG_INFO("connection destroyed - p: %d, s: %pI6:%u, d: %pI6:%u\n",
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301589 tuple->protocol, tuple->flow_ip, ntohs(tuple->flow_ident),
1590 tuple->return_ip, ntohs(tuple->return_ident));
Xiaoping Fan978b3772015-05-27 14:15:18 -07001591}
1592
1593/*
1594 * sfe_ipv6_register_sync_rule_callback()
1595 * Register a callback for rule synchronization.
1596 */
1597void sfe_ipv6_register_sync_rule_callback(sfe_sync_rule_callback_t sync_rule_callback)
1598{
1599 struct sfe_ipv6 *si = &__si6;
1600
1601 spin_lock_bh(&si->lock);
1602 rcu_assign_pointer(si->sync_rule_callback, sync_rule_callback);
1603 spin_unlock_bh(&si->lock);
1604}
1605
1606/*
1607 * sfe_ipv6_get_debug_dev()
1608 */
1609static ssize_t sfe_ipv6_get_debug_dev(struct device *dev,
1610 struct device_attribute *attr,
1611 char *buf)
1612{
1613 struct sfe_ipv6 *si = &__si6;
1614 ssize_t count;
1615 int num;
1616
1617 spin_lock_bh(&si->lock);
1618 num = si->debug_dev;
1619 spin_unlock_bh(&si->lock);
1620
1621 count = snprintf(buf, (ssize_t)PAGE_SIZE, "%d\n", num);
1622 return count;
1623}
1624
1625/*
1626 * sfe_ipv6_destroy_all_rules_for_dev()
1627 * Destroy all connections that match a particular device.
1628 *
1629 * If we pass dev as NULL then this destroys all connections.
1630 */
1631void sfe_ipv6_destroy_all_rules_for_dev(struct net_device *dev)
1632{
1633 struct sfe_ipv6 *si = &__si6;
1634 struct sfe_ipv6_connection *c;
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301635 bool ret;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001636
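	/*
	 * Remove matching connections one at a time: find a candidate under the lock, unhook it,
	 * then drop the lock before flushing it, and restart the scan from the head of the list.
	 */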
Xiaoping Fan34586472015-07-03 02:20:35 -07001637another_round:
Xiaoping Fan978b3772015-05-27 14:15:18 -07001638 spin_lock_bh(&si->lock);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001639
Xiaoping Fan34586472015-07-03 02:20:35 -07001640 for (c = si->all_connections_head; c; c = c->all_connections_next) {
Xiaoping Fan978b3772015-05-27 14:15:18 -07001641 /*
Xiaoping Fan34586472015-07-03 02:20:35 -07001642 * Does this connection relate to the device we are destroying?
Xiaoping Fan978b3772015-05-27 14:15:18 -07001643 */
1644 if (!dev
1645 || (dev == c->original_dev)
1646 || (dev == c->reply_dev)) {
Xiaoping Fan34586472015-07-03 02:20:35 -07001647 break;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001648 }
Xiaoping Fan34586472015-07-03 02:20:35 -07001649 }
Xiaoping Fan978b3772015-05-27 14:15:18 -07001650
Xiaoping Fan34586472015-07-03 02:20:35 -07001651 if (c) {
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301652 ret = sfe_ipv6_remove_connection(si, c);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001653 }
1654
1655 spin_unlock_bh(&si->lock);
Xiaoping Fan34586472015-07-03 02:20:35 -07001656
1657 if (c) {
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301658 if (ret) {
1659 sfe_ipv6_flush_connection(si, c, SFE_SYNC_REASON_DESTROY);
1660 }
Xiaoping Fan34586472015-07-03 02:20:35 -07001661 goto another_round;
1662 }
Xiaoping Fan978b3772015-05-27 14:15:18 -07001663}
1664
1665/*
1666 * sfe_ipv6_periodic_sync()
1667 */
Ken Zhu137722d2021-09-23 17:57:36 -07001668static void sfe_ipv6_periodic_sync(struct work_struct *work)
Xiaoping Fan978b3772015-05-27 14:15:18 -07001669{
Ken Zhu137722d2021-09-23 17:57:36 -07001670 struct sfe_ipv6 *si = container_of((struct delayed_work *)work, struct sfe_ipv6, sync_dwork);
Xiaoping Fan6a1672f2016-08-17 19:58:12 -07001671 u64 now_jiffies;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001672 int quota;
1673 sfe_sync_rule_callback_t sync_rule_callback;
Ken Zhu32b95392021-09-03 13:52:04 -07001674 struct sfe_ipv6_connection *c;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001675
1676 now_jiffies = get_jiffies_64();
1677
1678 rcu_read_lock();
1679 sync_rule_callback = rcu_dereference(si->sync_rule_callback);
1680 if (!sync_rule_callback) {
1681 rcu_read_unlock();
1682 goto done;
1683 }
1684
1685 spin_lock_bh(&si->lock);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001686
1687 /*
Ken Zhu32b95392021-09-03 13:52:04 -07001688 * If we have reached the end of the connection list, walk from
1689 * the connection head.
1690 */
1691 c = si->wc_next;
1692 if (unlikely(!c)) {
1693 c = si->all_connections_head;
1694 }
1695 /*
Xiaoping Fan978b3772015-05-27 14:15:18 -07001696 * Get an estimate of the number of connections to parse in this sync.
1697 */
1698 quota = (si->num_connections + 63) / 64;
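	/*
	 * Roughly 1/64th of the connection table (rounded up) is examined per pass; since this
	 * work is re-armed about every 10ms, a busy table is fully swept in roughly two thirds
	 * of a second.
	 */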
1699
1700 /*
Ken Zhu32b95392021-09-03 13:52:04 -07001701 * Walk the "all connection" list and sync the connection state.
Xiaoping Fan978b3772015-05-27 14:15:18 -07001702 */
Ken Zhu32b95392021-09-03 13:52:04 -07001703 while (likely(c && quota)) {
Xiaoping Fan978b3772015-05-27 14:15:18 -07001704 struct sfe_ipv6_connection_match *cm;
1705 struct sfe_ipv6_connection_match *counter_cm;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001706 struct sfe_connection_sync sis;
1707
Ken Zhu32b95392021-09-03 13:52:04 -07001708 cm = c->original_match;
1709 counter_cm = c->reply_match;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001710
1711 /*
Ken Zhu32b95392021-09-03 13:52:04 -07001712	 * Didn't receive packets in the original direction or reply
1713 * direction, move to the next connection.
Xiaoping Fan978b3772015-05-27 14:15:18 -07001714 */
Ken Zhu32b95392021-09-03 13:52:04 -07001715 if (!atomic_read(&cm->rx_packet_count) && !atomic_read(&counter_cm->rx_packet_count)) {
1716 c = c->all_connections_next;
1717 continue;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001718 }
1719
Ken Zhu32b95392021-09-03 13:52:04 -07001720 quota--;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001721
1722 /*
1723 * Sync the connection state.
1724 */
Xiaoping Fan99cb4c12015-08-21 19:07:32 -07001725 sfe_ipv6_gen_sync_connection(si, c, &sis, SFE_SYNC_REASON_STATS, now_jiffies);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001726
Ken Zhu32b95392021-09-03 13:52:04 -07001727 si->wc_next = c->all_connections_next;
1728
Xiaoping Fan978b3772015-05-27 14:15:18 -07001729 spin_unlock_bh(&si->lock);
1730 sync_rule_callback(&sis);
1731 spin_lock_bh(&si->lock);
Ken Zhu32b95392021-09-03 13:52:04 -07001732
1733 /*
 1734	 * c must be read and used within a single lock/unlock window, because the
 1735	 * connection could be removed while the lock is not held; so wc_next is only
 1736	 * re-read after the callback returns and the lock has been re-acquired.
1737 */
1738 c = si->wc_next;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001739 }
1740
Ken Zhu32b95392021-09-03 13:52:04 -07001741 /*
 1742	 * At the end of the loop, leave wc_next pointing at the connection where we stopped.
1743 */
1744 si->wc_next = c;
1745
Xiaoping Fan978b3772015-05-27 14:15:18 -07001746 spin_unlock_bh(&si->lock);
1747 rcu_read_unlock();
1748
1749done:
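	/*
	 * Re-arm the sync work on the configured CPU; (HZ + 99) / 100 jiffies is roughly 10ms.
	 */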
Ken Zhu137722d2021-09-23 17:57:36 -07001750 schedule_delayed_work_on(si->work_cpu, (struct delayed_work *)work, ((HZ + 99) / 100));
Xiaoping Fan978b3772015-05-27 14:15:18 -07001751}
1752
1753/*
1754 * sfe_ipv6_debug_dev_read_start()
1755 * Generate part of the XML output.
1756 */
1757static bool sfe_ipv6_debug_dev_read_start(struct sfe_ipv6 *si, char *buffer, char *msg, size_t *length,
1758 int *total_read, struct sfe_ipv6_debug_xml_write_state *ws)
1759{
1760 int bytes_read;
1761
Xiaoping Fan34586472015-07-03 02:20:35 -07001762 si->debug_read_seq++;
1763
Xiaoping Fan978b3772015-05-27 14:15:18 -07001764 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE, "<sfe_ipv6>\n");
1765 if (copy_to_user(buffer + *total_read, msg, CHAR_DEV_MSG_SIZE)) {
1766 return false;
1767 }
1768
1769 *length -= bytes_read;
1770 *total_read += bytes_read;
1771
1772 ws->state++;
1773 return true;
1774}
1775
1776/*
1777 * sfe_ipv6_debug_dev_read_connections_start()
1778 * Generate part of the XML output.
1779 */
1780static bool sfe_ipv6_debug_dev_read_connections_start(struct sfe_ipv6 *si, char *buffer, char *msg, size_t *length,
1781 int *total_read, struct sfe_ipv6_debug_xml_write_state *ws)
1782{
1783 int bytes_read;
1784
1785 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE, "\t<connections>\n");
1786 if (copy_to_user(buffer + *total_read, msg, CHAR_DEV_MSG_SIZE)) {
1787 return false;
1788 }
1789
1790 *length -= bytes_read;
1791 *total_read += bytes_read;
1792
1793 ws->state++;
1794 return true;
1795}
1796
1797/*
1798 * sfe_ipv6_debug_dev_read_connections_connection()
1799 * Generate part of the XML output.
1800 */
1801static bool sfe_ipv6_debug_dev_read_connections_connection(struct sfe_ipv6 *si, char *buffer, char *msg, size_t *length,
1802 int *total_read, struct sfe_ipv6_debug_xml_write_state *ws)
1803{
1804 struct sfe_ipv6_connection *c;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001805 struct sfe_ipv6_connection_match *original_cm;
1806 struct sfe_ipv6_connection_match *reply_cm;
1807 int bytes_read;
1808 int protocol;
1809 struct net_device *src_dev;
1810 struct sfe_ipv6_addr src_ip;
1811 struct sfe_ipv6_addr src_ip_xlate;
1812 __be16 src_port;
1813 __be16 src_port_xlate;
Xiaoping Fan6a1672f2016-08-17 19:58:12 -07001814 u64 src_rx_packets;
1815 u64 src_rx_bytes;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001816 struct net_device *dest_dev;
1817 struct sfe_ipv6_addr dest_ip;
1818 struct sfe_ipv6_addr dest_ip_xlate;
1819 __be16 dest_port;
1820 __be16 dest_port_xlate;
Xiaoping Fan6a1672f2016-08-17 19:58:12 -07001821 u64 dest_rx_packets;
1822 u64 dest_rx_bytes;
1823 u64 last_sync_jiffies;
Ken Zhu37040ea2021-09-09 21:11:15 -07001824 u32 src_mark, dest_mark, src_priority, dest_priority, src_dscp, dest_dscp;
Guduri Prathyushaeb31c902021-11-10 20:18:50 +05301825 u32 packet, byte, original_cm_flags;
1826 u16 pppoe_session_id;
1827 u8 pppoe_remote_mac[ETH_ALEN];
Xiaoping Fan978b3772015-05-27 14:15:18 -07001828#ifdef CONFIG_NF_FLOW_COOKIE
1829 int src_flow_cookie, dst_flow_cookie;
1830#endif
1831
1832 spin_lock_bh(&si->lock);
Xiaoping Fan34586472015-07-03 02:20:35 -07001833
1834 for (c = si->all_connections_head; c; c = c->all_connections_next) {
1835 if (c->debug_read_seq < si->debug_read_seq) {
1836 c->debug_read_seq = si->debug_read_seq;
1837 break;
1838 }
1839 }
Xiaoping Fan978b3772015-05-27 14:15:18 -07001840
1841 /*
Xiaoping Fan34586472015-07-03 02:20:35 -07001842 * If there were no connections then move to the next state.
Xiaoping Fan978b3772015-05-27 14:15:18 -07001843 */
1844 if (!c) {
Xiaoping Fan978b3772015-05-27 14:15:18 -07001845 spin_unlock_bh(&si->lock);
Xiaoping Fan34586472015-07-03 02:20:35 -07001846 ws->state++;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001847 return true;
1848 }
1849
1850 original_cm = c->original_match;
1851 reply_cm = c->reply_match;
1852
1853 protocol = c->protocol;
1854 src_dev = c->original_dev;
1855 src_ip = c->src_ip[0];
1856 src_ip_xlate = c->src_ip_xlate[0];
1857 src_port = c->src_port;
1858 src_port_xlate = c->src_port_xlate;
Xiaoping Fane1963d42015-08-25 17:06:19 -07001859 src_priority = original_cm->priority;
1860 src_dscp = original_cm->dscp >> SFE_IPV6_DSCP_SHIFT;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001861
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301862 sfe_ipv6_connection_match_update_summary_stats(original_cm, &packet, &byte);
1863 sfe_ipv6_connection_match_update_summary_stats(reply_cm, &packet, &byte);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001864
1865 src_rx_packets = original_cm->rx_packet_count64;
1866 src_rx_bytes = original_cm->rx_byte_count64;
Ken Zhu37040ea2021-09-09 21:11:15 -07001867 src_mark = original_cm->mark;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001868 dest_dev = c->reply_dev;
1869 dest_ip = c->dest_ip[0];
1870 dest_ip_xlate = c->dest_ip_xlate[0];
1871 dest_port = c->dest_port;
1872 dest_port_xlate = c->dest_port_xlate;
Xiaoping Fane1963d42015-08-25 17:06:19 -07001873 dest_priority = reply_cm->priority;
1874 dest_dscp = reply_cm->dscp >> SFE_IPV6_DSCP_SHIFT;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001875 dest_rx_packets = reply_cm->rx_packet_count64;
1876 dest_rx_bytes = reply_cm->rx_byte_count64;
1877 last_sync_jiffies = get_jiffies_64() - c->last_sync_jiffies;
Guduri Prathyushaeb31c902021-11-10 20:18:50 +05301878 original_cm_flags = original_cm->flags;
1879 pppoe_session_id = original_cm->pppoe_session_id;
1880 ether_addr_copy(pppoe_remote_mac, original_cm->pppoe_remote_mac);
Ken Zhu37040ea2021-09-09 21:11:15 -07001881 dest_mark = reply_cm->mark;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001882#ifdef CONFIG_NF_FLOW_COOKIE
1883 src_flow_cookie = original_cm->flow_cookie;
1884 dst_flow_cookie = reply_cm->flow_cookie;
1885#endif
1886 spin_unlock_bh(&si->lock);
1887
1888 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE, "\t\t<connection "
1889 "protocol=\"%u\" "
1890 "src_dev=\"%s\" "
1891 "src_ip=\"%pI6\" src_ip_xlate=\"%pI6\" "
1892 "src_port=\"%u\" src_port_xlate=\"%u\" "
Xiaoping Fane1963d42015-08-25 17:06:19 -07001893 "src_priority=\"%u\" src_dscp=\"%u\" "
Xiaoping Fan978b3772015-05-27 14:15:18 -07001894 "src_rx_pkts=\"%llu\" src_rx_bytes=\"%llu\" "
Ken Zhu37040ea2021-09-09 21:11:15 -07001895 "src_mark=\"%08x\" "
Xiaoping Fan978b3772015-05-27 14:15:18 -07001896 "dest_dev=\"%s\" "
1897 "dest_ip=\"%pI6\" dest_ip_xlate=\"%pI6\" "
1898 "dest_port=\"%u\" dest_port_xlate=\"%u\" "
Xiaoping Fane1963d42015-08-25 17:06:19 -07001899 "dest_priority=\"%u\" dest_dscp=\"%u\" "
Xiaoping Fan978b3772015-05-27 14:15:18 -07001900 "dest_rx_pkts=\"%llu\" dest_rx_bytes=\"%llu\" "
Ken Zhu37040ea2021-09-09 21:11:15 -07001901 "dest_mark=\"%08x\" "
Xiaoping Fan978b3772015-05-27 14:15:18 -07001902#ifdef CONFIG_NF_FLOW_COOKIE
1903 "src_flow_cookie=\"%d\" dst_flow_cookie=\"%d\" "
1904#endif
Ken Zhu37040ea2021-09-09 21:11:15 -07001905 "last_sync=\"%llu\" ",
Xiaoping Fan978b3772015-05-27 14:15:18 -07001906 protocol,
1907 src_dev->name,
1908 &src_ip, &src_ip_xlate,
1909 ntohs(src_port), ntohs(src_port_xlate),
Xiaoping Fane1963d42015-08-25 17:06:19 -07001910 src_priority, src_dscp,
Xiaoping Fan978b3772015-05-27 14:15:18 -07001911 src_rx_packets, src_rx_bytes,
Ken Zhu37040ea2021-09-09 21:11:15 -07001912 src_mark,
Xiaoping Fan978b3772015-05-27 14:15:18 -07001913 dest_dev->name,
1914 &dest_ip, &dest_ip_xlate,
1915 ntohs(dest_port), ntohs(dest_port_xlate),
Xiaoping Fane1963d42015-08-25 17:06:19 -07001916 dest_priority, dest_dscp,
Xiaoping Fan978b3772015-05-27 14:15:18 -07001917 dest_rx_packets, dest_rx_bytes,
Ken Zhu37040ea2021-09-09 21:11:15 -07001918 dest_mark,
Xiaoping Fan978b3772015-05-27 14:15:18 -07001919#ifdef CONFIG_NF_FLOW_COOKIE
1920 src_flow_cookie, dst_flow_cookie,
1921#endif
Ken Zhu37040ea2021-09-09 21:11:15 -07001922 last_sync_jiffies);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001923
Guduri Prathyushaeb31c902021-11-10 20:18:50 +05301924	if (original_cm_flags & (SFE_IPV6_CONNECTION_MATCH_FLAG_PPPOE_DECAP | SFE_IPV6_CONNECTION_MATCH_FLAG_PPPOE_ENCAP)) {
Guduri Prathyusha79a5fee2021-11-11 17:59:10 +05301925		bytes_read += snprintf(msg + bytes_read, CHAR_DEV_MSG_SIZE - bytes_read, "pppoe_session_id=\"%u\" pppoe_server_MAC=\"%pM\" ",
Guduri Prathyushaeb31c902021-11-10 20:18:50 +05301926				       pppoe_session_id, pppoe_remote_mac);
 1927	}
 1928
 1929	bytes_read += snprintf(msg + bytes_read, CHAR_DEV_MSG_SIZE - bytes_read, "/>\n");
1930
Xiaoping Fan978b3772015-05-27 14:15:18 -07001931 if (copy_to_user(buffer + *total_read, msg, CHAR_DEV_MSG_SIZE)) {
1932 return false;
1933 }
1934
1935 *length -= bytes_read;
1936 *total_read += bytes_read;
1937
Xiaoping Fan978b3772015-05-27 14:15:18 -07001938 return true;
1939}
1940
1941/*
1942 * sfe_ipv6_debug_dev_read_connections_end()
1943 * Generate part of the XML output.
1944 */
1945static bool sfe_ipv6_debug_dev_read_connections_end(struct sfe_ipv6 *si, char *buffer, char *msg, size_t *length,
1946 int *total_read, struct sfe_ipv6_debug_xml_write_state *ws)
1947{
1948 int bytes_read;
1949
1950 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE, "\t</connections>\n");
1951 if (copy_to_user(buffer + *total_read, msg, CHAR_DEV_MSG_SIZE)) {
1952 return false;
1953 }
1954
1955 *length -= bytes_read;
1956 *total_read += bytes_read;
1957
1958 ws->state++;
1959 return true;
1960}
1961
1962/*
1963 * sfe_ipv6_debug_dev_read_exceptions_start()
1964 * Generate part of the XML output.
1965 */
1966static bool sfe_ipv6_debug_dev_read_exceptions_start(struct sfe_ipv6 *si, char *buffer, char *msg, size_t *length,
1967 int *total_read, struct sfe_ipv6_debug_xml_write_state *ws)
1968{
1969 int bytes_read;
1970
1971 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE, "\t<exceptions>\n");
1972 if (copy_to_user(buffer + *total_read, msg, CHAR_DEV_MSG_SIZE)) {
1973 return false;
1974 }
1975
1976 *length -= bytes_read;
1977 *total_read += bytes_read;
1978
1979 ws->state++;
1980 return true;
1981}
1982
1983/*
1984 * sfe_ipv6_debug_dev_read_exceptions_exception()
1985 * Generate part of the XML output.
1986 */
1987static bool sfe_ipv6_debug_dev_read_exceptions_exception(struct sfe_ipv6 *si, char *buffer, char *msg, size_t *length,
1988 int *total_read, struct sfe_ipv6_debug_xml_write_state *ws)
1989{
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05301990 int i;
1991 u64 val = 0;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001992
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05301993 for_each_possible_cpu(i) {
1994 const struct sfe_ipv6_stats *s = per_cpu_ptr(si->stats_pcpu, i);
1995 val += s->exception_events64[ws->iter_exception];
1996 }
Xiaoping Fan978b3772015-05-27 14:15:18 -07001997
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05301998 if (val) {
Xiaoping Fan978b3772015-05-27 14:15:18 -07001999 int bytes_read;
2000
2001 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE,
2002 "\t\t<exception name=\"%s\" count=\"%llu\" />\n",
2003 sfe_ipv6_exception_events_string[ws->iter_exception],
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05302004 val);
2005
Xiaoping Fan978b3772015-05-27 14:15:18 -07002006 if (copy_to_user(buffer + *total_read, msg, CHAR_DEV_MSG_SIZE)) {
2007 return false;
2008 }
2009
2010 *length -= bytes_read;
2011 *total_read += bytes_read;
2012 }
2013
2014 ws->iter_exception++;
2015 if (ws->iter_exception >= SFE_IPV6_EXCEPTION_EVENT_LAST) {
2016 ws->iter_exception = 0;
2017 ws->state++;
2018 }
2019
2020 return true;
2021}
2022
2023/*
2024 * sfe_ipv6_debug_dev_read_exceptions_end()
2025 * Generate part of the XML output.
2026 */
2027static bool sfe_ipv6_debug_dev_read_exceptions_end(struct sfe_ipv6 *si, char *buffer, char *msg, size_t *length,
2028 int *total_read, struct sfe_ipv6_debug_xml_write_state *ws)
2029{
2030 int bytes_read;
2031
2032 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE, "\t</exceptions>\n");
2033 if (copy_to_user(buffer + *total_read, msg, CHAR_DEV_MSG_SIZE)) {
2034 return false;
2035 }
2036
2037 *length -= bytes_read;
2038 *total_read += bytes_read;
2039
2040 ws->state++;
2041 return true;
2042}
2043
2044/*
2045 * sfe_ipv6_debug_dev_read_stats()
2046 * Generate part of the XML output.
2047 */
2048static bool sfe_ipv6_debug_dev_read_stats(struct sfe_ipv6 *si, char *buffer, char *msg, size_t *length,
2049 int *total_read, struct sfe_ipv6_debug_xml_write_state *ws)
2050{
2051 int bytes_read;
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05302052 struct sfe_ipv6_stats stats;
2053 unsigned int num_conn;
2054
2055 sfe_ipv6_update_summary_stats(si, &stats);
Xiaoping Fan978b3772015-05-27 14:15:18 -07002056
2057 spin_lock_bh(&si->lock);
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05302058 num_conn = si->num_connections;
Xiaoping Fan978b3772015-05-27 14:15:18 -07002059 spin_unlock_bh(&si->lock);
2060
2061 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE, "\t<stats "
2062 "num_connections=\"%u\" "
Suruchi Suman23a279d2021-11-16 15:13:09 +05302063 "pkts_dropped=\"%llu\" "
Xiaoping Fan978b3772015-05-27 14:15:18 -07002064 "pkts_forwarded=\"%llu\" pkts_not_forwarded=\"%llu\" "
2065 "create_requests=\"%llu\" create_collisions=\"%llu\" "
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05302066 "create_failures=\"%llu\" "
Xiaoping Fan978b3772015-05-27 14:15:18 -07002067 "destroy_requests=\"%llu\" destroy_misses=\"%llu\" "
2068 "flushes=\"%llu\" "
Guduri Prathyusha647fe3e2021-11-22 19:17:51 +05302069 "hash_hits=\"%llu\" hash_reorders=\"%llu\" "
2070 "pppoe_encap_pkts_fwded=\"%llu\" "
2071 "pppoe_decap_pkts_fwded=\"%llu\" />\n",
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05302072
2073 num_conn,
Suruchi Suman23a279d2021-11-16 15:13:09 +05302074 stats.packets_dropped64,
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05302075 stats.packets_forwarded64,
2076 stats.packets_not_forwarded64,
2077 stats.connection_create_requests64,
2078 stats.connection_create_collisions64,
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05302079 stats.connection_create_failures64,
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05302080 stats.connection_destroy_requests64,
2081 stats.connection_destroy_misses64,
2082 stats.connection_flushes64,
2083 stats.connection_match_hash_hits64,
Guduri Prathyusha647fe3e2021-11-22 19:17:51 +05302084 stats.connection_match_hash_reorders64,
2085 stats.pppoe_encap_packets_forwarded64,
2086 stats.pppoe_decap_packets_forwarded64);
Xiaoping Fan978b3772015-05-27 14:15:18 -07002087 if (copy_to_user(buffer + *total_read, msg, CHAR_DEV_MSG_SIZE)) {
2088 return false;
2089 }
2090
2091 *length -= bytes_read;
2092 *total_read += bytes_read;
2093
2094 ws->state++;
2095 return true;
2096}
2097
2098/*
2099 * sfe_ipv6_debug_dev_read_end()
2100 * Generate part of the XML output.
2101 */
2102static bool sfe_ipv6_debug_dev_read_end(struct sfe_ipv6 *si, char *buffer, char *msg, size_t *length,
2103 int *total_read, struct sfe_ipv6_debug_xml_write_state *ws)
2104{
2105 int bytes_read;
2106
2107 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE, "</sfe_ipv6>\n");
2108 if (copy_to_user(buffer + *total_read, msg, CHAR_DEV_MSG_SIZE)) {
2109 return false;
2110 }
2111
2112 *length -= bytes_read;
2113 *total_read += bytes_read;
2114
2115 ws->state++;
2116 return true;
2117}
2118
2119/*
2120 * Array of write functions that write various XML elements that correspond to
2121 * our XML output state machine.
2122 */
2123static sfe_ipv6_debug_xml_write_method_t sfe_ipv6_debug_xml_write_methods[SFE_IPV6_DEBUG_XML_STATE_DONE] = {
2124 sfe_ipv6_debug_dev_read_start,
2125 sfe_ipv6_debug_dev_read_connections_start,
2126 sfe_ipv6_debug_dev_read_connections_connection,
2127 sfe_ipv6_debug_dev_read_connections_end,
2128 sfe_ipv6_debug_dev_read_exceptions_start,
2129 sfe_ipv6_debug_dev_read_exceptions_exception,
2130 sfe_ipv6_debug_dev_read_exceptions_end,
2131 sfe_ipv6_debug_dev_read_stats,
2132 sfe_ipv6_debug_dev_read_end,
2133};
2134
2135/*
2136 * sfe_ipv6_debug_dev_read()
2137 * Send info to userspace upon read request from user
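 * The write-state machine emits one XML fragment per method call until it reaches
 * SFE_IPV6_DEBUG_XML_STATE_DONE; the overall output has the form
 * <sfe_ipv6><connections>...</connections><exceptions>...</exceptions><stats .../></sfe_ipv6>.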
2138 */
2139static ssize_t sfe_ipv6_debug_dev_read(struct file *filp, char *buffer, size_t length, loff_t *offset)
2140{
2141 char msg[CHAR_DEV_MSG_SIZE];
2142 int total_read = 0;
2143 struct sfe_ipv6_debug_xml_write_state *ws;
2144 struct sfe_ipv6 *si = &__si6;
2145
2146 ws = (struct sfe_ipv6_debug_xml_write_state *)filp->private_data;
2147 while ((ws->state != SFE_IPV6_DEBUG_XML_STATE_DONE) && (length > CHAR_DEV_MSG_SIZE)) {
		/*
		 * Stop if the current fragment could not be emitted (e.g. copy_to_user() failed);
		 * retrying the same state would otherwise spin forever.
		 */
 2148		if (!(sfe_ipv6_debug_xml_write_methods[ws->state])(si, buffer, msg, &length, &total_read, ws)) {
 2149			break;
 2150		}
2151 }
Xiaoping Fan978b3772015-05-27 14:15:18 -07002152 return total_read;
2153}
2154
2155/*
Xiaoping Fan978b3772015-05-27 14:15:18 -07002156 * sfe_ipv6_debug_dev_open()
2157 */
2158static int sfe_ipv6_debug_dev_open(struct inode *inode, struct file *file)
2159{
2160 struct sfe_ipv6_debug_xml_write_state *ws;
2161
2162 ws = (struct sfe_ipv6_debug_xml_write_state *)file->private_data;
2163 if (ws) {
2164 return 0;
2165 }
2166
2167 ws = kzalloc(sizeof(struct sfe_ipv6_debug_xml_write_state), GFP_KERNEL);
2168 if (!ws) {
2169 return -ENOMEM;
2170 }
2171
2172 ws->state = SFE_IPV6_DEBUG_XML_STATE_START;
2173 file->private_data = ws;
2174
2175 return 0;
2176}
2177
2178/*
2179 * sfe_ipv6_debug_dev_release()
2180 */
2181static int sfe_ipv6_debug_dev_release(struct inode *inode, struct file *file)
2182{
2183 struct sfe_ipv6_debug_xml_write_state *ws;
Xiaoping Fan978b3772015-05-27 14:15:18 -07002184
2185 ws = (struct sfe_ipv6_debug_xml_write_state *)file->private_data;
Xiaoping Fan34586472015-07-03 02:20:35 -07002186 if (ws) {
2187 /*
2188 * We've finished with our output so free the write state.
2189 */
2190 kfree(ws);
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05302191 file->private_data = NULL;
Xiaoping Fan978b3772015-05-27 14:15:18 -07002192 }
2193
Xiaoping Fan978b3772015-05-27 14:15:18 -07002194 return 0;
2195}
2196
2197/*
2198 * File operations used in the debug char device
2199 */
2200static struct file_operations sfe_ipv6_debug_dev_fops = {
2201 .read = sfe_ipv6_debug_dev_read,
Xiaoping Fan978b3772015-05-27 14:15:18 -07002202 .open = sfe_ipv6_debug_dev_open,
2203 .release = sfe_ipv6_debug_dev_release
2204};
2205
2206#ifdef CONFIG_NF_FLOW_COOKIE
2207/*
2208 * sfe_ipv6_register_flow_cookie_cb
2209 * register a function in SFE to let SFE use this function to configure flow cookie for a flow
2210 *
2211 * Hardware driver which support flow cookie should register a callback function in SFE. Then SFE
2212 * can use this function to configure flow cookie for a flow.
2213 * return: 0, success; !=0, fail
2214 */
2215int sfe_ipv6_register_flow_cookie_cb(sfe_ipv6_flow_cookie_set_func_t cb)
2216{
2217 struct sfe_ipv6 *si = &__si6;
2218
2219 BUG_ON(!cb);
2220
2221 if (si->flow_cookie_set_func) {
2222 return -1;
2223 }
2224
2225 rcu_assign_pointer(si->flow_cookie_set_func, cb);
2226 return 0;
2227}
2228
2229/*
2230 * sfe_ipv6_unregister_flow_cookie_cb
2231 * unregister function which is used to configure flow cookie for a flow
2232 *
2233 * return: 0, success; !=0, fail
2234 */
2235int sfe_ipv6_unregister_flow_cookie_cb(sfe_ipv6_flow_cookie_set_func_t cb)
2236{
2237 struct sfe_ipv6 *si = &__si6;
2238
2239 RCU_INIT_POINTER(si->flow_cookie_set_func, NULL);
2240 return 0;
2241}
Xiaoping Fan640faf42015-08-28 15:50:55 -07002242
2243/*
2244 * sfe_ipv6_get_flow_cookie()
2245 */
2246static ssize_t sfe_ipv6_get_flow_cookie(struct device *dev,
2247 struct device_attribute *attr,
2248 char *buf)
2249{
2250 struct sfe_ipv6 *si = &__si6;
Xiaoping Fan01c67cc2015-11-09 11:31:57 -08002251 return snprintf(buf, (ssize_t)PAGE_SIZE, "%d\n", si->flow_cookie_enable);
Xiaoping Fan640faf42015-08-28 15:50:55 -07002252}
2253
2254/*
2255 * sfe_ipv6_set_flow_cookie()
2256 */
2257static ssize_t sfe_ipv6_set_flow_cookie(struct device *dev,
2258 struct device_attribute *attr,
2259 const char *buf, size_t size)
2260{
2261 struct sfe_ipv6 *si = &__si6;
Ken Zhu137722d2021-09-23 17:57:36 -07002262	si->flow_cookie_enable = simple_strtol(buf, NULL, 0);
Xiaoping Fan640faf42015-08-28 15:50:55 -07002263
2264 return size;
2265}
2266
2267/*
2268 * sysfs attributes.
2269 */
2270static const struct device_attribute sfe_ipv6_flow_cookie_attr =
Xiaoping Fane70da412016-02-26 16:47:57 -08002271 __ATTR(flow_cookie_enable, S_IWUSR | S_IRUGO, sfe_ipv6_get_flow_cookie, sfe_ipv6_set_flow_cookie);
Xiaoping Fan978b3772015-05-27 14:15:18 -07002272#endif /*CONFIG_NF_FLOW_COOKIE*/
2273
Ken Zhu137722d2021-09-23 17:57:36 -07002274/*
2275 * sfe_ipv6_get_cpu()
2276 */
2277static ssize_t sfe_ipv6_get_cpu(struct device *dev,
2278 struct device_attribute *attr,
2279 char *buf)
2280{
2281 struct sfe_ipv6 *si = &__si6;
2282 return snprintf(buf, (ssize_t)PAGE_SIZE, "%d\n", si->work_cpu);
2283}
2284
2285/*
Wayne Tanbb7f1782021-12-13 11:16:04 -08002286 * sfe_ipv6_set_cpu()
Ken Zhu137722d2021-09-23 17:57:36 -07002287 */
2288static ssize_t sfe_ipv6_set_cpu(struct device *dev,
2289 struct device_attribute *attr,
2290 const char *buf, size_t size)
2291{
2292 struct sfe_ipv6 *si = &__si6;
2293 int work_cpu;
2294
2295 work_cpu = simple_strtol(buf, NULL, 0);
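	/*
	 * Values 0..NR_CPUS-1 pin the stats work to that CPU; a value of NR_CPUS equals
	 * WORK_CPU_UNBOUND and leaves the scheduler free to pick the CPU.
	 */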
2296 if ((work_cpu >= 0) && (work_cpu <= NR_CPUS)) {
2297 si->work_cpu = work_cpu;
2298 } else {
 2299		dev_err(dev, "%s is not in the valid range [0,%d]\n", buf, NR_CPUS);
2300 }
2301
2302 return size;
2303}
2304/*
2305 * sysfs attributes.
2306 */
2307static const struct device_attribute sfe_ipv6_cpu_attr =
2308 __ATTR(stat_work_cpu, S_IWUSR | S_IRUGO, sfe_ipv6_get_cpu, sfe_ipv6_set_cpu);
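/*
 * Example (assuming sysfs is mounted at /sys): 'echo 2 > /sys/sfe_ipv6/stat_work_cpu' pins the
 * periodic statistics sync to CPU 2; reading the file returns the currently configured CPU.
 */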
2309
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05302310 /*
 2311 * sfe_ipv6_conn_match_hash_init()
2312 * Initialize conn match hash lists
2313 */
2314static void sfe_ipv6_conn_match_hash_init(struct sfe_ipv6 *si, int len)
2315{
2316 struct hlist_head *hash_list = si->hlist_conn_match_hash_head;
2317 int i;
2318
2319 for (i = 0; i < len; i++) {
2320 INIT_HLIST_HEAD(&hash_list[i]);
2321 }
2322}
2323
Suruchi Suman23a279d2021-11-16 15:13:09 +05302324#ifdef SFE_PROCESS_LOCAL_OUT
2325/*
2326 * sfe_ipv6_local_out()
 2327 * Called for packets from ip6_local_out() - post encapsulation & other packets
2328 */
2329static unsigned int sfe_ipv6_local_out(void *priv,
2330 struct sk_buff *skb,
2331 const struct nf_hook_state *nhs)
2332{
2333 DEBUG_TRACE("sfe: sfe_ipv6_local_out hook called.\n");
2334
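	/*
	 * Only packets that arrived on some interface (non-zero skb_iif) are candidates for the
	 * fast path; purely locally generated traffic is left to the normal stack.
	 */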
2335 if (likely(skb->skb_iif)) {
2336 return sfe_ipv6_recv(skb->dev, skb, NULL, true) ? NF_STOLEN : NF_ACCEPT;
2337 }
2338
2339 return NF_ACCEPT;
2340}
2341
2342/*
2343 * struct nf_hook_ops sfe_ipv6_ops_local_out[]
2344 * Hooks into netfilter local out packet monitoring points.
2345 */
2346static struct nf_hook_ops sfe_ipv6_ops_local_out[] __read_mostly = {
2347
2348 /*
2349 * Local out routing hook is used to monitor packets.
2350 */
2351 {
2352 .hook = sfe_ipv6_local_out,
2353 .pf = PF_INET6,
2354 .hooknum = NF_INET_LOCAL_OUT,
2355 .priority = NF_IP6_PRI_FIRST,
2356 },
2357};
2358#endif
2359
Xiaoping Fan978b3772015-05-27 14:15:18 -07002360/*
2361 * sfe_ipv6_init()
2362 */
Ratheesh Kannoth24fb1db2021-10-20 07:28:06 +05302363int sfe_ipv6_init(void)
Xiaoping Fan978b3772015-05-27 14:15:18 -07002364{
2365 struct sfe_ipv6 *si = &__si6;
2366 int result = -1;
2367
2368 DEBUG_INFO("SFE IPv6 init\n");
2369
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05302370 sfe_ipv6_conn_match_hash_init(si, ARRAY_SIZE(si->hlist_conn_match_hash_head));
2371
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05302372 si->stats_pcpu = alloc_percpu_gfp(struct sfe_ipv6_stats, GFP_KERNEL | __GFP_ZERO);
2373 if (!si->stats_pcpu) {
2374 DEBUG_ERROR("failed to allocate stats memory for sfe_ipv6\n");
2375 goto exit0;
2376 }
2377
Xiaoping Fan978b3772015-05-27 14:15:18 -07002378 /*
2379 * Create sys/sfe_ipv6
2380 */
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302381 si->sys_ipv6 = kobject_create_and_add("sfe_ipv6", NULL);
2382 if (!si->sys_ipv6) {
Xiaoping Fan978b3772015-05-27 14:15:18 -07002383 DEBUG_ERROR("failed to register sfe_ipv6\n");
2384 goto exit1;
2385 }
2386
2387 /*
2388 * Create files, one for each parameter supported by this module.
2389 */
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302390 result = sysfs_create_file(si->sys_ipv6, &sfe_ipv6_debug_dev_attr.attr);
Xiaoping Fan978b3772015-05-27 14:15:18 -07002391 if (result) {
2392 DEBUG_ERROR("failed to register debug dev file: %d\n", result);
2393 goto exit2;
2394 }
2395
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302396 result = sysfs_create_file(si->sys_ipv6, &sfe_ipv6_cpu_attr.attr);
Ken Zhu137722d2021-09-23 17:57:36 -07002397 if (result) {
2398 DEBUG_ERROR("failed to register debug dev file: %d\n", result);
2399 goto exit3;
2400 }
2401
Xiaoping Fan640faf42015-08-28 15:50:55 -07002402#ifdef CONFIG_NF_FLOW_COOKIE
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302403 result = sysfs_create_file(si->sys_ipv6, &sfe_ipv6_flow_cookie_attr.attr);
Xiaoping Fan640faf42015-08-28 15:50:55 -07002404 if (result) {
2405 DEBUG_ERROR("failed to register flow cookie enable file: %d\n", result);
Ken Zhu137722d2021-09-23 17:57:36 -07002406 goto exit4;
Xiaoping Fan640faf42015-08-28 15:50:55 -07002407 }
2408#endif /* CONFIG_NF_FLOW_COOKIE */
2409
Suruchi Suman23a279d2021-11-16 15:13:09 +05302410#ifdef SFE_PROCESS_LOCAL_OUT
2411#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0))
2412 result = nf_register_hooks(sfe_ipv6_ops_local_out, ARRAY_SIZE(sfe_ipv6_ops_local_out));
2413#else
2414 result = nf_register_net_hooks(&init_net, sfe_ipv6_ops_local_out, ARRAY_SIZE(sfe_ipv6_ops_local_out));
2415#endif
2416#endif
2417 if (result < 0) {
2418 DEBUG_ERROR("can't register nf local out hook: %d\n", result);
2419 goto exit5;
2420 } else {
 2421		DEBUG_INFO("Register nf local out hook success: %d\n", result);
2422 }
2423
Xiaoping Fan978b3772015-05-27 14:15:18 -07002424 /*
2425 * Register our debug char device.
2426 */
2427 result = register_chrdev(0, "sfe_ipv6", &sfe_ipv6_debug_dev_fops);
2428 if (result < 0) {
2429 DEBUG_ERROR("Failed to register chrdev: %d\n", result);
Suruchi Suman23a279d2021-11-16 15:13:09 +05302430 goto exit6;
Xiaoping Fan978b3772015-05-27 14:15:18 -07002431 }
2432
2433 si->debug_dev = result;
Ken Zhu137722d2021-09-23 17:57:36 -07002434 si->work_cpu = WORK_CPU_UNBOUND;
Xiaoping Fan978b3772015-05-27 14:15:18 -07002435
2436 /*
Ken Zhu137722d2021-09-23 17:57:36 -07002437 * Create work to handle periodic statistics.
Xiaoping Fan978b3772015-05-27 14:15:18 -07002438 */
Ken Zhu137722d2021-09-23 17:57:36 -07002439 INIT_DELAYED_WORK(&(si->sync_dwork), sfe_ipv6_periodic_sync);
2440 schedule_delayed_work_on(si->work_cpu, &(si->sync_dwork), ((HZ + 99) / 100));
Xiaoping Fan978b3772015-05-27 14:15:18 -07002441 spin_lock_init(&si->lock);
2442
2443 return 0;
2444
Suruchi Suman23a279d2021-11-16 15:13:09 +05302445exit6:
2446#ifdef SFE_PROCESS_LOCAL_OUT
2447#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0))
2448 DEBUG_TRACE("sfe: Unregister local out hook\n");
2449 nf_unregister_hooks(sfe_ipv6_ops_local_out, ARRAY_SIZE(sfe_ipv6_ops_local_out));
2450#else
2451 DEBUG_TRACE("sfe: Unregister local out hook\n");
2452 nf_unregister_net_hooks(&init_net, sfe_ipv6_ops_local_out, ARRAY_SIZE(sfe_ipv6_ops_local_out));
2453#endif
2454#endif
2455
Ken Zhu137722d2021-09-23 17:57:36 -07002456exit5:
Xiaoping Fan640faf42015-08-28 15:50:55 -07002457#ifdef CONFIG_NF_FLOW_COOKIE
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302458 sysfs_remove_file(si->sys_ipv6, &sfe_ipv6_flow_cookie_attr.attr);
Xiaoping Fan640faf42015-08-28 15:50:55 -07002459
Ken Zhu137722d2021-09-23 17:57:36 -07002460exit4:
Xiaoping Fan640faf42015-08-28 15:50:55 -07002461#endif /* CONFIG_NF_FLOW_COOKIE */
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302462 sysfs_remove_file(si->sys_ipv6, &sfe_ipv6_cpu_attr.attr);
Suruchi Suman23a279d2021-11-16 15:13:09 +05302463
Ken Zhu137722d2021-09-23 17:57:36 -07002464exit3:
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302465 sysfs_remove_file(si->sys_ipv6, &sfe_ipv6_debug_dev_attr.attr);
Xiaoping Fan978b3772015-05-27 14:15:18 -07002466
2467exit2:
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302468 kobject_put(si->sys_ipv6);
Xiaoping Fan978b3772015-05-27 14:15:18 -07002469
2470exit1:
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05302471 free_percpu(si->stats_pcpu);
2472
2473exit0:
Xiaoping Fan978b3772015-05-27 14:15:18 -07002474 return result;
2475}
2476
2477/*
2478 * sfe_ipv6_exit()
2479 */
Ratheesh Kannoth24fb1db2021-10-20 07:28:06 +05302480void sfe_ipv6_exit(void)
Xiaoping Fan978b3772015-05-27 14:15:18 -07002481{
2482 struct sfe_ipv6 *si = &__si6;
2483
2484 DEBUG_INFO("SFE IPv6 exit\n");
2485
2486 /*
2487 * Destroy all connections.
2488 */
2489 sfe_ipv6_destroy_all_rules_for_dev(NULL);
2490
Ken Zhu137722d2021-09-23 17:57:36 -07002491 cancel_delayed_work(&si->sync_dwork);
Xiaoping Fan978b3772015-05-27 14:15:18 -07002492
2493 unregister_chrdev(si->debug_dev, "sfe_ipv6");
2494
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05302495 free_percpu(si->stats_pcpu);
2496
Suruchi Suman23a279d2021-11-16 15:13:09 +05302497#ifdef SFE_PROCESS_LOCAL_OUT
2498#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0))
2499 DEBUG_TRACE("sfe: Unregister local out hook\n");
2500 nf_unregister_hooks(sfe_ipv6_ops_local_out, ARRAY_SIZE(sfe_ipv6_ops_local_out));
2501#else
2502 DEBUG_TRACE("sfe: Unregister local out hook\n");
2503 nf_unregister_net_hooks(&init_net, sfe_ipv6_ops_local_out, ARRAY_SIZE(sfe_ipv6_ops_local_out));
2504#endif
2505#endif
2506
Xiaoping Fan640faf42015-08-28 15:50:55 -07002507#ifdef CONFIG_NF_FLOW_COOKIE
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302508 sysfs_remove_file(si->sys_ipv6, &sfe_ipv6_flow_cookie_attr.attr);
Xiaoping Fan640faf42015-08-28 15:50:55 -07002509#endif /* CONFIG_NF_FLOW_COOKIE */
Ken Zhu137722d2021-09-23 17:57:36 -07002510
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302511 sysfs_remove_file(si->sys_ipv6, &sfe_ipv6_cpu_attr.attr);
Ken Zhu137722d2021-09-23 17:57:36 -07002512
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302513 sysfs_remove_file(si->sys_ipv6, &sfe_ipv6_debug_dev_attr.attr);
Xiaoping Fan978b3772015-05-27 14:15:18 -07002514
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302515 kobject_put(si->sys_ipv6);
Xiaoping Fan978b3772015-05-27 14:15:18 -07002516}
2517
Xiaoping Fan978b3772015-05-27 14:15:18 -07002518#ifdef CONFIG_NF_FLOW_COOKIE
2519EXPORT_SYMBOL(sfe_ipv6_register_flow_cookie_cb);
2520EXPORT_SYMBOL(sfe_ipv6_unregister_flow_cookie_cb);
2521#endif