/*
 * sfe_ipv6.c
 *	Shortcut forwarding engine - IPv6 support.
 *
 * Copyright (c) 2015-2016, 2019-2020, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/module.h>
#include <linux/sysfs.h>
#include <linux/skbuff.h>
#include <linux/icmp.h>
#include <net/tcp.h>
#include <linux/etherdevice.h>
#include <linux/version.h>
#include <net/udp.h>
#include <net/vxlan.h>
#include <linux/refcount.h>
#include <linux/netfilter.h>
#include <linux/inetdevice.h>
#include <linux/netfilter_ipv6.h>
#include <net/protocol.h>
#include <net/addrconf.h>
#include <net/gre.h>

#include "sfe_debug.h"
#include "sfe_api.h"
#include "sfe.h"
#include "sfe_flow_cookie.h"
#include "sfe_ipv6.h"
#include "sfe_ipv6_udp.h"
#include "sfe_ipv6_tcp.h"
#include "sfe_ipv6_icmp.h"
#include "sfe_pppoe.h"
#include "sfe_ipv6_tunipip6.h"
#include "sfe_ipv6_gre.h"

#define sfe_ipv6_addr_copy(src, dest) memcpy((void *)(dest), (void *)(src), 16)

static char *sfe_ipv6_exception_events_string[SFE_IPV6_EXCEPTION_EVENT_LAST] = {
	"UDP_HEADER_INCOMPLETE",
	"UDP_NO_CONNECTION",
	"UDP_IP_OPTIONS_OR_INITIAL_FRAGMENT",
	"UDP_SMALL_TTL",
	"UDP_NEEDS_FRAGMENTATION",
	"TCP_HEADER_INCOMPLETE",
	"TCP_NO_CONNECTION_SLOW_FLAGS",
	"TCP_NO_CONNECTION_FAST_FLAGS",
	"TCP_IP_OPTIONS_OR_INITIAL_FRAGMENT",
	"TCP_SMALL_TTL",
	"TCP_NEEDS_FRAGMENTATION",
	"TCP_FLAGS",
	"TCP_SEQ_EXCEEDS_RIGHT_EDGE",
	"TCP_SMALL_DATA_OFFS",
	"TCP_BAD_SACK",
	"TCP_BIG_DATA_OFFS",
	"TCP_SEQ_BEFORE_LEFT_EDGE",
	"TCP_ACK_EXCEEDS_RIGHT_EDGE",
	"TCP_ACK_BEFORE_LEFT_EDGE",
	"ICMP_HEADER_INCOMPLETE",
	"ICMP_UNHANDLED_TYPE",
	"ICMP_IPV6_HEADER_INCOMPLETE",
	"ICMP_IPV6_NON_V6",
	"ICMP_IPV6_IP_OPTIONS_INCOMPLETE",
	"ICMP_IPV6_UDP_HEADER_INCOMPLETE",
	"ICMP_IPV6_TCP_HEADER_INCOMPLETE",
	"ICMP_IPV6_UNHANDLED_PROTOCOL",
	"ICMP_NO_CONNECTION",
	"ICMP_FLUSHED_CONNECTION",
	"HEADER_INCOMPLETE",
	"BAD_TOTAL_LENGTH",
	"NON_V6",
	"NON_INITIAL_FRAGMENT",
	"DATAGRAM_INCOMPLETE",
	"IP_OPTIONS_INCOMPLETE",
	"UNHANDLED_PROTOCOL",
	"FLOW_COOKIE_ADD_FAIL",
	"INVALID_SOURCE_INTERFACE",
	"TUNIPIP6_HEADER_INCOMPLETE",
	"TUNIPIP6_NO_CONNECTION",
	"TUNIPIP6_IP_OPTIONS_OR_INITIAL_FRAGMENT",
	"TUNIPIP6_SMALL_TTL",
	"TUNIPIP6_NEEDS_FRAGMENTATION",
96 "TUNIPIP6_SYNC_ON_FIND"
	"GRE_HEADER_INCOMPLETE",
	"GRE_NO_CONNECTION",
	"GRE_IP_OPTIONS_OR_INITIAL_FRAGMENT",
	"GRE_SMALL_TTL",
	"GRE_NEEDS_FRAGMENTATION"
};

static struct sfe_ipv6 __si6;

/*
 * sfe_ipv6_get_debug_dev()
 */
static ssize_t sfe_ipv6_get_debug_dev(struct device *dev, struct device_attribute *attr, char *buf);

/*
 * sysfs attributes.
 */
static const struct device_attribute sfe_ipv6_debug_dev_attr =
	__ATTR(debug_dev, S_IWUSR | S_IRUGO, sfe_ipv6_get_debug_dev, NULL);

/*
 * sfe_ipv6_get_connection_match_hash()
 *	Generate the hash used in connection match lookups.
 */
static inline unsigned int sfe_ipv6_get_connection_match_hash(struct net_device *dev, u8 protocol,
							       struct sfe_ipv6_addr *src_ip, __be16 src_port,
							       struct sfe_ipv6_addr *dest_ip, __be16 dest_port)
{
	u32 idx, hash = 0;

	for (idx = 0; idx < 4; idx++) {
		hash ^= src_ip->addr[idx] ^ dest_ip->addr[idx];
	}
	hash = hash ^ protocol ^ ntohs(src_port ^ dest_port);
	return ((hash >> SFE_IPV6_CONNECTION_HASH_SHIFT) ^ hash) & SFE_IPV6_CONNECTION_HASH_MASK;
}

/*
 * sfe_ipv6_find_connection_match_rcu()
 *	Get the IPv6 flow match info that corresponds to a particular 5-tuple.
 */
struct sfe_ipv6_connection_match *
sfe_ipv6_find_connection_match_rcu(struct sfe_ipv6 *si, struct net_device *dev, u8 protocol,
				   struct sfe_ipv6_addr *src_ip, __be16 src_port,
				   struct sfe_ipv6_addr *dest_ip, __be16 dest_port)
{
	struct sfe_ipv6_connection_match *cm = NULL;
	unsigned int conn_match_idx;
	struct hlist_head *lhead;

	WARN_ON_ONCE(!rcu_read_lock_held());

	conn_match_idx = sfe_ipv6_get_connection_match_hash(dev, protocol, src_ip, src_port, dest_ip, dest_port);

	lhead = &si->hlist_conn_match_hash_head[conn_match_idx];

	/*
	 * Hopefully the first entry is the one we want.
	 */
	hlist_for_each_entry_rcu(cm, lhead, hnode) {
		if ((cm->match_dest_port != dest_port) ||
		    (!sfe_ipv6_addr_equal(cm->match_src_ip, src_ip)) ||
		    (!sfe_ipv6_addr_equal(cm->match_dest_ip, dest_ip)) ||
		    (cm->match_protocol != protocol) ||
		    (cm->match_dev != dev)) {
			continue;
		}

		this_cpu_inc(si->stats_pcpu->connection_match_hash_hits64);

		break;
	}

	return cm;
}

/*
 * sfe_ipv6_connection_match_update_summary_stats()
 *	Update the summary stats for a connection match entry.
 */
static inline void sfe_ipv6_connection_match_update_summary_stats(struct sfe_ipv6_connection_match *cm,
								   u32 *packets, u32 *bytes)
{
	u32 packet_count, byte_count;

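	/*
	 * Fold the per-packet atomic counters into the 64-bit totals.  We
	 * subtract exactly the value we read, rather than resetting the
	 * counters, so increments made concurrently by the forwarding path
	 * are not lost.
	 */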
	packet_count = atomic_read(&cm->rx_packet_count);
	cm->rx_packet_count64 += packet_count;
	atomic_sub(packet_count, &cm->rx_packet_count);

	byte_count = atomic_read(&cm->rx_byte_count);
	cm->rx_byte_count64 += byte_count;
	atomic_sub(byte_count, &cm->rx_byte_count);

	*packets = packet_count;
	*bytes = byte_count;
}

/*
 * sfe_ipv6_connection_match_compute_translations()
 *	Compute port and address translations for a connection match entry.
 */
static void sfe_ipv6_connection_match_compute_translations(struct sfe_ipv6_connection_match *cm)
{
	u32 diff[9];
	u32 *idx_32;
	u16 *idx_16;

	/*
	 * Before we insert the entry look to see if this is tagged as doing address
	 * translations. If it is then work out the adjustment that we need to apply
	 * to the transport checksum.
	 */
	if (cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_XLATE_SRC) {
		u32 adj = 0;
		u32 carry = 0;

		/*
		 * Precompute an incremental checksum adjustment so we can
		 * edit packets in this stream very quickly. The algorithm is from RFC1624.
		 */
		idx_32 = diff;
		*(idx_32++) = cm->match_src_ip[0].addr[0];
		*(idx_32++) = cm->match_src_ip[0].addr[1];
		*(idx_32++) = cm->match_src_ip[0].addr[2];
		*(idx_32++) = cm->match_src_ip[0].addr[3];

		idx_16 = (u16 *)idx_32;
		*(idx_16++) = cm->match_src_port;
		*(idx_16++) = ~cm->xlate_src_port;
		idx_32 = (u32 *)idx_16;

		*(idx_32++) = ~cm->xlate_src_ip[0].addr[0];
		*(idx_32++) = ~cm->xlate_src_ip[0].addr[1];
		*(idx_32++) = ~cm->xlate_src_ip[0].addr[2];
		*(idx_32++) = ~cm->xlate_src_ip[0].addr[3];

		/*
		 * When we compute this fold it down to a 16-bit offset
		 * as that way we can avoid having to do a double
		 * folding of the twos-complement result because the
		 * addition of 2 16-bit values cannot cause a double
		 * wrap-around!
		 */
		for (idx_32 = diff; idx_32 < diff + 9; idx_32++) {
			u32 w = *idx_32;
			adj += carry;
			adj += w;
			carry = (w > adj);
		}
		adj += carry;
		adj = (adj & 0xffff) + (adj >> 16);
		adj = (adj & 0xffff) + (adj >> 16);
		cm->xlate_src_csum_adjustment = (u16)adj;
	}

	if (cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_XLATE_DEST) {
		u32 adj = 0;
		u32 carry = 0;

		/*
		 * Precompute an incremental checksum adjustment so we can
		 * edit packets in this stream very quickly. The algorithm is from RFC1624.
		 */
		idx_32 = diff;
		*(idx_32++) = cm->match_dest_ip[0].addr[0];
		*(idx_32++) = cm->match_dest_ip[0].addr[1];
		*(idx_32++) = cm->match_dest_ip[0].addr[2];
		*(idx_32++) = cm->match_dest_ip[0].addr[3];

		idx_16 = (u16 *)idx_32;
		*(idx_16++) = cm->match_dest_port;
		*(idx_16++) = ~cm->xlate_dest_port;
		idx_32 = (u32 *)idx_16;

		*(idx_32++) = ~cm->xlate_dest_ip[0].addr[0];
		*(idx_32++) = ~cm->xlate_dest_ip[0].addr[1];
		*(idx_32++) = ~cm->xlate_dest_ip[0].addr[2];
		*(idx_32++) = ~cm->xlate_dest_ip[0].addr[3];

		/*
		 * When we compute this fold it down to a 16-bit offset
		 * as that way we can avoid having to do a double
		 * folding of the twos-complement result because the
		 * addition of 2 16-bit values cannot cause a double
		 * wrap-around!
		 */
		for (idx_32 = diff; idx_32 < diff + 9; idx_32++) {
			u32 w = *idx_32;
			adj += carry;
			adj += w;
			carry = (w > adj);
		}
		adj += carry;
		adj = (adj & 0xffff) + (adj >> 16);
		adj = (adj & 0xffff) + (adj >> 16);
		cm->xlate_dest_csum_adjustment = (u16)adj;
	}
}
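
/*
 * Note on the double fold above: after the first
 * "(adj & 0xffff) + (adj >> 16)" the running sum is at most
 * 0xffff + 0xffff == 0x1fffe, so the second fold always reduces it to
 * 16 bits, e.g. 0x1fffe -> 0x1 + 0xfffe == 0xffff.
 */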

/*
 * sfe_ipv6_update_summary_stats()
 *	Update the summary stats.
 */
static void sfe_ipv6_update_summary_stats(struct sfe_ipv6 *si, struct sfe_ipv6_stats *stats)
{
	int i = 0;

	memset(stats, 0, sizeof(*stats));

	for_each_possible_cpu(i) {
		const struct sfe_ipv6_stats *s = per_cpu_ptr(si->stats_pcpu, i);

		stats->connection_create_requests64 += s->connection_create_requests64;
		stats->connection_create_collisions64 += s->connection_create_collisions64;
		stats->connection_create_failures64 += s->connection_create_failures64;
		stats->connection_destroy_requests64 += s->connection_destroy_requests64;
		stats->connection_destroy_misses64 += s->connection_destroy_misses64;
		stats->connection_match_hash_hits64 += s->connection_match_hash_hits64;
		stats->connection_match_hash_reorders64 += s->connection_match_hash_reorders64;
		stats->connection_flushes64 += s->connection_flushes64;
		stats->packets_dropped64 += s->packets_dropped64;
		stats->packets_forwarded64 += s->packets_forwarded64;
		stats->packets_not_forwarded64 += s->packets_not_forwarded64;
		stats->pppoe_encap_packets_forwarded64 += s->pppoe_encap_packets_forwarded64;
		stats->pppoe_decap_packets_forwarded64 += s->pppoe_decap_packets_forwarded64;
		stats->pppoe_bridge_packets_forwarded64 += s->pppoe_bridge_packets_forwarded64;
	}
}

/*
 * sfe_ipv6_insert_connection_match()
 *	Insert a connection match into the hash.
 *
 * On entry we must be holding the lock that protects the hash table.
 */
static inline void sfe_ipv6_insert_connection_match(struct sfe_ipv6 *si,
						    struct sfe_ipv6_connection_match *cm)
{
	unsigned int conn_match_idx
		= sfe_ipv6_get_connection_match_hash(cm->match_dev, cm->match_protocol,
						     cm->match_src_ip, cm->match_src_port,
						     cm->match_dest_ip, cm->match_dest_port);

	lockdep_assert_held(&si->lock);

	hlist_add_head_rcu(&cm->hnode, &si->hlist_conn_match_hash_head[conn_match_idx]);
#ifdef CONFIG_NF_FLOW_COOKIE
	if (!si->flow_cookie_enable || !(cm->flags & (SFE_IPV6_CONNECTION_MATCH_FLAG_XLATE_SRC | SFE_IPV6_CONNECTION_MATCH_FLAG_XLATE_DEST)))
		return;

	/*
	 * Configure the hardware to put a flow cookie in packets of this flow;
	 * we can then accelerate the lookup when we receive these packets.
	 */
	for (conn_match_idx = 1; conn_match_idx < SFE_FLOW_COOKIE_SIZE; conn_match_idx++) {
		struct sfe_ipv6_flow_cookie_entry *entry = &si->sfe_flow_cookie_table[conn_match_idx];

		if ((NULL == entry->match) && time_is_before_jiffies(entry->last_clean_time + HZ)) {
			sfe_ipv6_flow_cookie_set_func_t func;

			rcu_read_lock();
			func = rcu_dereference(si->flow_cookie_set_func);
			if (func) {
				if (!func(cm->match_protocol, cm->match_src_ip->addr, cm->match_src_port,
					  cm->match_dest_ip->addr, cm->match_dest_port, conn_match_idx)) {
					entry->match = cm;
					cm->flow_cookie = conn_match_idx;
				} else {
					si->exception_events[SFE_IPV6_EXCEPTION_EVENT_FLOW_COOKIE_ADD_FAIL]++;
				}
			}
			rcu_read_unlock();

			break;
		}
	}
#endif
}

/*
 * sfe_ipv6_remove_connection_match()
 *	Remove a connection match object from the hash.
 */
static inline void sfe_ipv6_remove_connection_match(struct sfe_ipv6 *si, struct sfe_ipv6_connection_match *cm)
{
	lockdep_assert_held(&si->lock);
#ifdef CONFIG_NF_FLOW_COOKIE
	if (si->flow_cookie_enable) {
		/*
		 * Tell hardware that we no longer need a flow cookie in packets of this flow
		 */
		unsigned int conn_match_idx;

		for (conn_match_idx = 1; conn_match_idx < SFE_FLOW_COOKIE_SIZE; conn_match_idx++) {
			struct sfe_ipv6_flow_cookie_entry *entry = &si->sfe_flow_cookie_table[conn_match_idx];

			if (cm == entry->match) {
				sfe_ipv6_flow_cookie_set_func_t func;

				rcu_read_lock();
				func = rcu_dereference(si->flow_cookie_set_func);
				if (func) {
					func(cm->match_protocol, cm->match_src_ip->addr, cm->match_src_port,
					     cm->match_dest_ip->addr, cm->match_dest_port, 0);
				}
				rcu_read_unlock();

				cm->flow_cookie = 0;
				entry->match = NULL;
				entry->last_clean_time = jiffies;
				break;
			}
		}
	}
#endif
	hlist_del_init_rcu(&cm->hnode);
}

/*
 * sfe_ipv6_get_connection_hash()
 *	Generate the hash used in connection lookups.
 */
static inline unsigned int sfe_ipv6_get_connection_hash(u8 protocol, struct sfe_ipv6_addr *src_ip, __be16 src_port,
							struct sfe_ipv6_addr *dest_ip, __be16 dest_port)
{
	u32 idx, hash = 0;

	for (idx = 0; idx < 4; idx++) {
		hash ^= src_ip->addr[idx] ^ dest_ip->addr[idx];
	}
	hash = hash ^ protocol ^ ntohs(src_port ^ dest_port);
	return ((hash >> SFE_IPV6_CONNECTION_HASH_SHIFT) ^ hash) & SFE_IPV6_CONNECTION_HASH_MASK;
}

/*
 * sfe_ipv6_find_connection()
 *	Get the IPv6 connection info that corresponds to a particular 5-tuple.
 *
 * On entry we must be holding the lock that protects the hash table.
 */
static inline struct sfe_ipv6_connection *sfe_ipv6_find_connection(struct sfe_ipv6 *si, u32 protocol,
								   struct sfe_ipv6_addr *src_ip, __be16 src_port,
								   struct sfe_ipv6_addr *dest_ip, __be16 dest_port)
{
	struct sfe_ipv6_connection *c;

	unsigned int conn_idx = sfe_ipv6_get_connection_hash(protocol, src_ip, src_port, dest_ip, dest_port);

	lockdep_assert_held(&si->lock);
	c = si->conn_hash[conn_idx];

	while (c) {
		if ((c->src_port == src_port)
		    && (c->dest_port == dest_port)
		    && (sfe_ipv6_addr_equal(c->src_ip, src_ip))
		    && (sfe_ipv6_addr_equal(c->dest_ip, dest_ip))
		    && (c->protocol == protocol)) {
			return c;
		}
		c = c->next;
	}

	return NULL;
}

/*
 * sfe_ipv6_insert_connection()
 *	Insert a connection into the hash.
 *
 * On entry we must be holding the lock that protects the hash table.
 */
static void sfe_ipv6_insert_connection(struct sfe_ipv6 *si, struct sfe_ipv6_connection *c)
{
	struct sfe_ipv6_connection **hash_head;
	struct sfe_ipv6_connection *prev_head;
	unsigned int conn_idx;

	lockdep_assert_held(&si->lock);

	/*
	 * Insert entry into the connection hash.
	 */
	conn_idx = sfe_ipv6_get_connection_hash(c->protocol, c->src_ip, c->src_port,
						c->dest_ip, c->dest_port);
	hash_head = &si->conn_hash[conn_idx];
	prev_head = *hash_head;
	c->prev = NULL;
	if (prev_head) {
		prev_head->prev = c;
	}

	c->next = prev_head;
	*hash_head = c;

	/*
	 * Insert entry into the "all connections" list.
	 */
	if (si->all_connections_tail) {
		c->all_connections_prev = si->all_connections_tail;
		si->all_connections_tail->all_connections_next = c;
	} else {
		c->all_connections_prev = NULL;
		si->all_connections_head = c;
	}

	si->all_connections_tail = c;
	c->all_connections_next = NULL;
	si->num_connections++;

	/*
	 * Insert the connection match objects too.
	 */
	sfe_ipv6_insert_connection_match(si, c->original_match);
	sfe_ipv6_insert_connection_match(si, c->reply_match);
}

/*
 * sfe_ipv6_remove_connection()
 *	Remove a sfe_ipv6_connection object from the hash.
 *
 * On entry we must be holding the lock that protects the hash table.
 */
bool sfe_ipv6_remove_connection(struct sfe_ipv6 *si, struct sfe_ipv6_connection *c)
{
	lockdep_assert_held(&si->lock);
	if (c->removed) {
		DEBUG_ERROR("%px: Connection has been removed already\n", c);
		return false;
	}

	/*
	 * Remove the connection match objects.
	 */
	sfe_ipv6_remove_connection_match(si, c->reply_match);
	sfe_ipv6_remove_connection_match(si, c->original_match);

	/*
	 * Unlink the connection.
	 */
	if (c->prev) {
		c->prev->next = c->next;
	} else {
		unsigned int conn_idx = sfe_ipv6_get_connection_hash(c->protocol, c->src_ip, c->src_port,
								     c->dest_ip, c->dest_port);
		si->conn_hash[conn_idx] = c->next;
	}

	if (c->next) {
		c->next->prev = c->prev;
	}

	/*
	 * Unlink connection from all_connections list
	 */
	if (c->all_connections_prev) {
		c->all_connections_prev->all_connections_next = c->all_connections_next;
	} else {
		si->all_connections_head = c->all_connections_next;
	}

	if (c->all_connections_next) {
		c->all_connections_next->all_connections_prev = c->all_connections_prev;
	} else {
		si->all_connections_tail = c->all_connections_prev;
	}

	/*
	 * If I am the next sync connection, move the sync to my next or head.
	 */
	if (unlikely(si->wc_next == c)) {
		si->wc_next = c->all_connections_next;
	}

	c->removed = true;
	si->num_connections--;
	return true;
}

/*
 * sfe_ipv6_gen_sync_connection()
 *	Sync a connection.
 *
 * On entry to this function we expect that the lock for the connection is either
 * already held (when called from sfe_ipv6_periodic_sync()) or isn't required
 * (when called from sfe_ipv6_flush_sfe_ipv6_connection()).
 */
static void sfe_ipv6_gen_sync_connection(struct sfe_ipv6 *si, struct sfe_ipv6_connection *c,
					 struct sfe_connection_sync *sis, sfe_sync_reason_t reason,
					 u64 now_jiffies)
{
	struct sfe_ipv6_connection_match *original_cm;
	struct sfe_ipv6_connection_match *reply_cm;
	u32 packet_count, byte_count;

	/*
	 * Fill in the update message.
	 */
	sis->is_v6 = 1;
	sis->protocol = c->protocol;
	sis->src_ip.ip6[0] = c->src_ip[0];
	sis->src_ip_xlate.ip6[0] = c->src_ip_xlate[0];
	sis->dest_ip.ip6[0] = c->dest_ip[0];
	sis->dest_ip_xlate.ip6[0] = c->dest_ip_xlate[0];
	sis->src_port = c->src_port;
	sis->src_port_xlate = c->src_port_xlate;
	sis->dest_port = c->dest_port;
	sis->dest_port_xlate = c->dest_port_xlate;

	original_cm = c->original_match;
	reply_cm = c->reply_match;
	sis->src_td_max_window = original_cm->protocol_state.tcp.max_win;
	sis->src_td_end = original_cm->protocol_state.tcp.end;
	sis->src_td_max_end = original_cm->protocol_state.tcp.max_end;
	sis->dest_td_max_window = reply_cm->protocol_state.tcp.max_win;
	sis->dest_td_end = reply_cm->protocol_state.tcp.end;
	sis->dest_td_max_end = reply_cm->protocol_state.tcp.max_end;

	sfe_ipv6_connection_match_update_summary_stats(original_cm, &packet_count, &byte_count);
	sis->src_new_packet_count = packet_count;
	sis->src_new_byte_count = byte_count;

	sfe_ipv6_connection_match_update_summary_stats(reply_cm, &packet_count, &byte_count);
	sis->dest_new_packet_count = packet_count;
	sis->dest_new_byte_count = byte_count;

	sis->src_dev = original_cm->match_dev;
	sis->src_packet_count = original_cm->rx_packet_count64;
	sis->src_byte_count = original_cm->rx_byte_count64;

	sis->dest_dev = reply_cm->match_dev;
	sis->dest_packet_count = reply_cm->rx_packet_count64;
	sis->dest_byte_count = reply_cm->rx_byte_count64;

	sis->reason = reason;

	/*
	 * Get the time increment since our last sync.
	 */
	sis->delta_jiffies = now_jiffies - c->last_sync_jiffies;
	c->last_sync_jiffies = now_jiffies;
}

/*
 * sfe_ipv6_free_sfe_ipv6_connection_rcu()
 *	Called at RCU qs state to free the connection object.
 */
static void sfe_ipv6_free_sfe_ipv6_connection_rcu(struct rcu_head *head)
{
	struct sfe_ipv6_connection *c;
	struct udp_sock *up;
	struct sock *sk;

	/*
	 * We don't need the spin lock as the connection has already been removed from the linked list.
	 */
	c = container_of(head, struct sfe_ipv6_connection, rcu);
	BUG_ON(!c->removed);

659 DEBUG_TRACE("%px: connecton has been deleted\n", c);

	/*
	 * Decrease the refcount taken in function sfe_ipv6_create_rule()
	 * during call of __udp6_lib_lookup()
	 */
	up = c->reply_match->up;
	if (up) {
		sk = (struct sock *)up;
		sock_put(sk);
	}

	/*
	 * Release our hold of the source and dest devices and free the memory
	 * for our connection objects.
	 */
	dev_put(c->original_dev);
	dev_put(c->reply_dev);
	kfree(c->original_match);
	kfree(c->reply_match);
	kfree(c);
}

/*
 * sfe_ipv6_sync_status()
 *	update a connection status to its connection manager.
 *
 * si: the ipv6 context
 * c: which connection to be notified
 * reason: what kind of reason: flush, or destroy
 */
void sfe_ipv6_sync_status(struct sfe_ipv6 *si,
			  struct sfe_ipv6_connection *c,
			  sfe_sync_reason_t reason)
{
	struct sfe_connection_sync sis;
	u64 now_jiffies;
	sfe_sync_rule_callback_t sync_rule_callback;

	rcu_read_lock();
	sync_rule_callback = rcu_dereference(si->sync_rule_callback);

	if (unlikely(!sync_rule_callback)) {
		rcu_read_unlock();
		return;
	}

	/*
	 * Generate a sync message and then sync.
	 */
	now_jiffies = get_jiffies_64();
	sfe_ipv6_gen_sync_connection(si, c, &sis, reason, now_jiffies);
	sync_rule_callback(&sis);

	rcu_read_unlock();
}

/*
 * sfe_ipv6_flush_connection()
 *	Flush a connection and free all associated resources.
 *
 * We need to be called with bottom halves disabled locally as we need to acquire
 * the connection hash lock and release it again. In general we're actually called
 * from within a BH and so we're fine, but we're also called when connections are
 * torn down.
 */
void sfe_ipv6_flush_connection(struct sfe_ipv6 *si,
			       struct sfe_ipv6_connection *c,
			       sfe_sync_reason_t reason)
{
	BUG_ON(!c->removed);

	this_cpu_inc(si->stats_pcpu->connection_flushes64);
	sfe_ipv6_sync_status(si, c, reason);

	/*
	 * Release our hold of the source and dest devices and free the memory
	 * for our connection objects.
	 */
	call_rcu(&c->rcu, sfe_ipv6_free_sfe_ipv6_connection_rcu);
}

/*
 * sfe_ipv6_exception_stats_inc()
 *	Increment exception stats.
 */
void sfe_ipv6_exception_stats_inc(struct sfe_ipv6 *si, enum sfe_ipv6_exception_events reason)
{
	struct sfe_ipv6_stats *stats = this_cpu_ptr(si->stats_pcpu);

	stats->exception_events64[reason]++;
	stats->packets_not_forwarded64++;
}

/*
 * sfe_ipv6_is_local_ip()
 *	Return true if the address is a local IP, otherwise return false.
 */
static bool sfe_ipv6_is_local_ip(struct sfe_ipv6 *si, uint8_t *addr)
{
	struct net_device *dev;
	struct in6_addr ip_addr;
	memcpy(ip_addr.s6_addr, addr, 16);

	dev = ipv6_dev_find(&init_net, &ip_addr, 1);
	if (dev) {
		dev_put(dev);
		return true;
	}

	return false;
}

/*
 * sfe_ipv6_recv()
 *	Handle packet receives and forwarding.
 *
 * Returns 1 if the packet is forwarded or 0 if it isn't.
 */
int sfe_ipv6_recv(struct net_device *dev, struct sk_buff *skb, struct sfe_l2_info *l2_info, bool tun_outer)
{
	struct sfe_ipv6 *si = &__si6;
	unsigned int len;
	unsigned int payload_len;
	unsigned int ihl = sizeof(struct ipv6hdr);
	bool sync_on_find = false;
	struct ipv6hdr *iph;
	u8 next_hdr;

	/*
	 * Check that we have space for an IP header and an uplayer header here.
	 */
	len = skb->len;
	if (!pskb_may_pull(skb, ihl + sizeof(struct sfe_ipv6_ext_hdr))) {

		sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_HEADER_INCOMPLETE);
		DEBUG_TRACE("len: %u is too short\n", len);
		return 0;
	}

	/*
	 * Is our IP version wrong?
	 */
	iph = (struct ipv6hdr *)skb->data;
	if (unlikely(iph->version != 6)) {

		sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_NON_V6);
		DEBUG_TRACE("IP version: %u\n", iph->version);
		return 0;
	}

	/*
	 * Does our datagram fit inside the skb?
	 */
	payload_len = ntohs(iph->payload_len);
	if (unlikely(payload_len > (len - ihl))) {

		sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_DATAGRAM_INCOMPLETE);
		DEBUG_TRACE("payload_len: %u, exceeds len: %u\n", payload_len, (len - (unsigned int)sizeof(struct ipv6hdr)));
		return 0;
	}

	next_hdr = iph->nexthdr;
	while (unlikely(sfe_ipv6_is_ext_hdr(next_hdr))) {
		struct sfe_ipv6_ext_hdr *ext_hdr;
		unsigned int ext_hdr_len;

		ext_hdr = (struct sfe_ipv6_ext_hdr *)(skb->data + ihl);

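		/*
		 * The IPv6 extension header length field counts 8-octet units
		 * beyond the first 8 octets (RFC 8200), hence the shift by 3
		 * plus the size of the generic extension header below.
		 */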
		ext_hdr_len = ext_hdr->hdr_len;
		ext_hdr_len <<= 3;
		ext_hdr_len += sizeof(struct sfe_ipv6_ext_hdr);
		ihl += ext_hdr_len;
		if (!pskb_may_pull(skb, ihl + sizeof(struct sfe_ipv6_ext_hdr))) {
			sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_HEADER_INCOMPLETE);

			DEBUG_TRACE("extension header %d not completed\n", next_hdr);
			return 0;
		}
		/*
		 * Packets with extension headers won't be handled in the fast
		 * path; sync their status and exception them to the kernel.
		 */
		sync_on_find = true;
		next_hdr = ext_hdr->next_hdr;
	}

	if (IPPROTO_UDP == next_hdr) {
		return sfe_ipv6_recv_udp(si, skb, dev, len, iph, ihl, sync_on_find, l2_info, tun_outer);
	}

	if (IPPROTO_TCP == next_hdr) {
		return sfe_ipv6_recv_tcp(si, skb, dev, len, iph, ihl, sync_on_find, l2_info);
	}

	if (IPPROTO_ICMPV6 == next_hdr) {
		return sfe_ipv6_recv_icmp(si, skb, dev, len, iph, ihl);
	}

	if (IPPROTO_IPIP == next_hdr) {
		return sfe_ipv6_recv_tunipip6(si, skb, dev, len, iph, ihl, sync_on_find, l2_info, true);
	}

#ifdef SFE_GRE_TUN_ENABLE
	if (IPPROTO_GRE == next_hdr) {
		return sfe_ipv6_recv_gre(si, skb, dev, len, iph, ihl, sync_on_find, tun_outer);
	}
#endif

	sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_UNHANDLED_PROTOCOL);
	DEBUG_TRACE("not UDP, TCP or ICMP: %u\n", next_hdr);
	return 0;
}

/*
 * sfe_ipv6_update_tcp_state()
 *	update TCP window variables.
 */
static void
sfe_ipv6_update_tcp_state(struct sfe_ipv6_connection *c,
			  struct sfe_ipv6_rule_create_msg *msg)
{
	struct sfe_ipv6_connection_match *orig_cm;
	struct sfe_ipv6_connection_match *repl_cm;
	struct sfe_ipv6_tcp_connection_match *orig_tcp;
	struct sfe_ipv6_tcp_connection_match *repl_tcp;

	orig_cm = c->original_match;
	repl_cm = c->reply_match;
	orig_tcp = &orig_cm->protocol_state.tcp;
	repl_tcp = &repl_cm->protocol_state.tcp;

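	/*
	 * The (s32)(a - b) < 0 comparisons below are the wraparound-safe
	 * "before/after" idiom for 32-bit TCP sequence space.
	 */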
	/* update orig */
	if (orig_tcp->max_win < msg->tcp_rule.flow_max_window) {
		orig_tcp->max_win = msg->tcp_rule.flow_max_window;
	}
	if ((s32)(orig_tcp->end - msg->tcp_rule.flow_end) < 0) {
		orig_tcp->end = msg->tcp_rule.flow_end;
	}
	if ((s32)(orig_tcp->max_end - msg->tcp_rule.flow_max_end) < 0) {
		orig_tcp->max_end = msg->tcp_rule.flow_max_end;
	}

	/* update reply */
	if (repl_tcp->max_win < msg->tcp_rule.return_max_window) {
		repl_tcp->max_win = msg->tcp_rule.return_max_window;
	}
	if ((s32)(repl_tcp->end - msg->tcp_rule.return_end) < 0) {
		repl_tcp->end = msg->tcp_rule.return_end;
	}
	if ((s32)(repl_tcp->max_end - msg->tcp_rule.return_max_end) < 0) {
		repl_tcp->max_end = msg->tcp_rule.return_max_end;
	}

	/* update match flags */
	orig_cm->flags &= ~SFE_IPV6_CONNECTION_MATCH_FLAG_NO_SEQ_CHECK;
	repl_cm->flags &= ~SFE_IPV6_CONNECTION_MATCH_FLAG_NO_SEQ_CHECK;
	if (msg->rule_flags & SFE_RULE_CREATE_FLAG_NO_SEQ_CHECK) {
		orig_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_NO_SEQ_CHECK;
		repl_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_NO_SEQ_CHECK;
	}
}

/*
 * sfe_ipv6_update_protocol_state()
 *	update protocol specified state machine.
 */
static void
sfe_ipv6_update_protocol_state(struct sfe_ipv6_connection *c,
			       struct sfe_ipv6_rule_create_msg *msg)
{
	switch (msg->tuple.protocol) {
	case IPPROTO_TCP:
		sfe_ipv6_update_tcp_state(c, msg);
		break;
	}
}

/*
 * sfe_ipv6_match_entry_set_vlan()
 */
static void sfe_ipv6_match_entry_set_vlan(
	struct sfe_ipv6_connection_match *cm,
	u32 primary_ingress_vlan_tag,
	u32 primary_egress_vlan_tag,
	u32 secondary_ingress_vlan_tag,
	u32 secondary_egress_vlan_tag)
{
	u16 tpid;

	/*
	 * Prevent stacking header counts when updating.
	 */
	cm->ingress_vlan_hdr_cnt = 0;
	cm->egress_vlan_hdr_cnt = 0;
	memset(cm->ingress_vlan_hdr, 0, sizeof(cm->ingress_vlan_hdr));
	memset(cm->egress_vlan_hdr, 0, sizeof(cm->egress_vlan_hdr));

	/*
	 * vlan_hdr[0] corresponds to outer tag
	 * vlan_hdr[1] corresponds to inner tag
	 * Extract the vlan information (tpid and tci) from rule message
	 */
	if ((primary_ingress_vlan_tag & VLAN_VID_MASK) != SFE_VLAN_ID_NOT_CONFIGURED) {
		tpid = (u16)(primary_ingress_vlan_tag >> 16);
		cm->ingress_vlan_hdr[0].tpid = ntohs(tpid);
		cm->ingress_vlan_hdr[0].tci = (u16)primary_ingress_vlan_tag;
		cm->ingress_vlan_hdr_cnt++;
	}

	if ((secondary_ingress_vlan_tag & VLAN_VID_MASK) != SFE_VLAN_ID_NOT_CONFIGURED) {
		tpid = (u16)(secondary_ingress_vlan_tag >> 16);
		cm->ingress_vlan_hdr[1].tpid = ntohs(tpid);
		cm->ingress_vlan_hdr[1].tci = (u16)secondary_ingress_vlan_tag;
		cm->ingress_vlan_hdr_cnt++;
	}

	if ((primary_egress_vlan_tag & VLAN_VID_MASK) != SFE_VLAN_ID_NOT_CONFIGURED) {
		tpid = (u16)(primary_egress_vlan_tag >> 16);
		cm->egress_vlan_hdr[0].tpid = ntohs(tpid);
		cm->egress_vlan_hdr[0].tci = (u16)primary_egress_vlan_tag;
		cm->egress_vlan_hdr_cnt++;
	}

	if ((secondary_egress_vlan_tag & VLAN_VID_MASK) != SFE_VLAN_ID_NOT_CONFIGURED) {
		tpid = (u16)(secondary_egress_vlan_tag >> 16);
		cm->egress_vlan_hdr[1].tpid = ntohs(tpid);
		cm->egress_vlan_hdr[1].tci = (u16)secondary_egress_vlan_tag;
		cm->egress_vlan_hdr_cnt++;
	}
}

/*
 * sfe_ipv6_update_rule()
 *	update forwarding rule after rule is created.
 */
void sfe_ipv6_update_rule(struct sfe_ipv6_rule_create_msg *msg)
{
	struct sfe_ipv6_connection *c;
	struct sfe_ipv6 *si = &__si6;

	spin_lock_bh(&si->lock);

	c = sfe_ipv6_find_connection(si,
				     msg->tuple.protocol,
				     (struct sfe_ipv6_addr *)msg->tuple.flow_ip,
				     msg->tuple.flow_ident,
				     (struct sfe_ipv6_addr *)msg->tuple.return_ip,
				     msg->tuple.return_ident);
	if (c != NULL) {
		sfe_ipv6_update_protocol_state(c, msg);
	}

	spin_unlock_bh(&si->lock);
}

/*
 * sfe_ipv6_xmit_eth_type_check
 *	Checking if MAC header has to be written.
 */
static inline bool sfe_ipv6_xmit_eth_type_check(struct net_device *dev, u32 cm_flags)
{
	if (!(dev->flags & IFF_NOARP)) {
		return true;
	}

	/*
	 * For PPPoE, since we are now supporting PPPoE encapsulation, we are writing L2 header.
	 */
	if (cm_flags & SFE_IPV6_CONNECTION_MATCH_FLAG_PPPOE_ENCAP) {
		return true;
	}

	return false;
}

/*
 * sfe_ipv6_create_rule()
 *	Create a forwarding rule.
 */
int sfe_ipv6_create_rule(struct sfe_ipv6_rule_create_msg *msg)
{
	struct sfe_ipv6 *si = &__si6;
	struct sfe_ipv6_connection *c, *old_c;
	struct sfe_ipv6_connection_match *original_cm;
	struct sfe_ipv6_connection_match *reply_cm;
	struct net_device *dest_dev;
	struct net_device *src_dev;
	struct sfe_ipv6_5tuple *tuple = &msg->tuple;
	struct sock *sk;
	struct net *net;
	unsigned int src_if_idx;

	s32 flow_interface_num = msg->conn_rule.flow_top_interface_num;
	s32 return_interface_num = msg->conn_rule.return_top_interface_num;

	if (msg->rule_flags & SFE_RULE_CREATE_FLAG_USE_FLOW_BOTTOM_INTERFACE) {
		flow_interface_num = msg->conn_rule.flow_interface_num;
	}

	if (msg->rule_flags & SFE_RULE_CREATE_FLAG_USE_RETURN_BOTTOM_INTERFACE) {
		return_interface_num = msg->conn_rule.return_interface_num;
	}

	src_dev = dev_get_by_index(&init_net, flow_interface_num);
	if (!src_dev) {
		DEBUG_WARN("%px: Unable to find src_dev corresponding to %d\n", msg,
			   flow_interface_num);
		this_cpu_inc(si->stats_pcpu->connection_create_failures64);
		return -EINVAL;
	}

	dest_dev = dev_get_by_index(&init_net, return_interface_num);
	if (!dest_dev) {
		DEBUG_WARN("%px: Unable to find dest_dev corresponding to %d\n", msg,
			   return_interface_num);
		this_cpu_inc(si->stats_pcpu->connection_create_failures64);
		dev_put(src_dev);
		return -EINVAL;
	}

	if (unlikely((dest_dev->reg_state != NETREG_REGISTERED) ||
		     (src_dev->reg_state != NETREG_REGISTERED))) {
		DEBUG_WARN("%px: src_dev=%s and dest_dev=%s are unregistered\n", msg,
			   src_dev->name, dest_dev->name);
		this_cpu_inc(si->stats_pcpu->connection_create_failures64);
		dev_put(src_dev);
		dev_put(dest_dev);
		return -EINVAL;
	}

	/*
	 * Allocate the various connection tracking objects.
	 */
	c = (struct sfe_ipv6_connection *)kmalloc(sizeof(struct sfe_ipv6_connection), GFP_ATOMIC);
	if (unlikely(!c)) {
		DEBUG_WARN("%px: memory allocation of connection entry failed\n", msg);
		this_cpu_inc(si->stats_pcpu->connection_create_failures64);
		dev_put(src_dev);
		dev_put(dest_dev);
		return -ENOMEM;
	}

	original_cm = (struct sfe_ipv6_connection_match *)kmalloc(sizeof(struct sfe_ipv6_connection_match), GFP_ATOMIC);
	if (unlikely(!original_cm)) {
		this_cpu_inc(si->stats_pcpu->connection_create_failures64);
		DEBUG_WARN("%px: memory allocation of connection match entry failed\n", msg);
		kfree(c);
		dev_put(src_dev);
		dev_put(dest_dev);
		return -ENOMEM;
	}

	reply_cm = (struct sfe_ipv6_connection_match *)kmalloc(sizeof(struct sfe_ipv6_connection_match), GFP_ATOMIC);
	if (unlikely(!reply_cm)) {
		this_cpu_inc(si->stats_pcpu->connection_create_failures64);
		DEBUG_WARN("%px: memory allocation of connection match entry failed\n", msg);
		kfree(original_cm);
		kfree(c);
		dev_put(src_dev);
		dev_put(dest_dev);
		return -ENOMEM;
	}

	this_cpu_inc(si->stats_pcpu->connection_create_requests64);

	spin_lock_bh(&si->lock);

	/*
	 * Check to see if there is already a flow that matches the rule we're
	 * trying to create. If there is then we can't create a new one.
	 */
	old_c = sfe_ipv6_find_connection(si,
					 tuple->protocol,
					 (struct sfe_ipv6_addr *)tuple->flow_ip,
					 tuple->flow_ident,
					 (struct sfe_ipv6_addr *)tuple->return_ip,
					 tuple->return_ident);

	if (old_c != NULL) {
		this_cpu_inc(si->stats_pcpu->connection_create_collisions64);

		/*
		 * If we already have the flow then it's likely that this
		 * request to create the connection rule contains more
		 * up-to-date information. Check and update accordingly.
		 */
		sfe_ipv6_update_protocol_state(old_c, msg);
		spin_unlock_bh(&si->lock);

		kfree(reply_cm);
		kfree(original_cm);
		kfree(c);
		dev_put(src_dev);
		dev_put(dest_dev);

		DEBUG_TRACE("connection already exists - p: %d\n"
			    " s: %s:%pxM:%pI6:%u, d: %s:%pxM:%pI6:%u\n",
			    tuple->protocol,
			    src_dev->name, msg->conn_rule.flow_mac, tuple->flow_ip, ntohs(tuple->flow_ident),
			    dest_dev->name, msg->conn_rule.return_mac, tuple->return_ip, ntohs(tuple->return_ident));
		return -EADDRINUSE;
	}

	/*
	 * Fill in the "original" direction connection matching object.
	 * Note that the transmit MAC address is "dest_mac_xlate" because
	 * we always know both ends of a connection by their translated
	 * addresses and not their public addresses.
	 */
	original_cm->match_dev = src_dev;
	original_cm->match_protocol = tuple->protocol;
	original_cm->match_src_ip[0] = *(struct sfe_ipv6_addr *)tuple->flow_ip;
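	/*
	 * For VxLAN interfaces the outer UDP source port is derived from the
	 * flow hash and is not a stable part of the 5-tuple, so the source
	 * port match is wildcarded to 0 below.
	 */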
Suruchi Suman66609a72022-01-20 02:34:25 +05301172 original_cm->match_src_port = netif_is_vxlan(src_dev) ? 0 : tuple->flow_ident;
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301173 original_cm->match_dest_ip[0] = *(struct sfe_ipv6_addr *)tuple->return_ip;
1174 original_cm->match_dest_port = tuple->return_ident;
1175
1176 original_cm->xlate_src_ip[0] = *(struct sfe_ipv6_addr *)tuple->flow_ip;
1177 original_cm->xlate_src_port = tuple->flow_ident;
1178 original_cm->xlate_dest_ip[0] = *(struct sfe_ipv6_addr *)tuple->return_ip;
1179 original_cm->xlate_dest_port = tuple->return_ident;
1180
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301181 atomic_set(&original_cm->rx_packet_count, 0);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001182 original_cm->rx_packet_count64 = 0;
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301183 atomic_set(&original_cm->rx_byte_count, 0);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001184 original_cm->rx_byte_count64 = 0;
1185 original_cm->xmit_dev = dest_dev;
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301186
1187 original_cm->xmit_dev_mtu = msg->conn_rule.return_mtu;
Ratheesh Kannoth29140aa2021-10-20 08:25:02 +05301188
Xiaoping Fan978b3772015-05-27 14:15:18 -07001189 original_cm->connection = c;
1190 original_cm->counter_match = reply_cm;
Wayne Tanbb7f1782021-12-13 11:16:04 -08001191 original_cm->l2_hdr_size = 0;
1192 original_cm->flags = 0;
Suruchi Suman23a279d2021-11-16 15:13:09 +05301193
1194 /*
1195 * Valid in decap direction only
1196 */
1197 RCU_INIT_POINTER(original_cm->up, NULL);
1198
Ken Zhu37040ea2021-09-09 21:11:15 -07001199 if (msg->valid_flags & SFE_RULE_CREATE_MARK_VALID) {
1200 original_cm->mark = msg->mark_rule.flow_mark;
1201 original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_MARK;
1202 }
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301203 if (msg->valid_flags & SFE_RULE_CREATE_QOS_VALID) {
1204 original_cm->priority = msg->qos_rule.flow_qos_tag;
Xiaoping Fane1963d42015-08-25 17:06:19 -07001205 original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_PRIORITY_REMARK;
1206 }
Wayne Tanbb7f1782021-12-13 11:16:04 -08001207
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301208 if (msg->valid_flags & SFE_RULE_CREATE_DSCP_MARKING_VALID) {
1209 original_cm->dscp = msg->dscp_rule.flow_dscp << SFE_IPV6_DSCP_SHIFT;
Xiaoping Fane1963d42015-08-25 17:06:19 -07001210 original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_DSCP_REMARK;
1211 }
Ratheesh Kannoth71fc51e2022-01-05 10:02:47 +05301212 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_BRIDGE_FLOW) {
1213 original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_BRIDGE_FLOW;
1214 }
1215
Wayne Tanbb7f1782021-12-13 11:16:04 -08001216 /*
1217 * Add VLAN rule to original_cm
1218 */
1219 if (msg->valid_flags & SFE_RULE_CREATE_VLAN_VALID) {
1220 struct sfe_vlan_rule *vlan_primary_rule = &msg->vlan_primary_rule;
1221 struct sfe_vlan_rule *vlan_secondary_rule = &msg->vlan_secondary_rule;
1222 sfe_ipv6_match_entry_set_vlan(original_cm,
1223 vlan_primary_rule->ingress_vlan_tag,
1224 vlan_primary_rule->egress_vlan_tag,
1225 vlan_secondary_rule->ingress_vlan_tag,
1226 vlan_secondary_rule->egress_vlan_tag);
1227
1228 if ((msg->rule_flags & SFE_RULE_CREATE_FLAG_USE_RETURN_BOTTOM_INTERFACE) &&
1229 original_cm->egress_vlan_hdr_cnt > 0) {
1230 original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_INSERT_EGRESS_VLAN_TAG;
1231 original_cm->l2_hdr_size += original_cm->egress_vlan_hdr_cnt * VLAN_HLEN;
1232 }
1233 }
1234
Nitin Shettye6ed5b52021-12-27 14:50:11 +05301235 if ((IPPROTO_GRE == tuple->protocol) && !sfe_ipv6_is_local_ip(si, (uint8_t *)original_cm->match_dest_ip)) {
1236 original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_PASSTHROUGH;
1237 }
1238
Xiaoping Fan978b3772015-05-27 14:15:18 -07001239#ifdef CONFIG_NF_FLOW_COOKIE
1240 original_cm->flow_cookie = 0;
1241#endif
Zhi Chen8748eb32015-06-18 12:58:48 -07001242#ifdef CONFIG_XFRM
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301243 if (msg->valid_flags & SFE_RULE_CREATE_DIRECTION_VALID) {
1244 original_cm->flow_accel = msg->direction_rule.flow_accel;
1245 } else {
1246 original_cm->flow_accel = 1;
1247 }
Zhi Chen8748eb32015-06-18 12:58:48 -07001248#endif
Ratheesh Kannotha3cf0e02021-12-09 09:44:10 +05301249 /*
1250 * If l2_features are disabled and flow uses l2 features such as macvlan/bridge/pppoe/vlan,
1251 * bottom interfaces are expected to be disabled in the flow rule and always top interfaces
1252 * are used. In such cases, do not use HW csum offload. csum offload is used only when we
1253 * are sending directly to the destination interface that supports it.
1254 */
Nitin Shettye6ed5b52021-12-27 14:50:11 +05301255 if (likely(dest_dev->features & NETIF_F_HW_CSUM) && sfe_dev_has_hw_csum(dest_dev)) {
Ratheesh Kannotha3cf0e02021-12-09 09:44:10 +05301256 if ((msg->conn_rule.return_top_interface_num == msg->conn_rule.return_interface_num) ||
1257 (msg->rule_flags & SFE_RULE_CREATE_FLAG_USE_RETURN_BOTTOM_INTERFACE)) {
1258 original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_CSUM_OFFLOAD;
1259 }
1260 }
Xiaoping Fan978b3772015-05-27 14:15:18 -07001261
Wayne Tanbb7f1782021-12-13 11:16:04 -08001262 reply_cm->l2_hdr_size = 0;
Guduri Prathyusha647fe3e2021-11-22 19:17:51 +05301263 reply_cm->flags = 0;
1264
1265 /*
1266 * Add PPPoE parameters to the original and reply entries based on the direction in which
1267 * the PPPoE header is valid in the ECM rule.
1268 *
1269 * If PPPoE is valid in flow direction (from interface is PPPoE), then
1270 * original cm will have PPPoE at ingress (strip PPPoE header)
1271 * reply cm will have PPPoE at egress (add PPPoE header)
1272 *
1273 * If PPPoE is valid in return direction (to interface is PPPoE), then
1274 * original cm will have PPPoE at egress (add PPPoE header)
1275 * reply cm will have PPPoE at ingress (strip PPPoE header)
1276 */
1277 if (msg->valid_flags & SFE_RULE_CREATE_PPPOE_DECAP_VALID) {
1278 original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_PPPOE_DECAP;
1279 original_cm->pppoe_session_id = msg->pppoe_rule.flow_pppoe_session_id;
1280 ether_addr_copy(original_cm->pppoe_remote_mac, msg->pppoe_rule.flow_pppoe_remote_mac);
1281
1282 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_PPPOE_ENCAP;
Wayne Tanbb7f1782021-12-13 11:16:04 -08001283 reply_cm->l2_hdr_size += SFE_PPPOE_SESSION_HEADER_SIZE;
Guduri Prathyusha647fe3e2021-11-22 19:17:51 +05301284 reply_cm->pppoe_session_id = msg->pppoe_rule.flow_pppoe_session_id;
1285 ether_addr_copy(reply_cm->pppoe_remote_mac, msg->pppoe_rule.flow_pppoe_remote_mac);
1286 }
1287
1288 if (msg->valid_flags & SFE_RULE_CREATE_PPPOE_ENCAP_VALID) {
1289 original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_PPPOE_ENCAP;
Wayne Tanbb7f1782021-12-13 11:16:04 -08001290 original_cm->l2_hdr_size += SFE_PPPOE_SESSION_HEADER_SIZE;
Guduri Prathyusha647fe3e2021-11-22 19:17:51 +05301291 original_cm->pppoe_session_id = msg->pppoe_rule.return_pppoe_session_id;
1292 ether_addr_copy(original_cm->pppoe_remote_mac, msg->pppoe_rule.return_pppoe_remote_mac);
1293
1294 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_PPPOE_DECAP;
1295 reply_cm->pppoe_session_id = msg->pppoe_rule.return_pppoe_session_id;
1296 ether_addr_copy(reply_cm->pppoe_remote_mac, msg->pppoe_rule.return_pppoe_remote_mac);
1297 }
1298
Ratheesh Kannoth5dee3772022-01-18 11:27:14 +05301299 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_SRC_INTERFACE_CHECK) {
1300 original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK;
1301 }
1302
Xiaoping Fan978b3772015-05-27 14:15:18 -07001303 /*
Ken Zhubbf49652021-09-12 15:33:09 -07001304 * For non-ARP interfaces we don't write an L2 header.
Guduri Prathyusha79a5fee2021-11-11 17:59:10 +05301305 * PPPoE is excluded from this, since we now support PPPoE encap/decap.
Xiaoping Fan978b3772015-05-27 14:15:18 -07001306 */
Guduri Prathyusha79a5fee2021-11-11 17:59:10 +05301307 if (sfe_ipv6_xmit_eth_type_check(dest_dev, original_cm->flags)) {
Ratheesh Kannoth29140aa2021-10-20 08:25:02 +05301308
1309 /*
1310 * Check whether the rule has configured a specific source MAC address to use.
1311 * This is needed when virtual L3 interfaces such as br-lan, macvlan or vlan are used during egress.
1312 */
Ratheesh Kannoth71fc51e2022-01-05 10:02:47 +05301313 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_BRIDGE_FLOW) {
1314 ether_addr_copy((u8 *)original_cm->xmit_src_mac, (u8 *)msg->conn_rule.flow_mac);
Ratheesh Kannoth29140aa2021-10-20 08:25:02 +05301315 } else {
Ratheesh Kannoth71fc51e2022-01-05 10:02:47 +05301316 if ((msg->valid_flags & SFE_RULE_CREATE_SRC_MAC_VALID) &&
1317 (msg->src_mac_rule.mac_valid_flags & SFE_SRC_MAC_RETURN_VALID)) {
1318 ether_addr_copy((u8 *)original_cm->xmit_src_mac, (u8 *)msg->src_mac_rule.return_src_mac);
1319 } else {
1320 ether_addr_copy((u8 *)original_cm->xmit_src_mac, (u8 *)dest_dev->dev_addr);
1321 }
Ratheesh Kannoth29140aa2021-10-20 08:25:02 +05301322 }
1323 ether_addr_copy((u8 *)original_cm->xmit_dest_mac, (u8 *)msg->conn_rule.return_mac);
1324
Xiaoping Fan978b3772015-05-27 14:15:18 -07001325 original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_WRITE_L2_HDR;
Wayne Tanbb7f1782021-12-13 11:16:04 -08001326 original_cm->l2_hdr_size += ETH_HLEN;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001327
1328 /*
1329 * If our dev writes Ethernet headers then we can write a really fast
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301330 * version.
Xiaoping Fan978b3772015-05-27 14:15:18 -07001331 */
1332 if (dest_dev->header_ops) {
1333 if (dest_dev->header_ops->create == eth_header) {
1334 original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_WRITE_FAST_ETH_HDR;
1335 }
1336 }
1337 }
1338
1339 /*
1340 * Fill in the "reply" direction connection matching object.
1341 */
1342 reply_cm->match_dev = dest_dev;
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301343 reply_cm->match_protocol = tuple->protocol;
1344 reply_cm->match_src_ip[0] = *(struct sfe_ipv6_addr *)tuple->return_ip;
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301345 reply_cm->match_dest_ip[0] = *(struct sfe_ipv6_addr *)tuple->flow_ip;
1346 reply_cm->match_dest_port = tuple->flow_ident;
1347 reply_cm->xlate_src_ip[0] = *(struct sfe_ipv6_addr *)tuple->return_ip;
1348 reply_cm->xlate_src_port = tuple->return_ident;
1349 reply_cm->xlate_dest_ip[0] = *(struct sfe_ipv6_addr *)tuple->flow_ip;
1350 reply_cm->xlate_dest_port = tuple->flow_ident;
1351
Suruchi Suman23a279d2021-11-16 15:13:09 +05301352 /*
1353 * Keep source port as 0 for VxLAN tunnels.
1354 */
1355 if (netif_is_vxlan(src_dev) || netif_is_vxlan(dest_dev)) {
1356 reply_cm->match_src_port = 0;
1357 } else {
1358 reply_cm->match_src_port = tuple->return_ident;
1359 }
1360
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301361 atomic_set(&original_cm->rx_byte_count, 0);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001362 reply_cm->rx_packet_count64 = 0;
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301363 atomic_set(&reply_cm->rx_byte_count, 0);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001364 reply_cm->rx_byte_count64 = 0;
1365 reply_cm->xmit_dev = src_dev;
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301366 reply_cm->xmit_dev_mtu = msg->conn_rule.flow_mtu;
Ratheesh Kannoth29140aa2021-10-20 08:25:02 +05301367
Xiaoping Fan978b3772015-05-27 14:15:18 -07001368 reply_cm->connection = c;
1369 reply_cm->counter_match = original_cm;
Suruchi Suman23a279d2021-11-16 15:13:09 +05301370
Ken Zhu37040ea2021-09-09 21:11:15 -07001371 if (msg->valid_flags & SFE_RULE_CREATE_MARK_VALID) {
1372 reply_cm->mark = msg->mark_rule.return_mark;
1373 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_MARK;
1374 }
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301375 if (msg->valid_flags & SFE_RULE_CREATE_QOS_VALID) {
1376 reply_cm->priority = msg->qos_rule.return_qos_tag;
Xiaoping Fane1963d42015-08-25 17:06:19 -07001377 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_PRIORITY_REMARK;
1378 }
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301379 if (msg->valid_flags & SFE_RULE_CREATE_DSCP_MARKING_VALID) {
1380 reply_cm->dscp = msg->dscp_rule.return_dscp << SFE_IPV6_DSCP_SHIFT;
Xiaoping Fane1963d42015-08-25 17:06:19 -07001381 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_DSCP_REMARK;
1382 }
Guduri Prathyushaeb31c902021-11-10 20:18:50 +05301383
Ratheesh Kannoth71fc51e2022-01-05 10:02:47 +05301384 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_BRIDGE_FLOW) {
1385 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_BRIDGE_FLOW;
1386 }
1387
Nitin Shettye6ed5b52021-12-27 14:50:11 +05301388 if ((IPPROTO_GRE == tuple->protocol) && !sfe_ipv6_is_local_ip(si, (uint8_t *)reply_cm->match_dest_ip)) {
1389 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_PASSTHROUGH;
1390 }
1391
Suruchi Suman23a279d2021-11-16 15:13:09 +05301392 /*
1393 * Set up the UDP socket if it is found to be valid for decap.
1394 */
1395 RCU_INIT_POINTER(reply_cm->up, NULL);
1396 net = dev_net(reply_cm->match_dev);
1397 src_if_idx = src_dev->ifindex;
1398
1399 rcu_read_lock();
1400
1401 /*
1402 * Look for the associated sock object.
1403 * __udp6_lib_lookup() holds a reference for this sock object,
1404 * which will be released in sfe_ipv6_flush_connection()
1405 */
1406#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0))
1407 sk = __udp6_lib_lookup(net, (const struct in6_addr *)reply_cm->match_dest_ip,
1408 reply_cm->match_dest_port, (const struct in6_addr *)reply_cm->xlate_src_ip,
1409 reply_cm->xlate_src_port, src_if_idx, &udp_table);
1410#else
1411 sk = __udp6_lib_lookup(net, (const struct in6_addr *)reply_cm->match_dest_ip,
1412 reply_cm->match_dest_port, (const struct in6_addr *)reply_cm->xlate_src_ip,
1413 reply_cm->xlate_src_port, src_if_idx, 0, &udp_table, NULL);
1414#endif
1415 rcu_read_unlock();
1416
1417 /*
1418 * We set the UDP sock pointer as valid only for decap direction.
1419 */
1420 if (sk && udp_sk(sk)->encap_type) {
1421#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0))
1422 if (!atomic_add_unless(&sk->sk_refcnt, 1, 0)) {
1423#else
1424 if (!refcount_inc_not_zero(&sk->sk_refcnt)) {
1425#endif
Wayne Tanbb7f1782021-12-13 11:16:04 -08001426 spin_unlock_bh(&si->lock);
Suruchi Suman23a279d2021-11-16 15:13:09 +05301427 kfree(reply_cm);
1428 kfree(original_cm);
1429 kfree(c);
1430
1431 DEBUG_INFO("sfe: unable to take reference for socket p:%d\n", tuple->protocol);
1432 DEBUG_INFO("SK: connection - \n"
1433 " s: %s:%pI6(%pI6):%u(%u)\n"
1434 " d: %s:%pI6(%pI6):%u(%u)\n",
1435 reply_cm->match_dev->name, &reply_cm->match_src_ip, &reply_cm->xlate_src_ip,
1436 ntohs(reply_cm->match_src_port), ntohs(reply_cm->xlate_src_port),
1437 reply_cm->xmit_dev->name, &reply_cm->match_dest_ip, &reply_cm->xlate_dest_ip,
1438 ntohs(reply_cm->match_dest_port), ntohs(reply_cm->xlate_dest_port));
1439
1440 dev_put(src_dev);
1441 dev_put(dest_dev);
1442
1443 return -ESHUTDOWN;
1444 }
1445
1446 rcu_assign_pointer(reply_cm->up, udp_sk(sk));
1447 DEBUG_INFO("Sock lookup success with reply_cm direction(%p)\n", sk);
1448 DEBUG_INFO("SK: connection - \n"
1449 " s: %s:%pI6(%pI6):%u(%u)\n"
1450 " d: %s:%pI6(%pI6):%u(%u)\n",
1451 reply_cm->match_dev->name, &reply_cm->match_src_ip, &reply_cm->xlate_src_ip,
1452 ntohs(reply_cm->match_src_port), ntohs(reply_cm->xlate_src_port),
1453 reply_cm->xmit_dev->name, &reply_cm->match_dest_ip, &reply_cm->xlate_dest_ip,
1454 ntohs(reply_cm->match_dest_port), ntohs(reply_cm->xlate_dest_port));
1455 }
1456
Wayne Tanbb7f1782021-12-13 11:16:04 -08001457 /*
1458 * Add VLAN rule to reply_cm
1459 */
1460 if (msg->valid_flags & SFE_RULE_CREATE_VLAN_VALID) {
1461 struct sfe_vlan_rule *vlan_primary_rule = &msg->vlan_primary_rule;
1462 struct sfe_vlan_rule *vlan_secondary_rule = &msg->vlan_secondary_rule;
1463 sfe_ipv6_match_entry_set_vlan(reply_cm,
1464 vlan_primary_rule->egress_vlan_tag,
1465 vlan_primary_rule->ingress_vlan_tag,
1466 vlan_secondary_rule->egress_vlan_tag,
1467 vlan_secondary_rule->ingress_vlan_tag);
1468
1469 if ((msg->rule_flags & SFE_RULE_CREATE_FLAG_USE_FLOW_BOTTOM_INTERFACE) &&
1470 reply_cm->egress_vlan_hdr_cnt > 0) {
1471 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_INSERT_EGRESS_VLAN_TAG;
1472 reply_cm->l2_hdr_size += reply_cm->egress_vlan_hdr_cnt * VLAN_HLEN;
1473 }
1474 }
1475
Xiaoping Fan978b3772015-05-27 14:15:18 -07001476#ifdef CONFIG_NF_FLOW_COOKIE
1477 reply_cm->flow_cookie = 0;
1478#endif
Zhi Chen8748eb32015-06-18 12:58:48 -07001479#ifdef CONFIG_XFRM
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301480 if (msg->valid_flags & SFE_RULE_CREATE_DIRECTION_VALID) {
1481 reply_cm->flow_accel = msg->direction_rule.return_accel;
1482 } else {
1483 reply_cm->flow_accel = 1;
1484 }
Zhi Chen8748eb32015-06-18 12:58:48 -07001485#endif
Nitin Shettye6ed5b52021-12-27 14:50:11 +05301486
1487 /*
1488 * The inet6_protocol handler is used only in the decap path,
1489 * and only for the non-passthrough case.
1490 */
1491 original_cm->proto = NULL;
1492 reply_cm->proto = NULL;
1493
1494#ifdef SFE_GRE_TUN_ENABLE
1495 if (!(reply_cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_PASSTHROUGH)) {
1496 rcu_read_lock();
1497 reply_cm->proto = rcu_dereference(inet6_protos[tuple->protocol]);
1498 rcu_read_unlock();
1499
1500 if (unlikely(!reply_cm->proto)) {
1501 kfree(reply_cm);
1502 kfree(original_cm);
1503 kfree(c);
1504 dev_put(src_dev);
1505 dev_put(dest_dev);
1506 DEBUG_WARN("sfe: GRE proto handler is not registered\n");
1507 return -EPERM;
1508 }
1509 }
1510#endif
1511
Xiaoping Fan978b3772015-05-27 14:15:18 -07001512 /*
Tian Yangafb03452022-01-13 18:53:13 -08001513 * The decapsulation path has proto set.
 1514 * This is used to differentiate decap from encap and to call the protocol-specific handler.
1515 */
1516 if (IPPROTO_IPIP == tuple->protocol) {
1517 original_cm->proto = NULL;
1518 rcu_read_lock();
1519 reply_cm->proto = rcu_dereference(inet6_protos[tuple->protocol]);
1520 rcu_read_unlock();
1521 }
1522 /*
Ratheesh Kannotha3cf0e02021-12-09 09:44:10 +05301523 * If l2_features are disabled and the flow uses L2 features such as macvlan/bridge/PPPoE/VLAN,
1524 * the bottom interfaces are expected to be left out of the flow rule and the top interfaces
1525 * are always used. In such cases, do not use HW csum offload; csum offload is used only when
1526 * we are sending directly to a destination interface that supports it.
1527 */
Nitin Shettye6ed5b52021-12-27 14:50:11 +05301528 if (likely(src_dev->features & NETIF_F_HW_CSUM) && sfe_dev_has_hw_csum(src_dev)) {
Ratheesh Kannotha3cf0e02021-12-09 09:44:10 +05301529 if ((msg->conn_rule.flow_top_interface_num == msg->conn_rule.flow_interface_num) ||
1530 (msg->rule_flags & SFE_RULE_CREATE_FLAG_USE_FLOW_BOTTOM_INTERFACE)) {
1531 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_CSUM_OFFLOAD;
1532 }
1533 }
1534
Ratheesh Kannoth5dee3772022-01-18 11:27:14 +05301535 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_SRC_INTERFACE_CHECK) {
1536 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK;
1537 }
1538
Ratheesh Kannotha3cf0e02021-12-09 09:44:10 +05301539 /*
Ken Zhubbf49652021-09-12 15:33:09 -07001540 * For non-ARP interfaces we don't write an L2 header.
Guduri Prathyusha79a5fee2021-11-11 17:59:10 +05301541 * PPPoE is excluded from this, since we now support PPPoE encap/decap.
Xiaoping Fan978b3772015-05-27 14:15:18 -07001542 */
Guduri Prathyusha79a5fee2021-11-11 17:59:10 +05301543 if (sfe_ipv6_xmit_eth_type_check(src_dev, reply_cm->flags)) {
Ratheesh Kannoth29140aa2021-10-20 08:25:02 +05301544
1545 /*
1546 * Check whether the rule has configured a specific source MAC address to use.
1547 * This is needed when virtual L3 interfaces such as br-lan, macvlan or vlan are used during egress.
1548 */
Ratheesh Kannoth71fc51e2022-01-05 10:02:47 +05301549 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_BRIDGE_FLOW) {
1550 ether_addr_copy((u8 *)reply_cm->xmit_src_mac, (u8 *)msg->conn_rule.return_mac);
Ratheesh Kannoth29140aa2021-10-20 08:25:02 +05301551 } else {
Ratheesh Kannoth71fc51e2022-01-05 10:02:47 +05301552 if ((msg->valid_flags & SFE_RULE_CREATE_SRC_MAC_VALID) &&
1553 (msg->src_mac_rule.mac_valid_flags & SFE_SRC_MAC_FLOW_VALID)) {
1554 ether_addr_copy((u8 *)reply_cm->xmit_src_mac, (u8 *)msg->src_mac_rule.flow_src_mac);
1555 } else {
1556 ether_addr_copy((u8 *)reply_cm->xmit_src_mac, (u8 *)src_dev->dev_addr);
1557 }
Ratheesh Kannoth29140aa2021-10-20 08:25:02 +05301558 }
1559
1560 ether_addr_copy((u8 *)reply_cm->xmit_dest_mac, (u8 *)msg->conn_rule.flow_mac);
1561
Xiaoping Fan978b3772015-05-27 14:15:18 -07001562 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_WRITE_L2_HDR;
Wayne Tanbb7f1782021-12-13 11:16:04 -08001563 reply_cm->l2_hdr_size += ETH_HLEN;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001564
1565 /*
1566 * If our dev writes Ethernet headers then we can write a really fast
1567 * version.
1568 */
1569 if (src_dev->header_ops) {
1570 if (src_dev->header_ops->create == eth_header) {
1571 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_WRITE_FAST_ETH_HDR;
1572 }
1573 }
1574 }
1575
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301576 /*
1577 * No support for NAT in ipv6
1578 */
Xiaoping Fan978b3772015-05-27 14:15:18 -07001579
Xiaoping Fan978b3772015-05-27 14:15:18 -07001580 /*
Xiaoping Fan978b3772015-05-27 14:15:18 -07001581 * Initialize the protocol-specific information that we track.
1582 */
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301583 switch (tuple->protocol) {
Xiaoping Fan978b3772015-05-27 14:15:18 -07001584 case IPPROTO_TCP:
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301585 original_cm->protocol_state.tcp.win_scale = msg->tcp_rule.flow_window_scale;
1586 original_cm->protocol_state.tcp.max_win = msg->tcp_rule.flow_max_window ? msg->tcp_rule.flow_max_window : 1;
1587 original_cm->protocol_state.tcp.end = msg->tcp_rule.flow_end;
1588 original_cm->protocol_state.tcp.max_end = msg->tcp_rule.flow_max_end;
1589 reply_cm->protocol_state.tcp.win_scale = msg->tcp_rule.return_window_scale;
1590 reply_cm->protocol_state.tcp.max_win = msg->tcp_rule.return_max_window ? msg->tcp_rule.return_max_window : 1;
1591 reply_cm->protocol_state.tcp.end = msg->tcp_rule.return_end;
1592 reply_cm->protocol_state.tcp.max_end = msg->tcp_rule.return_max_end;
1593 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_NO_SEQ_CHECK) {
Xiaoping Fan978b3772015-05-27 14:15:18 -07001594 original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_NO_SEQ_CHECK;
1595 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_NO_SEQ_CHECK;
1596 }
1597 break;
1598 }
1599
Wayne Tanbb7f1782021-12-13 11:16:04 -08001600 /*
1601 * Fill in the ipv6_connection object.
1602 */
1603 c->protocol = tuple->protocol;
1604 c->src_ip[0] = *(struct sfe_ipv6_addr *)tuple->flow_ip;
1605 c->src_ip_xlate[0] = *(struct sfe_ipv6_addr *)tuple->flow_ip;
1606 c->src_port = tuple->flow_ident;
1607 c->src_port_xlate = tuple->flow_ident;
1608 c->original_dev = src_dev;
1609 c->original_match = original_cm;
1610
1611 c->dest_ip[0] = *(struct sfe_ipv6_addr *)tuple->return_ip;
1612 c->dest_ip_xlate[0] = *(struct sfe_ipv6_addr *)tuple->return_ip;
1613 c->dest_port = tuple->return_ident;
1614 c->dest_port_xlate = tuple->return_ident;
1615
1616 c->reply_dev = dest_dev;
1617 c->reply_match = reply_cm;
1618 c->debug_read_seq = 0;
1619 c->last_sync_jiffies = get_jiffies_64();
1620 c->removed = false;
1621
Xiaoping Fan978b3772015-05-27 14:15:18 -07001622 sfe_ipv6_connection_match_compute_translations(original_cm);
1623 sfe_ipv6_connection_match_compute_translations(reply_cm);
1624 sfe_ipv6_insert_connection(si, c);
1625
1626 spin_unlock_bh(&si->lock);
1627
1628 /*
1629 * We have everything we need!
1630 */
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301631 DEBUG_INFO("new connection - p: %d\n"
Tian Yang45f39c82020-10-06 14:07:47 -07001632 " s: %s:%pxM(%pxM):%pI6(%pI6):%u(%u)\n"
1633 " d: %s:%pxM(%pxM):%pI6(%pI6):%u(%u)\n",
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301634 tuple->protocol,
1635 src_dev->name, msg->conn_rule.flow_mac, NULL,
1636 (void *)tuple->flow_ip, (void *)tuple->flow_ip, ntohs(tuple->flow_ident), ntohs(tuple->flow_ident),
1637 dest_dev->name, NULL, msg->conn_rule.return_mac,
1638 (void *)tuple->return_ip, (void *)tuple->return_ip, ntohs(tuple->return_ident), ntohs(tuple->return_ident));
Xiaoping Fan978b3772015-05-27 14:15:18 -07001639
1640 return 0;
1641}
1642
1643/*
1644 * sfe_ipv6_destroy_rule()
1645 * Destroy a forwarding rule.
1646 */
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301647void sfe_ipv6_destroy_rule(struct sfe_ipv6_rule_destroy_msg *msg)
Xiaoping Fan978b3772015-05-27 14:15:18 -07001648{
1649 struct sfe_ipv6 *si = &__si6;
1650 struct sfe_ipv6_connection *c;
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301651 bool ret;
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301652 struct sfe_ipv6_5tuple *tuple = &msg->tuple;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001653
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05301654 this_cpu_inc(si->stats_pcpu->connection_destroy_requests64);
1655
Xiaoping Fan978b3772015-05-27 14:15:18 -07001656 spin_lock_bh(&si->lock);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001657
1658 /*
1659 * Check to see if we have a flow that matches the rule we're trying
1660 * to destroy. If there isn't then we can't destroy it.
1661 */
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301662 c = sfe_ipv6_find_connection(si, tuple->protocol, (struct sfe_ipv6_addr *)tuple->flow_ip, tuple->flow_ident,
1663 (struct sfe_ipv6_addr *)tuple->return_ip, tuple->return_ident);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001664 if (!c) {
Xiaoping Fan978b3772015-05-27 14:15:18 -07001665 spin_unlock_bh(&si->lock);
1666
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05301667 this_cpu_inc(si->stats_pcpu->connection_destroy_misses64);
1668
Xiaoping Fan978b3772015-05-27 14:15:18 -07001669 DEBUG_TRACE("connection does not exist - p: %d, s: %pI6:%u, d: %pI6:%u\n",
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301670 tuple->protocol, tuple->flow_ip, ntohs(tuple->flow_ident),
1671 tuple->return_ip, ntohs(tuple->return_ident));
Xiaoping Fan978b3772015-05-27 14:15:18 -07001672 return;
1673 }
1674
1675 /*
1676 * Remove our connection details from the hash tables.
1677 */
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301678 ret = sfe_ipv6_remove_connection(si, c);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001679 spin_unlock_bh(&si->lock);
1680
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301681 if (ret) {
1682 sfe_ipv6_flush_connection(si, c, SFE_SYNC_REASON_DESTROY);
1683 }
Xiaoping Fan978b3772015-05-27 14:15:18 -07001684
1685 DEBUG_INFO("connection destroyed - p: %d, s: %pI6:%u, d: %pI6:%u\n",
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301686 tuple->protocol, tuple->flow_ip, ntohs(tuple->flow_ident),
1687 tuple->return_ip, ntohs(tuple->return_ident));
Xiaoping Fan978b3772015-05-27 14:15:18 -07001688}
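
#if 0
/*
 * Illustrative sketch only (kept compiled out): how a rule owner might fill
 * in a destroy message for a TCP flow and hand it to sfe_ipv6_destroy_rule().
 * The helper name and its arguments are placeholders; only the 5-tuple fields
 * that sfe_ipv6_destroy_rule() reads above are assumed.
 */
static void sfe_ipv6_destroy_rule_example(const struct in6_addr *flow_ip, const struct in6_addr *return_ip,
					  __be16 flow_ident, __be16 return_ident)
{
	struct sfe_ipv6_rule_destroy_msg nidm;

	memset(&nidm, 0, sizeof(nidm));

	/*
	 * The lookup above keys off the 5-tuple alone, so that is all we populate.
	 */
	nidm.tuple.protocol = IPPROTO_TCP;
	memcpy(nidm.tuple.flow_ip, flow_ip, sizeof(nidm.tuple.flow_ip));
	memcpy(nidm.tuple.return_ip, return_ip, sizeof(nidm.tuple.return_ip));
	nidm.tuple.flow_ident = flow_ident;
	nidm.tuple.return_ident = return_ident;

	sfe_ipv6_destroy_rule(&nidm);
}
#endif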
1689
1690/*
1691 * sfe_ipv6_register_sync_rule_callback()
1692 * Register a callback for rule synchronization.
1693 */
1694void sfe_ipv6_register_sync_rule_callback(sfe_sync_rule_callback_t sync_rule_callback)
1695{
1696 struct sfe_ipv6 *si = &__si6;
1697
1698 spin_lock_bh(&si->lock);
1699 rcu_assign_pointer(si->sync_rule_callback, sync_rule_callback);
1700 spin_unlock_bh(&si->lock);
1701}
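
#if 0
/*
 * Illustrative sketch only (kept compiled out): a minimal consumer of the
 * sync callback as it might be registered through the function above. It
 * assumes the callback receives a struct sfe_connection_sync pointer, which
 * is how sfe_ipv6_periodic_sync() below invokes it; the function names here
 * are placeholders.
 */
static void sfe_ipv6_example_sync_callback(struct sfe_connection_sync *sis)
{
	/*
	 * Fold the per-connection statistics snapshot into the caller's own
	 * accounting here.
	 */
}

static void sfe_ipv6_example_sync_attach(void)
{
	sfe_ipv6_register_sync_rule_callback(sfe_ipv6_example_sync_callback);
}

static void sfe_ipv6_example_sync_detach(void)
{
	/*
	 * Registering NULL stops further sync callbacks, as checked in
	 * sfe_ipv6_periodic_sync().
	 */
	sfe_ipv6_register_sync_rule_callback(NULL);
}
#endif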
1702
1703/*
1704 * sfe_ipv6_get_debug_dev()
1705 */
1706static ssize_t sfe_ipv6_get_debug_dev(struct device *dev,
1707 struct device_attribute *attr,
1708 char *buf)
1709{
1710 struct sfe_ipv6 *si = &__si6;
1711 ssize_t count;
1712 int num;
1713
1714 spin_lock_bh(&si->lock);
1715 num = si->debug_dev;
1716 spin_unlock_bh(&si->lock);
1717
1718 count = snprintf(buf, (ssize_t)PAGE_SIZE, "%d\n", num);
1719 return count;
1720}
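
/*
 * The number read back here is the character device major that
 * register_chrdev() returns in sfe_ipv6_init() below. One illustrative way
 * to consume it from userspace is to create a node for it, e.g.
 * "mknod /dev/sfe_ipv6 c <major> 0", and then read the XML state dump
 * through that node.
 */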
1721
1722/*
1723 * sfe_ipv6_destroy_all_rules_for_dev()
1724 * Destroy all connections that match a particular device.
1725 *
1726 * If we pass dev as NULL then this destroys all connections.
1727 */
1728void sfe_ipv6_destroy_all_rules_for_dev(struct net_device *dev)
1729{
1730 struct sfe_ipv6 *si = &__si6;
1731 struct sfe_ipv6_connection *c;
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301732 bool ret;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001733
Xiaoping Fan34586472015-07-03 02:20:35 -07001734another_round:
Xiaoping Fan978b3772015-05-27 14:15:18 -07001735 spin_lock_bh(&si->lock);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001736
Xiaoping Fan34586472015-07-03 02:20:35 -07001737 for (c = si->all_connections_head; c; c = c->all_connections_next) {
Xiaoping Fan978b3772015-05-27 14:15:18 -07001738 /*
Xiaoping Fan34586472015-07-03 02:20:35 -07001739 * Does this connection relate to the device we are destroying?
Xiaoping Fan978b3772015-05-27 14:15:18 -07001740 */
1741 if (!dev
1742 || (dev == c->original_dev)
1743 || (dev == c->reply_dev)) {
Xiaoping Fan34586472015-07-03 02:20:35 -07001744 break;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001745 }
Xiaoping Fan34586472015-07-03 02:20:35 -07001746 }
Xiaoping Fan978b3772015-05-27 14:15:18 -07001747
Xiaoping Fan34586472015-07-03 02:20:35 -07001748 if (c) {
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301749 ret = sfe_ipv6_remove_connection(si, c);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001750 }
1751
1752 spin_unlock_bh(&si->lock);
Xiaoping Fan34586472015-07-03 02:20:35 -07001753
1754 if (c) {
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301755 if (ret) {
1756 sfe_ipv6_flush_connection(si, c, SFE_SYNC_REASON_DESTROY);
1757 }
Xiaoping Fan34586472015-07-03 02:20:35 -07001758 goto another_round;
1759 }
Xiaoping Fan978b3772015-05-27 14:15:18 -07001760}
1761
1762/*
1763 * sfe_ipv6_periodic_sync()
1764 */
Ken Zhu137722d2021-09-23 17:57:36 -07001765static void sfe_ipv6_periodic_sync(struct work_struct *work)
Xiaoping Fan978b3772015-05-27 14:15:18 -07001766{
Ken Zhu137722d2021-09-23 17:57:36 -07001767 struct sfe_ipv6 *si = container_of((struct delayed_work *)work, struct sfe_ipv6, sync_dwork);
Xiaoping Fan6a1672f2016-08-17 19:58:12 -07001768 u64 now_jiffies;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001769 int quota;
1770 sfe_sync_rule_callback_t sync_rule_callback;
Ken Zhu32b95392021-09-03 13:52:04 -07001771 struct sfe_ipv6_connection *c;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001772
1773 now_jiffies = get_jiffies_64();
1774
1775 rcu_read_lock();
1776 sync_rule_callback = rcu_dereference(si->sync_rule_callback);
1777 if (!sync_rule_callback) {
1778 rcu_read_unlock();
1779 goto done;
1780 }
1781
1782 spin_lock_bh(&si->lock);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001783
1784 /*
Ken Zhu32b95392021-09-03 13:52:04 -07001785 * If we have reached the end of the connection list, walk from
1786 * the connection head.
1787 */
1788 c = si->wc_next;
1789 if (unlikely(!c)) {
1790 c = si->all_connections_head;
1791 }
1792 /*
Xiaoping Fan978b3772015-05-27 14:15:18 -07001793 * Get an estimate of the number of connections to parse in this sync.
1794 */
1795 quota = (si->num_connections + 63) / 64;
1796
1797 /*
Ken Zhu32b95392021-09-03 13:52:04 -07001798 * Walk the "all connection" list and sync the connection state.
Xiaoping Fan978b3772015-05-27 14:15:18 -07001799 */
Ken Zhu32b95392021-09-03 13:52:04 -07001800 while (likely(c && quota)) {
Xiaoping Fan978b3772015-05-27 14:15:18 -07001801 struct sfe_ipv6_connection_match *cm;
1802 struct sfe_ipv6_connection_match *counter_cm;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001803 struct sfe_connection_sync sis;
1804
Ken Zhu32b95392021-09-03 13:52:04 -07001805 cm = c->original_match;
1806 counter_cm = c->reply_match;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001807
1808 /*
Ken Zhu32b95392021-09-03 13:52:04 -07001809 * If we didn't receive packets in either the original or the reply
 1810 * direction, move on to the next connection.
Xiaoping Fan978b3772015-05-27 14:15:18 -07001811 */
Ken Zhu32b95392021-09-03 13:52:04 -07001812 if (!atomic_read(&cm->rx_packet_count) && !atomic_read(&counter_cm->rx_packet_count)) {
1813 c = c->all_connections_next;
1814 continue;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001815 }
1816
Ken Zhu32b95392021-09-03 13:52:04 -07001817 quota--;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001818
1819 /*
1820 * Sync the connection state.
1821 */
Xiaoping Fan99cb4c12015-08-21 19:07:32 -07001822 sfe_ipv6_gen_sync_connection(si, c, &sis, SFE_SYNC_REASON_STATS, now_jiffies);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001823
Ken Zhu32b95392021-09-03 13:52:04 -07001824 si->wc_next = c->all_connections_next;
1825
Xiaoping Fan978b3772015-05-27 14:15:18 -07001826 spin_unlock_bh(&si->lock);
1827 sync_rule_callback(&sis);
1828 spin_lock_bh(&si->lock);
Ken Zhu32b95392021-09-03 13:52:04 -07001829
1830 /*
1831 * c must be read and used within the same lock/unlock window;
1832 * the connection could be removed while we don't hold the lock,
1833 * so defer re-reading wc_next until after the callback returns and the lock is re-taken.
1834 */
1835 c = si->wc_next;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001836 }
1837
Ken Zhu32b95392021-09-03 13:52:04 -07001838 /*
1839 * At the end of the loop, point wc_next at the connection we left off at.
1840 */
1841 si->wc_next = c;
1842
Xiaoping Fan978b3772015-05-27 14:15:18 -07001843 spin_unlock_bh(&si->lock);
1844 rcu_read_unlock();
1845
1846done:
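	/*
	 * (HZ + 99) / 100 is HZ/100 rounded up, so this work re-arms itself
	 * roughly every 10ms regardless of the kernel's configured tick rate.
	 */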
Ken Zhu137722d2021-09-23 17:57:36 -07001847 schedule_delayed_work_on(si->work_cpu, (struct delayed_work *)work, ((HZ + 99) / 100));
Xiaoping Fan978b3772015-05-27 14:15:18 -07001848}
1849
1850/*
1851 * sfe_ipv6_debug_dev_read_start()
1852 * Generate part of the XML output.
1853 */
1854static bool sfe_ipv6_debug_dev_read_start(struct sfe_ipv6 *si, char *buffer, char *msg, size_t *length,
1855 int *total_read, struct sfe_ipv6_debug_xml_write_state *ws)
1856{
1857 int bytes_read;
1858
Xiaoping Fan34586472015-07-03 02:20:35 -07001859 si->debug_read_seq++;
1860
Xiaoping Fan978b3772015-05-27 14:15:18 -07001861 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE, "<sfe_ipv6>\n");
1862 if (copy_to_user(buffer + *total_read, msg, CHAR_DEV_MSG_SIZE)) {
1863 return false;
1864 }
1865
1866 *length -= bytes_read;
1867 *total_read += bytes_read;
1868
1869 ws->state++;
1870 return true;
1871}
1872
1873/*
1874 * sfe_ipv6_debug_dev_read_connections_start()
1875 * Generate part of the XML output.
1876 */
1877static bool sfe_ipv6_debug_dev_read_connections_start(struct sfe_ipv6 *si, char *buffer, char *msg, size_t *length,
1878 int *total_read, struct sfe_ipv6_debug_xml_write_state *ws)
1879{
1880 int bytes_read;
1881
1882 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE, "\t<connections>\n");
1883 if (copy_to_user(buffer + *total_read, msg, CHAR_DEV_MSG_SIZE)) {
1884 return false;
1885 }
1886
1887 *length -= bytes_read;
1888 *total_read += bytes_read;
1889
1890 ws->state++;
1891 return true;
1892}
1893
1894/*
1895 * sfe_ipv6_debug_dev_read_connections_connection()
1896 * Generate part of the XML output.
1897 */
1898static bool sfe_ipv6_debug_dev_read_connections_connection(struct sfe_ipv6 *si, char *buffer, char *msg, size_t *length,
1899 int *total_read, struct sfe_ipv6_debug_xml_write_state *ws)
1900{
1901 struct sfe_ipv6_connection *c;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001902 struct sfe_ipv6_connection_match *original_cm;
1903 struct sfe_ipv6_connection_match *reply_cm;
1904 int bytes_read;
1905 int protocol;
1906 struct net_device *src_dev;
1907 struct sfe_ipv6_addr src_ip;
1908 struct sfe_ipv6_addr src_ip_xlate;
1909 __be16 src_port;
1910 __be16 src_port_xlate;
Xiaoping Fan6a1672f2016-08-17 19:58:12 -07001911 u64 src_rx_packets;
1912 u64 src_rx_bytes;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001913 struct net_device *dest_dev;
1914 struct sfe_ipv6_addr dest_ip;
1915 struct sfe_ipv6_addr dest_ip_xlate;
1916 __be16 dest_port;
1917 __be16 dest_port_xlate;
Xiaoping Fan6a1672f2016-08-17 19:58:12 -07001918 u64 dest_rx_packets;
1919 u64 dest_rx_bytes;
1920 u64 last_sync_jiffies;
Ken Zhu37040ea2021-09-09 21:11:15 -07001921 u32 src_mark, dest_mark, src_priority, dest_priority, src_dscp, dest_dscp;
Guduri Prathyushaeb31c902021-11-10 20:18:50 +05301922 u32 packet, byte, original_cm_flags;
1923 u16 pppoe_session_id;
1924 u8 pppoe_remote_mac[ETH_ALEN];
Xiaoping Fan978b3772015-05-27 14:15:18 -07001925#ifdef CONFIG_NF_FLOW_COOKIE
1926 int src_flow_cookie, dst_flow_cookie;
1927#endif
1928
1929 spin_lock_bh(&si->lock);
Xiaoping Fan34586472015-07-03 02:20:35 -07001930
1931 for (c = si->all_connections_head; c; c = c->all_connections_next) {
1932 if (c->debug_read_seq < si->debug_read_seq) {
1933 c->debug_read_seq = si->debug_read_seq;
1934 break;
1935 }
1936 }
Xiaoping Fan978b3772015-05-27 14:15:18 -07001937
1938 /*
Xiaoping Fan34586472015-07-03 02:20:35 -07001939 * If there were no connections then move to the next state.
Xiaoping Fan978b3772015-05-27 14:15:18 -07001940 */
1941 if (!c) {
Xiaoping Fan978b3772015-05-27 14:15:18 -07001942 spin_unlock_bh(&si->lock);
Xiaoping Fan34586472015-07-03 02:20:35 -07001943 ws->state++;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001944 return true;
1945 }
1946
1947 original_cm = c->original_match;
1948 reply_cm = c->reply_match;
1949
1950 protocol = c->protocol;
1951 src_dev = c->original_dev;
1952 src_ip = c->src_ip[0];
1953 src_ip_xlate = c->src_ip_xlate[0];
1954 src_port = c->src_port;
1955 src_port_xlate = c->src_port_xlate;
Xiaoping Fane1963d42015-08-25 17:06:19 -07001956 src_priority = original_cm->priority;
1957 src_dscp = original_cm->dscp >> SFE_IPV6_DSCP_SHIFT;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001958
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301959 sfe_ipv6_connection_match_update_summary_stats(original_cm, &packet, &byte);
1960 sfe_ipv6_connection_match_update_summary_stats(reply_cm, &packet, &byte);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001961
1962 src_rx_packets = original_cm->rx_packet_count64;
1963 src_rx_bytes = original_cm->rx_byte_count64;
Ken Zhu37040ea2021-09-09 21:11:15 -07001964 src_mark = original_cm->mark;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001965 dest_dev = c->reply_dev;
1966 dest_ip = c->dest_ip[0];
1967 dest_ip_xlate = c->dest_ip_xlate[0];
1968 dest_port = c->dest_port;
1969 dest_port_xlate = c->dest_port_xlate;
Xiaoping Fane1963d42015-08-25 17:06:19 -07001970 dest_priority = reply_cm->priority;
1971 dest_dscp = reply_cm->dscp >> SFE_IPV6_DSCP_SHIFT;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001972 dest_rx_packets = reply_cm->rx_packet_count64;
1973 dest_rx_bytes = reply_cm->rx_byte_count64;
1974 last_sync_jiffies = get_jiffies_64() - c->last_sync_jiffies;
Guduri Prathyushaeb31c902021-11-10 20:18:50 +05301975 original_cm_flags = original_cm->flags;
1976 pppoe_session_id = original_cm->pppoe_session_id;
1977 ether_addr_copy(pppoe_remote_mac, original_cm->pppoe_remote_mac);
Ken Zhu37040ea2021-09-09 21:11:15 -07001978 dest_mark = reply_cm->mark;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001979#ifdef CONFIG_NF_FLOW_COOKIE
1980 src_flow_cookie = original_cm->flow_cookie;
1981 dst_flow_cookie = reply_cm->flow_cookie;
1982#endif
1983 spin_unlock_bh(&si->lock);
1984
1985 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE, "\t\t<connection "
1986 "protocol=\"%u\" "
1987 "src_dev=\"%s\" "
1988 "src_ip=\"%pI6\" src_ip_xlate=\"%pI6\" "
1989 "src_port=\"%u\" src_port_xlate=\"%u\" "
Xiaoping Fane1963d42015-08-25 17:06:19 -07001990 "src_priority=\"%u\" src_dscp=\"%u\" "
Xiaoping Fan978b3772015-05-27 14:15:18 -07001991 "src_rx_pkts=\"%llu\" src_rx_bytes=\"%llu\" "
Ken Zhu37040ea2021-09-09 21:11:15 -07001992 "src_mark=\"%08x\" "
Xiaoping Fan978b3772015-05-27 14:15:18 -07001993 "dest_dev=\"%s\" "
1994 "dest_ip=\"%pI6\" dest_ip_xlate=\"%pI6\" "
1995 "dest_port=\"%u\" dest_port_xlate=\"%u\" "
Xiaoping Fane1963d42015-08-25 17:06:19 -07001996 "dest_priority=\"%u\" dest_dscp=\"%u\" "
Xiaoping Fan978b3772015-05-27 14:15:18 -07001997 "dest_rx_pkts=\"%llu\" dest_rx_bytes=\"%llu\" "
Ken Zhu37040ea2021-09-09 21:11:15 -07001998 "dest_mark=\"%08x\" "
Xiaoping Fan978b3772015-05-27 14:15:18 -07001999#ifdef CONFIG_NF_FLOW_COOKIE
2000 "src_flow_cookie=\"%d\" dst_flow_cookie=\"%d\" "
2001#endif
Ken Zhu37040ea2021-09-09 21:11:15 -07002002 "last_sync=\"%llu\" ",
Xiaoping Fan978b3772015-05-27 14:15:18 -07002003 protocol,
2004 src_dev->name,
2005 &src_ip, &src_ip_xlate,
2006 ntohs(src_port), ntohs(src_port_xlate),
Xiaoping Fane1963d42015-08-25 17:06:19 -07002007 src_priority, src_dscp,
Xiaoping Fan978b3772015-05-27 14:15:18 -07002008 src_rx_packets, src_rx_bytes,
Ken Zhu37040ea2021-09-09 21:11:15 -07002009 src_mark,
Xiaoping Fan978b3772015-05-27 14:15:18 -07002010 dest_dev->name,
2011 &dest_ip, &dest_ip_xlate,
2012 ntohs(dest_port), ntohs(dest_port_xlate),
Xiaoping Fane1963d42015-08-25 17:06:19 -07002013 dest_priority, dest_dscp,
Xiaoping Fan978b3772015-05-27 14:15:18 -07002014 dest_rx_packets, dest_rx_bytes,
Ken Zhu37040ea2021-09-09 21:11:15 -07002015 dest_mark,
Xiaoping Fan978b3772015-05-27 14:15:18 -07002016#ifdef CONFIG_NF_FLOW_COOKIE
2017 src_flow_cookie, dst_flow_cookie,
2018#endif
Ken Zhu37040ea2021-09-09 21:11:15 -07002019 last_sync_jiffies);
Xiaoping Fan978b3772015-05-27 14:15:18 -07002020
Guduri Prathyushaeb31c902021-11-10 20:18:50 +05302021 if (original_cm_flags & (SFE_IPV6_CONNECTION_MATCH_FLAG_PPPOE_DECAP | SFE_IPV6_CONNECTION_MATCH_FLAG_PPPOE_ENCAP)) {
Guduri Prathyusha79a5fee2021-11-11 17:59:10 +05302022 bytes_read += snprintf(msg + bytes_read, CHAR_DEV_MSG_SIZE - bytes_read, "pppoe_session_id=\"%u\" pppoe_server_MAC=\"%pM\" ",
Guduri Prathyushaeb31c902021-11-10 20:18:50 +05302023 pppoe_session_id, pppoe_remote_mac);
2024 }
2025
2026 bytes_read += snprintf(msg + bytes_read, CHAR_DEV_MSG_SIZE - bytes_read, "/>\n");
2027
Xiaoping Fan978b3772015-05-27 14:15:18 -07002028 if (copy_to_user(buffer + *total_read, msg, CHAR_DEV_MSG_SIZE)) {
2029 return false;
2030 }
2031
2032 *length -= bytes_read;
2033 *total_read += bytes_read;
2034
Xiaoping Fan978b3772015-05-27 14:15:18 -07002035 return true;
2036}
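
/*
 * For reference, the element written above comes out on a single line and,
 * with illustrative placeholder values (flow-cookie and PPPoE attributes
 * omitted), looks roughly like:
 *
 *	<connection protocol="6" src_dev="eth0"
 *	    src_ip="2001:db8::1" src_ip_xlate="2001:db8::1"
 *	    src_port="49152" src_port_xlate="49152"
 *	    src_priority="0" src_dscp="0"
 *	    src_rx_pkts="1200" src_rx_bytes="91000" src_mark="00000000"
 *	    dest_dev="eth1"
 *	    dest_ip="2001:db8::2" dest_ip_xlate="2001:db8::2"
 *	    dest_port="443" dest_port_xlate="443"
 *	    dest_priority="0" dest_dscp="0"
 *	    dest_rx_pkts="1100" dest_rx_bytes="88000" dest_mark="00000000"
 *	    last_sync="12" />
 */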
2037
2038/*
2039 * sfe_ipv6_debug_dev_read_connections_end()
2040 * Generate part of the XML output.
2041 */
2042static bool sfe_ipv6_debug_dev_read_connections_end(struct sfe_ipv6 *si, char *buffer, char *msg, size_t *length,
2043 int *total_read, struct sfe_ipv6_debug_xml_write_state *ws)
2044{
2045 int bytes_read;
2046
2047 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE, "\t</connections>\n");
2048 if (copy_to_user(buffer + *total_read, msg, CHAR_DEV_MSG_SIZE)) {
2049 return false;
2050 }
2051
2052 *length -= bytes_read;
2053 *total_read += bytes_read;
2054
2055 ws->state++;
2056 return true;
2057}
2058
2059/*
2060 * sfe_ipv6_debug_dev_read_exceptions_start()
2061 * Generate part of the XML output.
2062 */
2063static bool sfe_ipv6_debug_dev_read_exceptions_start(struct sfe_ipv6 *si, char *buffer, char *msg, size_t *length,
2064 int *total_read, struct sfe_ipv6_debug_xml_write_state *ws)
2065{
2066 int bytes_read;
2067
2068 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE, "\t<exceptions>\n");
2069 if (copy_to_user(buffer + *total_read, msg, CHAR_DEV_MSG_SIZE)) {
2070 return false;
2071 }
2072
2073 *length -= bytes_read;
2074 *total_read += bytes_read;
2075
2076 ws->state++;
2077 return true;
2078}
2079
2080/*
2081 * sfe_ipv6_debug_dev_read_exceptions_exception()
2082 * Generate part of the XML output.
2083 */
2084static bool sfe_ipv6_debug_dev_read_exceptions_exception(struct sfe_ipv6 *si, char *buffer, char *msg, size_t *length,
2085 int *total_read, struct sfe_ipv6_debug_xml_write_state *ws)
2086{
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05302087 int i;
2088 u64 val = 0;
Xiaoping Fan978b3772015-05-27 14:15:18 -07002089
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05302090 for_each_possible_cpu(i) {
2091 const struct sfe_ipv6_stats *s = per_cpu_ptr(si->stats_pcpu, i);
2092 val += s->exception_events64[ws->iter_exception];
2093 }
Xiaoping Fan978b3772015-05-27 14:15:18 -07002094
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05302095 if (val) {
Xiaoping Fan978b3772015-05-27 14:15:18 -07002096 int bytes_read;
2097
2098 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE,
2099 "\t\t<exception name=\"%s\" count=\"%llu\" />\n",
2100 sfe_ipv6_exception_events_string[ws->iter_exception],
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05302101 val);
2102
Xiaoping Fan978b3772015-05-27 14:15:18 -07002103 if (copy_to_user(buffer + *total_read, msg, CHAR_DEV_MSG_SIZE)) {
2104 return false;
2105 }
2106
2107 *length -= bytes_read;
2108 *total_read += bytes_read;
2109 }
2110
2111 ws->iter_exception++;
2112 if (ws->iter_exception >= SFE_IPV6_EXCEPTION_EVENT_LAST) {
2113 ws->iter_exception = 0;
2114 ws->state++;
2115 }
2116
2117 return true;
2118}
2119
2120/*
2121 * sfe_ipv6_debug_dev_read_exceptions_end()
2122 * Generate part of the XML output.
2123 */
2124static bool sfe_ipv6_debug_dev_read_exceptions_end(struct sfe_ipv6 *si, char *buffer, char *msg, size_t *length,
2125 int *total_read, struct sfe_ipv6_debug_xml_write_state *ws)
2126{
2127 int bytes_read;
2128
2129 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE, "\t</exceptions>\n");
2130 if (copy_to_user(buffer + *total_read, msg, CHAR_DEV_MSG_SIZE)) {
2131 return false;
2132 }
2133
2134 *length -= bytes_read;
2135 *total_read += bytes_read;
2136
2137 ws->state++;
2138 return true;
2139}
2140
2141/*
2142 * sfe_ipv6_debug_dev_read_stats()
2143 * Generate part of the XML output.
2144 */
2145static bool sfe_ipv6_debug_dev_read_stats(struct sfe_ipv6 *si, char *buffer, char *msg, size_t *length,
2146 int *total_read, struct sfe_ipv6_debug_xml_write_state *ws)
2147{
2148 int bytes_read;
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05302149 struct sfe_ipv6_stats stats;
2150 unsigned int num_conn;
2151
2152 sfe_ipv6_update_summary_stats(si, &stats);
Xiaoping Fan978b3772015-05-27 14:15:18 -07002153
2154 spin_lock_bh(&si->lock);
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05302155 num_conn = si->num_connections;
Xiaoping Fan978b3772015-05-27 14:15:18 -07002156 spin_unlock_bh(&si->lock);
2157
2158 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE, "\t<stats "
2159 "num_connections=\"%u\" "
Suruchi Suman23a279d2021-11-16 15:13:09 +05302160 "pkts_dropped=\"%llu\" "
Xiaoping Fan978b3772015-05-27 14:15:18 -07002161 "pkts_forwarded=\"%llu\" pkts_not_forwarded=\"%llu\" "
2162 "create_requests=\"%llu\" create_collisions=\"%llu\" "
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05302163 "create_failures=\"%llu\" "
Xiaoping Fan978b3772015-05-27 14:15:18 -07002164 "destroy_requests=\"%llu\" destroy_misses=\"%llu\" "
2165 "flushes=\"%llu\" "
Guduri Prathyusha647fe3e2021-11-22 19:17:51 +05302166 "hash_hits=\"%llu\" hash_reorders=\"%llu\" "
2167 "pppoe_encap_pkts_fwded=\"%llu\" "
Guduri Prathyusha034d6352022-01-12 16:49:04 +05302168 "pppoe_decap_pkts_fwded=\"%llu\" "
2169 "pppoe_bridge_pkts_fwded=\"%llu\" />\n",
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05302170
2171 num_conn,
Suruchi Suman23a279d2021-11-16 15:13:09 +05302172 stats.packets_dropped64,
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05302173 stats.packets_forwarded64,
2174 stats.packets_not_forwarded64,
2175 stats.connection_create_requests64,
2176 stats.connection_create_collisions64,
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05302177 stats.connection_create_failures64,
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05302178 stats.connection_destroy_requests64,
2179 stats.connection_destroy_misses64,
2180 stats.connection_flushes64,
2181 stats.connection_match_hash_hits64,
Guduri Prathyusha647fe3e2021-11-22 19:17:51 +05302182 stats.connection_match_hash_reorders64,
2183 stats.pppoe_encap_packets_forwarded64,
Guduri Prathyusha034d6352022-01-12 16:49:04 +05302184 stats.pppoe_decap_packets_forwarded64,
2185 stats.pppoe_bridge_packets_forwarded64);
Xiaoping Fan978b3772015-05-27 14:15:18 -07002186 if (copy_to_user(buffer + *total_read, msg, CHAR_DEV_MSG_SIZE)) {
2187 return false;
2188 }
2189
2190 *length -= bytes_read;
2191 *total_read += bytes_read;
2192
2193 ws->state++;
2194 return true;
2195}
2196
2197/*
2198 * sfe_ipv6_debug_dev_read_end()
2199 * Generate part of the XML output.
2200 */
2201static bool sfe_ipv6_debug_dev_read_end(struct sfe_ipv6 *si, char *buffer, char *msg, size_t *length,
2202 int *total_read, struct sfe_ipv6_debug_xml_write_state *ws)
2203{
2204 int bytes_read;
2205
2206 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE, "</sfe_ipv6>\n");
2207 if (copy_to_user(buffer + *total_read, msg, CHAR_DEV_MSG_SIZE)) {
2208 return false;
2209 }
2210
2211 *length -= bytes_read;
2212 *total_read += bytes_read;
2213
2214 ws->state++;
2215 return true;
2216}
2217
2218/*
2219 * Array of write functions that write various XML elements that correspond to
2220 * our XML output state machine.
2221 */
2222static sfe_ipv6_debug_xml_write_method_t sfe_ipv6_debug_xml_write_methods[SFE_IPV6_DEBUG_XML_STATE_DONE] = {
2223 sfe_ipv6_debug_dev_read_start,
2224 sfe_ipv6_debug_dev_read_connections_start,
2225 sfe_ipv6_debug_dev_read_connections_connection,
2226 sfe_ipv6_debug_dev_read_connections_end,
2227 sfe_ipv6_debug_dev_read_exceptions_start,
2228 sfe_ipv6_debug_dev_read_exceptions_exception,
2229 sfe_ipv6_debug_dev_read_exceptions_end,
2230 sfe_ipv6_debug_dev_read_stats,
2231 sfe_ipv6_debug_dev_read_end,
2232};
2233
2234/*
2235 * sfe_ipv6_debug_dev_read()
2236 * Send info to userspace upon read request from user
2237 */
2238static ssize_t sfe_ipv6_debug_dev_read(struct file *filp, char *buffer, size_t length, loff_t *offset)
2239{
2240 char msg[CHAR_DEV_MSG_SIZE];
2241 int total_read = 0;
2242 struct sfe_ipv6_debug_xml_write_state *ws;
2243 struct sfe_ipv6 *si = &__si6;
2244
2245 ws = (struct sfe_ipv6_debug_xml_write_state *)filp->private_data;
2246 while ((ws->state != SFE_IPV6_DEBUG_XML_STATE_DONE) && (length > CHAR_DEV_MSG_SIZE)) {
2247 if ((sfe_ipv6_debug_xml_write_methods[ws->state])(si, buffer, msg, &length, &total_read, ws)) {
2248 continue;
2249 }
2250 }
Xiaoping Fan978b3772015-05-27 14:15:18 -07002251 return total_read;
2252}
2253
2254/*
Xiaoping Fan978b3772015-05-27 14:15:18 -07002255 * sfe_ipv6_debug_dev_open()
2256 */
2257static int sfe_ipv6_debug_dev_open(struct inode *inode, struct file *file)
2258{
2259 struct sfe_ipv6_debug_xml_write_state *ws;
2260
2261 ws = (struct sfe_ipv6_debug_xml_write_state *)file->private_data;
2262 if (ws) {
2263 return 0;
2264 }
2265
2266 ws = kzalloc(sizeof(struct sfe_ipv6_debug_xml_write_state), GFP_KERNEL);
2267 if (!ws) {
2268 return -ENOMEM;
2269 }
2270
2271 ws->state = SFE_IPV6_DEBUG_XML_STATE_START;
2272 file->private_data = ws;
2273
2274 return 0;
2275}
2276
2277/*
2278 * sfe_ipv6_debug_dev_release()
2279 */
2280static int sfe_ipv6_debug_dev_release(struct inode *inode, struct file *file)
2281{
2282 struct sfe_ipv6_debug_xml_write_state *ws;
Xiaoping Fan978b3772015-05-27 14:15:18 -07002283
2284 ws = (struct sfe_ipv6_debug_xml_write_state *)file->private_data;
Xiaoping Fan34586472015-07-03 02:20:35 -07002285 if (ws) {
2286 /*
2287 * We've finished with our output so free the write state.
2288 */
2289 kfree(ws);
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05302290 file->private_data = NULL;
Xiaoping Fan978b3772015-05-27 14:15:18 -07002291 }
2292
Xiaoping Fan978b3772015-05-27 14:15:18 -07002293 return 0;
2294}
2295
2296/*
2297 * File operations used in the debug char device
2298 */
2299static struct file_operations sfe_ipv6_debug_dev_fops = {
2300 .read = sfe_ipv6_debug_dev_read,
Xiaoping Fan978b3772015-05-27 14:15:18 -07002301 .open = sfe_ipv6_debug_dev_open,
2302 .release = sfe_ipv6_debug_dev_release
2303};
2304
2305#ifdef CONFIG_NF_FLOW_COOKIE
2306/*
2307 * sfe_ipv6_register_flow_cookie_cb
2308 * Register a function with SFE so that SFE can use it to configure the flow cookie for a flow.
2309 *
2310 * A hardware driver which supports flow cookies should register a callback function with SFE.
2311 * SFE can then use this function to configure the flow cookie for a flow.
2312 * return: 0, success; !=0, fail
2313 */
2314int sfe_ipv6_register_flow_cookie_cb(sfe_ipv6_flow_cookie_set_func_t cb)
2315{
2316 struct sfe_ipv6 *si = &__si6;
2317
2318 BUG_ON(!cb);
2319
2320 if (si->flow_cookie_set_func) {
2321 return -1;
2322 }
2323
2324 rcu_assign_pointer(si->flow_cookie_set_func, cb);
2325 return 0;
2326}
2327
2328/*
2329 * sfe_ipv6_unregister_flow_cookie_cb
2330 * Unregister the function that is used to configure the flow cookie for a flow.
2331 *
2332 * return: 0, success; !=0, fail
2333 */
2334int sfe_ipv6_unregister_flow_cookie_cb(sfe_ipv6_flow_cookie_set_func_t cb)
2335{
2336 struct sfe_ipv6 *si = &__si6;
2337
2338 RCU_INIT_POINTER(si->flow_cookie_set_func, NULL);
2339 return 0;
2340}
Xiaoping Fan640faf42015-08-28 15:50:55 -07002341
2342/*
2343 * sfe_ipv6_get_flow_cookie()
2344 */
2345static ssize_t sfe_ipv6_get_flow_cookie(struct device *dev,
2346 struct device_attribute *attr,
2347 char *buf)
2348{
2349 struct sfe_ipv6 *si = &__si6;
Xiaoping Fan01c67cc2015-11-09 11:31:57 -08002350 return snprintf(buf, (ssize_t)PAGE_SIZE, "%d\n", si->flow_cookie_enable);
Xiaoping Fan640faf42015-08-28 15:50:55 -07002351}
2352
2353/*
2354 * sfe_ipv6_set_flow_cookie()
2355 */
2356static ssize_t sfe_ipv6_set_flow_cookie(struct device *dev,
2357 struct device_attribute *attr,
2358 const char *buf, size_t size)
2359{
2360 struct sfe_ipv6 *si = &__si6;
Ken Zhu137722d2021-09-23 17:57:36 -07002361 si->flow_cookie_enable = simple_strtol(buf, NULL, 0);
Xiaoping Fan640faf42015-08-28 15:50:55 -07002362
2363 return size;
2364}
2365
2366/*
2367 * sysfs attributes.
2368 */
2369static const struct device_attribute sfe_ipv6_flow_cookie_attr =
Xiaoping Fane70da412016-02-26 16:47:57 -08002370 __ATTR(flow_cookie_enable, S_IWUSR | S_IRUGO, sfe_ipv6_get_flow_cookie, sfe_ipv6_set_flow_cookie);
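
/*
 * The "sfe_ipv6" kobject is created without a parent in sfe_ipv6_init() below,
 * so this attribute should appear as /sys/sfe_ipv6/flow_cookie_enable; writing
 * an integer to it simply updates si->flow_cookie_enable.
 */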
Xiaoping Fan978b3772015-05-27 14:15:18 -07002371#endif /*CONFIG_NF_FLOW_COOKIE*/
2372
Ken Zhu137722d2021-09-23 17:57:36 -07002373/*
2374 * sfe_ipv6_get_cpu()
2375 */
2376static ssize_t sfe_ipv6_get_cpu(struct device *dev,
2377 struct device_attribute *attr,
2378 char *buf)
2379{
2380 struct sfe_ipv6 *si = &__si6;
2381 return snprintf(buf, (ssize_t)PAGE_SIZE, "%d\n", si->work_cpu);
2382}
2383
2384/*
Wayne Tanbb7f1782021-12-13 11:16:04 -08002385 * sfe_ipv6_set_cpu()
Ken Zhu137722d2021-09-23 17:57:36 -07002386 */
2387static ssize_t sfe_ipv6_set_cpu(struct device *dev,
2388 struct device_attribute *attr,
2389 const char *buf, size_t size)
2390{
2391 struct sfe_ipv6 *si = &__si6;
2392 int work_cpu;
2393
2394 work_cpu = simple_strtol(buf, NULL, 0);
2395 if ((work_cpu >= 0) && (work_cpu <= NR_CPUS)) {
2396 si->work_cpu = work_cpu;
2397 } else {
2398 dev_err(dev, "%s is not in valid range[0,%d]", buf, NR_CPUS);
2399 }
2400
2401 return size;
2402}
2403/*
2404 * sysfs attributes.
2405 */
2406static const struct device_attribute sfe_ipv6_cpu_attr =
2407 __ATTR(stat_work_cpu, S_IWUSR | S_IRUGO, sfe_ipv6_get_cpu, sfe_ipv6_set_cpu);
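
/*
 * The sysfs attribute above is created under the parentless "sfe_ipv6" kobject
 * in sfe_ipv6_init() below, so it should appear as /sys/sfe_ipv6/stat_work_cpu.
 * For example, "echo 2 > /sys/sfe_ipv6/stat_work_cpu" moves the periodic
 * statistics work onto CPU 2 at its next reschedule.
 */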
2408
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05302409 /*
2410 * sfe_ipv6_conn_match_hash_init()
2411 * Initialize the connection match hash lists.
2412 */
2413static void sfe_ipv6_conn_match_hash_init(struct sfe_ipv6 *si, int len)
2414{
2415 struct hlist_head *hash_list = si->hlist_conn_match_hash_head;
2416 int i;
2417
2418 for (i = 0; i < len; i++) {
2419 INIT_HLIST_HEAD(&hash_list[i]);
2420 }
2421}
2422
Suruchi Suman23a279d2021-11-16 15:13:09 +05302423#ifdef SFE_PROCESS_LOCAL_OUT
2424/*
2425 * sfe_ipv6_local_out()
2426 * Called for packets from ip_local_out() - post encapsulation & other packets
2427 */
2428static unsigned int sfe_ipv6_local_out(void *priv,
2429 struct sk_buff *skb,
2430 const struct nf_hook_state *nhs)
2431{
2432 DEBUG_TRACE("sfe: sfe_ipv6_local_out hook called.\n");
2433
2434 if (likely(skb->skb_iif)) {
2435 return sfe_ipv6_recv(skb->dev, skb, NULL, true) ? NF_STOLEN : NF_ACCEPT;
2436 }
2437
2438 return NF_ACCEPT;
2439}
2440
2441/*
2442 * struct nf_hook_ops sfe_ipv6_ops_local_out[]
2443 * Hooks into netfilter local out packet monitoring points.
2444 */
2445static struct nf_hook_ops sfe_ipv6_ops_local_out[] __read_mostly = {
2446
2447 /*
2448 * Local out routing hook is used to monitor packets.
2449 */
2450 {
2451 .hook = sfe_ipv6_local_out,
2452 .pf = PF_INET6,
2453 .hooknum = NF_INET_LOCAL_OUT,
2454 .priority = NF_IP6_PRI_FIRST,
2455 },
2456};
2457#endif
2458
Xiaoping Fan978b3772015-05-27 14:15:18 -07002459/*
2460 * sfe_ipv6_init()
2461 */
Ratheesh Kannoth24fb1db2021-10-20 07:28:06 +05302462int sfe_ipv6_init(void)
Xiaoping Fan978b3772015-05-27 14:15:18 -07002463{
2464 struct sfe_ipv6 *si = &__si6;
2465 int result = -1;
2466
2467 DEBUG_INFO("SFE IPv6 init\n");
2468
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05302469 sfe_ipv6_conn_match_hash_init(si, ARRAY_SIZE(si->hlist_conn_match_hash_head));
2470
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05302471 si->stats_pcpu = alloc_percpu_gfp(struct sfe_ipv6_stats, GFP_KERNEL | __GFP_ZERO);
2472 if (!si->stats_pcpu) {
2473 DEBUG_ERROR("failed to allocate stats memory for sfe_ipv6\n");
2474 goto exit0;
2475 }
2476
Xiaoping Fan978b3772015-05-27 14:15:18 -07002477 /*
2478 * Create sys/sfe_ipv6
2479 */
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302480 si->sys_ipv6 = kobject_create_and_add("sfe_ipv6", NULL);
2481 if (!si->sys_ipv6) {
Xiaoping Fan978b3772015-05-27 14:15:18 -07002482 DEBUG_ERROR("failed to register sfe_ipv6\n");
2483 goto exit1;
2484 }
2485
2486 /*
2487 * Create files, one for each parameter supported by this module.
2488 */
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302489 result = sysfs_create_file(si->sys_ipv6, &sfe_ipv6_debug_dev_attr.attr);
Xiaoping Fan978b3772015-05-27 14:15:18 -07002490 if (result) {
2491 DEBUG_ERROR("failed to register debug dev file: %d\n", result);
2492 goto exit2;
2493 }
2494
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302495 result = sysfs_create_file(si->sys_ipv6, &sfe_ipv6_cpu_attr.attr);
Ken Zhu137722d2021-09-23 17:57:36 -07002496 if (result) {
2497 DEBUG_ERROR("failed to register debug dev file: %d\n", result);
2498 goto exit3;
2499 }
2500
Xiaoping Fan640faf42015-08-28 15:50:55 -07002501#ifdef CONFIG_NF_FLOW_COOKIE
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302502 result = sysfs_create_file(si->sys_ipv6, &sfe_ipv6_flow_cookie_attr.attr);
Xiaoping Fan640faf42015-08-28 15:50:55 -07002503 if (result) {
2504 DEBUG_ERROR("failed to register flow cookie enable file: %d\n", result);
Ken Zhu137722d2021-09-23 17:57:36 -07002505 goto exit4;
Xiaoping Fan640faf42015-08-28 15:50:55 -07002506 }
2507#endif /* CONFIG_NF_FLOW_COOKIE */
2508
Suruchi Suman23a279d2021-11-16 15:13:09 +05302509#ifdef SFE_PROCESS_LOCAL_OUT
2510#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0))
2511 result = nf_register_hooks(sfe_ipv6_ops_local_out, ARRAY_SIZE(sfe_ipv6_ops_local_out));
2512#else
2513 result = nf_register_net_hooks(&init_net, sfe_ipv6_ops_local_out, ARRAY_SIZE(sfe_ipv6_ops_local_out));
2514#endif
2515#endif
2516 if (result < 0) {
2517 DEBUG_ERROR("can't register nf local out hook: %d\n", result);
2518 goto exit5;
2519 } else {
2520 DEBUG_ERROR("Register nf local out hook success: %d\n", result);
2521 }
2522
Xiaoping Fan978b3772015-05-27 14:15:18 -07002523 /*
2524 * Register our debug char device.
2525 */
2526 result = register_chrdev(0, "sfe_ipv6", &sfe_ipv6_debug_dev_fops);
2527 if (result < 0) {
2528 DEBUG_ERROR("Failed to register chrdev: %d\n", result);
Suruchi Suman23a279d2021-11-16 15:13:09 +05302529 goto exit6;
Xiaoping Fan978b3772015-05-27 14:15:18 -07002530 }
2531
2532 si->debug_dev = result;
Ken Zhu137722d2021-09-23 17:57:36 -07002533 si->work_cpu = WORK_CPU_UNBOUND;
Xiaoping Fan978b3772015-05-27 14:15:18 -07002534
2535 /*
Ken Zhu137722d2021-09-23 17:57:36 -07002536 * Create work to handle periodic statistics.
Xiaoping Fan978b3772015-05-27 14:15:18 -07002537 */
Ken Zhu137722d2021-09-23 17:57:36 -07002538 INIT_DELAYED_WORK(&(si->sync_dwork), sfe_ipv6_periodic_sync);
2539 schedule_delayed_work_on(si->work_cpu, &(si->sync_dwork), ((HZ + 99) / 100));
Xiaoping Fan978b3772015-05-27 14:15:18 -07002540 spin_lock_init(&si->lock);
2541
2542 return 0;
2543
Suruchi Suman23a279d2021-11-16 15:13:09 +05302544exit6:
2545#ifdef SFE_PROCESS_LOCAL_OUT
2546#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0))
2547 DEBUG_TRACE("sfe: Unregister local out hook\n");
2548 nf_unregister_hooks(sfe_ipv6_ops_local_out, ARRAY_SIZE(sfe_ipv6_ops_local_out));
2549#else
2550 DEBUG_TRACE("sfe: Unregister local out hook\n");
2551 nf_unregister_net_hooks(&init_net, sfe_ipv6_ops_local_out, ARRAY_SIZE(sfe_ipv6_ops_local_out));
2552#endif
2553#endif
2554
Ken Zhu137722d2021-09-23 17:57:36 -07002555exit5:
Xiaoping Fan640faf42015-08-28 15:50:55 -07002556#ifdef CONFIG_NF_FLOW_COOKIE
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302557 sysfs_remove_file(si->sys_ipv6, &sfe_ipv6_flow_cookie_attr.attr);
Xiaoping Fan640faf42015-08-28 15:50:55 -07002558
Ken Zhu137722d2021-09-23 17:57:36 -07002559exit4:
Xiaoping Fan640faf42015-08-28 15:50:55 -07002560#endif /* CONFIG_NF_FLOW_COOKIE */
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302561 sysfs_remove_file(si->sys_ipv6, &sfe_ipv6_cpu_attr.attr);
Suruchi Suman23a279d2021-11-16 15:13:09 +05302562
Ken Zhu137722d2021-09-23 17:57:36 -07002563exit3:
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302564 sysfs_remove_file(si->sys_ipv6, &sfe_ipv6_debug_dev_attr.attr);
Xiaoping Fan978b3772015-05-27 14:15:18 -07002565
2566exit2:
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302567 kobject_put(si->sys_ipv6);
Xiaoping Fan978b3772015-05-27 14:15:18 -07002568
2569exit1:
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05302570 free_percpu(si->stats_pcpu);
2571
2572exit0:
Xiaoping Fan978b3772015-05-27 14:15:18 -07002573 return result;
2574}
2575
2576/*
2577 * sfe_ipv6_exit()
2578 */
Ratheesh Kannoth24fb1db2021-10-20 07:28:06 +05302579void sfe_ipv6_exit(void)
Xiaoping Fan978b3772015-05-27 14:15:18 -07002580{
2581 struct sfe_ipv6 *si = &__si6;
2582
2583 DEBUG_INFO("SFE IPv6 exit\n");
2584
2585 /*
2586 * Destroy all connections.
2587 */
2588 sfe_ipv6_destroy_all_rules_for_dev(NULL);
2589
Ken Zhu137722d2021-09-23 17:57:36 -07002590 cancel_delayed_work(&si->sync_dwork);
Xiaoping Fan978b3772015-05-27 14:15:18 -07002591
2592 unregister_chrdev(si->debug_dev, "sfe_ipv6");
2593
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05302594 free_percpu(si->stats_pcpu);
2595
Suruchi Suman23a279d2021-11-16 15:13:09 +05302596#ifdef SFE_PROCESS_LOCAL_OUT
2597#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0))
2598 DEBUG_TRACE("sfe: Unregister local out hook\n");
2599 nf_unregister_hooks(sfe_ipv6_ops_local_out, ARRAY_SIZE(sfe_ipv6_ops_local_out));
2600#else
2601 DEBUG_TRACE("sfe: Unregister local out hook\n");
2602 nf_unregister_net_hooks(&init_net, sfe_ipv6_ops_local_out, ARRAY_SIZE(sfe_ipv6_ops_local_out));
2603#endif
2604#endif
2605
Xiaoping Fan640faf42015-08-28 15:50:55 -07002606#ifdef CONFIG_NF_FLOW_COOKIE
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302607 sysfs_remove_file(si->sys_ipv6, &sfe_ipv6_flow_cookie_attr.attr);
Xiaoping Fan640faf42015-08-28 15:50:55 -07002608#endif /* CONFIG_NF_FLOW_COOKIE */
Ken Zhu137722d2021-09-23 17:57:36 -07002609
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302610 sysfs_remove_file(si->sys_ipv6, &sfe_ipv6_cpu_attr.attr);
Ken Zhu137722d2021-09-23 17:57:36 -07002611
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302612 sysfs_remove_file(si->sys_ipv6, &sfe_ipv6_debug_dev_attr.attr);
Xiaoping Fan978b3772015-05-27 14:15:18 -07002613
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302614 kobject_put(si->sys_ipv6);
Xiaoping Fan978b3772015-05-27 14:15:18 -07002615}
2616
Xiaoping Fan978b3772015-05-27 14:15:18 -07002617#ifdef CONFIG_NF_FLOW_COOKIE
2618EXPORT_SYMBOL(sfe_ipv6_register_flow_cookie_cb);
2619EXPORT_SYMBOL(sfe_ipv6_unregister_flow_cookie_cb);
2620#endif