/*
 * sfe_ipv6.c
 *	Shortcut forwarding engine - IPv6 support.
 *
 * Copyright (c) 2015-2016, 2019-2020, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/module.h>
#include <linux/sysfs.h>
#include <linux/skbuff.h>
#include <linux/icmp.h>
#include <net/tcp.h>
#include <linux/etherdevice.h>
#include <linux/version.h>
#include <net/udp.h>
#include <net/vxlan.h>
#include <linux/refcount.h>
#include <linux/netfilter.h>
#include <linux/inetdevice.h>
#include <linux/netfilter_ipv6.h>
#include <linux/seqlock.h>
#include <net/protocol.h>
#include <net/addrconf.h>
#include <net/gre.h>

#include "sfe_debug.h"
#include "sfe_api.h"
#include "sfe.h"
#include "sfe_flow_cookie.h"
#include "sfe_ipv6.h"
#include "sfe_ipv6_udp.h"
#include "sfe_ipv6_tcp.h"
#include "sfe_ipv6_icmp.h"
#include "sfe_pppoe.h"
#include "sfe_ipv6_tunipip6.h"
#include "sfe_ipv6_gre.h"

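/*
 * Copy a full 128-bit IPv6 address. Note the (src, dest) argument order,
 * which is the reverse of memcpy()'s (dest, src).
 */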
#define sfe_ipv6_addr_copy(src, dest) memcpy((void *)(dest), (void *)(src), 16)

static char *sfe_ipv6_exception_events_string[SFE_IPV6_EXCEPTION_EVENT_LAST] = {
	"UDP_HEADER_INCOMPLETE",
	"UDP_NO_CONNECTION",
	"UDP_IP_OPTIONS_OR_INITIAL_FRAGMENT",
	"UDP_SMALL_TTL",
	"UDP_NEEDS_FRAGMENTATION",
	"TCP_HEADER_INCOMPLETE",
	"TCP_NO_CONNECTION_SLOW_FLAGS",
	"TCP_NO_CONNECTION_FAST_FLAGS",
	"TCP_IP_OPTIONS_OR_INITIAL_FRAGMENT",
	"TCP_SMALL_TTL",
	"TCP_NEEDS_FRAGMENTATION",
	"TCP_FLAGS",
	"TCP_SEQ_EXCEEDS_RIGHT_EDGE",
	"TCP_SMALL_DATA_OFFS",
	"TCP_BAD_SACK",
	"TCP_BIG_DATA_OFFS",
	"TCP_SEQ_BEFORE_LEFT_EDGE",
	"TCP_ACK_EXCEEDS_RIGHT_EDGE",
	"TCP_ACK_BEFORE_LEFT_EDGE",
	"ICMP_HEADER_INCOMPLETE",
	"ICMP_UNHANDLED_TYPE",
	"ICMP_IPV6_HEADER_INCOMPLETE",
	"ICMP_IPV6_NON_V6",
	"ICMP_IPV6_IP_OPTIONS_INCOMPLETE",
	"ICMP_IPV6_UDP_HEADER_INCOMPLETE",
	"ICMP_IPV6_TCP_HEADER_INCOMPLETE",
	"ICMP_IPV6_UNHANDLED_PROTOCOL",
	"ICMP_NO_CONNECTION",
	"ICMP_FLUSHED_CONNECTION",
	"HEADER_INCOMPLETE",
	"BAD_TOTAL_LENGTH",
	"NON_V6",
	"NON_INITIAL_FRAGMENT",
	"DATAGRAM_INCOMPLETE",
	"IP_OPTIONS_INCOMPLETE",
	"UNHANDLED_PROTOCOL",
	"FLOW_COOKIE_ADD_FAIL",
	"NO_HEADROOM",
	"INVALID_PPPOE_SESSION",
	"INCORRECT_PPPOE_PARSING",
	"PPPOE_NOT_SET_IN_CME",
	"INGRESS_VLAN_TAG_MISMATCH",
	"INVALID_SOURCE_INTERFACE",
	"TUNIPIP6_HEADER_INCOMPLETE",
	"TUNIPIP6_NO_CONNECTION",
	"TUNIPIP6_IP_OPTIONS_OR_INITIAL_FRAGMENT",
	"TUNIPIP6_SMALL_TTL",
	"TUNIPIP6_NEEDS_FRAGMENTATION",
	"TUNIPIP6_SYNC_ON_FIND",
	"GRE_HEADER_INCOMPLETE",
	"GRE_NO_CONNECTION",
	"GRE_IP_OPTIONS_OR_INITIAL_FRAGMENT",
	"GRE_SMALL_TTL",
	"GRE_NEEDS_FRAGMENTATION"
};

static struct sfe_ipv6 __si6;

/*
 * sfe_ipv6_get_debug_dev()
 */
static ssize_t sfe_ipv6_get_debug_dev(struct device *dev, struct device_attribute *attr, char *buf);

/*
 * sysfs attributes.
 */
static const struct device_attribute sfe_ipv6_debug_dev_attr =
	__ATTR(debug_dev, S_IWUSR | S_IRUGO, sfe_ipv6_get_debug_dev, NULL);

/*
 * sfe_ipv6_get_connection_match_hash()
 *	Generate the hash used in connection match lookups.
 */
static inline unsigned int sfe_ipv6_get_connection_match_hash(struct net_device *dev, u8 protocol,
							      struct sfe_ipv6_addr *src_ip, __be16 src_port,
							      struct sfe_ipv6_addr *dest_ip, __be16 dest_port)
{
	u32 idx, hash = 0;

	for (idx = 0; idx < 4; idx++) {
		hash ^= src_ip->addr[idx] ^ dest_ip->addr[idx];
	}
	hash = hash ^ protocol ^ ntohs(src_port ^ dest_port);
	return ((hash >> SFE_IPV6_CONNECTION_HASH_SHIFT) ^ hash) & SFE_IPV6_CONNECTION_HASH_MASK;
}

/*
 * sfe_ipv6_find_connection_match_rcu()
 *	Get the IPv6 flow match info that corresponds to a particular 5-tuple.
 */
struct sfe_ipv6_connection_match *
sfe_ipv6_find_connection_match_rcu(struct sfe_ipv6 *si, struct net_device *dev, u8 protocol,
				   struct sfe_ipv6_addr *src_ip, __be16 src_port,
				   struct sfe_ipv6_addr *dest_ip, __be16 dest_port)
{
	struct sfe_ipv6_connection_match *cm = NULL;
	unsigned int conn_match_idx;
	struct hlist_head *lhead;

	WARN_ON_ONCE(!rcu_read_lock_held());

	conn_match_idx = sfe_ipv6_get_connection_match_hash(dev, protocol, src_ip, src_port, dest_ip, dest_port);

	lhead = &si->hlist_conn_match_hash_head[conn_match_idx];

	/*
	 * Hopefully the first entry is the one we want.
	 */
	hlist_for_each_entry_rcu(cm, lhead, hnode) {
		if ((cm->match_dest_port != dest_port) ||
		    (!sfe_ipv6_addr_equal(cm->match_src_ip, src_ip)) ||
		    (!sfe_ipv6_addr_equal(cm->match_dest_ip, dest_ip)) ||
		    (cm->match_protocol != protocol) ||
		    (cm->match_dev != dev)) {
			continue;
		}

		this_cpu_inc(si->stats_pcpu->connection_match_hash_hits64);

		break;
	}

	return cm;
}

/*
 * sfe_ipv6_connection_match_update_summary_stats()
 *	Update the summary stats for a connection match entry.
 */
static inline void sfe_ipv6_connection_match_update_summary_stats(struct sfe_ipv6_connection_match *cm,
								   u32 *packets, u32 *bytes)
{
	u32 packet_count, byte_count;

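	/*
	 * Snapshot the atomic counters and subtract exactly the values read,
	 * rather than resetting them to zero, so that increments racing in
	 * from the forwarding path between the read and the subtract are not
	 * lost.
	 */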
	packet_count = atomic_read(&cm->rx_packet_count);
	cm->rx_packet_count64 += packet_count;
	atomic_sub(packet_count, &cm->rx_packet_count);

	byte_count = atomic_read(&cm->rx_byte_count);
	cm->rx_byte_count64 += byte_count;
	atomic_sub(byte_count, &cm->rx_byte_count);

	*packets = packet_count;
	*bytes = byte_count;
}

/*
 * sfe_ipv6_connection_match_compute_translations()
 *	Compute port and address translations for a connection match entry.
 */
static void sfe_ipv6_connection_match_compute_translations(struct sfe_ipv6_connection_match *cm)
{
	u32 diff[9];
	u32 *idx_32;
	u16 *idx_16;

	/*
	 * Before we insert the entry look to see if this is tagged as doing address
	 * translations. If it is then work out the adjustment that we need to apply
	 * to the transport checksum.
	 */
	if (cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_XLATE_SRC) {
		u32 adj = 0;
		u32 carry = 0;

		/*
		 * Precompute an incremental checksum adjustment so we can
		 * edit packets in this stream very quickly. The algorithm is from RFC1624.
		 */
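		/*
		 * diff[] holds the original source address and port followed by
		 * the one's complements of the translated port and address.
		 * Summing these words and folding the sum to 16 bits yields the
		 * incremental update value of RFC 1624 that is applied to the
		 * transport checksum of each forwarded packet.
		 */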
		idx_32 = diff;
		*(idx_32++) = cm->match_src_ip[0].addr[0];
		*(idx_32++) = cm->match_src_ip[0].addr[1];
		*(idx_32++) = cm->match_src_ip[0].addr[2];
		*(idx_32++) = cm->match_src_ip[0].addr[3];

		idx_16 = (u16 *)idx_32;
		*(idx_16++) = cm->match_src_port;
		*(idx_16++) = ~cm->xlate_src_port;
		idx_32 = (u32 *)idx_16;

		*(idx_32++) = ~cm->xlate_src_ip[0].addr[0];
		*(idx_32++) = ~cm->xlate_src_ip[0].addr[1];
		*(idx_32++) = ~cm->xlate_src_ip[0].addr[2];
		*(idx_32++) = ~cm->xlate_src_ip[0].addr[3];

		/*
		 * When we compute this, fold it down to a 16-bit offset; that
		 * way we avoid having to double-fold the twos-complement
		 * result because the addition of two 16-bit values cannot
		 * cause a double wrap-around!
		 */
		for (idx_32 = diff; idx_32 < diff + 9; idx_32++) {
			u32 w = *idx_32;
			adj += carry;
			adj += w;
			carry = (w > adj);
		}
		adj += carry;
		adj = (adj & 0xffff) + (adj >> 16);
		adj = (adj & 0xffff) + (adj >> 16);
		cm->xlate_src_csum_adjustment = (u16)adj;
	}

	if (cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_XLATE_DEST) {
		u32 adj = 0;
		u32 carry = 0;

		/*
		 * Precompute an incremental checksum adjustment so we can
		 * edit packets in this stream very quickly. The algorithm is from RFC1624.
		 */
		idx_32 = diff;
		*(idx_32++) = cm->match_dest_ip[0].addr[0];
		*(idx_32++) = cm->match_dest_ip[0].addr[1];
		*(idx_32++) = cm->match_dest_ip[0].addr[2];
		*(idx_32++) = cm->match_dest_ip[0].addr[3];

		idx_16 = (u16 *)idx_32;
		*(idx_16++) = cm->match_dest_port;
		*(idx_16++) = ~cm->xlate_dest_port;
		idx_32 = (u32 *)idx_16;

		*(idx_32++) = ~cm->xlate_dest_ip[0].addr[0];
		*(idx_32++) = ~cm->xlate_dest_ip[0].addr[1];
		*(idx_32++) = ~cm->xlate_dest_ip[0].addr[2];
		*(idx_32++) = ~cm->xlate_dest_ip[0].addr[3];

		/*
		 * When we compute this, fold it down to a 16-bit offset; that
		 * way we avoid having to double-fold the twos-complement
		 * result because the addition of two 16-bit values cannot
		 * cause a double wrap-around!
		 */
		for (idx_32 = diff; idx_32 < diff + 9; idx_32++) {
			u32 w = *idx_32;
			adj += carry;
			adj += w;
			carry = (w > adj);
		}
		adj += carry;
		adj = (adj & 0xffff) + (adj >> 16);
		adj = (adj & 0xffff) + (adj >> 16);
		cm->xlate_dest_csum_adjustment = (u16)adj;
	}
}

/*
 * sfe_ipv6_update_summary_stats()
 *	Update the summary stats.
 */
static void sfe_ipv6_update_summary_stats(struct sfe_ipv6 *si, struct sfe_ipv6_stats *stats)
{
	int i = 0;

	memset(stats, 0, sizeof(*stats));

	for_each_possible_cpu(i) {
		const struct sfe_ipv6_stats *s = per_cpu_ptr(si->stats_pcpu, i);

		stats->connection_create_requests64 += s->connection_create_requests64;
		stats->connection_create_collisions64 += s->connection_create_collisions64;
		stats->connection_create_failures64 += s->connection_create_failures64;
		stats->connection_destroy_requests64 += s->connection_destroy_requests64;
		stats->connection_destroy_misses64 += s->connection_destroy_misses64;
		stats->connection_match_hash_hits64 += s->connection_match_hash_hits64;
		stats->connection_match_hash_reorders64 += s->connection_match_hash_reorders64;
		stats->connection_flushes64 += s->connection_flushes64;
		stats->packets_dropped64 += s->packets_dropped64;
		stats->packets_forwarded64 += s->packets_forwarded64;
		stats->packets_fast_xmited64 += s->packets_fast_xmited64;
		stats->packets_not_forwarded64 += s->packets_not_forwarded64;
		stats->pppoe_encap_packets_forwarded64 += s->pppoe_encap_packets_forwarded64;
		stats->pppoe_decap_packets_forwarded64 += s->pppoe_decap_packets_forwarded64;
		stats->pppoe_bridge_packets_forwarded64 += s->pppoe_bridge_packets_forwarded64;
	}
}

/*
 * sfe_ipv6_insert_connection_match()
 *	Insert a connection match into the hash.
 *
 * On entry we must be holding the lock that protects the hash table.
 */
static inline void sfe_ipv6_insert_connection_match(struct sfe_ipv6 *si,
						    struct sfe_ipv6_connection_match *cm)
{
	unsigned int conn_match_idx
		= sfe_ipv6_get_connection_match_hash(cm->match_dev, cm->match_protocol,
						     cm->match_src_ip, cm->match_src_port,
						     cm->match_dest_ip, cm->match_dest_port);

	lockdep_assert_held(&si->lock);

	hlist_add_head_rcu(&cm->hnode, &si->hlist_conn_match_hash_head[conn_match_idx]);
#ifdef CONFIG_NF_FLOW_COOKIE
	if (!si->flow_cookie_enable || !(cm->flags & (SFE_IPV6_CONNECTION_MATCH_FLAG_XLATE_SRC | SFE_IPV6_CONNECTION_MATCH_FLAG_XLATE_DEST)))
		return;

	/*
	 * Configure hardware to put a flow cookie in packets of this flow,
	 * so we can accelerate the lookup process when we receive these packets.
	 */
	for (conn_match_idx = 1; conn_match_idx < SFE_FLOW_COOKIE_SIZE; conn_match_idx++) {
		struct sfe_ipv6_flow_cookie_entry *entry = &si->sfe_flow_cookie_table[conn_match_idx];

		if ((NULL == entry->match) && time_is_before_jiffies(entry->last_clean_time + HZ)) {
			sfe_ipv6_flow_cookie_set_func_t func;

			rcu_read_lock();
			func = rcu_dereference(si->flow_cookie_set_func);
			if (func) {
				if (!func(cm->match_protocol, cm->match_src_ip->addr, cm->match_src_port,
					  cm->match_dest_ip->addr, cm->match_dest_port, conn_match_idx)) {
					entry->match = cm;
					cm->flow_cookie = conn_match_idx;
				} else {
					si->exception_events[SFE_IPV6_EXCEPTION_EVENT_FLOW_COOKIE_ADD_FAIL]++;
				}
			}
			rcu_read_unlock();

			break;
		}
	}
#endif
}

/*
 * sfe_ipv6_remove_connection_match()
 *	Remove a connection match object from the hash.
 */
static inline void sfe_ipv6_remove_connection_match(struct sfe_ipv6 *si, struct sfe_ipv6_connection_match *cm)
{
	lockdep_assert_held(&si->lock);
#ifdef CONFIG_NF_FLOW_COOKIE
	if (si->flow_cookie_enable) {
		/*
		 * Tell hardware that we no longer need a flow cookie in packets of this flow.
		 */
		unsigned int conn_match_idx;

		for (conn_match_idx = 1; conn_match_idx < SFE_FLOW_COOKIE_SIZE; conn_match_idx++) {
			struct sfe_ipv6_flow_cookie_entry *entry = &si->sfe_flow_cookie_table[conn_match_idx];

			if (cm == entry->match) {
				sfe_ipv6_flow_cookie_set_func_t func;

				rcu_read_lock();
				func = rcu_dereference(si->flow_cookie_set_func);
				if (func) {
					func(cm->match_protocol, cm->match_src_ip->addr, cm->match_src_port,
					     cm->match_dest_ip->addr, cm->match_dest_port, 0);
				}
				rcu_read_unlock();

				cm->flow_cookie = 0;
				entry->match = NULL;
				entry->last_clean_time = jiffies;
				break;
			}
		}
	}
#endif
	hlist_del_init_rcu(&cm->hnode);
}

/*
 * sfe_ipv6_get_connection_hash()
 *	Generate the hash used in connection lookups.
 */
static inline unsigned int sfe_ipv6_get_connection_hash(u8 protocol, struct sfe_ipv6_addr *src_ip, __be16 src_port,
							struct sfe_ipv6_addr *dest_ip, __be16 dest_port)
{
	u32 idx, hash = 0;

	for (idx = 0; idx < 4; idx++) {
		hash ^= src_ip->addr[idx] ^ dest_ip->addr[idx];
	}
	hash = hash ^ protocol ^ ntohs(src_port ^ dest_port);
	return ((hash >> SFE_IPV6_CONNECTION_HASH_SHIFT) ^ hash) & SFE_IPV6_CONNECTION_HASH_MASK;
}

/*
 * sfe_ipv6_find_connection()
 *	Get the IPv6 connection info that corresponds to a particular 5-tuple.
 *
 * On entry we must be holding the lock that protects the hash table.
 */
static inline struct sfe_ipv6_connection *sfe_ipv6_find_connection(struct sfe_ipv6 *si, u32 protocol,
								   struct sfe_ipv6_addr *src_ip, __be16 src_port,
								   struct sfe_ipv6_addr *dest_ip, __be16 dest_port)
{
	struct sfe_ipv6_connection *c;

	unsigned int conn_idx = sfe_ipv6_get_connection_hash(protocol, src_ip, src_port, dest_ip, dest_port);

	lockdep_assert_held(&si->lock);
	c = si->conn_hash[conn_idx];

	while (c) {
		if ((c->src_port == src_port)
		    && (c->dest_port == dest_port)
		    && (sfe_ipv6_addr_equal(c->src_ip, src_ip))
		    && (sfe_ipv6_addr_equal(c->dest_ip, dest_ip))
		    && (c->protocol == protocol)) {
			return c;
		}
		c = c->next;
	}

	return NULL;
}

/*
 * sfe_ipv6_insert_connection()
 *	Insert a connection into the hash.
 *
 * On entry we must be holding the lock that protects the hash table.
 */
static void sfe_ipv6_insert_connection(struct sfe_ipv6 *si, struct sfe_ipv6_connection *c)
{
	struct sfe_ipv6_connection **hash_head;
	struct sfe_ipv6_connection *prev_head;
	unsigned int conn_idx;

	lockdep_assert_held(&si->lock);

	/*
	 * Insert entry into the connection hash.
	 */
	conn_idx = sfe_ipv6_get_connection_hash(c->protocol, c->src_ip, c->src_port,
						c->dest_ip, c->dest_port);
	hash_head = &si->conn_hash[conn_idx];
	prev_head = *hash_head;
	c->prev = NULL;
	if (prev_head) {
		prev_head->prev = c;
	}

	c->next = prev_head;
	*hash_head = c;

	/*
	 * Insert entry into the "all connections" list.
	 */
	if (si->all_connections_tail) {
		c->all_connections_prev = si->all_connections_tail;
		si->all_connections_tail->all_connections_next = c;
	} else {
		c->all_connections_prev = NULL;
		si->all_connections_head = c;
	}

	si->all_connections_tail = c;
	c->all_connections_next = NULL;
	si->num_connections++;

	/*
	 * Insert the connection match objects too.
	 */
	sfe_ipv6_insert_connection_match(si, c->original_match);
	sfe_ipv6_insert_connection_match(si, c->reply_match);
}

/*
 * sfe_ipv6_remove_connection()
 *	Remove a sfe_ipv6_connection object from the hash.
 *
 * On entry we must be holding the lock that protects the hash table.
 */
bool sfe_ipv6_remove_connection(struct sfe_ipv6 *si, struct sfe_ipv6_connection *c)
{
	lockdep_assert_held(&si->lock);

	if (c->removed) {
		DEBUG_ERROR("%px: Connection has been removed already\n", c);
		return false;
	}

	/*
	 * Release the reference to the decap direction top_interface_dev.
	 */
	if (c->reply_match->top_interface_dev) {
		dev_put(c->reply_match->top_interface_dev);
	}

	/*
	 * Remove the connection match objects.
	 */
	sfe_ipv6_remove_connection_match(si, c->reply_match);
	sfe_ipv6_remove_connection_match(si, c->original_match);

	/*
	 * Unlink the connection.
	 */
	if (c->prev) {
		c->prev->next = c->next;
	} else {
		unsigned int conn_idx = sfe_ipv6_get_connection_hash(c->protocol, c->src_ip, c->src_port,
								     c->dest_ip, c->dest_port);
		si->conn_hash[conn_idx] = c->next;
	}

	if (c->next) {
		c->next->prev = c->prev;
	}

	/*
	 * Unlink the connection from the all_connections list.
	 */
	if (c->all_connections_prev) {
		c->all_connections_prev->all_connections_next = c->all_connections_next;
	} else {
		si->all_connections_head = c->all_connections_next;
	}

	if (c->all_connections_next) {
		c->all_connections_next->all_connections_prev = c->all_connections_prev;
	} else {
		si->all_connections_tail = c->all_connections_prev;
	}

	/*
	 * If I am the next sync connection, move the sync to my next or head.
	 */
	if (unlikely(si->wc_next == c)) {
		si->wc_next = c->all_connections_next;
	}

	c->removed = true;
	si->num_connections--;
	return true;
}

/*
 * sfe_ipv6_gen_sync_connection()
 *	Sync a connection.
 *
 * On entry to this function we expect that the lock for the connection is either
 * already held (while called from sfe_ipv6_periodic_sync()) or isn't required
 * (while called from sfe_ipv6_flush_connection()).
 */
static void sfe_ipv6_gen_sync_connection(struct sfe_ipv6 *si, struct sfe_ipv6_connection *c,
					 struct sfe_connection_sync *sis, sfe_sync_reason_t reason,
					 u64 now_jiffies)
{
	struct sfe_ipv6_connection_match *original_cm;
	struct sfe_ipv6_connection_match *reply_cm;
	u32 packet_count, byte_count;

	/*
	 * Fill in the update message.
	 */
	sis->is_v6 = 1;
	sis->protocol = c->protocol;
	sis->src_ip.ip6[0] = c->src_ip[0];
	sis->src_ip_xlate.ip6[0] = c->src_ip_xlate[0];
	sis->dest_ip.ip6[0] = c->dest_ip[0];
	sis->dest_ip_xlate.ip6[0] = c->dest_ip_xlate[0];
	sis->src_port = c->src_port;
	sis->src_port_xlate = c->src_port_xlate;
	sis->dest_port = c->dest_port;
	sis->dest_port_xlate = c->dest_port_xlate;

	original_cm = c->original_match;
	reply_cm = c->reply_match;
	sis->src_td_max_window = original_cm->protocol_state.tcp.max_win;
	sis->src_td_end = original_cm->protocol_state.tcp.end;
	sis->src_td_max_end = original_cm->protocol_state.tcp.max_end;
	sis->dest_td_max_window = reply_cm->protocol_state.tcp.max_win;
	sis->dest_td_end = reply_cm->protocol_state.tcp.end;
	sis->dest_td_max_end = reply_cm->protocol_state.tcp.max_end;

	sfe_ipv6_connection_match_update_summary_stats(original_cm, &packet_count, &byte_count);
	sis->src_new_packet_count = packet_count;
	sis->src_new_byte_count = byte_count;

	sfe_ipv6_connection_match_update_summary_stats(reply_cm, &packet_count, &byte_count);
	sis->dest_new_packet_count = packet_count;
	sis->dest_new_byte_count = byte_count;

	sis->src_dev = original_cm->match_dev;
	sis->src_packet_count = original_cm->rx_packet_count64;
	sis->src_byte_count = original_cm->rx_byte_count64;

	sis->dest_dev = reply_cm->match_dev;
	sis->dest_packet_count = reply_cm->rx_packet_count64;
	sis->dest_byte_count = reply_cm->rx_byte_count64;

	sis->reason = reason;

	/*
	 * Get the time increment since our last sync.
	 */
	sis->delta_jiffies = now_jiffies - c->last_sync_jiffies;
	c->last_sync_jiffies = now_jiffies;
}

/*
 * sfe_ipv6_free_sfe_ipv6_connection_rcu()
 *	Called once an RCU quiescent state has passed to free the connection object.
 */
static void sfe_ipv6_free_sfe_ipv6_connection_rcu(struct rcu_head *head)
{
	struct sfe_ipv6_connection *c;
	struct udp_sock *up;
	struct sock *sk;

	/*
	 * We don't need the spin lock as the connection has already been removed from the list.
	 */
	c = container_of(head, struct sfe_ipv6_connection, rcu);
	BUG_ON(!c->removed);

	DEBUG_TRACE("%px: connection has been deleted\n", c);

	/*
	 * Decrease the refcount taken in sfe_ipv6_create_rule()
	 * during the call to __udp6_lib_lookup().
	 */
	up = c->reply_match->up;
	if (up) {
		sk = (struct sock *)up;
		sock_put(sk);
	}

	/*
	 * Release our hold of the source and dest devices and free the memory
	 * for our connection objects.
	 */
	dev_put(c->original_dev);
	dev_put(c->reply_dev);
	kfree(c->original_match);
	kfree(c->reply_match);
	kfree(c);
}

/*
 * sfe_ipv6_sync_status()
 *	Update a connection's status to its connection manager.
 *
 * si: the ipv6 context
 * c: the connection to be notified
 * reason: the reason for the sync (flush or destroy)
 */
void sfe_ipv6_sync_status(struct sfe_ipv6 *si,
			  struct sfe_ipv6_connection *c,
			  sfe_sync_reason_t reason)
{
	struct sfe_connection_sync sis;
	u64 now_jiffies;
	sfe_sync_rule_callback_t sync_rule_callback;

	rcu_read_lock();
	sync_rule_callback = rcu_dereference(si->sync_rule_callback);

	if (unlikely(!sync_rule_callback)) {
		rcu_read_unlock();
		return;
	}

	/*
	 * Generate a sync message and then sync.
	 */
	now_jiffies = get_jiffies_64();
	sfe_ipv6_gen_sync_connection(si, c, &sis, reason, now_jiffies);
	sync_rule_callback(&sis);

	rcu_read_unlock();
}

/*
 * sfe_ipv6_flush_connection()
 *	Flush a connection and free all associated resources.
 *
 * We need to be called with bottom halves disabled locally as we need to acquire
 * the connection hash lock and release it again. In general we're actually called
 * from within a BH and so we're fine, but we're also called when connections are
 * torn down.
 */
void sfe_ipv6_flush_connection(struct sfe_ipv6 *si,
			       struct sfe_ipv6_connection *c,
			       sfe_sync_reason_t reason)
{
	BUG_ON(!c->removed);

	this_cpu_inc(si->stats_pcpu->connection_flushes64);
	sfe_ipv6_sync_status(si, c, reason);

	/*
	 * Release our hold of the source and dest devices and free the memory
	 * for our connection objects.
	 */
	call_rcu(&c->rcu, sfe_ipv6_free_sfe_ipv6_connection_rcu);
}

/*
 * sfe_ipv6_service_class_stats_inc()
 *	Increment per cpu per service class stats.
 */
void sfe_ipv6_service_class_stats_inc(struct sfe_ipv6 *si, uint8_t sid, uint64_t bytes)
{
	struct sfe_ipv6_service_class_stats_db *sc_stats_db = this_cpu_ptr(si->stats_pcpu_psc);
	struct sfe_ipv6_per_service_class_stats *sc_stats = &sc_stats_db->psc_stats[sid];

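	/*
	 * The stats are per-CPU; the seqcount lets a reader on another CPU
	 * detect a concurrent update of the two counters and retry its
	 * snapshot.
	 */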
	write_seqcount_begin(&sc_stats->seq);
	sc_stats->tx_bytes += bytes;
	sc_stats->tx_packets++;
	write_seqcount_end(&sc_stats->seq);
}

/*
 * sfe_ipv6_exception_stats_inc()
 *	Increment exception stats.
 */
void sfe_ipv6_exception_stats_inc(struct sfe_ipv6 *si, enum sfe_ipv6_exception_events reason)
{
	struct sfe_ipv6_stats *stats = this_cpu_ptr(si->stats_pcpu);

	stats->exception_events64[reason]++;
	stats->packets_not_forwarded64++;
}

/*
 * sfe_ipv6_is_local_ip()
 *	Return true if the given address is a local IP, otherwise return false.
 */
static bool sfe_ipv6_is_local_ip(struct sfe_ipv6 *si, uint8_t *addr)
{
	struct net_device *dev;
	struct in6_addr ip_addr;

	memcpy(ip_addr.s6_addr, addr, 16);

	dev = ipv6_dev_find(&init_net, &ip_addr, 1);
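	/*
	 * ipv6_dev_find() returns the matching device with a reference held;
	 * only the existence check matters here, so drop it straight away.
	 */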
	if (dev) {
		dev_put(dev);
		return true;
	}

	return false;
}

/*
 * sfe_ipv6_recv()
 *	Handle packet receives and forwarding.
 *
 * Returns 1 if the packet is forwarded or 0 if it isn't.
 */
int sfe_ipv6_recv(struct net_device *dev, struct sk_buff *skb, struct sfe_l2_info *l2_info, bool tun_outer)
{
	struct sfe_ipv6 *si = &__si6;
	unsigned int len;
	unsigned int payload_len;
	unsigned int ihl = sizeof(struct ipv6hdr);
	bool sync_on_find = false;
	struct ipv6hdr *iph;
	u8 next_hdr;

	/*
	 * Check that we have space for an IP header and an upper-layer header here.
	 */
	len = skb->len;
	if (!pskb_may_pull(skb, ihl + sizeof(struct sfe_ipv6_ext_hdr))) {
		sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_HEADER_INCOMPLETE);
		DEBUG_TRACE("len: %u is too short\n", len);
		return 0;
	}

	/*
	 * Is our IP version wrong?
	 */
	iph = (struct ipv6hdr *)skb->data;
	if (unlikely(iph->version != 6)) {
		sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_NON_V6);
		DEBUG_TRACE("IP version: %u\n", iph->version);
		return 0;
	}

	/*
	 * Does our datagram fit inside the skb?
	 */
	payload_len = ntohs(iph->payload_len);
	if (unlikely(payload_len > (len - ihl))) {
		sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_DATAGRAM_INCOMPLETE);
		DEBUG_TRACE("payload_len: %u, exceeds len: %u\n", payload_len, (len - (unsigned int)sizeof(struct ipv6hdr)));
		return 0;
	}

	next_hdr = iph->nexthdr;
	while (unlikely(sfe_ipv6_is_ext_hdr(next_hdr))) {
		struct sfe_ipv6_ext_hdr *ext_hdr;
		unsigned int ext_hdr_len;

		ext_hdr = (struct sfe_ipv6_ext_hdr *)(skb->data + ihl);

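		/*
		 * The extension header's hdr_len field counts 8-octet units
		 * beyond the first 8 octets, hence the shift by 3 plus the
		 * fixed minimum header size.
		 */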
		ext_hdr_len = ext_hdr->hdr_len;
		ext_hdr_len <<= 3;
		ext_hdr_len += sizeof(struct sfe_ipv6_ext_hdr);
		ihl += ext_hdr_len;
		if (!pskb_may_pull(skb, ihl + sizeof(struct sfe_ipv6_ext_hdr))) {
			sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_HEADER_INCOMPLETE);

			DEBUG_TRACE("extension header %d not completed\n", next_hdr);
			return 0;
		}

		/*
		 * Any packet with extension headers won't be handled in the fast
		 * path; sync its status and exception it to the kernel.
		 */
		sync_on_find = true;
		next_hdr = ext_hdr->next_hdr;
	}

	if (IPPROTO_UDP == next_hdr) {
		return sfe_ipv6_recv_udp(si, skb, dev, len, iph, ihl, sync_on_find, l2_info, tun_outer);
	}

	if (IPPROTO_TCP == next_hdr) {
		return sfe_ipv6_recv_tcp(si, skb, dev, len, iph, ihl, sync_on_find, l2_info);
	}

	if (IPPROTO_ICMPV6 == next_hdr) {
		return sfe_ipv6_recv_icmp(si, skb, dev, len, iph, ihl);
	}

	if (IPPROTO_IPIP == next_hdr) {
		return sfe_ipv6_recv_tunipip6(si, skb, dev, len, iph, ihl, sync_on_find, l2_info, true);
	}

#ifdef SFE_GRE_TUN_ENABLE
	if (IPPROTO_GRE == next_hdr) {
		return sfe_ipv6_recv_gre(si, skb, dev, len, iph, ihl, sync_on_find, l2_info, tun_outer);
	}
#endif

	sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_UNHANDLED_PROTOCOL);
	DEBUG_TRACE("not UDP, TCP or ICMP: %u\n", next_hdr);
	return 0;
}

/*
 * sfe_ipv6_update_tcp_state()
 *	Update TCP window variables.
 */
static void
sfe_ipv6_update_tcp_state(struct sfe_ipv6_connection *c,
			  struct sfe_ipv6_rule_create_msg *msg)
{
	struct sfe_ipv6_connection_match *orig_cm;
	struct sfe_ipv6_connection_match *repl_cm;
	struct sfe_ipv6_tcp_connection_match *orig_tcp;
	struct sfe_ipv6_tcp_connection_match *repl_tcp;

	orig_cm = c->original_match;
	repl_cm = c->reply_match;
	orig_tcp = &orig_cm->protocol_state.tcp;
	repl_tcp = &repl_cm->protocol_state.tcp;

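	/*
	 * The (s32) casts below compare values as signed differences so the
	 * updates remain correct across TCP sequence-space wrap-around.
	 */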
	/* update orig */
	if (orig_tcp->max_win < msg->tcp_rule.flow_max_window) {
		orig_tcp->max_win = msg->tcp_rule.flow_max_window;
	}
	if ((s32)(orig_tcp->end - msg->tcp_rule.flow_end) < 0) {
		orig_tcp->end = msg->tcp_rule.flow_end;
	}
	if ((s32)(orig_tcp->max_end - msg->tcp_rule.flow_max_end) < 0) {
		orig_tcp->max_end = msg->tcp_rule.flow_max_end;
	}

	/* update reply */
	if (repl_tcp->max_win < msg->tcp_rule.return_max_window) {
		repl_tcp->max_win = msg->tcp_rule.return_max_window;
	}
	if ((s32)(repl_tcp->end - msg->tcp_rule.return_end) < 0) {
		repl_tcp->end = msg->tcp_rule.return_end;
	}
	if ((s32)(repl_tcp->max_end - msg->tcp_rule.return_max_end) < 0) {
		repl_tcp->max_end = msg->tcp_rule.return_max_end;
	}

	/* update match flags */
	orig_cm->flags &= ~SFE_IPV6_CONNECTION_MATCH_FLAG_NO_SEQ_CHECK;
	repl_cm->flags &= ~SFE_IPV6_CONNECTION_MATCH_FLAG_NO_SEQ_CHECK;
	if (msg->rule_flags & SFE_RULE_CREATE_FLAG_NO_SEQ_CHECK) {
		orig_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_NO_SEQ_CHECK;
		repl_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_NO_SEQ_CHECK;
	}
}

/*
 * sfe_ipv6_update_protocol_state()
 *	Update the protocol-specific state machine.
 */
static void
sfe_ipv6_update_protocol_state(struct sfe_ipv6_connection *c,
			       struct sfe_ipv6_rule_create_msg *msg)
{
	switch (msg->tuple.protocol) {
	case IPPROTO_TCP:
		sfe_ipv6_update_tcp_state(c, msg);
		break;
	}
}

/*
 * sfe_ipv6_match_entry_set_vlan()
 */
static void sfe_ipv6_match_entry_set_vlan(
	struct sfe_ipv6_connection_match *cm,
	u32 primary_ingress_vlan_tag,
	u32 primary_egress_vlan_tag,
	u32 secondary_ingress_vlan_tag,
	u32 secondary_egress_vlan_tag)
{
	u16 tpid;

	/*
	 * Prevent stacking header counts when updating.
	 */
	cm->ingress_vlan_hdr_cnt = 0;
	cm->egress_vlan_hdr_cnt = 0;
	memset(cm->ingress_vlan_hdr, 0, sizeof(cm->ingress_vlan_hdr));
	memset(cm->egress_vlan_hdr, 0, sizeof(cm->egress_vlan_hdr));

	/*
	 * vlan_hdr[0] corresponds to the outer tag.
	 * vlan_hdr[1] corresponds to the inner tag.
	 * Extract the vlan information (tpid and tci) from the rule message.
	 */
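	/*
	 * Each 32-bit tag packs the TPID in its upper 16 bits and the TCI in
	 * its lower 16 bits; a VID of SFE_VLAN_ID_NOT_CONFIGURED means the
	 * corresponding tag is absent.
	 */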
	if ((primary_ingress_vlan_tag & VLAN_VID_MASK) != SFE_VLAN_ID_NOT_CONFIGURED) {
		tpid = (u16)(primary_ingress_vlan_tag >> 16);
		cm->ingress_vlan_hdr[0].tpid = ntohs(tpid);
		cm->ingress_vlan_hdr[0].tci = (u16)primary_ingress_vlan_tag;
		cm->ingress_vlan_hdr_cnt++;
	}

	if ((secondary_ingress_vlan_tag & VLAN_VID_MASK) != SFE_VLAN_ID_NOT_CONFIGURED) {
		tpid = (u16)(secondary_ingress_vlan_tag >> 16);
		cm->ingress_vlan_hdr[1].tpid = ntohs(tpid);
		cm->ingress_vlan_hdr[1].tci = (u16)secondary_ingress_vlan_tag;
		cm->ingress_vlan_hdr_cnt++;
	}

	if ((primary_egress_vlan_tag & VLAN_VID_MASK) != SFE_VLAN_ID_NOT_CONFIGURED) {
		tpid = (u16)(primary_egress_vlan_tag >> 16);
		cm->egress_vlan_hdr[0].tpid = ntohs(tpid);
		cm->egress_vlan_hdr[0].tci = (u16)primary_egress_vlan_tag;
		cm->egress_vlan_hdr_cnt++;
	}

	if ((secondary_egress_vlan_tag & VLAN_VID_MASK) != SFE_VLAN_ID_NOT_CONFIGURED) {
		tpid = (u16)(secondary_egress_vlan_tag >> 16);
		cm->egress_vlan_hdr[1].tpid = ntohs(tpid);
		cm->egress_vlan_hdr[1].tci = (u16)secondary_egress_vlan_tag;
		cm->egress_vlan_hdr_cnt++;
	}
}

/*
 * sfe_ipv6_update_rule()
 *	Update the forwarding rule after it has been created.
 */
void sfe_ipv6_update_rule(struct sfe_ipv6_rule_create_msg *msg)
{
	struct sfe_ipv6_connection *c;
	struct sfe_ipv6 *si = &__si6;

	spin_lock_bh(&si->lock);

	c = sfe_ipv6_find_connection(si,
				     msg->tuple.protocol,
				     (struct sfe_ipv6_addr *)msg->tuple.flow_ip,
				     msg->tuple.flow_ident,
				     (struct sfe_ipv6_addr *)msg->tuple.return_ip,
				     msg->tuple.return_ident);
	if (c != NULL) {
		sfe_ipv6_update_protocol_state(c, msg);
	}

	spin_unlock_bh(&si->lock);
}

/*
 * sfe_ipv6_xmit_eth_type_check()
 *	Check whether the MAC header has to be written.
 */
static inline bool sfe_ipv6_xmit_eth_type_check(struct net_device *dev, u32 cm_flags)
{
	if (!(dev->flags & IFF_NOARP)) {
		return true;
	}

	/*
	 * For PPPoE, since we are now supporting PPPoE encapsulation, we are writing the L2 header.
	 */
	if (cm_flags & SFE_IPV6_CONNECTION_MATCH_FLAG_PPPOE_ENCAP) {
		return true;
	}

	return false;
}

/*
 * sfe_ipv6_create_rule()
 *	Create a forwarding rule.
 */
int sfe_ipv6_create_rule(struct sfe_ipv6_rule_create_msg *msg)
{
	struct sfe_ipv6 *si = &__si6;
	struct sfe_ipv6_connection *c, *old_c;
	struct sfe_ipv6_connection_match *original_cm;
	struct sfe_ipv6_connection_match *reply_cm;
	struct net_device *dest_dev;
	struct net_device *src_dev;
	struct sfe_ipv6_5tuple *tuple = &msg->tuple;
	struct sock *sk;
	struct net *net;
	unsigned int src_if_idx;

	s32 flow_interface_num = msg->conn_rule.flow_top_interface_num;
	s32 return_interface_num = msg->conn_rule.return_top_interface_num;
	u32 flow_sawf_tag;
	u32 return_sawf_tag;

	if (msg->rule_flags & SFE_RULE_CREATE_FLAG_USE_FLOW_BOTTOM_INTERFACE) {
		flow_interface_num = msg->conn_rule.flow_interface_num;
	}

	if (msg->rule_flags & SFE_RULE_CREATE_FLAG_USE_RETURN_BOTTOM_INTERFACE) {
		return_interface_num = msg->conn_rule.return_interface_num;
	}

	src_dev = dev_get_by_index(&init_net, flow_interface_num);
	if (!src_dev) {
		DEBUG_WARN("%px: Unable to find src_dev corresponding to %d\n", msg,
			   flow_interface_num);
		this_cpu_inc(si->stats_pcpu->connection_create_failures64);
		return -EINVAL;
	}

	dest_dev = dev_get_by_index(&init_net, return_interface_num);
	if (!dest_dev) {
		DEBUG_WARN("%px: Unable to find dest_dev corresponding to %d\n", msg,
			   return_interface_num);
		this_cpu_inc(si->stats_pcpu->connection_create_failures64);
		dev_put(src_dev);
		return -EINVAL;
	}

	if (unlikely((dest_dev->reg_state != NETREG_REGISTERED) ||
		     (src_dev->reg_state != NETREG_REGISTERED))) {
		DEBUG_WARN("%px: src_dev=%s and dest_dev=%s are unregistered\n", msg,
			   src_dev->name, dest_dev->name);
		this_cpu_inc(si->stats_pcpu->connection_create_failures64);
		dev_put(src_dev);
		dev_put(dest_dev);
		return -EINVAL;
	}

	/*
	 * Allocate the various connection tracking objects.
	 */
	c = (struct sfe_ipv6_connection *)kmalloc(sizeof(struct sfe_ipv6_connection), GFP_ATOMIC);
	if (unlikely(!c)) {
		DEBUG_WARN("%px: memory allocation of connection entry failed\n", msg);
		this_cpu_inc(si->stats_pcpu->connection_create_failures64);
		dev_put(src_dev);
		dev_put(dest_dev);
		return -ENOMEM;
	}

	original_cm = (struct sfe_ipv6_connection_match *)kmalloc(sizeof(struct sfe_ipv6_connection_match), GFP_ATOMIC);
	if (unlikely(!original_cm)) {
		this_cpu_inc(si->stats_pcpu->connection_create_failures64);
		DEBUG_WARN("%px: memory allocation of connection match entry failed\n", msg);
		kfree(c);
		dev_put(src_dev);
		dev_put(dest_dev);
		return -ENOMEM;
	}

	reply_cm = (struct sfe_ipv6_connection_match *)kmalloc(sizeof(struct sfe_ipv6_connection_match), GFP_ATOMIC);
	if (unlikely(!reply_cm)) {
		this_cpu_inc(si->stats_pcpu->connection_create_failures64);
		DEBUG_WARN("%px: memory allocation of connection match entry failed\n", msg);
		kfree(original_cm);
		kfree(c);
		dev_put(src_dev);
		dev_put(dest_dev);
		return -ENOMEM;
	}

	this_cpu_inc(si->stats_pcpu->connection_create_requests64);

	spin_lock_bh(&si->lock);

	/*
	 * Check to see if there is already a flow that matches the rule we're
	 * trying to create. If there is then we can't create a new one.
	 */
	old_c = sfe_ipv6_find_connection(si,
					 tuple->protocol,
					 (struct sfe_ipv6_addr *)tuple->flow_ip,
					 tuple->flow_ident,
					 (struct sfe_ipv6_addr *)tuple->return_ip,
					 tuple->return_ident);

	if (old_c != NULL) {
		this_cpu_inc(si->stats_pcpu->connection_create_collisions64);

		/*
		 * If we already have the flow then it's likely that this
		 * request to create the connection rule contains more
		 * up-to-date information. Check and update accordingly.
		 */
		sfe_ipv6_update_protocol_state(old_c, msg);
		spin_unlock_bh(&si->lock);

		kfree(reply_cm);
		kfree(original_cm);
		kfree(c);
		dev_put(src_dev);
		dev_put(dest_dev);

		DEBUG_TRACE("connection already exists - p: %d\n"
			    "  s: %s:%pxM:%pI6:%u, d: %s:%pxM:%pI6:%u\n",
			    tuple->protocol,
			    src_dev->name, msg->conn_rule.flow_mac, tuple->flow_ip, ntohs(tuple->flow_ident),
			    dest_dev->name, msg->conn_rule.return_mac, tuple->return_ip, ntohs(tuple->return_ident));
		return -EADDRINUSE;
	}

	/*
	 * Fill in the "original" direction connection matching object.
	 * Note that the transmit MAC address is "dest_mac_xlate" because
	 * we always know both ends of a connection by their translated
	 * addresses and not their public addresses.
	 */
	original_cm->match_dev = src_dev;
	original_cm->match_protocol = tuple->protocol;
	original_cm->match_src_ip[0] = *(struct sfe_ipv6_addr *)tuple->flow_ip;
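	/*
	 * A VXLAN source device varies the outer UDP source port per inner
	 * flow, so the source port is not usable as a match key for such
	 * devices.
	 */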
	original_cm->match_src_port = netif_is_vxlan(src_dev) ? 0 : tuple->flow_ident;
	original_cm->match_dest_ip[0] = *(struct sfe_ipv6_addr *)tuple->return_ip;
	original_cm->match_dest_port = tuple->return_ident;

	original_cm->xlate_src_ip[0] = *(struct sfe_ipv6_addr *)tuple->flow_ip;
	original_cm->xlate_src_port = tuple->flow_ident;
	original_cm->xlate_dest_ip[0] = *(struct sfe_ipv6_addr *)tuple->return_ip;
	original_cm->xlate_dest_port = tuple->return_ident;

	atomic_set(&original_cm->rx_packet_count, 0);
	original_cm->rx_packet_count64 = 0;
	atomic_set(&original_cm->rx_byte_count, 0);
	original_cm->rx_byte_count64 = 0;
	original_cm->xmit_dev = dest_dev;

	original_cm->xmit_dev_mtu = msg->conn_rule.return_mtu;

	original_cm->connection = c;
	original_cm->counter_match = reply_cm;
	original_cm->l2_hdr_size = 0;
	original_cm->flags = 0;

	/*
	 * Valid in decap direction only
	 */
	RCU_INIT_POINTER(original_cm->up, NULL);
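	/*
	 * In the decap direction the reply match entry later caches the UDP
	 * socket returned by __udp6_lib_lookup(); that reference is released
	 * in sfe_ipv6_free_sfe_ipv6_connection_rcu() when the connection is
	 * freed.
	 */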
1228
Ken Zhu37040ea2021-09-09 21:11:15 -07001229 if (msg->valid_flags & SFE_RULE_CREATE_MARK_VALID) {
1230 original_cm->mark = msg->mark_rule.flow_mark;
1231 original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_MARK;
1232 }
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301233 if (msg->valid_flags & SFE_RULE_CREATE_QOS_VALID) {
1234 original_cm->priority = msg->qos_rule.flow_qos_tag;
Xiaoping Fane1963d42015-08-25 17:06:19 -07001235 original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_PRIORITY_REMARK;
1236 }
Wayne Tanbb7f1782021-12-13 11:16:04 -08001237
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301238 if (msg->valid_flags & SFE_RULE_CREATE_DSCP_MARKING_VALID) {
1239 original_cm->dscp = msg->dscp_rule.flow_dscp << SFE_IPV6_DSCP_SHIFT;
Xiaoping Fane1963d42015-08-25 17:06:19 -07001240 original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_DSCP_REMARK;
1241 }
Ratheesh Kannoth71fc51e2022-01-05 10:02:47 +05301242 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_BRIDGE_FLOW) {
1243 original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_BRIDGE_FLOW;
1244 }
Ken Zhu7e38d1a2021-11-30 17:31:46 -08001245 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_FLOW_TRANSMIT_FAST) {
1246 original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_FAST_XMIT_DEV_ADMISSION;
1247 }
1248
Parikshit Guned31a8202022-01-05 22:15:04 +05301249 /*
1250 * Mark SAWF metadata if the sawf tag is valid.
1251 */
1252 original_cm->sawf_valid = false;
1253 flow_sawf_tag = SFE_GET_SAWF_TAG(msg->sawf_rule.flow_mark);
1254 if (likely(SFE_SAWF_TAG_IS_VALID(flow_sawf_tag))) {
1255 original_cm->mark = msg->sawf_rule.flow_mark;
1256 original_cm->sawf_valid = true;
1257 original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_MARK;
1258 }
Ratheesh Kannoth71fc51e2022-01-05 10:02:47 +05301259
Wayne Tanbb7f1782021-12-13 11:16:04 -08001260 /*
1261 * Add VLAN rule to original_cm
1262 */
1263 if (msg->valid_flags & SFE_RULE_CREATE_VLAN_VALID) {
1264 struct sfe_vlan_rule *vlan_primary_rule = &msg->vlan_primary_rule;
1265 struct sfe_vlan_rule *vlan_secondary_rule = &msg->vlan_secondary_rule;
1266 sfe_ipv6_match_entry_set_vlan(original_cm,
1267 vlan_primary_rule->ingress_vlan_tag,
1268 vlan_primary_rule->egress_vlan_tag,
1269 vlan_secondary_rule->ingress_vlan_tag,
1270 vlan_secondary_rule->egress_vlan_tag);
1271
1272 if ((msg->rule_flags & SFE_RULE_CREATE_FLAG_USE_RETURN_BOTTOM_INTERFACE) &&
1273 original_cm->egress_vlan_hdr_cnt > 0) {
1274 original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_INSERT_EGRESS_VLAN_TAG;
1275 original_cm->l2_hdr_size += original_cm->egress_vlan_hdr_cnt * VLAN_HLEN;
1276 }
1277 }
1278
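	/*
	 * For GRE, a destination address that is not local to this device means the
	 * tunnel is not terminated here, so the flow is forwarded as-is (passthrough)
	 * rather than decapsulated.
	 */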
Nitin Shettye6ed5b52021-12-27 14:50:11 +05301279 if ((IPPROTO_GRE == tuple->protocol) && !sfe_ipv6_is_local_ip(si, (uint8_t *)original_cm->match_dest_ip)) {
1280 original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_PASSTHROUGH;
1281 }
1282
Xiaoping Fan978b3772015-05-27 14:15:18 -07001283#ifdef CONFIG_NF_FLOW_COOKIE
1284 original_cm->flow_cookie = 0;
1285#endif
Zhi Chen8748eb32015-06-18 12:58:48 -07001286#ifdef CONFIG_XFRM
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301287 if (msg->valid_flags & SFE_RULE_CREATE_DIRECTION_VALID) {
1288 original_cm->flow_accel = msg->direction_rule.flow_accel;
1289 } else {
1290 original_cm->flow_accel = 1;
1291 }
Zhi Chen8748eb32015-06-18 12:58:48 -07001292#endif
Ratheesh Kannotha3cf0e02021-12-09 09:44:10 +05301293 /*
1294 * If l2_features are disabled and flow uses l2 features such as macvlan/bridge/pppoe/vlan,
1295 * bottom interfaces are expected to be disabled in the flow rule and always top interfaces
1296 * are used. In such cases, do not use HW csum offload. csum offload is used only when we
1297 * are sending directly to the destination interface that supports it.
1298 */
Nitin Shettye6ed5b52021-12-27 14:50:11 +05301299 if (likely(dest_dev->features & NETIF_F_HW_CSUM) && sfe_dev_has_hw_csum(dest_dev)) {
Ratheesh Kannotha3cf0e02021-12-09 09:44:10 +05301300 if ((msg->conn_rule.return_top_interface_num == msg->conn_rule.return_interface_num) ||
1301 (msg->rule_flags & SFE_RULE_CREATE_FLAG_USE_RETURN_BOTTOM_INTERFACE)) {
Ratheesh Kannoth48445532022-02-07 16:19:00 +05301302 /*
1303			 * Don't enable CSUM offload
1304 */
1305#if 0
Ratheesh Kannotha3cf0e02021-12-09 09:44:10 +05301306 original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_CSUM_OFFLOAD;
Ratheesh Kannoth48445532022-02-07 16:19:00 +05301307#endif
Ratheesh Kannotha3cf0e02021-12-09 09:44:10 +05301308 }
1309 }
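	/*
	 * Note: the flag assignment above is compiled out with "#if 0", so hardware
	 * checksum offload is currently left disabled even when these conditions are met.
	 */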
Xiaoping Fan978b3772015-05-27 14:15:18 -07001310
Wayne Tanbb7f1782021-12-13 11:16:04 -08001311 reply_cm->l2_hdr_size = 0;
Guduri Prathyusha647fe3e2021-11-22 19:17:51 +05301312 reply_cm->flags = 0;
1313
1314 /*
1315 * Adding PPPoE parameters to original and reply entries based on the direction where
1316 * PPPoE header is valid in ECM rule.
1317 *
1318 * If PPPoE is valid in flow direction (from interface is PPPoE), then
1319 * original cm will have PPPoE at ingress (strip PPPoE header)
1320 * reply cm will have PPPoE at egress (add PPPoE header)
1321 *
1322 * If PPPoE is valid in return direction (to interface is PPPoE), then
1323 * original cm will have PPPoE at egress (add PPPoE header)
1324 * reply cm will have PPPoE at ingress (strip PPPoE header)
1325 */
1326 if (msg->valid_flags & SFE_RULE_CREATE_PPPOE_DECAP_VALID) {
1327 original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_PPPOE_DECAP;
1328 original_cm->pppoe_session_id = msg->pppoe_rule.flow_pppoe_session_id;
1329 ether_addr_copy(original_cm->pppoe_remote_mac, msg->pppoe_rule.flow_pppoe_remote_mac);
1330
1331 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_PPPOE_ENCAP;
Wayne Tanbb7f1782021-12-13 11:16:04 -08001332 reply_cm->l2_hdr_size += SFE_PPPOE_SESSION_HEADER_SIZE;
Guduri Prathyusha647fe3e2021-11-22 19:17:51 +05301333 reply_cm->pppoe_session_id = msg->pppoe_rule.flow_pppoe_session_id;
1334 ether_addr_copy(reply_cm->pppoe_remote_mac, msg->pppoe_rule.flow_pppoe_remote_mac);
1335 }
1336
1337 if (msg->valid_flags & SFE_RULE_CREATE_PPPOE_ENCAP_VALID) {
1338 original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_PPPOE_ENCAP;
Wayne Tanbb7f1782021-12-13 11:16:04 -08001339 original_cm->l2_hdr_size += SFE_PPPOE_SESSION_HEADER_SIZE;
Guduri Prathyusha647fe3e2021-11-22 19:17:51 +05301340 original_cm->pppoe_session_id = msg->pppoe_rule.return_pppoe_session_id;
1341 ether_addr_copy(original_cm->pppoe_remote_mac, msg->pppoe_rule.return_pppoe_remote_mac);
1342
1343 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_PPPOE_DECAP;
1344 reply_cm->pppoe_session_id = msg->pppoe_rule.return_pppoe_session_id;
1345 ether_addr_copy(reply_cm->pppoe_remote_mac, msg->pppoe_rule.return_pppoe_remote_mac);
1346 }
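	/*
	 * l2_hdr_size accumulates the bytes of L2 encapsulation (PPPoE session header,
	 * VLAN tags, Ethernet header) that the transmit path has to write in front of
	 * the IPv6 packet for this connection.
	 */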
1347
Ratheesh Kannoth5dee3772022-01-18 11:27:14 +05301348 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_SRC_INTERFACE_CHECK) {
1349 original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK;
1350 }
1351
Xiaoping Fan978b3772015-05-27 14:15:18 -07001352 /*
Ken Zhubbf49652021-09-12 15:33:09 -07001353 * For the non-arp interface, we don't write L2 HDR.
Guduri Prathyusha79a5fee2021-11-11 17:59:10 +05301354 * Excluding PPPoE from this, since we are now supporting PPPoE encap/decap.
Xiaoping Fan978b3772015-05-27 14:15:18 -07001355 */
Guduri Prathyusha79a5fee2021-11-11 17:59:10 +05301356 if (sfe_ipv6_xmit_eth_type_check(dest_dev, original_cm->flags)) {
Ratheesh Kannoth29140aa2021-10-20 08:25:02 +05301357
1358 /*
1359 * Check whether the rule has configured a specific source MAC address to use.
1360 * This is needed when virtual L3 interfaces such as br-lan, macvlan, vlan are used during egress
1361 */
Ratheesh Kannoth71fc51e2022-01-05 10:02:47 +05301362 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_BRIDGE_FLOW) {
1363 ether_addr_copy((u8 *)original_cm->xmit_src_mac, (u8 *)msg->conn_rule.flow_mac);
Ratheesh Kannoth29140aa2021-10-20 08:25:02 +05301364 } else {
Ratheesh Kannoth71fc51e2022-01-05 10:02:47 +05301365 if ((msg->valid_flags & SFE_RULE_CREATE_SRC_MAC_VALID) &&
1366 (msg->src_mac_rule.mac_valid_flags & SFE_SRC_MAC_RETURN_VALID)) {
1367 ether_addr_copy((u8 *)original_cm->xmit_src_mac, (u8 *)msg->src_mac_rule.return_src_mac);
1368 } else {
1369 ether_addr_copy((u8 *)original_cm->xmit_src_mac, (u8 *)dest_dev->dev_addr);
1370 }
Ratheesh Kannoth29140aa2021-10-20 08:25:02 +05301371 }
1372 ether_addr_copy((u8 *)original_cm->xmit_dest_mac, (u8 *)msg->conn_rule.return_mac);
1373
Xiaoping Fan978b3772015-05-27 14:15:18 -07001374 original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_WRITE_L2_HDR;
Wayne Tanbb7f1782021-12-13 11:16:04 -08001375 original_cm->l2_hdr_size += ETH_HLEN;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001376
1377 /*
1378 * If our dev writes Ethernet headers then we can write a really fast
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301379 * version
Xiaoping Fan978b3772015-05-27 14:15:18 -07001380 */
1381 if (dest_dev->header_ops) {
1382 if (dest_dev->header_ops->create == eth_header) {
1383 original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_WRITE_FAST_ETH_HDR;
1384 }
1385 }
1386 }
1387
1388 /*
1389 * Fill in the "reply" direction connection matching object.
1390 */
1391 reply_cm->match_dev = dest_dev;
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301392 reply_cm->match_protocol = tuple->protocol;
1393 reply_cm->match_src_ip[0] = *(struct sfe_ipv6_addr *)tuple->return_ip;
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301394 reply_cm->match_dest_ip[0] = *(struct sfe_ipv6_addr *)tuple->flow_ip;
1395 reply_cm->match_dest_port = tuple->flow_ident;
1396 reply_cm->xlate_src_ip[0] = *(struct sfe_ipv6_addr *)tuple->return_ip;
1397 reply_cm->xlate_src_port = tuple->return_ident;
1398 reply_cm->xlate_dest_ip[0] = *(struct sfe_ipv6_addr *)tuple->flow_ip;
1399 reply_cm->xlate_dest_port = tuple->flow_ident;
1400
Suruchi Suman23a279d2021-11-16 15:13:09 +05301401 /*
1402 * Keep source port as 0 for VxLAN tunnels.
1403 */
1404 if (netif_is_vxlan(src_dev) || netif_is_vxlan(dest_dev)) {
1405 reply_cm->match_src_port = 0;
1406 } else {
1407 reply_cm->match_src_port = tuple->return_ident;
1408 }
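	/*
	 * A VxLAN outer UDP source port is typically derived from a hash of the inner
	 * flow, so it is not a stable value to match on; the match entries for VxLAN
	 * devices are therefore created with a source port of zero.
	 */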
1409
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301410	atomic_set(&reply_cm->rx_packet_count, 0);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001411 reply_cm->rx_packet_count64 = 0;
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301412 atomic_set(&reply_cm->rx_byte_count, 0);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001413 reply_cm->rx_byte_count64 = 0;
1414 reply_cm->xmit_dev = src_dev;
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301415 reply_cm->xmit_dev_mtu = msg->conn_rule.flow_mtu;
Ratheesh Kannoth29140aa2021-10-20 08:25:02 +05301416
Xiaoping Fan978b3772015-05-27 14:15:18 -07001417 reply_cm->connection = c;
1418 reply_cm->counter_match = original_cm;
Suruchi Suman23a279d2021-11-16 15:13:09 +05301419
Ken Zhu37040ea2021-09-09 21:11:15 -07001420 if (msg->valid_flags & SFE_RULE_CREATE_MARK_VALID) {
1421 reply_cm->mark = msg->mark_rule.return_mark;
1422 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_MARK;
1423 }
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301424 if (msg->valid_flags & SFE_RULE_CREATE_QOS_VALID) {
1425 reply_cm->priority = msg->qos_rule.return_qos_tag;
Xiaoping Fane1963d42015-08-25 17:06:19 -07001426 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_PRIORITY_REMARK;
1427 }
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301428 if (msg->valid_flags & SFE_RULE_CREATE_DSCP_MARKING_VALID) {
1429 reply_cm->dscp = msg->dscp_rule.return_dscp << SFE_IPV6_DSCP_SHIFT;
Xiaoping Fane1963d42015-08-25 17:06:19 -07001430 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_DSCP_REMARK;
1431 }
Ratheesh Kannoth71fc51e2022-01-05 10:02:47 +05301432 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_BRIDGE_FLOW) {
1433 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_BRIDGE_FLOW;
1434 }
Ken Zhu7e38d1a2021-11-30 17:31:46 -08001435 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_RETURN_TRANSMIT_FAST) {
1436 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_FAST_XMIT_DEV_ADMISSION;
1437 }
Ratheesh Kannoth71fc51e2022-01-05 10:02:47 +05301438
Nitin Shettye6ed5b52021-12-27 14:50:11 +05301439 if ((IPPROTO_GRE == tuple->protocol) && !sfe_ipv6_is_local_ip(si, (uint8_t *)reply_cm->match_dest_ip)) {
1440 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_PASSTHROUGH;
1441 }
1442
Suruchi Suman23a279d2021-11-16 15:13:09 +05301443 /*
Parikshit Guned31a8202022-01-05 22:15:04 +05301444 * Mark return SAWF metadata if the sawf tag is valid.
1445 */
1446 reply_cm->sawf_valid = false;
1447 return_sawf_tag = SFE_GET_SAWF_TAG(msg->sawf_rule.return_mark);
1448 if (likely(SFE_SAWF_TAG_IS_VALID(return_sawf_tag))) {
1449 reply_cm->mark = msg->sawf_rule.return_mark;
1450 reply_cm->sawf_valid = true;
1451 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_MARK;
1452 }
1453
1454 /*
Suruchi Suman23a279d2021-11-16 15:13:09 +05301455 * Setup UDP Socket if found to be valid for decap.
1456 */
1457 RCU_INIT_POINTER(reply_cm->up, NULL);
1458 net = dev_net(reply_cm->match_dev);
1459 src_if_idx = src_dev->ifindex;
1460
1461 rcu_read_lock();
1462
1463 /*
1464 * Look for the associated sock object.
1465 * __udp6_lib_lookup() holds a reference for this sock object,
1466 * which will be released in sfe_ipv6_flush_connection()
1467 */
1468#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0))
1469 sk = __udp6_lib_lookup(net, (const struct in6_addr *)reply_cm->match_dest_ip,
1470 reply_cm->match_dest_port, (const struct in6_addr *)reply_cm->xlate_src_ip,
1471 reply_cm->xlate_src_port, src_if_idx, &udp_table);
1472#else
1473 sk = __udp6_lib_lookup(net, (const struct in6_addr *)reply_cm->match_dest_ip,
1474 reply_cm->match_dest_port, (const struct in6_addr *)reply_cm->xlate_src_ip,
1475 reply_cm->xlate_src_port, src_if_idx, 0, &udp_table, NULL);
1476#endif
1477 rcu_read_unlock();
1478
1479 /*
1480 * We set the UDP sock pointer as valid only for decap direction.
1481 */
1482 if (sk && udp_sk(sk)->encap_type) {
1483#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0))
1484 if (!atomic_add_unless(&sk->sk_refcnt, 1, 0)) {
1485#else
1486 if (!refcount_inc_not_zero(&sk->sk_refcnt)) {
1487#endif
Tian Yang435afc42022-02-02 12:47:32 -08001488 this_cpu_inc(si->stats_pcpu->connection_create_failures64);
Wayne Tanbb7f1782021-12-13 11:16:04 -08001489 spin_unlock_bh(&si->lock);
Suruchi Suman23a279d2021-11-16 15:13:09 +05301490			DEBUG_INFO("sfe: unable to take reference for socket p:%d\n", tuple->protocol);
1491			DEBUG_INFO("SK: connection - \n"
1492				   " s: %s:%pI6(%pI6):%u(%u)\n"
1493				   " d: %s:%pI6(%pI6):%u(%u)\n",
1494				   reply_cm->match_dev->name, &reply_cm->match_src_ip, &reply_cm->xlate_src_ip,
1495				   ntohs(reply_cm->match_src_port), ntohs(reply_cm->xlate_src_port),
1496				   reply_cm->xmit_dev->name, &reply_cm->match_dest_ip, &reply_cm->xlate_dest_ip,
1497				   ntohs(reply_cm->match_dest_port), ntohs(reply_cm->xlate_dest_port));
1498
1499			kfree(reply_cm);
1500			kfree(original_cm);
1501			kfree(c);
1502
1503 dev_put(src_dev);
1504 dev_put(dest_dev);
1505
1506 return -ESHUTDOWN;
1507 }
1508
1509 rcu_assign_pointer(reply_cm->up, udp_sk(sk));
1510 DEBUG_INFO("Sock lookup success with reply_cm direction(%p)\n", sk);
1511 DEBUG_INFO("SK: connection - \n"
1512 " s: %s:%pI6(%pI6):%u(%u)\n"
1513 " d: %s:%pI6(%pI6):%u(%u)\n",
1514 reply_cm->match_dev->name, &reply_cm->match_src_ip, &reply_cm->xlate_src_ip,
1515 ntohs(reply_cm->match_src_port), ntohs(reply_cm->xlate_src_port),
1516 reply_cm->xmit_dev->name, &reply_cm->match_dest_ip, &reply_cm->xlate_dest_ip,
1517 ntohs(reply_cm->match_dest_port), ntohs(reply_cm->xlate_dest_port));
1518 }
1519
Wayne Tanbb7f1782021-12-13 11:16:04 -08001520 /*
1521 * Add VLAN rule to reply_cm
1522 */
1523 if (msg->valid_flags & SFE_RULE_CREATE_VLAN_VALID) {
1524 struct sfe_vlan_rule *vlan_primary_rule = &msg->vlan_primary_rule;
1525 struct sfe_vlan_rule *vlan_secondary_rule = &msg->vlan_secondary_rule;
1526 sfe_ipv6_match_entry_set_vlan(reply_cm,
1527 vlan_primary_rule->egress_vlan_tag,
1528 vlan_primary_rule->ingress_vlan_tag,
1529 vlan_secondary_rule->egress_vlan_tag,
1530 vlan_secondary_rule->ingress_vlan_tag);
1531
1532 if ((msg->rule_flags & SFE_RULE_CREATE_FLAG_USE_FLOW_BOTTOM_INTERFACE) &&
1533 reply_cm->egress_vlan_hdr_cnt > 0) {
1534 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_INSERT_EGRESS_VLAN_TAG;
1535 reply_cm->l2_hdr_size += reply_cm->egress_vlan_hdr_cnt * VLAN_HLEN;
1536 }
1537 }
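	/*
	 * Note that the ingress/egress tags above are swapped relative to the original
	 * direction: a tag stripped on ingress for the flow is inserted on egress for
	 * the reply, and vice versa.
	 */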
1538
Xiaoping Fan978b3772015-05-27 14:15:18 -07001539#ifdef CONFIG_NF_FLOW_COOKIE
1540 reply_cm->flow_cookie = 0;
1541#endif
Zhi Chen8748eb32015-06-18 12:58:48 -07001542#ifdef CONFIG_XFRM
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301543 if (msg->valid_flags & SFE_RULE_CREATE_DIRECTION_VALID) {
1544 reply_cm->flow_accel = msg->direction_rule.return_accel;
1545 } else {
1546 reply_cm->flow_accel = 1;
1547 }
Zhi Chen8748eb32015-06-18 12:58:48 -07001548#endif
Nitin Shettye6ed5b52021-12-27 14:50:11 +05301549
1550 /*
1551	 * The inet6_protocol handler is used only in the decap path,
1552	 * and only for the non-passthrough case.
1553 */
1554 original_cm->proto = NULL;
1555 reply_cm->proto = NULL;
Tian Yang435afc42022-02-02 12:47:32 -08001556 original_cm->top_interface_dev = NULL;
1557 reply_cm->top_interface_dev = NULL;
Nitin Shettye6ed5b52021-12-27 14:50:11 +05301558
1559#ifdef SFE_GRE_TUN_ENABLE
1560 if (!(reply_cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_PASSTHROUGH)) {
1561 rcu_read_lock();
1562 reply_cm->proto = rcu_dereference(inet6_protos[tuple->protocol]);
1563 rcu_read_unlock();
1564
1565 if (unlikely(!reply_cm->proto)) {
Tian Yang435afc42022-02-02 12:47:32 -08001566 this_cpu_inc(si->stats_pcpu->connection_create_failures64);
1567 spin_unlock_bh(&si->lock);
Nitin Shettye6ed5b52021-12-27 14:50:11 +05301568 kfree(reply_cm);
1569 kfree(original_cm);
1570 kfree(c);
1571 dev_put(src_dev);
1572 dev_put(dest_dev);
1573 DEBUG_WARN("sfe: GRE proto handler is not registered\n");
1574 return -EPERM;
1575 }
1576 }
1577#endif
1578
Xiaoping Fan978b3772015-05-27 14:15:18 -07001579 /*
Tian Yangafb03452022-01-13 18:53:13 -08001580	 * The decapsulation path has proto set.
1581	 * This is used to differentiate decap from encap and to call the protocol-specific handler.
1582 */
1583 if (IPPROTO_IPIP == tuple->protocol) {
1584 original_cm->proto = NULL;
1585 rcu_read_lock();
1586 reply_cm->proto = rcu_dereference(inet6_protos[tuple->protocol]);
1587 rcu_read_unlock();
Tian Yang435afc42022-02-02 12:47:32 -08001588 reply_cm->top_interface_dev = dev_get_by_index(&init_net, msg->conn_rule.return_top_interface_num);
1589
1590 if (unlikely(!reply_cm->top_interface_dev)) {
1591 DEBUG_WARN("%px: Unable to find top_interface_dev corresponding to %d\n", msg,
1592 msg->conn_rule.return_top_interface_num);
1593 this_cpu_inc(si->stats_pcpu->connection_create_failures64);
1594 spin_unlock_bh(&si->lock);
1595 kfree(reply_cm);
1596 kfree(original_cm);
1597 kfree(c);
1598 dev_put(src_dev);
1599 dev_put(dest_dev);
1600 return -EINVAL;
1601 }
Tian Yangafb03452022-01-13 18:53:13 -08001602 }
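	/*
	 * For IPPROTO_IPIP the reply direction is the decapsulation direction, so only
	 * reply_cm->proto is set and the encapsulation direction keeps proto as NULL.
	 */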
1603 /*
Ratheesh Kannotha3cf0e02021-12-09 09:44:10 +05301604 * If l2_features are disabled and flow uses l2 features such as macvlan/bridge/pppoe/vlan,
1605 * bottom interfaces are expected to be disabled in the flow rule and always top interfaces
1606 * are used. In such cases, do not use HW csum offload. csum offload is used only when we
1607 * are sending directly to the destination interface that supports it.
1608 */
Nitin Shettye6ed5b52021-12-27 14:50:11 +05301609 if (likely(src_dev->features & NETIF_F_HW_CSUM) && sfe_dev_has_hw_csum(src_dev)) {
Ratheesh Kannotha3cf0e02021-12-09 09:44:10 +05301610 if ((msg->conn_rule.flow_top_interface_num == msg->conn_rule.flow_interface_num) ||
1611 (msg->rule_flags & SFE_RULE_CREATE_FLAG_USE_FLOW_BOTTOM_INTERFACE)) {
Ratheesh Kannoth48445532022-02-07 16:19:00 +05301612 /*
1613			 * Don't enable CSUM offload
1614 */
1615#if 0
Ratheesh Kannotha3cf0e02021-12-09 09:44:10 +05301616 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_CSUM_OFFLOAD;
Ratheesh Kannoth48445532022-02-07 16:19:00 +05301617#endif
Ratheesh Kannotha3cf0e02021-12-09 09:44:10 +05301618 }
1619 }
1620
Ratheesh Kannoth5dee3772022-01-18 11:27:14 +05301621 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_SRC_INTERFACE_CHECK) {
1622 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK;
1623 }
1624
Ratheesh Kannotha3cf0e02021-12-09 09:44:10 +05301625 /*
Ken Zhubbf49652021-09-12 15:33:09 -07001626 * For the non-arp interface, we don't write L2 HDR.
Guduri Prathyusha79a5fee2021-11-11 17:59:10 +05301627 * Excluding PPPoE from this, since we are now supporting PPPoE encap/decap.
Xiaoping Fan978b3772015-05-27 14:15:18 -07001628 */
Guduri Prathyusha79a5fee2021-11-11 17:59:10 +05301629 if (sfe_ipv6_xmit_eth_type_check(src_dev, reply_cm->flags)) {
Ratheesh Kannoth29140aa2021-10-20 08:25:02 +05301630
1631 /*
1632 * Check whether the rule has configured a specific source MAC address to use.
1633 * This is needed when virtual L3 interfaces such as br-lan, macvlan, vlan are used during egress
1634 */
Ratheesh Kannoth71fc51e2022-01-05 10:02:47 +05301635 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_BRIDGE_FLOW) {
1636 ether_addr_copy((u8 *)reply_cm->xmit_src_mac, (u8 *)msg->conn_rule.return_mac);
Ratheesh Kannoth29140aa2021-10-20 08:25:02 +05301637 } else {
Ratheesh Kannoth71fc51e2022-01-05 10:02:47 +05301638 if ((msg->valid_flags & SFE_RULE_CREATE_SRC_MAC_VALID) &&
1639 (msg->src_mac_rule.mac_valid_flags & SFE_SRC_MAC_FLOW_VALID)) {
1640 ether_addr_copy((u8 *)reply_cm->xmit_src_mac, (u8 *)msg->src_mac_rule.flow_src_mac);
1641 } else {
1642 ether_addr_copy((u8 *)reply_cm->xmit_src_mac, (u8 *)src_dev->dev_addr);
1643 }
Ratheesh Kannoth29140aa2021-10-20 08:25:02 +05301644 }
1645
1646 ether_addr_copy((u8 *)reply_cm->xmit_dest_mac, (u8 *)msg->conn_rule.flow_mac);
1647
Xiaoping Fan978b3772015-05-27 14:15:18 -07001648 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_WRITE_L2_HDR;
Wayne Tanbb7f1782021-12-13 11:16:04 -08001649 reply_cm->l2_hdr_size += ETH_HLEN;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001650
1651 /*
1652 * If our dev writes Ethernet headers then we can write a really fast
1653 * version.
1654 */
1655 if (src_dev->header_ops) {
1656 if (src_dev->header_ops->create == eth_header) {
1657 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_WRITE_FAST_ETH_HDR;
1658 }
1659 }
1660 }
1661
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301662 /*
1663 * No support for NAT in ipv6
1664 */
Xiaoping Fan978b3772015-05-27 14:15:18 -07001665
Xiaoping Fan978b3772015-05-27 14:15:18 -07001666 /*
Xiaoping Fan978b3772015-05-27 14:15:18 -07001667 * Initialize the protocol-specific information that we track.
1668 */
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301669 switch (tuple->protocol) {
Xiaoping Fan978b3772015-05-27 14:15:18 -07001670 case IPPROTO_TCP:
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301671 original_cm->protocol_state.tcp.win_scale = msg->tcp_rule.flow_window_scale;
1672 original_cm->protocol_state.tcp.max_win = msg->tcp_rule.flow_max_window ? msg->tcp_rule.flow_max_window : 1;
1673 original_cm->protocol_state.tcp.end = msg->tcp_rule.flow_end;
1674 original_cm->protocol_state.tcp.max_end = msg->tcp_rule.flow_max_end;
1675 reply_cm->protocol_state.tcp.win_scale = msg->tcp_rule.return_window_scale;
1676 reply_cm->protocol_state.tcp.max_win = msg->tcp_rule.return_max_window ? msg->tcp_rule.return_max_window : 1;
1677 reply_cm->protocol_state.tcp.end = msg->tcp_rule.return_end;
1678 reply_cm->protocol_state.tcp.max_end = msg->tcp_rule.return_max_end;
1679 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_NO_SEQ_CHECK) {
Xiaoping Fan978b3772015-05-27 14:15:18 -07001680 original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_NO_SEQ_CHECK;
1681 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_NO_SEQ_CHECK;
1682 }
1683 break;
1684 }
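	/*
	 * The window scale and sequence-space limits recorded above allow the TCP fast
	 * path to sanity-check segments against the advertised window, unless the rule
	 * requested SFE_RULE_CREATE_FLAG_NO_SEQ_CHECK.
	 */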
1685
Wayne Tanbb7f1782021-12-13 11:16:04 -08001686 /*
1687 * Fill in the ipv6_connection object.
1688 */
1689 c->protocol = tuple->protocol;
1690 c->src_ip[0] = *(struct sfe_ipv6_addr *)tuple->flow_ip;
1691 c->src_ip_xlate[0] = *(struct sfe_ipv6_addr *)tuple->flow_ip;
1692 c->src_port = tuple->flow_ident;
1693 c->src_port_xlate = tuple->flow_ident;
1694 c->original_dev = src_dev;
1695 c->original_match = original_cm;
1696
1697 c->dest_ip[0] = *(struct sfe_ipv6_addr *)tuple->return_ip;
1698 c->dest_ip_xlate[0] = *(struct sfe_ipv6_addr *)tuple->return_ip;
1699 c->dest_port = tuple->return_ident;
1700 c->dest_port_xlate = tuple->return_ident;
1701
1702 c->reply_dev = dest_dev;
1703 c->reply_match = reply_cm;
1704 c->debug_read_seq = 0;
1705 c->last_sync_jiffies = get_jiffies_64();
1706 c->removed = false;
1707
Xiaoping Fan978b3772015-05-27 14:15:18 -07001708 sfe_ipv6_connection_match_compute_translations(original_cm);
1709 sfe_ipv6_connection_match_compute_translations(reply_cm);
1710 sfe_ipv6_insert_connection(si, c);
1711
1712 spin_unlock_bh(&si->lock);
1713
1714 /*
1715 * We have everything we need!
1716 */
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301717 DEBUG_INFO("new connection - p: %d\n"
Tian Yang45f39c82020-10-06 14:07:47 -07001718 " s: %s:%pxM(%pxM):%pI6(%pI6):%u(%u)\n"
1719 " d: %s:%pxM(%pxM):%pI6(%pI6):%u(%u)\n",
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301720 tuple->protocol,
1721 src_dev->name, msg->conn_rule.flow_mac, NULL,
1722 (void *)tuple->flow_ip, (void *)tuple->flow_ip, ntohs(tuple->flow_ident), ntohs(tuple->flow_ident),
1723 dest_dev->name, NULL, msg->conn_rule.return_mac,
1724 (void *)tuple->return_ip, (void *)tuple->return_ip, ntohs(tuple->return_ident), ntohs(tuple->return_ident));
Xiaoping Fan978b3772015-05-27 14:15:18 -07001725
1726 return 0;
1727}
1728
1729/*
1730 * sfe_ipv6_destroy_rule()
1731 * Destroy a forwarding rule.
1732 */
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301733void sfe_ipv6_destroy_rule(struct sfe_ipv6_rule_destroy_msg *msg)
Xiaoping Fan978b3772015-05-27 14:15:18 -07001734{
1735 struct sfe_ipv6 *si = &__si6;
1736 struct sfe_ipv6_connection *c;
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301737 bool ret;
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301738 struct sfe_ipv6_5tuple *tuple = &msg->tuple;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001739
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05301740 this_cpu_inc(si->stats_pcpu->connection_destroy_requests64);
1741
Xiaoping Fan978b3772015-05-27 14:15:18 -07001742 spin_lock_bh(&si->lock);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001743
1744 /*
1745 * Check to see if we have a flow that matches the rule we're trying
1746 * to destroy. If there isn't then we can't destroy it.
1747 */
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301748 c = sfe_ipv6_find_connection(si, tuple->protocol, (struct sfe_ipv6_addr *)tuple->flow_ip, tuple->flow_ident,
1749 (struct sfe_ipv6_addr *)tuple->return_ip, tuple->return_ident);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001750 if (!c) {
Xiaoping Fan978b3772015-05-27 14:15:18 -07001751 spin_unlock_bh(&si->lock);
1752
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05301753 this_cpu_inc(si->stats_pcpu->connection_destroy_misses64);
1754
Xiaoping Fan978b3772015-05-27 14:15:18 -07001755 DEBUG_TRACE("connection does not exist - p: %d, s: %pI6:%u, d: %pI6:%u\n",
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301756 tuple->protocol, tuple->flow_ip, ntohs(tuple->flow_ident),
1757 tuple->return_ip, ntohs(tuple->return_ident));
Xiaoping Fan978b3772015-05-27 14:15:18 -07001758 return;
1759 }
1760
1761 /*
1762 * Remove our connection details from the hash tables.
1763 */
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301764 ret = sfe_ipv6_remove_connection(si, c);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001765 spin_unlock_bh(&si->lock);
1766
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301767 if (ret) {
1768 sfe_ipv6_flush_connection(si, c, SFE_SYNC_REASON_DESTROY);
1769 }
Xiaoping Fan978b3772015-05-27 14:15:18 -07001770
1771 DEBUG_INFO("connection destroyed - p: %d, s: %pI6:%u, d: %pI6:%u\n",
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301772 tuple->protocol, tuple->flow_ip, ntohs(tuple->flow_ident),
1773 tuple->return_ip, ntohs(tuple->return_ident));
Xiaoping Fan978b3772015-05-27 14:15:18 -07001774}
1775
1776/*
1777 * sfe_ipv6_register_sync_rule_callback()
1778 * Register a callback for rule synchronization.
1779 */
1780void sfe_ipv6_register_sync_rule_callback(sfe_sync_rule_callback_t sync_rule_callback)
1781{
1782 struct sfe_ipv6 *si = &__si6;
1783
1784 spin_lock_bh(&si->lock);
1785 rcu_assign_pointer(si->sync_rule_callback, sync_rule_callback);
1786 spin_unlock_bh(&si->lock);
1787}
1788
1789/*
1790 * sfe_ipv6_get_debug_dev()
1791 */
1792static ssize_t sfe_ipv6_get_debug_dev(struct device *dev,
1793 struct device_attribute *attr,
1794 char *buf)
1795{
1796 struct sfe_ipv6 *si = &__si6;
1797 ssize_t count;
1798 int num;
1799
1800 spin_lock_bh(&si->lock);
1801 num = si->debug_dev;
1802 spin_unlock_bh(&si->lock);
1803
1804 count = snprintf(buf, (ssize_t)PAGE_SIZE, "%d\n", num);
1805 return count;
1806}
1807
1808/*
1809 * sfe_ipv6_destroy_all_rules_for_dev()
1810 * Destroy all connections that match a particular device.
1811 *
1812 * If we pass dev as NULL then this destroys all connections.
1813 */
1814void sfe_ipv6_destroy_all_rules_for_dev(struct net_device *dev)
1815{
1816 struct sfe_ipv6 *si = &__si6;
1817 struct sfe_ipv6_connection *c;
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301818 bool ret;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001819
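	/*
	 * Remove at most one matching connection per pass and then rescan from the
	 * head: sfe_ipv6_flush_connection() is called with si->lock dropped, and the
	 * connection list can change while the lock is not held.
	 */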
Xiaoping Fan34586472015-07-03 02:20:35 -07001820another_round:
Xiaoping Fan978b3772015-05-27 14:15:18 -07001821 spin_lock_bh(&si->lock);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001822
Xiaoping Fan34586472015-07-03 02:20:35 -07001823 for (c = si->all_connections_head; c; c = c->all_connections_next) {
Xiaoping Fan978b3772015-05-27 14:15:18 -07001824 /*
Xiaoping Fan34586472015-07-03 02:20:35 -07001825 * Does this connection relate to the device we are destroying?
Xiaoping Fan978b3772015-05-27 14:15:18 -07001826 */
1827 if (!dev
1828 || (dev == c->original_dev)
1829 || (dev == c->reply_dev)) {
Xiaoping Fan34586472015-07-03 02:20:35 -07001830 break;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001831 }
Xiaoping Fan34586472015-07-03 02:20:35 -07001832 }
Xiaoping Fan978b3772015-05-27 14:15:18 -07001833
Xiaoping Fan34586472015-07-03 02:20:35 -07001834 if (c) {
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301835 ret = sfe_ipv6_remove_connection(si, c);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001836 }
1837
1838 spin_unlock_bh(&si->lock);
Xiaoping Fan34586472015-07-03 02:20:35 -07001839
1840 if (c) {
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301841 if (ret) {
1842 sfe_ipv6_flush_connection(si, c, SFE_SYNC_REASON_DESTROY);
1843 }
Xiaoping Fan34586472015-07-03 02:20:35 -07001844 goto another_round;
1845 }
Xiaoping Fan978b3772015-05-27 14:15:18 -07001846}
1847
1848/*
1849 * sfe_ipv6_periodic_sync()
1850 */
Ken Zhu137722d2021-09-23 17:57:36 -07001851static void sfe_ipv6_periodic_sync(struct work_struct *work)
Xiaoping Fan978b3772015-05-27 14:15:18 -07001852{
Ken Zhu137722d2021-09-23 17:57:36 -07001853 struct sfe_ipv6 *si = container_of((struct delayed_work *)work, struct sfe_ipv6, sync_dwork);
Xiaoping Fan6a1672f2016-08-17 19:58:12 -07001854 u64 now_jiffies;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001855 int quota;
1856 sfe_sync_rule_callback_t sync_rule_callback;
Ken Zhu32b95392021-09-03 13:52:04 -07001857 struct sfe_ipv6_connection *c;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001858
1859 now_jiffies = get_jiffies_64();
1860
1861 rcu_read_lock();
1862 sync_rule_callback = rcu_dereference(si->sync_rule_callback);
1863 if (!sync_rule_callback) {
1864 rcu_read_unlock();
1865 goto done;
1866 }
1867
1868 spin_lock_bh(&si->lock);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001869
1870 /*
Ken Zhu32b95392021-09-03 13:52:04 -07001871 * If we have reached the end of the connection list, walk from
1872 * the connection head.
1873 */
1874 c = si->wc_next;
1875 if (unlikely(!c)) {
1876 c = si->all_connections_head;
1877 }
1878 /*
Xiaoping Fan978b3772015-05-27 14:15:18 -07001879 * Get an estimate of the number of connections to parse in this sync.
1880 */
1881 quota = (si->num_connections + 63) / 64;
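	/*
	 * With roughly 1/64th of the connections synced per pass, and the work
	 * rescheduled about every 10ms below, a full sweep of all connections takes
	 * on the order of 640ms.
	 */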
1882
1883 /*
Ken Zhu32b95392021-09-03 13:52:04 -07001884 * Walk the "all connection" list and sync the connection state.
Xiaoping Fan978b3772015-05-27 14:15:18 -07001885 */
Ken Zhu32b95392021-09-03 13:52:04 -07001886 while (likely(c && quota)) {
Xiaoping Fan978b3772015-05-27 14:15:18 -07001887 struct sfe_ipv6_connection_match *cm;
1888 struct sfe_ipv6_connection_match *counter_cm;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001889 struct sfe_connection_sync sis;
1890
Ken Zhu32b95392021-09-03 13:52:04 -07001891 cm = c->original_match;
1892 counter_cm = c->reply_match;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001893
1894 /*
Ken Zhu32b95392021-09-03 13:52:04 -07001895		 * If no packets have been received in either the original or the reply
1896		 * direction, move to the next connection.
Xiaoping Fan978b3772015-05-27 14:15:18 -07001897 */
Ken Zhu32b95392021-09-03 13:52:04 -07001898 if (!atomic_read(&cm->rx_packet_count) && !atomic_read(&counter_cm->rx_packet_count)) {
1899 c = c->all_connections_next;
1900 continue;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001901 }
1902
Ken Zhu32b95392021-09-03 13:52:04 -07001903 quota--;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001904
1905 /*
1906 * Sync the connection state.
1907 */
Xiaoping Fan99cb4c12015-08-21 19:07:32 -07001908 sfe_ipv6_gen_sync_connection(si, c, &sis, SFE_SYNC_REASON_STATS, now_jiffies);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001909
Ken Zhu32b95392021-09-03 13:52:04 -07001910 si->wc_next = c->all_connections_next;
1911
Xiaoping Fan978b3772015-05-27 14:15:18 -07001912 spin_unlock_bh(&si->lock);
1913 sync_rule_callback(&sis);
1914 spin_lock_bh(&si->lock);
Ken Zhu32b95392021-09-03 13:52:04 -07001915
1916 /*
1917		 * c must be read and used within the same lock/unlock window:
1918		 * the connection could be removed while the lock is dropped around the
1919		 * callback, so only re-read wc_next after the lock has been re-acquired.
1920 */
1921 c = si->wc_next;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001922 }
1923
Ken Zhu32b95392021-09-03 13:52:04 -07001924 /*
1925	 * At the end of the loop, leave wc_next pointing at the connection where we stopped.
1926 */
1927 si->wc_next = c;
1928
Xiaoping Fan978b3772015-05-27 14:15:18 -07001929 spin_unlock_bh(&si->lock);
1930 rcu_read_unlock();
1931
1932done:
Ken Zhu137722d2021-09-23 17:57:36 -07001933 schedule_delayed_work_on(si->work_cpu, (struct delayed_work *)work, ((HZ + 99) / 100));
Xiaoping Fan978b3772015-05-27 14:15:18 -07001934}
1935
1936/*
1937 * sfe_ipv6_debug_dev_read_start()
1938 * Generate part of the XML output.
1939 */
1940static bool sfe_ipv6_debug_dev_read_start(struct sfe_ipv6 *si, char *buffer, char *msg, size_t *length,
1941 int *total_read, struct sfe_ipv6_debug_xml_write_state *ws)
1942{
1943 int bytes_read;
1944
Xiaoping Fan34586472015-07-03 02:20:35 -07001945 si->debug_read_seq++;
1946
Xiaoping Fan978b3772015-05-27 14:15:18 -07001947 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE, "<sfe_ipv6>\n");
1948 if (copy_to_user(buffer + *total_read, msg, CHAR_DEV_MSG_SIZE)) {
1949 return false;
1950 }
1951
1952 *length -= bytes_read;
1953 *total_read += bytes_read;
1954
1955 ws->state++;
1956 return true;
1957}
1958
1959/*
1960 * sfe_ipv6_debug_dev_read_connections_start()
1961 * Generate part of the XML output.
1962 */
1963static bool sfe_ipv6_debug_dev_read_connections_start(struct sfe_ipv6 *si, char *buffer, char *msg, size_t *length,
1964 int *total_read, struct sfe_ipv6_debug_xml_write_state *ws)
1965{
1966 int bytes_read;
1967
1968 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE, "\t<connections>\n");
1969 if (copy_to_user(buffer + *total_read, msg, CHAR_DEV_MSG_SIZE)) {
1970 return false;
1971 }
1972
1973 *length -= bytes_read;
1974 *total_read += bytes_read;
1975
1976 ws->state++;
1977 return true;
1978}
1979
1980/*
1981 * sfe_ipv6_debug_dev_read_connections_connection()
1982 * Generate part of the XML output.
1983 */
1984static bool sfe_ipv6_debug_dev_read_connections_connection(struct sfe_ipv6 *si, char *buffer, char *msg, size_t *length,
1985 int *total_read, struct sfe_ipv6_debug_xml_write_state *ws)
1986{
1987 struct sfe_ipv6_connection *c;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001988 struct sfe_ipv6_connection_match *original_cm;
1989 struct sfe_ipv6_connection_match *reply_cm;
1990 int bytes_read;
1991 int protocol;
1992 struct net_device *src_dev;
1993 struct sfe_ipv6_addr src_ip;
1994 struct sfe_ipv6_addr src_ip_xlate;
1995 __be16 src_port;
1996 __be16 src_port_xlate;
Xiaoping Fan6a1672f2016-08-17 19:58:12 -07001997 u64 src_rx_packets;
1998 u64 src_rx_bytes;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001999 struct net_device *dest_dev;
2000 struct sfe_ipv6_addr dest_ip;
2001 struct sfe_ipv6_addr dest_ip_xlate;
2002 __be16 dest_port;
2003 __be16 dest_port_xlate;
Xiaoping Fan6a1672f2016-08-17 19:58:12 -07002004 u64 dest_rx_packets;
2005 u64 dest_rx_bytes;
2006 u64 last_sync_jiffies;
Ken Zhu37040ea2021-09-09 21:11:15 -07002007 u32 src_mark, dest_mark, src_priority, dest_priority, src_dscp, dest_dscp;
Parikshit Guned31a8202022-01-05 22:15:04 +05302008 bool original_cm_sawf_valid, reply_cm_sawf_valid;
2009 u32 flow_service_class, return_service_class;
2010 u32 flow_msduq, return_msduq;
Guduri Prathyushaeb31c902021-11-10 20:18:50 +05302011 u32 packet, byte, original_cm_flags;
2012 u16 pppoe_session_id;
2013 u8 pppoe_remote_mac[ETH_ALEN];
Ken Zhu7e38d1a2021-11-30 17:31:46 -08002014 u32 original_fast_xmit, reply_fast_xmit;
Xiaoping Fan978b3772015-05-27 14:15:18 -07002015#ifdef CONFIG_NF_FLOW_COOKIE
2016 int src_flow_cookie, dst_flow_cookie;
2017#endif
2018
2019 spin_lock_bh(&si->lock);
Xiaoping Fan34586472015-07-03 02:20:35 -07002020
2021 for (c = si->all_connections_head; c; c = c->all_connections_next) {
2022 if (c->debug_read_seq < si->debug_read_seq) {
2023 c->debug_read_seq = si->debug_read_seq;
2024 break;
2025 }
2026 }
Xiaoping Fan978b3772015-05-27 14:15:18 -07002027
2028 /*
Xiaoping Fan34586472015-07-03 02:20:35 -07002029	 * If there are no more connections to report then move to the next state.
Xiaoping Fan978b3772015-05-27 14:15:18 -07002030 */
2031 if (!c) {
Xiaoping Fan978b3772015-05-27 14:15:18 -07002032 spin_unlock_bh(&si->lock);
Xiaoping Fan34586472015-07-03 02:20:35 -07002033 ws->state++;
Xiaoping Fan978b3772015-05-27 14:15:18 -07002034 return true;
2035 }
2036
2037 original_cm = c->original_match;
2038 reply_cm = c->reply_match;
2039
2040 protocol = c->protocol;
2041 src_dev = c->original_dev;
2042 src_ip = c->src_ip[0];
2043 src_ip_xlate = c->src_ip_xlate[0];
2044 src_port = c->src_port;
2045 src_port_xlate = c->src_port_xlate;
Xiaoping Fane1963d42015-08-25 17:06:19 -07002046 src_priority = original_cm->priority;
2047 src_dscp = original_cm->dscp >> SFE_IPV6_DSCP_SHIFT;
Xiaoping Fan978b3772015-05-27 14:15:18 -07002048
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05302049 sfe_ipv6_connection_match_update_summary_stats(original_cm, &packet, &byte);
2050 sfe_ipv6_connection_match_update_summary_stats(reply_cm, &packet, &byte);
Xiaoping Fan978b3772015-05-27 14:15:18 -07002051
2052 src_rx_packets = original_cm->rx_packet_count64;
2053 src_rx_bytes = original_cm->rx_byte_count64;
Ken Zhu37040ea2021-09-09 21:11:15 -07002054 src_mark = original_cm->mark;
Ken Zhu7e38d1a2021-11-30 17:31:46 -08002055 original_fast_xmit = original_cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_FAST_XMIT;
Xiaoping Fan978b3772015-05-27 14:15:18 -07002056 dest_dev = c->reply_dev;
2057 dest_ip = c->dest_ip[0];
2058 dest_ip_xlate = c->dest_ip_xlate[0];
2059 dest_port = c->dest_port;
2060 dest_port_xlate = c->dest_port_xlate;
Xiaoping Fane1963d42015-08-25 17:06:19 -07002061 dest_priority = reply_cm->priority;
2062 dest_dscp = reply_cm->dscp >> SFE_IPV6_DSCP_SHIFT;
Xiaoping Fan978b3772015-05-27 14:15:18 -07002063 dest_rx_packets = reply_cm->rx_packet_count64;
2064 dest_rx_bytes = reply_cm->rx_byte_count64;
2065 last_sync_jiffies = get_jiffies_64() - c->last_sync_jiffies;
Guduri Prathyushaeb31c902021-11-10 20:18:50 +05302066 original_cm_flags = original_cm->flags;
2067 pppoe_session_id = original_cm->pppoe_session_id;
2068 ether_addr_copy(pppoe_remote_mac, original_cm->pppoe_remote_mac);
Ken Zhu37040ea2021-09-09 21:11:15 -07002069 dest_mark = reply_cm->mark;
Ken Zhu7e38d1a2021-11-30 17:31:46 -08002070 reply_fast_xmit = reply_cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_FAST_XMIT;
Parikshit Guned31a8202022-01-05 22:15:04 +05302071 original_cm_sawf_valid = original_cm->sawf_valid;
2072 reply_cm_sawf_valid = reply_cm->sawf_valid;
2073 flow_service_class = SFE_GET_SAWF_SERVICE_CLASS(original_cm->mark);
2074 flow_msduq = SFE_GET_SAWF_MSDUQ(original_cm->mark);
2075 return_service_class = SFE_GET_SAWF_SERVICE_CLASS(reply_cm->mark);
2076 return_msduq = SFE_GET_SAWF_MSDUQ(reply_cm->mark);
2077
Xiaoping Fan978b3772015-05-27 14:15:18 -07002078#ifdef CONFIG_NF_FLOW_COOKIE
2079 src_flow_cookie = original_cm->flow_cookie;
2080 dst_flow_cookie = reply_cm->flow_cookie;
2081#endif
2082 spin_unlock_bh(&si->lock);
2083
2084 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE, "\t\t<connection "
2085 "protocol=\"%u\" "
2086 "src_dev=\"%s\" "
2087 "src_ip=\"%pI6\" src_ip_xlate=\"%pI6\" "
2088 "src_port=\"%u\" src_port_xlate=\"%u\" "
Xiaoping Fane1963d42015-08-25 17:06:19 -07002089 "src_priority=\"%u\" src_dscp=\"%u\" "
Xiaoping Fan978b3772015-05-27 14:15:18 -07002090 "src_rx_pkts=\"%llu\" src_rx_bytes=\"%llu\" "
Ken Zhu37040ea2021-09-09 21:11:15 -07002091 "src_mark=\"%08x\" "
Ken Zhu7e38d1a2021-11-30 17:31:46 -08002092 "src_fast_xmit=\"%s\" "
Xiaoping Fan978b3772015-05-27 14:15:18 -07002093 "dest_dev=\"%s\" "
2094 "dest_ip=\"%pI6\" dest_ip_xlate=\"%pI6\" "
2095 "dest_port=\"%u\" dest_port_xlate=\"%u\" "
Xiaoping Fane1963d42015-08-25 17:06:19 -07002096 "dest_priority=\"%u\" dest_dscp=\"%u\" "
Xiaoping Fan978b3772015-05-27 14:15:18 -07002097 "dest_rx_pkts=\"%llu\" dest_rx_bytes=\"%llu\" "
Ken Zhu37040ea2021-09-09 21:11:15 -07002098 "dest_mark=\"%08x\" "
Ken Zhu7e38d1a2021-11-30 17:31:46 -08002099 "reply_fast_xmit=\"%s\" "
Xiaoping Fan978b3772015-05-27 14:15:18 -07002100#ifdef CONFIG_NF_FLOW_COOKIE
2101 "src_flow_cookie=\"%d\" dst_flow_cookie=\"%d\" "
2102#endif
Ken Zhu37040ea2021-09-09 21:11:15 -07002103 "last_sync=\"%llu\" ",
Xiaoping Fan978b3772015-05-27 14:15:18 -07002104 protocol,
2105 src_dev->name,
2106 &src_ip, &src_ip_xlate,
2107 ntohs(src_port), ntohs(src_port_xlate),
Xiaoping Fane1963d42015-08-25 17:06:19 -07002108 src_priority, src_dscp,
Xiaoping Fan978b3772015-05-27 14:15:18 -07002109 src_rx_packets, src_rx_bytes,
Ken Zhu37040ea2021-09-09 21:11:15 -07002110 src_mark,
Ken Zhu7e38d1a2021-11-30 17:31:46 -08002111 original_fast_xmit ? "Yes" : "No",
Xiaoping Fan978b3772015-05-27 14:15:18 -07002112 dest_dev->name,
2113 &dest_ip, &dest_ip_xlate,
2114 ntohs(dest_port), ntohs(dest_port_xlate),
Xiaoping Fane1963d42015-08-25 17:06:19 -07002115 dest_priority, dest_dscp,
Xiaoping Fan978b3772015-05-27 14:15:18 -07002116 dest_rx_packets, dest_rx_bytes,
Ken Zhu37040ea2021-09-09 21:11:15 -07002117 dest_mark,
Ken Zhu7e38d1a2021-11-30 17:31:46 -08002118 reply_fast_xmit ? "Yes" : "No",
Xiaoping Fan978b3772015-05-27 14:15:18 -07002119#ifdef CONFIG_NF_FLOW_COOKIE
2120 src_flow_cookie, dst_flow_cookie,
2121#endif
Ken Zhu37040ea2021-09-09 21:11:15 -07002122 last_sync_jiffies);
Xiaoping Fan978b3772015-05-27 14:15:18 -07002123
Guduri Prathyushaeb31c902021-11-10 20:18:50 +05302124 if (original_cm_flags &= (SFE_IPV6_CONNECTION_MATCH_FLAG_PPPOE_DECAP | SFE_IPV6_CONNECTION_MATCH_FLAG_PPPOE_ENCAP)) {
Guduri Prathyusha79a5fee2021-11-11 17:59:10 +05302125 bytes_read += snprintf(msg + bytes_read, CHAR_DEV_MSG_SIZE, "pppoe_session_id=\"%u\" pppoe_server_MAC=\"%pM\" ",
Guduri Prathyushaeb31c902021-11-10 20:18:50 +05302126 pppoe_session_id, pppoe_remote_mac);
2127 }
2128
Parikshit Guned31a8202022-01-05 22:15:04 +05302129 if (original_cm_sawf_valid) {
2130 bytes_read += snprintf(msg + bytes_read, CHAR_DEV_MSG_SIZE, "flow_service_class=\"%d\" flow_msduq=\"%d\" ",
2131 flow_service_class, flow_msduq);
2132 }
2133
2134 if (reply_cm_sawf_valid) {
2135 bytes_read += snprintf(msg + bytes_read, CHAR_DEV_MSG_SIZE, "return_service_class=\"%d\" return_msduq=\"%d\" ",
2136 return_service_class, return_msduq);
2137 }
2138
Guduri Prathyushaeb31c902021-11-10 20:18:50 +05302139	bytes_read += snprintf(msg + bytes_read, CHAR_DEV_MSG_SIZE, "/>\n");
2140
Xiaoping Fan978b3772015-05-27 14:15:18 -07002141 if (copy_to_user(buffer + *total_read, msg, CHAR_DEV_MSG_SIZE)) {
2142 return false;
2143 }
2144
2145 *length -= bytes_read;
2146 *total_read += bytes_read;
2147
Xiaoping Fan978b3772015-05-27 14:15:18 -07002148 return true;
2149}
2150
2151/*
2152 * sfe_ipv6_debug_dev_read_connections_end()
2153 * Generate part of the XML output.
2154 */
2155static bool sfe_ipv6_debug_dev_read_connections_end(struct sfe_ipv6 *si, char *buffer, char *msg, size_t *length,
2156 int *total_read, struct sfe_ipv6_debug_xml_write_state *ws)
2157{
2158 int bytes_read;
2159
2160 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE, "\t</connections>\n");
2161 if (copy_to_user(buffer + *total_read, msg, CHAR_DEV_MSG_SIZE)) {
2162 return false;
2163 }
2164
2165 *length -= bytes_read;
2166 *total_read += bytes_read;
2167
2168 ws->state++;
2169 return true;
2170}
2171
2172/*
2173 * sfe_ipv6_debug_dev_read_exceptions_start()
2174 * Generate part of the XML output.
2175 */
2176static bool sfe_ipv6_debug_dev_read_exceptions_start(struct sfe_ipv6 *si, char *buffer, char *msg, size_t *length,
2177 int *total_read, struct sfe_ipv6_debug_xml_write_state *ws)
2178{
2179 int bytes_read;
2180
2181 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE, "\t<exceptions>\n");
2182 if (copy_to_user(buffer + *total_read, msg, CHAR_DEV_MSG_SIZE)) {
2183 return false;
2184 }
2185
2186 *length -= bytes_read;
2187 *total_read += bytes_read;
2188
2189 ws->state++;
2190 return true;
2191}
2192
2193/*
2194 * sfe_ipv6_debug_dev_read_exceptions_exception()
2195 * Generate part of the XML output.
2196 */
2197static bool sfe_ipv6_debug_dev_read_exceptions_exception(struct sfe_ipv6 *si, char *buffer, char *msg, size_t *length,
2198 int *total_read, struct sfe_ipv6_debug_xml_write_state *ws)
2199{
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05302200 int i;
2201 u64 val = 0;
Xiaoping Fan978b3772015-05-27 14:15:18 -07002202
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05302203 for_each_possible_cpu(i) {
2204 const struct sfe_ipv6_stats *s = per_cpu_ptr(si->stats_pcpu, i);
2205 val += s->exception_events64[ws->iter_exception];
2206 }
Xiaoping Fan978b3772015-05-27 14:15:18 -07002207
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05302208 if (val) {
Xiaoping Fan978b3772015-05-27 14:15:18 -07002209 int bytes_read;
2210
2211 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE,
2212 "\t\t<exception name=\"%s\" count=\"%llu\" />\n",
2213 sfe_ipv6_exception_events_string[ws->iter_exception],
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05302214 val);
2215
Xiaoping Fan978b3772015-05-27 14:15:18 -07002216 if (copy_to_user(buffer + *total_read, msg, CHAR_DEV_MSG_SIZE)) {
2217 return false;
2218 }
2219
2220 *length -= bytes_read;
2221 *total_read += bytes_read;
2222 }
2223
2224 ws->iter_exception++;
2225 if (ws->iter_exception >= SFE_IPV6_EXCEPTION_EVENT_LAST) {
2226 ws->iter_exception = 0;
2227 ws->state++;
2228 }
2229
2230 return true;
2231}
2232
2233/*
2234 * sfe_ipv6_debug_dev_read_exceptions_end()
2235 * Generate part of the XML output.
2236 */
2237static bool sfe_ipv6_debug_dev_read_exceptions_end(struct sfe_ipv6 *si, char *buffer, char *msg, size_t *length,
2238 int *total_read, struct sfe_ipv6_debug_xml_write_state *ws)
2239{
2240 int bytes_read;
2241
2242 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE, "\t</exceptions>\n");
2243 if (copy_to_user(buffer + *total_read, msg, CHAR_DEV_MSG_SIZE)) {
2244 return false;
2245 }
2246
2247 *length -= bytes_read;
2248 *total_read += bytes_read;
2249
2250 ws->state++;
2251 return true;
2252}
2253
2254/*
2255 * sfe_ipv6_debug_dev_read_stats()
2256 * Generate part of the XML output.
2257 */
2258static bool sfe_ipv6_debug_dev_read_stats(struct sfe_ipv6 *si, char *buffer, char *msg, size_t *length,
2259 int *total_read, struct sfe_ipv6_debug_xml_write_state *ws)
2260{
2261 int bytes_read;
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05302262 struct sfe_ipv6_stats stats;
2263 unsigned int num_conn;
2264
2265 sfe_ipv6_update_summary_stats(si, &stats);
Xiaoping Fan978b3772015-05-27 14:15:18 -07002266
2267 spin_lock_bh(&si->lock);
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05302268 num_conn = si->num_connections;
Xiaoping Fan978b3772015-05-27 14:15:18 -07002269 spin_unlock_bh(&si->lock);
2270
2271 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE, "\t<stats "
2272 "num_connections=\"%u\" "
Suruchi Suman23a279d2021-11-16 15:13:09 +05302273 "pkts_dropped=\"%llu\" "
Ken Zhu7e38d1a2021-11-30 17:31:46 -08002274 "pkts_fast_xmited=\"%llu\" "
Xiaoping Fan978b3772015-05-27 14:15:18 -07002275 "pkts_forwarded=\"%llu\" pkts_not_forwarded=\"%llu\" "
2276 "create_requests=\"%llu\" create_collisions=\"%llu\" "
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05302277 "create_failures=\"%llu\" "
Xiaoping Fan978b3772015-05-27 14:15:18 -07002278 "destroy_requests=\"%llu\" destroy_misses=\"%llu\" "
2279 "flushes=\"%llu\" "
Guduri Prathyusha647fe3e2021-11-22 19:17:51 +05302280 "hash_hits=\"%llu\" hash_reorders=\"%llu\" "
2281 "pppoe_encap_pkts_fwded=\"%llu\" "
Guduri Prathyusha034d6352022-01-12 16:49:04 +05302282 "pppoe_decap_pkts_fwded=\"%llu\" "
2283 "pppoe_bridge_pkts_fwded=\"%llu\" />\n",
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05302284
2285 num_conn,
Suruchi Suman23a279d2021-11-16 15:13:09 +05302286 stats.packets_dropped64,
Ken Zhu7e38d1a2021-11-30 17:31:46 -08002287 stats.packets_fast_xmited64,
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05302288 stats.packets_forwarded64,
2289 stats.packets_not_forwarded64,
2290 stats.connection_create_requests64,
2291 stats.connection_create_collisions64,
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05302292 stats.connection_create_failures64,
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05302293 stats.connection_destroy_requests64,
2294 stats.connection_destroy_misses64,
2295 stats.connection_flushes64,
2296 stats.connection_match_hash_hits64,
Guduri Prathyusha647fe3e2021-11-22 19:17:51 +05302297 stats.connection_match_hash_reorders64,
2298 stats.pppoe_encap_packets_forwarded64,
Guduri Prathyusha034d6352022-01-12 16:49:04 +05302299 stats.pppoe_decap_packets_forwarded64,
2300 stats.pppoe_bridge_packets_forwarded64);
Xiaoping Fan978b3772015-05-27 14:15:18 -07002301 if (copy_to_user(buffer + *total_read, msg, CHAR_DEV_MSG_SIZE)) {
2302 return false;
2303 }
2304
2305 *length -= bytes_read;
2306 *total_read += bytes_read;
2307
2308 ws->state++;
2309 return true;
2310}
2311
2312/*
2313 * sfe_ipv6_debug_dev_read_end()
2314 * Generate part of the XML output.
2315 */
2316static bool sfe_ipv6_debug_dev_read_end(struct sfe_ipv6 *si, char *buffer, char *msg, size_t *length,
2317 int *total_read, struct sfe_ipv6_debug_xml_write_state *ws)
2318{
2319 int bytes_read;
2320
2321 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE, "</sfe_ipv6>\n");
2322 if (copy_to_user(buffer + *total_read, msg, CHAR_DEV_MSG_SIZE)) {
2323 return false;
2324 }
2325
2326 *length -= bytes_read;
2327 *total_read += bytes_read;
2328
2329 ws->state++;
2330 return true;
2331}
2332
2333/*
2334 * Array of write functions that write various XML elements that correspond to
2335 * our XML output state machine.
2336 */
2337static sfe_ipv6_debug_xml_write_method_t sfe_ipv6_debug_xml_write_methods[SFE_IPV6_DEBUG_XML_STATE_DONE] = {
2338 sfe_ipv6_debug_dev_read_start,
2339 sfe_ipv6_debug_dev_read_connections_start,
2340 sfe_ipv6_debug_dev_read_connections_connection,
2341 sfe_ipv6_debug_dev_read_connections_end,
2342 sfe_ipv6_debug_dev_read_exceptions_start,
2343 sfe_ipv6_debug_dev_read_exceptions_exception,
2344 sfe_ipv6_debug_dev_read_exceptions_end,
2345 sfe_ipv6_debug_dev_read_stats,
2346 sfe_ipv6_debug_dev_read_end,
2347};
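/*
 * Each read() on the debug device runs the method for the current ws->state;
 * a method emits one XML fragment and advances ws->state once its part is
 * complete, so repeated reads walk the table above until
 * SFE_IPV6_DEBUG_XML_STATE_DONE is reached and a full XML document has been
 * produced.
 */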
2348
2349/*
2350 * sfe_ipv6_debug_dev_read()
2351 * Send info to userspace upon read request from user
2352 */
2353static ssize_t sfe_ipv6_debug_dev_read(struct file *filp, char *buffer, size_t length, loff_t *offset)
2354{
2355 char msg[CHAR_DEV_MSG_SIZE];
2356 int total_read = 0;
2357 struct sfe_ipv6_debug_xml_write_state *ws;
2358 struct sfe_ipv6 *si = &__si6;
2359
2360 ws = (struct sfe_ipv6_debug_xml_write_state *)filp->private_data;
2361 while ((ws->state != SFE_IPV6_DEBUG_XML_STATE_DONE) && (length > CHAR_DEV_MSG_SIZE)) {
2362		if (!(sfe_ipv6_debug_xml_write_methods[ws->state])(si, buffer, msg, &length, &total_read, ws)) {
2363			break;
2364		}
2365 }
Xiaoping Fan978b3772015-05-27 14:15:18 -07002366 return total_read;
2367}
2368
2369/*
Xiaoping Fan978b3772015-05-27 14:15:18 -07002370 * sfe_ipv6_debug_dev_open()
2371 */
2372static int sfe_ipv6_debug_dev_open(struct inode *inode, struct file *file)
2373{
2374 struct sfe_ipv6_debug_xml_write_state *ws;
2375
2376 ws = (struct sfe_ipv6_debug_xml_write_state *)file->private_data;
2377 if (ws) {
2378 return 0;
2379 }
2380
2381 ws = kzalloc(sizeof(struct sfe_ipv6_debug_xml_write_state), GFP_KERNEL);
2382 if (!ws) {
2383 return -ENOMEM;
2384 }
2385
2386 ws->state = SFE_IPV6_DEBUG_XML_STATE_START;
2387 file->private_data = ws;
2388
2389 return 0;
2390}
2391
2392/*
2393 * sfe_ipv6_debug_dev_release()
2394 */
2395static int sfe_ipv6_debug_dev_release(struct inode *inode, struct file *file)
2396{
2397 struct sfe_ipv6_debug_xml_write_state *ws;
Xiaoping Fan978b3772015-05-27 14:15:18 -07002398
2399 ws = (struct sfe_ipv6_debug_xml_write_state *)file->private_data;
Xiaoping Fan34586472015-07-03 02:20:35 -07002400 if (ws) {
2401 /*
2402 * We've finished with our output so free the write state.
2403 */
2404 kfree(ws);
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05302405 file->private_data = NULL;
Xiaoping Fan978b3772015-05-27 14:15:18 -07002406 }
2407
Xiaoping Fan978b3772015-05-27 14:15:18 -07002408 return 0;
2409}
2410
2411/*
2412 * File operations used in the debug char device
2413 */
2414static struct file_operations sfe_ipv6_debug_dev_fops = {
2415 .read = sfe_ipv6_debug_dev_read,
Xiaoping Fan978b3772015-05-27 14:15:18 -07002416 .open = sfe_ipv6_debug_dev_open,
2417 .release = sfe_ipv6_debug_dev_release
2418};
2419
2420#ifdef CONFIG_NF_FLOW_COOKIE
2421/*
2422 * sfe_ipv6_register_flow_cookie_cb
2423 * Register a callback that SFE uses to configure the flow cookie for a flow.
2424 *
2425 * A hardware driver that supports flow cookies should register this callback with SFE;
2426 * SFE then calls it to configure the flow cookie for each flow.
2427 * return: 0, success; !=0, fail
2428 */
2429int sfe_ipv6_register_flow_cookie_cb(sfe_ipv6_flow_cookie_set_func_t cb)
2430{
2431 struct sfe_ipv6 *si = &__si6;
2432
2433 BUG_ON(!cb);
2434
2435 if (si->flow_cookie_set_func) {
2436 return -1;
2437 }
2438
2439 rcu_assign_pointer(si->flow_cookie_set_func, cb);
2440 return 0;
2441}
2442
2443/*
2444 * sfe_ipv6_unregister_flow_cookie_cb
2445 * Unregister the function used to configure the flow cookie for a flow.
2446 *
2447 * return: 0, success; !=0, fail
2448 */
2449int sfe_ipv6_unregister_flow_cookie_cb(sfe_ipv6_flow_cookie_set_func_t cb)
2450{
2451 struct sfe_ipv6 *si = &__si6;
2452
2453 RCU_INIT_POINTER(si->flow_cookie_set_func, NULL);
2454 return 0;
2455}
Xiaoping Fan640faf42015-08-28 15:50:55 -07002456
2457/*
2458 * sfe_ipv6_get_flow_cookie()
2459 */
2460static ssize_t sfe_ipv6_get_flow_cookie(struct device *dev,
2461 struct device_attribute *attr,
2462 char *buf)
2463{
2464 struct sfe_ipv6 *si = &__si6;
Xiaoping Fan01c67cc2015-11-09 11:31:57 -08002465 return snprintf(buf, (ssize_t)PAGE_SIZE, "%d\n", si->flow_cookie_enable);
Xiaoping Fan640faf42015-08-28 15:50:55 -07002466}
2467
2468/*
2469 * sfe_ipv6_set_flow_cookie()
2470 */
2471static ssize_t sfe_ipv6_set_flow_cookie(struct device *dev,
2472 struct device_attribute *attr,
2473 const char *buf, size_t size)
2474{
2475 struct sfe_ipv6 *si = &__si6;
Ken Zhu137722d2021-09-23 17:57:36 -07002476	si->flow_cookie_enable = simple_strtol(buf, NULL, 0);
Xiaoping Fan640faf42015-08-28 15:50:55 -07002477
2478 return size;
2479}
2480
2481/*
2482 * sysfs attributes.
2483 */
2484static const struct device_attribute sfe_ipv6_flow_cookie_attr =
Xiaoping Fane70da412016-02-26 16:47:57 -08002485 __ATTR(flow_cookie_enable, S_IWUSR | S_IRUGO, sfe_ipv6_get_flow_cookie, sfe_ipv6_set_flow_cookie);
Xiaoping Fan978b3772015-05-27 14:15:18 -07002486#endif /*CONFIG_NF_FLOW_COOKIE*/
2487
Ken Zhu137722d2021-09-23 17:57:36 -07002488/*
2489 * sfe_ipv6_get_cpu()
2490 */
2491static ssize_t sfe_ipv6_get_cpu(struct device *dev,
2492 struct device_attribute *attr,
2493 char *buf)
2494{
2495 struct sfe_ipv6 *si = &__si6;
2496 return snprintf(buf, (ssize_t)PAGE_SIZE, "%d\n", si->work_cpu);
2497}
2498
2499/*
Wayne Tanbb7f1782021-12-13 11:16:04 -08002500 * sfe_ipv6_set_cpu()
Ken Zhu137722d2021-09-23 17:57:36 -07002501 */
2502static ssize_t sfe_ipv6_set_cpu(struct device *dev,
2503 struct device_attribute *attr,
2504 const char *buf, size_t size)
2505{
2506 struct sfe_ipv6 *si = &__si6;
2507 int work_cpu;
2508
2509 work_cpu = simple_strtol(buf, NULL, 0);
2510	if ((work_cpu >= 0) && (work_cpu < NR_CPUS)) {
2511 si->work_cpu = work_cpu;
2512 } else {
2513		dev_err(dev, "%s is not in valid range [0,%d)", buf, NR_CPUS);
2514 }
2515
2516 return size;
2517}
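/*
 * Example usage from userspace (the path assumes the "sfe_ipv6" kobject created
 * in sfe_ipv6_init() with a NULL parent, i.e. directly under /sys):
 *
 *   echo 2 > /sys/sfe_ipv6/stat_work_cpu    # move the periodic stats sync to CPU 2
 *   cat /sys/sfe_ipv6/stat_work_cpu         # read back the currently selected CPU
 */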
2518/*
2519 * sysfs attributes.
2520 */
2521static const struct device_attribute sfe_ipv6_cpu_attr =
2522 __ATTR(stat_work_cpu, S_IWUSR | S_IRUGO, sfe_ipv6_get_cpu, sfe_ipv6_set_cpu);
2523
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05302524 /*
2525 * sfe_ipv6_hash_init()
2526 * Initialize conn match hash lists
2527 */
2528static void sfe_ipv6_conn_match_hash_init(struct sfe_ipv6 *si, int len)
2529{
2530 struct hlist_head *hash_list = si->hlist_conn_match_hash_head;
2531 int i;
2532
2533 for (i = 0; i < len; i++) {
2534 INIT_HLIST_HEAD(&hash_list[i]);
2535 }
2536}
2537
Suruchi Suman23a279d2021-11-16 15:13:09 +05302538#ifdef SFE_PROCESS_LOCAL_OUT
2539/*
2540 * sfe_ipv6_local_out()
2541 * Called for packets from ip_local_out() - post encapsulation & other packets
2542 */
2543static unsigned int sfe_ipv6_local_out(void *priv,
2544 struct sk_buff *skb,
2545 const struct nf_hook_state *nhs)
2546{
Nitin Shettyc28f8172022-02-04 16:23:46 +05302547 struct sfe_l2_info l2_info = {0};
2548
Suruchi Suman23a279d2021-11-16 15:13:09 +05302549 DEBUG_TRACE("sfe: sfe_ipv6_local_out hook called.\n");
2550
2551 if (likely(skb->skb_iif)) {
Nitin Shettyc28f8172022-02-04 16:23:46 +05302552 return sfe_ipv6_recv(skb->dev, skb, &l2_info, true) ? NF_STOLEN : NF_ACCEPT;
Suruchi Suman23a279d2021-11-16 15:13:09 +05302553 }
2554
2555 return NF_ACCEPT;
2556}
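/*
 * NF_STOLEN above means sfe_ipv6_recv() consumed and forwarded the skb itself,
 * while NF_ACCEPT hands the packet back to the normal IPv6 output path.
 */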
2557
2558/*
2559 * struct nf_hook_ops sfe_ipv6_ops_local_out[]
2560 * Hooks into netfilter local out packet monitoring points.
2561 */
2562static struct nf_hook_ops sfe_ipv6_ops_local_out[] __read_mostly = {
2563
2564 /*
2565 * Local out routing hook is used to monitor packets.
2566 */
2567 {
2568 .hook = sfe_ipv6_local_out,
2569 .pf = PF_INET6,
2570 .hooknum = NF_INET_LOCAL_OUT,
2571 .priority = NF_IP6_PRI_FIRST,
2572 },
2573};
2574#endif
2575
Xiaoping Fan978b3772015-05-27 14:15:18 -07002576/*
2577 * sfe_ipv6_init()
2578 */
Ratheesh Kannoth24fb1db2021-10-20 07:28:06 +05302579int sfe_ipv6_init(void)
Xiaoping Fan978b3772015-05-27 14:15:18 -07002580{
2581 struct sfe_ipv6 *si = &__si6;
2582 int result = -1;
2583
2584 DEBUG_INFO("SFE IPv6 init\n");
2585
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05302586 sfe_ipv6_conn_match_hash_init(si, ARRAY_SIZE(si->hlist_conn_match_hash_head));
2587
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05302588 si->stats_pcpu = alloc_percpu_gfp(struct sfe_ipv6_stats, GFP_KERNEL | __GFP_ZERO);
2589 if (!si->stats_pcpu) {
2590 DEBUG_ERROR("failed to allocate stats memory for sfe_ipv6\n");
2591 goto exit0;
2592 }
2593
Xiaoping Fan978b3772015-05-27 14:15:18 -07002594 /*
Parikshit Guned31a8202022-01-05 22:15:04 +05302595 * Allocate per cpu per service class memory.
2596 */
2597 si->stats_pcpu_psc = alloc_percpu_gfp(struct sfe_ipv6_service_class_stats_db,
2598 GFP_KERNEL | __GFP_ZERO);
2599 if (!si->stats_pcpu_psc) {
2600		DEBUG_ERROR("failed to allocate per cpu per service class stats memory\n");
2601 goto exit1;
2602 }
2603
2604 /*
Xiaoping Fan978b3772015-05-27 14:15:18 -07002605 * Create sys/sfe_ipv6
2606 */
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302607 si->sys_ipv6 = kobject_create_and_add("sfe_ipv6", NULL);
2608 if (!si->sys_ipv6) {
Xiaoping Fan978b3772015-05-27 14:15:18 -07002609 DEBUG_ERROR("failed to register sfe_ipv6\n");
Parikshit Guned31a8202022-01-05 22:15:04 +05302610 goto exit2;
Xiaoping Fan978b3772015-05-27 14:15:18 -07002611 }
2612
2613 /*
2614 * Create files, one for each parameter supported by this module.
2615 */
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302616 result = sysfs_create_file(si->sys_ipv6, &sfe_ipv6_debug_dev_attr.attr);
Xiaoping Fan978b3772015-05-27 14:15:18 -07002617 if (result) {
2618 DEBUG_ERROR("failed to register debug dev file: %d\n", result);
Parikshit Guned31a8202022-01-05 22:15:04 +05302619 goto exit3;
Xiaoping Fan978b3772015-05-27 14:15:18 -07002620 }
2621
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302622 result = sysfs_create_file(si->sys_ipv6, &sfe_ipv6_cpu_attr.attr);
Ken Zhu137722d2021-09-23 17:57:36 -07002623 if (result) {
2624		DEBUG_ERROR("failed to register stat_work_cpu file: %d\n", result);
Parikshit Guned31a8202022-01-05 22:15:04 +05302625 goto exit4;
Ken Zhu137722d2021-09-23 17:57:36 -07002626 }
2627
Xiaoping Fan640faf42015-08-28 15:50:55 -07002628#ifdef CONFIG_NF_FLOW_COOKIE
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302629 result = sysfs_create_file(si->sys_ipv6, &sfe_ipv6_flow_cookie_attr.attr);
Xiaoping Fan640faf42015-08-28 15:50:55 -07002630 if (result) {
2631 DEBUG_ERROR("failed to register flow cookie enable file: %d\n", result);
Parikshit Guned31a8202022-01-05 22:15:04 +05302632 goto exit5;
Xiaoping Fan640faf42015-08-28 15:50:55 -07002633 }
2634#endif /* CONFIG_NF_FLOW_COOKIE */
2635
Suruchi Suman23a279d2021-11-16 15:13:09 +05302636#ifdef SFE_PROCESS_LOCAL_OUT
2637#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0))
2638 result = nf_register_hooks(sfe_ipv6_ops_local_out, ARRAY_SIZE(sfe_ipv6_ops_local_out));
2639#else
2640 result = nf_register_net_hooks(&init_net, sfe_ipv6_ops_local_out, ARRAY_SIZE(sfe_ipv6_ops_local_out));
2641#endif
2643	if (result < 0) {
2644		DEBUG_ERROR("can't register nf local out hook: %d\n", result);
Parikshit Guned31a8202022-01-05 22:15:04 +05302645		goto exit6;
Suruchi Suman23a279d2021-11-16 15:13:09 +05302646	} else {
2647		DEBUG_INFO("Register nf local out hook success: %d\n", result);
2648	}
#endif
2649
Xiaoping Fan978b3772015-05-27 14:15:18 -07002650 /*
2651 * Register our debug char device.
2652 */
2653 result = register_chrdev(0, "sfe_ipv6", &sfe_ipv6_debug_dev_fops);
2654 if (result < 0) {
2655 DEBUG_ERROR("Failed to register chrdev: %d\n", result);
Parikshit Guned31a8202022-01-05 22:15:04 +05302656 goto exit7;
Xiaoping Fan978b3772015-05-27 14:15:18 -07002657 }
2658
2659 si->debug_dev = result;
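	/*
	 * By default the periodic stats work is not pinned to any CPU; it can
	 * be pinned later through the stat_work_cpu sysfs file above.
	 */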
Ken Zhu137722d2021-09-23 17:57:36 -07002660 si->work_cpu = WORK_CPU_UNBOUND;
Xiaoping Fan978b3772015-05-27 14:15:18 -07002661
2662 /*
Ken Zhu137722d2021-09-23 17:57:36 -07002663 * Create work to handle periodic statistics.
Xiaoping Fan978b3772015-05-27 14:15:18 -07002664 */
Ken Zhu137722d2021-09-23 17:57:36 -07002665 INIT_DELAYED_WORK(&(si->sync_dwork), sfe_ipv6_periodic_sync);
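	/*
	 * Schedule the first sync roughly 10ms from now: (HZ + 99) / 100 is one
	 * hundredth of a second in jiffies, rounded up to at least one jiffy.
	 */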
2666 schedule_delayed_work_on(si->work_cpu, &(si->sync_dwork), ((HZ + 99) / 100));
Xiaoping Fan978b3772015-05-27 14:15:18 -07002667 spin_lock_init(&si->lock);
2668
2669 return 0;
2670
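/*
 * Error unwind: each label below releases, in reverse order, whatever was
 * successfully set up before the failure.
 */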
Parikshit Guned31a8202022-01-05 22:15:04 +05302671exit7:
Suruchi Suman23a279d2021-11-16 15:13:09 +05302672#ifdef SFE_PROCESS_LOCAL_OUT
2673#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0))
2674 DEBUG_TRACE("sfe: Unregister local out hook\n");
2675 nf_unregister_hooks(sfe_ipv6_ops_local_out, ARRAY_SIZE(sfe_ipv6_ops_local_out));
2676#else
2677 DEBUG_TRACE("sfe: Unregister local out hook\n");
2678 nf_unregister_net_hooks(&init_net, sfe_ipv6_ops_local_out, ARRAY_SIZE(sfe_ipv6_ops_local_out));
2679#endif
2680#endif
2681
Parikshit Guned31a8202022-01-05 22:15:04 +05302682exit6:
Xiaoping Fan640faf42015-08-28 15:50:55 -07002683#ifdef CONFIG_NF_FLOW_COOKIE
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302684 sysfs_remove_file(si->sys_ipv6, &sfe_ipv6_flow_cookie_attr.attr);
Xiaoping Fan640faf42015-08-28 15:50:55 -07002685
Parikshit Guned31a8202022-01-05 22:15:04 +05302686exit5:
Xiaoping Fan640faf42015-08-28 15:50:55 -07002687#endif /* CONFIG_NF_FLOW_COOKIE */
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302688 sysfs_remove_file(si->sys_ipv6, &sfe_ipv6_cpu_attr.attr);
Suruchi Suman23a279d2021-11-16 15:13:09 +05302689
Parikshit Guned31a8202022-01-05 22:15:04 +05302690exit4:
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302691 sysfs_remove_file(si->sys_ipv6, &sfe_ipv6_debug_dev_attr.attr);
Xiaoping Fan978b3772015-05-27 14:15:18 -07002692
Parikshit Guned31a8202022-01-05 22:15:04 +05302693exit3:
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302694 kobject_put(si->sys_ipv6);
Xiaoping Fan978b3772015-05-27 14:15:18 -07002695
Parikshit Guned31a8202022-01-05 22:15:04 +05302696exit2:
2697 free_percpu(si->stats_pcpu_psc);
2698
Xiaoping Fan978b3772015-05-27 14:15:18 -07002699exit1:
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05302700 free_percpu(si->stats_pcpu);
2701
2702exit0:
Xiaoping Fan978b3772015-05-27 14:15:18 -07002703 return result;
2704}
2705
2706/*
2707 * sfe_ipv6_exit()
2708 */
Ratheesh Kannoth24fb1db2021-10-20 07:28:06 +05302709void sfe_ipv6_exit(void)
Xiaoping Fan978b3772015-05-27 14:15:18 -07002710{
2711 struct sfe_ipv6 *si = &__si6;
2712
2713 DEBUG_INFO("SFE IPv6 exit\n");
2714
2715 /*
2716 * Destroy all connections.
2717 */
2718 sfe_ipv6_destroy_all_rules_for_dev(NULL);
2719
Ken Zhu137722d2021-09-23 17:57:36 -07002720	cancel_delayed_work_sync(&si->sync_dwork);
Xiaoping Fan978b3772015-05-27 14:15:18 -07002721
2722 unregister_chrdev(si->debug_dev, "sfe_ipv6");
2723
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05302724 free_percpu(si->stats_pcpu);
Parikshit Guned31a8202022-01-05 22:15:04 +05302725 free_percpu(si->stats_pcpu_psc);
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05302726
Suruchi Suman23a279d2021-11-16 15:13:09 +05302727#ifdef SFE_PROCESS_LOCAL_OUT
2728#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0))
2729 DEBUG_TRACE("sfe: Unregister local out hook\n");
2730 nf_unregister_hooks(sfe_ipv6_ops_local_out, ARRAY_SIZE(sfe_ipv6_ops_local_out));
2731#else
2732 DEBUG_TRACE("sfe: Unregister local out hook\n");
2733 nf_unregister_net_hooks(&init_net, sfe_ipv6_ops_local_out, ARRAY_SIZE(sfe_ipv6_ops_local_out));
2734#endif
2735#endif
2736
Xiaoping Fan640faf42015-08-28 15:50:55 -07002737#ifdef CONFIG_NF_FLOW_COOKIE
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302738 sysfs_remove_file(si->sys_ipv6, &sfe_ipv6_flow_cookie_attr.attr);
Xiaoping Fan640faf42015-08-28 15:50:55 -07002739#endif /* CONFIG_NF_FLOW_COOKIE */
Ken Zhu137722d2021-09-23 17:57:36 -07002740
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302741 sysfs_remove_file(si->sys_ipv6, &sfe_ipv6_cpu_attr.attr);
Ken Zhu137722d2021-09-23 17:57:36 -07002742
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302743 sysfs_remove_file(si->sys_ipv6, &sfe_ipv6_debug_dev_attr.attr);
Xiaoping Fan978b3772015-05-27 14:15:18 -07002744
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302745 kobject_put(si->sys_ipv6);
Xiaoping Fan978b3772015-05-27 14:15:18 -07002746}
2747
Xiaoping Fan978b3772015-05-27 14:15:18 -07002748#ifdef CONFIG_NF_FLOW_COOKIE
2749EXPORT_SYMBOL(sfe_ipv6_register_flow_cookie_cb);
2750EXPORT_SYMBOL(sfe_ipv6_unregister_flow_cookie_cb);
2751#endif