/*
 * sfe_ipv6.c
 *	Shortcut forwarding engine - IPv6 support.
 *
 * Copyright (c) 2015-2016, 2019-2020, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/module.h>
#include <linux/sysfs.h>
#include <linux/skbuff.h>
#include <linux/icmp.h>
#include <net/tcp.h>
#include <linux/etherdevice.h>
#include <linux/version.h>
#include <net/udp.h>
#include <net/vxlan.h>
#include <linux/refcount.h>
#include <linux/netfilter.h>
#include <linux/inetdevice.h>
#include <linux/netfilter_ipv6.h>
#include <net/protocol.h>
#include <net/addrconf.h>
#include <net/gre.h>

#include "sfe_debug.h"
#include "sfe_api.h"
#include "sfe.h"
#include "sfe_flow_cookie.h"
#include "sfe_ipv6.h"
#include "sfe_ipv6_udp.h"
#include "sfe_ipv6_tcp.h"
#include "sfe_ipv6_icmp.h"
#include "sfe_pppoe.h"
#include "sfe_ipv6_tunipip6.h"
#include "sfe_ipv6_gre.h"

#define sfe_ipv6_addr_copy(src, dest) memcpy((void *)(dest), (void *)(src), 16)

static char *sfe_ipv6_exception_events_string[SFE_IPV6_EXCEPTION_EVENT_LAST] = {
	"UDP_HEADER_INCOMPLETE",
	"UDP_NO_CONNECTION",
	"UDP_IP_OPTIONS_OR_INITIAL_FRAGMENT",
	"UDP_SMALL_TTL",
	"UDP_NEEDS_FRAGMENTATION",
	"TCP_HEADER_INCOMPLETE",
	"TCP_NO_CONNECTION_SLOW_FLAGS",
	"TCP_NO_CONNECTION_FAST_FLAGS",
	"TCP_IP_OPTIONS_OR_INITIAL_FRAGMENT",
	"TCP_SMALL_TTL",
	"TCP_NEEDS_FRAGMENTATION",
	"TCP_FLAGS",
	"TCP_SEQ_EXCEEDS_RIGHT_EDGE",
	"TCP_SMALL_DATA_OFFS",
	"TCP_BAD_SACK",
	"TCP_BIG_DATA_OFFS",
	"TCP_SEQ_BEFORE_LEFT_EDGE",
	"TCP_ACK_EXCEEDS_RIGHT_EDGE",
	"TCP_ACK_BEFORE_LEFT_EDGE",
	"ICMP_HEADER_INCOMPLETE",
	"ICMP_UNHANDLED_TYPE",
	"ICMP_IPV6_HEADER_INCOMPLETE",
	"ICMP_IPV6_NON_V6",
	"ICMP_IPV6_IP_OPTIONS_INCOMPLETE",
	"ICMP_IPV6_UDP_HEADER_INCOMPLETE",
	"ICMP_IPV6_TCP_HEADER_INCOMPLETE",
	"ICMP_IPV6_UNHANDLED_PROTOCOL",
	"ICMP_NO_CONNECTION",
	"ICMP_FLUSHED_CONNECTION",
	"HEADER_INCOMPLETE",
	"BAD_TOTAL_LENGTH",
	"NON_V6",
	"NON_INITIAL_FRAGMENT",
	"DATAGRAM_INCOMPLETE",
	"IP_OPTIONS_INCOMPLETE",
	"UNHANDLED_PROTOCOL",
	"FLOW_COOKIE_ADD_FAIL",
	"NO_HEADROOM",
	"INVALID_PPPOE_SESSION",
	"INCORRECT_PPPOE_PARSING",
	"PPPOE_NOT_SET_IN_CME",
	"INGRESS_VLAN_TAG_MISMATCH",
	"INVALID_SOURCE_INTERFACE",
	"TUNIPIP6_HEADER_INCOMPLETE",
	"TUNIPIP6_NO_CONNECTION",
	"TUNIPIP6_IP_OPTIONS_OR_INITIAL_FRAGMENT",
	"TUNIPIP6_SMALL_TTL",
	"TUNIPIP6_NEEDS_FRAGMENTATION",
	"TUNIPIP6_SYNC_ON_FIND",
	"GRE_HEADER_INCOMPLETE",
	"GRE_NO_CONNECTION",
	"GRE_IP_OPTIONS_OR_INITIAL_FRAGMENT",
	"GRE_SMALL_TTL",
	"GRE_NEEDS_FRAGMENTATION"
};

static struct sfe_ipv6 __si6;

/*
 * sfe_ipv6_get_debug_dev()
 */
static ssize_t sfe_ipv6_get_debug_dev(struct device *dev, struct device_attribute *attr, char *buf);

/*
 * sysfs attributes.
 */
static const struct device_attribute sfe_ipv6_debug_dev_attr =
	__ATTR(debug_dev, S_IWUSR | S_IRUGO, sfe_ipv6_get_debug_dev, NULL);

/*
 * sfe_ipv6_get_connection_match_hash()
 *	Generate the hash used in connection match lookups.
 */
static inline unsigned int sfe_ipv6_get_connection_match_hash(struct net_device *dev, u8 protocol,
						struct sfe_ipv6_addr *src_ip, __be16 src_port,
						struct sfe_ipv6_addr *dest_ip, __be16 dest_port)
{
	u32 idx, hash = 0;

	for (idx = 0; idx < 4; idx++) {
		hash ^= src_ip->addr[idx] ^ dest_ip->addr[idx];
	}
	hash = hash ^ protocol ^ ntohs(src_port ^ dest_port);
	return ((hash >> SFE_IPV6_CONNECTION_HASH_SHIFT) ^ hash) & SFE_IPV6_CONNECTION_HASH_MASK;
}

/*
 * sfe_ipv6_find_connection_match_rcu()
 *	Get the IPv6 flow match info that corresponds to a particular 5-tuple.
 */
struct sfe_ipv6_connection_match *
sfe_ipv6_find_connection_match_rcu(struct sfe_ipv6 *si, struct net_device *dev, u8 protocol,
					struct sfe_ipv6_addr *src_ip, __be16 src_port,
					struct sfe_ipv6_addr *dest_ip, __be16 dest_port)
{
	struct sfe_ipv6_connection_match *cm = NULL;
	unsigned int conn_match_idx;
	struct hlist_head *lhead;
	WARN_ON_ONCE(!rcu_read_lock_held());

	conn_match_idx = sfe_ipv6_get_connection_match_hash(dev, protocol, src_ip, src_port, dest_ip, dest_port);

	lhead = &si->hlist_conn_match_hash_head[conn_match_idx];

	/*
	 * Hopefully the first entry is the one we want.
	 */
	hlist_for_each_entry_rcu(cm, lhead, hnode) {
		if ((cm->match_dest_port != dest_port) ||
		    (!sfe_ipv6_addr_equal(cm->match_src_ip, src_ip)) ||
		    (!sfe_ipv6_addr_equal(cm->match_dest_ip, dest_ip)) ||
		    (cm->match_protocol != protocol) ||
		    (cm->match_dev != dev)) {
			continue;
		}

		this_cpu_inc(si->stats_pcpu->connection_match_hash_hits64);

		break;

	}

	return cm;
}
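
/*
 * Illustrative sketch only (not compiled in): a minimal caller pattern for the
 * RCU-protected lookup above. The local variable names are hypothetical; the
 * one hard requirement, enforced by the WARN_ON_ONCE() above, is that the
 * lookup and any use of the returned entry stay inside one RCU read-side
 * critical section.
 */
#if 0
	rcu_read_lock();
	cm = sfe_ipv6_find_connection_match_rcu(si, dev, IPPROTO_TCP,
						src_ip, src_port, dest_ip, dest_port);
	if (cm) {
		/* Use cm here, before rcu_read_unlock() is called. */
	}
	rcu_read_unlock();
#endif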

/*
 * sfe_ipv6_connection_match_update_summary_stats()
 *	Update the summary stats for a connection match entry.
 */
static inline void sfe_ipv6_connection_match_update_summary_stats(struct sfe_ipv6_connection_match *cm,
						u32 *packets, u32 *bytes)

{
	u32 packet_count, byte_count;

	packet_count = atomic_read(&cm->rx_packet_count);
	cm->rx_packet_count64 += packet_count;
	atomic_sub(packet_count, &cm->rx_packet_count);

	byte_count = atomic_read(&cm->rx_byte_count);
	cm->rx_byte_count64 += byte_count;
	atomic_sub(byte_count, &cm->rx_byte_count);

	*packets = packet_count;
	*bytes = byte_count;
}

/*
 * sfe_ipv6_connection_match_compute_translations()
 *	Compute port and address translations for a connection match entry.
 */
static void sfe_ipv6_connection_match_compute_translations(struct sfe_ipv6_connection_match *cm)
{
	u32 diff[9];
	u32 *idx_32;
	u16 *idx_16;

	/*
	 * Before we insert the entry look to see if this is tagged as doing address
	 * translations. If it is then work out the adjustment that we need to apply
	 * to the transport checksum.
	 */
	if (cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_XLATE_SRC) {
		u32 adj = 0;
		u32 carry = 0;

		/*
		 * Precompute an incremental checksum adjustment so we can
		 * edit packets in this stream very quickly. The algorithm is from RFC1624.
		 */
		idx_32 = diff;
		*(idx_32++) = cm->match_src_ip[0].addr[0];
		*(idx_32++) = cm->match_src_ip[0].addr[1];
		*(idx_32++) = cm->match_src_ip[0].addr[2];
		*(idx_32++) = cm->match_src_ip[0].addr[3];

		idx_16 = (u16 *)idx_32;
		*(idx_16++) = cm->match_src_port;
		*(idx_16++) = ~cm->xlate_src_port;
		idx_32 = (u32 *)idx_16;

		*(idx_32++) = ~cm->xlate_src_ip[0].addr[0];
		*(idx_32++) = ~cm->xlate_src_ip[0].addr[1];
		*(idx_32++) = ~cm->xlate_src_ip[0].addr[2];
		*(idx_32++) = ~cm->xlate_src_ip[0].addr[3];

		/*
		 * When we compute this fold it down to a 16-bit offset
		 * as that way we can avoid having to do a double
		 * folding of the twos-complement result because the
		 * addition of 2 16-bit values cannot cause a double
		 * wrap-around!
		 */
		for (idx_32 = diff; idx_32 < diff + 9; idx_32++) {
			u32 w = *idx_32;
			adj += carry;
			adj += w;
			carry = (w > adj);
		}
		adj += carry;
		adj = (adj & 0xffff) + (adj >> 16);
		adj = (adj & 0xffff) + (adj >> 16);
		cm->xlate_src_csum_adjustment = (u16)adj;
	}

	if (cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_XLATE_DEST) {
		u32 adj = 0;
		u32 carry = 0;

		/*
		 * Precompute an incremental checksum adjustment so we can
		 * edit packets in this stream very quickly. The algorithm is from RFC1624.
		 */
		idx_32 = diff;
		*(idx_32++) = cm->match_dest_ip[0].addr[0];
		*(idx_32++) = cm->match_dest_ip[0].addr[1];
		*(idx_32++) = cm->match_dest_ip[0].addr[2];
		*(idx_32++) = cm->match_dest_ip[0].addr[3];

		idx_16 = (u16 *)idx_32;
		*(idx_16++) = cm->match_dest_port;
		*(idx_16++) = ~cm->xlate_dest_port;
		idx_32 = (u32 *)idx_16;

		*(idx_32++) = ~cm->xlate_dest_ip[0].addr[0];
		*(idx_32++) = ~cm->xlate_dest_ip[0].addr[1];
		*(idx_32++) = ~cm->xlate_dest_ip[0].addr[2];
		*(idx_32++) = ~cm->xlate_dest_ip[0].addr[3];

		/*
		 * When we compute this fold it down to a 16-bit offset
		 * as that way we can avoid having to do a double
		 * folding of the twos-complement result because the
		 * addition of 2 16-bit values cannot cause a double
		 * wrap-around!
		 */
		for (idx_32 = diff; idx_32 < diff + 9; idx_32++) {
			u32 w = *idx_32;
			adj += carry;
			adj += w;
			carry = (w > adj);
		}
		adj += carry;
		adj = (adj & 0xffff) + (adj >> 16);
		adj = (adj & 0xffff) + (adj >> 16);
		cm->xlate_dest_csum_adjustment = (u16)adj;
	}
}
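
/*
 * Illustrative sketch only (not compiled in): how a precomputed RFC1624-style
 * adjustment such as xlate_src_csum_adjustment is typically consumed when a
 * packet is edited. The real consumers are the per-protocol fast paths (the
 * TCP/UDP receive handlers); the variable names below are illustrative.
 */
#if 0
	u16 csum = tcph->check;
	u32 sum = csum + cm->xlate_src_csum_adjustment;

	sum = (sum & 0xffff) + (sum >> 16);	/* Fold the carry back into 16 bits. */
	tcph->check = (u16)sum;
#endif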

/*
 * sfe_ipv6_update_summary_stats()
 *	Update the summary stats.
 */
static void sfe_ipv6_update_summary_stats(struct sfe_ipv6 *si, struct sfe_ipv6_stats *stats)
{
	int i = 0;

	memset(stats, 0, sizeof(*stats));

	for_each_possible_cpu(i) {
		const struct sfe_ipv6_stats *s = per_cpu_ptr(si->stats_pcpu, i);

		stats->connection_create_requests64 += s->connection_create_requests64;
		stats->connection_create_collisions64 += s->connection_create_collisions64;
		stats->connection_create_failures64 += s->connection_create_failures64;
		stats->connection_destroy_requests64 += s->connection_destroy_requests64;
		stats->connection_destroy_misses64 += s->connection_destroy_misses64;
		stats->connection_match_hash_hits64 += s->connection_match_hash_hits64;
		stats->connection_match_hash_reorders64 += s->connection_match_hash_reorders64;
		stats->connection_flushes64 += s->connection_flushes64;
		stats->packets_dropped64 += s->packets_dropped64;
		stats->packets_forwarded64 += s->packets_forwarded64;
		stats->packets_fast_xmited64 += s->packets_fast_xmited64;
		stats->packets_not_forwarded64 += s->packets_not_forwarded64;
		stats->pppoe_encap_packets_forwarded64 += s->pppoe_encap_packets_forwarded64;
		stats->pppoe_decap_packets_forwarded64 += s->pppoe_decap_packets_forwarded64;
		stats->pppoe_bridge_packets_forwarded64 += s->pppoe_bridge_packets_forwarded64;
	}
}

/*
 * sfe_ipv6_insert_connection_match()
 *	Insert a connection match into the hash.
 *
 * On entry we must be holding the lock that protects the hash table.
 */
static inline void sfe_ipv6_insert_connection_match(struct sfe_ipv6 *si,
						    struct sfe_ipv6_connection_match *cm)
{
	unsigned int conn_match_idx
		= sfe_ipv6_get_connection_match_hash(cm->match_dev, cm->match_protocol,
						     cm->match_src_ip, cm->match_src_port,
						     cm->match_dest_ip, cm->match_dest_port);

	lockdep_assert_held(&si->lock);

	hlist_add_head_rcu(&cm->hnode, &si->hlist_conn_match_hash_head[conn_match_idx]);
#ifdef CONFIG_NF_FLOW_COOKIE
	if (!si->flow_cookie_enable || !(cm->flags & (SFE_IPV6_CONNECTION_MATCH_FLAG_XLATE_SRC | SFE_IPV6_CONNECTION_MATCH_FLAG_XLATE_DEST)))
		return;

	/*
	 * Configure hardware to put a flow cookie in packets of this flow,
	 * so that we can accelerate the lookup process when we receive such a packet.
	 */
	for (conn_match_idx = 1; conn_match_idx < SFE_FLOW_COOKIE_SIZE; conn_match_idx++) {
		struct sfe_ipv6_flow_cookie_entry *entry = &si->sfe_flow_cookie_table[conn_match_idx];

		if ((NULL == entry->match) && time_is_before_jiffies(entry->last_clean_time + HZ)) {
			sfe_ipv6_flow_cookie_set_func_t func;

			rcu_read_lock();
			func = rcu_dereference(si->flow_cookie_set_func);
			if (func) {
				if (!func(cm->match_protocol, cm->match_src_ip->addr, cm->match_src_port,
					  cm->match_dest_ip->addr, cm->match_dest_port, conn_match_idx)) {
					entry->match = cm;
					cm->flow_cookie = conn_match_idx;
				} else {
					si->exception_events[SFE_IPV6_EXCEPTION_EVENT_FLOW_COOKIE_ADD_FAIL]++;
				}
			}
			rcu_read_unlock();

			break;
		}
	}
#endif
}

/*
 * sfe_ipv6_remove_connection_match()
 *	Remove a connection match object from the hash.
 */
static inline void sfe_ipv6_remove_connection_match(struct sfe_ipv6 *si, struct sfe_ipv6_connection_match *cm)
{

	lockdep_assert_held(&si->lock);
#ifdef CONFIG_NF_FLOW_COOKIE
	if (si->flow_cookie_enable) {
		/*
		 * Tell hardware that we no longer need a flow cookie in packets of this flow
		 */
		unsigned int conn_match_idx;

		for (conn_match_idx = 1; conn_match_idx < SFE_FLOW_COOKIE_SIZE; conn_match_idx++) {
			struct sfe_ipv6_flow_cookie_entry *entry = &si->sfe_flow_cookie_table[conn_match_idx];

			if (cm == entry->match) {
				sfe_ipv6_flow_cookie_set_func_t func;

				rcu_read_lock();
				func = rcu_dereference(si->flow_cookie_set_func);
				if (func) {
					func(cm->match_protocol, cm->match_src_ip->addr, cm->match_src_port,
					     cm->match_dest_ip->addr, cm->match_dest_port, 0);
				}
				rcu_read_unlock();

				cm->flow_cookie = 0;
				entry->match = NULL;
				entry->last_clean_time = jiffies;
				break;
			}
		}
	}
#endif
	hlist_del_init_rcu(&cm->hnode);

}

/*
 * sfe_ipv6_get_connection_hash()
 *	Generate the hash used in connection lookups.
 */
static inline unsigned int sfe_ipv6_get_connection_hash(u8 protocol, struct sfe_ipv6_addr *src_ip, __be16 src_port,
							struct sfe_ipv6_addr *dest_ip, __be16 dest_port)
{
	u32 idx, hash = 0;

	for (idx = 0; idx < 4; idx++) {
		hash ^= src_ip->addr[idx] ^ dest_ip->addr[idx];
	}
	hash = hash ^ protocol ^ ntohs(src_port ^ dest_port);
	return ((hash >> SFE_IPV6_CONNECTION_HASH_SHIFT) ^ hash) & SFE_IPV6_CONNECTION_HASH_MASK;
}

/*
 * sfe_ipv6_find_connection()
 *	Get the IPv6 connection info that corresponds to a particular 5-tuple.
 *
 * On entry we must be holding the lock that protects the hash table.
 */
static inline struct sfe_ipv6_connection *sfe_ipv6_find_connection(struct sfe_ipv6 *si, u32 protocol,
							struct sfe_ipv6_addr *src_ip, __be16 src_port,
							struct sfe_ipv6_addr *dest_ip, __be16 dest_port)
{
	struct sfe_ipv6_connection *c;

	unsigned int conn_idx = sfe_ipv6_get_connection_hash(protocol, src_ip, src_port, dest_ip, dest_port);

	lockdep_assert_held(&si->lock);
	c = si->conn_hash[conn_idx];

	while (c) {
		if ((c->src_port == src_port)
		    && (c->dest_port == dest_port)
		    && (sfe_ipv6_addr_equal(c->src_ip, src_ip))
		    && (sfe_ipv6_addr_equal(c->dest_ip, dest_ip))
		    && (c->protocol == protocol)) {
			return c;
		}
		c = c->next;
	}

	return NULL;
}

/*
 * sfe_ipv6_insert_connection()
 *	Insert a connection into the hash.
 *
 * On entry we must be holding the lock that protects the hash table.
 */
static void sfe_ipv6_insert_connection(struct sfe_ipv6 *si, struct sfe_ipv6_connection *c)
{
	struct sfe_ipv6_connection **hash_head;
	struct sfe_ipv6_connection *prev_head;
	unsigned int conn_idx;

	lockdep_assert_held(&si->lock);

	/*
	 * Insert entry into the connection hash.
	 */
	conn_idx = sfe_ipv6_get_connection_hash(c->protocol, c->src_ip, c->src_port,
						c->dest_ip, c->dest_port);
	hash_head = &si->conn_hash[conn_idx];
	prev_head = *hash_head;
	c->prev = NULL;
	if (prev_head) {
		prev_head->prev = c;
	}

	c->next = prev_head;
	*hash_head = c;

	/*
	 * Insert entry into the "all connections" list.
	 */
	if (si->all_connections_tail) {
		c->all_connections_prev = si->all_connections_tail;
		si->all_connections_tail->all_connections_next = c;
	} else {
		c->all_connections_prev = NULL;
		si->all_connections_head = c;
	}

	si->all_connections_tail = c;
	c->all_connections_next = NULL;
	si->num_connections++;

	/*
	 * Insert the connection match objects too.
	 */
	sfe_ipv6_insert_connection_match(si, c->original_match);
	sfe_ipv6_insert_connection_match(si, c->reply_match);
}

/*
 * sfe_ipv6_remove_connection()
 *	Remove a sfe_ipv6_connection object from the hash.
 *
 * On entry we must be holding the lock that protects the hash table.
 */
bool sfe_ipv6_remove_connection(struct sfe_ipv6 *si, struct sfe_ipv6_connection *c)
{

	lockdep_assert_held(&si->lock);
	if (c->removed) {
		DEBUG_ERROR("%px: Connection has been removed already\n", c);
		return false;
	}

	/*
	 * Release the reference held on the decap direction top_interface_dev
	 */
	if (c->reply_match->top_interface_dev) {
		dev_put(c->reply_match->top_interface_dev);
	}
	/*
	 * Remove the connection match objects.
	 */
	sfe_ipv6_remove_connection_match(si, c->reply_match);
	sfe_ipv6_remove_connection_match(si, c->original_match);

	/*
	 * Unlink the connection.
	 */
	if (c->prev) {
		c->prev->next = c->next;
	} else {
		unsigned int conn_idx = sfe_ipv6_get_connection_hash(c->protocol, c->src_ip, c->src_port,
								     c->dest_ip, c->dest_port);
		si->conn_hash[conn_idx] = c->next;
	}

	if (c->next) {
		c->next->prev = c->prev;
	}

	/*
	 * Unlink connection from all_connections list
	 */
	if (c->all_connections_prev) {
		c->all_connections_prev->all_connections_next = c->all_connections_next;
	} else {
		si->all_connections_head = c->all_connections_next;
	}

	if (c->all_connections_next) {
		c->all_connections_next->all_connections_prev = c->all_connections_prev;
	} else {
		si->all_connections_tail = c->all_connections_prev;
	}

	/*
	 * If I am the next sync connection, move the sync to my next or head.
	 */
	if (unlikely(si->wc_next == c)) {
		si->wc_next = c->all_connections_next;
	}

	c->removed = true;
	si->num_connections--;
	return true;
}

/*
 * sfe_ipv6_gen_sync_connection()
 *	Sync a connection.
 *
 * On entry to this function we expect that the lock for the connection is either
 * already held (while called from sfe_ipv6_periodic_sync()) or isn't required
 * (while called from sfe_ipv6_flush_sfe_ipv6_connection()).
 */
static void sfe_ipv6_gen_sync_connection(struct sfe_ipv6 *si, struct sfe_ipv6_connection *c,
					 struct sfe_connection_sync *sis, sfe_sync_reason_t reason,
					 u64 now_jiffies)
{
	struct sfe_ipv6_connection_match *original_cm;
	struct sfe_ipv6_connection_match *reply_cm;
	u32 packet_count, byte_count;

	/*
	 * Fill in the update message.
	 */
	sis->is_v6 = 1;
	sis->protocol = c->protocol;
	sis->src_ip.ip6[0] = c->src_ip[0];
	sis->src_ip_xlate.ip6[0] = c->src_ip_xlate[0];
	sis->dest_ip.ip6[0] = c->dest_ip[0];
	sis->dest_ip_xlate.ip6[0] = c->dest_ip_xlate[0];
	sis->src_port = c->src_port;
	sis->src_port_xlate = c->src_port_xlate;
	sis->dest_port = c->dest_port;
	sis->dest_port_xlate = c->dest_port_xlate;

	original_cm = c->original_match;
	reply_cm = c->reply_match;
	sis->src_td_max_window = original_cm->protocol_state.tcp.max_win;
	sis->src_td_end = original_cm->protocol_state.tcp.end;
	sis->src_td_max_end = original_cm->protocol_state.tcp.max_end;
	sis->dest_td_max_window = reply_cm->protocol_state.tcp.max_win;
	sis->dest_td_end = reply_cm->protocol_state.tcp.end;
	sis->dest_td_max_end = reply_cm->protocol_state.tcp.max_end;

	sfe_ipv6_connection_match_update_summary_stats(original_cm, &packet_count, &byte_count);
	sis->src_new_packet_count = packet_count;
	sis->src_new_byte_count = byte_count;

	sfe_ipv6_connection_match_update_summary_stats(reply_cm, &packet_count, &byte_count);
	sis->dest_new_packet_count = packet_count;
	sis->dest_new_byte_count = byte_count;

	sis->src_dev = original_cm->match_dev;
	sis->src_packet_count = original_cm->rx_packet_count64;
	sis->src_byte_count = original_cm->rx_byte_count64;

	sis->dest_dev = reply_cm->match_dev;
	sis->dest_packet_count = reply_cm->rx_packet_count64;
	sis->dest_byte_count = reply_cm->rx_byte_count64;

	sis->reason = reason;

	/*
	 * Get the time increment since our last sync.
	 */
	sis->delta_jiffies = now_jiffies - c->last_sync_jiffies;
	c->last_sync_jiffies = now_jiffies;
}

/*
 * sfe_ipv6_free_sfe_ipv6_connection_rcu()
 *	Called at RCU qs state to free the connection object.
 */
static void sfe_ipv6_free_sfe_ipv6_connection_rcu(struct rcu_head *head)
{
	struct sfe_ipv6_connection *c;
	struct udp_sock *up;
	struct sock *sk;

	/*
	 * We don't need the spin lock as the connection has already been removed from the linked list
	 */
	c = container_of(head, struct sfe_ipv6_connection, rcu);
	BUG_ON(!c->removed);

	DEBUG_TRACE("%px: connection has been deleted\n", c);

	/*
	 * Decrease the refcount taken in function sfe_ipv6_create_rule()
	 * during call of __udp6_lib_lookup()
	 */
	up = c->reply_match->up;
	if (up) {
		sk = (struct sock *)up;
		sock_put(sk);
	}

	/*
	 * Release our hold of the source and dest devices and free the memory
	 * for our connection objects.
	 */
	dev_put(c->original_dev);
	dev_put(c->reply_dev);
	kfree(c->original_match);
	kfree(c->reply_match);
	kfree(c);
}

/*
 * sfe_ipv6_sync_status()
 *	Sync a connection's status to its connection manager.
 *
 * si: the ipv6 context
 * c: the connection to be notified
 * reason: the reason for the sync: flush or destroy
 */
void sfe_ipv6_sync_status(struct sfe_ipv6 *si,
			  struct sfe_ipv6_connection *c,
			  sfe_sync_reason_t reason)
{
	struct sfe_connection_sync sis;
	u64 now_jiffies;
	sfe_sync_rule_callback_t sync_rule_callback;

	rcu_read_lock();
	sync_rule_callback = rcu_dereference(si->sync_rule_callback);

	if (unlikely(!sync_rule_callback)) {
		rcu_read_unlock();
		return;
	}

	/*
	 * Generate a sync message and then sync.
	 */
	now_jiffies = get_jiffies_64();
	sfe_ipv6_gen_sync_connection(si, c, &sis, reason, now_jiffies);
	sync_rule_callback(&sis);

	rcu_read_unlock();
}

/*
 * sfe_ipv6_flush_connection()
 *	Flush a connection and free all associated resources.
 *
 * We need to be called with bottom halves disabled locally as we need to acquire
 * the connection hash lock and release it again. In general we're actually called
 * from within a BH and so we're fine, but we're also called when connections are
 * torn down.
 */
void sfe_ipv6_flush_connection(struct sfe_ipv6 *si,
			       struct sfe_ipv6_connection *c,
			       sfe_sync_reason_t reason)
{
	BUG_ON(!c->removed);

	this_cpu_inc(si->stats_pcpu->connection_flushes64);
	sfe_ipv6_sync_status(si, c, reason);

	/*
	 * Release our hold of the source and dest devices and free the memory
	 * for our connection objects.
	 */
	call_rcu(&c->rcu, sfe_ipv6_free_sfe_ipv6_connection_rcu);
}
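
/*
 * Illustrative sketch only (not compiled in): the kind of teardown sequence
 * used elsewhere in this driver (e.g. the rule-destroy path). A connection is
 * first unlinked under the hash lock and only then flushed; the actual free
 * happens later, in the RCU callback above. Variable names are illustrative.
 */
#if 0
	spin_lock_bh(&si->lock);
	removed = sfe_ipv6_remove_connection(si, c);
	spin_unlock_bh(&si->lock);

	if (removed) {
		sfe_ipv6_flush_connection(si, c, SFE_SYNC_REASON_DESTROY);
	}
#endif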

/*
 * sfe_ipv6_exception_stats_inc()
 *	Increment exception stats.
 */
void sfe_ipv6_exception_stats_inc(struct sfe_ipv6 *si, enum sfe_ipv6_exception_events reason)
{
	struct sfe_ipv6_stats *stats = this_cpu_ptr(si->stats_pcpu);

	stats->exception_events64[reason]++;
	stats->packets_not_forwarded64++;
}

/*
 * sfe_ipv6_is_local_ip()
 *	Return true if the address is a local IP, otherwise return false.
 */
static bool sfe_ipv6_is_local_ip(struct sfe_ipv6 *si, uint8_t *addr)
{
	struct net_device *dev;
	struct in6_addr ip_addr;
	memcpy(ip_addr.s6_addr, addr, 16);

	dev = ipv6_dev_find(&init_net, &ip_addr, 1);
	if (dev) {
		dev_put(dev);
		return true;
	}

	return false;
}

/*
 * sfe_ipv6_recv()
 *	Handle packet receives and forwarding.
 *
 * Returns 1 if the packet is forwarded or 0 if it isn't.
 */
int sfe_ipv6_recv(struct net_device *dev, struct sk_buff *skb, struct sfe_l2_info *l2_info, bool tun_outer)
{
	struct sfe_ipv6 *si = &__si6;
	unsigned int len;
	unsigned int payload_len;
	unsigned int ihl = sizeof(struct ipv6hdr);
	bool sync_on_find = false;
	struct ipv6hdr *iph;
	u8 next_hdr;

	/*
	 * Check that we have space for an IP header and an upper-layer header here.
	 */
	len = skb->len;
	if (!pskb_may_pull(skb, ihl + sizeof(struct sfe_ipv6_ext_hdr))) {

		sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_HEADER_INCOMPLETE);
		DEBUG_TRACE("len: %u is too short\n", len);
		return 0;
	}

	/*
	 * Is our IP version wrong?
	 */
	iph = (struct ipv6hdr *)skb->data;
	if (unlikely(iph->version != 6)) {

		sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_NON_V6);
		DEBUG_TRACE("IP version: %u\n", iph->version);
		return 0;
	}

	/*
	 * Does our datagram fit inside the skb?
	 */
	payload_len = ntohs(iph->payload_len);
	if (unlikely(payload_len > (len - ihl))) {

		sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_DATAGRAM_INCOMPLETE);
		DEBUG_TRACE("payload_len: %u, exceeds len: %u\n", payload_len, (len - (unsigned int)sizeof(struct ipv6hdr)));
		return 0;
	}

	next_hdr = iph->nexthdr;
	while (unlikely(sfe_ipv6_is_ext_hdr(next_hdr))) {
		struct sfe_ipv6_ext_hdr *ext_hdr;
		unsigned int ext_hdr_len;

		ext_hdr = (struct sfe_ipv6_ext_hdr *)(skb->data + ihl);

		ext_hdr_len = ext_hdr->hdr_len;
		ext_hdr_len <<= 3;
		ext_hdr_len += sizeof(struct sfe_ipv6_ext_hdr);
		ihl += ext_hdr_len;
		if (!pskb_may_pull(skb, ihl + sizeof(struct sfe_ipv6_ext_hdr))) {
			sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_HEADER_INCOMPLETE);

			DEBUG_TRACE("extension header %d not completed\n", next_hdr);
			return 0;
		}
		/*
		 * Any packet with extension headers won't be handled in the
		 * fast path; sync its status and exception it to the kernel.
		 */
		sync_on_find = true;
		next_hdr = ext_hdr->next_hdr;
	}

	if (IPPROTO_UDP == next_hdr) {
		return sfe_ipv6_recv_udp(si, skb, dev, len, iph, ihl, sync_on_find, l2_info, tun_outer);
	}

	if (IPPROTO_TCP == next_hdr) {
		return sfe_ipv6_recv_tcp(si, skb, dev, len, iph, ihl, sync_on_find, l2_info);
	}

	if (IPPROTO_ICMPV6 == next_hdr) {
		return sfe_ipv6_recv_icmp(si, skb, dev, len, iph, ihl);
	}

	if (IPPROTO_IPIP == next_hdr) {
		return sfe_ipv6_recv_tunipip6(si, skb, dev, len, iph, ihl, sync_on_find, l2_info, true);
	}

#ifdef SFE_GRE_TUN_ENABLE
	if (IPPROTO_GRE == next_hdr) {
		return sfe_ipv6_recv_gre(si, skb, dev, len, iph, ihl, sync_on_find, l2_info, tun_outer);
	}
#endif

	sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_UNHANDLED_PROTOCOL);
	DEBUG_TRACE("not UDP, TCP or ICMP: %u\n", next_hdr);
	return 0;
}
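
/*
 * Illustrative sketch only (not compiled in): the caller contract for
 * sfe_ipv6_recv(). A return of 1 means the skb was consumed and forwarded by
 * the fast path; 0 means it was not handled and the caller must pass it on to
 * the normal Linux stack. The dispatch shown here is hypothetical - the real
 * hook lives in the common SFE receive path.
 */
#if 0
	if (sfe_ipv6_recv(dev, skb, l2_info, false)) {
		return 1;		/* Consumed by the IPv6 fast path. */
	}

	/* Not handled: let the regular network stack process the skb. */
	return 0;
#endif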

/*
 * sfe_ipv6_update_tcp_state()
 *	update TCP window variables.
 */
static void
sfe_ipv6_update_tcp_state(struct sfe_ipv6_connection *c,
			struct sfe_ipv6_rule_create_msg *msg)
{
	struct sfe_ipv6_connection_match *orig_cm;
	struct sfe_ipv6_connection_match *repl_cm;
	struct sfe_ipv6_tcp_connection_match *orig_tcp;
	struct sfe_ipv6_tcp_connection_match *repl_tcp;

	orig_cm = c->original_match;
	repl_cm = c->reply_match;
	orig_tcp = &orig_cm->protocol_state.tcp;
	repl_tcp = &repl_cm->protocol_state.tcp;

	/* update orig */
	if (orig_tcp->max_win < msg->tcp_rule.flow_max_window) {
		orig_tcp->max_win = msg->tcp_rule.flow_max_window;
	}
	if ((s32)(orig_tcp->end - msg->tcp_rule.flow_end) < 0) {
		orig_tcp->end = msg->tcp_rule.flow_end;
	}
	if ((s32)(orig_tcp->max_end - msg->tcp_rule.flow_max_end) < 0) {
		orig_tcp->max_end = msg->tcp_rule.flow_max_end;
	}

	/* update reply */
	if (repl_tcp->max_win < msg->tcp_rule.return_max_window) {
		repl_tcp->max_win = msg->tcp_rule.return_max_window;
	}
	if ((s32)(repl_tcp->end - msg->tcp_rule.return_end) < 0) {
		repl_tcp->end = msg->tcp_rule.return_end;
	}
	if ((s32)(repl_tcp->max_end - msg->tcp_rule.return_max_end) < 0) {
		repl_tcp->max_end = msg->tcp_rule.return_max_end;
	}

	/* update match flags */
	orig_cm->flags &= ~SFE_IPV6_CONNECTION_MATCH_FLAG_NO_SEQ_CHECK;
	repl_cm->flags &= ~SFE_IPV6_CONNECTION_MATCH_FLAG_NO_SEQ_CHECK;
	if (msg->rule_flags & SFE_RULE_CREATE_FLAG_NO_SEQ_CHECK) {
		orig_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_NO_SEQ_CHECK;
		repl_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_NO_SEQ_CHECK;
	}
}
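
/*
 * Note on the updates above: the window and sequence-edge comparisons only ever
 * widen the tracked TCP state (max_win grows, end/max_end advance in sequence
 * space via the signed wrap-safe subtraction), so a stale rule-create message
 * can never shrink the window that the fast path is already enforcing.
 */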

/*
 * sfe_ipv6_update_protocol_state()
 *	update the protocol-specific state machine.
 */
static void
sfe_ipv6_update_protocol_state(struct sfe_ipv6_connection *c,
			struct sfe_ipv6_rule_create_msg *msg)
{
	switch (msg->tuple.protocol) {
	case IPPROTO_TCP:
		sfe_ipv6_update_tcp_state(c, msg);
		break;
	}
}

/*
 * sfe_ipv6_match_entry_set_vlan()
 */
static void sfe_ipv6_match_entry_set_vlan(
	struct sfe_ipv6_connection_match *cm,
	u32 primary_ingress_vlan_tag,
	u32 primary_egress_vlan_tag,
	u32 secondary_ingress_vlan_tag,
	u32 secondary_egress_vlan_tag)
{
	u16 tpid;
	/*
	 * Prevent stacking header counts when updating.
	 */
	cm->ingress_vlan_hdr_cnt = 0;
	cm->egress_vlan_hdr_cnt = 0;
	memset(cm->ingress_vlan_hdr, 0, sizeof(cm->ingress_vlan_hdr));
	memset(cm->egress_vlan_hdr, 0, sizeof(cm->egress_vlan_hdr));

	/*
	 * vlan_hdr[0] corresponds to outer tag
	 * vlan_hdr[1] corresponds to inner tag
	 * Extract the vlan information (tpid and tci) from rule message
	 */
	if ((primary_ingress_vlan_tag & VLAN_VID_MASK) != SFE_VLAN_ID_NOT_CONFIGURED) {
		tpid = (u16)(primary_ingress_vlan_tag >> 16);
		cm->ingress_vlan_hdr[0].tpid = ntohs(tpid);
		cm->ingress_vlan_hdr[0].tci = (u16)primary_ingress_vlan_tag;
		cm->ingress_vlan_hdr_cnt++;
	}

	if ((secondary_ingress_vlan_tag & VLAN_VID_MASK) != SFE_VLAN_ID_NOT_CONFIGURED) {
		tpid = (u16)(secondary_ingress_vlan_tag >> 16);
		cm->ingress_vlan_hdr[1].tpid = ntohs(tpid);
		cm->ingress_vlan_hdr[1].tci = (u16)secondary_ingress_vlan_tag;
		cm->ingress_vlan_hdr_cnt++;
	}

	if ((primary_egress_vlan_tag & VLAN_VID_MASK) != SFE_VLAN_ID_NOT_CONFIGURED) {
		tpid = (u16)(primary_egress_vlan_tag >> 16);
		cm->egress_vlan_hdr[0].tpid = ntohs(tpid);
		cm->egress_vlan_hdr[0].tci = (u16)primary_egress_vlan_tag;
		cm->egress_vlan_hdr_cnt++;
	}

	if ((secondary_egress_vlan_tag & VLAN_VID_MASK) != SFE_VLAN_ID_NOT_CONFIGURED) {
		tpid = (u16)(secondary_egress_vlan_tag >> 16);
		cm->egress_vlan_hdr[1].tpid = ntohs(tpid);
		cm->egress_vlan_hdr[1].tci = (u16)secondary_egress_vlan_tag;
		cm->egress_vlan_hdr_cnt++;
	}
}
1002/*
Xiaoping Fan978b3772015-05-27 14:15:18 -07001003 * sfe_ipv6_update_rule()
1004 * update forwarding rule after rule is created.
1005 */
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301006void sfe_ipv6_update_rule(struct sfe_ipv6_rule_create_msg *msg)
1007
Xiaoping Fan978b3772015-05-27 14:15:18 -07001008{
1009 struct sfe_ipv6_connection *c;
1010 struct sfe_ipv6 *si = &__si6;
1011
1012 spin_lock_bh(&si->lock);
1013
1014 c = sfe_ipv6_find_connection(si,
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301015 msg->tuple.protocol,
1016 (struct sfe_ipv6_addr *)msg->tuple.flow_ip,
1017 msg->tuple.flow_ident,
1018 (struct sfe_ipv6_addr *)msg->tuple.return_ip,
1019 msg->tuple.return_ident);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001020 if (c != NULL) {
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301021 sfe_ipv6_update_protocol_state(c, msg);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001022 }
1023
1024 spin_unlock_bh(&si->lock);
1025}
1026
1027/*
Guduri Prathyusha79a5fee2021-11-11 17:59:10 +05301028 * sfe_ipv6_xmit_eth_type_check
1029 * Checking if MAC header has to be written.
1030 */
1031static inline bool sfe_ipv6_xmit_eth_type_check(struct net_device *dev, u32 cm_flags)
1032{
1033 if (!(dev->flags & IFF_NOARP)) {
1034 return true;
1035 }
1036
1037 /*
	 * For PPPoE, since we now support PPPoE encapsulation, we write the L2 header.
	 */
	if (cm_flags & SFE_IPV6_CONNECTION_MATCH_FLAG_PPPOE_ENCAP) {
		return true;
	}

	return false;
}

/*
 * sfe_ipv6_create_rule()
 *	Create a forwarding rule.
 */
int sfe_ipv6_create_rule(struct sfe_ipv6_rule_create_msg *msg)
{
	struct sfe_ipv6 *si = &__si6;
	struct sfe_ipv6_connection *c, *old_c;
	struct sfe_ipv6_connection_match *original_cm;
	struct sfe_ipv6_connection_match *reply_cm;
	struct net_device *dest_dev;
	struct net_device *src_dev;
	struct sfe_ipv6_5tuple *tuple = &msg->tuple;
	struct sock *sk;
	struct net *net;
	unsigned int src_if_idx;

	s32 flow_interface_num = msg->conn_rule.flow_top_interface_num;
	s32 return_interface_num = msg->conn_rule.return_top_interface_num;

	if (msg->rule_flags & SFE_RULE_CREATE_FLAG_USE_FLOW_BOTTOM_INTERFACE) {
		flow_interface_num = msg->conn_rule.flow_interface_num;
	}

	if (msg->rule_flags & SFE_RULE_CREATE_FLAG_USE_RETURN_BOTTOM_INTERFACE) {
		return_interface_num = msg->conn_rule.return_interface_num;
	}

	src_dev = dev_get_by_index(&init_net, flow_interface_num);
	if (!src_dev) {
		DEBUG_WARN("%px: Unable to find src_dev corresponding to %d\n", msg,
				flow_interface_num);
		this_cpu_inc(si->stats_pcpu->connection_create_failures64);
		return -EINVAL;
	}

	dest_dev = dev_get_by_index(&init_net, return_interface_num);
	if (!dest_dev) {
		DEBUG_WARN("%px: Unable to find dest_dev corresponding to %d\n", msg,
				return_interface_num);
		this_cpu_inc(si->stats_pcpu->connection_create_failures64);
		dev_put(src_dev);
		return -EINVAL;
	}

	if (unlikely((dest_dev->reg_state != NETREG_REGISTERED) ||
		     (src_dev->reg_state != NETREG_REGISTERED))) {
		DEBUG_WARN("%px: src_dev=%s and dest_dev=%s are unregistered\n", msg,
				src_dev->name, dest_dev->name);
		this_cpu_inc(si->stats_pcpu->connection_create_failures64);
		dev_put(src_dev);
		dev_put(dest_dev);
		return -EINVAL;
	}

	/*
	 * Allocate the various connection tracking objects.
	 */
	c = (struct sfe_ipv6_connection *)kmalloc(sizeof(struct sfe_ipv6_connection), GFP_ATOMIC);
	if (unlikely(!c)) {
		DEBUG_WARN("%px: memory allocation of connection entry failed\n", msg);
		this_cpu_inc(si->stats_pcpu->connection_create_failures64);
		dev_put(src_dev);
		dev_put(dest_dev);
		return -ENOMEM;
	}

	original_cm = (struct sfe_ipv6_connection_match *)kmalloc(sizeof(struct sfe_ipv6_connection_match), GFP_ATOMIC);
	if (unlikely(!original_cm)) {
		this_cpu_inc(si->stats_pcpu->connection_create_failures64);
		DEBUG_WARN("%px: memory allocation of connection match entry failed\n", msg);
		kfree(c);
		dev_put(src_dev);
		dev_put(dest_dev);
		return -ENOMEM;
	}

	reply_cm = (struct sfe_ipv6_connection_match *)kmalloc(sizeof(struct sfe_ipv6_connection_match), GFP_ATOMIC);
	if (unlikely(!reply_cm)) {
		this_cpu_inc(si->stats_pcpu->connection_create_failures64);
		DEBUG_WARN("%px: memory allocation of connection match entry failed\n", msg);
		kfree(original_cm);
		kfree(c);
		dev_put(src_dev);
		dev_put(dest_dev);
		return -ENOMEM;
	}

	this_cpu_inc(si->stats_pcpu->connection_create_requests64);

	spin_lock_bh(&si->lock);

	/*
	 * Check to see if there is already a flow that matches the rule we're
	 * trying to create. If there is then we can't create a new one.
	 */
	old_c = sfe_ipv6_find_connection(si,
					 tuple->protocol,
					 (struct sfe_ipv6_addr *)tuple->flow_ip,
					 tuple->flow_ident,
					 (struct sfe_ipv6_addr *)tuple->return_ip,
					 tuple->return_ident);

	if (old_c != NULL) {
		this_cpu_inc(si->stats_pcpu->connection_create_collisions64);

		/*
		 * If we already have the flow then it's likely that this
		 * request to create the connection rule contains more
		 * up-to-date information. Check and update accordingly.
		 */
		sfe_ipv6_update_protocol_state(old_c, msg);
		spin_unlock_bh(&si->lock);

		kfree(reply_cm);
		kfree(original_cm);
		kfree(c);
		dev_put(src_dev);
		dev_put(dest_dev);

		DEBUG_TRACE("connection already exists - p: %d\n"
			    "  s: %s:%pxM:%pI6:%u, d: %s:%pxM:%pI6:%u\n",
			    tuple->protocol,
			    src_dev->name, msg->conn_rule.flow_mac, tuple->flow_ip, ntohs(tuple->flow_ident),
			    dest_dev->name, msg->conn_rule.return_mac, tuple->return_ip, ntohs(tuple->return_ident));
		return -EADDRINUSE;
	}

	/*
	 * Fill in the "original" direction connection matching object.
	 * Note that the transmit MAC address is "dest_mac_xlate" because
	 * we always know both ends of a connection by their translated
	 * addresses and not their public addresses.
	 */
	original_cm->match_dev = src_dev;
	original_cm->match_protocol = tuple->protocol;
	original_cm->match_src_ip[0] = *(struct sfe_ipv6_addr *)tuple->flow_ip;
	original_cm->match_src_port = netif_is_vxlan(src_dev) ? 0 : tuple->flow_ident;
	original_cm->match_dest_ip[0] = *(struct sfe_ipv6_addr *)tuple->return_ip;
	original_cm->match_dest_port = tuple->return_ident;

	original_cm->xlate_src_ip[0] = *(struct sfe_ipv6_addr *)tuple->flow_ip;
	original_cm->xlate_src_port = tuple->flow_ident;
	original_cm->xlate_dest_ip[0] = *(struct sfe_ipv6_addr *)tuple->return_ip;
	original_cm->xlate_dest_port = tuple->return_ident;

	atomic_set(&original_cm->rx_packet_count, 0);
	original_cm->rx_packet_count64 = 0;
	atomic_set(&original_cm->rx_byte_count, 0);
	original_cm->rx_byte_count64 = 0;
	original_cm->xmit_dev = dest_dev;

	original_cm->xmit_dev_mtu = msg->conn_rule.return_mtu;

	original_cm->connection = c;
	original_cm->counter_match = reply_cm;
	original_cm->l2_hdr_size = 0;
	original_cm->flags = 0;

	/*
	 * Valid in decap direction only
	 */
	RCU_INIT_POINTER(original_cm->up, NULL);

	if (msg->valid_flags & SFE_RULE_CREATE_MARK_VALID) {
		original_cm->mark = msg->mark_rule.flow_mark;
		original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_MARK;
	}
	if (msg->valid_flags & SFE_RULE_CREATE_QOS_VALID) {
		original_cm->priority = msg->qos_rule.flow_qos_tag;
		original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_PRIORITY_REMARK;
	}

	if (msg->valid_flags & SFE_RULE_CREATE_DSCP_MARKING_VALID) {
		original_cm->dscp = msg->dscp_rule.flow_dscp << SFE_IPV6_DSCP_SHIFT;
		original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_DSCP_REMARK;
	}
	if (msg->rule_flags & SFE_RULE_CREATE_FLAG_BRIDGE_FLOW) {
		original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_BRIDGE_FLOW;
	}
	if (msg->rule_flags & SFE_RULE_CREATE_FLAG_FLOW_TRANSMIT_FAST) {
		original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_FAST_XMIT_DEV_ADMISSION;
	}


	/*
	 * Add VLAN rule to original_cm
	 */
	if (msg->valid_flags & SFE_RULE_CREATE_VLAN_VALID) {
		struct sfe_vlan_rule *vlan_primary_rule = &msg->vlan_primary_rule;
		struct sfe_vlan_rule *vlan_secondary_rule = &msg->vlan_secondary_rule;
		sfe_ipv6_match_entry_set_vlan(original_cm,
					      vlan_primary_rule->ingress_vlan_tag,
					      vlan_primary_rule->egress_vlan_tag,
					      vlan_secondary_rule->ingress_vlan_tag,
					      vlan_secondary_rule->egress_vlan_tag);

		if ((msg->rule_flags & SFE_RULE_CREATE_FLAG_USE_RETURN_BOTTOM_INTERFACE) &&
		    original_cm->egress_vlan_hdr_cnt > 0) {
			original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_INSERT_EGRESS_VLAN_TAG;
			original_cm->l2_hdr_size += original_cm->egress_vlan_hdr_cnt * VLAN_HLEN;
		}
	}

	if ((IPPROTO_GRE == tuple->protocol) && !sfe_ipv6_is_local_ip(si, (uint8_t *)original_cm->match_dest_ip)) {
		original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_PASSTHROUGH;
	}

#ifdef CONFIG_NF_FLOW_COOKIE
	original_cm->flow_cookie = 0;
#endif
#ifdef CONFIG_XFRM
	if (msg->valid_flags & SFE_RULE_CREATE_DIRECTION_VALID) {
		original_cm->flow_accel = msg->direction_rule.flow_accel;
	} else {
		original_cm->flow_accel = 1;
	}
#endif
	/*
 1266	 * If l2_features are disabled and the flow uses L2 features such as macvlan/bridge/PPPoE/VLAN,
 1267	 * the bottom interfaces are expected to be disabled in the flow rule and only the top
 1268	 * interfaces are used. In such cases, do not use HW csum offload; csum offload is used only
 1269	 * when we are sending directly to a destination interface that supports it.
1270 */
Nitin Shettye6ed5b52021-12-27 14:50:11 +05301271 if (likely(dest_dev->features & NETIF_F_HW_CSUM) && sfe_dev_has_hw_csum(dest_dev)) {
Ratheesh Kannotha3cf0e02021-12-09 09:44:10 +05301272 if ((msg->conn_rule.return_top_interface_num == msg->conn_rule.return_interface_num) ||
1273 (msg->rule_flags & SFE_RULE_CREATE_FLAG_USE_RETURN_BOTTOM_INTERFACE)) {
Ratheesh Kannoth48445532022-02-07 16:19:00 +05301274 /*
 1275	 * Don't enable CSUM offload
1276 */
1277#if 0
Ratheesh Kannotha3cf0e02021-12-09 09:44:10 +05301278 original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_CSUM_OFFLOAD;
Ratheesh Kannoth48445532022-02-07 16:19:00 +05301279#endif
Ratheesh Kannotha3cf0e02021-12-09 09:44:10 +05301280 }
1281 }
Xiaoping Fan978b3772015-05-27 14:15:18 -07001282
Wayne Tanbb7f1782021-12-13 11:16:04 -08001283 reply_cm->l2_hdr_size = 0;
Guduri Prathyusha647fe3e2021-11-22 19:17:51 +05301284 reply_cm->flags = 0;
1285
1286 /*
1287 * Adding PPPoE parameters to original and reply entries based on the direction where
1288 * PPPoE header is valid in ECM rule.
1289 *
1290 * If PPPoE is valid in flow direction (from interface is PPPoE), then
1291 * original cm will have PPPoE at ingress (strip PPPoE header)
1292 * reply cm will have PPPoE at egress (add PPPoE header)
1293 *
1294 * If PPPoE is valid in return direction (to interface is PPPoE), then
1295 * original cm will have PPPoE at egress (add PPPoE header)
1296 * reply cm will have PPPoE at ingress (strip PPPoE header)
1297 */
1298 if (msg->valid_flags & SFE_RULE_CREATE_PPPOE_DECAP_VALID) {
1299 original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_PPPOE_DECAP;
1300 original_cm->pppoe_session_id = msg->pppoe_rule.flow_pppoe_session_id;
1301 ether_addr_copy(original_cm->pppoe_remote_mac, msg->pppoe_rule.flow_pppoe_remote_mac);
1302
1303 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_PPPOE_ENCAP;
Wayne Tanbb7f1782021-12-13 11:16:04 -08001304 reply_cm->l2_hdr_size += SFE_PPPOE_SESSION_HEADER_SIZE;
Guduri Prathyusha647fe3e2021-11-22 19:17:51 +05301305 reply_cm->pppoe_session_id = msg->pppoe_rule.flow_pppoe_session_id;
1306 ether_addr_copy(reply_cm->pppoe_remote_mac, msg->pppoe_rule.flow_pppoe_remote_mac);
1307 }
1308
1309 if (msg->valid_flags & SFE_RULE_CREATE_PPPOE_ENCAP_VALID) {
1310 original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_PPPOE_ENCAP;
Wayne Tanbb7f1782021-12-13 11:16:04 -08001311 original_cm->l2_hdr_size += SFE_PPPOE_SESSION_HEADER_SIZE;
Guduri Prathyusha647fe3e2021-11-22 19:17:51 +05301312 original_cm->pppoe_session_id = msg->pppoe_rule.return_pppoe_session_id;
1313 ether_addr_copy(original_cm->pppoe_remote_mac, msg->pppoe_rule.return_pppoe_remote_mac);
1314
1315 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_PPPOE_DECAP;
1316 reply_cm->pppoe_session_id = msg->pppoe_rule.return_pppoe_session_id;
1317 ether_addr_copy(reply_cm->pppoe_remote_mac, msg->pppoe_rule.return_pppoe_remote_mac);
1318 }
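	/*
	 * Example: for a flow whose "to" (return) interface is PPPoE, ECM sets
	 * SFE_RULE_CREATE_PPPOE_ENCAP_VALID, so original_cm adds the PPPoE header on
	 * egress (growing l2_hdr_size by SFE_PPPOE_SESSION_HEADER_SIZE) while reply_cm
	 * strips it on ingress. The DECAP_VALID case handled above is the mirror image
	 * of this, for a PPPoE "from" interface.
	 */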
1319
Ratheesh Kannoth5dee3772022-01-18 11:27:14 +05301320 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_SRC_INTERFACE_CHECK) {
1321 original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK;
1322 }
1323
Xiaoping Fan978b3772015-05-27 14:15:18 -07001324 /*
Ken Zhubbf49652021-09-12 15:33:09 -07001325	 * For non-ARP interfaces, we don't write the L2 header.
Guduri Prathyusha79a5fee2021-11-11 17:59:10 +05301326 * Excluding PPPoE from this, since we are now supporting PPPoE encap/decap.
Xiaoping Fan978b3772015-05-27 14:15:18 -07001327 */
Guduri Prathyusha79a5fee2021-11-11 17:59:10 +05301328 if (sfe_ipv6_xmit_eth_type_check(dest_dev, original_cm->flags)) {
Ratheesh Kannoth29140aa2021-10-20 08:25:02 +05301329
1330 /*
1331 * Check whether the rule has configured a specific source MAC address to use.
1332 * This is needed when virtual L3 interfaces such as br-lan, macvlan, vlan are used during egress
1333 */
Ratheesh Kannoth71fc51e2022-01-05 10:02:47 +05301334 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_BRIDGE_FLOW) {
1335 ether_addr_copy((u8 *)original_cm->xmit_src_mac, (u8 *)msg->conn_rule.flow_mac);
Ratheesh Kannoth29140aa2021-10-20 08:25:02 +05301336 } else {
Ratheesh Kannoth71fc51e2022-01-05 10:02:47 +05301337 if ((msg->valid_flags & SFE_RULE_CREATE_SRC_MAC_VALID) &&
1338 (msg->src_mac_rule.mac_valid_flags & SFE_SRC_MAC_RETURN_VALID)) {
1339 ether_addr_copy((u8 *)original_cm->xmit_src_mac, (u8 *)msg->src_mac_rule.return_src_mac);
1340 } else {
1341 ether_addr_copy((u8 *)original_cm->xmit_src_mac, (u8 *)dest_dev->dev_addr);
1342 }
Ratheesh Kannoth29140aa2021-10-20 08:25:02 +05301343 }
1344 ether_addr_copy((u8 *)original_cm->xmit_dest_mac, (u8 *)msg->conn_rule.return_mac);
1345
Xiaoping Fan978b3772015-05-27 14:15:18 -07001346 original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_WRITE_L2_HDR;
Wayne Tanbb7f1782021-12-13 11:16:04 -08001347 original_cm->l2_hdr_size += ETH_HLEN;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001348
1349 /*
1350 * If our dev writes Ethernet headers then we can write a really fast
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301351 * version
Xiaoping Fan978b3772015-05-27 14:15:18 -07001352 */
1353 if (dest_dev->header_ops) {
1354 if (dest_dev->header_ops->create == eth_header) {
1355 original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_WRITE_FAST_ETH_HDR;
1356 }
1357 }
1358 }
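	/*
	 * Sketch of the fast L2 header write that the flag above enables; the real
	 * code lives in the per-protocol transmit paths (e.g. sfe_ipv6_tcp.c), and
	 * "skb" here stands for the packet being forwarded, which is not in scope
	 * in this function.
	 */
#if 0
	{
		struct ethhdr *eth = (struct ethhdr *)__skb_push(skb, ETH_HLEN);

		eth->h_proto = htons(ETH_P_IPV6);
		ether_addr_copy(eth->h_dest, (const u8 *)original_cm->xmit_dest_mac);
		ether_addr_copy(eth->h_source, (const u8 *)original_cm->xmit_src_mac);
	}
#endif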
1359
1360 /*
1361 * Fill in the "reply" direction connection matching object.
1362 */
1363 reply_cm->match_dev = dest_dev;
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301364 reply_cm->match_protocol = tuple->protocol;
1365 reply_cm->match_src_ip[0] = *(struct sfe_ipv6_addr *)tuple->return_ip;
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301366 reply_cm->match_dest_ip[0] = *(struct sfe_ipv6_addr *)tuple->flow_ip;
1367 reply_cm->match_dest_port = tuple->flow_ident;
1368 reply_cm->xlate_src_ip[0] = *(struct sfe_ipv6_addr *)tuple->return_ip;
1369 reply_cm->xlate_src_port = tuple->return_ident;
1370 reply_cm->xlate_dest_ip[0] = *(struct sfe_ipv6_addr *)tuple->flow_ip;
1371 reply_cm->xlate_dest_port = tuple->flow_ident;
1372
Suruchi Suman23a279d2021-11-16 15:13:09 +05301373 /*
1374 * Keep source port as 0 for VxLAN tunnels.
1375 */
1376 if (netif_is_vxlan(src_dev) || netif_is_vxlan(dest_dev)) {
1377 reply_cm->match_src_port = 0;
1378 } else {
1379 reply_cm->match_src_port = tuple->return_ident;
1380 }
1381
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301382 atomic_set(&original_cm->rx_byte_count, 0);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001383 reply_cm->rx_packet_count64 = 0;
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301384 atomic_set(&reply_cm->rx_byte_count, 0);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001385 reply_cm->rx_byte_count64 = 0;
1386 reply_cm->xmit_dev = src_dev;
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301387 reply_cm->xmit_dev_mtu = msg->conn_rule.flow_mtu;
Ratheesh Kannoth29140aa2021-10-20 08:25:02 +05301388
Xiaoping Fan978b3772015-05-27 14:15:18 -07001389 reply_cm->connection = c;
1390 reply_cm->counter_match = original_cm;
Suruchi Suman23a279d2021-11-16 15:13:09 +05301391
Ken Zhu37040ea2021-09-09 21:11:15 -07001392 if (msg->valid_flags & SFE_RULE_CREATE_MARK_VALID) {
1393 reply_cm->mark = msg->mark_rule.return_mark;
1394 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_MARK;
1395 }
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301396 if (msg->valid_flags & SFE_RULE_CREATE_QOS_VALID) {
1397 reply_cm->priority = msg->qos_rule.return_qos_tag;
Xiaoping Fane1963d42015-08-25 17:06:19 -07001398 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_PRIORITY_REMARK;
1399 }
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301400 if (msg->valid_flags & SFE_RULE_CREATE_DSCP_MARKING_VALID) {
1401 reply_cm->dscp = msg->dscp_rule.return_dscp << SFE_IPV6_DSCP_SHIFT;
Xiaoping Fane1963d42015-08-25 17:06:19 -07001402 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_DSCP_REMARK;
1403 }
Ratheesh Kannoth71fc51e2022-01-05 10:02:47 +05301404 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_BRIDGE_FLOW) {
1405 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_BRIDGE_FLOW;
1406 }
Ken Zhu7e38d1a2021-11-30 17:31:46 -08001407 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_RETURN_TRANSMIT_FAST) {
1408 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_FAST_XMIT_DEV_ADMISSION;
1409 }
Ratheesh Kannoth71fc51e2022-01-05 10:02:47 +05301410
Nitin Shettye6ed5b52021-12-27 14:50:11 +05301411 if ((IPPROTO_GRE == tuple->protocol) && !sfe_ipv6_is_local_ip(si, (uint8_t *)reply_cm->match_dest_ip)) {
1412 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_PASSTHROUGH;
1413 }
1414
Suruchi Suman23a279d2021-11-16 15:13:09 +05301415 /*
1416 * Setup UDP Socket if found to be valid for decap.
1417 */
1418 RCU_INIT_POINTER(reply_cm->up, NULL);
1419 net = dev_net(reply_cm->match_dev);
1420 src_if_idx = src_dev->ifindex;
1421
1422 rcu_read_lock();
1423
1424 /*
1425 * Look for the associated sock object.
1426 * __udp6_lib_lookup() holds a reference for this sock object,
1427 * which will be released in sfe_ipv6_flush_connection()
1428 */
1429#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0))
1430 sk = __udp6_lib_lookup(net, (const struct in6_addr *)reply_cm->match_dest_ip,
1431 reply_cm->match_dest_port, (const struct in6_addr *)reply_cm->xlate_src_ip,
1432 reply_cm->xlate_src_port, src_if_idx, &udp_table);
1433#else
1434 sk = __udp6_lib_lookup(net, (const struct in6_addr *)reply_cm->match_dest_ip,
1435 reply_cm->match_dest_port, (const struct in6_addr *)reply_cm->xlate_src_ip,
1436 reply_cm->xlate_src_port, src_if_idx, 0, &udp_table, NULL);
1437#endif
1438 rcu_read_unlock();
1439
1440 /*
1441 * We set the UDP sock pointer as valid only for decap direction.
1442 */
1443 if (sk && udp_sk(sk)->encap_type) {
1444#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0))
1445 if (!atomic_add_unless(&sk->sk_refcnt, 1, 0)) {
1446#else
1447 if (!refcount_inc_not_zero(&sk->sk_refcnt)) {
1448#endif
Tian Yang435afc42022-02-02 12:47:32 -08001449 this_cpu_inc(si->stats_pcpu->connection_create_failures64);
Wayne Tanbb7f1782021-12-13 11:16:04 -08001450 spin_unlock_bh(&si->lock);
Suruchi Suman23a279d2021-11-16 15:13:09 +05301451 kfree(reply_cm);
1452 kfree(original_cm);
1453 kfree(c);
1454
1455 DEBUG_INFO("sfe: unable to take reference for socket p:%d\n", tuple->protocol);
1456 DEBUG_INFO("SK: connection - \n"
1457 " s: %s:%pI6(%pI6):%u(%u)\n"
1458 " d: %s:%pI6(%pI6):%u(%u)\n",
1459 reply_cm->match_dev->name, &reply_cm->match_src_ip, &reply_cm->xlate_src_ip,
1460 ntohs(reply_cm->match_src_port), ntohs(reply_cm->xlate_src_port),
1461 reply_cm->xmit_dev->name, &reply_cm->match_dest_ip, &reply_cm->xlate_dest_ip,
1462 ntohs(reply_cm->match_dest_port), ntohs(reply_cm->xlate_dest_port));
1463
1464 dev_put(src_dev);
1465 dev_put(dest_dev);
1466
1467 return -ESHUTDOWN;
1468 }
1469
1470 rcu_assign_pointer(reply_cm->up, udp_sk(sk));
1471 DEBUG_INFO("Sock lookup success with reply_cm direction(%p)\n", sk);
1472 DEBUG_INFO("SK: connection - \n"
1473 " s: %s:%pI6(%pI6):%u(%u)\n"
1474 " d: %s:%pI6(%pI6):%u(%u)\n",
1475 reply_cm->match_dev->name, &reply_cm->match_src_ip, &reply_cm->xlate_src_ip,
1476 ntohs(reply_cm->match_src_port), ntohs(reply_cm->xlate_src_port),
1477 reply_cm->xmit_dev->name, &reply_cm->match_dest_ip, &reply_cm->xlate_dest_ip,
1478 ntohs(reply_cm->match_dest_port), ntohs(reply_cm->xlate_dest_port));
1479 }
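	/*
	 * Sketch of how the socket captured above is expected to be consumed on the
	 * UDP decap fast path (the real logic is in sfe_ipv6_udp.c): the packet is
	 * handed to the tunnel's encap handler rather than being transmitted
	 * directly. "skb" stands for the packet being processed.
	 */
#if 0
	{
		struct udp_sock *up;

		rcu_read_lock();
		up = rcu_dereference(reply_cm->up);
		if (up && up->encap_rcv) {
			/*
			 * encap_rcv() consumes the skb on success (returns 0).
			 */
			up->encap_rcv((struct sock *)up, skb);
		}
		rcu_read_unlock();
	}
#endif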
1480
Wayne Tanbb7f1782021-12-13 11:16:04 -08001481 /*
1482 * Add VLAN rule to reply_cm
1483 */
1484 if (msg->valid_flags & SFE_RULE_CREATE_VLAN_VALID) {
1485 struct sfe_vlan_rule *vlan_primary_rule = &msg->vlan_primary_rule;
1486 struct sfe_vlan_rule *vlan_secondary_rule = &msg->vlan_secondary_rule;
1487 sfe_ipv6_match_entry_set_vlan(reply_cm,
1488 vlan_primary_rule->egress_vlan_tag,
1489 vlan_primary_rule->ingress_vlan_tag,
1490 vlan_secondary_rule->egress_vlan_tag,
1491 vlan_secondary_rule->ingress_vlan_tag);
1492
1493 if ((msg->rule_flags & SFE_RULE_CREATE_FLAG_USE_FLOW_BOTTOM_INTERFACE) &&
1494 reply_cm->egress_vlan_hdr_cnt > 0) {
1495 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_INSERT_EGRESS_VLAN_TAG;
1496 reply_cm->l2_hdr_size += reply_cm->egress_vlan_hdr_cnt * VLAN_HLEN;
1497 }
1498 }
1499
Xiaoping Fan978b3772015-05-27 14:15:18 -07001500#ifdef CONFIG_NF_FLOW_COOKIE
1501 reply_cm->flow_cookie = 0;
1502#endif
Zhi Chen8748eb32015-06-18 12:58:48 -07001503#ifdef CONFIG_XFRM
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301504 if (msg->valid_flags & SFE_RULE_CREATE_DIRECTION_VALID) {
1505 reply_cm->flow_accel = msg->direction_rule.return_accel;
1506 } else {
1507 reply_cm->flow_accel = 1;
1508 }
Zhi Chen8748eb32015-06-18 12:58:48 -07001509#endif
Nitin Shettye6ed5b52021-12-27 14:50:11 +05301510
1511 /*
 1512	 * The inet6_protocol handler is used only on the decap path,
 1513	 * and only for the non-passthrough case.
1514 */
1515 original_cm->proto = NULL;
1516 reply_cm->proto = NULL;
Tian Yang435afc42022-02-02 12:47:32 -08001517 original_cm->top_interface_dev = NULL;
1518 reply_cm->top_interface_dev = NULL;
Nitin Shettye6ed5b52021-12-27 14:50:11 +05301519
1520#ifdef SFE_GRE_TUN_ENABLE
1521 if (!(reply_cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_PASSTHROUGH)) {
1522 rcu_read_lock();
1523 reply_cm->proto = rcu_dereference(inet6_protos[tuple->protocol]);
1524 rcu_read_unlock();
1525
1526 if (unlikely(!reply_cm->proto)) {
Tian Yang435afc42022-02-02 12:47:32 -08001527 this_cpu_inc(si->stats_pcpu->connection_create_failures64);
1528 spin_unlock_bh(&si->lock);
Nitin Shettye6ed5b52021-12-27 14:50:11 +05301529 kfree(reply_cm);
1530 kfree(original_cm);
1531 kfree(c);
1532 dev_put(src_dev);
1533 dev_put(dest_dev);
1534 DEBUG_WARN("sfe: GRE proto handler is not registered\n");
1535 return -EPERM;
1536 }
1537 }
1538#endif
1539
Xiaoping Fan978b3772015-05-27 14:15:18 -07001540 /*
Tian Yangafb03452022-01-13 18:53:13 -08001541	 * The decapsulation path has proto set.
 1542	 * This is used to differentiate decap from encap and to call the protocol-specific handler.
1543 */
1544 if (IPPROTO_IPIP == tuple->protocol) {
1545 original_cm->proto = NULL;
1546 rcu_read_lock();
1547 reply_cm->proto = rcu_dereference(inet6_protos[tuple->protocol]);
1548 rcu_read_unlock();
Tian Yang435afc42022-02-02 12:47:32 -08001549 reply_cm->top_interface_dev = dev_get_by_index(&init_net, msg->conn_rule.return_top_interface_num);
1550
1551 if (unlikely(!reply_cm->top_interface_dev)) {
1552 DEBUG_WARN("%px: Unable to find top_interface_dev corresponding to %d\n", msg,
1553 msg->conn_rule.return_top_interface_num);
1554 this_cpu_inc(si->stats_pcpu->connection_create_failures64);
1555 spin_unlock_bh(&si->lock);
1556 kfree(reply_cm);
1557 kfree(original_cm);
1558 kfree(c);
1559 dev_put(src_dev);
1560 dev_put(dest_dev);
1561 return -EINVAL;
1562 }
Tian Yangafb03452022-01-13 18:53:13 -08001563 }
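	/*
	 * Sketch of how the proto pointer stored for GRE/IPIP above is expected to be
	 * used on the decap fast path (see sfe_ipv6_gre.c and sfe_ipv6_tunipip6.c for
	 * the real code): the packet is handed back to the registered inet6_protocol
	 * handler. "skb" stands for the packet being processed.
	 */
#if 0
	if (reply_cm->proto) {
		reply_cm->proto->handler(skb);
	}
#endif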
1564 /*
Ratheesh Kannotha3cf0e02021-12-09 09:44:10 +05301565	 * If l2_features are disabled and the flow uses L2 features such as macvlan/bridge/PPPoE/VLAN,
 1566	 * the bottom interfaces are expected to be disabled in the flow rule and only the top
 1567	 * interfaces are used. In such cases, do not use HW csum offload; csum offload is used only
 1568	 * when we are sending directly to a destination interface that supports it.
1569 */
Nitin Shettye6ed5b52021-12-27 14:50:11 +05301570 if (likely(src_dev->features & NETIF_F_HW_CSUM) && sfe_dev_has_hw_csum(src_dev)) {
Ratheesh Kannotha3cf0e02021-12-09 09:44:10 +05301571 if ((msg->conn_rule.flow_top_interface_num == msg->conn_rule.flow_interface_num) ||
1572 (msg->rule_flags & SFE_RULE_CREATE_FLAG_USE_FLOW_BOTTOM_INTERFACE)) {
Ratheesh Kannoth48445532022-02-07 16:19:00 +05301573 /*
1574 * Dont enable CSUM offload
1575 */
1576#if 0
Ratheesh Kannotha3cf0e02021-12-09 09:44:10 +05301577 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_CSUM_OFFLOAD;
Ratheesh Kannoth48445532022-02-07 16:19:00 +05301578#endif
Ratheesh Kannotha3cf0e02021-12-09 09:44:10 +05301579 }
1580 }
1581
Ratheesh Kannoth5dee3772022-01-18 11:27:14 +05301582 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_SRC_INTERFACE_CHECK) {
1583 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK;
1584 }
1585
Ratheesh Kannotha3cf0e02021-12-09 09:44:10 +05301586 /*
Ken Zhubbf49652021-09-12 15:33:09 -07001587	 * For non-ARP interfaces, we don't write the L2 header.
Guduri Prathyusha79a5fee2021-11-11 17:59:10 +05301588 * Excluding PPPoE from this, since we are now supporting PPPoE encap/decap.
Xiaoping Fan978b3772015-05-27 14:15:18 -07001589 */
Guduri Prathyusha79a5fee2021-11-11 17:59:10 +05301590 if (sfe_ipv6_xmit_eth_type_check(src_dev, reply_cm->flags)) {
Ratheesh Kannoth29140aa2021-10-20 08:25:02 +05301591
1592 /*
1593 * Check whether the rule has configured a specific source MAC address to use.
1594 * This is needed when virtual L3 interfaces such as br-lan, macvlan, vlan are used during egress
1595 */
Ratheesh Kannoth71fc51e2022-01-05 10:02:47 +05301596 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_BRIDGE_FLOW) {
1597 ether_addr_copy((u8 *)reply_cm->xmit_src_mac, (u8 *)msg->conn_rule.return_mac);
Ratheesh Kannoth29140aa2021-10-20 08:25:02 +05301598 } else {
Ratheesh Kannoth71fc51e2022-01-05 10:02:47 +05301599 if ((msg->valid_flags & SFE_RULE_CREATE_SRC_MAC_VALID) &&
1600 (msg->src_mac_rule.mac_valid_flags & SFE_SRC_MAC_FLOW_VALID)) {
1601 ether_addr_copy((u8 *)reply_cm->xmit_src_mac, (u8 *)msg->src_mac_rule.flow_src_mac);
1602 } else {
1603 ether_addr_copy((u8 *)reply_cm->xmit_src_mac, (u8 *)src_dev->dev_addr);
1604 }
Ratheesh Kannoth29140aa2021-10-20 08:25:02 +05301605 }
1606
1607 ether_addr_copy((u8 *)reply_cm->xmit_dest_mac, (u8 *)msg->conn_rule.flow_mac);
1608
Xiaoping Fan978b3772015-05-27 14:15:18 -07001609 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_WRITE_L2_HDR;
Wayne Tanbb7f1782021-12-13 11:16:04 -08001610 reply_cm->l2_hdr_size += ETH_HLEN;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001611
1612 /*
1613 * If our dev writes Ethernet headers then we can write a really fast
1614 * version.
1615 */
1616 if (src_dev->header_ops) {
1617 if (src_dev->header_ops->create == eth_header) {
1618 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_WRITE_FAST_ETH_HDR;
1619 }
1620 }
1621 }
1622
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301623 /*
1624 * No support for NAT in ipv6
1625 */
Xiaoping Fan978b3772015-05-27 14:15:18 -07001626
Xiaoping Fan978b3772015-05-27 14:15:18 -07001627 /*
Xiaoping Fan978b3772015-05-27 14:15:18 -07001628 * Initialize the protocol-specific information that we track.
1629 */
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301630 switch (tuple->protocol) {
Xiaoping Fan978b3772015-05-27 14:15:18 -07001631 case IPPROTO_TCP:
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301632 original_cm->protocol_state.tcp.win_scale = msg->tcp_rule.flow_window_scale;
1633 original_cm->protocol_state.tcp.max_win = msg->tcp_rule.flow_max_window ? msg->tcp_rule.flow_max_window : 1;
1634 original_cm->protocol_state.tcp.end = msg->tcp_rule.flow_end;
1635 original_cm->protocol_state.tcp.max_end = msg->tcp_rule.flow_max_end;
1636 reply_cm->protocol_state.tcp.win_scale = msg->tcp_rule.return_window_scale;
1637 reply_cm->protocol_state.tcp.max_win = msg->tcp_rule.return_max_window ? msg->tcp_rule.return_max_window : 1;
1638 reply_cm->protocol_state.tcp.end = msg->tcp_rule.return_end;
1639 reply_cm->protocol_state.tcp.max_end = msg->tcp_rule.return_max_end;
1640 if (msg->rule_flags & SFE_RULE_CREATE_FLAG_NO_SEQ_CHECK) {
Xiaoping Fan978b3772015-05-27 14:15:18 -07001641 original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_NO_SEQ_CHECK;
1642 reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_NO_SEQ_CHECK;
1643 }
1644 break;
1645 }
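	/*
	 * Sketch of the kind of sequence-space check the TCP state above feeds when
	 * SFE_RULE_CREATE_FLAG_NO_SEQ_CHECK is not set (the real validation lives in
	 * sfe_ipv6_tcp.c). "seq" and "data_len" stand for values taken from the packet
	 * being checked; max_end tracks the right edge of the acceptable window.
	 */
#if 0
	{
		u32 max_end = reply_cm->protocol_state.tcp.max_end;

		if (unlikely((s32)(seq + data_len - (max_end + 1)) > 0)) {
			/*
			 * Segment extends beyond the right edge of the window:
			 * take the exception path instead of fast forwarding.
			 */
		}
	}
#endif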
1646
Wayne Tanbb7f1782021-12-13 11:16:04 -08001647 /*
1648 * Fill in the ipv6_connection object.
1649 */
1650 c->protocol = tuple->protocol;
1651 c->src_ip[0] = *(struct sfe_ipv6_addr *)tuple->flow_ip;
1652 c->src_ip_xlate[0] = *(struct sfe_ipv6_addr *)tuple->flow_ip;
1653 c->src_port = tuple->flow_ident;
1654 c->src_port_xlate = tuple->flow_ident;
1655 c->original_dev = src_dev;
1656 c->original_match = original_cm;
1657
1658 c->dest_ip[0] = *(struct sfe_ipv6_addr *)tuple->return_ip;
1659 c->dest_ip_xlate[0] = *(struct sfe_ipv6_addr *)tuple->return_ip;
1660 c->dest_port = tuple->return_ident;
1661 c->dest_port_xlate = tuple->return_ident;
1662
1663 c->reply_dev = dest_dev;
1664 c->reply_match = reply_cm;
1665 c->debug_read_seq = 0;
1666 c->last_sync_jiffies = get_jiffies_64();
1667 c->removed = false;
1668
Xiaoping Fan978b3772015-05-27 14:15:18 -07001669 sfe_ipv6_connection_match_compute_translations(original_cm);
1670 sfe_ipv6_connection_match_compute_translations(reply_cm);
1671 sfe_ipv6_insert_connection(si, c);
1672
1673 spin_unlock_bh(&si->lock);
1674
1675 /*
1676 * We have everything we need!
1677 */
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301678 DEBUG_INFO("new connection - p: %d\n"
Tian Yang45f39c82020-10-06 14:07:47 -07001679 " s: %s:%pxM(%pxM):%pI6(%pI6):%u(%u)\n"
1680 " d: %s:%pxM(%pxM):%pI6(%pI6):%u(%u)\n",
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301681 tuple->protocol,
1682 src_dev->name, msg->conn_rule.flow_mac, NULL,
1683 (void *)tuple->flow_ip, (void *)tuple->flow_ip, ntohs(tuple->flow_ident), ntohs(tuple->flow_ident),
1684 dest_dev->name, NULL, msg->conn_rule.return_mac,
1685 (void *)tuple->return_ip, (void *)tuple->return_ip, ntohs(tuple->return_ident), ntohs(tuple->return_ident));
Xiaoping Fan978b3772015-05-27 14:15:18 -07001686
1687 return 0;
1688}
1689
1690/*
1691 * sfe_ipv6_destroy_rule()
1692 * Destroy a forwarding rule.
1693 */
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301694void sfe_ipv6_destroy_rule(struct sfe_ipv6_rule_destroy_msg *msg)
Xiaoping Fan978b3772015-05-27 14:15:18 -07001695{
1696 struct sfe_ipv6 *si = &__si6;
1697 struct sfe_ipv6_connection *c;
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301698 bool ret;
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301699 struct sfe_ipv6_5tuple *tuple = &msg->tuple;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001700
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05301701 this_cpu_inc(si->stats_pcpu->connection_destroy_requests64);
1702
Xiaoping Fan978b3772015-05-27 14:15:18 -07001703 spin_lock_bh(&si->lock);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001704
1705 /*
1706 * Check to see if we have a flow that matches the rule we're trying
1707 * to destroy. If there isn't then we can't destroy it.
1708 */
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301709 c = sfe_ipv6_find_connection(si, tuple->protocol, (struct sfe_ipv6_addr *)tuple->flow_ip, tuple->flow_ident,
1710 (struct sfe_ipv6_addr *)tuple->return_ip, tuple->return_ident);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001711 if (!c) {
Xiaoping Fan978b3772015-05-27 14:15:18 -07001712 spin_unlock_bh(&si->lock);
1713
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05301714 this_cpu_inc(si->stats_pcpu->connection_destroy_misses64);
1715
Xiaoping Fan978b3772015-05-27 14:15:18 -07001716 DEBUG_TRACE("connection does not exist - p: %d, s: %pI6:%u, d: %pI6:%u\n",
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301717 tuple->protocol, tuple->flow_ip, ntohs(tuple->flow_ident),
1718 tuple->return_ip, ntohs(tuple->return_ident));
Xiaoping Fan978b3772015-05-27 14:15:18 -07001719 return;
1720 }
1721
1722 /*
1723 * Remove our connection details from the hash tables.
1724 */
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301725 ret = sfe_ipv6_remove_connection(si, c);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001726 spin_unlock_bh(&si->lock);
1727
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301728 if (ret) {
1729 sfe_ipv6_flush_connection(si, c, SFE_SYNC_REASON_DESTROY);
1730 }
Xiaoping Fan978b3772015-05-27 14:15:18 -07001731
1732 DEBUG_INFO("connection destroyed - p: %d, s: %pI6:%u, d: %pI6:%u\n",
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05301733 tuple->protocol, tuple->flow_ip, ntohs(tuple->flow_ident),
1734 tuple->return_ip, ntohs(tuple->return_ident));
Xiaoping Fan978b3772015-05-27 14:15:18 -07001735}
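/*
 * Example caller sketch for sfe_ipv6_destroy_rule(): fill in the 5-tuple of the
 * flow to tear down and pass the message in. The protocol/port values below are
 * illustrative; in practice the rule manager (e.g. ECM) builds this message, and
 * the tuple's flow_ip/return_ip arrays carry the two 128-bit addresses.
 */
#if 0
static void sfe_ipv6_destroy_rule_example(void)
{
	struct sfe_ipv6_rule_destroy_msg msg = {0};

	msg.tuple.protocol = IPPROTO_TCP;
	msg.tuple.flow_ident = htons(49152);
	msg.tuple.return_ident = htons(443);

	sfe_ipv6_destroy_rule(&msg);
}
#endif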
1736
1737/*
1738 * sfe_ipv6_register_sync_rule_callback()
1739 * Register a callback for rule synchronization.
1740 */
1741void sfe_ipv6_register_sync_rule_callback(sfe_sync_rule_callback_t sync_rule_callback)
1742{
1743 struct sfe_ipv6 *si = &__si6;
1744
1745 spin_lock_bh(&si->lock);
1746 rcu_assign_pointer(si->sync_rule_callback, sync_rule_callback);
1747 spin_unlock_bh(&si->lock);
1748}
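/*
 * Example sketch of hooking the statistics sync. The callback signature assumed
 * here (one struct sfe_connection_sync per synced connection) follows the call
 * made from the periodic work below; a real callback would push the per-connection
 * sync data back into the connection manager.
 */
#if 0
static void sfe_ipv6_sync_example_cb(struct sfe_connection_sync *sis)
{
	/*
	 * Consume the per-connection sync data here.
	 */
}

static void sfe_ipv6_sync_example_register(void)
{
	sfe_ipv6_register_sync_rule_callback(sfe_ipv6_sync_example_cb);
}
#endif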
1749
1750/*
1751 * sfe_ipv6_get_debug_dev()
1752 */
1753static ssize_t sfe_ipv6_get_debug_dev(struct device *dev,
1754 struct device_attribute *attr,
1755 char *buf)
1756{
1757 struct sfe_ipv6 *si = &__si6;
1758 ssize_t count;
1759 int num;
1760
1761 spin_lock_bh(&si->lock);
1762 num = si->debug_dev;
1763 spin_unlock_bh(&si->lock);
1764
1765 count = snprintf(buf, (ssize_t)PAGE_SIZE, "%d\n", num);
1766 return count;
1767}
1768
1769/*
1770 * sfe_ipv6_destroy_all_rules_for_dev()
1771 * Destroy all connections that match a particular device.
1772 *
1773 * If we pass dev as NULL then this destroys all connections.
1774 */
1775void sfe_ipv6_destroy_all_rules_for_dev(struct net_device *dev)
1776{
1777 struct sfe_ipv6 *si = &__si6;
1778 struct sfe_ipv6_connection *c;
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301779 bool ret;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001780
Xiaoping Fan34586472015-07-03 02:20:35 -07001781another_round:
Xiaoping Fan978b3772015-05-27 14:15:18 -07001782 spin_lock_bh(&si->lock);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001783
Xiaoping Fan34586472015-07-03 02:20:35 -07001784 for (c = si->all_connections_head; c; c = c->all_connections_next) {
Xiaoping Fan978b3772015-05-27 14:15:18 -07001785 /*
Xiaoping Fan34586472015-07-03 02:20:35 -07001786 * Does this connection relate to the device we are destroying?
Xiaoping Fan978b3772015-05-27 14:15:18 -07001787 */
1788 if (!dev
1789 || (dev == c->original_dev)
1790 || (dev == c->reply_dev)) {
Xiaoping Fan34586472015-07-03 02:20:35 -07001791 break;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001792 }
Xiaoping Fan34586472015-07-03 02:20:35 -07001793 }
Xiaoping Fan978b3772015-05-27 14:15:18 -07001794
Xiaoping Fan34586472015-07-03 02:20:35 -07001795 if (c) {
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301796 ret = sfe_ipv6_remove_connection(si, c);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001797 }
1798
1799 spin_unlock_bh(&si->lock);
Xiaoping Fan34586472015-07-03 02:20:35 -07001800
1801 if (c) {
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05301802 if (ret) {
1803 sfe_ipv6_flush_connection(si, c, SFE_SYNC_REASON_DESTROY);
1804 }
Xiaoping Fan34586472015-07-03 02:20:35 -07001805 goto another_round;
1806 }
Xiaoping Fan978b3772015-05-27 14:15:18 -07001807}
1808
1809/*
1810 * sfe_ipv6_periodic_sync()
1811 */
Ken Zhu137722d2021-09-23 17:57:36 -07001812static void sfe_ipv6_periodic_sync(struct work_struct *work)
Xiaoping Fan978b3772015-05-27 14:15:18 -07001813{
Ken Zhu137722d2021-09-23 17:57:36 -07001814 struct sfe_ipv6 *si = container_of((struct delayed_work *)work, struct sfe_ipv6, sync_dwork);
Xiaoping Fan6a1672f2016-08-17 19:58:12 -07001815 u64 now_jiffies;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001816 int quota;
1817 sfe_sync_rule_callback_t sync_rule_callback;
Ken Zhu32b95392021-09-03 13:52:04 -07001818 struct sfe_ipv6_connection *c;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001819
1820 now_jiffies = get_jiffies_64();
1821
1822 rcu_read_lock();
1823 sync_rule_callback = rcu_dereference(si->sync_rule_callback);
1824 if (!sync_rule_callback) {
1825 rcu_read_unlock();
1826 goto done;
1827 }
1828
1829 spin_lock_bh(&si->lock);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001830
1831 /*
Ken Zhu32b95392021-09-03 13:52:04 -07001832 * If we have reached the end of the connection list, walk from
1833 * the connection head.
1834 */
1835 c = si->wc_next;
1836 if (unlikely(!c)) {
1837 c = si->all_connections_head;
1838 }
1839 /*
Xiaoping Fan978b3772015-05-27 14:15:18 -07001840 * Get an estimate of the number of connections to parse in this sync.
1841 */
1842 quota = (si->num_connections + 63) / 64;
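	/*
	 * Example: with 1000 connections, quota = (1000 + 63) / 64 = 16, so at most
	 * 16 connections with traffic are synced per invocation and a full pass over
	 * the list takes on the order of 63 invocations (roughly 0.63s at the ~100Hz
	 * reschedule rate used at the end of this function).
	 */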
1843
1844 /*
Ken Zhu32b95392021-09-03 13:52:04 -07001845 * Walk the "all connection" list and sync the connection state.
Xiaoping Fan978b3772015-05-27 14:15:18 -07001846 */
Ken Zhu32b95392021-09-03 13:52:04 -07001847 while (likely(c && quota)) {
Xiaoping Fan978b3772015-05-27 14:15:18 -07001848 struct sfe_ipv6_connection_match *cm;
1849 struct sfe_ipv6_connection_match *counter_cm;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001850 struct sfe_connection_sync sis;
1851
Ken Zhu32b95392021-09-03 13:52:04 -07001852 cm = c->original_match;
1853 counter_cm = c->reply_match;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001854
1855 /*
Ken Zhu32b95392021-09-03 13:52:04 -07001856		 * If no packets were received in either the original or the reply
 1857		 * direction, move to the next connection.
Xiaoping Fan978b3772015-05-27 14:15:18 -07001858 */
Ken Zhu32b95392021-09-03 13:52:04 -07001859 if (!atomic_read(&cm->rx_packet_count) && !atomic_read(&counter_cm->rx_packet_count)) {
1860 c = c->all_connections_next;
1861 continue;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001862 }
1863
Ken Zhu32b95392021-09-03 13:52:04 -07001864 quota--;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001865
1866 /*
1867 * Sync the connection state.
1868 */
Xiaoping Fan99cb4c12015-08-21 19:07:32 -07001869 sfe_ipv6_gen_sync_connection(si, c, &sis, SFE_SYNC_REASON_STATS, now_jiffies);
Xiaoping Fan978b3772015-05-27 14:15:18 -07001870
Ken Zhu32b95392021-09-03 13:52:04 -07001871 si->wc_next = c->all_connections_next;
1872
Xiaoping Fan978b3772015-05-27 14:15:18 -07001873 spin_unlock_bh(&si->lock);
1874 sync_rule_callback(&sis);
1875 spin_lock_bh(&si->lock);
Ken Zhu32b95392021-09-03 13:52:04 -07001876
1877 /*
 1878		 * c must be read and used within the same lock/unlock window: it could
 1879		 * be removed while the lock is dropped for the callback, so wc_next is
 1880		 * picked up again only after the callback returns and the lock is re-taken.
1881 */
1882 c = si->wc_next;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001883 }
1884
Ken Zhu32b95392021-09-03 13:52:04 -07001885 /*
 1886	 * At the end of the loop, point wc_next at the connection where we stopped.
1887 */
1888 si->wc_next = c;
1889
Xiaoping Fan978b3772015-05-27 14:15:18 -07001890 spin_unlock_bh(&si->lock);
1891 rcu_read_unlock();
1892
1893done:
Ken Zhu137722d2021-09-23 17:57:36 -07001894 schedule_delayed_work_on(si->work_cpu, (struct delayed_work *)work, ((HZ + 99) / 100));
Xiaoping Fan978b3772015-05-27 14:15:18 -07001895}
1896
1897/*
1898 * sfe_ipv6_debug_dev_read_start()
1899 * Generate part of the XML output.
1900 */
1901static bool sfe_ipv6_debug_dev_read_start(struct sfe_ipv6 *si, char *buffer, char *msg, size_t *length,
1902 int *total_read, struct sfe_ipv6_debug_xml_write_state *ws)
1903{
1904 int bytes_read;
1905
Xiaoping Fan34586472015-07-03 02:20:35 -07001906 si->debug_read_seq++;
1907
Xiaoping Fan978b3772015-05-27 14:15:18 -07001908 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE, "<sfe_ipv6>\n");
1909 if (copy_to_user(buffer + *total_read, msg, CHAR_DEV_MSG_SIZE)) {
1910 return false;
1911 }
1912
1913 *length -= bytes_read;
1914 *total_read += bytes_read;
1915
1916 ws->state++;
1917 return true;
1918}
1919
1920/*
1921 * sfe_ipv6_debug_dev_read_connections_start()
1922 * Generate part of the XML output.
1923 */
1924static bool sfe_ipv6_debug_dev_read_connections_start(struct sfe_ipv6 *si, char *buffer, char *msg, size_t *length,
1925 int *total_read, struct sfe_ipv6_debug_xml_write_state *ws)
1926{
1927 int bytes_read;
1928
1929 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE, "\t<connections>\n");
1930 if (copy_to_user(buffer + *total_read, msg, CHAR_DEV_MSG_SIZE)) {
1931 return false;
1932 }
1933
1934 *length -= bytes_read;
1935 *total_read += bytes_read;
1936
1937 ws->state++;
1938 return true;
1939}
1940
1941/*
1942 * sfe_ipv6_debug_dev_read_connections_connection()
1943 * Generate part of the XML output.
1944 */
1945static bool sfe_ipv6_debug_dev_read_connections_connection(struct sfe_ipv6 *si, char *buffer, char *msg, size_t *length,
1946 int *total_read, struct sfe_ipv6_debug_xml_write_state *ws)
1947{
1948 struct sfe_ipv6_connection *c;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001949 struct sfe_ipv6_connection_match *original_cm;
1950 struct sfe_ipv6_connection_match *reply_cm;
1951 int bytes_read;
1952 int protocol;
1953 struct net_device *src_dev;
1954 struct sfe_ipv6_addr src_ip;
1955 struct sfe_ipv6_addr src_ip_xlate;
1956 __be16 src_port;
1957 __be16 src_port_xlate;
Xiaoping Fan6a1672f2016-08-17 19:58:12 -07001958 u64 src_rx_packets;
1959 u64 src_rx_bytes;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001960 struct net_device *dest_dev;
1961 struct sfe_ipv6_addr dest_ip;
1962 struct sfe_ipv6_addr dest_ip_xlate;
1963 __be16 dest_port;
1964 __be16 dest_port_xlate;
Xiaoping Fan6a1672f2016-08-17 19:58:12 -07001965 u64 dest_rx_packets;
1966 u64 dest_rx_bytes;
1967 u64 last_sync_jiffies;
Ken Zhu37040ea2021-09-09 21:11:15 -07001968 u32 src_mark, dest_mark, src_priority, dest_priority, src_dscp, dest_dscp;
Guduri Prathyushaeb31c902021-11-10 20:18:50 +05301969 u32 packet, byte, original_cm_flags;
1970 u16 pppoe_session_id;
1971 u8 pppoe_remote_mac[ETH_ALEN];
Ken Zhu7e38d1a2021-11-30 17:31:46 -08001972 u32 original_fast_xmit, reply_fast_xmit;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001973#ifdef CONFIG_NF_FLOW_COOKIE
1974 int src_flow_cookie, dst_flow_cookie;
1975#endif
1976
1977 spin_lock_bh(&si->lock);
Xiaoping Fan34586472015-07-03 02:20:35 -07001978
1979 for (c = si->all_connections_head; c; c = c->all_connections_next) {
1980 if (c->debug_read_seq < si->debug_read_seq) {
1981 c->debug_read_seq = si->debug_read_seq;
1982 break;
1983 }
1984 }
Xiaoping Fan978b3772015-05-27 14:15:18 -07001985
1986 /*
Xiaoping Fan34586472015-07-03 02:20:35 -07001987 * If there were no connections then move to the next state.
Xiaoping Fan978b3772015-05-27 14:15:18 -07001988 */
1989 if (!c) {
Xiaoping Fan978b3772015-05-27 14:15:18 -07001990 spin_unlock_bh(&si->lock);
Xiaoping Fan34586472015-07-03 02:20:35 -07001991 ws->state++;
Xiaoping Fan978b3772015-05-27 14:15:18 -07001992 return true;
1993 }
1994
1995 original_cm = c->original_match;
1996 reply_cm = c->reply_match;
1997
1998 protocol = c->protocol;
1999 src_dev = c->original_dev;
2000 src_ip = c->src_ip[0];
2001 src_ip_xlate = c->src_ip_xlate[0];
2002 src_port = c->src_port;
2003 src_port_xlate = c->src_port_xlate;
Xiaoping Fane1963d42015-08-25 17:06:19 -07002004 src_priority = original_cm->priority;
2005 src_dscp = original_cm->dscp >> SFE_IPV6_DSCP_SHIFT;
Xiaoping Fan978b3772015-05-27 14:15:18 -07002006
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05302007 sfe_ipv6_connection_match_update_summary_stats(original_cm, &packet, &byte);
2008 sfe_ipv6_connection_match_update_summary_stats(reply_cm, &packet, &byte);
Xiaoping Fan978b3772015-05-27 14:15:18 -07002009
2010 src_rx_packets = original_cm->rx_packet_count64;
2011 src_rx_bytes = original_cm->rx_byte_count64;
Ken Zhu37040ea2021-09-09 21:11:15 -07002012 src_mark = original_cm->mark;
Ken Zhu7e38d1a2021-11-30 17:31:46 -08002013 original_fast_xmit = original_cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_FAST_XMIT;
Xiaoping Fan978b3772015-05-27 14:15:18 -07002014 dest_dev = c->reply_dev;
2015 dest_ip = c->dest_ip[0];
2016 dest_ip_xlate = c->dest_ip_xlate[0];
2017 dest_port = c->dest_port;
2018 dest_port_xlate = c->dest_port_xlate;
Xiaoping Fane1963d42015-08-25 17:06:19 -07002019 dest_priority = reply_cm->priority;
2020 dest_dscp = reply_cm->dscp >> SFE_IPV6_DSCP_SHIFT;
Xiaoping Fan978b3772015-05-27 14:15:18 -07002021 dest_rx_packets = reply_cm->rx_packet_count64;
2022 dest_rx_bytes = reply_cm->rx_byte_count64;
2023 last_sync_jiffies = get_jiffies_64() - c->last_sync_jiffies;
Guduri Prathyushaeb31c902021-11-10 20:18:50 +05302024 original_cm_flags = original_cm->flags;
2025 pppoe_session_id = original_cm->pppoe_session_id;
2026 ether_addr_copy(pppoe_remote_mac, original_cm->pppoe_remote_mac);
Ken Zhu37040ea2021-09-09 21:11:15 -07002027 dest_mark = reply_cm->mark;
Ken Zhu7e38d1a2021-11-30 17:31:46 -08002028 reply_fast_xmit = reply_cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_FAST_XMIT;
Xiaoping Fan978b3772015-05-27 14:15:18 -07002029#ifdef CONFIG_NF_FLOW_COOKIE
2030 src_flow_cookie = original_cm->flow_cookie;
2031 dst_flow_cookie = reply_cm->flow_cookie;
2032#endif
2033 spin_unlock_bh(&si->lock);
2034
2035 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE, "\t\t<connection "
2036 "protocol=\"%u\" "
2037 "src_dev=\"%s\" "
2038 "src_ip=\"%pI6\" src_ip_xlate=\"%pI6\" "
2039 "src_port=\"%u\" src_port_xlate=\"%u\" "
Xiaoping Fane1963d42015-08-25 17:06:19 -07002040 "src_priority=\"%u\" src_dscp=\"%u\" "
Xiaoping Fan978b3772015-05-27 14:15:18 -07002041 "src_rx_pkts=\"%llu\" src_rx_bytes=\"%llu\" "
Ken Zhu37040ea2021-09-09 21:11:15 -07002042 "src_mark=\"%08x\" "
Ken Zhu7e38d1a2021-11-30 17:31:46 -08002043 "src_fast_xmit=\"%s\" "
Xiaoping Fan978b3772015-05-27 14:15:18 -07002044 "dest_dev=\"%s\" "
2045 "dest_ip=\"%pI6\" dest_ip_xlate=\"%pI6\" "
2046 "dest_port=\"%u\" dest_port_xlate=\"%u\" "
Xiaoping Fane1963d42015-08-25 17:06:19 -07002047 "dest_priority=\"%u\" dest_dscp=\"%u\" "
Xiaoping Fan978b3772015-05-27 14:15:18 -07002048 "dest_rx_pkts=\"%llu\" dest_rx_bytes=\"%llu\" "
Ken Zhu37040ea2021-09-09 21:11:15 -07002049 "dest_mark=\"%08x\" "
Ken Zhu7e38d1a2021-11-30 17:31:46 -08002050 "reply_fast_xmit=\"%s\" "
Xiaoping Fan978b3772015-05-27 14:15:18 -07002051#ifdef CONFIG_NF_FLOW_COOKIE
2052 "src_flow_cookie=\"%d\" dst_flow_cookie=\"%d\" "
2053#endif
Ken Zhu37040ea2021-09-09 21:11:15 -07002054 "last_sync=\"%llu\" ",
Xiaoping Fan978b3772015-05-27 14:15:18 -07002055 protocol,
2056 src_dev->name,
2057 &src_ip, &src_ip_xlate,
2058 ntohs(src_port), ntohs(src_port_xlate),
Xiaoping Fane1963d42015-08-25 17:06:19 -07002059 src_priority, src_dscp,
Xiaoping Fan978b3772015-05-27 14:15:18 -07002060 src_rx_packets, src_rx_bytes,
Ken Zhu37040ea2021-09-09 21:11:15 -07002061 src_mark,
Ken Zhu7e38d1a2021-11-30 17:31:46 -08002062 original_fast_xmit ? "Yes" : "No",
Xiaoping Fan978b3772015-05-27 14:15:18 -07002063 dest_dev->name,
2064 &dest_ip, &dest_ip_xlate,
2065 ntohs(dest_port), ntohs(dest_port_xlate),
Xiaoping Fane1963d42015-08-25 17:06:19 -07002066 dest_priority, dest_dscp,
Xiaoping Fan978b3772015-05-27 14:15:18 -07002067 dest_rx_packets, dest_rx_bytes,
Ken Zhu37040ea2021-09-09 21:11:15 -07002068 dest_mark,
Ken Zhu7e38d1a2021-11-30 17:31:46 -08002069 reply_fast_xmit ? "Yes" : "No",
Xiaoping Fan978b3772015-05-27 14:15:18 -07002070#ifdef CONFIG_NF_FLOW_COOKIE
2071 src_flow_cookie, dst_flow_cookie,
2072#endif
Ken Zhu37040ea2021-09-09 21:11:15 -07002073 last_sync_jiffies);
Xiaoping Fan978b3772015-05-27 14:15:18 -07002074
Guduri Prathyushaeb31c902021-11-10 20:18:50 +05302075 if (original_cm_flags &= (SFE_IPV6_CONNECTION_MATCH_FLAG_PPPOE_DECAP | SFE_IPV6_CONNECTION_MATCH_FLAG_PPPOE_ENCAP)) {
Guduri Prathyusha79a5fee2021-11-11 17:59:10 +05302076 bytes_read += snprintf(msg + bytes_read, CHAR_DEV_MSG_SIZE, "pppoe_session_id=\"%u\" pppoe_server_MAC=\"%pM\" ",
Guduri Prathyushaeb31c902021-11-10 20:18:50 +05302077 pppoe_session_id, pppoe_remote_mac);
2078 }
2079
 2080	bytes_read += snprintf(msg + bytes_read, CHAR_DEV_MSG_SIZE, "/>\n");
2081
Xiaoping Fan978b3772015-05-27 14:15:18 -07002082 if (copy_to_user(buffer + *total_read, msg, CHAR_DEV_MSG_SIZE)) {
2083 return false;
2084 }
2085
2086 *length -= bytes_read;
2087 *total_read += bytes_read;
2088
Xiaoping Fan978b3772015-05-27 14:15:18 -07002089 return true;
2090}
2091
2092/*
2093 * sfe_ipv6_debug_dev_read_connections_end()
2094 * Generate part of the XML output.
2095 */
2096static bool sfe_ipv6_debug_dev_read_connections_end(struct sfe_ipv6 *si, char *buffer, char *msg, size_t *length,
2097 int *total_read, struct sfe_ipv6_debug_xml_write_state *ws)
2098{
2099 int bytes_read;
2100
2101 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE, "\t</connections>\n");
2102 if (copy_to_user(buffer + *total_read, msg, CHAR_DEV_MSG_SIZE)) {
2103 return false;
2104 }
2105
2106 *length -= bytes_read;
2107 *total_read += bytes_read;
2108
2109 ws->state++;
2110 return true;
2111}
2112
2113/*
2114 * sfe_ipv6_debug_dev_read_exceptions_start()
2115 * Generate part of the XML output.
2116 */
2117static bool sfe_ipv6_debug_dev_read_exceptions_start(struct sfe_ipv6 *si, char *buffer, char *msg, size_t *length,
2118 int *total_read, struct sfe_ipv6_debug_xml_write_state *ws)
2119{
2120 int bytes_read;
2121
2122 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE, "\t<exceptions>\n");
2123 if (copy_to_user(buffer + *total_read, msg, CHAR_DEV_MSG_SIZE)) {
2124 return false;
2125 }
2126
2127 *length -= bytes_read;
2128 *total_read += bytes_read;
2129
2130 ws->state++;
2131 return true;
2132}
2133
2134/*
2135 * sfe_ipv6_debug_dev_read_exceptions_exception()
2136 * Generate part of the XML output.
2137 */
2138static bool sfe_ipv6_debug_dev_read_exceptions_exception(struct sfe_ipv6 *si, char *buffer, char *msg, size_t *length,
2139 int *total_read, struct sfe_ipv6_debug_xml_write_state *ws)
2140{
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05302141 int i;
2142 u64 val = 0;
Xiaoping Fan978b3772015-05-27 14:15:18 -07002143
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05302144 for_each_possible_cpu(i) {
2145 const struct sfe_ipv6_stats *s = per_cpu_ptr(si->stats_pcpu, i);
2146 val += s->exception_events64[ws->iter_exception];
2147 }
Xiaoping Fan978b3772015-05-27 14:15:18 -07002148
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05302149 if (val) {
Xiaoping Fan978b3772015-05-27 14:15:18 -07002150 int bytes_read;
2151
2152 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE,
2153 "\t\t<exception name=\"%s\" count=\"%llu\" />\n",
2154 sfe_ipv6_exception_events_string[ws->iter_exception],
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05302155 val);
2156
Xiaoping Fan978b3772015-05-27 14:15:18 -07002157 if (copy_to_user(buffer + *total_read, msg, CHAR_DEV_MSG_SIZE)) {
2158 return false;
2159 }
2160
2161 *length -= bytes_read;
2162 *total_read += bytes_read;
2163 }
2164
2165 ws->iter_exception++;
2166 if (ws->iter_exception >= SFE_IPV6_EXCEPTION_EVENT_LAST) {
2167 ws->iter_exception = 0;
2168 ws->state++;
2169 }
2170
2171 return true;
2172}
2173
2174/*
2175 * sfe_ipv6_debug_dev_read_exceptions_end()
2176 * Generate part of the XML output.
2177 */
2178static bool sfe_ipv6_debug_dev_read_exceptions_end(struct sfe_ipv6 *si, char *buffer, char *msg, size_t *length,
2179 int *total_read, struct sfe_ipv6_debug_xml_write_state *ws)
2180{
2181 int bytes_read;
2182
2183 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE, "\t</exceptions>\n");
2184 if (copy_to_user(buffer + *total_read, msg, CHAR_DEV_MSG_SIZE)) {
2185 return false;
2186 }
2187
2188 *length -= bytes_read;
2189 *total_read += bytes_read;
2190
2191 ws->state++;
2192 return true;
2193}
2194
2195/*
2196 * sfe_ipv6_debug_dev_read_stats()
2197 * Generate part of the XML output.
2198 */
2199static bool sfe_ipv6_debug_dev_read_stats(struct sfe_ipv6 *si, char *buffer, char *msg, size_t *length,
2200 int *total_read, struct sfe_ipv6_debug_xml_write_state *ws)
2201{
2202 int bytes_read;
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05302203 struct sfe_ipv6_stats stats;
2204 unsigned int num_conn;
2205
2206 sfe_ipv6_update_summary_stats(si, &stats);
Xiaoping Fan978b3772015-05-27 14:15:18 -07002207
2208 spin_lock_bh(&si->lock);
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05302209 num_conn = si->num_connections;
Xiaoping Fan978b3772015-05-27 14:15:18 -07002210 spin_unlock_bh(&si->lock);
2211
2212 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE, "\t<stats "
2213 "num_connections=\"%u\" "
Suruchi Suman23a279d2021-11-16 15:13:09 +05302214 "pkts_dropped=\"%llu\" "
Ken Zhu7e38d1a2021-11-30 17:31:46 -08002215 "pkts_fast_xmited=\"%llu\" "
Xiaoping Fan978b3772015-05-27 14:15:18 -07002216 "pkts_forwarded=\"%llu\" pkts_not_forwarded=\"%llu\" "
2217 "create_requests=\"%llu\" create_collisions=\"%llu\" "
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05302218 "create_failures=\"%llu\" "
Xiaoping Fan978b3772015-05-27 14:15:18 -07002219 "destroy_requests=\"%llu\" destroy_misses=\"%llu\" "
2220 "flushes=\"%llu\" "
Guduri Prathyusha647fe3e2021-11-22 19:17:51 +05302221 "hash_hits=\"%llu\" hash_reorders=\"%llu\" "
2222 "pppoe_encap_pkts_fwded=\"%llu\" "
Guduri Prathyusha034d6352022-01-12 16:49:04 +05302223 "pppoe_decap_pkts_fwded=\"%llu\" "
2224 "pppoe_bridge_pkts_fwded=\"%llu\" />\n",
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05302225
2226 num_conn,
Suruchi Suman23a279d2021-11-16 15:13:09 +05302227 stats.packets_dropped64,
Ken Zhu7e38d1a2021-11-30 17:31:46 -08002228 stats.packets_fast_xmited64,
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05302229 stats.packets_forwarded64,
2230 stats.packets_not_forwarded64,
2231 stats.connection_create_requests64,
2232 stats.connection_create_collisions64,
Ratheesh Kannoth89302a72021-10-20 08:10:37 +05302233 stats.connection_create_failures64,
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05302234 stats.connection_destroy_requests64,
2235 stats.connection_destroy_misses64,
2236 stats.connection_flushes64,
2237 stats.connection_match_hash_hits64,
Guduri Prathyusha647fe3e2021-11-22 19:17:51 +05302238 stats.connection_match_hash_reorders64,
2239 stats.pppoe_encap_packets_forwarded64,
Guduri Prathyusha034d6352022-01-12 16:49:04 +05302240 stats.pppoe_decap_packets_forwarded64,
2241 stats.pppoe_bridge_packets_forwarded64);
Xiaoping Fan978b3772015-05-27 14:15:18 -07002242 if (copy_to_user(buffer + *total_read, msg, CHAR_DEV_MSG_SIZE)) {
2243 return false;
2244 }
2245
2246 *length -= bytes_read;
2247 *total_read += bytes_read;
2248
2249 ws->state++;
2250 return true;
2251}
2252
2253/*
2254 * sfe_ipv6_debug_dev_read_end()
2255 * Generate part of the XML output.
2256 */
2257static bool sfe_ipv6_debug_dev_read_end(struct sfe_ipv6 *si, char *buffer, char *msg, size_t *length,
2258 int *total_read, struct sfe_ipv6_debug_xml_write_state *ws)
2259{
2260 int bytes_read;
2261
2262 bytes_read = snprintf(msg, CHAR_DEV_MSG_SIZE, "</sfe_ipv6>\n");
2263 if (copy_to_user(buffer + *total_read, msg, CHAR_DEV_MSG_SIZE)) {
2264 return false;
2265 }
2266
2267 *length -= bytes_read;
2268 *total_read += bytes_read;
2269
2270 ws->state++;
2271 return true;
2272}
2273
2274/*
2275 * Array of write functions that write various XML elements that correspond to
2276 * our XML output state machine.
2277 */
2278static sfe_ipv6_debug_xml_write_method_t sfe_ipv6_debug_xml_write_methods[SFE_IPV6_DEBUG_XML_STATE_DONE] = {
2279 sfe_ipv6_debug_dev_read_start,
2280 sfe_ipv6_debug_dev_read_connections_start,
2281 sfe_ipv6_debug_dev_read_connections_connection,
2282 sfe_ipv6_debug_dev_read_connections_end,
2283 sfe_ipv6_debug_dev_read_exceptions_start,
2284 sfe_ipv6_debug_dev_read_exceptions_exception,
2285 sfe_ipv6_debug_dev_read_exceptions_end,
2286 sfe_ipv6_debug_dev_read_stats,
2287 sfe_ipv6_debug_dev_read_end,
2288};
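/*
 * Taken together, the write methods above emit output shaped roughly like:
 *
 *	<sfe_ipv6>
 *		<connections>
 *			<connection protocol="6" src_dev="..." ... last_sync="..." />
 *		</connections>
 *		<exceptions>
 *			<exception name="..." count="..." />
 *		</exceptions>
 *		<stats num_connections="..." ... />
 *	</sfe_ipv6>
 */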
2289
2290/*
2291 * sfe_ipv6_debug_dev_read()
2292 * Send info to userspace upon read request from user
2293 */
2294static ssize_t sfe_ipv6_debug_dev_read(struct file *filp, char *buffer, size_t length, loff_t *offset)
2295{
2296 char msg[CHAR_DEV_MSG_SIZE];
2297 int total_read = 0;
2298 struct sfe_ipv6_debug_xml_write_state *ws;
2299 struct sfe_ipv6 *si = &__si6;
2300
2301 ws = (struct sfe_ipv6_debug_xml_write_state *)filp->private_data;
2302 while ((ws->state != SFE_IPV6_DEBUG_XML_STATE_DONE) && (length > CHAR_DEV_MSG_SIZE)) {
2303 if ((sfe_ipv6_debug_xml_write_methods[ws->state])(si, buffer, msg, &length, &total_read, ws)) {
2304 continue;
2305 }
2306 }
Xiaoping Fan978b3772015-05-27 14:15:18 -07002307 return total_read;
2308}
2309
2310/*
Xiaoping Fan978b3772015-05-27 14:15:18 -07002311 * sfe_ipv6_debug_dev_open()
2312 */
2313static int sfe_ipv6_debug_dev_open(struct inode *inode, struct file *file)
2314{
2315 struct sfe_ipv6_debug_xml_write_state *ws;
2316
2317 ws = (struct sfe_ipv6_debug_xml_write_state *)file->private_data;
2318 if (ws) {
2319 return 0;
2320 }
2321
2322 ws = kzalloc(sizeof(struct sfe_ipv6_debug_xml_write_state), GFP_KERNEL);
2323 if (!ws) {
2324 return -ENOMEM;
2325 }
2326
2327 ws->state = SFE_IPV6_DEBUG_XML_STATE_START;
2328 file->private_data = ws;
2329
2330 return 0;
2331}
2332
2333/*
2334 * sfe_ipv6_debug_dev_release()
2335 */
2336static int sfe_ipv6_debug_dev_release(struct inode *inode, struct file *file)
2337{
2338 struct sfe_ipv6_debug_xml_write_state *ws;
Xiaoping Fan978b3772015-05-27 14:15:18 -07002339
2340 ws = (struct sfe_ipv6_debug_xml_write_state *)file->private_data;
Xiaoping Fan34586472015-07-03 02:20:35 -07002341 if (ws) {
2342 /*
2343 * We've finished with our output so free the write state.
2344 */
2345 kfree(ws);
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05302346 file->private_data = NULL;
Xiaoping Fan978b3772015-05-27 14:15:18 -07002347 }
2348
Xiaoping Fan978b3772015-05-27 14:15:18 -07002349 return 0;
2350}
2351
2352/*
2353 * File operations used in the debug char device
2354 */
2355static struct file_operations sfe_ipv6_debug_dev_fops = {
2356 .read = sfe_ipv6_debug_dev_read,
Xiaoping Fan978b3772015-05-27 14:15:18 -07002357 .open = sfe_ipv6_debug_dev_open,
2358 .release = sfe_ipv6_debug_dev_release
2359};
2360
2361#ifdef CONFIG_NF_FLOW_COOKIE
2362/*
2363 * sfe_ipv6_register_flow_cookie_cb
 2364 *	register a function that SFE can use to configure the flow cookie for a flow
 2365 *
 2366 * Hardware drivers that support flow cookies should register a callback function with SFE.
 2367 * SFE then uses this function to configure the flow cookie for a flow.
2368 * return: 0, success; !=0, fail
2369 */
2370int sfe_ipv6_register_flow_cookie_cb(sfe_ipv6_flow_cookie_set_func_t cb)
2371{
2372 struct sfe_ipv6 *si = &__si6;
2373
2374 BUG_ON(!cb);
2375
2376 if (si->flow_cookie_set_func) {
2377 return -1;
2378 }
2379
2380 rcu_assign_pointer(si->flow_cookie_set_func, cb);
2381 return 0;
2382}
2383
2384/*
2385 * sfe_ipv6_unregister_flow_cookie_cb
 2386 *	unregister the function used to configure the flow cookie for a flow
2387 *
2388 * return: 0, success; !=0, fail
2389 */
2390int sfe_ipv6_unregister_flow_cookie_cb(sfe_ipv6_flow_cookie_set_func_t cb)
2391{
2392 struct sfe_ipv6 *si = &__si6;
2393
2394 RCU_INIT_POINTER(si->flow_cookie_set_func, NULL);
2395 return 0;
2396}
Xiaoping Fan640faf42015-08-28 15:50:55 -07002397
2398/*
2399 * sfe_ipv6_get_flow_cookie()
2400 */
2401static ssize_t sfe_ipv6_get_flow_cookie(struct device *dev,
2402 struct device_attribute *attr,
2403 char *buf)
2404{
2405 struct sfe_ipv6 *si = &__si6;
Xiaoping Fan01c67cc2015-11-09 11:31:57 -08002406 return snprintf(buf, (ssize_t)PAGE_SIZE, "%d\n", si->flow_cookie_enable);
Xiaoping Fan640faf42015-08-28 15:50:55 -07002407}
2408
2409/*
2410 * sfe_ipv6_set_flow_cookie()
2411 */
2412static ssize_t sfe_ipv6_set_flow_cookie(struct device *dev,
2413 struct device_attribute *attr,
2414 const char *buf, size_t size)
2415{
2416 struct sfe_ipv6 *si = &__si6;
Ken Zhu137722d2021-09-23 17:57:36 -07002417	si->flow_cookie_enable = simple_strtol(buf, NULL, 0);
Xiaoping Fan640faf42015-08-28 15:50:55 -07002418
2419 return size;
2420}
2421
2422/*
2423 * sysfs attributes.
2424 */
2425static const struct device_attribute sfe_ipv6_flow_cookie_attr =
Xiaoping Fane70da412016-02-26 16:47:57 -08002426 __ATTR(flow_cookie_enable, S_IWUSR | S_IRUGO, sfe_ipv6_get_flow_cookie, sfe_ipv6_set_flow_cookie);
Xiaoping Fan978b3772015-05-27 14:15:18 -07002427#endif /*CONFIG_NF_FLOW_COOKIE*/
2428
Ken Zhu137722d2021-09-23 17:57:36 -07002429/*
2430 * sfe_ipv6_get_cpu()
2431 */
2432static ssize_t sfe_ipv6_get_cpu(struct device *dev,
2433 struct device_attribute *attr,
2434 char *buf)
2435{
2436 struct sfe_ipv6 *si = &__si6;
2437 return snprintf(buf, (ssize_t)PAGE_SIZE, "%d\n", si->work_cpu);
2438}
2439
2440/*
Wayne Tanbb7f1782021-12-13 11:16:04 -08002441 * sfe_ipv6_set_cpu()
Ken Zhu137722d2021-09-23 17:57:36 -07002442 */
2443static ssize_t sfe_ipv6_set_cpu(struct device *dev,
2444 struct device_attribute *attr,
2445 const char *buf, size_t size)
2446{
2447 struct sfe_ipv6 *si = &__si6;
2448 int work_cpu;
2449
2450 work_cpu = simple_strtol(buf, NULL, 0);
2451 if ((work_cpu >= 0) && (work_cpu <= NR_CPUS)) {
2452 si->work_cpu = work_cpu;
2453 } else {
 2454		dev_err(dev, "%s is not in the valid range [0,%d]", buf, NR_CPUS);
2455 }
2456
2457 return size;
2458}
2459/*
2460 * sysfs attributes.
2461 */
2462static const struct device_attribute sfe_ipv6_cpu_attr =
2463 __ATTR(stat_work_cpu, S_IWUSR | S_IRUGO, sfe_ipv6_get_cpu, sfe_ipv6_set_cpu);
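/*
 * Usage sketch: the attribute above is created in sfe_ipv6_init() under the
 * "sfe_ipv6" kobject, so the CPU used for the statistics sync work can typically
 * be read and set from user space, e.g.:
 *
 *	cat /sys/sfe_ipv6/stat_work_cpu
 *	echo 2 > /sys/sfe_ipv6/stat_work_cpu
 */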
2464
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05302465 /*
 2466 * sfe_ipv6_conn_match_hash_init()
2467 * Initialize conn match hash lists
2468 */
2469static void sfe_ipv6_conn_match_hash_init(struct sfe_ipv6 *si, int len)
2470{
2471 struct hlist_head *hash_list = si->hlist_conn_match_hash_head;
2472 int i;
2473
2474 for (i = 0; i < len; i++) {
2475 INIT_HLIST_HEAD(&hash_list[i]);
2476 }
2477}
2478
Suruchi Suman23a279d2021-11-16 15:13:09 +05302479#ifdef SFE_PROCESS_LOCAL_OUT
2480/*
2481 * sfe_ipv6_local_out()
2482 * Called for packets from ip_local_out() - post encapsulation & other packets
2483 */
2484static unsigned int sfe_ipv6_local_out(void *priv,
2485 struct sk_buff *skb,
2486 const struct nf_hook_state *nhs)
2487{
Nitin Shettyc28f8172022-02-04 16:23:46 +05302488 struct sfe_l2_info l2_info = {0};
2489
Suruchi Suman23a279d2021-11-16 15:13:09 +05302490 DEBUG_TRACE("sfe: sfe_ipv6_local_out hook called.\n");
2491
2492 if (likely(skb->skb_iif)) {
Nitin Shettyc28f8172022-02-04 16:23:46 +05302493 return sfe_ipv6_recv(skb->dev, skb, &l2_info, true) ? NF_STOLEN : NF_ACCEPT;
Suruchi Suman23a279d2021-11-16 15:13:09 +05302494 }
2495
2496 return NF_ACCEPT;
2497}
2498
2499/*
2500 * struct nf_hook_ops sfe_ipv6_ops_local_out[]
2501 * Hooks into netfilter local out packet monitoring points.
2502 */
2503static struct nf_hook_ops sfe_ipv6_ops_local_out[] __read_mostly = {
2504
2505 /*
2506 * Local out routing hook is used to monitor packets.
2507 */
2508 {
2509 .hook = sfe_ipv6_local_out,
2510 .pf = PF_INET6,
2511 .hooknum = NF_INET_LOCAL_OUT,
2512 .priority = NF_IP6_PRI_FIRST,
2513 },
2514};
2515#endif
2516
Xiaoping Fan978b3772015-05-27 14:15:18 -07002517/*
2518 * sfe_ipv6_init()
2519 */
Ratheesh Kannoth24fb1db2021-10-20 07:28:06 +05302520int sfe_ipv6_init(void)
Xiaoping Fan978b3772015-05-27 14:15:18 -07002521{
2522 struct sfe_ipv6 *si = &__si6;
2523 int result = -1;
2524
2525 DEBUG_INFO("SFE IPv6 init\n");
2526
Ratheesh Kannotha212fc52021-10-20 07:50:32 +05302527 sfe_ipv6_conn_match_hash_init(si, ARRAY_SIZE(si->hlist_conn_match_hash_head));
2528
Ratheesh Kannoth1ed95462021-10-20 07:57:45 +05302529 si->stats_pcpu = alloc_percpu_gfp(struct sfe_ipv6_stats, GFP_KERNEL | __GFP_ZERO);
2530 if (!si->stats_pcpu) {
2531 DEBUG_ERROR("failed to allocate stats memory for sfe_ipv6\n");
2532 goto exit0;
2533 }
2534
Xiaoping Fan978b3772015-05-27 14:15:18 -07002535 /*
2536 * Create sys/sfe_ipv6
2537 */
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302538 si->sys_ipv6 = kobject_create_and_add("sfe_ipv6", NULL);
2539 if (!si->sys_ipv6) {
Xiaoping Fan978b3772015-05-27 14:15:18 -07002540 DEBUG_ERROR("failed to register sfe_ipv6\n");
2541 goto exit1;
2542 }
2543
2544 /*
2545 * Create files, one for each parameter supported by this module.
2546 */
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302547 result = sysfs_create_file(si->sys_ipv6, &sfe_ipv6_debug_dev_attr.attr);
Xiaoping Fan978b3772015-05-27 14:15:18 -07002548 if (result) {
2549 DEBUG_ERROR("failed to register debug dev file: %d\n", result);
2550 goto exit2;
2551 }
2552
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302553 result = sysfs_create_file(si->sys_ipv6, &sfe_ipv6_cpu_attr.attr);
Ken Zhu137722d2021-09-23 17:57:36 -07002554 if (result) {
 2555		DEBUG_ERROR("failed to register stat_work_cpu file: %d\n", result);
2556 goto exit3;
2557 }
2558
Xiaoping Fan640faf42015-08-28 15:50:55 -07002559#ifdef CONFIG_NF_FLOW_COOKIE
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +05302560 result = sysfs_create_file(si->sys_ipv6, &sfe_ipv6_flow_cookie_attr.attr);
Xiaoping Fan640faf42015-08-28 15:50:55 -07002561 if (result) {
2562 DEBUG_ERROR("failed to register flow cookie enable file: %d\n", result);
Ken Zhu137722d2021-09-23 17:57:36 -07002563 goto exit4;
Xiaoping Fan640faf42015-08-28 15:50:55 -07002564 }
2565#endif /* CONFIG_NF_FLOW_COOKIE */
2566
#ifdef SFE_PROCESS_LOCAL_OUT
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0))
	result = nf_register_hooks(sfe_ipv6_ops_local_out, ARRAY_SIZE(sfe_ipv6_ops_local_out));
#else
	result = nf_register_net_hooks(&init_net, sfe_ipv6_ops_local_out, ARRAY_SIZE(sfe_ipv6_ops_local_out));
#endif
	if (result < 0) {
		DEBUG_ERROR("can't register nf local out hook: %d\n", result);
		goto exit5;
	}
	DEBUG_INFO("Registered nf local out hook: %d\n", result);
#endif

	/*
	 * Register our debug char device.
	 */
	result = register_chrdev(0, "sfe_ipv6", &sfe_ipv6_debug_dev_fops);
	if (result < 0) {
		DEBUG_ERROR("Failed to register chrdev: %d\n", result);
		goto exit6;
	}

	si->debug_dev = result;
	si->work_cpu = WORK_CPU_UNBOUND;
	spin_lock_init(&si->lock);

	/*
	 * Create the work that handles periodic statistics sync and schedule
	 * it; the lock is initialised above, before the work can first run.
	 */
	INIT_DELAYED_WORK(&(si->sync_dwork), sfe_ipv6_periodic_sync);
	schedule_delayed_work_on(si->work_cpu, &(si->sync_dwork), ((HZ + 99) / 100));

	return 0;

exit6:
#ifdef SFE_PROCESS_LOCAL_OUT
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0))
	DEBUG_TRACE("sfe: Unregister local out hook\n");
	nf_unregister_hooks(sfe_ipv6_ops_local_out, ARRAY_SIZE(sfe_ipv6_ops_local_out));
#else
	DEBUG_TRACE("sfe: Unregister local out hook\n");
	nf_unregister_net_hooks(&init_net, sfe_ipv6_ops_local_out, ARRAY_SIZE(sfe_ipv6_ops_local_out));
#endif
#endif

exit5:
#ifdef CONFIG_NF_FLOW_COOKIE
	sysfs_remove_file(si->sys_ipv6, &sfe_ipv6_flow_cookie_attr.attr);

exit4:
#endif /* CONFIG_NF_FLOW_COOKIE */
	sysfs_remove_file(si->sys_ipv6, &sfe_ipv6_cpu_attr.attr);

exit3:
	sysfs_remove_file(si->sys_ipv6, &sfe_ipv6_debug_dev_attr.attr);

exit2:
	kobject_put(si->sys_ipv6);

exit1:
	free_percpu(si->stats_pcpu);

exit0:
	return result;
}
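
/*
 * Minimal sketch (illustrative only, not part of this file) of how the
 * sfe_ipv6_init()/sfe_ipv6_exit() pair is typically wired into module
 * init/exit; the sfe_module_init/sfe_module_exit names are hypothetical
 * stand-ins for the common SFE glue code:
 *
 *	static int __init sfe_module_init(void)
 *	{
 *		return sfe_ipv6_init();
 *	}
 *	module_init(sfe_module_init);
 *
 *	static void __exit sfe_module_exit(void)
 *	{
 *		sfe_ipv6_exit();
 *	}
 *	module_exit(sfe_module_exit);
 */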

/*
 * sfe_ipv6_exit()
 */
void sfe_ipv6_exit(void)
{
	struct sfe_ipv6 *si = &__si6;

	DEBUG_INFO("SFE IPv6 exit\n");

	/*
	 * Destroy all connections.
	 */
	sfe_ipv6_destroy_all_rules_for_dev(NULL);

	/*
	 * Make sure no periodic sync work is left running.
	 */
	cancel_delayed_work_sync(&si->sync_dwork);

	unregister_chrdev(si->debug_dev, "sfe_ipv6");

#ifdef SFE_PROCESS_LOCAL_OUT
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0))
	DEBUG_TRACE("sfe: Unregister local out hook\n");
	nf_unregister_hooks(sfe_ipv6_ops_local_out, ARRAY_SIZE(sfe_ipv6_ops_local_out));
#else
	DEBUG_TRACE("sfe: Unregister local out hook\n");
	nf_unregister_net_hooks(&init_net, sfe_ipv6_ops_local_out, ARRAY_SIZE(sfe_ipv6_ops_local_out));
#endif
#endif

	/*
	 * Free the per-CPU stats only after the packet hooks are gone so that
	 * in-flight packets cannot touch freed memory.
	 */
	free_percpu(si->stats_pcpu);

#ifdef CONFIG_NF_FLOW_COOKIE
	sysfs_remove_file(si->sys_ipv6, &sfe_ipv6_flow_cookie_attr.attr);
#endif /* CONFIG_NF_FLOW_COOKIE */

	sysfs_remove_file(si->sys_ipv6, &sfe_ipv6_cpu_attr.attr);

	sysfs_remove_file(si->sys_ipv6, &sfe_ipv6_debug_dev_attr.attr);

	kobject_put(si->sys_ipv6);
}

#ifdef CONFIG_NF_FLOW_COOKIE
EXPORT_SYMBOL(sfe_ipv6_register_flow_cookie_cb);
EXPORT_SYMBOL(sfe_ipv6_unregister_flow_cookie_cb);
#endif
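
/*
 * Illustrative userspace sketch (not part of this module): the debug char
 * device is registered with a dynamically allocated major number, so the
 * /dev/sfe_ipv6 path below is an assumption -- the node has to be created
 * first from the major reported in /proc/devices, e.g.
 * "mknod /dev/sfe_ipv6 c <major> 0".
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		char buf[4096];
 *		ssize_t n;
 *		int fd = open("/dev/sfe_ipv6", O_RDONLY);	// hypothetical node path
 *
 *		if (fd < 0) {
 *			perror("open");
 *			return 1;
 *		}
 *
 *		// Stream out whatever the debug device produces.
 *		while ((n = read(fd, buf, sizeof(buf))) > 0) {
 *			fwrite(buf, 1, (size_t)n, stdout);
 *		}
 *
 *		close(fd);
 *		return 0;
 *	}
 */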