[qca-nss-sfe] add support for GRE tunnel handling.
Change-Id: Ie4bcdb2df8b13a7c3d63b79eac7ef8f1668ee9c5
Signed-off-by: Nitin Shetty <quic_nitinsj@quicinc.com>
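---

A minimal sketch (not part of this patch) of the dispatch site this handler is
assumed to plug into, following the per-protocol receive pattern used by
sfe_ipv6.c; the placement of the IPPROTO_GRE case is an assumption, only the
sfe_ipv6_recv_gre() signature comes from this patch:

    /* In the sfe_ipv6_recv() protocol demux -- assumed integration point. */
    case IPPROTO_GRE:
        return sfe_ipv6_recv_gre(si, skb, dev, len, iph, ihl, sync_on_find, tun_outer);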
diff --git a/sfe_ipv6_gre.c b/sfe_ipv6_gre.c
new file mode 100644
index 0000000..dfc0130
--- /dev/null
+++ b/sfe_ipv6_gre.c
@@ -0,0 +1,273 @@
+/*
+ * sfe_ipv6_gre.c
+ * Shortcut forwarding engine file for IPv6 GRE
+ *
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/skbuff.h>
+#include <net/gre.h>
+#include <net/protocol.h>
+#include <linux/etherdevice.h>
+#include <linux/version.h>
+#include <net/ip6_checksum.h>
+
+#include "sfe_debug.h"
+#include "sfe_api.h"
+#include "sfe.h"
+#include "sfe_flow_cookie.h"
+#include "sfe_ipv6.h"
+
+/*
+ * sfe_ipv6_recv_gre()
+ * Handle GRE packet receives and forwarding.
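+ *
+ * Returns 1 if the skb has been consumed by the fast path (forwarded,
+ * decapsulated or freed on error), 0 if the packet should be handed to
+ * the slow path.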
+ */
+int sfe_ipv6_recv_gre(struct sfe_ipv6 *si, struct sk_buff *skb, struct net_device *dev,
+ unsigned int len, struct ipv6hdr *iph, unsigned int ihl, bool sync_on_find, bool tun_outer)
+{
+ struct sfe_ipv6_connection_match *cm;
+ struct sfe_ipv6_addr *dest_ip;
+ struct sfe_ipv6_addr *src_ip;
+ struct net_device *xmit_dev;
+ bool bridge_flow;
+ bool passthrough;
+ bool ret;
+
+ /*
+ * Is our packet too short to contain a valid GRE header?
+ */
+ if (!pskb_may_pull(skb, (sizeof(struct gre_base_hdr) + ihl))) {
+
+ sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_GRE_HEADER_INCOMPLETE);
+ DEBUG_TRACE("packet too short for GRE header\n");
+ return 0;
+ }
+
+ /*
+ * Read the IP address information. Read the IP header data first
+ * because we've almost certainly got that in the cache. We may not yet have
+ * the GRE header cached though so allow more time for any prefetching.
+ */
+ src_ip = (struct sfe_ipv6_addr *)iph->saddr.s6_addr32;
+ dest_ip = (struct sfe_ipv6_addr *)iph->daddr.s6_addr32;
+
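+ /*
+ * GRE carries no L4 ports, so the connection lookup below is keyed on the
+ * IPv6 address pair with both ports set to zero.
+ */
+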
+ rcu_read_lock();
+
+ /*
+ * Look for a connection match.
+ */
+#ifdef CONFIG_NF_FLOW_COOKIE
+ cm = si->sfe_flow_cookie_table[skb->flow_cookie & SFE_FLOW_COOKIE_MASK].match;
+ if (unlikely(!cm)) {
+ cm = sfe_ipv6_find_connection_match_rcu(si, dev, IPPROTO_GRE, src_ip, 0, dest_ip, 0);
+ }
+#else
+ cm = sfe_ipv6_find_connection_match_rcu(si, dev, IPPROTO_GRE, src_ip, 0, dest_ip, 0);
+#endif
+ if (unlikely(!cm)) {
+ rcu_read_unlock();
+ sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_GRE_NO_CONNECTION);
+ DEBUG_TRACE("no connection match found dev %s src ip %pI6 dest ip %pI6\n", dev->name, src_ip, dest_ip);
+ return 0;
+ }
+
+ /*
+ * Validate the source interface.
+ */
+ if (unlikely((cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK) && (cm->match_dev != dev))) {
+ struct sfe_ipv6_connection *c = cm->connection;
+ int ret;
+ spin_lock_bh(&si->lock);
+ ret = sfe_ipv6_remove_connection(si, c);
+ spin_unlock_bh(&si->lock);
+
+ if (ret) {
+ sfe_ipv6_flush_connection(si, c, SFE_SYNC_REASON_FLUSH);
+ }
+ rcu_read_unlock();
+ sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_INVALID_SRC_IFACE);
+ DEBUG_TRACE("flush on wrong source interface check failure\n");
+ return 0;
+ }
+
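+ /*
+ * Note whether this is a passthrough flow; such packets are not exceptioned
+ * to the slow path on "sync on find" below.
+ */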
+ passthrough = cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_PASSTHROUGH;
+
+ /*
+ * If our packet has been marked as "sync on find" we can't actually
+ * forward it in the fast path. Now that we've found an associated
+ * connection we need to sync its status before exceptioning the packet
+ * to the slow path, unless it is a passthrough packet.
+ * TODO: revisit to ensure that passthrough traffic does not bypass the firewall for fragmented cases.
+ */
+ if (unlikely(sync_on_find) && !passthrough) {
+ sfe_ipv6_sync_status(si, cm->connection, SFE_SYNC_REASON_STATS);
+ rcu_read_unlock();
+
+ sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_GRE_IP_OPTIONS_OR_INITIAL_FRAGMENT);
+ DEBUG_TRACE("Sync on find\n");
+ return 0;
+ }
+
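+ /*
+ * Bridged flows are not routed, so the hop_limit handling below does not
+ * apply to them.
+ */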
+ bridge_flow = !!(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_BRIDGE_FLOW);
+
+ /*
+ * Does our hop_limit allow forwarding?
+ */
+ if (!bridge_flow && (iph->hop_limit < 2) && passthrough) {
+ sfe_ipv6_sync_status(si, cm->connection, SFE_SYNC_REASON_STATS);
+ rcu_read_unlock();
+
+ sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_GRE_SMALL_TTL);
+ DEBUG_TRACE("hop_limit too low\n");
+ return 0;
+ }
+
+ /*
+ * From this point on we're good to modify the packet.
+ */
+
+ /*
+ * Check if the skb was cloned. If it was, unshare it because the data area
+ * is going to be written in this path and we don't want to change the
+ * cloned skb's data section.
+ */
+ if (unlikely(skb_cloned(skb))) {
+ DEBUG_TRACE("%px: skb is a cloned skb\n", skb);
+ skb = skb_unshare(skb, GFP_ATOMIC);
+ if (!skb) {
+ DEBUG_WARN("Failed to unshare the cloned skb\n");
+ rcu_read_unlock();
+ return 1;
+ }
+
+ /*
+ * Update the iph pointer with the unshared skb's data area.
+ */
+ iph = (struct ipv6hdr *)skb->data;
+ }
+
+ /*
+ * The protocol handler will be valid only in the decap path.
+ */
+ if (cm->proto) {
+ struct inet6_protocol *ipprot = cm->proto;
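+
+ /*
+ * Strip the outer IPv6 header and hand the inner packet to the registered
+ * inet6 protocol handler for decapsulation; the skb is marked as fast
+ * forwarded before the handoff.
+ */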
+ skb_pull(skb, ihl);
+ skb_reset_transport_header(skb);
+ skb->fast_forwarded = 1;
+
+ ret = ipprot->handler(skb);
+ if (ret) {
+ this_cpu_inc(si->stats_pcpu->packets_not_forwarded64);
+ rcu_read_unlock();
+ return 1;
+ }
+
+ /*
+ * Update traffic stats.
+ */
+ atomic_inc(&cm->rx_packet_count);
+ atomic_add(len, &cm->rx_byte_count);
+
+ this_cpu_inc(si->stats_pcpu->packets_forwarded64);
+ rcu_read_unlock();
+ DEBUG_TRACE("%p: %s decap done\n",skb, __func__);
+ return 1;
+ }
+
+ /*
+ * If our packet is larger than the MTU of the transmit interface then
+ * we can't forward it easily.
+ */
+ if (unlikely(len > cm->xmit_dev_mtu)) {
+ sfe_ipv6_sync_status(si, cm->connection, SFE_SYNC_REASON_STATS);
+ rcu_read_unlock();
+
+ sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_GRE_NEEDS_FRAGMENTATION);
+ DEBUG_TRACE("Larger than MTU\n");
+ return 0;
+ }
+
+ /*
+ * Update DSCP
+ */
+ if (unlikely(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_DSCP_REMARK)) {
+ sfe_ipv6_change_dsfield(iph, cm->dscp);
+ }
+
+ /*
+ * Decrement our hop_limit, unless this is a bridged flow or we are
+ * forwarding the tunnel outer header.
+ */
+ if (likely(!bridge_flow)) {
+ iph->hop_limit -= (u8)!tun_outer;
+ }
+
+ /*
+ * Update traffic stats.
+ */
+ atomic_inc(&cm->rx_packet_count);
+ atomic_add(len, &cm->rx_byte_count);
+
+ xmit_dev = cm->xmit_dev;
+ skb->dev = xmit_dev;
+
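+ /*
+ * Write the L2 header: the common Ethernet case is open-coded for speed,
+ * otherwise fall back to dev_hard_header() for other header formats.
+ */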
+ if (cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_WRITE_FAST_ETH_HDR) {
+ /*
+ * For the simple case we write this really fast.
+ */
+ struct ethhdr *eth = (struct ethhdr *)__skb_push(skb, ETH_HLEN);
+ eth->h_proto = htons(ETH_P_IPV6);
+ ether_addr_copy((u8 *)eth->h_dest, (u8 *)cm->xmit_dest_mac);
+ ether_addr_copy((u8 *)eth->h_source, (u8 *)cm->xmit_src_mac);
+ } else if (cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_WRITE_L2_HDR) {
+ dev_hard_header(skb, xmit_dev, ETH_P_IPV6,
+ cm->xmit_dest_mac, cm->xmit_src_mac, len);
+ }
+
+ /*
+ * Update priority of skb.
+ */
+ if (unlikely(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_PRIORITY_REMARK)) {
+ skb->priority = cm->priority;
+ }
+
+ /*
+ * Mark outgoing packet.
+ */
+ if (unlikely(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_MARK)) {
+ skb->mark = cm->mark;
+ }
+
+ rcu_read_unlock();
+
+ this_cpu_inc(si->stats_pcpu->packets_forwarded64);
+
+ /*
+ * We're going to check for GSO flags when we transmit the packet so
+ * start fetching the necessary cache line now.
+ */
+ prefetch(skb_shinfo(skb));
+
+ /*
+ * Mark that this packet has been fast forwarded.
+ */
+ skb->fast_forwarded = 1;
+
+ /*
+ * Send the packet on its way.
+ */
+ dev_queue_xmit(skb);
+
+ return 1;
+}