[qca-nss-sfe] Support VLAN offload for tunipip6 in SFE

VLAN tagging (encapsulation) and untagging (decapsulation)
can be accelerated in SFE for tunipip6.

Signed-off-by: Tian Yang <quic_tiany@quicinc.com>
Change-Id: I2723cbe426fc85a8292c12be5abb8b1ca663772f
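
Note: with this change the tunipip6 fast path checks the expected ingress
802.1Q tags before decapsulation (sfe_vlan_validate_ingress_tag()) and pushes
the configured egress tags before transmit (sfe_vlan_add_tag()). The
standalone sketch below only illustrates the 802.1Q header manipulation
involved; validate_ingress_tag(), add_egress_tag(), struct vlan_tag and the
flat-buffer/main() harness are illustrative assumptions, not the SFE
implementation, which operates on sk_buffs.

/*
 * Minimal standalone sketch of 802.1Q tag handling (illustrative only):
 * validate the expected ingress tag on a raw Ethernet frame and prepend
 * an egress tag. Builds and runs in userspace.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

#define VLAN_TPID_8021Q	0x8100
#define VLAN_VID_MASK	0x0fff

struct vlan_tag {
	uint16_t tpid;	/* host order, e.g. 0x8100 */
	uint16_t tci;	/* host order: PCP/DEI/VID */
};

/* Check that the frame carries the expected tag right after the MAC addresses. */
static int validate_ingress_tag(const uint8_t *frame, size_t len,
				const struct vlan_tag *expect)
{
	uint16_t tpid, tci;

	if (len < 14 + 4)	/* Ethernet header + one 802.1Q tag */
		return 0;

	memcpy(&tpid, frame + 12, 2);
	memcpy(&tci, frame + 14, 2);

	return ntohs(tpid) == expect->tpid &&
	       (ntohs(tci) & VLAN_VID_MASK) == (expect->tci & VLAN_VID_MASK);
}

/* Prepend a tag: shift everything after the MAC addresses and insert TPID/TCI. */
static size_t add_egress_tag(uint8_t *frame, size_t len, size_t cap,
			     const struct vlan_tag *tag)
{
	uint16_t tpid = htons(tag->tpid), tci = htons(tag->tci);

	if (len < 14 || len + 4 > cap)
		return 0;

	memmove(frame + 16, frame + 12, len - 12);	/* make room after the MACs */
	memcpy(frame + 12, &tpid, 2);
	memcpy(frame + 14, &tci, 2);
	return len + 4;
}

int main(void)
{
	uint8_t frame[128] = { /* dst MAC */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
			       /* src MAC */ 0x02, 0x00, 0x00, 0x00, 0x00, 0x01,
			       /* type: IPv6 */ 0x86, 0xdd };
	struct vlan_tag tag = { .tpid = VLAN_TPID_8021Q, .tci = 100 };
	size_t len = add_egress_tag(frame, 60, sizeof(frame), &tag);

	printf("tagged len=%zu, ingress check=%d\n",
	       len, validate_ingress_tag(frame, len, &tag));
	return 0;
}
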
diff --git a/sfe_ipv6.c b/sfe_ipv6.c
index 16aec27..162a1d7 100644
--- a/sfe_ipv6.c
+++ b/sfe_ipv6.c
@@ -535,6 +535,12 @@
 	}
 
 	/*
+	 * Release the reference held on the decap direction top_interface_dev.
+	 */
+	if (c->reply_match->top_interface_dev) {
+		dev_put(c->reply_match->top_interface_dev);
+	}
+	/*
 	 * Remove the connection match objects.
 	 */
 	sfe_ipv6_remove_connection_match(si, c->reply_match);
@@ -1440,6 +1446,7 @@
 #else
 		if (!refcount_inc_not_zero(&sk->sk_refcnt)) {
 #endif
+			this_cpu_inc(si->stats_pcpu->connection_create_failures64);
 			spin_unlock_bh(&si->lock);
 			kfree(reply_cm);
 			kfree(original_cm);
@@ -1507,6 +1514,8 @@
 	 */
 	original_cm->proto = NULL;
 	reply_cm->proto = NULL;
+	original_cm->top_interface_dev = NULL;
+	reply_cm->top_interface_dev = NULL;
 
 #ifdef SFE_GRE_TUN_ENABLE
 	if (!(reply_cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_PASSTHROUGH)) {
@@ -1515,6 +1524,8 @@
 		rcu_read_unlock();
 
 		if (unlikely(!reply_cm->proto)) {
+			this_cpu_inc(si->stats_pcpu->connection_create_failures64);
+			spin_unlock_bh(&si->lock);
 			kfree(reply_cm);
 			kfree(original_cm);
 			kfree(c);
@@ -1535,6 +1546,20 @@
 		rcu_read_lock();
 		reply_cm->proto = rcu_dereference(inet6_protos[tuple->protocol]);
 		rcu_read_unlock();
+		reply_cm->top_interface_dev = dev_get_by_index(&init_net, msg->conn_rule.return_top_interface_num);
+
+		if (unlikely(!reply_cm->top_interface_dev)) {
+			DEBUG_WARN("%px: Unable to find top_interface_dev corresponding to %d\n", msg,
+						msg->conn_rule.return_top_interface_num);
+			this_cpu_inc(si->stats_pcpu->connection_create_failures64);
+			spin_unlock_bh(&si->lock);
+			kfree(reply_cm);
+			kfree(original_cm);
+			kfree(c);
+			dev_put(src_dev);
+			dev_put(dest_dev);
+			return -EINVAL;
+		}
 	}
 	/*
 	 * If l2_features are disabled and flow uses l2 features such as macvlan/bridge/pppoe/vlan,
diff --git a/sfe_ipv6.h b/sfe_ipv6.h
index c8e628c..e478d5b 100644
--- a/sfe_ipv6.h
+++ b/sfe_ipv6.h
@@ -196,6 +196,8 @@
 	u16 pppoe_session_id;
 	u8 pppoe_remote_mac[ETH_ALEN];
 
+	struct net_device *top_interface_dev;	/* Used by tunipip6 to store the decap VLAN netdevice. */
+
 	/*
 	 * Size of all needed L2 headers
 	 */
diff --git a/sfe_ipv6_tunipip6.c b/sfe_ipv6_tunipip6.c
index 8f2b8ba..aaf2f89 100644
--- a/sfe_ipv6_tunipip6.c
+++ b/sfe_ipv6_tunipip6.c
@@ -28,6 +28,7 @@
 #include "sfe.h"
 #include "sfe_flow_cookie.h"
 #include "sfe_ipv6.h"
+#include "sfe_vlan.h"
 
 /*
  * sfe_ipv6_recv_tunipip6()
@@ -95,6 +96,26 @@
 #else
 		struct inet6_protocol *ipprot = cm->proto;
 #endif
+
+		/*
+		 * Do we expect an ingress VLAN tag for this flow?
+		 * Note: the ingress tag check is only done in the decap direction.
+		 */
+		if (unlikely(!sfe_vlan_validate_ingress_tag(skb, cm->ingress_vlan_hdr_cnt, cm->ingress_vlan_hdr, l2_info))) {
+			rcu_read_unlock();
+			sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_INGRESS_VLAN_TAG_MISMATCH);
+			DEBUG_TRACE("VLAN tag mismatch. skb=%px\n"
+				"cm: %u [0]=%x/%x [1]=%x/%x\n"
+				"l2_info: %u [0]=%x/%x [1]=%x/%x\n", skb,
+				cm->ingress_vlan_hdr_cnt,
+				htons(cm->ingress_vlan_hdr[0].tpid), cm->ingress_vlan_hdr[0].tci,
+				htons(cm->ingress_vlan_hdr[1].tpid), cm->ingress_vlan_hdr[1].tci,
+				l2_info->vlan_hdr_cnt,
+				htons(l2_info->vlan_hdr[0].tpid), l2_info->vlan_hdr[0].tci,
+				htons(l2_info->vlan_hdr[1].tpid), l2_info->vlan_hdr[1].tci);
+			return 0;
+		}
+		skb_reset_network_header(skb);
 		skb_pull(skb, ihl);
 		skb_reset_transport_header(skb);
 
@@ -108,9 +129,23 @@
 		this_cpu_inc(si->stats_pcpu->packets_forwarded64);
 		rcu_read_unlock();
 		DEBUG_TRACE("%px: %s decap done \n",skb, __func__);
+
+		/*
+		 * Set skb->dev to the top interface so the tunnel lookup in
+		 * the protocol handler matches the correct netdevice.
+		 */
+		skb->dev = cm->top_interface_dev;
 		ipprot->handler(skb);
 		return 1;
+	}
 
+	/*
+	 * Check if skb has enough headroom to write L2 headers
+	 */
+	if (unlikely(skb_headroom(skb) < cm->l2_hdr_size)) {
+		rcu_read_unlock();
+		DEBUG_WARN("%px: Not enough headroom: %u\n", skb, skb_headroom(skb));
+		sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_NO_HEADROOM);
+		return 0;
 	}
 
 	/*
@@ -142,15 +177,22 @@
 	skb->dev = cm->xmit_dev;
 
 	/*
+	 * Check to see if we need to add VLAN tags
+	 */
+	if (unlikely(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_INSERT_EGRESS_VLAN_TAG)) {
+		sfe_vlan_add_tag(skb, cm->egress_vlan_hdr_cnt, cm->egress_vlan_hdr);
+	}
+
+	/*
 	 * Check to see if we need to write a header.
 	 */
 	if (likely(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_WRITE_L2_HDR)) {
 		if (unlikely(!(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_WRITE_FAST_ETH_HDR))) {
-			dev_hard_header(skb, cm->xmit_dev, ETH_P_IPV6,
+			dev_hard_header(skb, cm->xmit_dev, ntohs(skb->protocol),
 					cm->xmit_dest_mac, cm->xmit_src_mac, len);
 		} else {
 			struct ethhdr *eth = (struct ethhdr *)__skb_push(skb, ETH_HLEN);
-			eth->h_proto = htons(ETH_P_IPV6);
+			eth->h_proto = skb->protocol;
 			ether_addr_copy((u8 *)eth->h_dest, (u8 *)cm->xmit_dest_mac);
 			ether_addr_copy((u8 *)eth->h_source, (u8 *)cm->xmit_src_mac);
 		}