ipsec: add spd fast path matching

This patch adds policy matching for the spd fast path. Fast path
matching is introduced for outbound traffic only.

Type: feature
Signed-off-by: Piotr Bronowski <piotrx.bronowski@intel.com>
Change-Id: I03d5edf7d7fbc03bf3e6edbe33cb15bc965f9d4e
diff --git a/src/vnet/ipsec/ipsec_output.c b/src/vnet/ipsec/ipsec_output.c
index d2eb412..96c6f27 100644
--- a/src/vnet/ipsec/ipsec_output.c
+++ b/src/vnet/ipsec/ipsec_output.c
@@ -74,6 +74,20 @@
   return 0;
 }
 
+always_inline void
+ipsec_fp_5tuple_from_ip6_range (ipsec_fp_5tuple_t *tuple, ip6_address_t *la,
+				ip6_address_t *ra, u16 lp, u16 rp, u8 pr)
+{
+  clib_memcpy_fast (&tuple->ip6_laddr, la, sizeof (ip6_address_t));
+  clib_memcpy_fast (&tuple->ip6_raddr, ra, sizeof (ip6_address_t));
+
+  tuple->lport = lp;
+  tuple->rport = rp;
+  tuple->protocol = pr;
+  tuple->is_ipv6 = 1;
+}
+
 always_inline ipsec_policy_t *
 ipsec6_output_policy_match (ipsec_spd_t * spd,
 			    ip6_address_t * la,
@@ -81,12 +95,24 @@
 {
   ipsec_main_t *im = &ipsec_main;
   ipsec_policy_t *p;
+  ipsec_policy_t *policies[1];
+  ipsec_fp_5tuple_t tuples[1];
+  u32 fp_policy_ids[1];
 
   u32 *i;
 
   if (!spd)
     return 0;
 
+  ipsec_fp_5tuple_from_ip6_range (&tuples[0], la, ra, lp, rp, pr);
+  if (im->fp_spd_is_enabled &&
+      (1 == ipsec_fp_out_policy_match_n (&spd->fp_spd, 1, tuples, policies,
+					 fp_policy_ids, 1)))
+    {
+      p = policies[0];
+      i = fp_policy_ids;
+      return p;
+    }
+
   vec_foreach (i, spd->policies[IPSEC_SPD_POLICY_IP6_OUTBOUND])
   {
     p = pool_elt_at_index (im->policies, *i);
diff --git a/src/vnet/ipsec/ipsec_output.h b/src/vnet/ipsec/ipsec_output.h
index 63d97c0..6608e3c 100644
--- a/src/vnet/ipsec/ipsec_output.h
+++ b/src/vnet/ipsec/ipsec_output.h
@@ -20,6 +20,7 @@
 
 #include <vppinfra/types.h>
 #include <vnet/ipsec/ipsec_spd.h>
+#include <vnet/ipsec/ipsec_spd_fp_lookup.h>
 
 always_inline void
 ipsec4_out_spd_add_flow_cache_entry (ipsec_main_t *im, u8 pr, u32 la, u32 ra,
@@ -62,6 +63,218 @@
   return;
 }
 
+always_inline void
+ipsec4_out_spd_add_flow_cache_entry_n (ipsec_main_t *im,
+				       ipsec4_spd_5tuple_t *ip4_5tuple,
+				       u32 pol_id)
+{
+  u64 hash;
+  u8 overwrite = 0, stale_overwrite = 0;
+
+  ip4_5tuple->kv_16_8.value = (((u64) pol_id) << 32) | ((u64) im->epoch_count);
+
+  hash = ipsec4_hash_16_8 (&ip4_5tuple->kv_16_8);
+  hash &= (im->ipsec4_out_spd_hash_num_buckets - 1);
+
+  ipsec_spinlock_lock (&im->ipsec4_out_spd_hash_tbl[hash].bucket_lock);
+  /* Check if we are overwriting an existing entry so we know
+     whether to increment the flow cache counter. Since the flow
+     cache counter is reset on any policy add/remove, but hash table
+     values are not, we also need to check whether the entry we are
+     overwriting is stale. If it is a stale entry overwrite, we still
+     want to increment the flow cache counter. */
+  overwrite = (im->ipsec4_out_spd_hash_tbl[hash].value != 0);
+  /* Check for stale entry by comparing with current epoch count */
+  if (PREDICT_FALSE (overwrite))
+    stale_overwrite =
+      (im->epoch_count !=
+       ((u32) (im->ipsec4_out_spd_hash_tbl[hash].value & 0xFFFFFFFF)));
+  clib_memcpy_fast (&im->ipsec4_out_spd_hash_tbl[hash], &ip4_5tuple->kv_16_8,
+		    sizeof (ip4_5tuple->kv_16_8));
+  ipsec_spinlock_unlock (&im->ipsec4_out_spd_hash_tbl[hash].bucket_lock);
+
+  /* Increment the counter to track active flow cache entries
+    when entering a fresh entry or overwriting a stale one */
+  if (!overwrite || stale_overwrite)
+    clib_atomic_fetch_add_relax (&im->ipsec4_out_spd_flow_cache_entries, 1);
+
+  return;
+}
+
+always_inline void
+ipsec_fp_5tuple_from_ip4_range (ipsec_fp_5tuple_t *tuple, u32 la, u32 ra,
+				u16 lp, u16 rp, u8 pr)
+{
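+  /* Zero the ip6-sized address padding so the masked lookup key is
+     deterministic. */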
+  clib_memset (tuple->l3_zero_pad, 0, sizeof (tuple->l3_zero_pad));
+  tuple->laddr.as_u32 = clib_host_to_net_u32 (la);
+  tuple->raddr.as_u32 = clib_host_to_net_u32 (ra);
+
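+  /* Only TCP, UDP and SCTP match on ports; zero them for any other
+     protocol so port bits never influence the lookup. */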
+  if (PREDICT_FALSE ((pr != IP_PROTOCOL_TCP) && (pr != IP_PROTOCOL_UDP) &&
+		     (pr != IP_PROTOCOL_SCTP)))
+    {
+      tuple->lport = 0;
+      tuple->rport = 0;
+    }
+  else
+    {
+      tuple->lport = lp;
+      tuple->rport = rp;
+    }
+
+  tuple->protocol = pr;
+  tuple->is_ipv6 = 0;
+}
+
+always_inline void
+ipsec_fp_5tuple_from_ip4_range_n (ipsec_fp_5tuple_t *tuples,
+				  ipsec4_spd_5tuple_t *ip4_5tuple, u32 n)
+{
+  u32 n_left = n;
+  ipsec_fp_5tuple_t *tuple = tuples;
+
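+  /* Convert each flow cache 5-tuple in the burst to the fast path
+     5-tuple layout. */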
+  while (n_left)
+    {
+      clib_memset (tuple->l3_zero_pad, 0, sizeof (tuple->l3_zero_pad));
+      tuple->laddr.as_u32 =
+	clib_host_to_net_u32 (ip4_5tuple->ip4_addr[0].as_u32);
+      tuple->raddr.as_u32 =
+	clib_host_to_net_u32 (ip4_5tuple->ip4_addr[1].as_u32);
+      if (PREDICT_FALSE ((ip4_5tuple->proto != IP_PROTOCOL_TCP) &&
+			 (ip4_5tuple->proto != IP_PROTOCOL_UDP) &&
+			 (ip4_5tuple->proto != IP_PROTOCOL_SCTP)))
+	{
+	  tuple->lport = 0;
+	  tuple->rport = 0;
+	}
+      else
+	{
+	  tuple->lport = ip4_5tuple->port[0];
+	  tuple->rport = ip4_5tuple->port[1];
+	}
+      tuple->protocol = ip4_5tuple->proto;
+      tuple->is_ipv6 = 0;
+      n_left--;
+      tuple++;
+    }
+}
+
+always_inline int
+ipsec_output_policy_match_n (ipsec_spd_t *spd,
+			     ipsec4_spd_5tuple_t *ip4_5tuples,
+			     ipsec_policy_t **policies, u32 n,
+			     u8 flow_cache_enabled)
+{
+  ipsec_main_t *im = &ipsec_main;
+  ipsec_policy_t *p;
+  ipsec_policy_t **pp = policies;
+  u32 n_left = n;
+  ipsec4_spd_5tuple_t *ip4_5tuple = ip4_5tuples;
+  u32 policy_ids[n], *policy_id = policy_ids;
+  ipsec_fp_5tuple_t tuples[n];
+  u32 *i;
+  u32 counter = 0;
+
+  if (!spd)
+    return 0;
+
+  clib_memset (policies, 0, n * sizeof (ipsec_policy_t *));
+
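+  /* Try the fast path lookup first; any tuple it leaves unmatched
+     falls back to the legacy linear SPD walk below. */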
+  if (im->fp_spd_is_enabled)
+    {
+      ipsec_fp_5tuple_from_ip4_range_n (tuples, ip4_5tuples, n);
+      counter += ipsec_fp_out_policy_match_n (&spd->fp_spd, 0, tuples,
+					      policies, policy_ids, n);
+    }
+
+  while (n_left)
+    {
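+      /* Skip tuples already matched by the fast path. */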
+      if (*pp != 0)
+	goto next;
+
+      vec_foreach (i, spd->policies[IPSEC_SPD_POLICY_IP4_OUTBOUND])
+	{
+	  p = pool_elt_at_index (im->policies, *i);
+	  if (PREDICT_FALSE (p->protocol &&
+			     (p->protocol != ip4_5tuple->proto)))
+	    continue;
+
+	  if (ip4_5tuple->ip4_addr[0].as_u32 <
+	      clib_net_to_host_u32 (p->raddr.start.ip4.as_u32))
+	    continue;
+
+	  if (ip4_5tuple->ip4_addr[1].as_u32 >
+	      clib_net_to_host_u32 (p->raddr.stop.ip4.as_u32))
+	    continue;
+
+	  if (ip4_5tuple->ip4_addr[0].as_u32 <
+	      clib_net_to_host_u32 (p->laddr.start.ip4.as_u32))
+	    continue;
+
+	  if (ip4_5tuple->ip4_addr[1].as_u32 >
+	      clib_net_to_host_u32 (p->laddr.stop.ip4.as_u32))
+	    continue;
+
+	  if (PREDICT_FALSE ((ip4_5tuple->proto != IP_PROTOCOL_TCP) &&
+			     (ip4_5tuple->proto != IP_PROTOCOL_UDP) &&
+			     (ip4_5tuple->proto != IP_PROTOCOL_SCTP)))
+	    {
+	      ip4_5tuple->port[0] = 0;
+	      ip4_5tuple->port[1] = 0;
+	      goto add_policy;
+	    }
+
+	  if (ip4_5tuple->port[0] < p->lport.start)
+	    continue;
+
+	  if (ip4_5tuple->port[0] > p->lport.stop)
+	    continue;
+
+	  if (ip4_5tuple->port[1] < p->rport.start)
+	    continue;
+
+	  if (ip4_5tuple->port[1] > p->rport.stop)
+	    continue;
+
+	add_policy:
+	  *pp = p;
+	  *policy_id = *i;
+	  counter++;
+	  break;
+	}
+
+    next:
+      n_left--;
+      pp++;
+      ip4_5tuple++;
+      policy_id++;
+    }
+
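+  /* Cache the matched policies so subsequent packets of these flows
+     can skip the policy lookup. */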
+  if (flow_cache_enabled)
+    {
+      n_left = n;
+      policy_id = policy_ids;
+      ip4_5tuple = ip4_5tuples;
+      pp = policies;
+
+      while (n_left)
+	{
+	  if (*pp != NULL)
+	    {
+	      /* Add an Entry in Flow cache */
+	      ipsec4_out_spd_add_flow_cache_entry_n (im, ip4_5tuple,
+						     *policy_id);
+	    }
+
+	  n_left--;
+	  policy_id++;
+	  ip4_5tuple++;
+	  pp++;
+	}
+    }
+
+  return counter;
+}
+
 always_inline ipsec_policy_t *
 ipsec4_out_spd_find_flow_cache_entry (ipsec_main_t *im, u8 pr, u32 la, u32 ra,
 				      u16 lp, u16 rp)
@@ -108,11 +321,32 @@
 {
   ipsec_main_t *im = &ipsec_main;
   ipsec_policy_t *p;
+  ipsec_policy_t *policies[1];
+  ipsec_fp_5tuple_t tuples[1];
+  u32 fp_policy_ids[1];
+
   u32 *i;
 
   if (!spd)
     return 0;
 
+  ipsec_fp_5tuple_from_ip4_range (&tuples[0], la, ra, lp, rp, pr);
+
+  if (im->fp_spd_is_enabled &&
+      (1 == ipsec_fp_out_policy_match_n (&spd->fp_spd, 0, tuples, policies,
+					 fp_policy_ids, 1)))
+    {
+      p = policies[0];
+      i = fp_policy_ids;
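+      /* Mirror the slow path: flows of port-less protocols are cached
+	 with zeroed ports. */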
+      if (PREDICT_FALSE ((pr != IP_PROTOCOL_TCP) && (pr != IP_PROTOCOL_UDP) &&
+			 (pr != IP_PROTOCOL_SCTP)))
+	{
+	  lp = 0;
+	  rp = 0;
+	}
+      goto add_flow_cache;
+    }
+
   vec_foreach (i, spd->policies[IPSEC_SPD_POLICY_IP4_OUTBOUND])
     {
       p = pool_elt_at_index (im->policies, *i);
diff --git a/src/vnet/ipsec/ipsec_spd_fp_lookup.h b/src/vnet/ipsec/ipsec_spd_fp_lookup.h
new file mode 100644
index 0000000..571a4b8
--- /dev/null
+++ b/src/vnet/ipsec/ipsec_spd_fp_lookup.h
@@ -0,0 +1,324 @@
+/*
+ *------------------------------------------------------------------
+ * Copyright (c) 2022 Intel and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#ifndef IPSEC_SPD_FP_LOOKUP_H
+#define IPSEC_SPD_FP_LOOKUP_H
+
+#include <vnet/ipsec/ipsec.h>
+
+/**
+ * @brief function handler to perform lookup in fastpath SPD
+ * for an inbound traffic burst of n packets
+ * (placeholder: inbound fast path matching is not implemented yet,
+ * so no policies are matched and 0 is returned)
+ **/
+
+static_always_inline u32
+ipsec_fp_in_policy_match_n (void *spd_fp, u8 is_ipv6,
+			    ipsec_fp_5tuple_t *tuples,
+			    ipsec_policy_t **policies, u32 *policy_ids, u32 n)
+{
+  return 0;
+}
+
+static_always_inline int
+single_rule_match_5tuple (ipsec_policy_t *policy, ipsec_fp_5tuple_t *match)
+{
+  if (PREDICT_FALSE (policy->is_ipv6 != match->is_ipv6))
+    return (0);
+
+  if (PREDICT_FALSE (policy->protocol != IPSEC_POLICY_PROTOCOL_ANY &&
+		     (policy->protocol != match->protocol)))
+    return (0);
+
+  if (!policy->is_ipv6)
+    {
+      if (PREDICT_FALSE (
+	    clib_net_to_host_u32 (match->laddr.as_u32) <
+	    clib_net_to_host_u32 (policy->laddr.start.ip4.as_u32)))
+	return (0);
+
+      if (PREDICT_FALSE (clib_net_to_host_u32 (match->laddr.as_u32) >
+			 clib_net_to_host_u32 (policy->laddr.stop.ip4.as_u32)))
+	return (0);
+
+      if (PREDICT_FALSE (
+	    clib_net_to_host_u32 (match->raddr.as_u32) <
+	    clib_net_to_host_u32 (policy->raddr.start.ip4.as_u32)))
+	return (0);
+
+      if (PREDICT_FALSE (clib_net_to_host_u32 (match->raddr.as_u32) >
+			 clib_net_to_host_u32 (policy->raddr.stop.ip4.as_u32)))
+	return (0);
+    }
+  else
+    {
+      if (ip6_address_compare (&match->ip6_laddr, &policy->laddr.start.ip6) <
+	  0)
+	return (0);
+
+      if (ip6_address_compare (&policy->laddr.stop.ip6, &match->ip6_laddr) < 0)
+	return (0);
+
+      if (ip6_address_compare (&match->ip6_raddr, &policy->raddr.start.ip6) <
+	  0)
+	return (0);
+
+      if (ip6_address_compare (&policy->raddr.stop.ip6, &match->ip6_raddr) < 0)
+	return (0);
+    }
+
+  if (PREDICT_FALSE ((match->protocol != IP_PROTOCOL_TCP) &&
+		     (match->protocol != IP_PROTOCOL_UDP) &&
+		     (match->protocol != IP_PROTOCOL_SCTP)))
+    {
+      return (1);
+    }
+
+  if (match->lport < policy->lport.start)
+    return (0);
+
+  if (match->lport > policy->lport.stop)
+    return (0);
+
+  if (match->rport < policy->rport.start)
+    return (0);
+
+  if (match->rport > policy->rport.stop)
+    return (0);
+
+  return (1);
+}
+
+static_always_inline u32
+ipsec_fp_ip6_out_policy_match_n (void *spd_fp, ipsec_fp_5tuple_t *tuples,
+				 ipsec_policy_t **policies, u32 *ids, u32 n)
+{
+  u32 last_priority[n];
+  u32 i = 0;
+  u32 counter = 0;
+  ipsec_fp_mask_type_entry_t *mte;
+  u32 *mti;
+  ipsec_fp_5tuple_t *match = tuples;
+  ipsec_policy_t *policy;
+
+  u32 n_left = n;
+  clib_bihash_kv_40_8_t kv;
+  /* result of the lookup */
+  clib_bihash_kv_40_8_t result;
+  ipsec_fp_lookup_value_t *result_val =
+    (ipsec_fp_lookup_value_t *) &result.value;
+  u64 *pkey, *pmatch, *pmask;
+  ipsec_main_t *im = &ipsec_main;
+  ipsec_spd_fp_t *pspd_fp = (ipsec_spd_fp_t *) spd_fp;
+  u32 *mask_type_ids = pspd_fp->fp_mask_types[IPSEC_SPD_POLICY_IP6_OUTBOUND];
+
+  /* clear the list of matched policy pointers */
+  clib_memset (policies, 0, n * sizeof (*policies));
+  clib_memset (last_priority, 0, n * sizeof (u32));
+  n_left = n;
+  while (n_left)
+    {
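+      /* Probe the hash once per mask type; each registered mask type
+	 describes one wildcard pattern used by installed policies. */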
+      vec_foreach (mti, mask_type_ids)
+	{
+	  mte = im->fp_mask_types + *mti;
+
+	  pmatch = (u64 *) match;
+	  pmask = (u64 *) &mte->mask;
+	  pkey = (u64 *) kv.key;
+
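+	  /* AND the 5-tuple with this mask to build the lookup key. */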
+	  *pkey++ = *pmatch++ & *pmask++;
+	  *pkey++ = *pmatch++ & *pmask++;
+	  *pkey++ = *pmatch++ & *pmask++;
+	  *pkey++ = *pmatch++ & *pmask++;
+	  *pkey++ = *pmatch++ & *pmask++;
+	  *pkey++ = *pmatch++ & *pmask++;
+
+	  /* look up the hash for this packet with the current mask. */
+	  int res = clib_bihash_search_inline_2_40_8 (
+	    &pspd_fp->fp_ip6_lookup_hash, &kv, &result);
+
+	  if (res == 0)
+	    {
+	      /* There is a hit in the hash table. */
+	      /* Find the policy with highest priority. */
+	      /* Store the lookup results in a dedicated array. */
+
+	      if (vec_len (result_val->fp_policies_ids) > 1)
+		{
+		  u32 *policy_id;
+		  vec_foreach (policy_id, result_val->fp_policies_ids)
+		    {
+		      policy = im->policies + *policy_id;
+
+		      if (single_rule_match_5tuple (policy, match))
+			{
+			  if (last_priority[i] < policy->priority)
+			    {
+			      last_priority[i] = policy->priority;
+			      if (policies[i] == 0)
+				counter++;
+			      policies[i] = policy;
+			      ids[i] = *policy_id;
+			    }
+			}
+		    }
+		}
+	      else
+		{
+		  u32 *policy_id;
+		  ASSERT (vec_len (result_val->fp_policies_ids) == 1);
+		  policy_id = result_val->fp_policies_ids;
+		  policy = im->policies + *policy_id;
+		  if (single_rule_match_5tuple (policy, match))
+		    {
+		      if (last_priority[i] < policy->priority)
+			{
+			  last_priority[i] = policy->priority;
+			  if (policies[i] == 0)
+			    counter++;
+			  policies[i] = policy;
+			  ids[i] = *policy_id;
+			}
+		    }
+		}
+	    }
+	}
+      n_left--;
+      match++;
+      i++;
+    }
+  return counter;
+}
+
+static_always_inline u32
+ipsec_fp_ip4_out_policy_match_n (void *spd_fp, ipsec_fp_5tuple_t *tuples,
+				 ipsec_policy_t **policies, u32 *ids, u32 n)
+{
+  u32 last_priority[n];
+  u32 i = 0;
+  u32 counter = 0;
+  ipsec_fp_mask_type_entry_t *mte;
+  u32 *mti;
+  ipsec_fp_5tuple_t *match = tuples;
+  ipsec_policy_t *policy;
+
+  u32 n_left = n;
+  clib_bihash_kv_16_8_t kv;
+  /* result of the lookup */
+  clib_bihash_kv_16_8_t result;
+  ipsec_fp_lookup_value_t *result_val =
+    (ipsec_fp_lookup_value_t *) &result.value;
+  u64 *pkey, *pmatch, *pmask;
+  ipsec_main_t *im = &ipsec_main;
+  ipsec_spd_fp_t *pspd_fp = (ipsec_spd_fp_t *) spd_fp;
+  u32 *mask_type_ids = pspd_fp->fp_mask_types[IPSEC_SPD_POLICY_IP4_OUTBOUND];
+
+  /* clear the list of matched policy pointers */
+  clib_memset (policies, 0, n * sizeof (*policies));
+  clib_memset (last_priority, 0, n * sizeof (u32));
+  n_left = n;
+  while (n_left)
+    {
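+      /* Probe the hash once per mask type registered for ip4 outbound
+	 policies. */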
+      vec_foreach (mti, mask_type_ids)
+	{
+	  mte = im->fp_mask_types + *mti;
+
+	  pmatch = (u64 *) &match->laddr;
+	  pmask = (u64 *) &mte->mask.laddr;
+	  pkey = (u64 *) kv.key;
+
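+	  /* AND the addresses and ports with this mask to build the
+	     16-byte lookup key. */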
+	  *pkey++ = *pmatch++ & *pmask++;
+	  *pkey++ = *pmatch++ & *pmask++;
+
+	  /* look up the hash for this packet with the current mask. */
+	  int res = clib_bihash_search_inline_2_16_8 (
+	    &pspd_fp->fp_ip4_lookup_hash, &kv, &result);
+
+	  if (res == 0)
+	    {
+	      /* There is a hit in the hash table. */
+	      /* Find the policy with highest priority. */
+	      /* Store the lookup results in a dedicated array. */
+
+	      if (vec_len (result_val->fp_policies_ids) > 1)
+		{
+		  u32 *policy_id;
+		  vec_foreach (policy_id, result_val->fp_policies_ids)
+		    {
+		      policy = im->policies + *policy_id;
+
+		      if ((last_priority[i] < policy->priority) &&
+			  (single_rule_match_5tuple (policy, match)))
+			{
+			  last_priority[i] = policy->priority;
+			  if (policies[i] == 0)
+			    counter++;
+			  policies[i] = policy;
+			  ids[i] = *policy_id;
+			}
+		    }
+		}
+	      else
+		{
+		  u32 *policy_id;
+		  ASSERT (vec_len (result_val->fp_policies_ids) == 1);
+		  policy_id = result_val->fp_policies_ids;
+		  policy = im->policies + *policy_id;
+		  if ((last_priority[i] < policy->priority) &&
+		      (single_rule_match_5tuple (policy, match)))
+		    {
+		      last_priority[i] = policy->priority;
+		      if (policies[i] == 0)
+			counter++;
+		      policies[i] = policy;
+		      ids[i] = *policy_id;
+		    }
+		}
+	    }
+	}
+
+      i++;
+      n_left--;
+      match++;
+    }
+  return counter;
+}
+
+/**
+ * @brief function handler to perform lookup in fastpath SPD
+ * for an outbound traffic burst of n packets
+ * returns the number of successfully matched policies
+ **/
+
+static_always_inline u32
+ipsec_fp_out_policy_match_n (void *spd_fp, u8 is_ipv6,
+			     ipsec_fp_5tuple_t *tuples,
+			     ipsec_policy_t **policies, u32 *ids, u32 n)
+{
+  if (is_ipv6)
+    return ipsec_fp_ip6_out_policy_match_n (spd_fp, tuples, policies, ids, n);
+  else
+    return ipsec_fp_ip4_out_policy_match_n (spd_fp, tuples, policies, ids, n);
+}
+
+#endif /* !IPSEC_SPD_FP_LOOKUP_H */