ipsec: add spd fast path matching

This patch adds policy matching for the SPD fast path. Fast path
matching is introduced for outbound traffic only.
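
A minimal caller sketch for reference (illustrative only: n_packets
and the filled 5-tuple array are assumed, and the flow cache flag is
assumed to live in im->output_flow_cache_flag):

    ipsec_policy_t *policies[VLIB_FRAME_SIZE];
    ipsec4_spd_5tuple_t ip4_5tuples[VLIB_FRAME_SIZE];
    /* ... fill ip4_5tuples[0..n_packets-1] from the frame ... */

    int n_matched = ipsec_output_policy_match_n (
      spd, ip4_5tuples, policies, n_packets, im->output_flow_cache_flag);
    /* policies[i] == NULL means packet i matched no outbound policy. */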

Type: feature
Signed-off-by: Piotr Bronowski <piotrx.bronowski@intel.com>
Change-Id: I03d5edf7d7fbc03bf3e6edbe33cb15bc965f9d4e
diff --git a/src/vnet/ipsec/ipsec_output.h b/src/vnet/ipsec/ipsec_output.h
index 63d97c0..6608e3c 100644
--- a/src/vnet/ipsec/ipsec_output.h
+++ b/src/vnet/ipsec/ipsec_output.h
@@ -20,6 +20,7 @@
 
 #include <vppinfra/types.h>
 #include <vnet/ipsec/ipsec_spd.h>
+#include <vnet/ipsec/ipsec_spd_fp_lookup.h>
 
 always_inline void
 ipsec4_out_spd_add_flow_cache_entry (ipsec_main_t *im, u8 pr, u32 la, u32 ra,
@@ -62,6 +63,218 @@
   return;
 }
 
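+/* Flow cache insertion used by the batched match path. The entry value
+   packs the matched policy index (high 32 bits) with the current epoch
+   count (low 32 bits) so stale entries can be detected later. */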
+always_inline void
+ipsec4_out_spd_add_flow_cache_entry_n (ipsec_main_t *im,
+				       ipsec4_spd_5tuple_t *ip4_5tuple,
+				       u32 pol_id)
+{
+  u64 hash;
+  u8 overwrite = 0, stale_overwrite = 0;
+
+  ip4_5tuple->kv_16_8.value = (((u64) pol_id) << 32) | ((u64) im->epoch_count);
+
+  hash = ipsec4_hash_16_8 (&ip4_5tuple->kv_16_8);
+  hash &= (im->ipsec4_out_spd_hash_num_buckets - 1);
+
+  ipsec_spinlock_lock (&im->ipsec4_out_spd_hash_tbl[hash].bucket_lock);
+  /* Check if we are overwriting an existing entry so we know
+     whether to increment the flow cache counter. Since the flow
+     cache counter is reset on any policy add/remove, but the
+     hash table values are not, we also need to check whether the
+     entry being overwritten is stale. A stale-entry overwrite
+     still counts as a fresh entry, so increment in that case too. */
+  overwrite = (im->ipsec4_out_spd_hash_tbl[hash].value != 0);
+  /* Check for stale entry by comparing with current epoch count */
+  if (PREDICT_FALSE (overwrite))
+    stale_overwrite =
+      (im->epoch_count !=
+       ((u32) (im->ipsec4_out_spd_hash_tbl[hash].value & 0xFFFFFFFF)));
+  clib_memcpy_fast (&im->ipsec4_out_spd_hash_tbl[hash], &ip4_5tuple->kv_16_8,
+		    sizeof (ip4_5tuple->kv_16_8));
+  ipsec_spinlock_unlock (&im->ipsec4_out_spd_hash_tbl[hash].bucket_lock);
+
+  /* Increment the counter of active flow cache entries when
+     adding a fresh entry or overwriting a stale one. */
+  if (!overwrite || stale_overwrite)
+    clib_atomic_fetch_add_relax (&im->ipsec4_out_spd_flow_cache_entries, 1);
+
+  return;
+}
+
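+/* Convert a host-order IPv4 5-tuple to the fast path tuple format. Ports
+   are meaningful only for TCP/UDP/SCTP and are zeroed for other protocols. */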
+always_inline void
+ipsec_fp_5tuple_from_ip4_range (ipsec_fp_5tuple_t *tuple, u32 la, u32 ra,
+				u16 lp, u16 rp, u8 pr)
+{
+  clib_memset (tuple->l3_zero_pad, 0, sizeof (tuple->l3_zero_pad));
+  tuple->laddr.as_u32 = clib_host_to_net_u32 (la);
+  tuple->raddr.as_u32 = clib_host_to_net_u32 (ra);
+
+  if (PREDICT_FALSE ((pr != IP_PROTOCOL_TCP) && (pr != IP_PROTOCOL_UDP) &&
+		     (pr != IP_PROTOCOL_SCTP)))
+    {
+      tuple->lport = 0;
+      tuple->rport = 0;
+    }
+  else
+    {
+      tuple->lport = lp;
+      tuple->rport = rp;
+    }
+
+  tuple->protocol = pr;
+  tuple->is_ipv6 = 0;
+}
+
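+/* Batched variant of the conversion above: fill n fast path tuples. */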
+always_inline void
+ipsec_fp_5tuple_from_ip4_range_n (ipsec_fp_5tuple_t *tuples,
+				  ipsec4_spd_5tuple_t *ip4_5tuple, u32 n)
+{
+  u32 n_left = n;
+  ipsec_fp_5tuple_t *tuple = tuples;
+
+  while (n_left)
+    {
+      clib_memset (tuple->l3_zero_pad, 0, sizeof (tuple->l3_zero_pad));
+      tuple->laddr.as_u32 =
+	clib_host_to_net_u32 (ip4_5tuple->ip4_addr[0].as_u32);
+      tuple->raddr.as_u32 =
+	clib_host_to_net_u32 (ip4_5tuple->ip4_addr[1].as_u32);
+      if (PREDICT_FALSE ((ip4_5tuple->proto != IP_PROTOCOL_TCP) &&
+			 (ip4_5tuple->proto != IP_PROTOCOL_UDP) &&
+			 (ip4_5tuple->proto != IP_PROTOCOL_SCTP)))
+	{
+	  tuple->lport = 0;
+	  tuple->rport = 0;
+	}
+      else
+	{
+	  tuple->lport = ip4_5tuple->port[0];
+	  tuple->rport = ip4_5tuple->port[1];
+	}
+      tuple->protocol = ip4_5tuple->proto;
+      tuple->is_ipv6 = 0;
+      n_left--;
+      tuple++;
+    }
+}
+
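+/* Match n 5-tuples against the outbound SPD. When the fast path is
+   enabled it is consulted first; tuples it leaves unmatched fall back
+   to the linear policy walk. Returns the number of matched tuples. */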
+always_inline int
+ipsec_output_policy_match_n (ipsec_spd_t *spd,
+			     ipsec4_spd_5tuple_t *ip4_5tuples,
+			     ipsec_policy_t **policies, u32 n,
+			     u8 flow_cache_enabled)
+{
+  ipsec_main_t *im = &ipsec_main;
+  ipsec_policy_t *p;
+  ipsec_policy_t **pp = policies;
+  u32 n_left = n;
+  ipsec4_spd_5tuple_t *ip4_5tuple = ip4_5tuples;
+  u32 policy_ids[n], *policy_id = policy_ids;
+  ipsec_fp_5tuple_t tuples[n];
+  u32 *i;
+  u32 counter = 0;
+
+  if (!spd)
+    return 0;
+
+  clib_memset (policies, 0, n * sizeof (ipsec_policy_t *));
+
+  if (im->fp_spd_is_enabled)
+    {
+      ipsec_fp_5tuple_from_ip4_range_n (tuples, ip4_5tuples, n);
+      counter += ipsec_fp_out_policy_match_n (&spd->fp_spd, 0, tuples,
+					      policies, policy_ids, n);
+    }
+
+  while (n_left)
+    {
+      if (*pp != 0)
+	goto next;
+
+      vec_foreach (i, spd->policies[IPSEC_SPD_POLICY_IP4_OUTBOUND])
+	{
+	  p = pool_elt_at_index (im->policies, *i);
+	  if (PREDICT_FALSE (p->protocol &&
+			     (p->protocol != ip4_5tuple->proto)))
+	    continue;
+
+	  if (ip4_5tuple->ip4_addr[0].as_u32 <
+	      clib_net_to_host_u32 (p->raddr.start.ip4.as_u32))
+	    continue;
+
+	  if (ip4_5tuple->ip4_addr[1].as_u32 >
+	      clib_net_to_host_u32 (p->raddr.stop.ip4.as_u32))
+	    continue;
+
+	  if (ip4_5tuple->ip4_addr[0].as_u32 <
+	      clib_net_to_host_u32 (p->laddr.start.ip4.as_u32))
+	    continue;
+
+	  if (ip4_5tuple->ip4_addr[1].as_u32 >
+	      clib_net_to_host_u32 (p->laddr.stop.ip4.as_u32))
+	    continue;
+
+	  if (PREDICT_FALSE ((ip4_5tuple->proto != IP_PROTOCOL_TCP) &&
+			     (ip4_5tuple->proto != IP_PROTOCOL_UDP) &&
+			     (ip4_5tuple->proto != IP_PROTOCOL_SCTP)))
+	    {
+	      ip4_5tuple->port[0] = 0;
+	      ip4_5tuple->port[1] = 0;
+	      goto add_policy;
+	    }
+
+	  if (ip4_5tuple->port[0] < p->lport.start)
+	    continue;
+
+	  if (ip4_5tuple->port[0] > p->lport.stop)
+	    continue;
+
+	  if (ip4_5tuple->port[1] < p->rport.start)
+	    continue;
+
+	  if (ip4_5tuple->port[1] > p->rport.stop)
+	    continue;
+
+	add_policy:
+	  *pp = p;
+	  *policy_id = *i;
+	  counter++;
+	  break;
+	}
+
+    next:
+      n_left--;
+      pp++;
+      ip4_5tuple++;
+      policy_id++;
+    }
+
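+  /* If the flow cache is enabled, cache every tuple that matched. */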
+  if (flow_cache_enabled)
+    {
+      n_left = n;
+      policy_id = policy_ids;
+      ip4_5tuple = ip4_5tuples;
+      pp = policies;
+
+      while (n_left)
+	{
+	  if (*pp != NULL)
+	    {
+	      /* Add an entry to the flow cache */
+	      ipsec4_out_spd_add_flow_cache_entry_n (im, ip4_5tuple,
+						     *policy_id);
+	    }
+
+	  n_left--;
+	  policy_id++;
+	  ip4_5tuple++;
+	  pp++;
+	}
+    }
+
+  return counter;
+}
+
 always_inline ipsec_policy_t *
 ipsec4_out_spd_find_flow_cache_entry (ipsec_main_t *im, u8 pr, u32 la, u32 ra,
 				      u16 lp, u16 rp)
@@ -108,11 +321,32 @@
 {
   ipsec_main_t *im = &ipsec_main;
   ipsec_policy_t *p;
+  ipsec_policy_t *policies[1];
+  ipsec_fp_5tuple_t tuples[1];
+  u32 fp_policy_ids[1];
+
   u32 *i;
 
   if (!spd)
     return 0;
 
+  ipsec_fp_5tuple_from_ip4_range (&tuples[0], la, ra, lp, rp, pr);
+
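+  /* Try the fast path first; on a hit, skip the linear policy walk and
+     go straight to the flow cache handling. */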
+  if (im->fp_spd_is_enabled &&
+      (1 == ipsec_fp_out_policy_match_n (&spd->fp_spd, 0, tuples, policies,
+					 fp_policy_ids, 1)))
+    {
+      p = policies[0];
+      i = fp_policy_ids;
+      if (PREDICT_FALSE ((pr != IP_PROTOCOL_TCP) && (pr != IP_PROTOCOL_UDP) &&
+			 (pr != IP_PROTOCOL_SCTP)))
+	{
+	  lp = 0;
+	  rp = 0;
+	}
+      goto add_flow_cache;
+    }
+
   vec_foreach (i, spd->policies[IPSEC_SPD_POLICY_IP4_OUTBOUND])
     {
       p = pool_elt_at_index (im->policies, *i);