flow: Add GTP support

Type: feature

Adding:
	VNET_FLOW_TYPE_IP4_GTPC
	VNET_FLOW_TYPE_IP4_GTPU
	VNET_FLOW_TYPE_IP4_GTPU_IP4
	VNET_FLOW_TYPE_IP4_GTPU_IP6
	VNET_FLOW_TYPE_IP6_GTPC
	VNET_FLOW_TYPE_IP6_GTPU
	VNET_FLOW_TYPE_IP6_GTPU_IP4
	VNET_FLOW_TYPE_IP6_GTPU_IP6
in this patch

Signed-off-by: Chenmin Sun <chenmin.sun@intel.com>
Change-Id: I4ad53895b5ac0771432bb039b8c79e48e3c19f25
diff --git a/src/vnet/flow/flow.h b/src/vnet/flow/flow.h
index de09d34..c0aa911 100644
--- a/src/vnet/flow/flow.h
+++ b/src/vnet/flow/flow.h
@@ -26,7 +26,15 @@
   _(IP4_N_TUPLE, ip4_n_tuple, "ipv4-n-tuple") \
   _(IP6_N_TUPLE, ip6_n_tuple, "ipv6-n-tuple") \
   _(IP4_VXLAN, ip4_vxlan, "ipv4-vxlan") \
-  _(IP6_VXLAN, ip6_vxlan, "ipv6-vxlan")
+  _(IP6_VXLAN, ip6_vxlan, "ipv6-vxlan") \
+  _(IP4_GTPC, ip4_gtpc, "ipv4-gtpc") \
+  _(IP4_GTPU, ip4_gtpu, "ipv4-gtpu") \
+  _(IP4_GTPU_IP4, ip4_gtpu_ip4, "ipv4-gtpu-ipv4") \
+  _(IP4_GTPU_IP6, ip4_gtpu_ip6, "ipv4-gtpu-ipv6") \
+  _(IP6_GTPC, ip6_gtpc, "ipv6-gtpc") \
+  _(IP6_GTPU, ip6_gtpu, "ipv6-gtpu") \
+  _(IP6_GTPU_IP4, ip6_gtpu_ip4, "ipv6-gtpu-ipv4") \
+  _(IP6_GTPU_IP6, ip6_gtpu_ip6, "ipv6-gtpu-ipv6")
 
 #define foreach_flow_entry_ip4_n_tuple \
   _fe(ip4_address_and_mask_t, src_addr) \
@@ -54,6 +62,42 @@
   _fe(u16, dst_port) \
   _fe(u16, vni)
 
+#define foreach_flow_entry_ip4_gtpc \
+  foreach_flow_entry_ip4_n_tuple \
+  _fe(u32, teid)
+
+#define foreach_flow_entry_ip4_gtpu \
+  foreach_flow_entry_ip4_n_tuple \
+  _fe(u32, teid)
+
+#define foreach_flow_entry_ip4_gtpu_ip4 \
+  foreach_flow_entry_ip4_gtpu \
+  _fe(ip4_address_and_mask_t, inner_src_addr) \
+  _fe(ip4_address_and_mask_t, inner_dst_addr)
+
+#define foreach_flow_entry_ip4_gtpu_ip6 \
+  foreach_flow_entry_ip4_gtpu \
+  _fe(ip6_address_and_mask_t, inner_src_addr) \
+  _fe(ip6_address_and_mask_t, inner_dst_addr)
+
+#define foreach_flow_entry_ip6_gtpc \
+  foreach_flow_entry_ip6_n_tuple \
+  _fe(u32, teid)
+
+#define foreach_flow_entry_ip6_gtpu \
+  foreach_flow_entry_ip6_n_tuple \
+  _fe(u32, teid)
+
+#define foreach_flow_entry_ip6_gtpu_ip4 \
+  foreach_flow_entry_ip6_gtpu \
+  _fe(ip4_address_and_mask_t, inner_src_addr) \
+  _fe(ip4_address_and_mask_t, inner_dst_addr)
+
+#define foreach_flow_entry_ip6_gtpu_ip6 \
+  foreach_flow_entry_ip6_gtpu \
+  _fe(ip6_address_and_mask_t, inner_src_addr) \
+  _fe(ip6_address_and_mask_t, inner_dst_addr)
+
 #define foreach_flow_action \
   _(0, COUNT, "count") \
   _(1, MARK, "mark") \
diff --git a/src/vnet/flow/flow_cli.c b/src/vnet/flow/flow_cli.c
index 5481aa3..1c09b2b 100644
--- a/src/vnet/flow/flow_cli.c
+++ b/src/vnet/flow/flow_cli.c
@@ -274,9 +274,26 @@
     FLOW_ENABLE,
     FLOW_DISABLE
   } action = FLOW_UNKNOWN_ACTION;
-  u32 hw_if_index = ~0, tmp, flow_index = ~0;
+  u32 hw_if_index = ~0, flow_index = ~0;
   int rv;
-  u8 prot;
+  u32 prot = 0, teid = 0;	/* NOTE(review): with prot initialized to 0, the "(ip_protocol_t) ~ 0" checks below can never fire -- confirm whether ~0 was intended */
+  vnet_flow_type_t type = VNET_FLOW_TYPE_IP4_N_TUPLE;
+  bool is_gtpc_set = false;
+  bool is_gtpu_set = false;
+  vnet_flow_type_t outer_type = VNET_FLOW_TYPE_UNKNOWN;
+  vnet_flow_type_t inner_type = VNET_FLOW_TYPE_UNKNOWN;
+  bool outer_ip4_set = false, inner_ip4_set = false;
+  bool outer_ip6_set = false, inner_ip6_set = false;
+  ip4_address_and_mask_t ip4s = { };
+  ip4_address_and_mask_t ip4d = { };
+  ip4_address_and_mask_t inner_ip4s = { };
+  ip4_address_and_mask_t inner_ip4d = { };
+  ip6_address_and_mask_t ip6s = { };
+  ip6_address_and_mask_t ip6d = { };
+  ip6_address_and_mask_t inner_ip6s = { };
+  ip6_address_and_mask_t inner_ip6d = { };
+  ip_port_and_mask_t sport = { };
+  ip_port_and_mask_t dport = { };
 
   clib_memset (&flow, 0, sizeof (vnet_flow_t));
   flow.index = ~0;
@@ -296,23 +313,44 @@
       else if (unformat (line_input, "disable"))
 	action = FLOW_DISABLE;
       else if (unformat (line_input, "src-ip %U",
-			 unformat_ip4_address_and_mask,
-			 &flow.ip4_n_tuple.src_addr))
-	;
+			 unformat_ip4_address_and_mask, &ip4s))
+	outer_ip4_set = true;
       else if (unformat (line_input, "dst-ip %U",
-			 unformat_ip4_address_and_mask,
-			 &flow.ip4_n_tuple.dst_addr))
-	;
+			 unformat_ip4_address_and_mask, &ip4d))
+	outer_ip4_set = true;
+      else if (unformat (line_input, "ip6-src-ip %U",
+			 unformat_ip6_address_and_mask, &ip6s))
+	outer_ip6_set = true;
+      else if (unformat (line_input, "ip6-dst-ip %U",
+			 unformat_ip6_address_and_mask, &ip6d))
+	outer_ip6_set = true;
+      else if (unformat (line_input, "inner-src-ip %U",
+			 unformat_ip4_address_and_mask, &inner_ip4s))
+	inner_ip4_set = true;
+      else if (unformat (line_input, "inner-dst-ip %U",
+			 unformat_ip4_address_and_mask, &inner_ip4d))
+	inner_ip4_set = true;
+      else if (unformat (line_input, "inner-ip6-src-ip %U",
+			 unformat_ip6_address_and_mask, &inner_ip6s))
+	inner_ip6_set = true;
+      else if (unformat (line_input, "inner-ip6-dst-ip %U",
+			 unformat_ip6_address_and_mask, &inner_ip6d))
+	inner_ip6_set = true;
+
       else if (unformat (line_input, "src-port %U", unformat_ip_port_and_mask,
-			 &flow.ip4_n_tuple.src_port))
+			 &sport))
 	;
       else if (unformat (line_input, "dst-port %U", unformat_ip_port_and_mask,
-			 &flow.ip4_n_tuple.dst_port))
+			 &dport))
 	;
       else if (unformat (line_input, "proto %U", unformat_ip_protocol, &prot))
-	flow.ip4_n_tuple.protocol = prot;
-      else if (unformat (line_input, "proto %u", &tmp))
-	flow.ip4_n_tuple.protocol = tmp;
+	;
+      else if (unformat (line_input, "proto %u", &prot))
+	;
+      else if (unformat (line_input, "gtpc teid %u", &teid))
+	is_gtpc_set = true;
+      else if (unformat (line_input, "gtpu teid %u", &teid))
+	is_gtpu_set = true;
       else if (unformat (line_input, "index %u", &flow_index))
 	;
       else if (unformat (line_input, "next-node %U", unformat_vlib_node, vm,
@@ -348,13 +386,167 @@
   switch (action)
     {
     case FLOW_ADD:
-      if (flow.ip4_n_tuple.protocol == (ip_protocol_t) ~ 0)
-	return clib_error_return (0, "Please specify ip protocol");
-
       if (flow.actions == 0)
 	return clib_error_return (0, "Please specify at least one action");
-      flow.type = VNET_FLOW_TYPE_IP4_N_TUPLE;
+
+      /* Adjust the flow type */
+      if (outer_ip4_set == true)
+	outer_type = VNET_FLOW_TYPE_IP4_N_TUPLE;
+      else if (outer_ip6_set == true)
+	outer_type = VNET_FLOW_TYPE_IP6_N_TUPLE;
+      if (inner_ip4_set == true)
+	inner_type = VNET_FLOW_TYPE_IP4_N_TUPLE;
+      else if (inner_ip6_set == true)
+	inner_type = VNET_FLOW_TYPE_IP6_N_TUPLE;
+
+      if (outer_type == VNET_FLOW_TYPE_UNKNOWN)
+	return clib_error_return (0, "Please specify a supported flow type");
+
+      if (outer_type == VNET_FLOW_TYPE_IP4_N_TUPLE)
+	{
+	  type = VNET_FLOW_TYPE_IP4_N_TUPLE;
+
+	  if (inner_type == VNET_FLOW_TYPE_UNKNOWN)
+	    {
+	      if (is_gtpc_set)
+		type = VNET_FLOW_TYPE_IP4_GTPC;
+	      else if (is_gtpu_set)
+		type = VNET_FLOW_TYPE_IP4_GTPU;
+	    }
+	  else if (inner_type == VNET_FLOW_TYPE_IP4_N_TUPLE)
+	    {
+	      if (is_gtpu_set)
+		type = VNET_FLOW_TYPE_IP4_GTPU_IP4;
+	    }
+	  else if (inner_type == VNET_FLOW_TYPE_IP6_N_TUPLE)
+	    {
+	      if (is_gtpu_set)
+		type = VNET_FLOW_TYPE_IP4_GTPU_IP6;
+	    }
+	}
+      else if (outer_type == VNET_FLOW_TYPE_IP6_N_TUPLE)
+	{
+	  type = VNET_FLOW_TYPE_IP6_N_TUPLE;
+
+	  if (inner_type == VNET_FLOW_TYPE_UNKNOWN)
+	    {
+	      if (is_gtpc_set)
+		type = VNET_FLOW_TYPE_IP6_GTPC;
+	      else if (is_gtpu_set)
+		type = VNET_FLOW_TYPE_IP6_GTPU;
+	    }
+	  else if (inner_type == VNET_FLOW_TYPE_IP4_N_TUPLE)
+	    {
+	      if (is_gtpu_set)
+		type = VNET_FLOW_TYPE_IP6_GTPU_IP4;
+	    }
+	  else if (inner_type == VNET_FLOW_TYPE_IP6_N_TUPLE)
+	    {
+	      if (is_gtpu_set)
+		type = VNET_FLOW_TYPE_IP6_GTPU_IP6;
+	    }
+	}
+
+      //assign specific field values per flow type
+      switch (type)
+	{
+	case VNET_FLOW_TYPE_IP4_N_TUPLE:
+	case VNET_FLOW_TYPE_IP4_GTPC:
+	case VNET_FLOW_TYPE_IP4_GTPU:
+	case VNET_FLOW_TYPE_IP4_GTPU_IP4:
+	case VNET_FLOW_TYPE_IP4_GTPU_IP6:
+	  clib_memcpy (&flow.ip4_n_tuple.src_addr, &ip4s,
+		       sizeof (ip4_address_and_mask_t));
+	  clib_memcpy (&flow.ip4_n_tuple.dst_addr, &ip4d,
+		       sizeof (ip4_address_and_mask_t));
+	  clib_memcpy (&flow.ip4_n_tuple.src_port, &sport,
+		       sizeof (ip_port_and_mask_t));
+	  clib_memcpy (&flow.ip4_n_tuple.dst_port, &dport,
+		       sizeof (ip_port_and_mask_t));
+	  flow.ip4_n_tuple.protocol = prot;
+
+	  if (type == VNET_FLOW_TYPE_IP4_GTPC)
+	    flow.ip4_gtpc.teid = teid;
+	  else if (type == VNET_FLOW_TYPE_IP4_GTPU)
+	    flow.ip4_gtpu.teid = teid;
+	  else if (type == VNET_FLOW_TYPE_IP4_GTPU_IP4)
+	    {
+	      flow.ip4_gtpu_ip4.teid = teid;
+	      clib_memcpy (&flow.ip4_gtpu_ip4.inner_src_addr, &inner_ip4s,
+			   sizeof (ip4_address_and_mask_t));
+	      clib_memcpy (&flow.ip4_gtpu_ip4.inner_dst_addr, &inner_ip4d,
+			   sizeof (ip4_address_and_mask_t));
+	    }
+	  else if (type == VNET_FLOW_TYPE_IP4_GTPU_IP6)
+	    {
+	      flow.ip4_gtpu_ip6.teid = teid;
+	      clib_memcpy (&flow.ip4_gtpu_ip6.inner_src_addr, &inner_ip6s,
+			   sizeof (ip6_address_and_mask_t));
+	      clib_memcpy (&flow.ip4_gtpu_ip6.inner_dst_addr, &inner_ip6d,
+			   sizeof (ip6_address_and_mask_t));
+	    }
+
+	  if (flow.ip4_n_tuple.protocol == (ip_protocol_t) ~ 0)
+	    return clib_error_return (0, "Please specify ip protocol");
+	  if ((type != VNET_FLOW_TYPE_IP4_N_TUPLE) &&
+	      (flow.ip4_n_tuple.protocol != IP_PROTOCOL_UDP))
+	    return clib_error_return (0,
+				      "For GTP related flow, ip protocol must be UDP");
+	  break;
+
+	case VNET_FLOW_TYPE_IP6_N_TUPLE:
+	case VNET_FLOW_TYPE_IP6_GTPC:
+	case VNET_FLOW_TYPE_IP6_GTPU:
+	case VNET_FLOW_TYPE_IP6_GTPU_IP4:
+	case VNET_FLOW_TYPE_IP6_GTPU_IP6:
+	  clib_memcpy (&flow.ip6_n_tuple.src_addr, &ip6s,
+		       sizeof (ip6_address_and_mask_t));
+	  clib_memcpy (&flow.ip6_n_tuple.dst_addr, &ip6d,
+		       sizeof (ip6_address_and_mask_t));
+	  clib_memcpy (&flow.ip6_n_tuple.src_port, &sport,
+		       sizeof (ip_port_and_mask_t));
+	  clib_memcpy (&flow.ip6_n_tuple.dst_port, &dport,
+		       sizeof (ip_port_and_mask_t));
+	  flow.ip6_n_tuple.protocol = prot;
+
+	  if (type == VNET_FLOW_TYPE_IP6_GTPC)
+	    flow.ip6_gtpc.teid = teid;
+	  else if (type == VNET_FLOW_TYPE_IP6_GTPU)
+	    flow.ip6_gtpu.teid = teid;
+	  else if (type == VNET_FLOW_TYPE_IP6_GTPU_IP4)
+	    {
+	      flow.ip6_gtpu_ip4.teid = teid;
+	      clib_memcpy (&flow.ip6_gtpu_ip4.inner_src_addr, &inner_ip4s,
+			   sizeof (ip4_address_and_mask_t));
+	      clib_memcpy (&flow.ip6_gtpu_ip4.inner_dst_addr, &inner_ip4d,
+			   sizeof (ip4_address_and_mask_t));
+	    }
+	  else if (type == VNET_FLOW_TYPE_IP6_GTPU_IP6)
+	    {
+	      flow.ip6_gtpu_ip6.teid = teid;
+	      clib_memcpy (&flow.ip6_gtpu_ip6.inner_src_addr, &inner_ip6s,
+			   sizeof (ip6_address_and_mask_t));
+	      clib_memcpy (&flow.ip6_gtpu_ip6.inner_dst_addr, &inner_ip6d,
+			   sizeof (ip6_address_and_mask_t));
+	    }
+
+	  if (flow.ip6_n_tuple.protocol == (ip_protocol_t) ~ 0)
+	    return clib_error_return (0, "Please specify ip protocol");
+	  if ((type != VNET_FLOW_TYPE_IP6_N_TUPLE) &&
+	      (flow.ip6_n_tuple.protocol != IP_PROTOCOL_UDP))
+	    return clib_error_return (0,
+				      "For GTP related flow, ip protocol must be UDP");
+	  break;
+
+	default:
+	  break;
+	}
+
+      flow.type = type;
       rv = vnet_flow_add (vnm, &flow, &flow_index);
+      if (!rv)
+	vlib_cli_output (vm, "flow %u added", flow_index);
+
       break;
     case FLOW_DEL:
       rv = vnet_flow_del (vnm, flow_index);