udp: fix inner packet checksum calculation in udp-encap

When computing the inner packet checksum, the code wrongly
assumes that the IP version of the inner packet is the same
as that of the outer one. In fact, it is perfectly possible
to encapsulate v6 packets in v4 and vice versa, so we need
to check the IP version of the inner header before calling
vnet_calc_checksums_inline.
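
To that end, dedicated udp4o4/udp6o4/udp4o6/udp6o6-encap
nodes are added for the cases where the inner IP version is
known, while the generic udp4/udp6-encap nodes pass N_AF and
skip the inner checksum computation for payloads of unknown
IP version.

The corrected dispatch boils down to the sketch below
(illustrative only: the helper name inner_checksum_fixup is
hypothetical, the types and vnet_calc_checksums_inline are
those used in the patch):

  always_inline void
  inner_checksum_fixup (vlib_main_t *vm, vlib_buffer_t *b,
                        ip_address_family_t payload_family)
  {
    /* Drive the checksum computation from the inner (payload)
       IP version, and skip it entirely when the payload
       family is unknown (N_AF). */
    if (payload_family < N_AF)
      vnet_calc_checksums_inline (vm, b, payload_family == AF_IP4,
                                  payload_family == AF_IP6);
  }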

Ticket: VPP-2020
Type: fix

Signed-off-by: Mauro Sardara <msardara@cisco.com>
Change-Id: Ia4515563c164f6dd5096832c831a48cb0a29b3ad
diff --git a/src/vnet/udp/udp_encap.c b/src/vnet/udp/udp_encap.c
index cb93adb..a0f5a50 100644
--- a/src/vnet/udp/udp_encap.c
+++ b/src/vnet/udp/udp_encap.c
@@ -47,8 +47,7 @@
 udp_encap_restack (udp_encap_t * ue)
 {
   dpo_stack (udp_encap_dpo_types[ue->ue_ip_proto],
-	     fib_proto_to_dpo (ue->ue_ip_proto),
-	     &ue->ue_dpo,
+	     fib_proto_to_dpo (ue->ue_ip_proto), &ue->ue_dpo,
 	     fib_entry_contribute_ip_forwarding (ue->ue_fib_entry_index));
 }
 
@@ -325,12 +324,12 @@
 }
 
 const static char *const udp4_encap_ip4_nodes[] = {
-  "udp4-encap",
+  "udp4o4-encap",
   NULL,
 };
 
 const static char *const udp4_encap_ip6_nodes[] = {
-  "udp4-encap",
+  "udp6o4-encap",
   NULL,
 };
 
@@ -345,12 +344,12 @@
 };
 
 const static char *const udp6_encap_ip4_nodes[] = {
-  "udp6-encap",
+  "udp4o6-encap",
   NULL,
 };
 
 const static char *const udp6_encap_ip6_nodes[] = {
-  "udp6-encap",
+  "udp6o6-encap",
   NULL,
 };
 
diff --git a/src/vnet/udp/udp_encap.h b/src/vnet/udp/udp_encap.h
index b096e0f..648e3b5 100644
--- a/src/vnet/udp/udp_encap.h
+++ b/src/vnet/udp/udp_encap.h
@@ -85,7 +85,7 @@
   /**
    * The second cacheline contains control-plane data
    */
-    CLIB_CACHE_LINE_ALIGN_MARK (cacheline1);
+  CLIB_CACHE_LINE_ALIGN_MARK (cacheline1);
 
   /**
    * linkage into the FIB graph
diff --git a/src/vnet/udp/udp_encap_node.c b/src/vnet/udp/udp_encap_node.c
index 5b9fc0b..1ebe795 100644
--- a/src/vnet/udp/udp_encap_node.c
+++ b/src/vnet/udp/udp_encap_node.c
@@ -61,9 +61,9 @@
 }
 
 always_inline uword
-udp_encap_inline (vlib_main_t * vm,
-		  vlib_node_runtime_t * node,
-		  vlib_frame_t * frame, int is_encap_v6)
+udp_encap_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
+		  vlib_frame_t *frame, ip_address_family_t encap_family,
+		  ip_address_family_t payload_family)
 {
   vlib_combined_counter_main_t *cm = &udp_encap_counters;
   u32 *from = vlib_frame_vector_args (frame);
@@ -121,12 +121,13 @@
 	  ue1 = udp_encap_get (uei1);
 
 	  /* Paint */
-	  if (is_encap_v6)
+	  if (encap_family == AF_IP6)
 	    {
 	      const u8 n_bytes =
 		sizeof (udp_header_t) + sizeof (ip6_header_t);
-	      ip_udp_encap_two (vm, b0, b1, (u8 *) & ue0->ue_hdrs,
-				(u8 *) & ue1->ue_hdrs, n_bytes, 0);
+	      ip_udp_encap_two (vm, b0, b1, (u8 *) &ue0->ue_hdrs,
+				(u8 *) &ue1->ue_hdrs, n_bytes, encap_family,
+				payload_family);
 	      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
 		{
 		  udp6_encap_trace_t *tr =
@@ -147,9 +148,9 @@
 	      const u8 n_bytes =
 		sizeof (udp_header_t) + sizeof (ip4_header_t);
 
-	      ip_udp_encap_two (vm, b0, b1,
-				(u8 *) & ue0->ue_hdrs,
-				(u8 *) & ue1->ue_hdrs, n_bytes, 1);
+	      ip_udp_encap_two (vm, b0, b1, (u8 *) &ue0->ue_hdrs,
+				(u8 *) &ue1->ue_hdrs, n_bytes, encap_family,
+				payload_family);
 
 	      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
 		{
@@ -202,12 +203,12 @@
 									b0));
 
 	  /* Paint */
-	  if (is_encap_v6)
+	  if (encap_family == AF_IP6)
 	    {
 	      const u8 n_bytes =
 		sizeof (udp_header_t) + sizeof (ip6_header_t);
-	      ip_udp_encap_one (vm, b0, (u8 *) & ue0->ue_hdrs.ip6, n_bytes,
-				0);
+	      ip_udp_encap_one (vm, b0, (u8 *) &ue0->ue_hdrs.ip6, n_bytes,
+				encap_family, payload_family);
 
 	      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
 		{
@@ -222,8 +223,8 @@
 	      const u8 n_bytes =
 		sizeof (udp_header_t) + sizeof (ip4_header_t);
 
-	      ip_udp_encap_one (vm, b0, (u8 *) & ue0->ue_hdrs.ip4, n_bytes,
-				1);
+	      ip_udp_encap_one (vm, b0, (u8 *) &ue0->ue_hdrs.ip4, n_bytes,
+				encap_family, payload_family);
 
 	      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
 		{
@@ -248,37 +249,87 @@
   return frame->n_vectors;
 }
 
-VLIB_NODE_FN (udp4_encap_node) (vlib_main_t * vm,
-				vlib_node_runtime_t * node,
-				vlib_frame_t * frame)
+VLIB_NODE_FN (udp4o4_encap_node)
+(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
 {
-  return udp_encap_inline (vm, node, frame, 0);
+  return udp_encap_inline (vm, node, frame, AF_IP4, AF_IP4);
 }
 
-VLIB_NODE_FN (udp6_encap_node) (vlib_main_t * vm,
-				vlib_node_runtime_t * node,
-				vlib_frame_t * frame)
+VLIB_NODE_FN (udp6o4_encap_node)
+(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
 {
-  return udp_encap_inline (vm, node, frame, 1);
+  return udp_encap_inline (vm, node, frame, AF_IP4, AF_IP6);
+}
+
+VLIB_NODE_FN (udp4_encap_node)
+(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
+{
+  return udp_encap_inline (vm, node, frame, AF_IP4, N_AF);
+}
+
+VLIB_NODE_FN (udp6o6_encap_node)
+(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
+{
+  return udp_encap_inline (vm, node, frame, AF_IP6, AF_IP6);
+}
+
+VLIB_NODE_FN (udp4o6_encap_node)
+(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
+{
+  return udp_encap_inline (vm, node, frame, AF_IP6, AF_IP4);
+}
+
+VLIB_NODE_FN (udp6_encap_node)
+(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
+{
+  return udp_encap_inline (vm, node, frame, AF_IP6, N_AF);
 }
 
 /* *INDENT-OFF* */
+VLIB_REGISTER_NODE (udp4o4_encap_node) = {
+  .name = "udp4o4-encap",
+  .vector_size = sizeof (u32),
+  .format_trace = format_udp4_encap_trace,
+  .n_next_nodes = 0,
+};
+
+VLIB_REGISTER_NODE (udp6o4_encap_node) = {
+  .name = "udp6o4-encap",
+  .vector_size = sizeof (u32),
+  .format_trace = format_udp6_encap_trace,
+  .n_next_nodes = 0,
+  .sibling_of = "udp4o4-encap",
+};
+
 VLIB_REGISTER_NODE (udp4_encap_node) = {
   .name = "udp4-encap",
   .vector_size = sizeof (u32),
-
   .format_trace = format_udp4_encap_trace,
-
   .n_next_nodes = 0,
+  .sibling_of = "udp4o4-encap",
+};
+
+VLIB_REGISTER_NODE (udp6o6_encap_node) = {
+  .name = "udp6o6-encap",
+  .vector_size = sizeof (u32),
+  .format_trace = format_udp6_encap_trace,
+  .n_next_nodes = 0,
+};
+
+VLIB_REGISTER_NODE (udp4o6_encap_node) = {
+  .name = "udp4o6-encap",
+  .vector_size = sizeof (u32),
+  .format_trace = format_udp4_encap_trace,
+  .n_next_nodes = 0,
+  .sibling_of = "udp6o6-encap",
 };
 
 VLIB_REGISTER_NODE (udp6_encap_node) = {
   .name = "udp6-encap",
   .vector_size = sizeof (u32),
-
   .format_trace = format_udp6_encap_trace,
-
   .n_next_nodes = 0,
+  .sibling_of = "udp6o6-encap",
 };
 /* *INDENT-ON* */
 
diff --git a/src/vnet/udp/udp_inlines.h b/src/vnet/udp/udp_inlines.h
index e4eb0c8..d79dc9a 100644
--- a/src/vnet/udp/udp_inlines.h
+++ b/src/vnet/udp/udp_inlines.h
@@ -97,14 +97,20 @@
 }
 
 always_inline void
-ip_udp_encap_one (vlib_main_t * vm, vlib_buffer_t * b0, u8 * ec0, word ec_len,
-		  u8 is_ip4)
+ip_udp_encap_one (vlib_main_t *vm, vlib_buffer_t *b0, u8 *ec0, word ec_len,
+		  ip_address_family_t encap_family,
+		  ip_address_family_t payload_family)
 {
-  vnet_calc_checksums_inline (vm, b0, is_ip4, !is_ip4);
+
+  if (payload_family < N_AF)
+    {
+      vnet_calc_checksums_inline (vm, b0, payload_family == AF_IP4,
+				  payload_family == AF_IP6);
+    }
 
   vlib_buffer_advance (b0, -ec_len);
 
-  if (is_ip4)
+  if (encap_family == AF_IP4)
     {
       ip4_header_t *ip0;
 
@@ -127,21 +133,27 @@
 }
 
 always_inline void
-ip_udp_encap_two (vlib_main_t * vm, vlib_buffer_t * b0, vlib_buffer_t * b1,
-		  u8 * ec0, u8 * ec1, word ec_len, u8 is_v4)
+ip_udp_encap_two (vlib_main_t *vm, vlib_buffer_t *b0, vlib_buffer_t *b1,
+		  u8 *ec0, u8 *ec1, word ec_len,
+		  ip_address_family_t encap_family,
+		  ip_address_family_t payload_family)
 {
   u16 new_l0, new_l1;
   udp_header_t *udp0, *udp1;
+  int payload_ip4 = (payload_family == AF_IP4);
 
   ASSERT (_vec_len (ec0) == _vec_len (ec1));
 
-  vnet_calc_checksums_inline (vm, b0, is_v4, !is_v4);
-  vnet_calc_checksums_inline (vm, b1, is_v4, !is_v4);
+  if (payload_family < N_AF)
+    {
+      vnet_calc_checksums_inline (vm, b0, payload_ip4, !payload_ip4);
+      vnet_calc_checksums_inline (vm, b1, payload_ip4, !payload_ip4);
+    }
 
   vlib_buffer_advance (b0, -ec_len);
   vlib_buffer_advance (b1, -ec_len);
 
-  if (is_v4)
+  if (encap_family == AF_IP4)
     {
       ip4_header_t *ip0, *ip1;
       ip_csum_t sum0, sum1;