vxlan: reuse inner packet flow hash for tunnel outer header load balancing

Type: fix

Several tunnel encapsulations (gtpu, geneve, vxlan, vxlan-gbp) use UDP as
the outer header and derive the UDP source port from the inner packet's
flow hash. Since that flow hash has already been computed during encap,
saving it to vnet_buffer (b)->ip.flow_hash spares the load-balance node
the work of recomputing it when selecting among ECMP uplinks.
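
For reference, a minimal standalone sketch of the idea (hypothetical
names, not the actual VPP encap code): the hash is computed once from
the inner headers, provides entropy for the outer UDP source port, and
is cached in per-buffer metadata so the downstream ECMP selection can
reuse it instead of hashing again.

  #include <stdint.h>
  #include <stdio.h>

  /* Per-buffer metadata, standing in for vnet_buffer (b)->ip.flow_hash. */
  typedef struct { uint32_t flow_hash; } buf_meta_t;

  /* Hypothetical inner-header hash; VPP uses helpers such as
     vnet_l2_compute_flow_hash for this. */
  static uint32_t
  inner_flow_hash (uint32_t src, uint32_t dst, uint16_t sport, uint16_t dport)
  {
    uint32_t h = src ^ dst ^ (((uint32_t) sport << 16) | dport);
    h ^= h >> 16; h *= 0x7feb352d; h ^= h >> 15;
    return h;
  }

  int
  main (void)
  {
    buf_meta_t meta;
    uint32_t hash = inner_flow_hash (0x0a000001, 0x0a000002, 12345, 2152);

    /* Outer UDP source port entropy comes from the inner flow hash,
       keeping all packets of one flow on one ECMP path. */
    uint16_t outer_udp_sport = 0xC000 | (hash & 0x3fff);

    /* Cache the hash so the load-balance node can reuse it rather than
       recomputing it when choosing an uplink. */
    meta.flow_hash = hash;

    /* Downstream ECMP selection over, e.g., 4 equal-cost uplinks. */
    unsigned uplink = meta.flow_hash % 4;

    printf ("sport=%u uplink=%u\n", (unsigned) outer_udp_sport, uplink);
    return 0;
  }
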

Change-Id: I0e4e2b27178f4fcc5785e221d6d1f3e8747d0d59
Signed-off-by: Shawn Ji <xiaji@tethrnet.com>
diff --git a/src/plugins/gtpu/gtpu_encap.c b/src/plugins/gtpu/gtpu_encap.c
index ec33e1e..e6b2ce5 100644
--- a/src/plugins/gtpu/gtpu_encap.c
+++ b/src/plugins/gtpu/gtpu_encap.c
@@ -413,6 +413,12 @@
 	  stats_n_packets += 4;
 	  stats_n_bytes += len0 + len1 + len2 + len3;
 
+          /* save inner packet flow_hash for load-balance node */
+          vnet_buffer (b0)->ip.flow_hash = flow_hash0;
+          vnet_buffer (b1)->ip.flow_hash = flow_hash1;
+          vnet_buffer (b2)->ip.flow_hash = flow_hash2;
+          vnet_buffer (b3)->ip.flow_hash = flow_hash3;
+
 	  /* Batch stats increment on the same gtpu tunnel so counter is not
 	     incremented per packet. Note stats are still incremented for deleted
 	     and admin-down tunnel where packets are dropped. It is not worthwhile
@@ -611,6 +617,9 @@
 	  stats_n_packets += 1;
 	  stats_n_bytes += len0;
 
+          /* save inner packet flow_hash for load-balance node */
+          vnet_buffer (b0)->ip.flow_hash = flow_hash0;
+
 	  /* Batch stats increment on the same gtpu tunnel so counter is not
 	     incremented per packet. Note stats are still incremented for deleted
 	     and admin-down tunnel where packets are dropped. It is not worthwhile
diff --git a/src/vnet/geneve/encap.c b/src/vnet/geneve/encap.c
index 8e59aea..af52fd2 100644
--- a/src/vnet/geneve/encap.c
+++ b/src/vnet/geneve/encap.c
@@ -289,6 +290,10 @@
 	  stats_n_packets += 2;
 	  stats_n_bytes += len0 + len1;
 
+	  /* save inner packet flow_hash for load-balance node */
+	  vnet_buffer (b[0])->ip.flow_hash = flow_hash0;
+	  vnet_buffer (b[1])->ip.flow_hash = flow_hash1;
+
 	  /* Batch stats increment on the same geneve tunnel so counter is not
 	     incremented per packet. Note stats are still incremented for deleted
 	     and admin-down tunnel where packets are dropped. It is not worthwhile
@@ -467,6 +472,9 @@
 	  stats_n_packets += 1;
 	  stats_n_bytes += len0;
 
+	  /* save inner packet flow_hash for load-balance node */
+	  vnet_buffer (b[0])->ip.flow_hash = flow_hash0;
+
 	  /* Batch stats increment on the same geneve tunnel so counter is not
 	     incremented per packet. Note stats are still incremented for deleted
 	     and admin-down tunnel where packets are dropped. It is not worthwhile
diff --git a/src/vnet/vxlan-gbp/encap.c b/src/vnet/vxlan-gbp/encap.c
index 4bba49a..a606b89 100644
--- a/src/vnet/vxlan-gbp/encap.c
+++ b/src/vnet/vxlan-gbp/encap.c
@@ -309,6 +309,10 @@
 		udp1->checksum = 0xffff;
 	    }
 
+	  /* save inner packet flow_hash for load-balance node */
+	  vnet_buffer (b[0])->ip.flow_hash = flow_hash0;
+	  vnet_buffer (b[1])->ip.flow_hash = flow_hash1;
+
 	  vlib_increment_combined_counter (tx_counter, thread_index,
 					   sw_if_index0, 1, len0);
 	  vlib_increment_combined_counter (tx_counter, thread_index,
@@ -451,6 +455,9 @@
 		udp0->checksum = 0xffff;
 	    }
 
+	  /* save inner packet flow_hash for load-balance node */
+	  vnet_buffer (b[0])->ip.flow_hash = flow_hash0;
+
 	  vlib_increment_combined_counter (tx_counter, thread_index,
 					   sw_if_index0, 1, len0);
 	  pkts_encapsulated++;
diff --git a/src/vnet/vxlan/encap.c b/src/vnet/vxlan/encap.c
index eec460d..da890b2 100644
--- a/src/vnet/vxlan/encap.c
+++ b/src/vnet/vxlan/encap.c
@@ -287,6 +287,10 @@
                 udp1->checksum = 0xffff;
             }
 
+        /* save inner packet flow_hash for load-balance node */
+        vnet_buffer (b0)->ip.flow_hash = flow_hash0;
+        vnet_buffer (b1)->ip.flow_hash = flow_hash1;
+
 	if (sw_if_index0 == sw_if_index1)
 	{
           vlib_increment_combined_counter (tx_counter, thread_index,
@@ -424,6 +428,9 @@
                 udp0->checksum = 0xffff;
             }
 
+          /* save inner packet flow_hash for load-balance node */
+          vnet_buffer (b0)->ip.flow_hash = flow_hash0;
+
           vlib_increment_combined_counter (tx_counter, thread_index,
               sw_if_index0, 1, len0);
           pkts_encapsulated ++;