UDP Encap counters

Change-Id: Ib5639981dca0b11b2d62acf2c0963cc95c380f70
Signed-off-by: Neale Ranns <nranns@cisco.com>
diff --git a/src/vnet/udp/udp_encap.c b/src/vnet/udp/udp_encap.c
index 8392b39..8005fa5 100644
--- a/src/vnet/udp/udp_encap.c
+++ b/src/vnet/udp/udp_encap.c
@@ -33,6 +33,11 @@
  */
 udp_encap_t *udp_encap_pool;
 
+/**
+ * Stats for each UDP encap object
+ */
+vlib_combined_counter_main_t udp_encap_counters;
+
 static udp_encap_t *
 udp_encap_get_w_id (u32 id)
 {
@@ -79,6 +84,9 @@
       pool_get (udp_encap_pool, ue);
       uei = ue - udp_encap_pool;
 
+      vlib_validate_combined_counter (&(udp_encap_counters), uei);
+      vlib_zero_combined_counter (&(udp_encap_counters), uei);
+
       hash_set (udp_encap_db, id, uei);
 
       fib_node_init (&ue->ue_fib_node, FIB_NODE_TYPE_UDP_ENCAP);
@@ -258,6 +266,7 @@
   index_t uei = va_arg (*args, index_t);
   u32 indent = va_arg (*args, u32);
   u32 details = va_arg (*args, u32);
+  vlib_counter_t to;
   udp_encap_t *ue;
 
   ue = udp_encap_get (uei);
@@ -285,6 +294,9 @@
 		  clib_net_to_host_u16 (ue->ue_hdrs.ip6.ue_udp.src_port),
 		  clib_net_to_host_u16 (ue->ue_hdrs.ip6.ue_udp.dst_port));
     }
+  vlib_get_combined_counter (&(udp_encap_counters), uei, &to);
+  s = format (s, " to:[%Ld:%Ld]]", to.packets, to.bytes);
+
   if (details)
     {
       s = format (s, " locks:%d", ue->ue_fib_node.fn_locks);
@@ -296,6 +308,17 @@
   return (s);
 }
 
+void
+udp_encap_get_stats (index_t uei, u64 * packets, u64 * bytes)
+{
+  vlib_counter_t to;
+
+  vlib_get_combined_counter (&(udp_encap_counters), uei, &to);
+
+  *packets = to.packets;
+  *bytes = to.bytes;
+}
+
 static u8 *
 format_udp_encap_dpo (u8 * s, va_list * args)
 {
@@ -568,6 +591,20 @@
   return error;
 }
 
+void
+udp_encap_walk (udp_encap_walk_cb_t cb, void *ctx)
+{
+  index_t uei;
+
+  /* *INDENT-OFF* */
+  pool_foreach_index(uei, udp_encap_pool,
+  ({
+    if (!cb(uei, ctx))
+      break;
+  }));
+  /* *INDENT-ON* */
+}
+
 clib_error_t *
 udp_encap_show (vlib_main_t * vm,
 		unformat_input_t * input, vlib_cli_command_t * cmd)
diff --git a/src/vnet/udp/udp_encap.h b/src/vnet/udp/udp_encap.h
index b8f329d..185d5e7 100644
--- a/src/vnet/udp/udp_encap.h
+++ b/src/vnet/udp/udp_encap.h
@@ -125,6 +125,19 @@
 					     dpo_proto_t proto,
 					     dpo_id_t * dpo);
 
+extern void udp_encap_get_stats (index_t uei, u64 * packets, u64 * bytes);
+
+/**
+ * Callback function invoked when walking all encap objects.
+ * Return non-zero to continue the walk.
+ */
+typedef int (*udp_encap_walk_cb_t) (index_t uei, void *ctx);
+
+/**
+ * Walk each of the encap objects
+ */
+extern void udp_encap_walk (udp_encap_walk_cb_t cb, void *ctx);
+
 /**
  * Pool of encaps
  */
diff --git a/src/vnet/udp/udp_encap_node.c b/src/vnet/udp/udp_encap_node.c
index 09a76b5..2d37d8b 100644
--- a/src/vnet/udp/udp_encap_node.c
+++ b/src/vnet/udp/udp_encap_node.c
@@ -27,6 +27,8 @@
   ip6_header_t ip;
 } udp6_encap_trace_t;
 
+extern vlib_combined_counter_main_t udp_encap_counters;
+
 static u8 *
 format_udp4_encap_trace (u8 * s, va_list * args)
 {
@@ -62,8 +64,10 @@
 		  vlib_node_runtime_t * node,
 		  vlib_frame_t * frame, int is_encap_v6)
 {
+  vlib_combined_counter_main_t *cm = &udp_encap_counters;
   u32 *from = vlib_frame_vector_args (frame);
   u32 n_left_from, n_left_to_next, *to_next, next_index;
+  u32 thread_index = vlib_get_thread_index ();
 
   n_left_from = frame->n_vectors;
   next_index = node->cached_next_index;
@@ -104,6 +108,13 @@
 	  uei0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
 	  uei1 = vnet_buffer (b1)->ip.adj_index[VLIB_TX];
 
+	  vlib_increment_combined_counter (cm, thread_index, uei0, 1,
+					   vlib_buffer_length_in_chain (vm,
+									b0));
+	  vlib_increment_combined_counter (cm, thread_index, uei1, 1,
+					   vlib_buffer_length_in_chain (vm,
+									b1));
+
 	  /* Rewrite packet header and updates lengths. */
 	  ue0 = udp_encap_get (uei0);
 	  ue1 = udp_encap_get (uei1);
@@ -185,6 +196,10 @@
 	  /* Rewrite packet header and updates lengths. */
 	  ue0 = udp_encap_get (uei0);
 
+	  vlib_increment_combined_counter (cm, thread_index, uei0, 1,
+					   vlib_buffer_length_in_chain (vm,
+									b0));
+
 	  /* Paint */
 	  if (is_encap_v6)
 	    {
diff --git a/src/vpp/stats/stats.api b/src/vpp/stats/stats.api
index 79816db..86eaa62 100644
--- a/src/vpp/stats/stats.api
+++ b/src/vpp/stats/stats.api
@@ -55,6 +55,9 @@
   rpc want_ip6_nbr_stats
     returns want_ip6_nbr_stats_reply
     events vnet_ip6_nbr_counters;
+  rpc want_udp_encap_stats
+    returns want_udp_encap_stats_reply
+    events vnet_udp_encap_counters;
 };
 
 /** \brief Want Stats, enable/disable ALL stats updates
@@ -436,6 +439,39 @@
   u32 delay;
 };
 
+/** \brief Want UDP encap Stats, register for continuous stats
+    @param client_index - opaque cookie to identify the sender
+    @param context - sender context, to match reply w/ request
+    @param enable - 1 = enable stats, 0 = disable
+    @param pid - pid of process requesting stats updates
+*/
+autoreply define want_udp_encap_stats
+{
+  u32 client_index;
+  u32 context;
+  u32 enable;
+  u32 pid;
+};
+
+/** \brief Stat for one UDP encap object
+    @param id - The ID of the object, same as passed for the create
+    @param packets - number of packets sent
+    @param bytes - number of bytes sent
+*/
+typeonly manual_print manual_endian define udp_encap_counter
+{
+  u32 id;
+  u64 packets;
+  u64 bytes;
+};
+
+manual_print manual_endian define vnet_udp_encap_counters
+{
+  u32 timestamp;
+  u32 count;
+  vl_api_udp_encap_counter_t c[count];
+};
+
 /*
  * Local Variables:
  * eval: (c-set-style "gnu")
diff --git a/src/vpp/stats/stats.c b/src/vpp/stats/stats.c
index 05950b6..3fe03e4 100644
--- a/src/vpp/stats/stats.c
+++ b/src/vpp/stats/stats.c
@@ -18,6 +18,7 @@
 #include <vnet/fib/fib_entry.h>
 #include <vnet/mfib/mfib_entry.h>
 #include <vnet/dpo/load_balance.h>
+#include <vnet/udp/udp_encap.h>
 
 #define STATS_DEBUG 0
 
@@ -63,8 +64,8 @@
 _(VNET_IP6_NBR_COUNTERS, vnet_ip6_nbr_counters) \
 _(WANT_IP6_NBR_STATS, want_ip6_nbr_stats) \
 _(VNET_GET_SUMMARY_STATS, vnet_get_summary_stats) \
-_(STATS_GET_POLLER_DELAY, stats_get_poller_delay)
-
+_(STATS_GET_POLLER_DELAY, stats_get_poller_delay) \
+_(WANT_UDP_ENCAP_STATS, want_udp_encap_stats)
 
 #define vl_msg_name_crc_list
 #include <vpp/stats/stats.api.h>
@@ -86,6 +87,7 @@
 #define IP6_FIB_COUNTER_BATCH_SIZE	30
 #define IP4_MFIB_COUNTER_BATCH_SIZE	24
 #define IP6_MFIB_COUNTER_BATCH_SIZE	15
+#define UDP_ENCAP_COUNTER_BATCH_SIZE	(1024 / sizeof(vl_api_udp_encap_counter_t))
 
 /* 5ms */
 #define STATS_RELEASE_DELAY_NS (1000 * 1000 * 5)
@@ -2069,6 +2071,110 @@
     vl_msg_api_free (mp);
 }
 
+typedef struct udp_encap_stat_t_
+{
+  u32 ue_id;
+  u64 stats[2];
+} udp_encap_stat_t;
+
+typedef struct udp_encap_stats_walk_t_
+{
+  udp_encap_stat_t *stats;
+} udp_encap_stats_walk_t;
+
+static int
+udp_encap_stats_walk_cb (index_t uei, void *arg)
+{
+  udp_encap_stats_walk_t *ctx = arg;
+  udp_encap_stat_t *stat;
+  udp_encap_t *ue;
+
+  ue = udp_encap_get (uei);
+  vec_add2 (ctx->stats, stat, 1);
+
+  stat->ue_id = ue->ue_id;
+  udp_encap_get_stats (uei, &stat->stats[0], &stat->stats[1]);
+
+  return (1);
+}
+
+static void
+udp_encap_ship (udp_encap_stats_walk_t * ctx)
+{
+  vl_api_vnet_udp_encap_counters_t *mp;
+  vl_shmem_hdr_t *shmem_hdr;
+  stats_main_t *sm;
+  api_main_t *am;
+  svm_queue_t *q;
+
+  mp = NULL;
+  sm = &stats_main;
+  am = sm->api_main;
+  shmem_hdr = am->shmem_hdr;
+  q = shmem_hdr->vl_input_queue;
+
+  /*
+   * If the walk context has counters, which may be left over from the last
+   * suspend, then we continue from there.
+   */
+  while (0 != vec_len (ctx->stats))
+    {
+      u32 n_items = MIN (vec_len (ctx->stats),
+			 UDP_ENCAP_COUNTER_BATCH_SIZE);
+      u8 pause = 0;
+
+      dslock (sm, 0 /* release hint */ , 1 /* tag */ );
+
+      mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
+					  (n_items *
+					   sizeof
+					   (vl_api_udp_encap_counter_t)));
+      mp->_vl_msg_id = ntohs (VL_API_VNET_UDP_ENCAP_COUNTERS);
+      mp->count = ntohl (n_items);
+
+      /*
+       * copy the counters from the back of the context, then we can easily
+       * 'erase' them by resetting the vector length.
+       * The order we push the stats to the caller is not important.
+       */
+      clib_memcpy (mp->c,
+		   &ctx->stats[vec_len (ctx->stats) - n_items],
+		   n_items * sizeof (*ctx->stats));
+
+      _vec_len (ctx->stats) = vec_len (ctx->stats) - n_items;
+
+      /*
+       * send to the shm q
+       */
+      svm_queue_lock (q);
+      pause = svm_queue_is_full (q);
+
+      vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
+      svm_queue_unlock (q);
+      dsunlock (sm);
+
+      if (pause)
+	ip46_fib_stats_delay (sm, 0 /* sec */ ,
+			      STATS_RELEASE_DELAY_NS);
+    }
+}
+
+static void
+do_udp_encap_counters (stats_main_t * sm)
+{
+  udp_encap_stat_t *stat;
+
+  udp_encap_stats_walk_t ctx = {
+    .stats = NULL,
+  };
+
+  dslock (sm, 0 /* release hint */ , 1 /* tag */ );
+  udp_encap_walk (udp_encap_stats_walk_cb, &ctx);
+  dsunlock (sm);
+
+  udp_encap_ship (&ctx);
+}
+
 int
 stats_set_poller_delay (u32 poller_delay_sec)
 {
@@ -2195,6 +2301,9 @@
 
       if (pool_elts (sm->stats_registrations[IDX_IP6_NBR_COUNTERS]))
 	do_ip6_nbr_counters (sm);
+
+      if (pool_elts (sm->stats_registrations[IDX_UDP_ENCAP_COUNTERS]))
+	do_udp_encap_counters (sm);
     }
 }
 
@@ -2451,6 +2560,41 @@
 }
 
 static void
+vl_api_want_udp_encap_stats_t_handler (vl_api_want_udp_encap_stats_t * mp)
+{
+  stats_main_t *sm = &stats_main;
+  vpe_client_registration_t rp;
+  vl_api_want_udp_encap_stats_reply_t *rmp;
+  uword *p;
+  i32 retval = 0;
+  vl_api_registration_t *reg;
+  u32 fib;
+
+  fib = ~0;			//Using same mechanism as _per_interface_
+  rp.client_index = mp->client_index;
+  rp.client_pid = mp->pid;
+
+  handle_client_registration (&rp, IDX_UDP_ENCAP_COUNTERS, fib, mp->enable);
+
+reply:
+  reg = vl_api_client_index_to_registration (mp->client_index);
+
+  if (!reg)
+    {
+      sm->enable_poller = clear_client_for_stat (IDX_UDP_ENCAP_COUNTERS,
+						 fib, mp->client_index);
+      return;
+    }
+
+  rmp = vl_msg_api_alloc (sizeof (*rmp));
+  rmp->_vl_msg_id = ntohs (VL_API_WANT_UDP_ENCAP_STATS_REPLY);
+  rmp->context = mp->context;
+  rmp->retval = retval;
+
+  vl_api_send_msg (reg, (u8 *) rmp);
+}
+
+static void
 vl_api_want_stats_t_handler (vl_api_want_stats_t * mp)
 {
   stats_main_t *sm = &stats_main;
diff --git a/src/vpp/stats/stats.reg b/src/vpp/stats/stats.reg
index 4c548c5..dbec8c9 100644
--- a/src/vpp/stats/stats.reg
+++ b/src/vpp/stats/stats.reg
@@ -41,3 +41,4 @@
 stats_reg (PER_INTERFACE_SIMPLE_COUNTERS)
 stats_reg (IP4_MFIB_COUNTERS)
 stats_reg (IP6_MFIB_COUNTERS)
+stats_reg (UDP_ENCAP_COUNTERS)
diff --git a/test/vpp_papi_provider.py b/test/vpp_papi_provider.py
index 52fc364..65cf766 100644
--- a/test/vpp_papi_provider.py
+++ b/test/vpp_papi_provider.py
@@ -1083,6 +1083,11 @@
     def udp_encap_dump(self):
         return self.api(self.papi.udp_encap_dump, {})
 
+    def want_udp_encap_stats(self, enable=1):
+        return self.api(self.papi.want_udp_encap_stats,
+                        {'enable': enable,
+                         'pid': os.getpid()})
+
     def mpls_fib_dump(self):
         return self.api(self.papi.mpls_fib_dump, {})