VPP-311 Coding standards cleanup for vnet/vnet/*.[ch]

Change-Id: I08ed983f594072bc8c72202e77205a7789eea599
Signed-off-by: Dave Barach <dave@barachs.net>
diff --git a/build-root/emacs-lisp/fix-coding-style.el b/build-root/emacs-lisp/fix-coding-style.el
index 3b5ff56..3c92797 100755
--- a/build-root/emacs-lisp/fix-coding-style.el
+++ b/build-root/emacs-lisp/fix-coding-style.el
@@ -74,6 +74,12 @@
 (defun fix-reply-macro2 () (interactive)
        (fix-initializer "REPLY_MACRO2 *("))
 
+(defun fix-vnet-device-class () (interactive)
+       (fix-initializer "VNET_DEVICE_CLASS *("))
+
+(defun fix-vnet-hw-interface-class () (interactive)
+       (fix-initializer "VNET_HW_INTERFACE_CLASS *("))
+
 ;; Driver routine which runs the set of functions
 ;; defined above, as well as the bottom boilerplate function
 
@@ -90,6 +96,8 @@
        (fix-vlib-cli-command)
        (fix-vlib-register-node)
        (fix-reply-macro2)
+       (fix-vnet-device-class)
+       (fix-vnet-hw-interface-class)
        (insert-style-boilerplate))
 
 
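The two helpers added above run fix-initializer over VNET_DEVICE_CLASS and VNET_HW_INTERFACE_CLASS instances, apparently so their designated-initializer bodies get the same /* *INDENT-OFF* */ ... /* *INDENT-ON* */ guards that the VLIB_REGISTER_NODE and VLIB_CLI_COMMAND blocks receive later in this patch. A minimal sketch of the resulting shape, using an illustrative (not real) device class and callback:

  /* Hypothetical driver; all names below are illustrative only. */
  #include <vnet/vnet.h>

  static clib_error_t *
  my_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
  {
    return 0;
  }

  /* *INDENT-OFF* */
  VNET_DEVICE_CLASS (my_device_class) = {
    .name = "my-device",
    .admin_up_down_function = my_admin_up_down,
  };
  /* *INDENT-ON* */
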
diff --git a/vnet/vnet/api_errno.h b/vnet/vnet/api_errno.h
index 2b18ef5..c534b4b 100644
--- a/vnet/vnet/api_errno.h
+++ b/vnet/vnet/api_errno.h
@@ -88,11 +88,20 @@
 _(EXCEEDED_NUMBER_OF_RANGES_CAPACITY, -95, "Operation would exceed configured capacity of ranges") \
 _(EXCEEDED_NUMBER_OF_PORTS_CAPACITY, -96, "Operation would exceed capacity of number of ports")
 
-typedef enum {
+typedef enum
+{
 #define _(a,b,c) VNET_API_ERROR_##a = (b),
-    foreach_vnet_api_error
+  foreach_vnet_api_error
 #undef _
     VNET_API_N_ERROR,
 } vnet_api_error_t;
 
 #endif /* included_vnet_api_errno_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
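
The enum above is generated with an X-macro: each _(a,b,c) entry in foreach_vnet_api_error contributes one VNET_API_ERROR_##a member with value b. A sketch of the same list being reused to map codes back to their message strings; api_strerror is an illustrative helper, not part of this patch:

  #include <vnet/api_errno.h>

  static const char *
  api_strerror (int err)
  {
    switch (err)
      {
  #define _(a,b,c) case b: return c;
        foreach_vnet_api_error
  #undef _
      default:
        return "unknown vnet API error";
      }
  }
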
diff --git a/vnet/vnet/buffer.h b/vnet/vnet/buffer.h
index 742fe32..3fcdf07 100644
--- a/vnet/vnet/buffer.h
+++ b/vnet/vnet/buffer.h
@@ -83,7 +83,7 @@
 _(map_t)					\
 _(ip_frag)
 
-/* 
+/*
  * vnet stack buffer opaque array overlay structure.
  * The vnet_buffer_opaque_t *must* be the same size as the
  * vlib_buffer_t "opaque" structure member, 32 bytes.
@@ -94,35 +94,42 @@
  * of the union, and will announce any deviations in an
  * impossible-to-miss manner.
  */
-typedef struct {
+typedef struct
+{
   u32 sw_if_index[VLIB_N_RX_TX];
 
-  union {
+  union
+  {
     /* Ethernet. */
-    struct {
+    struct
+    {
       /* Saved value of current header by ethernet-input. */
       i32 start_of_ethernet_header;
     } ethernet;
 
     /* IP4/6 buffer opaque. */
-    struct {
+    struct
+    {
       /* Adjacency from destination IP address lookup [VLIB_TX].
-	 Adjacency from source IP address lookup [VLIB_RX].
-	 This gets set to ~0 until source lookup is performed. */
+         Adjacency from source IP address lookup [VLIB_RX].
+         This gets set to ~0 until source lookup is performed. */
       u32 adj_index[VLIB_N_RX_TX];
 
-      union {
-	struct {
+      union
+      {
+	struct
+	{
 	  /* Flow hash value for this packet computed from IP src/dst address
 	     protocol and ports. */
 	  u32 flow_hash;
 
-          /* next protocol */
-          u32 save_protocol;
+	  /* next protocol */
+	  u32 save_protocol;
 	};
 
 	/* Alternate used for local TCP packets. */
-	struct {
+	struct
+	{
 	  u32 listener_index;
 
 	  u32 established_connection_index;
@@ -133,7 +140,8 @@
 	} tcp;
 
 	/* ICMP */
-	struct {
+	struct
+	{
 	  u8 type;
 	  u8 code;
 	  u32 data;
@@ -142,40 +150,46 @@
     } ip;
 
     /* Multicast replication */
-    struct {
-      u32 pad[3];  
+    struct
+    {
+      u32 pad[3];
       u32 mcast_group_index;
       u32 mcast_current_index;
       u32 original_free_list_index;
     } mcast;
 
     /* ip4-in-ip6 softwire termination, only valid there */
-    struct {
+    struct
+    {
       u8 swt_disable;
       u32 mapping_index;
     } swt;
 
     /* l2 bridging path, only valid there */
-    struct {
+    struct
+    {
       u32 feature_bitmap;
-      u16 bd_index;       // bridge-domain index
-      u8  l2_len;         // ethernet header length
-      u8  shg;            // split-horizon group
+      u16 bd_index;		// bridge-domain index
+      u8 l2_len;		// ethernet header length
+      u8 shg;			// split-horizon group
     } l2;
 
     /* l2tpv3 softwire encap, only valid there */
-    struct {
-      u32 pad[4];               /* do not overlay w/ ip.adj_index[0,1] */
+    struct
+    {
+      u32 pad[4];		/* do not overlay w/ ip.adj_index[0,1] */
       u8 next_index;
       u32 session_index;
     } l2t;
 
-    struct {
+    struct
+    {
       u32 src, dst;
     } gre;
 
     /* L2 classify */
-    struct {
+    struct
+    {
       u64 pad;
       u32 opaque_index;
       u32 table_index;
@@ -183,18 +197,21 @@
     } l2_classify;
 
     /* IO - worker thread handoff */
-    struct {
+    struct
+    {
       u32 next_index;
     } handoff;
 
     /* vnet policer */
-    struct {
-      u32 pad[8 -VLIB_N_RX_TX -1];  /* to end of opaque */
+    struct
+    {
+      u32 pad[8 - VLIB_N_RX_TX - 1];	/* to end of opaque */
       u32 index;
     } policer;
 
     /* interface output features */
-    struct {
+    struct
+    {
       u32 ipsec_spd_index;
       u32 ipsec_sad_index;
       u32 unused[3];
@@ -202,99 +219,111 @@
     } output_features;
 
     /* vcgn udp inside input, only valid there */
-    struct {
+    struct
+    {
       /* This part forms context of the packet. The structure should be
-       * exactly same as spp_ctx_t. Also this should be the first 
+       * exactly same as spp_ctx_t. Also this should be the first
        * element of this vcgn_uii structure.
        */
       /****** BEGIN spp_ctx_t section ***********************/
-      union { /* Roddick specific */
-        u32 roddick_info;
-        struct _tx_pkt_info  { /* Used by PI to PI communication for TX */
-          u32 uidb_index:16;       /* uidb_index to transmit */
-          u32  packet_type:2;   /* 1-IPv4, 2-Ipv6, - 0,3 - Unused */
-          u32  ipv4_defrag:1;   /* 0 - Normal, 1 - update first
-                                 * segment size
-                                 * (set by 6rd defrag node)
-                                 */
+      union
+      {				/* Roddick specific */
+	u32 roddick_info;
+	struct _tx_pkt_info
+	{			/* Used by PI to PI communication for TX */
+	  u32 uidb_index:16;	/* uidb_index to transmit */
+	  u32 packet_type:2;	/* 1-IPv4, 2-Ipv6, - 0,3 - Unused */
+	  u32 ipv4_defrag:1;	/* 0 - Normal, 1 - update first
+				 * segment size
+				 * (set by 6rd defrag node)
+				 */
 
-          u32  dst_ip_port_idx:4;/* Index to dst_ip_port_table */
-          u32  from_node:4;
-          u32  calc_chksum:1;
-          u32  reserved:4;
-        } tx;
-        struct _rx_pkt_info { /* Used by PD / PI communication */
-          u32 uidb_index:16;    /* uidb_index received in packet */
-          u32  packet_type:2;   /* 1-IPv4, 2-Ipv6, - 0,3 - Unused */
-          u32  icmp_type:1;     /* 0-ICMP query type, 1-ICMP error type */
-          u32  protocol_type:2; /* 1-TCP, 2-UDP, 3-ICMP, 0 - Unused */
-          u32  ipv4_defrag:1;    /* 0 - Normal, 1 - update first
-                                  * segment size
-                                  * (set by 6rd defrag node)
-                                  */
-    
-          u32  direction:1;     /* 0-Outside, 1-Inside */
-          u32  frag:1;          /*IP fragment-1, Otherwise-0*/
-          u32  option:1;        /* 0-No IP option (v4) present, non-fragHdr
-                                 * option hdr present (v6)
-                                 */
-          u32  df_bit:1;        /* IPv4 DF bit copied here */
-          u32  reserved1:6;
-        } rx;
+	  u32 dst_ip_port_idx:4;	/* Index to dst_ip_port_table */
+	  u32 from_node:4;
+	  u32 calc_chksum:1;
+	  u32 reserved:4;
+	} tx;
+	struct _rx_pkt_info
+	{			/* Used by PD / PI communication */
+	  u32 uidb_index:16;	/* uidb_index received in packet */
+	  u32 packet_type:2;	/* 1-IPv4, 2-Ipv6, - 0,3 - Unused */
+	  u32 icmp_type:1;	/* 0-ICMP query type, 1-ICMP error type */
+	  u32 protocol_type:2;	/* 1-TCP, 2-UDP, 3-ICMP, 0 - Unused */
+	  u32 ipv4_defrag:1;	/* 0 - Normal, 1 - update first
+				 * segment size
+				 * (set by 6rd defrag node)
+				 */
+
+	  u32 direction:1;	/* 0-Outside, 1-Inside */
+	  u32 frag:1;		/*IP fragment-1, Otherwise-0 */
+	  u32 option:1;		/* 0-No IP option (v4) present, non-fragHdr
+				 * option hdr present (v6)
+				 */
+	  u32 df_bit:1;		/* IPv4 DF bit copied here */
+	  u32 reserved1:6;
+	} rx;
       } ru;
       /****** END  spp_ctx_t section ***********************/
 
-      union {
-        struct {
-          u32 ipv4;
-          u16 port;
-          u16 vrf;  //bit0-13:i/f, bit14-15:protocol
-        } k;
+      union
+      {
+	struct
+	{
+	  u32 ipv4;
+	  u16 port;
+	  u16 vrf;		//bit0-13:i/f, bit14-15:protocol
+	} k;
 
-        u64 key64;
+	u64 key64;
       } key;
 
       u32 bucket;
 
-      u16 ovrf; /* Exit interface */
+      u16 ovrf;			/* Exit interface */
       u8 frag_pkt;
       u8 vcgn_unused1;
     } vcgn_uii;
 
     /* MAP */
-    struct {
+    struct
+    {
       u16 mtu;
     } map;
 
     /* MAP-T */
-    struct {
+    struct
+    {
       u32 map_domain_index;
-      struct {
-        u32 saddr, daddr;
-        u16 frag_offset;      //Fragmentation header offset
-        u16 l4_offset;        //L4 header overall offset
-        u8  l4_protocol;      //The final protocol number
-      } v6; //Used by ip6_map_t only
-      u16 checksum_offset;    //L4 checksum overall offset
-      u16 mtu;                //Exit MTU
+      struct
+      {
+	u32 saddr, daddr;
+	u16 frag_offset;	//Fragmentation header offset
+	u16 l4_offset;		//L4 header overall offset
+	u8 l4_protocol;		//The final protocol number
+      } v6;			//Used by ip6_map_t only
+      u16 checksum_offset;	//L4 checksum overall offset
+      u16 mtu;			//Exit MTU
     } map_t;
 
     /* IP Fragmentation */
-    struct {
+    struct
+    {
       u16 header_offset;
       u16 mtu;
       u8 next_index;
-      u8 flags;          //See ip_frag.h
+      u8 flags;			//See ip_frag.h
     } ip_frag;
 
     /* COP - configurable junk filter(s) */
-    struct {
-        /* Current configuration index. */
-        u32 current_config_index;
+    struct
+    {
+      /* Current configuration index. */
+      u32 current_config_index;
     } cop;
 
     /* LISP */
-    struct {
+    struct
+    {
       /* overlay address family */
       u16 overlay_afi;
     } lisp;
@@ -306,11 +335,21 @@
 #define vnet_buffer(b) ((vnet_buffer_opaque_t *) (b)->opaque)
 
 /* Full cache line (64 bytes) of additional space */
-typedef struct {
-  union {
+typedef struct
+{
+  union
+  {
   };
 } vnet_buffer_opaque2_t;
 
 
 
 #endif /* included_vnet_buffer_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
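
Since vnet_buffer_opaque_t is overlaid on the 32-byte opaque member of vlib_buffer_t via the vnet_buffer() cast above, growth of the union has to be caught early. A minimal sketch of the kind of size check the header's comment refers to, plus typical accessor usage; the function names here are illustrative, not the actual validation code:

  #include <vlib/vlib.h>
  #include <vnet/buffer.h>

  /* Illustrative one-time check: the overlay must fit the opaque area. */
  static void
  check_vnet_buffer_opaque_size (void)
  {
    ASSERT (sizeof (vnet_buffer_opaque_t)
            <= STRUCT_SIZE_OF (vlib_buffer_t, opaque));
  }

  /* Typical per-packet accessor usage via the vnet_buffer() macro. */
  always_inline u32
  rx_sw_if_index (vlib_buffer_t * b)
  {
    return vnet_buffer (b)->sw_if_index[VLIB_RX];
  }
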
diff --git a/vnet/vnet/config.c b/vnet/vnet/config.c
index 2e056c8..be48df6 100644
--- a/vnet/vnet/config.c
+++ b/vnet/vnet/config.c
@@ -42,11 +42,10 @@
 static vnet_config_feature_t *
 duplicate_feature_vector (vnet_config_feature_t * feature_vector)
 {
-  vnet_config_feature_t * result, * f;
+  vnet_config_feature_t *result, *f;
 
   result = vec_dup (feature_vector);
-  vec_foreach (f, result)
-    f->feature_config = vec_dup (f->feature_config);
+  vec_foreach (f, result) f->feature_config = vec_dup (f->feature_config);
 
   return result;
 }
@@ -54,18 +53,15 @@
 static void
 free_feature_vector (vnet_config_feature_t * feature_vector)
 {
-  vnet_config_feature_t * f;
+  vnet_config_feature_t *f;
 
-  vec_foreach (f, feature_vector)
-    vnet_config_feature_free (f);
+  vec_foreach (f, feature_vector) vnet_config_feature_free (f);
   vec_free (feature_vector);
 }
 
 static u32
 add_next (vlib_main_t * vm,
-	  vnet_config_main_t * cm,
-	  u32 last_node_index,
-	  u32 this_node_index)
+	  vnet_config_main_t * cm, u32 last_node_index, u32 this_node_index)
 {
   u32 i, ni = ~0;
 
@@ -75,7 +71,8 @@
   for (i = 0; i < vec_len (cm->start_node_indices); i++)
     {
       u32 tmp;
-      tmp = vlib_node_add_next (vm, cm->start_node_indices[i], this_node_index);
+      tmp =
+	vlib_node_add_next (vm, cm->start_node_indices[i], this_node_index);
       if (ni == ~0)
 	ni = tmp;
       /* Start nodes to first must agree on next indices. */
@@ -91,10 +88,10 @@
 			   vnet_config_feature_t * feature_vector)
 {
   u32 last_node_index = ~0;
-  vnet_config_feature_t * f;
-  u32 * config_string;
-  uword * p;
-  vnet_config_t * c;
+  vnet_config_feature_t *f;
+  u32 *config_string;
+  uword *p;
+  vnet_config_t *c;
 
   config_string = cm->config_string_temp;
   cm->config_string_temp = 0;
@@ -102,17 +99,17 @@
     _vec_len (config_string) = 0;
 
   vec_foreach (f, feature_vector)
-    {
-      /* Connect node graph. */
-      f->next_index = add_next (vm, cm, last_node_index, f->node_index);
-      last_node_index = f->node_index;
+  {
+    /* Connect node graph. */
+    f->next_index = add_next (vm, cm, last_node_index, f->node_index);
+    last_node_index = f->node_index;
 
-      /* Store next index in config string. */
-      vec_add1 (config_string, f->next_index);
+    /* Store next index in config string. */
+    vec_add1 (config_string, f->next_index);
 
-      /* Store feature config. */
-      vec_add (config_string, f->feature_config, vec_len (f->feature_config));
-    }
+    /* Store feature config. */
+    vec_add (config_string, f->feature_config, vec_len (f->feature_config));
+  }
 
   /* Terminate config string with next for end node. */
   if (last_node_index == ~0 || last_node_index != cm->end_node_index)
@@ -126,13 +123,13 @@
   if (p)
     {
       /* Not unique.  Share existing config. */
-      cm->config_string_temp = config_string; /* we'll use it again later. */
+      cm->config_string_temp = config_string;	/* we'll use it again later. */
       free_feature_vector (feature_vector);
       c = pool_elt_at_index (cm->config_pool, p[0]);
     }
   else
     {
-      u32 * d;
+      u32 *d;
 
       pool_get (cm->config_pool, c);
       c->index = c - cm->config_pool;
@@ -140,37 +137,42 @@
       c->config_string_vector = config_string;
 
       /* Allocate copy of config string in heap.
-	 VLIB buffers will maintain pointers to heap as they read out
-	 configuration data. */
+         VLIB buffers will maintain pointers to heap as they read out
+         configuration data. */
       c->config_string_heap_index
 	= heap_alloc (cm->config_string_heap, vec_len (config_string) + 1,
 		      c->config_string_heap_handle);
 
       /* First element in heap points back to pool index. */
-      d = vec_elt_at_index (cm->config_string_heap, c->config_string_heap_index);
+      d =
+	vec_elt_at_index (cm->config_string_heap,
+			  c->config_string_heap_index);
       d[0] = c->index;
       clib_memcpy (d + 1, config_string, vec_bytes (config_string));
       hash_set_mem (cm->config_string_hash, config_string, c->index);
 
-      c->reference_count = 0; /* will be incremented by caller. */
+      c->reference_count = 0;	/* will be incremented by caller. */
     }
 
   return c;
 }
 
-void vnet_config_init (vlib_main_t * vm,
-		       vnet_config_main_t * cm,
-		       char * start_node_names[],
-		       int n_start_node_names,
-		       char * feature_node_names[],
-		       int n_feature_node_names)
+void
+vnet_config_init (vlib_main_t * vm,
+		  vnet_config_main_t * cm,
+		  char *start_node_names[],
+		  int n_start_node_names,
+		  char *feature_node_names[], int n_feature_node_names)
 {
-  vlib_node_t * n;
+  vlib_node_t *n;
   u32 i;
 
   memset (cm, 0, sizeof (cm[0]));
 
-  cm->config_string_hash = hash_create_vec (0, STRUCT_SIZE_OF (vnet_config_t, config_string_vector[0]), sizeof (uword));
+  cm->config_string_hash =
+    hash_create_vec (0,
+		     STRUCT_SIZE_OF (vnet_config_t, config_string_vector[0]),
+		     sizeof (uword));
 
   ASSERT (n_start_node_names >= 1);
   ASSERT (n_feature_node_names >= 1);
@@ -187,7 +189,7 @@
   vec_resize (cm->node_index_by_feature_index, n_feature_node_names);
   for (i = 0; i < n_feature_node_names; i++)
     {
-      if (! feature_node_names[i])
+      if (!feature_node_names[i])
 	cm->node_index_by_feature_index[i] = ~0;
       else
 	{
@@ -198,8 +200,9 @@
 	      if (i + 1 == n_feature_node_names)
 		cm->end_node_index = n->index;
 	      cm->node_index_by_feature_index[i] = n->index;
-	     }
-	  else cm->node_index_by_feature_index[i] = ~0;
+	    }
+	  else
+	    cm->node_index_by_feature_index[i] = ~0;
 	}
     }
 }
@@ -218,32 +221,34 @@
 }
 
 static int
-feature_cmp (void * a1, void * a2)
+feature_cmp (void *a1, void *a2)
 {
-  vnet_config_feature_t * f1 = a1;
-  vnet_config_feature_t * f2 = a2;
+  vnet_config_feature_t *f1 = a1;
+  vnet_config_feature_t *f2 = a2;
 
   return (int) f1->feature_index - f2->feature_index;
 }
 
 always_inline u32 *
 vnet_get_config_heap (vnet_config_main_t * cm, u32 ci)
-{ return heap_elt_at_index (cm->config_string_heap, ci); }
-
-u32 vnet_config_add_feature (vlib_main_t * vm,
-			     vnet_config_main_t * cm,
-			     u32 config_string_heap_index,
-			     u32 feature_index,
-			     void * feature_config,
-			     u32 n_feature_config_bytes)
 {
-  vnet_config_t * old, * new;
-  vnet_config_feature_t * new_features, * f;
+  return heap_elt_at_index (cm->config_string_heap, ci);
+}
+
+u32
+vnet_config_add_feature (vlib_main_t * vm,
+			 vnet_config_main_t * cm,
+			 u32 config_string_heap_index,
+			 u32 feature_index,
+			 void *feature_config, u32 n_feature_config_bytes)
+{
+  vnet_config_t *old, *new;
+  vnet_config_feature_t *new_features, *f;
   u32 n_feature_config_u32s;
   u32 node_index = vec_elt (cm->node_index_by_feature_index, feature_index);
 
-  if (node_index == ~0)                 // feature node does not exist
-    return config_string_heap_index;    // return original config index
+  if (node_index == ~0)		// feature node does not exist
+    return config_string_heap_index;	// return original config index
 
   if (config_string_heap_index == ~0)
     {
@@ -252,7 +257,7 @@
     }
   else
     {
-      u32 * p = vnet_get_config_heap (cm, config_string_heap_index);
+      u32 *p = vnet_get_config_heap (cm, config_string_heap_index);
       old = pool_elt_at_index (cm->config_pool, p[-1]);
       new_features = old->features;
       if (new_features)
@@ -263,9 +268,12 @@
   f->feature_index = feature_index;
   f->node_index = node_index;
 
-  n_feature_config_u32s = round_pow2 (n_feature_config_bytes, sizeof (f->feature_config[0])) / sizeof (f->feature_config[0]);
+  n_feature_config_u32s =
+    round_pow2 (n_feature_config_bytes,
+		sizeof (f->feature_config[0])) /
+    sizeof (f->feature_config[0]);
   vec_add (f->feature_config, feature_config, n_feature_config_u32s);
-  
+
   /* Sort (prioritize) features. */
   if (vec_len (new_features) > 1)
     vec_sort_with_function (new_features, feature_cmp);
@@ -276,50 +284,54 @@
   new = find_config_with_features (vm, cm, new_features);
   new->reference_count += 1;
 
-  /* 
-   * User gets pointer to config string first element 
+  /*
+   * User gets pointer to config string first element
    * (which defines the pool index
-   * this config string comes from). 
+   * this config string comes from).
    */
   vec_validate (cm->config_pool_index_by_user_index,
-                new->config_string_heap_index + 1);
-  cm->config_pool_index_by_user_index [new->config_string_heap_index + 1]
-      = new - cm->config_pool;
+		new->config_string_heap_index + 1);
+  cm->config_pool_index_by_user_index[new->config_string_heap_index + 1]
+    = new - cm->config_pool;
   return new->config_string_heap_index + 1;
 }
 
-u32 vnet_config_del_feature (vlib_main_t * vm,
-			     vnet_config_main_t * cm,
-			     u32 config_string_heap_index,
-			     u32 feature_index,
-			     void * feature_config,
-			     u32 n_feature_config_bytes)
+u32
+vnet_config_del_feature (vlib_main_t * vm,
+			 vnet_config_main_t * cm,
+			 u32 config_string_heap_index,
+			 u32 feature_index,
+			 void *feature_config, u32 n_feature_config_bytes)
 {
-  vnet_config_t * old, * new;
-  vnet_config_feature_t * new_features, * f;
+  vnet_config_t *old, *new;
+  vnet_config_feature_t *new_features, *f;
   u32 n_feature_config_u32s;
 
   {
-    u32 * p = vnet_get_config_heap (cm, config_string_heap_index);
+    u32 *p = vnet_get_config_heap (cm, config_string_heap_index);
 
     old = pool_elt_at_index (cm->config_pool, p[-1]);
   }
 
-  n_feature_config_u32s = round_pow2 (n_feature_config_bytes, sizeof (f->feature_config[0])) / sizeof (f->feature_config[0]);
+  n_feature_config_u32s =
+    round_pow2 (n_feature_config_bytes,
+		sizeof (f->feature_config[0])) /
+    sizeof (f->feature_config[0]);
 
   /* Find feature with same index and opaque data. */
   vec_foreach (f, old->features)
-    {
-      if (f->feature_index == feature_index
-	  && vec_len (f->feature_config) == n_feature_config_u32s
-	  && (n_feature_config_u32s == 0
-	      || ! memcmp (f->feature_config, feature_config, n_feature_config_bytes)))
-	break;
-    }
+  {
+    if (f->feature_index == feature_index
+	&& vec_len (f->feature_config) == n_feature_config_u32s
+	&& (n_feature_config_u32s == 0
+	    || !memcmp (f->feature_config, feature_config,
+			n_feature_config_bytes)))
+      break;
+  }
 
   /* Feature not found. */
   if (f >= vec_end (old->features))
-    return config_string_heap_index;    // return original config index
+    return config_string_heap_index;	// return original config index
 
   new_features = duplicate_feature_vector (old->features);
   f = new_features + (f - old->features);
@@ -327,7 +339,7 @@
   vec_delete (new_features, 1, f - new_features);
 
   /* must remove old from config_pool now as it may be expanded and change
-     memory location if the following function find_config_with_features() 
+     memory location if the following function find_config_with_features()
      adds a new config because none of existing config's has matching features
      and so can be reused */
   remove_reference (cm, old);
@@ -335,8 +347,16 @@
   new->reference_count += 1;
 
   vec_validate (cm->config_pool_index_by_user_index,
-                new->config_string_heap_index + 1);
-  cm->config_pool_index_by_user_index [new->config_string_heap_index + 1]
-      = new - cm->config_pool;
+		new->config_string_heap_index + 1);
+  cm->config_pool_index_by_user_index[new->config_string_heap_index + 1]
+    = new - cm->config_pool;
   return new->config_string_heap_index + 1;
 }
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
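
A sketch of how the add entry point above is typically driven from a control-plane path: the caller owns a per-interface config index (starting at ~0, per the "no config yet" handling above) and stores back whatever, possibly shared, index find_config_with_features() ends up producing. The feature-config type and wrapper function are hypothetical:

  /* Illustrative only: enable one feature on one interface. */
  typedef struct { u32 mode; } my_feature_config_t;

  static void
  enable_my_feature (vlib_main_t * vm, vnet_config_main_t * cm,
                     u32 * ci,          /* per-interface config index */
                     u32 feature_index, u32 mode)
  {
    my_feature_config_t cfg = { .mode = mode };

    *ci = vnet_config_add_feature (vm, cm, *ci, feature_index,
                                   &cfg, sizeof (cfg));
  }
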
diff --git a/vnet/vnet/config.h b/vnet/vnet/config.h
index 3d507c7..d80ff19 100644
--- a/vnet/vnet/config.h
+++ b/vnet/vnet/config.h
@@ -43,7 +43,8 @@
 #include <vlib/vlib.h>
 #include <vppinfra/heap.h>
 
-typedef struct {
+typedef struct
+{
   /* Features are prioritized by index.  Smaller indices get
      performed first. */
   u32 feature_index;
@@ -55,19 +56,22 @@
   u32 next_index;
 
   /* Opaque per feature configuration data. */
-  u32 * feature_config;
+  u32 *feature_config;
 } vnet_config_feature_t;
 
 always_inline void
 vnet_config_feature_free (vnet_config_feature_t * f)
-{ vec_free (f->feature_config); }
+{
+  vec_free (f->feature_config);
+}
 
-typedef struct {
+typedef struct
+{
   /* Sorted vector of features for this configuration. */
-  vnet_config_feature_t * features;
+  vnet_config_feature_t *features;
 
   /* Config string as vector for hashing. */
-  u32 * config_string_vector;
+  u32 *config_string_vector;
 
   /* Config string including all next indices and feature data as a vector. */
   u32 config_string_heap_index, config_string_heap_handle;
@@ -79,36 +83,36 @@
   u32 reference_count;
 } vnet_config_t;
 
-typedef struct {
+typedef struct
+{
   /* Pool of configs.  Index 0 is always null config and is never deleted. */
-  vnet_config_t * config_pool;
+  vnet_config_t *config_pool;
 
   /* Hash table mapping vector config string to config pool index. */
-  uword * config_string_hash;
+  uword *config_string_hash;
 
-  /* Global heap of configuration data. */ 
-  u32 * config_string_heap;
+  /* Global heap of configuration data. */
+  u32 *config_string_heap;
 
   /* Node index which starts/ends feature processing. */
-  u32 * start_node_indices, end_node_index;
+  u32 *start_node_indices, end_node_index;
 
   /* Interior feature processing nodes (not including start and end nodes). */
-  u32 * node_index_by_feature_index;
+  u32 *node_index_by_feature_index;
 
   /* vnet_config pool index by user index */
-  u32 * config_pool_index_by_user_index;
+  u32 *config_pool_index_by_user_index;
 
   /* Temporary vector for holding config strings.  Used to avoid continually
      allocating vectors. */
-  u32 * config_string_temp;
+  u32 *config_string_temp;
 } vnet_config_main_t;
 
 always_inline void
 vnet_config_free (vnet_config_main_t * cm, vnet_config_t * c)
 {
-  vnet_config_feature_t * f;
-  vec_foreach (f, c->features)
-    vnet_config_feature_free (f);
+  vnet_config_feature_t *f;
+  vec_foreach (f, c->features) vnet_config_feature_free (f);
   vec_free (c->features);
   heap_dealloc (cm->config_string_heap, c->config_string_heap_handle);
   vec_free (c->config_string_vector);
@@ -116,11 +120,9 @@
 
 always_inline void *
 vnet_get_config_data (vnet_config_main_t * cm,
-		      u32 * config_index,
-		      u32 * next_index,
-		      u32 n_data_bytes)
+		      u32 * config_index, u32 * next_index, u32 n_data_bytes)
 {
-  u32 i, n, * d;
+  u32 i, n, *d;
 
   i = *config_index;
 
@@ -140,24 +142,31 @@
 
 void vnet_config_init (vlib_main_t * vm,
 		       vnet_config_main_t * cm,
-		       char * start_node_names[],
+		       char *start_node_names[],
 		       int n_start_node_names,
-		       char * feature_node_names[],
-		       int n_feature_node_names);
+		       char *feature_node_names[], int n_feature_node_names);
 
 /* Calls to add/delete features from configurations. */
 u32 vnet_config_add_feature (vlib_main_t * vm,
 			     vnet_config_main_t * cm,
 			     u32 config_id,
 			     u32 feature_index,
-			     void * feature_config,
+			     void *feature_config,
 			     u32 n_feature_config_bytes);
 
 u32 vnet_config_del_feature (vlib_main_t * vm,
 			     vnet_config_main_t * cm,
 			     u32 config_id,
 			     u32 feature_index,
-			     void * feature_config,
+			     void *feature_config,
 			     u32 n_feature_config_bytes);
 
 #endif /* included_vnet_config_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
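
On the data-plane side, a feature node walks the config string with vnet_get_config_data(), which stores the next-node index through *next_index, returns a pointer to the per-feature opaque data, and advances *config_index past both. A sketch of the per-packet read, modelled on the cop.current_config_index opaque member seen in buffer.h above; the config struct and helper are hypothetical:

  /* Illustrative per-packet lookup inside a feature node; cm points at
   * the feature arc's vnet_config_main_t. */
  typedef struct { u32 mode; } my_feature_config_t;

  always_inline u32
  my_feature_lookup (vnet_config_main_t * cm, vlib_buffer_t * b0,
                     my_feature_config_t ** cfg0)
  {
    u32 next0;

    *cfg0 = vnet_get_config_data (cm,
                                  &vnet_buffer (b0)->cop.current_config_index,
                                  &next0, sizeof (my_feature_config_t));
    return next0;
  }
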
diff --git a/vnet/vnet/dpdk_replication.h b/vnet/vnet/dpdk_replication.h
index bf9bf99..07a076c 100644
--- a/vnet/vnet/dpdk_replication.h
+++ b/vnet/vnet/dpdk_replication.h
@@ -3,12 +3,12 @@
 #include <vnet/devices/dpdk/dpdk.h>
 
 /*
- * vlib_dpdk_clone_buffer - clone a buffer 
+ * vlib_dpdk_clone_buffer - clone a buffer
  * for port mirroring, lawful intercept, etc.
  * rte_pktmbuf_clone (...) requires that the forwarding path
  * not touch any of the cloned data. The hope is that we'll
- * figure out how to relax that restriction. 
- * 
+ * figure out how to relax that restriction.
+ *
  * For the moment, copy packet data.
  */
 
@@ -16,92 +16,100 @@
 vlib_dpdk_clone_buffer (vlib_main_t * vm, vlib_buffer_t * b)
 {
   u32 new_buffers_needed = 1;
-  unsigned socket_id = rte_socket_id();
+  unsigned socket_id = rte_socket_id ();
   struct rte_mempool *rmp = vm->buffer_main->pktmbuf_pools[socket_id];
   struct rte_mbuf *rte_mbufs[5];
-  vlib_buffer_free_list_t * fl;
-  vlib_buffer_t * rv;
-  u8 * copy_src, * copy_dst;
+  vlib_buffer_free_list_t *fl;
+  vlib_buffer_t *rv;
+  u8 *copy_src, *copy_dst;
   vlib_buffer_t *src_buf, *dst_buf;
 
   fl = vlib_buffer_get_free_list (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
 
-  if (PREDICT_FALSE(b->flags & VLIB_BUFFER_NEXT_PRESENT))
+  if (PREDICT_FALSE (b->flags & VLIB_BUFFER_NEXT_PRESENT))
     {
       vlib_buffer_t *tmp = b;
       int i;
 
       while (tmp->flags & VLIB_BUFFER_NEXT_PRESENT)
-        {
-          new_buffers_needed ++;
-          tmp = vlib_get_buffer (vm, tmp->next_buffer);
-        }
+	{
+	  new_buffers_needed++;
+	  tmp = vlib_get_buffer (vm, tmp->next_buffer);
+	}
 
       /* Should never happen... */
-      if (PREDICT_FALSE(new_buffers_needed > ARRAY_LEN(rte_mbufs)))
-        {
-          clib_warning ("need %d buffers", new_buffers_needed);
-          return 0;
-        }
-      
-      if (rte_mempool_get_bulk (rmp, (void **)rte_mbufs, 
-                                new_buffers_needed) < 0)
-        return 0;
+      if (PREDICT_FALSE (new_buffers_needed > ARRAY_LEN (rte_mbufs)))
+	{
+	  clib_warning ("need %d buffers", new_buffers_needed);
+	  return 0;
+	}
+
+      if (rte_mempool_get_bulk (rmp, (void **) rte_mbufs,
+				new_buffers_needed) < 0)
+	return 0;
 
       src_buf = b;
-      rv = dst_buf = vlib_buffer_from_rte_mbuf(rte_mbufs[0]);
+      rv = dst_buf = vlib_buffer_from_rte_mbuf (rte_mbufs[0]);
       vlib_buffer_init_for_free_list (dst_buf, fl);
       copy_src = b->data + src_buf->current_data;
       copy_dst = dst_buf->data + src_buf->current_data;
 
       for (i = 0; i < new_buffers_needed; i++)
-        {
-          clib_memcpy (copy_src, copy_dst, src_buf->current_length);
-          dst_buf->current_data = src_buf->current_data;
-          dst_buf->current_length = src_buf->current_length;
-          dst_buf->flags = src_buf->flags;
+	{
+	  clib_memcpy (copy_src, copy_dst, src_buf->current_length);
+	  dst_buf->current_data = src_buf->current_data;
+	  dst_buf->current_length = src_buf->current_length;
+	  dst_buf->flags = src_buf->flags;
 
-          if (i == 0)
-            {
-              dst_buf->total_length_not_including_first_buffer = 
-                src_buf->total_length_not_including_first_buffer;
-              vnet_buffer(dst_buf)->sw_if_index[VLIB_RX] =
-                vnet_buffer(src_buf)->sw_if_index[VLIB_RX];
-              vnet_buffer(dst_buf)->sw_if_index[VLIB_TX] =
-                vnet_buffer(src_buf)->sw_if_index[VLIB_TX];
-              vnet_buffer(dst_buf)->l2 = vnet_buffer(b)->l2;
-            }
+	  if (i == 0)
+	    {
+	      dst_buf->total_length_not_including_first_buffer =
+		src_buf->total_length_not_including_first_buffer;
+	      vnet_buffer (dst_buf)->sw_if_index[VLIB_RX] =
+		vnet_buffer (src_buf)->sw_if_index[VLIB_RX];
+	      vnet_buffer (dst_buf)->sw_if_index[VLIB_TX] =
+		vnet_buffer (src_buf)->sw_if_index[VLIB_TX];
+	      vnet_buffer (dst_buf)->l2 = vnet_buffer (b)->l2;
+	    }
 
-          if (i < new_buffers_needed - 1)
-            {
-              src_buf = vlib_get_buffer (vm, src_buf->next_buffer);
-              dst_buf = vlib_buffer_from_rte_mbuf(rte_mbufs[i+1]);
-              vlib_buffer_init_for_free_list (dst_buf, fl);
-              copy_src = src_buf->data;
-              copy_dst = dst_buf->data;
-            }
-        }
+	  if (i < new_buffers_needed - 1)
+	    {
+	      src_buf = vlib_get_buffer (vm, src_buf->next_buffer);
+	      dst_buf = vlib_buffer_from_rte_mbuf (rte_mbufs[i + 1]);
+	      vlib_buffer_init_for_free_list (dst_buf, fl);
+	      copy_src = src_buf->data;
+	      copy_dst = dst_buf->data;
+	    }
+	}
       return rv;
     }
 
-  if (rte_mempool_get_bulk (rmp, (void **)rte_mbufs, 1) < 0)
+  if (rte_mempool_get_bulk (rmp, (void **) rte_mbufs, 1) < 0)
     return 0;
 
-  rv = vlib_buffer_from_rte_mbuf(rte_mbufs[0]);
+  rv = vlib_buffer_from_rte_mbuf (rte_mbufs[0]);
   vlib_buffer_init_for_free_list (rv, fl);
 
-  clib_memcpy(rv->data + b->current_data, b->data + b->current_data, 
-         b->current_length);
+  clib_memcpy (rv->data + b->current_data, b->data + b->current_data,
+	       b->current_length);
   rv->current_data = b->current_data;
   rv->current_length = b->current_length;
-  vnet_buffer(rv)->sw_if_index[VLIB_RX] =
-    vnet_buffer(b)->sw_if_index[VLIB_RX];
-  vnet_buffer(rv)->sw_if_index[VLIB_TX] =
-    vnet_buffer(b)->sw_if_index[VLIB_TX];
-  vnet_buffer(rv)->l2 = vnet_buffer(b)->l2;
+  vnet_buffer (rv)->sw_if_index[VLIB_RX] =
+    vnet_buffer (b)->sw_if_index[VLIB_RX];
+  vnet_buffer (rv)->sw_if_index[VLIB_TX] =
+    vnet_buffer (b)->sw_if_index[VLIB_TX];
+  vnet_buffer (rv)->l2 = vnet_buffer (b)->l2;
 
   return (rv);
 }
 
 
 #endif /* __included_dpdk_replication_h__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
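
A minimal usage sketch for the helper above, roughly what a port-mirroring path would do with the clone; mirror_sw_if_index and the surrounding enqueue step are assumptions, not part of this patch:

  /* Illustrative: clone b0 toward a mirror port and return the clone's
   * buffer index, or ~0 on failure.  The original stays on its path. */
  static inline u32
  mirror_clone (vlib_main_t * vm, vlib_buffer_t * b0, u32 mirror_sw_if_index)
  {
    vlib_buffer_t *c0 = vlib_dpdk_clone_buffer (vm, b0);

    if (c0 == 0)
      return ~0;

    vnet_buffer (c0)->sw_if_index[VLIB_TX] = mirror_sw_if_index;
    return vlib_get_buffer_index (vm, c0);
  }
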
diff --git a/vnet/vnet/global_funcs.h b/vnet/vnet/global_funcs.h
index 3958d88..92a5c04 100644
--- a/vnet/vnet/global_funcs.h
+++ b/vnet/vnet/global_funcs.h
@@ -19,6 +19,14 @@
 #ifndef included_vnet_global_funcs_h_
 #define included_vnet_global_funcs_h_
 
-vnet_main_t * vnet_get_main (void);
+vnet_main_t *vnet_get_main (void);
 
 #endif /* included_vnet_global_funcs_h_ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vnet/vnet/handoff.c b/vnet/vnet/handoff.c
index 28968c9..67fc641 100644
--- a/vnet/vnet/handoff.c
+++ b/vnet/vnet/handoff.c
@@ -19,40 +19,45 @@
 #include <vlib/threads.h>
 #include <vnet/handoff.h>
 
-typedef struct {
-  uword * workers_bitmap;
-  u32 * workers;
+typedef struct
+{
+  uword *workers_bitmap;
+  u32 *workers;
 } per_inteface_handoff_data_t;
 
-typedef struct {
+typedef struct
+{
   u32 cached_next_index;
   u32 num_workers;
   u32 first_worker_index;
 
-  per_inteface_handoff_data_t * if_data;
+  per_inteface_handoff_data_t *if_data;
 
   /* convenience variables */
-  vlib_main_t * vlib_main;
-  vnet_main_t * vnet_main;
+  vlib_main_t *vlib_main;
+  vnet_main_t *vnet_main;
 } handoff_main_t;
 
 handoff_main_t handoff_main;
 
-typedef struct {
+typedef struct
+{
   u32 sw_if_index;
   u32 next_worker_index;
   u32 buffer_index;
 } worker_handoff_trace_t;
 
 /* packet trace format function */
-static u8 * format_worker_handoff_trace (u8 * s, va_list * args)
+static u8 *
+format_worker_handoff_trace (u8 * s, va_list * args)
 {
   CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
-  worker_handoff_trace_t * t = va_arg (*args, worker_handoff_trace_t *);
+  worker_handoff_trace_t *t = va_arg (*args, worker_handoff_trace_t *);
 
-  s = format (s, "worker-handoff: sw_if_index %d, next_worker %d, buffer 0x%x",
-              t->sw_if_index, t->next_worker_index, t->buffer_index);
+  s =
+    format (s, "worker-handoff: sw_if_index %d, next_worker %d, buffer 0x%x",
+	    t->sw_if_index, t->next_worker_index, t->buffer_index);
   return s;
 }
 
@@ -60,27 +65,27 @@
 
 static uword
 worker_handoff_node_fn (vlib_main_t * vm,
-			vlib_node_runtime_t * node,
-			vlib_frame_t * frame)
+			vlib_node_runtime_t * node, vlib_frame_t * frame)
 {
-  handoff_main_t * hm = &handoff_main;
-  vlib_thread_main_t * tm = vlib_get_thread_main();
-  u32 n_left_from, * from;
-  static __thread vlib_frame_queue_elt_t ** handoff_queue_elt_by_worker_index;
-  static __thread vlib_frame_queue_t ** congested_handoff_queue_by_worker_index = 0;
-  vlib_frame_queue_elt_t * hf = 0;
+  handoff_main_t *hm = &handoff_main;
+  vlib_thread_main_t *tm = vlib_get_thread_main ();
+  u32 n_left_from, *from;
+  static __thread vlib_frame_queue_elt_t **handoff_queue_elt_by_worker_index;
+  static __thread vlib_frame_queue_t **congested_handoff_queue_by_worker_index
+    = 0;
+  vlib_frame_queue_elt_t *hf = 0;
   int i;
-  u32 n_left_to_next_worker = 0, * to_next_worker = 0;
+  u32 n_left_to_next_worker = 0, *to_next_worker = 0;
   u32 next_worker_index = 0;
   u32 current_worker_index = ~0;
 
-  if (PREDICT_FALSE(handoff_queue_elt_by_worker_index == 0))
+  if (PREDICT_FALSE (handoff_queue_elt_by_worker_index == 0))
     {
       vec_validate (handoff_queue_elt_by_worker_index, tm->n_vlib_mains - 1);
 
       vec_validate_init_empty (congested_handoff_queue_by_worker_index,
-                               hm->first_worker_index + hm->num_workers - 1,
-                               (vlib_frame_queue_t *)(~0));
+			       hm->first_worker_index + hm->num_workers - 1,
+			       (vlib_frame_queue_t *) (~0));
     }
 
   from = vlib_frame_vector_args (frame);
@@ -89,11 +94,11 @@
   while (n_left_from > 0)
     {
       u32 bi0;
-      vlib_buffer_t * b0;
+      vlib_buffer_t *b0;
       u32 sw_if_index0;
       u32 hash;
       u64 hash_key;
-      per_inteface_handoff_data_t * ihd0;
+      per_inteface_handoff_data_t *ihd0;
       u32 index0;
 
       bi0 = from[0];
@@ -101,7 +106,7 @@
       n_left_from -= 1;
 
       b0 = vlib_get_buffer (vm, bi0);
-      sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];
+      sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
       ASSERT (hm->if_data);
       ihd0 = vec_elt_at_index (hm->if_data, sw_if_index0);
 
@@ -117,13 +122,17 @@
       hash = (u32) clib_xxhash (hash_key);
 
       /* if input node did not specify next index, then packet
-	 should go to eternet-input */
+         should go to ethernet-input */
       if (PREDICT_FALSE ((b0->flags & BUFFER_HANDOFF_NEXT_VALID) == 0))
-        vnet_buffer(b0)->handoff.next_index = HANDOFF_DISPATCH_NEXT_ETHERNET_INPUT;
-      else if (vnet_buffer(b0)->handoff.next_index == HANDOFF_DISPATCH_NEXT_IP4_INPUT ||
-	       vnet_buffer(b0)->handoff.next_index == HANDOFF_DISPATCH_NEXT_IP6_INPUT ||
-	       vnet_buffer(b0)->handoff.next_index == HANDOFF_DISPATCH_NEXT_MPLS_INPUT)
-	vlib_buffer_advance (b0, (sizeof(ethernet_header_t)));
+	vnet_buffer (b0)->handoff.next_index =
+	  HANDOFF_DISPATCH_NEXT_ETHERNET_INPUT;
+      else if (vnet_buffer (b0)->handoff.next_index ==
+	       HANDOFF_DISPATCH_NEXT_IP4_INPUT
+	       || vnet_buffer (b0)->handoff.next_index ==
+	       HANDOFF_DISPATCH_NEXT_IP6_INPUT
+	       || vnet_buffer (b0)->handoff.next_index ==
+	       HANDOFF_DISPATCH_NEXT_MPLS_INPUT)
+	vlib_buffer_advance (b0, (sizeof (ethernet_header_t)));
 
       if (PREDICT_TRUE (is_pow2 (vec_len (ihd0->workers))))
 	index0 = hash & (vec_len (ihd0->workers) - 1);
@@ -137,8 +146,8 @@
 	  if (hf)
 	    hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_worker;
 
-	  hf = dpdk_get_handoff_queue_elt(next_worker_index,
-					  handoff_queue_elt_by_worker_index);
+	  hf = dpdk_get_handoff_queue_elt (next_worker_index,
+					   handoff_queue_elt_by_worker_index);
 
 	  n_left_to_next_worker = VLIB_FRAME_SIZE - hf->n_vectors;
 	  to_next_worker = &hf->buffer_index[hf->n_vectors];
@@ -153,17 +162,17 @@
       if (n_left_to_next_worker == 0)
 	{
 	  hf->n_vectors = VLIB_FRAME_SIZE;
-	  vlib_put_handoff_queue_elt(hf);
+	  vlib_put_handoff_queue_elt (hf);
 	  current_worker_index = ~0;
 	  handoff_queue_elt_by_worker_index[next_worker_index] = 0;
 	  hf = 0;
 	}
 
-      if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE)
-			&& (b0->flags & VLIB_BUFFER_IS_TRACED)))
+      if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
+			 && (b0->flags & VLIB_BUFFER_IS_TRACED)))
 	{
 	  worker_handoff_trace_t *t =
-	     vlib_add_trace (vm, node, b0, sizeof (*t));
+	    vlib_add_trace (vm, node, b0, sizeof (*t));
 	  t->sw_if_index = sw_if_index0;
 	  t->next_worker_index = next_worker_index - hm->first_worker_index;
 	  t->buffer_index = bi0;
@@ -178,27 +187,29 @@
   for (i = 0; i < vec_len (handoff_queue_elt_by_worker_index); i++)
     {
       if (handoff_queue_elt_by_worker_index[i])
-        {
-          hf = handoff_queue_elt_by_worker_index[i];
-          /*
-           * It works better to let the handoff node
-           * rate-adapt, always ship the handoff queue element.
-           */
-          if (1 || hf->n_vectors == hf->last_n_vectors)
-            {
-              vlib_put_handoff_queue_elt(hf);
-              handoff_queue_elt_by_worker_index[i] = 0;
-            }
-          else
-            hf->last_n_vectors = hf->n_vectors;
-        }
-      congested_handoff_queue_by_worker_index[i] = (vlib_frame_queue_t *)(~0);
+	{
+	  hf = handoff_queue_elt_by_worker_index[i];
+	  /*
+	   * It works better to let the handoff node
+	   * rate-adapt, always ship the handoff queue element.
+	   */
+	  if (1 || hf->n_vectors == hf->last_n_vectors)
+	    {
+	      vlib_put_handoff_queue_elt (hf);
+	      handoff_queue_elt_by_worker_index[i] = 0;
+	    }
+	  else
+	    hf->last_n_vectors = hf->n_vectors;
+	}
+      congested_handoff_queue_by_worker_index[i] =
+	(vlib_frame_queue_t *) (~0);
     }
   hf = 0;
   current_worker_index = ~0;
   return frame->n_vectors;
 }
 
+/* *INDENT-OFF* */
 VLIB_REGISTER_NODE (worker_handoff_node) = {
   .function = worker_handoff_node_fn,
   .name = "worker-handoff",
@@ -208,35 +219,34 @@
 
   .n_next_nodes = 1,
   .next_nodes = {
-        [0] = "error-drop",
+    [0] = "error-drop",
   },
 };
+/* *INDENT-ON* */
 
 VLIB_NODE_FUNCTION_MULTIARCH (worker_handoff_node, worker_handoff_node_fn)
-
-int interface_handoff_enable_disable (vlib_main_t * vm, u32 sw_if_index,
-                                      uword * bitmap, int enable_disable)
+     int interface_handoff_enable_disable (vlib_main_t * vm, u32 sw_if_index,
+					   uword * bitmap, int enable_disable)
 {
-  handoff_main_t * hm = &handoff_main;
-  vnet_sw_interface_t * sw;
-  vnet_main_t * vnm = vnet_get_main();
-  per_inteface_handoff_data_t * d;
+  handoff_main_t *hm = &handoff_main;
+  vnet_sw_interface_t *sw;
+  vnet_main_t *vnm = vnet_get_main ();
+  per_inteface_handoff_data_t *d;
   int i, rv;
   u32 node_index = enable_disable ? worker_handoff_node.index : ~0;
 
-  if (pool_is_free_index (vnm->interface_main.sw_interfaces,
-                          sw_if_index))
+  if (pool_is_free_index (vnm->interface_main.sw_interfaces, sw_if_index))
     return VNET_API_ERROR_INVALID_SW_IF_INDEX;
 
   sw = vnet_get_sw_interface (vnm, sw_if_index);
   if (sw->type != VNET_SW_INTERFACE_TYPE_HARDWARE)
     return VNET_API_ERROR_INVALID_SW_IF_INDEX;
 
-  if (clib_bitmap_last_set(bitmap) >= hm->num_workers)
+  if (clib_bitmap_last_set (bitmap) >= hm->num_workers)
     return VNET_API_ERROR_INVALID_WORKER;
 
   vec_validate (hm->if_data, sw_if_index);
-  d = vec_elt_at_index(hm->if_data, sw_if_index);
+  d = vec_elt_at_index (hm->if_data, sw_if_index);
 
   vec_free (d->workers);
   vec_free (d->workers_bitmap);
@@ -244,10 +254,12 @@
   if (enable_disable)
     {
       d->workers_bitmap = bitmap;
+      /* *INDENT-OFF* */
       clib_bitmap_foreach (i, bitmap,
 	({
 	  vec_add1(d->workers, i);
 	}));
+      /* *INDENT-ON* */
     }
 
   rv = vnet_hw_interface_rx_redirect_to_node (vnm, sw_if_index, node_index);
@@ -256,27 +268,27 @@
 
 static clib_error_t *
 set_interface_handoff_command_fn (vlib_main_t * vm,
-                                  unformat_input_t * input,
-                                  vlib_cli_command_t * cmd)
+				  unformat_input_t * input,
+				  vlib_cli_command_t * cmd)
 {
   u32 sw_if_index = ~0;
   int enable_disable = 1;
-  uword * bitmap = 0;
+  uword *bitmap = 0;
 
   int rv = 0;
 
-  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) {
-    if (unformat (input, "disable"))
-      enable_disable = 0;
-    else if (unformat (input, "workers %U", unformat_bitmap_list,
-		       &bitmap))
-      ;
-    else if (unformat (input, "%U", unformat_vnet_sw_interface,
-                       vnet_get_main(), &sw_if_index))
-      ;
-    else
-      break;
-  }
+  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (input, "disable"))
+	enable_disable = 0;
+      else if (unformat (input, "workers %U", unformat_bitmap_list, &bitmap))
+	;
+      else if (unformat (input, "%U", unformat_vnet_sw_interface,
+			 vnet_get_main (), &sw_if_index))
+	;
+      else
+	break;
+    }
 
   if (sw_if_index == ~0)
     return clib_error_return (0, "Please specify an interface...");
@@ -284,9 +296,12 @@
   if (bitmap == 0)
     return clib_error_return (0, "Please specify list of workers...");
 
-  rv = interface_handoff_enable_disable (vm, sw_if_index, bitmap, enable_disable);
+  rv =
+    interface_handoff_enable_disable (vm, sw_if_index, bitmap,
+				      enable_disable);
 
-  switch(rv) {
+  switch (rv)
+    {
     case 0:
       break;
 
@@ -299,39 +314,42 @@
       break;
 
     case VNET_API_ERROR_UNIMPLEMENTED:
-      return clib_error_return (0, "Device driver doesn't support redirection");
+      return clib_error_return (0,
+				"Device driver doesn't support redirection");
       break;
 
     default:
       return clib_error_return (0, "unknown return value %d", rv);
-  }
+    }
   return 0;
 }
 
+/* *INDENT-OFF* */
 VLIB_CLI_COMMAND (set_interface_handoff_command, static) = {
-    .path = "set interface handoff",
-    .short_help =
-    "set interface handoff <interface-name> workers <workers-list>",
-    .function = set_interface_handoff_command_fn,
+  .path = "set interface handoff",
+  .short_help =
+  "set interface handoff <interface-name> workers <workers-list>",
+  .function = set_interface_handoff_command_fn,
 };
+/* *INDENT-ON* */
 
-typedef struct {
+typedef struct
+{
   u32 buffer_index;
   u32 next_index;
   u32 sw_if_index;
 } handoff_dispatch_trace_t;
 
 /* packet trace format function */
-static u8 * format_handoff_dispatch_trace (u8 * s, va_list * args)
+static u8 *
+format_handoff_dispatch_trace (u8 * s, va_list * args)
 {
   CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
-  handoff_dispatch_trace_t * t = va_arg (*args, handoff_dispatch_trace_t *);
+  handoff_dispatch_trace_t *t = va_arg (*args, handoff_dispatch_trace_t *);
 
   s = format (s, "handoff-dispatch: sw_if_index %d next_index %d buffer 0x%x",
-      t->sw_if_index,
-      t->next_index,
-      t->buffer_index);
+	      t->sw_if_index, t->next_index, t->buffer_index);
   return s;
 }
 
@@ -341,14 +359,15 @@
 #define foreach_handoff_dispatch_error \
 _(EXAMPLE, "example packets")
 
-typedef enum {
+typedef enum
+{
 #define _(sym,str) HANDOFF_DISPATCH_ERROR_##sym,
   foreach_handoff_dispatch_error
 #undef _
-  HANDOFF_DISPATCH_N_ERROR,
+    HANDOFF_DISPATCH_N_ERROR,
 } handoff_dispatch_error_t;
 
-static char * handoff_dispatch_error_strings[] = {
+static char *handoff_dispatch_error_strings[] = {
 #define _(sym,string) string,
   foreach_handoff_dispatch_error
 #undef _
@@ -356,10 +375,9 @@
 
 static uword
 handoff_dispatch_node_fn (vlib_main_t * vm,
-		  vlib_node_runtime_t * node,
-		  vlib_frame_t * frame)
+			  vlib_node_runtime_t * node, vlib_frame_t * frame)
 {
-  u32 n_left_from, * from, * to_next;
+  u32 n_left_from, *from, *to_next;
   handoff_dispatch_next_t next_index;
 
   from = vlib_frame_vector_args (frame);
@@ -370,19 +388,18 @@
     {
       u32 n_left_to_next;
 
-      vlib_get_next_frame (vm, node, next_index,
-			   to_next, n_left_to_next);
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
 
       while (n_left_from >= 4 && n_left_to_next >= 2)
 	{
-          u32 bi0, bi1;
-	  vlib_buffer_t * b0, * b1;
-          u32 next0, next1;
-          u32 sw_if_index0, sw_if_index1;
+	  u32 bi0, bi1;
+	  vlib_buffer_t *b0, *b1;
+	  u32 next0, next1;
+	  u32 sw_if_index0, sw_if_index1;
 
 	  /* Prefetch next iteration. */
 	  {
-	    vlib_buffer_t * p2, * p3;
+	    vlib_buffer_t *p2, *p3;
 
 	    p2 = vlib_get_buffer (vm, from[2]);
 	    p3 = vlib_get_buffer (vm, from[3]);
@@ -391,7 +408,7 @@
 	    vlib_prefetch_buffer_header (p3, LOAD);
 	  }
 
-          /* speculatively enqueue b0 and b1 to the current next frame */
+	  /* speculatively enqueue b0 and b1 to the current next frame */
 	  to_next[0] = bi0 = from[0];
 	  to_next[1] = bi1 = from[1];
 	  from += 2;
@@ -402,47 +419,49 @@
 	  b0 = vlib_get_buffer (vm, bi0);
 	  b1 = vlib_get_buffer (vm, bi1);
 
-          next0 = vnet_buffer(b0)->handoff.next_index;
-          next1 = vnet_buffer(b1)->handoff.next_index;
+	  next0 = vnet_buffer (b0)->handoff.next_index;
+	  next1 = vnet_buffer (b1)->handoff.next_index;
 
-          if (PREDICT_FALSE(vm->trace_main.trace_active_hint))
-            {
-            if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
-              {
-                vlib_trace_buffer (vm, node, next0, b0, /* follow_chain */ 0);
-                handoff_dispatch_trace_t *t =
-                  vlib_add_trace (vm, node, b0, sizeof (*t));
-                sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];
-                t->sw_if_index = sw_if_index0;
-                t->next_index = next0;
-                t->buffer_index = bi0;
-              }
-            if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
-              {
-                vlib_trace_buffer (vm, node, next1, b1, /* follow_chain */ 0);
-                handoff_dispatch_trace_t *t =
-                  vlib_add_trace (vm, node, b1, sizeof (*t));
-                sw_if_index1 = vnet_buffer(b1)->sw_if_index[VLIB_RX];
-                t->sw_if_index = sw_if_index1;
-                t->next_index = next1;
-                t->buffer_index = bi1;
-              }
-            }
+	  if (PREDICT_FALSE (vm->trace_main.trace_active_hint))
+	    {
+	      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+		{
+		  vlib_trace_buffer (vm, node, next0, b0,	/* follow_chain */
+				     0);
+		  handoff_dispatch_trace_t *t =
+		    vlib_add_trace (vm, node, b0, sizeof (*t));
+		  sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+		  t->sw_if_index = sw_if_index0;
+		  t->next_index = next0;
+		  t->buffer_index = bi0;
+		}
+	      if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
+		{
+		  vlib_trace_buffer (vm, node, next1, b1,	/* follow_chain */
+				     0);
+		  handoff_dispatch_trace_t *t =
+		    vlib_add_trace (vm, node, b1, sizeof (*t));
+		  sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
+		  t->sw_if_index = sw_if_index1;
+		  t->next_index = next1;
+		  t->buffer_index = bi1;
+		}
+	    }
 
-          /* verify speculative enqueues, maybe switch current next frame */
-          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
-                                           to_next, n_left_to_next,
-                                           bi0, bi1, next0, next1);
-        }
+	  /* verify speculative enqueues, maybe switch current next frame */
+	  vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+					   to_next, n_left_to_next,
+					   bi0, bi1, next0, next1);
+	}
 
       while (n_left_from > 0 && n_left_to_next > 0)
 	{
-          u32 bi0;
-	  vlib_buffer_t * b0;
-          u32 next0;
-          u32 sw_if_index0;
+	  u32 bi0;
+	  vlib_buffer_t *b0;
+	  u32 next0;
+	  u32 sw_if_index0;
 
-          /* speculatively enqueue b0 to the current next frame */
+	  /* speculatively enqueue b0 to the current next frame */
 	  bi0 = from[0];
 	  to_next[0] = bi0;
 	  from += 1;
@@ -452,23 +471,24 @@
 
 	  b0 = vlib_get_buffer (vm, bi0);
 
-          next0 = vnet_buffer(b0)->handoff.next_index;
+	  next0 = vnet_buffer (b0)->handoff.next_index;
 
-          if (PREDICT_FALSE(vm->trace_main.trace_active_hint))
-            {
-            if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
-              {
-                vlib_trace_buffer (vm, node, next0, b0, /* follow_chain */ 0);
-                handoff_dispatch_trace_t *t =
-                  vlib_add_trace (vm, node, b0, sizeof (*t));
-                sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];
-                t->sw_if_index = sw_if_index0;
-                t->next_index = next0;
-                t->buffer_index = bi0;
-              }
-            }
+	  if (PREDICT_FALSE (vm->trace_main.trace_active_hint))
+	    {
+	      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+		{
+		  vlib_trace_buffer (vm, node, next0, b0,	/* follow_chain */
+				     0);
+		  handoff_dispatch_trace_t *t =
+		    vlib_add_trace (vm, node, b0, sizeof (*t));
+		  sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+		  t->sw_if_index = sw_if_index0;
+		  t->next_index = next0;
+		  t->buffer_index = bi0;
+		}
+	    }
 
-          /* verify speculative enqueue, maybe switch current next frame */
+	  /* verify speculative enqueue, maybe switch current next frame */
 	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
 					   to_next, n_left_to_next,
 					   bi0, next0);
@@ -480,6 +500,7 @@
   return frame->n_vectors;
 }
 
+/* *INDENT-OFF* */
 VLIB_REGISTER_NODE (handoff_dispatch_node) = {
   .function = handoff_dispatch_node_fn,
   .name = "handoff-dispatch",
@@ -494,37 +515,37 @@
   .n_next_nodes = HANDOFF_DISPATCH_N_NEXT,
 
   .next_nodes = {
-        [HANDOFF_DISPATCH_NEXT_DROP] = "error-drop",
-        [HANDOFF_DISPATCH_NEXT_ETHERNET_INPUT] = "ethernet-input",
-        [HANDOFF_DISPATCH_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
-        [HANDOFF_DISPATCH_NEXT_IP6_INPUT] = "ip6-input",
-        [HANDOFF_DISPATCH_NEXT_MPLS_INPUT] = "mpls-gre-input",
+    [HANDOFF_DISPATCH_NEXT_DROP] = "error-drop",
+    [HANDOFF_DISPATCH_NEXT_ETHERNET_INPUT] = "ethernet-input",
+    [HANDOFF_DISPATCH_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
+    [HANDOFF_DISPATCH_NEXT_IP6_INPUT] = "ip6-input",
+    [HANDOFF_DISPATCH_NEXT_MPLS_INPUT] = "mpls-gre-input",
   },
 };
+/* *INDENT-ON* */
 
 VLIB_NODE_FUNCTION_MULTIARCH (handoff_dispatch_node, handoff_dispatch_node_fn)
-
-clib_error_t *handoff_init (vlib_main_t *vm)
+     clib_error_t *handoff_init (vlib_main_t * vm)
 {
-  handoff_main_t * hm = &handoff_main;
-  vlib_thread_main_t * tm = vlib_get_thread_main();
-  clib_error_t * error;
-  uword * p;
+  handoff_main_t *hm = &handoff_main;
+  vlib_thread_main_t *tm = vlib_get_thread_main ();
+  clib_error_t *error;
+  uword *p;
 
   if ((error = vlib_call_init_function (vm, threads_init)))
     return error;
 
-  vlib_thread_registration_t * tr;
+  vlib_thread_registration_t *tr;
   /* Only the standard vnet worker threads are supported */
   p = hash_get_mem (tm->thread_registrations_by_name, "workers");
   if (p)
     {
       tr = (vlib_thread_registration_t *) p[0];
       if (tr)
-        {
-          hm->num_workers = tr->count;
-          hm->first_worker_index = tr->first_index;
-        }
+	{
+	  hm->num_workers = tr->count;
+	  hm->first_worker_index = tr->first_index;
+	}
     }
 
   hm->vlib_main = vm;
@@ -537,3 +558,11 @@
 }
 
 VLIB_INIT_FUNCTION (handoff_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
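
For reference, a sketch of driving the enable/disable entry point above directly, equivalent to the CLI "set interface handoff <interface-name> workers 1-2"; sw_if_index is assumed to be already known, and on success the bitmap is handed over to (and later freed by) the handoff code:

  /* Illustrative: steer RX traffic from sw_if_index to workers 1 and 2. */
  static int
  handoff_to_workers_1_2 (vlib_main_t * vm, u32 sw_if_index)
  {
    uword *bitmap = 0;

    bitmap = clib_bitmap_set (bitmap, 1, 1);
    bitmap = clib_bitmap_set (bitmap, 2, 1);

    return interface_handoff_enable_disable (vm, sw_if_index, bitmap,
                                             1 /* enable */ );
  }
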
diff --git a/vnet/vnet/handoff.h b/vnet/vnet/handoff.h
index e0938eb..0083263 100644
--- a/vnet/vnet/handoff.h
+++ b/vnet/vnet/handoff.h
@@ -22,7 +22,8 @@
 #include <vnet/ip/ip6_packet.h>
 #include <vnet/mpls-gre/packet.h>
 
-typedef enum {
+typedef enum
+{
   HANDOFF_DISPATCH_NEXT_IP4_INPUT,
   HANDOFF_DISPATCH_NEXT_IP6_INPUT,
   HANDOFF_DISPATCH_NEXT_MPLS_INPUT,
@@ -31,10 +32,10 @@
   HANDOFF_DISPATCH_N_NEXT,
 } handoff_dispatch_next_t;
 
-static inline
-void vlib_put_handoff_queue_elt (vlib_frame_queue_elt_t * hf)
+static inline void
+vlib_put_handoff_queue_elt (vlib_frame_queue_elt_t * hf)
 {
-  CLIB_MEMORY_BARRIER();
+  CLIB_MEMORY_BARRIER ();
   hf->valid = 1;
 }
 
@@ -52,9 +53,9 @@
 
   /* Wait until a ring slot is available */
   while (new_tail >= fq->head_hint + fq->nelts)
-      vlib_worker_thread_barrier_check ();
+    vlib_worker_thread_barrier_check ();
 
-  elt = fq->elts + (new_tail & (fq->nelts-1));
+  elt = fq->elts + (new_tail & (fq->nelts - 1));
 
   /* this would be very bad... */
   while (elt->valid)
@@ -67,28 +68,29 @@
 }
 
 static inline vlib_frame_queue_t *
-is_vlib_handoff_queue_congested (
-    u32 vlib_worker_index,
-    u32 queue_hi_thresh,
-    vlib_frame_queue_t ** handoff_queue_by_worker_index)
+is_vlib_handoff_queue_congested (u32 vlib_worker_index,
+				 u32 queue_hi_thresh,
+				 vlib_frame_queue_t **
+				 handoff_queue_by_worker_index)
 {
   vlib_frame_queue_t *fq;
 
-  fq = handoff_queue_by_worker_index [vlib_worker_index];
-  if (fq != (vlib_frame_queue_t *)(~0))
-      return fq;
+  fq = handoff_queue_by_worker_index[vlib_worker_index];
+  if (fq != (vlib_frame_queue_t *) (~0))
+    return fq;
 
   fq = vlib_frame_queues[vlib_worker_index];
   ASSERT (fq);
 
-  if (PREDICT_FALSE(fq->tail >= (fq->head_hint + queue_hi_thresh))) {
-    /* a valid entry in the array will indicate the queue has reached
-     * the specified threshold and is congested
-     */
-    handoff_queue_by_worker_index [vlib_worker_index] = fq;
-    fq->enqueue_full_events++;
-    return fq;
-  }
+  if (PREDICT_FALSE (fq->tail >= (fq->head_hint + queue_hi_thresh)))
+    {
+      /* a valid entry in the array will indicate the queue has reached
+       * the specified threshold and is congested
+       */
+      handoff_queue_by_worker_index[vlib_worker_index] = fq;
+      fq->enqueue_full_events++;
+      return fq;
+    }
 
   return NULL;
 }
@@ -96,133 +98,175 @@
 static inline vlib_frame_queue_elt_t *
 dpdk_get_handoff_queue_elt (u32 vlib_worker_index,
 			    vlib_frame_queue_elt_t **
-			      handoff_queue_elt_by_worker_index)
+			    handoff_queue_elt_by_worker_index)
 {
   vlib_frame_queue_elt_t *elt;
 
-  if (handoff_queue_elt_by_worker_index [vlib_worker_index])
-      return handoff_queue_elt_by_worker_index [vlib_worker_index];
+  if (handoff_queue_elt_by_worker_index[vlib_worker_index])
+    return handoff_queue_elt_by_worker_index[vlib_worker_index];
 
   elt = vlib_get_handoff_queue_elt (vlib_worker_index);
 
-  handoff_queue_elt_by_worker_index [vlib_worker_index] = elt;
+  handoff_queue_elt_by_worker_index[vlib_worker_index] = elt;
 
   return elt;
 }
 
-static inline u64 ipv4_get_key (ip4_header_t *ip)
+static inline u64
+ipv4_get_key (ip4_header_t * ip)
 {
-   u64  hash_key;
+  u64 hash_key;
 
-   hash_key = *((u64*)(&ip->address_pair)) ^ ip->protocol;
+  hash_key = *((u64 *) (&ip->address_pair)) ^ ip->protocol;
 
-   return hash_key;
+  return hash_key;
 }
 
-static inline u64 ipv6_get_key (ip6_header_t *ip)
+static inline u64
+ipv6_get_key (ip6_header_t * ip)
 {
-   u64  hash_key;
+  u64 hash_key;
 
-   hash_key = ip->src_address.as_u64[0] ^
-              rotate_left(ip->src_address.as_u64[1],13) ^
-              rotate_left(ip->dst_address.as_u64[0],26) ^
-              rotate_left(ip->dst_address.as_u64[1],39) ^
-              ip->protocol;
+  hash_key = ip->src_address.as_u64[0] ^
+    rotate_left (ip->src_address.as_u64[1], 13) ^
+    rotate_left (ip->dst_address.as_u64[0], 26) ^
+    rotate_left (ip->dst_address.as_u64[1], 39) ^ ip->protocol;
 
-   return hash_key;
+  return hash_key;
 }
 
 #define MPLS_BOTTOM_OF_STACK_BIT_MASK   0x00000100U
 #define MPLS_LABEL_MASK                 0xFFFFF000U
 
-static inline u64 mpls_get_key (mpls_unicast_header_t *m)
+static inline u64
+mpls_get_key (mpls_unicast_header_t * m)
 {
-   u64                     hash_key;
-   u8                      ip_ver;
+  u64 hash_key;
+  u8 ip_ver;
 
 
-   /* find the bottom of the MPLS label stack. */
-   if (PREDICT_TRUE(m->label_exp_s_ttl &
-                    clib_net_to_host_u32(MPLS_BOTTOM_OF_STACK_BIT_MASK))) {
-       goto bottom_lbl_found;
-   }
-   m++;
+  /* find the bottom of the MPLS label stack. */
+  if (PREDICT_TRUE (m->label_exp_s_ttl &
+		    clib_net_to_host_u32 (MPLS_BOTTOM_OF_STACK_BIT_MASK)))
+    {
+      goto bottom_lbl_found;
+    }
+  m++;
 
-   if (PREDICT_TRUE(m->label_exp_s_ttl &
-                    clib_net_to_host_u32(MPLS_BOTTOM_OF_STACK_BIT_MASK))) {
-       goto bottom_lbl_found;
-   }
-   m++;
+  if (PREDICT_TRUE (m->label_exp_s_ttl &
+		    clib_net_to_host_u32 (MPLS_BOTTOM_OF_STACK_BIT_MASK)))
+    {
+      goto bottom_lbl_found;
+    }
+  m++;
 
-   if (m->label_exp_s_ttl & clib_net_to_host_u32(MPLS_BOTTOM_OF_STACK_BIT_MASK)) {
-       goto bottom_lbl_found;
-   }
-   m++;
+  if (m->label_exp_s_ttl &
+      clib_net_to_host_u32 (MPLS_BOTTOM_OF_STACK_BIT_MASK))
+    {
+      goto bottom_lbl_found;
+    }
+  m++;
 
-   if (m->label_exp_s_ttl & clib_net_to_host_u32(MPLS_BOTTOM_OF_STACK_BIT_MASK)) {
-       goto bottom_lbl_found;
-   }
-   m++;
+  if (m->label_exp_s_ttl &
+      clib_net_to_host_u32 (MPLS_BOTTOM_OF_STACK_BIT_MASK))
+    {
+      goto bottom_lbl_found;
+    }
+  m++;
 
-   if (m->label_exp_s_ttl & clib_net_to_host_u32(MPLS_BOTTOM_OF_STACK_BIT_MASK)) {
-       goto bottom_lbl_found;
-   }
+  if (m->label_exp_s_ttl &
+      clib_net_to_host_u32 (MPLS_BOTTOM_OF_STACK_BIT_MASK))
+    {
+      goto bottom_lbl_found;
+    }
 
-   /* the bottom label was not found - use the last label */
-   hash_key = m->label_exp_s_ttl & clib_net_to_host_u32(MPLS_LABEL_MASK);
+  /* the bottom label was not found - use the last label */
+  hash_key = m->label_exp_s_ttl & clib_net_to_host_u32 (MPLS_LABEL_MASK);
 
-   return hash_key;
+  return hash_key;
 
 bottom_lbl_found:
-   m++;
-   ip_ver = (*((u8 *)m) >> 4);
+  m++;
+  ip_ver = (*((u8 *) m) >> 4);
 
-   /* find out if it is IPV4 or IPV6 header */
-   if (PREDICT_TRUE(ip_ver == 4)) {
-       hash_key = ipv4_get_key((ip4_header_t *)m);
-   } else if (PREDICT_TRUE(ip_ver == 6)) {
-       hash_key = ipv6_get_key((ip6_header_t *)m);
-   } else {
-       /* use the bottom label */
-       hash_key = (m-1)->label_exp_s_ttl & clib_net_to_host_u32(MPLS_LABEL_MASK);
-   }
+  /* find out if it is IPV4 or IPV6 header */
+  if (PREDICT_TRUE (ip_ver == 4))
+    {
+      hash_key = ipv4_get_key ((ip4_header_t *) m);
+    }
+  else if (PREDICT_TRUE (ip_ver == 6))
+    {
+      hash_key = ipv6_get_key ((ip6_header_t *) m);
+    }
+  else
+    {
+      /* use the bottom label */
+      hash_key =
+	(m - 1)->label_exp_s_ttl & clib_net_to_host_u32 (MPLS_LABEL_MASK);
+    }
 
-   return hash_key;
+  return hash_key;
 
 }
 
 
 static inline u64
-eth_get_key (ethernet_header_t *h0)
+eth_get_key (ethernet_header_t * h0)
 {
-   u64 hash_key;
+  u64 hash_key;
 
-   if (PREDICT_TRUE(h0->type) == clib_host_to_net_u16(ETHERNET_TYPE_IP4)) {
-       hash_key = ipv4_get_key((ip4_header_t *)(h0+1));
-   } else if (h0->type == clib_host_to_net_u16(ETHERNET_TYPE_IP6)) {
-       hash_key = ipv6_get_key((ip6_header_t *)(h0+1));
-   } else if (h0->type == clib_host_to_net_u16(ETHERNET_TYPE_MPLS_UNICAST)) {
-       hash_key = mpls_get_key((mpls_unicast_header_t *)(h0+1));
-   } else if ((h0->type == clib_host_to_net_u16(ETHERNET_TYPE_VLAN)) ||
-              (h0->type == clib_host_to_net_u16(ETHERNET_TYPE_DOT1AD))) {
-       ethernet_vlan_header_t * outer = (ethernet_vlan_header_t *)(h0 + 1);
+  if (PREDICT_TRUE (h0->type) == clib_host_to_net_u16 (ETHERNET_TYPE_IP4))
+    {
+      hash_key = ipv4_get_key ((ip4_header_t *) (h0 + 1));
+    }
+  else if (h0->type == clib_host_to_net_u16 (ETHERNET_TYPE_IP6))
+    {
+      hash_key = ipv6_get_key ((ip6_header_t *) (h0 + 1));
+    }
+  else if (h0->type == clib_host_to_net_u16 (ETHERNET_TYPE_MPLS_UNICAST))
+    {
+      hash_key = mpls_get_key ((mpls_unicast_header_t *) (h0 + 1));
+    }
+  else if ((h0->type == clib_host_to_net_u16 (ETHERNET_TYPE_VLAN)) ||
+	   (h0->type == clib_host_to_net_u16 (ETHERNET_TYPE_DOT1AD)))
+    {
+      ethernet_vlan_header_t *outer = (ethernet_vlan_header_t *) (h0 + 1);
 
-       outer = (outer->type == clib_host_to_net_u16(ETHERNET_TYPE_VLAN)) ?
-                                  outer+1 : outer;
-       if (PREDICT_TRUE(outer->type) == clib_host_to_net_u16(ETHERNET_TYPE_IP4)) {
-           hash_key = ipv4_get_key((ip4_header_t *)(outer+1));
-       } else if (outer->type == clib_host_to_net_u16 (ETHERNET_TYPE_IP6)) {
-           hash_key = ipv6_get_key((ip6_header_t *)(outer+1));
-       } else if (outer->type == clib_host_to_net_u16(ETHERNET_TYPE_MPLS_UNICAST)) {
-           hash_key = mpls_get_key((mpls_unicast_header_t *)(outer+1));
-       }  else {
-           hash_key = outer->type;
-       }
-   } else {
-       hash_key  = 0;
-   }
+      outer = (outer->type == clib_host_to_net_u16 (ETHERNET_TYPE_VLAN)) ?
+	outer + 1 : outer;
+      if (PREDICT_TRUE (outer->type) ==
+	  clib_host_to_net_u16 (ETHERNET_TYPE_IP4))
+	{
+	  hash_key = ipv4_get_key ((ip4_header_t *) (outer + 1));
+	}
+      else if (outer->type == clib_host_to_net_u16 (ETHERNET_TYPE_IP6))
+	{
+	  hash_key = ipv6_get_key ((ip6_header_t *) (outer + 1));
+	}
+      else if (outer->type ==
+	       clib_host_to_net_u16 (ETHERNET_TYPE_MPLS_UNICAST))
+	{
+	  hash_key = mpls_get_key ((mpls_unicast_header_t *) (outer + 1));
+	}
+      else
+	{
+	  hash_key = outer->type;
+	}
+    }
+  else
+    {
+      hash_key = 0;
+    }
 
-   return hash_key;
+  return hash_key;
 }
 
 #endif /* included_vnet_handoff_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
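The re-indentation of eth_get_key () above also makes a pre-existing detail easier to spot, left untouched here since this change is formatting only: PREDICT_TRUE () wraps just h0->type (and later outer->type), so the comparison itself sits outside the hint. Assuming PREDICT_TRUE (x) expands to __builtin_expect ((x), 1) in the usual clib way, the intended form is presumably:

    if (PREDICT_TRUE (h0->type == clib_host_to_net_u16 (ETHERNET_TYPE_IP4)))
      hash_key = ipv4_get_key ((ip4_header_t *) (h0 + 1));

The computed key is the same either way; only the placement of the branch hint differs, so it stays out of scope for a whitespace-only patch.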
diff --git a/vnet/vnet/interface.c b/vnet/vnet/interface.c
index 24f9cbc..3a12085 100644
--- a/vnet/vnet/interface.c
+++ b/vnet/vnet/interface.c
@@ -43,18 +43,23 @@
 #define VNET_INTERFACE_SET_FLAGS_HELPER_IS_CREATE (1 << 0)
 #define VNET_INTERFACE_SET_FLAGS_HELPER_WANT_REDISTRIBUTE (1 << 1)
 
-static clib_error_t *
-vnet_hw_interface_set_flags_helper (vnet_main_t * vnm, u32 hw_if_index, u32 flags,
-				    u32 helper_flags);
+static clib_error_t *vnet_hw_interface_set_flags_helper (vnet_main_t * vnm,
+							 u32 hw_if_index,
+							 u32 flags,
+							 u32 helper_flags);
 
-static clib_error_t *
-vnet_sw_interface_set_flags_helper (vnet_main_t * vnm, u32 sw_if_index, u32 flags,
-				    u32 helper_flags);
+static clib_error_t *vnet_sw_interface_set_flags_helper (vnet_main_t * vnm,
+							 u32 sw_if_index,
+							 u32 flags,
+							 u32 helper_flags);
 
-static clib_error_t *
-vnet_hw_interface_set_class_helper (vnet_main_t * vnm, u32 hw_if_index, u32 hw_class_index, u32 redistribute);
+static clib_error_t *vnet_hw_interface_set_class_helper (vnet_main_t * vnm,
+							 u32 hw_if_index,
+							 u32 hw_class_index,
+							 u32 redistribute);
 
-typedef struct {
+typedef struct
+{
   /* Either sw or hw interface index. */
   u32 sw_hw_if_index;
 
@@ -62,35 +67,47 @@
   u32 flags;
 } vnet_sw_hw_interface_state_t;
 
-static void serialize_vec_vnet_sw_hw_interface_state (serialize_main_t * m, va_list * va)
+static void
+serialize_vec_vnet_sw_hw_interface_state (serialize_main_t * m, va_list * va)
 {
-    vnet_sw_hw_interface_state_t * s = va_arg (*va, vnet_sw_hw_interface_state_t *);
-    u32 n = va_arg (*va, u32);
-    u32 i;
-    for (i = 0; i < n; i++) {
-        serialize_integer (m, s[i].sw_hw_if_index, sizeof (s[i].sw_hw_if_index));
-        serialize_integer (m, s[i].flags, sizeof (s[i].flags));
+  vnet_sw_hw_interface_state_t *s =
+    va_arg (*va, vnet_sw_hw_interface_state_t *);
+  u32 n = va_arg (*va, u32);
+  u32 i;
+  for (i = 0; i < n; i++)
+    {
+      serialize_integer (m, s[i].sw_hw_if_index,
+			 sizeof (s[i].sw_hw_if_index));
+      serialize_integer (m, s[i].flags, sizeof (s[i].flags));
     }
 }
 
-static void unserialize_vec_vnet_sw_hw_interface_state (serialize_main_t * m, va_list * va)
+static void
+unserialize_vec_vnet_sw_hw_interface_state (serialize_main_t * m,
+					    va_list * va)
 {
-    vnet_sw_hw_interface_state_t * s = va_arg (*va, vnet_sw_hw_interface_state_t *);
-    u32 n = va_arg (*va, u32);
-    u32 i;
-    for (i = 0; i < n; i++) {
-        unserialize_integer (m, &s[i].sw_hw_if_index, sizeof (s[i].sw_hw_if_index));
-        unserialize_integer (m, &s[i].flags, sizeof (s[i].flags));
+  vnet_sw_hw_interface_state_t *s =
+    va_arg (*va, vnet_sw_hw_interface_state_t *);
+  u32 n = va_arg (*va, u32);
+  u32 i;
+  for (i = 0; i < n; i++)
+    {
+      unserialize_integer (m, &s[i].sw_hw_if_index,
+			   sizeof (s[i].sw_hw_if_index));
+      unserialize_integer (m, &s[i].flags, sizeof (s[i].flags));
     }
 }
 
-static void serialize_vnet_sw_hw_interface_set_flags (serialize_main_t * m, va_list * va)
+static void
+serialize_vnet_sw_hw_interface_set_flags (serialize_main_t * m, va_list * va)
 {
-  vnet_sw_hw_interface_state_t * s = va_arg (*va, vnet_sw_hw_interface_state_t *);
+  vnet_sw_hw_interface_state_t *s =
+    va_arg (*va, vnet_sw_hw_interface_state_t *);
   serialize (m, serialize_vec_vnet_sw_hw_interface_state, s, 1);
 }
 
-static void unserialize_vnet_sw_interface_set_flags (serialize_main_t * m, va_list * va)
+static void
+unserialize_vnet_sw_interface_set_flags (serialize_main_t * m, va_list * va)
 {
   CLIB_UNUSED (mc_main_t * mc) = va_arg (*va, mc_main_t *);
   vnet_sw_hw_interface_state_t s;
@@ -98,11 +115,12 @@
   unserialize (m, unserialize_vec_vnet_sw_hw_interface_state, &s, 1);
 
   vnet_sw_interface_set_flags_helper
-    (vnet_get_main(), s.sw_hw_if_index, s.flags,
+    (vnet_get_main (), s.sw_hw_if_index, s.flags,
      /* helper_flags no redistribution */ 0);
 }
 
-static void unserialize_vnet_hw_interface_set_flags (serialize_main_t * m, va_list * va)
+static void
+unserialize_vnet_hw_interface_set_flags (serialize_main_t * m, va_list * va)
 {
   CLIB_UNUSED (mc_main_t * mc) = va_arg (*va, mc_main_t *);
   vnet_sw_hw_interface_state_t s;
@@ -110,38 +128,42 @@
   unserialize (m, unserialize_vec_vnet_sw_hw_interface_state, &s, 1);
 
   vnet_hw_interface_set_flags_helper
-    (vnet_get_main(), s.sw_hw_if_index, s.flags,
+    (vnet_get_main (), s.sw_hw_if_index, s.flags,
      /* helper_flags no redistribution */ 0);
 }
 
-MC_SERIALIZE_MSG (vnet_sw_interface_set_flags_msg, static) = {
-  .name = "vnet_sw_interface_set_flags",
-  .serialize = serialize_vnet_sw_hw_interface_set_flags,
-  .unserialize = unserialize_vnet_sw_interface_set_flags,
-};
-
-MC_SERIALIZE_MSG (vnet_hw_interface_set_flags_msg, static) = {
-  .name = "vnet_hw_interface_set_flags",
-  .serialize = serialize_vnet_sw_hw_interface_set_flags,
-  .unserialize = unserialize_vnet_hw_interface_set_flags,
-};
-
-void serialize_vnet_interface_state (serialize_main_t * m, va_list * va)
+MC_SERIALIZE_MSG (vnet_sw_interface_set_flags_msg, static) =
 {
-  vnet_main_t * vnm = va_arg (*va, vnet_main_t *);
-  vnet_sw_hw_interface_state_t * sts = 0, * st;
-  vnet_sw_interface_t * sif;
-  vnet_hw_interface_t * hif;
-  vnet_interface_main_t * im = &vnm->interface_main;
+.name = "vnet_sw_interface_set_flags",.serialize =
+    serialize_vnet_sw_hw_interface_set_flags,.unserialize =
+    unserialize_vnet_sw_interface_set_flags,};
+
+MC_SERIALIZE_MSG (vnet_hw_interface_set_flags_msg, static) =
+{
+.name = "vnet_hw_interface_set_flags",.serialize =
+    serialize_vnet_sw_hw_interface_set_flags,.unserialize =
+    unserialize_vnet_hw_interface_set_flags,};
+
+void
+serialize_vnet_interface_state (serialize_main_t * m, va_list * va)
+{
+  vnet_main_t *vnm = va_arg (*va, vnet_main_t *);
+  vnet_sw_hw_interface_state_t *sts = 0, *st;
+  vnet_sw_interface_t *sif;
+  vnet_hw_interface_t *hif;
+  vnet_interface_main_t *im = &vnm->interface_main;
 
   /* Serialize hardware interface classes since they may have changed.
      Must do this before sending up/down flags. */
+  /* *INDENT-OFF* */
   pool_foreach (hif, im->hw_interfaces, ({
     vnet_hw_interface_class_t * hw_class = vnet_get_hw_interface_class (vnm, hif->hw_class_index);
     serialize_cstring (m, hw_class->name);
   }));
+  /* *INDENT-ON* */
 
   /* Send sw/hw interface state when non-zero. */
+  /* *INDENT-OFF* */
   pool_foreach (sif, im->sw_interfaces, ({
     if (sif->flags != 0)
       {
@@ -150,12 +172,14 @@
 	st->flags = sif->flags;
       }
   }));
+  /* *INDENT-ON* */
 
   vec_serialize (m, sts, serialize_vec_vnet_sw_hw_interface_state);
 
   if (sts)
     _vec_len (sts) = 0;
 
+  /* *INDENT-OFF* */
   pool_foreach (hif, im->hw_interfaces, ({
     if (hif->flags != 0)
       {
@@ -164,25 +188,28 @@
 	st->flags = hif->flags;
       }
   }));
+  /* *INDENT-ON* */
 
   vec_serialize (m, sts, serialize_vec_vnet_sw_hw_interface_state);
 
   vec_free (sts);
 }
 
-void unserialize_vnet_interface_state (serialize_main_t * m, va_list * va)
+void
+unserialize_vnet_interface_state (serialize_main_t * m, va_list * va)
 {
-  vnet_main_t * vnm = va_arg (*va, vnet_main_t *);
-  vnet_sw_hw_interface_state_t * sts = 0, * st;
+  vnet_main_t *vnm = va_arg (*va, vnet_main_t *);
+  vnet_sw_hw_interface_state_t *sts = 0, *st;
 
   /* First set interface hardware class. */
   {
-    vnet_interface_main_t * im = &vnm->interface_main;
-    vnet_hw_interface_t * hif;
-    char * class_name;
-    uword * p;
-    clib_error_t * error;
+    vnet_interface_main_t *im = &vnm->interface_main;
+    vnet_hw_interface_t *hif;
+    char *class_name;
+    uword *p;
+    clib_error_t *error;
 
+    /* *INDENT-OFF* */
     pool_foreach (hif, im->hw_interfaces, ({
       unserialize_cstring (m, &class_name);
       p = hash_get_mem (im->hw_interface_class_by_name, class_name);
@@ -192,6 +219,7 @@
 	clib_error_report (error);
       vec_free (class_name);
     }));
+    /* *INDENT-ON* */
   }
 
   vec_unserialize (m, &sts, unserialize_vec_vnet_sw_hw_interface_state);
@@ -208,48 +236,56 @@
 }
 
 static clib_error_t *
-call_elf_section_interface_callbacks (vnet_main_t * vnm, u32 if_index, 
-                                      u32 flags, 
-                                      _vnet_interface_function_list_elt_t *elt)
+call_elf_section_interface_callbacks (vnet_main_t * vnm, u32 if_index,
+				      u32 flags,
+				      _vnet_interface_function_list_elt_t *
+				      elt)
 {
-  clib_error_t * error = 0;
+  clib_error_t *error = 0;
 
   while (elt)
     {
-      error = elt->fp(vnm, if_index, flags);
+      error = elt->fp (vnm, if_index, flags);
       if (error)
-        return error;
+	return error;
       elt = elt->next_interface_function;
     }
   return error;
 }
 
 static clib_error_t *
-call_hw_interface_add_del_callbacks (vnet_main_t * vnm, u32 hw_if_index, u32 is_create)
+call_hw_interface_add_del_callbacks (vnet_main_t * vnm, u32 hw_if_index,
+				     u32 is_create)
 {
-  vnet_hw_interface_t * hi = vnet_get_hw_interface (vnm, hw_if_index);
-  vnet_hw_interface_class_t * hw_class = vnet_get_hw_interface_class (vnm, hi->hw_class_index);
-  vnet_device_class_t * dev_class = vnet_get_device_class (vnm, hi->dev_class_index);
-  clib_error_t * error = 0;
+  vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, hw_if_index);
+  vnet_hw_interface_class_t *hw_class =
+    vnet_get_hw_interface_class (vnm, hi->hw_class_index);
+  vnet_device_class_t *dev_class =
+    vnet_get_device_class (vnm, hi->dev_class_index);
+  clib_error_t *error = 0;
 
   if (hw_class->interface_add_del_function
-      && (error = hw_class->interface_add_del_function (vnm, hw_if_index, is_create)))
+      && (error =
+	  hw_class->interface_add_del_function (vnm, hw_if_index, is_create)))
     return error;
 
   if (dev_class->interface_add_del_function
-      && (error = dev_class->interface_add_del_function (vnm, hw_if_index, is_create)))
+      && (error =
+	  dev_class->interface_add_del_function (vnm, hw_if_index,
+						 is_create)))
     return error;
 
-  error = call_elf_section_interface_callbacks 
+  error = call_elf_section_interface_callbacks
     (vnm, hw_if_index, is_create, vnm->hw_interface_add_del_functions);
 
   return error;
 }
 
 static clib_error_t *
-call_sw_interface_add_del_callbacks (vnet_main_t * vnm, u32 sw_if_index, u32 is_create)
+call_sw_interface_add_del_callbacks (vnet_main_t * vnm, u32 sw_if_index,
+				     u32 is_create)
 {
-  return call_elf_section_interface_callbacks 
+  return call_elf_section_interface_callbacks
     (vnm, sw_if_index, is_create, vnm->sw_interface_add_del_functions);
 }
 
@@ -257,19 +293,23 @@
 #define VNET_INTERFACE_SET_FLAGS_HELPER_WANT_REDISTRIBUTE (1 << 1)
 
 static clib_error_t *
-vnet_hw_interface_set_flags_helper (vnet_main_t * vnm, u32 hw_if_index, u32 flags,
-				    u32 helper_flags)
+vnet_hw_interface_set_flags_helper (vnet_main_t * vnm, u32 hw_if_index,
+				    u32 flags, u32 helper_flags)
 {
-  vnet_hw_interface_t * hi = vnet_get_hw_interface (vnm, hw_if_index);
-  vnet_hw_interface_class_t * hw_class = vnet_get_hw_interface_class (vnm, hi->hw_class_index);
-  vnet_device_class_t * dev_class = vnet_get_device_class (vnm, hi->dev_class_index);
-  vlib_main_t * vm = vnm->vlib_main;
+  vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, hw_if_index);
+  vnet_hw_interface_class_t *hw_class =
+    vnet_get_hw_interface_class (vnm, hi->hw_class_index);
+  vnet_device_class_t *dev_class =
+    vnet_get_device_class (vnm, hi->dev_class_index);
+  vlib_main_t *vm = vnm->vlib_main;
   u32 mask;
-  clib_error_t * error = 0;
-  u32 is_create = (helper_flags & VNET_INTERFACE_SET_FLAGS_HELPER_IS_CREATE) != 0;
+  clib_error_t *error = 0;
+  u32 is_create =
+    (helper_flags & VNET_INTERFACE_SET_FLAGS_HELPER_IS_CREATE) != 0;
 
-  mask = (VNET_HW_INTERFACE_FLAG_LINK_UP | VNET_HW_INTERFACE_FLAG_DUPLEX_MASK |
-	  VNET_HW_INTERFACE_FLAG_SPEED_MASK);
+  mask =
+    (VNET_HW_INTERFACE_FLAG_LINK_UP | VNET_HW_INTERFACE_FLAG_DUPLEX_MASK |
+     VNET_HW_INTERFACE_FLAG_SPEED_MASK);
   flags &= mask;
 
   /* Call hardware interface add/del callbacks. */
@@ -277,12 +317,12 @@
     call_hw_interface_add_del_callbacks (vnm, hw_if_index, is_create);
 
   /* Already in the desired state? */
-  if (! is_create && (hi->flags & mask) == flags)
+  if (!is_create && (hi->flags & mask) == flags)
     goto done;
 
   /* Some interface classes do not redistribute (e.g. are local). */
-  if (! dev_class->redistribute)
-    helper_flags &= ~ VNET_INTERFACE_SET_FLAGS_HELPER_WANT_REDISTRIBUTE;
+  if (!dev_class->redistribute)
+    helper_flags &= ~VNET_INTERFACE_SET_FLAGS_HELPER_WANT_REDISTRIBUTE;
 
   if (vm->mc_main
       && (helper_flags & VNET_INTERFACE_SET_FLAGS_HELPER_WANT_REDISTRIBUTE))
@@ -302,9 +342,10 @@
 						       flags)))
 	goto done;
 
-      error = call_elf_section_interface_callbacks 
-	(vnm, hw_if_index, is_create, vnm->hw_interface_link_up_down_functions);
-      
+      error = call_elf_section_interface_callbacks
+	(vnm, hw_if_index, is_create,
+	 vnm->hw_interface_link_up_down_functions);
+
       if (error)
 	goto done;
     }
@@ -312,19 +353,20 @@
   hi->flags &= ~mask;
   hi->flags |= flags;
 
- done:
+done:
   return error;
 }
 
 static clib_error_t *
-vnet_sw_interface_set_flags_helper (vnet_main_t * vnm, u32 sw_if_index, u32 flags,
-				    u32 helper_flags)
+vnet_sw_interface_set_flags_helper (vnet_main_t * vnm, u32 sw_if_index,
+				    u32 flags, u32 helper_flags)
 {
-  vnet_sw_interface_t * si = vnet_get_sw_interface (vnm, sw_if_index);
-  vlib_main_t * vm = vnm->vlib_main;
+  vnet_sw_interface_t *si = vnet_get_sw_interface (vnm, sw_if_index);
+  vlib_main_t *vm = vnm->vlib_main;
   u32 mask;
-  clib_error_t * error = 0;
-  u32 is_create = (helper_flags & VNET_INTERFACE_SET_FLAGS_HELPER_IS_CREATE) != 0;
+  clib_error_t *error = 0;
+  u32 is_create =
+    (helper_flags & VNET_INTERFACE_SET_FLAGS_HELPER_IS_CREATE) != 0;
   u32 old_flags;
 
   mask = VNET_SW_INTERFACE_FLAG_ADMIN_UP | VNET_SW_INTERFACE_FLAG_PUNT;
@@ -332,22 +374,25 @@
 
   if (is_create)
     {
-      error = call_sw_interface_add_del_callbacks (vnm, sw_if_index, is_create);
+      error =
+	call_sw_interface_add_del_callbacks (vnm, sw_if_index, is_create);
       if (error)
 	goto done;
 
       if (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP)
-        {
-          /* Notify everyone when the interface is created as admin up */
-          error = call_elf_section_interface_callbacks (vnm, sw_if_index,
-                      flags, vnm->sw_interface_admin_up_down_functions);
-          if (error)
-            goto done;
-        }
+	{
+	  /* Notify everyone when the interface is created as admin up */
+	  error = call_elf_section_interface_callbacks (vnm, sw_if_index,
+							flags,
+							vnm->
+							sw_interface_admin_up_down_functions);
+	  if (error)
+	    goto done;
+	}
     }
   else
     {
-      vnet_sw_interface_t * si_sup = si;
+      vnet_sw_interface_t *si_sup = si;
 
       /* Check that super interface is in correct state. */
       if (si->type == VNET_SW_INTERFACE_TYPE_SUB)
@@ -355,41 +400,49 @@
 	  si_sup = vnet_get_sw_interface (vnm, si->sup_sw_if_index);
 
 	  /* Check to see if we're bringing down the soft interface and if it's parent is up */
-	  if ((flags != (si_sup->flags & mask)) && 
-		  (!((flags == 0) && ((si_sup->flags & mask) == VNET_SW_INTERFACE_FLAG_ADMIN_UP))))
+	  if ((flags != (si_sup->flags & mask)) &&
+	      (!((flags == 0)
+		 && ((si_sup->flags & mask) ==
+		     VNET_SW_INTERFACE_FLAG_ADMIN_UP))))
 	    {
 	      error = clib_error_return (0, "super-interface %U must be %U",
-					 format_vnet_sw_interface_name, vnm, si_sup,
-					 format_vnet_sw_interface_flags, flags);
+					 format_vnet_sw_interface_name, vnm,
+					 si_sup,
+					 format_vnet_sw_interface_flags,
+					 flags);
 	      goto done;
 	    }
 	}
 
       /* Donot change state for slave link of bonded interfaces */
       if (si->flags & VNET_SW_INTERFACE_FLAG_BOND_SLAVE)
-        {
-	  error = clib_error_return 
-	      (0, "not allowed as %U belong to a BondEthernet interface",
-	       format_vnet_sw_interface_name, vnm, si);
+	{
+	  error = clib_error_return
+	    (0, "not allowed as %U belongs to a BondEthernet interface",
+	     format_vnet_sw_interface_name, vnm, si);
 	  goto done;
-        }
+	}
 
       /* Already in the desired state? */
       if ((si->flags & mask) == flags)
 	goto done;
 
       /* Sub-interfaces of hardware interfaces that do no redistribute,
-	 do not redistribute themselves. */
+         do not redistribute themselves. */
       if (si_sup->type == VNET_SW_INTERFACE_TYPE_HARDWARE)
 	{
-	  vnet_hw_interface_t * hi = vnet_get_hw_interface (vnm, si_sup->hw_if_index);
-	  vnet_device_class_t * dev_class = vnet_get_device_class (vnm, hi->dev_class_index);
-	  if (! dev_class->redistribute)
-	    helper_flags &= ~ VNET_INTERFACE_SET_FLAGS_HELPER_WANT_REDISTRIBUTE;
+	  vnet_hw_interface_t *hi =
+	    vnet_get_hw_interface (vnm, si_sup->hw_if_index);
+	  vnet_device_class_t *dev_class =
+	    vnet_get_device_class (vnm, hi->dev_class_index);
+	  if (!dev_class->redistribute)
+	    helper_flags &=
+	      ~VNET_INTERFACE_SET_FLAGS_HELPER_WANT_REDISTRIBUTE;
 	}
 
       if (vm->mc_main
-	  && (helper_flags & VNET_INTERFACE_SET_FLAGS_HELPER_WANT_REDISTRIBUTE))
+	  && (helper_flags &
+	      VNET_INTERFACE_SET_FLAGS_HELPER_WANT_REDISTRIBUTE))
 	{
 	  vnet_sw_hw_interface_state_t s;
 	  s.sw_hw_if_index = sw_if_index;
@@ -397,50 +450,54 @@
 	  mc_serialize (vm->mc_main, &vnet_sw_interface_set_flags_msg, &s);
 	}
 
-      error = call_elf_section_interface_callbacks 
-        (vnm, sw_if_index, flags, vnm->sw_interface_admin_up_down_functions);
+      error = call_elf_section_interface_callbacks
+	(vnm, sw_if_index, flags, vnm->sw_interface_admin_up_down_functions);
 
       if (error)
-        goto done;
+	goto done;
 
       if (si->type == VNET_SW_INTERFACE_TYPE_HARDWARE)
 	{
-	  vnet_hw_interface_t * hi = vnet_get_hw_interface (vnm, si->hw_if_index);
-	  vnet_hw_interface_class_t * hw_class = vnet_get_hw_interface_class (vnm, hi->hw_class_index);
-	  vnet_device_class_t * dev_class = vnet_get_device_class (vnm, hi->dev_class_index);
+	  vnet_hw_interface_t *hi =
+	    vnet_get_hw_interface (vnm, si->hw_if_index);
+	  vnet_hw_interface_class_t *hw_class =
+	    vnet_get_hw_interface_class (vnm, hi->hw_class_index);
+	  vnet_device_class_t *dev_class =
+	    vnet_get_device_class (vnm, hi->dev_class_index);
 
-          /* save the si admin up flag */
-          old_flags = si->flags;
+	  /* save the si admin up flag */
+	  old_flags = si->flags;
 
-          /* update si admin up flag in advance if we are going admin down */
-          if (!(flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP))
-              si->flags &=  ~VNET_SW_INTERFACE_FLAG_ADMIN_UP;
+	  /* update si admin up flag in advance if we are going admin down */
+	  if (!(flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP))
+	    si->flags &= ~VNET_SW_INTERFACE_FLAG_ADMIN_UP;
 
-          if (dev_class->admin_up_down_function
-              && (error = dev_class->admin_up_down_function(vnm,
-                                                            si->hw_if_index,
-                                                            flags)))
-            {
-              /* restore si admin up flag to it's original state on errors */
-              si->flags =  old_flags;
-              goto done;
-            }
+	  if (dev_class->admin_up_down_function
+	      && (error = dev_class->admin_up_down_function (vnm,
+							     si->hw_if_index,
+							     flags)))
+	    {
+	      /* restore si admin up flag to its original state on errors */
+	      si->flags = old_flags;
+	      goto done;
+	    }
 
-          if (hw_class->admin_up_down_function
-              && (error = hw_class->admin_up_down_function(vnm,
-                                                           si->hw_if_index,
-                                                           flags)))
-            {
-              /* restore si admin up flag to it's original state on errors */
-              si->flags =  old_flags;
-              goto done;
-            }
+	  if (hw_class->admin_up_down_function
+	      && (error = hw_class->admin_up_down_function (vnm,
+							    si->hw_if_index,
+							    flags)))
+	    {
+	      /* restore si admin up flag to its original state on errors */
+	      si->flags = old_flags;
+	      goto done;
+	    }
 
 	  /* Admin down implies link down. */
-	  if (! (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP)
+	  if (!(flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP)
 	      && (hi->flags & VNET_HW_INTERFACE_FLAG_LINK_UP))
 	    vnet_hw_interface_set_flags_helper (vnm, si->hw_if_index,
-						hi->flags &~ VNET_HW_INTERFACE_FLAG_LINK_UP,
+						hi->flags &
+						~VNET_HW_INTERFACE_FLAG_LINK_UP,
 						helper_flags);
 	}
     }
@@ -448,7 +505,7 @@
   si->flags &= ~mask;
   si->flags |= flags;
 
- done:
+done:
   return error;
 }
 
@@ -469,10 +526,11 @@
 }
 
 static u32
-vnet_create_sw_interface_no_callbacks (vnet_main_t * vnm, vnet_sw_interface_t * template)
+vnet_create_sw_interface_no_callbacks (vnet_main_t * vnm,
+				       vnet_sw_interface_t * template)
 {
-  vnet_interface_main_t * im = &vnm->interface_main;
-  vnet_sw_interface_t * sw;
+  vnet_interface_main_t *im = &vnm->interface_main;
+  vnet_sw_interface_t *sw;
   u32 sw_if_index;
 
   pool_get (im->sw_interfaces, sw);
@@ -489,7 +547,7 @@
   {
     u32 i;
 
-    vnet_interface_counter_lock(im);
+    vnet_interface_counter_lock (im);
 
     for (i = 0; i < vec_len (im->sw_if_counters); i++)
       {
@@ -499,55 +557,62 @@
 
     for (i = 0; i < vec_len (im->combined_sw_if_counters); i++)
       {
-	vlib_validate_combined_counter (&im->combined_sw_if_counters[i], 
-                                        sw_if_index);
-	vlib_zero_combined_counter (&im->combined_sw_if_counters[i], 
-                                    sw_if_index);
+	vlib_validate_combined_counter (&im->combined_sw_if_counters[i],
+					sw_if_index);
+	vlib_zero_combined_counter (&im->combined_sw_if_counters[i],
+				    sw_if_index);
       }
 
-    vnet_interface_counter_unlock(im);
+    vnet_interface_counter_unlock (im);
   }
 
   return sw_if_index;
 }
 
 clib_error_t *
-vnet_create_sw_interface (vnet_main_t * vnm, vnet_sw_interface_t * template, u32 * sw_if_index)
+vnet_create_sw_interface (vnet_main_t * vnm, vnet_sw_interface_t * template,
+			  u32 * sw_if_index)
 {
-  clib_error_t * error;
-  vnet_hw_interface_t * hi;
-  vnet_device_class_t * dev_class;
+  clib_error_t *error;
+  vnet_hw_interface_t *hi;
+  vnet_device_class_t *dev_class;
 
   hi = vnet_get_sup_hw_interface (vnm, template->sup_sw_if_index);
   dev_class = vnet_get_device_class (vnm, hi->dev_class_index);
 
   if (template->type == VNET_SW_INTERFACE_TYPE_SUB &&
-      dev_class->subif_add_del_function) {
-        error = dev_class->subif_add_del_function (vnm, hi->hw_if_index,
-                                                   (struct vnet_sw_interface_t *) template, 1);
-        if (error)
-          return error;
-  }
+      dev_class->subif_add_del_function)
+    {
+      error = dev_class->subif_add_del_function (vnm, hi->hw_if_index,
+						 (struct vnet_sw_interface_t
+						  *) template, 1);
+      if (error)
+	return error;
+    }
 
   *sw_if_index = vnet_create_sw_interface_no_callbacks (vnm, template);
   error = vnet_sw_interface_set_flags_helper
     (vnm, *sw_if_index, template->flags,
      VNET_INTERFACE_SET_FLAGS_HELPER_IS_CREATE);
 
-  if (error) {
-    // undo the work done by vnet_create_sw_interface_no_callbacks()
-    vnet_interface_main_t * im = &vnm->interface_main;
-    vnet_sw_interface_t * sw = pool_elt_at_index (im->sw_interfaces, *sw_if_index);
-    pool_put (im->sw_interfaces, sw);
-  }
+  if (error)
+    {
+      /* undo the work done by vnet_create_sw_interface_no_callbacks() */
+      vnet_interface_main_t *im = &vnm->interface_main;
+      vnet_sw_interface_t *sw =
+	pool_elt_at_index (im->sw_interfaces, *sw_if_index);
+      pool_put (im->sw_interfaces, sw);
+    }
 
   return error;
 }
 
-void vnet_delete_sw_interface (vnet_main_t * vnm, u32 sw_if_index)
+void
+vnet_delete_sw_interface (vnet_main_t * vnm, u32 sw_if_index)
 {
-  vnet_interface_main_t * im = &vnm->interface_main;
-  vnet_sw_interface_t * sw = pool_elt_at_index (im->sw_interfaces, sw_if_index);
+  vnet_interface_main_t *im = &vnm->interface_main;
+  vnet_sw_interface_t *sw =
+    pool_elt_at_index (im->sw_interfaces, sw_if_index);
 
   /* Bring down interface in case it is up. */
   if (sw->flags != 0)
@@ -558,24 +623,24 @@
   pool_put (im->sw_interfaces, sw);
 }
 
-static void setup_tx_node (vlib_main_t * vm,
-			   u32 node_index,
-			   vnet_device_class_t * dev_class)
+static void
+setup_tx_node (vlib_main_t * vm,
+	       u32 node_index, vnet_device_class_t * dev_class)
 {
-  vlib_node_t * n = vlib_get_node (vm, node_index);
+  vlib_node_t *n = vlib_get_node (vm, node_index);
 
   n->function = dev_class->tx_function;
   n->format_trace = dev_class->format_tx_trace;
-  vlib_register_errors (vm, node_index, 
-                        dev_class->tx_function_n_errors,
-                        dev_class->tx_function_error_strings);
+  vlib_register_errors (vm, node_index,
+			dev_class->tx_function_n_errors,
+			dev_class->tx_function_error_strings);
 }
 
-static void setup_output_node (vlib_main_t * vm,
-			       u32 node_index,
-			       vnet_hw_interface_class_t * hw_class)
+static void
+setup_output_node (vlib_main_t * vm,
+		   u32 node_index, vnet_hw_interface_class_t * hw_class)
 {
-  vlib_node_t * n = vlib_get_node (vm, node_index);
+  vlib_node_t *n = vlib_get_node (vm, node_index);
   n->format_buffer = hw_class->format_header;
   n->unformat_buffer = hw_class->unformat_header;
 }
@@ -585,16 +650,17 @@
 vnet_register_interface (vnet_main_t * vnm,
 			 u32 dev_class_index,
 			 u32 dev_instance,
-			 u32 hw_class_index,
-			 u32 hw_instance)
+			 u32 hw_class_index, u32 hw_instance)
 {
-  vnet_interface_main_t * im = &vnm->interface_main;
-  vnet_hw_interface_t * hw;
-  vnet_device_class_t * dev_class = vnet_get_device_class (vnm, dev_class_index);
-  vnet_hw_interface_class_t * hw_class = vnet_get_hw_interface_class (vnm, hw_class_index);
-  vlib_main_t * vm = vnm->vlib_main;
+  vnet_interface_main_t *im = &vnm->interface_main;
+  vnet_hw_interface_t *hw;
+  vnet_device_class_t *dev_class =
+    vnet_get_device_class (vnm, dev_class_index);
+  vnet_hw_interface_class_t *hw_class =
+    vnet_get_hw_interface_class (vnm, hw_class_index);
+  vlib_main_t *vm = vnm->vlib_main;
   u32 hw_index;
-  char * tx_node_name, * output_node_name;
+  char *tx_node_name, *output_node_name;
 
   pool_get (im->hw_interfaces, hw);
 
@@ -602,16 +668,15 @@
   hw->hw_if_index = hw_index;
 
   if (dev_class->format_device_name)
-    hw->name = format (0, "%U",
-		       dev_class->format_device_name, dev_instance);
+    hw->name = format (0, "%U", dev_class->format_device_name, dev_instance);
   else if (hw_class->format_interface_name)
     hw->name = format (0, "%U", hw_class->format_interface_name,
 		       dev_instance);
   else
     hw->name = format (0, "%s%x", hw_class->name, dev_instance);
 
-  if (! im->hw_interface_by_name)
-    im->hw_interface_by_name = hash_create_vec (/* size */ 0,
+  if (!im->hw_interface_by_name)
+    im->hw_interface_by_name = hash_create_vec ( /* size */ 0,
 						sizeof (hw->name[0]),
 						sizeof (uword));
 
@@ -644,8 +709,8 @@
   /* If we have previously deleted interface nodes, re-use them. */
   if (vec_len (im->deleted_hw_interface_nodes) > 0)
     {
-      vnet_hw_interface_nodes_t * hn;
-      vnet_interface_output_runtime_t * rt;
+      vnet_hw_interface_nodes_t *hn;
+      vnet_interface_output_runtime_t *rt;
 
       hn = vec_end (im->deleted_hw_interface_nodes) - 1;
 
@@ -658,7 +723,7 @@
       rt = vlib_node_get_runtime_data (vm, hw->output_node_index);
       ASSERT (rt->is_deleted == 1);
       rt->is_deleted = 0;
-      rt->hw_if_index = hw_index; 
+      rt->hw_if_index = hw_index;
       rt->sw_if_index = hw->sw_if_index;
       rt->dev_instance = hw->dev_instance;
 
@@ -667,7 +732,7 @@
       rt->sw_if_index = hw->sw_if_index;
       rt->dev_instance = hw->dev_instance;
 
-      vlib_worker_thread_node_runtime_update();
+      vlib_worker_thread_node_runtime_update ();
       _vec_len (im->deleted_hw_interface_nodes) -= 1;
     }
   else
@@ -699,13 +764,13 @@
 
       r.flags = 0;
       r.name = output_node_name;
-      r.function =  dev_class->no_flatten_output_chains ?
-          vnet_interface_output_node_no_flatten_multiarch_select() :
-          vnet_interface_output_node_multiarch_select() ;
+      r.function = dev_class->no_flatten_output_chains ?
+	vnet_interface_output_node_no_flatten_multiarch_select () :
+	vnet_interface_output_node_multiarch_select ();
       r.format_trace = format_vnet_interface_output_trace;
 
       {
-	static char * e[] = {
+	static char *e[] = {
 	  "interface is down",
 	  "interface is deleted",
 	};
@@ -713,7 +778,6 @@
 	r.n_errors = ARRAY_LEN (e);
 	r.error_strings = e;
       }
-
       hw->output_node_index = vlib_register_node (vm, &r);
 
 #define _(sym,str) vlib_node_add_named_next_with_slot (vm, \
@@ -721,10 +785,9 @@
                      VNET_INTERFACE_OUTPUT_NEXT_##sym);
       foreach_intf_output_feat
 #undef _
-
-      vlib_node_add_named_next_with_slot (vm, hw->output_node_index,
-					  "error-drop",
-					  VNET_INTERFACE_OUTPUT_NEXT_DROP);
+	vlib_node_add_named_next_with_slot (vm, hw->output_node_index,
+					    "error-drop",
+					    VNET_INTERFACE_OUTPUT_NEXT_DROP);
       vlib_node_add_next_with_slot (vm, hw->output_node_index,
 				    hw->tx_node_index,
 				    VNET_INTERFACE_OUTPUT_NEXT_TX);
@@ -734,21 +797,20 @@
   setup_tx_node (vm, hw->tx_node_index, dev_class);
 
   /* Call all up/down callbacks with zero flags when interface is created. */
-  vnet_sw_interface_set_flags_helper
-    (vnm, hw->sw_if_index, /* flags */ 0,
-     VNET_INTERFACE_SET_FLAGS_HELPER_IS_CREATE);
-  vnet_hw_interface_set_flags_helper
-    (vnm, hw_index, /* flags */ 0,
-     VNET_INTERFACE_SET_FLAGS_HELPER_IS_CREATE);
+  vnet_sw_interface_set_flags_helper (vnm, hw->sw_if_index, /* flags */ 0,
+				      VNET_INTERFACE_SET_FLAGS_HELPER_IS_CREATE);
+  vnet_hw_interface_set_flags_helper (vnm, hw_index, /* flags */ 0,
+				      VNET_INTERFACE_SET_FLAGS_HELPER_IS_CREATE);
 
   return hw_index;
 }
 
-void vnet_delete_hw_interface (vnet_main_t * vnm, u32 hw_if_index)
+void
+vnet_delete_hw_interface (vnet_main_t * vnm, u32 hw_if_index)
 {
-  vnet_interface_main_t * im = &vnm->interface_main;
-  vnet_hw_interface_t * hw = vnet_get_hw_interface (vnm, hw_if_index);
-  vlib_main_t * vm = vnm->vlib_main;
+  vnet_interface_main_t *im = &vnm->interface_main;
+  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
+  vlib_main_t *vm = vnm->vlib_main;
 
   /* If it is up, mark it down. */
   if (hw->flags != 0)
@@ -763,20 +825,25 @@
   /* Delete any sub-interfaces. */
   {
     u32 id, sw_if_index;
+    /* *INDENT-OFF* */
     hash_foreach (id, sw_if_index, hw->sub_interface_sw_if_index_by_id, ({
       vnet_delete_sw_interface (vnm, sw_if_index);
     }));
+    /* *INDENT-ON* */
   }
 
   {
-    vnet_hw_interface_nodes_t * dn;
-    vnet_interface_output_runtime_t * rt = vlib_node_get_runtime_data (vm, hw->output_node_index);
+    vnet_hw_interface_nodes_t *dn;
+    vnet_interface_output_runtime_t *rt =
+      vlib_node_get_runtime_data (vm, hw->output_node_index);
 
     /* Mark node runtime as deleted so output node (if called) will drop packets. */
     rt->is_deleted = 1;
 
-    vlib_node_rename (vm, hw->output_node_index, "interface-%d-output-deleted", hw_if_index);
-    vlib_node_rename (vm, hw->tx_node_index, "interface-%d-tx-deleted", hw_if_index);
+    vlib_node_rename (vm, hw->output_node_index,
+		      "interface-%d-output-deleted", hw_if_index);
+    vlib_node_rename (vm, hw->tx_node_index, "interface-%d-tx-deleted",
+		      hw_if_index);
     vec_add2 (im->deleted_hw_interface_nodes, dn, 1);
     dn->tx_node_index = hw->tx_node_index;
     dn->output_node_index = hw->output_node_index;
@@ -788,42 +855,49 @@
   pool_put (im->hw_interfaces, hw);
 }
 
-static void serialize_vnet_hw_interface_set_class (serialize_main_t * m, va_list * va)
+static void
+serialize_vnet_hw_interface_set_class (serialize_main_t * m, va_list * va)
 {
   u32 hw_if_index = va_arg (*va, u32);
-  char * hw_class_name = va_arg (*va, char *);
+  char *hw_class_name = va_arg (*va, char *);
   serialize_integer (m, hw_if_index, sizeof (hw_if_index));
   serialize_cstring (m, hw_class_name);
 }
 
-static void unserialize_vnet_hw_interface_set_class (serialize_main_t * m, va_list * va)
+static void
+unserialize_vnet_hw_interface_set_class (serialize_main_t * m, va_list * va)
 {
   CLIB_UNUSED (mc_main_t * mc) = va_arg (*va, mc_main_t *);
-  vnet_main_t * vnm = vnet_get_main();
+  vnet_main_t *vnm = vnet_get_main ();
   u32 hw_if_index;
-  char * hw_class_name;
-  uword * p;
-  clib_error_t * error;
+  char *hw_class_name;
+  uword *p;
+  clib_error_t *error;
 
   unserialize_integer (m, &hw_if_index, sizeof (hw_if_index));
   unserialize_cstring (m, &hw_class_name);
-  p = hash_get (vnm->interface_main.hw_interface_class_by_name, hw_class_name);
+  p =
+    hash_get (vnm->interface_main.hw_interface_class_by_name, hw_class_name);
   ASSERT (p != 0);
-  error = vnet_hw_interface_set_class_helper (vnm, hw_if_index, p[0], /* redistribute */ 0);
+  error = vnet_hw_interface_set_class_helper (vnm, hw_if_index, p[0],
+					      /* redistribute */ 0);
   if (error)
     clib_error_report (error);
 }
 
-MC_SERIALIZE_MSG (vnet_hw_interface_set_class_msg, static) = {
-  .name = "vnet_hw_interface_set_class",
-  .serialize = serialize_vnet_hw_interface_set_class,
-  .unserialize = unserialize_vnet_hw_interface_set_class,
-};
-
-void vnet_hw_interface_init_for_class (vnet_main_t * vnm, u32 hw_if_index, u32 hw_class_index, u32 hw_instance)
+MC_SERIALIZE_MSG (vnet_hw_interface_set_class_msg, static) =
 {
-  vnet_hw_interface_t * hi = vnet_get_hw_interface (vnm, hw_if_index);
-  vnet_hw_interface_class_t * hc = vnet_get_hw_interface_class (vnm, hw_class_index);
+.name = "vnet_hw_interface_set_class",.serialize =
+    serialize_vnet_hw_interface_set_class,.unserialize =
+    unserialize_vnet_hw_interface_set_class,};
+
+void
+vnet_hw_interface_init_for_class (vnet_main_t * vnm, u32 hw_if_index,
+				  u32 hw_class_index, u32 hw_instance)
+{
+  vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, hw_if_index);
+  vnet_hw_interface_class_t *hc =
+    vnet_get_hw_interface_class (vnm, hw_class_index);
 
   hi->hw_class_index = hw_class_index;
   hi->hw_instance = hw_instance;
@@ -831,14 +905,18 @@
 }
 
 static clib_error_t *
-vnet_hw_interface_set_class_helper (vnet_main_t * vnm, u32 hw_if_index, u32 hw_class_index, u32 redistribute)
+vnet_hw_interface_set_class_helper (vnet_main_t * vnm, u32 hw_if_index,
+				    u32 hw_class_index, u32 redistribute)
 {
-  vnet_hw_interface_t * hi = vnet_get_hw_interface (vnm, hw_if_index);
-  vnet_sw_interface_t * si = vnet_get_sw_interface (vnm, hi->sw_if_index);
-  vnet_hw_interface_class_t * old_class = vnet_get_hw_interface_class (vnm, hi->hw_class_index);
-  vnet_hw_interface_class_t * new_class = vnet_get_hw_interface_class (vnm, hw_class_index);
-  vnet_device_class_t * dev_class = vnet_get_device_class (vnm, hi->dev_class_index);
-  clib_error_t * error = 0;
+  vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, hw_if_index);
+  vnet_sw_interface_t *si = vnet_get_sw_interface (vnm, hi->sw_if_index);
+  vnet_hw_interface_class_t *old_class =
+    vnet_get_hw_interface_class (vnm, hi->hw_class_index);
+  vnet_hw_interface_class_t *new_class =
+    vnet_get_hw_interface_class (vnm, hw_class_index);
+  vnet_device_class_t *dev_class =
+    vnet_get_device_class (vnm, hi->dev_class_index);
+  clib_error_t *error = 0;
 
   /* New class equals old class?  Nothing to do. */
   if (hi->hw_class_index == hw_class_index)
@@ -849,31 +927,40 @@
   if (redistribute)
     {
       if (si->flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP)
-	return clib_error_return (0, "%v must be admin down to change class from %s to %s",
+	return clib_error_return (0,
+				  "%v must be admin down to change class from %s to %s",
 				  hi->name, old_class->name, new_class->name);
 
       /* Make sure interface supports given class. */
       if ((new_class->is_valid_class_for_interface
-	   && ! new_class->is_valid_class_for_interface (vnm, hw_if_index, hw_class_index))
-	  || (dev_class ->is_valid_class_for_interface
-	      && ! dev_class->is_valid_class_for_interface (vnm, hw_if_index, hw_class_index)))
-	return clib_error_return (0, "%v class cannot be changed from %s to %s",
+	   && !new_class->is_valid_class_for_interface (vnm, hw_if_index,
+							hw_class_index))
+	  || (dev_class->is_valid_class_for_interface
+	      && !dev_class->is_valid_class_for_interface (vnm, hw_if_index,
+							   hw_class_index)))
+	return clib_error_return (0,
+				  "%v class cannot be changed from %s to %s",
 				  hi->name, old_class->name, new_class->name);
 
       if (vnm->vlib_main->mc_main)
 	{
-	  mc_serialize (vnm->vlib_main->mc_main, &vnet_hw_interface_set_class_msg, hw_if_index, new_class->name);
+	  mc_serialize (vnm->vlib_main->mc_main,
+			&vnet_hw_interface_set_class_msg, hw_if_index,
+			new_class->name);
 	  return 0;
 	}
     }
 
   if (old_class->hw_class_change)
-    old_class->hw_class_change (vnm, hw_if_index, old_class->index, new_class->index);
+    old_class->hw_class_change (vnm, hw_if_index, old_class->index,
+				new_class->index);
 
-  vnet_hw_interface_init_for_class (vnm, hw_if_index, new_class->index, /* instance */ ~0);
+  vnet_hw_interface_init_for_class (vnm, hw_if_index, new_class->index,
+				    /* instance */ ~0);
 
   if (new_class->hw_class_change)
-    new_class->hw_class_change (vnm, hw_if_index, old_class->index, new_class->index);
+    new_class->hw_class_change (vnm, hw_if_index, old_class->index,
+				new_class->index);
 
   if (dev_class->hw_class_change)
     dev_class->hw_class_change (vnm, hw_if_index, new_class->index);
@@ -882,23 +969,27 @@
 }
 
 clib_error_t *
-vnet_hw_interface_set_class (vnet_main_t * vnm, u32 hw_if_index, u32 hw_class_index)
-{ return vnet_hw_interface_set_class_helper (vnm, hw_if_index, hw_class_index, /* redistribute */ 1); }
+vnet_hw_interface_set_class (vnet_main_t * vnm, u32 hw_if_index,
+			     u32 hw_class_index)
+{
+  return vnet_hw_interface_set_class_helper (vnm, hw_if_index, hw_class_index,
+					     /* redistribute */ 1);
+}
 
 static int
-vnet_hw_interface_rx_redirect_to_node_helper (vnet_main_t * vnm, 
-                                              u32 hw_if_index, 
-                                              u32 node_index, 
-                                              u32 redistribute)
+vnet_hw_interface_rx_redirect_to_node_helper (vnet_main_t * vnm,
+					      u32 hw_if_index,
+					      u32 node_index,
+					      u32 redistribute)
 {
-  vnet_hw_interface_t * hi = vnet_get_hw_interface (vnm, hw_if_index);
-  vnet_device_class_t * dev_class = vnet_get_device_class 
+  vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, hw_if_index);
+  vnet_device_class_t *dev_class = vnet_get_device_class
     (vnm, hi->dev_class_index);
 
   if (redistribute)
     {
       /* $$$$ fixme someday maybe */
-      ASSERT(vnm->vlib_main->mc_main == 0);
+      ASSERT (vnm->vlib_main->mc_main == 0);
     }
   if (dev_class->rx_redirect_to_node)
     {
@@ -909,20 +1000,23 @@
   return VNET_API_ERROR_UNIMPLEMENTED;
 }
 
-int vnet_hw_interface_rx_redirect_to_node (vnet_main_t * vnm, u32 hw_if_index,
-                                       u32 node_index)
-{ return vnet_hw_interface_rx_redirect_to_node_helper (vnm, hw_if_index,
-                                                       node_index,
-                                                       1 /* redistribute */); }
+int
+vnet_hw_interface_rx_redirect_to_node (vnet_main_t * vnm, u32 hw_if_index,
+				       u32 node_index)
+{
+  return vnet_hw_interface_rx_redirect_to_node_helper (vnm, hw_if_index,
+						       node_index,
+						       1 /* redistribute */ );
+}
 
 word
 vnet_sw_interface_compare (vnet_main_t * vnm,
 			   uword sw_if_index0, uword sw_if_index1)
 {
-  vnet_sw_interface_t * sup0 = vnet_get_sup_sw_interface (vnm, sw_if_index0);
-  vnet_sw_interface_t * sup1 = vnet_get_sup_sw_interface (vnm, sw_if_index1);
-  vnet_hw_interface_t * h0 = vnet_get_hw_interface (vnm, sup0->hw_if_index);
-  vnet_hw_interface_t * h1 = vnet_get_hw_interface (vnm, sup1->hw_if_index);
+  vnet_sw_interface_t *sup0 = vnet_get_sup_sw_interface (vnm, sw_if_index0);
+  vnet_sw_interface_t *sup1 = vnet_get_sup_sw_interface (vnm, sw_if_index1);
+  vnet_hw_interface_t *h0 = vnet_get_hw_interface (vnm, sup0->hw_if_index);
+  vnet_hw_interface_t *h1 = vnet_get_hw_interface (vnm, sup1->hw_if_index);
 
   if (h0 != h1)
     return vec_cmp (h0->name, h1->name);
@@ -933,8 +1027,8 @@
 vnet_hw_interface_compare (vnet_main_t * vnm,
 			   uword hw_if_index0, uword hw_if_index1)
 {
-  vnet_hw_interface_t * h0 = vnet_get_hw_interface (vnm, hw_if_index0);
-  vnet_hw_interface_t * h1 = vnet_get_hw_interface (vnm, hw_if_index1);
+  vnet_hw_interface_t *h0 = vnet_get_hw_interface (vnm, hw_if_index0);
+  vnet_hw_interface_t *h1 = vnet_get_hw_interface (vnm, hw_if_index1);
 
   if (h0 != h1)
     return vec_cmp (h0->name, h1->name);
@@ -944,38 +1038,37 @@
 clib_error_t *
 vnet_interface_init (vlib_main_t * vm)
 {
-  vnet_main_t * vnm = vnet_get_main();
-  vnet_interface_main_t * im = &vnm->interface_main;
-  vlib_buffer_t * b = 0;
-  vnet_buffer_opaque_t * o = 0;
+  vnet_main_t *vnm = vnet_get_main ();
+  vnet_interface_main_t *im = &vnm->interface_main;
+  vlib_buffer_t *b = 0;
+  vnet_buffer_opaque_t *o = 0;
 
   /*
    * Keep people from shooting themselves in the foot.
    */
-  if (sizeof(b->opaque) != sizeof (vnet_buffer_opaque_t))
+  if (sizeof (b->opaque) != sizeof (vnet_buffer_opaque_t))
     {
 #define _(a) if (sizeof(o->a) > sizeof (o->unused))                     \
       clib_warning                                                      \
         ("FATAL: size of opaque union subtype %s is %d (max %d)",       \
          #a, sizeof(o->a), sizeof (o->unused));
-    foreach_buffer_opaque_union_subtype;
+      foreach_buffer_opaque_union_subtype;
 #undef _
 
-     return clib_error_return 
-           (0, "FATAL: size of vlib buffer opaque %d, size of vnet opaque %d",
-           sizeof(b->opaque), sizeof (vnet_buffer_opaque_t));
+      return clib_error_return
+	(0, "FATAL: size of vlib buffer opaque %d, size of vnet opaque %d",
+	 sizeof (b->opaque), sizeof (vnet_buffer_opaque_t));
     }
 
-  im->sw_if_counter_lock = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES, 
-                                                   CLIB_CACHE_LINE_BYTES);
-  im->sw_if_counter_lock[0] = 1; /* should be no need */
+  im->sw_if_counter_lock = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
+						   CLIB_CACHE_LINE_BYTES);
+  im->sw_if_counter_lock[0] = 1;	/* should be no need */
 
-  vec_validate (im->sw_if_counters,
-		VNET_N_SIMPLE_INTERFACE_COUNTER - 1);
+  vec_validate (im->sw_if_counters, VNET_N_SIMPLE_INTERFACE_COUNTER - 1);
   im->sw_if_counters[VNET_INTERFACE_COUNTER_DROP].name = "drops";
   im->sw_if_counters[VNET_INTERFACE_COUNTER_PUNT].name = "punts";
-  im->sw_if_counters[VNET_INTERFACE_COUNTER_IP4].name  = "ip4";
-  im->sw_if_counters[VNET_INTERFACE_COUNTER_IP6].name  = "ip6";
+  im->sw_if_counters[VNET_INTERFACE_COUNTER_IP4].name = "ip4";
+  im->sw_if_counters[VNET_INTERFACE_COUNTER_IP6].name = "ip6";
   im->sw_if_counters[VNET_INTERFACE_COUNTER_RX_NO_BUF].name = "rx-no-buf";
   im->sw_if_counters[VNET_INTERFACE_COUNTER_RX_MISS].name = "rx-miss";
   im->sw_if_counters[VNET_INTERFACE_COUNTER_RX_ERROR].name = "rx-error";
@@ -988,43 +1081,43 @@
 
   im->sw_if_counter_lock[0] = 0;
 
-  im->device_class_by_name = hash_create_string (/* size */ 0,
+  im->device_class_by_name = hash_create_string ( /* size */ 0,
 						 sizeof (uword));
   {
-    vnet_device_class_t * c;
+    vnet_device_class_t *c;
 
     c = vnm->device_class_registrations;
 
     while (c)
       {
-        c->index = vec_len (im->device_classes);
-        hash_set_mem (im->device_class_by_name, c->name, c->index);
-        vec_add1 (im->device_classes, c[0]);
-        c = c->next_class_registration;
+	c->index = vec_len (im->device_classes);
+	hash_set_mem (im->device_class_by_name, c->name, c->index);
+	vec_add1 (im->device_classes, c[0]);
+	c = c->next_class_registration;
       }
   }
 
-  im->hw_interface_class_by_name = hash_create_string (/* size */ 0,
+  im->hw_interface_class_by_name = hash_create_string ( /* size */ 0,
 						       sizeof (uword));
 
-  im->sw_if_index_by_sup_and_sub = hash_create_mem (0, sizeof(u64), 
-                                                    sizeof (uword));
+  im->sw_if_index_by_sup_and_sub = hash_create_mem (0, sizeof (u64),
+						    sizeof (uword));
   {
-    vnet_hw_interface_class_t * c;
+    vnet_hw_interface_class_t *c;
 
     c = vnm->hw_interface_class_registrations;
-    
+
     while (c)
       {
-        c->index = vec_len (im->hw_interface_classes);
-        hash_set_mem (im->hw_interface_class_by_name, c->name, c->index);
-        vec_add1 (im->hw_interface_classes, c[0]);
-        c = c->next_class_registration;
+	c->index = vec_len (im->hw_interface_classes);
+	hash_set_mem (im->hw_interface_class_by_name, c->name, c->index);
+	vec_add1 (im->hw_interface_classes, c[0]);
+	c = c->next_class_registration;
       }
   }
 
   {
-    clib_error_t * error;
+    clib_error_t *error;
 
     if ((error = vlib_call_init_function (vm, vnet_interface_cli_init)))
       return error;
@@ -1036,18 +1129,19 @@
 VLIB_INIT_FUNCTION (vnet_interface_init);
 
 /* Kludge to renumber interface names [only!] */
-int vnet_interface_name_renumber (u32 sw_if_index, u32 new_show_dev_instance)
+int
+vnet_interface_name_renumber (u32 sw_if_index, u32 new_show_dev_instance)
 {
   int rv;
-  vnet_main_t * vnm = vnet_get_main();
-  vnet_interface_main_t * im = &vnm->interface_main;
-  vnet_hw_interface_t * hi = vnet_get_sup_hw_interface (vnm, sw_if_index);
+  vnet_main_t *vnm = vnet_get_main ();
+  vnet_interface_main_t *im = &vnm->interface_main;
+  vnet_hw_interface_t *hi = vnet_get_sup_hw_interface (vnm, sw_if_index);
 
-  vnet_device_class_t * dev_class = vnet_get_device_class 
+  vnet_device_class_t *dev_class = vnet_get_device_class
     (vnm, hi->dev_class_index);
 
   if (dev_class->name_renumber == 0 || dev_class->format_device_name == 0)
-      return VNET_API_ERROR_UNIMPLEMENTED;
+    return VNET_API_ERROR_UNIMPLEMENTED;
 
   rv = dev_class->name_renumber (hi, new_show_dev_instance);
 
@@ -1057,72 +1151,81 @@
   hash_unset_mem (im->hw_interface_by_name, hi->name);
   vec_free (hi->name);
   /* Use the mapping we set up to call it Ishmael */
-  hi->name = format (0, "%U", dev_class->format_device_name, 
-                     hi->dev_instance);
-  
+  hi->name = format (0, "%U", dev_class->format_device_name,
+		     hi->dev_instance);
+
   hash_set_mem (im->hw_interface_by_name, hi->name, hi->hw_if_index);
   return rv;
 }
 
-int vnet_interface_add_del_feature(vnet_main_t * vnm,
-                                   vlib_main_t *vm,
-                                   u32 sw_if_index,
-                                   intf_output_feat_t feature,
-                                   int is_add)
+int
+vnet_interface_add_del_feature (vnet_main_t * vnm,
+				vlib_main_t * vm,
+				u32 sw_if_index,
+				intf_output_feat_t feature, int is_add)
 {
-  vnet_sw_interface_t * sw;
+  vnet_sw_interface_t *sw;
 
-  sw = vnet_get_sw_interface(vnm, sw_if_index);
+  sw = vnet_get_sw_interface (vnm, sw_if_index);
 
-  if (is_add) {
+  if (is_add)
+    {
 
-    sw->output_feature_bitmap |= (1 << feature);
-    sw->output_feature_bitmap |= (1<< INTF_OUTPUT_FEAT_DONE);
+      sw->output_feature_bitmap |= (1 << feature);
+      sw->output_feature_bitmap |= (1 << INTF_OUTPUT_FEAT_DONE);
 
-  } else { /* delete */
+    }
+  else
+    {				/* delete */
 
-    sw->output_feature_bitmap &= ~(1<<feature);
-    if (sw->output_feature_bitmap == (1 << INTF_OUTPUT_FEAT_DONE))
-      sw->output_feature_bitmap = 0;
+      sw->output_feature_bitmap &= ~(1 << feature);
+      if (sw->output_feature_bitmap == (1 << INTF_OUTPUT_FEAT_DONE))
+	sw->output_feature_bitmap = 0;
 
-  }
+    }
   return 0;
 }
 
 clib_error_t *
-vnet_rename_interface (vnet_main_t * vnm,
-                       u32           hw_if_index,
-                       char *        new_name)
+vnet_rename_interface (vnet_main_t * vnm, u32 hw_if_index, char *new_name)
 {
-  vnet_interface_main_t * im = &vnm->interface_main;
-  vlib_main_t * vm = vnm->vlib_main;
-  vnet_hw_interface_t* hw;
-  u8* old_name;
-  clib_error_t * error = 0;
+  vnet_interface_main_t *im = &vnm->interface_main;
+  vlib_main_t *vm = vnm->vlib_main;
+  vnet_hw_interface_t *hw;
+  u8 *old_name;
+  clib_error_t *error = 0;
 
-  hw = vnet_get_hw_interface(vnm, hw_if_index);
+  hw = vnet_get_hw_interface (vnm, hw_if_index);
   if (!hw)
     {
       return clib_error_return (0,
-                                "unable to find hw interface for index %u",
-                                 hw_if_index);
+				"unable to find hw interface for index %u",
+				hw_if_index);
     }
 
   old_name = hw->name;
 
-  // set new hw->name
+  /* set new hw->name */
   hw->name = format (0, "%s", new_name);
 
-  // remove the old name to hw_if_index mapping and install the new one
+  /* remove the old name to hw_if_index mapping and install the new one */
   hash_unset_mem (im->hw_interface_by_name, old_name);
   hash_set_mem (im->hw_interface_by_name, hw->name, hw_if_index);
 
-  // rename tx/output nodes
+  /* rename tx/output nodes */
   vlib_node_rename (vm, hw->tx_node_index, "%v-tx", hw->name);
   vlib_node_rename (vm, hw->output_node_index, "%v-output", hw->name);
 
-  // free the old name vector
+  /* free the old name vector */
   vec_free (old_name);
 
   return error;
 }
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
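
For reference, the output-feature bookkeeping reindented above works on a plain per-interface bitmap: one bit per feature, plus INTF_OUTPUT_FEAT_DONE to mark that the feature path is in use. A minimal standalone sketch of that logic, assuming <vnet/vnet.h> is included (the "my_" helper name is hypothetical and not part of the patch):

/* Hypothetical illustration of the bitmap handling in
 * vnet_interface_add_del_feature (); not part of the patch. */
static u32
my_toggle_output_feature (u32 bitmap, intf_output_feat_t feature, int is_add)
{
  if (is_add)
    {
      bitmap |= (1 << feature);                 /* enable the requested feature */
      bitmap |= (1 << INTF_OUTPUT_FEAT_DONE);   /* mark the feature path in use */
    }
  else
    {
      bitmap &= ~(1 << feature);                /* disable the requested feature */
      if (bitmap == (1 << INTF_OUTPUT_FEAT_DONE))
        bitmap = 0;                             /* no real features left: clear all */
    }
  return bitmap;
}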
diff --git a/vnet/vnet/interface.h b/vnet/vnet/interface.h
index 11bb234..7738bb6 100644
--- a/vnet/vnet/interface.h
+++ b/vnet/vnet/interface.h
@@ -47,18 +47,18 @@
 struct vnet_sw_interface_t;
 
 /* Interface up/down callback. */
-typedef clib_error_t * (vnet_interface_function_t)
+typedef clib_error_t *(vnet_interface_function_t)
   (struct vnet_main_t * vnm, u32 if_index, u32 flags);
 
 /* Sub-interface add/del callback. */
-typedef clib_error_t * (vnet_subif_add_del_function_t)
+typedef clib_error_t *(vnet_subif_add_del_function_t)
   (struct vnet_main_t * vnm, u32 if_index,
-   struct vnet_sw_interface_t * template,
-   int is_add);
+   struct vnet_sw_interface_t * template, int is_add);
 
-typedef struct _vnet_interface_function_list_elt {
-  struct _vnet_interface_function_list_elt * next_interface_function;
-  clib_error_t * (*fp) (struct vnet_main_t * vnm, u32 if_index, u32 flags);
+typedef struct _vnet_interface_function_list_elt
+{
+  struct _vnet_interface_function_list_elt *next_interface_function;
+  clib_error_t *(*fp) (struct vnet_main_t * vnm, u32 if_index, u32 flags);
 } _vnet_interface_function_list_elt_t;
 
 #define _VNET_INTERFACE_FUNCTION_DECL(f,tag)                            \
@@ -73,7 +73,7 @@
  init_function.next_interface_function = vnm->tag##_functions;          \
  vnm->tag##_functions = &init_function;                                 \
  init_function.fp = (void *) &f;                                        \
-} 
+}
 
 #define VNET_HW_INTERFACE_ADD_DEL_FUNCTION(f)			\
   _VNET_INTERFACE_FUNCTION_DECL(f,hw_interface_add_del)
@@ -85,66 +85,68 @@
   _VNET_INTERFACE_FUNCTION_DECL(f,sw_interface_admin_up_down)
 
 /* A class of hardware interface devices. */
-typedef struct _vnet_device_class {
+typedef struct _vnet_device_class
+{
   /* Index into main vector. */
   u32 index;
 
   /* Device name (e.g. "FOOBAR 1234a"). */
-  char * name;
+  char *name;
 
   /* Function to call when hardware interface is added/deleted. */
-  vnet_interface_function_t * interface_add_del_function;
+  vnet_interface_function_t *interface_add_del_function;
 
   /* Function to bring device administratively up/down. */
-  vnet_interface_function_t * admin_up_down_function;
+  vnet_interface_function_t *admin_up_down_function;
 
   /* Function to call when sub-interface is added/deleted */
-  vnet_subif_add_del_function_t * subif_add_del_function;
+  vnet_subif_add_del_function_t *subif_add_del_function;
 
   /* Redistribute flag changes/existence of this interface class. */
   u32 redistribute;
 
   /* Transmit function. */
-  vlib_node_function_t * tx_function;
+  vlib_node_function_t *tx_function;
 
   /* Error strings indexed by error code for this node. */
-  char ** tx_function_error_strings;
+  char **tx_function_error_strings;
 
   /* Number of error codes used by this node. */
   u32 tx_function_n_errors;
 
   /* Renumber device name [only!] support, a control-plane kludge */
-  int (*name_renumber) (struct vnet_hw_interface_t * hi, u32 new_dev_instance);
+  int (*name_renumber) (struct vnet_hw_interface_t * hi,
+			u32 new_dev_instance);
 
   /* Format device instance as name. */
-  format_function_t * format_device_name;
+  format_function_t *format_device_name;
 
   /* Parse function for device name. */
-  unformat_function_t * unformat_device_name;
+  unformat_function_t *unformat_device_name;
 
   /* Format device verbosely for this class. */
-  format_function_t * format_device;
+  format_function_t *format_device;
 
   /* Trace buffer format for TX function. */
-  format_function_t * format_tx_trace;
+  format_function_t *format_tx_trace;
 
   /* Function to clear hardware counters for device. */
-  void (* clear_counters) (u32 dev_class_instance);
+  void (*clear_counters) (u32 dev_class_instance);
 
-  uword (* is_valid_class_for_interface) (struct vnet_main_t * vnm, u32 hw_if_index, u32 hw_class_index);
+    uword (*is_valid_class_for_interface) (struct vnet_main_t * vnm,
+					   u32 hw_if_index,
+					   u32 hw_class_index);
 
   /* Called when hardware class of an interface changes. */
-  void ( * hw_class_change) (struct vnet_main_t * vnm,
-			     u32 hw_if_index,
-			     u32 new_hw_class_index);
+  void (*hw_class_change) (struct vnet_main_t * vnm,
+			   u32 hw_if_index, u32 new_hw_class_index);
 
   /* Called to redirect traffic from a specific interface instance */
-  void (* rx_redirect_to_node) (struct vnet_main_t * vnm,
-                                u32 hw_if_index,
-                                u32 node_index);
+  void (*rx_redirect_to_node) (struct vnet_main_t * vnm,
+			       u32 hw_if_index, u32 node_index);
 
   /* Linked list of all device classes set up by constructors created below */
-  struct _vnet_device_class * next_class_registration;
+  struct _vnet_device_class *next_class_registration;
 
   /* Do not splice vnet_interface_output_node into TX path */
   u8 no_flatten_output_chains;
@@ -161,7 +163,7 @@
     x.next_class_registration = vnm->device_class_registrations;        \
     vnm->device_class_registrations = &x;                               \
 }                                                                       \
-__VA_ARGS__ vnet_device_class_t x                                       
+__VA_ARGS__ vnet_device_class_t x
 
 #define VLIB_DEVICE_TX_FUNCTION_CLONE_TEMPLATE(arch, fn, tgt)		\
   uword									\
@@ -190,57 +192,60 @@
 
 
 /* Layer-2 (e.g. Ethernet) interface class. */
-typedef struct _vnet_hw_interface_class {
+typedef struct _vnet_hw_interface_class
+{
   /* Index into main vector. */
   u32 index;
 
   /* Class name (e.g. "Ethernet"). */
-  char * name;
+  char *name;
 
   /* Function to call when hardware interface is added/deleted. */
-  vnet_interface_function_t * interface_add_del_function;
+  vnet_interface_function_t *interface_add_del_function;
 
   /* Function to bring interface administratively up/down. */
-  vnet_interface_function_t * admin_up_down_function;
+  vnet_interface_function_t *admin_up_down_function;
 
   /* Function to call when link state changes. */
-  vnet_interface_function_t * link_up_down_function;
+  vnet_interface_function_t *link_up_down_function;
 
   /* Format function to display interface name. */
-  format_function_t * format_interface_name;
+  format_function_t *format_interface_name;
 
   /* Format function to display interface address. */
-  format_function_t * format_address;
+  format_function_t *format_address;
 
   /* Format packet header for this interface class. */
-  format_function_t * format_header;
+  format_function_t *format_header;
 
   /* Format device verbosely for this class. */
-  format_function_t * format_device;
+  format_function_t *format_device;
 
   /* Parser for hardware (e.g. ethernet) address. */
-  unformat_function_t * unformat_hw_address;
+  unformat_function_t *unformat_hw_address;
 
   /* Parser for packet header for e.g. rewrite string. */
-  unformat_function_t * unformat_header;
+  unformat_function_t *unformat_header;
 
   /* Forms adjacency for given l3 packet type and destination address.
      Returns number of bytes in adjacency. */
-  uword (* set_rewrite) (struct vnet_main_t * vnm,
-			 u32 sw_if_index,
-			 u32 l3_packet_type,
-			 void * dst_address,
-			 void * rewrite,
-			 uword max_rewrite_bytes);
+    uword (*set_rewrite) (struct vnet_main_t * vnm,
+			  u32 sw_if_index,
+			  u32 l3_packet_type,
+			  void *dst_address,
+			  void *rewrite, uword max_rewrite_bytes);
 
-  uword (* is_valid_class_for_interface) (struct vnet_main_t * vnm, u32 hw_if_index, u32 hw_class_index);
+    uword (*is_valid_class_for_interface) (struct vnet_main_t * vnm,
+					   u32 hw_if_index,
+					   u32 hw_class_index);
 
   /* Called when hw interface class is changed and old hardware instance
      may want to be deleted. */
-  void (* hw_class_change) (struct vnet_main_t * vnm, u32 hw_if_index, u32 old_class_index, u32 new_class_index);
+  void (*hw_class_change) (struct vnet_main_t * vnm, u32 hw_if_index,
+			   u32 old_class_index, u32 new_class_index);
 
   /* List of hw interface classes, built by constructors */
-  struct _vnet_hw_interface_class * next_class_registration;
+  struct _vnet_hw_interface_class *next_class_registration;
 
 } vnet_hw_interface_class_t;
 
@@ -258,9 +263,10 @@
 
 /* Hardware-interface.  This corresponds to a physical wire
    that packets flow over. */
-typedef struct vnet_hw_interface_t {
+typedef struct vnet_hw_interface_t
+{
   /* Interface name. */
-  u8 * name;
+  u8 *name;
 
   u32 flags;
   /* Hardware link state is up. */
@@ -295,7 +301,7 @@
 
   /* Hardware address as vector.  Zero (e.g. zero-length vector) if no
      address for this class (e.g. PPP). */
-  u8 * hw_address;
+  u8 *hw_address;
 
   /* Interface is up as far as software is concerned. */
   /* NAME.{output,tx} nodes for this interface. */
@@ -339,7 +345,7 @@
   u32 max_l3_packet_bytes[VLIB_N_RX_TX];
 
   /* Hash table mapping sub interface id to sw_if_index. */
-  uword * sub_interface_sw_if_index_by_id;
+  uword *sub_interface_sw_if_index_by_id;
 
   /* Count of number of L2 subinterfaces */
   u32 l2_if_count;
@@ -356,7 +362,8 @@
 
 extern vnet_device_class_t vnet_local_interface_device_class;
 
-typedef enum {
+typedef enum
+{
   /* A hw interface. */
   VNET_SW_INTERFACE_TYPE_HARDWARE,
 
@@ -364,26 +371,32 @@
   VNET_SW_INTERFACE_TYPE_SUB,
 } vnet_sw_interface_type_t;
 
-typedef struct {
-  // Subinterface ID. A number 0-N to uniquely identify this subinterface under the
-  // main (parent?) interface
-  u32 id; 
+typedef struct
+{
+  /*
+   * Subinterface ID. A number 0-N to uniquely identify
+   * this subinterface under the main (parent?) interface
+   */
+  u32 id;
 
-  // Classification data. Used to associate packet header with subinterface.
-  struct {
+  /* Classification data. Used to associate packet header with subinterface. */
+  struct
+  {
     u16 outer_vlan_id;
     u16 inner_vlan_id;
-    union {
+    union
+    {
       u16 raw_flags;
-      struct {
-        u16 no_tags:1;
-        u16 one_tag:1;
-        u16 two_tags:1;
-        u16 dot1ad:1;   // 0 = dot1q, 1=dot1ad
-        u16 exact_match:1;
-        u16 default_sub:1;
-        u16 outer_vlan_id_any:1;
-        u16 inner_vlan_id_any:1;
+      struct
+      {
+	u16 no_tags:1;
+	u16 one_tag:1;
+	u16 two_tags:1;
+	u16 dot1ad:1;		/* 0 = dot1q, 1=dot1ad */
+	u16 exact_match:1;
+	u16 default_sub:1;
+	u16 outer_vlan_id_any:1;
+	u16 inner_vlan_id_any:1;
       } flags;
     };
   } eth;
@@ -392,8 +405,9 @@
 /* Software-interface.  This corresponds to a Ethernet VLAN, ATM vc, a
    tunnel, etc.  Configuration (e.g. IP address) gets attached to
    software interface. */
-typedef struct {
-  vnet_sw_interface_type_t type : 16;
+typedef struct
+{
+  vnet_sw_interface_type_t type:16;
 
   u16 flags;
   /* Interface is "up" meaning administratively up.
@@ -424,19 +438,18 @@
 
   u32 output_feature_bitmap;
 
-  union {
+  union
+  {
     /* VNET_SW_INTERFACE_TYPE_HARDWARE. */
     u32 hw_if_index;
 
     /* VNET_SW_INTERFACE_TYPE_SUB. */
     vnet_sub_interface_t sub;
-
-    /* SW interfaces are sorted by type and key. */
-    // u32 sort_key;
   };
 } vnet_sw_interface_t;
 
-typedef enum {
+typedef enum
+{
   /* Simple counters. */
   VNET_INTERFACE_COUNTER_DROP = 0,
   VNET_INTERFACE_COUNTER_PUNT = 1,
@@ -453,57 +466,62 @@
   VNET_N_COMBINED_INTERFACE_COUNTER = 2,
 } vnet_interface_counter_type_t;
 
-typedef struct {
+typedef struct
+{
   u32 output_node_index;
   u32 tx_node_index;
 } vnet_hw_interface_nodes_t;
 
-typedef struct {
+typedef struct
+{
   /* Hardware interfaces. */
-  vnet_hw_interface_t * hw_interfaces;
+  vnet_hw_interface_t *hw_interfaces;
 
   /* Hash table mapping HW interface name to index. */
-  uword * hw_interface_by_name;
+  uword *hw_interface_by_name;
 
   /* Vectors of hardware interface classes and device classes. */
-  vnet_hw_interface_class_t * hw_interface_classes;
-  vnet_device_class_t * device_classes;
+  vnet_hw_interface_class_t *hw_interface_classes;
+  vnet_device_class_t *device_classes;
 
   /* Hash table mapping name to hw interface/device class. */
-  uword * hw_interface_class_by_name;
-  uword * device_class_by_name;
+  uword *hw_interface_class_by_name;
+  uword *device_class_by_name;
 
   /* Software interfaces. */
-  vnet_sw_interface_t * sw_interfaces;
+  vnet_sw_interface_t *sw_interfaces;
 
   /* Hash table mapping sub intfc sw_if_index by sup sw_if_index and sub id */
-  uword * sw_if_index_by_sup_and_sub;
+  uword *sw_if_index_by_sup_and_sub;
 
   /* Software interface counters both simple and combined
      packet and byte counters. */
   volatile u32 *sw_if_counter_lock;
-  vlib_simple_counter_main_t * sw_if_counters;
-  vlib_combined_counter_main_t * combined_sw_if_counters;
+  vlib_simple_counter_main_t *sw_if_counters;
+  vlib_combined_counter_main_t *combined_sw_if_counters;
 
-  vnet_hw_interface_nodes_t * deleted_hw_interface_nodes;
+  vnet_hw_interface_nodes_t *deleted_hw_interface_nodes;
 
   /* pcap drop tracing */
   int drop_pcap_enable;
   pcap_main_t pcap_main;
-  u8 * pcap_filename;
+  u8 *pcap_filename;
   u32 pcap_sw_if_index;
   u32 pcap_pkts_to_capture;
-  uword * pcap_drop_filter_hash;
+  uword *pcap_drop_filter_hash;
 
 } vnet_interface_main_t;
 
-static inline void vnet_interface_counter_lock (vnet_interface_main_t *im)
+static inline void
+vnet_interface_counter_lock (vnet_interface_main_t * im)
 {
   if (im->sw_if_counter_lock)
     while (__sync_lock_test_and_set (im->sw_if_counter_lock, 1))
       /* zzzz */ ;
 }
-static inline void vnet_interface_counter_unlock (vnet_interface_main_t *im)
+
+static inline void
+vnet_interface_counter_unlock (vnet_interface_main_t * im)
 {
   if (im->sw_if_counter_lock)
     *im->sw_if_counter_lock = 0;
@@ -521,19 +539,28 @@
 #define foreach_intf_output_feat \
  _(IPSEC, "ipsec-output")
 
-// Feature bitmap positions
-typedef enum {
+/* Feature bitmap positions */
+typedef enum
+{
 #define _(sym,str) INTF_OUTPUT_FEAT_##sym,
   foreach_intf_output_feat
 #undef _
-  INTF_OUTPUT_N_FEAT,
+    INTF_OUTPUT_N_FEAT,
 } intf_output_feat_t;
 
 /* flag that we are done with feature path */
 #define INTF_OUTPUT_FEAT_DONE INTF_OUTPUT_N_FEAT
 
-int vnet_interface_add_del_feature(struct vnet_main_t * vnm, vlib_main_t * vm,
-                                   u32 sw_if_index,
-                                   intf_output_feat_t feature, int is_add);
+int vnet_interface_add_del_feature (struct vnet_main_t *vnm, vlib_main_t * vm,
+				    u32 sw_if_index,
+				    intf_output_feat_t feature, int is_add);
 
 #endif /* included_vnet_interface_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
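
The counter lock pair declared above is a bare test-and-set spinlock over the shared per-interface counter vectors; bulk readers and writers are expected to bracket their work with the lock/unlock calls. A minimal usage sketch, assuming <vnet/vnet.h> is included (the "my_" helper name is hypothetical and not part of the patch):

/* Hypothetical caller: zero every simple interface counter while
 * holding the spinlock kept in vnet_interface_main_t. */
static void
my_clear_simple_if_counters (vnet_interface_main_t * im)
{
  vlib_simple_counter_main_t *sm;

  vnet_interface_counter_lock (im);     /* spins on __sync_lock_test_and_set */
  vec_foreach (sm, im->sw_if_counters)
    vlib_clear_simple_counters (sm);    /* same call clear_interface_counters uses */
  vnet_interface_counter_unlock (im);   /* release by storing 0 to the lock word */
}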
diff --git a/vnet/vnet/interface_cli.c b/vnet/vnet/interface_cli.c
index 9052f62..7b9f545 100644
--- a/vnet/vnet/interface_cli.c
+++ b/vnet/vnet/interface_cli.c
@@ -41,12 +41,13 @@
 #include <vnet/ip/ip.h>
 #include <vppinfra/bitmap.h>
 
-static int compare_interface_names (void *a1, void *a2)
+static int
+compare_interface_names (void *a1, void *a2)
 {
-  u32 * hi1 = a1;
-  u32 * hi2 = a2;
+  u32 *hi1 = a1;
+  u32 *hi2 = a2;
 
-  return vnet_hw_interface_compare (vnet_get_main(), *hi1, *hi2);
+  return vnet_hw_interface_compare (vnet_get_main (), *hi1, *hi2);
 }
 
 static clib_error_t *
@@ -54,26 +55,27 @@
 			     unformat_input_t * input,
 			     vlib_cli_command_t * cmd)
 {
-  clib_error_t * error = 0;
-  vnet_main_t * vnm = vnet_get_main();
-  vnet_interface_main_t * im = &vnm->interface_main;
-  vnet_hw_interface_t * hi;
-  u32 hw_if_index, * hw_if_indices = 0;
+  clib_error_t *error = 0;
+  vnet_main_t *vnm = vnet_get_main ();
+  vnet_interface_main_t *im = &vnm->interface_main;
+  vnet_hw_interface_t *hi;
+  u32 hw_if_index, *hw_if_indices = 0;
   int i, verbose = -1, is_show, show_bond = 0;
 
   is_show = strstr (cmd->path, "show") != 0;
   while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
     {
       /* See if user wants to show a specific interface. */
-      if (unformat (input, "%U", unformat_vnet_hw_interface, vnm, &hw_if_index))
-	  vec_add1 (hw_if_indices, hw_if_index);
+      if (unformat
+	  (input, "%U", unformat_vnet_hw_interface, vnm, &hw_if_index))
+	vec_add1 (hw_if_indices, hw_if_index);
 
       /* See if user wants to show an interface with a specific hw_if_index. */
       else if (unformat (input, "%u", &hw_if_index))
-         vec_add1 (hw_if_indices, hw_if_index);
+	vec_add1 (hw_if_indices, hw_if_index);
 
       else if (unformat (input, "verbose"))
-	  verbose = 1; /* this is also the default */
+	verbose = 1;		/* this is also the default */
 
       else if (unformat (input, "detail"))
 	verbose = 2;
@@ -82,10 +84,11 @@
 	verbose = 0;
 
       else if (unformat (input, "bond"))
-      {
-	show_bond = 1;
-	if (verbose < 0) verbose = 0; /* default to brief for link bonding */
-      }
+	{
+	  show_bond = 1;
+	  if (verbose < 0)
+	    verbose = 0;	/* default to brief for link bonding */
+	}
 
       else
 	{
@@ -94,13 +97,14 @@
 	  goto done;
 	}
     }
-	
+
   /* Gather interfaces. */
   if (vec_len (hw_if_indices) == 0)
     pool_foreach (hi, im->hw_interfaces,
 		  vec_add1 (hw_if_indices, hi - im->hw_interfaces));
 
-  if (verbose < 0) verbose = 1; /* default to verbose (except bond) */
+  if (verbose < 0)
+    verbose = 1;		/* default to verbose (except bond) */
 
   if (is_show)
     {
@@ -111,22 +115,25 @@
       for (i = 0; i < vec_len (hw_if_indices); i++)
 	{
 	  hi = vnet_get_hw_interface (vnm, hw_if_indices[i]);
-	  if (show_bond == 0) /* show all interfaces */
-	      vlib_cli_output (vm, "%U\n", format_vnet_hw_interface, vnm, 
-			       hi, verbose);
-	  else if ((hi->bond_info) && 
+	  if (show_bond == 0)	/* show all interfaces */
+	    vlib_cli_output (vm, "%U\n", format_vnet_hw_interface, vnm,
+			     hi, verbose);
+	  else if ((hi->bond_info) &&
 		   (hi->bond_info != VNET_HW_INTERFACE_BOND_INFO_SLAVE))
-	    { /* show only bonded interface and all its slave interfaces */
+	    {			/* show only bonded interface and all its slave interfaces */
 	      int hw_idx;
-	      vnet_hw_interface_t * shi;
-	      vlib_cli_output (vm, "%U\n", format_vnet_hw_interface, vnm, 
+	      vnet_hw_interface_t *shi;
+	      vlib_cli_output (vm, "%U\n", format_vnet_hw_interface, vnm,
 			       hi, verbose);
+
+              /* *INDENT-OFF* */
 	      clib_bitmap_foreach (hw_idx, hi->bond_info,
-		({
-		  shi = vnet_get_hw_interface(vnm, hw_idx);
-		  vlib_cli_output (vm, "%U\n", 
-				   format_vnet_hw_interface, vnm, shi, verbose);
-		}));
+              ({
+                shi = vnet_get_hw_interface(vnm, hw_idx);
+                vlib_cli_output (vm, "%U\n",
+                                 format_vnet_hw_interface, vnm, shi, verbose);
+              }));
+              /* *INDENT-ON* */
 	    }
 	}
     }
@@ -134,84 +141,92 @@
     {
       for (i = 0; i < vec_len (hw_if_indices); i++)
 	{
-	  vnet_device_class_t * dc;
+	  vnet_device_class_t *dc;
 
 	  hi = vnet_get_hw_interface (vnm, hw_if_indices[i]);
 	  dc = vec_elt_at_index (im->device_classes, hi->dev_class_index);
-	  
+
 	  if (dc->clear_counters)
 	    dc->clear_counters (hi->dev_instance);
 	}
     }
 
- done:
+done:
   vec_free (hw_if_indices);
   return error;
 }
 
+/* *INDENT-OFF* */
 VLIB_CLI_COMMAND (show_hw_interfaces_command, static) = {
   .path = "show hardware-interfaces",
   .short_help = "show hardware-interfaces [brief|verbose|detail] [bond] [<if-name1> <if-name2> ...]",
   .function = show_or_clear_hw_interfaces,
 };
+/* *INDENT-ON* */
 
+/* *INDENT-OFF* */
 VLIB_CLI_COMMAND (clear_hw_interface_counters_command, static) = {
   .path = "clear hardware-interfaces",
   .short_help = "Clear hardware interfaces statistics",
   .function = show_or_clear_hw_interfaces,
 };
+/* *INDENT-ON* */
 
-static int sw_interface_name_compare (void *a1, void *a2)
+static int
+sw_interface_name_compare (void *a1, void *a2)
 {
   vnet_sw_interface_t *si1 = a1;
   vnet_sw_interface_t *si2 = a2;
 
-  return vnet_sw_interface_compare (vnet_get_main(), 
-                                    si1->sw_if_index, si2->sw_if_index);
+  return vnet_sw_interface_compare (vnet_get_main (),
+				    si1->sw_if_index, si2->sw_if_index);
 }
 
 static clib_error_t *
 show_sw_interfaces (vlib_main_t * vm,
-		    unformat_input_t * input,
-		    vlib_cli_command_t * cmd)
+		    unformat_input_t * input, vlib_cli_command_t * cmd)
 {
-  clib_error_t * error = 0;
-  vnet_main_t * vnm = vnet_get_main();
-  vnet_interface_main_t * im = &vnm->interface_main;
-  vnet_sw_interface_t * si, * sorted_sis = 0;
+  clib_error_t *error = 0;
+  vnet_main_t *vnm = vnet_get_main ();
+  vnet_interface_main_t *im = &vnm->interface_main;
+  vnet_sw_interface_t *si, *sorted_sis = 0;
   u8 show_addresses = 0;
 
   while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
     {
-       u32 sw_if_index;
+      u32 sw_if_index;
 
       /* See if user wants to show specific interface */
-      if (unformat (input, "%U", unformat_vnet_sw_interface, vnm, &sw_if_index))
+      if (unformat
+	  (input, "%U", unformat_vnet_sw_interface, vnm, &sw_if_index))
 	{
-	  si =  pool_elt_at_index (im->sw_interfaces, sw_if_index);
+	  si = pool_elt_at_index (im->sw_interfaces, sw_if_index);
 	  vec_add1 (sorted_sis, si[0]);
 	}
-
       else if (unformat (input, "address") || unformat (input, "addr"))
-	  show_addresses = 1;
-
+	show_addresses = 1;
       else
-        {
+	{
 	  error = clib_error_return (0, "unknown input `%U'",
 				     format_unformat_error, input);
 	  goto done;
-        }
+	}
     }
 
   if (!show_addresses)
-      vlib_cli_output (vm, "%U\n", format_vnet_sw_interface, vnm, 0);
+    vlib_cli_output (vm, "%U\n", format_vnet_sw_interface, vnm, 0);
 
-  if (vec_len (sorted_sis) == 0) /* Get all interfaces */
+  if (vec_len (sorted_sis) == 0)	/* Get all interfaces */
     {
       /* Gather interfaces. */
-      sorted_sis = vec_new (vnet_sw_interface_t, pool_elts (im->sw_interfaces));
+      sorted_sis =
+	vec_new (vnet_sw_interface_t, pool_elts (im->sw_interfaces));
       _vec_len (sorted_sis) = 0;
-      pool_foreach (si, im->sw_interfaces, ({ vec_add1 (sorted_sis, si[0]); }));
+      pool_foreach (si, im->sw_interfaces, (
+					     {
+					     vec_add1 (sorted_sis, si[0]);
+					     }
+		    ));
 
       /* Sort by name. */
       vec_sort_with_function (sorted_sis, sw_interface_name_compare);
@@ -220,154 +235,163 @@
   if (show_addresses)
     {
       vec_foreach (si, sorted_sis)
-        {
-	  l2input_main_t * l2m = &l2input_main;
-          ip4_main_t * im4 = &ip4_main;
-          ip6_main_t * im6 = &ip6_main;
-          ip_lookup_main_t * lm4 = &im4->lookup_main;
-          ip_lookup_main_t * lm6 = &im6->lookup_main;
-          ip_interface_address_t * ia = 0;
-          ip4_address_t * r4;
-          ip6_address_t * r6;
-          u32 fib_index4 = 0, fib_index6 = 0;
-          ip4_fib_t * fib4;
-          ip6_fib_t * fib6;
-	  l2_input_config_t * config;
+      {
+	l2input_main_t *l2m = &l2input_main;
+	ip4_main_t *im4 = &ip4_main;
+	ip6_main_t *im6 = &ip6_main;
+	ip_lookup_main_t *lm4 = &im4->lookup_main;
+	ip_lookup_main_t *lm6 = &im6->lookup_main;
+	ip_interface_address_t *ia = 0;
+	ip4_address_t *r4;
+	ip6_address_t *r6;
+	u32 fib_index4 = 0, fib_index6 = 0;
+	ip4_fib_t *fib4;
+	ip6_fib_t *fib6;
+	l2_input_config_t *config;
 
-          if (vec_len (im4->fib_index_by_sw_if_index) > si->sw_if_index)
-            fib_index4 = vec_elt (im4->fib_index_by_sw_if_index, 
-                                  si->sw_if_index);
+	if (vec_len (im4->fib_index_by_sw_if_index) > si->sw_if_index)
+	  fib_index4 = vec_elt (im4->fib_index_by_sw_if_index,
+				si->sw_if_index);
 
-          if (vec_len (im6->fib_index_by_sw_if_index) > si->sw_if_index)
-            fib_index6 = vec_elt (im6->fib_index_by_sw_if_index,
-                                  si->sw_if_index);
+	if (vec_len (im6->fib_index_by_sw_if_index) > si->sw_if_index)
+	  fib_index6 = vec_elt (im6->fib_index_by_sw_if_index,
+				si->sw_if_index);
 
-          fib4 = vec_elt_at_index (im4->fibs, fib_index4);
-          fib6 = vec_elt_at_index (im6->fibs, fib_index6);
+	fib4 = vec_elt_at_index (im4->fibs, fib_index4);
+	fib6 = vec_elt_at_index (im6->fibs, fib_index6);
 
-          if (si->flags & VNET_SW_INTERFACE_FLAG_UNNUMBERED)
-            vlib_cli_output 
-                (vm, "%U (%s): \n  unnumbered, use %U", 
-                 format_vnet_sw_if_index_name,
-                 vnm, si->sw_if_index,
-                 (si->flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) ? "up" : "dn",
-                 format_vnet_sw_if_index_name,
-                 vnm, si->unnumbered_sw_if_index);
-                             
-          else
-            {
-            vlib_cli_output (vm, "%U (%s):", 
-                             format_vnet_sw_if_index_name,
-                             vnm, si->sw_if_index,
-                             (si->flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) 
-                             ? "up" : "dn");
-            }
+	if (si->flags & VNET_SW_INTERFACE_FLAG_UNNUMBERED)
+	  vlib_cli_output
+	    (vm, "%U (%s): \n  unnumbered, use %U",
+	     format_vnet_sw_if_index_name,
+	     vnm, si->sw_if_index,
+	     (si->flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) ? "up" : "dn",
+	     format_vnet_sw_if_index_name, vnm, si->unnumbered_sw_if_index);
 
-	  /* Display any L2 addressing info */
-	  vec_validate(l2m->configs, si->sw_if_index);
-	  config = vec_elt_at_index(l2m->configs, si->sw_if_index);
-	  if (config->bridge) 
-	    {
-	      u32 bd_id = l2input_main.bd_configs[config->bd_index].bd_id;
-	      vlib_cli_output (vm, "  l2 bridge bd_id %d%s%d", bd_id, 
-			     config->bvi ? " bvi shg " : " shg ", config->shg);
-            } 
-	  else if (config->xconnect) 
-	    {
-	      vlib_cli_output (vm, "  l2 xconnect %U", 
-			       format_vnet_sw_if_index_name,
-			       vnm, config->output_sw_if_index);
-	    }
+	else
+	  {
+	    vlib_cli_output (vm, "%U (%s):",
+			     format_vnet_sw_if_index_name,
+			     vnm, si->sw_if_index,
+			     (si->flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP)
+			     ? "up" : "dn");
+	  }
 
-	  /* Display any IP4 addressing info */
-	  foreach_ip_interface_address (lm4, ia, si->sw_if_index, 
+	/* Display any L2 addressing info */
+	vec_validate (l2m->configs, si->sw_if_index);
+	config = vec_elt_at_index (l2m->configs, si->sw_if_index);
+	if (config->bridge)
+	  {
+	    u32 bd_id = l2input_main.bd_configs[config->bd_index].bd_id;
+	    vlib_cli_output (vm, "  l2 bridge bd_id %d%s%d", bd_id,
+			     config->bvi ? " bvi shg " : " shg ",
+			     config->shg);
+	  }
+	else if (config->xconnect)
+	  {
+	    vlib_cli_output (vm, "  l2 xconnect %U",
+			     format_vnet_sw_if_index_name,
+			     vnm, config->output_sw_if_index);
+	  }
+
+	/* Display any IP4 addressing info */
+          /* *INDENT-OFF* */
+	  foreach_ip_interface_address (lm4, ia, si->sw_if_index,
 					1 /* honor unnumbered */,
 	  ({
             r4 = ip_interface_address_get_address (lm4, ia);
             if (fib4->table_id)
               {
-                vlib_cli_output (vm, "  %U/%d table %d", 
-                                 format_ip4_address, r4, 
+                vlib_cli_output (vm, "  %U/%d table %d",
+                                 format_ip4_address, r4,
                                  ia->address_length,
                                  fib4->table_id);
               }
             else
               {
-                vlib_cli_output (vm, "  %U/%d", 
-                                 format_ip4_address, r4, 
+                vlib_cli_output (vm, "  %U/%d",
+                                 format_ip4_address, r4,
                                  ia->address_length);
               }
           }));
+          /* *INDENT-ON* */
 
-	  /* Display any IP6 addressing info */
-          foreach_ip_interface_address (lm6, ia, si->sw_if_index, 
+	/* Display any IP6 addressing info */
+          /* *INDENT-OFF* */
+          foreach_ip_interface_address (lm6, ia, si->sw_if_index,
                                         1 /* honor unnumbered */,
           ({
             r6 = ip_interface_address_get_address (lm6, ia);
             if (fib6->table_id)
               {
-                vlib_cli_output (vm, "  %U/%d table %d", 
-                                 format_ip6_address, r6, 
+                vlib_cli_output (vm, "  %U/%d table %d",
+                                 format_ip6_address, r6,
                                  ia->address_length,
                                  fib6->table_id);
               }
             else
               {
-                vlib_cli_output (vm, "  %U/%d", 
-                                 format_ip6_address, r6, 
+                vlib_cli_output (vm, "  %U/%d",
+                                 format_ip6_address, r6,
                                  ia->address_length);
               }
           }));
-        }
+          /* *INDENT-ON* */
+      }
     }
   else
     {
       vec_foreach (si, sorted_sis)
-        {
-          vlib_cli_output (vm, "%U\n", format_vnet_sw_interface, vnm, si);
-        }
+      {
+	vlib_cli_output (vm, "%U\n", format_vnet_sw_interface, vnm, si);
+      }
     }
 
- done:
+done:
   vec_free (sorted_sis);
   return error;
 }
 
+/* *INDENT-OFF* */
 VLIB_CLI_COMMAND (show_sw_interfaces_command, static) = {
   .path = "show interfaces",
   .short_help = "show interfaces [address|addr] [<if-name1> <if-name2> ...]",
   .function = show_sw_interfaces,
 };
+/* *INDENT-ON* */
 
 /* Root of all interface commands. */
+/* *INDENT-OFF* */
 VLIB_CLI_COMMAND (vnet_cli_interface_command, static) = {
   .path = "interface",
   .short_help = "Interface commands",
 };
+/* *INDENT-ON* */
 
+/* *INDENT-OFF* */
 VLIB_CLI_COMMAND (vnet_cli_set_interface_command, static) = {
   .path = "set interface",
   .short_help = "Interface commands",
 };
+/* *INDENT-ON* */
 
 static clib_error_t *
 clear_interface_counters (vlib_main_t * vm,
-			  unformat_input_t * input,
-			  vlib_cli_command_t * cmd)
+			  unformat_input_t * input, vlib_cli_command_t * cmd)
 {
-  vnet_main_t * vnm = vnet_get_main();
-  vnet_interface_main_t * im = &vnm->interface_main;
-  vlib_simple_counter_main_t * sm;
-  vlib_combined_counter_main_t * cm;
-  static vnet_main_t ** my_vnet_mains;
+  vnet_main_t *vnm = vnet_get_main ();
+  vnet_interface_main_t *im = &vnm->interface_main;
+  vlib_simple_counter_main_t *sm;
+  vlib_combined_counter_main_t *cm;
+  static vnet_main_t **my_vnet_mains;
   int i, j, n_counters;
 
   vec_reset_length (my_vnet_mains);
-      
+
   for (i = 0; i < vec_len (vnet_mains); i++)
     {
       if (vnet_mains[i])
-        vec_add1 (my_vnet_mains, vnet_mains[i]);
+	vec_add1 (my_vnet_mains, vnet_mains[i]);
     }
 
   if (vec_len (vnet_mains) == 0)
@@ -377,149 +401,164 @@
 
   for (j = 0; j < n_counters; j++)
     {
-      for (i = 0; i < vec_len(my_vnet_mains); i++)
-        {
-          im = &my_vnet_mains[i]->interface_main;
-          cm = im->combined_sw_if_counters + j;
-          vlib_clear_combined_counters (cm);
-        }
+      for (i = 0; i < vec_len (my_vnet_mains); i++)
+	{
+	  im = &my_vnet_mains[i]->interface_main;
+	  cm = im->combined_sw_if_counters + j;
+	  vlib_clear_combined_counters (cm);
+	}
     }
 
   n_counters = vec_len (im->sw_if_counters);
 
   for (j = 0; j < n_counters; j++)
     {
-      for (i = 0; i < vec_len(my_vnet_mains); i++)
-        {
-          im = &my_vnet_mains[i]->interface_main;
-          sm = im->sw_if_counters + j;
-          vlib_clear_simple_counters (sm);
-        }
+      for (i = 0; i < vec_len (my_vnet_mains); i++)
+	{
+	  im = &my_vnet_mains[i]->interface_main;
+	  sm = im->sw_if_counters + j;
+	  vlib_clear_simple_counters (sm);
+	}
     }
 
   return 0;
 }
 
+/* *INDENT-OFF* */
 VLIB_CLI_COMMAND (clear_interface_counters_command, static) = {
   .path = "clear interfaces",
   .short_help = "Clear interfaces statistics",
   .function = clear_interface_counters,
 };
+/* *INDENT-ON* */
 
-// The following subinterface syntax is supported. The first two are for 
-// backwards compatability:
-//
-// <intf-name> <id>
-//     - a subinterface with the name <intf-name>.<id>. The subinterface
-//       is a single dot1q vlan with vlan id <id> and exact-match semantics.
-//
-// <intf-name> <min_id>-<max_id> 
-//     - a set of the above subinterfaces, repeating for each id
-//       in the range <min_id> to <max_id>
-//
-// In the following, exact-match semantics (i.e. the number of vlan tags on the
-// packet must match the number of tags in the configuration) are used only if 
-// the keyword exact-match is present. Non-exact match is the default.
-//
-// <intf-name> <id> dot1q <outer_id> [exact-match]
-//     - a subinterface with the name <intf-name>.<id>. The subinterface
-//       is a single dot1q vlan with vlan id <outer_id>. 
-//
-// <intf-name> <id> dot1q any [exact-match]
-//     - a subinterface with the name <intf-name>.<id>. The subinterface
-//       is a single dot1q vlan with any vlan id.
-//
-// <intf-name> <id> dot1q <outer_id> inner-dot1q <inner_id> [exact-match]
-//     - a subinterface with the name <intf-name>.<id>. The subinterface
-//       is a double dot1q vlan with outer vlan id <outer_id> and inner vlan id 
-//       <inner_id>. 
-//
-// <intf-name> <id> dot1q <outer_id> inner-dot1q any [exact-match]
-//     - a subinterface with the name <intf-name>.<id>. The subinterface
-//       is a double dot1q vlan with outer vlan id <id> and any inner vlan id.
-//
-// <intf-name> <id> dot1q any inner-dot1q any [exact-match]
-//
-//     - a subinterface with the name <intf-name>.<id>. The subinterface
-//       is a double dot1q vlan with any outer vlan id and any inner vlan id.
-//
-// For each of the above CLI, there is a duplicate that uses the keyword
-// "dot1ad" in place of the first "dot1q". These interfaces use ethertype
-// 0x88ad in place of 0x8100 for the outer ethertype. Note that for double-
-// tagged packets the inner ethertype is always 0x8100. Also note that
-// the dot1q and dot1ad naming spaces are independent, so it is legal to
-// have both "Gig3/0/0.1 dot1q 100" and "Gig3/0/0.2 dot1ad 100". For example:
-//
-// <intf-name> <id> dot1ad <outer_id> inner-dot1q <inner_id> [exact-match]
-//     - a subinterface with the name <intf-name>.<id>. The subinterface
-//       is a double dot1ad vlan with outer vlan id <outer_id> and inner vlan 
-//       id <inner_id>. 
-//
-// <intf-name> <id> untagged
-//     - a subinterface with the name <intf-name>.<id>. The subinterface
-//       has no vlan tags. Only one can be specified per interface.
-//      
-// <intf-name> <id> default
-//     - a subinterface with the name <intf-name>.<id>. This is associated
-//       with a packet that did not match any other configured subinterface
-//       on this interface. Only one can be specified per interface.
-
+/** \details
+ * The following subinterface syntax is supported. The first two are for
+ * backwards compatibility:
+ *
+ * <intf-name> <id>
+ *     - a subinterface with the name <intf-name>.<id>. The subinterface
+ *       is a single dot1q vlan with vlan id <id> and exact-match semantics.
+ *
+ * <intf-name> <min_id>-<max_id>
+ *     - a set of the above subinterfaces, repeating for each id
+ *       in the range <min_id> to <max_id>
+ *
+ * In the following, exact-match semantics (i.e. the number of vlan tags on the
+ * packet must match the number of tags in the configuration) are used only if
+ * the keyword exact-match is present. Non-exact match is the default.
+ *
+ * <intf-name> <id> dot1q <outer_id> [exact-match]
+ *     - a subinterface with the name <intf-name>.<id>. The subinterface
+ *       is a single dot1q vlan with vlan id <outer_id>.
+ *
+ * <intf-name> <id> dot1q any [exact-match]
+ *     - a subinterface with the name <intf-name>.<id>. The subinterface
+ *       is a single dot1q vlan with any vlan id.
+ *
+ * <intf-name> <id> dot1q <outer_id> inner-dot1q <inner_id> [exact-match]
+ *     - a subinterface with the name <intf-name>.<id>. The subinterface
+ *       is a double dot1q vlan with outer vlan id <outer_id> and inner vlan id
+ *       <inner_id>.
+ *
+ * <intf-name> <id> dot1q <outer_id> inner-dot1q any [exact-match]
+ *     - a subinterface with the name <intf-name>.<id>. The subinterface
+ *       is a double dot1q vlan with outer vlan id <outer_id> and any inner vlan id.
+ *
+ * <intf-name> <id> dot1q any inner-dot1q any [exact-match]
+ *
+ *     - a subinterface with the name <intf-name>.<id>. The subinterface
+ *       is a double dot1q vlan with any outer vlan id and any inner vlan id.
+ *
+ * For each of the above CLI formats, there is a duplicate that uses the keyword
+ * "dot1ad" in place of the first "dot1q". These interfaces use ethertype
+ * 0x88a8 in place of 0x8100 for the outer ethertype. Note that for double-
+ * tagged packets the inner ethertype is always 0x8100. Also note that
+ * the dot1q and dot1ad naming spaces are independent, so it is legal to
+ * have both "Gig3/0/0.1 dot1q 100" and "Gig3/0/0.2 dot1ad 100". For example:
+ *
+ * <intf-name> <id> dot1ad <outer_id> inner-dot1q <inner_id> [exact-match]
+ *     - a subinterface with the name <intf-name>.<id>. The subinterface
+ *       is a double dot1ad vlan with outer vlan id <outer_id> and inner vlan
+ *       id <inner_id>.
+ *
+ * <intf-name> <id> untagged
+ *     - a subinterface with the name <intf-name>.<id>. The subinterface
+ *       has no vlan tags. Only one can be specified per interface.
+ *
+ * <intf-name> <id> default
+ *     - a subinterface with the name <intf-name>.<id>. This is associated
+ *       with a packet that did not match any other configured subinterface
+ *       on this interface. Only one can be specified per interface.
+ */
 
 static clib_error_t *
-parse_vlan_sub_interfaces (unformat_input_t    * input,
-                           vnet_sw_interface_t * template)
+parse_vlan_sub_interfaces (unformat_input_t * input,
+			   vnet_sw_interface_t * template)
 {
-  clib_error_t * error = 0;
+  clib_error_t *error = 0;
   u32 inner_vlan, outer_vlan;
 
-  if (unformat (input, "any inner-dot1q any")) {
-    template->sub.eth.flags.two_tags = 1;
-    template->sub.eth.flags.outer_vlan_id_any = 1;
-    template->sub.eth.flags.inner_vlan_id_any = 1;
-  } else if (unformat (input, "any")) {
-    template->sub.eth.flags.one_tag = 1;
-    template->sub.eth.flags.outer_vlan_id_any = 1;
-  } else if (unformat (input, "%d inner-dot1q any", &outer_vlan)) {
-    template->sub.eth.flags.two_tags = 1;
-    template->sub.eth.flags.inner_vlan_id_any = 1;
-    template->sub.eth.outer_vlan_id = outer_vlan;     
-  } else if (unformat (input, "%d inner-dot1q %d", &outer_vlan, &inner_vlan)) {
-    template->sub.eth.flags.two_tags = 1;
-    template->sub.eth.outer_vlan_id = outer_vlan;
-    template->sub.eth.inner_vlan_id = inner_vlan;     
-  } else if (unformat (input, "%d", &outer_vlan)) {
-    template->sub.eth.flags.one_tag = 1;
-    template->sub.eth.outer_vlan_id = outer_vlan;
-  } else {
-    error = clib_error_return (0, "expected dot1q config, got `%U'",
-                              format_unformat_error, input);
-    goto done;
-  }
-
-  if (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) {
-    if (unformat (input, "exact-match")) {
-      template->sub.eth.flags.exact_match = 1;
+  if (unformat (input, "any inner-dot1q any"))
+    {
+      template->sub.eth.flags.two_tags = 1;
+      template->sub.eth.flags.outer_vlan_id_any = 1;
+      template->sub.eth.flags.inner_vlan_id_any = 1;
     }
-  }
+  else if (unformat (input, "any"))
+    {
+      template->sub.eth.flags.one_tag = 1;
+      template->sub.eth.flags.outer_vlan_id_any = 1;
+    }
+  else if (unformat (input, "%d inner-dot1q any", &outer_vlan))
+    {
+      template->sub.eth.flags.two_tags = 1;
+      template->sub.eth.flags.inner_vlan_id_any = 1;
+      template->sub.eth.outer_vlan_id = outer_vlan;
+    }
+  else if (unformat (input, "%d inner-dot1q %d", &outer_vlan, &inner_vlan))
+    {
+      template->sub.eth.flags.two_tags = 1;
+      template->sub.eth.outer_vlan_id = outer_vlan;
+      template->sub.eth.inner_vlan_id = inner_vlan;
+    }
+  else if (unformat (input, "%d", &outer_vlan))
+    {
+      template->sub.eth.flags.one_tag = 1;
+      template->sub.eth.outer_vlan_id = outer_vlan;
+    }
+  else
+    {
+      error = clib_error_return (0, "expected dot1q config, got `%U'",
+				 format_unformat_error, input);
+      goto done;
+    }
 
- done:
+  if (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (input, "exact-match"))
+	{
+	  template->sub.eth.flags.exact_match = 1;
+	}
+    }
+
+done:
   return error;
 }
 
 static clib_error_t *
 create_sub_interfaces (vlib_main_t * vm,
-		       unformat_input_t * input,
-		       vlib_cli_command_t * cmd)
+		       unformat_input_t * input, vlib_cli_command_t * cmd)
 {
-  vnet_main_t * vnm = vnet_get_main();
-  clib_error_t * error = 0;
+  vnet_main_t *vnm = vnet_get_main ();
+  clib_error_t *error = 0;
   u32 hw_if_index, sw_if_index;
-  vnet_hw_interface_t * hi;
+  vnet_hw_interface_t *hi;
   u32 id, id_min, id_max;
   vnet_sw_interface_t template;
 
   hw_if_index = ~0;
-  if (! unformat_user (input, unformat_vnet_hw_interface, vnm, &hw_if_index))
+  if (!unformat_user (input, unformat_vnet_hw_interface, vnm, &hw_if_index))
     {
       error = clib_error_return (0, "unknown interface `%U'",
 				 format_unformat_error, input);
@@ -529,74 +568,83 @@
   memset (&template, 0, sizeof (template));
   template.sub.eth.raw_flags = 0;
 
-  if (unformat (input, "%d default", &id_min)) {
-    id_max = id_min;
-    template.sub.eth.flags.default_sub = 1;
-  } else if (unformat (input, "%d untagged", &id_min)) {
-    id_max = id_min;
-    template.sub.eth.flags.no_tags = 1;
-    template.sub.eth.flags.exact_match = 1;
-  } else if (unformat (input, "%d dot1q", &id_min)) {
-    // parse dot1q config
-    id_max = id_min;
-    error = parse_vlan_sub_interfaces(input, &template);
-    if (error) goto done;
-  } else if (unformat (input, "%d dot1ad", &id_min)) {
-    // parse dot1ad config
-    id_max = id_min;
-    template.sub.eth.flags.dot1ad = 1;
-    error = parse_vlan_sub_interfaces(input, &template);
-    if (error) goto done;
-  } else if (unformat (input, "%d-%d", &id_min, &id_max)) {
-    template.sub.eth.flags.one_tag = 1;
-    template.sub.eth.outer_vlan_id = id_min;
-    template.sub.eth.flags.exact_match = 1;
-    if (id_min > id_max)
-      goto id_error;
-  } else if (unformat (input, "%d", &id_min)) {
-    id_max = id_min;
-    template.sub.eth.flags.one_tag = 1;
-    template.sub.eth.outer_vlan_id = id_min;
-    template.sub.eth.flags.exact_match = 1;
-  } else {
+  if (unformat (input, "%d default", &id_min))
+    {
+      id_max = id_min;
+      template.sub.eth.flags.default_sub = 1;
+    }
+  else if (unformat (input, "%d untagged", &id_min))
+    {
+      id_max = id_min;
+      template.sub.eth.flags.no_tags = 1;
+      template.sub.eth.flags.exact_match = 1;
+    }
+  else if (unformat (input, "%d dot1q", &id_min))
+    {
+      /* parse dot1q config */
+      id_max = id_min;
+      error = parse_vlan_sub_interfaces (input, &template);
+      if (error)
+	goto done;
+    }
+  else if (unformat (input, "%d dot1ad", &id_min))
+    {
+      /* parse dot1ad config */
+      id_max = id_min;
+      template.sub.eth.flags.dot1ad = 1;
+      error = parse_vlan_sub_interfaces (input, &template);
+      if (error)
+	goto done;
+    }
+  else if (unformat (input, "%d-%d", &id_min, &id_max))
+    {
+      template.sub.eth.flags.one_tag = 1;
+      template.sub.eth.outer_vlan_id = id_min;
+      template.sub.eth.flags.exact_match = 1;
+      if (id_min > id_max)
+	goto id_error;
+    }
+  else if (unformat (input, "%d", &id_min))
+    {
+      id_max = id_min;
+      template.sub.eth.flags.one_tag = 1;
+      template.sub.eth.outer_vlan_id = id_min;
+      template.sub.eth.flags.exact_match = 1;
+    }
+  else
+    {
     id_error:
       error = clib_error_return (0, "expected ID or ID MIN-MAX, got `%U'",
 				 format_unformat_error, input);
       goto done;
-  }
-
-  /*
-  if (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) {
-    error = clib_error_return (0, "unexpected text `%U'",
-                               format_unformat_error, input);
-    goto done;
-  }
-  */
+    }
 
   hi = vnet_get_hw_interface (vnm, hw_if_index);
 
-  if (hi->bond_info == VNET_HW_INTERFACE_BOND_INFO_SLAVE) {
-    error = clib_error_return (
-	0, "not allowed as %v belong to a BondEthernet interface", hi->name);
-    goto done;
-  }
+  if (hi->bond_info == VNET_HW_INTERFACE_BOND_INFO_SLAVE)
+    {
+      error =
+	clib_error_return (0,
+			   "not allowed as %v belongs to a BondEthernet interface",
+			   hi->name);
+      goto done;
+    }
 
   for (id = id_min; id <= id_max; id++)
     {
-      uword * p;
-      vnet_interface_main_t * im = &vnm->interface_main;
-      u64 sup_and_sub_key = ((u64)(hi->sw_if_index) << 32) |
-          (u64) id;
-      u64 * kp;
+      uword *p;
+      vnet_interface_main_t *im = &vnm->interface_main;
+      u64 sup_and_sub_key = ((u64) (hi->sw_if_index) << 32) | (u64) id;
+      u64 *kp;
 
       p = hash_get_mem (im->sw_if_index_by_sup_and_sub, &sup_and_sub_key);
       if (p)
-        {
-          if (CLIB_DEBUG > 0)
-            clib_warning ("sup sw_if_index %d, sub id %d already exists\n",
-                          hi->sw_if_index, id);
-          continue;
-        }
+	{
+	  if (CLIB_DEBUG > 0)
+	    clib_warning ("sup sw_if_index %d, sub id %d already exists\n",
+			  hi->sw_if_index, id);
+	  continue;
+	}
 
       kp = clib_mem_alloc (sizeof (*kp));
       *kp = sup_and_sub_key;
@@ -605,43 +653,44 @@
       template.sup_sw_if_index = hi->sw_if_index;
       template.sub.id = id;
       error = vnet_create_sw_interface (vnm, &template, &sw_if_index);
-      if (error) 
-        goto done;
+      if (error)
+	goto done;
 
       hash_set (hi->sub_interface_sw_if_index_by_id, id, sw_if_index);
       hash_set_mem (im->sw_if_index_by_sup_and_sub, kp, sw_if_index);
-      vlib_cli_output(vm, "%U\n", format_vnet_sw_if_index_name, 
-                      vnet_get_main(), sw_if_index);
+      vlib_cli_output (vm, "%U\n", format_vnet_sw_if_index_name,
+		       vnet_get_main (), sw_if_index);
     }
 
- done:
+done:
   return error;
 }
 
+/* *INDENT-OFF* */
 VLIB_CLI_COMMAND (create_sub_interfaces_command, static) = {
   .path = "create sub-interface",
   .short_help = "create sub-interfaces <nn>[-<nn>] [dot1q|dot1ad|default|untagged]",
   .function = create_sub_interfaces,
 };
+/* *INDENT-ON* */
 
 static clib_error_t *
 set_state (vlib_main_t * vm,
-	   unformat_input_t * input,
-	   vlib_cli_command_t * cmd)
+	   unformat_input_t * input, vlib_cli_command_t * cmd)
 {
-  vnet_main_t * vnm = vnet_get_main();
-  clib_error_t * error;
+  vnet_main_t *vnm = vnet_get_main ();
+  clib_error_t *error;
   u32 sw_if_index, flags;
 
   sw_if_index = ~0;
-  if (! unformat_user (input, unformat_vnet_sw_interface, vnm, &sw_if_index))
+  if (!unformat_user (input, unformat_vnet_sw_interface, vnm, &sw_if_index))
     {
       error = clib_error_return (0, "unknown interface `%U'",
 				 format_unformat_error, input);
       goto done;
     }
 
-  if (! unformat (input, "%U", unformat_vnet_sw_interface_flags, &flags))
+  if (!unformat (input, "%U", unformat_vnet_sw_interface_flags, &flags))
     {
       error = clib_error_return (0, "unknown flags `%U'",
 				 format_unformat_error, input);
@@ -652,89 +701,95 @@
   if (error)
     goto done;
 
- done:
+done:
   return error;
 }
 
+/* *INDENT-OFF* */
 VLIB_CLI_COMMAND (set_state_command, static) = {
   .path = "set interface state",
   .short_help = "Set interface state",
   .function = set_state,
 };
+/* *INDENT-ON* */
 
 static clib_error_t *
 set_unnumbered (vlib_main_t * vm,
-                unformat_input_t * input,
-                vlib_cli_command_t * cmd)
+		unformat_input_t * input, vlib_cli_command_t * cmd)
 {
-  vnet_main_t * vnm = vnet_get_main();
+  vnet_main_t *vnm = vnet_get_main ();
   u32 unnumbered_sw_if_index;
   u32 inherit_from_sw_if_index;
-  vnet_sw_interface_t * si;
+  vnet_sw_interface_t *si;
   int is_set = 0;
   int is_del = 0;
 
   while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
-  {
+    {
 
-      if (unformat (input, "%U use %U", 
-                    unformat_vnet_sw_interface, vnm, &unnumbered_sw_if_index,
-                    unformat_vnet_sw_interface, vnm, &inherit_from_sw_if_index))
-          is_set = 1;
+      if (unformat (input, "%U use %U",
+		    unformat_vnet_sw_interface, vnm, &unnumbered_sw_if_index,
+		    unformat_vnet_sw_interface, vnm,
+		    &inherit_from_sw_if_index))
+	is_set = 1;
       else if (unformat (input, "del %U",
-                         unformat_vnet_sw_interface, 
-                         vnm, &unnumbered_sw_if_index))
-          is_del = 1;
+			 unformat_vnet_sw_interface,
+			 vnm, &unnumbered_sw_if_index))
+	is_del = 1;
       else
-        {
-          if (is_set || is_del)
-            break;
-          else
-            return clib_error_return 
-              (0, "parse error '%U'", format_unformat_error, input);
-        }
-  }
+	{
+	  if (is_set || is_del)
+	    break;
+	  else
+	    return clib_error_return
+	      (0, "parse error '%U'", format_unformat_error, input);
+	}
+    }
 
   si = vnet_get_sw_interface (vnm, unnumbered_sw_if_index);
-  if (is_del) {
+  if (is_del)
+    {
       si->flags &= ~(VNET_SW_INTERFACE_FLAG_UNNUMBERED);
-      si->unnumbered_sw_if_index = (u32)~0;
-  } else {
+      si->unnumbered_sw_if_index = (u32) ~ 0;
+    }
+  else
+    {
       si->flags |= VNET_SW_INTERFACE_FLAG_UNNUMBERED;
       si->unnumbered_sw_if_index = inherit_from_sw_if_index;
-  }
-      
+    }
+
   return 0;
 }
 
+/* *INDENT-OFF* */
 VLIB_CLI_COMMAND (set_unnumbered_command, static) = {
   .path = "set interface unnumbered",
   .short_help = "set interface unnumbered [<intfc> use <intfc>][del <intfc>]",
   .function = set_unnumbered,
 };
+/* *INDENT-ON* */
 
 
 
 static clib_error_t *
 set_hw_class (vlib_main_t * vm,
-	      unformat_input_t * input,
-	      vlib_cli_command_t * cmd)
+	      unformat_input_t * input, vlib_cli_command_t * cmd)
 {
-  vnet_main_t * vnm = vnet_get_main();
-  vnet_interface_main_t * im = &vnm->interface_main;
-  clib_error_t * error;
+  vnet_main_t *vnm = vnet_get_main ();
+  vnet_interface_main_t *im = &vnm->interface_main;
+  clib_error_t *error;
   u32 hw_if_index, hw_class_index;
 
   hw_if_index = ~0;
-  if (! unformat_user (input, unformat_vnet_hw_interface, vnm, &hw_if_index))
+  if (!unformat_user (input, unformat_vnet_hw_interface, vnm, &hw_if_index))
     {
       error = clib_error_return (0, "unknown hardware interface `%U'",
 				 format_unformat_error, input);
       goto done;
     }
 
-  if (! unformat_user (input, unformat_hash_string,
-		       im->hw_interface_class_by_name, &hw_class_index))
+  if (!unformat_user (input, unformat_hash_string,
+		      im->hw_interface_class_by_name, &hw_class_index))
     {
       error = clib_error_return (0, "unknown hardware class `%U'",
 				 format_unformat_error, input);
@@ -745,36 +800,41 @@
   if (error)
     goto done;
 
- done:
+done:
   return error;
 }
 
+/* *INDENT-OFF* */
 VLIB_CLI_COMMAND (set_hw_class_command, static) = {
   .path = "set interface hw-class",
   .short_help = "Set interface hardware class",
   .function = set_hw_class,
 };
+/* *INDENT-ON* */
 
-static clib_error_t * vnet_interface_cli_init (vlib_main_t * vm)
-{ return 0; }
+static clib_error_t *
+vnet_interface_cli_init (vlib_main_t * vm)
+{
+  return 0;
+}
 
 VLIB_INIT_FUNCTION (vnet_interface_cli_init);
 
-static clib_error_t * 
+static clib_error_t *
 renumber_interface_command_fn (vlib_main_t * vm,
-                               unformat_input_t * input,
-                               vlib_cli_command_t * cmd)
+			       unformat_input_t * input,
+			       vlib_cli_command_t * cmd)
 {
   u32 hw_if_index;
   u32 new_dev_instance;
-  vnet_main_t * vnm = vnet_get_main();
+  vnet_main_t *vnm = vnet_get_main ();
   int rv;
 
-  if (! unformat_user (input, unformat_vnet_hw_interface, vnm, &hw_if_index))
+  if (!unformat_user (input, unformat_vnet_hw_interface, vnm, &hw_if_index))
     return clib_error_return (0, "unknown hardware interface `%U'",
-                              format_unformat_error, input);
+			      format_unformat_error, input);
 
-  if (! unformat (input, "%d", &new_dev_instance))
+  if (!unformat (input, "%d", &new_dev_instance))
     return clib_error_return (0, "new dev instance missing");
 
   rv = vnet_interface_name_renumber (hw_if_index, new_dev_instance);
@@ -786,7 +846,7 @@
 
     default:
       return clib_error_return (0, "vnet_interface_name_renumber returned %d",
-                                rv);
+				rv);
 
     }
 
@@ -794,32 +854,33 @@
 }
 
 
+/* *INDENT-OFF* */
 VLIB_CLI_COMMAND (renumber_interface_command, static) = {
   .path = "renumber interface",
   .short_help = "renumber interface <if-name> <new-dev-instance>",
   .function = renumber_interface_command_fn,
 };
+/* *INDENT-ON* */
 
 static clib_error_t *
 promiscuous_cmd (vlib_main_t * vm,
-                 unformat_input_t * input,
-                 vlib_cli_command_t * cmd)
+		 unformat_input_t * input, vlib_cli_command_t * cmd)
 {
-  vnet_main_t * vnm = vnet_get_main();
+  vnet_main_t *vnm = vnet_get_main ();
   u32 hw_if_index;
   u32 flags = ETHERNET_INTERFACE_FLAG_ACCEPT_ALL;
-  ethernet_main_t * em = &ethernet_main;
-  ethernet_interface_t * eif;
+  ethernet_main_t *em = &ethernet_main;
+  ethernet_interface_t *eif;
 
   if (unformat (input, "on %U",
-                unformat_vnet_hw_interface, vnm, &hw_if_index))
+		unformat_vnet_hw_interface, vnm, &hw_if_index))
     ;
   else if (unformat (input, "off %U",
-                     unformat_ethernet_interface, vnm, &hw_if_index))
+		     unformat_ethernet_interface, vnm, &hw_if_index))
     flags = 0;
   else
     return clib_error_return (0, "unknown input `%U'",
-                              format_unformat_error, input);
+			      format_unformat_error, input);
 
   eif = ethernet_get_interface (em, hw_if_index);
   if (!eif)
@@ -829,28 +890,30 @@
   return 0;
 }
 
+/* *INDENT-OFF* */
 VLIB_CLI_COMMAND (set_interface_promiscuous_cmd, static) = {
   .path = "set interface promiscuous",
   .short_help = "set interface promiscuous [on | off] <intfc>",
   .function = promiscuous_cmd,
 };
+/* *INDENT-ON* */
 
 static clib_error_t *
 mtu_cmd (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd)
 {
-  vnet_main_t * vnm = vnet_get_main();
+  vnet_main_t *vnm = vnet_get_main ();
   u32 hw_if_index, mtu;
   u32 flags = ETHERNET_INTERFACE_FLAG_MTU;
-  ethernet_main_t * em = &ethernet_main;
+  ethernet_main_t *em = &ethernet_main;
 
   if (unformat (input, "%d %U", &mtu,
-                unformat_vnet_hw_interface, vnm, &hw_if_index))
+		unformat_vnet_hw_interface, vnm, &hw_if_index))
     {
-      vnet_hw_interface_t * hi = vnet_get_hw_interface (vnm, hw_if_index);
-      ethernet_interface_t * eif = ethernet_get_interface (em, hw_if_index);
+      vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, hw_if_index);
+      ethernet_interface_t *eif = ethernet_get_interface (em, hw_if_index);
 
       if (!eif)
-        return clib_error_return (0, "not supported");
+	return clib_error_return (0, "not supported");
 
       if (mtu < hi->min_supported_packet_bytes)
 	return clib_error_return (0, "Invalid mtu (%d): "
@@ -859,7 +922,7 @@
 
       if (mtu > hi->max_supported_packet_bytes)
 	return clib_error_return (0, "Invalid mtu (%d): must be <= (%d)", mtu,
-                                  hi->max_supported_packet_bytes);
+				  hi->max_supported_packet_bytes);
 
       if (hi->max_packet_bytes != mtu)
 	{
@@ -869,13 +932,22 @@
     }
   else
     return clib_error_return (0, "unknown input `%U'",
-                              format_unformat_error, input);
+			      format_unformat_error, input);
   return 0;
 }
 
+/* *INDENT-OFF* */
 VLIB_CLI_COMMAND (set_interface_mtu_cmd, static) = {
   .path = "set interface mtu",
   .short_help = "set interface mtu <value> <intfc>",
   .function = mtu_cmd,
 };
+/* *INDENT-ON* */
 
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
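
Note: the hunks above wrap each VLIB_CLI_COMMAND initializer in /* *INDENT-OFF* */ ... /* *INDENT-ON* */ so the GNU indent pass used by the style check leaves the designated initializers untouched. A minimal sketch of a new command following the same pattern -- the command name, path, and handler below are hypothetical; only the macro usage and the guards mirror the change:

/* Hypothetical example -- not part of this patch. */
static clib_error_t *
example_interface_cmd_fn (vlib_main_t * vm,
			  unformat_input_t * input, vlib_cli_command_t * cmd)
{
  vnet_main_t *vnm = vnet_get_main ();
  u32 hw_if_index;

  if (!unformat_user (input, unformat_vnet_hw_interface, vnm, &hw_if_index))
    return clib_error_return (0, "unknown hardware interface `%U'",
			      format_unformat_error, input);
  return 0;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (example_interface_cmd, static) = {
  .path = "example interface",
  .short_help = "example interface <intfc>",
  .function = example_interface_cmd_fn,
};
/* *INDENT-ON* */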
diff --git a/vnet/vnet/interface_format.c b/vnet/vnet/interface_format.c
index 319113f..b3a3062 100644
--- a/vnet/vnet/interface_format.c
+++ b/vnet/vnet/interface_format.c
@@ -40,35 +40,36 @@
 #include <vnet/vnet.h>
 #include <vppinfra/bitmap.h>
 
-u8 * format_vnet_sw_interface_flags (u8 * s, va_list * args)
+u8 *
+format_vnet_sw_interface_flags (u8 * s, va_list * args)
 {
   u32 flags = va_arg (*args, u32);
 
-  if (flags & VNET_SW_INTERFACE_FLAG_BOND_SLAVE) 
+  if (flags & VNET_SW_INTERFACE_FLAG_BOND_SLAVE)
     s = format (s, "bond-slave");
-  else 
+  else
     {
-      s = format (s, "%s", 
+      s = format (s, "%s",
 		  (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) ? "up" : "down");
-      if (flags & VNET_SW_INTERFACE_FLAG_PUNT) 
+      if (flags & VNET_SW_INTERFACE_FLAG_PUNT)
 	s = format (s, "/punt");
     }
 
   return s;
 }
 
-u8 * format_vnet_hw_interface (u8 * s, va_list * args)
+u8 *
+format_vnet_hw_interface (u8 * s, va_list * args)
 {
-  vnet_main_t * vnm = va_arg (*args, vnet_main_t *);
-  vnet_hw_interface_t * hi = va_arg (*args, vnet_hw_interface_t *);
-  vnet_hw_interface_class_t * hw_class;
-  vnet_device_class_t * dev_class;
+  vnet_main_t *vnm = va_arg (*args, vnet_main_t *);
+  vnet_hw_interface_t *hi = va_arg (*args, vnet_hw_interface_t *);
+  vnet_hw_interface_class_t *hw_class;
+  vnet_device_class_t *dev_class;
   int verbose = va_arg (*args, int);
   uword indent;
 
-  if (! hi)
-    return format (s, "%=32s%=6s%=8s%s",
-		   "Name", "Idx", "Link", "Hardware");
+  if (!hi)
+    return format (s, "%=32s%=6s%=8s%s", "Name", "Idx", "Link", "Hardware");
 
   indent = format_get_indent (s);
 
@@ -77,19 +78,20 @@
   if (hi->bond_info == VNET_HW_INTERFACE_BOND_INFO_SLAVE)
     s = format (s, "%=8s", "slave");
   else
-    s = format (s, "%=8s", 
+    s = format (s, "%=8s",
 		hi->flags & VNET_HW_INTERFACE_FLAG_LINK_UP ? "up" : "down");
 
   hw_class = vnet_get_hw_interface_class (vnm, hi->hw_class_index);
   dev_class = vnet_get_device_class (vnm, hi->dev_class_index);
 
-  if (hi->bond_info && (hi->bond_info != VNET_HW_INTERFACE_BOND_INFO_SLAVE)) 
+  if (hi->bond_info && (hi->bond_info != VNET_HW_INTERFACE_BOND_INFO_SLAVE))
     {
       int hw_idx;
       s = format (s, "Slave-Idx:");
-      clib_bitmap_foreach (hw_idx, hi->bond_info, s = format(s, " %d", hw_idx));
+      clib_bitmap_foreach (hw_idx, hi->bond_info, s =
+			   format (s, " %d", hw_idx));
     }
-  else if (dev_class->format_device_name)  
+  else if (dev_class->format_device_name)
     s = format (s, "%U", dev_class->format_device_name, hi->dev_instance);
   else
     s = format (s, "%s%d", dev_class->name, hi->dev_instance);
@@ -103,10 +105,11 @@
       else
 	{
 	  s = format (s, "\n%U%s",
-		      format_white_space, indent + 2,
-		      hw_class->name);
+		      format_white_space, indent + 2, hw_class->name);
 	  if (hw_class->format_address && vec_len (hi->hw_address) > 0)
-	    s = format (s, " address %U", hw_class->format_address, hi->hw_address);
+	    s =
+	      format (s, " address %U", hw_class->format_address,
+		      hi->hw_address);
 	}
 
       if (dev_class->format_device)
@@ -118,12 +121,14 @@
   return s;
 }
 
-u8 * format_vnet_sw_interface_name (u8 * s, va_list * args)
+u8 *
+format_vnet_sw_interface_name (u8 * s, va_list * args)
 {
-  vnet_main_t * vnm = va_arg (*args, vnet_main_t *);
-  vnet_sw_interface_t * si = va_arg (*args, vnet_sw_interface_t *);
-  vnet_sw_interface_t * si_sup = vnet_get_sup_sw_interface (vnm, si->sw_if_index);
-  vnet_hw_interface_t * hi_sup;
+  vnet_main_t *vnm = va_arg (*args, vnet_main_t *);
+  vnet_sw_interface_t *si = va_arg (*args, vnet_sw_interface_t *);
+  vnet_sw_interface_t *si_sup =
+    vnet_get_sup_sw_interface (vnm, si->sw_if_index);
+  vnet_hw_interface_t *hi_sup;
 
   ASSERT (si_sup->type == VNET_SW_INTERFACE_TYPE_HARDWARE);
   hi_sup = vnet_get_hw_interface (vnm, si_sup->hw_if_index);
@@ -136,21 +141,23 @@
   return s;
 }
 
-u8 * format_vnet_sw_if_index_name (u8 * s, va_list * args)
+u8 *
+format_vnet_sw_if_index_name (u8 * s, va_list * args)
 {
-  vnet_main_t * vnm = va_arg (*args, vnet_main_t *);
+  vnet_main_t *vnm = va_arg (*args, vnet_main_t *);
   u32 sw_if_index = va_arg (*args, u32);
   return format (s, "%U",
 		 format_vnet_sw_interface_name, vnm,
 		 vnet_get_sw_interface (vnm, sw_if_index));
 }
 
-u8 * format_vnet_sw_interface_cntrs (u8 * s, vnet_interface_main_t * im,
-                                     vnet_sw_interface_t * si)
+u8 *
+format_vnet_sw_interface_cntrs (u8 * s, vnet_interface_main_t * im,
+				vnet_sw_interface_t * si)
 {
   uword indent, n_printed;
   int i, j, n_counters;
-  static vnet_main_t ** my_vnet_mains;
+  static vnet_main_t **my_vnet_mains;
 
   vec_reset_length (my_vnet_mains);
 
@@ -158,18 +165,18 @@
   n_printed = 0;
 
   {
-    vlib_combined_counter_main_t * cm;
+    vlib_combined_counter_main_t *cm;
     vlib_counter_t v, vtotal;
-    u8 * n = 0;
+    u8 *n = 0;
 
     for (i = 0; i < vec_len (vnet_mains); i++)
       {
-        if (vnet_mains[i])
-          vec_add1 (my_vnet_mains, vnet_mains[i]);
+	if (vnet_mains[i])
+	  vec_add1 (my_vnet_mains, vnet_mains[i]);
       }
 
-    if (vec_len(my_vnet_mains) == 0)
-        vec_add1 (my_vnet_mains, &vnet_main);
+    if (vec_len (my_vnet_mains) == 0)
+      vec_add1 (my_vnet_mains, &vnet_main);
 
     /* Each vnet_main_t has its own copy of the interface counters */
     n_counters = vec_len (im->combined_sw_if_counters);
@@ -177,17 +184,17 @@
     /* rx, tx counters... */
     for (j = 0; j < n_counters; j++)
       {
-        vtotal.packets = 0;
-        vtotal.bytes = 0;
+	vtotal.packets = 0;
+	vtotal.bytes = 0;
 
-        for (i = 0; i < vec_len(my_vnet_mains); i++)
-          {
-            im = &my_vnet_mains[i]->interface_main;
-            cm = im->combined_sw_if_counters + j;
-            vlib_get_combined_counter (cm, si->sw_if_index, &v);
-            vtotal.packets += v.packets;
-            vtotal.bytes += v.bytes;
-          }
+	for (i = 0; i < vec_len (my_vnet_mains); i++)
+	  {
+	    im = &my_vnet_mains[i]->interface_main;
+	    cm = im->combined_sw_if_counters + j;
+	    vlib_get_combined_counter (cm, si->sw_if_index, &v);
+	    vtotal.packets += v.packets;
+	    vtotal.bytes += v.bytes;
+	  }
 
 	/* Only display non-zero counters. */
 	if (vtotal.packets == 0)
@@ -205,30 +212,29 @@
 	_vec_len (n) = 0;
 	n = format (n, "%s bytes", cm->name);
 	s = format (s, "\n%U%-16v%16Ld",
-		    format_white_space, indent,
-		    n, vtotal.bytes);
+		    format_white_space, indent, n, vtotal.bytes);
       }
     vec_free (n);
   }
 
   {
-    vlib_simple_counter_main_t * cm;
-    u64 v, vtotal ;
+    vlib_simple_counter_main_t *cm;
+    u64 v, vtotal;
 
     n_counters = vec_len (im->sw_if_counters);
 
     for (j = 0; j < n_counters; j++)
       {
-        vtotal = 0;
+	vtotal = 0;
 
-        for (i = 0; i < vec_len(my_vnet_mains); i++)
-          {
-            im = &my_vnet_mains[i]->interface_main;
-            cm = im->sw_if_counters + j;
+	for (i = 0; i < vec_len (my_vnet_mains); i++)
+	  {
+	    im = &my_vnet_mains[i]->interface_main;
+	    cm = im->sw_if_counters + j;
 
-            v = vlib_get_simple_counter (cm, si->sw_if_index);
-            vtotal += v;
-          }
+	    v = vlib_get_simple_counter (cm, si->sw_if_index);
+	    vtotal += v;
+	  }
 
 	/* Only display non-zero counters. */
 	if (vtotal == 0)
@@ -245,13 +251,14 @@
   return s;
 }
 
-u8 * format_vnet_sw_interface (u8 * s, va_list * args)
+u8 *
+format_vnet_sw_interface (u8 * s, va_list * args)
 {
-  vnet_main_t * vnm = va_arg (*args, vnet_main_t *);
-  vnet_sw_interface_t * si = va_arg (*args, vnet_sw_interface_t *);
-  vnet_interface_main_t * im = &vnm->interface_main;
+  vnet_main_t *vnm = va_arg (*args, vnet_main_t *);
+  vnet_sw_interface_t *si = va_arg (*args, vnet_sw_interface_t *);
+  vnet_interface_main_t *im = &vnm->interface_main;
 
-  if (! si)
+  if (!si)
     return format (s, "%=32s%=5s%=16s%=16s%=16s",
 		   "Name", "Idx", "State", "Counter", "Count");
 
@@ -259,21 +266,22 @@
 	      format_vnet_sw_interface_name, vnm, si, si->sw_if_index,
 	      format_vnet_sw_interface_flags, si->flags);
 
-  s = format_vnet_sw_interface_cntrs(s, im, si);
+  s = format_vnet_sw_interface_cntrs (s, im, si);
 
   return s;
 }
 
-u8 * format_vnet_sw_interface_name_override (u8 * s, va_list * args)
+u8 *
+format_vnet_sw_interface_name_override (u8 * s, va_list * args)
 {
-  vnet_main_t * vnm = va_arg (*args, vnet_main_t *);
-  vnet_sw_interface_t * si = va_arg (*args, vnet_sw_interface_t *);
+  vnet_main_t *vnm = va_arg (*args, vnet_main_t *);
+  vnet_sw_interface_t *si = va_arg (*args, vnet_sw_interface_t *);
   /* caller supplied display name for this interface */
-  u8* name = va_arg (*args, u8*);
-  vnet_interface_main_t * im = &vnm->interface_main;
+  u8 *name = va_arg (*args, u8 *);
+  vnet_interface_main_t *im = &vnm->interface_main;
 
 
-  if (! si)
+  if (!si)
     return format (s, "%=32s%=5s%=16s%=16s%=16s",
 		   "Name", "Idx", "State", "Counter", "Count");
 
@@ -281,38 +289,40 @@
 	      name, si->sw_if_index,
 	      format_vnet_sw_interface_flags, si->flags);
 
-  s = format_vnet_sw_interface_cntrs(s, im, si);
+  s = format_vnet_sw_interface_cntrs (s, im, si);
 
   return s;
 }
 
-uword unformat_vnet_hw_interface (unformat_input_t * input, va_list * args)
+uword
+unformat_vnet_hw_interface (unformat_input_t * input, va_list * args)
 {
-  vnet_main_t * vnm = va_arg (*args, vnet_main_t *);
-  u32 * hw_if_index = va_arg (*args, u32 *);
-  vnet_interface_main_t * im = &vnm->interface_main;
-  vnet_device_class_t * c;
+  vnet_main_t *vnm = va_arg (*args, vnet_main_t *);
+  u32 *hw_if_index = va_arg (*args, u32 *);
+  vnet_interface_main_t *im = &vnm->interface_main;
+  vnet_device_class_t *c;
 
   /* Try per device class functions first. */
   vec_foreach (c, im->device_classes)
-    {
-      if (c->unformat_device_name
-	  && unformat_user (input, c->unformat_device_name, hw_if_index))
+  {
+    if (c->unformat_device_name
+	&& unformat_user (input, c->unformat_device_name, hw_if_index))
       return 1;
-    }
+  }
 
   return unformat_user (input, unformat_hash_vec_string,
 			im->hw_interface_by_name, hw_if_index);
 }
 
-uword unformat_vnet_sw_interface (unformat_input_t * input, va_list * args)
+uword
+unformat_vnet_sw_interface (unformat_input_t * input, va_list * args)
 {
-  vnet_main_t * vnm = va_arg (*args, vnet_main_t *);
-  u32 * result = va_arg (*args, u32 *);
-  vnet_hw_interface_t * hi;
+  vnet_main_t *vnm = va_arg (*args, vnet_main_t *);
+  u32 *result = va_arg (*args, u32 *);
+  vnet_hw_interface_t *hi;
   u32 hw_if_index, id, id_specified;
-  u8 * if_name = 0;
-  uword * p, error = 0;
+  u8 *if_name = 0;
+  uword *p, error = 0;
 
   id = ~0;
   if (unformat (input, "%_%v.%d%_", &if_name, &id)
@@ -321,31 +331,33 @@
       hw_if_index = p[0];
       id_specified = 1;
     }
-  else if (unformat (input, "%U", unformat_vnet_hw_interface, vnm, &hw_if_index))
+  else
+    if (unformat (input, "%U", unformat_vnet_hw_interface, vnm, &hw_if_index))
     id_specified = 0;
   else
     goto done;
 
   hi = vnet_get_hw_interface (vnm, hw_if_index);
-  if (! id_specified)
+  if (!id_specified)
     {
       *result = hi->sw_if_index;
     }
   else
     {
-      if (! (p = hash_get (hi->sub_interface_sw_if_index_by_id, id)))
+      if (!(p = hash_get (hi->sub_interface_sw_if_index_by_id, id)))
 	return 0;
       *result = p[0];
     }
   error = 1;
- done:
+done:
   vec_free (if_name);
   return error;
 }
 
-uword unformat_vnet_sw_interface_flags (unformat_input_t * input, va_list * args)
+uword
+unformat_vnet_sw_interface_flags (unformat_input_t * input, va_list * args)
 {
-  u32 * result = va_arg (*args, u32 *);
+  u32 *result = va_arg (*args, u32 *);
   u32 flags = 0;
 
   if (unformat (input, "up"))
@@ -363,9 +375,10 @@
   return 1;
 }
 
-uword unformat_vnet_hw_interface_flags (unformat_input_t * input, va_list * args)
+uword
+unformat_vnet_hw_interface_flags (unformat_input_t * input, va_list * args)
 {
-  u32 * result = va_arg (*args, u32 *);
+  u32 *result = va_arg (*args, u32 *);
   u32 flags = 0;
 
   if (unformat (input, "up"))
@@ -379,3 +392,10 @@
   return 1;
 }
 
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
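
Note: the format_*/unformat_* pairs reindented above are consumed through vppinfra's "%U" convention -- format () passes the string to the named format function along with its extra arguments, and unformat_user () does the reverse when parsing. A minimal usage sketch, assuming a valid vnm and sw_if_index; the helper name below is illustrative only:

/* Hypothetical usage sketch -- not part of this patch. */
static u8 *
describe_sw_interface (vnet_main_t * vnm, u32 sw_if_index)
{
  vnet_sw_interface_t *si = vnet_get_sw_interface (vnm, sw_if_index);
  u8 *s = 0;

  /* "%U" dispatches to the given format function with its arguments. */
  s = format (s, "%U: %U",
	      format_vnet_sw_interface_name, vnm, si,
	      format_vnet_sw_interface_flags, si->flags);
  return s;			/* caller releases the vector with vec_free (s) */
}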
diff --git a/vnet/vnet/interface_funcs.h b/vnet/vnet/interface_funcs.h
index 7832afc..81a819a 100644
--- a/vnet/vnet/interface_funcs.h
+++ b/vnet/vnet/interface_funcs.h
@@ -42,17 +42,21 @@
 
 always_inline vnet_hw_interface_t *
 vnet_get_hw_interface (vnet_main_t * vnm, u32 hw_if_index)
-{ return pool_elt_at_index (vnm->interface_main.hw_interfaces, hw_if_index); }
+{
+  return pool_elt_at_index (vnm->interface_main.hw_interfaces, hw_if_index);
+}
 
 always_inline vnet_sw_interface_t *
 vnet_get_sw_interface (vnet_main_t * vnm, u32 sw_if_index)
-{ return pool_elt_at_index (vnm->interface_main.sw_interfaces, sw_if_index); }
+{
+  return pool_elt_at_index (vnm->interface_main.sw_interfaces, sw_if_index);
+}
 
 always_inline vnet_sw_interface_t *
 vnet_get_hw_sw_interface (vnet_main_t * vnm, u32 hw_if_index)
 {
-  vnet_hw_interface_t * hw = vnet_get_hw_interface (vnm, hw_if_index);
-  vnet_sw_interface_t * sw = vnet_get_sw_interface (vnm, hw->sw_if_index);
+  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
+  vnet_sw_interface_t *sw = vnet_get_sw_interface (vnm, hw->sw_if_index);
   ASSERT (sw->type == VNET_SW_INTERFACE_TYPE_HARDWARE);
   return sw;
 }
@@ -60,7 +64,7 @@
 always_inline vnet_sw_interface_t *
 vnet_get_sup_sw_interface (vnet_main_t * vnm, u32 sw_if_index)
 {
-  vnet_sw_interface_t * sw = vnet_get_sw_interface (vnm, sw_if_index);
+  vnet_sw_interface_t *sw = vnet_get_sw_interface (vnm, sw_if_index);
   if (sw->type == VNET_SW_INTERFACE_TYPE_SUB)
     sw = vnet_get_sw_interface (vnm, sw->sup_sw_if_index);
   return sw;
@@ -69,29 +73,35 @@
 always_inline vnet_hw_interface_t *
 vnet_get_sup_hw_interface (vnet_main_t * vnm, u32 sw_if_index)
 {
-  vnet_sw_interface_t * sw = vnet_get_sup_sw_interface (vnm, sw_if_index);
+  vnet_sw_interface_t *sw = vnet_get_sup_sw_interface (vnm, sw_if_index);
   ASSERT (sw->type == VNET_SW_INTERFACE_TYPE_HARDWARE);
   return vnet_get_hw_interface (vnm, sw->hw_if_index);
 }
 
 always_inline vnet_hw_interface_class_t *
 vnet_get_hw_interface_class (vnet_main_t * vnm, u32 hw_class_index)
-{ return vec_elt_at_index (vnm->interface_main.hw_interface_classes, hw_class_index); }
+{
+  return vec_elt_at_index (vnm->interface_main.hw_interface_classes,
+			   hw_class_index);
+}
 
 always_inline vnet_device_class_t *
 vnet_get_device_class (vnet_main_t * vnm, u32 dev_class_index)
-{ return vec_elt_at_index (vnm->interface_main.device_classes, dev_class_index); }
+{
+  return vec_elt_at_index (vnm->interface_main.device_classes,
+			   dev_class_index);
+}
 
 /* Register a hardware interface instance. */
 u32 vnet_register_interface (vnet_main_t * vnm,
 			     u32 dev_class_index,
 			     u32 dev_instance,
-			     u32 hw_class_index,
-			     u32 hw_instance);
+			     u32 hw_class_index, u32 hw_instance);
 
 /* Creates a software interface given template. */
-clib_error_t *
-vnet_create_sw_interface (vnet_main_t * vnm, vnet_sw_interface_t * template, u32 * sw_if_index);
+clib_error_t *vnet_create_sw_interface (vnet_main_t * vnm,
+					vnet_sw_interface_t * template,
+					u32 * sw_if_index);
 
 void vnet_delete_hw_interface (vnet_main_t * vnm, u32 hw_if_index);
 void vnet_delete_sw_interface (vnet_main_t * vnm, u32 sw_if_index);
@@ -99,60 +109,68 @@
 always_inline uword
 vnet_sw_interface_get_flags (vnet_main_t * vnm, u32 sw_if_index)
 {
-  vnet_sw_interface_t * sw = vnet_get_sw_interface (vnm, sw_if_index);
+  vnet_sw_interface_t *sw = vnet_get_sw_interface (vnm, sw_if_index);
   return sw->flags;
 }
 
 always_inline uword
 vnet_sw_interface_is_admin_up (vnet_main_t * vnm, u32 sw_if_index)
-{ return (vnet_sw_interface_get_flags (vnm, sw_if_index) & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0; }
+{
+  return (vnet_sw_interface_get_flags (vnm, sw_if_index) &
+	  VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
+}
 
 always_inline uword
 vnet_hw_interface_get_flags (vnet_main_t * vnm, u32 hw_if_index)
 {
-  vnet_hw_interface_t * hw = vnet_get_hw_interface (vnm, hw_if_index);
+  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
   return hw->flags;
 }
 
 always_inline uword
 vnet_hw_interface_is_link_up (vnet_main_t * vnm, u32 hw_if_index)
-{ return (vnet_hw_interface_get_flags (vnm, hw_if_index) & VNET_HW_INTERFACE_FLAG_LINK_UP) != 0; }
+{
+  return (vnet_hw_interface_get_flags (vnm, hw_if_index) &
+	  VNET_HW_INTERFACE_FLAG_LINK_UP) != 0;
+}
 
 always_inline vlib_frame_t *
 vnet_get_frame_to_sw_interface (vnet_main_t * vnm, u32 sw_if_index)
 {
-  vnet_hw_interface_t * hw = vnet_get_sup_hw_interface (vnm, sw_if_index);
+  vnet_hw_interface_t *hw = vnet_get_sup_hw_interface (vnm, sw_if_index);
   return vlib_get_frame_to_node (vnm->vlib_main, hw->output_node_index);
 }
 
 always_inline void
-vnet_put_frame_to_sw_interface (vnet_main_t * vnm, u32 sw_if_index, vlib_frame_t * f)
+vnet_put_frame_to_sw_interface (vnet_main_t * vnm, u32 sw_if_index,
+				vlib_frame_t * f)
 {
-  vnet_hw_interface_t * hw = vnet_get_sup_hw_interface (vnm, sw_if_index);
+  vnet_hw_interface_t *hw = vnet_get_sup_hw_interface (vnm, sw_if_index);
   return vlib_put_frame_to_node (vnm->vlib_main, hw->output_node_index, f);
 }
 
 /* Change interface flags (e.g. up, down, enable, disable). */
-clib_error_t *
-vnet_hw_interface_set_flags (vnet_main_t * vnm, u32 hw_if_index, u32 flags);
+clib_error_t *vnet_hw_interface_set_flags (vnet_main_t * vnm, u32 hw_if_index,
+					   u32 flags);
 
 /* Change interface flags (e.g. up, down, enable, disable). */
-clib_error_t *
-vnet_sw_interface_set_flags (vnet_main_t * vnm, u32 sw_if_index, u32 flags);
+clib_error_t *vnet_sw_interface_set_flags (vnet_main_t * vnm, u32 sw_if_index,
+					   u32 flags);
 
 /* Change interface class. */
-clib_error_t *
-vnet_hw_interface_set_class (vnet_main_t * vnm, u32 hw_if_index, u32 new_hw_class_index);
+clib_error_t *vnet_hw_interface_set_class (vnet_main_t * vnm, u32 hw_if_index,
+					   u32 new_hw_class_index);
 
 /* Redirect rx pkts to node */
 int vnet_hw_interface_rx_redirect_to_node (vnet_main_t * vnm, u32 hw_if_index,
-                                           u32 node_index);
+					   u32 node_index);
 
-void vnet_hw_interface_init_for_class (vnet_main_t * vnm, u32 hw_if_index, u32 hw_class_index, u32 hw_instance);
+void vnet_hw_interface_init_for_class (vnet_main_t * vnm, u32 hw_if_index,
+				       u32 hw_class_index, u32 hw_instance);
 
 /* Rename interface */
-clib_error_t *
-vnet_rename_interface (vnet_main_t * vnm, u32  hw_if_index, char * new_name);
+clib_error_t *vnet_rename_interface (vnet_main_t * vnm, u32 hw_if_index,
+				     char *new_name);
 
 /* Formats sw/hw interface. */
 format_function_t format_vnet_hw_interface;
@@ -171,7 +189,8 @@
 unformat_function_t unformat_vnet_sw_interface_flags;
 
 /* Node runtime for interface output function. */
-typedef struct {
+typedef struct
+{
   u32 hw_if_index;
   u32 sw_if_index;
   u32 dev_instance;
@@ -179,35 +198,49 @@
 } vnet_interface_output_runtime_t;
 
 /* Interface output functions. */
-void * vnet_interface_output_node_multiarch_select (void);
-void * vnet_interface_output_node_no_flatten_multiarch_select (void);
+void *vnet_interface_output_node_multiarch_select (void);
+void *vnet_interface_output_node_no_flatten_multiarch_select (void);
 
-word vnet_sw_interface_compare (vnet_main_t * vnm, uword sw_if_index0, uword sw_if_index1);
-word vnet_hw_interface_compare (vnet_main_t * vnm, uword hw_if_index0, uword hw_if_index1);
+word vnet_sw_interface_compare (vnet_main_t * vnm, uword sw_if_index0,
+				uword sw_if_index1);
+word vnet_hw_interface_compare (vnet_main_t * vnm, uword hw_if_index0,
+				uword hw_if_index1);
 
-typedef enum {
+typedef enum
+{
 #define _(sym,str) VNET_INTERFACE_OUTPUT_NEXT_##sym,
   foreach_intf_output_feat
 #undef _
-  VNET_INTERFACE_OUTPUT_NEXT_DROP,
+    VNET_INTERFACE_OUTPUT_NEXT_DROP,
   VNET_INTERFACE_OUTPUT_NEXT_TX,
 } vnet_interface_output_next_t;
 
-typedef enum {
+typedef enum
+{
   VNET_INTERFACE_TX_NEXT_DROP,
   VNET_INTERFACE_TX_N_NEXT,
 } vnet_interface_tx_next_t;
 
 #define VNET_SIMULATED_ETHERNET_TX_NEXT_ETHERNET_INPUT VNET_INTERFACE_TX_N_NEXT
 
-typedef enum {
+typedef enum
+{
   VNET_INTERFACE_OUTPUT_ERROR_INTERFACE_DOWN,
   VNET_INTERFACE_OUTPUT_ERROR_INTERFACE_DELETED,
 } vnet_interface_output_error_t;
 
 /* Format for interface output traces. */
-u8 * format_vnet_interface_output_trace (u8 * s, va_list * va);
+u8 *format_vnet_interface_output_trace (u8 * s, va_list * va);
 
-serialize_function_t serialize_vnet_interface_state, unserialize_vnet_interface_state;
+serialize_function_t serialize_vnet_interface_state,
+  unserialize_vnet_interface_state;
 
 #endif /* included_vnet_interface_funcs_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
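
Note: the inline accessors reformatted above resolve a software interface to its supervising hardware interface and expose the admin/link flag tests. A minimal sketch of the typical call chain; the wrapper name below is illustrative rather than part of the patch:

/* Hypothetical usage sketch -- not part of this patch. */
static int
sw_interface_is_usable (vnet_main_t * vnm, u32 sw_if_index)
{
  /* A sub-interface is resolved to its parent before the hardware lookup. */
  vnet_hw_interface_t *hw = vnet_get_sup_hw_interface (vnm, sw_if_index);

  return vnet_sw_interface_is_admin_up (vnm, sw_if_index)
    && vnet_hw_interface_is_link_up (vnm, hw->hw_if_index);
}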
diff --git a/vnet/vnet/interface_output.c b/vnet/vnet/interface_output.c
index b19ca7c..173bb75 100644
--- a/vnet/vnet/interface_output.c
+++ b/vnet/vnet/interface_output.c
@@ -39,30 +39,34 @@
 
 #include <vnet/vnet.h>
 
-typedef struct {
+typedef struct
+{
   u32 sw_if_index;
   u8 data[128 - sizeof (u32)];
-} interface_output_trace_t;
+}
+interface_output_trace_t;
 
-u8 * format_vnet_interface_output_trace (u8 * s, va_list * va)
+u8 *
+format_vnet_interface_output_trace (u8 * s, va_list * va)
 {
   CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
-  vlib_node_t * node = va_arg (*va, vlib_node_t *);
-  interface_output_trace_t * t = va_arg (*va, interface_output_trace_t *);
-  vnet_main_t * vnm = vnet_get_main();
-  vnet_sw_interface_t * si;
+  vlib_node_t *node = va_arg (*va, vlib_node_t *);
+  interface_output_trace_t *t = va_arg (*va, interface_output_trace_t *);
+  vnet_main_t *vnm = vnet_get_main ();
+  vnet_sw_interface_t *si;
   uword indent;
 
-  if (t->sw_if_index != (u32)~0)
+  if (t->sw_if_index != (u32) ~ 0)
     {
       si = vnet_get_sw_interface (vnm, t->sw_if_index);
       indent = format_get_indent (s);
-      
+
       s = format (s, "%U\n%U%U",
-                  format_vnet_sw_interface_name, vnm, si,
-                  format_white_space, indent,
-                  node->format_buffer ? node->format_buffer : format_hex_bytes,
-                  t->data, sizeof (t->data));
+		  format_vnet_sw_interface_name, vnm, si,
+		  format_white_space, indent,
+		  node->format_buffer ? node->
+		  format_buffer : format_hex_bytes, t->data,
+		  sizeof (t->data));
     }
   return s;
 }
@@ -70,19 +74,18 @@
 static void
 vnet_interface_output_trace (vlib_main_t * vm,
 			     vlib_node_runtime_t * node,
-			     vlib_frame_t * frame,
-			     uword n_buffers)
+			     vlib_frame_t * frame, uword n_buffers)
 {
-  u32 n_left, * from;
+  u32 n_left, *from;
 
   n_left = n_buffers;
   from = vlib_frame_args (frame);
-  
+
   while (n_left >= 4)
     {
       u32 bi0, bi1;
-      vlib_buffer_t * b0, * b1;
-      interface_output_trace_t * t0, * t1;
+      vlib_buffer_t *b0, *b1;
+      interface_output_trace_t *t0, *t1;
 
       /* Prefetch next iteration. */
       vlib_prefetch_buffer_with_index (vm, from[2], LOAD);
@@ -99,14 +102,14 @@
 	  t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
 	  t0->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_TX];
 	  clib_memcpy (t0->data, vlib_buffer_get_current (b0),
-		  sizeof (t0->data));
+		       sizeof (t0->data));
 	}
       if (b1->flags & VLIB_BUFFER_IS_TRACED)
 	{
 	  t1 = vlib_add_trace (vm, node, b1, sizeof (t1[0]));
 	  t1->sw_if_index = vnet_buffer (b1)->sw_if_index[VLIB_TX];
 	  clib_memcpy (t1->data, vlib_buffer_get_current (b1),
-		  sizeof (t1->data));
+		       sizeof (t1->data));
 	}
       from += 2;
       n_left -= 2;
@@ -115,8 +118,8 @@
   while (n_left >= 1)
     {
       u32 bi0;
-      vlib_buffer_t * b0;
-      interface_output_trace_t * t0;
+      vlib_buffer_t *b0;
+      interface_output_trace_t *t0;
 
       bi0 = from[0];
 
@@ -127,7 +130,7 @@
 	  t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
 	  t0->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_TX];
 	  clib_memcpy (t0->data, vlib_buffer_get_current (b0),
-		  sizeof (t0->data));
+		       sizeof (t0->data));
 	}
       from += 1;
       n_left -= 1;
@@ -138,9 +141,7 @@
 slow_path (vlib_main_t * vm,
 	   u32 bi,
 	   vlib_buffer_t * b,
-	   u32 n_left_to_tx,
-	   u32 * to_tx,
-	   u32 * n_slow_bytes_result)
+	   u32 n_left_to_tx, u32 * to_tx, u32 * n_slow_bytes_result)
 {
   /* We've already enqueued a single buffer. */
   u32 n_buffers = 0;
@@ -155,10 +156,10 @@
       n_slow_bytes += vlib_buffer_length_in_chain (vm, b);
 
       /* Be grumpy about zero length buffers for benefit of
-	 driver tx function. */
+         driver tx function. */
       ASSERT (b->current_length > 0);
 
-      if (! (b->flags & VLIB_BUFFER_NEXT_PRESENT))
+      if (!(b->flags & VLIB_BUFFER_NEXT_PRESENT))
 	break;
 
       bi = b->next_buffer;
@@ -173,52 +174,53 @@
   return n_buffers;
 }
 
-/* 
- * Increment TX stats. Roll up consecutive increments to the same sw_if_index 
+/*
+ * Increment TX stats. Roll up consecutive increments to the same sw_if_index
  * into one increment.
  */
-static_always_inline
-void incr_output_stats (vnet_main_t * vnm,
-                        u32 cpu_index, 
-                        u32 length, 
-                        u32 sw_if_index,
-                        u32 * last_sw_if_index, 
-                        u32 * n_packets, 
-                        u32 * n_bytes) {
-  vnet_interface_main_t * im;
+static_always_inline void
+incr_output_stats (vnet_main_t * vnm,
+		   u32 cpu_index,
+		   u32 length,
+		   u32 sw_if_index,
+		   u32 * last_sw_if_index, u32 * n_packets, u32 * n_bytes)
+{
+  vnet_interface_main_t *im;
 
-  if (PREDICT_TRUE (sw_if_index == *last_sw_if_index)) {
-    *n_packets += 1;
-    *n_bytes += length;
-  } else {
-    if (PREDICT_TRUE (*last_sw_if_index != ~0)) {
-      im = &vnm->interface_main;
-
-      vlib_increment_combined_counter (im->combined_sw_if_counters
-                                       + VNET_INTERFACE_COUNTER_TX,
-                                       cpu_index, 
-                                       *last_sw_if_index,
-				       *n_packets,
-				       *n_bytes);
+  if (PREDICT_TRUE (sw_if_index == *last_sw_if_index))
+    {
+      *n_packets += 1;
+      *n_bytes += length;
     }
-    *last_sw_if_index = sw_if_index;
-    *n_packets = 1;
-    *n_bytes = length;
-  }
+  else
+    {
+      if (PREDICT_TRUE (*last_sw_if_index != ~0))
+	{
+	  im = &vnm->interface_main;
+
+	  vlib_increment_combined_counter (im->combined_sw_if_counters
+					   + VNET_INTERFACE_COUNTER_TX,
+					   cpu_index,
+					   *last_sw_if_index,
+					   *n_packets, *n_bytes);
+	}
+      *last_sw_if_index = sw_if_index;
+      *n_packets = 1;
+      *n_bytes = length;
+    }
 }
 
 
 /* Interface output functions. */
 uword
 vnet_interface_output_node (vlib_main_t * vm,
-			    vlib_node_runtime_t * node,
-			    vlib_frame_t * frame)
+			    vlib_node_runtime_t * node, vlib_frame_t * frame)
 {
-  vnet_main_t * vnm = vnet_get_main();
-  vnet_interface_output_runtime_t * rt = (void *) node->runtime_data;
-  vnet_sw_interface_t * si;
-  vnet_hw_interface_t * hi;
-  u32 n_left_to_tx, * from, * from_end, * to_tx;
+  vnet_main_t *vnm = vnet_get_main ();
+  vnet_interface_output_runtime_t *rt = (void *) node->runtime_data;
+  vnet_sw_interface_t *si;
+  vnet_hw_interface_t *hi;
+  u32 n_left_to_tx, *from, *from_end, *to_tx;
   u32 n_bytes, n_buffers, n_packets;
   u32 last_sw_if_index;
   u32 cpu_index = vm->cpu_index;
@@ -231,8 +233,7 @@
   from = vlib_frame_args (frame);
 
   if (rt->is_deleted)
-    return vlib_error_drop_buffers (vm, node,
-				    from,
+    return vlib_error_drop_buffers (vm, node, from,
 				    /* buffer stride */ 1,
 				    n_buffers,
 				    VNET_INTERFACE_OUTPUT_NEXT_DROP,
@@ -241,22 +242,21 @@
 
   si = vnet_get_sw_interface (vnm, rt->sw_if_index);
   hi = vnet_get_sup_hw_interface (vnm, rt->sw_if_index);
-  if (! (si->flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) ||
-      ! (hi->flags & VNET_HW_INTERFACE_FLAG_LINK_UP))
+  if (!(si->flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) ||
+      !(hi->flags & VNET_HW_INTERFACE_FLAG_LINK_UP))
     {
-      vlib_simple_counter_main_t * cm;
-     
+      vlib_simple_counter_main_t *cm;
+
       cm = vec_elt_at_index (vnm->interface_main.sw_if_counters,
-                             VNET_INTERFACE_COUNTER_TX_ERROR);
+			     VNET_INTERFACE_COUNTER_TX_ERROR);
       vlib_increment_simple_counter (cm, cpu_index,
-                                     rt->sw_if_index, n_buffers);
-      return vlib_error_drop_buffers (vm, node,
-                                      from,
-                                      /* buffer stride */ 1,
-                                      n_buffers,
-                                      VNET_INTERFACE_OUTPUT_NEXT_DROP,
-                                      node->node_index,
-                                   VNET_INTERFACE_OUTPUT_ERROR_INTERFACE_DOWN);
+				     rt->sw_if_index, n_buffers);
+      return vlib_error_drop_buffers (vm, node, from,
+				      /* buffer stride */ 1,
+				      n_buffers,
+				      VNET_INTERFACE_OUTPUT_NEXT_DROP,
+				      node->node_index,
+				      VNET_INTERFACE_OUTPUT_ERROR_INTERFACE_DOWN);
     }
 
   from_end = from + n_buffers;
@@ -269,14 +269,14 @@
   while (from < from_end)
     {
       /* Get new next frame since previous incomplete frame may have less
-	 than VNET_FRAME_SIZE vectors in it. */
+         than VNET_FRAME_SIZE vectors in it. */
       vlib_get_new_next_frame (vm, node, VNET_INTERFACE_OUTPUT_NEXT_TX,
 			       to_tx, n_left_to_tx);
 
       while (from + 4 <= from_end && n_left_to_tx >= 2)
 	{
 	  u32 bi0, bi1;
-	  vlib_buffer_t * b0, * b1;
+	  vlib_buffer_t *b0, *b1;
 
 	  /* Prefetch next iteration. */
 	  vlib_prefetch_buffer_with_index (vm, from[2], LOAD);
@@ -298,7 +298,8 @@
 	  ASSERT (b0->current_length > 0);
 	  ASSERT (b1->current_length > 0);
 
-	  if (PREDICT_FALSE ((b0->flags | b1->flags) & VLIB_BUFFER_NEXT_PRESENT))
+	  if (PREDICT_FALSE
+	      ((b0->flags | b1->flags) & VLIB_BUFFER_NEXT_PRESENT))
 	    {
 	      u32 n_buffers, n_slow_bytes, i;
 
@@ -311,7 +312,7 @@
 	      for (i = 0; i < 2; i++)
 		{
 		  u32 bi = i ? bi1 : bi0;
-		  vlib_buffer_t * b = i ? b1 : b0;
+		  vlib_buffer_t *b = i ? b1 : b0;
 
 		  n_buffers = slow_path (vm, bi, b,
 					 n_left_to_tx, to_tx, &n_slow_bytes);
@@ -324,25 +325,27 @@
 		  to_tx += n_buffers;
 		  n_left_to_tx -= n_buffers;
 		  incr_output_stats (vnm, cpu_index, n_slow_bytes,
-				     vnet_buffer(b)->sw_if_index[VLIB_TX],
-                                     &last_sw_if_index, &n_packets, &n_bytes);
+				     vnet_buffer (b)->sw_if_index[VLIB_TX],
+				     &last_sw_if_index, &n_packets, &n_bytes);
 		}
-            } else {
-	      incr_output_stats (vnm, cpu_index, 
-                                 vlib_buffer_length_in_chain (vm, b0),
-				 vnet_buffer(b0)->sw_if_index[VLIB_TX],
-                                 &last_sw_if_index, &n_packets, &n_bytes);
-	      incr_output_stats (vnm, cpu_index, 
-                                 vlib_buffer_length_in_chain (vm, b0),
-				 vnet_buffer(b1)->sw_if_index[VLIB_TX],
-                                 &last_sw_if_index, &n_packets, &n_bytes);
-            }
+	    }
+	  else
+	    {
+	      incr_output_stats (vnm, cpu_index,
+				 vlib_buffer_length_in_chain (vm, b0),
+				 vnet_buffer (b0)->sw_if_index[VLIB_TX],
+				 &last_sw_if_index, &n_packets, &n_bytes);
+	      incr_output_stats (vnm, cpu_index,
+				 vlib_buffer_length_in_chain (vm, b0),
+				 vnet_buffer (b1)->sw_if_index[VLIB_TX],
+				 &last_sw_if_index, &n_packets, &n_bytes);
+	    }
 	}
 
       while (from + 1 <= from_end && n_left_to_tx >= 1)
 	{
 	  u32 bi0;
-	  vlib_buffer_t * b0;
+	  vlib_buffer_t *b0;
 
 	  bi0 = from[0];
 	  to_tx[0] = bi0;
@@ -376,41 +379,42 @@
 	      to_tx += n_buffers;
 	      n_left_to_tx -= n_buffers;
 	    }
-          incr_output_stats (vnm, cpu_index, 
-                             vlib_buffer_length_in_chain (vm, b0),
-			     vnet_buffer(b0)->sw_if_index[VLIB_TX],
-                             &last_sw_if_index, &n_packets, &n_bytes);
+	  incr_output_stats (vnm, cpu_index,
+			     vlib_buffer_length_in_chain (vm, b0),
+			     vnet_buffer (b0)->sw_if_index[VLIB_TX],
+			     &last_sw_if_index, &n_packets, &n_bytes);
 	}
 
     put:
-      vlib_put_next_frame (vm, node, VNET_INTERFACE_OUTPUT_NEXT_TX, n_left_to_tx);
+      vlib_put_next_frame (vm, node, VNET_INTERFACE_OUTPUT_NEXT_TX,
+			   n_left_to_tx);
     }
 
   /* Final update of interface stats. */
-  incr_output_stats (vnm, cpu_index, 0, ~0, /* ~0 will flush stats */
-                     &last_sw_if_index, &n_packets, &n_bytes); 
+  incr_output_stats (vnm, cpu_index, 0, ~0,	/* ~0 will flush stats */
+		     &last_sw_if_index, &n_packets, &n_bytes);
 
   return n_buffers;
 }
 
-VLIB_NODE_FUNCTION_MULTIARCH_CLONE (vnet_interface_output_node)
-CLIB_MULTIARCH_SELECT_FN (vnet_interface_output_node)
+VLIB_NODE_FUNCTION_MULTIARCH_CLONE (vnet_interface_output_node);
+CLIB_MULTIARCH_SELECT_FN (vnet_interface_output_node);
 
 always_inline uword
-vnet_interface_output_node_no_flatten_inline  (vlib_main_t * vm,
-                                               vlib_node_runtime_t * node,
-                                               vlib_frame_t * frame,
-                                               int with_features)
+vnet_interface_output_node_no_flatten_inline (vlib_main_t * vm,
+					      vlib_node_runtime_t * node,
+					      vlib_frame_t * frame,
+					      int with_features)
 {
-  vnet_main_t * vnm = vnet_get_main();
-  vnet_interface_output_runtime_t * rt = (void *) node->runtime_data;
-  vnet_sw_interface_t * si;
-  vnet_hw_interface_t * hi;
-  u32 n_left_to_tx, * from, * from_end, * to_tx;
+  vnet_main_t *vnm = vnet_get_main ();
+  vnet_interface_output_runtime_t *rt = (void *) node->runtime_data;
+  vnet_sw_interface_t *si;
+  vnet_hw_interface_t *hi;
+  u32 n_left_to_tx, *from, *from_end, *to_tx;
   u32 n_bytes, n_buffers, n_packets;
   u32 n_bytes_b0, n_bytes_b1;
   u32 cpu_index = vm->cpu_index;
-  vnet_interface_main_t * im = &vnm->interface_main;
+  vnet_interface_main_t *im = &vnm->interface_main;
   u32 next_index = VNET_INTERFACE_OUTPUT_NEXT_TX;
 
   n_buffers = frame->n_vectors;
@@ -421,8 +425,7 @@
   from = vlib_frame_args (frame);
 
   if (rt->is_deleted)
-    return vlib_error_drop_buffers (vm, node,
-				    from,
+    return vlib_error_drop_buffers (vm, node, from,
 				    /* buffer stride */ 1,
 				    n_buffers,
 				    VNET_INTERFACE_OUTPUT_NEXT_DROP,
@@ -431,23 +434,22 @@
 
   si = vnet_get_sw_interface (vnm, rt->sw_if_index);
   hi = vnet_get_sup_hw_interface (vnm, rt->sw_if_index);
-  if (! (si->flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) ||
-      ! (hi->flags & VNET_HW_INTERFACE_FLAG_LINK_UP))
+  if (!(si->flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) ||
+      !(hi->flags & VNET_HW_INTERFACE_FLAG_LINK_UP))
     {
-      vlib_simple_counter_main_t * cm;
-     
+      vlib_simple_counter_main_t *cm;
+
       cm = vec_elt_at_index (vnm->interface_main.sw_if_counters,
-                             VNET_INTERFACE_COUNTER_TX_ERROR);
+			     VNET_INTERFACE_COUNTER_TX_ERROR);
       vlib_increment_simple_counter (cm, cpu_index,
-                                     rt->sw_if_index, n_buffers);
-      
-      return vlib_error_drop_buffers (vm, node,
-                                      from,
-                                      /* buffer stride */ 1,
-                                      n_buffers,
-                                      VNET_INTERFACE_OUTPUT_NEXT_DROP,
-                                      node->node_index,
-                                      VNET_INTERFACE_OUTPUT_ERROR_INTERFACE_DOWN);
+				     rt->sw_if_index, n_buffers);
+
+      return vlib_error_drop_buffers (vm, node, from,
+				      /* buffer stride */ 1,
+				      n_buffers,
+				      VNET_INTERFACE_OUTPUT_NEXT_DROP,
+				      node->node_index,
+				      VNET_INTERFACE_OUTPUT_ERROR_INTERFACE_DOWN);
     }
 
   from_end = from + n_buffers;
@@ -459,16 +461,15 @@
   while (from < from_end)
     {
       /* Get new next frame since previous incomplete frame may have less
-	 than VNET_FRAME_SIZE vectors in it. */
-      vlib_get_new_next_frame (vm, node, next_index,
-			       to_tx, n_left_to_tx);
+         than VNET_FRAME_SIZE vectors in it. */
+      vlib_get_new_next_frame (vm, node, next_index, to_tx, n_left_to_tx);
 
       while (from + 4 <= from_end && n_left_to_tx >= 2)
 	{
 	  u32 bi0, bi1;
-	  vlib_buffer_t * b0, * b1;
-          u32 tx_swif0, tx_swif1;
-          u32 next0, next1;
+	  vlib_buffer_t *b0, *b1;
+	  u32 tx_swif0, tx_swif1;
+	  u32 next0, next1;
 
 	  /* Prefetch next iteration. */
 	  vlib_prefetch_buffer_with_index (vm, from[2], LOAD);
@@ -490,71 +491,74 @@
 	  ASSERT (b0->current_length > 0);
 	  ASSERT (b1->current_length > 0);
 
-          n_bytes_b0 = vlib_buffer_length_in_chain (vm, b0);
-          n_bytes_b1 = vlib_buffer_length_in_chain (vm, b1);
-          tx_swif0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
-          tx_swif1 = vnet_buffer(b1)->sw_if_index[VLIB_TX];
+	  n_bytes_b0 = vlib_buffer_length_in_chain (vm, b0);
+	  n_bytes_b1 = vlib_buffer_length_in_chain (vm, b1);
+	  tx_swif0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
+	  tx_swif1 = vnet_buffer (b1)->sw_if_index[VLIB_TX];
 
 	  n_bytes += n_bytes_b0 + n_bytes_b1;
 	  n_packets += 2;
-          if (with_features)
-            {
-              b0->flags |= BUFFER_OUTPUT_FEAT_DONE;
-              vnet_buffer(b0)->output_features.bitmap = si->output_feature_bitmap;
-              count_trailing_zeros(next0, vnet_buffer(b0)->output_features.bitmap);
-              vnet_buffer(b0)->output_features.bitmap &= ~(1 << next0);
-            }
-          else
-            {
-	      next0 = VNET_INTERFACE_OUTPUT_NEXT_TX;
-              vnet_buffer(b0)->output_features.bitmap = 0;
-
-              if (PREDICT_FALSE(tx_swif0 != rt->sw_if_index))
-                {
-                  /* update vlan subif tx counts, if required */
-                  vlib_increment_combined_counter (im->combined_sw_if_counters
-                                                   + VNET_INTERFACE_COUNTER_TX,
-                                                   cpu_index,
-                                                   tx_swif0,
-                                                   1,
-                                                   n_bytes_b0);
-                }
-            }
-
-          if (with_features)
-            {
-              b1->flags |= BUFFER_OUTPUT_FEAT_DONE;
-              vnet_buffer(b1)->output_features.bitmap = si->output_feature_bitmap;
-              count_trailing_zeros(next1, vnet_buffer(b1)->output_features.bitmap);
-              vnet_buffer(b1)->output_features.bitmap &= ~(1 << next1);
-            }
-          else
-            {
-	      next1 = VNET_INTERFACE_OUTPUT_NEXT_TX;
-              vnet_buffer(b1)->output_features.bitmap = 0;
-
-              /* update vlan subif tx counts, if required */
-              if (PREDICT_FALSE(tx_swif1 != rt->sw_if_index))
-                {
-
-                  vlib_increment_combined_counter (im->combined_sw_if_counters
-                                                   + VNET_INTERFACE_COUNTER_TX,
-                                                   cpu_index,
-                                                   tx_swif1,
-                                                   1,
-                                                   n_bytes_b1);
-                }
-            }
 	  if (with_features)
-	    vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_tx,
-					    n_left_to_tx, bi0, bi1, next0, next1);
+	    {
+	      b0->flags |= BUFFER_OUTPUT_FEAT_DONE;
+	      vnet_buffer (b0)->output_features.bitmap =
+		si->output_feature_bitmap;
+	      count_trailing_zeros (next0,
+				    vnet_buffer (b0)->output_features.bitmap);
+	      vnet_buffer (b0)->output_features.bitmap &= ~(1 << next0);
+	    }
+	  else
+	    {
+	      next0 = VNET_INTERFACE_OUTPUT_NEXT_TX;
+	      vnet_buffer (b0)->output_features.bitmap = 0;
+
+	      if (PREDICT_FALSE (tx_swif0 != rt->sw_if_index))
+		{
+		  /* update vlan subif tx counts, if required */
+		  vlib_increment_combined_counter (im->combined_sw_if_counters
+						   +
+						   VNET_INTERFACE_COUNTER_TX,
+						   cpu_index, tx_swif0, 1,
+						   n_bytes_b0);
+		}
+	    }
+
+	  if (with_features)
+	    {
+	      b1->flags |= BUFFER_OUTPUT_FEAT_DONE;
+	      vnet_buffer (b1)->output_features.bitmap =
+		si->output_feature_bitmap;
+	      count_trailing_zeros (next1,
+				    vnet_buffer (b1)->output_features.bitmap);
+	      vnet_buffer (b1)->output_features.bitmap &= ~(1 << next1);
+	    }
+	  else
+	    {
+	      next1 = VNET_INTERFACE_OUTPUT_NEXT_TX;
+	      vnet_buffer (b1)->output_features.bitmap = 0;
+
+	      /* update vlan subif tx counts, if required */
+	      if (PREDICT_FALSE (tx_swif1 != rt->sw_if_index))
+		{
+
+		  vlib_increment_combined_counter (im->combined_sw_if_counters
+						   +
+						   VNET_INTERFACE_COUNTER_TX,
+						   cpu_index, tx_swif1, 1,
+						   n_bytes_b1);
+		}
+	    }
+	  if (with_features)
+	    vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_tx,
+					     n_left_to_tx, bi0, bi1, next0,
+					     next1);
 	}
 
       while (from + 1 <= from_end && n_left_to_tx >= 1)
 	{
 	  u32 bi0;
-	  vlib_buffer_t * b0;
-          u32 tx_swif0;
+	  vlib_buffer_t *b0;
+	  u32 tx_swif0;
 
 	  bi0 = from[0];
 	  to_tx[0] = bi0;
@@ -568,78 +572,77 @@
 	     driver tx function. */
 	  ASSERT (b0->current_length > 0);
 
-          n_bytes_b0 = vlib_buffer_length_in_chain (vm, b0);
-          tx_swif0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
+	  n_bytes_b0 = vlib_buffer_length_in_chain (vm, b0);
+	  tx_swif0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
 	  n_bytes += n_bytes_b0;
 	  n_packets += 1;
 
-          if (with_features)
-            {
-              u32 next0;
-              b0->flags |= BUFFER_OUTPUT_FEAT_DONE;
-              vnet_buffer(b0)->output_features.bitmap = si->output_feature_bitmap;
-              count_trailing_zeros(next0, vnet_buffer(b0)->output_features.bitmap);
-              vnet_buffer(b0)->output_features.bitmap &= ~(1 << next0);
-              vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_tx,
-                                               n_left_to_tx, bi0, next0);
-            }
-          else
-            {
-              vnet_buffer(b0)->output_features.bitmap = 0;
+	  if (with_features)
+	    {
+	      u32 next0;
+	      b0->flags |= BUFFER_OUTPUT_FEAT_DONE;
+	      vnet_buffer (b0)->output_features.bitmap =
+		si->output_feature_bitmap;
+	      count_trailing_zeros (next0,
+				    vnet_buffer (b0)->output_features.bitmap);
+	      vnet_buffer (b0)->output_features.bitmap &= ~(1 << next0);
+	      vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_tx,
+					       n_left_to_tx, bi0, next0);
+	    }
+	  else
+	    {
+	      vnet_buffer (b0)->output_features.bitmap = 0;
 
-              if (PREDICT_FALSE(tx_swif0 != rt->sw_if_index))
-                {
+	      if (PREDICT_FALSE (tx_swif0 != rt->sw_if_index))
+		{
 
-                  vlib_increment_combined_counter (im->combined_sw_if_counters
-                                                   + VNET_INTERFACE_COUNTER_TX,
-                                                   cpu_index,
-                                                   tx_swif0,
-                                                   1,
-                                                   n_bytes_b0);
-                }
-            }
+		  vlib_increment_combined_counter (im->combined_sw_if_counters
+						   +
+						   VNET_INTERFACE_COUNTER_TX,
+						   cpu_index, tx_swif0, 1,
+						   n_bytes_b0);
+		}
+	    }
 	}
 
-      vlib_put_next_frame (vm, node, next_index,
-                           n_left_to_tx);
+      vlib_put_next_frame (vm, node, next_index, n_left_to_tx);
     }
 
   /* Update main interface stats. */
   vlib_increment_combined_counter (im->combined_sw_if_counters
-                                   + VNET_INTERFACE_COUNTER_TX,
-                                   cpu_index,
-                                   rt->sw_if_index,
-                                   n_packets,
-                                   n_bytes);
+				   + VNET_INTERFACE_COUNTER_TX,
+				   cpu_index,
+				   rt->sw_if_index, n_packets, n_bytes);
   return n_buffers;
 }
 
 uword
 vnet_interface_output_node_no_flatten (vlib_main_t * vm,
-                                       vlib_node_runtime_t * node,
-                                       vlib_frame_t * frame)
+				       vlib_node_runtime_t * node,
+				       vlib_frame_t * frame)
 {
-  vnet_main_t * vnm = vnet_get_main ();
-  vnet_interface_output_runtime_t * rt = (void *) node->runtime_data;
-  vnet_sw_interface_t * si;
+  vnet_main_t *vnm = vnet_get_main ();
+  vnet_interface_output_runtime_t *rt = (void *) node->runtime_data;
+  vnet_sw_interface_t *si;
   si = vnet_get_sw_interface (vnm, rt->sw_if_index);
 
-  if (PREDICT_FALSE(si->output_feature_bitmap))
+  if (PREDICT_FALSE (si->output_feature_bitmap))
     {
       /* if first packet in the frame has BUFFER_OUTPUT_FEAT_DONE flag set
-	 then whole frame is arriving from feature node */
+         then whole frame is arriving from feature node */
 
-      u32 * from = vlib_frame_args (frame);
-      vlib_buffer_t * b = vlib_get_buffer (vm, from[0]);
+      u32 *from = vlib_frame_args (frame);
+      vlib_buffer_t *b = vlib_get_buffer (vm, from[0]);
 
       if ((b->flags & BUFFER_OUTPUT_FEAT_DONE) == 0)
-        return vnet_interface_output_node_no_flatten_inline (vm, node, frame, 1);
+	return vnet_interface_output_node_no_flatten_inline (vm, node, frame,
+							     1);
     }
-    return vnet_interface_output_node_no_flatten_inline (vm, node, frame, 0);
+  return vnet_interface_output_node_no_flatten_inline (vm, node, frame, 0);
 }
 
-VLIB_NODE_FUNCTION_MULTIARCH_CLONE (vnet_interface_output_node_no_flatten)
-CLIB_MULTIARCH_SELECT_FN (vnet_interface_output_node_no_flatten)
+VLIB_NODE_FUNCTION_MULTIARCH_CLONE (vnet_interface_output_node_no_flatten);
+CLIB_MULTIARCH_SELECT_FN (vnet_interface_output_node_no_flatten);
 
 /* Use buffer's sw_if_index[VNET_TX] to choose output interface. */
 static uword
@@ -647,8 +650,8 @@
 				  vlib_node_runtime_t * node,
 				  vlib_frame_t * frame)
 {
-  vnet_main_t * vnm = vnet_get_main();
-  u32 n_left_to_next, * from, * to_next;
+  vnet_main_t *vnm = vnet_get_main ();
+  u32 n_left_to_next, *from, *to_next;
   u32 n_left_from, next_index;
 
   n_left_from = frame->n_vectors;
@@ -663,8 +666,8 @@
       while (n_left_from >= 4 && n_left_to_next >= 2)
 	{
 	  u32 bi0, bi1, next0, next1;
-	  vlib_buffer_t * b0, * b1;
-	  vnet_hw_interface_t * hi0, * hi1;
+	  vlib_buffer_t *b0, *b1;
+	  vnet_hw_interface_t *hi0, *hi1;
 
 	  /* Prefetch next iteration. */
 	  vlib_prefetch_buffer_with_index (vm, from[2], LOAD);
@@ -682,21 +685,28 @@
 	  b0 = vlib_get_buffer (vm, bi0);
 	  b1 = vlib_get_buffer (vm, bi1);
 
-	  hi0 = vnet_get_sup_hw_interface (vnm, vnet_buffer (b0)->sw_if_index[VLIB_TX]);
-	  hi1 = vnet_get_sup_hw_interface (vnm, vnet_buffer (b1)->sw_if_index[VLIB_TX]);
+	  hi0 =
+	    vnet_get_sup_hw_interface (vnm,
+				       vnet_buffer (b0)->sw_if_index
+				       [VLIB_TX]);
+	  hi1 =
+	    vnet_get_sup_hw_interface (vnm,
+				       vnet_buffer (b1)->sw_if_index
+				       [VLIB_TX]);
 
 	  next0 = hi0->hw_if_index;
 	  next1 = hi1->hw_if_index;
 
-	  vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next, n_left_to_next,
-					   bi0, bi1, next0, next1);
+	  vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
+					   n_left_to_next, bi0, bi1, next0,
+					   next1);
 	}
 
       while (n_left_from > 0 && n_left_to_next > 0)
 	{
 	  u32 bi0, next0;
-	  vlib_buffer_t * b0;
-	  vnet_hw_interface_t * hi0;
+	  vlib_buffer_t *b0;
+	  vnet_hw_interface_t *hi0;
 
 	  bi0 = from[0];
 	  to_next[0] = bi0;
@@ -707,12 +717,15 @@
 
 	  b0 = vlib_get_buffer (vm, bi0);
 
-	  hi0 = vnet_get_sup_hw_interface (vnm, vnet_buffer (b0)->sw_if_index[VLIB_TX]);
+	  hi0 =
+	    vnet_get_sup_hw_interface (vnm,
+				       vnet_buffer (b0)->sw_if_index
+				       [VLIB_TX]);
 
 	  next0 = hi0->hw_if_index;
 
-	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, n_left_to_next,
-					   bi0, next0);
+	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+					   n_left_to_next, bi0, next0);
 	}
 
       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
@@ -724,7 +737,7 @@
 always_inline u32
 counter_index (vlib_main_t * vm, vlib_error_t e)
 {
-  vlib_node_t * n;
+  vlib_node_t *n;
   u32 ci, ni;
 
   ni = vlib_error_get_node (e);
@@ -738,13 +751,14 @@
   return ci;
 }
 
-static u8 * format_vnet_error_trace (u8 * s, va_list * va)
+static u8 *
+format_vnet_error_trace (u8 * s, va_list * va)
 {
-  vlib_main_t * vm = va_arg (*va, vlib_main_t *);
+  vlib_main_t *vm = va_arg (*va, vlib_main_t *);
   CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
-  vlib_error_t * e = va_arg (*va, vlib_error_t *);
-  vlib_node_t * error_node;
-  vlib_error_main_t * em = &vm->error_main;
+  vlib_error_t *e = va_arg (*va, vlib_error_t *);
+  vlib_node_t *error_node;
+  vlib_error_main_t *em = &vm->error_main;
   u32 i;
 
   error_node = vlib_get_node (vm, vlib_error_get_node (e[0]));
@@ -756,19 +770,18 @@
 
 static void
 trace_errors_with_buffers (vlib_main_t * vm,
-			   vlib_node_runtime_t * node,
-			   vlib_frame_t * frame)
+			   vlib_node_runtime_t * node, vlib_frame_t * frame)
 {
-  u32 n_left, * buffers;
+  u32 n_left, *buffers;
 
   buffers = vlib_frame_vector_args (frame);
   n_left = frame->n_vectors;
-  
+
   while (n_left >= 4)
     {
       u32 bi0, bi1;
-      vlib_buffer_t * b0, * b1;
-      vlib_error_t * t0, * t1;
+      vlib_buffer_t *b0, *b1;
+      vlib_error_t *t0, *t1;
 
       /* Prefetch next iteration. */
       vlib_prefetch_buffer_with_index (vm, buffers[2], LOAD);
@@ -797,8 +810,8 @@
   while (n_left >= 1)
     {
       u32 bi0;
-      vlib_buffer_t * b0;
-      vlib_error_t * t0;
+      vlib_buffer_t *b0;
+      vlib_error_t *t0;
 
       bi0 = buffers[0];
 
@@ -819,7 +832,7 @@
 {
   uword node_index = vlib_error_get_node (e[0]);
   uword code = vlib_error_get_code (e[0]);
-  vlib_node_t * n;
+  vlib_node_t *n;
 
   if (node_index >= vec_len (vm->node_main.nodes))
     return format (0, "[%d], node index out of range 0x%x, error 0x%x",
@@ -835,12 +848,11 @@
 
 static u8 *
 validate_error_frame (vlib_main_t * vm,
-		      vlib_node_runtime_t * node,
-		      vlib_frame_t * f)
+		      vlib_node_runtime_t * node, vlib_frame_t * f)
 {
-  u32 * buffers = vlib_frame_args (f);
-  vlib_buffer_t * b;
-  u8 * msg = 0;
+  u32 *buffers = vlib_frame_args (f);
+  vlib_buffer_t *b;
+  u8 *msg = 0;
   uword i;
 
   for (i = 0; i < f->n_vectors; i++)
@@ -854,7 +866,8 @@
   return msg;
 }
 
-typedef enum {
+typedef enum
+{
   VNET_ERROR_DISPOSITION_DROP,
   VNET_ERROR_DISPOSITION_PUNT,
   VNET_ERROR_N_DISPOSITION,
@@ -863,26 +876,25 @@
 always_inline void
 do_packet (vlib_main_t * vm, vlib_error_t a)
 {
-  vlib_error_main_t * em = &vm->error_main;
+  vlib_error_main_t *em = &vm->error_main;
   u32 i = counter_index (vm, a);
   em->counters[i] += 1;
   vlib_error_elog_count (vm, i, 1);
 }
-    
+
 static_always_inline uword
 process_drop_punt (vlib_main_t * vm,
 		   vlib_node_runtime_t * node,
-		   vlib_frame_t * frame,
-		   vnet_error_disposition_t disposition)
+		   vlib_frame_t * frame, vnet_error_disposition_t disposition)
 {
-  vnet_main_t * vnm = vnet_get_main();
-  vlib_error_main_t * em = &vm->error_main;
-  u32 * buffers, * first_buffer;
+  vnet_main_t *vnm = vnet_get_main ();
+  vlib_error_main_t *em = &vm->error_main;
+  u32 *buffers, *first_buffer;
   vlib_error_t current_error;
   u32 current_counter_index, n_errors_left;
   u32 current_sw_if_index, n_errors_current_sw_if_index;
   u64 current_counter;
-  vlib_simple_counter_main_t * cm;
+  vlib_simple_counter_main_t *cm;
   u32 cpu_index = vm->cpu_index;
 
   static vlib_error_t memory[VNET_ERROR_N_DISPOSITION];
@@ -892,9 +904,9 @@
   first_buffer = buffers;
 
   {
-    vlib_buffer_t * b = vlib_get_buffer (vm, first_buffer[0]);
+    vlib_buffer_t *b = vlib_get_buffer (vm, first_buffer[0]);
 
-    if (! memory_init[disposition])
+    if (!memory_init[disposition])
       {
 	memory_init[disposition] = 1;
 	memory[disposition] = b->error;
@@ -910,7 +922,7 @@
 
   if (node->flags & VLIB_NODE_FLAG_TRACE)
     trace_errors_with_buffers (vm, node, frame);
-  
+
   n_errors_left = frame->n_vectors;
   cm = vec_elt_at_index (vnm->interface_main.sw_if_counters,
 			 (disposition == VNET_ERROR_DISPOSITION_PUNT
@@ -919,8 +931,8 @@
 
   while (n_errors_left >= 2)
     {
-      vlib_buffer_t * b0, * b1;
-      vnet_sw_interface_t * sw_if0, * sw_if1;
+      vlib_buffer_t *b0, *b1;
+      vnet_sw_interface_t *sw_if0, *sw_if1;
       vlib_error_t e0, e1;
       u32 bi0, bi1;
       u32 sw_if_index0, sw_if_index1;
@@ -944,7 +956,7 @@
       n_errors_current_sw_if_index += 2;
 
       /* Speculatively assume all 2 (node, code) pairs are equal
-	 to current (node, code). */
+         to current (node, code). */
       current_counter += 2;
 
       if (PREDICT_FALSE (e0 != current_error
@@ -962,13 +974,13 @@
 	     sub-interfaces. */
 	  sw_if0 = vnet_get_sw_interface (vnm, sw_if_index0);
 	  vlib_increment_simple_counter
-              (cm, cpu_index, sw_if0->sup_sw_if_index,
-               sw_if0->sup_sw_if_index != sw_if_index0);
+	    (cm, cpu_index, sw_if0->sup_sw_if_index,
+	     sw_if0->sup_sw_if_index != sw_if_index0);
 
 	  sw_if1 = vnet_get_sw_interface (vnm, sw_if_index1);
 	  vlib_increment_simple_counter
-              (cm, cpu_index, sw_if1->sup_sw_if_index, 
-               sw_if1->sup_sw_if_index != sw_if_index1);
+	    (cm, cpu_index, sw_if1->sup_sw_if_index,
+	     sw_if1->sup_sw_if_index != sw_if_index1);
 
 	  em->counters[current_counter_index] = current_counter;
 	  do_packet (vm, e0);
@@ -986,8 +998,8 @@
 
   while (n_errors_left >= 1)
     {
-      vlib_buffer_t * b0;
-      vnet_sw_interface_t * sw_if0;
+      vlib_buffer_t *b0;
+      vnet_sw_interface_t *sw_if0;
       vlib_error_t e0;
       u32 bi0, sw_if_index0;
 
@@ -1007,7 +1019,7 @@
 
       /* Increment super-interface drop/punt counters for sub-interfaces. */
       sw_if0 = vnet_get_sw_interface (vnm, sw_if_index0);
-      vlib_increment_simple_counter (cm, cpu_index, sw_if0->sup_sw_if_index, 
+      vlib_increment_simple_counter (cm, cpu_index, sw_if0->sup_sw_if_index,
 				     sw_if0->sup_sw_if_index != sw_if_index0);
 
       if (PREDICT_FALSE (e0 != current_error))
@@ -1017,7 +1029,7 @@
 	  vlib_error_elog_count (vm, current_counter_index,
 				 (current_counter
 				  - em->counters[current_counter_index]));
-	    
+
 	  em->counters[current_counter_index] = current_counter;
 
 	  do_packet (vm, e0);
@@ -1029,15 +1041,15 @@
 
   if (n_errors_current_sw_if_index > 0)
     {
-      vnet_sw_interface_t * si;
+      vnet_sw_interface_t *si;
 
       vlib_increment_simple_counter (cm, cpu_index, current_sw_if_index,
 				     n_errors_current_sw_if_index);
 
       si = vnet_get_sw_interface (vnm, current_sw_if_index);
       if (si->sup_sw_if_index != current_sw_if_index)
-          vlib_increment_simple_counter (cm, cpu_index, si->sup_sw_if_index,
-                                         n_errors_current_sw_if_index);
+	vlib_increment_simple_counter (cm, cpu_index, si->sup_sw_if_index,
+				       n_errors_current_sw_if_index);
     }
 
   vlib_error_elog_count (vm, current_counter_index,
@@ -1050,16 +1062,12 @@
   /* Save memory for next iteration. */
   memory[disposition] = current_error;
 
-  if (disposition == VNET_ERROR_DISPOSITION_DROP
-      || ! vm->os_punt_frame)
+  if (disposition == VNET_ERROR_DISPOSITION_DROP || !vm->os_punt_frame)
     {
-      vlib_buffer_free
-	(vm,
-	 first_buffer,
-	 frame->n_vectors);
+      vlib_buffer_free (vm, first_buffer, frame->n_vectors);
 
       /* If there is no punt function, free the frame as well. */
-      if (disposition == VNET_ERROR_DISPOSITION_PUNT && ! vm->os_punt_frame)
+      if (disposition == VNET_ERROR_DISPOSITION_PUNT && !vm->os_punt_frame)
 	vlib_frame_free (vm, node, frame);
     }
   else
@@ -1068,14 +1076,13 @@
   return frame->n_vectors;
 }
 
-static inline void 
-pcap_drop_trace (vlib_main_t * vm, 
-                 vnet_interface_main_t * im, 
-                 vlib_frame_t * f)
+static inline void
+pcap_drop_trace (vlib_main_t * vm,
+		 vnet_interface_main_t * im, vlib_frame_t * f)
 {
-  u32 * from;
+  u32 *from;
   u32 n_left = f->n_vectors;
-  vlib_buffer_t * b0, * p1;
+  vlib_buffer_t *b0, *p1;
   u32 bi0;
   i16 save_current_data;
   u16 save_current_length;
@@ -1085,48 +1092,49 @@
   while (n_left > 0)
     {
       if (PREDICT_TRUE (n_left > 1))
-        {
-          p1 = vlib_get_buffer (vm, from[1]);
-          vlib_prefetch_buffer_header (p1, LOAD);
-        }
-      
+	{
+	  p1 = vlib_get_buffer (vm, from[1]);
+	  vlib_prefetch_buffer_header (p1, LOAD);
+	}
+
       bi0 = from[0];
       b0 = vlib_get_buffer (vm, bi0);
       from++;
       n_left--;
-      
+
       /* See if we're pointedly ignoring this specific error */
-      if (im->pcap_drop_filter_hash 
-          && hash_get (im->pcap_drop_filter_hash, b0->error))
-        continue;
+      if (im->pcap_drop_filter_hash
+	  && hash_get (im->pcap_drop_filter_hash, b0->error))
+	continue;
 
       /* Trace all drops, or drops received on a specific interface */
       if (im->pcap_sw_if_index == 0 ||
-          im->pcap_sw_if_index == vnet_buffer(b0)->sw_if_index [VLIB_RX])
-        {
-          save_current_data = b0->current_data;
-          save_current_length = b0->current_length;
-          
-          /* 
-           * Typically, we'll need to rewind the buffer
-           */
-          if (b0->current_data > 0)
-            vlib_buffer_advance (b0, (word) -b0->current_data);
+	  im->pcap_sw_if_index == vnet_buffer (b0)->sw_if_index[VLIB_RX])
+	{
+	  save_current_data = b0->current_data;
+	  save_current_length = b0->current_length;
 
-          pcap_add_buffer (&im->pcap_main, vm, bi0, 512);
+	  /*
+	   * Typically, we'll need to rewind the buffer
+	   */
+	  if (b0->current_data > 0)
+	    vlib_buffer_advance (b0, (word) - b0->current_data);
 
-          b0->current_data = save_current_data;
-          b0->current_length = save_current_length;
-        }
+	  pcap_add_buffer (&im->pcap_main, vm, bi0, 512);
+
+	  b0->current_data = save_current_data;
+	  b0->current_length = save_current_length;
+	}
     }
 }
 
-void vnet_pcap_drop_trace_filter_add_del (u32 error_index, int is_add)
+void
+vnet_pcap_drop_trace_filter_add_del (u32 error_index, int is_add)
 {
-  vnet_interface_main_t * im = &vnet_get_main()->interface_main;
+  vnet_interface_main_t *im = &vnet_get_main ()->interface_main;
 
   if (im->pcap_drop_filter_hash == 0)
-      im->pcap_drop_filter_hash = hash_create (0, sizeof (uword));
+    im->pcap_drop_filter_hash = hash_create (0, sizeof (uword));
 
   if (is_add)
     hash_set (im->pcap_drop_filter_hash, error_index, 1);
@@ -1136,10 +1144,9 @@
 
 static uword
 process_drop (vlib_main_t * vm,
-	      vlib_node_runtime_t * node,
-	      vlib_frame_t * frame)
+	      vlib_node_runtime_t * node, vlib_frame_t * frame)
 {
-  vnet_interface_main_t * im = &vnet_get_main()->interface_main;
+  vnet_interface_main_t *im = &vnet_get_main ()->interface_main;
 
   if (PREDICT_FALSE (im->drop_pcap_enable))
     pcap_drop_trace (vm, im, frame);
@@ -1149,12 +1156,12 @@
 
 static uword
 process_punt (vlib_main_t * vm,
-	      vlib_node_runtime_t * node,
-	      vlib_frame_t * frame)
+	      vlib_node_runtime_t * node, vlib_frame_t * frame)
 {
   return process_drop_punt (vm, node, frame, VNET_ERROR_DISPOSITION_PUNT);
 }
 
+/* *INDENT-OFF* */
 VLIB_REGISTER_NODE (drop_buffers,static) = {
   .function = process_drop,
   .name = "error-drop",
@@ -1163,9 +1170,11 @@
   .format_trace = format_vnet_error_trace,
   .validate_frame = validate_error_frame,
 };
+/* *INDENT-ON* */
 
-VLIB_NODE_FUNCTION_MULTIARCH (drop_buffers, process_drop)
+VLIB_NODE_FUNCTION_MULTIARCH (drop_buffers, process_drop);
 
+/* *INDENT-OFF* */
 VLIB_REGISTER_NODE (punt_buffers,static) = {
   .function = process_punt,
   .flags = (VLIB_NODE_FLAG_FRAME_NO_FREE_AFTER_DISPATCH
@@ -1175,23 +1184,27 @@
   .format_trace = format_vnet_error_trace,
   .validate_frame = validate_error_frame,
 };
+/* *INDENT-ON* */
 
-VLIB_NODE_FUNCTION_MULTIARCH (punt_buffers, process_punt)
+VLIB_NODE_FUNCTION_MULTIARCH (punt_buffers, process_punt);
 
+/* *INDENT-OFF* */
 VLIB_REGISTER_NODE (vnet_per_buffer_interface_output_node,static) = {
   .function = vnet_per_buffer_interface_output,
   .name = "interface-output",
   .vector_size = sizeof (u32),
 };
+/* *INDENT-ON* */
 
-VLIB_NODE_FUNCTION_MULTIARCH (vnet_per_buffer_interface_output_node, vnet_per_buffer_interface_output)
+VLIB_NODE_FUNCTION_MULTIARCH (vnet_per_buffer_interface_output_node,
+			      vnet_per_buffer_interface_output);
 
 clib_error_t *
 vnet_per_buffer_interface_output_hw_interface_add_del (vnet_main_t * vnm,
 						       u32 hw_if_index,
 						       u32 is_create)
 {
-  vnet_hw_interface_t * hi = vnet_get_hw_interface (vnm, hw_if_index);
+  vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, hw_if_index);
   u32 next_index;
 
   next_index = vlib_node_add_next_with_slot
@@ -1204,136 +1217,147 @@
   return 0;
 }
 
-VNET_HW_INTERFACE_ADD_DEL_FUNCTION 
-(vnet_per_buffer_interface_output_hw_interface_add_del);
+VNET_HW_INTERFACE_ADD_DEL_FUNCTION
+  (vnet_per_buffer_interface_output_hw_interface_add_del);
 
 static clib_error_t *
 pcap_drop_trace_command_fn (vlib_main_t * vm,
-                            unformat_input_t * input,
-                            vlib_cli_command_t * cmd)
+			    unformat_input_t * input,
+			    vlib_cli_command_t * cmd)
 {
-  vnet_main_t * vnm = vnet_get_main();
-  vnet_interface_main_t * im = &vnm->interface_main;
-  u8 * filename;
+  vnet_main_t *vnm = vnet_get_main ();
+  vnet_interface_main_t *im = &vnm->interface_main;
+  u8 *filename;
   u32 max;
   int matched = 0;
-  clib_error_t * error = 0;
+  clib_error_t *error = 0;
 
-  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) 
+  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
     {
       if (unformat (input, "on"))
-        {
-          if (im->drop_pcap_enable == 0)
-            {
-              if (im->pcap_filename == 0)
-                im->pcap_filename = format (0, "/tmp/drop.pcap%c", 0);
-              
-              memset (&im->pcap_main, 0, sizeof (im->pcap_main));
-              im->pcap_main.file_name = (char *) im->pcap_filename;
-              im->pcap_main.n_packets_to_capture = 100;
-              if (im->pcap_pkts_to_capture)
-                im->pcap_main.n_packets_to_capture = im->pcap_pkts_to_capture;
+	{
+	  if (im->drop_pcap_enable == 0)
+	    {
+	      if (im->pcap_filename == 0)
+		im->pcap_filename = format (0, "/tmp/drop.pcap%c", 0);
 
-              im->pcap_main.packet_type = PCAP_PACKET_TYPE_ethernet;
-              im->drop_pcap_enable = 1;
-              matched = 1;
-              vlib_cli_output (vm, "pcap drop capture on...");
-            }
-          else
-            {
-              vlib_cli_output (vm, "pcap drop capture already on...");
-            }
-          matched = 1;
-        }
+	      memset (&im->pcap_main, 0, sizeof (im->pcap_main));
+	      im->pcap_main.file_name = (char *) im->pcap_filename;
+	      im->pcap_main.n_packets_to_capture = 100;
+	      if (im->pcap_pkts_to_capture)
+		im->pcap_main.n_packets_to_capture = im->pcap_pkts_to_capture;
+
+	      im->pcap_main.packet_type = PCAP_PACKET_TYPE_ethernet;
+	      im->drop_pcap_enable = 1;
+	      matched = 1;
+	      vlib_cli_output (vm, "pcap drop capture on...");
+	    }
+	  else
+	    {
+	      vlib_cli_output (vm, "pcap drop capture already on...");
+	    }
+	  matched = 1;
+	}
       else if (unformat (input, "off"))
-        {
-          matched = 1;
+	{
+	  matched = 1;
 
-          if (im->drop_pcap_enable)
-            {
-              vlib_cli_output (vm, "captured %d pkts...", 
-                               im->pcap_main.n_packets_captured);
-              if (im->pcap_main.n_packets_captured)
-                {
-                  im->pcap_main.n_packets_to_capture = 
-                    im->pcap_main.n_packets_captured;
-                  error = pcap_write (&im->pcap_main);
-                  if (error)
-                    clib_error_report (error);
-                  else
-                    vlib_cli_output (vm, "saved to %s...", im->pcap_filename);
-                }
-            }
-          else
-            {
-              vlib_cli_output (vm, "pcap drop capture already off...");
-            }
+	  if (im->drop_pcap_enable)
+	    {
+	      vlib_cli_output (vm, "captured %d pkts...",
+			       im->pcap_main.n_packets_captured);
+	      if (im->pcap_main.n_packets_captured)
+		{
+		  im->pcap_main.n_packets_to_capture =
+		    im->pcap_main.n_packets_captured;
+		  error = pcap_write (&im->pcap_main);
+		  if (error)
+		    clib_error_report (error);
+		  else
+		    vlib_cli_output (vm, "saved to %s...", im->pcap_filename);
+		}
+	    }
+	  else
+	    {
+	      vlib_cli_output (vm, "pcap drop capture already off...");
+	    }
 
-          im->drop_pcap_enable = 0;
-        }
+	  im->drop_pcap_enable = 0;
+	}
       else if (unformat (input, "max %d", &max))
-        {
-          im->pcap_pkts_to_capture = max;
-          matched = 1;
-        }
+	{
+	  im->pcap_pkts_to_capture = max;
+	  matched = 1;
+	}
 
-      else if (unformat (input, "intfc %U", 
-                         unformat_vnet_sw_interface, vnm,
-                         &im->pcap_sw_if_index))
-        matched = 1;
+      else if (unformat (input, "intfc %U",
+			 unformat_vnet_sw_interface, vnm,
+			 &im->pcap_sw_if_index))
+	matched = 1;
       else if (unformat (input, "intfc any"))
-        {
-          im->pcap_sw_if_index = 0;
-          matched = 1;
-        }
+	{
+	  im->pcap_sw_if_index = 0;
+	  matched = 1;
+	}
       else if (unformat (input, "file %s", &filename))
-        {
-          u8 * chroot_filename;
-          /* Brain-police user path input */
-          if (strstr((char *)filename, "..") || index((char *)filename, '/'))
-            {
-              vlib_cli_output (vm, "illegal characters in filename '%s'", 
-                               filename);
-              continue;
-            }
+	{
+	  u8 *chroot_filename;
+	  /* Brain-police user path input */
+	  if (strstr ((char *) filename, "..")
+	      || index ((char *) filename, '/'))
+	    {
+	      vlib_cli_output (vm, "illegal characters in filename '%s'",
+			       filename);
+	      continue;
+	    }
 
-          chroot_filename = format (0, "/tmp/%s%c", filename, 0);
-          vec_free (filename);
-          
-          if (im->pcap_filename)
-            vec_free (im->pcap_filename);
-          vec_add1 (filename, 0);
-          im->pcap_filename = chroot_filename;
-          matched = 1;
-        }
+	  chroot_filename = format (0, "/tmp/%s%c", filename, 0);
+	  vec_free (filename);
+
+	  if (im->pcap_filename)
+	    vec_free (im->pcap_filename);
+	  vec_add1 (filename, 0);
+	  im->pcap_filename = chroot_filename;
+	  matched = 1;
+	}
       else if (unformat (input, "status"))
-        {
-          if (im->drop_pcap_enable == 0)
-            {
-              vlib_cli_output (vm, "pcap drop capture is off...");
-              continue;
-            }
+	{
+	  if (im->drop_pcap_enable == 0)
+	    {
+	      vlib_cli_output (vm, "pcap drop capture is off...");
+	      continue;
+	    }
 
-          vlib_cli_output (vm, "pcap drop capture: %d of %d pkts...",
-                           im->pcap_main.n_packets_captured,
-                           im->pcap_main.n_packets_to_capture);
-          matched = 1;
-        }
+	  vlib_cli_output (vm, "pcap drop capture: %d of %d pkts...",
+			   im->pcap_main.n_packets_captured,
+			   im->pcap_main.n_packets_to_capture);
+	  matched = 1;
+	}
 
       else
-        break;
+	break;
     }
 
   if (matched == 0)
-    return clib_error_return (0, "unknown input `%U'", 
-                              format_unformat_error, input);
+    return clib_error_return (0, "unknown input `%U'",
+			      format_unformat_error, input);
 
   return 0;
 }
 
+/* *INDENT-OFF* */
 VLIB_CLI_COMMAND (pcap_trace_command, static) = {
-    .path = "pcap drop trace",
-    .short_help = 
-    "pcap drop trace on off max <nn> intfc <intfc> file <name> status",
-    .function = pcap_drop_trace_command_fn,
+  .path = "pcap drop trace",
+  .short_help =
+  "pcap drop trace on off max <nn> intfc <intfc> file <name> status",
+  .function = pcap_drop_trace_command_fn,
 };
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vnet/vnet/l3_types.h b/vnet/vnet/l3_types.h
index 2902072..28b0891 100644
--- a/vnet/vnet/l3_types.h
+++ b/vnet/vnet/l3_types.h
@@ -41,10 +41,19 @@
 #define included_vnet_l3_types_h
 
 /* Inherit generic L3 packet types from ethernet. */
-typedef enum {
+typedef enum
+{
 #define ethernet_type(n,f) VNET_L3_PACKET_TYPE_##f,
 #include <vnet/ethernet/types.def>
 #undef ethernet_type
 } vnet_l3_packet_type_t;
 
 #endif /* included_vnet_l3_types_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vnet/vnet/misc.c b/vnet/vnet/misc.c
index 89bc15c..c0729f7 100644
--- a/vnet/vnet/misc.c
+++ b/vnet/vnet/misc.c
@@ -49,29 +49,32 @@
 
 static uword
 vnet_local_interface_tx (vlib_main_t * vm,
-			 vlib_node_runtime_t * node,
-			 vlib_frame_t * f)
+			 vlib_node_runtime_t * node, vlib_frame_t * f)
 {
   ASSERT (0);
   return f->n_vectors;
 }
 
+/* *INDENT-OFF* */
 VNET_DEVICE_CLASS (vnet_local_interface_device_class) = {
   .name = "local",
   .tx_function = vnet_local_interface_tx,
 };
+/* *INDENT-ON* */
 
+/* *INDENT-OFF* */
 VNET_HW_INTERFACE_CLASS (vnet_local_interface_hw_class,static) = {
   .name = "local",
 };
+/* *INDENT-ON* */
 
 clib_error_t *
 vnet_main_init (vlib_main_t * vm)
 {
-  vnet_main_t * vnm = vnet_get_main();
-  clib_error_t * error;
+  vnet_main_t *vnm = vnet_get_main ();
+  clib_error_t *error;
   u32 hw_if_index;
-  vnet_hw_interface_t * hw;
+  vnet_hw_interface_t *hw;
 
   if ((error = vlib_call_init_function (vm, vnet_interface_init)))
     return error;
@@ -88,8 +91,7 @@
   vnm->vlib_main = vm;
 
   hw_if_index = vnet_register_interface
-    (vnm,
-     vnet_local_interface_device_class.index, /* instance */ 0,
+    (vnm, vnet_local_interface_device_class.index, /* instance */ 0,
      vnet_local_interface_hw_class.index, /* instance */ 0);
   hw = vnet_get_hw_interface (vnm, hw_if_index);
 
@@ -100,3 +102,11 @@
 }
 
 VLIB_INIT_FUNCTION (vnet_main_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vnet/vnet/pipeline.h b/vnet/vnet/pipeline.h
index 5a0d4dc..a4aa5cf 100644
--- a/vnet/vnet/pipeline.h
+++ b/vnet/vnet/pipeline.h
@@ -23,14 +23,14 @@
  * <Define pipeline stages>
  *
  * #include <vnet/pipeline.h>
- * 
+ *
  * static uword my_node_fn (vlib_main_t * vm,
  *                               vlib_node_runtime_t * node,
  *                               vlib_frame_t * frame)
  * {
  *     return dispatch_pipeline (vm, node, frame);
  * }
- * 
+ *
  */
 
 #ifndef NSTAGES
@@ -41,20 +41,20 @@
 #define STAGE_INLINE inline
 #endif
 
-/* 
+/*
  * A prefetch stride of 2 is quasi-equivalent to doubling the number
  * of stages with every other pipeline stage empty.
  */
 
-/* 
- * This is a typical first pipeline stage, which prefetches 
- * buffer metadata and the first line of pkt data. 
+/*
+ * This is a typical first pipeline stage, which prefetches
+ * buffer metadata and the first line of pkt data.
  * To use it:
  *  #define stage0 generic_stage0
  */
-static STAGE_INLINE void generic_stage0 (vlib_main_t * vm,
-                                   vlib_node_runtime_t * node,
-                                   u32 buffer_index)
+static STAGE_INLINE void
+generic_stage0 (vlib_main_t * vm,
+		vlib_node_runtime_t * node, u32 buffer_index)
 {
   /* generic default stage 0 here */
   vlib_buffer_t *b = vlib_get_buffer (vm, buffer_index);
@@ -66,62 +66,61 @@
 
 static STAGE_INLINE uword
 dispatch_pipeline (vlib_main_t * vm,
-                   vlib_node_runtime_t * node,
-                   vlib_frame_t * frame)
+		   vlib_node_runtime_t * node, vlib_frame_t * frame)
 {
-  u32 * from = vlib_frame_vector_args (frame);
-  u32 n_left_from, n_left_to_next, * to_next, next_index, next0;
+  u32 *from = vlib_frame_vector_args (frame);
+  u32 n_left_from, n_left_to_next, *to_next, next_index, next0;
   int pi, pi_limit;
-  
+
   n_left_from = frame->n_vectors;
   next_index = node->cached_next_index;
-  
-  while (n_left_from > 0) 
+
+  while (n_left_from > 0)
     {
       vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
-      
-      pi_limit = clib_min (n_left_from, n_left_to_next);
-      
-      for (pi = 0; pi < NSTAGES-1; pi++) 
-        {
-          if(pi == pi_limit)
-            break;
-          stage0 (vm, node, from[pi]);
-        }
 
-      for (; pi < pi_limit; pi++) 
-        {
-          stage0 (vm, node, from[pi]);
-          to_next[0] = from [pi - 1];
-          to_next++;
-          n_left_to_next--;
-          next0 = last_stage (vm, node, from [pi - 1]);
-          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
-                                           to_next, n_left_to_next,
-                                           from[pi - 1], next0);
-          n_left_from--;
-          if ((int) n_left_to_next  < 0 && n_left_from > 0)
-            vlib_get_next_frame (vm, node, next_index, to_next, 
-                                 n_left_to_next);
-        }
-      
-      for (; pi < (pi_limit + (NSTAGES-1)); pi++) 
-        {
-        if (((pi - 1) >= 0) && ((pi - 1) < pi_limit)) 
-          {
-            to_next[0] = from [pi - 1];
-            to_next++;
-            n_left_to_next--;
-            next0 = last_stage (vm, node, from [pi - 1]);
-            vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
-                                             to_next, n_left_to_next,
-                                             from[pi - 1], next0);
-            n_left_from--;
-            if ((int) n_left_to_next  < 0 && n_left_from > 0)
-              vlib_get_next_frame (vm, node, next_index, to_next, 
-                                   n_left_to_next);
-          }
-        }
+      pi_limit = clib_min (n_left_from, n_left_to_next);
+
+      for (pi = 0; pi < NSTAGES - 1; pi++)
+	{
+	  if (pi == pi_limit)
+	    break;
+	  stage0 (vm, node, from[pi]);
+	}
+
+      for (; pi < pi_limit; pi++)
+	{
+	  stage0 (vm, node, from[pi]);
+	  to_next[0] = from[pi - 1];
+	  to_next++;
+	  n_left_to_next--;
+	  next0 = last_stage (vm, node, from[pi - 1]);
+	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+					   to_next, n_left_to_next,
+					   from[pi - 1], next0);
+	  n_left_from--;
+	  if ((int) n_left_to_next < 0 && n_left_from > 0)
+	    vlib_get_next_frame (vm, node, next_index, to_next,
+				 n_left_to_next);
+	}
+
+      for (; pi < (pi_limit + (NSTAGES - 1)); pi++)
+	{
+	  if (((pi - 1) >= 0) && ((pi - 1) < pi_limit))
+	    {
+	      to_next[0] = from[pi - 1];
+	      to_next++;
+	      n_left_to_next--;
+	      next0 = last_stage (vm, node, from[pi - 1]);
+	      vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+					       to_next, n_left_to_next,
+					       from[pi - 1], next0);
+	      n_left_from--;
+	      if ((int) n_left_to_next < 0 && n_left_from > 0)
+		vlib_get_next_frame (vm, node, next_index, to_next,
+				     n_left_to_next);
+	    }
+	}
       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
       from += pi_limit;
     }
@@ -132,69 +131,68 @@
 #if NSTAGES == 3
 static STAGE_INLINE uword
 dispatch_pipeline (vlib_main_t * vm,
-                   vlib_node_runtime_t * node,
-                   vlib_frame_t * frame)
+		   vlib_node_runtime_t * node, vlib_frame_t * frame)
 {
-  u32 * from = vlib_frame_vector_args (frame);
-  u32 n_left_from, n_left_to_next, * to_next, next_index, next0;
+  u32 *from = vlib_frame_vector_args (frame);
+  u32 n_left_from, n_left_to_next, *to_next, next_index, next0;
   int pi, pi_limit;
-  
+
   n_left_from = frame->n_vectors;
   next_index = node->cached_next_index;
-  
-  while (n_left_from > 0) 
+
+  while (n_left_from > 0)
     {
       vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
-      
+
       pi_limit = clib_min (n_left_from, n_left_to_next);
-      
-      for (pi = 0; pi < NSTAGES-1; pi++) 
-        {
-          if(pi == pi_limit)
-            break;
-          stage0 (vm, node, from[pi]);
-          if (pi-1 >= 0)
-            stage1 (vm, node, from[pi-1]);
-        }
-      
-      for (; pi < pi_limit; pi++) 
-        {
-          stage0 (vm, node, from[pi]);
-          stage1 (vm, node, from[pi-1]);
-          to_next[0] = from [pi - 2];
-          to_next++;
-          n_left_to_next--;
-          next0 = last_stage (vm, node, from [pi - 2]);
-          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
-                                           to_next, n_left_to_next,
-                                           from[pi - 2], next0);
-          n_left_from--;
-          if ((int) n_left_to_next  < 0 && n_left_from > 0)
-            vlib_get_next_frame (vm, node, next_index, to_next, 
-                                 n_left_to_next);
-        }
-      
-      
-      for (; pi < (pi_limit + (NSTAGES-1)); pi++) 
-        {
-          if (((pi - 1) >= 0) && ((pi - 1) < pi_limit))
-            stage1 (vm, node, from[pi-1]);
-          if (((pi - 2) >= 0) && ((pi - 2) < pi_limit)) 
-            {
-              to_next[0] = from[pi - 2];
-              to_next++;
-              n_left_to_next--;
-              next0 = last_stage (vm, node, from [pi - 2]);
-              vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
-                                               to_next, n_left_to_next,
-                                               from[pi - 2], next0);
-              n_left_from--;
-              if ((int) n_left_to_next < 0 && n_left_from > 0)
-                vlib_get_next_frame (vm, node, next_index, to_next, 
-                                     n_left_to_next);
-            }
-        }
-      
+
+      for (pi = 0; pi < NSTAGES - 1; pi++)
+	{
+	  if (pi == pi_limit)
+	    break;
+	  stage0 (vm, node, from[pi]);
+	  if (pi - 1 >= 0)
+	    stage1 (vm, node, from[pi - 1]);
+	}
+
+      for (; pi < pi_limit; pi++)
+	{
+	  stage0 (vm, node, from[pi]);
+	  stage1 (vm, node, from[pi - 1]);
+	  to_next[0] = from[pi - 2];
+	  to_next++;
+	  n_left_to_next--;
+	  next0 = last_stage (vm, node, from[pi - 2]);
+	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+					   to_next, n_left_to_next,
+					   from[pi - 2], next0);
+	  n_left_from--;
+	  if ((int) n_left_to_next < 0 && n_left_from > 0)
+	    vlib_get_next_frame (vm, node, next_index, to_next,
+				 n_left_to_next);
+	}
+
+
+      for (; pi < (pi_limit + (NSTAGES - 1)); pi++)
+	{
+	  if (((pi - 1) >= 0) && ((pi - 1) < pi_limit))
+	    stage1 (vm, node, from[pi - 1]);
+	  if (((pi - 2) >= 0) && ((pi - 2) < pi_limit))
+	    {
+	      to_next[0] = from[pi - 2];
+	      to_next++;
+	      n_left_to_next--;
+	      next0 = last_stage (vm, node, from[pi - 2]);
+	      vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+					       to_next, n_left_to_next,
+					       from[pi - 2], next0);
+	      n_left_from--;
+	      if ((int) n_left_to_next < 0 && n_left_from > 0)
+		vlib_get_next_frame (vm, node, next_index, to_next,
+				     n_left_to_next);
+	    }
+	}
+
       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
       from += pi_limit;
     }
@@ -205,76 +203,75 @@
 #if NSTAGES == 4
 static STAGE_INLINE uword
 dispatch_pipeline (vlib_main_t * vm,
-                   vlib_node_runtime_t * node,
-                   vlib_frame_t * frame)
+		   vlib_node_runtime_t * node, vlib_frame_t * frame)
 {
-  u32 * from = vlib_frame_vector_args (frame);
-  u32 n_left_from, n_left_to_next, * to_next, next_index, next0;
+  u32 *from = vlib_frame_vector_args (frame);
+  u32 n_left_from, n_left_to_next, *to_next, next_index, next0;
   int pi, pi_limit;
-  
+
   n_left_from = frame->n_vectors;
   next_index = node->cached_next_index;
-  
-  while (n_left_from > 0) 
+
+  while (n_left_from > 0)
     {
       vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
-      
+
       pi_limit = clib_min (n_left_from, n_left_to_next);
-      
-      for (pi = 0; pi < NSTAGES-1; pi++) 
-        {
-          if(pi == pi_limit)
-            break;
-          stage0 (vm, node, from[pi]);
-          if (pi-1 >= 0)
-            stage1 (vm, node, from[pi-1]);
-          if (pi-2 >= 0)
-            stage2 (vm, node, from[pi-2]);
-        }
-      
-        for (; pi < pi_limit; pi++) 
-          {
-            stage0 (vm, node, from[pi]);
-            stage1 (vm, node, from[pi-1]);
-            stage2 (vm, node, from[pi-2]);
-            to_next[0] = from [pi - 3];
-            to_next++;
-            n_left_to_next--;
-            next0 = last_stage (vm, node, from [pi - 3]);
-            vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
-                                             to_next, n_left_to_next,
-                                             from[pi - 3], next0);
-            n_left_from--;
-            if ((int) n_left_to_next  < 0 && n_left_from > 0)
-              vlib_get_next_frame (vm, node, next_index, to_next, 
-                                   n_left_to_next);
-          }
-        
-        
-        for (; pi < (pi_limit + (NSTAGES-1)); pi++) 
-          {
-            if (((pi - 1) >= 0) && ((pi - 1) < pi_limit))
-              stage1 (vm, node, from[pi-1]);
-            if (((pi - 2) >= 0) && ((pi - 2) < pi_limit))
-              stage2 (vm, node, from[pi-2]);
-            if (((pi - 3) >= 0) && ((pi - 3) < pi_limit)) 
-              {
-                to_next[0] = from[pi - 3];
-                to_next++;
-                n_left_to_next--;
-                next0 = last_stage (vm, node, from [pi - 3]);
-                vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
-                                                 to_next, n_left_to_next,
-                                                 from[pi - 3], next0);
-                n_left_from--;
-                if ((int) n_left_to_next  < 0 && n_left_from > 0)
-                  vlib_get_next_frame (vm, node, next_index, to_next, 
-                                       n_left_to_next);
-              }
-          }
-        
-        vlib_put_next_frame (vm, node, next_index, n_left_to_next);
-        from += pi_limit;
+
+      for (pi = 0; pi < NSTAGES - 1; pi++)
+	{
+	  if (pi == pi_limit)
+	    break;
+	  stage0 (vm, node, from[pi]);
+	  if (pi - 1 >= 0)
+	    stage1 (vm, node, from[pi - 1]);
+	  if (pi - 2 >= 0)
+	    stage2 (vm, node, from[pi - 2]);
+	}
+
+      for (; pi < pi_limit; pi++)
+	{
+	  stage0 (vm, node, from[pi]);
+	  stage1 (vm, node, from[pi - 1]);
+	  stage2 (vm, node, from[pi - 2]);
+	  to_next[0] = from[pi - 3];
+	  to_next++;
+	  n_left_to_next--;
+	  next0 = last_stage (vm, node, from[pi - 3]);
+	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+					   to_next, n_left_to_next,
+					   from[pi - 3], next0);
+	  n_left_from--;
+	  if ((int) n_left_to_next < 0 && n_left_from > 0)
+	    vlib_get_next_frame (vm, node, next_index, to_next,
+				 n_left_to_next);
+	}
+
+
+      for (; pi < (pi_limit + (NSTAGES - 1)); pi++)
+	{
+	  if (((pi - 1) >= 0) && ((pi - 1) < pi_limit))
+	    stage1 (vm, node, from[pi - 1]);
+	  if (((pi - 2) >= 0) && ((pi - 2) < pi_limit))
+	    stage2 (vm, node, from[pi - 2]);
+	  if (((pi - 3) >= 0) && ((pi - 3) < pi_limit))
+	    {
+	      to_next[0] = from[pi - 3];
+	      to_next++;
+	      n_left_to_next--;
+	      next0 = last_stage (vm, node, from[pi - 3]);
+	      vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+					       to_next, n_left_to_next,
+					       from[pi - 3], next0);
+	      n_left_from--;
+	      if ((int) n_left_to_next < 0 && n_left_from > 0)
+		vlib_get_next_frame (vm, node, next_index, to_next,
+				     n_left_to_next);
+	    }
+	}
+
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+      from += pi_limit;
     }
   return frame->n_vectors;
 }
@@ -284,81 +281,80 @@
 #if NSTAGES == 5
 static STAGE_INLINE uword
 dispatch_pipeline (vlib_main_t * vm,
-                   vlib_node_runtime_t * node,
-                   vlib_frame_t * frame)
+		   vlib_node_runtime_t * node, vlib_frame_t * frame)
 {
-  u32 * from = vlib_frame_vector_args (frame);
-  u32 n_left_from, n_left_to_next, * to_next, next_index, next0;
+  u32 *from = vlib_frame_vector_args (frame);
+  u32 n_left_from, n_left_to_next, *to_next, next_index, next0;
   int pi, pi_limit;
-  
+
   n_left_from = frame->n_vectors;
   next_index = node->cached_next_index;
-  
-  while (n_left_from > 0) 
+
+  while (n_left_from > 0)
     {
       vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
-      
+
       pi_limit = clib_min (n_left_from, n_left_to_next);
-      
-      for (pi = 0; pi < NSTAGES-1; pi++) 
-        {
-          if(pi == pi_limit)
-            break;
-          stage0 (vm, node, from[pi]);
-          if (pi-1 >= 0)
-            stage1 (vm, node, from[pi-1]);
-          if (pi-2 >= 0)
-            stage2 (vm, node, from[pi-2]);
-          if (pi-3 >= 0)
-            stage3 (vm, node, from[pi-3]);
-        }
-      
-        for (; pi < pi_limit; pi++) 
-          {
-            stage0 (vm, node, from[pi]);
-            stage1 (vm, node, from[pi-1]);
-            stage2 (vm, node, from[pi-2]);
-            stage3 (vm, node, from[pi-3]);
-            to_next[0] = from [pi - 4];
-            to_next++;
-            n_left_to_next--;
-            next0 = last_stage (vm, node, from [pi - 4]);
-            vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
-                                             to_next, n_left_to_next,
-                                             from[pi - 4], next0);
-            n_left_from--;
-            if ((int) n_left_to_next  < 0 && n_left_from > 0)
-              vlib_get_next_frame (vm, node, next_index, to_next, 
-                                   n_left_to_next);
-          }
-        
-        
-        for (; pi < (pi_limit + (NSTAGES-1)); pi++) 
-          {
-            if (((pi - 1) >= 0) && ((pi - 1) < pi_limit))
-              stage1 (vm, node, from[pi-1]);
-            if (((pi - 2) >= 0) && ((pi - 2) < pi_limit))
-              stage2 (vm, node, from[pi - 2]);
-            if (((pi - 3) >= 0) && ((pi - 3) < pi_limit))
-              stage3 (vm, node, from[pi - 3]);
-            if (((pi - 4) >= 0) && ((pi - 4) < pi_limit)) 
-              {
-                to_next[0] = from[pi - 4];
-                to_next++;
-                n_left_to_next--;
-                next0 = last_stage (vm, node, from [pi - 4]);
-                vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
-                                                 to_next, n_left_to_next,
-                                                 from[pi - 4], next0);
-                n_left_from--;
-                if ((int) n_left_to_next  < 0 && n_left_from > 0)
-                  vlib_get_next_frame (vm, node, next_index, to_next, 
-                                       n_left_to_next);
-              }
-          }
-        
-        vlib_put_next_frame (vm, node, next_index, n_left_to_next);
-        from += pi_limit;
+
+      for (pi = 0; pi < NSTAGES - 1; pi++)
+	{
+	  if (pi == pi_limit)
+	    break;
+	  stage0 (vm, node, from[pi]);
+	  if (pi - 1 >= 0)
+	    stage1 (vm, node, from[pi - 1]);
+	  if (pi - 2 >= 0)
+	    stage2 (vm, node, from[pi - 2]);
+	  if (pi - 3 >= 0)
+	    stage3 (vm, node, from[pi - 3]);
+	}
+
+      for (; pi < pi_limit; pi++)
+	{
+	  stage0 (vm, node, from[pi]);
+	  stage1 (vm, node, from[pi - 1]);
+	  stage2 (vm, node, from[pi - 2]);
+	  stage3 (vm, node, from[pi - 3]);
+	  to_next[0] = from[pi - 4];
+	  to_next++;
+	  n_left_to_next--;
+	  next0 = last_stage (vm, node, from[pi - 4]);
+	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+					   to_next, n_left_to_next,
+					   from[pi - 4], next0);
+	  n_left_from--;
+	  if ((int) n_left_to_next < 0 && n_left_from > 0)
+	    vlib_get_next_frame (vm, node, next_index, to_next,
+				 n_left_to_next);
+	}
+
+
+      for (; pi < (pi_limit + (NSTAGES - 1)); pi++)
+	{
+	  if (((pi - 1) >= 0) && ((pi - 1) < pi_limit))
+	    stage1 (vm, node, from[pi - 1]);
+	  if (((pi - 2) >= 0) && ((pi - 2) < pi_limit))
+	    stage2 (vm, node, from[pi - 2]);
+	  if (((pi - 3) >= 0) && ((pi - 3) < pi_limit))
+	    stage3 (vm, node, from[pi - 3]);
+	  if (((pi - 4) >= 0) && ((pi - 4) < pi_limit))
+	    {
+	      to_next[0] = from[pi - 4];
+	      to_next++;
+	      n_left_to_next--;
+	      next0 = last_stage (vm, node, from[pi - 4]);
+	      vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+					       to_next, n_left_to_next,
+					       from[pi - 4], next0);
+	      n_left_from--;
+	      if ((int) n_left_to_next < 0 && n_left_from > 0)
+		vlib_get_next_frame (vm, node, next_index, to_next,
+				     n_left_to_next);
+	    }
+	}
+
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+      from += pi_limit;
     }
   return frame->n_vectors;
 }
@@ -367,87 +363,94 @@
 #if NSTAGES == 6
 static STAGE_INLINE uword
 dispatch_pipeline (vlib_main_t * vm,
-                   vlib_node_runtime_t * node,
-                   vlib_frame_t * frame)
+		   vlib_node_runtime_t * node, vlib_frame_t * frame)
 {
-  u32 * from = vlib_frame_vector_args (frame);
-  u32 n_left_from, n_left_to_next, * to_next, next_index, next0;
+  u32 *from = vlib_frame_vector_args (frame);
+  u32 n_left_from, n_left_to_next, *to_next, next_index, next0;
   int pi, pi_limit;
-  
+
   n_left_from = frame->n_vectors;
   next_index = node->cached_next_index;
-  
-  while (n_left_from > 0) 
+
+  while (n_left_from > 0)
     {
       vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
-      
+
       pi_limit = clib_min (n_left_from, n_left_to_next);
-      
-      for (pi = 0; pi < NSTAGES-1; pi++) 
-        {
-          if(pi == pi_limit)
-            break;
-          stage0 (vm, node, from[pi]);
-          if (pi-1 >= 0)
-            stage1 (vm, node, from[pi-1]);
-          if (pi-2 >= 0)
-            stage2 (vm, node, from[pi-2]);
-          if (pi-3 >= 0)
-            stage3 (vm, node, from[pi-3]);
-          if (pi-4 >= 0)
-            stage4 (vm, node, from[pi-4]);
-        }
-      
-        for (; pi < pi_limit; pi++) 
-          {
-            stage0 (vm, node, from[pi]);
-            stage1 (vm, node, from[pi-1]);
-            stage2 (vm, node, from[pi-2]);
-            stage3 (vm, node, from[pi-3]);
-            stage4 (vm, node, from[pi-4]);
-            to_next[0] = from [pi - 5];
-            to_next++;
-            n_left_to_next--;
-            next0 = last_stage (vm, node, from [pi - 5]);
-            vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
-                                             to_next, n_left_to_next,
-                                             from[pi - 5], next0);
-            n_left_from--;
-            if ((int) n_left_to_next  < 0 && n_left_from > 0)
-              vlib_get_next_frame (vm, node, next_index, to_next, 
-                                   n_left_to_next);
-          }
-        
-        
-        for (; pi < (pi_limit + (NSTAGES-1)); pi++) 
-          {
-            if (((pi - 1) >= 0) && ((pi - 1) < pi_limit))
-              stage1 (vm, node, from[pi-1]);
-            if (((pi - 2) >= 0) && ((pi - 2) < pi_limit))
-              stage2 (vm, node, from[pi - 2]);
-            if (((pi - 3) >= 0) && ((pi - 3) < pi_limit))
-              stage3 (vm, node, from[pi - 3]);
-            if (((pi - 4) >= 0) && ((pi - 4) < pi_limit))
-              stage4 (vm, node, from[pi - 4]);
-            if (((pi - 5) >= 0) && ((pi - 5) < pi_limit)) 
-              {
-                to_next[0] = from[pi - 5];
-                to_next++;
-                n_left_to_next--;
-                next0 = last_stage (vm, node, from [pi - 5]);
-                vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
-                                                 to_next, n_left_to_next,
-                                                 from[pi - 5], next0);
-                n_left_from--;
-                if ((int) n_left_to_next  < 0 && n_left_from > 0)
-                  vlib_get_next_frame (vm, node, next_index, to_next, 
-                                       n_left_to_next);
-              }
-          }
-        
-        vlib_put_next_frame (vm, node, next_index, n_left_to_next);
-        from += pi_limit;
+
+      for (pi = 0; pi < NSTAGES - 1; pi++)
+	{
+	  if (pi == pi_limit)
+	    break;
+	  stage0 (vm, node, from[pi]);
+	  if (pi - 1 >= 0)
+	    stage1 (vm, node, from[pi - 1]);
+	  if (pi - 2 >= 0)
+	    stage2 (vm, node, from[pi - 2]);
+	  if (pi - 3 >= 0)
+	    stage3 (vm, node, from[pi - 3]);
+	  if (pi - 4 >= 0)
+	    stage4 (vm, node, from[pi - 4]);
+	}
+
+      for (; pi < pi_limit; pi++)
+	{
+	  stage0 (vm, node, from[pi]);
+	  stage1 (vm, node, from[pi - 1]);
+	  stage2 (vm, node, from[pi - 2]);
+	  stage3 (vm, node, from[pi - 3]);
+	  stage4 (vm, node, from[pi - 4]);
+	  to_next[0] = from[pi - 5];
+	  to_next++;
+	  n_left_to_next--;
+	  next0 = last_stage (vm, node, from[pi - 5]);
+	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+					   to_next, n_left_to_next,
+					   from[pi - 5], next0);
+	  n_left_from--;
+	  if ((int) n_left_to_next < 0 && n_left_from > 0)
+	    vlib_get_next_frame (vm, node, next_index, to_next,
+				 n_left_to_next);
+	}
+
+
+      for (; pi < (pi_limit + (NSTAGES - 1)); pi++)
+	{
+	  if (((pi - 1) >= 0) && ((pi - 1) < pi_limit))
+	    stage1 (vm, node, from[pi - 1]);
+	  if (((pi - 2) >= 0) && ((pi - 2) < pi_limit))
+	    stage2 (vm, node, from[pi - 2]);
+	  if (((pi - 3) >= 0) && ((pi - 3) < pi_limit))
+	    stage3 (vm, node, from[pi - 3]);
+	  if (((pi - 4) >= 0) && ((pi - 4) < pi_limit))
+	    stage4 (vm, node, from[pi - 4]);
+	  if (((pi - 5) >= 0) && ((pi - 5) < pi_limit))
+	    {
+	      to_next[0] = from[pi - 5];
+	      to_next++;
+	      n_left_to_next--;
+	      next0 = last_stage (vm, node, from[pi - 5]);
+	      vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+					       to_next, n_left_to_next,
+					       from[pi - 5], next0);
+	      n_left_from--;
+	      if ((int) n_left_to_next < 0 && n_left_from > 0)
+		vlib_get_next_frame (vm, node, next_index, to_next,
+				     n_left_to_next);
+	    }
+	}
+
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+      from += pi_limit;
     }
   return frame->n_vectors;
 }
 #endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vnet/vnet/replication.c b/vnet/vnet/replication.c
index fc2cbd1..571be7d 100644
--- a/vnet/vnet/replication.c
+++ b/vnet/vnet/replication.c
@@ -27,57 +27,62 @@
 
 replication_context_t *
 replication_prep (vlib_main_t * vm,
-                  vlib_buffer_t * b0,
-                  u32 recycle_node_index,
-                  u32 l2_packet)
+		  vlib_buffer_t * b0, u32 recycle_node_index, u32 l2_packet)
 {
-  replication_main_t * rm = &replication_main;
-  replication_context_t * ctx;
+  replication_main_t *rm = &replication_main;
+  replication_context_t *ctx;
   uword cpu_number = vm->cpu_index;
-  ip4_header_t * ip;
+  ip4_header_t *ip;
   u32 ctx_id;
 
-  // Allocate a context, reserve context 0
-  if (PREDICT_FALSE(rm->contexts[cpu_number] == 0))
+  /* Allocate a context, reserve context 0 */
+  if (PREDICT_FALSE (rm->contexts[cpu_number] == 0))
     pool_get_aligned (rm->contexts[cpu_number], ctx, CLIB_CACHE_LINE_BYTES);
-      
+
   pool_get_aligned (rm->contexts[cpu_number], ctx, CLIB_CACHE_LINE_BYTES);
   ctx_id = ctx - rm->contexts[cpu_number];
 
-  // Save state from vlib buffer
+  /* Save state from vlib buffer */
   ctx->saved_free_list_index = b0->free_list_index;
   ctx->current_data = b0->current_data;
 
-  // Set up vlib buffer hooks
+  /* Set up vlib buffer hooks */
   b0->recycle_count = ctx_id;
   b0->free_list_index = rm->recycle_list_index;
   b0->flags |= VLIB_BUFFER_RECYCLE;
 
-  // Save feature state
+  /* Save feature state */
   ctx->recycle_node_index = recycle_node_index;
 
-  // Save vnet state
-  clib_memcpy (ctx->vnet_buffer, vnet_buffer(b0), sizeof(vnet_buffer_opaque_t));
+  /* Save vnet state */
+  clib_memcpy (ctx->vnet_buffer, vnet_buffer (b0),
+	       sizeof (vnet_buffer_opaque_t));
 
-  // Save packet contents
+  /* Save packet contents */
   ctx->l2_packet = l2_packet;
-  ip = (ip4_header_t *)vlib_buffer_get_current (b0);
-  if (l2_packet) {
-    // Save ethernet header
-    ctx->l2_header[0] = ((u64 *)ip)[0];
-    ctx->l2_header[1] = ((u64 *)ip)[1];
-    ctx->l2_header[2] = ((u64 *)ip)[2];
-    // set ip to the true ip header
-    ip = (ip4_header_t *)(((u8 *)ip) + vnet_buffer(b0)->l2.l2_len);
-  }
+  ip = (ip4_header_t *) vlib_buffer_get_current (b0);
+  if (l2_packet)
+    {
+      /* Save ethernet header */
+      ctx->l2_header[0] = ((u64 *) ip)[0];
+      ctx->l2_header[1] = ((u64 *) ip)[1];
+      ctx->l2_header[2] = ((u64 *) ip)[2];
+      /* set ip to the true ip header */
+      ip = (ip4_header_t *) (((u8 *) ip) + vnet_buffer (b0)->l2.l2_len);
+    }
 
-  // Copy L3 fields. 
-  // We need to save TOS for ip4 and ip6 packets. Fortunately the TOS field is 
-  // in the first two bytes of both the ip4 and ip6 headers.
-  ctx->ip_tos = *((u16 *)(ip));
+  /*
+   * Copy L3 fields.
+   * We need to save TOS for ip4 and ip6 packets.
+   * Fortunately the TOS field is
+   * in the first two bytes of both the ip4 and ip6 headers.
+   */
+  ctx->ip_tos = *((u16 *) (ip));
 
-  // Save the ip4 checksum as well. We just blindly save the corresponding two
-  // bytes even for ip6 packets. 
+  /*
+   * Save the ip4 checksum as well. We just blindly save the corresponding two
+   * bytes even for ip6 packets.
+   */
   ctx->ip4_checksum = ip->checksum;
 
   return ctx;
@@ -85,48 +90,51 @@
 
 
 replication_context_t *
-replication_recycle (vlib_main_t * vm,
-                     vlib_buffer_t * b0,
-                     u32 is_last)
+replication_recycle (vlib_main_t * vm, vlib_buffer_t * b0, u32 is_last)
 {
-  replication_main_t * rm = &replication_main;
-  replication_context_t * ctx;
+  replication_main_t *rm = &replication_main;
+  replication_context_t *ctx;
   uword cpu_number = vm->cpu_index;
-  ip4_header_t * ip;
+  ip4_header_t *ip;
 
-  // Get access to the replication context
+  /* Get access to the replication context */
   ctx = pool_elt_at_index (rm->contexts[cpu_number], b0->recycle_count);
 
-  // Restore vnet buffer state
-  clib_memcpy (vnet_buffer(b0), ctx->vnet_buffer, sizeof(vnet_buffer_opaque_t));
+  /* Restore vnet buffer state */
+  clib_memcpy (vnet_buffer (b0), ctx->vnet_buffer,
+	       sizeof (vnet_buffer_opaque_t));
 
-  // Restore the packet start (current_data) and length
-  vlib_buffer_advance(b0, ctx->current_data - b0->current_data);
+  /* Restore the packet start (current_data) and length */
+  vlib_buffer_advance (b0, ctx->current_data - b0->current_data);
 
-  // Restore packet contents
-  ip = (ip4_header_t *)vlib_buffer_get_current (b0);
-  if (ctx->l2_packet) {
-    // Restore ethernet header
-    ((u64 *)ip)[0] = ctx->l2_header[0];
-    ((u64 *)ip)[1] = ctx->l2_header[1];
-    ((u64 *)ip)[2] = ctx->l2_header[2];
-    // set ip to the true ip header
-    ip = (ip4_header_t *)(((u8 *)ip) + vnet_buffer(b0)->l2.l2_len);
-  }
+  /* Restore packet contents */
+  ip = (ip4_header_t *) vlib_buffer_get_current (b0);
+  if (ctx->l2_packet)
+    {
+      /* Restore ethernet header */
+      ((u64 *) ip)[0] = ctx->l2_header[0];
+      ((u64 *) ip)[1] = ctx->l2_header[1];
+      ((u64 *) ip)[2] = ctx->l2_header[2];
+      /* set ip to the true ip header */
+      ip = (ip4_header_t *) (((u8 *) ip) + vnet_buffer (b0)->l2.l2_len);
+    }
 
   // Restore L3 fields
-  *((u16 *)(ip)) = ctx->ip_tos;
+  *((u16 *) (ip)) = ctx->ip_tos;
   ip->checksum = ctx->ip4_checksum;
 
-  if (is_last) {
-    // This is the last replication in the list. 
-    // Restore original buffer free functionality.
-    b0->free_list_index = ctx->saved_free_list_index;
-    b0->flags &= ~VLIB_BUFFER_RECYCLE;
+  if (is_last)
+    {
+      /*
+       * This is the last replication in the list.
+       * Restore original buffer free functionality.
+       */
+      b0->free_list_index = ctx->saved_free_list_index;
+      b0->flags &= ~VLIB_BUFFER_RECYCLE;
 
-    // Free context back to its pool
-    pool_put (rm->contexts[cpu_number], ctx);
-  }
+      /* Free context back to its pool */
+      pool_put (rm->contexts[cpu_number], ctx);
+    }
 
   return ctx;
 }
@@ -137,133 +145,143 @@
  * fish pkts back from the recycle queue/freelist
  * un-flatten the context chains
  */
-static void replication_recycle_callback (vlib_main_t *vm, 
-                                          vlib_buffer_free_list_t * fl)
+static void
+replication_recycle_callback (vlib_main_t * vm, vlib_buffer_free_list_t * fl)
 {
-  vlib_frame_t * f = 0;
+  vlib_frame_t *f = 0;
   u32 n_left_from;
   u32 n_left_to_next = 0;
   u32 n_this_frame = 0;
-  u32 * from;
-  u32 * to_next = 0;
+  u32 *from;
+  u32 *to_next = 0;
   u32 bi0, pi0;
   vlib_buffer_t *b0;
   int i;
-  replication_main_t * rm = &replication_main;
-  replication_context_t * ctx;
-  u32 feature_node_index = 0; 
+  replication_main_t *rm = &replication_main;
+  replication_context_t *ctx;
+  u32 feature_node_index = 0;
   uword cpu_number = vm->cpu_index;
 
-  // All buffers in the list are destined to the same recycle node.
-  // Pull the recycle node index from the first buffer. 
-  // Note: this could be sped up if the node index were stuffed into
-  // the freelist itself.
-  if (vec_len (fl->aligned_buffers) > 0) {
-    bi0 = fl->aligned_buffers[0];
-    b0 = vlib_get_buffer (vm, bi0);
-    ctx = pool_elt_at_index (rm->contexts[cpu_number],
-                             b0->recycle_count);
-    feature_node_index = ctx->recycle_node_index;
-  } else if (vec_len (fl->unaligned_buffers) > 0) {
-    bi0 = fl->unaligned_buffers[0];
-    b0 = vlib_get_buffer (vm, bi0);
-    ctx = pool_elt_at_index (rm->contexts[cpu_number], b0->recycle_count);
-    feature_node_index = ctx->recycle_node_index;
-  }
+  /*
+   * All buffers in the list are destined to the same recycle node.
+   * Pull the recycle node index from the first buffer.
+   * Note: this could be sped up if the node index were stuffed into
+   * the freelist itself.
+   */
+  if (vec_len (fl->aligned_buffers) > 0)
+    {
+      bi0 = fl->aligned_buffers[0];
+      b0 = vlib_get_buffer (vm, bi0);
+      ctx = pool_elt_at_index (rm->contexts[cpu_number], b0->recycle_count);
+      feature_node_index = ctx->recycle_node_index;
+    }
+  else if (vec_len (fl->unaligned_buffers) > 0)
+    {
+      bi0 = fl->unaligned_buffers[0];
+      b0 = vlib_get_buffer (vm, bi0);
+      ctx = pool_elt_at_index (rm->contexts[cpu_number], b0->recycle_count);
+      feature_node_index = ctx->recycle_node_index;
+    }
 
   /* aligned, unaligned buffers */
-  for (i = 0; i < 2; i++) 
+  for (i = 0; i < 2; i++)
     {
       if (i == 0)
-        {
-          from = fl->aligned_buffers;
-          n_left_from = vec_len (from);
-        }
+	{
+	  from = fl->aligned_buffers;
+	  n_left_from = vec_len (from);
+	}
       else
-        {
-          from = fl->unaligned_buffers;
-          n_left_from = vec_len (from);
-        }
-    
+	{
+	  from = fl->unaligned_buffers;
+	  n_left_from = vec_len (from);
+	}
+
       while (n_left_from > 0)
-        {
-          if (PREDICT_FALSE(n_left_to_next == 0)) 
-            {
-              if (f)
-                {
-                  f->n_vectors = n_this_frame;
-                  vlib_put_frame_to_node (vm, feature_node_index, f);
-                }
-              
-              f = vlib_get_frame_to_node (vm, feature_node_index);
-              to_next = vlib_frame_vector_args (f);
-              n_left_to_next = VLIB_FRAME_SIZE;
-              n_this_frame = 0;
-            }
-          
-          bi0 = from[0];
-          if (PREDICT_TRUE(n_left_from > 1))
-            {
-              pi0 = from[1];
-              vlib_prefetch_buffer_with_index(vm,pi0,LOAD);
-            }
+	{
+	  if (PREDICT_FALSE (n_left_to_next == 0))
+	    {
+	      if (f)
+		{
+		  f->n_vectors = n_this_frame;
+		  vlib_put_frame_to_node (vm, feature_node_index, f);
+		}
+
+	      f = vlib_get_frame_to_node (vm, feature_node_index);
+	      to_next = vlib_frame_vector_args (f);
+	      n_left_to_next = VLIB_FRAME_SIZE;
+	      n_this_frame = 0;
+	    }
+
+	  bi0 = from[0];
+	  if (PREDICT_TRUE (n_left_from > 1))
+	    {
+	      pi0 = from[1];
+	      vlib_prefetch_buffer_with_index (vm, pi0, LOAD);
+	    }
 
 	  b0 = vlib_get_buffer (vm, bi0);
 
-          // Mark that this buffer was just recycled
-          b0->flags |= VLIB_BUFFER_IS_RECYCLED;
+	  /* Mark that this buffer was just recycled */
+	  b0->flags |= VLIB_BUFFER_IS_RECYCLED;
 
-          // If buffer is traced, mark frame as traced
-          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
-              f->flags |= VLIB_FRAME_TRACE;
+	  /* If buffer is traced, mark frame as traced */
+	  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+	    f->flags |= VLIB_FRAME_TRACE;
 
-          to_next[0] = bi0;
+	  to_next[0] = bi0;
 
-          from++;
-          to_next++;
-          n_this_frame++;
-          n_left_to_next--;
-          n_left_from--;
-        }
+	  from++;
+	  to_next++;
+	  n_this_frame++;
+	  n_left_to_next--;
+	  n_left_from--;
+	}
     }
-  
+
   vec_reset_length (fl->aligned_buffers);
   vec_reset_length (fl->unaligned_buffers);
 
   if (f)
     {
-      ASSERT(n_this_frame);
+      ASSERT (n_this_frame);
       f->n_vectors = n_this_frame;
       vlib_put_frame_to_node (vm, feature_node_index, f);
     }
 }
 
-
-
-clib_error_t *replication_init (vlib_main_t *vm)
+clib_error_t *
+replication_init (vlib_main_t * vm)
 {
-  replication_main_t * rm = &replication_main;
-  vlib_buffer_main_t * bm = vm->buffer_main;
-  vlib_buffer_free_list_t * fl;
-  __attribute__((unused)) replication_context_t * ctx;
-  vlib_thread_main_t * tm = vlib_get_thread_main();
-    
-  rm->vlib_main = vm;
-  rm->vnet_main = vnet_get_main();
-  rm->recycle_list_index = 
-    vlib_buffer_create_free_list (vm, 1024 /* fictional */, 
-                                  "replication-recycle");
+  replication_main_t *rm = &replication_main;
+  vlib_buffer_main_t *bm = vm->buffer_main;
+  vlib_buffer_free_list_t *fl;
+  __attribute__ ((unused)) replication_context_t *ctx;
+  vlib_thread_main_t *tm = vlib_get_thread_main ();
 
-  fl = pool_elt_at_index (bm->buffer_free_list_pool, 
-                          rm->recycle_list_index);
+  rm->vlib_main = vm;
+  rm->vnet_main = vnet_get_main ();
+  rm->recycle_list_index =
+    vlib_buffer_create_free_list (vm, 1024 /* fictional */ ,
+				  "replication-recycle");
+
+  fl = pool_elt_at_index (bm->buffer_free_list_pool, rm->recycle_list_index);
 
   fl->buffers_added_to_freelist_function = replication_recycle_callback;
 
-  // Verify the replication context is the expected size
-  ASSERT(sizeof(replication_context_t) == 128); // 2 cache lines
+  /* Verify the replication context is the expected size */
+  ASSERT (sizeof (replication_context_t) == 128);	/* 2 cache lines */
 
   vec_validate (rm->contexts, tm->n_vlib_mains - 1);
   return 0;
 }
 
 VLIB_INIT_FUNCTION (replication_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vnet/vnet/replication.h b/vnet/vnet/replication.h
index b16d5dc..5dc554c 100644
--- a/vnet/vnet/replication.h
+++ b/vnet/vnet/replication.h
@@ -24,42 +24,46 @@
 #include <vnet/replication.h>
 
 
-typedef struct {
+typedef struct
+{
+  /* The entire vnet buffer header restored for each replica */
+  u8 vnet_buffer[32];		/* 16B aligned to allow vector unit copy */
+  u8 reserved[32];		/* space for future expansion of vnet buffer header */
 
-  // The entire vnet buffer header restored for each replica
-  u8   vnet_buffer[32];      // 16B aligned to allow vector unit copy
-  u8   reserved[32];         // space for future expansion of vnet buffer header
+  /* feature state used during this replication */
+  u64 feature_replicas;		/* feature's id for its set of replicas */
+  u32 feature_counter;		/* feature's current index into set of replicas */
+  u32 recycle_node_index;	/* feature's recycle node index */
 
-  // feature state used during this replication
-  u64  feature_replicas;     // feature's id for its set of replicas
-  u32  feature_counter;      // feature's current index into set of replicas
-  u32  recycle_node_index;   // feature's recycle node index
+  /*
+   * data saved from the start of replication and restored
+   * at the end of replication
+   */
+  u32 saved_free_list_index;	/* from vlib buffer */
 
-  // data saved from the start of replication and restored at the end of replication
-  u32  saved_free_list_index; // from vlib buffer
+  /* data saved from the original packet and restored for each replica */
+  u64 l2_header[3];		/*  24B (must be at least 22B for l2 packets) */
+  u16 ip_tos;			/* v4 and v6 */
+  u16 ip4_checksum;		/* needed for v4 only */
 
-  // data saved from the original packet and restored for each replica
-  u64  l2_header[3];         // 24B (must be at least 22B for l2 packets)
-  u16  ip_tos;               // v4 and v6
-  u16  ip4_checksum;         // needed for v4 only
+  /* data saved from the vlib buffer header and restored for each replica */
+  i16 current_data;		/* offset of first byte of packet in packet data */
+  u8 pad[6];			/* to 64B */
+  u8 l2_packet;			/* flag for l2 vs l3 packet data */
 
-  // data saved from the vlib buffer header and restored for each replica
-  i16  current_data;         // offset of first byte of packet in packet data
-  u8   pad[6];               // to 64B
-  u8   l2_packet;            // flag for l2 vs l3 packet data
-
-} replication_context_t;    // 128B
+} replication_context_t;	/* 128B */
 
 
-typedef struct {
+typedef struct
+{
 
   u32 recycle_list_index;
 
-  // per-thread pools of replication contexts
-  replication_context_t ** contexts;
+  /* per-thread pools of replication contexts */
+  replication_context_t **contexts;
 
-  vlib_main_t * vlib_main;
-  vnet_main_t * vnet_main;
+  vlib_main_t *vlib_main;
+  vnet_main_t *vnet_main;
 
 } replication_main_t;
 
@@ -67,56 +71,66 @@
 extern replication_main_t replication_main;
 
 
-// Return 1 if this buffer just came from the replication recycle handler.
+/* Return 1 if this buffer just came from the replication recycle handler. */
 always_inline u32
 replication_is_recycled (vlib_buffer_t * b0)
 {
   return b0->flags & VLIB_BUFFER_IS_RECYCLED;
 }
 
-// Clear the recycle flag. If buffer came from the replication recycle
-// handler, this flag must be cleared before the packet is transmitted again.
+/*
+ * Clear the recycle flag. If the buffer came from the replication recycle
+ * handler, this flag must be cleared before the packet is transmitted again.
+ */
 always_inline void
 replication_clear_recycled (vlib_buffer_t * b0)
 {
   b0->flags &= ~VLIB_BUFFER_IS_RECYCLED;
 }
 
-// Return the active replication context if this buffer has 
-// been recycled, otherwise return 0. (Note that this essentially
-// restricts access to the replication context to the replication
-// feature's prep and recycle nodes.)
+/*
+ * Return the active replication context if this buffer has
+ * been recycled, otherwise return 0. (Note that this essentially
+ * restricts access to the replication context to the replication
+ * feature's prep and recycle nodes.)
+ */
 always_inline replication_context_t *
 replication_get_ctx (vlib_buffer_t * b0)
 {
-  replication_main_t * rm = &replication_main;
+  replication_main_t *rm = &replication_main;
 
-  return replication_is_recycled (b0) ? 
-   pool_elt_at_index (rm->contexts[os_get_cpu_number()], b0->recycle_count) :
-   0;
+  return replication_is_recycled (b0) ?
+    pool_elt_at_index (rm->contexts[os_get_cpu_number ()],
+		       b0->recycle_count) : 0;
 }
 
-// Prefetch the replication context for this buffer, if it exists
+/* Prefetch the replication context for this buffer, if it exists */
 always_inline void
 replication_prefetch_ctx (vlib_buffer_t * b0)
 {
   replication_context_t *ctx = replication_get_ctx (b0);
 
-  if (ctx) {
-    CLIB_PREFETCH (ctx, (2*CLIB_CACHE_LINE_BYTES), STORE);
-  }
+  if (ctx)
+    {
+      CLIB_PREFETCH (ctx, (2 * CLIB_CACHE_LINE_BYTES), STORE);
+    }
 }
 
-replication_context_t *
-replication_prep (vlib_main_t * vm,
-                  vlib_buffer_t * b0,
-                  u32 recycle_node_index,
-                  u32 l2_packet);
+replication_context_t *replication_prep (vlib_main_t * vm,
+					 vlib_buffer_t * b0,
+					 u32 recycle_node_index,
+					 u32 l2_packet);
 
-replication_context_t *
-replication_recycle (vlib_main_t * vm,
-                     vlib_buffer_t * b0,
-                     u32 is_last);
+replication_context_t *replication_recycle (vlib_main_t * vm,
+					    vlib_buffer_t * b0, u32 is_last);
 
 
 #endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
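
The replication.h API reshaped above is easiest to read alongside a caller. The following is a minimal, non-authoritative sketch of how a replicating feature might drive it, based only on the declarations and comments in this header; the node boilerplate and the exact division of labor between the prep and recycle passes are assumptions, and example_replicate itself is hypothetical.

#include <vnet/vnet.h>
#include <vnet/replication.h>

static inline void
example_replicate (vlib_main_t * vm, vlib_buffer_t * b0,
		   u32 recycle_node_index, u32 is_last, u32 l2_packet)
{
  replication_context_t *ctx;

  if (!replication_is_recycled (b0))
    {
      /* First pass: save buffer/header state for the later replicas. */
      ctx = replication_prep (vm, b0, recycle_node_index, l2_packet);
    }
  else
    {
      /* Warm the cache before touching the saved context. */
      replication_prefetch_ctx (b0);

      /* Recycled pass: restore saved state (is_last flags the final copy). */
      ctx = replication_recycle (vm, b0, is_last);

      /* Per the comment above: clear the flag before transmitting again. */
      replication_clear_recycled (b0);
    }
  (void) ctx;
}
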
diff --git a/vnet/vnet/rewrite.c b/vnet/vnet/rewrite.c
index 3435b0f..0dcec40 100644
--- a/vnet/vnet/rewrite.c
+++ b/vnet/vnet/rewrite.c
@@ -40,18 +40,19 @@
 #include <vnet/vnet.h>
 #include <vnet/ip/lookup.h>
 
-void vnet_rewrite_copy_slow_path (vnet_rewrite_data_t * p0,
-				  vnet_rewrite_data_t * rw0,
-				  word n_left,
-				  uword most_likely_size)
+void
+vnet_rewrite_copy_slow_path (vnet_rewrite_data_t * p0,
+			     vnet_rewrite_data_t * rw0,
+			     word n_left, uword most_likely_size)
 {
-  uword n_done = round_pow2 (most_likely_size, sizeof (rw0[0])) / sizeof (rw0[0]);
+  uword n_done =
+    round_pow2 (most_likely_size, sizeof (rw0[0])) / sizeof (rw0[0]);
 
   p0 -= n_done;
   rw0 -= n_done;
 
   /* As we enter the cleanup loop, p0 and rw0 point to the last chunk written
-     by the fast path. Hence, the constant 1, which the 
+     by the fast path. Hence, the constant 1, which the
      vnet_rewrite_copy_one macro renders as p0[-1] = rw0[-1]. */
 
   while (n_left > 0)
@@ -63,13 +64,14 @@
     }
 }
 
-u8 * format_vnet_rewrite (u8 * s, va_list * args)
+u8 *
+format_vnet_rewrite (u8 * s, va_list * args)
 {
-  vlib_main_t * vm = va_arg (*args, vlib_main_t *);
-  vnet_rewrite_header_t * rw = va_arg (*args, vnet_rewrite_header_t *);
+  vlib_main_t *vm = va_arg (*args, vlib_main_t *);
+  vnet_rewrite_header_t *rw = va_arg (*args, vnet_rewrite_header_t *);
   u32 max_data_bytes = va_arg (*args, u32);
-  vnet_main_t * vnm = vnet_get_main();
-  vlib_node_t * next;
+  vnet_main_t *vnm = vnet_get_main ();
+  vlib_node_t *next;
   uword indent;
 
   next = vlib_get_next_node (vm, rw->node_index, rw->next_index);
@@ -78,7 +80,7 @@
 
   if (rw->sw_if_index != ~0)
     {
-      vnet_sw_interface_t * si;
+      vnet_sw_interface_t *si;
       si = vnet_get_sw_interface (vnm, rw->sw_if_index);
       s = format (s, "%U", format_vnet_sw_interface_name, vnm, si);
     }
@@ -90,19 +92,19 @@
     s = format (s, "\n%U%U",
 		format_white_space, indent,
 		next->format_buffer ? next->format_buffer : format_hex_bytes,
-		rw->data + max_data_bytes - rw->data_bytes,
-		rw->data_bytes);
+		rw->data + max_data_bytes - rw->data_bytes, rw->data_bytes);
 
   return s;
 }
 
-u8 * format_vnet_rewrite_header (u8 * s, va_list * args)
+u8 *
+format_vnet_rewrite_header (u8 * s, va_list * args)
 {
-  vlib_main_t * vm = va_arg (*args, vlib_main_t *);
-  vnet_rewrite_header_t * rw = va_arg (*args, vnet_rewrite_header_t *);
-  u8 * packet_data = va_arg (*args, u8 *);
+  vlib_main_t *vm = va_arg (*args, vlib_main_t *);
+  vnet_rewrite_header_t *rw = va_arg (*args, vnet_rewrite_header_t *);
+  u8 *packet_data = va_arg (*args, u8 *);
   u32 packet_data_bytes = va_arg (*args, u32);
-  vlib_node_t * next;
+  vlib_node_t *next;
 
   next = vlib_get_next_node (vm, rw->node_index, rw->next_index);
 
@@ -114,15 +116,16 @@
   return s;
 }
 
-uword unformat_vnet_rewrite (unformat_input_t * input, va_list * args)
+uword
+unformat_vnet_rewrite (unformat_input_t * input, va_list * args)
 {
-  vlib_main_t * vm = va_arg (*args, vlib_main_t *);
-  vnet_rewrite_header_t * rw = va_arg (*args, vnet_rewrite_header_t *);
+  vlib_main_t *vm = va_arg (*args, vlib_main_t *);
+  vnet_rewrite_header_t *rw = va_arg (*args, vnet_rewrite_header_t *);
   u32 max_data_bytes = va_arg (*args, u32);
-  vnet_main_t * vnm = vnet_get_main();
-  vlib_node_t * next;
+  vnet_main_t *vnm = vnet_get_main ();
+  vlib_node_t *next;
   u32 next_index, sw_if_index, max_packet_bytes, error;
-  u8 * rw_data;
+  u8 *rw_data;
 
   rw_data = 0;
   sw_if_index = ~0;
@@ -130,10 +133,9 @@
   error = 1;
 
   /* Parse sw interface. */
-  if (unformat (input, "%U",
-		unformat_vnet_sw_interface, vnm, &sw_if_index))
+  if (unformat (input, "%U", unformat_vnet_sw_interface, vnm, &sw_if_index))
     {
-      vnet_hw_interface_t * hi;
+      vnet_hw_interface_t *hi;
 
       hi = vnet_get_sup_hw_interface (vnm, sw_if_index);
 
@@ -141,8 +143,7 @@
       max_packet_bytes = hi->max_l3_packet_bytes[VLIB_RX];
     }
 
-  else if (unformat (input, "%U",
-		     unformat_vlib_node, vm, &next_index))
+  else if (unformat (input, "%U", unformat_vlib_node, vm, &next_index))
     ;
 
   else
@@ -157,7 +158,7 @@
   else if (unformat_user (input, unformat_hex_string, &rw_data)
 	   || unformat (input, "0x%U", unformat_hex_string, &rw_data))
     ;
-      
+
   else
     goto done;
 
@@ -177,29 +178,33 @@
   rw->sw_if_index = sw_if_index;
   rw->max_l3_packet_bytes = max_packet_bytes;
   rw->next_index = vlib_node_add_next (vm, rw->node_index, next_index);
-  vnet_rewrite_set_data_internal (rw, max_data_bytes, rw_data, vec_len (rw_data));
+  vnet_rewrite_set_data_internal (rw, max_data_bytes, rw_data,
+				  vec_len (rw_data));
 
- done:
+done:
   vec_free (rw_data);
   return error == 0;
 }
 
-void vnet_rewrite_for_sw_interface (vnet_main_t * vnm,
-				    vnet_l3_packet_type_t packet_type,
-				    u32 sw_if_index,
-				    u32 node_index,
-				    void * dst_address,
-				    vnet_rewrite_header_t * rw,
-				    u32 max_rewrite_bytes)
+void
+vnet_rewrite_for_sw_interface (vnet_main_t * vnm,
+			       vnet_l3_packet_type_t packet_type,
+			       u32 sw_if_index,
+			       u32 node_index,
+			       void *dst_address,
+			       vnet_rewrite_header_t * rw,
+			       u32 max_rewrite_bytes)
 {
-  vnet_hw_interface_t * hw = vnet_get_sup_hw_interface (vnm, sw_if_index);
-  vnet_hw_interface_class_t * hc = vnet_get_hw_interface_class (vnm, hw->hw_class_index);
-  static u8 * rw_tmp = 0;
+  vnet_hw_interface_t *hw = vnet_get_sup_hw_interface (vnm, sw_if_index);
+  vnet_hw_interface_class_t *hc =
+    vnet_get_hw_interface_class (vnm, hw->hw_class_index);
+  static u8 *rw_tmp = 0;
   uword n_rw_tmp;
 
   rw->sw_if_index = sw_if_index;
   rw->node_index = node_index;
-  rw->next_index = vlib_node_add_next (vnm->vlib_main, node_index, hw->output_node_index);
+  rw->next_index =
+    vlib_node_add_next (vnm->vlib_main, node_index, hw->output_node_index);
   rw->max_l3_packet_bytes = hw->max_l3_packet_bytes[VLIB_TX];
 
   ASSERT (max_rewrite_bytes > 0);
@@ -207,56 +212,62 @@
   vec_validate (rw_tmp, max_rewrite_bytes - 1);
 
   ASSERT (hc->set_rewrite);
-  n_rw_tmp = hc->set_rewrite (vnm, sw_if_index, packet_type, dst_address, rw_tmp, max_rewrite_bytes);
+  n_rw_tmp =
+    hc->set_rewrite (vnm, sw_if_index, packet_type, dst_address, rw_tmp,
+		     max_rewrite_bytes);
 
   ASSERT (n_rw_tmp < max_rewrite_bytes);
   vnet_rewrite_set_data_internal (rw, max_rewrite_bytes, rw_tmp, n_rw_tmp);
 }
 
-void vnet_rewrite_for_tunnel (vnet_main_t * vnm,
-                              u32 tx_sw_if_index,
-                              u32 rewrite_node_index,
-                              u32 post_rewrite_node_index,
-                              vnet_rewrite_header_t * rw,
-                              u8 *rewrite_data,
-                              u32 rewrite_length)
+void
+vnet_rewrite_for_tunnel (vnet_main_t * vnm,
+			 u32 tx_sw_if_index,
+			 u32 rewrite_node_index,
+			 u32 post_rewrite_node_index,
+			 vnet_rewrite_header_t * rw,
+			 u8 * rewrite_data, u32 rewrite_length)
 {
-  ip_adjacency_t * adj = 0;
-  /* 
+  ip_adjacency_t *adj = 0;
+  /*
    * Installed into vnet_buffer(b)->sw_if_index[VLIB_TX] e.g.
-   * by ip4_rewrite_inline. If the post-rewrite node injects into 
-   * ipX-forward, this will be interpreted as a FIB number. 
+   * by ip4_rewrite_inline. If the post-rewrite node injects into
+   * ipX-forward, this will be interpreted as a FIB number.
    */
-  rw->sw_if_index = tx_sw_if_index; 
+  rw->sw_if_index = tx_sw_if_index;
   rw->node_index = rewrite_node_index;
-  rw->next_index = vlib_node_add_next (vnm->vlib_main, rewrite_node_index, 
-                                       post_rewrite_node_index);
-  rw->max_l3_packet_bytes = (u16) ~0; /* we can't know at this point */
+  rw->next_index = vlib_node_add_next (vnm->vlib_main, rewrite_node_index,
+				       post_rewrite_node_index);
+  rw->max_l3_packet_bytes = (u16) ~ 0;	/* we can't know at this point */
 
   ASSERT (rewrite_length < sizeof (adj->rewrite_data));
   /* Leave room for ethernet + VLAN tag */
-  vnet_rewrite_set_data_internal (rw, sizeof(adj->rewrite_data), 
-                                  rewrite_data, rewrite_length);
+  vnet_rewrite_set_data_internal (rw, sizeof (adj->rewrite_data),
+				  rewrite_data, rewrite_length);
 }
 
-void serialize_vnet_rewrite (serialize_main_t * m, va_list * va)
+void
+serialize_vnet_rewrite (serialize_main_t * m, va_list * va)
 {
-  vnet_rewrite_header_t * rw = va_arg (*va, vnet_rewrite_header_t *);
+  vnet_rewrite_header_t *rw = va_arg (*va, vnet_rewrite_header_t *);
   u32 max_data_bytes = va_arg (*va, u32);
-  u8 * p;
+  u8 *p;
 
   serialize_integer (m, rw->sw_if_index, sizeof (rw->sw_if_index));
   serialize_integer (m, rw->data_bytes, sizeof (rw->data_bytes));
-  serialize_integer (m, rw->max_l3_packet_bytes, sizeof (rw->max_l3_packet_bytes));
+  serialize_integer (m, rw->max_l3_packet_bytes,
+		     sizeof (rw->max_l3_packet_bytes));
   p = serialize_get (m, rw->data_bytes);
-  clib_memcpy (p, vnet_rewrite_get_data_internal (rw, max_data_bytes), rw->data_bytes);
+  clib_memcpy (p, vnet_rewrite_get_data_internal (rw, max_data_bytes),
+	       rw->data_bytes);
 }
 
-void unserialize_vnet_rewrite (serialize_main_t * m, va_list * va)
+void
+unserialize_vnet_rewrite (serialize_main_t * m, va_list * va)
 {
-  vnet_rewrite_header_t * rw = va_arg (*va, vnet_rewrite_header_t *);
+  vnet_rewrite_header_t *rw = va_arg (*va, vnet_rewrite_header_t *);
   u32 max_data_bytes = va_arg (*va, u32);
-  u8 * p;
+  u8 *p;
 
   /* It is up to user to fill these in. */
   rw->node_index = ~0;
@@ -264,7 +275,17 @@
 
   unserialize_integer (m, &rw->sw_if_index, sizeof (rw->sw_if_index));
   unserialize_integer (m, &rw->data_bytes, sizeof (rw->data_bytes));
-  unserialize_integer (m, &rw->max_l3_packet_bytes, sizeof (rw->max_l3_packet_bytes));
+  unserialize_integer (m, &rw->max_l3_packet_bytes,
+		       sizeof (rw->max_l3_packet_bytes));
   p = unserialize_get (m, rw->data_bytes);
-  clib_memcpy (vnet_rewrite_get_data_internal (rw, max_data_bytes), p, rw->data_bytes);
+  clib_memcpy (vnet_rewrite_get_data_internal (rw, max_data_bytes), p,
+	       rw->data_bytes);
 }
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
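
The format/unformat pair touched above is normally invoked through vppinfra's %U convention. A minimal sketch under stated assumptions: my_adj_t and both example_* functions are hypothetical (the rewrite_header/rewrite_data member names match what the accessor macros in rewrite.h expect), and the trailing argument lists mirror the va_arg sequences visible in the functions above.

#include <vnet/vnet.h>
#include <vnet/rewrite.h>

typedef struct
{
  vnet_rewrite_header_t rewrite_header;
  u8 rewrite_data[64 - sizeof (vnet_rewrite_header_t)];	/* hypothetical, 64B total */
} my_adj_t;

static u8 *
example_show_rewrite (u8 * s, vlib_main_t * vm, my_adj_t * adj)
{
  /* format_vnet_rewrite consumes (vlib_main_t *, vnet_rewrite_header_t *, u32). */
  return format (s, "%U", format_vnet_rewrite,
		 vm, &adj->rewrite_header, (u32) sizeof (adj->rewrite_data));
}

static uword
example_parse_rewrite (unformat_input_t * input, vlib_main_t * vm,
		       my_adj_t * adj)
{
  /* unformat_vnet_rewrite consumes the same trailing arguments. */
  return unformat_user (input, unformat_vnet_rewrite,
			vm, &adj->rewrite_header,
			(u32) sizeof (adj->rewrite_data));
}
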
diff --git a/vnet/vnet/rewrite.h b/vnet/vnet/rewrite.h
index 7ae33ba..fb800da 100644
--- a/vnet/vnet/rewrite.h
+++ b/vnet/vnet/rewrite.h
@@ -46,6 +46,7 @@
 /* Consider using vector types for speed? */
 typedef uword vnet_rewrite_data_t;
 
+/* *INDENT-OFF* */
 typedef CLIB_PACKED (struct {
   /* Interface to mark re-written packets with. */
   u32 sw_if_index;
@@ -66,13 +67,14 @@
   /* Rewrite string starting at end and going backwards. */
   u8 data[0];
 }) vnet_rewrite_header_t;
+/* *INDENT-ON* */
 
 /*
   Helper macro for declaring rewrite string w/ given max-size.
 
   Typical usage:
     typedef struct {
-      // User data.
+      // User data.
       int a, b;
 
       // Total adjacency is 64 bytes.
@@ -88,11 +90,9 @@
 
 always_inline void
 vnet_rewrite_set_data_internal (vnet_rewrite_header_t * rw,
-				int max_size,
-				void * data,
-				int data_bytes)
+				int max_size, void *data, int data_bytes)
 {
-  /* Sanity check values carefully for this memset operation*/
+  /* Sanity check values carefully for this memset operation */
   ASSERT ((max_size > 0) && (max_size < VLIB_BUFFER_PRE_DATA_SIZE));
   ASSERT ((data_bytes >= 0) && (data_bytes < max_size));
 
@@ -118,30 +118,30 @@
   vnet_rewrite_get_data_internal (&((rw).rewrite_header), sizeof ((rw).rewrite_data))
 
 always_inline void
-vnet_rewrite_copy_one (vnet_rewrite_data_t * p0, vnet_rewrite_data_t * rw0, int i)
+vnet_rewrite_copy_one (vnet_rewrite_data_t * p0, vnet_rewrite_data_t * rw0,
+		       int i)
 {
   p0[-i] = rw0[-i];
 }
 
 void vnet_rewrite_copy_slow_path (vnet_rewrite_data_t * p0,
 				  vnet_rewrite_data_t * rw0,
-				  word n_left,
-				  uword most_likely_size);
+				  word n_left, uword most_likely_size);
 
+/* *INDENT-OFF* */
 typedef CLIB_PACKED (struct {
   u64 a;
   u32 b;
   u16 c;
 }) eh_copy_t;
+/* *INDENT-ON* */
 
 always_inline void
 _vnet_rewrite_one_header (vnet_rewrite_header_t * h0,
-			  void * packet0,
-			  int max_size,
-			  int most_likely_size)
+			  void *packet0, int max_size, int most_likely_size)
 {
-  vnet_rewrite_data_t * p0 = packet0;
-  vnet_rewrite_data_t * rw0 = (vnet_rewrite_data_t *) (h0->data + max_size);
+  vnet_rewrite_data_t *p0 = packet0;
+  vnet_rewrite_data_t *rw0 = (vnet_rewrite_data_t *) (h0->data + max_size);
   word n_left0;
 
   /* 0xfefe => poisoned adjacency => crash */
@@ -149,13 +149,13 @@
 
   if (PREDICT_TRUE (h0->data_bytes == sizeof (eh_copy_t)))
     {
-      eh_copy_t * s, * d;
-      s = (eh_copy_t *)(h0->data + max_size - sizeof (eh_copy_t));
-      d = (eh_copy_t *)(((u8 *)packet0) - sizeof (eh_copy_t));
+      eh_copy_t *s, *d;
+      s = (eh_copy_t *) (h0->data + max_size - sizeof (eh_copy_t));
+      d = (eh_copy_t *) (((u8 *) packet0) - sizeof (eh_copy_t));
       clib_memcpy (d, s, sizeof (eh_copy_t));
       return;
     }
-      
+
 
 #define _(i)								\
   do {									\
@@ -163,16 +163,16 @@
       vnet_rewrite_copy_one (p0, rw0, (i));				\
   } while (0)
 
-  _ (4);
-  _ (3);
-  _ (2);
-  _ (1);
+  _(4);
+  _(3);
+  _(2);
+  _(1);
 
 #undef _
-    
+
   n_left0 = (int)
-      (((int) h0->data_bytes - most_likely_size) + (sizeof(rw0[0])-1))
-      / (int) sizeof (rw0[0]);
+    (((int) h0->data_bytes - most_likely_size) + (sizeof (rw0[0]) - 1))
+    / (int) sizeof (rw0[0]);
   if (PREDICT_FALSE (n_left0 > 0))
     vnet_rewrite_copy_slow_path (p0, rw0, n_left0, most_likely_size);
 }
@@ -180,15 +180,13 @@
 always_inline void
 _vnet_rewrite_two_headers (vnet_rewrite_header_t * h0,
 			   vnet_rewrite_header_t * h1,
-			   void * packet0,
-			   void * packet1,
-			   int max_size,
-			   int most_likely_size)
+			   void *packet0,
+			   void *packet1, int max_size, int most_likely_size)
 {
-  vnet_rewrite_data_t * p0 = packet0;
-  vnet_rewrite_data_t * p1 = packet1;
-  vnet_rewrite_data_t * rw0 = (vnet_rewrite_data_t *) (h0->data + max_size);
-  vnet_rewrite_data_t * rw1 = (vnet_rewrite_data_t *) (h1->data + max_size);
+  vnet_rewrite_data_t *p0 = packet0;
+  vnet_rewrite_data_t *p1 = packet1;
+  vnet_rewrite_data_t *rw0 = (vnet_rewrite_data_t *) (h0->data + max_size);
+  vnet_rewrite_data_t *rw1 = (vnet_rewrite_data_t *) (h1->data + max_size);
   word n_left0, n_left1;
   int slow_path;
 
@@ -202,12 +200,12 @@
 
   if (PREDICT_TRUE (slow_path == 0))
     {
-      eh_copy_t * s0, * d0, * s1, * d1;
-      s0 = (eh_copy_t *)(h0->data + max_size - sizeof (eh_copy_t));
-      d0 = (eh_copy_t *)(((u8 *)packet0) - sizeof (eh_copy_t));
+      eh_copy_t *s0, *d0, *s1, *d1;
+      s0 = (eh_copy_t *) (h0->data + max_size - sizeof (eh_copy_t));
+      d0 = (eh_copy_t *) (((u8 *) packet0) - sizeof (eh_copy_t));
       clib_memcpy (d0, s0, sizeof (eh_copy_t));
-      s1 = (eh_copy_t *)(h1->data + max_size - sizeof (eh_copy_t));
-      d1 = (eh_copy_t *)(((u8 *)packet1) - sizeof (eh_copy_t));
+      s1 = (eh_copy_t *) (h1->data + max_size - sizeof (eh_copy_t));
+      d1 = (eh_copy_t *) (((u8 *) packet1) - sizeof (eh_copy_t));
       clib_memcpy (d1, s1, sizeof (eh_copy_t));
       return;
     }
@@ -221,19 +219,19 @@
       }									\
   } while (0)
 
-  _ (4);
-  _ (3);
-  _ (2);
-  _ (1);
+  _(4);
+  _(3);
+  _(2);
+  _(1);
 
 #undef _
-    
+
   n_left0 = (int)
-      (((int) h0->data_bytes - most_likely_size) + (sizeof(rw0[0])-1))
-      / (int) sizeof (rw0[0]);
+    (((int) h0->data_bytes - most_likely_size) + (sizeof (rw0[0]) - 1))
+    / (int) sizeof (rw0[0]);
   n_left1 = (int)
-      (((int) h1->data_bytes - most_likely_size) + (sizeof(rw1[0])-1))
-      / (int) sizeof (rw1[0]);
+    (((int) h1->data_bytes - most_likely_size) + (sizeof (rw1[0]) - 1))
+    / (int) sizeof (rw1[0]);
 
   if (PREDICT_FALSE (n_left0 > 0 || n_left1 > 0))
     {
@@ -254,21 +252,20 @@
 			     (most_likely_size))
 
 #define VNET_REWRITE_FOR_SW_INTERFACE_ADDRESS_BROADCAST ((void *) 0)
-void vnet_rewrite_for_sw_interface (struct vnet_main_t * vnm,
+void vnet_rewrite_for_sw_interface (struct vnet_main_t *vnm,
 				    vnet_l3_packet_type_t packet_type,
 				    u32 sw_if_index,
 				    u32 node_index,
-				    void * dst_address,
+				    void *dst_address,
 				    vnet_rewrite_header_t * rw,
 				    u32 max_rewrite_bytes);
 
-void vnet_rewrite_for_tunnel (struct vnet_main_t * vnm,
-                              u32 tx_sw_if_index,
-                              u32 rewrite_node_index,
-                              u32 post_rewrite_node_index,
-                              vnet_rewrite_header_t * rw,
-                              u8 *rewrite_data,
-                              u32 rewrite_length);
+void vnet_rewrite_for_tunnel (struct vnet_main_t *vnm,
+			      u32 tx_sw_if_index,
+			      u32 rewrite_node_index,
+			      u32 post_rewrite_node_index,
+			      vnet_rewrite_header_t * rw,
+			      u8 * rewrite_data, u32 rewrite_length);
 
 /* Parser for unformat header & rewrite string. */
 unformat_function_t unformat_vnet_rewrite;
@@ -279,3 +276,11 @@
 serialize_function_t serialize_vnet_rewrite, unserialize_vnet_rewrite;
 
 #endif /* included_vnet_rewrite_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
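
The prototype block above also lends itself to a short usage sketch. Everything below the prototype names is an assumption for illustration: my_adj_t is the same kind of hypothetical wrapper used in the earlier sketch, example_build_rewrite is not a real function, and VNET_L3_PACKET_TYPE_IP4 is assumed to be the IP4 member of vnet_l3_packet_type_t.

#include <vnet/vnet.h>
#include <vnet/rewrite.h>

typedef struct
{
  vnet_rewrite_header_t rewrite_header;
  u8 rewrite_data[64 - sizeof (vnet_rewrite_header_t)];	/* hypothetical wrapper */
} my_adj_t;

static void
example_build_rewrite (vnet_main_t * vnm, u32 sw_if_index, u32 node_index,
		       void *dst_address, my_adj_t * adj)
{
  /*
   * Asks the interface's hardware class to build the layer-2 header for
   * dst_address and stores it, end-aligned, in adj->rewrite_data.
   * VNET_REWRITE_FOR_SW_INTERFACE_ADDRESS_BROADCAST may be passed as
   * dst_address to request a broadcast rewrite.
   */
  vnet_rewrite_for_sw_interface (vnm, VNET_L3_PACKET_TYPE_IP4,
				 sw_if_index, node_index, dst_address,
				 &adj->rewrite_header,
				 sizeof (adj->rewrite_data));
}
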
diff --git a/vnet/vnet/vnet.h b/vnet/vnet/vnet.h
index 9254da6..3bca6bf 100644
--- a/vnet/vnet/vnet.h
+++ b/vnet/vnet/vnet.h
@@ -42,7 +42,8 @@
 
 #include <vppinfra/types.h>
 
-typedef enum {
+typedef enum
+{
   VNET_UNICAST,
   VNET_MULTICAST,
   VNET_N_CAST,
@@ -55,27 +56,28 @@
 #include <vnet/rewrite.h>
 #include <vnet/api_errno.h>
 
-typedef struct vnet_main_t {
+typedef struct vnet_main_t
+{
   u32 local_interface_hw_if_index;
   u32 local_interface_sw_if_index;
 
   vnet_interface_main_t interface_main;
 
   /* set up by constructors */
-  vnet_device_class_t * device_class_registrations;
-  vnet_hw_interface_class_t * hw_interface_class_registrations;
-  _vnet_interface_function_list_elt_t * hw_interface_add_del_functions;
-  _vnet_interface_function_list_elt_t * hw_interface_link_up_down_functions;
-  _vnet_interface_function_list_elt_t * sw_interface_add_del_functions;
-  _vnet_interface_function_list_elt_t * sw_interface_admin_up_down_functions;
+  vnet_device_class_t *device_class_registrations;
+  vnet_hw_interface_class_t *hw_interface_class_registrations;
+  _vnet_interface_function_list_elt_t *hw_interface_add_del_functions;
+  _vnet_interface_function_list_elt_t *hw_interface_link_up_down_functions;
+  _vnet_interface_function_list_elt_t *sw_interface_add_del_functions;
+  _vnet_interface_function_list_elt_t *sw_interface_admin_up_down_functions;
 
-  /* 
+  /*
    * Last "api" error, preserved so we can issue reasonable diagnostics
    * at or near the top of the food chain
    */
   vnet_api_error_t api_errno;
 
-  vlib_main_t * vlib_main;
+  vlib_main_t *vlib_main;
 } vnet_main_t;
 
 vnet_main_t vnet_main;
@@ -85,3 +87,11 @@
 #include <vnet/global_funcs.h>
 
 #endif /* included_vnet_vnet_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */