ip: refactor reassembly

This is a preparation step for introducing other reassembly types.

Type: refactor

Change-Id: I197e299dbd729b00eead31667913b8ceff915d63
Signed-off-by: Klement Sekera <ksekera@cisco.com>
diff --git a/src/vnet/CMakeLists.txt b/src/vnet/CMakeLists.txt
index 1e51f3d..aaa16d8 100644
--- a/src/vnet/CMakeLists.txt
+++ b/src/vnet/CMakeLists.txt
@@ -431,7 +431,7 @@
   ip/ip4_pg.c
   ip/ip4_source_and_port_range_check.c
   ip/ip4_source_check.c
-  ip/ip4_reassembly.c
+  ip/reass/ip4_full_reass.c
   ip/ip6_format.c
   ip/ip6_forward.c
   ip/ip6_ll_table.c
@@ -441,7 +441,7 @@
   ip/ip6_input.c
   ip/ip6_neighbor.c
   ip/ip6_pg.c
-  ip/ip6_reassembly.c
+  ip/reass/ip6_full_reass.c
   ip/rd_cp.c
   ip/ip_neighbor.c
   ip/ip_api.c
@@ -461,9 +461,9 @@
 list(APPEND VNET_MULTIARCH_SOURCES
   ip/ip4_source_check.c
   ip/ip4_punt_drop.c
-  ip/ip4_reassembly.c
+  ip/reass/ip4_full_reass.c
   ip/ip6_hop_by_hop.c
-  ip/ip6_reassembly.c
+  ip/reass/ip6_full_reass.c
   ip/ip6_input.c
   ip/ip6_punt_drop.c
   ip/punt_node.c
diff --git a/src/vnet/ip/ip46_cli.c b/src/vnet/ip/ip46_cli.c
index e0e26a2..ee797ab 100644
--- a/src/vnet/ip/ip46_cli.c
+++ b/src/vnet/ip/ip46_cli.c
@@ -38,8 +38,8 @@
  */
 
 #include <vnet/ip/ip.h>
-#include <vnet/ip/ip4_reassembly.h>
-#include <vnet/ip/ip6_reassembly.h>
+#include <vnet/ip/reass/ip4_full_reass.h>
+#include <vnet/ip/reass/ip6_full_reass.h>
 
 /**
  * @file
@@ -269,26 +269,26 @@
     }
 
 
-  vnet_api_error_t rv4 = ip4_reass_enable_disable (sw_if_index, ip4_on);
-  vnet_api_error_t rv6 = ip6_reass_enable_disable (sw_if_index, ip6_on);
+  vnet_api_error_t rv4 = ip4_full_reass_enable_disable (sw_if_index, ip4_on);
+  vnet_api_error_t rv6 = ip6_full_reass_enable_disable (sw_if_index, ip6_on);
   if (rv4 && rv6)
     {
       return clib_error_return (0,
-				"`ip4_reass_enable_disable' API call failed, rv=%d:%U, "
-				"`ip6_reass_enable_disable' API call failed, rv=%d:%U",
+				"`ip4_full_reass_enable_disable' API call failed, rv=%d:%U, "
+				"`ip6_full_reass_enable_disable' API call failed, rv=%d:%U",
 				(int) rv4, format_vnet_api_errno, rv4,
 				(int) rv6, format_vnet_api_errno, rv6);
     }
   else if (rv4)
     {
       return clib_error_return (0,
-				"`ip4_reass_enable_disable' API call failed, rv=%d:%U",
+				"`ip4_full_reass_enable_disable' API call failed, rv=%d:%U",
 				(int) rv4, format_vnet_api_errno, rv4);
     }
   else if (rv6)
     {
       return clib_error_return (0,
-				"`ip6_reass_enable_disable' API call failed, rv=%d:%U",
+				"`ip6_full_reass_enable_disable' API call failed, rv=%d:%U",
 				(int) rv6, format_vnet_api_errno, rv6);
     }
   return NULL;
diff --git a/src/vnet/ip/ip4_forward.c b/src/vnet/ip/ip4_forward.c
index 2250e03..d4717c6 100644
--- a/src/vnet/ip/ip4_forward.c
+++ b/src/vnet/ip/ip4_forward.c
@@ -1871,7 +1871,7 @@
     [IP_LOCAL_NEXT_PUNT] = "ip4-punt",
     [IP_LOCAL_NEXT_UDP_LOOKUP] = "ip4-udp-lookup",
     [IP_LOCAL_NEXT_ICMP] = "ip4-icmp-input",
-    [IP_LOCAL_NEXT_REASSEMBLY] = "ip4-reassembly",
+    [IP_LOCAL_NEXT_REASSEMBLY] = "ip4-full-reassembly",
   },
 };
 /* *INDENT-ON* */
diff --git a/src/vnet/ip/ip4_input.c b/src/vnet/ip/ip4_input.c
index 94c4aac..6093b13 100644
--- a/src/vnet/ip/ip4_input.c
+++ b/src/vnet/ip/ip4_input.c
@@ -398,7 +398,7 @@
     [IP4_INPUT_NEXT_LOOKUP] = "ip4-lookup",
     [IP4_INPUT_NEXT_LOOKUP_MULTICAST] = "ip4-mfib-forward-lookup",
     [IP4_INPUT_NEXT_ICMP_ERROR] = "ip4-icmp-error",
-    [IP4_INPUT_NEXT_REASSEMBLY] = "ip4-reassembly",
+    [IP4_INPUT_NEXT_REASSEMBLY] = "ip4-full-reassembly",
   },
 
   .format_buffer = format_ip4_header,
diff --git a/src/vnet/ip/ip6_forward.c b/src/vnet/ip/ip6_forward.c
index 067db77..eb6c89b 100644
--- a/src/vnet/ip/ip6_forward.c
+++ b/src/vnet/ip/ip6_forward.c
@@ -1517,7 +1517,7 @@
     [IP_LOCAL_NEXT_PUNT] = "ip6-punt",
     [IP_LOCAL_NEXT_UDP_LOOKUP] = "ip6-udp-lookup",
     [IP_LOCAL_NEXT_ICMP] = "ip6-icmp-input",
-    [IP_LOCAL_NEXT_REASSEMBLY] = "ip6-reassembly",
+    [IP_LOCAL_NEXT_REASSEMBLY] = "ip6-full-reassembly",
   },
 };
 /* *INDENT-ON* */
diff --git a/src/vnet/ip/ip_api.c b/src/vnet/ip/ip_api.c
index 7e87f4b..3e04b9f 100644
--- a/src/vnet/ip/ip_api.c
+++ b/src/vnet/ip/ip_api.c
@@ -43,8 +43,8 @@
 #include <vnet/fib/ip6_fib.h>
 #include <vnet/fib/fib_path_list.h>
 #include <vnet/ip/ip6_hop_by_hop.h>
-#include <vnet/ip/ip4_reassembly.h>
-#include <vnet/ip/ip6_reassembly.h>
+#include <vnet/ip/reass/ip4_full_reass.h>
+#include <vnet/ip/reass/ip6_full_reass.h>
 #include <vnet/ethernet/arp.h>
 #include <vnet/ip/ip_types_api.h>
 
@@ -2738,17 +2738,21 @@
   int rv = 0;
   if (mp->is_ip6)
     {
-      rv = ip6_reass_set (clib_net_to_host_u32 (mp->timeout_ms),
-			  clib_net_to_host_u32 (mp->max_reassemblies),
-			  clib_net_to_host_u32 (mp->max_reassembly_length),
-			  clib_net_to_host_u32 (mp->expire_walk_interval_ms));
+      rv = ip6_full_reass_set (clib_net_to_host_u32 (mp->timeout_ms),
+			       clib_net_to_host_u32 (mp->max_reassemblies),
+			       clib_net_to_host_u32
+			       (mp->max_reassembly_length),
+			       clib_net_to_host_u32
+			       (mp->expire_walk_interval_ms));
     }
   else
     {
-      rv = ip4_reass_set (clib_net_to_host_u32 (mp->timeout_ms),
-			  clib_net_to_host_u32 (mp->max_reassemblies),
-			  clib_net_to_host_u32 (mp->max_reassembly_length),
-			  clib_net_to_host_u32 (mp->expire_walk_interval_ms));
+      rv = ip4_full_reass_set (clib_net_to_host_u32 (mp->timeout_ms),
+			       clib_net_to_host_u32 (mp->max_reassemblies),
+			       clib_net_to_host_u32
+			       (mp->max_reassembly_length),
+			       clib_net_to_host_u32
+			       (mp->expire_walk_interval_ms));
     }
 
   REPLY_MACRO (VL_API_IP_REASSEMBLY_SET_REPLY);
@@ -2771,15 +2775,16 @@
   if (mp->is_ip6)
     {
       rmp->is_ip6 = 1;
-      ip6_reass_get (&rmp->timeout_ms, &rmp->max_reassemblies,
-		     &rmp->expire_walk_interval_ms);
+      ip6_full_reass_get (&rmp->timeout_ms, &rmp->max_reassemblies,
+			  &rmp->max_reassembly_length,
+			  &rmp->expire_walk_interval_ms);
     }
   else
     {
       rmp->is_ip6 = 0;
-      ip4_reass_get (&rmp->timeout_ms, &rmp->max_reassemblies,
-		     &rmp->max_reassembly_length,
-		     &rmp->expire_walk_interval_ms);
+      ip4_full_reass_get (&rmp->timeout_ms, &rmp->max_reassemblies,
+			  &rmp->max_reassembly_length,
+			  &rmp->expire_walk_interval_ms);
     }
   rmp->timeout_ms = clib_host_to_net_u32 (rmp->timeout_ms);
   rmp->max_reassemblies = clib_host_to_net_u32 (rmp->max_reassemblies);
@@ -2794,12 +2799,13 @@
 {
   vl_api_ip_reassembly_enable_disable_reply_t *rmp;
   int rv = 0;
-  rv = ip4_reass_enable_disable (clib_net_to_host_u32 (mp->sw_if_index),
-				 mp->enable_ip4);
+  rv = ip4_full_reass_enable_disable (clib_net_to_host_u32 (mp->sw_if_index),
+				      mp->enable_ip4);
   if (0 == rv)
     {
-      rv = ip6_reass_enable_disable (clib_net_to_host_u32 (mp->sw_if_index),
-				     mp->enable_ip6);
+      rv =
+	ip6_full_reass_enable_disable (clib_net_to_host_u32 (mp->sw_if_index),
+				       mp->enable_ip6);
     }
 
   REPLY_MACRO (VL_API_IP_REASSEMBLY_ENABLE_DISABLE_REPLY);
diff --git a/src/vnet/ip/ip4_reassembly.c b/src/vnet/ip/reass/ip4_full_reass.c
similarity index 62%
rename from src/vnet/ip/ip4_reassembly.c
rename to src/vnet/ip/reass/ip4_full_reass.c
index 682cad9..7cf4f43 100644
--- a/src/vnet/ip/ip4_reassembly.c
+++ b/src/vnet/ip/reass/ip4_full_reass.c
@@ -15,16 +15,17 @@
 
 /**
  * @file
- * @brief IPv4 Reassembly.
+ * @brief IPv4 Full Reassembly.
  *
- * This file contains the source code for IPv4 reassembly.
+ * This file contains the source code for IPv4 full reassembly.
  */
 
 #include <vppinfra/vec.h>
 #include <vnet/vnet.h>
 #include <vnet/ip/ip.h>
+#include <vppinfra/fifo.h>
 #include <vppinfra/bihash_16_8.h>
-#include <vnet/ip/ip4_reassembly.h>
+#include <vnet/ip/reass/ip4_full_reass.h>
 #include <stddef.h>
 
 #define MSEC_PER_SEC 1000
@@ -63,7 +64,7 @@
   IP4_REASS_RC_INTERNAL_ERROR,
   IP4_REASS_RC_NO_BUF,
   IP4_REASS_RC_HANDOFF,
-} ip4_reass_rc_t;
+} ip4_full_reass_rc_t;
 
 typedef struct
 {
@@ -80,7 +81,7 @@
     };
     u64 as_u64[2];
   };
-} ip4_reass_key_t;
+} ip4_full_reass_key_t;
 
 typedef union
 {
@@ -90,37 +91,38 @@
     u32 memory_owner_thread_index;
   };
   u64 as_u64;
-} ip4_reass_val_t;
+} ip4_full_reass_val_t;
 
 typedef union
 {
   struct
   {
-    ip4_reass_key_t k;
-    ip4_reass_val_t v;
+    ip4_full_reass_key_t k;
+    ip4_full_reass_val_t v;
   };
   clib_bihash_kv_16_8_t kv;
-} ip4_reass_kv_t;
+} ip4_full_reass_kv_t;
 
 always_inline u32
-ip4_reass_buffer_get_data_offset (vlib_buffer_t * b)
+ip4_full_reass_buffer_get_data_offset (vlib_buffer_t * b)
 {
   vnet_buffer_opaque_t *vnb = vnet_buffer (b);
   return vnb->ip.reass.range_first - vnb->ip.reass.fragment_first;
 }
 
 always_inline u16
-ip4_reass_buffer_get_data_len (vlib_buffer_t * b)
+ip4_full_reass_buffer_get_data_len (vlib_buffer_t * b)
 {
   vnet_buffer_opaque_t *vnb = vnet_buffer (b);
   return clib_min (vnb->ip.reass.range_last, vnb->ip.reass.fragment_last) -
-    (vnb->ip.reass.fragment_first + ip4_reass_buffer_get_data_offset (b)) + 1;
+    (vnb->ip.reass.fragment_first +
+     ip4_full_reass_buffer_get_data_offset (b)) + 1;
 }
 
 typedef struct
 {
   // hash table key
-  ip4_reass_key_t key;
+  ip4_full_reass_key_t key;
   // time when last packet was received
   f64 last_heard;
   // internal id of this reassembly
@@ -146,16 +148,15 @@
   // thread which received fragment with offset 0 and which sends out the
   // completed reassembly
   u32 sendout_thread_index;
-} ip4_reass_t;
+} ip4_full_reass_t;
 
 typedef struct
 {
-  // pool of reassembly contexts
-  ip4_reass_t *pool;
+  ip4_full_reass_t *pool;
   u32 reass_n;
   u32 id_counter;
   clib_spinlock_t lock;
-} ip4_reass_per_thread_t;
+} ip4_full_reass_per_thread_t;
 
 typedef struct
 {
@@ -171,33 +172,34 @@
   // IPv4 runtime
   clib_bihash_16_8_t hash;
   // per-thread data
-  ip4_reass_per_thread_t *per_thread_data;
+  ip4_full_reass_per_thread_t *per_thread_data;
 
   // convenience
   vlib_main_t *vlib_main;
 
   // node index of ip4-drop node
   u32 ip4_drop_idx;
-  u32 ip4_reass_expire_node_idx;
+  u32 ip4_full_reass_expire_node_idx;
 
   /** Worker handoff */
   u32 fq_index;
   u32 fq_feature_index;
-} ip4_reass_main_t;
 
-extern ip4_reass_main_t ip4_reass_main;
+} ip4_full_reass_main_t;
+
+extern ip4_full_reass_main_t ip4_full_reass_main;
 
 #ifndef CLIB_MARCH_VARIANT
-ip4_reass_main_t ip4_reass_main;
+ip4_full_reass_main_t ip4_full_reass_main;
 #endif /* CLIB_MARCH_VARIANT */
 
 typedef enum
 {
-  IP4_REASSEMBLY_NEXT_INPUT,
-  IP4_REASSEMBLY_NEXT_DROP,
-  IP4_REASSEMBLY_NEXT_HANDOFF,
-  IP4_REASSEMBLY_N_NEXT,
-} ip4_reass_next_t;
+  IP4_FULL_REASS_NEXT_INPUT,
+  IP4_FULL_REASS_NEXT_DROP,
+  IP4_FULL_REASS_NEXT_HANDOFF,
+  IP4_FULL_REASS_N_NEXT,
+} ip4_full_reass_next_t;
 
 typedef enum
 {
@@ -207,7 +209,7 @@
   RANGE_OVERLAP,
   FINALIZE,
   HANDOFF,
-} ip4_reass_trace_operation_e;
+} ip4_full_reass_trace_operation_e;
 
 typedef struct
 {
@@ -217,13 +219,13 @@
   i32 data_offset;
   u32 data_len;
   u32 first_bi;
-} ip4_reass_range_trace_t;
+} ip4_full_reass_range_trace_t;
 
 typedef struct
 {
-  ip4_reass_trace_operation_e action;
+  ip4_full_reass_trace_operation_e action;
   u32 reass_id;
-  ip4_reass_range_trace_t trace_range;
+  ip4_full_reass_range_trace_t trace_range;
   u32 size_diff;
   u32 op_id;
   u32 thread_id;
@@ -231,40 +233,42 @@
   u32 fragment_first;
   u32 fragment_last;
   u32 total_data_len;
-} ip4_reass_trace_t;
+} ip4_full_reass_trace_t;
 
-extern vlib_node_registration_t ip4_reass_node;
-extern vlib_node_registration_t ip4_reass_node_feature;
+extern vlib_node_registration_t ip4_full_reass_node;
+extern vlib_node_registration_t ip4_full_reass_node_feature;
 
 static void
-ip4_reass_trace_details (vlib_main_t * vm, u32 bi,
-			 ip4_reass_range_trace_t * trace)
+ip4_full_reass_trace_details (vlib_main_t * vm, u32 bi,
+			      ip4_full_reass_range_trace_t * trace)
 {
   vlib_buffer_t *b = vlib_get_buffer (vm, bi);
   vnet_buffer_opaque_t *vnb = vnet_buffer (b);
   trace->range_first = vnb->ip.reass.range_first;
   trace->range_last = vnb->ip.reass.range_last;
-  trace->data_offset = ip4_reass_buffer_get_data_offset (b);
-  trace->data_len = ip4_reass_buffer_get_data_len (b);
+  trace->data_offset = ip4_full_reass_buffer_get_data_offset (b);
+  trace->data_len = ip4_full_reass_buffer_get_data_len (b);
   trace->range_bi = bi;
 }
 
 static u8 *
-format_ip4_reass_range_trace (u8 * s, va_list * args)
+format_ip4_full_reass_range_trace (u8 * s, va_list * args)
 {
-  ip4_reass_range_trace_t *trace = va_arg (*args, ip4_reass_range_trace_t *);
-  s = format (s, "range: [%u, %u], off %d, len %u, bi %u", trace->range_first,
-	      trace->range_last, trace->data_offset, trace->data_len,
-	      trace->range_bi);
+  ip4_full_reass_range_trace_t *trace =
+    va_arg (*args, ip4_full_reass_range_trace_t *);
+  s =
+    format (s, "range: [%u, %u], off %d, len %u, bi %u", trace->range_first,
+	    trace->range_last, trace->data_offset, trace->data_len,
+	    trace->range_bi);
   return s;
 }
 
 static u8 *
-format_ip4_reass_trace (u8 * s, va_list * args)
+format_ip4_full_reass_trace (u8 * s, va_list * args)
 {
   CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
-  ip4_reass_trace_t *t = va_arg (*args, ip4_reass_trace_t *);
+  ip4_full_reass_trace_t *t = va_arg (*args, ip4_full_reass_trace_t *);
   u32 indent = 0;
   if (~0 != t->reass_id)
     {
@@ -280,20 +284,20 @@
     {
     case RANGE_SHRINK:
       s = format (s, "\n%Ushrink %U by %u", format_white_space, indent,
-		  format_ip4_reass_range_trace, &t->trace_range,
+		  format_ip4_full_reass_range_trace, &t->trace_range,
 		  t->size_diff);
       break;
     case RANGE_DISCARD:
       s = format (s, "\n%Udiscard %U", format_white_space, indent,
-		  format_ip4_reass_range_trace, &t->trace_range);
+		  format_ip4_full_reass_range_trace, &t->trace_range);
       break;
     case RANGE_NEW:
       s = format (s, "\n%Unew %U", format_white_space, indent,
-		  format_ip4_reass_range_trace, &t->trace_range);
+		  format_ip4_full_reass_range_trace, &t->trace_range);
       break;
     case RANGE_OVERLAP:
       s = format (s, "\n%Uoverlapping/ignored %U", format_white_space, indent,
-		  format_ip4_reass_range_trace, &t->trace_range);
+		  format_ip4_full_reass_range_trace, &t->trace_range);
       break;
     case FINALIZE:
       s = format (s, "\n%Ufinalize reassembly", format_white_space, indent);
@@ -308,29 +312,40 @@
 }
 
 static void
-ip4_reass_add_trace (vlib_main_t * vm, vlib_node_runtime_t * node,
-		     ip4_reass_main_t * rm, u32 reass_id, u32 op_id,
-		     u32 bi, u32 first_bi, u32 data_len,
-		     ip4_reass_trace_operation_e action, u32 size_diff,
-		     u32 thread_id_to)
+ip4_full_reass_add_trace (vlib_main_t * vm, vlib_node_runtime_t * node,
+			  ip4_full_reass_main_t * rm,
+			  ip4_full_reass_t * reass, u32 bi,
+			  ip4_full_reass_trace_operation_e action,
+			  u32 size_diff, u32 thread_id_to)
 {
   vlib_buffer_t *b = vlib_get_buffer (vm, bi);
   vnet_buffer_opaque_t *vnb = vnet_buffer (b);
-  ip4_reass_trace_t *t = vlib_add_trace (vm, node, b, sizeof (t[0]));
-  t->reass_id = reass_id;
+  ip4_full_reass_trace_t *t = vlib_add_trace (vm, node, b, sizeof (t[0]));
+  if (reass)
+    {
+      t->reass_id = reass->id;
+      t->op_id = reass->trace_op_counter;
+      t->trace_range.first_bi = reass->first_bi;
+      t->total_data_len = reass->data_len;
+      ++reass->trace_op_counter;
+    }
+  else
+    {
+      t->reass_id = ~0;
+      t->op_id = 0;
+      t->trace_range.first_bi = 0;
+      t->total_data_len = 0;
+    }
   t->action = action;
-  ip4_reass_trace_details (vm, bi, &t->trace_range);
+  ip4_full_reass_trace_details (vm, bi, &t->trace_range);
   t->size_diff = size_diff;
-  t->op_id = op_id;
   t->thread_id = vm->thread_index;
   t->thread_id_to = thread_id_to;
   t->fragment_first = vnb->ip.reass.fragment_first;
   t->fragment_last = vnb->ip.reass.fragment_last;
-  t->trace_range.first_bi = first_bi;
-  t->total_data_len = data_len;
 #if 0
   static u8 *s = NULL;
-  s = format (s, "%U", format_ip4_reass_trace, NULL, NULL, t);
+  s = format (s, "%U", format_ip4_full_reass_trace, NULL, NULL, t);
   printf ("%.*s\n", vec_len (s), s);
   fflush (stdout);
   vec_reset_length (s);
@@ -338,26 +353,28 @@
 }
 
 always_inline void
-ip4_reass_free_ctx (ip4_reass_per_thread_t * rt, ip4_reass_t * reass)
+ip4_full_reass_free_ctx (ip4_full_reass_per_thread_t * rt,
+			 ip4_full_reass_t * reass)
 {
   pool_put (rt->pool, reass);
   --rt->reass_n;
 }
 
 always_inline void
-ip4_reass_free (vlib_main_t * vm, ip4_reass_main_t * rm,
-		ip4_reass_per_thread_t * rt, ip4_reass_t * reass)
+ip4_full_reass_free (ip4_full_reass_main_t * rm,
+		     ip4_full_reass_per_thread_t * rt,
+		     ip4_full_reass_t * reass)
 {
   clib_bihash_kv_16_8_t kv;
   kv.key[0] = reass->key.as_u64[0];
   kv.key[1] = reass->key.as_u64[1];
   clib_bihash_add_del_16_8 (&rm->hash, &kv, 0);
-  return ip4_reass_free_ctx (rt, reass);
+  return ip4_full_reass_free_ctx (rt, reass);
 }
 
 always_inline void
-ip4_reass_drop_all (vlib_main_t * vm, vlib_node_runtime_t * node,
-		    ip4_reass_main_t * rm, ip4_reass_t * reass)
+ip4_full_reass_drop_all (vlib_main_t * vm, vlib_node_runtime_t * node,
+			 ip4_full_reass_main_t * rm, ip4_full_reass_t * reass)
 {
   u32 range_bi = reass->first_bi;
   vlib_buffer_t *range_b;
@@ -416,12 +433,23 @@
     }
 }
 
-static ip4_reass_t *
-ip4_reass_find_or_create (vlib_main_t * vm, vlib_node_runtime_t * node,
-			  ip4_reass_main_t * rm, ip4_reass_per_thread_t * rt,
-			  ip4_reass_kv_t * kv, u8 * do_handoff)
+always_inline void
+ip4_full_reass_init (ip4_full_reass_t * reass)
 {
-  ip4_reass_t *reass;
+  reass->first_bi = ~0;
+  reass->last_packet_octet = ~0;
+  reass->data_len = 0;
+  reass->next_index = ~0;
+  reass->error_next_index = ~0;
+}
+
+always_inline ip4_full_reass_t *
+ip4_full_reass_find_or_create (vlib_main_t * vm, vlib_node_runtime_t * node,
+			       ip4_full_reass_main_t * rm,
+			       ip4_full_reass_per_thread_t * rt,
+			       ip4_full_reass_kv_t * kv, u8 * do_handoff)
+{
+  ip4_full_reass_t *reass;
   f64 now;
 
 again:
@@ -443,8 +471,8 @@
 
       if (now > reass->last_heard + rm->timeout)
 	{
-	  ip4_reass_drop_all (vm, node, rm, reass);
-	  ip4_reass_free (vm, rm, rt, reass);
+	  ip4_full_reass_drop_all (vm, node, rm, reass);
+	  ip4_full_reass_free (rm, rt, reass);
 	  reass = NULL;
 	}
     }
@@ -467,11 +495,7 @@
       reass->id = ((u64) vm->thread_index * 1000000000) + rt->id_counter;
       reass->memory_owner_thread_index = vm->thread_index;
       ++rt->id_counter;
-      reass->first_bi = ~0;
-      reass->last_packet_octet = ~0;
-      reass->data_len = 0;
-      reass->next_index = ~0;
-      reass->error_next_index = ~0;
+      ip4_full_reass_init (reass);
       ++rt->reass_n;
     }
 
@@ -485,7 +509,7 @@
     clib_bihash_add_del_16_8 (&rm->hash, (clib_bihash_kv_16_8_t *) kv, 2);
   if (rv)
     {
-      ip4_reass_free_ctx (rt, reass);
+      ip4_full_reass_free_ctx (rt, reass);
       reass = NULL;
       // if other worker created a context already work with the other copy
       if (-2 == rv)
@@ -495,11 +519,12 @@
   return reass;
 }
 
-always_inline ip4_reass_rc_t
-ip4_reass_finalize (vlib_main_t * vm, vlib_node_runtime_t * node,
-		    ip4_reass_main_t * rm, ip4_reass_per_thread_t * rt,
-		    ip4_reass_t * reass, u32 * bi0, u32 * next0, u32 * error0,
-		    bool is_custom_app)
+always_inline ip4_full_reass_rc_t
+ip4_full_reass_finalize (vlib_main_t * vm, vlib_node_runtime_t * node,
+			 ip4_full_reass_main_t * rm,
+			 ip4_full_reass_per_thread_t * rt,
+			 ip4_full_reass_t * reass, u32 * bi0,
+			 u32 * next0, u32 * error0, bool is_custom_app)
 {
   vlib_buffer_t *first_b = vlib_get_buffer (vm, reass->first_bi);
   vlib_buffer_t *last_b = NULL;
@@ -518,15 +543,15 @@
 	  return IP4_REASS_RC_INTERNAL_ERROR;
 	}
 
-      u32 data_len = ip4_reass_buffer_get_data_len (tmp);
+      u32 data_len = ip4_full_reass_buffer_get_data_len (tmp);
       u32 trim_front =
-	ip4_header_bytes (ip) + ip4_reass_buffer_get_data_offset (tmp);
+	ip4_header_bytes (ip) + ip4_full_reass_buffer_get_data_offset (tmp);
       u32 trim_end =
 	vlib_buffer_length_in_chain (vm, tmp) - trim_front - data_len;
       if (tmp_bi == reass->first_bi)
 	{
 	  /* first buffer - keep ip4 header */
-	  if (0 != ip4_reass_buffer_get_data_offset (tmp))
+	  if (0 != ip4_full_reass_buffer_get_data_offset (tmp))
 	    {
 	      return IP4_REASS_RC_INTERNAL_ERROR;
 	    }
@@ -653,10 +678,8 @@
   first_b->flags &= ~VLIB_BUFFER_EXT_HDR_VALID;
   if (PREDICT_FALSE (first_b->flags & VLIB_BUFFER_IS_TRACED))
     {
-      ip4_reass_add_trace (vm, node, rm, reass->id, reass->trace_op_counter,
-			   reass->first_bi, reass->first_bi, reass->data_len,
-			   FINALIZE, 0, ~0);
-      ++reass->trace_op_counter;
+      ip4_full_reass_add_trace (vm, node, rm, reass, reass->first_bi,
+				FINALIZE, 0, ~0);
 #if 0
       // following code does a hexdump of packet fragments to stdout ...
       do
@@ -687,7 +710,7 @@
   *bi0 = reass->first_bi;
   if (!is_custom_app)
     {
-      *next0 = IP4_REASSEMBLY_NEXT_INPUT;
+      *next0 = IP4_FULL_REASS_NEXT_INPUT;
     }
   else
     {
@@ -695,17 +718,17 @@
     }
   vnet_buffer (first_b)->ip.reass.estimated_mtu = reass->min_fragment_length;
   *error0 = IP4_ERROR_NONE;
-  ip4_reass_free (vm, rm, rt, reass);
+  ip4_full_reass_free (rm, rt, reass);
   reass = NULL;
   return IP4_REASS_RC_OK;
 }
 
-always_inline ip4_reass_rc_t
-ip4_reass_insert_range_in_chain (vlib_main_t * vm,
-				 ip4_reass_main_t * rm,
-				 ip4_reass_per_thread_t * rt,
-				 ip4_reass_t * reass,
-				 u32 prev_range_bi, u32 new_next_bi)
+always_inline ip4_full_reass_rc_t
+ip4_full_reass_insert_range_in_chain (vlib_main_t * vm,
+				      ip4_full_reass_main_t * rm,
+				      ip4_full_reass_per_thread_t * rt,
+				      ip4_full_reass_t * reass,
+				      u32 prev_range_bi, u32 new_next_bi)
 {
   vlib_buffer_t *new_next_b = vlib_get_buffer (vm, new_next_bi);
   vnet_buffer_opaque_t *new_next_vnb = vnet_buffer (new_next_b);
@@ -730,16 +753,16 @@
     {
       return IP4_REASS_RC_INTERNAL_ERROR;
     }
-  reass->data_len += ip4_reass_buffer_get_data_len (new_next_b);
+  reass->data_len += ip4_full_reass_buffer_get_data_len (new_next_b);
   return IP4_REASS_RC_OK;
 }
 
-always_inline ip4_reass_rc_t
-ip4_reass_remove_range_from_chain (vlib_main_t * vm,
-				   vlib_node_runtime_t * node,
-				   ip4_reass_main_t * rm,
-				   ip4_reass_t * reass, u32 prev_range_bi,
-				   u32 discard_bi)
+always_inline ip4_full_reass_rc_t
+ip4_full_reass_remove_range_from_chain (vlib_main_t * vm,
+					vlib_node_runtime_t * node,
+					ip4_full_reass_main_t * rm,
+					ip4_full_reass_t * reass,
+					u32 prev_range_bi, u32 discard_bi)
 {
   vlib_buffer_t *discard_b = vlib_get_buffer (vm, discard_bi);
   vnet_buffer_opaque_t *discard_vnb = vnet_buffer (discard_b);
@@ -763,17 +786,14 @@
     {
       return IP4_REASS_RC_INTERNAL_ERROR;
     }
-  reass->data_len -= ip4_reass_buffer_get_data_len (discard_b);
+  reass->data_len -= ip4_full_reass_buffer_get_data_len (discard_b);
   while (1)
     {
       u32 to_be_freed_bi = discard_bi;
       if (PREDICT_FALSE (discard_b->flags & VLIB_BUFFER_IS_TRACED))
 	{
-	  ip4_reass_add_trace (vm, node, rm, reass->id,
-			       reass->trace_op_counter, discard_bi,
-			       reass->first_bi, reass->data_len,
-			       RANGE_DISCARD, 0, ~0);
-	  ++reass->trace_op_counter;
+	  ip4_full_reass_add_trace (vm, node, rm, reass, discard_bi,
+				    RANGE_DISCARD, 0, ~0);
 	}
       if (discard_b->flags & VLIB_BUFFER_NEXT_PRESENT)
 	{
@@ -793,16 +813,15 @@
   return IP4_REASS_RC_OK;
 }
 
-always_inline ip4_reass_rc_t
-ip4_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
-		  ip4_reass_main_t * rm, ip4_reass_per_thread_t * rt,
-		  ip4_reass_t * reass, u32 * bi0, u32 * next0, u32 * error0,
-		  bool is_custom_app, u32 * handoff_thread_idx)
+always_inline ip4_full_reass_rc_t
+ip4_full_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
+		       ip4_full_reass_main_t * rm,
+		       ip4_full_reass_per_thread_t * rt,
+		       ip4_full_reass_t * reass, u32 * bi0, u32 * next0,
+		       u32 * error0, bool is_custom_app,
+		       u32 * handoff_thread_idx)
 {
-  ip4_reass_rc_t rc = IP4_REASS_RC_OK;
-  int consumed = 0;
   vlib_buffer_t *fb = vlib_get_buffer (vm, *bi0);
-  ip4_header_t *fip = vlib_buffer_get_current (fb);
   vnet_buffer_opaque_t *fvnb = vnet_buffer (fb);
   if (is_custom_app)
     {
@@ -810,6 +829,9 @@
       reass->next_index = fvnb->ip.reass.next_index;
       reass->error_next_index = fvnb->ip.reass.error_next_index;
     }
+  ip4_full_reass_rc_t rc = IP4_REASS_RC_OK;
+  int consumed = 0;
+  ip4_header_t *fip = vlib_buffer_get_current (fb);
   const u32 fragment_first = ip4_get_fragment_offset_bytes (fip);
   const u32 fragment_length =
     clib_net_to_host_u16 (fip->length) - ip4_header_bytes (fip);
@@ -830,26 +852,25 @@
     {
       // starting a new reassembly
       rc =
-	ip4_reass_insert_range_in_chain (vm, rm, rt, reass, prev_range_bi,
-					 *bi0);
+	ip4_full_reass_insert_range_in_chain (vm, rm, rt, reass,
+					      prev_range_bi, *bi0);
       if (IP4_REASS_RC_OK != rc)
 	{
 	  return rc;
 	}
       if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
 	{
-	  ip4_reass_add_trace (vm, node, rm, reass->id,
-			       reass->trace_op_counter, *bi0, reass->first_bi,
-			       reass->data_len, RANGE_NEW, 0, ~0);
-	  ++reass->trace_op_counter;
+	  ip4_full_reass_add_trace (vm, node, rm, reass, *bi0, RANGE_NEW, 0,
+				    ~0);
 	}
       *bi0 = ~0;
       reass->min_fragment_length = clib_net_to_host_u16 (fip->length);
       reass->fragments_n = 1;
       return IP4_REASS_RC_OK;
     }
-  reass->min_fragment_length = clib_min (clib_net_to_host_u16 (fip->length),
-					 fvnb->ip.reass.estimated_mtu);
+  reass->min_fragment_length =
+    clib_min (clib_net_to_host_u16 (fip->length),
+	      fvnb->ip.reass.estimated_mtu);
   while (~0 != candidate_range_bi)
     {
       vlib_buffer_t *candidate_b = vlib_get_buffer (vm, candidate_range_bi);
@@ -864,8 +885,8 @@
 	    {
 	      // special case - this fragment falls beyond all known ranges
 	      rc =
-		ip4_reass_insert_range_in_chain (vm, rm, rt, reass,
-						 prev_range_bi, *bi0);
+		ip4_full_reass_insert_range_in_chain (vm, rm, rt, reass,
+						      prev_range_bi, *bi0);
 	      if (IP4_REASS_RC_OK != rc)
 		{
 		  return rc;
@@ -879,8 +900,8 @@
 	{
 	  // this fragment ends before candidate range without any overlap
 	  rc =
-	    ip4_reass_insert_range_in_chain (vm, rm, rt, reass, prev_range_bi,
-					     *bi0);
+	    ip4_full_reass_insert_range_in_chain (vm, rm, rt, reass,
+						  prev_range_bi, *bi0);
 	  if (IP4_REASS_RC_OK != rc)
 	    {
 	      return rc;
@@ -895,11 +916,8 @@
 	      // this fragment is a (sub)part of existing range, ignore it
 	      if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
 		{
-		  ip4_reass_add_trace (vm, node, rm, reass->id,
-				       reass->trace_op_counter, *bi0,
-				       reass->first_bi, reass->data_len,
-				       RANGE_OVERLAP, 0, ~0);
-		  ++reass->trace_op_counter;
+		  ip4_full_reass_add_trace (vm, node, rm, reass, *bi0,
+					    RANGE_OVERLAP, 0, ~0);
 		}
 	      break;
 	    }
@@ -908,7 +926,7 @@
 	    {
 	      u32 overlap =
 		fragment_last - candidate_vnb->ip.reass.range_first + 1;
-	      if (overlap < ip4_reass_buffer_get_data_len (candidate_b))
+	      if (overlap < ip4_full_reass_buffer_get_data_len (candidate_b))
 		{
 		  candidate_vnb->ip.reass.range_first += overlap;
 		  if (reass->data_len < overlap)
@@ -918,16 +936,14 @@
 		  reass->data_len -= overlap;
 		  if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
 		    {
-		      ip4_reass_add_trace (vm, node, rm, reass->id,
-					   reass->trace_op_counter,
-					   candidate_range_bi,
-					   reass->first_bi, reass->data_len,
-					   RANGE_SHRINK, 0, ~0);
-		      ++reass->trace_op_counter;
+		      ip4_full_reass_add_trace (vm, node, rm, reass,
+						candidate_range_bi,
+						RANGE_SHRINK, 0, ~0);
 		    }
 		  rc =
-		    ip4_reass_insert_range_in_chain (vm, rm, rt, reass,
-						     prev_range_bi, *bi0);
+		    ip4_full_reass_insert_range_in_chain (vm, rm, rt, reass,
+							  prev_range_bi,
+							  *bi0);
 		  if (IP4_REASS_RC_OK != rc)
 		    {
 		      return rc;
@@ -943,7 +959,7 @@
 	    {
 	      u32 overlap =
 		candidate_vnb->ip.reass.range_last - fragment_first + 1;
-	      if (overlap < ip4_reass_buffer_get_data_len (candidate_b))
+	      if (overlap < ip4_full_reass_buffer_get_data_len (candidate_b))
 		{
 		  fvnb->ip.reass.range_first += overlap;
 		  if (~0 != candidate_vnb->ip.reass.next_range_bi)
@@ -957,9 +973,10 @@
 		    {
 		      // special case - last range discarded
 		      rc =
-			ip4_reass_insert_range_in_chain (vm, rm, rt, reass,
-							 candidate_range_bi,
-							 *bi0);
+			ip4_full_reass_insert_range_in_chain (vm, rm, rt,
+							      reass,
+							      candidate_range_bi,
+							      *bi0);
 		      if (IP4_REASS_RC_OK != rc)
 			{
 			  return rc;
@@ -981,9 +998,9 @@
 	      u32 next_range_bi = candidate_vnb->ip.reass.next_range_bi;
 	      // discard candidate range, probe next range
 	      rc =
-		ip4_reass_remove_range_from_chain (vm, node, rm, reass,
-						   prev_range_bi,
-						   candidate_range_bi);
+		ip4_full_reass_remove_range_from_chain (vm, node, rm, reass,
+							prev_range_bi,
+							candidate_range_bi);
 	      if (IP4_REASS_RC_OK != rc)
 		{
 		  return rc;
@@ -997,8 +1014,9 @@
 		{
 		  // special case - last range discarded
 		  rc =
-		    ip4_reass_insert_range_in_chain (vm, rm, rt, reass,
-						     prev_range_bi, *bi0);
+		    ip4_full_reass_insert_range_in_chain (vm, rm, rt, reass,
+							  prev_range_bi,
+							  *bi0);
 		  if (IP4_REASS_RC_OK != rc)
 		    {
 		      return rc;
@@ -1014,10 +1032,8 @@
     {
       if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
 	{
-	  ip4_reass_add_trace (vm, node, rm, reass->id,
-			       reass->trace_op_counter, *bi0, reass->first_bi,
-			       reass->data_len, RANGE_NEW, 0, ~0);
-	  ++reass->trace_op_counter;
+	  ip4_full_reass_add_trace (vm, node, rm, reass, *bi0, RANGE_NEW, 0,
+				    ~0);
 	}
     }
   if (~0 != reass->last_packet_octet &&
@@ -1025,8 +1041,8 @@
     {
       *handoff_thread_idx = reass->sendout_thread_index;
       rc =
-	ip4_reass_finalize (vm, node, rm, rt, reass, bi0, next0, error0,
-			    is_custom_app);
+	ip4_full_reass_finalize (vm, node, rm, rt, reass, bi0, next0, error0,
+				 is_custom_app);
       if (IP4_REASS_RC_OK == rc
 	  && reass->memory_owner_thread_index != reass->sendout_thread_index)
 	{
@@ -1045,7 +1061,7 @@
 	}
       else
 	{
-	  *next0 = IP4_REASSEMBLY_NEXT_DROP;
+	  *next0 = IP4_FULL_REASS_NEXT_DROP;
 	  *error0 = IP4_ERROR_REASS_DUPLICATE_FRAGMENT;
 	}
     }
@@ -1053,14 +1069,14 @@
 }
 
 always_inline uword
-ip4_reassembly_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
+ip4_full_reass_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
 		       vlib_frame_t * frame, bool is_feature,
 		       bool is_custom_app)
 {
   u32 *from = vlib_frame_vector_args (frame);
   u32 n_left_from, n_left_to_next, *to_next, next_index;
-  ip4_reass_main_t *rm = &ip4_reass_main;
-  ip4_reass_per_thread_t *rt = &rm->per_thread_data[vm->thread_index];
+  ip4_full_reass_main_t *rm = &ip4_full_reass_main;
+  ip4_full_reass_per_thread_t *rt = &rm->per_thread_data[vm->thread_index];
   clib_spinlock_lock (&rt->lock);
 
   n_left_from = frame->n_vectors;
@@ -1085,138 +1101,133 @@
 	      // this is a whole packet - no fragmentation
 	      if (!is_custom_app)
 		{
-		  next0 = IP4_REASSEMBLY_NEXT_INPUT;
+		  next0 = IP4_FULL_REASS_NEXT_INPUT;
 		}
 	      else
 		{
 		  next0 = vnet_buffer (b0)->ip.reass.next_index;
 		}
+	      goto packet_enqueue;
+	    }
+	  const u32 fragment_first = ip4_get_fragment_offset_bytes (ip0);
+	  const u32 fragment_length =
+	    clib_net_to_host_u16 (ip0->length) - ip4_header_bytes (ip0);
+	  const u32 fragment_last = fragment_first + fragment_length - 1;
+	  if (fragment_first > fragment_last || fragment_first + fragment_length > UINT16_MAX - 20 || (fragment_length < 8 && ip4_get_fragment_more (ip0)))	// 8 is minimum frag length per RFC 791
+	    {
+	      next0 = IP4_FULL_REASS_NEXT_DROP;
+	      error0 = IP4_ERROR_REASS_MALFORMED_PACKET;
+	      goto packet_enqueue;
+	    }
+	  ip4_full_reass_kv_t kv;
+	  u8 do_handoff = 0;
+
+	  kv.k.as_u64[0] =
+	    (u64) vec_elt (ip4_main.fib_index_by_sw_if_index,
+			   vnet_buffer (b0)->sw_if_index[VLIB_RX]) |
+	    (u64) ip0->src_address.as_u32 << 32;
+	  kv.k.as_u64[1] =
+	    (u64) ip0->dst_address.
+	    as_u32 | (u64) ip0->fragment_id << 32 | (u64) ip0->protocol << 48;
+
+	  ip4_full_reass_t *reass =
+	    ip4_full_reass_find_or_create (vm, node, rm, rt, &kv,
+					   &do_handoff);
+
+	  if (reass)
+	    {
+	      const u32 fragment_first = ip4_get_fragment_offset_bytes (ip0);
+	      if (0 == fragment_first)
+		{
+		  reass->sendout_thread_index = vm->thread_index;
+		}
+	    }
+
+	  if (PREDICT_FALSE (do_handoff))
+	    {
+	      next0 = IP4_FULL_REASS_NEXT_HANDOFF;
+	      if (is_feature)
+		vnet_buffer (b0)->ip.reass.owner_feature_thread_index =
+		  kv.v.memory_owner_thread_index;
+	      else
+		vnet_buffer (b0)->ip.reass.owner_thread_index =
+		  kv.v.memory_owner_thread_index;
+	    }
+	  else if (reass)
+	    {
+	      u32 handoff_thread_idx;
+	      switch (ip4_full_reass_update
+		      (vm, node, rm, rt, reass, &bi0, &next0,
+		       &error0, is_custom_app, &handoff_thread_idx))
+		{
+		case IP4_REASS_RC_OK:
+		  /* nothing to do here */
+		  break;
+		case IP4_REASS_RC_HANDOFF:
+		  next0 = IP4_FULL_REASS_NEXT_HANDOFF;
+		  b0 = vlib_get_buffer (vm, bi0);
+		  if (is_feature)
+		    vnet_buffer (b0)->ip.reass.owner_feature_thread_index =
+		      handoff_thread_idx;
+		  else
+		    vnet_buffer (b0)->ip.reass.owner_thread_index =
+		      handoff_thread_idx;
+		  break;
+		case IP4_REASS_RC_TOO_MANY_FRAGMENTS:
+		  vlib_node_increment_counter (vm, node->node_index,
+					       IP4_ERROR_REASS_FRAGMENT_CHAIN_TOO_LONG,
+					       1);
+		  ip4_full_reass_drop_all (vm, node, rm, reass);
+		  ip4_full_reass_free (rm, rt, reass);
+		  goto next_packet;
+		  break;
+		case IP4_REASS_RC_NO_BUF:
+		  vlib_node_increment_counter (vm, node->node_index,
+					       IP4_ERROR_REASS_NO_BUF, 1);
+		  ip4_full_reass_drop_all (vm, node, rm, reass);
+		  ip4_full_reass_free (rm, rt, reass);
+		  goto next_packet;
+		  break;
+		case IP4_REASS_RC_INTERNAL_ERROR:
+		  /* drop everything and start with a clean slate */
+		  vlib_node_increment_counter (vm, node->node_index,
+					       IP4_ERROR_REASS_INTERNAL_ERROR,
+					       1);
+		  ip4_full_reass_drop_all (vm, node, rm, reass);
+		  ip4_full_reass_free (rm, rt, reass);
+		  goto next_packet;
+		  break;
+		}
 	    }
 	  else
 	    {
-	      const u32 fragment_first = ip4_get_fragment_offset_bytes (ip0);
-	      const u32 fragment_length =
-		clib_net_to_host_u16 (ip0->length) - ip4_header_bytes (ip0);
-	      const u32 fragment_last = fragment_first + fragment_length - 1;
-	      if (fragment_first > fragment_last || fragment_first + fragment_length > UINT16_MAX - 20 || (fragment_length < 8 && ip4_get_fragment_more (ip0)))	// 8 is minimum frag length per RFC 791
-		{
-		  next0 = IP4_REASSEMBLY_NEXT_DROP;
-		  error0 = IP4_ERROR_REASS_MALFORMED_PACKET;
-		}
-	      else
-		{
-		  ip4_reass_kv_t kv;
-		  u8 do_handoff = 0;
-
-		  kv.k.as_u64[0] =
-		    (u64) vec_elt (ip4_main.fib_index_by_sw_if_index,
-				   vnet_buffer (b0)->sw_if_index[VLIB_RX]) |
-		    (u64) ip0->src_address.as_u32 << 32;
-		  kv.k.as_u64[1] =
-		    (u64) ip0->dst_address.as_u32 |
-		    (u64) ip0->fragment_id << 32 | (u64) ip0->protocol << 48;
-
-		  ip4_reass_t *reass =
-		    ip4_reass_find_or_create (vm, node, rm, rt, &kv,
-					      &do_handoff);
-		  if (reass)
-		    {
-		      const u32 fragment_first =
-			ip4_get_fragment_offset_bytes (ip0);
-		      if (0 == fragment_first)
-			{
-			  reass->sendout_thread_index = vm->thread_index;
-			}
-		    }
-		  if (PREDICT_FALSE (do_handoff))
-		    {
-		      next0 = IP4_REASSEMBLY_NEXT_HANDOFF;
-		      if (is_feature)
-			vnet_buffer (b0)->ip.
-			  reass.owner_feature_thread_index =
-			  kv.v.memory_owner_thread_index;
-		      else
-			vnet_buffer (b0)->ip.reass.owner_thread_index =
-			  kv.v.memory_owner_thread_index;
-		    }
-		  else if (reass)
-		    {
-		      u32 handoff_thread_idx;
-		      switch (ip4_reass_update
-			      (vm, node, rm, rt, reass, &bi0, &next0,
-			       &error0, is_custom_app, &handoff_thread_idx))
-			{
-			case IP4_REASS_RC_OK:
-			  /* nothing to do here */
-			  break;
-			case IP4_REASS_RC_HANDOFF:
-			  next0 = IP4_REASSEMBLY_NEXT_HANDOFF;
-			  b0 = vlib_get_buffer (vm, bi0);
-			  if (is_feature)
-			    vnet_buffer (b0)->ip.
-			      reass.owner_feature_thread_index =
-			      handoff_thread_idx;
-			  else
-			    vnet_buffer (b0)->ip.reass.owner_thread_index =
-			      handoff_thread_idx;
-			  break;
-			case IP4_REASS_RC_TOO_MANY_FRAGMENTS:
-			  vlib_node_increment_counter (vm, node->node_index,
-						       IP4_ERROR_REASS_FRAGMENT_CHAIN_TOO_LONG,
-						       1);
-			  ip4_reass_drop_all (vm, node, rm, reass);
-			  ip4_reass_free (vm, rm, rt, reass);
-			  goto next_packet;
-			  break;
-			case IP4_REASS_RC_NO_BUF:
-			  vlib_node_increment_counter (vm, node->node_index,
-						       IP4_ERROR_REASS_NO_BUF,
-						       1);
-			  ip4_reass_drop_all (vm, node, rm, reass);
-			  ip4_reass_free (vm, rm, rt, reass);
-			  goto next_packet;
-			  break;
-			case IP4_REASS_RC_INTERNAL_ERROR:
-			  /* drop everything and start with a clean slate */
-			  vlib_node_increment_counter (vm, node->node_index,
-						       IP4_ERROR_REASS_INTERNAL_ERROR,
-						       1);
-			  ip4_reass_drop_all (vm, node, rm, reass);
-			  ip4_reass_free (vm, rm, rt, reass);
-			  goto next_packet;
-			  break;
-			}
-		    }
-		  else
-		    {
-		      next0 = IP4_REASSEMBLY_NEXT_DROP;
-		      error0 = IP4_ERROR_REASS_LIMIT_REACHED;
-		    }
-		}
-
-	      b0->error = node->errors[error0];
+	      next0 = IP4_FULL_REASS_NEXT_DROP;
+	      error0 = IP4_ERROR_REASS_LIMIT_REACHED;
 	    }
 
+
+	packet_enqueue:
+	  b0->error = node->errors[error0];
+
 	  if (bi0 != ~0)
 	    {
 	      to_next[0] = bi0;
 	      to_next += 1;
 	      n_left_to_next -= 1;
-	      if (next0 == IP4_REASSEMBLY_NEXT_HANDOFF)
+	      if (next0 == IP4_FULL_REASS_NEXT_HANDOFF)
 		{
 		  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
 		    {
 		      if (is_feature)
-			ip4_reass_add_trace (vm, node, rm, ~0,
-					     ~0,
-					     bi0, ~0, ~0, HANDOFF, 0,
-					     vnet_buffer (b0)->ip.
-					     reass.owner_feature_thread_index);
+			ip4_full_reass_add_trace (vm, node, rm, NULL,
+						  bi0, HANDOFF, 0,
+						  vnet_buffer (b0)->ip.
+						  reass.owner_feature_thread_index);
 		      else
-			ip4_reass_add_trace (vm, node, rm, ~0, ~0, bi0,
-					     ~0, ~0, HANDOFF, 0,
-					     vnet_buffer (b0)->ip.
-					     reass.owner_thread_index);
+			ip4_full_reass_add_trace (vm, node, rm, NULL,
+						  bi0, HANDOFF, 0,
+						  vnet_buffer (b0)->ip.
+						  reass.owner_thread_index);
 		    }
 		}
 	      else if (is_feature && IP4_ERROR_NONE == error0)
@@ -1242,66 +1253,67 @@
   return frame->n_vectors;
 }
 
-static char *ip4_reassembly_error_strings[] = {
+static char *ip4_full_reass_error_strings[] = {
 #define _(sym, string) string,
   foreach_ip4_error
 #undef _
 };
 
-VLIB_NODE_FN (ip4_reass_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
-			       vlib_frame_t * frame)
+VLIB_NODE_FN (ip4_full_reass_node) (vlib_main_t * vm,
+				    vlib_node_runtime_t * node,
+				    vlib_frame_t * frame)
 {
-  return ip4_reassembly_inline (vm, node, frame, false /* is_feature */ ,
+  return ip4_full_reass_inline (vm, node, frame, false /* is_feature */ ,
 				false /* is_custom_app */ );
 }
 
 /* *INDENT-OFF* */
-VLIB_REGISTER_NODE (ip4_reass_node) = {
-    .name = "ip4-reassembly",
+VLIB_REGISTER_NODE (ip4_full_reass_node) = {
+    .name = "ip4-full-reassembly",
     .vector_size = sizeof (u32),
-    .format_trace = format_ip4_reass_trace,
-    .n_errors = ARRAY_LEN (ip4_reassembly_error_strings),
-    .error_strings = ip4_reassembly_error_strings,
-    .n_next_nodes = IP4_REASSEMBLY_N_NEXT,
+    .format_trace = format_ip4_full_reass_trace,
+    .n_errors = ARRAY_LEN (ip4_full_reass_error_strings),
+    .error_strings = ip4_full_reass_error_strings,
+    .n_next_nodes = IP4_FULL_REASS_N_NEXT,
     .next_nodes =
         {
-                [IP4_REASSEMBLY_NEXT_INPUT] = "ip4-input",
-                [IP4_REASSEMBLY_NEXT_DROP] = "ip4-drop",
-                [IP4_REASSEMBLY_NEXT_HANDOFF] = "ip4-reassembly-handoff",
+                [IP4_FULL_REASS_NEXT_INPUT] = "ip4-input",
+                [IP4_FULL_REASS_NEXT_DROP] = "ip4-drop",
+                [IP4_FULL_REASS_NEXT_HANDOFF] = "ip4-full-reassembly-handoff",
 
         },
 };
 /* *INDENT-ON* */
 
-VLIB_NODE_FN (ip4_reass_node_feature) (vlib_main_t * vm,
-				       vlib_node_runtime_t * node,
-				       vlib_frame_t * frame)
+VLIB_NODE_FN (ip4_full_reass_node_feature) (vlib_main_t * vm,
+					    vlib_node_runtime_t * node,
+					    vlib_frame_t * frame)
 {
-  return ip4_reassembly_inline (vm, node, frame, true /* is_feature */ ,
+  return ip4_full_reass_inline (vm, node, frame, true /* is_feature */ ,
 				false /* is_custom_app */ );
 }
 
 /* *INDENT-OFF* */
-VLIB_REGISTER_NODE (ip4_reass_node_feature) = {
-    .name = "ip4-reassembly-feature",
+VLIB_REGISTER_NODE (ip4_full_reass_node_feature) = {
+    .name = "ip4-full-reassembly-feature",
     .vector_size = sizeof (u32),
-    .format_trace = format_ip4_reass_trace,
-    .n_errors = ARRAY_LEN (ip4_reassembly_error_strings),
-    .error_strings = ip4_reassembly_error_strings,
-    .n_next_nodes = IP4_REASSEMBLY_N_NEXT,
+    .format_trace = format_ip4_full_reass_trace,
+    .n_errors = ARRAY_LEN (ip4_full_reass_error_strings),
+    .error_strings = ip4_full_reass_error_strings,
+    .n_next_nodes = IP4_FULL_REASS_N_NEXT,
     .next_nodes =
         {
-                [IP4_REASSEMBLY_NEXT_INPUT] = "ip4-input",
-                [IP4_REASSEMBLY_NEXT_DROP] = "ip4-drop",
-                [IP4_REASSEMBLY_NEXT_HANDOFF] = "ip4-reass-feature-hoff",
+                [IP4_FULL_REASS_NEXT_INPUT] = "ip4-input",
+                [IP4_FULL_REASS_NEXT_DROP] = "ip4-drop",
+                [IP4_FULL_REASS_NEXT_HANDOFF] = "ip4-full-reass-feature-hoff",
         },
 };
 /* *INDENT-ON* */
 
 /* *INDENT-OFF* */
-VNET_FEATURE_INIT (ip4_reassembly_feature, static) = {
+VNET_FEATURE_INIT (ip4_full_reass_feature, static) = {
     .arc_name = "ip4-unicast",
-    .node_name = "ip4-reassembly-feature",
+    .node_name = "ip4-full-reassembly-feature",
     .runs_before = VNET_FEATURES ("ip4-lookup",
                                   "ipsec4-input-feature"),
     .runs_after = 0,
@@ -1310,9 +1322,9 @@
 
 #ifndef CLIB_MARCH_VARIANT
 always_inline u32
-ip4_reass_get_nbuckets ()
+ip4_full_reass_get_nbuckets ()
 {
-  ip4_reass_main_t *rm = &ip4_reass_main;
+  ip4_full_reass_main_t *rm = &ip4_full_reass_main;
   u32 nbuckets;
   u8 i;
 
@@ -1330,7 +1342,7 @@
 typedef enum
 {
   IP4_EVENT_CONFIG_CHANGED = 1,
-} ip4_reass_event_t;
+} ip4_full_reass_event_t;
 
 typedef struct
 {
@@ -1350,37 +1362,38 @@
 }
 
 static void
-ip4_reass_set_params (u32 timeout_ms, u32 max_reassemblies,
-		      u32 max_reassembly_length, u32 expire_walk_interval_ms)
+ip4_full_reass_set_params (u32 timeout_ms, u32 max_reassemblies,
+			   u32 max_reassembly_length,
+			   u32 expire_walk_interval_ms)
 {
-  ip4_reass_main.timeout_ms = timeout_ms;
-  ip4_reass_main.timeout = (f64) timeout_ms / (f64) MSEC_PER_SEC;
-  ip4_reass_main.max_reass_n = max_reassemblies;
-  ip4_reass_main.max_reass_len = max_reassembly_length;
-  ip4_reass_main.expire_walk_interval_ms = expire_walk_interval_ms;
+  ip4_full_reass_main.timeout_ms = timeout_ms;
+  ip4_full_reass_main.timeout = (f64) timeout_ms / (f64) MSEC_PER_SEC;
+  ip4_full_reass_main.max_reass_n = max_reassemblies;
+  ip4_full_reass_main.max_reass_len = max_reassembly_length;
+  ip4_full_reass_main.expire_walk_interval_ms = expire_walk_interval_ms;
 }
 
 vnet_api_error_t
-ip4_reass_set (u32 timeout_ms, u32 max_reassemblies,
-	       u32 max_reassembly_length, u32 expire_walk_interval_ms)
+ip4_full_reass_set (u32 timeout_ms, u32 max_reassemblies,
+		    u32 max_reassembly_length, u32 expire_walk_interval_ms)
 {
-  u32 old_nbuckets = ip4_reass_get_nbuckets ();
-  ip4_reass_set_params (timeout_ms, max_reassemblies, max_reassembly_length,
-			expire_walk_interval_ms);
-  vlib_process_signal_event (ip4_reass_main.vlib_main,
-			     ip4_reass_main.ip4_reass_expire_node_idx,
+  u32 old_nbuckets = ip4_full_reass_get_nbuckets ();
+  ip4_full_reass_set_params (timeout_ms, max_reassemblies,
+			     max_reassembly_length, expire_walk_interval_ms);
+  vlib_process_signal_event (ip4_full_reass_main.vlib_main,
+			     ip4_full_reass_main.ip4_full_reass_expire_node_idx,
 			     IP4_EVENT_CONFIG_CHANGED, 0);
-  u32 new_nbuckets = ip4_reass_get_nbuckets ();
-  if (ip4_reass_main.max_reass_n > 0 && new_nbuckets > old_nbuckets)
+  u32 new_nbuckets = ip4_full_reass_get_nbuckets ();
+  if (ip4_full_reass_main.max_reass_n > 0 && new_nbuckets > old_nbuckets)
     {
       clib_bihash_16_8_t new_hash;
       clib_memset (&new_hash, 0, sizeof (new_hash));
       ip4_rehash_cb_ctx ctx;
       ctx.failure = 0;
       ctx.new_hash = &new_hash;
-      clib_bihash_init_16_8 (&new_hash, "ip4-reass", new_nbuckets,
+      clib_bihash_init_16_8 (&new_hash, "ip4-dr", new_nbuckets,
 			     new_nbuckets * 1024);
-      clib_bihash_foreach_key_value_pair_16_8 (&ip4_reass_main.hash,
+      clib_bihash_foreach_key_value_pair_16_8 (&ip4_full_reass_main.hash,
 					       ip4_rehash_cb, &ctx);
       if (ctx.failure)
 	{
@@ -1389,30 +1402,31 @@
 	}
       else
 	{
-	  clib_bihash_free_16_8 (&ip4_reass_main.hash);
-	  clib_memcpy_fast (&ip4_reass_main.hash, &new_hash,
-			    sizeof (ip4_reass_main.hash));
-	  clib_bihash_copied (&ip4_reass_main.hash, &new_hash);
+	  clib_bihash_free_16_8 (&ip4_full_reass_main.hash);
+	  clib_memcpy_fast (&ip4_full_reass_main.hash, &new_hash,
+			    sizeof (ip4_full_reass_main.hash));
+	  clib_bihash_copied (&ip4_full_reass_main.hash, &new_hash);
 	}
     }
   return 0;
 }
 
 vnet_api_error_t
-ip4_reass_get (u32 * timeout_ms, u32 * max_reassemblies,
-	       u32 * max_reassembly_length, u32 * expire_walk_interval_ms)
+ip4_full_reass_get (u32 * timeout_ms, u32 * max_reassemblies,
+		    u32 * max_reassembly_length,
+		    u32 * expire_walk_interval_ms)
 {
-  *timeout_ms = ip4_reass_main.timeout_ms;
-  *max_reassemblies = ip4_reass_main.max_reass_n;
-  *max_reassembly_length = ip4_reass_main.max_reass_len;
-  *expire_walk_interval_ms = ip4_reass_main.expire_walk_interval_ms;
+  *timeout_ms = ip4_full_reass_main.timeout_ms;
+  *max_reassemblies = ip4_full_reass_main.max_reass_n;
+  *max_reassembly_length = ip4_full_reass_main.max_reass_len;
+  *expire_walk_interval_ms = ip4_full_reass_main.expire_walk_interval_ms;
   return 0;
 }
 
 static clib_error_t *
-ip4_reass_init_function (vlib_main_t * vm)
+ip4_full_reass_init_function (vlib_main_t * vm)
 {
-  ip4_reass_main_t *rm = &ip4_reass_main;
+  ip4_full_reass_main_t *rm = &ip4_full_reass_main;
   clib_error_t *error = 0;
   u32 nbuckets;
   vlib_node_t *node;
@@ -1420,44 +1434,44 @@
   rm->vlib_main = vm;
 
   vec_validate (rm->per_thread_data, vlib_num_workers ());
-  ip4_reass_per_thread_t *rt;
+  ip4_full_reass_per_thread_t *rt;
   vec_foreach (rt, rm->per_thread_data)
   {
     clib_spinlock_init (&rt->lock);
     pool_alloc (rt->pool, rm->max_reass_n);
   }
 
-  node = vlib_get_node_by_name (vm, (u8 *) "ip4-reassembly-expire-walk");
+  node = vlib_get_node_by_name (vm, (u8 *) "ip4-full-reassembly-expire-walk");
   ASSERT (node);
-  rm->ip4_reass_expire_node_idx = node->index;
+  rm->ip4_full_reass_expire_node_idx = node->index;
 
-  ip4_reass_set_params (IP4_REASS_TIMEOUT_DEFAULT_MS,
-			IP4_REASS_MAX_REASSEMBLIES_DEFAULT,
-			IP4_REASS_MAX_REASSEMBLY_LENGTH_DEFAULT,
-			IP4_REASS_EXPIRE_WALK_INTERVAL_DEFAULT_MS);
+  ip4_full_reass_set_params (IP4_REASS_TIMEOUT_DEFAULT_MS,
+			     IP4_REASS_MAX_REASSEMBLIES_DEFAULT,
+			     IP4_REASS_MAX_REASSEMBLY_LENGTH_DEFAULT,
+			     IP4_REASS_EXPIRE_WALK_INTERVAL_DEFAULT_MS);
 
-  nbuckets = ip4_reass_get_nbuckets ();
-  clib_bihash_init_16_8 (&rm->hash, "ip4-reass", nbuckets, nbuckets * 1024);
+  nbuckets = ip4_full_reass_get_nbuckets ();
+  clib_bihash_init_16_8 (&rm->hash, "ip4-dr", nbuckets, nbuckets * 1024);
 
   node = vlib_get_node_by_name (vm, (u8 *) "ip4-drop");
   ASSERT (node);
   rm->ip4_drop_idx = node->index;
 
-  rm->fq_index = vlib_frame_queue_main_init (ip4_reass_node.index, 0);
+  rm->fq_index = vlib_frame_queue_main_init (ip4_full_reass_node.index, 0);
   rm->fq_feature_index =
-    vlib_frame_queue_main_init (ip4_reass_node_feature.index, 0);
+    vlib_frame_queue_main_init (ip4_full_reass_node_feature.index, 0);
 
   return error;
 }
 
-VLIB_INIT_FUNCTION (ip4_reass_init_function);
+VLIB_INIT_FUNCTION (ip4_full_reass_init_function);
 #endif /* CLIB_MARCH_VARIANT */
 
 static uword
-ip4_reass_walk_expired (vlib_main_t * vm,
-			vlib_node_runtime_t * node, vlib_frame_t * f)
+ip4_full_reass_walk_expired (vlib_main_t * vm,
+			     vlib_node_runtime_t * node, vlib_frame_t * f)
 {
-  ip4_reass_main_t *rm = &ip4_reass_main;
+  ip4_full_reass_main_t *rm = &ip4_full_reass_main;
   uword event_type, *event_data = 0;
 
   while (true)
@@ -1481,7 +1495,7 @@
 	}
       f64 now = vlib_time_now (vm);
 
-      ip4_reass_t *reass;
+      ip4_full_reass_t *reass;
       int *pool_indexes_to_free = NULL;
 
       uword thread_index = 0;
@@ -1489,7 +1503,8 @@
       const uword nthreads = vlib_num_workers () + 1;
       for (thread_index = 0; thread_index < nthreads; ++thread_index)
 	{
-	  ip4_reass_per_thread_t *rt = &rm->per_thread_data[thread_index];
+	  ip4_full_reass_per_thread_t *rt =
+	    &rm->per_thread_data[thread_index];
 	  clib_spinlock_lock (&rt->lock);
 
 	  vec_reset_length (pool_indexes_to_free);
@@ -1506,9 +1521,9 @@
           /* *INDENT-OFF* */
           vec_foreach (i, pool_indexes_to_free)
           {
-            ip4_reass_t *reass = pool_elt_at_index (rt->pool, i[0]);
-            ip4_reass_drop_all (vm, node, rm, reass);
-            ip4_reass_free (vm, rm, rt, reass);
+            ip4_full_reass_t *reass = pool_elt_at_index (rt->pool, i[0]);
+            ip4_full_reass_drop_all (vm, node, rm, reass);
+            ip4_full_reass_free (rm, rt, reass);
           }
           /* *INDENT-ON* */
 
@@ -1526,24 +1541,26 @@
 }
 
 /* *INDENT-OFF* */
-VLIB_REGISTER_NODE (ip4_reass_expire_node) = {
-    .function = ip4_reass_walk_expired,
+VLIB_REGISTER_NODE (ip4_full_reass_expire_node) = {
+    .function = ip4_full_reass_walk_expired,
     .type = VLIB_NODE_TYPE_PROCESS,
-    .name = "ip4-reassembly-expire-walk",
-    .format_trace = format_ip4_reass_trace,
-    .n_errors = ARRAY_LEN (ip4_reassembly_error_strings),
-    .error_strings = ip4_reassembly_error_strings,
+    .name = "ip4-full-reassembly-expire-walk",
+    .format_trace = format_ip4_full_reass_trace,
+    .n_errors = ARRAY_LEN (ip4_full_reass_error_strings),
+    .error_strings = ip4_full_reass_error_strings,
 
 };
 /* *INDENT-ON* */
 
 static u8 *
-format_ip4_reass_key (u8 * s, va_list * args)
+format_ip4_full_reass_key (u8 * s, va_list * args)
 {
-  ip4_reass_key_t *key = va_arg (*args, ip4_reass_key_t *);
-  s = format (s, "xx_id: %u, src: %U, dst: %U, frag_id: %u, proto: %u",
-	      key->xx_id, format_ip4_address, &key->src, format_ip4_address,
-	      &key->dst, clib_net_to_host_u16 (key->frag_id), key->proto);
+  ip4_full_reass_key_t *key = va_arg (*args, ip4_full_reass_key_t *);
+  s =
+    format (s,
+	    "xx_id: %u, src: %U, dst: %U, frag_id: %u, proto: %u",
+	    key->xx_id, format_ip4_address, &key->src, format_ip4_address,
+	    &key->dst, clib_net_to_host_u16 (key->frag_id), key->proto);
   return s;
 }
 
@@ -1551,26 +1568,28 @@
 format_ip4_reass (u8 * s, va_list * args)
 {
   vlib_main_t *vm = va_arg (*args, vlib_main_t *);
-  ip4_reass_t *reass = va_arg (*args, ip4_reass_t *);
+  ip4_full_reass_t *reass = va_arg (*args, ip4_full_reass_t *);
 
   s = format (s, "ID: %lu, key: %U\n  first_bi: %u, data_len: %u, "
 	      "last_packet_octet: %u, trace_op_counter: %u\n",
-	      reass->id, format_ip4_reass_key, &reass->key, reass->first_bi,
-	      reass->data_len, reass->last_packet_octet,
-	      reass->trace_op_counter);
+	      reass->id, format_ip4_full_reass_key, &reass->key,
+	      reass->first_bi, reass->data_len,
+	      reass->last_packet_octet, reass->trace_op_counter);
+
   u32 bi = reass->first_bi;
   u32 counter = 0;
   while (~0 != bi)
     {
       vlib_buffer_t *b = vlib_get_buffer (vm, bi);
       vnet_buffer_opaque_t *vnb = vnet_buffer (b);
-      s = format (s, "  #%03u: range: [%u, %u], bi: %u, off: %d, len: %u, "
-		  "fragment[%u, %u]\n",
-		  counter, vnb->ip.reass.range_first,
-		  vnb->ip.reass.range_last, bi,
-		  ip4_reass_buffer_get_data_offset (b),
-		  ip4_reass_buffer_get_data_len (b),
-		  vnb->ip.reass.fragment_first, vnb->ip.reass.fragment_last);
+      s =
+	format (s,
+		"  #%03u: range: [%u, %u], bi: %u, off: %d, len: %u, "
+		"fragment[%u, %u]\n", counter, vnb->ip.reass.range_first,
+		vnb->ip.reass.range_last, bi,
+		ip4_full_reass_buffer_get_data_offset (b),
+		ip4_full_reass_buffer_get_data_len (b),
+		vnb->ip.reass.fragment_first, vnb->ip.reass.fragment_last);
       if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
 	{
 	  bi = b->next_buffer;
@@ -1588,7 +1607,7 @@
 		unformat_input_t * input,
 		CLIB_UNUSED (vlib_cli_command_t * lmd))
 {
-  ip4_reass_main_t *rm = &ip4_reass_main;
+  ip4_full_reass_main_t *rm = &ip4_full_reass_main;
 
   vlib_cli_output (vm, "---------------------");
   vlib_cli_output (vm, "IP4 reassembly status");
@@ -1600,12 +1619,12 @@
     }
 
   u32 sum_reass_n = 0;
-  ip4_reass_t *reass;
+  ip4_full_reass_t *reass;
   uword thread_index;
   const uword nthreads = vlib_num_workers () + 1;
   for (thread_index = 0; thread_index < nthreads; ++thread_index)
     {
-      ip4_reass_per_thread_t *rt = &rm->per_thread_data[thread_index];
+      ip4_full_reass_per_thread_t *rt = &rm->per_thread_data[thread_index];
       clib_spinlock_lock (&rt->lock);
       if (details)
 	{
@@ -1628,68 +1647,68 @@
 }
 
 /* *INDENT-OFF* */
-VLIB_CLI_COMMAND (show_ip4_reassembly_cmd, static) = {
-    .path = "show ip4-reassembly",
-    .short_help = "show ip4-reassembly [details]",
+VLIB_CLI_COMMAND (show_ip4_full_reass_cmd, static) = {
+    .path = "show ip4-full-reassembly",
+    .short_help = "show ip4-full-reassembly [details]",
     .function = show_ip4_reass,
 };
 /* *INDENT-ON* */
 
 #ifndef CLIB_MARCH_VARIANT
 vnet_api_error_t
-ip4_reass_enable_disable (u32 sw_if_index, u8 enable_disable)
+ip4_full_reass_enable_disable (u32 sw_if_index, u8 enable_disable)
 {
   return vnet_feature_enable_disable ("ip4-unicast",
-				      "ip4-reassembly-feature", sw_if_index,
-				      enable_disable, 0, 0);
+				      "ip4-full-reassembly-feature",
+				      sw_if_index, enable_disable, 0, 0);
 }
 #endif /* CLIB_MARCH_VARIANT */
 
 
-#define foreach_ip4_reassembly_handoff_error                       \
+#define foreach_ip4_full_reass_handoff_error                       \
 _(CONGESTION_DROP, "congestion drop")
 
 
 typedef enum
 {
-#define _(sym,str) IP4_REASSEMBLY_HANDOFF_ERROR_##sym,
-  foreach_ip4_reassembly_handoff_error
+#define _(sym,str) IP4_FULL_REASS_HANDOFF_ERROR_##sym,
+  foreach_ip4_full_reass_handoff_error
 #undef _
-    IP4_REASSEMBLY_HANDOFF_N_ERROR,
-} ip4_reassembly_handoff_error_t;
+    IP4_FULL_REASS_HANDOFF_N_ERROR,
+} ip4_full_reass_handoff_error_t;
 
-static char *ip4_reassembly_handoff_error_strings[] = {
+static char *ip4_full_reass_handoff_error_strings[] = {
 #define _(sym,string) string,
-  foreach_ip4_reassembly_handoff_error
+  foreach_ip4_full_reass_handoff_error
 #undef _
 };
 
 typedef struct
 {
   u32 next_worker_index;
-} ip4_reassembly_handoff_trace_t;
+} ip4_full_reass_handoff_trace_t;
 
 static u8 *
-format_ip4_reassembly_handoff_trace (u8 * s, va_list * args)
+format_ip4_full_reass_handoff_trace (u8 * s, va_list * args)
 {
   CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
-  ip4_reassembly_handoff_trace_t *t =
-    va_arg (*args, ip4_reassembly_handoff_trace_t *);
+  ip4_full_reass_handoff_trace_t *t =
+    va_arg (*args, ip4_full_reass_handoff_trace_t *);
 
   s =
-    format (s, "ip4-reassembly-handoff: next-worker %d",
+    format (s, "ip4-full-reassembly-handoff: next-worker %d",
 	    t->next_worker_index);
 
   return s;
 }
 
 always_inline uword
-ip4_reassembly_handoff_node_inline (vlib_main_t * vm,
+ip4_full_reass_handoff_node_inline (vlib_main_t * vm,
 				    vlib_node_runtime_t * node,
 				    vlib_frame_t * frame, bool is_feature)
 {
-  ip4_reass_main_t *rm = &ip4_reass_main;
+  ip4_full_reass_main_t *rm = &ip4_full_reass_main;
 
   vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
   u32 n_enq, n_left_from, *from;
@@ -1716,7 +1735,7 @@
 	  ((node->flags & VLIB_NODE_FLAG_TRACE)
 	   && (b[0]->flags & VLIB_BUFFER_IS_TRACED)))
 	{
-	  ip4_reassembly_handoff_trace_t *t =
+	  ip4_full_reass_handoff_trace_t *t =
 	    vlib_add_trace (vm, node, b[0], sizeof (*t));
 	  t->next_worker_index = ti[0];
 	}
@@ -1731,27 +1750,27 @@
 
   if (n_enq < frame->n_vectors)
     vlib_node_increment_counter (vm, node->node_index,
-				 IP4_REASSEMBLY_HANDOFF_ERROR_CONGESTION_DROP,
+				 IP4_FULL_REASS_HANDOFF_ERROR_CONGESTION_DROP,
 				 frame->n_vectors - n_enq);
   return frame->n_vectors;
 }
 
-VLIB_NODE_FN (ip4_reassembly_handoff_node) (vlib_main_t * vm,
+VLIB_NODE_FN (ip4_full_reass_handoff_node) (vlib_main_t * vm,
 					    vlib_node_runtime_t * node,
 					    vlib_frame_t * frame)
 {
-  return ip4_reassembly_handoff_node_inline (vm, node, frame,
+  return ip4_full_reass_handoff_node_inline (vm, node, frame,
 					     false /* is_feature */ );
 }
 
 
 /* *INDENT-OFF* */
-VLIB_REGISTER_NODE (ip4_reassembly_handoff_node) = {
-  .name = "ip4-reassembly-handoff",
+VLIB_REGISTER_NODE (ip4_full_reass_handoff_node) = {
+  .name = "ip4-full-reassembly-handoff",
   .vector_size = sizeof (u32),
-  .n_errors = ARRAY_LEN(ip4_reassembly_handoff_error_strings),
-  .error_strings = ip4_reassembly_handoff_error_strings,
-  .format_trace = format_ip4_reassembly_handoff_trace,
+  .n_errors = ARRAY_LEN(ip4_full_reass_handoff_error_strings),
+  .error_strings = ip4_full_reass_handoff_error_strings,
+  .format_trace = format_ip4_full_reass_handoff_trace,
 
   .n_next_nodes = 1,
 
@@ -1763,24 +1782,24 @@
 
 
 /* *INDENT-OFF* */
-VLIB_NODE_FN (ip4_reassembly_feature_handoff_node) (vlib_main_t * vm,
+VLIB_NODE_FN (ip4_full_reass_feature_handoff_node) (vlib_main_t * vm,
 						    vlib_node_runtime_t *
 						    node,
 						    vlib_frame_t * frame)
 {
-  return ip4_reassembly_handoff_node_inline (vm, node, frame,
+  return ip4_full_reass_handoff_node_inline (vm, node, frame,
 					     true /* is_feature */ );
 }
 /* *INDENT-ON* */
 
 
 /* *INDENT-OFF* */
-VLIB_REGISTER_NODE (ip4_reassembly_feature_handoff_node) = {
-  .name = "ip4-reass-feature-hoff",
+VLIB_REGISTER_NODE (ip4_full_reass_feature_handoff_node) = {
+  .name = "ip4-full-reass-feature-hoff",
   .vector_size = sizeof (u32),
-  .n_errors = ARRAY_LEN(ip4_reassembly_handoff_error_strings),
-  .error_strings = ip4_reassembly_handoff_error_strings,
-  .format_trace = format_ip4_reassembly_handoff_trace,
+  .n_errors = ARRAY_LEN(ip4_full_reass_handoff_error_strings),
+  .error_strings = ip4_full_reass_handoff_error_strings,
+  .format_trace = format_ip4_full_reass_handoff_trace,
 
   .n_next_nodes = 1,
 
diff --git a/src/vnet/ip/ip4_reassembly.h b/src/vnet/ip/reass/ip4_full_reass.h
similarity index 65%
rename from src/vnet/ip/ip4_reassembly.h
rename to src/vnet/ip/reass/ip4_full_reass.h
index 4ceb0ab..b8419eb 100644
--- a/src/vnet/ip/ip4_reassembly.h
+++ b/src/vnet/ip/reass/ip4_full_reass.h
@@ -20,8 +20,8 @@
  * This file contains the source code for IPv4 reassembly.
  */
 
-#ifndef __included_ip4_reassembly_h__
-#define __included_ip4_reassembly_h__
+#ifndef __included_ip4_full_reass_h__
+#define __included_ip4_full_reass_h__
 
 #include <vnet/api_errno.h>
 #include <vnet/vnet.h>
@@ -29,21 +29,21 @@
 /**
  * @brief set ip4 reassembly configuration
  */
-vnet_api_error_t ip4_reass_set (u32 timeout_ms, u32 max_reassemblies,
-				u32 max_reassembly_length,
-				u32 expire_walk_interval_ms);
+vnet_api_error_t ip4_full_reass_set (u32 timeout_ms, u32 max_reassemblies,
+				     u32 max_reassembly_length,
+				     u32 expire_walk_interval_ms);
 
 /**
  * @brief get ip4 reassembly configuration
  */
-vnet_api_error_t ip4_reass_get (u32 * timeout_ms, u32 * max_reassemblies,
-				u32 * max_reassembly_length,
-				u32 * expire_walk_interval_ms);
+vnet_api_error_t ip4_full_reass_get (u32 * timeout_ms, u32 * max_reassemblies,
+				     u32 * max_reassembly_length,
+				     u32 * expire_walk_interval_ms);
 
-vnet_api_error_t ip4_reass_enable_disable (u32 sw_if_index,
-					   u8 enable_disable);
+vnet_api_error_t ip4_full_reass_enable_disable (u32 sw_if_index,
+						u8 enable_disable);
 
-#endif /* __included_ip4_reassembly_h */
+#endif /* __included_ip4_full_reass_h__ */
 
 /*
  * fd.io coding-style-patch-verification: ON
diff --git a/src/vnet/ip/ip6_reassembly.c b/src/vnet/ip/reass/ip6_full_reass.c
similarity index 65%
rename from src/vnet/ip/ip6_reassembly.c
rename to src/vnet/ip/reass/ip6_full_reass.c
index 4f27304..7b11e78 100644
--- a/src/vnet/ip/ip6_reassembly.c
+++ b/src/vnet/ip/reass/ip6_full_reass.c
@@ -15,32 +15,32 @@
 
 /**
  * @file
- * @brief IPv6 Reassembly.
+ * @brief IPv6 Full Reassembly.
  *
- * This file contains the source code for IPv6 reassembly.
+ * This file contains the source code for IPv6 full reassembly.
  */
 
 #include <vppinfra/vec.h>
 #include <vnet/vnet.h>
 #include <vnet/ip/ip.h>
 #include <vppinfra/bihash_48_8.h>
-#include <vnet/ip/ip6_reassembly.h>
+#include <vnet/ip/reass/ip6_full_reass.h>
 
 #define MSEC_PER_SEC 1000
-#define IP6_REASS_TIMEOUT_DEFAULT_MS 100
-#define IP6_REASS_EXPIRE_WALK_INTERVAL_DEFAULT_MS 10000	// 10 seconds default
-#define IP6_REASS_MAX_REASSEMBLIES_DEFAULT 1024
-#define IP6_REASS_MAX_REASSEMBLY_LENGTH_DEFAULT 3
-#define IP6_REASS_HT_LOAD_FACTOR (0.75)
+#define IP6_FULL_REASS_TIMEOUT_DEFAULT_MS 100
+#define IP6_FULL_REASS_EXPIRE_WALK_INTERVAL_DEFAULT_MS 10000	// 10 seconds default
+#define IP6_FULL_REASS_MAX_REASSEMBLIES_DEFAULT 1024
+#define IP6_FULL_REASS_MAX_REASSEMBLY_LENGTH_DEFAULT 3
+#define IP6_FULL_REASS_HT_LOAD_FACTOR (0.75)
 
 typedef enum
 {
-  IP6_REASS_RC_OK,
-  IP6_REASS_RC_INTERNAL_ERROR,
-  IP6_REASS_RC_TOO_MANY_FRAGMENTS,
-  IP6_REASS_RC_NO_BUF,
-  IP6_REASS_RC_HANDOFF,
-} ip6_reass_rc_t;
+  IP6_FULL_REASS_RC_OK,
+  IP6_FULL_REASS_RC_INTERNAL_ERROR,
+  IP6_FULL_REASS_RC_TOO_MANY_FRAGMENTS,
+  IP6_FULL_REASS_RC_NO_BUF,
+  IP6_FULL_REASS_RC_HANDOFF,
+} ip6_full_reass_rc_t;
 
 typedef struct
 {
@@ -57,7 +57,7 @@
     };
     u64 as_u64[6];
   };
-} ip6_reass_key_t;
+} ip6_full_reass_key_t;
 
 typedef union
 {
@@ -67,38 +67,39 @@
     u32 memory_owner_thread_index;
   };
   u64 as_u64;
-} ip6_reass_val_t;
+} ip6_full_reass_val_t;
 
 typedef union
 {
   struct
   {
-    ip6_reass_key_t k;
-    ip6_reass_val_t v;
+    ip6_full_reass_key_t k;
+    ip6_full_reass_val_t v;
   };
   clib_bihash_kv_48_8_t kv;
-} ip6_reass_kv_t;
+} ip6_full_reass_kv_t;
 
 
 always_inline u32
-ip6_reass_buffer_get_data_offset (vlib_buffer_t * b)
+ip6_full_reass_buffer_get_data_offset (vlib_buffer_t * b)
 {
   vnet_buffer_opaque_t *vnb = vnet_buffer (b);
   return vnb->ip.reass.range_first - vnb->ip.reass.fragment_first;
 }
 
 always_inline u16
-ip6_reass_buffer_get_data_len (vlib_buffer_t * b)
+ip6_full_reass_buffer_get_data_len (vlib_buffer_t * b)
 {
   vnet_buffer_opaque_t *vnb = vnet_buffer (b);
   return clib_min (vnb->ip.reass.range_last, vnb->ip.reass.fragment_last) -
-    (vnb->ip.reass.fragment_first + ip6_reass_buffer_get_data_offset (b)) + 1;
+    (vnb->ip.reass.fragment_first +
+     ip6_full_reass_buffer_get_data_offset (b)) + 1;
 }
 
 typedef struct
 {
   // hash table key
-  ip6_reass_key_t key;
+  ip6_full_reass_key_t key;
   // time when last packet was received
   f64 last_heard;
   // internal id of this reassembly
@@ -124,15 +125,15 @@
   // thread which received fragment with offset 0 and which sends out the
   // completed reassembly
   u32 sendout_thread_index;
-} ip6_reass_t;
+} ip6_full_reass_t;
 
 typedef struct
 {
-  ip6_reass_t *pool;
+  ip6_full_reass_t *pool;
   u32 reass_n;
   u32 id_counter;
   clib_spinlock_t lock;
-} ip6_reass_per_thread_t;
+} ip6_full_reass_per_thread_t;
 
 typedef struct
 {
@@ -149,7 +150,7 @@
   clib_bihash_48_8_t hash;
 
   // per-thread data
-  ip6_reass_per_thread_t *per_thread_data;
+  ip6_full_reass_per_thread_t *per_thread_data;
 
   // convenience
   vlib_main_t *vlib_main;
@@ -157,28 +158,28 @@
   // node index of ip6-drop node
   u32 ip6_drop_idx;
   u32 ip6_icmp_error_idx;
-  u32 ip6_reass_expire_node_idx;
+  u32 ip6_full_reass_expire_node_idx;
 
   /** Worker handoff */
   u32 fq_index;
   u32 fq_feature_index;
 
-} ip6_reass_main_t;
+} ip6_full_reass_main_t;
 
-extern ip6_reass_main_t ip6_reass_main;
+extern ip6_full_reass_main_t ip6_full_reass_main;
 
 #ifndef CLIB_MARCH_VARIANT
-ip6_reass_main_t ip6_reass_main;
+ip6_full_reass_main_t ip6_full_reass_main;
 #endif /* CLIB_MARCH_VARIANT */
 
 typedef enum
 {
-  IP6_REASSEMBLY_NEXT_INPUT,
-  IP6_REASSEMBLY_NEXT_DROP,
-  IP6_REASSEMBLY_NEXT_ICMP_ERROR,
-  IP6_REASSEMBLY_NEXT_HANDOFF,
-  IP6_REASSEMBLY_N_NEXT,
-} ip6_reass_next_t;
+  IP6_FULL_REASSEMBLY_NEXT_INPUT,
+  IP6_FULL_REASSEMBLY_NEXT_DROP,
+  IP6_FULL_REASSEMBLY_NEXT_ICMP_ERROR,
+  IP6_FULL_REASSEMBLY_NEXT_HANDOFF,
+  IP6_FULL_REASSEMBLY_N_NEXT,
+} ip6_full_reass_next_t;
 
 typedef enum
 {
@@ -189,7 +190,7 @@
   ICMP_ERROR_FL_NOT_MULT_8,
   FINALIZE,
   HANDOFF,
-} ip6_reass_trace_operation_e;
+} ip6_full_reass_trace_operation_e;
 
 typedef struct
 {
@@ -199,50 +200,52 @@
   i32 data_offset;
   u32 data_len;
   u32 first_bi;
-} ip6_reass_range_trace_t;
+} ip6_full_reass_range_trace_t;
 
 typedef struct
 {
-  ip6_reass_trace_operation_e action;
+  ip6_full_reass_trace_operation_e action;
   u32 reass_id;
-  ip6_reass_range_trace_t trace_range;
+  ip6_full_reass_range_trace_t trace_range;
   u32 op_id;
   u32 fragment_first;
   u32 fragment_last;
   u32 total_data_len;
   u32 thread_id;
   u32 thread_id_to;
-} ip6_reass_trace_t;
+} ip6_full_reass_trace_t;
 
 static void
-ip6_reass_trace_details (vlib_main_t * vm, u32 bi,
-			 ip6_reass_range_trace_t * trace)
+ip6_full_reass_trace_details (vlib_main_t * vm, u32 bi,
+			      ip6_full_reass_range_trace_t * trace)
 {
   vlib_buffer_t *b = vlib_get_buffer (vm, bi);
   vnet_buffer_opaque_t *vnb = vnet_buffer (b);
   trace->range_first = vnb->ip.reass.range_first;
   trace->range_last = vnb->ip.reass.range_last;
-  trace->data_offset = ip6_reass_buffer_get_data_offset (b);
-  trace->data_len = ip6_reass_buffer_get_data_len (b);
+  trace->data_offset = ip6_full_reass_buffer_get_data_offset (b);
+  trace->data_len = ip6_full_reass_buffer_get_data_len (b);
   trace->range_bi = bi;
 }
 
 static u8 *
-format_ip6_reass_range_trace (u8 * s, va_list * args)
+format_ip6_full_reass_range_trace (u8 * s, va_list * args)
 {
-  ip6_reass_range_trace_t *trace = va_arg (*args, ip6_reass_range_trace_t *);
-  s = format (s, "range: [%u, %u], off %d, len %u, bi %u", trace->range_first,
-	      trace->range_last, trace->data_offset, trace->data_len,
-	      trace->range_bi);
+  ip6_full_reass_range_trace_t *trace =
+    va_arg (*args, ip6_full_reass_range_trace_t *);
+  s =
+    format (s, "range: [%u, %u], off %d, len %u, bi %u", trace->range_first,
+	    trace->range_last, trace->data_offset, trace->data_len,
+	    trace->range_bi);
   return s;
 }
 
 static u8 *
-format_ip6_reass_trace (u8 * s, va_list * args)
+format_ip6_full_reass_trace (u8 * s, va_list * args)
 {
   CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
-  ip6_reass_trace_t *t = va_arg (*args, ip6_reass_trace_t *);
+  ip6_full_reass_trace_t *t = va_arg (*args, ip6_full_reass_trace_t *);
   u32 indent = 0;
   if (~0 != t->reass_id)
     {
@@ -256,21 +259,21 @@
     {
     case RANGE_NEW:
       s = format (s, "\n%Unew %U", format_white_space, indent,
-		  format_ip6_reass_range_trace, &t->trace_range);
+		  format_ip6_full_reass_range_trace, &t->trace_range);
       break;
     case RANGE_OVERLAP:
       s = format (s, "\n%Uoverlap %U", format_white_space, indent,
-		  format_ip6_reass_range_trace, &t->trace_range);
+		  format_ip6_full_reass_range_trace, &t->trace_range);
       break;
     case ICMP_ERROR_FL_TOO_BIG:
       s = format (s, "\n%Uicmp-error - frag_len > 65535 %U",
-		  format_white_space, indent, format_ip6_reass_range_trace,
-		  &t->trace_range);
+		  format_white_space, indent,
+		  format_ip6_full_reass_range_trace, &t->trace_range);
       break;
     case ICMP_ERROR_FL_NOT_MULT_8:
       s = format (s, "\n%Uicmp-error - frag_len mod 8 != 0 %U",
-		  format_white_space, indent, format_ip6_reass_range_trace,
-		  &t->trace_range);
+		  format_white_space, indent,
+		  format_ip6_full_reass_range_trace, &t->trace_range);
       break;
     case ICMP_ERROR_RT_EXCEEDED:
       s = format (s, "\n%Uicmp-error - reassembly time exceeded",
@@ -289,27 +292,36 @@
 }
 
 static void
-ip6_reass_add_trace (vlib_main_t * vm, vlib_node_runtime_t * node,
-		     ip6_reass_main_t * rm, u32 reass_id, u32 op_id,
-		     u32 bi, u32 first_bi, u32 data_len,
-		     ip6_reass_trace_operation_e action, u32 thread_id_to)
+ip6_full_reass_add_trace (vlib_main_t * vm, vlib_node_runtime_t * node,
+			  ip6_full_reass_main_t * rm,
+			  ip6_full_reass_t * reass, u32 bi,
+			  ip6_full_reass_trace_operation_e action,
+			  u32 thread_id_to)
 {
   vlib_buffer_t *b = vlib_get_buffer (vm, bi);
   vnet_buffer_opaque_t *vnb = vnet_buffer (b);
-  ip6_reass_trace_t *t = vlib_add_trace (vm, node, b, sizeof (t[0]));
-  t->reass_id = reass_id;
+  ip6_full_reass_trace_t *t = vlib_add_trace (vm, node, b, sizeof (t[0]));
+  if (reass)
+    {
+      t->reass_id = reass->id;
+      t->op_id = reass->trace_op_counter;
+      t->trace_range.first_bi = reass->first_bi;
+      t->total_data_len = reass->data_len;
+      ++reass->trace_op_counter;
+    }
+  else
+    {
+      t->reass_id = ~0;
+    }
   t->action = action;
-  ip6_reass_trace_details (vm, bi, &t->trace_range);
-  t->op_id = op_id;
   t->thread_id = vm->thread_index;
   t->thread_id_to = thread_id_to;
+  ip6_full_reass_trace_details (vm, bi, &t->trace_range);
   t->fragment_first = vnb->ip.reass.fragment_first;
   t->fragment_last = vnb->ip.reass.fragment_last;
-  t->trace_range.first_bi = first_bi;
-  t->total_data_len = data_len;
 #if 0
   static u8 *s = NULL;
-  s = format (s, "%U", format_ip6_reass_trace, NULL, NULL, t);
+  s = format (s, "%U", format_ip6_full_reass_trace, NULL, NULL, t);
   printf ("%.*s\n", vec_len (s), s);
   fflush (stdout);
   vec_reset_length (s);
@@ -317,15 +329,17 @@
 }
 
 always_inline void
-ip6_reass_free_ctx (ip6_reass_per_thread_t * rt, ip6_reass_t * reass)
+ip6_full_reass_free_ctx (ip6_full_reass_per_thread_t * rt,
+			 ip6_full_reass_t * reass)
 {
   pool_put (rt->pool, reass);
   --rt->reass_n;
 }
 
 always_inline void
-ip6_reass_free (ip6_reass_main_t * rm, ip6_reass_per_thread_t * rt,
-		ip6_reass_t * reass)
+ip6_full_reass_free (ip6_full_reass_main_t * rm,
+		     ip6_full_reass_per_thread_t * rt,
+		     ip6_full_reass_t * reass)
 {
   clib_bihash_kv_48_8_t kv;
   kv.key[0] = reass->key.as_u64[0];
@@ -335,12 +349,12 @@
   kv.key[4] = reass->key.as_u64[4];
   kv.key[5] = reass->key.as_u64[5];
   clib_bihash_add_del_48_8 (&rm->hash, &kv, 0);
-  ip6_reass_free_ctx (rt, reass);
+  ip6_full_reass_free_ctx (rt, reass);
 }
 
 always_inline void
-ip6_reass_drop_all (vlib_main_t * vm, vlib_node_runtime_t * node,
-		    ip6_reass_main_t * rm, ip6_reass_t * reass)
+ip6_full_reass_drop_all (vlib_main_t * vm, vlib_node_runtime_t * node,
+			 ip6_full_reass_main_t * rm, ip6_full_reass_t * reass)
 {
   u32 range_bi = reass->first_bi;
   vlib_buffer_t *range_b;
@@ -401,9 +415,9 @@
 }
 
 always_inline void
-ip6_reass_on_timeout (vlib_main_t * vm, vlib_node_runtime_t * node,
-		      ip6_reass_main_t * rm, ip6_reass_t * reass,
-		      u32 * icmp_bi)
+ip6_full_reass_on_timeout (vlib_main_t * vm, vlib_node_runtime_t * node,
+			   ip6_full_reass_main_t * rm,
+			   ip6_full_reass_t * reass, u32 * icmp_bi)
 {
   if (~0 == reass->first_bi)
     {
@@ -417,11 +431,8 @@
 	  *icmp_bi = reass->first_bi;
 	  if (PREDICT_FALSE (b->flags & VLIB_BUFFER_IS_TRACED))
 	    {
-	      ip6_reass_add_trace (vm, node, rm, reass->id,
-				   reass->trace_op_counter, reass->first_bi,
-				   reass->first_bi, reass->data_len,
-				   ICMP_ERROR_RT_EXCEEDED, ~0);
-	      ++reass->trace_op_counter;
+	      ip6_full_reass_add_trace (vm, node, rm, reass, reass->first_bi,
+					ICMP_ERROR_RT_EXCEEDED, ~0);
 	    }
 	  // fragment with offset zero received - send icmp message back
 	  if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
@@ -439,15 +450,17 @@
 				       0);
 	}
     }
-  ip6_reass_drop_all (vm, node, rm, reass);
+  ip6_full_reass_drop_all (vm, node, rm, reass);
 }
 
-always_inline ip6_reass_t *
-ip6_reass_find_or_create (vlib_main_t * vm, vlib_node_runtime_t * node,
-			  ip6_reass_main_t * rm, ip6_reass_per_thread_t * rt,
-			  ip6_reass_kv_t * kv, u32 * icmp_bi, u8 * do_handoff)
+always_inline ip6_full_reass_t *
+ip6_full_reass_find_or_create (vlib_main_t * vm, vlib_node_runtime_t * node,
+			       ip6_full_reass_main_t * rm,
+			       ip6_full_reass_per_thread_t * rt,
+			       ip6_full_reass_kv_t * kv, u32 * icmp_bi,
+			       u8 * do_handoff)
 {
-  ip6_reass_t *reass;
+  ip6_full_reass_t *reass;
   f64 now;
 
 again:
@@ -470,8 +483,8 @@
 
       if (now > reass->last_heard + rm->timeout)
 	{
-	  ip6_reass_on_timeout (vm, node, rm, reass, icmp_bi);
-	  ip6_reass_free (rm, rt, reass);
+	  ip6_full_reass_on_timeout (vm, node, rm, reass, icmp_bi);
+	  ip6_full_reass_free (rm, rt, reass);
 	  reass = NULL;
 	}
     }
@@ -515,7 +528,7 @@
     clib_bihash_add_del_48_8 (&rm->hash, (clib_bihash_kv_48_8_t *) kv, 2);
   if (rv)
     {
-      ip6_reass_free_ctx (rt, reass);
+      ip6_full_reass_free (rm, rt, reass);
       reass = NULL;
       // if other worker created a context already work with the other copy
       if (-2 == rv)
@@ -525,11 +538,12 @@
   return reass;
 }
 
-always_inline ip6_reass_rc_t
-ip6_reass_finalize (vlib_main_t * vm, vlib_node_runtime_t * node,
-		    ip6_reass_main_t * rm, ip6_reass_per_thread_t * rt,
-		    ip6_reass_t * reass, u32 * bi0, u32 * next0, u32 * error0,
-		    bool is_custom_app)
+always_inline ip6_full_reass_rc_t
+ip6_full_reass_finalize (vlib_main_t * vm, vlib_node_runtime_t * node,
+			 ip6_full_reass_main_t * rm,
+			 ip6_full_reass_per_thread_t * rt,
+			 ip6_full_reass_t * reass, u32 * bi0, u32 * next0,
+			 u32 * error0, bool is_custom_app)
 {
   *bi0 = reass->first_bi;
   *error0 = IP6_ERROR_NONE;
@@ -540,7 +554,7 @@
   u32 buf_cnt = 0;
   u32 dropped_cnt = 0;
   u32 *vec_drop_compress = NULL;
-  ip6_reass_rc_t rv = IP6_REASS_RC_OK;
+  ip6_full_reass_rc_t rv = IP6_FULL_REASS_RC_OK;
   do
     {
       u32 tmp_bi = sub_chain_bi;
@@ -549,21 +563,21 @@
       if (!(vnb->ip.reass.range_first >= vnb->ip.reass.fragment_first) &&
 	  !(vnb->ip.reass.range_last > vnb->ip.reass.fragment_first))
 	{
-	  rv = IP6_REASS_RC_INTERNAL_ERROR;
+	  rv = IP6_FULL_REASS_RC_INTERNAL_ERROR;
 	  goto free_buffers_and_return;
 	}
 
-      u32 data_len = ip6_reass_buffer_get_data_len (tmp);
+      u32 data_len = ip6_full_reass_buffer_get_data_len (tmp);
       u32 trim_front = vnet_buffer (tmp)->ip.reass.ip6_frag_hdr_offset +
-	sizeof (*frag_hdr) + ip6_reass_buffer_get_data_offset (tmp);
+	sizeof (*frag_hdr) + ip6_full_reass_buffer_get_data_offset (tmp);
       u32 trim_end =
 	vlib_buffer_length_in_chain (vm, tmp) - trim_front - data_len;
       if (tmp_bi == reass->first_bi)
 	{
 	  /* first buffer - keep ip6 header */
-	  if (0 != ip6_reass_buffer_get_data_offset (tmp))
+	  if (0 != ip6_full_reass_buffer_get_data_offset (tmp))
 	    {
-	      rv = IP6_REASS_RC_INTERNAL_ERROR;
+	      rv = IP6_FULL_REASS_RC_INTERNAL_ERROR;
 	      goto free_buffers_and_return;
 	    }
 	  trim_front = 0;
@@ -572,7 +586,7 @@
 	     sizeof (*frag_hdr));
 	  if (!(vlib_buffer_length_in_chain (vm, tmp) - trim_end > 0))
 	    {
-	      rv = IP6_REASS_RC_INTERNAL_ERROR;
+	      rv = IP6_FULL_REASS_RC_INTERNAL_ERROR;
 	      goto free_buffers_and_return;
 	    }
 	}
@@ -590,7 +604,7 @@
 		  trim_front -= tmp->current_length;
 		  if (!(tmp->flags & VLIB_BUFFER_NEXT_PRESENT))
 		    {
-		      rv = IP6_REASS_RC_INTERNAL_ERROR;
+		      rv = IP6_FULL_REASS_RC_INTERNAL_ERROR;
 		      goto free_buffers_and_return;
 		    }
 		  tmp->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
@@ -622,7 +636,7 @@
 		  keep_data -= tmp->current_length;
 		  if (!(tmp->flags & VLIB_BUFFER_NEXT_PRESENT))
 		    {
-		      rv = IP6_REASS_RC_INTERNAL_ERROR;
+		      rv = IP6_FULL_REASS_RC_INTERNAL_ERROR;
 		      goto free_buffers_and_return;
 		    }
 		}
@@ -633,7 +647,7 @@
 	      vec_add1 (vec_drop_compress, tmp_bi);
 	      if (reass->first_bi == tmp_bi)
 		{
-		  rv = IP6_REASS_RC_INTERNAL_ERROR;
+		  rv = IP6_FULL_REASS_RC_INTERNAL_ERROR;
 		  goto free_buffers_and_return;
 		}
 	      ++dropped_cnt;
@@ -656,14 +670,14 @@
 
   if (!last_b)
     {
-      rv = IP6_REASS_RC_INTERNAL_ERROR;
+      rv = IP6_FULL_REASS_RC_INTERNAL_ERROR;
       goto free_buffers_and_return;
     }
   last_b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
   vlib_buffer_t *first_b = vlib_get_buffer (vm, reass->first_bi);
   if (total_length < first_b->current_length)
     {
-      rv = IP6_REASS_RC_INTERNAL_ERROR;
+      rv = IP6_FULL_REASS_RC_INTERNAL_ERROR;
       goto free_buffers_and_return;
     }
   total_length -= first_b->current_length;
@@ -686,7 +700,7 @@
     }
   if (!((u8 *) frag_hdr - (u8 *) ip == ip6_frag_hdr_offset))
     {
-      rv = IP6_REASS_RC_INTERNAL_ERROR;
+      rv = IP6_FULL_REASS_RC_INTERNAL_ERROR;
       goto free_buffers_and_return;
     }
   memmove (frag_hdr, (u8 *) frag_hdr + sizeof (*frag_hdr),
@@ -698,16 +712,14 @@
 			  sizeof (*ip));
   if (!vlib_buffer_chain_linearize (vm, first_b))
     {
-      rv = IP6_REASS_RC_NO_BUF;
+      rv = IP6_FULL_REASS_RC_NO_BUF;
       goto free_buffers_and_return;
     }
   first_b->flags &= ~VLIB_BUFFER_EXT_HDR_VALID;
   if (PREDICT_FALSE (first_b->flags & VLIB_BUFFER_IS_TRACED))
     {
-      ip6_reass_add_trace (vm, node, rm, reass->id, reass->trace_op_counter,
-			   reass->first_bi, reass->first_bi, reass->data_len,
-			   FINALIZE, ~0);
-      ++reass->trace_op_counter;
+      ip6_full_reass_add_trace (vm, node, rm, reass, reass->first_bi,
+				FINALIZE, ~0);
 #if 0
       // following code does a hexdump of packet fragments to stdout ...
       do
@@ -737,14 +749,14 @@
     }
   if (!is_custom_app)
     {
-      *next0 = IP6_REASSEMBLY_NEXT_INPUT;
+      *next0 = IP6_FULL_REASSEMBLY_NEXT_INPUT;
     }
   else
     {
       *next0 = reass->next_index;
     }
   vnet_buffer (first_b)->ip.reass.estimated_mtu = reass->min_fragment_length;
-  ip6_reass_free (rm, rt, reass);
+  ip6_full_reass_free (rm, rt, reass);
   reass = NULL;
 free_buffers_and_return:
   vlib_buffer_free (vm, vec_drop_compress, vec_len (vec_drop_compress));
@@ -753,10 +765,11 @@
 }
 
 always_inline void
-ip6_reass_insert_range_in_chain (vlib_main_t * vm, ip6_reass_main_t * rm,
-				 ip6_reass_per_thread_t * rt,
-				 ip6_reass_t * reass, u32 prev_range_bi,
-				 u32 new_next_bi)
+ip6_full_reass_insert_range_in_chain (vlib_main_t * vm,
+				      ip6_full_reass_main_t * rm,
+				      ip6_full_reass_per_thread_t * rt,
+				      ip6_full_reass_t * reass,
+				      u32 prev_range_bi, u32 new_next_bi)
 {
 
   vlib_buffer_t *new_next_b = vlib_get_buffer (vm, new_next_bi);
@@ -776,15 +789,16 @@
 	}
       reass->first_bi = new_next_bi;
     }
-  reass->data_len += ip6_reass_buffer_get_data_len (new_next_b);
+  reass->data_len += ip6_full_reass_buffer_get_data_len (new_next_b);
 }
 
-always_inline ip6_reass_rc_t
-ip6_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
-		  ip6_reass_main_t * rm, ip6_reass_per_thread_t * rt,
-		  ip6_reass_t * reass, u32 * bi0, u32 * next0, u32 * error0,
-		  ip6_frag_hdr_t * frag_hdr, bool is_custom_app,
-		  u32 * handoff_thread_idx)
+always_inline ip6_full_reass_rc_t
+ip6_full_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
+		       ip6_full_reass_main_t * rm,
+		       ip6_full_reass_per_thread_t * rt,
+		       ip6_full_reass_t * reass, u32 * bi0, u32 * next0,
+		       u32 * error0, ip6_frag_hdr_t * frag_hdr,
+		       bool is_custom_app, u32 * handoff_thread_idx)
 {
   int consumed = 0;
   vlib_buffer_t *fb = vlib_get_buffer (vm, *bi0);
@@ -802,7 +816,7 @@
       fvnb->ip.reass.ip6_frag_hdr_offset == 0 ||
       fvnb->ip.reass.ip6_frag_hdr_offset >= fb->current_length)
     {
-      return IP6_REASS_RC_INTERNAL_ERROR;
+      return IP6_FULL_REASS_RC_INTERNAL_ERROR;
     }
 
   u32 fragment_first = fvnb->ip.reass.fragment_first =
@@ -825,8 +839,8 @@
   if (~0 == reass->first_bi)
     {
       // starting a new reassembly
-      ip6_reass_insert_range_in_chain (vm, rm, rt, reass, prev_range_bi,
-				       *bi0);
+      ip6_full_reass_insert_range_in_chain (vm, rm, rt, reass, prev_range_bi,
+					    *bi0);
       reass->min_fragment_length = clib_net_to_host_u16 (fip->payload_length);
       consumed = 1;
       reass->fragments_n = 1;
@@ -848,8 +862,8 @@
 	      ~0 == candidate_range_bi)
 	    {
 	      // special case - this fragment falls beyond all known ranges
-	      ip6_reass_insert_range_in_chain (vm, rm, rt, reass,
-					       prev_range_bi, *bi0);
+	      ip6_full_reass_insert_range_in_chain (vm, rm, rt, reass,
+						    prev_range_bi, *bi0);
 	      consumed = 1;
 	      break;
 	    }
@@ -858,8 +872,8 @@
       if (fragment_last < candidate_vnb->ip.reass.range_first)
 	{
 	  // this fragment ends before candidate range without any overlap
-	  ip6_reass_insert_range_in_chain (vm, rm, rt, reass, prev_range_bi,
-					   *bi0);
+	  ip6_full_reass_insert_range_in_chain (vm, rm, rt, reass,
+						prev_range_bi, *bi0);
 	  consumed = 1;
 	}
       else if (fragment_first == candidate_vnb->ip.reass.range_first &&
@@ -870,19 +884,16 @@
       else
 	{
 	  // overlapping fragment - not allowed by RFC 8200
-	  ip6_reass_drop_all (vm, node, rm, reass);
-	  ip6_reass_free (rm, rt, reass);
+	  ip6_full_reass_drop_all (vm, node, rm, reass);
+	  ip6_full_reass_free (rm, rt, reass);
 	  if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
 	    {
-	      ip6_reass_add_trace (vm, node, rm, reass->id,
-				   reass->trace_op_counter, *bi0,
-				   reass->first_bi, reass->data_len,
-				   RANGE_OVERLAP, ~0);
-	      ++reass->trace_op_counter;
+	      ip6_full_reass_add_trace (vm, node, rm, reass, *bi0,
+					RANGE_OVERLAP, ~0);
 	    }
-	  *next0 = IP6_REASSEMBLY_NEXT_DROP;
+	  *next0 = IP6_FULL_REASSEMBLY_NEXT_DROP;
 	  *error0 = IP6_ERROR_REASS_OVERLAPPING_FRAGMENT;
-	  return IP6_REASS_RC_OK;
+	  return IP6_FULL_REASS_RC_OK;
 	}
       break;
     }
@@ -892,23 +903,20 @@
     {
       if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
 	{
-	  ip6_reass_add_trace (vm, node, rm, reass->id,
-			       reass->trace_op_counter, *bi0, reass->first_bi,
-			       reass->data_len, RANGE_NEW, ~0);
-	  ++reass->trace_op_counter;
+	  ip6_full_reass_add_trace (vm, node, rm, reass, *bi0, RANGE_NEW, ~0);
 	}
     }
   if (~0 != reass->last_packet_octet &&
       reass->data_len == reass->last_packet_octet + 1)
     {
       *handoff_thread_idx = reass->sendout_thread_index;
-      ip6_reass_rc_t rc =
-	ip6_reass_finalize (vm, node, rm, rt, reass, bi0, next0, error0,
-			    is_custom_app);
-      if (IP6_REASS_RC_OK == rc
+      ip6_full_reass_rc_t rc =
+	ip6_full_reass_finalize (vm, node, rm, rt, reass, bi0, next0, error0,
+				 is_custom_app);
+      if (IP6_FULL_REASS_RC_OK == rc
 	  && reass->memory_owner_thread_index != reass->sendout_thread_index)
 	{
-	  return IP6_REASS_RC_HANDOFF;
+	  return IP6_FULL_REASS_RC_HANDOFF;
 	}
       return rc;
     }
@@ -919,22 +927,22 @@
 	  *bi0 = ~0;
 	  if (reass->fragments_n > rm->max_reass_len)
 	    {
-	      return IP6_REASS_RC_TOO_MANY_FRAGMENTS;
+	      return IP6_FULL_REASS_RC_TOO_MANY_FRAGMENTS;
 	    }
 	}
       else
 	{
-	  *next0 = IP6_REASSEMBLY_NEXT_DROP;
+	  *next0 = IP6_FULL_REASSEMBLY_NEXT_DROP;
 	  *error0 = IP6_ERROR_REASS_DUPLICATE_FRAGMENT;
 	}
     }
-  return IP6_REASS_RC_OK;
+  return IP6_FULL_REASS_RC_OK;
 }
 
 always_inline bool
-ip6_reass_verify_upper_layer_present (vlib_node_runtime_t * node,
-				      vlib_buffer_t * b,
-				      ip6_frag_hdr_t * frag_hdr)
+ip6_full_reass_verify_upper_layer_present (vlib_node_runtime_t * node,
+					   vlib_buffer_t * b,
+					   ip6_frag_hdr_t * frag_hdr)
 {
   ip6_ext_header_t *tmp = (ip6_ext_header_t *) frag_hdr;
   while (ip6_ext_hdr (tmp->next_hdr))
@@ -954,10 +962,10 @@
 }
 
 always_inline bool
-ip6_reass_verify_fragment_multiple_8 (vlib_main_t * vm,
-				      vlib_node_runtime_t * node,
-				      vlib_buffer_t * b,
-				      ip6_frag_hdr_t * frag_hdr)
+ip6_full_reass_verify_fragment_multiple_8 (vlib_main_t * vm,
+					   vlib_node_runtime_t * node,
+					   vlib_buffer_t * b,
+					   ip6_frag_hdr_t * frag_hdr)
 {
   vnet_buffer_opaque_t *vnb = vnet_buffer (b);
   ip6_header_t *ip = vlib_buffer_get_current (b);
@@ -976,10 +984,10 @@
 }
 
 always_inline bool
-ip6_reass_verify_packet_size_lt_64k (vlib_main_t * vm,
-				     vlib_node_runtime_t * node,
-				     vlib_buffer_t * b,
-				     ip6_frag_hdr_t * frag_hdr)
+ip6_full_reass_verify_packet_size_lt_64k (vlib_main_t * vm,
+					  vlib_node_runtime_t * node,
+					  vlib_buffer_t * b,
+					  ip6_frag_hdr_t * frag_hdr)
 {
   vnet_buffer_opaque_t *vnb = vnet_buffer (b);
   u32 fragment_first = ip6_frag_hdr_offset_bytes (frag_hdr);
@@ -999,14 +1007,15 @@
 }
 
 always_inline uword
-ip6_reassembly_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
-		       vlib_frame_t * frame, bool is_feature,
-		       bool is_custom_app)
+ip6_full_reassembly_inline (vlib_main_t * vm,
+			    vlib_node_runtime_t * node,
+			    vlib_frame_t * frame, bool is_feature,
+			    bool is_custom_app)
 {
   u32 *from = vlib_frame_vector_args (frame);
   u32 n_left_from, n_left_to_next, *to_next, next_index;
-  ip6_reass_main_t *rm = &ip6_reass_main;
-  ip6_reass_per_thread_t *rt = &rm->per_thread_data[vm->thread_index];
+  ip6_full_reass_main_t *rm = &ip6_full_reass_main;
+  ip6_full_reass_per_thread_t *rt = &rm->per_thread_data[vm->thread_index];
   clib_spinlock_lock (&rt->lock);
 
   n_left_from = frame->n_vectors;
@@ -1019,7 +1028,7 @@
 	{
 	  u32 bi0;
 	  vlib_buffer_t *b0;
-	  u32 next0 = IP6_REASSEMBLY_NEXT_DROP;
+	  u32 next0 = IP6_FULL_REASSEMBLY_NEXT_DROP;
 	  u32 error0 = IP6_ERROR_NONE;
 	  u32 icmp_bi = ~0;
 
@@ -1037,29 +1046,31 @@
 	  if (!frag_hdr)
 	    {
 	      // this is a regular packet - no fragmentation
-	      next0 = IP6_REASSEMBLY_NEXT_INPUT;
+	      next0 = IP6_FULL_REASSEMBLY_NEXT_INPUT;
 	      goto skip_reass;
 	    }
 	  if (0 == ip6_frag_hdr_offset (frag_hdr))
 	    {
 	      // first fragment - verify upper-layer is present
-	      if (!ip6_reass_verify_upper_layer_present (node, b0, frag_hdr))
+	      if (!ip6_full_reass_verify_upper_layer_present
+		  (node, b0, frag_hdr))
 		{
-		  next0 = IP6_REASSEMBLY_NEXT_ICMP_ERROR;
+		  next0 = IP6_FULL_REASSEMBLY_NEXT_ICMP_ERROR;
 		  goto skip_reass;
 		}
 	    }
-	  if (!ip6_reass_verify_fragment_multiple_8 (vm, node, b0, frag_hdr)
-	      || !ip6_reass_verify_packet_size_lt_64k (vm, node, b0,
-						       frag_hdr))
+	  if (!ip6_full_reass_verify_fragment_multiple_8
+	      (vm, node, b0, frag_hdr)
+	      || !ip6_full_reass_verify_packet_size_lt_64k (vm, node, b0,
+							    frag_hdr))
 	    {
-	      next0 = IP6_REASSEMBLY_NEXT_ICMP_ERROR;
+	      next0 = IP6_FULL_REASSEMBLY_NEXT_ICMP_ERROR;
 	      goto skip_reass;
 	    }
 	  vnet_buffer (b0)->ip.reass.ip6_frag_hdr_offset =
 	    (u8 *) frag_hdr - (u8 *) ip0;
 
-	  ip6_reass_kv_t kv;
+	  ip6_full_reass_kv_t kv;
 	  u8 do_handoff = 0;
 
 	  kv.k.as_u64[0] = ip0->src_address.as_u64[0];
@@ -1072,9 +1083,9 @@
 	    (u64) frag_hdr->identification;
 	  kv.k.as_u64[5] = ip0->protocol;
 
-	  ip6_reass_t *reass =
-	    ip6_reass_find_or_create (vm, node, rm, rt, &kv, &icmp_bi,
-				      &do_handoff);
+	  ip6_full_reass_t *reass =
+	    ip6_full_reass_find_or_create (vm, node, rm, rt, &kv, &icmp_bi,
+					   &do_handoff);
 
 	  if (reass)
 	    {
@@ -1086,7 +1097,7 @@
 	    }
 	  if (PREDICT_FALSE (do_handoff))
 	    {
-	      next0 = IP6_REASSEMBLY_NEXT_HANDOFF;
+	      next0 = IP6_FULL_REASSEMBLY_NEXT_HANDOFF;
 	      if (is_feature)
 		vnet_buffer (b0)->ip.reass.owner_feature_thread_index =
 		  kv.v.memory_owner_thread_index;
@@ -1097,15 +1108,15 @@
 	  else if (reass)
 	    {
 	      u32 handoff_thread_idx;
-	      switch (ip6_reass_update (vm, node, rm, rt, reass, &bi0, &next0,
-					&error0, frag_hdr, is_custom_app,
-					&handoff_thread_idx))
+	      switch (ip6_full_reass_update
+		      (vm, node, rm, rt, reass, &bi0, &next0, &error0,
+		       frag_hdr, is_custom_app, &handoff_thread_idx))
 		{
-		case IP6_REASS_RC_OK:
+		case IP6_FULL_REASS_RC_OK:
 		  /* nothing to do here */
 		  break;
-		case IP6_REASS_RC_HANDOFF:
-		  next0 = IP6_REASSEMBLY_NEXT_HANDOFF;
+		case IP6_FULL_REASS_RC_HANDOFF:
+		  next0 = IP6_FULL_REASSEMBLY_NEXT_HANDOFF;
 		  b0 = vlib_get_buffer (vm, bi0);
 		  if (is_feature)
 		    vnet_buffer (b0)->ip.reass.owner_feature_thread_index =
@@ -1114,28 +1125,27 @@
 		    vnet_buffer (b0)->ip.reass.owner_thread_index =
 		      handoff_thread_idx;
 		  break;
-		case IP6_REASS_RC_TOO_MANY_FRAGMENTS:
+		case IP6_FULL_REASS_RC_TOO_MANY_FRAGMENTS:
 		  vlib_node_increment_counter (vm, node->node_index,
 					       IP6_ERROR_REASS_FRAGMENT_CHAIN_TOO_LONG,
 					       1);
-		  ip6_reass_drop_all (vm, node, rm, reass);
-		  ip6_reass_free (rm, rt, reass);
+		  ip6_full_reass_drop_all (vm, node, rm, reass);
+		  ip6_full_reass_free (rm, rt, reass);
 		  goto next_packet;
 		  break;
-		case IP6_REASS_RC_NO_BUF:
+		case IP6_FULL_REASS_RC_NO_BUF:
 		  vlib_node_increment_counter (vm, node->node_index,
 					       IP6_ERROR_REASS_NO_BUF, 1);
-		  ip6_reass_drop_all (vm, node, rm, reass);
-		  ip6_reass_free (rm, rt, reass);
+		  ip6_full_reass_drop_all (vm, node, rm, reass);
+		  ip6_full_reass_free (rm, rt, reass);
 		  goto next_packet;
 		  break;
-		case IP6_REASS_RC_INTERNAL_ERROR:
-		  /* drop everything and start with a clean slate */
+		case IP6_FULL_REASS_RC_INTERNAL_ERROR:
 		  vlib_node_increment_counter (vm, node->node_index,
 					       IP6_ERROR_REASS_INTERNAL_ERROR,
 					       1);
-		  ip6_reass_drop_all (vm, node, rm, reass);
-		  ip6_reass_free (rm, rt, reass);
+		  ip6_full_reass_drop_all (vm, node, rm, reass);
+		  ip6_full_reass_free (rm, rt, reass);
 		  goto next_packet;
 		  break;
 		}
@@ -1144,7 +1154,7 @@
 	    {
 	      if (is_feature)
 		{
-		  next0 = IP6_REASSEMBLY_NEXT_DROP;
+		  next0 = IP6_FULL_REASSEMBLY_NEXT_DROP;
 		}
 	      else
 		{
@@ -1162,21 +1172,20 @@
 	      to_next[0] = bi0;
 	      to_next += 1;
 	      n_left_to_next -= 1;
-	      if (next0 == IP6_REASSEMBLY_NEXT_HANDOFF)
+	      if (next0 == IP6_FULL_REASSEMBLY_NEXT_HANDOFF)
 		{
 		  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
 		    {
 		      if (is_feature)
-			ip6_reass_add_trace (vm, node, rm, ~0,
-					     ~0,
-					     bi0, ~0, ~0, HANDOFF,
-					     vnet_buffer (b0)->ip.
-					     reass.owner_feature_thread_index);
+			ip6_full_reass_add_trace (vm, node, rm, NULL, bi0,
+						  HANDOFF,
+						  vnet_buffer (b0)->ip.
+						  reass.owner_feature_thread_index);
 		      else
-			ip6_reass_add_trace (vm, node, rm, ~0, ~0, bi0,
-					     ~0, ~0, HANDOFF,
-					     vnet_buffer (b0)->ip.
-					     reass.owner_thread_index);
+			ip6_full_reass_add_trace (vm, node, rm, NULL, bi0,
+						  HANDOFF,
+						  vnet_buffer (b0)->ip.
+						  reass.owner_thread_index);
 		    }
 		}
 	      else if (is_feature && IP6_ERROR_NONE == error0)
@@ -1190,7 +1199,7 @@
 
 	  if (~0 != icmp_bi)
 	    {
-	      next0 = IP6_REASSEMBLY_NEXT_ICMP_ERROR;
+	      next0 = IP6_FULL_REASSEMBLY_NEXT_ICMP_ERROR;
 	      to_next[0] = icmp_bi;
 	      to_next += 1;
 	      n_left_to_next -= 1;
@@ -1210,67 +1219,68 @@
   return frame->n_vectors;
 }
 
-static char *ip6_reassembly_error_strings[] = {
+static char *ip6_full_reassembly_error_strings[] = {
 #define _(sym, string) string,
   foreach_ip6_error
 #undef _
 };
 
-VLIB_NODE_FN (ip6_reass_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
-			       vlib_frame_t * frame)
+VLIB_NODE_FN (ip6_full_reass_node) (vlib_main_t * vm,
+				    vlib_node_runtime_t * node,
+				    vlib_frame_t * frame)
 {
-  return ip6_reassembly_inline (vm, node, frame, false /* is_feature */ ,
-				false /* is_custom_app */ );
+  return ip6_full_reassembly_inline (vm, node, frame, false /* is_feature */ ,
+				     false /* is_custom_app */ );
 }
 
 /* *INDENT-OFF* */
-VLIB_REGISTER_NODE (ip6_reass_node) = {
-    .name = "ip6-reassembly",
+VLIB_REGISTER_NODE (ip6_full_reass_node) = {
+    .name = "ip6-full-reassembly",
     .vector_size = sizeof (u32),
-    .format_trace = format_ip6_reass_trace,
-    .n_errors = ARRAY_LEN (ip6_reassembly_error_strings),
-    .error_strings = ip6_reassembly_error_strings,
-    .n_next_nodes = IP6_REASSEMBLY_N_NEXT,
+    .format_trace = format_ip6_full_reass_trace,
+    .n_errors = ARRAY_LEN (ip6_full_reassembly_error_strings),
+    .error_strings = ip6_full_reassembly_error_strings,
+    .n_next_nodes = IP6_FULL_REASSEMBLY_N_NEXT,
     .next_nodes =
         {
-                [IP6_REASSEMBLY_NEXT_INPUT] = "ip6-input",
-                [IP6_REASSEMBLY_NEXT_DROP] = "ip6-drop",
-                [IP6_REASSEMBLY_NEXT_ICMP_ERROR] = "ip6-icmp-error",
-                [IP6_REASSEMBLY_NEXT_HANDOFF] = "ip6-reassembly-handoff",
+                [IP6_FULL_REASSEMBLY_NEXT_INPUT] = "ip6-input",
+                [IP6_FULL_REASSEMBLY_NEXT_DROP] = "ip6-drop",
+                [IP6_FULL_REASSEMBLY_NEXT_ICMP_ERROR] = "ip6-icmp-error",
+                [IP6_FULL_REASSEMBLY_NEXT_HANDOFF] = "ip6-full-reassembly-handoff",
         },
 };
 /* *INDENT-ON* */
 
-VLIB_NODE_FN (ip6_reass_node_feature) (vlib_main_t * vm,
-				       vlib_node_runtime_t * node,
-				       vlib_frame_t * frame)
+VLIB_NODE_FN (ip6_full_reass_node_feature) (vlib_main_t * vm,
+					    vlib_node_runtime_t * node,
+					    vlib_frame_t * frame)
 {
-  return ip6_reassembly_inline (vm, node, frame, true /* is_feature */ ,
-				false /* is_custom_app */ );
+  return ip6_full_reassembly_inline (vm, node, frame, true /* is_feature */ ,
+				     false /* is_custom_app */ );
 }
 
 /* *INDENT-OFF* */
-VLIB_REGISTER_NODE (ip6_reass_node_feature) = {
-    .name = "ip6-reassembly-feature",
+VLIB_REGISTER_NODE (ip6_full_reass_node_feature) = {
+    .name = "ip6-full-reassembly-feature",
     .vector_size = sizeof (u32),
-    .format_trace = format_ip6_reass_trace,
-    .n_errors = ARRAY_LEN (ip6_reassembly_error_strings),
-    .error_strings = ip6_reassembly_error_strings,
-    .n_next_nodes = IP6_REASSEMBLY_N_NEXT,
+    .format_trace = format_ip6_full_reass_trace,
+    .n_errors = ARRAY_LEN (ip6_full_reassembly_error_strings),
+    .error_strings = ip6_full_reassembly_error_strings,
+    .n_next_nodes = IP6_FULL_REASSEMBLY_N_NEXT,
     .next_nodes =
         {
-                [IP6_REASSEMBLY_NEXT_INPUT] = "ip6-input",
-                [IP6_REASSEMBLY_NEXT_DROP] = "ip6-drop",
-                [IP6_REASSEMBLY_NEXT_ICMP_ERROR] = "ip6-icmp-error",
-                [IP6_REASSEMBLY_NEXT_HANDOFF] = "ip6-reass-feature-hoff",
+                [IP6_FULL_REASSEMBLY_NEXT_INPUT] = "ip6-input",
+                [IP6_FULL_REASSEMBLY_NEXT_DROP] = "ip6-drop",
+                [IP6_FULL_REASSEMBLY_NEXT_ICMP_ERROR] = "ip6-icmp-error",
+                [IP6_FULL_REASSEMBLY_NEXT_HANDOFF] = "ip6-full-reass-feature-hoff",
         },
 };
 /* *INDENT-ON* */
 
 /* *INDENT-OFF* */
-VNET_FEATURE_INIT (ip6_reassembly_feature, static) = {
+VNET_FEATURE_INIT (ip6_full_reassembly_feature, static) = {
     .arc_name = "ip6-unicast",
-    .node_name = "ip6-reassembly-feature",
+    .node_name = "ip6-full-reassembly-feature",
     .runs_before = VNET_FEATURES ("ip6-lookup",
                                   "ipsec6-input-feature"),
     .runs_after = 0,
@@ -1279,13 +1289,13 @@
 
 #ifndef CLIB_MARCH_VARIANT
 static u32
-ip6_reass_get_nbuckets ()
+ip6_full_reass_get_nbuckets ()
 {
-  ip6_reass_main_t *rm = &ip6_reass_main;
+  ip6_full_reass_main_t *rm = &ip6_full_reass_main;
   u32 nbuckets;
   u8 i;
 
-  nbuckets = (u32) (rm->max_reass_n / IP6_REASS_HT_LOAD_FACTOR);
+  nbuckets = (u32) (rm->max_reass_n / IP6_FULL_REASS_HT_LOAD_FACTOR);
 
   for (i = 0; i < 31; i++)
     if ((1 << i) >= nbuckets)
@@ -1299,7 +1309,7 @@
 typedef enum
 {
   IP6_EVENT_CONFIG_CHANGED = 1,
-} ip6_reass_event_t;
+} ip6_full_reass_event_t;
 
 #ifndef CLIB_MARCH_VARIANT
 typedef struct
@@ -1319,37 +1329,38 @@
 }
 
 static void
-ip6_reass_set_params (u32 timeout_ms, u32 max_reassemblies,
-		      u32 max_reassembly_length, u32 expire_walk_interval_ms)
+ip6_full_reass_set_params (u32 timeout_ms, u32 max_reassemblies,
+			   u32 max_reassembly_length,
+			   u32 expire_walk_interval_ms)
 {
-  ip6_reass_main.timeout_ms = timeout_ms;
-  ip6_reass_main.timeout = (f64) timeout_ms / (f64) MSEC_PER_SEC;
-  ip6_reass_main.max_reass_n = max_reassemblies;
-  ip6_reass_main.max_reass_len = max_reassembly_length;
-  ip6_reass_main.expire_walk_interval_ms = expire_walk_interval_ms;
+  ip6_full_reass_main.timeout_ms = timeout_ms;
+  ip6_full_reass_main.timeout = (f64) timeout_ms / (f64) MSEC_PER_SEC;
+  ip6_full_reass_main.max_reass_n = max_reassemblies;
+  ip6_full_reass_main.max_reass_len = max_reassembly_length;
+  ip6_full_reass_main.expire_walk_interval_ms = expire_walk_interval_ms;
 }
 
 vnet_api_error_t
-ip6_reass_set (u32 timeout_ms, u32 max_reassemblies,
-	       u32 max_reassembly_length, u32 expire_walk_interval_ms)
+ip6_full_reass_set (u32 timeout_ms, u32 max_reassemblies,
+		    u32 max_reassembly_length, u32 expire_walk_interval_ms)
 {
-  u32 old_nbuckets = ip6_reass_get_nbuckets ();
-  ip6_reass_set_params (timeout_ms, max_reassemblies, max_reassembly_length,
-			expire_walk_interval_ms);
-  vlib_process_signal_event (ip6_reass_main.vlib_main,
-			     ip6_reass_main.ip6_reass_expire_node_idx,
+  u32 old_nbuckets = ip6_full_reass_get_nbuckets ();
+  ip6_full_reass_set_params (timeout_ms, max_reassemblies,
+			     max_reassembly_length, expire_walk_interval_ms);
+  vlib_process_signal_event (ip6_full_reass_main.vlib_main,
+			     ip6_full_reass_main.ip6_full_reass_expire_node_idx,
 			     IP6_EVENT_CONFIG_CHANGED, 0);
-  u32 new_nbuckets = ip6_reass_get_nbuckets ();
-  if (ip6_reass_main.max_reass_n > 0 && new_nbuckets > old_nbuckets)
+  u32 new_nbuckets = ip6_full_reass_get_nbuckets ();
+  if (ip6_full_reass_main.max_reass_n > 0 && new_nbuckets > old_nbuckets)
     {
       clib_bihash_48_8_t new_hash;
       clib_memset (&new_hash, 0, sizeof (new_hash));
       ip6_rehash_cb_ctx ctx;
       ctx.failure = 0;
       ctx.new_hash = &new_hash;
-      clib_bihash_init_48_8 (&new_hash, "ip6-reass", new_nbuckets,
+      clib_bihash_init_48_8 (&new_hash, "ip6-full-reass", new_nbuckets,
 			     new_nbuckets * 1024);
-      clib_bihash_foreach_key_value_pair_48_8 (&ip6_reass_main.hash,
+      clib_bihash_foreach_key_value_pair_48_8 (&ip6_full_reass_main.hash,
 					       ip6_rehash_cb, &ctx);
       if (ctx.failure)
 	{
@@ -1358,29 +1369,31 @@
 	}
       else
 	{
-	  clib_bihash_free_48_8 (&ip6_reass_main.hash);
-	  clib_memcpy_fast (&ip6_reass_main.hash, &new_hash,
-			    sizeof (ip6_reass_main.hash));
-	  clib_bihash_copied (&ip6_reass_main.hash, &new_hash);
+	  clib_bihash_free_48_8 (&ip6_full_reass_main.hash);
+	  clib_memcpy_fast (&ip6_full_reass_main.hash, &new_hash,
+			    sizeof (ip6_full_reass_main.hash));
+	  clib_bihash_copied (&ip6_full_reass_main.hash, &new_hash);
 	}
     }
   return 0;
 }
 
 vnet_api_error_t
-ip6_reass_get (u32 * timeout_ms, u32 * max_reassemblies,
-	       u32 * expire_walk_interval_ms)
+ip6_full_reass_get (u32 * timeout_ms, u32 * max_reassemblies,
+		    u32 * max_reassembly_length,
+		    u32 * expire_walk_interval_ms)
 {
-  *timeout_ms = ip6_reass_main.timeout_ms;
-  *max_reassemblies = ip6_reass_main.max_reass_n;
-  *expire_walk_interval_ms = ip6_reass_main.expire_walk_interval_ms;
+  *timeout_ms = ip6_full_reass_main.timeout_ms;
+  *max_reassemblies = ip6_full_reass_main.max_reass_n;
+  *max_reassembly_length = ip6_full_reass_main.max_reass_len;
+  *expire_walk_interval_ms = ip6_full_reass_main.expire_walk_interval_ms;
   return 0;
 }
 
 static clib_error_t *
-ip6_reass_init_function (vlib_main_t * vm)
+ip6_full_reass_init_function (vlib_main_t * vm)
 {
-  ip6_reass_main_t *rm = &ip6_reass_main;
+  ip6_full_reass_main_t *rm = &ip6_full_reass_main;
   clib_error_t *error = 0;
   u32 nbuckets;
   vlib_node_t *node;
@@ -1388,24 +1401,25 @@
   rm->vlib_main = vm;
 
   vec_validate (rm->per_thread_data, vlib_num_workers ());
-  ip6_reass_per_thread_t *rt;
+  ip6_full_reass_per_thread_t *rt;
   vec_foreach (rt, rm->per_thread_data)
   {
     clib_spinlock_init (&rt->lock);
     pool_alloc (rt->pool, rm->max_reass_n);
   }
 
-  node = vlib_get_node_by_name (vm, (u8 *) "ip6-reassembly-expire-walk");
+  node = vlib_get_node_by_name (vm, (u8 *) "ip6-full-reassembly-expire-walk");
   ASSERT (node);
-  rm->ip6_reass_expire_node_idx = node->index;
+  rm->ip6_full_reass_expire_node_idx = node->index;
 
-  ip6_reass_set_params (IP6_REASS_TIMEOUT_DEFAULT_MS,
-			IP6_REASS_MAX_REASSEMBLIES_DEFAULT,
-			IP6_REASS_MAX_REASSEMBLY_LENGTH_DEFAULT,
-			IP6_REASS_EXPIRE_WALK_INTERVAL_DEFAULT_MS);
+  ip6_full_reass_set_params (IP6_FULL_REASS_TIMEOUT_DEFAULT_MS,
+			     IP6_FULL_REASS_MAX_REASSEMBLIES_DEFAULT,
+			     IP6_FULL_REASS_MAX_REASSEMBLY_LENGTH_DEFAULT,
+			     IP6_FULL_REASS_EXPIRE_WALK_INTERVAL_DEFAULT_MS);
 
-  nbuckets = ip6_reass_get_nbuckets ();
-  clib_bihash_init_48_8 (&rm->hash, "ip6-reass", nbuckets, nbuckets * 1024);
+  nbuckets = ip6_full_reass_get_nbuckets ();
+  clib_bihash_init_48_8 (&rm->hash, "ip6-full-reass", nbuckets,
+			 nbuckets * 1024);
 
   node = vlib_get_node_by_name (vm, (u8 *) "ip6-drop");
   ASSERT (node);
@@ -1417,23 +1431,23 @@
   if ((error = vlib_call_init_function (vm, ip_main_init)))
     return error;
   ip6_register_protocol (IP_PROTOCOL_IPV6_FRAGMENTATION,
-			 ip6_reass_node.index);
+			 ip6_full_reass_node.index);
 
-  rm->fq_index = vlib_frame_queue_main_init (ip6_reass_node.index, 0);
+  rm->fq_index = vlib_frame_queue_main_init (ip6_full_reass_node.index, 0);
   rm->fq_feature_index =
-    vlib_frame_queue_main_init (ip6_reass_node_feature.index, 0);
+    vlib_frame_queue_main_init (ip6_full_reass_node_feature.index, 0);
 
   return error;
 }
 
-VLIB_INIT_FUNCTION (ip6_reass_init_function);
+VLIB_INIT_FUNCTION (ip6_full_reass_init_function);
 #endif /* CLIB_MARCH_VARIANT */
 
 static uword
-ip6_reass_walk_expired (vlib_main_t * vm,
-			vlib_node_runtime_t * node, vlib_frame_t * f)
+ip6_full_reass_walk_expired (vlib_main_t * vm,
+			     vlib_node_runtime_t * node, vlib_frame_t * f)
 {
-  ip6_reass_main_t *rm = &ip6_reass_main;
+  ip6_full_reass_main_t *rm = &ip6_full_reass_main;
   uword event_type, *event_data = 0;
 
   while (true)
@@ -1456,7 +1470,7 @@
 	}
       f64 now = vlib_time_now (vm);
 
-      ip6_reass_t *reass;
+      ip6_full_reass_t *reass;
       int *pool_indexes_to_free = NULL;
 
       uword thread_index = 0;
@@ -1465,7 +1479,8 @@
       u32 *vec_icmp_bi = NULL;
       for (thread_index = 0; thread_index < nthreads; ++thread_index)
 	{
-	  ip6_reass_per_thread_t *rt = &rm->per_thread_data[thread_index];
+	  ip6_full_reass_per_thread_t *rt =
+	    &rm->per_thread_data[thread_index];
 	  clib_spinlock_lock (&rt->lock);
 
 	  vec_reset_length (pool_indexes_to_free);
@@ -1482,13 +1497,13 @@
           /* *INDENT-OFF* */
           vec_foreach (i, pool_indexes_to_free)
           {
-            ip6_reass_t *reass = pool_elt_at_index (rt->pool, i[0]);
+            ip6_full_reass_t *reass = pool_elt_at_index (rt->pool, i[0]);
             u32 icmp_bi = ~0;
-            ip6_reass_on_timeout (vm, node, rm, reass, &icmp_bi);
+            ip6_full_reass_on_timeout (vm, node, rm, reass, &icmp_bi);
             if (~0 != icmp_bi)
               vec_add1 (vec_icmp_bi, icmp_bi);
 
-            ip6_reass_free (rm, rt, reass);
+            ip6_full_reass_free (rm, rt, reass);
           }
           /* *INDENT-ON* */
 
@@ -1530,22 +1545,22 @@
 }
 
 /* *INDENT-OFF* */
-VLIB_REGISTER_NODE (ip6_reass_expire_node) = {
-    .function = ip6_reass_walk_expired,
-    .format_trace = format_ip6_reass_trace,
+VLIB_REGISTER_NODE (ip6_full_reass_expire_node) = {
+    .function = ip6_full_reass_walk_expired,
+    .format_trace = format_ip6_full_reass_trace,
     .type = VLIB_NODE_TYPE_PROCESS,
-    .name = "ip6-reassembly-expire-walk",
+    .name = "ip6-full-reassembly-expire-walk",
 
-    .n_errors = ARRAY_LEN (ip6_reassembly_error_strings),
-    .error_strings = ip6_reassembly_error_strings,
+    .n_errors = ARRAY_LEN (ip6_full_reassembly_error_strings),
+    .error_strings = ip6_full_reassembly_error_strings,
 
 };
 /* *INDENT-ON* */
 
 static u8 *
-format_ip6_reass_key (u8 * s, va_list * args)
+format_ip6_full_reass_key (u8 * s, va_list * args)
 {
-  ip6_reass_key_t *key = va_arg (*args, ip6_reass_key_t *);
+  ip6_full_reass_key_t *key = va_arg (*args, ip6_full_reass_key_t *);
   s = format (s, "xx_id: %u, src: %U, dst: %U, frag_id: %u, proto: %u",
 	      key->xx_id, format_ip6_address, &key->src, format_ip6_address,
 	      &key->dst, clib_net_to_host_u16 (key->frag_id), key->proto);
@@ -1553,15 +1568,15 @@
 }
 
 static u8 *
-format_ip6_reass (u8 * s, va_list * args)
+format_ip6_full_reass (u8 * s, va_list * args)
 {
   vlib_main_t *vm = va_arg (*args, vlib_main_t *);
-  ip6_reass_t *reass = va_arg (*args, ip6_reass_t *);
+  ip6_full_reass_t *reass = va_arg (*args, ip6_full_reass_t *);
 
   s = format (s, "ID: %lu, key: %U\n  first_bi: %u, data_len: %u, "
 	      "last_packet_octet: %u, trace_op_counter: %u\n",
-	      reass->id, format_ip6_reass_key, &reass->key, reass->first_bi,
-	      reass->data_len, reass->last_packet_octet,
+	      reass->id, format_ip6_full_reass_key, &reass->key,
+	      reass->first_bi, reass->data_len, reass->last_packet_octet,
 	      reass->trace_op_counter);
   u32 bi = reass->first_bi;
   u32 counter = 0;
@@ -1573,8 +1588,8 @@
 		  "fragment[%u, %u]\n",
 		  counter, vnb->ip.reass.range_first,
 		  vnb->ip.reass.range_last, bi,
-		  ip6_reass_buffer_get_data_offset (b),
-		  ip6_reass_buffer_get_data_len (b),
+		  ip6_full_reass_buffer_get_data_offset (b),
+		  ip6_full_reass_buffer_get_data_len (b),
 		  vnb->ip.reass.fragment_first, vnb->ip.reass.fragment_last);
       if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
 	{
@@ -1589,10 +1604,10 @@
 }
 
 static clib_error_t *
-show_ip6_reass (vlib_main_t * vm, unformat_input_t * input,
-		CLIB_UNUSED (vlib_cli_command_t * lmd))
+show_ip6_full_reass (vlib_main_t * vm, unformat_input_t * input,
+		     CLIB_UNUSED (vlib_cli_command_t * lmd))
 {
-  ip6_reass_main_t *rm = &ip6_reass_main;
+  ip6_full_reass_main_t *rm = &ip6_full_reass_main;
 
   vlib_cli_output (vm, "---------------------");
   vlib_cli_output (vm, "IP6 reassembly status");
@@ -1605,18 +1620,18 @@
 
   u32 sum_reass_n = 0;
   u64 sum_buffers_n = 0;
-  ip6_reass_t *reass;
+  ip6_full_reass_t *reass;
   uword thread_index;
   const uword nthreads = vlib_num_workers () + 1;
   for (thread_index = 0; thread_index < nthreads; ++thread_index)
     {
-      ip6_reass_per_thread_t *rt = &rm->per_thread_data[thread_index];
+      ip6_full_reass_per_thread_t *rt = &rm->per_thread_data[thread_index];
       clib_spinlock_lock (&rt->lock);
       if (details)
 	{
           /* *INDENT-OFF* */
           pool_foreach (reass, rt->pool, {
-            vlib_cli_output (vm, "%U", format_ip6_reass, vm, reass);
+            vlib_cli_output (vm, "%U", format_ip6_full_reass, vm, reass);
           });
           /* *INDENT-ON* */
 	}
@@ -1634,65 +1649,67 @@
 }
 
 /* *INDENT-OFF* */
-VLIB_CLI_COMMAND (show_ip6_reassembly_cmd, static) = {
-    .path = "show ip6-reassembly",
-    .short_help = "show ip6-reassembly [details]",
-    .function = show_ip6_reass,
+VLIB_CLI_COMMAND (show_ip6_full_reassembly_cmd, static) = {
+    .path = "show ip6-full-reassembly",
+    .short_help = "show ip6-full-reassembly [details]",
+    .function = show_ip6_full_reass,
 };
 /* *INDENT-ON* */
 
 #ifndef CLIB_MARCH_VARIANT
 vnet_api_error_t
-ip6_reass_enable_disable (u32 sw_if_index, u8 enable_disable)
+ip6_full_reass_enable_disable (u32 sw_if_index, u8 enable_disable)
 {
-  return vnet_feature_enable_disable ("ip6-unicast", "ip6-reassembly-feature",
+  return vnet_feature_enable_disable ("ip6-unicast",
+				      "ip6-full-reassembly-feature",
 				      sw_if_index, enable_disable, 0, 0);
 }
 #endif /* CLIB_MARCH_VARIANT */
 
-#define foreach_ip6_reassembly_handoff_error                       \
+#define foreach_ip6_full_reassembly_handoff_error                       \
 _(CONGESTION_DROP, "congestion drop")
 
 
 typedef enum
 {
-#define _(sym,str) IP6_REASSEMBLY_HANDOFF_ERROR_##sym,
-  foreach_ip6_reassembly_handoff_error
+#define _(sym,str) IP6_FULL_REASSEMBLY_HANDOFF_ERROR_##sym,
+  foreach_ip6_full_reassembly_handoff_error
 #undef _
-    IP6_REASSEMBLY_HANDOFF_N_ERROR,
-} ip6_reassembly_handoff_error_t;
+    IP6_FULL_REASSEMBLY_HANDOFF_N_ERROR,
+} ip6_full_reassembly_handoff_error_t;
 
-static char *ip6_reassembly_handoff_error_strings[] = {
+static char *ip6_full_reassembly_handoff_error_strings[] = {
 #define _(sym,string) string,
-  foreach_ip6_reassembly_handoff_error
+  foreach_ip6_full_reassembly_handoff_error
 #undef _
 };
 
 typedef struct
 {
   u32 next_worker_index;
-} ip6_reassembly_handoff_trace_t;
+} ip6_full_reassembly_handoff_trace_t;
 
 static u8 *
-format_ip6_reassembly_handoff_trace (u8 * s, va_list * args)
+format_ip6_full_reassembly_handoff_trace (u8 * s, va_list * args)
 {
   CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
-  ip6_reassembly_handoff_trace_t *t =
-    va_arg (*args, ip6_reassembly_handoff_trace_t *);
+  ip6_full_reassembly_handoff_trace_t *t =
+    va_arg (*args, ip6_full_reassembly_handoff_trace_t *);
 
   s =
-    format (s, "ip6-reassembly-handoff: next-worker %d",
+    format (s, "ip6-full-reassembly-handoff: next-worker %d",
 	    t->next_worker_index);
 
   return s;
 }
 
 always_inline uword
-ip6_reassembly_handoff_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
-			       vlib_frame_t * frame, bool is_feature)
+ip6_full_reassembly_handoff_inline (vlib_main_t * vm,
+				    vlib_node_runtime_t * node,
+				    vlib_frame_t * frame, bool is_feature)
 {
-  ip6_reass_main_t *rm = &ip6_reass_main;
+  ip6_full_reass_main_t *rm = &ip6_full_reass_main;
 
   vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
   u32 n_enq, n_left_from, *from;
@@ -1719,7 +1736,7 @@
 	  ((node->flags & VLIB_NODE_FLAG_TRACE)
 	   && (b[0]->flags & VLIB_BUFFER_IS_TRACED)))
 	{
-	  ip6_reassembly_handoff_trace_t *t =
+	  ip6_full_reassembly_handoff_trace_t *t =
 	    vlib_add_trace (vm, node, b[0], sizeof (*t));
 	  t->next_worker_index = ti[0];
 	}
@@ -1734,26 +1751,26 @@
 
   if (n_enq < frame->n_vectors)
     vlib_node_increment_counter (vm, node->node_index,
-				 IP6_REASSEMBLY_HANDOFF_ERROR_CONGESTION_DROP,
+				 IP6_FULL_REASSEMBLY_HANDOFF_ERROR_CONGESTION_DROP,
 				 frame->n_vectors - n_enq);
   return frame->n_vectors;
 }
 
-VLIB_NODE_FN (ip6_reassembly_handoff_node) (vlib_main_t * vm,
-					    vlib_node_runtime_t * node,
-					    vlib_frame_t * frame)
+VLIB_NODE_FN (ip6_full_reassembly_handoff_node) (vlib_main_t * vm,
+						 vlib_node_runtime_t * node,
+						 vlib_frame_t * frame)
 {
-  return ip6_reassembly_handoff_inline (vm, node, frame,
-					false /* is_feature */ );
+  return ip6_full_reassembly_handoff_inline (vm, node, frame,
+					     false /* is_feature */ );
 }
 
 /* *INDENT-OFF* */
-VLIB_REGISTER_NODE (ip6_reassembly_handoff_node) = {
-  .name = "ip6-reassembly-handoff",
+VLIB_REGISTER_NODE (ip6_full_reassembly_handoff_node) = {
+  .name = "ip6-full-reassembly-handoff",
   .vector_size = sizeof (u32),
-  .n_errors = ARRAY_LEN(ip6_reassembly_handoff_error_strings),
-  .error_strings = ip6_reassembly_handoff_error_strings,
-  .format_trace = format_ip6_reassembly_handoff_trace,
+  .n_errors = ARRAY_LEN(ip6_full_reassembly_handoff_error_strings),
+  .error_strings = ip6_full_reassembly_handoff_error_strings,
+  .format_trace = format_ip6_full_reassembly_handoff_trace,
 
   .n_next_nodes = 1,
 
@@ -1763,20 +1780,20 @@
 };
 
 
-VLIB_NODE_FN (ip6_reassembly_feature_handoff_node) (vlib_main_t * vm,
+VLIB_NODE_FN (ip6_full_reassembly_feature_handoff_node) (vlib_main_t * vm,
                                vlib_node_runtime_t * node, vlib_frame_t * frame)
 {
-  return ip6_reassembly_handoff_inline (vm, node, frame, true /* is_feature */ );
+  return ip6_full_reassembly_handoff_inline (vm, node, frame, true /* is_feature */ );
 }
 
 
 /* *INDENT-OFF* */
-VLIB_REGISTER_NODE (ip6_reassembly_feature_handoff_node) = {
-  .name = "ip6-reass-feature-hoff",
+VLIB_REGISTER_NODE (ip6_full_reassembly_feature_handoff_node) = {
+  .name = "ip6-full-reass-feature-hoff",
   .vector_size = sizeof (u32),
-  .n_errors = ARRAY_LEN(ip6_reassembly_handoff_error_strings),
-  .error_strings = ip6_reassembly_handoff_error_strings,
-  .format_trace = format_ip6_reassembly_handoff_trace,
+  .n_errors = ARRAY_LEN(ip6_full_reassembly_handoff_error_strings),
+  .error_strings = ip6_full_reassembly_handoff_error_strings,
+  .format_trace = format_ip6_full_reassembly_handoff_trace,
 
   .n_next_nodes = 1,
 
diff --git a/src/vnet/ip/ip6_reassembly.h b/src/vnet/ip/reass/ip6_full_reass.h
similarity index 65%
rename from src/vnet/ip/ip6_reassembly.h
rename to src/vnet/ip/reass/ip6_full_reass.h
index 1ca2b20..c2463e0 100644
--- a/src/vnet/ip/ip6_reassembly.h
+++ b/src/vnet/ip/reass/ip6_full_reass.h
@@ -20,8 +20,8 @@
  * This file contains the source code for IPv6 reassembly.
  */
 
-#ifndef __included_ip6_reassembly_h__
-#define __included_ip6_reassembly_h__
+#ifndef __included_ip6_full_reass_h__
+#define __included_ip6_full_reass_h__
 
 #include <vnet/api_errno.h>
 #include <vnet/vnet.h>
@@ -29,20 +29,21 @@
 /**
  * @brief set ip6 reassembly configuration
  */
-vnet_api_error_t ip6_reass_set (u32 timeout_ms, u32 max_reassemblies,
-				u32 max_reassembly_length,
-				u32 expire_walk_interval_ms);
+vnet_api_error_t ip6_full_reass_set (u32 timeout_ms, u32 max_reassemblies,
+				     u32 max_reassembly_length,
+				     u32 expire_walk_interval_ms);
 
 /**
  * @brief get ip6 reassembly configuration
  */
-vnet_api_error_t ip6_reass_get (u32 * timeout_ms, u32 * max_reassemblies,
-				u32 * expire_walk_interval_ms);
+vnet_api_error_t ip6_full_reass_get (u32 * timeout_ms, u32 * max_reassemblies,
+				     u32 * max_reassembly_length,
+				     u32 * expire_walk_interval_ms);
 
-vnet_api_error_t ip6_reass_enable_disable (u32 sw_if_index,
-					   u8 enable_disable);
+vnet_api_error_t ip6_full_reass_enable_disable (u32 sw_if_index,
+						u8 enable_disable);
 
-#endif /* __included_ip6_reassembly_h */
+#endif /* __included_ip6_full_reass_h__ */
 
 /*
  * fd.io coding-style-patch-verification: ON