GBP Endpoint Learning

Learning GBP endpoints over vxlan-gbp tunnels

Change-Id: I1db9fda5a16802d9ad8b4efd4e475614f3b21502
Signed-off-by: Neale Ranns <neale.ranns@cisco.com>
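
For orientation: a minimal sketch, assuming the function signatures introduced
below, of how the new objects are wired together (bridge-domain, route-domain,
EPG, and an L2 'template' vxlan-gbp tunnel from which per-peer tunnels are
cloned). All IDs and interface indices are illustrative placeholders, not
values from this patch:

static int
gbp_learning_setup_sketch (u32 bvi_sw_if_index, u32 uu_fwd_sw_if_index)
{
  u32 tun_sw_if_index;
  int rv;

  /* wrap an existing L2 bridge-domain; GBP disables dataplane learning */
  rv = gbp_bridge_domain_add_and_lock (1 /* bd_id */,
                                       bvi_sw_if_index, uu_fwd_sw_if_index);
  if (rv)
    return (rv);

  /* pair the v4/v6 tables; no unknown-unicast interfaces in this sketch */
  rv = gbp_route_domain_add_and_lock (1 /* rd_id */,
                                      0 /* ip4_table_id */,
                                      0 /* ip6_table_id */, ~0, ~0);
  if (rv)
    return (rv);

  /* the EPG now references the BD and RD by ID, not by table */
  rv = gbp_endpoint_group_add_and_lock (220 /* epg_id */, 1 /* bd_id */,
                                        1 /* rd_id */, ~0 /* no uplink */);
  if (rv)
    return (rv);

  /* an L2 template tunnel: only the VNI is fixed, any src/dst pair
   * may be learned from it */
  return (gbp_vxlan_tunnel_add (99 /* vni */, GBP_VXLAN_TUN_L2,
                                1 /* bd_rd_id */, &tun_sw_if_index));
}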
diff --git a/src/plugins/gbp/CMakeLists.txt b/src/plugins/gbp/CMakeLists.txt
index c099060..377197a 100644
--- a/src/plugins/gbp/CMakeLists.txt
+++ b/src/plugins/gbp/CMakeLists.txt
@@ -13,17 +13,23 @@
 
 add_vpp_plugin(gbp
   SOURCES
-  gbp_subnet.c
+  gbp_api.c
+  gbp_bridge_domain.c
+  gbp_classify.c
   gbp_contract.c
   gbp_endpoint.c
   gbp_endpoint_group.c
-  gbp_classify.c
-  gbp_recirc.c
-  gbp_policy.c
-  gbp_policy_dpo.c
   gbp_fwd.c
   gbp_fwd_dpo.c
-  gbp_api.c
+  gbp_itf.c
+  gbp_learn.c
+  gbp_policy.c
+  gbp_policy_dpo.c
+  gbp_recirc.c
+  gbp_route_domain.c
+  gbp_scanner.c
+  gbp_subnet.c
+  gbp_vxlan.c
 
   API_FILES
   gbp.api
diff --git a/src/plugins/gbp/gbp.api b/src/plugins/gbp/gbp.api
index d7c6d83..bf42243 100644
--- a/src/plugins/gbp/gbp.api
+++ b/src/plugins/gbp/gbp.api
@@ -19,16 +19,96 @@
 import "vnet/ip/ip_types.api";
 import "vnet/ethernet/ethernet_types.api";
 
+typedef gbp_bridge_domain
+{
+  u32 bd_id;
+  u32 bvi_sw_if_index;
+  u32 uu_fwd_sw_if_index;
+};
+
+autoreply define gbp_bridge_domain_add
+{
+  u32 client_index;
+  u32 context;
+  vl_api_gbp_bridge_domain_t bd;
+};
+autoreply define gbp_bridge_domain_del
+{
+  u32 client_index;
+  u32 context;
+  u32 bd_id;
+};
+define gbp_bridge_domain_dump
+{
+  u32 client_index;
+  u32 context;
+};
+define gbp_bridge_domain_details
+{
+  u32 context;
+  vl_api_gbp_bridge_domain_t bd;
+};
+
+typedef gbp_route_domain
+{
+  u32 rd_id;
+  u32 ip4_table_id;
+  u32 ip6_table_id;
+  u32 ip4_uu_sw_if_index;
+  u32 ip6_uu_sw_if_index;
+};
+
+autoreply define gbp_route_domain_add
+{
+  u32 client_index;
+  u32 context;
+  vl_api_gbp_route_domain_t rd;
+};
+autoreply define gbp_route_domain_del
+{
+  u32 client_index;
+  u32 context;
+  u32 rd_id;
+};
+define gbp_route_domain_dump
+{
+  u32 client_index;
+  u32 context;
+};
+define gbp_route_domain_details
+{
+  u32 context;
+  vl_api_gbp_route_domain_t rd;
+};
+
 /** \brief Endpoint
     @param client_index - opaque cookie to identify the sender
     @param context - sender context, to match reply w/ request
 */
 
+enum gbp_endpoint_flags
+{
+  NONE = 0,
+  BOUNCE = 0x1,
+  REMOTE = 0x2,
+  LEARNT = 0x4,
+  /* convenience composite: REMOTE | LEARNT */
+  REMOTE_LEARNT = 0x6,
+};
+
+typedef gbp_endpoint_tun
+{
+  vl_api_address_t src;
+  vl_api_address_t dst;
+};
+
 typedef gbp_endpoint
 {
   u32 sw_if_index;
   u16 epg_id;
+  vl_api_gbp_endpoint_flags_t flags;
   vl_api_mac_address_t mac;
+  vl_api_gbp_endpoint_tun_t tun;
   u8 n_ips;
   vl_api_address_t ips[n_ips];
 };
@@ -63,6 +143,8 @@
 define gbp_endpoint_details
 {
   u32 context;
+  f64 age;
+  u32 handle;
   vl_api_gbp_endpoint_t endpoint;
 };
 
@@ -70,18 +152,22 @@
 {
   u16 epg_id;
   u32 bd_id;
-  u32 ip4_table_id;
-  u32 ip6_table_id;
+  u32 rd_id;
   u32 uplink_sw_if_index;
 };
 
-autoreply define gbp_endpoint_group_add_del
+autoreply define gbp_endpoint_group_add
 {
   u32 client_index;
   u32 context;
-  u8  is_add;
   vl_api_gbp_endpoint_group_t epg;
 };
+autoreply define gbp_endpoint_group_del
+{
+  u32 client_index;
+  u32 context;
+  u16 epg_id;
+};
 
 define gbp_endpoint_group_dump
 {
@@ -122,12 +208,19 @@
   vl_api_gbp_recirc_t recirc;
 };
 
+enum gbp_subnet_type
+{
+  GBP_API_SUBNET_TRANSPORT,
+  GBP_API_SUBNET_STITCHED_INTERNAL,
+  GBP_API_SUBNET_STITCHED_EXTERNAL,
+};
+
 typeonly define gbp_subnet
 {
-  u32 table_id;
+  u32 rd_id;
   u32 sw_if_index;
   u16 epg_id;
-  u8  is_internal;
+  vl_api_gbp_subnet_type_t type;
   vl_api_prefix_t prefix;
 };
 
@@ -178,6 +271,70 @@
   vl_api_gbp_contract_t contract;
 };
 
+/**
+ * @brief Set the time threshold after which an endpoint is
+ *        considered inactive and is aged/reaped by the scanner
+ * @param threshold In seconds
+ */
+autoreply define gbp_endpoint_learn_set_inactive_threshold
+{
+  u32 client_index;
+  u32 context;
+  u32 threshold;
+};
+
+/**
+ * @brief Configure a 'base' tunnel from which learned tunnels
+ *        are permitted to derive.
+ *        A base tunnel consists only of the VNI; any src/dst IP
+ *        pair is thus allowed.
+ */
+enum gbp_vxlan_tunnel_mode
+{
+  GBP_VXLAN_TUNNEL_MODE_L2,
+  GBP_VXLAN_TUNNEL_MODE_L3,
+};
+
+typedef gbp_vxlan_tunnel
+{
+  u32 vni;
+  vl_api_gbp_vxlan_tunnel_mode_t mode;
+  u32 bd_rd_id;
+};
+
+define gbp_vxlan_tunnel_add
+{
+  u32 client_index;
+  u32 context;
+  vl_api_gbp_vxlan_tunnel_t tunnel;
+};
+
+define gbp_vxlan_tunnel_add_reply
+{
+  u32 context;
+  i32 retval;
+  u32 sw_if_index;
+};
+
+autoreply define gbp_vxlan_tunnel_del
+{
+  u32 client_index;
+  u32 context;
+  u32 vni;
+};
+
+define gbp_vxlan_tunnel_dump
+{
+  u32 client_index;
+  u32 context;
+};
+
+define gbp_vxlan_tunnel_details
+{
+  u32 context;
+  vl_api_gbp_vxlan_tunnel_t tunnel;
+};
+
 /*
  * Local Variables:
  * eval: (c-set-style "gnu")
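
Note on the flags enum above: the values are bits, so REMOTE_LEARNT is simply
REMOTE | LEARNT. A sketch, assuming the generated C types, of how a client
fills the new tunnel fields when adding a remote endpoint (the helper name is
hypothetical; allocation of the message and the remaining fields are elided):

static void
gbp_endpoint_mark_remote_sketch (vl_api_gbp_endpoint_add_t * mp,
                                 const ip46_address_t * src,
                                 const ip46_address_t * dst)
{
  /* bit-wise, so REMOTE | LEARNT == REMOTE_LEARNT */
  mp->endpoint.flags = htonl (REMOTE | LEARNT);
  ip_address_encode (src, IP46_TYPE_ANY, &mp->endpoint.tun.src);
  ip_address_encode (dst, IP46_TYPE_ANY, &mp->endpoint.tun.dst);
}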
diff --git a/src/plugins/gbp/gbp_api.c b/src/plugins/gbp/gbp_api.c
index 6bd1abc..faf036e 100644
--- a/src/plugins/gbp/gbp_api.c
+++ b/src/plugins/gbp/gbp_api.c
@@ -25,6 +25,11 @@
 #include <vpp/app/version.h>
 
 #include <gbp/gbp.h>
+#include <gbp/gbp_learn.h>
+#include <gbp/gbp_itf.h>
+#include <gbp/gbp_vxlan.h>
+#include <gbp/gbp_bridge_domain.h>
+#include <gbp/gbp_route_domain.h>
 
 #include <vlibapi/api.h>
 #include <vlibmemory/api.h>
@@ -59,12 +64,23 @@
   _(GBP_ENDPOINT_DUMP, gbp_endpoint_dump)                   \
   _(GBP_SUBNET_ADD_DEL, gbp_subnet_add_del)                 \
   _(GBP_SUBNET_DUMP, gbp_subnet_dump)                       \
-  _(GBP_ENDPOINT_GROUP_ADD_DEL, gbp_endpoint_group_add_del) \
+  _(GBP_ENDPOINT_GROUP_ADD, gbp_endpoint_group_add)         \
+  _(GBP_ENDPOINT_GROUP_DEL, gbp_endpoint_group_del)         \
   _(GBP_ENDPOINT_GROUP_DUMP, gbp_endpoint_group_dump)       \
+  _(GBP_BRIDGE_DOMAIN_ADD, gbp_bridge_domain_add)           \
+  _(GBP_BRIDGE_DOMAIN_DEL, gbp_bridge_domain_del)           \
+  _(GBP_BRIDGE_DOMAIN_DUMP, gbp_bridge_domain_dump)         \
+  _(GBP_ROUTE_DOMAIN_ADD, gbp_route_domain_add)             \
+  _(GBP_ROUTE_DOMAIN_DEL, gbp_route_domain_del)             \
+  _(GBP_ROUTE_DOMAIN_DUMP, gbp_route_domain_dump)           \
   _(GBP_RECIRC_ADD_DEL, gbp_recirc_add_del)                 \
   _(GBP_RECIRC_DUMP, gbp_recirc_dump)                       \
   _(GBP_CONTRACT_ADD_DEL, gbp_contract_add_del)             \
-  _(GBP_CONTRACT_DUMP, gbp_contract_dump)
+  _(GBP_CONTRACT_DUMP, gbp_contract_dump)                   \
+  _(GBP_ENDPOINT_LEARN_SET_INACTIVE_THRESHOLD, gbp_endpoint_learn_set_inactive_threshold) \
+  _(GBP_VXLAN_TUNNEL_ADD, gbp_vxlan_tunnel_add)                         \
+  _(GBP_VXLAN_TUNNEL_DEL, gbp_vxlan_tunnel_del)                         \
+  _(GBP_VXLAN_TUNNEL_DUMP, gbp_vxlan_tunnel_dump)
 
 gbp_main_t gbp_main;
 
@@ -72,10 +88,46 @@
 
 #define GBP_MSG_BASE msg_id_base
 
+static gbp_endpoint_flags_t
+gbp_endpoint_flags_decode (vl_api_gbp_endpoint_flags_t v)
+{
+  gbp_endpoint_flags_t f = GBP_ENDPOINT_FLAG_NONE;
+
+  v = ntohl (v);
+
+  if (v & BOUNCE)
+    f |= GBP_ENDPOINT_FLAG_BOUNCE;
+  if (v & REMOTE)
+    f |= GBP_ENDPOINT_FLAG_REMOTE;
+  if (v & LEARNT)
+    f |= GBP_ENDPOINT_FLAG_LEARNT;
+
+  return (f);
+}
+
+static vl_api_gbp_endpoint_flags_t
+gbp_endpoint_flags_encode (gbp_endpoint_flags_t f)
+{
+  vl_api_gbp_endpoint_flags_t v = 0;
+
+  if (f & GBP_ENDPOINT_FLAG_BOUNCE)
+    v |= BOUNCE;
+  if (f & GBP_ENDPOINT_FLAG_REMOTE)
+    v |= REMOTE;
+  if (f & GBP_ENDPOINT_FLAG_LEARNT)
+    v |= LEARNT;
+
+  v = htonl (v);
+
+  return (v);
+}
+
 static void
 vl_api_gbp_endpoint_add_t_handler (vl_api_gbp_endpoint_add_t * mp)
 {
   vl_api_gbp_endpoint_add_reply_t *rmp;
+  gbp_endpoint_flags_t gef;
   u32 sw_if_index, handle;
   ip46_address_t *ips;
   mac_address_t mac;
@@ -83,10 +135,9 @@
 
   VALIDATE_SW_IF_INDEX (&(mp->endpoint));
 
+  gef = gbp_endpoint_flags_decode (mp->endpoint.flags);
+  ips = NULL;
   sw_if_index = ntohl (mp->endpoint.sw_if_index);
 
-  ips = NULL;
-
   if (mp->endpoint.n_ips)
     {
       vec_validate (ips, mp->endpoint.n_ips - 1);
@@ -98,11 +149,23 @@
     }
   mac_address_decode (&mp->endpoint.mac, &mac);
 
-  rv = gbp_endpoint_update (sw_if_index, ips, &mac,
-			    ntohs (mp->endpoint.epg_id), &handle);
+  if (GBP_ENDPOINT_FLAG_REMOTE & gef)
+    {
+      ip46_address_t tun_src, tun_dst;
 
-  vec_free (ips);
+      ip_address_decode (&mp->endpoint.tun.src, &tun_src);
+      ip_address_decode (&mp->endpoint.tun.dst, &tun_dst);
 
+      rv = gbp_endpoint_update (sw_if_index, ips, &mac,
+				ntohs (mp->endpoint.epg_id),
+				gef, &tun_src, &tun_dst, &handle);
+    }
+  else
+    {
+      rv = gbp_endpoint_update (sw_if_index, ips, &mac,
+				ntohs (mp->endpoint.epg_id),
+				gef, NULL, NULL, &handle);
+    }
   BAD_SW_IF_INDEX_LABEL;
 
   /* *INDENT-OFF* */
@@ -124,6 +187,19 @@
   REPLY_MACRO (VL_API_GBP_ENDPOINT_DEL_REPLY + GBP_MSG_BASE);
 }
 
+static void
+  vl_api_gbp_endpoint_learn_set_inactive_threshold_t_handler
+  (vl_api_gbp_endpoint_learn_set_inactive_threshold_t * mp)
+{
+  vl_api_gbp_endpoint_learn_set_inactive_threshold_reply_t *rmp;
+  int rv = 0;
+
+  gbp_learn_set_inactive_threshold (ntohl (mp->threshold));
+
+  REPLY_MACRO (VL_API_GBP_ENDPOINT_LEARN_SET_INACTIVE_THRESHOLD_REPLY +
+	       GBP_MSG_BASE);
+}
+
 typedef struct gbp_walk_ctx_t_
 {
   vl_api_registration_t *reg;
@@ -131,14 +207,17 @@
 } gbp_walk_ctx_t;
 
 static walk_rc_t
-gbp_endpoint_send_details (gbp_endpoint_t * gbpe, void *args)
+gbp_endpoint_send_details (index_t gei, void *args)
 {
   vl_api_gbp_endpoint_details_t *mp;
+  gbp_endpoint_t *ge;
   gbp_walk_ctx_t *ctx;
   u8 n_ips, ii;
 
   ctx = args;
-  n_ips = vec_len (gbpe->ge_ips);
+  ge = gbp_endpoint_get (gei);
+
+  n_ips = vec_len (ge->ge_ips);
   mp = vl_msg_api_alloc (sizeof (*mp) + (sizeof (*mp->endpoint.ips) * n_ips));
   if (!mp)
     return 1;
@@ -147,15 +226,28 @@
   mp->_vl_msg_id = ntohs (VL_API_GBP_ENDPOINT_DETAILS + GBP_MSG_BASE);
   mp->context = ctx->context;
 
-  mp->endpoint.sw_if_index = ntohl (gbpe->ge_sw_if_index);
-  mp->endpoint.epg_id = ntohs (gbpe->ge_epg_id);
+  if (gbp_endpoint_is_remote (ge))
+    {
+      mp->endpoint.sw_if_index = ntohl (ge->tun.ge_parent_sw_if_index);
+      ip_address_encode (&ge->tun.ge_src, IP46_TYPE_ANY,
+			 &mp->endpoint.tun.src);
+      ip_address_encode (&ge->tun.ge_dst, IP46_TYPE_ANY,
+			 &mp->endpoint.tun.dst);
+    }
+  else
+    {
+      mp->endpoint.sw_if_index = ntohl (ge->ge_sw_if_index);
+    }
+  mp->endpoint.epg_id = ntohs (ge->ge_epg_id);
   mp->endpoint.n_ips = n_ips;
-  mac_address_encode (&gbpe->ge_mac, &mp->endpoint.mac);
+  mp->endpoint.flags = gbp_endpoint_flags_encode (ge->ge_flags);
+  mp->handle = htonl (gei);
+  mp->age = vlib_time_now (vlib_get_main ()) - ge->ge_last_time;
+  mac_address_encode (&ge->ge_mac, &mp->endpoint.mac);
 
-  vec_foreach_index (ii, gbpe->ge_ips)
+  vec_foreach_index (ii, ge->ge_ips)
   {
-    ip_address_encode (&gbpe->ge_ips[ii], IP46_TYPE_ANY,
-		       &mp->endpoint.ips[ii]);
+    ip_address_encode (&ge->ge_ips[ii], IP46_TYPE_ANY, &mp->endpoint.ips[ii]);
   }
 
   vl_api_send_msg (ctx->reg, (u8 *) mp);
@@ -181,58 +273,158 @@
 }
 
 static void
-  vl_api_gbp_endpoint_group_add_del_t_handler
-  (vl_api_gbp_endpoint_group_add_del_t * mp)
+  vl_api_gbp_endpoint_group_add_t_handler
+  (vl_api_gbp_endpoint_group_add_t * mp)
 {
-  vl_api_gbp_endpoint_group_add_del_reply_t *rmp;
-  u32 uplink_sw_if_index;
+  vl_api_gbp_endpoint_group_add_reply_t *rmp;
   int rv = 0;
 
-  uplink_sw_if_index = ntohl (mp->epg.uplink_sw_if_index);
-  if (!vnet_sw_if_index_is_api_valid (uplink_sw_if_index))
-    goto bad_sw_if_index;
+  rv = gbp_endpoint_group_add_and_lock (ntohs (mp->epg.epg_id),
+					ntohl (mp->epg.bd_id),
+					ntohl (mp->epg.rd_id),
+					ntohl (mp->epg.uplink_sw_if_index));
 
-  if (mp->is_add)
+  REPLY_MACRO (VL_API_GBP_ENDPOINT_GROUP_ADD_REPLY + GBP_MSG_BASE);
+}
+
+static void
+  vl_api_gbp_endpoint_group_del_t_handler
+  (vl_api_gbp_endpoint_group_del_t * mp)
+{
+  vl_api_gbp_endpoint_group_del_reply_t *rmp;
+  int rv = 0;
+
+  rv = gbp_endpoint_group_delete (ntohs (mp->epg_id));
+
+  REPLY_MACRO (VL_API_GBP_ENDPOINT_GROUP_DEL_REPLY + GBP_MSG_BASE);
+}
+
+static void
+vl_api_gbp_bridge_domain_add_t_handler (vl_api_gbp_bridge_domain_add_t * mp)
+{
+  vl_api_gbp_bridge_domain_add_reply_t *rmp;
+  int rv = 0;
+
+  rv = gbp_bridge_domain_add_and_lock (ntohl (mp->bd.bd_id),
+				       ntohl (mp->bd.bvi_sw_if_index),
+				       ntohl (mp->bd.uu_fwd_sw_if_index));
+
+  REPLY_MACRO (VL_API_GBP_BRIDGE_DOMAIN_ADD_REPLY + GBP_MSG_BASE);
+}
+
+static void
+vl_api_gbp_bridge_domain_del_t_handler (vl_api_gbp_bridge_domain_del_t * mp)
+{
+  vl_api_gbp_bridge_domain_del_reply_t *rmp;
+  int rv = 0;
+
+  rv = gbp_bridge_domain_delete (ntohl (mp->bd_id));
+
+  REPLY_MACRO (VL_API_GBP_BRIDGE_DOMAIN_DEL_REPLY + GBP_MSG_BASE);
+}
+
+static void
+vl_api_gbp_route_domain_add_t_handler (vl_api_gbp_route_domain_add_t * mp)
+{
+  vl_api_gbp_route_domain_add_reply_t *rmp;
+  int rv = 0;
+
+  rv = gbp_route_domain_add_and_lock (ntohl (mp->rd.rd_id),
+				      ntohl (mp->rd.ip4_table_id),
+				      ntohl (mp->rd.ip6_table_id),
+				      ntohl (mp->rd.ip4_uu_sw_if_index),
+				      ntohl (mp->rd.ip6_uu_sw_if_index));
+
+  REPLY_MACRO (VL_API_GBP_ROUTE_DOMAIN_ADD_REPLY + GBP_MSG_BASE);
+}
+
+static void
+vl_api_gbp_route_domain_del_t_handler (vl_api_gbp_route_domain_del_t * mp)
+{
+  vl_api_gbp_route_domain_del_reply_t *rmp;
+  int rv = 0;
+
+  rv = gbp_route_domain_delete (ntohl (mp->rd_id));
+
+  REPLY_MACRO (VL_API_GBP_ROUTE_DOMAIN_DEL_REPLY + GBP_MSG_BASE);
+}
+
+static int
+gbp_subnet_type_from_api (vl_api_gbp_subnet_type_t a, gbp_subnet_type_t * t)
+{
+  a = clib_net_to_host_u32 (a);
+
+  switch (a)
     {
-      rv = gbp_endpoint_group_add (ntohs (mp->epg.epg_id),
-				   ntohl (mp->epg.bd_id),
-				   ntohl (mp->epg.ip4_table_id),
-				   ntohl (mp->epg.ip6_table_id),
-				   uplink_sw_if_index);
-    }
-  else
-    {
-      gbp_endpoint_group_delete (ntohs (mp->epg.epg_id));
+    case GBP_API_SUBNET_TRANSPORT:
+      *t = GBP_SUBNET_TRANSPORT;
+      return (0);
+    case GBP_API_SUBNET_STITCHED_INTERNAL:
+      *t = GBP_SUBNET_STITCHED_INTERNAL;
+      return (0);
+    case GBP_API_SUBNET_STITCHED_EXTERNAL:
+      *t = GBP_SUBNET_STITCHED_EXTERNAL;
+      return (0);
     }
 
-  BAD_SW_IF_INDEX_LABEL;
-
-  REPLY_MACRO (VL_API_GBP_ENDPOINT_GROUP_ADD_DEL_REPLY + GBP_MSG_BASE);
+  return (-1);
 }
 
 static void
 vl_api_gbp_subnet_add_del_t_handler (vl_api_gbp_subnet_add_del_t * mp)
 {
   vl_api_gbp_subnet_add_del_reply_t *rmp;
+  gbp_subnet_type_t type;
   fib_prefix_t pfx;
   int rv = 0;
 
   ip_prefix_decode (&mp->subnet.prefix, &pfx);
 
-  rv = gbp_subnet_add_del (ntohl (mp->subnet.table_id),
-			   &pfx,
-			   ntohl (mp->subnet.sw_if_index),
-			   ntohs (mp->subnet.epg_id),
-			   mp->is_add, mp->subnet.is_internal);
+  rv = gbp_subnet_type_from_api (mp->subnet.type, &type);
 
+  if (0 != rv)
+    goto out;
+
+  if (mp->is_add)
+    rv = gbp_subnet_add (ntohl (mp->subnet.rd_id),
+			 &pfx, type,
+			 ntohl (mp->subnet.sw_if_index),
+			 ntohs (mp->subnet.epg_id));
+  else
+    rv = gbp_subnet_del (ntohl (mp->subnet.rd_id), &pfx);
+
+out:
   REPLY_MACRO (VL_API_GBP_SUBNET_ADD_DEL_REPLY + GBP_MSG_BASE);
 }
 
-static int
-gbp_subnet_send_details (u32 table_id,
+static vl_api_gbp_subnet_type_t
+gbp_subnet_type_to_api (gbp_subnet_type_t t)
+{
+  vl_api_gbp_subnet_type_t a = 0;
+
+  switch (t)
+    {
+    case GBP_SUBNET_TRANSPORT:
+      a = GBP_API_SUBNET_TRANSPORT;
+      break;
+    case GBP_SUBNET_STITCHED_INTERNAL:
+      a = GBP_API_SUBNET_STITCHED_INTERNAL;
+      break;
+    case GBP_SUBNET_STITCHED_EXTERNAL:
+      a = GBP_API_SUBNET_STITCHED_EXTERNAL;
+      break;
+    }
+
+  a = clib_host_to_net_u32 (a);
+
+  return (a);
+}
+
+static walk_rc_t
+gbp_subnet_send_details (u32 rd_id,
 			 const fib_prefix_t * pfx,
-			 u32 sw_if_index,
-			 epg_id_t epg, u8 is_internal, void *args)
+			 gbp_subnet_type_t type,
+			 u32 sw_if_index, epg_id_t epg, void *args)
 {
   vl_api_gbp_subnet_details_t *mp;
   gbp_walk_ctx_t *ctx;
@@ -246,15 +438,15 @@
   mp->_vl_msg_id = ntohs (VL_API_GBP_SUBNET_DETAILS + GBP_MSG_BASE);
   mp->context = ctx->context;
 
-  mp->subnet.is_internal = is_internal;
+  mp->subnet.type = gbp_subnet_type_to_api (type);
   mp->subnet.sw_if_index = ntohl (sw_if_index);
   mp->subnet.epg_id = ntohs (epg);
-  mp->subnet.table_id = ntohl (table_id);
+  mp->subnet.rd_id = ntohl (rd_id);
   ip_prefix_encode (pfx, &mp->subnet.prefix);
 
   vl_api_send_msg (ctx->reg, (u8 *) mp);
 
-  return (1);
+  return (WALK_CONTINUE);
 }
 
 static void
@@ -275,7 +467,7 @@
 }
 
 static int
-gbp_endpoint_group_send_details (gbp_endpoint_group_t * gepg, void *args)
+gbp_endpoint_group_send_details (gbp_endpoint_group_t * gg, void *args)
 {
   vl_api_gbp_endpoint_group_details_t *mp;
   gbp_walk_ctx_t *ctx;
@@ -289,11 +481,10 @@
   mp->_vl_msg_id = ntohs (VL_API_GBP_ENDPOINT_GROUP_DETAILS + GBP_MSG_BASE);
   mp->context = ctx->context;
 
-  mp->epg.uplink_sw_if_index = ntohl (gepg->gepg_uplink_sw_if_index);
-  mp->epg.epg_id = ntohs (gepg->gepg_id);
-  mp->epg.bd_id = ntohl (gepg->gepg_bd);
-  mp->epg.ip4_table_id = ntohl (gepg->gepg_rd[FIB_PROTOCOL_IP4]);
-  mp->epg.ip6_table_id = ntohl (gepg->gepg_rd[FIB_PROTOCOL_IP6]);
+  mp->epg.uplink_sw_if_index = ntohl (gg->gg_uplink_sw_if_index);
+  mp->epg.epg_id = ntohs (gg->gg_id);
+  mp->epg.bd_id = ntohl (gbp_endpoint_group_get_bd_id (gg));
+  mp->epg.rd_id = ntohl (gg->gg_rd);
 
   vl_api_send_msg (ctx->reg, (u8 *) mp);
 
@@ -318,6 +509,90 @@
   gbp_endpoint_group_walk (gbp_endpoint_group_send_details, &ctx);
 }
 
+static int
+gbp_bridge_domain_send_details (gbp_bridge_domain_t * gb, void *args)
+{
+  vl_api_gbp_bridge_domain_details_t *mp;
+  gbp_walk_ctx_t *ctx;
+
+  ctx = args;
+  mp = vl_msg_api_alloc (sizeof (*mp));
+  if (!mp)
+    return 1;
+
+  memset (mp, 0, sizeof (*mp));
+  mp->_vl_msg_id = ntohs (VL_API_GBP_BRIDGE_DOMAIN_DETAILS + GBP_MSG_BASE);
+  mp->context = ctx->context;
+
+  mp->bd.bd_id = ntohl (gb->gb_bd_id);
+  mp->bd.bvi_sw_if_index = ntohl (gb->gb_bvi_sw_if_index);
+  mp->bd.uu_fwd_sw_if_index = ntohl (gb->gb_uu_fwd_sw_if_index);
+
+  vl_api_send_msg (ctx->reg, (u8 *) mp);
+
+  return (1);
+}
+
+static void
+vl_api_gbp_bridge_domain_dump_t_handler (vl_api_gbp_bridge_domain_dump_t * mp)
+{
+  vl_api_registration_t *reg;
+
+  reg = vl_api_client_index_to_registration (mp->client_index);
+  if (!reg)
+    return;
+
+  gbp_walk_ctx_t ctx = {
+    .reg = reg,
+    .context = mp->context,
+  };
+
+  gbp_bridge_domain_walk (gbp_bridge_domain_send_details, &ctx);
+}
+
+static int
+gbp_route_domain_send_details (gbp_route_domain_t * grd, void *args)
+{
+  vl_api_gbp_route_domain_details_t *mp;
+  gbp_walk_ctx_t *ctx;
+
+  ctx = args;
+  mp = vl_msg_api_alloc (sizeof (*mp));
+  if (!mp)
+    return 1;
+
+  memset (mp, 0, sizeof (*mp));
+  mp->_vl_msg_id = ntohs (VL_API_GBP_ROUTE_DOMAIN_DETAILS + GBP_MSG_BASE);
+  mp->context = ctx->context;
+
+  mp->rd.rd_id = ntohl (grd->grd_id);
+  mp->rd.ip4_uu_sw_if_index =
+    ntohl (grd->grd_uu_sw_if_index[FIB_PROTOCOL_IP4]);
+  mp->rd.ip6_uu_sw_if_index =
+    ntohl (grd->grd_uu_sw_if_index[FIB_PROTOCOL_IP6]);
+
+  vl_api_send_msg (ctx->reg, (u8 *) mp);
+
+  return (1);
+}
+
+static void
+vl_api_gbp_route_domain_dump_t_handler (vl_api_gbp_route_domain_dump_t * mp)
+{
+  vl_api_registration_t *reg;
+
+  reg = vl_api_client_index_to_registration (mp->client_index);
+  if (!reg)
+    return;
+
+  gbp_walk_ctx_t ctx = {
+    .reg = reg,
+    .context = mp->context,
+  };
+
+  gbp_route_domain_walk (gbp_route_domain_send_details, &ctx);
+}
+
 static void
 vl_api_gbp_recirc_add_del_t_handler (vl_api_gbp_recirc_add_del_t * mp)
 {
@@ -439,6 +714,121 @@
   gbp_contract_walk (gbp_contract_send_details, &ctx);
 }
 
+static int
+gbp_vxlan_tunnel_mode_2_layer (vl_api_gbp_vxlan_tunnel_mode_t mode,
+			       gbp_vxlan_tunnel_layer_t * l)
+{
+  mode = clib_net_to_host_u32 (mode);
+
+  switch (mode)
+    {
+    case GBP_VXLAN_TUNNEL_MODE_L2:
+      *l = GBP_VXLAN_TUN_L2;
+      return (0);
+    case GBP_VXLAN_TUNNEL_MODE_L3:
+      *l = GBP_VXLAN_TUN_L3;
+      return (0);
+    }
+  return (-1);
+}
+
+static void
+vl_api_gbp_vxlan_tunnel_add_t_handler (vl_api_gbp_vxlan_tunnel_add_t * mp)
+{
+  vl_api_gbp_vxlan_tunnel_add_reply_t *rmp;
+  gbp_vxlan_tunnel_layer_t layer;
+  u32 sw_if_index;
+  int rv = 0;
+
+  rv = gbp_vxlan_tunnel_mode_2_layer (mp->tunnel.mode, &layer);
+
+  if (0 != rv)
+    goto out;
+
+  rv = gbp_vxlan_tunnel_add (ntohl (mp->tunnel.vni),
+			     layer,
+			     ntohl (mp->tunnel.bd_rd_id), &sw_if_index);
+
+out:
+  /* *INDENT-OFF* */
+  REPLY_MACRO2 (VL_API_GBP_VXLAN_TUNNEL_ADD_REPLY + GBP_MSG_BASE,
+  ({
+    rmp->sw_if_index = htonl (sw_if_index);
+  }));
+  /* *INDENT-ON* */
+}
+
+static void
+vl_api_gbp_vxlan_tunnel_del_t_handler (vl_api_gbp_vxlan_tunnel_del_t * mp)
+{
+  vl_api_gbp_vxlan_tunnel_del_reply_t *rmp;
+  int rv = 0;
+
+  rv = gbp_vxlan_tunnel_del (ntohl (mp->vni));
+
+  REPLY_MACRO (VL_API_GBP_VXLAN_TUNNEL_DEL_REPLY + GBP_MSG_BASE);
+}
+
+static vl_api_gbp_vxlan_tunnel_mode_t
+gbp_vxlan_tunnel_layer_2_mode (gbp_vxlan_tunnel_layer_t layer)
+{
+  vl_api_gbp_vxlan_tunnel_mode_t mode = GBP_VXLAN_TUNNEL_MODE_L2;
+
+  switch (layer)
+    {
+    case GBP_VXLAN_TUN_L2:
+      mode = GBP_VXLAN_TUNNEL_MODE_L2;
+      break;
+    case GBP_VXLAN_TUN_L3:
+      mode = GBP_VXLAN_TUNNEL_MODE_L3;
+      break;
+    }
+  mode = clib_host_to_net_u32 (mode);
+
+  return (mode);
+}
+
+static walk_rc_t
+gbp_vxlan_tunnel_send_details (gbp_vxlan_tunnel_t * gt, void *args)
+{
+  vl_api_gbp_vxlan_tunnel_details_t *mp;
+  gbp_walk_ctx_t *ctx;
+
+  ctx = args;
+  mp = vl_msg_api_alloc (sizeof (*mp));
+  if (!mp)
+    return 1;
+
+  memset (mp, 0, sizeof (*mp));
+  mp->_vl_msg_id = htons (VL_API_GBP_VXLAN_TUNNEL_DETAILS + GBP_MSG_BASE);
+  mp->context = ctx->context;
+
+  mp->tunnel.vni = htonl (gt->gt_vni);
+  mp->tunnel.mode = gbp_vxlan_tunnel_layer_2_mode (gt->gt_layer);
+  mp->tunnel.bd_rd_id = htonl (gt->gt_bd_rd_id);
+
+  vl_api_send_msg (ctx->reg, (u8 *) mp);
+
+  return (1);
+}
+
+static void
+vl_api_gbp_vxlan_tunnel_dump_t_handler (vl_api_gbp_vxlan_tunnel_dump_t * mp)
+{
+  vl_api_registration_t *reg;
+
+  reg = vl_api_client_index_to_registration (mp->client_index);
+  if (!reg)
+    return;
+
+  gbp_walk_ctx_t ctx = {
+    .reg = reg,
+    .context = mp->context,
+  };
+
+  gbp_vxlan_walk (gbp_vxlan_tunnel_send_details, &ctx);
+}
+
 /*
  * gbp_api_hookup
  * Add vpe's API message handlers to the table.
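
Each of the new dump handlers above streams one details message per object. A
client-side sketch (handler registration elided; the name follows the usual
VAT convention) that prints the VXLAN tunnel details:

static void
vl_api_gbp_vxlan_tunnel_details_t_handler
  (vl_api_gbp_vxlan_tunnel_details_t * mp)
{
  u32 mode = ntohl (mp->tunnel.mode);

  fformat (stdout, "vni:%d mode:%s bd/rd:%d\n",
           ntohl (mp->tunnel.vni),
           (GBP_VXLAN_TUNNEL_MODE_L2 == mode ? "l2" : "l3"),
           ntohl (mp->tunnel.bd_rd_id));
}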
diff --git a/src/plugins/gbp/gbp_bridge_domain.c b/src/plugins/gbp/gbp_bridge_domain.c
new file mode 100644
index 0000000..b7812eb
--- /dev/null
+++ b/src/plugins/gbp/gbp_bridge_domain.c
@@ -0,0 +1,368 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <plugins/gbp/gbp_bridge_domain.h>
+#include <plugins/gbp/gbp_endpoint.h>
+
+#include <vnet/dpo/dvr_dpo.h>
+#include <vnet/fib/fib_table.h>
+#include <vnet/l2/l2_input.h>
+#include <vnet/l2/feat_bitmap.h>
+#include <vnet/l2/l2_bvi.h>
+#include <vnet/l2/l2_fib.h>
+
+/**
+ * Pool of GBP bridge_domains
+ */
+gbp_bridge_domain_t *gbp_bridge_domain_pool;
+
+/**
+ * DB of bridge_domains
+ */
+typedef struct gbp_bridge_domain_db_t
+{
+  uword *gbd_by_bd_id;
+} gbp_bridge_domain_db_t;
+
+static gbp_bridge_domain_db_t gbp_bridge_domain_db;
+
+/**
+ * logger
+ */
+vlib_log_class_t gb_logger;
+
+#define GBP_BD_DBG(...)                           \
+    vlib_log_debug (gb_logger, __VA_ARGS__);
+
+gbp_bridge_domain_t *
+gbp_bridge_domain_get (index_t i)
+{
+  return (pool_elt_at_index (gbp_bridge_domain_pool, i));
+}
+
+static void
+gbp_bridge_domain_lock (index_t i)
+{
+  gbp_bridge_domain_t *gb;
+
+  gb = gbp_bridge_domain_get (i);
+  gb->gb_locks++;
+}
+
+static index_t
+gbp_bridge_domain_find (u32 bd_id)
+{
+  uword *p;
+
+  p = hash_get (gbp_bridge_domain_db.gbd_by_bd_id, bd_id);
+
+  if (NULL != p)
+    return p[0];
+
+  return (INDEX_INVALID);
+}
+
+index_t
+gbp_bridge_domain_find_and_lock (u32 bd_id)
+{
+  uword *p;
+
+  p = hash_get (gbp_bridge_domain_db.gbd_by_bd_id, bd_id);
+
+  if (NULL != p)
+    {
+      gbp_bridge_domain_lock (p[0]);
+      return p[0];
+    }
+  return (INDEX_INVALID);
+}
+
+static void
+gbp_bridge_domain_db_add (gbp_bridge_domain_t * gb)
+{
+  index_t gbi = gb - gbp_bridge_domain_pool;
+
+  hash_set (gbp_bridge_domain_db.gbd_by_bd_id, gb->gb_bd_id, gbi);
+}
+
+static void
+gbp_bridge_domain_db_remove (gbp_bridge_domain_t * gb)
+{
+  hash_unset (gbp_bridge_domain_db.gbd_by_bd_id, gb->gb_bd_id);
+}
+
+int
+gbp_bridge_domain_add_and_lock (u32 bd_id,
+				u32 bvi_sw_if_index, u32 uu_fwd_sw_if_index)
+{
+  gbp_bridge_domain_t *gb;
+  index_t gbi;
+
+  gbi = gbp_bridge_domain_find (bd_id);
+
+  if (INDEX_INVALID == gbi)
+    {
+      u32 bd_index;
+
+      bd_index = bd_find_index (&bd_main, bd_id);
+
+      if (~0 == bd_index)
+	return (VNET_API_ERROR_BD_NOT_MODIFIABLE);
+
+      /*
+       * unset learning in the bridge
+       */
+      bd_set_flags (vlib_get_main (), bd_index, L2_LEARN, 0);
+
+      pool_get (gbp_bridge_domain_pool, gb);
+      memset (gb, 0, sizeof (*gb));
+
+      gb->gb_bd_id = bd_id;
+      gb->gb_bd_index = bd_index;
+      gb->gb_uu_fwd_sw_if_index = uu_fwd_sw_if_index;
+      gb->gb_bvi_sw_if_index = bvi_sw_if_index;
+      gb->gb_locks = 1;
+
+      /*
+       * Set the BVI and uu-flood interfaces into the BD
+       */
+      set_int_l2_mode (vlib_get_main (), vnet_get_main (),
+		       MODE_L2_BRIDGE, gb->gb_bvi_sw_if_index,
+		       bd_index, L2_BD_PORT_TYPE_BVI, 0, 0);
+      if (~0 != gb->gb_uu_fwd_sw_if_index)
+	set_int_l2_mode (vlib_get_main (), vnet_get_main (),
+			 MODE_L2_BRIDGE, gb->gb_uu_fwd_sw_if_index,
+			 bd_index, L2_BD_PORT_TYPE_UU_FWD, 0, 0);
+
+      /*
+       * Add the BVI's MAC to the L2FIB
+       */
+      l2fib_add_entry (vnet_sw_interface_get_hw_address
+		       (vnet_get_main (), gb->gb_bvi_sw_if_index),
+		       gb->gb_bd_index, gb->gb_bvi_sw_if_index,
+		       (L2FIB_ENTRY_RESULT_FLAG_STATIC |
+			L2FIB_ENTRY_RESULT_FLAG_BVI));
+
+      gbp_bridge_domain_db_add (gb);
+    }
+  else
+    {
+      gb = gbp_bridge_domain_get (gbi);
+      gb->gb_locks++;
+    }
+
+  GBP_BD_DBG ("add: %U", format_gbp_bridge_domain, gb);
+
+  return (0);
+}
+
+void
+gbp_bridge_domain_unlock (index_t index)
+{
+  gbp_bridge_domain_t *gb;
+
+  gb = gbp_bridge_domain_get (index);
+
+  gb->gb_locks--;
+
+  if (0 == gb->gb_locks)
+    {
+      GBP_BD_DBG ("destroy: %U", format_gbp_bridge_domain, gb);
+
+      l2fib_del_entry (vnet_sw_interface_get_hw_address
+		       (vnet_get_main (), gb->gb_bvi_sw_if_index),
+		       gb->gb_bd_index, gb->gb_bvi_sw_if_index);
+
+      set_int_l2_mode (vlib_get_main (), vnet_get_main (),
+		       MODE_L3, gb->gb_bvi_sw_if_index,
+		       gb->gb_bd_index, L2_BD_PORT_TYPE_BVI, 0, 0);
+      if (~0 != gb->gb_uu_fwd_sw_if_index)
+	set_int_l2_mode (vlib_get_main (), vnet_get_main (),
+			 MODE_L3, gb->gb_uu_fwd_sw_if_index,
+			 gb->gb_bd_index, L2_BD_PORT_TYPE_UU_FWD, 0, 0);
+
+      gbp_bridge_domain_db_remove (gb);
+
+      pool_put (gbp_bridge_domain_pool, gb);
+    }
+}
+
+int
+gbp_bridge_domain_delete (u32 bd_id)
+{
+  index_t gbi;
+
+  GBP_BD_DBG ("del: %d", bd_id);
+  gbi = gbp_bridge_domain_find (bd_id);
+
+  if (INDEX_INVALID != gbi)
+    {
+      GBP_BD_DBG ("del: %U", format_gbp_bridge_domain,
+		  gbp_bridge_domain_get (gbi));
+      gbp_bridge_domain_unlock (gbi);
+
+      return (0);
+    }
+
+  return (VNET_API_ERROR_NO_SUCH_ENTRY);
+}
+
+void
+gbp_bridge_domain_walk (gbp_bridge_domain_cb_t cb, void *ctx)
+{
+  gbp_bridge_domain_t *gbpe;
+
+  /* *INDENT-OFF* */
+  pool_foreach(gbpe, gbp_bridge_domain_pool,
+  {
+    if (!cb(gbpe, ctx))
+      break;
+  });
+  /* *INDENT-ON* */
+}
+
+static clib_error_t *
+gbp_bridge_domain_cli (vlib_main_t * vm,
+		       unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+  vnet_main_t *vnm = vnet_get_main ();
+  u32 uu_fwd_sw_if_index = ~0;
+  u32 bvi_sw_if_index = ~0;
+  u32 bd_id = ~0;
+  u8 add = 1;
+
+  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (input, "bvi %U", unformat_vnet_sw_interface,
+		    vnm, &bvi_sw_if_index))
+	;
+      else if (unformat (input, "uu-flood %U", unformat_vnet_sw_interface,
+			 vnm, &uu_fwd_sw_if_index))
+	;
+      else if (unformat (input, "add"))
+	add = 1;
+      else if (unformat (input, "del"))
+	add = 0;
+      else if (unformat (input, "bd %d", &bd_id))
+	;
+      else
+	break;
+    }
+
+  if (~0 == bd_id)
+    return clib_error_return (0, "bridge-domain ID must be specified");
+
+  if (add)
+    {
+      if (~0 == bvi_sw_if_index)
+	return clib_error_return (0, "interface must be specified");
+
+      gbp_bridge_domain_add_and_lock (bd_id,
+				      bvi_sw_if_index, uu_fwd_sw_if_index);
+    }
+  else
+    gbp_bridge_domain_delete (bd_id);
+
+  return (NULL);
+}
+
+/*?
+ * Configure a GBP bridge-domain
+ *
+ * @cliexpar
+ * @cliexstart{gbp bridge-domain [del] bd <ID> bvi <interface> uu-flood <interface>}
+ * @cliexend
+ ?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (gbp_bridge_domain_cli_node, static) = {
+  .path = "gbp bridge-domain",
+  .short_help = "gbp bridge-domain [del] epg bd <ID> bvi <interface> uu-flood <interface>",
+  .function = gbp_bridge_domain_cli,
+};
+/* *INDENT-ON* */
+
+u8 *
+format_gbp_bridge_domain (u8 * s, va_list * args)
+{
+  gbp_bridge_domain_t *gb = va_arg (*args, gbp_bridge_domain_t*);
+  vnet_main_t *vnm = vnet_get_main ();
+
+  if (NULL != gb)
+    s = format (s, "[%d] bd:[%d,%d], bvi:%U uu-flood:%U locks:%d",
+                gb - gbp_bridge_domain_pool,
+                gb->gb_bd_id,
+                gb->gb_bd_index,
+                format_vnet_sw_if_index_name, vnm, gb->gb_bvi_sw_if_index,
+                format_vnet_sw_if_index_name, vnm, gb->gb_uu_fwd_sw_if_index,
+                gb->gb_locks);
+  else
+    s = format (s, "NULL");
+
+  return (s);
+}
+
+static int
+gbp_bridge_domain_show_one (gbp_bridge_domain_t *gb, void *ctx)
+{
+  vlib_main_t *vm;
+
+  vm = ctx;
+  vlib_cli_output (vm, "  %U", format_gbp_bridge_domain, gb);
+
+  return (1);
+}
+
+static clib_error_t *
+gbp_bridge_domain_show (vlib_main_t * vm,
+		   unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+  vlib_cli_output (vm, "Bridge-Domains:");
+  gbp_bridge_domain_walk (gbp_bridge_domain_show_one, vm);
+
+  return (NULL);
+}
+
+/*?
+ * Show Group Based Policy bridge-domains and derived information
+ *
+ * @cliexpar
+ * @cliexstart{show gbp bridge-domain}
+ * @cliexend
+ ?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (gbp_bridge_domain_show_node, static) = {
+  .path = "show gbp bridge-domain",
+  .short_help = "show gbp bridge-domain\n",
+  .function = gbp_bridge_domain_show,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+gbp_bridge_domain_init (vlib_main_t * vm)
+{
+  gb_logger = vlib_log_register_class ("gbp", "bd");
+
+  return (NULL);
+}
+
+VLIB_INIT_FUNCTION (gbp_bridge_domain_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
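
The GBP BD is reference-counted: find_and_lock/add_and_lock take a reference,
unlock releases it and destroys the object on the last release. A usage
sketch (the wrapper function is illustrative):

static int
gbp_bridge_domain_use_sketch (u32 bd_id)
{
  gbp_bridge_domain_t *gb;
  index_t gbi;

  gbi = gbp_bridge_domain_find_and_lock (bd_id);

  if (INDEX_INVALID == gbi)
    return (VNET_API_ERROR_NO_SUCH_ENTRY);

  gb = gbp_bridge_domain_get (gbi);
  /* ... safe to use gb->gb_bd_index, gb->gb_bvi_sw_if_index ... */

  gbp_bridge_domain_unlock (gbi);

  return (0);
}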
diff --git a/src/plugins/gbp/gbp_bridge_domain.h b/src/plugins/gbp/gbp_bridge_domain.h
new file mode 100644
index 0000000..992900b
--- /dev/null
+++ b/src/plugins/gbp/gbp_bridge_domain.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __GBP_BRIDGE_DOMAIN_H__
+#define __GBP_BRIDGE_DOMAIN_H__
+
+#include <plugins/gbp/gbp_types.h>
+
+#include <vnet/fib/fib_types.h>
+
+/**
+ * A bridge-domain representation.
+ * This is a standard bridge-domain plus all the attributes it must
+ * have to support the GBP model.
+ */
+typedef struct gbp_bridge_domain_t_
+{
+  /**
+   * Bridge-domain ID
+   */
+  u32 gb_bd_id;
+  u32 gb_bd_index;
+
+  /**
+   * The BD's BVI interface (obligatory)
+   */
+  u32 gb_bvi_sw_if_index;
+
+  /**
+   * The BD's MAC spine-proxy interface (optional)
+   */
+  u32 gb_uu_fwd_sw_if_index;
+
+  /**
+   * The BD's VNI interface on which packets from unknown endpoints
+   * arrive
+   */
+  u32 gb_vni_sw_if_index;
+
+  /**
+   * locks/references to the BD so it does not get deleted (from the API)
+   * whilst it is still being used
+   */
+  u32 gb_locks;
+} gbp_bridge_domain_t;
+
+extern int gbp_bridge_domain_add_and_lock (u32 bd_id,
+					   u32 bvi_sw_if_index,
+					   u32 uu_fwd_sw_if_index);
+extern void gbp_bridge_domain_unlock (index_t gbi);
+extern index_t gbp_bridge_domain_find_and_lock (u32 bd_id);
+extern int gbp_bridge_domain_delete (u32 bd_id);
+extern gbp_bridge_domain_t *gbp_bridge_domain_get (index_t i);
+
+typedef int (*gbp_bridge_domain_cb_t) (gbp_bridge_domain_t * gb, void *ctx);
+extern void gbp_bridge_domain_walk (gbp_bridge_domain_cb_t cb, void *ctx);
+
+extern u8 *format_gbp_bridge_domain (u8 * s, va_list * args);
+
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
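
A walk callback returns non-zero to continue and zero to stop (see the
`if (!cb (gbpe, ctx)) break;` in the walk implementation). A sketch that
counts BDs this way; the counting wrapper is illustrative:

static int
gbp_bridge_domain_count_one (gbp_bridge_domain_t * gb, void *ctx)
{
  u32 *count = ctx;

  (*count)++;

  return (1);                   /* non-zero: continue the walk */
}

static u32
gbp_bridge_domain_count (void)
{
  u32 count = 0;

  gbp_bridge_domain_walk (gbp_bridge_domain_count_one, &count);

  return (count);
}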
diff --git a/src/plugins/gbp/gbp_classify.c b/src/plugins/gbp/gbp_classify.c
index 3dc6699..fb57426 100644
--- a/src/plugins/gbp/gbp_classify.c
+++ b/src/plugins/gbp/gbp_classify.c
@@ -18,6 +18,8 @@
 #include <plugins/gbp/gbp.h>
 #include <vnet/l2/l2_input.h>
 #include <vnet/l2/feat_bitmap.h>
+#include <vnet/fib/fib_table.h>
+#include <vnet/vxlan-gbp/vxlan_gbp_packet.h>
 
 typedef enum gbp_src_classify_type_t_
 {
@@ -56,7 +58,7 @@
 gbp_classify_inline (vlib_main_t * vm,
 		     vlib_node_runtime_t * node,
 		     vlib_frame_t * frame,
-		     gbp_src_classify_type_t type, u8 is_l3)
+		     gbp_src_classify_type_t type, dpo_proto_t dproto)
 {
   gbp_src_classify_main_t *gscm = &gbp_src_classify_main;
   u32 n_left_from, *from, *to_next;
@@ -75,7 +77,7 @@
       while (n_left_from > 0 && n_left_to_next > 0)
 	{
 	  u32 next0, bi0, src_epg, sw_if_index0;
-	  const gbp_endpoint_t *gep0;
+	  const gbp_endpoint_t *ge0;
 	  vlib_buffer_t *b0;
 
 	  bi0 = from[0];
@@ -88,6 +90,7 @@
 	  b0 = vlib_get_buffer (vm, bi0);
 
 	  sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+	  vnet_buffer2 (b0)->gbp.flags = VXLAN_GBP_GPFLAGS_NONE;
 
 	  if (GBP_SRC_CLASSIFY_NULL == type)
 	    {
@@ -98,10 +101,46 @@
 	    }
 	  else
 	    {
-	      gep0 = gbp_endpoint_get_itf (sw_if_index0);
-	      src_epg = gep0->ge_epg_id;
-	      if (is_l3)
+	      if (DPO_PROTO_ETHERNET == dproto)
 		{
+		  const ethernet_header_t *h0;
+
+		  h0 = vlib_buffer_get_current (b0);
+		  next0 =
+		    vnet_l2_feature_next (b0, gscm->l2_input_feat_next[type],
+					  L2INPUT_FEAT_GBP_SRC_CLASSIFY);
+		  ge0 = gbp_endpoint_find_mac (h0->src_address,
+					       vnet_buffer (b0)->l2.bd_index);
+		}
+	      else if (DPO_PROTO_IP4 == dproto)
+		{
+		  const ip4_header_t *h0;
+
+		  h0 = vlib_buffer_get_current (b0);
+
+		  ge0 = gbp_endpoint_find_ip4
+		    (&h0->src_address,
+		     fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP4,
+							  sw_if_index0));
+
+		  /*
+		   * Go straight to lookup, do not pass go, do not collect $200
+		   */
+		  next0 = 0;
+		}
+	      else if (DPO_PROTO_IP6 == dproto)
+		{
+		  const ip6_header_t *h0;
+
+		  h0 = vlib_buffer_get_current (b0);
+
+		  ge0 = gbp_endpoint_find_ip6
+		    (&h0->src_address,
+		     fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP6,
+							  sw_if_index0));
+
 		  /*
 		   * Go straight to lookup, do not pass go, do not collect $200
 		   */
@@ -109,10 +148,15 @@
 		}
 	      else
 		{
-		  next0 =
-		    vnet_l2_feature_next (b0, gscm->l2_input_feat_next[type],
-					  L2INPUT_FEAT_GBP_SRC_CLASSIFY);
+		  ge0 = NULL;
+		  next0 = 0;
+		  ASSERT (0);
 		}
+
+	      if (PREDICT_TRUE (NULL != ge0))
+		src_epg = ge0->ge_epg_id;
+	      else
+		src_epg = EPG_INVALID;
 	    }
 
 	  vnet_buffer2 (b0)->gbp.src_epg = src_epg;
@@ -139,28 +183,32 @@
 gbp_src_classify (vlib_main_t * vm,
 		  vlib_node_runtime_t * node, vlib_frame_t * frame)
 {
-  return (gbp_classify_inline (vm, node, frame, GBP_SRC_CLASSIFY_PORT, 0));
+  return (gbp_classify_inline (vm, node, frame,
+			       GBP_SRC_CLASSIFY_PORT, DPO_PROTO_ETHERNET));
 }
 
 static uword
 gbp_null_classify (vlib_main_t * vm,
 		   vlib_node_runtime_t * node, vlib_frame_t * frame)
 {
-  return (gbp_classify_inline (vm, node, frame, GBP_SRC_CLASSIFY_NULL, 0));
+  return (gbp_classify_inline (vm, node, frame,
+			       GBP_SRC_CLASSIFY_NULL, DPO_PROTO_ETHERNET));
 }
 
 static uword
 gbp_ip4_src_classify (vlib_main_t * vm,
 		      vlib_node_runtime_t * node, vlib_frame_t * frame)
 {
-  return (gbp_classify_inline (vm, node, frame, 0, 1));
+  return (gbp_classify_inline (vm, node, frame,
+			       GBP_SRC_CLASSIFY_PORT, DPO_PROTO_IP4));
 }
 
 static uword
 gbp_ip6_src_classify (vlib_main_t * vm,
 		      vlib_node_runtime_t * node, vlib_frame_t * frame)
 {
-  return (gbp_classify_inline (vm, node, frame, 0, 1));
+  return (gbp_classify_inline (vm, node, frame,
+			       GBP_SRC_CLASSIFY_PORT, DPO_PROTO_IP6));
 }
 
 
diff --git a/src/plugins/gbp/gbp_endpoint.c b/src/plugins/gbp/gbp_endpoint.c
index a261527..79c140f 100644
--- a/src/plugins/gbp/gbp_endpoint.c
+++ b/src/plugins/gbp/gbp_endpoint.c
@@ -17,64 +17,79 @@
 
 #include <plugins/gbp/gbp_endpoint.h>
 #include <plugins/gbp/gbp_endpoint_group.h>
+#include <plugins/gbp/gbp_itf.h>
+#include <plugins/gbp/gbp_scanner.h>
+#include <plugins/gbp/gbp_bridge_domain.h>
+#include <plugins/gbp/gbp_route_domain.h>
+#include <plugins/gbp/gbp_policy_dpo.h>
+#include <plugins/gbp/gbp_vxlan.h>
 
-#include <vnet/ethernet/arp_packet.h>
+#include <vnet/ethernet/arp.h>
 #include <vnet/l2/l2_input.h>
 #include <vnet/l2/l2_output.h>
 #include <vnet/l2/feat_bitmap.h>
+#include <vnet/l2/l2_fib.h>
+#include <vnet/fib/fib_table.h>
+#include <vnet/ip/ip_neighbor.h>
 
-gbp_ep_by_itf_db_t gbp_ep_by_itf_db;
-gbp_ep_by_mac_itf_db_t gbp_ep_by_mac_itf_db;
-gbp_ep_by_ip_itf_db_t gbp_ep_by_ip_itf_db;
+static const char *gbp_endpoint_attr_names[] = GBP_ENDPOINT_ATTR_NAMES;
+
+/**
+ * EP DBs
+ */
+gbp_ep_db_t gbp_ep_db;
+
+vlib_log_class_t gbp_ep_logger;
+
+#define GBP_ENDPOINT_DBG(...)                           \
+    vlib_log_debug (gbp_ep_logger, __VA_ARGS__);
+
+#define GBP_ENDPOINT_INFO(...)                          \
+    vlib_log_notice (gbp_ep_logger, __VA_ARGS__);
+
+/**
+ * GBP Endpoint inactive timeout (in seconds)
+ * If a dynamically learned Endpoint has not been heard from in this
+ * amount of time it is considered inactive and discarded
+ */
+static u32 GBP_ENDPOINT_INACTIVE_TIME = 30;
 
 /**
  * Pool of GBP endpoints
  */
 gbp_endpoint_t *gbp_endpoint_pool;
 
-/* void */
-/* gbp_itf_epg_update (u32 sw_if_index, epg_id_t src_epg, u8 do_policy) */
-/* { */
-/*   vec_validate_init_empty (gbp_itf_to_epg_db.gte_vec, */
-/* 			   sw_if_index, ITF_INVALID); */
+/**
+ * A count of the number of dynamic entries
+ */
+static u32 gbp_n_learnt_endpoints;
 
-/*   if (0 == gbp_itf_to_epg_db.gte_vec[sw_if_index].gi_ref_count) */
-/*     { */
-/*       l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_GBP_SRC_CLASSIFY, */
-/* 				  1); */
-/*       l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_GBP_FWD, 1); */
-/*       if (do_policy) */
-/* 	l2output_intf_bitmap_enable (sw_if_index, L2OUTPUT_FEAT_GBP_POLICY, */
-/* 				     1); */
-/*     } */
-/*   gbp_itf_to_epg_db.gte_vec[sw_if_index].gi_epg = src_epg; */
-/*   gbp_itf_to_epg_db.gte_vec[sw_if_index].gi_ref_count++; */
-/* } */
+#define FOR_EACH_GBP_ENDPOINT_ATTR(_item)		\
+    for (_item = GBP_ENDPOINT_ATTR_FIRST;		\
+	 _item < GBP_ENDPOINT_ATTR_LAST;		\
+	 _item++)
 
-/* void */
-/* gbp_itf_epg_delete (u32 sw_if_index) */
-/* { */
-/*   if (vec_len (gbp_itf_to_epg_db.gte_vec) <= sw_if_index) */
-/*     return; */
-
-/*   if (1 == gbp_itf_to_epg_db.gte_vec[sw_if_index].gi_ref_count) */
-/*     { */
-/*       gbp_itf_to_epg_db.gte_vec[sw_if_index].gi_epg = EPG_INVALID; */
-
-/*       l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_GBP_SRC_CLASSIFY, */
-/* 				  0); */
-/*       l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_GBP_FWD, 0); */
-/*       l2output_intf_bitmap_enable (sw_if_index, L2OUTPUT_FEAT_GBP_POLICY, 0); */
-/*     } */
-/*   gbp_itf_to_epg_db.gte_vec[sw_if_index].gi_ref_count--; */
-/* } */
-
-static void
-gbp_endpoint_mk_key_mac_itf (const mac_address_t * mac,
-			     u32 sw_if_index, clib_bihash_kv_16_8_t * key)
+u8 *
+format_gbp_endpoint_flags (u8 * s, va_list * args)
 {
-  key->key[0] = mac_address_as_u64 (mac);
-  key->key[1] = sw_if_index;
+  gbp_endpoint_attr_t attr;
+  gbp_endpoint_flags_t flags = va_arg (*args, gbp_endpoint_flags_t);
+
+  FOR_EACH_GBP_ENDPOINT_ATTR (attr)
+  {
+    if ((1 << attr) & flags)
+      {
+	s = format (s, "%s,", gbp_endpoint_attr_names[attr]);
+      }
+  }
+
+  return (s);
+}
+
+int
+gbp_endpoint_is_remote (const gbp_endpoint_t * ge)
+{
+  return (ge->ge_flags & GBP_ENDPOINT_FLAG_REMOTE);
 }
 
 static void
@@ -85,32 +100,6 @@
   *sw_if_index = key->key[1];
 }
 
-gbp_endpoint_t *
-gbp_endpoint_find_mac_itf (const mac_address_t * mac, u32 sw_if_index)
-{
-  clib_bihash_kv_16_8_t key, value;
-  int rv;
-
-  gbp_endpoint_mk_key_mac_itf (mac, sw_if_index, &key);
-
-  rv =
-    clib_bihash_search_16_8 (&gbp_ep_by_mac_itf_db.gte_table, &key, &value);
-
-  if (0 != rv)
-    return NULL;
-
-  return (gbp_endpoint_get (value.value));
-}
-
-static void
-gbp_endpoint_mk_key_ip_itf (const ip46_address_t * ip,
-			    u32 sw_if_index, clib_bihash_kv_24_8_t * key)
-{
-  key->key[0] = ip->as_u64[0];
-  key->key[1] = ip->as_u64[1];
-  key->key[2] = sw_if_index;
-}
-
 static void
 gbp_endpoint_extract_key_ip_itf (const clib_bihash_kv_24_8_t * key,
 				 ip46_address_t * ip, u32 * sw_if_index)
@@ -121,14 +110,14 @@
 }
 
 gbp_endpoint_t *
-gbp_endpoint_find_ip_itf (const ip46_address_t * ip, u32 sw_if_index)
+gbp_endpoint_find_ip (const ip46_address_t * ip, u32 fib_index)
 {
   clib_bihash_kv_24_8_t key, value;
   int rv;
 
-  gbp_endpoint_mk_key_ip_itf (ip, sw_if_index, &key);
+  gbp_endpoint_mk_key_ip (ip, fib_index, &key);
 
-  rv = clib_bihash_search_24_8 (&gbp_ep_by_ip_itf_db.gte_table, &key, &value);
+  rv = clib_bihash_search_24_8 (&gbp_ep_db.ged_by_ip_rd, &key, &value);
 
   if (0 != rv)
     return NULL;
@@ -136,238 +125,552 @@
   return (gbp_endpoint_get (value.value));
 }
 
-gbp_endpoint_t *
-gbp_endpoint_find_itf (u32 sw_if_index)
+static void
+gbp_endpoint_add_itf (u32 sw_if_index, index_t gei)
 {
-  /* if (vec_len(gbp_ep_by_itf_db.gte_vec) >= sw_if_index) */
-  /*   return NULL; */
+  vec_validate_init_empty (gbp_ep_db.ged_by_sw_if_index, sw_if_index, ~0);
 
-  /* vec_search(gbp_ep_by_itf_db.gte_vec[sw_if_index],  */
-  /* return (gbp_endpoint_get(gbp_ep_by_itf_db.gte_vec[sw_if_index][0])); */
-  return (NULL);
+  gbp_ep_db.ged_by_sw_if_index[sw_if_index] = gei;
 }
 
 static bool
-gbp_endpoint_add_mac_itf (const mac_address_t * mac,
-			  u32 sw_if_index, index_t gbpei)
+gbp_endpoint_add_mac (const mac_address_t * mac, u32 bd_index, index_t gei)
 {
   clib_bihash_kv_16_8_t key;
   int rv;
 
-  gbp_endpoint_mk_key_mac_itf (mac, sw_if_index, &key);
-  key.value = gbpei;
+  gbp_endpoint_mk_key_mac (mac->bytes, bd_index, &key);
+  key.value = gei;
 
-  rv = clib_bihash_add_del_16_8 (&gbp_ep_by_mac_itf_db.gte_table, &key, 1);
+  rv = clib_bihash_add_del_16_8 (&gbp_ep_db.ged_by_mac_bd, &key, 1);
+
 
   return (0 == rv);
 }
 
 static bool
-gbp_endpoint_add_ip_itf (const ip46_address_t * ip,
-			 u32 sw_if_index, index_t gbpei)
+gbp_endpoint_add_ip (const ip46_address_t * ip, u32 fib_index, index_t gei)
 {
   clib_bihash_kv_24_8_t key;
   int rv;
 
-  gbp_endpoint_mk_key_ip_itf (ip, sw_if_index, &key);
-  key.value = gbpei;
+  gbp_endpoint_mk_key_ip (ip, fib_index, &key);
+  key.value = gei;
 
-  rv = clib_bihash_add_del_24_8 (&gbp_ep_by_ip_itf_db.gte_table, &key, 1);
+  rv = clib_bihash_add_del_24_8 (&gbp_ep_db.ged_by_ip_rd, &key, 1);
 
   return (0 == rv);
 }
 
 static void
-gbp_endpoint_add_itf (u32 sw_if_index, index_t gbpei)
-{
-  vec_validate_init_empty (gbp_ep_by_itf_db.gte_vec, sw_if_index,
-			   INDEX_INVALID);
-
-  if (INDEX_INVALID == gbp_ep_by_itf_db.gte_vec[sw_if_index])
-    {
-      l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_GBP_SRC_CLASSIFY,
-				  1);
-      l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_GBP_FWD, 1);
-      l2output_intf_bitmap_enable (sw_if_index, L2OUTPUT_FEAT_GBP_POLICY, 1);
-    }
-  gbp_ep_by_itf_db.gte_vec[sw_if_index] = gbpei;
-}
-
-static void
-gbp_endpoint_del_mac_itf (const mac_address_t * mac, u32 sw_if_index)
+gbp_endpoint_del_mac (const mac_address_t * mac, u32 bd_index)
 {
   clib_bihash_kv_16_8_t key;
 
-  gbp_endpoint_mk_key_mac_itf (mac, sw_if_index, &key);
+  gbp_endpoint_mk_key_mac (mac->bytes, bd_index, &key);
 
-  clib_bihash_add_del_16_8 (&gbp_ep_by_mac_itf_db.gte_table, &key, 0);
+  clib_bihash_add_del_16_8 (&gbp_ep_db.ged_by_mac_bd, &key, 0);
 }
 
 static void
-gbp_endpoint_del_ip_itf (const ip46_address_t * ip, u32 sw_if_index)
+gbp_endpoint_del_ip (const ip46_address_t * ip, u32 fib_index)
 {
   clib_bihash_kv_24_8_t key;
 
-  gbp_endpoint_mk_key_ip_itf (ip, sw_if_index, &key);
+  gbp_endpoint_mk_key_ip (ip, fib_index, &key);
 
-  clib_bihash_add_del_24_8 (&gbp_ep_by_ip_itf_db.gte_table, &key, 0);
-}
-
-static void
-gbp_endpoint_del_itf (u32 sw_if_index)
-{
-  if (vec_len (gbp_ep_by_itf_db.gte_vec) <= sw_if_index)
-    return;
-
-  l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_GBP_SRC_CLASSIFY, 0);
-  l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_GBP_FWD, 0);
-  l2output_intf_bitmap_enable (sw_if_index, L2OUTPUT_FEAT_GBP_POLICY, 0);
-
-  gbp_ep_by_itf_db.gte_vec[sw_if_index] = INDEX_INVALID;
+  clib_bihash_add_del_24_8 (&gbp_ep_db.ged_by_ip_rd, &key, 0);
 }
 
 static index_t
-gbp_endpoint_index (const gbp_endpoint_t * gbpe)
+gbp_endpoint_index (const gbp_endpoint_t * ge)
 {
-  return (gbpe - gbp_endpoint_pool);
+  return (ge - gbp_endpoint_pool);
+}
+
+static ip46_type_t
+ip46_address_get_type (const ip46_address_t * a)
+{
+  return (ip46_address_is_ip4 (a) ? IP46_TYPE_IP4 : IP46_TYPE_IP6);
+}
+
+static u8
+ip46_address_get_len (const ip46_address_t * a)
+{
+  return (ip46_address_is_ip4 (a) ? 32 : 128);
+}
+
+static gbp_endpoint_t *
+gbp_endpoint_alloc (epg_id_t epg_id,
+		    index_t ggi, u32 sw_if_index, gbp_endpoint_flags_t flags,
+		    const ip46_address_t * tun_src,
+		    const ip46_address_t * tun_dst)
+{
+  gbp_endpoint_t *ge;
+
+  pool_get_zero (gbp_endpoint_pool, ge);
+
+  ge->ge_epg = ggi;
+  ge->ge_epg_id = epg_id;
+  ge->ge_flags = flags;
+  ge->ge_sw_if_index = sw_if_index;
+  ge->ge_last_time = vlib_time_now (vlib_get_main ());
+
+  gbp_endpoint_group_find_and_lock (epg_id);
+
+  if (gbp_endpoint_is_remote (ge))
+    {
+      if (NULL != tun_src)
+	ip46_address_copy (&ge->tun.ge_src, tun_src);
+      if (NULL != tun_dst)
+	ip46_address_copy (&ge->tun.ge_dst, tun_dst);
+
+      /*
+       * the input interface may be the parent GBP-vxlan interface,
+       * create a child vxlan-gbp tunnel and use that as the endpoint's
+       * interface.
+       */
+      switch (gbp_vxlan_tunnel_get_type (sw_if_index))
+	{
+	case GBP_VXLAN_TEMPLATE_TUNNEL:
+	  ge->tun.ge_parent_sw_if_index = sw_if_index;
+	  ge->ge_sw_if_index =
+	    gbp_vxlan_tunnel_clone_and_lock (sw_if_index, tun_src, tun_dst);
+	  break;
+	case VXLAN_GBP_TUNNEL:
+	  ge->tun.ge_parent_sw_if_index =
+	    vxlan_gbp_tunnel_get_parent (sw_if_index);
+	  ge->ge_sw_if_index = sw_if_index;
+	  vxlan_gbp_tunnel_lock (ge->ge_sw_if_index);
+	  break;
+	}
+    }
+
+  return (ge);
 }
 
 int
 gbp_endpoint_update (u32 sw_if_index,
 		     const ip46_address_t * ips,
-		     const mac_address_t * mac, epg_id_t epg_id, u32 * handle)
+		     const mac_address_t * mac,
+		     epg_id_t epg_id,
+		     gbp_endpoint_flags_t flags,
+		     const ip46_address_t * tun_src,
+		     const ip46_address_t * tun_dst, u32 * handle)
 {
-  gbp_endpoint_group_t *gepg;
-  const ip46_address_t *ip;
-  gbp_endpoint_t *gbpe;
+  gbp_endpoint_group_t *gg;
+  gbp_endpoint_t *ge;
+  index_t ggi, gei;
 
-  gbpe = NULL;
-  gepg = gbp_endpoint_group_find (epg_id);
+  if (~0 == sw_if_index)
+    return (VNET_API_ERROR_INVALID_SW_IF_INDEX);
 
-  if (NULL == gepg)
+  ge = NULL;
+  ggi = gbp_endpoint_group_find_and_lock (epg_id);
+
+  if (INDEX_INVALID == ggi)
     return (VNET_API_ERROR_NO_SUCH_ENTRY);
 
+  gg = gbp_endpoint_group_get (ggi);
+
   /*
-   * find an existing endpoint matching one of the key types
+   * L2 EP
    */
-  if (NULL != mac)
+  if (NULL != mac && !mac_address_is_zero (mac))
     {
-      gbpe = gbp_endpoint_find_mac_itf (mac, sw_if_index);
-    }
-  if (NULL == gbpe && NULL != ips)
-    {
-      vec_foreach (ip, ips)
-      {
-	gbpe = gbp_endpoint_find_ip_itf (ip, sw_if_index);
-
-	if (NULL != gbpe)
-	  break;
-      }
-    }
-  if (NULL == gbpe)
-    {
-      gbpe = gbp_endpoint_find_itf (sw_if_index);
-    }
-
-  if (NULL == gbpe)
-    {
-      index_t gbpei;
-      u32 ii;
       /*
-       * new entry
+       * find an existing endpoint matching one of the key types
        */
-      pool_get (gbp_endpoint_pool, gbpe);
-      gbpei = gbp_endpoint_index (gbpe);
-
-      gbpe->ge_epg_id = epg_id;
-      gbpe->ge_sw_if_index = sw_if_index;
-      gbp_endpoint_add_itf (gbpe->ge_sw_if_index, gbpei);
-
-      if (NULL != mac)
+      ge = gbp_endpoint_find_mac (mac->bytes, gg->gg_bd_index);
+      if (NULL == ge)
 	{
-	  gbpe->ge_mac = *mac;
+	  /*
+	   * new entry
+	   */
+	  ge = gbp_endpoint_alloc (epg_id, ggi, sw_if_index, flags,
+				   tun_src, tun_dst);
+	  gei = gbp_endpoint_index (ge);
+	  mac_address_copy (&ge->ge_mac, mac);
 
-	  // FIXME ERROR
-	  gbp_endpoint_add_mac_itf (mac, sw_if_index, gbpei);
+	  ge->ge_itf = gbp_itf_add_and_lock (ge->ge_sw_if_index,
+					     gg->gg_bd_index);
+
+	  gbp_itf_set_l2_input_feature (ge->ge_itf, gei,
+					L2INPUT_FEAT_GBP_FWD);
+
+	  if (gbp_endpoint_is_remote (ge))
+	    {
+	      gbp_itf_set_l2_output_feature (ge->ge_itf, gei,
+					     L2OUTPUT_FEAT_GBP_POLICY_MAC);
+	    }
+	  else
+	    {
+	      gbp_endpoint_add_itf (ge->ge_sw_if_index, gei);
+	      gbp_itf_set_l2_output_feature (ge->ge_itf, gei,
+					     L2OUTPUT_FEAT_GBP_POLICY_PORT);
+	    }
+
+	  gbp_endpoint_add_mac (mac, gg->gg_bd_index, gei);
+
+	  l2fib_add_entry (mac->bytes, gg->gg_bd_index, ge->ge_sw_if_index,
+			   L2FIB_ENTRY_RESULT_FLAG_STATIC);
+	}
+      else
+	{
+	  /*
+	   * update existing entry..
+	   */
+	  ge->ge_flags = flags;
+	  gei = gbp_endpoint_index (ge);
+	  goto out;
+	}
+    }
+
+  /*
+   * L3 EP
+   */
+  if (NULL != ips && !ip46_address_is_zero (ips))
+    {
+      const ip46_address_t *ip;
+      fib_protocol_t fproto;
+      gbp_endpoint_t *l3_ge;
+      u32 ii;
+
+      /*
+       * look for a matching EP by any of the addresses.
+       * An EP's IP addresses cannot change so we can search based on
+       * the first
+       */
+      fproto = fib_proto_from_ip46 (ip46_address_get_type (&ips[0]));
+
+      l3_ge = gbp_endpoint_find_ip (&ips[0],
+				    gbp_endpoint_group_get_fib_index (gg,
+								      fproto));
+      if (NULL == l3_ge)
+	{
+	  if (NULL == ge)
+	    {
+	      ge = gbp_endpoint_alloc (epg_id, ggi, sw_if_index, flags,
+				       tun_src, tun_dst);
+	      ge->ge_itf = gbp_itf_add_and_lock (sw_if_index, ~0);
+	    }
+	  else
+	    /* L2 EP with IPs */
+	    gei = gbp_endpoint_index (ge);
+	}
+      else
+	{
+	  /* modify */
+	  ge = l3_ge;
+	  ge->ge_flags = flags;
+	  gei = gbp_endpoint_index (ge);
+	  goto out;
 	}
 
-      if (NULL != ips)
-	{
-	  vec_validate (gbpe->ge_ips, vec_len (ips) - 1);
-	  vec_foreach_index (ii, ips)
-	  {
-	    ip46_address_copy (&gbpe->ge_ips[ii], &ips[ii]);
+      gei = gbp_endpoint_index (ge);
+      ge->ge_ips = ips;
+      vec_validate (ge->ge_adjs, vec_len (ips) - 1);
 
-	    // FIXME ERROR
-	    gbp_endpoint_add_ip_itf (&ips[ii], sw_if_index, gbpei);
+      vec_foreach_index (ii, ge->ge_ips)
+      {
+	ethernet_header_t *eth;
+	ip46_type_t ip_type;
+	u32 ip_sw_if_index;
+	u8 *rewrite;
+
+	rewrite = NULL;
+	ip = &ge->ge_ips[ii];
+	ip_type = ip46_address_get_type (ip);
+	fproto = fib_proto_from_ip46 (ip_type);
+
+	bd_add_del_ip_mac (gg->gg_bd_index, ip_type, ip, &ge->ge_mac, 1);
+
+	// FIXME - check error
+	gbp_endpoint_add_ip (ip,
+			     gbp_endpoint_group_get_fib_index (gg, fproto),
+			     gei);
+
+	/*
+	 * add a host route via the EPG's BVI. We need this because the
+	 * adj-fib does not install (due to the cover refinement check)
+	 * when the BVI's prefix is a /32
+	 */
+	fib_prefix_t pfx = {
+	  .fp_proto = fproto,
+	  .fp_len = ip46_address_get_len (ip),
+	  .fp_addr = *ip,
+	};
+	vec_validate (rewrite, sizeof (*eth) - 1);
+	eth = (ethernet_header_t *) rewrite;
+
+	eth->type = clib_host_to_net_u16 ((fproto == FIB_PROTOCOL_IP4 ?
+					   ETHERNET_TYPE_IP4 :
+					   ETHERNET_TYPE_IP6));
+
+	if (gbp_endpoint_is_remote (ge))
+	  {
+	    /*
+	     * for dynamic EPs we must add the IP adjacency via the learned
+	     * tunnel, since the BD will not contain the EP's MAC (it was
+	     * L3 learned). The dst MAC address used is the 'BD's MAC'.
+	     */
+	    ip_sw_if_index = ge->ge_sw_if_index;
+
+	    mac_address_to_bytes (gbp_route_domain_get_local_mac (),
+				  eth->src_address);
+	    mac_address_to_bytes (gbp_route_domain_get_remote_mac (),
+				  eth->dst_address);
+	  }
+	else
+	  {
+	    /*
+	     * for the static EPs we add the IP adjacency via the BVI
+	     * knowing that the BD has the MAC address to route to and
+	     * that policy will be applied on egress to the EP's port
+	     */
+	    ip_sw_if_index = gbp_endpoint_group_get_bvi (gg);
+
+	    clib_memcpy (eth->src_address,
+			 vnet_sw_interface_get_hw_address (vnet_get_main (),
+							   ip_sw_if_index),
+			 sizeof (eth->src_address));
+	    mac_address_to_bytes (&ge->ge_mac, eth->dst_address);
+	  }
+
+	fib_table_entry_path_add
+	  (gbp_endpoint_group_get_fib_index (gg, fproto),
+	   &pfx, FIB_SOURCE_PLUGIN_LOW,
+	   FIB_ENTRY_FLAG_NONE,
+	   fib_proto_to_dpo (fproto), ip, ip_sw_if_index,
+	   ~0, 1, NULL, FIB_ROUTE_PATH_FLAG_NONE);
+
+	ge->ge_adjs[ii] = adj_nbr_add_or_lock_w_rewrite (fproto,
+							 fib_proto_to_link
+							 (fproto), ip,
+							 ip_sw_if_index,
+							 rewrite);
+
+	if (gbp_endpoint_is_remote (ge))
+	  {
+	    dpo_id_t policy_dpo = DPO_INVALID;
 
 	    /*
-	     * send a gratuitous ARP on the EPG's uplink. this is done so
-	     * that if this EP has moved from some other place in the
-	     * 'fabric', upstream devices are informed
+	     * interpose a policy DPO from the endpoint so that policy
+	     * is applied
 	     */
-	    if (ip46_address_is_ip4 (&ips[ii]))
+	    gbp_policy_dpo_add_or_lock (fib_proto_to_dpo (fproto),
+					gg->gg_id, ~0, &policy_dpo);
+
+	    fib_table_entry_special_dpo_add
+	      (gbp_endpoint_group_get_fib_index (gg, fproto),
+	       &pfx,
+	       FIB_SOURCE_PLUGIN_HI, FIB_ENTRY_FLAG_INTERPOSE, &policy_dpo);
+	  }
+
+	/*
+	 * send a gratuitous ARP on the EPG's uplink. this is done so
+	 * that if this EP has moved from some other place in the
+	 * 'fabric', upstream devices are informed
+	 */
+	if (!(gbp_endpoint_is_remote (ge)) && ~0 != gg->gg_uplink_sw_if_index)
+	  {
+	    gbp_endpoint_add_itf (sw_if_index, gei);
+	    if (ip46_address_is_ip4 (ip))
 	      send_ip4_garp_w_addr (vlib_get_main (),
-				    &ips[ii].ip4,
-				    gepg->gepg_uplink_sw_if_index);
+				    &ip->ip4, gg->gg_uplink_sw_if_index);
 	    else
 	      send_ip6_na_w_addr (vlib_get_main (),
-				  &ips[ii].ip6,
-				  gepg->gepg_uplink_sw_if_index);
+				  &ip->ip6, gg->gg_uplink_sw_if_index);
 	  }
+      }
+    }
+
+  if (NULL == ge)
+    return (0);
+
+  /*
+   * count the number of dynamic entries and kick off the scanner
+   * process if this is our first.
+   */
+  if (gbp_endpoint_is_remote (ge))
+    {
+      gbp_n_learnt_endpoints++;
+
+      if (1 == gbp_n_learnt_endpoints)
+	{
+	  vlib_process_signal_event (vlib_get_main (),
+				     gbp_scanner_node.index,
+				     GBP_ENDPOINT_SCAN_START, 0);
 	}
     }
   else
     {
       /*
-       * update existing entry..
+       * non-remote endpoints (i.e. those not arriving on iVXLAN
+       * tunnels) need to be classified based on the input interface.
+       * We enable the GBP-FWD feature only if the group has an uplink
+       * interface (on which the GBP-FWD feature would send UU traffic).
        */
-      ASSERT (0);
-    }
+      l2input_feat_masks_t feats = L2INPUT_FEAT_GBP_SRC_CLASSIFY;
 
-  *handle = (gbpe - gbp_endpoint_pool);
+      if (~0 != gg->gg_uplink_sw_if_index)
+	feats |= L2INPUT_FEAT_GBP_FWD;
+      gbp_itf_set_l2_input_feature (ge->ge_itf, gbp_endpoint_index (ge),
+				    feats);
+    }
+out:
+
+  if (handle)
+    *handle = (ge - gbp_endpoint_pool);
+
+  gbp_endpoint_group_unlock (ggi);
+  GBP_ENDPOINT_INFO ("update: %U", format_gbp_endpoint, gei);
 
   return (0);
 }
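+
+/*
+ * A minimal usage sketch, mirroring the CLI handler further below: a
+ * static, locally attached EP has no tunnel addresses, so the tun
+ * src/dst are passed as NULL:
+ *
+ *   gbp_endpoint_update (sw_if_index, ips, &mac, epg_id,
+ *                        GBP_ENDPOINT_FLAG_NONE, NULL, NULL, &handle);
+ */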
 
 void
-gbp_endpoint_delete (u32 handle)
+gbp_endpoint_delete (index_t gei)
 {
-  gbp_endpoint_t *gbpe;
+  gbp_endpoint_group_t *gg;
+  gbp_endpoint_t *ge;
 
-  if (pool_is_free_index (gbp_endpoint_pool, handle))
+  if (pool_is_free_index (gbp_endpoint_pool, gei))
     return;
 
-  gbpe = pool_elt_at_index (gbp_endpoint_pool, handle);
+  GBP_ENDPOINT_INFO ("delete: %U", format_gbp_endpoint, gei);
 
-  gbp_endpoint_del_itf (gbpe->ge_sw_if_index);
+  ge = gbp_endpoint_get (gei);
+  gg = gbp_endpoint_group_get (ge->ge_epg);
 
-  if (!mac_address_is_zero (&gbpe->ge_mac))
-    {
-      gbp_endpoint_del_mac_itf (&gbpe->ge_mac, gbpe->ge_sw_if_index);
-    }
+  gbp_endpoint_del_mac (&ge->ge_mac, gg->gg_bd_index);
+  l2fib_del_entry (ge->ge_mac.bytes, gg->gg_bd_index, ge->ge_sw_if_index);
+  gbp_itf_set_l2_input_feature (ge->ge_itf, gei, (L2INPUT_FEAT_NONE));
+  gbp_itf_set_l2_output_feature (ge->ge_itf, gei, L2OUTPUT_FEAT_NONE);
 
-  if (NULL != gbpe->ge_ips)
+  if (NULL != ge->ge_ips)
     {
       const ip46_address_t *ip;
+      index_t *ai;
 
-      vec_foreach (ip, gbpe->ge_ips)
+      vec_foreach (ai, ge->ge_adjs)
       {
-	gbp_endpoint_del_ip_itf (ip, gbpe->ge_sw_if_index);
+	adj_unlock (*ai);
+      }
+      vec_foreach (ip, ge->ge_ips)
+      {
+	fib_protocol_t fproto;
+	ip46_type_t ip_type;
+
+	ip_type = ip46_address_get_type (ip);
+	fproto = fib_proto_from_ip46 (ip_type);
+
+	gbp_endpoint_del_ip (ip,
+			     gbp_endpoint_group_get_fib_index (gg, fproto));
+
+	bd_add_del_ip_mac (gg->gg_bd_index, ip_type, ip, &ge->ge_mac, 0);
+
+	/*
+	 * remove a host route via the EPG's BVI
+	 */
+	fib_prefix_t pfx = {
+	  .fp_proto = fproto,
+	  .fp_len = ip46_address_get_len (ip),
+	  .fp_addr = *ip,
+	};
+
+	if (gbp_endpoint_is_remote (ge))
+	  {
+	    fib_table_entry_special_remove
+	      (gbp_endpoint_group_get_fib_index (gg, fproto),
+	       &pfx, FIB_SOURCE_PLUGIN_HI);
+	  }
+
+	fib_table_entry_path_remove
+	  (gbp_endpoint_group_get_fib_index (gg, fproto),
+	   &pfx, FIB_SOURCE_PLUGIN_LOW,
+	   fib_proto_to_dpo (fproto), ip,
+	   (gbp_endpoint_is_remote (ge) ?
+	    ge->ge_sw_if_index :
+	    gbp_endpoint_group_get_bvi (gg)),
+	   ~0, 1, FIB_ROUTE_PATH_FLAG_NONE);
       }
     }
-  pool_put (gbp_endpoint_pool, gbpe);
+
+  if (ge->ge_flags & GBP_ENDPOINT_FLAG_LEARNT)
+    {
+      gbp_n_learnt_endpoints--;
+
+      if (0 == gbp_n_learnt_endpoints)
+	{
+	  vlib_process_signal_event (vlib_get_main (),
+				     gbp_scanner_node.index,
+				     GBP_ENDPOINT_SCAN_STOP, 0);
+	}
+    }
+
+  gbp_itf_unlock (ge->ge_itf);
+  if (gbp_endpoint_is_remote (ge))
+    {
+      vxlan_gbp_tunnel_unlock (ge->ge_sw_if_index);
+    }
+  gbp_endpoint_group_unlock (ge->ge_epg);
+  pool_put (gbp_endpoint_pool, ge);
+}
+
+typedef struct gbp_endpoint_flush_ctx_t_
+{
+  u32 sw_if_index;
+  index_t *geis;
+} gbp_endpoint_flush_ctx_t;
+
+static walk_rc_t
+gbp_endpoint_flush_cb (index_t gei, void *args)
+{
+  gbp_endpoint_flush_ctx_t *ctx = args;
+  gbp_endpoint_t *ge;
+
+  ge = gbp_endpoint_get (gei);
+
+  if (gbp_endpoint_is_remote (ge) &&
+      ctx->sw_if_index == ge->tun.ge_parent_sw_if_index)
+    {
+      vec_add1 (ctx->geis, gei);
+    }
+
+  return (WALK_CONTINUE);
+}
+
+/**
+ * remove all learnt endpoints using the interface
+ */
+void
+gbp_endpoint_flush (u32 sw_if_index)
+{
+  gbp_endpoint_flush_ctx_t ctx = {
+    .sw_if_index = sw_if_index,
+  };
+  index_t *gei;
+
+  gbp_endpoint_walk (gbp_endpoint_flush_cb, &ctx);
+
+  vec_foreach (gei, ctx.geis)
+  {
+    gbp_endpoint_delete (*gei);
+  }
+
+  vec_free (ctx.geis);
 }
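+
+/*
+ * Hypothetical usage sketch: when an iVXLAN parent interface goes
+ * away, all the EPs learnt over it can be removed in one pass:
+ *
+ *   gbp_endpoint_flush (parent_sw_if_index);
+ *
+ * each delete decrements the learnt-EP count and may stop the scanner.
+ */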
 
 void
 gbp_endpoint_walk (gbp_endpoint_cb_t cb, void *ctx)
 {
-  gbp_endpoint_t *gbpe;
+  u32 index;
 
   /* *INDENT-OFF* */
-  pool_foreach(gbpe, gbp_endpoint_pool,
+  pool_foreach_index(index, gbp_endpoint_pool,
   {
-    if (!cb(gbpe, ctx))
+    if (!cb(index, ctx))
       break;
   });
   /* *INDENT-ON* */
@@ -380,7 +683,7 @@
   ip46_address_t ip = ip46_address_initializer, *ips = NULL;
   mac_address_t mac = ZERO_MAC_ADDRESS;
   vnet_main_t *vnm = vnet_get_main ();
-  epg_id_t epg_id = EPG_INVALID;
+  u32 epg_id = EPG_INVALID;
   u32 handle = INDEX_INVALID;
   u32 sw_if_index = ~0;
   u8 add = 1;
@@ -418,7 +721,9 @@
       if (EPG_INVALID == epg_id)
 	return clib_error_return (0, "EPG-ID must be specified");
 
-      rv = gbp_endpoint_update (sw_if_index, ips, &mac, epg_id, &handle);
+      rv =
+	gbp_endpoint_update (sw_if_index, ips, &mac, epg_id,
+			     GBP_ENDPOINT_FLAG_NONE, NULL, NULL, &handle);
 
       if (rv)
 	return clib_error_return (0, "GBP Endpoint update returned %d", rv);
@@ -457,37 +762,41 @@
 u8 *
 format_gbp_endpoint (u8 * s, va_list * args)
 {
-  index_t gbpei = va_arg (*args, index_t);
-  vnet_main_t *vnm = vnet_get_main ();
+  index_t gei = va_arg (*args, index_t);
   const ip46_address_t *ip;
-  gbp_endpoint_t *gbpe;
+  gbp_endpoint_t *ge;
 
-  gbpe = gbp_endpoint_get (gbpei);
+  ge = gbp_endpoint_get (gei);
 
-  s = format (s, "[@%d] ", gbpei);
-  s =
-    format (s, "%U", format_vnet_sw_if_index_name, vnm, gbpe->ge_sw_if_index);
-  s = format (s, ", IPs:[");
+  s = format (s, "[@%d] ", gei);
+  s = format (s, "IPs:[");
 
-  vec_foreach (ip, gbpe->ge_ips)
+  vec_foreach (ip, ge->ge_ips)
   {
     s = format (s, "%U, ", format_ip46_address, ip, IP46_TYPE_ANY);
   }
   s = format (s, "]");
 
-  s = format (s, " MAC:%U", format_mac_address_t, &gbpe->ge_mac);
-  s = format (s, " EPG-ID:%d", gbpe->ge_epg_id);
+  s = format (s, " MAC:%U", format_mac_address_t, &ge->ge_mac);
+  s = format (s, " EPG-ID:%d", ge->ge_epg_id);
+  if (GBP_ENDPOINT_FLAG_NONE != ge->ge_flags)
+    {
+      s = format (s, " flags:%U", format_gbp_endpoint_flags, ge->ge_flags);
+    }
+
+  s = format (s, " itf:[%U]", format_gbp_itf, ge->ge_itf);
+  s = format (s, " last-time:[%f]", ge->ge_last_time);
 
   return s;
 }
 
 static walk_rc_t
-gbp_endpoint_show_one (gbp_endpoint_t * gbpe, void *ctx)
+gbp_endpoint_show_one (index_t gei, void *ctx)
 {
   vlib_main_t *vm;
 
   vm = ctx;
-  vlib_cli_output (vm, " %U", format_gbp_endpoint, gbp_endpoint_index (gbpe));
+  vlib_cli_output (vm, " %U", format_gbp_endpoint, gei);
 
   return (WALK_CONTINUE);
 }
@@ -530,7 +839,7 @@
 gbp_endpoint_show (vlib_main_t * vm,
 		   unformat_input_t * input, vlib_cli_command_t * cmd)
 {
-  u32 sw_if_index, show_dbs, handle;
+  u32 show_dbs, handle;
 
   handle = INDEX_INVALID;
   show_dbs = 0;
@@ -539,7 +848,7 @@
     {
       if (unformat (input, "%d", &handle))
 	;
-      else if (unformat (input, "db", &handle))
+      else if (unformat (input, "db"))
 	show_dbs = 1;
       else
 	break;
@@ -552,19 +861,10 @@
   else if (show_dbs)
     {
       vlib_cli_output (vm, "\nDatabases:");
-      clib_bihash_foreach_key_value_pair_24_8 (&gbp_ep_by_ip_itf_db.gte_table,
+      clib_bihash_foreach_key_value_pair_24_8 (&gbp_ep_db.ged_by_ip_rd,
 					       gbp_endpoint_walk_ip_itf, vm);
       clib_bihash_foreach_key_value_pair_16_8
-	(&gbp_ep_by_mac_itf_db.gte_table, gbp_endpoint_walk_mac_itf, vm);
-
-      vec_foreach_index (sw_if_index, gbp_ep_by_itf_db.gte_vec)
-      {
-	if (INDEX_INVALID != gbp_ep_by_itf_db.gte_vec[sw_if_index])
-	  vlib_cli_output (vm, " {%U} -> %d",
-			   format_vnet_sw_if_index_name, vnet_get_main (),
-			   sw_if_index,
-			   gbp_ep_by_itf_db.gte_vec[sw_if_index]);
-      }
+	(&gbp_ep_db.ged_by_mac_bd, gbp_endpoint_walk_mac_itf, vm);
     }
   else
     {
@@ -590,20 +890,161 @@
 };
 /* *INDENT-ON* */
 
+static void
+gbp_endpoint_check (index_t gei, f64 start_time)
+{
+  gbp_endpoint_t *ge;
+
+  ge = gbp_endpoint_get (gei);
+
+  GBP_ENDPOINT_DBG ("scan at:%f -> %U", start_time, format_gbp_endpoint, gei);
+
+  if ((ge->ge_flags & GBP_ENDPOINT_FLAG_LEARNT) &&
+      ((start_time - ge->ge_last_time) > GBP_ENDPOINT_INACTIVE_TIME))
+    {
+      gbp_endpoint_delete (gei);
+    }
+}
+
+static void
+gbp_endpoint_scan_l2 (vlib_main_t * vm)
+{
+  clib_bihash_16_8_t *gte_table = &gbp_ep_db.ged_by_mac_bd;
+  f64 last_start, start_time, delta_t;
+  int i, j, k;
+
+  delta_t = 0;
+  last_start = start_time = vlib_time_now (vm);
+
+  for (i = 0; i < gte_table->nbuckets; i++)
+    {
+      clib_bihash_bucket_16_8_t *b;
+      clib_bihash_value_16_8_t *v;
+
+      /* allow no more than 20us without a pause */
+      delta_t = vlib_time_now (vm) - last_start;
+      if (delta_t > 20e-6)
+	{
+	  /* suspend for 100 us */
+	  vlib_process_suspend (vm, 100e-6);
+	  last_start = vlib_time_now (vm);
+	}
+
+      b = &gte_table->buckets[i];
+      if (b->offset == 0)
+	continue;
+      v = clib_bihash_get_value_16_8 (gte_table, b->offset);
+
+      for (j = 0; j < (1 << b->log2_pages); j++)
+	{
+	  for (k = 0; k < BIHASH_KVP_PER_PAGE; k++)
+	    {
+	      if (clib_bihash_is_free_16_8 (&v->kvp[k]))
+		continue;
+
+	      gbp_endpoint_check (v->kvp[k].value, start_time);
+
+	      /*
+	       * Note: we may have just freed the bucket's backing
+	       * storage, so check right here...
+	       */
+	      if (b->offset == 0)
+		goto doublebreak;
+	    }
+	  v++;
+	}
+    doublebreak:
+      ;
+    }
+}
+
+static void
+gbp_endpoint_scan_l3 (vlib_main_t * vm)
+{
+  clib_bihash_24_8_t *gte_table = &gbp_ep_db.ged_by_ip_rd;
+  f64 last_start, start_time, delta_t;
+  int i, j, k;
+
+  delta_t = 0;
+  last_start = start_time = vlib_time_now (vm);
+
+  for (i = 0; i < gte_table->nbuckets; i++)
+    {
+      clib_bihash_bucket_24_8_t *b;
+      clib_bihash_value_24_8_t *v;
+
+      /* allow no more than 20us without a pause */
+      delta_t = vlib_time_now (vm) - last_start;
+      if (delta_t > 20e-6)
+	{
+	  /* suspend for 100 us */
+	  vlib_process_suspend (vm, 100e-6);
+	  last_start = vlib_time_now (vm);
+	}
+
+      b = &gte_table->buckets[i];
+      if (b->offset == 0)
+	continue;
+      v = clib_bihash_get_value_24_8 (gte_table, b->offset);
+
+      for (j = 0; j < (1 << b->log2_pages); j++)
+	{
+	  for (k = 0; k < BIHASH_KVP_PER_PAGE; k++)
+	    {
+	      if (clib_bihash_is_free_24_8 (&v->kvp[k]))
+		continue;
+
+	      gbp_endpoint_check (v->kvp[k].value, start_time);
+
+	      /*
+	       * Note: we may have just freed the bucket's backing
+	       * storage, so check right here...
+	       */
+	      if (b->offset == 0)
+		goto doublebreak;
+	    }
+	  v++;
+	}
+    doublebreak:
+      ;
+    }
+}
+
+void
+gbp_endpoint_scan (vlib_main_t * vm)
+{
+  gbp_endpoint_scan_l2 (vm);
+  gbp_endpoint_scan_l3 (vm);
+}
+
+void
+gbp_learn_set_inactive_threshold (u32 threshold)
+{
+  GBP_ENDPOINT_INACTIVE_TIME = threshold;
+}
+
+f64
+gbp_endpoint_scan_threshold (void)
+{
+  return (GBP_ENDPOINT_INACTIVE_TIME);
+}
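+
+/*
+ * Sketch of how the scanner process is assumed to use the START/STOP
+ * events and the threshold above (the actual gbp_scanner.c may
+ * differ):
+ *
+ *   while (1)
+ *     {
+ *       vlib_process_wait_for_event_or_clock
+ *         (vm, gbp_endpoint_scan_threshold ());
+ *       // toggle 'enabled' on GBP_ENDPOINT_SCAN_START/STOP events
+ *       if (enabled)
+ *         gbp_endpoint_scan (vm);
+ *     }
+ */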
+
 #define GBP_EP_HASH_NUM_BUCKETS (2 * 1024)
 #define GBP_EP_HASH_MEMORY_SIZE (1 << 20)
 
 static clib_error_t *
 gbp_endpoint_init (vlib_main_t * vm)
 {
-  clib_bihash_init_24_8 (&gbp_ep_by_ip_itf_db.gte_table,
-			 "GBP Endpoints - IP/Interface",
+  clib_bihash_init_24_8 (&gbp_ep_db.ged_by_ip_rd,
+			 "GBP Endpoints - IP/RD",
 			 GBP_EP_HASH_NUM_BUCKETS, GBP_EP_HASH_MEMORY_SIZE);
 
-  clib_bihash_init_16_8 (&gbp_ep_by_mac_itf_db.gte_table,
-			 "GBP Endpoints - MAC/Interface",
+  clib_bihash_init_16_8 (&gbp_ep_db.ged_by_mac_bd,
+			 "GBP Endpoints - MAC/BD",
 			 GBP_EP_HASH_NUM_BUCKETS, GBP_EP_HASH_MEMORY_SIZE);
 
+  gbp_ep_logger = vlib_log_register_class ("gbp", "ep");
+
   return (NULL);
 }
 
diff --git a/src/plugins/gbp/gbp_endpoint.h b/src/plugins/gbp/gbp_endpoint.h
index c92b217..bd157c9 100644
--- a/src/plugins/gbp/gbp_endpoint.h
+++ b/src/plugins/gbp/gbp_endpoint.h
@@ -28,13 +28,31 @@
 /**
  * Flags for each endpoint
  */
+typedef enum gbp_endpoint_attr_t_
+{
+  GBP_ENDPOINT_ATTR_FIRST = 0,
+  GBP_ENDPOINT_ATTR_BOUNCE = GBP_ENDPOINT_ATTR_FIRST,
+  GBP_ENDPOINT_ATTR_REMOTE = 1,
+  GBP_ENDPOINT_ATTR_LEARNT = 2,
+  GBP_ENDPOINT_ATTR_LAST,
+} gbp_endpoint_attr_t;
+
 typedef enum gbp_endpoint_flags_t_
 {
   GBP_ENDPOINT_FLAG_NONE = 0,
-  GBP_ENDPOINT_FLAG_BOUNCE = (1 << 0),
-  GBP_ENDPOINT_FLAG_DYNAMIC = (1 << 1),
+  GBP_ENDPOINT_FLAG_BOUNCE = (1 << GBP_ENDPOINT_ATTR_BOUNCE),
+  GBP_ENDPOINT_FLAG_REMOTE = (1 << GBP_ENDPOINT_ATTR_REMOTE),
+  GBP_ENDPOINT_FLAG_LEARNT = (1 << GBP_ENDPOINT_ATTR_LEARNT),
 } gbp_endpoint_flags_t;
 
+#define GBP_ENDPOINT_ATTR_NAMES {                 \
+    [GBP_ENDPOINT_ATTR_BOUNCE] = "bounce",        \
+    [GBP_ENDPOINT_ATTR_REMOTE] = "remote",        \
+    [GBP_ENDPOINT_ATTR_LEARNT] = "learnt",        \
+}
+
+extern u8 *format_gbp_endpoint_flags (u8 * s, va_list * args);
+
 /**
  * A Group Based Policy Endpoint.
  * This is typically a VM or container. If the endpoint is local (i.e. on
@@ -48,12 +66,13 @@
   /**
    * The interface on which the EP is connected
    */
+  index_t ge_itf;
   u32 ge_sw_if_index;
 
   /**
   * A vector of ip addresses that belong to the endpoint
    */
-  ip46_address_t *ge_ips;
+  const ip46_address_t *ge_ips;
 
   /**
    * MAC address of the endpoint
@@ -61,52 +80,74 @@
   mac_address_t ge_mac;
 
   /**
-   * The endpoint's designated EPG
+   * Index of the Endpoint's Group
    */
-  epg_id_t ge_epg_id;
+  index_t ge_epg;
+
+  /**
+   * Endpoint Group's ID
+   */
+  index_t ge_epg_id;
 
   /**
    * Endpoint flags
    */
   gbp_endpoint_flags_t ge_flags;
+
+  /**
+   * The L3 adj, if created
+   */
+  index_t *ge_adjs;
+
+  /**
+   * The last time a packet was seen from this endpoint
+   */
+  f64 ge_last_time;
+
+  /**
+   * Tunnel info for remote endpoints
+   */
+  struct
+  {
+    u32 ge_parent_sw_if_index;
+    ip46_address_t ge_src;
+    ip46_address_t ge_dst;
+  } tun;
 } gbp_endpoint_t;
 
 extern u8 *format_gbp_endpoint (u8 * s, va_list * args);
 
 /**
- * Interface to source EPG DB - a per-interface vector
+ * GBP Endpoint Databases
  */
-typedef struct gbp_ep_by_itf_db_t_
-{
-  index_t *gte_vec;
-} gbp_ep_by_itf_db_t;
-
 typedef struct gbp_ep_by_ip_itf_db_t_
 {
-  clib_bihash_24_8_t gte_table;
-} gbp_ep_by_ip_itf_db_t;
-
-typedef struct gbp_ep_by_mac_itf_db_t_
-{
-  clib_bihash_16_8_t gte_table;
-} gbp_ep_by_mac_itf_db_t;
+  index_t *ged_by_sw_if_index;
+  clib_bihash_24_8_t ged_by_ip_rd;
+  clib_bihash_16_8_t ged_by_mac_bd;
+} gbp_ep_db_t;
 
 extern int gbp_endpoint_update (u32 sw_if_index,
 				const ip46_address_t * ip,
 				const mac_address_t * mac,
-				epg_id_t epg_id, u32 * handle);
-extern void gbp_endpoint_delete (u32 handle);
+				epg_id_t epg_id,
+				gbp_endpoint_flags_t flags,
+				const ip46_address_t * tun_src,
+				const ip46_address_t * tun_dst, u32 * handle);
+extern void gbp_endpoint_delete (index_t gbpei);
 
-typedef walk_rc_t (*gbp_endpoint_cb_t) (gbp_endpoint_t * gbpe, void *ctx);
+typedef walk_rc_t (*gbp_endpoint_cb_t) (index_t gbpei, void *ctx);
 extern void gbp_endpoint_walk (gbp_endpoint_cb_t cb, void *ctx);
+extern void gbp_endpoint_scan (vlib_main_t * vm);
+extern f64 gbp_endpoint_scan_threshold (void);
+extern int gbp_endpoint_is_remote (const gbp_endpoint_t * ge);
 
+extern void gbp_endpoint_flush (u32 sw_if_index);
 
 /**
  * DP functions and databases
  */
-extern gbp_ep_by_itf_db_t gbp_ep_by_itf_db;
-extern gbp_ep_by_mac_itf_db_t gbp_ep_by_mac_itf_db;
-extern gbp_ep_by_ip_itf_db_t gbp_ep_by_ip_itf_db;
+extern gbp_ep_db_t gbp_ep_db;
 extern gbp_endpoint_t *gbp_endpoint_pool;
 
 /**
@@ -118,12 +159,104 @@
   return (pool_elt_at_index (gbp_endpoint_pool, gbpei));
 }
 
-always_inline gbp_endpoint_t *
-gbp_endpoint_get_itf (u32 sw_if_index)
+static_always_inline void
+gbp_endpoint_mk_key_mac (const u8 * mac,
+			 u32 bd_index, clib_bihash_kv_16_8_t * key)
 {
-  return (gbp_endpoint_get (gbp_ep_by_itf_db.gte_vec[sw_if_index]));
+  key->key[0] = ethernet_mac_address_u64 (mac);
+  key->key[1] = bd_index;
 }
 
+static_always_inline gbp_endpoint_t *
+gbp_endpoint_find_mac (const u8 * mac, u32 bd_index)
+{
+  clib_bihash_kv_16_8_t key, value;
+  int rv;
+
+  gbp_endpoint_mk_key_mac (mac, bd_index, &key);
+
+  rv = clib_bihash_search_16_8 (&gbp_ep_db.ged_by_mac_bd, &key, &value);
+
+  if (0 != rv)
+    return NULL;
+
+  return (gbp_endpoint_get (value.value));
+}
+
+static_always_inline void
+gbp_endpoint_mk_key_ip (const ip46_address_t * ip,
+			u32 fib_index, clib_bihash_kv_24_8_t * key)
+{
+  key->key[0] = ip->as_u64[0];
+  key->key[1] = ip->as_u64[1];
+  key->key[2] = fib_index;
+}
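+
+/*
+ * Key layout for the two DP lookups; both are keyed on a forwarding
+ * domain rather than an interface:
+ *   MAC/BD : key[0] = mac as u64, key[1] = bd_index
+ *   IP/RD  : key[0..1] = ip46,    key[2] = fib_index
+ */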
+
+static_always_inline void
+gbp_endpoint_mk_key_ip4 (const ip4_address_t * ip,
+			 u32 fib_index, clib_bihash_kv_24_8_t * key)
+{
+  const ip46_address_t a = {
+    .ip4 = *ip,
+  };
+  gbp_endpoint_mk_key_ip (&a, fib_index, key);
+}
+
+static_always_inline gbp_endpoint_t *
+gbp_endpoint_find_ip4 (const ip4_address_t * ip, u32 fib_index)
+{
+  clib_bihash_kv_24_8_t key, value;
+  int rv;
+
+  gbp_endpoint_mk_key_ip4 (ip, fib_index, &key);
+
+  rv = clib_bihash_search_24_8 (&gbp_ep_db.ged_by_ip_rd, &key, &value);
+
+  if (0 != rv)
+    return NULL;
+
+  return (gbp_endpoint_get (value.value));
+}
+
+static_always_inline void
+gbp_endpoint_mk_key_ip6 (const ip6_address_t * ip,
+			 u32 fib_index, clib_bihash_kv_24_8_t * key)
+{
+  key->key[0] = ip->as_u64[0];
+  key->key[1] = ip->as_u64[1];
+  key->key[2] = fib_index;
+}
+
+static_always_inline gbp_endpoint_t *
+gbp_endpoint_find_ip6 (const ip6_address_t * ip, u32 fib_index)
+{
+  clib_bihash_kv_24_8_t key, value;
+  int rv;
+
+  gbp_endpoint_mk_key_ip6 (ip, fib_index, &key);
+
+  rv = clib_bihash_search_24_8 (&gbp_ep_db.ged_by_ip_rd, &key, &value);
+
+  if (0 != rv)
+    return NULL;
+
+  return (gbp_endpoint_get (value.value));
+}
+
+static_always_inline gbp_endpoint_t *
+gbp_endpoint_find_itf (u32 sw_if_index)
+{
+  index_t gei;
+
+  gei = gbp_ep_db.ged_by_sw_if_index[sw_if_index];
+
+  if (INDEX_INVALID != gei)
+    return (gbp_endpoint_get (gei));
+
+  return (NULL);
+}
+
 #endif
 
 /*
diff --git a/src/plugins/gbp/gbp_endpoint_group.c b/src/plugins/gbp/gbp_endpoint_group.c
index 095c8fe..ee4af2c 100644
--- a/src/plugins/gbp/gbp_endpoint_group.c
+++ b/src/plugins/gbp/gbp_endpoint_group.c
@@ -17,11 +17,12 @@
 
 #include <plugins/gbp/gbp_endpoint_group.h>
 #include <plugins/gbp/gbp_endpoint.h>
+#include <plugins/gbp/gbp_bridge_domain.h>
+#include <plugins/gbp/gbp_route_domain.h>
 
 #include <vnet/dpo/dvr_dpo.h>
 #include <vnet/fib/fib_table.h>
 #include <vnet/l2/l2_input.h>
-#include <vnet/l2/feat_bitmap.h>
 
 /**
  * Pool of GBP endpoint_groups
@@ -32,108 +33,220 @@
  * DB of endpoint_groups
  */
 gbp_endpoint_group_db_t gbp_endpoint_group_db;
+vlib_log_class_t gg_logger;
+
+#define GBP_EPG_DBG(...)                           \
+    vlib_log_debug (gg_logger, __VA_ARGS__);
 
 gbp_endpoint_group_t *
+gbp_endpoint_group_get (index_t i)
+{
+  return (pool_elt_at_index (gbp_endpoint_group_pool, i));
+}
+
+static void
+gbp_endpoint_group_lock (index_t i)
+{
+  gbp_endpoint_group_t *gg;
+
+  gg = gbp_endpoint_group_get (i);
+  gg->gg_locks++;
+}
+
+index_t
 gbp_endpoint_group_find (epg_id_t epg_id)
 {
   uword *p;
 
-  p = hash_get (gbp_endpoint_group_db.gepg_hash, epg_id);
+  p = hash_get (gbp_endpoint_group_db.gg_hash, epg_id);
 
   if (NULL != p)
-    return (pool_elt_at_index (gbp_endpoint_group_pool, p[0]));
+    return p[0];
 
-  return (NULL);
+  return (INDEX_INVALID);
+}
+
+index_t
+gbp_endpoint_group_find_and_lock (epg_id_t epg_id)
+{
+  uword *p;
+
+  p = hash_get (gbp_endpoint_group_db.gg_hash, epg_id);
+
+  if (NULL != p)
+    {
+      gbp_endpoint_group_lock (p[0]);
+      return p[0];
+    }
+  return (INDEX_INVALID);
 }
 
 int
-gbp_endpoint_group_add (epg_id_t epg_id,
-			u32 bd_id,
-			u32 ip4_table_id,
-			u32 ip6_table_id, u32 uplink_sw_if_index)
+gbp_endpoint_group_add_and_lock (epg_id_t epg_id,
+				 u32 bd_id, u32 rd_id, u32 uplink_sw_if_index)
 {
-  gbp_endpoint_group_t *gepg;
+  gbp_endpoint_group_t *gg;
+  index_t ggi;
 
-  gepg = gbp_endpoint_group_find (epg_id);
+  ggi = gbp_endpoint_group_find (epg_id);
 
-  if (NULL == gepg)
+  if (INDEX_INVALID == ggi)
     {
+      gbp_bridge_domain_t *gb;
       fib_protocol_t fproto;
+      index_t gbi, grdi;
 
-      pool_get (gbp_endpoint_group_pool, gepg);
-      clib_memset (gepg, 0, sizeof (*gepg));
+      gbi = gbp_bridge_domain_find_and_lock (bd_id);
 
-      gepg->gepg_id = epg_id;
-      gepg->gepg_bd = bd_id;
-      gepg->gepg_rd[FIB_PROTOCOL_IP4] = ip4_table_id;
-      gepg->gepg_rd[FIB_PROTOCOL_IP6] = ip6_table_id;
-      gepg->gepg_uplink_sw_if_index = uplink_sw_if_index;
+      if (~0 == gbi)
+	return (VNET_API_ERROR_BD_NOT_MODIFIABLE);
+
+      grdi = gbp_route_domain_find_and_lock (rd_id);
+
+      if (~0 == grdi)
+	{
+	  gbp_bridge_domain_unlock (gbi);
+	  return (VNET_API_ERROR_NO_SUCH_FIB);
+	}
+
+      gb = gbp_bridge_domain_get (gbi);
+
+      pool_get_zero (gbp_endpoint_group_pool, gg);
+
+      gg->gg_id = epg_id;
+      gg->gg_rd = grdi;
+      gg->gg_gbd = gbi;
+      gg->gg_bd_index = gb->gb_bd_index;
+
+      gg->gg_uplink_sw_if_index = uplink_sw_if_index;
+      gg->gg_locks = 1;
 
       /*
        * an egress DVR dpo for internal subnets to use when sending
        * on the uplink interface
        */
-      FOR_EACH_FIB_IP_PROTOCOL (fproto)
-      {
-	gepg->gepg_fib_index[fproto] =
-	  fib_table_find_or_create_and_lock (fproto,
-					     gepg->gepg_rd[fproto],
-					     FIB_SOURCE_PLUGIN_HI);
-
-	if (~0 == gepg->gepg_fib_index[fproto])
+      if (~0 != gg->gg_uplink_sw_if_index)
+	{
+	  FOR_EACH_FIB_IP_PROTOCOL (fproto)
 	  {
-	    return (VNET_API_ERROR_NO_SUCH_FIB);
+	    dvr_dpo_add_or_lock (uplink_sw_if_index,
+				 fib_proto_to_dpo (fproto),
+				 &gg->gg_dpo[fproto]);
 	  }
 
-	dvr_dpo_add_or_lock (uplink_sw_if_index,
-			     fib_proto_to_dpo (fproto),
-			     &gepg->gepg_dpo[fproto]);
-      }
+	  /*
+	   * Add the uplink to the BD
+	   * packets direct from the uplink have had policy applied
+	   */
+	  set_int_l2_mode (vlib_get_main (), vnet_get_main (),
+			   MODE_L2_BRIDGE, gg->gg_uplink_sw_if_index,
+			   gg->gg_bd_index, L2_BD_PORT_TYPE_NORMAL, 0, 0);
+	  l2input_intf_bitmap_enable (gg->gg_uplink_sw_if_index,
+				      L2INPUT_FEAT_GBP_NULL_CLASSIFY, 1);
+	}
 
-      /*
-       * packets direct from the uplink have had policy applied
-       */
-      l2input_intf_bitmap_enable (gepg->gepg_uplink_sw_if_index,
-				  L2INPUT_FEAT_GBP_NULL_CLASSIFY, 1);
-
-      hash_set (gbp_endpoint_group_db.gepg_hash,
-		gepg->gepg_id, gepg - gbp_endpoint_group_pool);
+      hash_set (gbp_endpoint_group_db.gg_hash,
+		gg->gg_id, gg - gbp_endpoint_group_pool);
 
     }
+  else
+    {
+      gg = gbp_endpoint_group_get (ggi);
+      gg->gg_locks++;
+    }
+
+  GBP_EPG_DBG ("add: %U", format_gbp_endpoint_group, gg);
 
   return (0);
 }
 
 void
-gbp_endpoint_group_delete (epg_id_t epg_id)
+gbp_endpoint_group_unlock (index_t ggi)
 {
-  gbp_endpoint_group_t *gepg;
-  uword *p;
+  gbp_endpoint_group_t *gg;
 
-  p = hash_get (gbp_endpoint_group_db.gepg_hash, epg_id);
+  gg = gbp_endpoint_group_get (ggi);
 
-  if (NULL != p)
+  gg->gg_locks--;
+
+  if (0 == gg->gg_locks)
     {
       fib_protocol_t fproto;
 
-      gepg = pool_elt_at_index (gbp_endpoint_group_pool, p[0]);
+      gg = pool_elt_at_index (gbp_endpoint_group_pool, ggi);
 
-      l2input_intf_bitmap_enable (gepg->gepg_uplink_sw_if_index,
-				  L2INPUT_FEAT_GBP_NULL_CLASSIFY, 0);
+      if (~0 != gg->gg_uplink_sw_if_index)
+	{
+	  set_int_l2_mode (vlib_get_main (), vnet_get_main (),
+			   MODE_L3, gg->gg_uplink_sw_if_index,
+			   gg->gg_bd_index, L2_BD_PORT_TYPE_NORMAL, 0, 0);
 
+	  l2input_intf_bitmap_enable (gg->gg_uplink_sw_if_index,
+				      L2INPUT_FEAT_GBP_NULL_CLASSIFY, 0);
+	}
       FOR_EACH_FIB_IP_PROTOCOL (fproto)
       {
-	dpo_reset (&gepg->gepg_dpo[fproto]);
-	fib_table_unlock (gepg->gepg_fib_index[fproto],
-			  fproto, FIB_SOURCE_PLUGIN_HI);
+	dpo_reset (&gg->gg_dpo[fproto]);
       }
+      gbp_bridge_domain_unlock (gg->gg_gbd);
+      gbp_route_domain_unlock (gg->gg_rd);
 
-      hash_unset (gbp_endpoint_group_db.gepg_hash, epg_id);
+      hash_unset (gbp_endpoint_group_db.gg_hash, gg->gg_id);
 
-      pool_put (gbp_endpoint_group_pool, gepg);
+      pool_put (gbp_endpoint_group_pool, gg);
     }
 }
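+
+/*
+ * Note on the locking scheme: add_and_lock/find_and_lock each take a
+ * reference; the EPG (and its references on the BD and RD) is
+ * destroyed by the unlock that drops gg_locks to zero, so delete
+ * below is simply an unlock of the creation reference.
+ */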
 
+int
+gbp_endpoint_group_delete (epg_id_t epg_id)
+{
+  index_t ggi;
+
+  ggi = gbp_endpoint_group_find (epg_id);
+
+  if (INDEX_INVALID != ggi)
+    {
+      GBP_EPG_DBG ("del: %U", format_gbp_endpoint_group,
+		   gbp_endpoint_group_get (ggi));
+      gbp_endpoint_group_unlock (ggi);
+
+      return (0);
+    }
+
+  return (VNET_API_ERROR_NO_SUCH_ENTRY);
+}
+
+u32
+gbp_endpoint_group_get_bd_id (const gbp_endpoint_group_t * gg)
+{
+  const gbp_bridge_domain_t *gb;
+
+  gb = gbp_bridge_domain_get (gg->gg_gbd);
+
+  return (gb->gb_bd_id);
+}
+
+index_t
+gbp_endpoint_group_get_fib_index (gbp_endpoint_group_t * gg,
+				  fib_protocol_t fproto)
+{
+  const gbp_route_domain_t *grd;
+
+  grd = gbp_route_domain_get (gg->gg_rd);
+
+  return (grd->grd_fib_index[fproto]);
+}
+
+u32
+gbp_endpoint_group_get_bvi (gbp_endpoint_group_t * gg)
+{
+  const gbp_bridge_domain_t *gb;
+
+  gb = gbp_bridge_domain_get (gg->gg_gbd);
+
+  return (gb->gb_bvi_sw_if_index);
+}
+
 void
 gbp_endpoint_group_walk (gbp_endpoint_group_cb_t cb, void *ctx)
 {
@@ -190,8 +303,8 @@
       if (~0 == rd_id)
 	return clib_error_return (0, "route-domain must be specified");
 
-      gbp_endpoint_group_add (epg_id, bd_id, rd_id, rd_id,
-			      uplink_sw_if_index);
+      gbp_endpoint_group_add_and_lock (epg_id, bd_id, rd_id,
+				       uplink_sw_if_index);
     }
   else
     gbp_endpoint_group_delete (epg_id);
@@ -213,19 +326,32 @@
   .function = gbp_endpoint_group_cli,
 };
 
-static int
-gbp_endpoint_group_show_one (gbp_endpoint_group_t *gepg, void *ctx)
+u8 *
+format_gbp_endpoint_group (u8 * s, va_list * args)
 {
+  gbp_endpoint_group_t *gg = va_arg (*args, gbp_endpoint_group_t *);
   vnet_main_t *vnm = vnet_get_main ();
+
+  if (NULL != gg)
+    s = format (s, "%d, bd:[%d,%d], rd:[%d] uplink:%U locks:%d",
+                gg->gg_id,
+                gbp_endpoint_group_get_bd_id(gg), gg->gg_bd_index,
+                gg->gg_rd,
+                format_vnet_sw_if_index_name, vnm, gg->gg_uplink_sw_if_index,
+                gg->gg_locks);
+  else
+    s = format (s, "NULL");
+
+  return (s);
+}
+
+static int
+gbp_endpoint_group_show_one (gbp_endpoint_group_t *gg, void *ctx)
+{
   vlib_main_t *vm;
 
   vm = ctx;
-  vlib_cli_output (vm, "  %d, bd:%d, ip4:%d ip6:%d uplink:%U",
-                   gepg->gepg_id,
-                   gepg->gepg_bd,
-                   gepg->gepg_rd[FIB_PROTOCOL_IP4],
-                   gepg->gepg_rd[FIB_PROTOCOL_IP6],
-		   format_vnet_sw_if_index_name, vnm, gepg->gepg_uplink_sw_if_index);
+  vlib_cli_output (vm, "  %U", format_gbp_endpoint_group, gg);
 
   return (1);
 }
@@ -256,6 +382,16 @@
 };
 /* *INDENT-ON* */
 
+static clib_error_t *
+gbp_endpoint_group_init (vlib_main_t * vm)
+{
+  gg_logger = vlib_log_register_class ("gbp", "epg");
+
+  return (NULL);
+}
+
+VLIB_INIT_FUNCTION (gbp_endpoint_group_init);
+
 /*
  * fd.io coding-style-patch-verification: ON
  *
diff --git a/src/plugins/gbp/gbp_endpoint_group.h b/src/plugins/gbp/gbp_endpoint_group.h
index f71e5f5..7116a05 100644
--- a/src/plugins/gbp/gbp_endpoint_group.h
+++ b/src/plugins/gbp/gbp_endpoint_group.h
@@ -28,37 +28,38 @@
   /**
    * ID
    */
-  epg_id_t gepg_id;
+  epg_id_t gg_id;
 
   /**
    * Bridge-domain ID the EPG is in
    */
-  u32 gepg_bd;
+  index_t gg_gbd;
+  index_t gg_bd_index;
 
   /**
    * route-domain/IP-table ID the EPG is in
    */
-  u32 gepg_rd[FIB_PROTOCOL_IP_MAX];
-
-  /**
-   * resulting FIB indices
-   */
-  u32 gepg_fib_index[FIB_PROTOCOL_IP_MAX];
+  index_t gg_rd;
 
   /**
    * Is the EPG an external/NAT
    */
-  u8 gepg_is_ext;
+  u8 gg_is_ext;
 
   /**
    * the uplink interface dedicated to the EPG
    */
-  u32 gepg_uplink_sw_if_index;
+  u32 gg_uplink_sw_if_index;
 
   /**
    * The DPO used in the L3 path for forwarding internal subnets
    */
-  dpo_id_t gepg_dpo[FIB_PROTOCOL_IP_MAX];
+  dpo_id_t gg_dpo[FIB_PROTOCOL_IP_MAX];
+
+  /**
+   * Locks/references to this EPG
+   */
+  u32 gg_locks;
 } gbp_endpoint_group_t;
 
 /**
@@ -66,20 +67,30 @@
  */
 typedef struct gbp_endpoint_group_db_t_
 {
-  uword *gepg_hash;
+  uword *gg_hash;
 } gbp_endpoint_group_db_t;
 
-extern int gbp_endpoint_group_add (epg_id_t epg_id,
-				   u32 bd_id,
-				   u32 ip4_table_id,
-				   u32 ip6_table_id, u32 uplink_sw_if_index);
-extern void gbp_endpoint_group_delete (epg_id_t epg_id);
+extern int gbp_endpoint_group_add_and_lock (epg_id_t epg_id,
+					    u32 bd_id,
+					    u32 rd_id,
+					    u32 uplink_sw_if_index);
+extern index_t gbp_endpoint_group_find_and_lock (epg_id_t epg_id);
+extern index_t gbp_endpoint_group_find (epg_id_t epg_id);
+extern int gbp_endpoint_group_delete (epg_id_t epg_id);
+extern void gbp_endpoint_group_unlock (index_t index);
+extern u32 gbp_endpoint_group_get_bd_id (const gbp_endpoint_group_t *);
+
+extern gbp_endpoint_group_t *gbp_endpoint_group_get (index_t i);
+extern index_t gbp_endpoint_group_get_fib_index (gbp_endpoint_group_t * gg,
+						 fib_protocol_t fproto);
+extern u32 gbp_endpoint_group_get_bvi (gbp_endpoint_group_t * gg);
 
 typedef int (*gbp_endpoint_group_cb_t) (gbp_endpoint_group_t * gbpe,
 					void *ctx);
 extern void gbp_endpoint_group_walk (gbp_endpoint_group_cb_t bgpe, void *ctx);
 
-extern gbp_endpoint_group_t *gbp_endpoint_group_find (epg_id_t epg_id);
+
+extern u8 *format_gbp_endpoint_group (u8 * s, va_list * args);
 
 /**
  * DP functions and databases
@@ -92,14 +103,14 @@
 {
   uword *p;
 
-  p = hash_get (gbp_endpoint_group_db.gepg_hash, epg);
+  p = hash_get (gbp_endpoint_group_db.gg_hash, epg);
 
   if (NULL != p)
     {
-      gbp_endpoint_group_t *gepg;
+      gbp_endpoint_group_t *gg;
 
-      gepg = pool_elt_at_index (gbp_endpoint_group_pool, p[0]);
-      return (gepg->gepg_uplink_sw_if_index);
+      gg = pool_elt_at_index (gbp_endpoint_group_pool, p[0]);
+      return (gg->gg_uplink_sw_if_index);
     }
   return (~0);
 }
@@ -109,14 +120,14 @@
 {
   uword *p;
 
-  p = hash_get (gbp_endpoint_group_db.gepg_hash, epg);
+  p = hash_get (gbp_endpoint_group_db.gg_hash, epg);
 
   if (NULL != p)
     {
-      gbp_endpoint_group_t *gepg;
+      gbp_endpoint_group_t *gg;
 
-      gepg = pool_elt_at_index (gbp_endpoint_group_pool, p[0]);
-      return (&gepg->gepg_dpo[fproto]);
+      gg = pool_elt_at_index (gbp_endpoint_group_pool, p[0]);
+      return (&gg->gg_dpo[fproto]);
     }
   return (NULL);
 }
diff --git a/src/plugins/gbp/gbp_itf.c b/src/plugins/gbp/gbp_itf.c
new file mode 100644
index 0000000..39a1124
--- /dev/null
+++ b/src/plugins/gbp/gbp_itf.c
@@ -0,0 +1,212 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <plugins/gbp/gbp_itf.h>
+
+/**
+ * Attributes and configurations attached to interfaces by GBP
+ */
+typedef struct gbp_itf_t_
+{
+  /**
+   * Number of references to this interface
+   */
+  u32 gi_locks;
+
+  u32 gi_sw_if_index;
+  u32 gi_bd_index;
+
+  /**
+   * L2/L3 Features configured by each user
+   */
+  u32 *gi_l2_input_fbs;
+  u32 gi_l2_input_fb;
+  u32 *gi_l2_output_fbs;
+  u32 gi_l2_output_fb;
+} gbp_itf_t;
+
+static gbp_itf_t *gbp_itfs;
+
+static gbp_itf_t *
+gbp_itf_get (index_t gii)
+{
+  vec_validate (gbp_itfs, gii);
+
+  return (&gbp_itfs[gii]);
+}
+
+static index_t
+gbp_itf_get_itf (u32 sw_if_index)
+{
+  return (sw_if_index);
+}
+
+index_t
+gbp_itf_add_and_lock (u32 sw_if_index, u32 bd_index)
+{
+  gbp_itf_t *gi;
+
+  gi = gbp_itf_get (gbp_itf_get_itf (sw_if_index));
+
+  if (0 == gi->gi_locks)
+    {
+      gi->gi_sw_if_index = sw_if_index;
+      gi->gi_bd_index = bd_index;
+
+      if (~0 != gi->gi_bd_index)
+	set_int_l2_mode (vlib_get_main (), vnet_get_main (),
+			 MODE_L2_BRIDGE, sw_if_index, bd_index,
+			 L2_BD_PORT_TYPE_NORMAL, 0, 0);
+
+    }
+
+  gi->gi_locks++;
+
+  return (sw_if_index);
+}
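+
+/*
+ * note: the itf index is currently just the sw_if_index (see
+ * gbp_itf_get_itf above), so gbp_itfs is a vector indexed directly
+ * by interface.
+ */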
+
+void
+gbp_itf_unlock (index_t gii)
+{
+  gbp_itf_t *gi;
+
+  gi = gbp_itf_get (gii);
+  ASSERT (gi->gi_locks > 0);
+  gi->gi_locks--;
+
+  if (0 == gi->gi_locks)
+    {
+      if (~0 != gi->gi_bd_index)
+	set_int_l2_mode (vlib_get_main (), vnet_get_main (), MODE_L3,
+			 gi->gi_sw_if_index, 0, L2_BD_PORT_TYPE_NORMAL, 0, 0);
+      vec_free (gi->gi_l2_input_fbs);
+      vec_free (gi->gi_l2_output_fbs);
+
+      memset (gi, 0, sizeof (*gi));
+    }
+}
+
+void
+gbp_itf_set_l2_input_feature (index_t gii,
+			      index_t useri, l2input_feat_masks_t feats)
+{
+  u32 diff_fb, new_fb, *fb, feat;
+  gbp_itf_t *gi;
+
+  gi = gbp_itf_get (gii);
+
+  if (gi->gi_bd_index == ~0)
+    return;
+
+  vec_validate (gi->gi_l2_input_fbs, useri);
+  gi->gi_l2_input_fbs[useri] = feats;
+
+  new_fb = 0;
+  vec_foreach (fb, gi->gi_l2_input_fbs)
+  {
+    new_fb |= *fb;
+  }
+
+  /* add new features */
+  diff_fb = (gi->gi_l2_input_fb ^ new_fb) & new_fb;
+
+  /* *INDENT-OFF* */
+  foreach_set_bit (feat, diff_fb,
+  ({
+    l2input_intf_bitmap_enable (gi->gi_sw_if_index, (1 << feat), 1);
+  }));
+  /* *INDENT-ON* */
+
+  /* remove unneeded features */
+  diff_fb = (gi->gi_l2_input_fb ^ new_fb) & gi->gi_l2_input_fb;
+
+  /* *INDENT-OFF* */
+  foreach_set_bit (feat, diff_fb,
+  ({
+    l2input_intf_bitmap_enable (gi->gi_sw_if_index, (1 << feat), 0);
+  }));
+  /* *INDENT-ON* */
+
+  gi->gi_l2_input_fb = new_fb;
+}
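+
+/*
+ * Worked example of the bitmap diff above: if the currently applied
+ * set is 0b0110 and the new union of all users' requests is 0b0011:
+ *   (old ^ new) & new = 0b0001  -> features to enable
+ *   (old ^ new) & old = 0b0100  -> features to disable
+ * the common bit (0b0010) is left untouched.
+ */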
+
+void
+gbp_itf_set_l2_output_feature (index_t gii,
+			       index_t useri, l2output_feat_masks_t feats)
+{
+  u32 diff_fb, new_fb, *fb, feat;
+  gbp_itf_t *gi;
+
+  gi = gbp_itf_get (gii);
+
+  if (gi->gi_bd_index == ~0)
+    return;
+
+  vec_validate (gi->gi_l2_output_fbs, useri);
+  gi->gi_l2_output_fbs[useri] = feats;
+
+  new_fb = 0;
+  vec_foreach (fb, gi->gi_l2_output_fbs)
+  {
+    new_fb |= *fb;
+  }
+
+  /* add new features */
+  diff_fb = (gi->gi_l2_output_fb ^ new_fb) & new_fb;
+
+  /* *INDENT-OFF* */
+  foreach_set_bit (feat, diff_fb,
+  ({
+    l2output_intf_bitmap_enable (gi->gi_sw_if_index, (1 << feat), 1);
+  }));
+  /* *INDENT-ON* */
+
+  /* remove unneeded features */
+  diff_fb = (gi->gi_l2_output_fb ^ new_fb) & gi->gi_l2_output_fb;
+
+  /* *INDENT-OFF* */
+  foreach_set_bit (feat, diff_fb,
+  ({
+    l2output_intf_bitmap_enable (gi->gi_sw_if_index, (1 << feat), 0);
+  }));
+  /* *INDENT-ON* */
+
+  gi->gi_l2_output_fb = new_fb;
+}
+
+u8 *
+format_gbp_itf (u8 * s, va_list * args)
+{
+  index_t gii = va_arg (*args, index_t);
+  gbp_itf_t *gi;
+
+  gi = gbp_itf_get (gii);
+
+  s = format (s, "%U locks:%d input-feats:%U output-feats:%U",
+	      format_vnet_sw_if_index_name, vnet_get_main (),
+	      gi->gi_sw_if_index, gi->gi_locks, format_l2_input_features,
+	      gi->gi_l2_input_fb, format_l2_output_features,
+	      gi->gi_l2_output_fb);
+
+  return (s);
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/gbp/gbp_itf.h b/src/plugins/gbp/gbp_itf.h
new file mode 100644
index 0000000..6ece7b1
--- /dev/null
+++ b/src/plugins/gbp/gbp_itf.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __GBP_INTERFACE_H__
+#define __GBP_INTERFACE_H__
+
+#include <vnet/l2/l2_input.h>
+#include <vnet/l2/l2_output.h>
+
+extern index_t gbp_itf_add_and_lock (u32 sw_if_index, u32 bd_index);
+extern void gbp_itf_unlock (index_t index);
+
+extern void gbp_itf_set_l2_input_feature (index_t gii,
+					  index_t useri,
+					  l2input_feat_masks_t feats);
+extern void gbp_itf_set_l2_output_feature (index_t gii,
+					   index_t useri,
+					   l2output_feat_masks_t feats);
+
+extern u8 *format_gbp_itf (u8 * s, va_list * args);
+
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/gbp/gbp_learn.c b/src/plugins/gbp/gbp_learn.c
new file mode 100644
index 0000000..9239779
--- /dev/null
+++ b/src/plugins/gbp/gbp_learn.c
@@ -0,0 +1,756 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <plugins/gbp/gbp.h>
+#include <plugins/gbp/gbp_learn.h>
+#include <plugins/gbp/gbp_bridge_domain.h>
+#include <vlibmemory/api.h>
+
+#include <vnet/util/throttle.h>
+#include <vnet/l2/l2_input.h>
+#include <vnet/fib/fib_table.h>
+#include <vnet/vxlan-gbp/vxlan_gbp_packet.h>
+
+/**
+ * Grouping of global data for the GBP endpoint learning feature
+ */
+typedef struct gbp_learn_main_t_
+{
+  /**
+   * Next nodes for L2 output features
+   */
+  u32 gl_l2_input_feat_next[32];
+
+  /**
+   * logger - VLIB log class
+   */
+  vlib_log_class_t gl_logger;
+
+  /**
+   * throttles for the DP learning
+   */
+  throttle_t gl_l2_throttle;
+  throttle_t gl_l3_throttle;
+} gbp_learn_main_t;
+
+/**
+ * The maximum learning rate per-hashed EP
+ */
+#define GBP_ENDPOINT_HASH_LEARN_RATE (1e-2)
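+/* i.e. a 10ms suppression window: at most one learn RPC per hashed EP
+ * per window (an assumption based on vnet/util/throttle semantics) */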
+
+static gbp_learn_main_t gbp_learn_main;
+
+#define GBP_LEARN_DBG(...)                                      \
+    vlib_log_debug (gbp_learn_main.gl_logger, __VA_ARGS__);
+
+#define foreach_gbp_learn                      \
+  _(DROP,    "drop")
+
+typedef enum
+{
+#define _(sym,str) GBP_LEARN_ERROR_##sym,
+  foreach_gbp_learn
+#undef _
+    GBP_LEARN_N_ERROR,
+} gbp_learn_error_t;
+
+static char *gbp_learn_error_strings[] = {
+#define _(sym,string) string,
+  foreach_gbp_learn
+#undef _
+};
+
+typedef enum
+{
+#define _(sym,str) GBP_LEARN_NEXT_##sym,
+  foreach_gbp_learn
+#undef _
+    GBP_LEARN_N_NEXT,
+} gbp_learn_next_t;
+
+typedef struct gbp_learn_l2_t_
+{
+  ip46_address_t ip;
+  mac_address_t mac;
+  u32 sw_if_index;
+  u32 bd_index;
+  epg_id_t epg;
+  ip46_address_t outer_src;
+  ip46_address_t outer_dst;
+} gbp_learn_l2_t;
+
+
+static void
+gbp_learn_l2_cp (const gbp_learn_l2_t * gl2)
+{
+  ip46_address_t *ips = NULL;
+
+  GBP_LEARN_DBG ("L2 EP: %U %U, %d",
+		 format_mac_address_t, &gl2->mac,
+		 format_ip46_address, &gl2->ip, IP46_TYPE_ANY, gl2->epg);
+
+  vec_add1 (ips, gl2->ip);
+
+  ASSERT (!ip46_address_is_zero (&gl2->outer_src));
+  ASSERT (!ip46_address_is_zero (&gl2->outer_dst));
+
+  /*
+   * flip the source and dst: they describe the direction in which the
+   * packet was received, whereas this API takes the direction in which
+   * packets are sent
+   */
+  gbp_endpoint_update (gl2->sw_if_index, ips,
+		       &gl2->mac, gl2->epg,
+		       (GBP_ENDPOINT_FLAG_LEARNT |
+			GBP_ENDPOINT_FLAG_REMOTE),
+		       &gl2->outer_dst, &gl2->outer_src, NULL);
+}
+
+static void
+gbp_learn_l2_ip4_dp (const u8 * mac, const ip4_address_t * ip,
+		     u32 bd_index, u32 sw_if_index, epg_id_t epg,
+		     const ip4_address_t * outer_src,
+		     const ip4_address_t * outer_dst)
+{
+  gbp_learn_l2_t gl2 = {
+    .sw_if_index = sw_if_index,
+    .bd_index = bd_index,
+    .epg = epg,
+    .ip.ip4 = *ip,
+    .outer_src.ip4 = *outer_src,
+    .outer_dst.ip4 = *outer_dst,
+  };
+  mac_address_from_bytes (&gl2.mac, mac);
+
+  ASSERT (!ip46_address_is_zero (&gl2.outer_src));
+  ASSERT (!ip46_address_is_zero (&gl2.outer_dst));
+
+  vl_api_rpc_call_main_thread (gbp_learn_l2_cp, (u8 *) & gl2, sizeof (gl2));
+}
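+
+/*
+ * learning is punted from the data-plane to the main thread via the
+ * RPC above, since gbp_endpoint_update() writes FIB, pool and bihash
+ * state that is only safe to modify from the main thread.
+ */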
+
+static void
+gbp_learn_l2_ip6_dp (const u8 * mac, const ip6_address_t * ip,
+		     u32 bd_index, u32 sw_if_index, epg_id_t epg,
+		     const ip4_address_t * outer_src,
+		     const ip4_address_t * outer_dst)
+{
+  gbp_learn_l2_t gl2 = {
+    .sw_if_index = sw_if_index,
+    .bd_index = bd_index,
+    .epg = epg,
+    .ip.ip6 = *ip,
+    .outer_src.ip4 = *outer_src,
+    .outer_dst.ip4 = *outer_dst,
+  };
+  mac_address_from_bytes (&gl2.mac, mac);
+
+  vl_api_rpc_call_main_thread (gbp_learn_l2_cp, (u8 *) & gl2, sizeof (gl2));
+}
+
+static void
+gbp_learn_l2_dp (const u8 * mac, u32 bd_index, u32 sw_if_index,
+		 epg_id_t epg,
+		 const ip4_address_t * outer_src,
+		 const ip4_address_t * outer_dst)
+{
+  gbp_learn_l2_t gl2 = {
+    .sw_if_index = sw_if_index,
+    .bd_index = bd_index,
+    .epg = epg,
+    .outer_src.ip4 = *outer_src,
+    .outer_dst.ip4 = *outer_dst,
+  };
+  mac_address_from_bytes (&gl2.mac, mac);
+
+  vl_api_rpc_call_main_thread (gbp_learn_l2_cp, (u8 *) & gl2, sizeof (gl2));
+}
+
+/**
+ * per-packet trace data
+ */
+typedef struct gbp_learn_l2_trace_t_
+{
+  /* per-pkt trace data */
+  mac_address_t mac;
+  u32 sw_if_index;
+  u32 new;
+  u32 throttled;
+  u32 epg;
+  u32 d_bit;
+} gbp_learn_l2_trace_t;
+
+always_inline void
+gbp_learn_get_outer (const ethernet_header_t * eh0,
+		     ip4_address_t * outer_src, ip4_address_t * outer_dst)
+{
+  ip4_header_t *ip0;
+  u8 *buff;
+
+  /* rewind back to the outer IPv4 header of the iVXLAN encap */
+  buff = (u8 *) eh0;
+  buff -= (sizeof (vxlan_gbp_header_t) +
+	   sizeof (udp_header_t) + sizeof (ip4_header_t));
+
+  ip0 = (ip4_header_t *) buff;
+
+  *outer_src = ip0->src_address;
+  *outer_dst = ip0->dst_address;
+}
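+
+/*
+ * the rewind above assumes the inner frame still sits, contiguously
+ * in the same buffer, behind the encap that vxlan-gbp decap stepped
+ * over:
+ *
+ *   | outer ip4 | udp | vxlan-gbp | inner ethernet (eh0) | ...
+ *
+ * i.e. an IPv4 underlay; an IPv6 underlay would need a different
+ * rewind.
+ */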
+
+static uword
+gbp_learn_l2 (vlib_main_t * vm,
+	      vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+  u32 n_left_from, *from, *to_next, next_index, thread_index, seed;
+  gbp_learn_main_t *glm;
+  f64 time_now;
+
+  glm = &gbp_learn_main;
+  next_index = 0;
+  n_left_from = frame->n_vectors;
+  from = vlib_frame_vector_args (frame);
+  time_now = vlib_time_now (vm);
+  thread_index = vm->thread_index;
+
+  seed = throttle_seed (&glm->gl_l2_throttle, thread_index, time_now);
+
+  while (n_left_from > 0)
+    {
+      u32 n_left_to_next;
+
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+      while (n_left_from > 0 && n_left_to_next > 0)
+	{
+	  ip4_address_t outer_src, outer_dst;
+	  u32 bi0, sw_if_index0, t0, epg0;
+	  const ethernet_header_t *eh0;
+	  gbp_learn_next_t next0;
+	  gbp_endpoint_t *ge0;
+	  vlib_buffer_t *b0;
+
+	  next0 = GBP_LEARN_NEXT_DROP;
+	  bi0 = from[0];
+	  to_next[0] = bi0;
+	  from += 1;
+	  to_next += 1;
+	  n_left_from -= 1;
+	  n_left_to_next -= 1;
+
+	  b0 = vlib_get_buffer (vm, bi0);
+	  sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+
+	  eh0 = vlib_buffer_get_current (b0);
+	  epg0 = vnet_buffer2 (b0)->gbp.src_epg;
+
+	  next0 = vnet_l2_feature_next (b0, glm->gl_l2_input_feat_next,
+					L2INPUT_FEAT_GBP_LEARN);
+
+	  ge0 = gbp_endpoint_find_mac (eh0->src_address,
+				       vnet_buffer (b0)->l2.bd_index);
+
+	  if (vnet_buffer2 (b0)->gbp.flags & VXLAN_GBP_GPFLAGS_D)
+	    {
+	      ge0 = NULL;
+	      t0 = 1;
+	      goto trace;
+	    }
+
+	  /*
+	   * check for new EP or a moved EP
+	   */
+	  if (NULL == ge0 || ge0->ge_sw_if_index != sw_if_index0)
+	    {
+	      /*
+	       * use the last 4 bytes of the mac address as the hash for the EP
+	       */
+	      t0 = throttle_check (&glm->gl_l2_throttle, thread_index,
+				   *((u32 *) (eh0->src_address + 2)), seed);
+	      if (!t0)
+		{
+		  gbp_learn_get_outer (eh0, &outer_src, &outer_dst);
+
+		  switch (clib_net_to_host_u16 (eh0->type))
+		    {
+		    case ETHERNET_TYPE_IP4:
+		      {
+			const ip4_header_t *ip0;
+
+			ip0 = (ip4_header_t *) (eh0 + 1);
+
+			gbp_learn_l2_ip4_dp (eh0->src_address,
+					     &ip0->src_address,
+					     vnet_buffer (b0)->l2.bd_index,
+					     sw_if_index0, epg0,
+					     &outer_src, &outer_dst);
+
+			break;
+		      }
+		    case ETHERNET_TYPE_IP6:
+		      {
+			const ip6_header_t *ip0;
+
+			ip0 = (ip6_header_t *) (eh0 + 1);
+
+			gbp_learn_l2_ip6_dp (eh0->src_address,
+					     &ip0->src_address,
+					     vnet_buffer (b0)->l2.bd_index,
+					     sw_if_index0, epg0,
+					     &outer_src, &outer_dst);
+
+			break;
+		      }
+		    default:
+		      gbp_learn_l2_dp (eh0->src_address,
+				       vnet_buffer (b0)->l2.bd_index,
+				       sw_if_index0, epg0,
+				       &outer_src, &outer_dst);
+		      break;
+		    }
+		}
+	    }
+	  else
+	    {
+	      /*
+	       * this update could happen simultaneously from multiple workers
+	       * but that's OK; we are not interested in being very accurate.
+	       */
+	      t0 = 0;
+	      ge0->ge_last_time = time_now;
+	    }
+	trace:
+	  if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
+	    {
+	      gbp_learn_l2_trace_t *t =
+		vlib_add_trace (vm, node, b0, sizeof (*t));
+	      clib_memcpy (t->mac.bytes, eh0->src_address, 6);
+	      t->new = (NULL == ge0);
+	      t->throttled = t0;
+	      t->sw_if_index = sw_if_index0;
+	      t->epg = epg0;
+	      t->d_bit = ! !(vnet_buffer2 (b0)->gbp.flags &
+			     VXLAN_GBP_GPFLAGS_D);
+	    }
+
+	  /* verify speculative enqueue, maybe switch current next frame */
+	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+					   to_next, n_left_to_next,
+					   bi0, next0);
+	}
+
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    }
+
+  return frame->n_vectors;
+}
+
+/* packet trace format function */
+static u8 *
+format_gbp_learn_l2_trace (u8 * s, va_list * args)
+{
+  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+  gbp_learn_l2_trace_t *t = va_arg (*args, gbp_learn_l2_trace_t *);
+
+  s = format (s, "new:%d throttled:%d d-bit:%d mac:%U itf:%d epg:%d",
+	      t->new, t->throttled, t->d_bit,
+	      format_mac_address_t, &t->mac, t->sw_if_index, t->epg);
+
+  return s;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (gbp_learn_l2_node) = {
+  .function = gbp_learn_l2,
+  .name = "gbp-learn-l2",
+  .vector_size = sizeof (u32),
+  .format_trace = format_gbp_learn_l2_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+
+  .n_errors = ARRAY_LEN(gbp_learn_error_strings),
+  .error_strings = gbp_learn_error_strings,
+
+  .n_next_nodes = GBP_LEARN_N_NEXT,
+
+  .next_nodes = {
+    [GBP_LEARN_NEXT_DROP] = "error-drop",
+  },
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (gbp_learn_l2_node, gbp_learn_l2);
+/* *INDENT-ON* */
+
+typedef struct gbp_learn_l3_t_
+{
+  ip46_address_t ip;
+  u32 fib_index;
+  u32 sw_if_index;
+  epg_id_t epg;
+  ip46_address_t outer_src;
+  ip46_address_t outer_dst;
+} gbp_learn_l3_t;
+
+static void
+gbp_learn_l3_cp (const gbp_learn_l3_t * gl3)
+{
+  ip46_address_t *ips = NULL;
+
+  GBP_LEARN_DBG ("L3 EP: %U, %d", format_ip46_address, &gl3->ip,
+		 IP46_TYPE_ANY, gl3->epg);
+
+  vec_add1 (ips, gl3->ip);
+
+  gbp_endpoint_update (gl3->sw_if_index, ips, NULL, gl3->epg,
+		       (GBP_ENDPOINT_FLAG_REMOTE |
+			GBP_ENDPOINT_FLAG_LEARNT),
+		       &gl3->outer_dst, &gl3->outer_src, NULL);
+}
+
+static void
+gbp_learn_ip4_dp (const ip4_address_t * ip,
+		  u32 fib_index, u32 sw_if_index, epg_id_t epg,
+		  const ip4_address_t * outer_src,
+		  const ip4_address_t * outer_dst)
+{
+  /* *INDENT-OFF* */
+  gbp_learn_l3_t gl3 = {
+    .ip = {
+      .ip4 = *ip,
+    },
+    .sw_if_index = sw_if_index,
+    .fib_index = fib_index,
+    .epg = epg,
+    .outer_src.ip4 = *outer_src,
+    .outer_dst.ip4 = *outer_dst,
+  };
+  /* *INDENT-ON* */
+
+  vl_api_rpc_call_main_thread (gbp_learn_l3_cp, (u8 *) & gl3, sizeof (gl3));
+}
+
+static void
+gbp_learn_ip6_dp (const ip6_address_t * ip,
+		  u32 fib_index, u32 sw_if_index, epg_id_t epg,
+		  const ip4_address_t * outer_src,
+		  const ip4_address_t * outer_dst)
+{
+  /* *INDENT-OFF* */
+  gbp_learn_l3_t gl3 = {
+    .ip = {
+      .ip6 = *ip,
+    },
+    .sw_if_index = sw_if_index,
+    .fib_index = fib_index,
+    .epg = epg,
+    .outer_src.ip4 = *outer_src,
+    .outer_dst.ip4 = *outer_dst,
+  };
+  /* *INDENT-ON* */
+
+  vl_api_rpc_call_main_thread (gbp_learn_l3_cp, (u8 *) & gl3, sizeof (gl3));
+}
+
+/**
+ * per-packet trace data
+ */
+typedef struct gbp_learn_l3_trace_t_
+{
+  /* per-pkt trace data */
+  ip46_address_t ip;
+  u32 sw_if_index;
+  u32 new;
+  u32 throttled;
+  u32 epg;
+} gbp_learn_l3_trace_t;
+
+static uword
+gbp_learn_l3 (vlib_main_t * vm,
+	      vlib_node_runtime_t * node, vlib_frame_t * frame,
+	      fib_protocol_t fproto)
+{
+  u32 n_left_from, *from, *to_next, next_index, thread_index, seed;
+  gbp_learn_main_t *glm;
+  f64 time_now;
+
+  glm = &gbp_learn_main;
+  next_index = 0;
+  n_left_from = frame->n_vectors;
+  from = vlib_frame_vector_args (frame);
+  time_now = vlib_time_now (vm);
+  thread_index = vm->thread_index;
+
+  seed = throttle_seed (&glm->gl_l3_throttle, thread_index, time_now);
+
+  while (n_left_from > 0)
+    {
+      u32 n_left_to_next;
+
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+      while (n_left_from > 0 && n_left_to_next > 0)
+	{
+	  u32 bi0, sw_if_index0, t0, epg0, fib_index0;
+	  CLIB_UNUSED (const ip4_header_t *) ip4_0;
+	  CLIB_UNUSED (const ip6_header_t *) ip6_0;
+	  ip4_address_t outer_src, outer_dst;
+	  ethernet_header_t *eth0;
+	  gbp_learn_next_t next0;
+	  gbp_endpoint_t *ge0;
+	  vlib_buffer_t *b0;
+
+	  next0 = GBP_LEARN_NEXT_DROP;
+	  bi0 = from[0];
+	  to_next[0] = bi0;
+	  from += 1;
+	  to_next += 1;
+	  n_left_from -= 1;
+	  n_left_to_next -= 1;
+
+	  b0 = vlib_get_buffer (vm, bi0);
+	  sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+	  epg0 = vnet_buffer2 (b0)->gbp.src_epg;
+	  ip6_0 = NULL;
+	  ip4_0 = NULL;
+
+	  vnet_feature_next (&next0, b0);
+
+	  if (vnet_buffer2 (b0)->gbp.flags & VXLAN_GBP_GPFLAGS_D)
+	    {
+	      t0 = 1;
+	      ge0 = NULL;
+	      goto trace;
+	    }
+
+	  fib_index0 = fib_table_get_index_for_sw_if_index (fproto,
+							    sw_if_index0);
+
+	  if (FIB_PROTOCOL_IP6 == fproto)
+	    {
+	      ip6_0 = vlib_buffer_get_current (b0);
+	      eth0 = (ethernet_header_t *) (((u8 *) ip6_0) - sizeof (*eth0));
+
+	      gbp_learn_get_outer (eth0, &outer_src, &outer_dst);
+
+	      ge0 = gbp_endpoint_find_ip6 (&ip6_0->src_address, fib_index0);
+
+	      if (NULL == ge0)
+		{
+		  t0 = throttle_check (&glm->gl_l3_throttle,
+				       thread_index,
+				       ip6_address_hash_to_u32
+				       (&ip6_0->src_address), seed);
+
+		  if (!t0)
+		    {
+		      gbp_learn_ip6_dp (&ip6_0->src_address,
+					fib_index0, sw_if_index0, epg0,
+					&outer_src, &outer_dst);
+		    }
+		}
+	      else
+		{
+		  /*
+		   * this update could happen simultaneously from multiple
+		   * workers but that's OK; we are not interested in being
+		   * very accurate.
+		   */
+		  t0 = 0;
+		  ge0->ge_last_time = time_now;
+		}
+	    }
+	  else
+	    {
+	      ip4_0 = vlib_buffer_get_current (b0);
+	      eth0 = (ethernet_header_t *) (((u8 *) ip4_0) - sizeof (*eth0));
+
+	      gbp_learn_get_outer (eth0, &outer_src, &outer_dst);
+	      ge0 = gbp_endpoint_find_ip4 (&ip4_0->src_address, fib_index0);
+
+	      if (NULL == ge0)
+		{
+		  t0 = throttle_check (&glm->gl_l3_throttle, thread_index,
+				       ip4_0->src_address.as_u32, seed);
+
+		  if (!t0)
+		    {
+		      gbp_learn_ip4_dp (&ip4_0->src_address,
+					fib_index0, sw_if_index0, epg0,
+					&outer_src, &outer_dst);
+		    }
+		}
+	      else
+		{
+		  /*
+		   * this update could happen simultaneously from multiple
+		   * workers but that's OK; we are not interested in being
+		   * very accurate.
+		   */
+		  t0 = 0;
+		  ge0->ge_last_time = time_now;
+		}
+	    }
+	trace:
+	  if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
+	    {
+	      gbp_learn_l3_trace_t *t;
+
+	      t = vlib_add_trace (vm, node, b0, sizeof (*t));
+	      if (FIB_PROTOCOL_IP6 == fproto && ip6_0)
+		ip46_address_set_ip6 (&t->ip, &ip6_0->src_address);
+	      if (FIB_PROTOCOL_IP4 == fproto && ip4_0)
+		ip46_address_set_ip4 (&t->ip, &ip4_0->src_address);
+	      t->new = (NULL == ge0);
+	      t->throttled = t0;
+	      t->sw_if_index = sw_if_index0;
+	      t->epg = epg0;
+	    }
+
+	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+					   to_next, n_left_to_next,
+					   bi0, next0);
+	}
+
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    }
+
+  return frame->n_vectors;
+}
+
+/* packet trace format function */
+static u8 *
+format_gbp_learn_l3_trace (u8 * s, va_list * args)
+{
+  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+  gbp_learn_l3_trace_t *t = va_arg (*args, gbp_learn_l3_trace_t *);
+
+  s = format (s, "new:%d throttled:%d ip:%U itf:%d epg:%d",
+	      t->new, t->throttled,
+	      format_ip46_address, &t->ip, IP46_TYPE_ANY, t->sw_if_index,
+	      t->epg);
+
+  return s;
+}
+
+static uword
+gbp_learn_ip4 (vlib_main_t * vm,
+	       vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+  return (gbp_learn_l3 (vm, node, frame, FIB_PROTOCOL_IP4));
+}
+
+static uword
+gbp_learn_ip6 (vlib_main_t * vm,
+	       vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+  return (gbp_learn_l3 (vm, node, frame, FIB_PROTOCOL_IP6));
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (gbp_learn_ip4_node) = {
+  .function = gbp_learn_ip4,
+  .name = "gbp-learn-ip4",
+  .vector_size = sizeof (u32),
+  .format_trace = format_gbp_learn_l3_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (gbp_learn_ip4_node, gbp_learn_ip4);
+
+VNET_FEATURE_INIT (gbp_learn_ip4, static) =
+{
+  .arc_name = "ip4-unicast",
+  .node_name = "gbp-learn-ip4",
+};
+
+VLIB_REGISTER_NODE (gbp_learn_ip6_node) = {
+  .function = gbp_learn_ip6,
+  .name = "gbp-learn-ip6",
+  .vector_size = sizeof (u32),
+  .format_trace = format_gbp_learn_l3_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (gbp_learn_ip6_node, gbp_learn_ip6);
+
+VNET_FEATURE_INIT (gbp_learn_ip6, static) =
+{
+  .arc_name = "ip6-unicast",
+  .node_name = "gbp-learn-ip6",
+};
+
+/* *INDENT-ON* */
+
+void
+gbp_learn_enable (u32 sw_if_index, gbb_learn_mode_t mode)
+{
+  if (GBP_LEARN_MODE_L2 == mode)
+    l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_GBP_LEARN, 1);
+  else
+    {
+      vnet_feature_enable_disable ("ip4-unicast",
+				   "gbp-learn-ip4", sw_if_index, 1, 0, 0);
+      vnet_feature_enable_disable ("ip6-unicast",
+				   "gbp-learn-ip6", sw_if_index, 1, 0, 0);
+    }
+}
+
+void
+gbp_learn_disable (u32 sw_if_index, gbb_learn_mode_t mode)
+{
+  if (GBP_LEARN_MODE_L2 == mode)
+    l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_GBP_LEARN, 0);
+  else
+    {
+      vnet_feature_enable_disable ("ip4-unicast",
+				   "gbp-learn-ip4", sw_if_index, 0, 0, 0);
+      vnet_feature_enable_disable ("ip6-unicast",
+				   "gbp-learn-ip6", sw_if_index, 0, 0, 0);
+    }
+}
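+
+/*
+ * Hypothetical usage sketch: a vxlan-gbp tunnel bound into a bridge
+ * domain would enable L2 learning, while one terminating in a route
+ * domain would enable L3 learning:
+ *
+ *   gbp_learn_enable (tun_sw_if_index, GBP_LEARN_MODE_L2);
+ *   ...
+ *   gbp_learn_disable (tun_sw_if_index, GBP_LEARN_MODE_L2);
+ */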
+
+static clib_error_t *
+gbp_learn_init (vlib_main_t * vm)
+{
+  gbp_learn_main_t *glm = &gbp_learn_main;
+  vlib_thread_main_t *tm = &vlib_thread_main;
+
+  /* Initialize the feature next-node indices */
+  feat_bitmap_init_next_nodes (vm,
+			       gbp_learn_l2_node.index,
+			       L2INPUT_N_FEAT,
+			       l2input_get_feat_names (),
+			       glm->gl_l2_input_feat_next);
+
+  throttle_init (&glm->gl_l2_throttle,
+		 tm->n_vlib_mains, GBP_ENDPOINT_HASH_LEARN_RATE);
+
+  throttle_init (&glm->gl_l3_throttle,
+		 tm->n_vlib_mains, GBP_ENDPOINT_HASH_LEARN_RATE);
+
+  glm->gl_logger = vlib_log_register_class ("gbp", "learn");
+
+  return 0;
+}
+
+VLIB_INIT_FUNCTION (gbp_learn_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/gbp/gbp_learn.h b/src/plugins/gbp/gbp_learn.h
new file mode 100644
index 0000000..836daf8
--- /dev/null
+++ b/src/plugins/gbp/gbp_learn.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __GBP_LEARN_H__
+#define __GBP_LEARN_H__
+
+#include <plugins/gbp/gbp.h>
+
+typedef enum gbp_learn_mode_t_
+{
+  GBP_LEARN_MODE_L2,
+  GBP_LEARN_MODE_L3,
+} gbp_learn_mode_t;
+
+extern void gbp_learn_enable (u32 sw_if_index, gbp_learn_mode_t mode);
+extern void gbp_learn_disable (u32 sw_if_index, gbp_learn_mode_t mode);
+
+extern void gbp_learn_set_inactive_threshold (u32 max_age);
+
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/gbp/gbp_policy.c b/src/plugins/gbp/gbp_policy.c
index f57aa07..6d84a99 100644
--- a/src/plugins/gbp/gbp_policy.c
+++ b/src/plugins/gbp/gbp_policy.c
@@ -15,6 +15,8 @@
 
 #include <plugins/gbp/gbp.h>
 
+#include <vnet/vxlan-gbp/vxlan_gbp_packet.h>
+
 /**
  * Grouping of global data for the GBP source EPG classification feature
  */
@@ -23,7 +25,7 @@
   /**
    * Next nodes for L2 output features
    */
-  u32 l2_output_feat_next[32];
+  u32 l2_output_feat_next[2][32];
 } gbp_policy_main_t;
 
 static gbp_policy_main_t gbp_policy_main;
@@ -59,14 +61,16 @@
 typedef struct gbp_policy_trace_t_
 {
   /* per-pkt trace data */
-  epg_id_t src_epg;
-  epg_id_t dst_epg;
+  u32 src_epg;
+  u32 dst_epg;
   u32 acl_index;
+  u32 allowed;
 } gbp_policy_trace_t;
 
 static uword
-gbp_policy (vlib_main_t * vm,
-	    vlib_node_runtime_t * node, vlib_frame_t * frame)
+gbp_policy_inline (vlib_main_t * vm,
+		   vlib_node_runtime_t * node,
+		   vlib_frame_t * frame, u8 is_port_based)
 {
   gbp_main_t *gm = &gbp_main;
   gbp_policy_main_t *gpm = &gbp_policy_main;
@@ -85,7 +89,8 @@
 
       while (n_left_from > 0 && n_left_to_next > 0)
 	{
-	  const gbp_endpoint_t *gep0;
+	  const ethernet_header_t *h0;
+	  const gbp_endpoint_t *ge0;
 	  gbp_policy_next_t next0;
 	  gbp_contract_key_t key0;
 	  gbp_contract_value_t value0 = {
@@ -103,13 +108,39 @@
 	  n_left_to_next -= 1;
 
 	  b0 = vlib_get_buffer (vm, bi0);
+	  h0 = vlib_buffer_get_current (b0);
+	  sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
 
 	  /*
+	   * If the A-bit is set then policy has already been applied
+	   * and we skip enforcement here.
+	   */
+	  if (vnet_buffer2 (b0)->gbp.flags & VXLAN_GBP_GPFLAGS_A)
+	    {
+	      next0 = vnet_l2_feature_next (b0,
+					    gpm->l2_output_feat_next
+					    [is_port_based],
+					    (is_port_based ?
+					     L2OUTPUT_FEAT_GBP_POLICY_PORT :
+					     L2OUTPUT_FEAT_GBP_POLICY_MAC));
+	      key0.as_u32 = ~0;
+	      goto trace;
+	    }
+	  /*
 	   * determine the src and dst EPG
 	   */
-	  sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
-	  gep0 = gbp_endpoint_get_itf (sw_if_index0);
-	  key0.gck_dst = gep0->ge_epg_id;
+	  if (is_port_based)
+	    ge0 = gbp_endpoint_find_itf (sw_if_index0);
+	  else
+	    ge0 = gbp_endpoint_find_mac (h0->dst_address,
+					 vnet_buffer (b0)->l2.bd_index);
+
+	  if (NULL != ge0)
+	    key0.gck_dst = ge0->ge_epg_id;
+	  else
+	    {
+	      /* cannot determine the destination EP => drop */
+	      key0.as_u32 = ~0;
+	      goto trace;
+	    }
+
 	  key0.gck_src = vnet_buffer2 (b0)->gbp.src_epg;
 
 	  if (EPG_INVALID != key0.gck_src)
@@ -119,8 +150,14 @@
 		  /*
 		   * intra-epg allowed
 		   */
-		  next0 = vnet_l2_feature_next (b0, gpm->l2_output_feat_next,
-						L2OUTPUT_FEAT_GBP_POLICY);
+		  next0 =
+		    vnet_l2_feature_next (b0,
+					  gpm->l2_output_feat_next
+					  [is_port_based],
+					  (is_port_based ?
+					   L2OUTPUT_FEAT_GBP_POLICY_PORT :
+					   L2OUTPUT_FEAT_GBP_POLICY_MAC));
+		  vnet_buffer2 (b0)->gbp.flags |= VXLAN_GBP_GPFLAGS_A;
 		}
 	      else
 		{
@@ -163,9 +200,19 @@
 						      &trace_bitmap0);
 
 		      if (action0 > 0)
-			next0 =
-			  vnet_l2_feature_next (b0, gpm->l2_output_feat_next,
-						L2OUTPUT_FEAT_GBP_POLICY);
+			{
+			  vnet_buffer2 (b0)->gbp.flags |= VXLAN_GBP_GPFLAGS_A;
+
+			  next0 =
+			    vnet_l2_feature_next (b0,
+						  gpm->l2_output_feat_next
+						  [is_port_based],
+						  (is_port_based ?
+						   L2OUTPUT_FEAT_GBP_POLICY_PORT
+						   :
+						   L2OUTPUT_FEAT_GBP_POLICY_MAC));
+			}
 		    }
 		}
 	    }
@@ -175,10 +222,15 @@
 	       * the src EPG is not set when the packet arrives on an EPG
 	       * uplink interface and we do not need to apply policy
 	       */
-	      next0 = vnet_l2_feature_next (b0, gpm->l2_output_feat_next,
-					    L2OUTPUT_FEAT_GBP_POLICY);
+	      next0 =
+		vnet_l2_feature_next (b0,
+				      gpm->l2_output_feat_next[is_port_based],
+				      (is_port_based ?
+				       L2OUTPUT_FEAT_GBP_POLICY_PORT :
+				       L2OUTPUT_FEAT_GBP_POLICY_MAC));
 	    }
 
+	trace:
 	  if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
 	    {
 	      gbp_policy_trace_t *t =
@@ -186,6 +238,7 @@
 	      t->src_epg = key0.gck_src;
 	      t->dst_epg = key0.gck_dst;
 	      t->acl_index = value0.gc_acl_index;
+	      t->allowed = (next0 != GBP_POLICY_NEXT_DENY);
 	    }
 
 	  /* verify speculative enqueue, maybe switch current next frame */
@@ -200,6 +253,20 @@
   return frame->n_vectors;
 }
 
+static uword
+gbp_policy_port (vlib_main_t * vm,
+		 vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+  return (gbp_policy_inline (vm, node, frame, 1));
+}
+
+static uword
+gbp_policy_mac (vlib_main_t * vm,
+		vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+  return (gbp_policy_inline (vm, node, frame, 0));
+}
+
 /* packet trace format function */
 static u8 *
 format_gbp_policy_trace (u8 * s, va_list * args)
@@ -209,16 +276,16 @@
   gbp_policy_trace_t *t = va_arg (*args, gbp_policy_trace_t *);
 
   s =
-    format (s, "src:%d, dst:%d, acl:%d", t->src_epg, t->dst_epg,
-	    t->acl_index);
+    format (s, "src:%d, dst:%d, acl:%d allowed:%d",
+	    t->src_epg, t->dst_epg, t->acl_index, t->allowed);
 
   return s;
 }
 
 /* *INDENT-OFF* */
-VLIB_REGISTER_NODE (gbp_policy_node) = {
-  .function = gbp_policy,
-  .name = "gbp-policy",
+VLIB_REGISTER_NODE (gbp_policy_port_node) = {
+  .function = gbp_policy_port,
+  .name = "gbp-policy-port",
   .vector_size = sizeof (u32),
   .format_trace = format_gbp_policy_trace,
   .type = VLIB_NODE_TYPE_INTERNAL,
@@ -233,7 +300,26 @@
   },
 };
 
-VLIB_NODE_FUNCTION_MULTIARCH (gbp_policy_node, gbp_policy);
+VLIB_NODE_FUNCTION_MULTIARCH (gbp_policy_port_node, gbp_policy_port);
+
+VLIB_REGISTER_NODE (gbp_policy_mac_node) = {
+  .function = gbp_policy_mac,
+  .name = "gbp-policy-mac",
+  .vector_size = sizeof (u32),
+  .format_trace = format_gbp_policy_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+
+  .n_errors = ARRAY_LEN(gbp_policy_error_strings),
+  .error_strings = gbp_policy_error_strings,
+
+  .n_next_nodes = GBP_POLICY_N_NEXT,
+
+  .next_nodes = {
+    [GBP_POLICY_NEXT_DENY] = "error-drop",
+  },
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (gbp_policy_mac_node, gbp_policy_mac);
 
 /* *INDENT-ON* */
 
@@ -245,10 +331,15 @@
 
   /* Initialize the feature next-node indexes */
   feat_bitmap_init_next_nodes (vm,
-			       gbp_policy_node.index,
+			       gbp_policy_port_node.index,
 			       L2OUTPUT_N_FEAT,
 			       l2output_get_feat_names (),
-			       gpm->l2_output_feat_next);
+			       gpm->l2_output_feat_next[1]);
+  feat_bitmap_init_next_nodes (vm,
+			       gbp_policy_mac_node.index,
+			       L2OUTPUT_N_FEAT,
+			       l2output_get_feat_names (),
+			       gpm->l2_output_feat_next[0]);
 
   return error;
 }
diff --git a/src/plugins/gbp/gbp_policy_dpo.c b/src/plugins/gbp/gbp_policy_dpo.c
index a2d9510..fd9dbce 100644
--- a/src/plugins/gbp/gbp_policy_dpo.c
+++ b/src/plugins/gbp/gbp_policy_dpo.c
@@ -17,6 +17,8 @@
 #include <vnet/fib/ip4_fib.h>
 #include <vnet/fib/ip6_fib.h>
 #include <vnet/dpo/load_balance.h>
+#include <vnet/dpo/drop_dpo.h>
+#include <vnet/vxlan-gbp/vxlan_gbp_packet.h>
 
 #include <plugins/gbp/gbp.h>
 #include <plugins/gbp/gbp_policy_dpo.h>
@@ -49,7 +51,7 @@
 {
   gbp_policy_dpo_t *gpd;
 
-  pool_get (gbp_policy_dpo_pool, gpd);
+  pool_get_zero (gbp_policy_dpo_pool, gpd);
 
   return (gpd);
 }
@@ -110,19 +112,24 @@
   dpo_id_t parent = DPO_INVALID;
 
   gpd = gbp_policy_dpo_alloc ();
-  clib_memset (gpd, 0, sizeof (*gpd));
 
   gpd->gpd_proto = dproto;
   gpd->gpd_sw_if_index = sw_if_index;
   gpd->gpd_epg = epg;
 
-  /*
-   * stack on the DVR DPO for the output interface
-   */
-  dvr_dpo_add_or_lock (sw_if_index, dproto, &parent);
+  if (~0 != sw_if_index)
+    {
+      /*
+       * stack on the DVR DPO for the output interface
+       */
+      dvr_dpo_add_or_lock (sw_if_index, dproto, &parent);
+    }
+  else
+    {
+      dpo_copy (&parent, drop_dpo_get (dproto));
+    }
 
   dpo_stack (gbp_policy_dpo_type, dproto, &gpd->gpd_dpo, &parent);
-
   dpo_set (dpo, gbp_policy_dpo_type, dproto, gbp_policy_dpo_get_index (gpd));
 }
 
@@ -144,11 +151,36 @@
   return (s);
 }
 
+/**
+ * Interpose a policy DPO
+ */
+static void
+gbp_policy_dpo_interpose (const dpo_id_t * original,
+			  const dpo_id_t * parent, dpo_id_t * clone)
+{
+  gbp_policy_dpo_t *gpd, *gpd_clone;
+
+  gpd_clone = gbp_policy_dpo_alloc ();
+  gpd = gbp_policy_dpo_get (original->dpoi_index);
+
+  gpd_clone->gpd_proto = gpd->gpd_proto;
+  gpd_clone->gpd_epg = gpd->gpd_epg;
+  gpd_clone->gpd_sw_if_index = gpd->gpd_sw_if_index;
+
+  dpo_stack (gbp_policy_dpo_type,
+	     gpd_clone->gpd_proto, &gpd_clone->gpd_dpo, parent);
+
+  dpo_set (clone,
+	   gbp_policy_dpo_type,
+	   gpd_clone->gpd_proto, gbp_policy_dpo_get_index (gpd_clone));
+}
+
 const static dpo_vft_t gbp_policy_dpo_vft = {
   .dv_lock = gbp_policy_dpo_lock,
   .dv_unlock = gbp_policy_dpo_unlock,
   .dv_format = format_gbp_policy_dpo,
   .dv_get_urpf = gbp_policy_dpo_get_urpf,
+  .dv_mk_interpose = gbp_policy_dpo_interpose,
 };
 
 /**
@@ -195,6 +227,7 @@
   u32 src_epg;
   u32 dst_epg;
   u32 acl_index;
+  u32 a_bit;
 } gbp_policy_dpo_trace_t;
 
 typedef enum
@@ -241,10 +274,18 @@
 	  next0 = GBP_POLICY_DROP;
 
 	  b0 = vlib_get_buffer (vm, bi0);
+
 	  gpd0 =
 	    gbp_policy_dpo_get_i (vnet_buffer (b0)->ip.adj_index[VLIB_TX]);
 	  vnet_buffer (b0)->ip.adj_index[VLIB_TX] = gpd0->gpd_dpo.dpoi_index;
 
+	  if (vnet_buffer2 (b0)->gbp.flags & VXLAN_GBP_GPFLAGS_A)
+	    {
+	      next0 = gpd0->gpd_dpo.dpoi_next_node;
+	      key0.as_u32 = ~0;
+	      goto trace;
+	    }
+
 	  key0.gck_src = vnet_buffer2 (b0)->gbp.src_epg;
 	  key0.gck_dst = gpd0->gpd_epg;
 
@@ -256,6 +297,7 @@
 		   * intra-epg allowed
 		   */
 		  next0 = gpd0->gpd_dpo.dpoi_next_node;
+		  vnet_buffer2 (b0)->gbp.flags |= VXLAN_GBP_GPFLAGS_A;
 		}
 	      else
 		{
@@ -287,7 +329,10 @@
 						      &trace_bitmap0);
 
 		      if (action0 > 0)
-			next0 = gpd0->gpd_dpo.dpoi_next_node;
+			{
+			  vnet_buffer2 (b0)->gbp.flags |= VXLAN_GBP_GPFLAGS_A;
+			  next0 = gpd0->gpd_dpo.dpoi_next_node;
+			}
 		    }
 		}
 	    }
@@ -299,7 +344,7 @@
 	       */
 	      next0 = gpd0->gpd_dpo.dpoi_next_node;
 	    }
-
+	trace:
 	  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
 	    {
 	      gbp_policy_dpo_trace_t *tr;
@@ -308,6 +353,7 @@
 	      tr->src_epg = key0.gck_src;
 	      tr->dst_epg = key0.gck_dst;
 	      tr->acl_index = value0.gc_acl_index;
+	      tr->a_bit = vnet_buffer2 (b0)->gbp.flags & VXLAN_GBP_GPFLAGS_A;
 	    }
 
 	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
@@ -325,8 +371,8 @@
   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
   gbp_policy_dpo_trace_t *t = va_arg (*args, gbp_policy_dpo_trace_t *);
 
-  s = format (s, " src-epg:%d dst-epg:%d acl-index:%d",
-	      t->src_epg, t->dst_epg, t->acl_index);
+  s = format (s, " src-epg:%d dst-epg:%d acl-index:%d a-bit:%d",
+	      t->src_epg, t->dst_epg, t->acl_index, t->a_bit);
 
   return s;
 }
diff --git a/src/plugins/gbp/gbp_recirc.c b/src/plugins/gbp/gbp_recirc.c
index 95e8066..57ba408 100644
--- a/src/plugins/gbp/gbp_recirc.c
+++ b/src/plugins/gbp/gbp_recirc.c
@@ -16,6 +16,7 @@
 #include <plugins/gbp/gbp_recirc.h>
 #include <plugins/gbp/gbp_endpoint_group.h>
 #include <plugins/gbp/gbp_endpoint.h>
+#include <plugins/gbp/gbp_itf.h>
 
 #include <vnet/dpo/dvr_dpo.h>
 #include <vnet/fib/fib_table.h>
@@ -30,6 +31,25 @@
  */
 index_t *gbp_recirc_db;
 
+/**
+ * logger
+ */
+vlib_log_class_t gr_logger;
+
+#define GBP_RECIRC_DBG(...)                           \
+    vlib_log_debug (gr_logger, __VA_ARGS__);
+
+u8 *
+format_gbp_recirc (u8 * s, va_list * args)
+{
+  gbp_recirc_t *gr = va_arg (*args, gbp_recirc_t *);
+  vnet_main_t *vnm = vnet_get_main ();
+
+  return format (s, "  %U, epg:%d, ext:%d",
+		 format_vnet_sw_if_index_name, vnm,
+		 gr->gr_sw_if_index, gr->gr_epg, gr->gr_is_ext);
+}
+
 int
 gbp_recirc_add (u32 sw_if_index, epg_id_t epg_id, u8 is_ext)
 {
@@ -42,8 +62,14 @@
 
   if (INDEX_INVALID == gri)
     {
-      gbp_endpoint_group_t *gepg;
+      gbp_endpoint_group_t *gg;
       fib_protocol_t fproto;
+      index_t ggi;
+
+      ggi = gbp_endpoint_group_find_and_lock (epg_id);
+
+      if (INDEX_INVALID == ggi)
+	return (VNET_API_ERROR_NO_SUCH_ENTRY);
 
       pool_get (gbp_recirc_pool, gr);
       clib_memset (gr, 0, sizeof (*gr));
@@ -62,17 +88,21 @@
       /*
        * cache the FIB indicies of the EPG
        */
-      gepg = gbp_endpoint_group_find (gr->gr_epg);
+      gr->gr_epgi = ggi;
 
-      if (NULL == gepg)
-	return (VNET_API_ERROR_NO_SUCH_ENTRY);
-
+      gg = gbp_endpoint_group_get (gr->gr_epgi);
       FOR_EACH_FIB_IP_PROTOCOL (fproto)
       {
-	gr->gr_fib_index[fproto] = gepg->gepg_fib_index[fproto];
+	gr->gr_fib_index[fproto] =
+	  gbp_endpoint_group_get_fib_index (gg, fproto);
       }
 
       /*
+       * bind to the bridge-domain of the EPG
+       */
+      gr->gr_itf = gbp_itf_add_and_lock (gr->gr_sw_if_index, gg->gg_bd_index);
+
+      /*
        * Packets on the recirculation interface are subject to src-EPG
        * classification. Recirc interfaces are L2-emulation mode.
        *   for internal EPGs this is via an LPM on all external subnets.
@@ -80,13 +110,19 @@
        */
       if (gr->gr_is_ext)
 	{
+	  mac_address_t mac;
 	  /*
 	   * recirc is for post-NAT translation packets going into
 	   * the external EPG, these are classified to the NAT EPG
 	   * based on its port
 	   */
+	  mac_address_from_bytes (&mac,
+				  vnet_sw_interface_get_hw_address
+				  (vnet_get_main (), gr->gr_sw_if_index));
 	  gbp_endpoint_update (gr->gr_sw_if_index,
-			       NULL, NULL, gr->gr_epg, &gr->gr_ep);
+			       NULL, &mac, gr->gr_epg,
+			       GBP_ENDPOINT_FLAG_NONE,
+			       NULL, NULL, &gr->gr_ep);
 	  vnet_feature_enable_disable ("ip4-unicast",
 				       "ip4-gbp-src-classify",
 				       gr->gr_sw_if_index, 1, 0, 0);
@@ -111,7 +147,12 @@
 
       gbp_recirc_db[sw_if_index] = gri;
     }
+  else
+    {
+      gr = gbp_recirc_get (gri);
+    }
 
+  GBP_RECIRC_DBG ("add: %U", format_gbp_recirc, gr);
   return (0);
 }
 
@@ -127,6 +168,8 @@
     {
       gr = pool_elt_at_index (gbp_recirc_pool, gri);
 
+      GBP_RECIRC_DBG ("del: %U", format_gbp_recirc, gr);
+
       if (gr->gr_is_ext)
 	{
 	  gbp_endpoint_delete (gr->gr_ep);
@@ -150,6 +193,9 @@
       ip4_sw_interface_enable_disable (gr->gr_sw_if_index, 0);
       ip6_sw_interface_enable_disable (gr->gr_sw_if_index, 0);
 
+      gbp_itf_unlock (gr->gr_itf);
+
+      gbp_endpoint_group_unlock (gr->gr_epgi);
       gbp_recirc_db[sw_if_index] = INDEX_INVALID;
       pool_put (gbp_recirc_pool, gr);
     }
@@ -158,12 +204,12 @@
 void
 gbp_recirc_walk (gbp_recirc_cb_t cb, void *ctx)
 {
-  gbp_recirc_t *gbpe;
+  gbp_recirc_t *ge;
 
   /* *INDENT-OFF* */
-  pool_foreach(gbpe, gbp_recirc_pool,
+  pool_foreach(ge, gbp_recirc_pool,
   {
-    if (!cb(gbpe, ctx))
+    if (!cb(ge, ctx))
       break;
   });
   /* *INDENT-ON* */
@@ -172,13 +218,7 @@
 static int
 gbp_recirc_show_one (gbp_recirc_t * gr, void *ctx)
 {
-  vnet_main_t *vnm = vnet_get_main ();
-  vlib_main_t *vm;
-
-  vm = ctx;
-  vlib_cli_output (vm, "  %U, epg:%d, ext:%d",
-		   format_vnet_sw_if_index_name, vnm,
-		   gr->gr_sw_if_index, gr->gr_epg, gr->gr_is_ext);
+  vlib_cli_output (ctx, "  %U", format_gbp_recirc, gr);
 
   return (1);
 }
@@ -193,7 +233,6 @@
   return (NULL);
 }
 
-
 /*?
  * Show Group Based Policy Recircs and derived information
  *
@@ -209,6 +248,16 @@
 };
 /* *INDENT-ON* */
 
+static clib_error_t *
+gbp_recirc_init (vlib_main_t * vm)
+{
+  gr_logger = vlib_log_register_class ("gbp", "recirc");
+
+  return (NULL);
+}
+
+VLIB_INIT_FUNCTION (gbp_recirc_init);
+
 /*
  * fd.io coding-style-patch-verification: ON
  *
diff --git a/src/plugins/gbp/gbp_recirc.h b/src/plugins/gbp/gbp_recirc.h
index 148a5be..1d1a88a 100644
--- a/src/plugins/gbp/gbp_recirc.h
+++ b/src/plugins/gbp/gbp_recirc.h
@@ -30,6 +30,11 @@
   epg_id_t gr_epg;
 
   /**
+   * The index of the EPG
+   */
+  index_t gr_epgi;
+
+  /**
    * FIB indices the EPG is mapped to
    */
   u32 gr_fib_index[FIB_PROTOCOL_IP_MAX];
@@ -43,6 +48,7 @@
   /**
    */
   u32 gr_sw_if_index;
+  u32 gr_itf;
 
   /**
    * The endpoint created to represent the reric interface
@@ -62,7 +68,7 @@
 extern gbp_recirc_t *gbp_recirc_pool;
 extern index_t *gbp_recirc_db;
 
-always_inline const gbp_recirc_t *
+always_inline gbp_recirc_t *
 gbp_recirc_get (u32 sw_if_index)
 {
   return (pool_elt_at_index (gbp_recirc_pool, gbp_recirc_db[sw_if_index]));
diff --git a/src/plugins/gbp/gbp_route_domain.c b/src/plugins/gbp/gbp_route_domain.c
new file mode 100644
index 0000000..5518cc1
--- /dev/null
+++ b/src/plugins/gbp/gbp_route_domain.c
@@ -0,0 +1,413 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <plugins/gbp/gbp_route_domain.h>
+#include <plugins/gbp/gbp_endpoint.h>
+
+#include <vnet/dpo/dvr_dpo.h>
+#include <vnet/fib/fib_table.h>
+#include <vnet/ip/ip_neighbor.h>
+
+/**
+ * A fixed MAC address to use as the source MAC for packets L3 switched
+ * onto the routed uu-fwd interfaces.
+ * Magic values - origin lost to the mists of time...
+ */
+/* *INDENT-OFF* */
+const static mac_address_t GBP_ROUTED_SRC_MAC = {
+  .bytes = {
+    0x0, 0x22, 0xBD, 0xF8, 0x19, 0xFF,
+  }
+};
+
+const static mac_address_t GBP_ROUTED_DST_MAC = {
+  .bytes = {
+    0x00, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
+  }
+};
+/* *INDENT-ON* */
+
+/**
+ * Pool of GBP route_domains
+ */
+gbp_route_domain_t *gbp_route_domain_pool;
+
+/**
+ * DB of route_domains
+ */
+typedef struct gbp_route_domain_db_t
+{
+  uword *gbd_by_rd_id;
+} gbp_route_domain_db_t;
+
+static gbp_route_domain_db_t gbp_route_domain_db;
+
+/**
+ * logger
+ */
+vlib_log_class_t grd_logger;
+
+#define GBP_RD_DBG(...)                           \
+    vlib_log_debug (grd_logger, __VA_ARGS__);
+
+gbp_route_domain_t *
+gbp_route_domain_get (index_t i)
+{
+  return (pool_elt_at_index (gbp_route_domain_pool, i));
+}
+
+static void
+gbp_route_domain_lock (index_t i)
+{
+  gbp_route_domain_t *grd;
+
+  grd = gbp_route_domain_get (i);
+  grd->grd_locks++;
+}
+
+index_t
+gbp_route_domain_find (u32 rd_id)
+{
+  uword *p;
+
+  p = hash_get (gbp_route_domain_db.gbd_by_rd_id, rd_id);
+
+  if (NULL != p)
+    return p[0];
+
+  return (INDEX_INVALID);
+}
+
+index_t
+gbp_route_domain_find_and_lock (u32 rd_id)
+{
+  index_t grdi;
+
+  grdi = gbp_route_domain_find (rd_id);
+
+  if (INDEX_INVALID != grdi)
+    {
+      gbp_route_domain_lock (grdi);
+    }
+  return (grdi);
+}
+
+static void
+gbp_route_domain_db_add (gbp_route_domain_t * grd)
+{
+  index_t grdi = grd - gbp_route_domain_pool;
+
+  hash_set (gbp_route_domain_db.gbd_by_rd_id, grd->grd_id, grdi);
+}
+
+static void
+gbp_route_domain_db_remove (gbp_route_domain_t * grd)
+{
+  hash_unset (gbp_route_domain_db.gbd_by_rd_id, grd->grd_id);
+}
+
+int
+gbp_route_domain_add_and_lock (u32 rd_id,
+			       u32 ip4_table_id,
+			       u32 ip6_table_id,
+			       u32 ip4_uu_sw_if_index, u32 ip6_uu_sw_if_index)
+{
+  gbp_route_domain_t *grd;
+  index_t grdi;
+
+  grdi = gbp_route_domain_find (rd_id);
+
+  if (INDEX_INVALID == grdi)
+    {
+      fib_protocol_t fproto;
+
+      pool_get_zero (gbp_route_domain_pool, grd);
+
+      grd->grd_id = rd_id;
+      grd->grd_table_id[FIB_PROTOCOL_IP4] = ip4_table_id;
+      grd->grd_table_id[FIB_PROTOCOL_IP6] = ip6_table_id;
+      grd->grd_uu_sw_if_index[FIB_PROTOCOL_IP4] = ip4_uu_sw_if_index;
+      grd->grd_uu_sw_if_index[FIB_PROTOCOL_IP6] = ip6_uu_sw_if_index;
+
+      FOR_EACH_FIB_IP_PROTOCOL (fproto)
+      {
+	grd->grd_fib_index[fproto] =
+	  fib_table_find_or_create_and_lock (fproto,
+					     grd->grd_table_id[fproto],
+					     FIB_SOURCE_PLUGIN_HI);
+
+	if (~0 != grd->grd_uu_sw_if_index[fproto])
+	  {
+	    ethernet_header_t *eth;
+	    u8 *rewrite;
+
+	    rewrite = NULL;
+	    vec_validate (rewrite, sizeof (*eth) - 1);
+	    eth = (ethernet_header_t *) rewrite;
+
+	    eth->type = clib_host_to_net_u16 ((fproto == FIB_PROTOCOL_IP4 ?
+					       ETHERNET_TYPE_IP4 :
+					       ETHERNET_TYPE_IP6));
+
+	    mac_address_to_bytes (gbp_route_domain_get_local_mac (),
+				  eth->src_address);
+	    mac_address_to_bytes (gbp_route_domain_get_remote_mac (),
+				  eth->dst_address);
+
+	    /*
+	     * create an adjacency out of the uu-fwd interfaces that will
+	     * be used when adding subnet routes.
+	     */
+	    grd->grd_adj[fproto] =
+	      adj_nbr_add_or_lock_w_rewrite (fproto,
+					     fib_proto_to_link (fproto),
+					     &ADJ_BCAST_ADDR,
+					     grd->grd_uu_sw_if_index[fproto],
+					     rewrite);
+	  }
+	else
+	  {
+	    grd->grd_adj[fproto] = INDEX_INVALID;
+	  }
+      }
+
+      gbp_route_domain_db_add (grd);
+    }
+  else
+    {
+      grd = gbp_route_domain_get (grdi);
+    }
+
+  grd->grd_locks++;
+  GBP_RD_DBG ("add: %U", format_gbp_route_domain, grd);
+
+  return (0);
+}
+
+void
+gbp_route_domain_unlock (index_t index)
+{
+  gbp_route_domain_t *grd;
+
+  grd = gbp_route_domain_get (index);
+
+  grd->grd_locks--;
+
+  if (0 == grd->grd_locks)
+    {
+      fib_protocol_t fproto;
+
+      GBP_RD_DBG ("destroy: %U", format_gbp_route_domain, grd);
+
+      FOR_EACH_FIB_IP_PROTOCOL (fproto)
+      {
+	fib_table_unlock (grd->grd_fib_index[fproto],
+			  fproto, FIB_SOURCE_PLUGIN_HI);
+	if (INDEX_INVALID != grd->grd_adj[fproto])
+	  adj_unlock (grd->grd_adj[fproto]);
+      }
+
+      gbp_route_domain_db_remove (grd);
+
+      pool_put (gbp_route_domain_pool, grd);
+    }
+}
+
+int
+gbp_route_domain_delete (u32 rd_id)
+{
+  index_t grdi;
+
+  GBP_RD_DBG ("del: %d", rd_id);
+  grdi = gbp_route_domain_find (rd_id);
+
+  if (INDEX_INVALID != grdi)
+    {
+      GBP_RD_DBG ("del: %U", format_gbp_route_domain,
+		  gbp_route_domain_get (grdi));
+      gbp_route_domain_unlock (grdi);
+
+      return (0);
+    }
+
+  return (VNET_API_ERROR_NO_SUCH_ENTRY);
+}
+
+const mac_address_t *
+gbp_route_domain_get_local_mac (void)
+{
+  return (&GBP_ROUTED_SRC_MAC);
+}
+
+const mac_address_t *
+gbp_route_domain_get_remote_mac (void)
+{
+  return (&GBP_ROUTED_DST_MAC);
+}
+
+void
+gbp_route_domain_walk (gbp_route_domain_cb_t cb, void *ctx)
+{
+  gbp_route_domain_t *gbpe;
+
+  /* *INDENT-OFF* */
+  pool_foreach(gbpe, gbp_route_domain_pool,
+  {
+    if (!cb(gbpe, ctx))
+      break;
+  });
+  /* *INDENT-ON* */
+}
+
+static clib_error_t *
+gbp_route_domain_cli (vlib_main_t * vm,
+		      unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+  vnet_main_t *vnm = vnet_get_main ();
+  u32 ip4_uu_sw_if_index = ~0;
+  u32 ip6_uu_sw_if_index = ~0;
+  u32 ip4_table_id = ~0;
+  u32 ip6_table_id = ~0;
+  u32 rd_id = ~0;
+  u8 add = 1;
+
+  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (input, "ip4-uu %U", unformat_vnet_sw_interface,
+		    vnm, &ip4_uu_sw_if_index))
+	;
+      else if (unformat (input, "ip6-uu %U", unformat_vnet_sw_interface,
+			 vnm, &ip6_uu_sw_if_index))
+	;
+      else if (unformat (input, "ip4-table-id %d", &ip4_table_id))
+	;
+      else if (unformat (input, "ip6-table-id %d", &ip6_table_id))
+	;
+      else if (unformat (input, "add"))
+	add = 1;
+      else if (unformat (input, "del"))
+	add = 0;
+      else if (unformat (input, "rd %d", &rd_id))
+	;
+      else
+	break;
+    }
+
+  if (~0 == rd_id)
+    return clib_error_return (0, "RD-ID must be specified");
+
+  if (add)
+    {
+      if (~0 == ip4_table_id)
+	return clib_error_return (0, "IP4 table-ID must be specified");
+      if (~0 == ip6_table_id)
+	return clib_error_return (0, "IP6 table-ID must be specified");
+
+      gbp_route_domain_add_and_lock (rd_id, ip4_table_id,
+				     ip6_table_id,
+				     ip4_uu_sw_if_index, ip6_uu_sw_if_index);
+    }
+  else
+    gbp_route_domain_delete (rd_id);
+
+  return (NULL);
+}
+
+/*?
+ * Configure a GBP route-domain
+ *
+ * @cliexpar
+ * @cliexstart{gbp route-domain [del] rd <ID> ip4-table-id <ID> ip6-table-id <ID> [ip4-uu <interface>] [ip6-uu <interface>]}
+ * @cliexend
+ ?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (gbp_route_domain_cli_node, static) = {
+  .path = "gbp route-domain",
+  .short_help = "gbp route-domain [del] rd <ID> ip4-table-id <ID> ip6-table-id <ID> [ip4-uu <interface>] [ip6-uu <interface>]",
+  .function = gbp_route_domain_cli,
+};
+/* *INDENT-ON* */
+
+u8 *
+format_gbp_route_domain (u8 * s, va_list * args)
+{
+  gbp_route_domain_t *grd = va_arg (*args, gbp_route_domain_t*);
+  vnet_main_t *vnm = vnet_get_main ();
+
+  if (NULL != grd)
+    s = format (s, "[%d] rd:%d ip4-uu:%U ip6-uu:%U locks:%d",
+                grd - gbp_route_domain_pool,
+                grd->grd_id,
+                format_vnet_sw_if_index_name, vnm, grd->grd_uu_sw_if_index[FIB_PROTOCOL_IP4],
+                format_vnet_sw_if_index_name, vnm, grd->grd_uu_sw_if_index[FIB_PROTOCOL_IP6],
+                grd->grd_locks);
+  else
+    s = format (s, "NULL");
+
+  return (s);
+}
+
+static int
+gbp_route_domain_show_one (gbp_route_domain_t *gb, void *ctx)
+{
+  vlib_main_t *vm;
+
+  vm = ctx;
+  vlib_cli_output (vm, "  %U", format_gbp_route_domain, gb);
+
+  return (1);
+}
+
+static clib_error_t *
+gbp_route_domain_show (vlib_main_t * vm,
+		   unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+  vlib_cli_output (vm, "Route-Domains:");
+  gbp_route_domain_walk (gbp_route_domain_show_one, vm);
+
+  return (NULL);
+}
+
+/*?
+ * Show Group Based Policy Route-Domains and derived information
+ *
+ * @cliexpar
+ * @cliexstart{show gbp route-domain}
+ * @cliexend
+ ?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (gbp_route_domain_show_node, static) = {
+  .path = "show gbp route-domain",
+  .short_help = "show gbp route-domain\n",
+  .function = gbp_route_domain_show,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+gbp_route_domain_init (vlib_main_t * vm)
+{
+  grd_logger = vlib_log_register_class ("gbp", "rd");
+
+  return (NULL);
+}
+
+VLIB_INIT_FUNCTION (gbp_route_domain_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/gbp/gbp_route_domain.h b/src/plugins/gbp/gbp_route_domain.h
new file mode 100644
index 0000000..f7fc4a4
--- /dev/null
+++ b/src/plugins/gbp/gbp_route_domain.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __GBP_ROUTE_DOMAIN_H__
+#define __GBP_ROUTE_DOMAIN_H__
+
+#include <plugins/gbp/gbp_types.h>
+
+#include <vnet/fib/fib_types.h>
+#include <vnet/ethernet/mac_address.h>
+
+/**
+ * A Route-Domain representation.
+ * This is a standard route-domain plus all the attributes it must
+ * have to support the GBP model.
+ */
+typedef struct gbp_route_domain_t_
+{
+  /**
+   * Route-domain ID
+   */
+  u32 grd_id;
+  u32 grd_fib_index[FIB_PROTOCOL_IP_MAX];
+  u32 grd_table_id[FIB_PROTOCOL_IP_MAX];
+
+  /**
+   * The RD's VNI interface on which packets from unknown endpoints
+   * arrive
+   */
+  u32 grd_vni_sw_if_index;
+
+  /**
+   * The interfaces on which to send packets to unknown EPs
+   */
+  u32 grd_uu_sw_if_index[FIB_PROTOCOL_IP_MAX];
+
+  /**
+   * adjacencies on the UU interfaces.
+   */
+  u32 grd_adj[FIB_PROTOCOL_IP_MAX];
+
+  u32 grd_locks;
+} gbp_route_domain_t;
+
+extern int gbp_route_domain_add_and_lock (u32 rd_id,
+					  u32 ip4_table_id,
+					  u32 ip6_table_id,
+					  u32 ip4_uu_sw_if_index,
+					  u32 ip6_uu_sw_if_index);
+extern void gbp_route_domain_unlock (index_t grdi);
+extern index_t gbp_route_domain_find_and_lock (u32 rd_id);
+extern index_t gbp_route_domain_find (u32 rd_id);
+
+extern int gbp_route_domain_delete (u32 rd_id);
+extern gbp_route_domain_t *gbp_route_domain_get (index_t i);
+
+typedef int (*gbp_route_domain_cb_t) (gbp_route_domain_t * gb, void *ctx);
+extern void gbp_route_domain_walk (gbp_route_domain_cb_t cb, void *ctx);
+
+extern const mac_address_t *gbp_route_domain_get_local_mac (void);
+extern const mac_address_t *gbp_route_domain_get_remote_mac (void);
+
+extern u8 *format_gbp_route_domain (u8 * s, va_list * args);
+
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/gbp/gbp_scanner.c b/src/plugins/gbp/gbp_scanner.c
new file mode 100644
index 0000000..a2d0c9a
--- /dev/null
+++ b/src/plugins/gbp/gbp_scanner.c
@@ -0,0 +1,104 @@
+/*
+ * gbp_scanner.c : Group Based Policy endpoint scanner
+ *
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <plugins/gbp/gbp_scanner.h>
+#include <plugins/gbp/gbp_endpoint.h>
+#include <plugins/gbp/gbp_vxlan.h>
+
+vlib_log_class_t gs_logger;
+
+#define GBP_SCANNER_DBG(...)                                      \
+    vlib_log_debug (gs_logger, __VA_ARGS__);
+
+static uword
+gbp_scanner (vlib_main_t * vm, vlib_node_runtime_t * rt, vlib_frame_t * f)
+{
+  uword event_type, *event_data = 0;
+  bool enabled = 0, do_scan = 0;
+
+  while (1)
+    {
+      do_scan = 0;
+
+      if (enabled)
+	{
+	  /* scan every 'inactive threshold' seconds */
+	  vlib_process_wait_for_event_or_clock (vm,
+						gbp_endpoint_scan_threshold
+						());
+	}
+      else
+	vlib_process_wait_for_event (vm);
+
+      event_type = vlib_process_get_events (vm, &event_data);
+      vec_reset_length (event_data);
+
+      switch (event_type)
+	{
+	case ~0:
+	  /* timer expired */
+	  do_scan = 1;
+	  break;
+
+	case GBP_ENDPOINT_SCAN_START:
+	  enabled = 1;
+	  break;
+
+	case GBP_ENDPOINT_SCAN_STOP:
+	  enabled = 0;
+	  break;
+
+	default:
+	  ASSERT (0);
+	}
+
+      if (do_scan)
+	{
+	  GBP_SCANNER_DBG ("start");
+	  gbp_endpoint_scan (vm);
+	  GBP_SCANNER_DBG ("stop");
+	}
+    }
+  return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (gbp_scanner_node) = {
+    .function = gbp_scanner,
+    .type = VLIB_NODE_TYPE_PROCESS,
+    .name = "gbp-scanner",
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+gbp_scanner_init (vlib_main_t * vm)
+{
+  gs_logger = vlib_log_register_class ("gbp", "scan");
+
+  return (NULL);
+}
+
+VLIB_INIT_FUNCTION (gbp_scanner_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/gbp/gbp_scanner.h b/src/plugins/gbp/gbp_scanner.h
new file mode 100644
index 0000000..070da38
--- /dev/null
+++ b/src/plugins/gbp/gbp_scanner.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __GBP_SCANNER_H__
+#define __GBP_SCANNER_H__
+
+#include <vlib/vlib.h>
+
+typedef enum gbp_scan_event_t_
+{
+  GBP_ENDPOINT_SCAN_START,
+  GBP_ENDPOINT_SCAN_STOP,
+  GBP_VXLAN_SCAN_START,
+  GBP_VXLAN_SCAN_STOP,
+} gbp_scan_event_t;
+
+extern vlib_node_registration_t gbp_scanner_node;
+
+#endif
diff --git a/src/plugins/gbp/gbp_subnet.c b/src/plugins/gbp/gbp_subnet.c
index b392511..d9d4299 100644
--- a/src/plugins/gbp/gbp_subnet.c
+++ b/src/plugins/gbp/gbp_subnet.c
@@ -16,19 +16,125 @@
 #include <plugins/gbp/gbp.h>
 #include <plugins/gbp/gbp_fwd_dpo.h>
 #include <plugins/gbp/gbp_policy_dpo.h>
+#include <plugins/gbp/gbp_route_domain.h>
 
 #include <vnet/fib/fib_table.h>
 #include <vnet/dpo/load_balance.h>
 
+/**
+ * a key for the DB
+ */
+typedef struct gbp_subnet_key_t_
+{
+  fib_prefix_t gsk_pfx;
+  u32 gsk_fib_index;
+} gbp_subnet_key_t;
+
+/**
+ * Subnet
+ */
+typedef struct gbp_subnet_t_
+{
+  gbp_subnet_key_t *gs_key;
+  gbp_subnet_type_t gs_type;
+  index_t gs_rd;
+
+  union
+  {
+    struct
+    {
+      epg_id_t gs_epg;
+      u32 gs_sw_if_index;
+    } gs_stitched_external;
+  };
+} gbp_subnet_t;
+
+/**
+ * A DB of the subnets; key={pfx,fib-index}
+ */
+uword *gbp_subnet_db;
+
+/**
+ * pool of subnets
+ */
+gbp_subnet_t *gbp_subnet_pool;
+
+static index_t
+gbp_subnet_db_find (u32 fib_index, const fib_prefix_t * pfx)
+{
+  gbp_subnet_key_t key = {
+    .gsk_pfx = *pfx,
+    .gsk_fib_index = fib_index,
+  };
+  uword *p;
+
+  p = hash_get_mem (gbp_subnet_db, &key);
+
+  if (NULL != p)
+    return p[0];
+
+  return (INDEX_INVALID);
+}
+
+static void
+gbp_subnet_db_add (u32 fib_index, const fib_prefix_t * pfx, gbp_subnet_t * gs)
+{
+  gbp_subnet_key_t *key;
+
+  key = clib_mem_alloc (sizeof (*key));
+
+  clib_memcpy (&(key->gsk_pfx), pfx, sizeof (*pfx));
+  key->gsk_fib_index = fib_index;
+
+  hash_set_mem (gbp_subnet_db, key, (gs - gbp_subnet_pool));
+
+  gs->gs_key = key;
+}
+
+static void
+gbp_subnet_db_del (gbp_subnet_t * gs)
+{
+  hash_unset_mem (gbp_subnet_db, gs->gs_key);
+
+  clib_mem_free (gs->gs_key);
+  gs->gs_key = NULL;
+}
+
 static int
-gbp_internal_subnet_add (u32 fib_index, const fib_prefix_t * pfx)
+gbp_subnet_transport_add (const gbp_subnet_t * gs)
+{
+  dpo_id_t gfd = DPO_INVALID;
+  gbp_route_domain_t *grd;
+  fib_protocol_t fproto;
+
+  fproto = gs->gs_key->gsk_pfx.fp_proto;
+  grd = gbp_route_domain_get (gs->gs_rd);
+
+  fib_table_entry_update_one_path (gs->gs_key->gsk_fib_index,
+				   &gs->gs_key->gsk_pfx,
+				   FIB_SOURCE_PLUGIN_HI,
+				   FIB_ENTRY_FLAG_NONE,
+				   fib_proto_to_dpo (fproto),
+				   &ADJ_BCAST_ADDR,
+				   grd->grd_uu_sw_if_index[fproto],
+				   ~0, 1, NULL, FIB_ROUTE_PATH_FLAG_NONE);
+
+  dpo_reset (&gfd);
+
+  return (0);
+}
+
+static int
+gbp_subnet_internal_add (const gbp_subnet_t * gs)
 {
   dpo_id_t gfd = DPO_INVALID;
 
-  gbp_fwd_dpo_add_or_lock (fib_proto_to_dpo (pfx->fp_proto), &gfd);
+  gbp_fwd_dpo_add_or_lock (fib_proto_to_dpo (gs->gs_key->gsk_pfx.fp_proto),
+			   &gfd);
 
-  fib_table_entry_special_dpo_update (fib_index,
-				      pfx,
+  fib_table_entry_special_dpo_update (gs->gs_key->gsk_fib_index,
+				      &gs->gs_key->gsk_pfx,
 				      FIB_SOURCE_PLUGIN_HI,
 				      FIB_ENTRY_FLAG_EXCLUSIVE, &gfd);
 
@@ -38,17 +144,19 @@
 }
 
 static int
-gbp_external_subnet_add (u32 fib_index,
-			 const fib_prefix_t * pfx,
-			 u32 sw_if_index, epg_id_t epg)
+gbp_subnet_external_add (gbp_subnet_t * gs, u32 sw_if_index, epg_id_t epg)
 {
   dpo_id_t gpd = DPO_INVALID;
 
-  gbp_policy_dpo_add_or_lock (fib_proto_to_dpo (pfx->fp_proto),
-			      epg, sw_if_index, &gpd);
+  gs->gs_stitched_external.gs_epg = epg;
+  gs->gs_stitched_external.gs_sw_if_index = sw_if_index;
 
-  fib_table_entry_special_dpo_update (fib_index,
-				      pfx,
+  gbp_policy_dpo_add_or_lock (fib_proto_to_dpo (gs->gs_key->gsk_pfx.fp_proto),
+			      gs->gs_stitched_external.gs_epg,
+			      gs->gs_stitched_external.gs_sw_if_index, &gpd);
+
+  fib_table_entry_special_dpo_update (gs->gs_key->gsk_fib_index,
+				      &gs->gs_key->gsk_pfx,
 				      FIB_SOURCE_PLUGIN_HI,
 				      (FIB_ENTRY_FLAG_EXCLUSIVE |
 				       FIB_ENTRY_FLAG_LOOSE_URPF_EXEMPT),
@@ -59,114 +167,255 @@
   return (0);
 }
 
-static int
-gbp_subnet_del (u32 fib_index, const fib_prefix_t * pfx)
+int
+gbp_subnet_del (u32 rd_id, const fib_prefix_t * pfx)
 {
+  gbp_route_domain_t *grd;
+  index_t gsi, grdi;
+  gbp_subnet_t *gs;
+  u32 fib_index;
+
+  grdi = gbp_route_domain_find (rd_id);
+
+  if (~0 == grdi)
+    return (VNET_API_ERROR_NO_SUCH_FIB);
+
+  grd = gbp_route_domain_get (grdi);
+  fib_index = grd->grd_fib_index[pfx->fp_proto];
+
+  gsi = gbp_subnet_db_find (fib_index, pfx);
+
+  if (INDEX_INVALID == gsi)
+    return (VNET_API_ERROR_NO_SUCH_ENTRY);
+
+  gs = pool_elt_at_index (gbp_subnet_pool, gsi);
+
   fib_table_entry_delete (fib_index, pfx, FIB_SOURCE_PLUGIN_HI);
 
+  gbp_subnet_db_del (gs);
+  gbp_route_domain_unlock (gs->gs_rd);
+
+  pool_put (gbp_subnet_pool, gs);
+
   return (0);
 }
 
 int
-gbp_subnet_add_del (u32 table_id,
-		    const fib_prefix_t * pfx,
-		    u32 sw_if_index, epg_id_t epg, u8 is_add, u8 is_internal)
+gbp_subnet_add (u32 rd_id,
+		const fib_prefix_t * pfx,
+		gbp_subnet_type_t type, u32 sw_if_index, epg_id_t epg)
 {
+  gbp_route_domain_t *grd;
+  index_t grdi, gsi;
+  gbp_subnet_t *gs;
   u32 fib_index;
+  int rv;
 
-  fib_index = fib_table_find (pfx->fp_proto, table_id);
+  grdi = gbp_route_domain_find_and_lock (rd_id);
 
-  if (~0 == fib_index)
+  if (~0 == grdi)
     return (VNET_API_ERROR_NO_SUCH_FIB);
 
-  if (is_internal && is_add)
-    return (gbp_internal_subnet_add (fib_index, pfx));
-  else if (!is_internal && is_add)
-    return (gbp_external_subnet_add (fib_index, pfx, sw_if_index, epg));
+  grd = gbp_route_domain_get (grdi);
+  fib_index = grd->grd_fib_index[pfx->fp_proto];
 
-  return (gbp_subnet_del (fib_index, pfx));
-}
+  gsi = gbp_subnet_db_find (fib_index, pfx);
 
-typedef struct gbp_subnet_fib_table_walk_ctx_t_
-{
-  gbp_subnet_cb_t cb;
-  void *ctx;
-} gbp_subnet_fib_table_walk_ctx_t;
+  if (INDEX_INVALID != gsi)
+    return (VNET_API_ERROR_ENTRY_ALREADY_EXISTS);
 
-static fib_table_walk_rc_t
-gbp_subnet_fib_table_walk (fib_node_index_t fei, void *arg)
-{
-  gbp_subnet_fib_table_walk_ctx_t *ctx = arg;
-  const fib_prefix_t *pfx;
-  const dpo_id_t *dpo;
-  u32 table_id;
+  rv = -2;
 
-  pfx = fib_entry_get_prefix (fei);
-  table_id = fib_table_get_table_id (fib_entry_get_fib_index (fei),
-				     pfx->fp_proto);
-  dpo = fib_entry_contribute_ip_forwarding (fei);
+  pool_get (gbp_subnet_pool, gs);
 
-  if (DPO_LOAD_BALANCE == dpo->dpoi_type)
+  gs->gs_type = type;
+  gs->gs_rd = grdi;
+  gbp_subnet_db_add (fib_index, pfx, gs);
+
+  switch (type)
     {
-      dpo = load_balance_get_bucket (dpo->dpoi_index, 0);
-
-      if (dpo->dpoi_type == gbp_policy_dpo_get_type ())
-	{
-	  gbp_policy_dpo_t *gpd;
-
-	  gpd = gbp_policy_dpo_get (dpo->dpoi_index);
-
-          /* *INDENT-OFF* */
-          ctx->cb (table_id, pfx,
-                   gpd->gpd_sw_if_index,
-                   gpd->gpd_epg,
-                   0,	// is_internal
-                   ctx->ctx);
-          /* *INDENT-ON* */
-	}
-      else if (dpo->dpoi_type == gbp_fwd_dpo_get_type ())
-	{
-          /* *INDENT-OFF* */
-          ctx->cb (table_id, pfx,
-                   ~0,	// sw_if_index
-                   EPG_INVALID,  // epg
-                   1,   // is_internal
-                   ctx->ctx);
-          /* *INDENT-ON* */
-	}
+    case GBP_SUBNET_STITCHED_INTERNAL:
+      rv = gbp_subnet_internal_add (gs);
+      break;
+    case GBP_SUBNET_STITCHED_EXTERNAL:
+      rv = gbp_subnet_external_add (gs, sw_if_index, epg);
+      break;
+    case GBP_SUBNET_TRANSPORT:
+      rv = gbp_subnet_transport_add (gs);
+      break;
     }
 
-  return (FIB_TABLE_WALK_CONTINUE);
+  return (rv);
 }
 
 void
 gbp_subnet_walk (gbp_subnet_cb_t cb, void *ctx)
 {
-  fib_table_t *fib_table;
+  gbp_route_domain_t *grd;
+  gbp_subnet_t *gs;
+  u32 sw_if_index;
+  epg_id_t epg;
 
-  gbp_subnet_fib_table_walk_ctx_t wctx = {
-    .cb = cb,
-    .ctx = ctx,
-  };
+  epg = EPG_INVALID;
+  sw_if_index = ~0;
 
   /* *INDENT-OFF* */
-  pool_foreach (fib_table, ip4_main.fibs,
+  pool_foreach (gs, gbp_subnet_pool,
   ({
-    fib_table_walk(fib_table->ft_index,
-                   FIB_PROTOCOL_IP4,
-                   gbp_subnet_fib_table_walk,
-                   &wctx);
-  }));
-  pool_foreach (fib_table, ip6_main.fibs,
-  ({
-    fib_table_walk(fib_table->ft_index,
-                   FIB_PROTOCOL_IP6,
-                   gbp_subnet_fib_table_walk,
-                   &wctx);
+    grd = gbp_route_domain_get(gs->gs_rd);
+
+    switch (gs->gs_type)
+      {
+      case GBP_SUBNET_STITCHED_INTERNAL:
+      case GBP_SUBNET_TRANSPORT:
+        /* use defaults above */
+        break;
+      case GBP_SUBNET_STITCHED_EXTERNAL:
+        sw_if_index = gs->gs_stitched_external.gs_sw_if_index;
+        epg = gs->gs_stitched_external.gs_epg;
+        break;
+      }
+
+    if (WALK_STOP == cb (grd->grd_id, &gs->gs_key->gsk_pfx,
+                         gs->gs_type, epg, sw_if_index, ctx))
+      break;
   }));
   /* *INDENT-ON* */
 }
 
+typedef enum gsb_subnet_show_flags_t_
+{
+  GBP_SUBNET_SHOW_BRIEF,
+  GBP_SUBNET_SHOW_DETAILS,
+} gsb_subnet_show_flags_t;
+
+static u8 *
+format_gbp_subnet_type (u8 * s, va_list * args)
+{
+  gbp_subnet_type_t type = va_arg (*args, gbp_subnet_type_t);
+
+  switch (type)
+    {
+    case GBP_SUBNET_STITCHED_INTERNAL:
+      return (format (s, "stitched-internal"));
+    case GBP_SUBNET_STITCHED_EXTERNAL:
+      return (format (s, "stitched-external"));
+    case GBP_SUBNET_TRANSPORT:
+      return (format (s, "transport"));
+    }
+
+  return (format (s, "unknown"));
+}
+
+u8 *
+format_gbp_subnet (u8 * s, va_list * args)
+{
+  index_t gsi = va_arg (*args, index_t);
+  gsb_subnet_show_flags_t flags = va_arg (*args, gsb_subnet_show_flags_t);
+  gbp_subnet_t *gs;
+  u32 table_id;
+
+  gs = pool_elt_at_index (gbp_subnet_pool, gsi);
+
+  table_id = fib_table_get_table_id (gs->gs_key->gsk_fib_index,
+				     gs->gs_key->gsk_pfx.fp_proto);
+
+  s = format (s, "[%d] tbl:%d %U %U", gsi, table_id,
+	      format_fib_prefix, &gs->gs_key->gsk_pfx,
+	      format_gbp_subnet_type, gs->gs_type);
+
+  switch (gs->gs_type)
+    {
+    case GBP_SUBNET_STITCHED_INTERNAL:
+    case GBP_SUBNET_TRANSPORT:
+      break;
+    case GBP_SUBNET_STITCHED_EXTERNAL:
+      s = format (s, " {epg:%d %U}", gs->gs_stitched_external.gs_epg,
+		  format_vnet_sw_if_index_name,
+		  vnet_get_main (), gs->gs_stitched_external.gs_sw_if_index);
+      break;
+    }
+
+  switch (flags)
+    {
+    case GBP_SUBNET_SHOW_DETAILS:
+      {
+	fib_node_index_t fei;
+
+	fei = fib_table_lookup_exact_match (gs->gs_key->gsk_fib_index,
+					    &gs->gs_key->gsk_pfx);
+
+	s =
+	  format (s, "\n  %U", format_fib_entry, fei,
+		  FIB_ENTRY_FORMAT_DETAIL);
+      }
+    case GBP_SUBNET_SHOW_BRIEF:
+      break;
+    }
+  return (s);
+}
+
+static clib_error_t *
+gbp_subnet_show (vlib_main_t * vm,
+		 unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+  u32 gsi;
+
+  gsi = INDEX_INVALID;
+
+  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (input, "%d", &gsi))
+	;
+      else
+	break;
+    }
+
+  if (INDEX_INVALID != gsi)
+    {
+      vlib_cli_output (vm, "%U", format_gbp_subnet, gsi,
+		       GBP_SUBNET_SHOW_DETAILS);
+    }
+  else
+    {
+      /* *INDENT-OFF* */
+      pool_foreach_index(gsi, gbp_subnet_pool,
+      ({
+        vlib_cli_output (vm, "%U", format_gbp_subnet, gsi,
+                         GBP_SUBNET_SHOW_BRIEF);
+      }));
+      /* *INDENT-ON* */
+    }
+
+  return (NULL);
+}
+
+/*?
+ * Show Group Based Policy Subnets
+ *
+ * @cliexpar
+ * @cliexstart{show gbp subnet}
+ * @cliexend
+ ?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (gbp_subnet_show_node, static) = {
+  .path = "show gbp subnet",
+  .short_help = "show gbp subnet\n",
+  .function = gbp_subnet_show,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+gbp_subnet_init (vlib_main_t * vm)
+{
+  gbp_subnet_db = hash_create_mem (0,
+				   sizeof (gbp_subnet_key_t), sizeof (u32));
+
+  return (NULL);
+}
+
+VLIB_INIT_FUNCTION (gbp_subnet_init);
+
 /*
  * fd.io coding-style-patch-verification: ON
  *
diff --git a/src/plugins/gbp/gbp_subnet.h b/src/plugins/gbp/gbp_subnet.h
index 24b4f3a..b6906de 100644
--- a/src/plugins/gbp/gbp_subnet.h
+++ b/src/plugins/gbp/gbp_subnet.h
@@ -18,16 +18,26 @@
 
 #include <plugins/gbp/gbp_types.h>
 
-extern int gbp_subnet_add_del (u32 table_id,
-			       const fib_prefix_t * pfx,
-			       u32 sw_if_index,
-			       epg_id_t epg, u8 is_add, u8 is_internal);
+typedef enum gbp_subnet_type_t_
+{
+  GBP_SUBNET_TRANSPORT,
+  GBP_SUBNET_STITCHED_INTERNAL,
+  GBP_SUBNET_STITCHED_EXTERNAL,
+} gbp_subnet_type_t;
 
+extern int gbp_subnet_add (u32 rd_id,
+			   const fib_prefix_t * pfx,
+			   gbp_subnet_type_t type,
+			   u32 sw_if_index, epg_id_t epg);
 
-typedef int (*gbp_subnet_cb_t) (u32 table_id,
-				const fib_prefix_t * pfx,
-				u32 sw_if_index,
-				epg_id_t epg, u8 is_internal, void *ctx);
+extern int gbp_subnet_del (u32 rd_id, const fib_prefix_t * pfx);
+
+typedef walk_rc_t (*gbp_subnet_cb_t) (u32 rd_id,
+				      const fib_prefix_t * pfx,
+				      gbp_subnet_type_t type,
+				      u32 sw_if_index,
+				      epg_id_t epg, void *ctx);
+
 extern void gbp_subnet_walk (gbp_subnet_cb_t cb, void *ctx);
 
 #endif
diff --git a/src/plugins/gbp/gbp_vxlan.c b/src/plugins/gbp/gbp_vxlan.c
new file mode 100644
index 0000000..b29fc11
--- /dev/null
+++ b/src/plugins/gbp/gbp_vxlan.c
@@ -0,0 +1,880 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <plugins/gbp/gbp_vxlan.h>
+#include <plugins/gbp/gbp_itf.h>
+#include <plugins/gbp/gbp_learn.h>
+#include <plugins/gbp/gbp_bridge_domain.h>
+#include <plugins/gbp/gbp_route_domain.h>
+
+#include <vnet/vxlan-gbp/vxlan_gbp.h>
+#include <vlibmemory/api.h>
+#include <vnet/fib/fib_table.h>
+
+/**
+ * A reference to a VXLAN-GBP tunnel created as a child/dependent tunnel
+ * of the template GBP-VXLAN tunnel
+ */
+typedef struct vxlan_tunnel_ref_t_
+{
+  u32 vxr_sw_if_index;
+  index_t vxr_itf;
+  u32 vxr_locks;
+  index_t vxr_parent;
+  gbp_vxlan_tunnel_layer_t vxr_layer;
+} vxlan_tunnel_ref_t;
+
+/**
+ * DB of added tunnels
+ */
+uword *gv_db;
+
+/**
+ * Logger
+ */
+vlib_log_class_t gt_logger;
+
+/**
+ * Pool of template tunnels
+ */
+gbp_vxlan_tunnel_t *gbp_vxlan_tunnel_pool;
+
+/**
+ * Pool of child tunnels
+ */
+vxlan_tunnel_ref_t *vxlan_tunnel_ref_pool;
+
+/**
+ * DB of template interfaces by SW interface index
+ */
+index_t *gbp_vxlan_tunnel_db;
+
+/**
+ * DB of child interfaces by SW interface index
+ */
+index_t *vxlan_tunnel_ref_db;
+
+
+static char *gbp_vxlan_tunnel_layer_strings[] = {
+#define _(n,s) [GBP_VXLAN_TUN_##n] = s,
+  foreach_gbp_vxlan_tunnel_layer
+#undef _
+};
+
+#define GBP_VXLAN_TUN_DBG(...)                          \
+    vlib_log_debug (gt_logger, __VA_ARGS__);
+
+always_inline gbp_vxlan_tunnel_t *
+gbp_vxlan_tunnel_get (index_t gti)
+{
+  return (pool_elt_at_index (gbp_vxlan_tunnel_pool, gti));
+}
+
+static vxlan_tunnel_ref_t *
+vxlan_tunnel_ref_get (index_t vxri)
+{
+  return (pool_elt_at_index (vxlan_tunnel_ref_pool, vxri));
+}
+
+static u8 *
+format_vxlan_tunnel_ref (u8 * s, va_list * args)
+{
+  index_t vxri = va_arg (*args, u32);
+  vxlan_tunnel_ref_t *vxr;
+
+  vxr = vxlan_tunnel_ref_get (vxri);
+
+  s = format (s, "[%U locks:%d]", format_vnet_sw_if_index_name,
+	      vnet_get_main (), vxr->vxr_sw_if_index, vxr->vxr_locks);
+
+  return (s);
+}
+
+static u32
+gbp_vxlan_dep_add (gbp_vxlan_tunnel_t * gt,
+		   u32 vni,
+		   const ip46_address_t * src, const ip46_address_t * dst)
+{
+  vnet_vxlan_gbp_tunnel_add_del_args_t args = {
+    .is_add = 1,
+    .is_ip6 = !ip46_address_is_ip4 (src),
+    .vni = vni,
+    .src = *src,
+    .dst = *dst,
+    .instance = ~0,
+    .mode = (GBP_VXLAN_TUN_L2 == gt->gt_layer ?
+	     VXLAN_GBP_TUNNEL_MODE_L2 : VXLAN_GBP_TUNNEL_MODE_L3),
+  };
+  vxlan_tunnel_ref_t *vxr;
+  u32 sw_if_index;
+  index_t vxri;
+  int rv;
+
+  sw_if_index = ~0;
+  rv = vnet_vxlan_gbp_tunnel_add_del (&args, &sw_if_index);
+
+  if (VNET_API_ERROR_TUNNEL_EXIST == rv)
+    {
+      vxri = vxlan_tunnel_ref_db[sw_if_index];
+
+      vxr = vxlan_tunnel_ref_get (vxri);
+      vxr->vxr_locks++;
+    }
+  else if (0 == rv)
+    {
+      ASSERT (~0 != sw_if_index);
+      GBP_VXLAN_TUN_DBG ("add-dep:%U %U %U %d", format_vnet_sw_if_index_name,
+			 vnet_get_main (), sw_if_index,
+			 format_ip46_address, src, IP46_TYPE_ANY,
+			 format_ip46_address, dst, IP46_TYPE_ANY, vni);
+
+      pool_get_zero (vxlan_tunnel_ref_pool, vxr);
+
+      vxri = (vxr - vxlan_tunnel_ref_pool);
+      vxr->vxr_parent = gt - gbp_vxlan_tunnel_pool;
+      vxr->vxr_sw_if_index = sw_if_index;
+      vxr->vxr_locks = 1;
+      vxr->vxr_layer = gt->gt_layer;
+
+      /*
+       * store the child both on the parent's list and the global DB
+       */
+      vec_add1 (gt->gt_tuns, vxri);
+
+      vec_validate_init_empty (vxlan_tunnel_ref_db,
+			       vxr->vxr_sw_if_index, INDEX_INVALID);
+      vxlan_tunnel_ref_db[vxr->vxr_sw_if_index] = vxri;
+
+      if (GBP_VXLAN_TUN_L2 == vxr->vxr_layer)
+	{
+	  vxr->vxr_itf = gbp_itf_add_and_lock (vxr->vxr_sw_if_index,
+					       gt->gt_bd_index);
+
+	  gbp_itf_set_l2_output_feature (vxr->vxr_itf, vxr->vxr_sw_if_index,
+					 L2OUTPUT_FEAT_GBP_POLICY_MAC);
+	  gbp_itf_set_l2_input_feature (vxr->vxr_itf, vxr->vxr_sw_if_index,
+					L2INPUT_FEAT_GBP_LEARN);
+	}
+      else
+	{
+	  const gbp_route_domain_t *grd;
+	  fib_protocol_t fproto;
+
+	  grd = gbp_route_domain_get (gt->gt_grd);
+
+	  FOR_EACH_FIB_IP_PROTOCOL (fproto)
+	    ip_table_bind (fproto, vxr->vxr_sw_if_index,
+			   grd->grd_table_id[fproto], 1);
+
+	  gbp_learn_enable (vxr->vxr_sw_if_index, GBP_LEARN_MODE_L3);
+	}
+    }
+
+  return (sw_if_index);
+}
+
+u32
+vxlan_gbp_tunnel_get_parent (u32 sw_if_index)
+{
+  gbp_vxlan_tunnel_t *gt;
+  vxlan_tunnel_ref_t *vxr;
+
+  ASSERT ((sw_if_index < vec_len (vxlan_tunnel_ref_db)) &&
+	  (INDEX_INVALID != vxlan_tunnel_ref_db[sw_if_index]));
+
+  vxr = vxlan_tunnel_ref_get (vxlan_tunnel_ref_db[sw_if_index]);
+  gt = gbp_vxlan_tunnel_get (vxr->vxr_parent);
+
+  return (gt->gt_sw_if_index);
+}
+
+gbp_vxlan_tunnel_type_t
+gbp_vxlan_tunnel_get_type (u32 sw_if_index)
+{
+  if (sw_if_index < vec_len (vxlan_tunnel_ref_db) &&
+      INDEX_INVALID != vxlan_tunnel_ref_db[sw_if_index])
+    {
+      return (VXLAN_GBP_TUNNEL);
+    }
+  else if (sw_if_index < vec_len (gbp_vxlan_tunnel_db) &&
+	   INDEX_INVALID != gbp_vxlan_tunnel_db[sw_if_index])
+    {
+      return (GBP_VXLAN_TEMPLATE_TUNNEL);
+    }
+
+  ASSERT (0);
+  return (GBP_VXLAN_TEMPLATE_TUNNEL);
+}
+
+u32
+gbp_vxlan_tunnel_clone_and_lock (u32 sw_if_index,
+				 const ip46_address_t * src,
+				 const ip46_address_t * dst)
+{
+  gbp_vxlan_tunnel_t *gt;
+  index_t gti;
+
+  gti = gbp_vxlan_tunnel_db[sw_if_index];
+
+  if (INDEX_INVALID == gti)
+    return (~0);
+
+  gt = pool_elt_at_index (gbp_vxlan_tunnel_pool, gti);
+
+  return (gbp_vxlan_dep_add (gt, gt->gt_vni, src, dst));
+}
+
+static void
+gbp_vxlan_dep_del (index_t vxri)
+{
+  vxlan_tunnel_ref_t *vxr;
+  gbp_vxlan_tunnel_t *gt;
+  u32 pos;
+
+  vxr = vxlan_tunnel_ref_get (vxri);
+  gt = gbp_vxlan_tunnel_get (vxr->vxr_parent);
+
+  GBP_VXLAN_TUN_DBG ("del-dep:%U", format_vxlan_tunnel_ref, vxri);
+
+  vxlan_tunnel_ref_db[vxr->vxr_sw_if_index] = INDEX_INVALID;
+  pos = vec_search (gt->gt_tuns, vxri);
+
+  ASSERT (~0 != pos);
+  vec_del1 (gt->gt_tuns, pos);
+
+  if (GBP_VXLAN_TUN_L2 == vxr->vxr_layer)
+    {
+      gbp_itf_set_l2_output_feature (vxr->vxr_itf, vxr->vxr_sw_if_index,
+				     L2OUTPUT_FEAT_NONE);
+      gbp_itf_set_l2_input_feature (vxr->vxr_itf, vxr->vxr_sw_if_index,
+				    L2INPUT_FEAT_NONE);
+      gbp_itf_unlock (vxr->vxr_itf);
+    }
+  else
+    {
+      fib_protocol_t fproto;
+
+      FOR_EACH_FIB_IP_PROTOCOL (fproto)
+	ip_table_bind (fproto, vxr->vxr_sw_if_index, 0, 0);
+    }
+
+  vnet_vxlan_gbp_tunnel_del (vxr->vxr_sw_if_index);
+
+  pool_put (vxlan_tunnel_ref_pool, vxr);
+}
+
+void
+vxlan_gbp_tunnel_unlock (u32 sw_if_index)
+{
+  vxlan_tunnel_ref_t *vxr;
+  index_t vxri;
+
+  vxri = vxlan_tunnel_ref_db[sw_if_index];
+
+  ASSERT (vxri != INDEX_INVALID);
+
+  vxr = vxlan_tunnel_ref_get (vxri);
+  vxr->vxr_locks--;
+
+  if (0 == vxr->vxr_locks)
+    {
+      gbp_vxlan_dep_del (vxri);
+    }
+}
+
+void
+vxlan_gbp_tunnel_lock (u32 sw_if_index)
+{
+  vxlan_tunnel_ref_t *vxr;
+  index_t vxri;
+
+  vxri = vxlan_tunnel_ref_db[sw_if_index];
+
+  ASSERT (vxri != INDEX_INVALID);
+
+  vxr = vxlan_tunnel_ref_get (vxri);
+  vxr->vxr_locks++;
+}
+
+#define foreach_gbp_vxlan_input_next         \
+  _(DROP, "error-drop")                      \
+  _(L2_INPUT, "l2-input")                    \
+  _(IP4_INPUT, "ip4-input")                  \
+  _(IP6_INPUT, "ip6-input")
+
+typedef enum
+{
+#define _(s,n) GBP_VXLAN_INPUT_NEXT_##s,
+  foreach_gbp_vxlan_input_next
+#undef _
+    GBP_VXLAN_INPUT_N_NEXT,
+} gbp_vxlan_input_next_t;
+
+#define foreach_gbp_vxlan_error              \
+  _(DECAPPED, "decapped")                    \
+  _(LEARNED, "learned")
+
+typedef enum
+{
+#define _(s,n) GBP_VXLAN_ERROR_##s,
+  foreach_gbp_vxlan_error
+#undef _
+    GBP_VXLAN_N_ERROR,
+} gbp_vxlan_input_error_t;
+
+static char *gbp_vxlan_error_strings[] = {
+#define _(n,s) s,
+  foreach_gbp_vxlan_error
+#undef _
+};
+
+typedef struct gbp_vxlan_trace_t_
+{
+  u8 dropped;
+  u32 vni;
+  u32 sw_if_index;
+  u16 sclass;
+  u8 flags;
+} gbp_vxlan_trace_t;
+
+static uword
+gbp_vxlan_decap (vlib_main_t * vm,
+		 vlib_node_runtime_t * node,
+		 vlib_frame_t * from_frame, u8 is_ip4)
+{
+  u32 n_left_to_next, n_left_from, next_index, *to_next, *from;
+
+  next_index = 0;
+  from = vlib_frame_vector_args (from_frame);
+  n_left_from = from_frame->n_vectors;
+
+  while (n_left_from > 0)
+    {
+
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+      while (n_left_from > 0 && n_left_to_next > 0)
+	{
+	  vxlan_gbp_header_t *vxlan_gbp0;
+	  gbp_vxlan_input_next_t next0;
+	  gbp_vxlan_tunnel_t *gt0;
+	  vlib_buffer_t *b0;
+	  u32 bi0, vni0;
+	  uword *p;
+
+	  bi0 = to_next[0] = from[0];
+	  from += 1;
+	  to_next += 1;
+	  n_left_from -= 1;
+	  n_left_to_next -= 1;
+	  next0 = GBP_VXLAN_INPUT_NEXT_DROP;
+
+	  b0 = vlib_get_buffer (vm, bi0);
+	  vxlan_gbp0 =
+	    vlib_buffer_get_current (b0) - sizeof (vxlan_gbp_header_t);
+
+	  vni0 = vxlan_gbp_get_vni (vxlan_gbp0);
+	  p = hash_get (gv_db, vni0);
+
+	  if (PREDICT_FALSE (NULL == p))
+	    {
+	      gt0 = NULL;
+	      next0 = GBP_VXLAN_INPUT_NEXT_DROP;
+	    }
+	  else
+	    {
+	      gt0 = gbp_vxlan_tunnel_get (p[0]);
+
+	      vnet_buffer (b0)->sw_if_index[VLIB_RX] = gt0->gt_sw_if_index;
+
+	      if (GBP_VXLAN_TUN_L2 == gt0->gt_layer)
+		/*
+		 * An L2 layer tunnel goes into the BD
+		 */
+		next0 = GBP_VXLAN_INPUT_NEXT_L2_INPUT;
+	      else
+		{
+		  /*
+		   * An L3 layer tunnel needs to strip the L2 header
+		   * and inject it into the RD
+		   */
+		  ethernet_header_t *e0;
+		  u16 type0;
+
+		  e0 = vlib_buffer_get_current (b0);
+		  type0 = clib_net_to_host_u16 (e0->type);
+		  switch (type0)
+		    {
+		    case ETHERNET_TYPE_IP4:
+		      next0 = GBP_VXLAN_INPUT_NEXT_IP4_INPUT;
+		      break;
+		    case ETHERNET_TYPE_IP6:
+		      next0 = GBP_VXLAN_INPUT_NEXT_IP6_INPUT;
+		      break;
+		    default:
+		      goto trace;
+		    }
+		  vlib_buffer_advance (b0, sizeof (*e0));
+		}
+	    }
+
+	trace:
+	  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+	    {
+	      gbp_vxlan_trace_t *tr;
+
+	      tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
+	      tr->dropped = (next0 == GBP_VXLAN_INPUT_NEXT_DROP);
+	      tr->vni = vni0;
+	      tr->sw_if_index = (gt0 ? gt0->gt_sw_if_index : ~0);
+	      tr->flags = vxlan_gbp_get_gpflags (vxlan_gbp0);
+	      tr->sclass = vxlan_gbp_get_sclass (vxlan_gbp0);
+	    }
+
+	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+					   to_next, n_left_to_next,
+					   bi0, next0);
+	}
+
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    }
+
+  return from_frame->n_vectors;
+}
+
+static u8 *
+format_gbp_vxlan_rx_trace (u8 * s, va_list * args)
+{
+  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+  gbp_vxlan_trace_t *t = va_arg (*args, gbp_vxlan_trace_t *);
+
+  s = format (s, "vni:%d dropped:%d rx:%d sclass:%d flags:%U",
+	      t->vni, t->dropped, t->sw_if_index,
+	      t->sclass, format_vxlan_gbp_header_gpflags, t->flags);
+
+  return (s);
+}
+
+static uword
+gbp_vxlan4_decap (vlib_main_t * vm,
+		  vlib_node_runtime_t * node, vlib_frame_t * from_frame)
+{
+  return gbp_vxlan_decap (vm, node, from_frame, 1);
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (gbp_vxlan4_input_node) =
+{
+  .function = gbp_vxlan4_decap,
+  .name = "gbp-vxlan4",
+  .vector_size = sizeof (u32),
+  .n_errors = GBP_VXLAN_N_ERROR,
+  .error_strings = gbp_vxlan_error_strings,
+  .n_next_nodes = GBP_VXLAN_INPUT_N_NEXT,
+  .format_trace = format_gbp_vxlan_rx_trace,
+  .next_nodes = {
+#define _(s,n) [GBP_VXLAN_INPUT_NEXT_##s] = n,
+    foreach_gbp_vxlan_input_next
+#undef _
+  },
+};
+VLIB_NODE_FUNCTION_MULTIARCH (gbp_vxlan4_input_node, gbp_vxlan4_decap)
+
+/* *INDENT-ON* */
+
+void
+gbp_vxlan_walk (gbp_vxlan_cb_t cb, void *ctx)
+{
+  gbp_vxlan_tunnel_t *gt;
+
+  /* *INDENT-OFF* */
+  pool_foreach (gt, gbp_vxlan_tunnel_pool,
+    ({
+      if (WALK_CONTINUE != cb(gt, ctx))
+        break;
+    }));
+  /* *INDENT-ON* */
+}
+
+static walk_rc_t
+gbp_vxlan_tunnel_show_one (gbp_vxlan_tunnel_t * gt, void *ctx)
+{
+  vlib_cli_output (ctx, "%U", format_gbp_vxlan_tunnel,
+		   gt - gbp_vxlan_tunnel_pool);
+
+  return (WALK_CONTINUE);
+}
+
+static u8 *
+format_gbp_vxlan_tunnel_name (u8 * s, va_list * args)
+{
+  u32 dev_instance = va_arg (*args, u32);
+
+  return format (s, "gbp-vxlan-%d", dev_instance);
+}
+
+u8 *
+format_gbp_vxlan_tunnel_layer (u8 * s, va_list * args)
+{
+  gbp_vxlan_tunnel_layer_t gl = va_arg (*args, gbp_vxlan_tunnel_layer_t);
+  s = format (s, "%s", gbp_vxlan_tunnel_layer_strings[gl]);
+
+  return (s);
+}
+
+u8 *
+format_gbp_vxlan_tunnel (u8 * s, va_list * args)
+{
+  u32 dev_instance = va_arg (*args, u32);
+  CLIB_UNUSED (int verbose) = va_arg (*args, int);
+  gbp_vxlan_tunnel_t *gt = gbp_vxlan_tunnel_get (dev_instance);
+  index_t *vxri;
+
+  s = format (s, "GBP VXLAN tunnel: hw:%d sw:%d vni:%d %U",
+	      gt->gt_hw_if_index, gt->gt_sw_if_index, gt->gt_vni,
+	      format_gbp_vxlan_tunnel_layer, gt->gt_layer);
+  if (GBP_VXLAN_TUN_L2 == gt->gt_layer)
+    s = format (s, " BD:%d bd-index:%d", gt->gt_bd_rd_id, gt->gt_bd_index);
+  else
+    s = format (s, " RD:%d fib-index:[%d,%d]",
+		gt->gt_bd_rd_id,
+		gt->gt_fib_index[FIB_PROTOCOL_IP4],
+		gt->gt_fib_index[FIB_PROTOCOL_IP6]);
+
+  s = format (s, " children:[");
+  vec_foreach (vxri, gt->gt_tuns)
+  {
+    s = format (s, "%U, ", format_vxlan_tunnel_ref, *vxri);
+  }
+  s = format (s, "]");
+
+  return s;
+}
+
+typedef struct gbp_vxlan_tx_trace_t_
+{
+  u32 vni;
+} gbp_vxlan_tx_trace_t;
+
+u8 *
+format_gbp_vxlan_tx_trace (u8 * s, va_list * args)
+{
+  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+  gbp_vxlan_tx_trace_t *t = va_arg (*args, gbp_vxlan_tx_trace_t *);
+
+  s = format (s, "GBP-VXLAN: vni:%d", t->vni);
+
+  return (s);
+}
+
+clib_error_t *
+gbp_vxlan_interface_admin_up_down (vnet_main_t * vnm,
+				   u32 hw_if_index, u32 flags)
+{
+  vnet_hw_interface_t *hi;
+  u32 ti;
+
+  hi = vnet_get_hw_interface (vnm, hw_if_index);
+
+  if (NULL == gbp_vxlan_tunnel_db ||
+      hi->sw_if_index >= vec_len (gbp_vxlan_tunnel_db))
+    return (NULL);
+
+  ti = gbp_vxlan_tunnel_db[hi->sw_if_index];
+
+  if (INDEX_INVALID == ti)
+    /* not one of ours */
+    return (NULL);
+
+  if (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP)
+    vnet_hw_interface_set_flags (vnm, hw_if_index,
+				 VNET_HW_INTERFACE_FLAG_LINK_UP);
+  else
+    vnet_hw_interface_set_flags (vnm, hw_if_index, 0);
+
+  return (NULL);
+}
+
+static uword
+gbp_vxlan_interface_tx (vlib_main_t * vm,
+			vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+  clib_warning ("you shouldn't be here, leaking buffers...");
+  return frame->n_vectors;
+}
+
+/* *INDENT-OFF* */
+VNET_DEVICE_CLASS (gbp_vxlan_device_class) = {
+  .name = "GBP VXLAN tunnel-template",
+  .format_device_name = format_gbp_vxlan_tunnel_name,
+  .format_device = format_gbp_vxlan_tunnel,
+  .format_tx_trace = format_gbp_vxlan_tx_trace,
+  .admin_up_down_function = gbp_vxlan_interface_admin_up_down,
+  .tx_function = gbp_vxlan_interface_tx,
+};
+
+VNET_HW_INTERFACE_CLASS (gbp_vxlan_hw_interface_class) = {
+  .name = "GBP-VXLAN",
+  .flags = VNET_HW_INTERFACE_CLASS_FLAG_P2P,
+};
+/* *INDENT-ON* */
+
+int
+gbp_vxlan_tunnel_add (u32 vni, gbp_vxlan_tunnel_layer_t layer,
+		      u32 bd_rd_id, u32 * sw_if_indexp)
+{
+  gbp_vxlan_tunnel_t *gt;
+  index_t gti;
+  uword *p;
+  int rv;
+
+  rv = 0;
+  p = hash_get (gv_db, vni);
+
+  GBP_VXLAN_TUN_DBG ("add: %d %d %d", vni, layer, bd_rd_id);
+
+  if (NULL == p)
+    {
+      vnet_sw_interface_t *si;
+      vnet_hw_interface_t *hi;
+      index_t gbi, grdi;
+      vnet_main_t *vnm;
+
+      gbi = grdi = INDEX_INVALID;
+
+      if (layer == GBP_VXLAN_TUN_L2)
+	{
+	  gbi = gbp_bridge_domain_find_and_lock (bd_rd_id);
+
+	  if (INDEX_INVALID == gbi)
+	    {
+	      return (VNET_API_ERROR_BD_NOT_MODIFIABLE);
+	    }
+	}
+      else
+	{
+	  grdi = gbp_route_domain_find_and_lock (bd_rd_id);
+
+	  if (INDEX_INVALID == grdi)
+	    {
+	      return (VNET_API_ERROR_NO_SUCH_FIB);
+	    }
+	}
+
+      vnm = vnet_get_main ();
+      pool_get (gbp_vxlan_tunnel_pool, gt);
+      gti = gt - gbp_vxlan_tunnel_pool;
+
+      gt->gt_vni = vni;
+      gt->gt_layer = layer;
+      gt->gt_bd_rd_id = bd_rd_id;
+      gt->gt_hw_if_index = vnet_register_interface (vnm,
+						    gbp_vxlan_device_class.index,
+						    gti,
+						    gbp_vxlan_hw_interface_class.index,
+						    gti);
+
+      hi = vnet_get_hw_interface (vnm, gt->gt_hw_if_index);
+
+      gt->gt_sw_if_index = hi->sw_if_index;
+
+      /* don't flood packets in a BD to these interfaces */
+      si = vnet_get_sw_interface (vnm, gt->gt_sw_if_index);
+      si->flood_class = VNET_FLOOD_CLASS_NO_FLOOD;
+
+      if (layer == GBP_VXLAN_TUN_L2)
+	{
+	  gbp_bridge_domain_t *gb;
+
+	  gb = gbp_bridge_domain_get (gbi);
+
+	  gt->gt_gbd = gbi;
+	  gt->gt_bd_index = gb->gb_bd_id;
+	  gb->gb_vni_sw_if_index = gt->gt_sw_if_index;
+	  /* set it up as a GBP interface */
+	  gt->gt_itf = gbp_itf_add_and_lock (gt->gt_sw_if_index,
+					     gt->gt_bd_index);
+	  gbp_learn_enable (gt->gt_sw_if_index, GBP_LEARN_MODE_L2);
+	}
+      else
+	{
+	  gbp_route_domain_t *grd;
+	  fib_protocol_t fproto;
+
+	  grd = gbp_route_domain_get (grdi);
+
+	  gt->gt_grd = grdi;
+	  grd->grd_vni_sw_if_index = gt->gt_sw_if_index;
+
+	  gbp_learn_enable (gt->gt_sw_if_index, GBP_LEARN_MODE_L3);
+
+	  ip4_sw_interface_enable_disable (gt->gt_sw_if_index, 1);
+	  ip6_sw_interface_enable_disable (gt->gt_sw_if_index, 1);
+
+	  FOR_EACH_FIB_IP_PROTOCOL (fproto)
+	  {
+	    gt->gt_fib_index[fproto] = grd->grd_fib_index[fproto];
+
+	    ip_table_bind (fproto, gt->gt_sw_if_index,
+			   grd->grd_table_id[fproto], 1);
+	  }
+	}
+
+      /*
+       * save the tunnel by VNI and by sw_if_index
+       */
+      hash_set (gv_db, vni, gti);
+
+      vec_validate (gbp_vxlan_tunnel_db, gt->gt_sw_if_index);
+      gbp_vxlan_tunnel_db[gt->gt_sw_if_index] = gti;
+
+      if (sw_if_indexp)
+	*sw_if_indexp = gt->gt_sw_if_index;
+
+      vxlan_gbp_register_udp_ports ();
+    }
+  else
+    {
+      gti = p[0];
+      rv = VNET_API_ERROR_IF_ALREADY_EXISTS;
+    }
+
+  GBP_VXLAN_TUN_DBG ("add: %U", format_gbp_vxlan_tunnel, gti);
+
+  return (rv);
+}
+
+int
+gbp_vxlan_tunnel_del (u32 vni)
+{
+  gbp_vxlan_tunnel_t *gt;
+  uword *p;
+
+  p = hash_get (gv_db, vni);
+
+  if (NULL != p)
+    {
+      vnet_main_t *vnm;
+
+      vnm = vnet_get_main ();
+      gt = gbp_vxlan_tunnel_get (p[0]);
+
+      vxlan_gbp_unregister_udp_ports ();
+
+      GBP_VXLAN_TUN_DBG ("del: %U", format_gbp_vxlan_tunnel,
+			 gt - gbp_vxlan_tunnel_pool);
+
+      gbp_endpoint_flush (gt->gt_sw_if_index);
+      ASSERT (0 == vec_len (gt->gt_tuns));
+      vec_free (gt->gt_tuns);
+
+      if (GBP_VXLAN_TUN_L2 == gt->gt_layer)
+	{
+	  gbp_learn_disable (gt->gt_sw_if_index, GBP_LEARN_MODE_L2);
+	  gbp_itf_unlock (gt->gt_itf);
+	  gbp_bridge_domain_unlock (gt->gt_gbd);
+	}
+      else
+	{
+	  fib_protocol_t fproto;
+
+	  FOR_EACH_FIB_IP_PROTOCOL (fproto)
+	    ip_table_bind (fproto, gt->gt_sw_if_index, 0, 0);
+
+	  ip4_sw_interface_enable_disable (gt->gt_sw_if_index, 0);
+	  ip6_sw_interface_enable_disable (gt->gt_sw_if_index, 0);
+
+	  gbp_learn_disable (gt->gt_sw_if_index, GBP_LEARN_MODE_L3);
+	  gbp_route_domain_unlock (gt->gt_grd);
+	}
+
+      vnet_sw_interface_set_flags (vnm, gt->gt_sw_if_index, 0);
+      vnet_delete_hw_interface (vnm, gt->gt_hw_if_index);
+
+      hash_unset (gv_db, vni);
+      gbp_vxlan_tunnel_db[gt->gt_sw_if_index] = INDEX_INVALID;
+
+      pool_put (gbp_vxlan_tunnel_pool, gt);
+    }
+  else
+    return VNET_API_ERROR_NO_SUCH_ENTRY;
+
+  return (0);
+}
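
Together, add/del give the control plane a VNI-keyed template API. A hedged usage sketch (the VNI and bd_rd_id values are illustrative):

    /* Sketch: create an L2 template tunnel on VNI 99 bound to GBP
     * bridge-domain 1, then tear it down by VNI */
    u32 tmpl_sw_if_index;
    int rv;

    rv = gbp_vxlan_tunnel_add (99, GBP_VXLAN_TUN_L2, 1, &tmpl_sw_if_index);
    if (rv)
      clib_warning ("gbp-vxlan add failed: %d", rv);
    else
      gbp_vxlan_tunnel_del (99);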
+
+static clib_error_t *
+gbp_vxlan_show (vlib_main_t * vm,
+		unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+  gbp_vxlan_walk (gbp_vxlan_tunnel_show_one, vm);
+
+  return (NULL);
+}
+
+/*?
+ * Show Group Based Policy VXLAN tunnels
+ *
+ * @cliexpar
+ * @cliexstart{show gbp vxlan}
+ * @cliexend
+ ?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (gbp_vxlan_show_node, static) = {
+  .path = "show gbp vxlan",
+  .short_help = "show gbp vxlan\n",
+  .function = gbp_vxlan_show,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+gbp_vxlan_init (vlib_main_t * vm)
+{
+  u32 slot4;
+
+  /*
+   * insert ourselves into the VXLAN-GBP arc to collect the no-tunnel
+   * packets.
+   */
+  slot4 = vlib_node_add_next_with_slot (vm,
+					vxlan4_gbp_input_node.index,
+					gbp_vxlan4_input_node.index,
+					VXLAN_GBP_INPUT_NEXT_NO_TUNNEL);
+  ASSERT (slot4 == VXLAN_GBP_INPUT_NEXT_NO_TUNNEL);
+
+  /* slot6 = vlib_node_add_next_with_slot (vm, */
+  /*                                    vxlan6_gbp_input_node.index, */
+  /*                                    gbp_vxlan6_input_node.index, */
+  /*                                    VXLAN_GBP_INPUT_NEXT_NO_TUNNEL); */
+  /* ASSERT (slot6 == VXLAN_GBP_INPUT_NEXT_NO_TUNNEL); */
+
+  gt_logger = vlib_log_register_class ("gbp", "tun");
+
+  return (NULL);
+}
+
+VLIB_INIT_FUNCTION (gbp_vxlan_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/gbp/gbp_vxlan.h b/src/plugins/gbp/gbp_vxlan.h
new file mode 100644
index 0000000..7aa22e3
--- /dev/null
+++ b/src/plugins/gbp/gbp_vxlan.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __GBP_VXLAN_H__
+#define __GBP_VXLAN_H__
+
+#include <vnet/fib/fib_types.h>
+
+#define foreach_gbp_vxlan_tunnel_layer          \
+  _(L2, "l2")                                   \
+  _(L3, "l3")
+
+typedef enum gbp_vxlan_tunnel_layer_t_
+{
+#define _(s,n) GBP_VXLAN_TUN_##s,
+  foreach_gbp_vxlan_tunnel_layer
+#undef _
+} gbp_vxlan_tunnel_layer_t;
+
+/**
+ * GBP VXLAN (template) tunnel.
+ * A template tunnel has only a VNI; it has no src/dst addresses, so it
+ * cannot be used to send traffic. It is used in the RX path to receive
+ * vxlan-gbp packets that do not match an existing tunnel.
+ */
+typedef struct gbp_vxlan_tunnel_t_
+{
+  u32 gt_hw_if_index;
+  u32 gt_sw_if_index;
+  u32 gt_vni;
+
+  /**
+   * The BD or RD value (depending on the layer) that the tunnel is bound to
+   */
+  u32 gt_bd_rd_id;
+  gbp_vxlan_tunnel_layer_t gt_layer;
+
+  union
+  {
+    struct
+    {
+      /**
+       * BD index (if L2)
+       */
+      u32 gt_bd_index;
+      /**
+       * Reference to the GBP-BD
+       */
+      index_t gt_gbd;
+    };
+    struct
+    {
+      /**
+       * FIB indices (if L3)
+       */
+      u32 gt_fib_index[FIB_PROTOCOL_IP_MAX];
+      /**
+       * Reference to the GBP-RD
+       */
+      index_t gt_grd;
+    };
+  };
+
+  /**
+   * gbp-itf config for this interface
+   */
+  index_t gt_itf;
+
+  /**
+   * list of child vxlan-gbp tunnels built from this template
+   */
+  index_t *gt_tuns;
+} gbp_vxlan_tunnel_t;
+
+/**
+ * The different types of interfaces that endpoints are learned on
+ */
+typedef enum gbp_vxlan_tunnel_type_t_
+{
+  /**
+   * This is the object type defined above:
+   * a template representation of a vxlan-gbp tunnel. From this tunnel
+   * type, real vxlan-gbp tunnels are created (by cloning the VNI).
+   */
+  GBP_VXLAN_TEMPLATE_TUNNEL,
+
+  /**
+   * A real VXLAN-GBP tunnel (from vnet/vxlan-gbp/...)
+   */
+  VXLAN_GBP_TUNNEL,
+} gbp_vxlan_tunnel_type_t;
+
+extern int gbp_vxlan_tunnel_add (u32 vni, gbp_vxlan_tunnel_layer_t layer,
+				 u32 bd_rd_id, u32 * sw_if_indexp);
+extern int gbp_vxlan_tunnel_del (u32 vni);
+
+extern gbp_vxlan_tunnel_type_t gbp_vxlan_tunnel_get_type (u32 sw_if_index);
+
+extern u32 gbp_vxlan_tunnel_clone_and_lock (u32 parent_tunnel,
+					    const ip46_address_t * src,
+					    const ip46_address_t * dst);
+
+extern void vxlan_gbp_tunnel_lock (u32 sw_if_index);
+extern void vxlan_gbp_tunnel_unlock (u32 sw_if_index);
+extern u32 vxlan_gbp_tunnel_get_parent (u32 sw_if_index);
+
+typedef walk_rc_t (*gbp_vxlan_cb_t) (gbp_vxlan_tunnel_t * gt, void *ctx);
+extern void gbp_vxlan_walk (gbp_vxlan_cb_t cb, void *ctx);
+
+extern u8 *format_gbp_vxlan_tunnel (u8 * s, va_list * args);
+extern u8 *format_gbp_vxlan_tunnel_layer (u8 * s, va_list * args);
+
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
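
For illustration (not part of this patch), the walk API declared above follows the usual VPP callback pattern; e.g. a tunnel counter, using the hypothetical names gbp_vxlan_count_one and gbp_vxlan_n_tunnels:

    /* Sketch: count template tunnels via gbp_vxlan_walk() */
    static walk_rc_t
    gbp_vxlan_count_one (gbp_vxlan_tunnel_t * gt, void *ctx)
    {
      u32 *count = ctx;

      *count += 1;
      return (WALK_CONTINUE);   /* WALK_STOP would end the walk early */
    }

    static u32
    gbp_vxlan_n_tunnels (void)
    {
      u32 count = 0;

      gbp_vxlan_walk (gbp_vxlan_count_one, &count);
      return (count);
    }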
diff --git a/src/vnet/CMakeLists.txt b/src/vnet/CMakeLists.txt
index a45f921..6d26b5a 100644
--- a/src/vnet/CMakeLists.txt
+++ b/src/vnet/CMakeLists.txt
@@ -679,6 +679,7 @@
   vxlan-gbp/encap.c
   vxlan-gbp/vxlan_gbp_api.c
   vxlan-gbp/vxlan_gbp.c
+  vxlan-gbp/vxlan_gbp_packet.c
 )
 
 list(APPEND VNET_HEADERS
diff --git a/src/vnet/ethernet/mac_address.h b/src/vnet/ethernet/mac_address.h
index 7b4390d..a249cb5 100644
--- a/src/vnet/ethernet/mac_address.h
+++ b/src/vnet/ethernet/mac_address.h
@@ -37,6 +37,13 @@
   clib_memcpy (mac->bytes, bytes, 6);
 }
 
+static_always_inline void
+mac_address_to_bytes (const mac_address_t * mac, u8 * bytes)
+{
+  /* copy only the 6 MAC bytes; the destination must hold at least 6 bytes */
+  clib_memcpy (bytes, mac->bytes, 6);
+}
+
 static_always_inline int
 mac_address_is_zero (const mac_address_t * mac)
 {
@@ -57,6 +64,12 @@
   mac->bytes[5] = 0;
 }
 
+static_always_inline void
+mac_address_copy (mac_address_t * dst, const mac_address_t * src)
+{
+  mac_address_from_bytes (dst, src->bytes);
+}
+
 extern uword unformat_mac_address_t (unformat_input_t * input,
 				     va_list * args);
 extern u8 *format_mac_address_t (u8 * s, va_list * args);
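
For illustration (not part of this patch), the new helpers round-trip with the existing mac_address_from_bytes:

    /* Sketch: mac_address_t <-> raw 6-byte buffer */
    mac_address_t mac, dup;
    u8 raw[6] = { 0x2, 0x0, 0x0, 0x0, 0x0, 0x1 };
    u8 out[6];

    mac_address_from_bytes (&mac, raw);
    mac_address_copy (&dup, &mac);
    mac_address_to_bytes (&dup, out);   /* out now equals raw */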
diff --git a/src/vnet/interface_funcs.h b/src/vnet/interface_funcs.h
index a3bfdc9..b7d9007 100644
--- a/src/vnet/interface_funcs.h
+++ b/src/vnet/interface_funcs.h
@@ -271,6 +271,13 @@
     && vnet_sw_interface_is_api_visible (vnm, sw_if_index);
 }
 
+always_inline const u8 *
+vnet_sw_interface_get_hw_address (vnet_main_t * vnm, u32 sw_if_index)
+{
+  vnet_hw_interface_t *hw = vnet_get_sup_hw_interface (vnm, sw_if_index);
+  return hw->hw_address;
+}
+
 always_inline uword
 vnet_hw_interface_get_flags (vnet_main_t * vnm, u32 hw_if_index)
 {
diff --git a/src/vnet/ip/ip_types_api.c b/src/vnet/ip/ip_types_api.c
index 11b5276..3d1f806 100644
--- a/src/vnet/ip/ip_types_api.c
+++ b/src/vnet/ip/ip_types_api.c
@@ -107,6 +107,7 @@
       break;
     }
   out->fp_len = in->address_length;
+  out->___fp___pad = 0;
   ip_address_decode (&in->address, &out->fp_addr);
 }
 
diff --git a/src/vnet/l2/l2.api b/src/vnet/l2/l2.api
index 8b65bc3..7c71ea6 100644
--- a/src/vnet/l2/l2.api
+++ b/src/vnet/l2/l2.api
@@ -483,7 +483,6 @@
   u32 context;
   u32 bd_id;
   u8 is_add;
-  u8 is_ipv6;
   vl_api_address_t ip;
   vl_api_mac_address_t mac;
 };
diff --git a/src/vnet/l2/l2_input.h b/src/vnet/l2/l2_input.h
index f55e703..57fca57 100644
--- a/src/vnet/l2/l2_input.h
+++ b/src/vnet/l2/l2_input.h
@@ -104,12 +104,13 @@
  _(FLOOD,         "l2-flood")                   \
  _(ARP_TERM,      "arp-term-l2bd")              \
  _(UU_FLOOD,      "l2-flood")                   \
- _(UU_FWD,        "l2-uu-fwd")                  \
  _(GBP_FWD,       "gbp-fwd")                    \
+ _(UU_FWD,        "l2-uu-fwd")                  \
  _(FWD,           "l2-fwd")                     \
  _(RW,            "l2-rw")                      \
  _(LEARN,         "l2-learn")                   \
  _(L2_EMULATION,  "l2-emulation")               \
+ _(GBP_LEARN,     "gbp-learn-l2")               \
  _(GBP_NULL_CLASSIFY, "gbp-null-classify")      \
  _(GBP_SRC_CLASSIFY,  "gbp-src-classify")       \
  _(VTR,           "l2-input-vtr")               \
diff --git a/src/vnet/l2/l2_output.h b/src/vnet/l2/l2_output.h
index a6db776..33eeb8e 100644
--- a/src/vnet/l2/l2_output.h
+++ b/src/vnet/l2/l2_output.h
@@ -81,7 +81,8 @@
 #define foreach_l2output_feat \
  _(OUTPUT,            "interface-output")           \
  _(SPAN,              "span-l2-output")             \
- _(GBP_POLICY,        "gbp-policy")                 \
+ _(GBP_POLICY_PORT,   "gbp-policy-port")            \
+ _(GBP_POLICY_MAC,    "gbp-policy-mac")             \
  _(CFM,               "feature-bitmap-drop")        \
  _(QOS,               "feature-bitmap-drop")        \
  _(ACL,               "l2-output-acl")              \
diff --git a/src/vnet/vxlan-gbp/decap.c b/src/vnet/vxlan-gbp/decap.c
index 1602e94..0d361a3 100644
--- a/src/vnet/vxlan-gbp/decap.c
+++ b/src/vnet/vxlan-gbp/decap.c
@@ -29,6 +29,7 @@
   u32 error;
   u32 vni;
   u16 sclass;
+  u8 flags;
 } vxlan_gbp_rx_trace_t;
 
 static u8 *
@@ -44,8 +45,10 @@
 		   t->vni);
   return format (s,
 		 "VXLAN_GBP decap from vxlan_gbp_tunnel%d vni %d sclass %d"
-		 " next %d error %d",
-		 t->tunnel_index, t->vni, t->sclass, t->next_index, t->error);
+		 " flags %U next %d error %d",
+		 t->tunnel_index, t->vni, t->sclass,
+		 format_vxlan_gbp_header_gpflags, t->flags,
+		 t->next_index, t->error);
 }
 
 always_inline u32
@@ -161,10 +164,34 @@
   return t0;
 }
 
+always_inline vxlan_gbp_input_next_t
+vxlan_gbp_tunnel_get_next (const vxlan_gbp_tunnel_t * t, vlib_buffer_t * b0)
+{
+  if (VXLAN_GBP_TUNNEL_MODE_L2 == t->mode)
+    return (VXLAN_GBP_INPUT_NEXT_L2_INPUT);
+  else
+    {
+      ethernet_header_t *e0;
+      u16 type0;
+
+      e0 = vlib_buffer_get_current (b0);
+      vlib_buffer_advance (b0, sizeof (*e0));
+      type0 = clib_net_to_host_u16 (e0->type);
+      switch (type0)
+	{
+	case ETHERNET_TYPE_IP4:
+	  return (VXLAN_GBP_INPUT_NEXT_IP4_INPUT);
+	case ETHERNET_TYPE_IP6:
+	  return (VXLAN_GBP_INPUT_NEXT_IP6_INPUT);
+	}
+    }
+  return (VXLAN_GBP_INPUT_NEXT_DROP);
+}
+
 always_inline uword
 vxlan_gbp_input (vlib_main_t * vm,
 		 vlib_node_runtime_t * node,
-		 vlib_frame_t * from_frame, u32 is_ip4)
+		 vlib_frame_t * from_frame, u8 is_ip4)
 {
   vxlan_gbp_main_t *vxm = &vxlan_gbp_main;
   vnet_main_t *vnm = vxm->vnet_main;
@@ -239,10 +266,6 @@
 	      ip6_1 = cur1 - sizeof (udp_header_t) - sizeof (ip6_header_t);
 	    }
 
-	  /* pop vxlan_gbp */
-	  vlib_buffer_advance (b0, sizeof *vxlan_gbp0);
-	  vlib_buffer_advance (b1, sizeof *vxlan_gbp1);
-
 	  u32 fi0 = buf_fib_index (b0, is_ip4);
 	  u32 fi1 = buf_fib_index (b1, is_ip4);
 
@@ -270,16 +293,19 @@
 	  u32 len0 = vlib_buffer_length_in_chain (vm, b0);
 	  u32 len1 = vlib_buffer_length_in_chain (vm, b1);
 
-	  u32 next0, next1;
+	  vxlan_gbp_input_next_t next0, next1;
 	  u8 error0 = 0, error1 = 0;
 	  u8 flags0 = vxlan_gbp_get_flags (vxlan_gbp0);
 	  u8 flags1 = vxlan_gbp_get_flags (vxlan_gbp1);
+	  /* pop vxlan_gbp */
+	  vlib_buffer_advance (b0, sizeof *vxlan_gbp0);
+	  vlib_buffer_advance (b1, sizeof *vxlan_gbp1);
+
 	  /* Validate VXLAN_GBP tunnel encap-fib index against packet */
 	  if (PREDICT_FALSE
 	      (t0 == 0 || flags0 != (VXLAN_GBP_FLAGS_I | VXLAN_GBP_FLAGS_G)))
 	    {
-	      next0 = VXLAN_GBP_INPUT_NEXT_DROP;
-
 	      if (t0 != 0
 		  && flags0 != (VXLAN_GBP_FLAGS_I | VXLAN_GBP_FLAGS_G))
 		{
@@ -287,22 +313,18 @@
 		  vlib_increment_combined_counter
 		    (drop_counter, thread_index, stats_t0->sw_if_index, 1,
 		     len0);
+		  next0 = VXLAN_GBP_INPUT_NEXT_DROP;
 		}
 	      else
-		error0 = VXLAN_GBP_ERROR_NO_SUCH_TUNNEL;
+		{
+		  error0 = VXLAN_GBP_ERROR_NO_SUCH_TUNNEL;
+		  next0 = VXLAN_GBP_INPUT_NEXT_NO_TUNNEL;
+		}
 	      b0->error = node->errors[error0];
 	    }
 	  else
 	    {
-	      next0 = t0->decap_next_index;
-	      vnet_buffer2 (b0)->gbp.flags =
-		vxlan_gbp_get_gpflags (vxlan_gbp0);
-	      vnet_buffer2 (b0)->gbp.src_epg =
-		vxlan_gbp_get_sclass (vxlan_gbp0);
-
-	      /* Required to make the l2 tag push / pop code work on l2 subifs */
-	      if (PREDICT_TRUE (next0 == VXLAN_GBP_INPUT_NEXT_L2_INPUT))
-		vnet_update_l2_len (b0);
+	      next0 = vxlan_gbp_tunnel_get_next (t0, b0);
 
 	      /* Set packet input sw_if_index to unicast VXLAN tunnel for learning */
 	      vnet_buffer (b0)->sw_if_index[VLIB_RX] = t0->sw_if_index;
@@ -311,12 +333,13 @@
 	      pkts_decapsulated++;
 	    }
 
-	  /* Validate VXLAN_GBP tunnel encap-fib index against packet */
+	  vnet_buffer2 (b0)->gbp.flags = vxlan_gbp_get_gpflags (vxlan_gbp0);
+	  vnet_buffer2 (b0)->gbp.src_epg = vxlan_gbp_get_sclass (vxlan_gbp0);
+
 	  if (PREDICT_FALSE
 	      (t1 == 0 || flags1 != (VXLAN_GBP_FLAGS_I | VXLAN_GBP_FLAGS_G)))
 	    {
-	      next1 = VXLAN_GBP_INPUT_NEXT_DROP;
-
 	      if (t1 != 0
 		  && flags1 != (VXLAN_GBP_FLAGS_I | VXLAN_GBP_FLAGS_G))
 		{
@@ -324,22 +347,18 @@
 		  vlib_increment_combined_counter
 		    (drop_counter, thread_index, stats_t1->sw_if_index, 1,
 		     len1);
+		  next1 = VXLAN_GBP_INPUT_NEXT_DROP;
 		}
 	      else
-		error1 = VXLAN_GBP_ERROR_NO_SUCH_TUNNEL;
+		{
+		  error1 = VXLAN_GBP_ERROR_NO_SUCH_TUNNEL;
+		  next1 = VXLAN_GBP_INPUT_NEXT_NO_TUNNEL;
+		}
 	      b1->error = node->errors[error1];
 	    }
 	  else
 	    {
-	      next1 = t1->decap_next_index;
-	      vnet_buffer2 (b1)->gbp.flags =
-		vxlan_gbp_get_gpflags (vxlan_gbp1);
-	      vnet_buffer2 (b1)->gbp.src_epg =
-		vxlan_gbp_get_sclass (vxlan_gbp1);
-
-	      /* Required to make the l2 tag push / pop code work on l2 subifs */
-	      if (PREDICT_TRUE (next1 == VXLAN_GBP_INPUT_NEXT_L2_INPUT))
-		vnet_update_l2_len (b1);
+	      next1 = vxlan_gbp_tunnel_get_next (t1, b1);
 
 	      /* Set packet input sw_if_index to unicast VXLAN_GBP tunnel for learning */
 	      vnet_buffer (b1)->sw_if_index[VLIB_RX] = t1->sw_if_index;
@@ -349,6 +368,12 @@
 		(rx_counter, thread_index, stats_t1->sw_if_index, 1, len1);
 	    }
 
+	  vnet_buffer2 (b1)->gbp.flags = vxlan_gbp_get_gpflags (vxlan_gbp1);
+	  vnet_buffer2 (b1)->gbp.src_epg = vxlan_gbp_get_sclass (vxlan_gbp1);
+
+	  /* Required to make the l2 tag push / pop code work on l2 subifs */
+	  vnet_update_l2_len (b0);
+	  vnet_update_l2_len (b1);
+
 	  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
 	    {
 	      vxlan_gbp_rx_trace_t *tr =
@@ -358,6 +383,7 @@
 	      tr->tunnel_index = t0 == 0 ? ~0 : t0 - vxm->tunnels;
 	      tr->vni = vxlan_gbp_get_vni (vxlan_gbp0);
 	      tr->sclass = vxlan_gbp_get_sclass (vxlan_gbp0);
+	      tr->flags = vxlan_gbp_get_gpflags (vxlan_gbp0);
 	    }
 	  if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
 	    {
@@ -368,6 +394,7 @@
 	      tr->tunnel_index = t1 == 0 ? ~0 : t1 - vxm->tunnels;
 	      tr->vni = vxlan_gbp_get_vni (vxlan_gbp1);
 	      tr->sclass = vxlan_gbp_get_sclass (vxlan_gbp1);
+	      tr->flags = vxlan_gbp_get_gpflags (vxlan_gbp1);
 	    }
 
 	  vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
@@ -395,9 +422,6 @@
 	  else
 	    ip6_0 = cur0 - sizeof (udp_header_t) - sizeof (ip6_header_t);
 
-	  /* pop (ip, udp, vxlan_gbp) */
-	  vlib_buffer_advance (b0, sizeof (*vxlan_gbp0));
-
 	  u32 fi0 = buf_fib_index (b0, is_ip4);
 
 	  vxlan_gbp_tunnel_t *t0, *stats_t0 = 0;
@@ -412,15 +436,16 @@
 
 	  uword len0 = vlib_buffer_length_in_chain (vm, b0);
 
-	  u32 next0;
+	  vxlan_gbp_input_next_t next0;
 	  u8 error0 = 0;
 	  u8 flags0 = vxlan_gbp_get_flags (vxlan_gbp0);
+
+	  /* pop (ip, udp, vxlan_gbp) */
+	  vlib_buffer_advance (b0, sizeof (*vxlan_gbp0));
 	  /* Validate VXLAN_GBP tunnel encap-fib index against packet */
 	  if (PREDICT_FALSE
 	      (t0 == 0 || flags0 != (VXLAN_GBP_FLAGS_I | VXLAN_GBP_FLAGS_G)))
 	    {
-	      next0 = VXLAN_GBP_INPUT_NEXT_DROP;
-
 	      if (t0 != 0
 		  && flags0 != (VXLAN_GBP_FLAGS_I | VXLAN_GBP_FLAGS_G))
 		{
@@ -428,24 +453,18 @@
 		  vlib_increment_combined_counter
 		    (drop_counter, thread_index, stats_t0->sw_if_index, 1,
 		     len0);
+		  next0 = VXLAN_GBP_INPUT_NEXT_DROP;
 		}
 	      else
-		error0 = VXLAN_GBP_ERROR_NO_SUCH_TUNNEL;
+		{
+		  error0 = VXLAN_GBP_ERROR_NO_SUCH_TUNNEL;
+		  next0 = VXLAN_GBP_INPUT_NEXT_NO_TUNNEL;
+		}
 	      b0->error = node->errors[error0];
 	    }
 	  else
 	    {
-	      next0 = t0->decap_next_index;
-	      vnet_buffer2 (b0)->gbp.flags =
-		vxlan_gbp_get_gpflags (vxlan_gbp0);
-	      vnet_buffer2 (b0)->gbp.src_epg =
-		vxlan_gbp_get_sclass (vxlan_gbp0);
-
-
-	      /* Required to make the l2 tag push / pop code work on l2 subifs */
-	      if (PREDICT_TRUE (next0 == VXLAN_GBP_INPUT_NEXT_L2_INPUT))
-		vnet_update_l2_len (b0);
-
+	      next0 = vxlan_gbp_tunnel_get_next (t0, b0);
 	      /* Set packet input sw_if_index to unicast VXLAN_GBP tunnel for learning */
 	      vnet_buffer (b0)->sw_if_index[VLIB_RX] = t0->sw_if_index;
 	      pkts_decapsulated++;
@@ -453,6 +472,11 @@
 	      vlib_increment_combined_counter
 		(rx_counter, thread_index, stats_t0->sw_if_index, 1, len0);
 	    }
+	  vnet_buffer2 (b0)->gbp.flags = vxlan_gbp_get_gpflags (vxlan_gbp0);
+	  vnet_buffer2 (b0)->gbp.src_epg = vxlan_gbp_get_sclass (vxlan_gbp0);
+
+	  /* Required to make the l2 tag push / pop code work on l2 subifs */
+	  vnet_update_l2_len (b0);
 
 	  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
 	    {
@@ -463,6 +487,7 @@
 	      tr->tunnel_index = t0 == 0 ? ~0 : t0 - vxm->tunnels;
 	      tr->vni = vxlan_gbp_get_vni (vxlan_gbp0);
 	      tr->sclass = vxlan_gbp_get_sclass (vxlan_gbp0);
+	      tr->flags = vxlan_gbp_get_gpflags (vxlan_gbp0);
 	    }
 	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
 					   to_next, n_left_to_next,
diff --git a/src/vnet/vxlan-gbp/vxlan_gbp.api b/src/vnet/vxlan-gbp/vxlan_gbp.api
index 6e41ec8..3e213dd 100644
--- a/src/vnet/vxlan-gbp/vxlan_gbp.api
+++ b/src/vnet/vxlan-gbp/vxlan_gbp.api
@@ -23,7 +23,6 @@
     @param dst - Destination IP address, can be multicast
     @param mcast_sw_if_index - Interface for multicast destination
     @param encap_table_id - Encap route table 
-    @param decap_next_index - Name of decap next graph node
     @param vni - The VXLAN Network Identifier, uint24
     @param sw_ifindex - Ignored in add message, set in details
 */
@@ -34,7 +33,6 @@
   vl_api_address_t dst;
   u32 mcast_sw_if_index;
   u32 encap_table_id;
-  u32 decap_next_index;
   u32 vni;
   u32 sw_if_index;
 };
diff --git a/src/vnet/vxlan-gbp/vxlan_gbp.c b/src/vnet/vxlan-gbp/vxlan_gbp.c
index ec4f923..691cc76 100644
--- a/src/vnet/vxlan-gbp/vxlan_gbp.c
+++ b/src/vnet/vxlan-gbp/vxlan_gbp.c
@@ -32,16 +32,21 @@
 
 vxlan_gbp_main_t vxlan_gbp_main;
 
-static u8 *
-format_decap_next (u8 * s, va_list * args)
+u8 *
+format_vxlan_gbp_tunnel_mode (u8 * s, va_list * args)
 {
-  u32 next_index = va_arg (*args, u32);
+  vxlan_gbp_tunnel_mode_t mode = va_arg (*args, vxlan_gbp_tunnel_mode_t);
 
-  if (next_index == VXLAN_GBP_INPUT_NEXT_DROP)
-    return format (s, "drop");
-  else
-    return format (s, "index %d", next_index);
-  return s;
+  switch (mode)
+    {
+    case VXLAN_GBP_TUNNEL_MODE_L2:
+      s = format (s, "L2");
+      break;
+    case VXLAN_GBP_TUNNEL_MODE_L3:
+      s = format (s, "L3");
+      break;
+    }
+  return (s);
 }
 
 u8 *
@@ -51,17 +56,15 @@
 
   s = format (s,
 	      "[%d] instance %d src %U dst %U vni %d fib-idx %d"
-	      " sw-if-idx %d ",
+	      " sw-if-idx %d mode %U ",
 	      t->dev_instance, t->user_instance,
 	      format_ip46_address, &t->src, IP46_TYPE_ANY,
 	      format_ip46_address, &t->dst, IP46_TYPE_ANY,
-	      t->vni, t->encap_fib_index, t->sw_if_index);
+	      t->vni, t->encap_fib_index, t->sw_if_index,
+	      format_vxlan_gbp_tunnel_mode, t->mode);
 
   s = format (s, "encap-dpo-idx %d ", t->next_dpo.dpoi_index);
 
-  if (PREDICT_FALSE (t->decap_next_index != VXLAN_GBP_INPUT_NEXT_L2_INPUT))
-    s = format (s, "decap-next-%U ", format_decap_next, t->decap_next_index);
-
   if (PREDICT_FALSE (ip46_address_is_multicast (&t->dst)))
     s = format (s, "mcast-sw-if-idx %d ", t->mcast_sw_if_index);
 
@@ -210,9 +213,9 @@
 
 #define foreach_copy_field                      \
 _(vni)                                          \
+_(mode)                                         \
 _(mcast_sw_if_index)                            \
 _(encap_fib_index)                              \
-_(decap_next_index)                             \
 _(src)                                          \
 _(dst)
 
@@ -267,18 +270,6 @@
   vnet_rewrite_set_data (*t, &h, len);
 }
 
-static bool
-vxlan_gbp_decap_next_is_valid (vxlan_gbp_main_t * vxm, u32 is_ip6,
-			       u32 decap_next_index)
-{
-  vlib_main_t *vm = vxm->vlib_main;
-  u32 input_idx = (!is_ip6) ?
-    vxlan4_gbp_input_node.index : vxlan6_gbp_input_node.index;
-  vlib_node_runtime_t *r = vlib_node_get_runtime (vm, input_idx);
-
-  return decap_next_index < r->n_next_nodes;
-}
-
 static uword
 vtep_addr_ref (ip46_address_t * ip)
 {
@@ -434,14 +425,11 @@
 
       /* adding a tunnel: tunnel must not already exist */
       if (p)
-	return VNET_API_ERROR_TUNNEL_EXIST;
-
-      /* if not set explicitly, default to l2 */
-      if (a->decap_next_index == ~0)
-	a->decap_next_index = VXLAN_GBP_INPUT_NEXT_L2_INPUT;
-      if (!vxlan_gbp_decap_next_is_valid (vxm, is_ip6, a->decap_next_index))
-	return VNET_API_ERROR_INVALID_DECAP_NEXT;
-
+	{
+	  t = pool_elt_at_index (vxm->tunnels, *p);
+	  *sw_if_indexp = t->sw_if_index;
+	  return VNET_API_ERROR_TUNNEL_EXIST;
+	}
       pool_get_aligned (vxm->tunnels, t, CLIB_CACHE_LINE_BYTES);
       clib_memset (t, 0, sizeof (*t));
       dev_instance = t - vxm->tunnels;
@@ -505,6 +493,12 @@
 
       t->sw_if_index = sw_if_index = hi->sw_if_index;
 
+      if (VXLAN_GBP_TUNNEL_MODE_L3 == t->mode)
+	{
+	  ip4_sw_interface_enable_disable (t->sw_if_index, 1);
+	  ip6_sw_interface_enable_disable (t->sw_if_index, 1);
+	}
+
       vec_validate_init_empty (vxm->tunnel_index_by_sw_if_index, sw_if_index,
 			       ~0);
       vxm->tunnel_index_by_sw_if_index[sw_if_index] = dev_instance;
@@ -626,6 +620,12 @@
       sw_if_index = t->sw_if_index;
       vnet_sw_interface_set_flags (vnm, sw_if_index, 0 /* down */ );
 
+      if (VXLAN_GBP_TUNNEL_MODE_L3 == t->mode)
+	{
+	  ip4_sw_interface_enable_disable (t->sw_if_index, 0);
+	  ip6_sw_interface_enable_disable (t->sw_if_index, 0);
+	}
+
       vxm->tunnel_index_by_sw_if_index[sw_if_index] = ~0;
 
       if (!is_ip6)
@@ -660,6 +660,36 @@
   return 0;
 }
 
+int
+vnet_vxlan_gbp_tunnel_del (u32 sw_if_index)
+{
+  vxlan_gbp_main_t *vxm = &vxlan_gbp_main;
+  vxlan_gbp_tunnel_t *t = 0;
+  u32 ti;
+
+  if (sw_if_index >= vec_len (vxm->tunnel_index_by_sw_if_index))
+    return VNET_API_ERROR_NO_SUCH_ENTRY;
+
+  ti = vxm->tunnel_index_by_sw_if_index[sw_if_index];
+  if (~0 != ti)
+    {
+      t = pool_elt_at_index (vxm->tunnels, ti);
+
+      vnet_vxlan_gbp_tunnel_add_del_args_t args = {
+	.is_add = 0,
+	.is_ip6 = !ip46_address_is_ip4 (&t->src),
+	.vni = t->vni,
+	.src = t->src,
+	.dst = t->dst,
+	.instance = ~0,
+      };
+
+      return (vnet_vxlan_gbp_tunnel_add_del (&args, NULL));
+    }
+
+  return VNET_API_ERROR_NO_SUCH_ENTRY;
+}
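
For illustration (not part of this patch): this sw_if_index-keyed wrapper lets the GBP code tear down a cloned tunnel without re-building the add-time arguments; child_sw_if_index below is an illustrative value:

    int rv = vnet_vxlan_gbp_tunnel_del (child_sw_if_index);

    if (VNET_API_ERROR_NO_SUCH_ENTRY == rv)
      clib_warning ("no vxlan-gbp tunnel on sw_if_index %d",
                    child_sw_if_index);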
+
 static uword
 get_decap_next_for_node (u32 node_index, u32 ipv4_set)
 {
@@ -700,6 +730,7 @@
   unformat_input_t _line_input, *line_input = &_line_input;
   ip46_address_t src = ip46_address_initializer, dst =
     ip46_address_initializer;
+  vxlan_gbp_tunnel_mode_t mode = VXLAN_GBP_TUNNEL_MODE_L2;
   u8 is_add = 1;
   u8 src_set = 0;
   u8 dst_set = 0;
diff --git a/src/vnet/vxlan-gbp/vxlan_gbp.h b/src/vnet/vxlan-gbp/vxlan_gbp.h
index f9edcdc..66f0cff 100644
--- a/src/vnet/vxlan-gbp/vxlan_gbp.h
+++ b/src/vnet/vxlan-gbp/vxlan_gbp.h
@@ -59,6 +59,14 @@
 */
 typedef clib_bihash_kv_24_8_t vxlan6_gbp_tunnel_key_t;
 
+typedef enum vxlan_gbp_tunnel_mode_t_
+{
+  VXLAN_GBP_TUNNEL_MODE_L2,
+  VXLAN_GBP_TUNNEL_MODE_L3,
+} vxlan_gbp_tunnel_mode_t;
+
+extern u8 *format_vxlan_gbp_tunnel_mode (u8 * s, va_list * args);
+
 typedef struct
 {
   /* Required for pool_get_aligned */
@@ -67,9 +75,6 @@
   /* FIB DPO for IP forwarding of VXLAN encap packet */
   dpo_id_t next_dpo;
 
-  /*  Group Policy ID */
-  u16 sclass;
-
   /* flags */
   u16 flags;
 
@@ -83,9 +88,6 @@
   /* mcast packet output intfc index (used only if dst is mcast) */
   u32 mcast_sw_if_index;
 
-  /* decap next index */
-  u32 decap_next_index;
-
   /* The FIB index for src/dst addresses */
   u32 encap_fib_index;
 
@@ -97,6 +99,12 @@
   uword encap_next_node;
 
   /**
+   * Tunnel mode.
+   * L2 tunnels decap to L2 path, L3 tunnels to the L3 path
+   */
+  vxlan_gbp_tunnel_mode_t mode;
+
+  /**
    * Linkage into the FIB object graph
    */
   fib_node_t node;
@@ -122,9 +130,12 @@
     vnet_declare_rewrite (VLIB_BUFFER_PRE_DATA_SIZE);
 } vxlan_gbp_tunnel_t;
 
-#define foreach_vxlan_gbp_input_next        \
-_(DROP, "error-drop")                   \
-_(L2_INPUT, "l2-input")
+#define foreach_vxlan_gbp_input_next         \
+  _(DROP, "error-drop")                      \
+  _(NO_TUNNEL, "error-punt")                 \
+  _(L2_INPUT, "l2-input")                    \
+  _(IP4_INPUT, "ip4-input")                  \
+  _(IP6_INPUT, "ip6-input")
 
 typedef enum
 {
@@ -142,6 +153,13 @@
   VXLAN_GBP_N_ERROR,
 } vxlan_gbp_input_error_t;
 
+/**
+ * Callback function for packets that do not match a configured tunnel
+ */
+typedef vxlan_gbp_input_next_t (*vxlan_gbp_no_tunnel_t) (vlib_buffer_t * b,
+							 u32 thread_index,
+							 u8 is_ip6);
+
 typedef struct
 {
   /* vector of encap tunnel instances */
@@ -189,20 +207,22 @@
   u8 is_add;
   u8 is_ip6;
   u32 instance;
+  vxlan_gbp_tunnel_mode_t mode;
   ip46_address_t src, dst;
   u32 mcast_sw_if_index;
   u32 encap_fib_index;
-  u32 decap_next_index;
   u32 vni;
 } vnet_vxlan_gbp_tunnel_add_del_args_t;
 
 int vnet_vxlan_gbp_tunnel_add_del
   (vnet_vxlan_gbp_tunnel_add_del_args_t * a, u32 * sw_if_indexp);
+int vnet_vxlan_gbp_tunnel_del (u32 sw_if_index);
 
 void vnet_int_vxlan_gbp_bypass_mode (u32 sw_if_index, u8 is_ip6,
 				     u8 is_enable);
 
 u32 vnet_vxlan_gbp_get_tunnel_index (u32 sw_if_index);
+
 #endif /* included_vnet_vxlan_gbp_h */
 
 /*
diff --git a/src/vnet/vxlan-gbp/vxlan_gbp_api.c b/src/vnet/vxlan-gbp/vxlan_gbp_api.c
index b7e6935..f5e97e5 100644
--- a/src/vnet/vxlan-gbp/vxlan_gbp_api.c
+++ b/src/vnet/vxlan-gbp/vxlan_gbp_api.c
@@ -92,10 +92,10 @@
     .instance = ntohl (mp->tunnel.instance),
     .mcast_sw_if_index = ntohl (mp->tunnel.mcast_sw_if_index),
     .encap_fib_index = fib_index,
-    .decap_next_index = ntohl (mp->tunnel.decap_next_index),
     .vni = ntohl (mp->tunnel.vni),
     .dst = dst,
     .src = src,
+    .mode = VXLAN_GBP_TUNNEL_MODE_L2,
   };
 
   /* Check src & dst are different */
@@ -142,7 +142,6 @@
   rmp->tunnel.instance = htonl (t->user_instance);
   rmp->tunnel.mcast_sw_if_index = htonl (t->mcast_sw_if_index);
   rmp->tunnel.vni = htonl (t->vni);
-  rmp->tunnel.decap_next_index = htonl (t->decap_next_index);
   rmp->tunnel.sw_if_index = htonl (t->sw_if_index);
   rmp->context = context;
 
diff --git a/src/vnet/vxlan-gbp/vxlan_gbp_packet.c b/src/vnet/vxlan-gbp/vxlan_gbp_packet.c
new file mode 100644
index 0000000..01c7a19
--- /dev/null
+++ b/src/vnet/vxlan-gbp/vxlan_gbp_packet.c
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/vxlan-gbp/vxlan_gbp_packet.h>
+
+u8 *
+format_vxlan_gbp_header_flags (u8 * s, va_list * args)
+{
+  vxlan_gbp_flags_t flags = va_arg (*args, int);
+
+  if (VXLAN_GBP_FLAGS_NONE == flags)
+    {
+      s = format (s, "None");
+    }
+#define _(n,f) {                          \
+    if (VXLAN_GBP_FLAGS_##f & flags)      \
+      s = format (s, #f);                 \
+  }
+  foreach_vxlan_gbp_flags
+#undef _
+    return (s);
+}
+
+u8 *
+format_vxlan_gbp_header_gpflags (u8 * s, va_list * args)
+{
+  vxlan_gbp_gpflags_t flags = va_arg (*args, int);
+
+  if (VXLAN_GBP_GPFLAGS_NONE == flags)
+    {
+      s = format (s, "None");
+    }
+#define _(n,f) {                          \
+    if (VXLAN_GBP_GPFLAGS_##f & flags)    \
+      s = format (s, #f);                 \
+  }
+  foreach_vxlan_gbp_gpflags
+#undef _
+    return (s);
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
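
For illustration (not part of this patch), both formatters plug into the vppinfra %U convention:

    /* Sketch: log VXLAN-GBP header and group-policy flags */
    clib_warning ("flags:%U gpflags:%U",
                  format_vxlan_gbp_header_flags,
                  (int) (VXLAN_GBP_FLAGS_G | VXLAN_GBP_FLAGS_I),
                  format_vxlan_gbp_header_gpflags,
                  (int) VXLAN_GBP_GPFLAGS_D);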
diff --git a/src/vnet/vxlan-gbp/vxlan_gbp_packet.h b/src/vnet/vxlan-gbp/vxlan_gbp_packet.h
index e1674a0..33bccd6 100644
--- a/src/vnet/vxlan-gbp/vxlan_gbp_packet.h
+++ b/src/vnet/vxlan-gbp/vxlan_gbp_packet.h
@@ -15,6 +15,8 @@
 #ifndef __included_vxlan_gbp_packet_h__
 #define __included_vxlan_gbp_packet_h__ 1
 
+#include <vlib/vlib.h>
+
 /*
  * From draft-smith-vxlan-group-policy-04.txt
  *
@@ -85,8 +87,17 @@
   u32 vni_reserved;
 } vxlan_gbp_header_t;
 
-#define VXLAN_GBP_FLAGS_G 0x80
-#define VXLAN_GBP_FLAGS_I 0x08
+#define foreach_vxlan_gbp_flags    \
+  _ (0x80, G)                      \
+  _ (0x08, I)
+
+typedef enum
+{
+  VXLAN_GBP_FLAGS_NONE = 0,
+#define _(n,f) VXLAN_GBP_FLAGS_##f = n,
+  foreach_vxlan_gbp_flags
+#undef _
+} __attribute__ ((packed)) vxlan_gbp_flags_t;
 
 #define foreach_vxlan_gbp_gpflags \
 _ (0x40, D)                       \
@@ -96,10 +107,11 @@
 
 typedef enum
 {
+  VXLAN_GBP_GPFLAGS_NONE = 0,
 #define _(n,f) VXLAN_GBP_GPFLAGS_##f = n,
   foreach_vxlan_gbp_gpflags
 #undef _
-} vxlan_gbp_gpflag_t;
+} __attribute__ ((packed)) vxlan_gbp_gpflags_t;
 
 static inline u32
 vxlan_gbp_get_vni (vxlan_gbp_header_t * h)
@@ -119,13 +131,13 @@
   return sclass_host_byte_order;
 }
 
-static inline u8
+static inline vxlan_gbp_gpflags_t
 vxlan_gbp_get_gpflags (vxlan_gbp_header_t * h)
 {
   return h->gpflags;
 }
 
-static inline u8
+static inline vxlan_gbp_flags_t
 vxlan_gbp_get_flags (vxlan_gbp_header_t * h)
 {
   return h->flag_g_i;
@@ -139,6 +151,9 @@
   h->flag_g_i = VXLAN_GBP_FLAGS_I | VXLAN_GBP_FLAGS_G;
 }
 
+extern u8 *format_vxlan_gbp_header_flags (u8 * s, va_list * args);
+extern u8 *format_vxlan_gbp_header_gpflags (u8 * s, va_list * args);
+
 #endif /* __included_vxlan_gbp_packet_h__ */
 
 /*