ipfix: add classification nodes for flow statistics (VPP-204)
In order to have a meaningful IPFIX implementation, we need to be able
to classify all packets flowing through vpp. The existing IPv4 and
IPv6 classifier nodes are invoked only when the destination IP address
is local to vpp. This commit adds new IPv4 and IPv6 classifier nodes
(ip4-flow-classify and ip6-flow-classify), inserted into the unicast
RX feature path ahead of ip4-inacl/ip6-inacl, to be used for
collecting flow statistics.
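
A minimal usage sketch with the new CLI (the interface name and table
index are illustrative; a classify table is assumed to have been
created beforehand with the existing "classify table" CLI):

    vpp# set flow classify interface GigabitEthernet0/8/0 ip4-table 0
    vpp# show classify flow type ip4
    vpp# set flow classify interface GigabitEthernet0/8/0 ip4-table 0 del
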
Change-Id: I60e60105663ba15b5200862a23bb817047fe4d1a
Signed-off-by: Juraj Sloboda <jsloboda@cisco.com>
diff --git a/vnet/Makefile.am b/vnet/Makefile.am
index c0ae70d..f130d15 100644
--- a/vnet/Makefile.am
+++ b/vnet/Makefile.am
@@ -249,12 +249,15 @@
vnet/classify/ip_classify.c \
vnet/classify/input_acl.c \
vnet/classify/policer_classify.c \
+ vnet/classify/flow_classify.c \
+ vnet/classify/flow_classify_node.c \
vnet/classify/vnet_classify.h
nobase_include_HEADERS += \
vnet/classify/vnet_classify.h \
vnet/classify/input_acl.h \
- vnet/classify/policer_classify.h
+ vnet/classify/policer_classify.h \
+ vnet/classify/flow_classify.h
########################################
# Layer 3 protocols go here
diff --git a/vnet/vnet/classify/flow_classify.c b/vnet/vnet/classify/flow_classify.c
new file mode 100644
index 0000000..3269994
--- /dev/null
+++ b/vnet/vnet/classify/flow_classify.c
@@ -0,0 +1,218 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vnet/classify/flow_classify.h>
+
+static void
+vnet_flow_classify_feature_enable (vlib_main_t * vnm,
+ flow_classify_main_t * fcm,
+ u32 sw_if_index,
+ flow_classify_table_id_t tid,
+ int feature_enable)
+{
+ ip_lookup_main_t * lm;
+ ip_config_main_t * ifcm;
+ u32 ftype;
+ u32 ci;
+
+ if (tid == FLOW_CLASSIFY_TABLE_IP4)
+ {
+ lm = &ip4_main.lookup_main;
+ ftype = ip4_main.ip4_unicast_rx_feature_flow_classify;
+ }
+ else
+ {
+ lm = &ip6_main.lookup_main;
+ ftype = ip6_main.ip6_unicast_rx_feature_flow_classify;
+ }
+
+ ifcm = &lm->feature_config_mains[VNET_IP_RX_UNICAST_FEAT];
+
+ ci = ifcm->config_index_by_sw_if_index[sw_if_index];
+ ci = (feature_enable ? vnet_config_add_feature : vnet_config_del_feature)
+ (vnm, &ifcm->config_main, ci, ftype, 0, 0);
+
+ ifcm->config_index_by_sw_if_index[sw_if_index] = ci;
+ fcm->vnet_config_main[tid] = &ifcm->config_main;
+}
+
+int vnet_set_flow_classify_intfc (vlib_main_t * vm, u32 sw_if_index,
+ u32 ip4_table_index, u32 ip6_table_index,
+ u32 is_add)
+{
+ flow_classify_main_t * fcm = &flow_classify_main;
+ vnet_classify_main_t * vcm = fcm->vnet_classify_main;
+ u32 pct[FLOW_CLASSIFY_N_TABLES] = {ip4_table_index, ip6_table_index};
+ u32 ti;
+
+ /* Assume that we've validated sw_if_index in the API layer */
+
+ for (ti = 0; ti < FLOW_CLASSIFY_N_TABLES; ti++)
+ {
+ if (pct[ti] == ~0)
+ continue;
+
+ if (pool_is_free_index (vcm->tables, pct[ti]))
+ return VNET_API_ERROR_NO_SUCH_TABLE;
+
+ vec_validate_init_empty
+ (fcm->classify_table_index_by_sw_if_index[ti], sw_if_index, ~0);
+
+ /* Reject any DEL operation with wrong sw_if_index */
+ if (!is_add &&
+ (pct[ti] != fcm->classify_table_index_by_sw_if_index[ti][sw_if_index]))
+ {
+ clib_warning ("Non-existent intf_idx=%d with table_index=%d for delete",
+ sw_if_index, pct[ti]);
+ return VNET_API_ERROR_NO_SUCH_TABLE;
+ }
+
+      /* Return ok on ADD operation if feature is already enabled */
+ if (is_add &&
+ fcm->classify_table_index_by_sw_if_index[ti][sw_if_index] != ~0)
+ return 0;
+
+ vnet_flow_classify_feature_enable (vm, fcm, sw_if_index, ti, is_add);
+
+ if (is_add)
+ fcm->classify_table_index_by_sw_if_index[ti][sw_if_index] = pct[ti];
+ else
+ fcm->classify_table_index_by_sw_if_index[ti][sw_if_index] = ~0;
+ }
+
+ return 0;
+}
+
+static clib_error_t *
+set_flow_classify_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ vnet_main_t * vnm = vnet_get_main();
+ u32 sw_if_index = ~0;
+ u32 ip4_table_index = ~0;
+ u32 ip6_table_index = ~0;
+ u32 is_add = 1;
+ u32 idx_cnt = 0;
+ int rv;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "interface %U", unformat_vnet_sw_interface,
+ vnm, &sw_if_index))
+ ;
+ else if (unformat (input, "ip4-table %d", &ip4_table_index))
+ idx_cnt++;
+ else if (unformat (input, "ip6-table %d", &ip6_table_index))
+ idx_cnt++;
+ else if (unformat (input, "del"))
+ is_add = 0;
+ else
+ break;
+ }
+
+ if (sw_if_index == ~0)
+ return clib_error_return (0, "Interface must be specified.");
+
+ if (!idx_cnt)
+ return clib_error_return (0, "Table index should be specified.");
+
+ if (idx_cnt > 1)
+    return clib_error_return (0, "Only one table index per API call is allowed.");
+
+ rv = vnet_set_flow_classify_intfc(vm, sw_if_index, ip4_table_index,
+ ip6_table_index, is_add);
+
+ switch (rv)
+ {
+ case 0:
+ break;
+
+ case VNET_API_ERROR_NO_MATCHING_INTERFACE:
+ return clib_error_return (0, "No such interface");
+
+    case VNET_API_ERROR_NO_SUCH_TABLE:
+ return clib_error_return (0, "No such classifier table");
+ }
+ return 0;
+}
+
+VLIB_CLI_COMMAND (set_flow_classify_command, static) = {
+ .path = "set flow classify",
+ .short_help =
+ "set flow classify interface <int> [ip4-table <index>]\n"
+ " [ip6-table <index>] [del]",
+ .function = set_flow_classify_command_fn,
+};
+
+static uword
+unformat_table_type (unformat_input_t * input, va_list * va)
+{
+ u32 * r = va_arg (*va, u32 *);
+ u32 tid;
+
+ if (unformat (input, "ip4"))
+ tid = FLOW_CLASSIFY_TABLE_IP4;
+ else if (unformat (input, "ip6"))
+ tid = FLOW_CLASSIFY_TABLE_IP6;
+ else
+ return 0;
+
+ *r = tid;
+ return 1;
+}
+
+static clib_error_t *
+show_flow_classify_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ flow_classify_main_t * fcm = &flow_classify_main;
+ u32 type = FLOW_CLASSIFY_N_TABLES;
+ u32 * vec_tbl;
+ int i;
+
+ if (unformat (input, "type %U", unformat_table_type, &type))
+ ;
+ else
+    return clib_error_return (0, "Type must be specified.");
+
+ if (type == FLOW_CLASSIFY_N_TABLES)
+ return clib_error_return (0, "Invalid table type.");
+
+ vec_tbl = fcm->classify_table_index_by_sw_if_index[type];
+
+ if (vec_len(vec_tbl))
+ vlib_cli_output (vm, "%10s%20s\t\t%s", "Intfc idx", "Classify table",
+ "Interface name");
+ else
+ vlib_cli_output (vm, "No tables configured.");
+
+ for (i = 0; i < vec_len (vec_tbl); i++)
+ {
+ if (vec_elt(vec_tbl, i) == ~0)
+ continue;
+
+ vlib_cli_output (vm, "%10d%20d\t\t%U", i, vec_elt(vec_tbl, i),
+ format_vnet_sw_if_index_name, fcm->vnet_main, i);
+ }
+
+ return 0;
+}
+
+VLIB_CLI_COMMAND (show_flow_classify_command, static) = {
+ .path = "show classify flow",
+  .short_help = "show classify flow type <ip4|ip6>",
+ .function = show_flow_classify_command_fn,
+};
diff --git a/vnet/vnet/classify/flow_classify.h b/vnet/vnet/classify/flow_classify.h
new file mode 100644
index 0000000..3ae04cd
--- /dev/null
+++ b/vnet/vnet/classify/flow_classify.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __included_vnet_flow_classify_h__
+#define __included_vnet_flow_classify_h__
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/classify/vnet_classify.h>
+
+typedef enum {
+ FLOW_CLASSIFY_TABLE_IP4,
+ FLOW_CLASSIFY_TABLE_IP6,
+ FLOW_CLASSIFY_N_TABLES,
+} flow_classify_table_id_t;
+
+typedef enum {
+ FLOW_CLASSIFY_NEXT_INDEX_DROP,
+ FLOW_CLASSIFY_NEXT_INDEX_N_NEXT,
+} flow_classify_next_index_t;
+
+typedef struct {
+ /* Classifier table vectors */
+ u32 * classify_table_index_by_sw_if_index [FLOW_CLASSIFY_N_TABLES];
+
+ /* Convenience variables */
+ vlib_main_t * vlib_main;
+ vnet_main_t * vnet_main;
+ vnet_classify_main_t * vnet_classify_main;
+ vnet_config_main_t * vnet_config_main [FLOW_CLASSIFY_N_TABLES];
+} flow_classify_main_t;
+
+flow_classify_main_t flow_classify_main;
+
+int vnet_set_flow_classify_intfc (vlib_main_t * vm, u32 sw_if_index,
+ u32 ip4_table_index, u32 ip6_table_index,
+ u32 is_add);
+
+#endif /* __included_vnet_flow_classify_h__ */
diff --git a/vnet/vnet/classify/flow_classify_node.c b/vnet/vnet/classify/flow_classify_node.c
new file mode 100644
index 0000000..aa3bce5
--- /dev/null
+++ b/vnet/vnet/classify/flow_classify_node.c
@@ -0,0 +1,356 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/ip/ip.h>
+#include <vnet/classify/flow_classify.h>
+#include <vnet/classify/vnet_classify.h>
+
+typedef struct {
+ u32 sw_if_index;
+ u32 next_index;
+ u32 table_index;
+ u32 offset;
+} flow_classify_trace_t;
+
+static u8 *
+format_flow_classify_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ flow_classify_trace_t * t = va_arg (*args, flow_classify_trace_t *);
+
+ s = format (s, "FLOW_CLASSIFY: sw_if_index %d next %d table %d offset %d",
+ t->sw_if_index, t->next_index, t->table_index, t->offset);
+ return s;
+}
+
+#define foreach_flow_classify_error \
+_(MISS, "Flow classify misses") \
+_(HIT, "Flow classify hits") \
+_(CHAIN_HIT, "Flow classify hits after chain walk") \
+_(DROP, "Flow classify action drop")
+
+typedef enum {
+#define _(sym,str) FLOW_CLASSIFY_ERROR_##sym,
+ foreach_flow_classify_error
+#undef _
+ FLOW_CLASSIFY_N_ERROR,
+} flow_classify_error_t;
+
+static char * flow_classify_error_strings[] = {
+#define _(sym,string) string,
+ foreach_flow_classify_error
+#undef _
+};
+
+static inline uword
+flow_classify_inline (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame,
+ flow_classify_table_id_t tid)
+{
+ u32 n_left_from, * from, * to_next;
+ flow_classify_next_index_t next_index;
+ flow_classify_main_t * fcm = &flow_classify_main;
+ vnet_classify_main_t * vcm = fcm->vnet_classify_main;
+ f64 now = vlib_time_now (vm);
+ u32 hits = 0;
+ u32 misses = 0;
+ u32 chain_hits = 0;
+ u32 drop = 0;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+
+ /* First pass: compute hashes */
+ while (n_left_from > 2)
+ {
+ vlib_buffer_t * b0, * b1;
+ u32 bi0, bi1;
+ u8 * h0, * h1;
+ u32 sw_if_index0, sw_if_index1;
+ u32 table_index0, table_index1;
+ vnet_classify_table_t * t0, * t1;
+
+ /* Prefetch next iteration */
+ {
+ vlib_buffer_t * p1, * p2;
+
+ p1 = vlib_get_buffer (vm, from[1]);
+ p2 = vlib_get_buffer (vm, from[2]);
+
+ vlib_prefetch_buffer_header (p1, STORE);
+ CLIB_PREFETCH (p1->data, CLIB_CACHE_LINE_BYTES, STORE);
+ vlib_prefetch_buffer_header (p2, STORE);
+ CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE);
+ }
+
+ bi0 = from[0];
+ b0 = vlib_get_buffer (vm, bi0);
+ h0 = b0->data;
+
+ bi1 = from[1];
+ b1 = vlib_get_buffer (vm, bi1);
+ h1 = b1->data;
+
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ table_index0 = fcm->classify_table_index_by_sw_if_index[tid][sw_if_index0];
+
+ sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
+ table_index1 = fcm->classify_table_index_by_sw_if_index[tid][sw_if_index1];
+
+ t0 = pool_elt_at_index (vcm->tables, table_index0);
+
+ t1 = pool_elt_at_index (vcm->tables, table_index1);
+
+ vnet_buffer(b0)->l2_classify.hash =
+ vnet_classify_hash_packet (t0, (u8 *) h0);
+
+ vnet_classify_prefetch_bucket (t0, vnet_buffer(b0)->l2_classify.hash);
+
+ vnet_buffer(b1)->l2_classify.hash =
+ vnet_classify_hash_packet (t1, (u8 *) h1);
+
+ vnet_classify_prefetch_bucket (t1, vnet_buffer(b1)->l2_classify.hash);
+
+ vnet_buffer(b0)->l2_classify.table_index = table_index0;
+
+ vnet_buffer(b1)->l2_classify.table_index = table_index1;
+
+ from += 2;
+ n_left_from -= 2;
+ }
+
+ while (n_left_from > 0)
+ {
+ vlib_buffer_t * b0;
+ u32 bi0;
+ u8 * h0;
+ u32 sw_if_index0;
+ u32 table_index0;
+ vnet_classify_table_t * t0;
+
+ bi0 = from[0];
+ b0 = vlib_get_buffer (vm, bi0);
+ h0 = b0->data;
+
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ table_index0 = fcm->classify_table_index_by_sw_if_index[tid][sw_if_index0];
+
+ t0 = pool_elt_at_index (vcm->tables, table_index0);
+ vnet_buffer(b0)->l2_classify.hash =
+ vnet_classify_hash_packet (t0, (u8 *) h0);
+
+ vnet_buffer(b0)->l2_classify.table_index = table_index0;
+ vnet_classify_prefetch_bucket (t0, vnet_buffer(b0)->l2_classify.hash);
+
+ from++;
+ n_left_from--;
+ }
+
+ next_index = node->cached_next_index;
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ /* Not enough load/store slots to dual loop... */
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t * b0;
+ u32 next0 = FLOW_CLASSIFY_NEXT_INDEX_DROP;
+ u32 table_index0;
+ vnet_classify_table_t * t0;
+ vnet_classify_entry_t * e0;
+ u64 hash0;
+ u8 * h0;
+
+ /* Stride 3 seems to work best */
+ if (PREDICT_TRUE (n_left_from > 3))
+ {
+ vlib_buffer_t * p1 = vlib_get_buffer(vm, from[3]);
+ vnet_classify_table_t * tp1;
+ u32 table_index1;
+ u64 phash1;
+
+ table_index1 = vnet_buffer(p1)->l2_classify.table_index;
+
+ if (PREDICT_TRUE (table_index1 != ~0))
+ {
+ tp1 = pool_elt_at_index (vcm->tables, table_index1);
+ phash1 = vnet_buffer(p1)->l2_classify.hash;
+ vnet_classify_prefetch_entry (tp1, phash1);
+ }
+ }
+
+ /* Speculatively enqueue b0 to the current next frame */
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ h0 = b0->data;
+ table_index0 = vnet_buffer(b0)->l2_classify.table_index;
+ e0 = 0;
+ t0 = 0;
+
+ vnet_get_config_data (fcm->vnet_config_main[tid],
+ &b0->current_config_index,
+ &next0,
+ /* # bytes of config data */ 0);
+
+ if (PREDICT_TRUE(table_index0 != ~0))
+ {
+ hash0 = vnet_buffer(b0)->l2_classify.hash;
+ t0 = pool_elt_at_index (vcm->tables, table_index0);
+ e0 = vnet_classify_find_entry (t0, (u8 *) h0, hash0, now);
+
+ if (e0)
+ {
+ hits++;
+ }
+ else
+ {
+ while (1)
+ {
+ if (PREDICT_TRUE(t0->next_table_index != ~0))
+ {
+ t0 = pool_elt_at_index (vcm->tables,
+ t0->next_table_index);
+ }
+ else
+ {
+ misses++;
+ break;
+ }
+
+ hash0 = vnet_classify_hash_packet (t0, (u8 *) h0);
+ e0 = vnet_classify_find_entry (t0, (u8 *) h0, hash0, now);
+ if (e0)
+ {
+ hits++;
+ chain_hits++;
+ break;
+ }
+ }
+ }
+ }
+ if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE)
+ && (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ flow_classify_trace_t * t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->sw_if_index = vnet_buffer(b0)->sw_if_index[VLIB_RX];
+ t->next_index = next0;
+ t->table_index = t0 ? t0 - vcm->tables : ~0;
+ t->offset = e0 ? vnet_classify_get_offset (t0, e0): ~0;
+ }
+
+ /* Verify speculative enqueue, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+ n_left_to_next, bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ vlib_node_increment_counter (vm, node->node_index,
+ FLOW_CLASSIFY_ERROR_MISS,
+ misses);
+ vlib_node_increment_counter (vm, node->node_index,
+ FLOW_CLASSIFY_ERROR_HIT,
+ hits);
+ vlib_node_increment_counter (vm, node->node_index,
+ FLOW_CLASSIFY_ERROR_CHAIN_HIT,
+ chain_hits);
+ vlib_node_increment_counter (vm, node->node_index,
+ FLOW_CLASSIFY_ERROR_DROP,
+ drop);
+
+ return frame->n_vectors;
+}
+
+static uword
+ip4_flow_classify (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return flow_classify_inline(vm, node, frame, FLOW_CLASSIFY_TABLE_IP4);
+}
+
+VLIB_REGISTER_NODE (ip4_flow_classify_node) = {
+ .function = ip4_flow_classify,
+ .name = "ip4-flow-classify",
+ .vector_size = sizeof (u32),
+ .format_trace = format_flow_classify_trace,
+ .n_errors = ARRAY_LEN(flow_classify_error_strings),
+ .error_strings = flow_classify_error_strings,
+ .n_next_nodes = FLOW_CLASSIFY_NEXT_INDEX_N_NEXT,
+ .next_nodes = {
+ [FLOW_CLASSIFY_NEXT_INDEX_DROP] = "error-drop",
+ },
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (ip4_flow_classify_node, ip4_flow_classify);
+
+static uword
+ip6_flow_classify (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return flow_classify_inline(vm, node, frame, FLOW_CLASSIFY_TABLE_IP6);
+}
+
+VLIB_REGISTER_NODE (ip6_flow_classify_node) = {
+ .function = ip6_flow_classify,
+ .name = "ip6-flow-classify",
+ .vector_size = sizeof (u32),
+ .format_trace = format_flow_classify_trace,
+ .n_errors = ARRAY_LEN(flow_classify_error_strings),
+ .error_strings = flow_classify_error_strings,
+ .n_next_nodes = FLOW_CLASSIFY_NEXT_INDEX_N_NEXT,
+ .next_nodes = {
+ [FLOW_CLASSIFY_NEXT_INDEX_DROP] = "error-drop",
+ },
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (ip6_flow_classify_node, ip6_flow_classify);
+
+
+static clib_error_t *
+flow_classify_init (vlib_main_t *vm)
+{
+ flow_classify_main_t * fcm = &flow_classify_main;
+
+ fcm->vlib_main = vm;
+ fcm->vnet_main = vnet_get_main();
+ fcm->vnet_classify_main = &vnet_classify_main;
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (flow_classify_init);
diff --git a/vnet/vnet/ip/ip4.h b/vnet/vnet/ip/ip4.h
index d3db4de..745ce1e 100644
--- a/vnet/vnet/ip/ip4.h
+++ b/vnet/vnet/ip/ip4.h
@@ -128,6 +128,8 @@
/** Built-in unicast feature path index, see @ref ip_feature_init_cast() */
u32 ip4_unicast_rx_feature_policer_classify;
/** Built-in unicast feature path index, see @ref ip_feature_init_cast() */
+ u32 ip4_unicast_rx_feature_flow_classify;
+  /** Built-in unicast feature path index, see @ref ip_feature_init_cast() */
u32 ip4_unicast_rx_feature_ipsec;
/** Built-in unicast feature path index, see @ref ip_feature_init_cast() */
u32 ip4_unicast_rx_feature_vpath;
diff --git a/vnet/vnet/ip/ip4_forward.c b/vnet/vnet/ip/ip4_forward.c
index 72c9fb0..e997366 100644
--- a/vnet/vnet/ip/ip4_forward.c
+++ b/vnet/vnet/ip/ip4_forward.c
@@ -797,6 +797,12 @@
}
/* Built-in ip4 unicast rx feature path definition */
+VNET_IP4_UNICAST_FEATURE_INIT (ip4_flow_classify, static) = {
+ .node_name = "ip4-flow-classify",
+ .runs_before = ORDER_CONSTRAINTS {"ip4-inacl", 0},
+ .feature_index = &ip4_main.ip4_unicast_rx_feature_flow_classify,
+};
+
VNET_IP4_UNICAST_FEATURE_INIT (ip4_inacl, static) = {
.node_name = "ip4-inacl",
.runs_before = ORDER_CONSTRAINTS {"ip4-source-check-via-rx", 0},
diff --git a/vnet/vnet/ip/ip6.h b/vnet/vnet/ip/ip6.h
index 58bc7f8..b80317e 100644
--- a/vnet/vnet/ip/ip6.h
+++ b/vnet/vnet/ip/ip6.h
@@ -166,6 +166,7 @@
/* Built-in unicast feature path indices, see ip_feature_init_cast(...) */
u32 ip6_unicast_rx_feature_check_access;
u32 ip6_unicast_rx_feature_policer_classify;
+ u32 ip6_unicast_rx_feature_flow_classify;
u32 ip6_unicast_rx_feature_ipsec;
u32 ip6_unicast_rx_feature_l2tp_decap;
u32 ip6_unicast_rx_feature_vpath;
diff --git a/vnet/vnet/ip/ip6_forward.c b/vnet/vnet/ip/ip6_forward.c
index 4833c11..4db6be5 100644
--- a/vnet/vnet/ip/ip6_forward.c
+++ b/vnet/vnet/ip/ip6_forward.c
@@ -576,6 +576,12 @@
VNET_SW_INTERFACE_ADMIN_UP_DOWN_FUNCTION (ip6_sw_interface_admin_up_down);
/* Built-in ip6 unicast rx feature path definition */
+VNET_IP6_UNICAST_FEATURE_INIT (ip6_flow_classify, static) = {
+ .node_name = "ip6-flow-classify",
+ .runs_before = ORDER_CONSTRAINTS {"ip6-inacl", 0},
+ .feature_index = &ip6_main.ip6_unicast_rx_feature_flow_classify,
+};
+
VNET_IP6_UNICAST_FEATURE_INIT (ip6_inacl, static) = {
.node_name = "ip6-inacl",
.runs_before = ORDER_CONSTRAINTS {"ip6-policer-classify", 0},
diff --git a/vnet/vnet/ip/ip_init.c b/vnet/vnet/ip/ip_init.c
index 02da664..603d1f7 100644
--- a/vnet/vnet/ip/ip_init.c
+++ b/vnet/vnet/ip/ip_init.c
@@ -136,6 +136,9 @@
if ((error = vlib_call_init_function (vm, policer_classify_init)))
return error;
+ if ((error = vlib_call_init_function (vm, flow_classify_init)))
+ return error;
+
return error;
}