dpdk: cherry-pick i40e flex payload fix

This patch cherry-picks the upstream DPDK i40e fix for a flex payload
flow creation failure: the flex PIT flag was not reset when a flex
payload rule was deleted, so a subsequent rule with a different flex
pattern could not be created. The fix reference-counts flex flows per
layer and clears the flag once the last rule for that layer is removed,
as sketched below.

Type: fix

Signed-off-by: Chenmin Sun <chenmin.sun@intel.com>
Change-Id: I3ac0a168dadd033a668d7bd6f5e78798aedb61a5
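
For reference, a minimal standalone sketch of the bookkeeping the
upstream fix introduces (simplified and hedged: field and function
names mirror the patch below, but the surrounding driver state and
register programming are stubbed out for illustration only):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define I40E_MAX_FLXPLD_LAYER 3

    struct fdir_info {
            bool flex_pit_flag[I40E_MAX_FLXPLD_LAYER];       /* layer programmed? */
            uint32_t flex_flow_count[I40E_MAX_FLXPLD_LAYER]; /* rules using layer */
    };

    /* Mirrors the logic the patch adds to i40e_flow_add_del_fdir_filter():
     * called after a flex payload rule is added or deleted for layer_idx. */
    static void
    update_flex_pit_flag(struct fdir_info *fdir, int layer_idx, bool add)
    {
            if (add) {
                    fdir->flex_flow_count[layer_idx]++;
                    fdir->flex_pit_flag[layer_idx] = true;
            } else {
                    fdir->flex_flow_count[layer_idx]--;
                    /* Clearing the flag once no rule references the layer is
                     * what lets a new rule with a different flex pattern
                     * reprogram the FLX_PIT registers; the original bug left
                     * the flag set after the rule was destroyed. */
                    if (fdir->flex_flow_count[layer_idx] == 0)
                            fdir->flex_pit_flag[layer_idx] = false;
            }
    }

    int main(void)
    {
            struct fdir_info fdir = {0};

            update_flex_pit_flag(&fdir, 0, true);   /* flow create ... raw ... */
            update_flex_pit_flag(&fdir, 0, false);  /* flow destroy 0 rule 0   */
            /* Flag is now clear, so a second flex rule can be created. */
            printf("flag after destroy: %d\n", fdir.flex_pit_flag[0]);
            return 0;
    }
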
diff --git a/build/external/patches/dpdk_20.11/0001-net-i40e-fix-flex-payload-rule-conflict.patch b/build/external/patches/dpdk_20.11/0001-net-i40e-fix-flex-payload-rule-conflict.patch
new file mode 100644
index 0000000..1a92b88
--- /dev/null
+++ b/build/external/patches/dpdk_20.11/0001-net-i40e-fix-flex-payload-rule-conflict.patch
@@ -0,0 +1,134 @@
+From a304784408adfaab3918d8578264c48004d4e81e Mon Sep 17 00:00:00 2001
+From: Beilei Xing <beilei.xing@intel.com>
+Date: Tue, 5 Jan 2021 11:12:56 +0800
+Subject: [FDIO] net/i40e: fix flex payload rule conflict
+
+With the following commands, the second flow can't
+be created successfully.
+
+1. flow create 0 ingress pattern eth / ipv4 / udp /
+   raw relative is 1 pattern is 0102030405 / end
+   actions drop / end
+2. flow destroy 0 rule 0
+3. flow create 0 ingress pattern eth / ipv4 / udp /
+   raw relative is 1 pattern is 010203040506 / end
+   actions drop / end
+
+The root cause is that a flag for flex pit isn't reset.
+
+Fixes: 6ced3dd72f5f ("net/i40e: support flexible payload parsing for FDIR")
+Cc: stable@dpdk.org
+
+Reported-by: Chenmin Sun <chenmin.sun@intel.com>
+Signed-off-by: Beilei Xing <beilei.xing@intel.com>
+Acked-by: Jeff Guo <jia.guo@intel.com>
+---
+ drivers/net/i40e/i40e_ethdev.h |  3 +++
+ drivers/net/i40e/i40e_fdir.c   | 19 ++++++++++++++++---
+ drivers/net/i40e/i40e_flow.c   |  4 ++++
+ 3 files changed, 23 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
+index 696c5aaf7..aac226999 100644
+--- a/drivers/net/i40e/i40e_ethdev.h
++++ b/drivers/net/i40e/i40e_ethdev.h
+@@ -636,6 +636,7 @@ struct i40e_fdir_flow_ext {
+ 	bool is_udp; /* ipv4|ipv6 udp flow */
+ 	enum i40e_flxpld_layer_idx layer_idx;
+ 	struct i40e_fdir_flex_pit flex_pit[I40E_MAX_FLXPLD_LAYER * I40E_MAX_FLXPLD_FIED];
++	bool is_flex_flow;
+ };
+
+ /* A structure used to define the input for a flow director filter entry */
+@@ -784,6 +785,8 @@ struct i40e_fdir_info {
+ 	bool flex_mask_flag[I40E_FILTER_PCTYPE_MAX];
+
+ 	bool inset_flag[I40E_FILTER_PCTYPE_MAX]; /* Mark if input set is set */
++
++	uint32_t flex_flow_count[I40E_MAX_FLXPLD_LAYER];
+ };
+
+ /* Ethertype filter number HW supports */
+diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
+index 50c0eee9f..0343e8b09 100644
+--- a/drivers/net/i40e/i40e_fdir.c
++++ b/drivers/net/i40e/i40e_fdir.c
+@@ -355,6 +355,7 @@ i40e_init_flx_pld(struct i40e_pf *pf)
+ 			I40E_PRTQF_FLX_PIT(index + 1), 0x0000FC29);/*non-used*/
+ 		I40E_WRITE_REG(hw,
+ 			I40E_PRTQF_FLX_PIT(index + 2), 0x0000FC2A);/*non-used*/
++		pf->fdir.flex_pit_flag[i] = 0;
+ 	}
+
+ 	/* initialize the masks */
+@@ -1513,8 +1514,6 @@ i40e_flow_set_fdir_flex_pit(struct i40e_pf *pf,
+ 		I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
+ 		min_next_off++;
+ 	}
+-
+-	pf->fdir.flex_pit_flag[layer_idx] = 1;
+ }
+
+ static int
+@@ -1686,7 +1685,7 @@ i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev,
+ 	i40e_fdir_filter_convert(filter, &check_filter);
+
+ 	if (add) {
+-		if (!filter->input.flow_ext.customized_pctype) {
++		if (filter->input.flow_ext.is_flex_flow) {
+ 			for (i = 0; i < filter->input.flow_ext.raw_id; i++) {
+ 				layer_idx = filter->input.flow_ext.layer_idx;
+ 				field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
+@@ -1738,6 +1737,9 @@ i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev,
+ 				fdir_info->fdir_guarantee_free_space > 0)
+ 			wait_status = false;
+ 	} else {
++		if (filter->input.flow_ext.is_flex_flow)
++			layer_idx = filter->input.flow_ext.layer_idx;
++
+ 		node = i40e_sw_fdir_filter_lookup(fdir_info,
+ 				&check_filter.fdir.input);
+ 		if (!node) {
+@@ -1785,6 +1787,17 @@ i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev,
+ 		goto error_op;
+ 	}
+
++	if (filter->input.flow_ext.is_flex_flow) {
++		if (add) {
++			fdir_info->flex_flow_count[layer_idx]++;
++			pf->fdir.flex_pit_flag[layer_idx] = 1;
++		} else {
++			fdir_info->flex_flow_count[layer_idx]--;
++			if (!fdir_info->flex_flow_count[layer_idx])
++				pf->fdir.flex_pit_flag[layer_idx] = 0;
++		}
++	}
++
+ 	if (add) {
+ 		fdir_info->fdir_actual_cnt++;
+ 		if (fdir_info->fdir_invalprio == 1 &&
+diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
+index b09ff6590..bbd666b7a 100644
+--- a/drivers/net/i40e/i40e_flow.c
++++ b/drivers/net/i40e/i40e_flow.c
+@@ -3069,6 +3069,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
+ 			       &flex_pit, sizeof(struct i40e_fdir_flex_pit));
+ 			filter->input.flow_ext.layer_idx = layer_idx;
+ 			filter->input.flow_ext.raw_id = raw_id;
++			filter->input.flow_ext.is_flex_flow = true;
+ 			break;
+ 		case RTE_FLOW_ITEM_TYPE_VF:
+ 			vf_spec = item->spec;
+@@ -5515,6 +5516,9 @@ i40e_flow_flush_fdir_filter(struct i40e_pf *pf)
+ 			pf->fdir.flex_mask_flag[pctype] = 0;
+ 		}
+
++		for (i = 0; i < I40E_MAX_FLXPLD_LAYER; i++)
++			pf->fdir.flex_pit_flag[i] = 0;
++
+ 		/* Disable FDIR processing as all FDIR rules are now flushed */
+ 		i40e_fdir_rx_proc_enable(dev, 0);
+ 	}
+--
+2.17.1
+