From f03d5a02fc2b3cc24bf059a273ea1473cdb9993b Mon Sep 17 00:00:00 2001
From: John Lo <loj@cisco.com>
Date: Tue, 7 Jun 2016 12:40:07 +0200
Subject: [PATCH 16/17] ENIC scatter RX

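Add receive-side scatter support to the ENIC PMD. Each user-visible
RX queue is backed by a pair of hardware RQs: a "start of packet"
(sop) RQ at index 2*i and a "data" RQ at index 2*i + 1 (see
enic_sop_rq() and enic_data_rq() below). The data RQ is enabled only
when an MTU-sized packet needs more than one mbuf; enic_recv_pkts()
then chains completions into an mbuf segment list through
pkt_first_seg/pkt_last_seg until the EOP flag is seen.

A minimal sketch (not part of the diff) of the sizing rule applied
in enic_alloc_rq(); the helper name is illustrative, the logic
mirrors the code below:

    /* Illustrative only -- mirrors the sizing logic in enic_alloc_rq(). */
    static unsigned int mbufs_per_pkt(unsigned int mtu, unsigned int mbuf_size)
    {
            /* ceil(mtu / mbuf_size): mbufs needed for one packet */
            return (mtu + mbuf_size - 1) / mbuf_size;
    }

    /* e.g. mbufs_per_pkt(9000, 2048) == 5 > 1, so the data RQ is in use;
     * with a 1500-byte MTU and 2048-byte buffers it is 1 and only the
     * sop RQ is used. */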
---
 drivers/net/enic/base/rq_enet_desc.h |   2 +-
 drivers/net/enic/base/vnic_rq.c      |  12 +-
 drivers/net/enic/base/vnic_rq.h      |  18 ++-
 drivers/net/enic/enic.h              |  10 ++
 drivers/net/enic/enic_main.c         | 236 +++++++++++++++++++++++++++--------
 drivers/net/enic/enic_rxtx.c         | 139 ++++++++++++++-------
 6 files changed, 313 insertions(+), 104 deletions(-)

diff --git a/drivers/net/enic/base/rq_enet_desc.h b/drivers/net/enic/base/rq_enet_desc.h
index 7292d9d..13e24b4 100644
--- a/drivers/net/enic/base/rq_enet_desc.h
+++ b/drivers/net/enic/base/rq_enet_desc.h
@@ -55,7 +55,7 @@ enum rq_enet_type_types {
 #define RQ_ENET_TYPE_BITS		2
 #define RQ_ENET_TYPE_MASK		((1 << RQ_ENET_TYPE_BITS) - 1)
 
-static inline void rq_enet_desc_enc(struct rq_enet_desc *desc,
+static inline void rq_enet_desc_enc(volatile struct rq_enet_desc *desc,
	u64 address, u8 type, u16 length)
 {
	desc->address = cpu_to_le64(address);
diff --git a/drivers/net/enic/base/vnic_rq.c b/drivers/net/enic/base/vnic_rq.c
index cb62c5e..d97f93e 100644
--- a/drivers/net/enic/base/vnic_rq.c
+++ b/drivers/net/enic/base/vnic_rq.c
@@ -84,11 +84,16 @@ void vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index,
	iowrite32(cq_index, &rq->ctrl->cq_index);
	iowrite32(error_interrupt_enable, &rq->ctrl->error_interrupt_enable);
	iowrite32(error_interrupt_offset, &rq->ctrl->error_interrupt_offset);
-	iowrite32(0, &rq->ctrl->dropped_packet_count);
	iowrite32(0, &rq->ctrl->error_status);
	iowrite32(fetch_index, &rq->ctrl->fetch_index);
	iowrite32(posted_index, &rq->ctrl->posted_index);
-
+	if (rq->is_sop) {
+//		printf("Writing 0x%x to %s rq\n",
+//		       ((rq->is_sop << 10) | rq->data_queue_idx),
+//		       rq->is_sop ? "sop":"data");
+		iowrite32(((rq->is_sop << 10) | rq->data_queue_idx),
+			  &rq->ctrl->data_ring);
+	}
 }
 
 void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
@@ -96,6 +101,7 @@ void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
	unsigned int error_interrupt_offset)
 {
	u32 fetch_index = 0;
+
	/* Use current fetch_index as the ring starting point */
	fetch_index = ioread32(&rq->ctrl->fetch_index);
 
@@ -110,6 +116,8 @@ void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
		      error_interrupt_offset);
	rq->rxst_idx = 0;
	rq->tot_pkts = 0;
+	rq->pkt_first_seg = NULL;
+	rq->pkt_last_seg = NULL;
 }
 
 void vnic_rq_error_out(struct vnic_rq *rq, unsigned int error)
diff --git a/drivers/net/enic/base/vnic_rq.h b/drivers/net/enic/base/vnic_rq.h
index 424415c..d1e2f52 100644
--- a/drivers/net/enic/base/vnic_rq.h
+++ b/drivers/net/enic/base/vnic_rq.h
@@ -60,10 +60,18 @@ struct vnic_rq_ctrl {
	u32 pad7;
	u32 error_status;		/* 0x48 */
	u32 pad8;
-	u32 dropped_packet_count;	/* 0x50 */
+	u32 tcp_sn;			/* 0x50 */
	u32 pad9;
-	u32 dropped_packet_count_rc;	/* 0x58 */
+	u32 unused;			/* 0x58 */
	u32 pad10;
+	u32 dca_select;			/* 0x60 */
+	u32 pad11;
+	u32 dca_value;			/* 0x68 */
+	u32 pad12;
+	u32 data_ring;			/* 0x70 */
+	u32 pad13;
+	u32 header_split;		/* 0x78 */
+	u32 pad14;
 };
 
 struct vnic_rq {
@@ -82,6 +90,12 @@ struct vnic_rq {
	struct rte_mempool *mp;
	uint16_t rxst_idx;
	uint32_t tot_pkts;
+	uint16_t data_queue_idx;
+	uint8_t is_sop;
+	uint8_t in_use;
+	struct rte_mbuf *pkt_first_seg;
+	struct rte_mbuf *pkt_last_seg;
+	unsigned int max_mbufs_per_pkt;
 };
 
 static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index 7c1b5c9..d2de6ee 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -142,6 +142,16 @@ struct enic {
	struct enic_soft_stats soft_stats;
 };
 
+static inline unsigned int enic_sop_rq(__rte_unused struct enic *enic, unsigned int rq)
+{
+	return rq * 2;
+}
+
+static inline unsigned int enic_data_rq(__rte_unused struct enic *enic, unsigned int rq)
+{
+	return rq * 2 + 1;
+}
+
 static inline unsigned int enic_cq_rq(__rte_unused struct enic *enic, unsigned int rq)
 {
	return rq;
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index a00565a..be17707 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -247,15 +247,23 @@ void enic_init_vnic_resources(struct enic *enic)
	unsigned int error_interrupt_offset = 0;
	unsigned int index = 0;
	unsigned int cq_idx;
+	struct vnic_rq *data_rq;
 
	vnic_dev_stats_clear(enic->vdev);
 
	for (index = 0; index < enic->rq_count; index++) {
-		vnic_rq_init(&enic->rq[index],
+		vnic_rq_init(&enic->rq[enic_sop_rq(enic, index)],
			enic_cq_rq(enic, index),
			error_interrupt_enable,
			error_interrupt_offset);
 
+		data_rq = &enic->rq[enic_data_rq(enic, index)];
+		if (data_rq->in_use)
+			vnic_rq_init(data_rq,
+				     enic_cq_rq(enic, index),
+				     error_interrupt_enable,
+				     error_interrupt_offset);
+
		cq_idx = enic_cq_rq(enic, index);
		vnic_cq_init(&enic->cq[cq_idx],
			0 /* flow_control_enable */,
@@ -305,6 +313,9 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
	unsigned i;
	dma_addr_t dma_addr;
 
+	if (!rq->in_use)
+		return 0;
+
	dev_debug(enic, "queue %u, allocating %u rx queue mbufs\n", rq->index,
		  rq->ring.desc_count);
 
@@ -316,20 +327,20 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
			return -ENOMEM;
		}
 
-		dma_addr = (dma_addr_t)(mb->buf_physaddr
-			   + RTE_PKTMBUF_HEADROOM);
-
-		rq_enet_desc_enc(rqd, dma_addr, RQ_ENET_TYPE_ONLY_SOP,
-				 mb->buf_len - RTE_PKTMBUF_HEADROOM);
+		dma_addr = (dma_addr_t)(mb->buf_physaddr + RTE_PKTMBUF_HEADROOM);
+		rq_enet_desc_enc(rqd, dma_addr,
+				 (rq->is_sop ? RQ_ENET_TYPE_ONLY_SOP
+				 : RQ_ENET_TYPE_NOT_SOP),
+				 mb->buf_len - RTE_PKTMBUF_HEADROOM);
		rq->mbuf_ring[i] = mb;
	}
 
	/* make sure all prior writes are complete before doing the PIO write */
	rte_rmb();
 
-	/* Post all but the last 2 cache lines' worth of descriptors */
-	rq->posted_index = rq->ring.desc_count - (2 * RTE_CACHE_LINE_SIZE
-			/ sizeof(struct rq_enet_desc));
+	/* Post all but the last buffer to VIC. */
+	rq->posted_index = rq->ring.desc_count - 1;
+
	rq->rx_nb_hold = 0;
 
	dev_debug(enic, "port=%u, qidx=%u, Write %u posted idx, %u sw held\n",
@@ -337,6 +348,8 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
	iowrite32(rq->posted_index, &rq->ctrl->posted_index);
	rte_rmb();
 
+//	printf("posted %d buffers to %s rq\n", rq->ring.desc_count,
+//	       rq->is_sop ? "sop" : "data");
	return 0;
 
 }
@@ -398,17 +411,25 @@ int enic_enable(struct enic *enic)
			"Flow director feature will not work\n");
 
	for (index = 0; index < enic->rq_count; index++) {
-		err = enic_alloc_rx_queue_mbufs(enic, &enic->rq[index]);
+		err = enic_alloc_rx_queue_mbufs(enic, &enic->rq[enic_sop_rq(enic, index)]);
		if (err) {
-			dev_err(enic, "Failed to alloc RX queue mbufs\n");
+			dev_err(enic, "Failed to alloc sop RX queue mbufs\n");
+			return err;
+		}
+		err = enic_alloc_rx_queue_mbufs(enic, &enic->rq[enic_data_rq(enic, index)]);
+		if (err) {
+			/* release the previously allocated mbufs for the sop rq */
+			enic_rxmbuf_queue_release(enic, &enic->rq[enic_sop_rq(enic, index)]);
+
+			dev_err(enic, "Failed to alloc data RX queue mbufs\n");
			return err;
		}
	}
 
	for (index = 0; index < enic->wq_count; index++)
-		vnic_wq_enable(&enic->wq[index]);
+		enic_start_wq(enic, index);
	for (index = 0; index < enic->rq_count; index++)
-		vnic_rq_enable(&enic->rq[index]);
+		enic_start_rq(enic, index);
 
	vnic_dev_enable_wait(enic->vdev);
 
@@ -440,14 +461,26 @@ int enic_alloc_intr_resources(struct enic *enic)
 
 void enic_free_rq(void *rxq)
 {
-	struct vnic_rq *rq = (struct vnic_rq *)rxq;
-	struct enic *enic = vnic_dev_priv(rq->vdev);
+	struct vnic_rq *rq_sop = (struct vnic_rq *)rxq;
+	struct enic *enic = vnic_dev_priv(rq_sop->vdev);
+	struct vnic_rq *rq_data = &enic->rq[rq_sop->data_queue_idx];
 
-	enic_rxmbuf_queue_release(enic, rq);
-	rte_free(rq->mbuf_ring);
-	rq->mbuf_ring = NULL;
-	vnic_rq_free(rq);
-	vnic_cq_free(&enic->cq[rq->index]);
+	enic_rxmbuf_queue_release(enic, rq_sop);
+	if (rq_data->in_use)
+		enic_rxmbuf_queue_release(enic, rq_data);
+
+	rte_free(rq_sop->mbuf_ring);
+	if (rq_data->in_use)
+		rte_free(rq_data->mbuf_ring);
+
+	rq_sop->mbuf_ring = NULL;
+	rq_data->mbuf_ring = NULL;
+
+	vnic_rq_free(rq_sop);
+	if (rq_data->in_use)
+		vnic_rq_free(rq_data);
+
+	vnic_cq_free(&enic->cq[rq_sop->index]);
 }
 
 void enic_start_wq(struct enic *enic, uint16_t queue_idx)
@@ -462,12 +495,32 @@ int enic_stop_wq(struct enic *enic, uint16_t queue_idx)
 
 void enic_start_rq(struct enic *enic, uint16_t queue_idx)
 {
-	vnic_rq_enable(&enic->rq[queue_idx]);
+	struct vnic_rq *rq_sop = &enic->rq[enic_sop_rq(enic, queue_idx)];
+	struct vnic_rq *rq_data = &enic->rq[rq_sop->data_queue_idx];
+
+	if (rq_data->in_use)
+		vnic_rq_enable(rq_data);
+	rte_mb();
+	vnic_rq_enable(rq_sop);
+
 }
 
 int enic_stop_rq(struct enic *enic, uint16_t queue_idx)
 {
-	return vnic_rq_disable(&enic->rq[queue_idx]);
+	int ret1 = 0, ret2 = 0;
+
+	struct vnic_rq *rq_sop = &enic->rq[enic_sop_rq(enic, queue_idx)];
+	struct vnic_rq *rq_data = &enic->rq[rq_sop->data_queue_idx];
+
+	ret2 = vnic_rq_disable(rq_sop);
+	rte_mb();
+	if (rq_data->in_use)
+		ret1 = vnic_rq_disable(rq_data);
+
+	if (ret2)
+		return ret2;
+	else
+		return ret1;
 }
 
 int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
@@ -475,53 +528,128 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
	uint16_t nb_desc)
 {
	int rc;
-	struct vnic_rq *rq = &enic->rq[queue_idx];
-
-	rq->socket_id = socket_id;
-	rq->mp = mp;
+	uint16_t sop_queue_idx = enic_sop_rq(enic, queue_idx);
+	uint16_t data_queue_idx = enic_data_rq(enic, queue_idx);
+	struct vnic_rq *rq_sop = &enic->rq[sop_queue_idx];
+	struct vnic_rq *rq_data = &enic->rq[data_queue_idx];
+	unsigned int mbuf_size, mbufs_per_pkt;
+	unsigned int nb_sop_desc, nb_data_desc;
+	uint16_t min_sop, max_sop, min_data, max_data;
+
+	rq_sop->is_sop = 1;
+	rq_sop->data_queue_idx = data_queue_idx;
+	rq_data->is_sop = 0;
+	rq_data->data_queue_idx = 0;
+	rq_sop->socket_id = socket_id;
+	rq_sop->mp = mp;
+	rq_data->socket_id = socket_id;
+	rq_data->mp = mp;
+	rq_sop->in_use = 1;
+
+	mbuf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM);
+
+	/* ceil(mtu/mbuf_size) */
+	mbufs_per_pkt = (enic->config.mtu + (mbuf_size - 1)) / mbuf_size;
+
+	if (mbufs_per_pkt > 1)
+		rq_data->in_use = 1;
+	else
+		rq_data->in_use = 0;
+
+	/* number of descriptors have to be a multiple of 32 */
+	nb_sop_desc = (nb_desc / mbufs_per_pkt) & ~0x1F;
+	nb_data_desc = (nb_desc - nb_sop_desc) & ~0x1F;
+
+	rq_sop->max_mbufs_per_pkt = mbufs_per_pkt;
+	rq_data->max_mbufs_per_pkt = mbufs_per_pkt;
+
+	//printf("mtu = %u, mbuf_size = %u, mbuf_per_pkt = %u\n",
+	//       enic->config.mtu, mbuf_size, mbufs_per_pkt);
+
+	if (mbufs_per_pkt > 1) {
+		min_sop = 64;
+		max_sop = ((enic->config.rq_desc_count / (mbufs_per_pkt - 1)) & ~0x1F);
+		min_data = min_sop * (mbufs_per_pkt - 1);
+		max_data = enic->config.rq_desc_count;
+	} else {
+		min_sop = 64;
+		max_sop = enic->config.rq_desc_count;
+		min_data = 0;
+		max_data = 0;
+	}
 
-	if (nb_desc) {
-		if (nb_desc > enic->config.rq_desc_count) {
-			dev_warning(enic,
-				"RQ %d - number of rx desc in cmd line (%d)"\
-				"is greater than that in the UCSM/CIMC adapter"\
-				"policy. Applying the value in the adapter "\
-				"policy (%d).\n",
-				queue_idx, nb_desc, enic->config.rq_desc_count);
-			nb_desc = enic->config.rq_desc_count;
-		}
-		dev_info(enic, "RX Queues - effective number of descs:%d\n",
-			 nb_desc);
+	if (nb_desc < (min_sop + min_data)) {
+		dev_warning(enic,
+			    "Number of rx descs too low, adjusting to minimum\n");
+		nb_sop_desc = min_sop;
+		nb_data_desc = min_data;
+	} else if (nb_desc > (max_sop + max_data)){
+		dev_warning(enic,
+			    "Number of rx_descs too high, adjusting to maximum\n");
+		nb_sop_desc = max_sop;
+		nb_data_desc = max_data;
	}
+	dev_info(enic, "For mtu %d and mbuf size %d valid rx descriptor range is %d to %d\n",
+		 enic->config.mtu, mbuf_size, min_sop + min_data, max_sop + max_data);
 
-	/* Allocate queue resources */
-	rc = vnic_rq_alloc(enic->vdev, rq, queue_idx,
-		nb_desc, sizeof(struct rq_enet_desc));
+	dev_info(enic, "Using %d rx descriptors (sop %d, data %d)\n",
+		 nb_sop_desc + nb_data_desc, nb_sop_desc, nb_data_desc);
+
+	/* Allocate sop queue resources */
+	rc = vnic_rq_alloc(enic->vdev, rq_sop, sop_queue_idx,
+			   nb_sop_desc, sizeof(struct rq_enet_desc));
	if (rc) {
-		dev_err(enic, "error in allocation of rq\n");
+		dev_err(enic, "error in allocation of sop rq\n");
		goto err_exit;
	}
-
+	nb_sop_desc = rq_sop->ring.desc_count;
+
+	if (rq_data->in_use) {
+		/* Allocate data queue resources */
+		rc = vnic_rq_alloc(enic->vdev, rq_data, data_queue_idx,
+				   nb_data_desc,
+				   sizeof(struct rq_enet_desc));
+		if (rc) {
+			dev_err(enic, "error in allocation of data rq\n");
+			goto err_free_rq_sop;
+		}
+		nb_data_desc = rq_data->ring.desc_count;
+	}
	rc = vnic_cq_alloc(enic->vdev, &enic->cq[queue_idx], queue_idx,
-		socket_id, nb_desc,
-		sizeof(struct cq_enet_rq_desc));
+			   socket_id, nb_sop_desc + nb_data_desc,
+			   sizeof(struct cq_enet_rq_desc));
	if (rc) {
		dev_err(enic, "error in allocation of cq for rq\n");
-		goto err_free_rq_exit;
+		goto err_free_rq_data;
	}
 
-	/* Allocate the mbuf ring */
-	rq->mbuf_ring = (struct rte_mbuf **)rte_zmalloc_socket("rq->mbuf_ring",
-			sizeof(struct rte_mbuf *) * nb_desc,
-			RTE_CACHE_LINE_SIZE, rq->socket_id);
+	/* Allocate the mbuf rings */
+	rq_sop->mbuf_ring = (struct rte_mbuf **)rte_zmalloc_socket("rq->mbuf_ring",
+			sizeof(struct rte_mbuf *) * nb_sop_desc,
+			RTE_CACHE_LINE_SIZE, rq_sop->socket_id);
+	if (rq_sop->mbuf_ring == NULL)
+		goto err_free_cq;
+
+	if (rq_data->in_use) {
+		rq_data->mbuf_ring = (struct rte_mbuf **)rte_zmalloc_socket("rq->mbuf_ring",
+				sizeof(struct rte_mbuf *) * nb_data_desc,
+				RTE_CACHE_LINE_SIZE, rq_sop->socket_id);
+		if (rq_data->mbuf_ring == NULL)
+			goto err_free_sop_mbuf;
+	}
 
-	if (rq->mbuf_ring != NULL)
-		return 0;
+	return 0;
 
+err_free_sop_mbuf:
+	rte_free(rq_sop->mbuf_ring);
+err_free_cq:
	/* cleanup on error */
	vnic_cq_free(&enic->cq[queue_idx]);
-err_free_rq_exit:
-	vnic_rq_free(rq);
+err_free_rq_data:
+	if (rq_data->in_use)
+		vnic_rq_free(rq_data);
+err_free_rq_sop:
+	vnic_rq_free(rq_sop);
 err_exit:
	return -ENOMEM;
 }
diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c
index 174486b..463b954 100644
--- a/drivers/net/enic/enic_rxtx.c
+++ b/drivers/net/enic/enic_rxtx.c
@@ -242,22 +242,27 @@ uint16_t
 enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
	       uint16_t nb_pkts)
 {
-	struct vnic_rq *rq = rx_queue;
-	struct enic *enic = vnic_dev_priv(rq->vdev);
-	unsigned int rx_id;
+	struct vnic_rq *sop_rq = rx_queue;
+	struct vnic_rq *data_rq;
+	struct vnic_rq *rq;
+	struct enic *enic = vnic_dev_priv(sop_rq->vdev);
+	uint16_t cq_idx;
+	uint16_t rq_idx;
+	uint16_t rq_num;
	struct rte_mbuf *nmb, *rxmb;
	uint16_t nb_rx = 0;
-	uint16_t nb_hold;
	struct vnic_cq *cq;
	volatile struct cq_desc *cqd_ptr;
	uint8_t color;
-	uint16_t nb_err = 0;
+	uint16_t seg_length;
+	struct rte_mbuf *first_seg = sop_rq->pkt_first_seg;
+	struct rte_mbuf *last_seg = sop_rq->pkt_last_seg;
 
-	cq = &enic->cq[enic_cq_rq(enic, rq->index)];
-	rx_id = cq->to_clean;		/* index of cqd, rqd, mbuf_table */
-	cqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id;
+	cq = &enic->cq[enic_cq_rq(enic, sop_rq->index)];
+	cq_idx = cq->to_clean;		/* index of cqd, rqd, mbuf_table */
+	cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx;
 
-	nb_hold = rq->rx_nb_hold;	/* mbufs held by software */
+	data_rq = &enic->rq[sop_rq->data_queue_idx];
 
	while (nb_rx < nb_pkts) {
		volatile struct rq_enet_desc *rqd_ptr;
@@ -265,6 +270,7 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
		struct cq_desc cqd;
		uint64_t ol_err_flags;
		uint8_t packet_error;
+		uint16_t ciflags;
 
		/* Check for pkts available */
		color = (cqd_ptr->type_color >> CQ_DESC_COLOR_SHIFT)
@@ -272,9 +278,13 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
		if (color == cq->last_color)
			break;
 
-		/* Get the cq descriptor and rq pointer */
+		/* Get the cq descriptor and extract rq info from it */
		cqd = *cqd_ptr;
-		rqd_ptr = (struct rq_enet_desc *)(rq->ring.descs) + rx_id;
+		rq_num = cqd.q_number & CQ_DESC_Q_NUM_MASK;
+		rq_idx = cqd.completed_index & CQ_DESC_COMP_NDX_MASK;
+
+		rq = &enic->rq[rq_num];
+		rqd_ptr = ((struct rq_enet_desc *)rq->ring.descs) + rq_idx;
 
		/* allocate a new mbuf */
		nmb = rte_rxmbuf_alloc(rq->mp);
@@ -287,67 +297,106 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
		packet_error = enic_cq_rx_to_pkt_err_flags(&cqd, &ol_err_flags);
 
		/* Get the mbuf to return and replace with one just allocated */
-		rxmb = rq->mbuf_ring[rx_id];
-		rq->mbuf_ring[rx_id] = nmb;
+		rxmb = rq->mbuf_ring[rq_idx];
+		rq->mbuf_ring[rq_idx] = nmb;
 
		/* Increment cqd, rqd, mbuf_table index */
-		rx_id++;
-		if (unlikely(rx_id == rq->ring.desc_count)) {
-			rx_id = 0;
+		cq_idx++;
+		if (unlikely(cq_idx == cq->ring.desc_count)) {
+			cq_idx = 0;
			cq->last_color = cq->last_color ? 0 : 1;
		}
 
		/* Prefetch next mbuf & desc while processing current one */
-		cqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id;
+		cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx;
		rte_enic_prefetch(cqd_ptr);
-		rte_enic_prefetch(rq->mbuf_ring[rx_id]);
-		rte_enic_prefetch((struct rq_enet_desc *)(rq->ring.descs)
-				  + rx_id);
+//		rte_enic_prefetch(rq->mbuf_ring[rx_id]);
+//		rte_enic_prefetch((struct rq_enet_desc *)(rq->ring.descs)
+//				  + rx_id);
+
+		ciflags = enic_cq_rx_desc_ciflags((struct cq_enet_rq_desc *) &cqd);
 
		/* Push descriptor for newly allocated mbuf */
-		dma_addr = (dma_addr_t)(nmb->buf_physaddr
-			   + RTE_PKTMBUF_HEADROOM);
-		rqd_ptr->address = rte_cpu_to_le_64(dma_addr);
-		rqd_ptr->length_type = cpu_to_le16(nmb->buf_len
-				       - RTE_PKTMBUF_HEADROOM);
+
+		dma_addr = (dma_addr_t)(nmb->buf_physaddr + RTE_PKTMBUF_HEADROOM);
+		rq_enet_desc_enc(rqd_ptr, dma_addr,
+				 (rq->is_sop ? RQ_ENET_TYPE_ONLY_SOP
+				 : RQ_ENET_TYPE_NOT_SOP),
+				 nmb->buf_len - RTE_PKTMBUF_HEADROOM);
 
		/* Fill in the rest of the mbuf */
-		rxmb->data_off = RTE_PKTMBUF_HEADROOM;
-		rxmb->nb_segs = 1;
+		seg_length = enic_cq_rx_desc_n_bytes(&cqd);
+		rxmb->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd);
+		enic_cq_rx_to_pkt_flags(&cqd, rxmb);
+		if (rq->is_sop) {
+			first_seg = rxmb;
+			first_seg->nb_segs = 1;
+			first_seg->pkt_len = seg_length;
+		} else {
+			first_seg->pkt_len = (uint16_t)(first_seg->pkt_len
+							+ seg_length);
+			first_seg->nb_segs++;
+			last_seg->next = rxmb;
+		}
+
		rxmb->next = NULL;
		rxmb->port = enic->port_id;
-		if (!packet_error) {
-			rxmb->pkt_len = enic_cq_rx_desc_n_bytes(&cqd);
-			rxmb->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd);
-			enic_cq_rx_to_pkt_flags(&cqd, rxmb);
-		} else {
-			rte_pktmbuf_free(rxmb);
+		rxmb->data_len = seg_length;
+
+		rq->rx_nb_hold++;
+
+		if (!(enic_cq_rx_desc_eop(ciflags))) {
+			last_seg = rxmb;
+			continue;
+		}
+
+		if (unlikely(packet_error)) {
+			rte_pktmbuf_free(first_seg);
			rte_atomic64_inc(&enic->soft_stats.rx_packet_errors);
-			nb_err++;
+
			continue;
		}
-		rxmb->data_len = rxmb->pkt_len;
+
+
+//		printf("EOP: final packet length is %d\n", first_seg->pkt_len);
+//		rte_pktmbuf_dump(stdout, first_seg, 64);
 
		/* prefetch mbuf data for caller */
-		rte_packet_prefetch(RTE_PTR_ADD(rxmb->buf_addr,
+		rte_packet_prefetch(RTE_PTR_ADD(first_seg->buf_addr,
				    RTE_PKTMBUF_HEADROOM));
 
		/* store the mbuf address into the next entry of the array */
-		rx_pkts[nb_rx++] = rxmb;
	}
 
-	nb_hold += nb_rx + nb_err;
-	cq->to_clean = rx_id;
+		rx_pkts[nb_rx++] = first_seg;
+	}
+
+	sop_rq->pkt_first_seg = first_seg;
+	sop_rq->pkt_last_seg = last_seg;
+
+	cq->to_clean = cq_idx;
+
+	if ((sop_rq->rx_nb_hold + data_rq->rx_nb_hold) > sop_rq->rx_free_thresh) {
+		if (data_rq->in_use) {
+			data_rq->posted_index = enic_ring_add(data_rq->ring.desc_count,
+							      data_rq->posted_index,
+							      data_rq->rx_nb_hold);
+			//printf("Processed %d data descs. Posted index now %d\n",
+			//       data_rq->rx_nb_hold, data_rq->posted_index);
+			data_rq->rx_nb_hold = 0;
+		}
+		sop_rq->posted_index = enic_ring_add(sop_rq->ring.desc_count,
+						     sop_rq->posted_index,
+						     sop_rq->rx_nb_hold);
+		//printf("Processed %d sop descs. Posted index now %d\n",
+		//       sop_rq->rx_nb_hold, sop_rq->posted_index);
+		sop_rq->rx_nb_hold = 0;
 
-	if (nb_hold > rq->rx_free_thresh) {
-		rq->posted_index = enic_ring_add(rq->ring.desc_count,
-				rq->posted_index, nb_hold);
-		nb_hold = 0;
		rte_mb();
-		iowrite32(rq->posted_index, &rq->ctrl->posted_index);
+		if (data_rq->in_use)
+			iowrite32(data_rq->posted_index, &data_rq->ctrl->posted_index);
+		rte_compiler_barrier();
+		iowrite32(sop_rq->posted_index, &sop_rq->ctrl->posted_index);
	}
 
-	rq->rx_nb_hold = nb_hold;
 
	return nb_rx;
 }
-- 
2.7.4