From 4e1872a43b3ad824e37f840c9ed1e0c1f1b24a32 Mon Sep 17 00:00:00 2001
From: John Daley <johndale@cisco.com>
Date: Tue, 5 Apr 2016 15:19:06 -0700
Subject: [PATCH 12/17] enic: Optimization of Tx path to reduce Host CPU
 overhead, cleanup

Optimizations and cleanup:
- flatten the packet send path
- flatten the mbuf free path
- disable CQ entry writing and use CQ messages instead
- use rte_mempool_put_bulk() to return freed mbufs in bulk
- remove unnecessary fields from the vnic_wq_buf struct and use a contiguous
  array of cache-aligned elements with no next pointers
- use local variables inside the per-packet loop instead of fields in structs
- factor bookkeeping out of the per-packet Tx loop where possible
  (removes several conditionals)
- put Tx and Rx code in one file (enic_rxtx.c)

A minimal sketch of the flattened head/tail bookkeeping is included after the
diffstat below.

Reviewed-by: Nelson Escobar <neescoba@cisco.com>
Signed-off-by: John Daley <johndale@cisco.com>
---
 drivers/net/enic/Makefile            |   2 +-
 drivers/net/enic/base/enic_vnic_wq.h |  79 ------
 drivers/net/enic/base/vnic_cq.h      |  37 +--
 drivers/net/enic/base/vnic_rq.h      |   2 +-
 drivers/net/enic/base/vnic_wq.c      |  89 +++---
 drivers/net/enic/base/vnic_wq.h      | 113 +-------
 drivers/net/enic/enic.h              |  27 +-
 drivers/net/enic/enic_ethdev.c       |  67 +----
 drivers/net/enic/enic_main.c         | 132 +++------
 drivers/net/enic/enic_res.h          |  81 +-----
 drivers/net/enic/enic_rx.c           | 361 -------------------------
 drivers/net/enic/enic_rxtx.c         | 505 +++++++++++++++++++++++++++++++++++
 12 files changed, 635 insertions(+), 860 deletions(-)
 delete mode 100644 drivers/net/enic/base/enic_vnic_wq.h
 delete mode 100644 drivers/net/enic/enic_rx.c
 create mode 100644 drivers/net/enic/enic_rxtx.c

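The heart of the rework shows up in the vnic_wq changes below: the
per-descriptor bookkeeping becomes a flat, cache-aligned array walked with
head/tail indices instead of a block-allocated linked list. The following
standalone sketch (plain C; the sketch_* names are invented for illustration
and are not part of the patch) shows the shape of that bookkeeping:

    /* Standalone sketch of flat head/tail WQ bookkeeping (illustration only). */
    #include <stdio.h>
    #include <stdlib.h>

    struct sketch_buf {
            void *mb;                 /* packet buffer posted in this slot */
    };

    struct sketch_wq {
            struct sketch_buf *bufs;  /* contiguous array, one entry per descriptor */
            unsigned int desc_count;
            unsigned int head_idx;    /* next slot to post into */
            unsigned int tail_idx;    /* next slot to reclaim */
    };

    static unsigned int ring_incr(unsigned int count, unsigned int idx)
    {
            idx++;
            return (idx == count) ? 0 : idx;
    }

    int main(void)
    {
            struct sketch_wq wq = { NULL, 8, 0, 0 };
            int pkts[6];
            int i;

            wq.bufs = calloc(wq.desc_count, sizeof(*wq.bufs));
            if (wq.bufs == NULL)
                    return 1;

            for (i = 0; i < 6; i++) {             /* post six packets */
                    wq.bufs[wq.head_idx].mb = &pkts[i];
                    wq.head_idx = ring_incr(wq.desc_count, wq.head_idx);
            }
            for (i = 0; i < 4; i++) {             /* reclaim four completed slots */
                    wq.bufs[wq.tail_idx].mb = NULL;
                    wq.tail_idx = ring_incr(wq.desc_count, wq.tail_idx);
            }
            printf("head=%u tail=%u in-flight=%u\n", wq.head_idx, wq.tail_idx,
                   wq.head_idx - wq.tail_idx);
            free(wq.bufs);
            return 0;
    }

Because every slot is just an array element, posting and freeing are index
increments with a wrap check, which is what removes the next-pointer chasing
called out in the log above.
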
39diff --git a/drivers/net/enic/Makefile b/drivers/net/enic/Makefile
40index f316274..3926b79 100644
41--- a/drivers/net/enic/Makefile
42+++ b/drivers/net/enic/Makefile
43@@ -53,7 +53,7 @@ VPATH += $(SRCDIR)/src
44 #
45 SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_ethdev.c
46 SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_main.c
47-SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_rx.c
48+SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_rxtx.c
49 SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_clsf.c
50 SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_res.c
51 SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += base/vnic_cq.c
52diff --git a/drivers/net/enic/base/enic_vnic_wq.h b/drivers/net/enic/base/enic_vnic_wq.h
53deleted file mode 100644
54index b019109..0000000
55--- a/drivers/net/enic/base/enic_vnic_wq.h
56+++ /dev/null
57@@ -1,79 +0,0 @@
58-/*
59- * Copyright 2008-2015 Cisco Systems, Inc. All rights reserved.
60- * Copyright 2007 Nuova Systems, Inc. All rights reserved.
61- *
62- * Copyright (c) 2015, Cisco Systems, Inc.
63- * All rights reserved.
64- *
65- * Redistribution and use in source and binary forms, with or without
66- * modification, are permitted provided that the following conditions
67- * are met:
68- *
69- * 1. Redistributions of source code must retain the above copyright
70- * notice, this list of conditions and the following disclaimer.
71- *
72- * 2. Redistributions in binary form must reproduce the above copyright
73- * notice, this list of conditions and the following disclaimer in
74- * the documentation and/or other materials provided with the
75- * distribution.
76- *
77- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
80- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
81- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
82- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
83- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
84- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
85- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
86- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
87- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
88- * POSSIBILITY OF SUCH DAMAGE.
89- *
90- */
91-
92-#ifndef _ENIC_VNIC_WQ_H_
93-#define _ENIC_VNIC_WQ_H_
94-
95-#include "vnic_dev.h"
96-#include "vnic_cq.h"
97-
98-static inline void enic_vnic_post_wq_index(struct vnic_wq *wq)
99-{
100- struct vnic_wq_buf *buf = wq->to_use;
101-
102- /* Adding write memory barrier prevents compiler and/or CPU
103- * reordering, thus avoiding descriptor posting before
104- * descriptor is initialized. Otherwise, hardware can read
105- * stale descriptor fields.
106- */
107- wmb();
108- iowrite32(buf->index, &wq->ctrl->posted_index);
109-}
110-
111-static inline void enic_vnic_post_wq(struct vnic_wq *wq,
112- void *os_buf, dma_addr_t dma_addr,
113- unsigned int len, int sop,
114- uint8_t desc_skip_cnt, uint8_t cq_entry,
115- uint8_t compressed_send, uint64_t wrid)
116-{
117- struct vnic_wq_buf *buf = wq->to_use;
118-
119- buf->sop = sop;
120- buf->cq_entry = cq_entry;
121- buf->compressed_send = compressed_send;
122- buf->desc_skip_cnt = desc_skip_cnt;
123- buf->os_buf = os_buf;
124- buf->dma_addr = dma_addr;
125- buf->len = len;
126- buf->wr_id = wrid;
127-
128- buf = buf->next;
129- wq->ring.desc_avail -= desc_skip_cnt;
130- wq->to_use = buf;
131-
132- if (cq_entry)
133- enic_vnic_post_wq_index(wq);
134-}
135-
136-#endif /* _ENIC_VNIC_WQ_H_ */
137diff --git a/drivers/net/enic/base/vnic_cq.h b/drivers/net/enic/base/vnic_cq.h
138index 922391b..ffc1aaa 100644
139--- a/drivers/net/enic/base/vnic_cq.h
140+++ b/drivers/net/enic/base/vnic_cq.h
141@@ -96,41 +96,46 @@ static inline unsigned int vnic_cq_service(struct vnic_cq *cq,
142 u8 type, u16 q_number, u16 completed_index, void *opaque),
143 void *opaque)
144 {
145- struct cq_desc *cq_desc;
146+ struct cq_desc *cq_desc, *cq_desc_last;
147 unsigned int work_done = 0;
148 u16 q_number, completed_index;
149- u8 type, color;
150- struct rte_mbuf **rx_pkts = opaque;
151- unsigned int ret;
152+ u8 type, color, type_color;
153
154 cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
155 cq->ring.desc_size * cq->to_clean);
156- cq_desc_dec(cq_desc, &type, &color,
157- &q_number, &completed_index);
158+
159+ type_color = cq_desc->type_color;
160+ color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;
161+ if (color == cq->last_color)
162+ return 0;
163
164 while (color != cq->last_color) {
165- if (opaque)
166- opaque = (void *)&(rx_pkts[work_done]);
167+ cq_desc_last = cq_desc;
168
169- ret = (*q_service)(cq->vdev, cq_desc, type,
170- q_number, completed_index, opaque);
171 cq->to_clean++;
172 if (cq->to_clean == cq->ring.desc_count) {
173 cq->to_clean = 0;
174 cq->last_color = cq->last_color ? 0 : 1;
175 }
176
177+ work_done++;
178+ if (work_done >= work_to_do)
179+ break;
180+
181 cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
182 cq->ring.desc_size * cq->to_clean);
183- cq_desc_dec(cq_desc, &type, &color,
184- &q_number, &completed_index);
185
186- if (ret)
187- work_done++;
188- if (work_done >= work_to_do)
189- break;
190+ type_color = cq_desc->type_color;
191+ color = (type_color >> CQ_DESC_COLOR_SHIFT)
192+ & CQ_DESC_COLOR_MASK;
193+
194 }
195
196+ cq_desc_dec(cq_desc_last, &type, &color,
197+ &q_number, &completed_index);
198+
199+ (*q_service)(cq->vdev, cq_desc, type,
200+ q_number, completed_index, opaque);
201 return work_done;
202 }
203
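The rewritten vnic_cq_service() above stops decoding every completion
descriptor. It relies on the color bit: the adapter flips the color it writes
each time it wraps the ring, so software only compares an entry's color
against the color it last consumed, counts the new entries, and decodes just
the final one. A small simulation of that convention (the descriptor layout
here is a deliberate simplification, not the real cq_desc):

    /* Simulation of color-bit completion polling (simplified, illustrative). */
    #include <stdio.h>
    #include <stdint.h>

    #define CQ_ENTRIES 4

    struct sketch_cq_desc {
            uint8_t color;            /* written by the "adapter", flips per ring pass */
    };

    int main(void)
    {
            /* The adapter has filled the first two slots of a fresh ring (color 1). */
            struct sketch_cq_desc ring[CQ_ENTRIES] = { {1}, {1}, {0}, {0} };
            unsigned int to_clean = 0;
            uint8_t last_color = 0;   /* color of entries already consumed */
            unsigned int new_completions = 0;

            /* An entry is new as long as its color differs from last_color. */
            while (ring[to_clean].color != last_color) {
                    new_completions++;
                    to_clean++;
                    if (to_clean == CQ_ENTRIES) {
                            to_clean = 0;
                            last_color ^= 1;  /* expect flipped color on the next pass */
                    }
            }
            printf("%u new completions, next slot to poll: %u\n",
                   new_completions, to_clean);
            return 0;
    }

Handing only the last descriptor to the service callback is what lets the Tx
cleanup batch its work instead of paying the decode cost per completion.
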
204diff --git a/drivers/net/enic/base/vnic_rq.h b/drivers/net/enic/base/vnic_rq.h
205index e083ccc..424415c 100644
206--- a/drivers/net/enic/base/vnic_rq.h
207+++ b/drivers/net/enic/base/vnic_rq.h
208@@ -74,7 +74,7 @@ struct vnic_rq {
209 struct vnic_dev_ring ring;
210 struct rte_mbuf **mbuf_ring; /* array of allocated mbufs */
211 unsigned int mbuf_next_idx; /* next mb to consume */
212- void *os_buf_head;
213+ void *mb_head;
214 unsigned int pkts_outstanding;
215 uint16_t rx_nb_hold;
216 uint16_t rx_free_thresh;
217diff --git a/drivers/net/enic/base/vnic_wq.c b/drivers/net/enic/base/vnic_wq.c
218index a3ef417..ccbbd61 100644
219--- a/drivers/net/enic/base/vnic_wq.c
220+++ b/drivers/net/enic/base/vnic_wq.c
221@@ -59,71 +59,30 @@ int vnic_wq_alloc_ring(struct vnic_dev *vdev, struct vnic_wq *wq,
222
223 static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
224 {
225- struct vnic_wq_buf *buf;
226- unsigned int i, j, count = wq->ring.desc_count;
227- unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count);
228-
229- for (i = 0; i < blks; i++) {
230- wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ(count), GFP_ATOMIC);
231- if (!wq->bufs[i])
232- return -ENOMEM;
233- }
234-
235- for (i = 0; i < blks; i++) {
236- buf = wq->bufs[i];
237- for (j = 0; j < VNIC_WQ_BUF_BLK_ENTRIES(count); j++) {
238- buf->index = i * VNIC_WQ_BUF_BLK_ENTRIES(count) + j;
239- buf->desc = (u8 *)wq->ring.descs +
240- wq->ring.desc_size * buf->index;
241- if (buf->index + 1 == count) {
242- buf->next = wq->bufs[0];
243- break;
244- } else if (j + 1 == VNIC_WQ_BUF_BLK_ENTRIES(count)) {
245- buf->next = wq->bufs[i + 1];
246- } else {
247- buf->next = buf + 1;
248- buf++;
249- }
250- }
251- }
252-
253- wq->to_use = wq->to_clean = wq->bufs[0];
254-
255+ unsigned int count = wq->ring.desc_count;
256+ /* Allocate the mbuf ring */
257+ wq->bufs = (struct vnic_wq_buf *)rte_zmalloc_socket("wq->bufs",
258+ sizeof(struct vnic_wq_buf) * count,
259+ RTE_CACHE_LINE_SIZE, wq->socket_id);
260+ wq->head_idx = 0;
261+ wq->tail_idx = 0;
262+ if (wq->bufs == NULL)
263+ return -ENOMEM;
264 return 0;
265 }
266
267 void vnic_wq_free(struct vnic_wq *wq)
268 {
269 struct vnic_dev *vdev;
270- unsigned int i;
271
272 vdev = wq->vdev;
273
274 vnic_dev_free_desc_ring(vdev, &wq->ring);
275
276- for (i = 0; i < VNIC_WQ_BUF_BLKS_MAX; i++) {
277- if (wq->bufs[i]) {
278- kfree(wq->bufs[i]);
279- wq->bufs[i] = NULL;
280- }
281- }
282-
283+ rte_free(wq->bufs);
284 wq->ctrl = NULL;
285 }
286
287-int vnic_wq_mem_size(struct vnic_wq *wq, unsigned int desc_count,
288- unsigned int desc_size)
289-{
290- int mem_size = 0;
291-
292- mem_size += vnic_dev_desc_ring_size(&wq->ring, desc_count, desc_size);
293-
294- mem_size += VNIC_WQ_BUF_BLKS_NEEDED(wq->ring.desc_count) *
295- VNIC_WQ_BUF_BLK_SZ(wq->ring.desc_count);
296-
297- return mem_size;
298-}
299-
300
301 int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
302 unsigned int desc_count, unsigned int desc_size)
303@@ -172,9 +131,8 @@ void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
304 iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset);
305 iowrite32(0, &wq->ctrl->error_status);
306
307- wq->to_use = wq->to_clean =
308- &wq->bufs[fetch_index / VNIC_WQ_BUF_BLK_ENTRIES(count)]
309- [fetch_index % VNIC_WQ_BUF_BLK_ENTRIES(count)];
310+ wq->head_idx = fetch_index;
311+ wq->tail_idx = wq->head_idx;
312 }
313
314 void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
315@@ -184,6 +142,7 @@ void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
316 vnic_wq_init_start(wq, cq_index, 0, 0,
317 error_interrupt_enable,
318 error_interrupt_offset);
319+ wq->last_completed_index = 0;
320 }
321
322 void vnic_wq_error_out(struct vnic_wq *wq, unsigned int error)
323@@ -219,22 +178,34 @@ int vnic_wq_disable(struct vnic_wq *wq)
324 return -ETIMEDOUT;
325 }
326
327+static inline uint32_t
328+buf_idx_incr(uint32_t n_descriptors, uint32_t idx)
329+{
330+ idx++;
331+ if (unlikely(idx == n_descriptors))
332+ idx = 0;
333+ return idx;
334+}
335+
336 void vnic_wq_clean(struct vnic_wq *wq,
337- void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf))
338+ void (*buf_clean)(struct vnic_wq_buf *buf))
339 {
340 struct vnic_wq_buf *buf;
341+ unsigned int to_clean = wq->tail_idx;
342
343- buf = wq->to_clean;
344+ buf = &wq->bufs[to_clean];
345
346 while (vnic_wq_desc_used(wq) > 0) {
347
348- (*buf_clean)(wq, buf);
349+ (*buf_clean)(buf);
350+ to_clean = buf_idx_incr(wq->ring.desc_count, to_clean);
351
352- buf = wq->to_clean = buf->next;
353+ buf = &wq->bufs[to_clean];
354 wq->ring.desc_avail++;
355 }
356
357- wq->to_use = wq->to_clean = wq->bufs[0];
358+ wq->head_idx = 0;
359+ wq->tail_idx = 0;
360
361 iowrite32(0, &wq->ctrl->fetch_index);
362 iowrite32(0, &wq->ctrl->posted_index);
363diff --git a/drivers/net/enic/base/vnic_wq.h b/drivers/net/enic/base/vnic_wq.h
364index c23de62..37c3ff9 100644
365--- a/drivers/net/enic/base/vnic_wq.h
366+++ b/drivers/net/enic/base/vnic_wq.h
367@@ -64,42 +64,23 @@ struct vnic_wq_ctrl {
368 u32 pad9;
369 };
370
371+/* 16 bytes */
372 struct vnic_wq_buf {
373- struct vnic_wq_buf *next;
374- dma_addr_t dma_addr;
375- void *os_buf;
376- unsigned int len;
377- unsigned int index;
378- int sop;
379- void *desc;
380- uint64_t wr_id; /* Cookie */
381- uint8_t cq_entry; /* Gets completion event from hw */
382- uint8_t desc_skip_cnt; /* Num descs to occupy */
383- uint8_t compressed_send; /* Both hdr and payload in one desc */
384+ struct rte_mempool *pool;
385+ void *mb;
386 };
387
388-/* Break the vnic_wq_buf allocations into blocks of 32/64 entries */
389-#define VNIC_WQ_BUF_MIN_BLK_ENTRIES 32
390-#define VNIC_WQ_BUF_DFLT_BLK_ENTRIES 64
391-#define VNIC_WQ_BUF_BLK_ENTRIES(entries) \
392- ((unsigned int)((entries < VNIC_WQ_BUF_DFLT_BLK_ENTRIES) ? \
393- VNIC_WQ_BUF_MIN_BLK_ENTRIES : VNIC_WQ_BUF_DFLT_BLK_ENTRIES))
394-#define VNIC_WQ_BUF_BLK_SZ(entries) \
395- (VNIC_WQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_wq_buf))
396-#define VNIC_WQ_BUF_BLKS_NEEDED(entries) \
397- DIV_ROUND_UP(entries, VNIC_WQ_BUF_BLK_ENTRIES(entries))
398-#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(4096)
399-
400 struct vnic_wq {
401 unsigned int index;
402 struct vnic_dev *vdev;
403 struct vnic_wq_ctrl __iomem *ctrl; /* memory-mapped */
404 struct vnic_dev_ring ring;
405- struct vnic_wq_buf *bufs[VNIC_WQ_BUF_BLKS_MAX];
406- struct vnic_wq_buf *to_use;
407- struct vnic_wq_buf *to_clean;
408- unsigned int pkts_outstanding;
409+ struct vnic_wq_buf *bufs;
410+ unsigned int head_idx;
411+ unsigned int tail_idx;
412 unsigned int socket_id;
413+ const struct rte_memzone *cqmsg_rz;
414+ uint16_t last_completed_index;
415 };
416
417 static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq)
418@@ -114,11 +95,6 @@ static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq)
419 return wq->ring.desc_count - wq->ring.desc_avail - 1;
420 }
421
422-static inline void *vnic_wq_next_desc(struct vnic_wq *wq)
423-{
424- return wq->to_use->desc;
425-}
426-
427 #define PI_LOG2_CACHE_LINE_SIZE 5
428 #define PI_INDEX_BITS 12
429 #define PI_INDEX_MASK ((1U << PI_INDEX_BITS) - 1)
430@@ -191,75 +167,6 @@ static inline u64 vnic_cached_posted_index(dma_addr_t addr, unsigned int len,
431 PI_PREFETCH_ADDR_MASK) << PI_PREFETCH_ADDR_OFF);
432 }
433
434-static inline void vnic_wq_post(struct vnic_wq *wq,
435- void *os_buf, dma_addr_t dma_addr,
436- unsigned int len, int sop, int eop,
437- uint8_t desc_skip_cnt, uint8_t cq_entry,
438- uint8_t compressed_send, uint64_t wrid)
439-{
440- struct vnic_wq_buf *buf = wq->to_use;
441-
442- buf->sop = sop;
443- buf->cq_entry = cq_entry;
444- buf->compressed_send = compressed_send;
445- buf->desc_skip_cnt = desc_skip_cnt;
446- buf->os_buf = os_buf;
447- buf->dma_addr = dma_addr;
448- buf->len = len;
449- buf->wr_id = wrid;
450-
451- buf = buf->next;
452- if (eop) {
453-#ifdef DO_PREFETCH
454- uint64_t wr = vnic_cached_posted_index(dma_addr, len,
455- buf->index);
456-#endif
457- /* Adding write memory barrier prevents compiler and/or CPU
458- * reordering, thus avoiding descriptor posting before
459- * descriptor is initialized. Otherwise, hardware can read
460- * stale descriptor fields.
461- */
462- wmb();
463-#ifdef DO_PREFETCH
464- /* Intel chipsets seem to limit the rate of PIOs that we can
465- * push on the bus. Thus, it is very important to do a single
466- * 64 bit write here. With two 32-bit writes, my maximum
467- * pkt/sec rate was cut almost in half. -AJF
468- */
469- iowrite64((uint64_t)wr, &wq->ctrl->posted_index);
470-#else
471- iowrite32(buf->index, &wq->ctrl->posted_index);
472-#endif
473- }
474- wq->to_use = buf;
475-
476- wq->ring.desc_avail -= desc_skip_cnt;
477-}
478-
479-static inline void vnic_wq_service(struct vnic_wq *wq,
480- struct cq_desc *cq_desc, u16 completed_index,
481- void (*buf_service)(struct vnic_wq *wq,
482- struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque),
483- void *opaque)
484-{
485- struct vnic_wq_buf *buf;
486-
487- buf = wq->to_clean;
488- while (1) {
489-
490- (*buf_service)(wq, cq_desc, buf, opaque);
491-
492- wq->ring.desc_avail++;
493-
494- wq->to_clean = buf->next;
495-
496- if (buf->index == completed_index)
497- break;
498-
499- buf = wq->to_clean;
500- }
501-}
502-
503 void vnic_wq_free(struct vnic_wq *wq);
504 int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
505 unsigned int desc_count, unsigned int desc_size);
506@@ -275,8 +182,6 @@ unsigned int vnic_wq_error_status(struct vnic_wq *wq);
507 void vnic_wq_enable(struct vnic_wq *wq);
508 int vnic_wq_disable(struct vnic_wq *wq);
509 void vnic_wq_clean(struct vnic_wq *wq,
510- void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf));
511-int vnic_wq_mem_size(struct vnic_wq *wq, unsigned int desc_count,
512- unsigned int desc_size);
513+ void (*buf_clean)(struct vnic_wq_buf *buf));
514
515 #endif /* _VNIC_WQ_H_ */
516diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
517index 8c914f5..43b82a6 100644
518--- a/drivers/net/enic/enic.h
519+++ b/drivers/net/enic/enic.h
520@@ -155,6 +155,30 @@ static inline struct enic *pmd_priv(struct rte_eth_dev *eth_dev)
521 return (struct enic *)eth_dev->data->dev_private;
522 }
523
524+static inline uint32_t
525+enic_ring_add(uint32_t n_descriptors, uint32_t i0, uint32_t i1)
526+{
527+ uint32_t d = i0 + i1;
528+ d -= (d >= n_descriptors) ? n_descriptors : 0;
529+ return d;
530+}
531+
532+static inline uint32_t
533+enic_ring_sub(uint32_t n_descriptors, uint32_t i0, uint32_t i1)
534+{
535+ int32_t d = i1 - i0;
536+ return (uint32_t)((d < 0) ? ((int32_t)n_descriptors + d) : d);
537+}
538+
539+static inline uint32_t
540+enic_ring_incr(uint32_t n_descriptors, uint32_t idx)
541+{
542+ idx++;
543+ if (unlikely(idx == n_descriptors))
544+ idx = 0;
545+ return idx;
546+}
547+
548 #define RTE_LIBRTE_ENIC_ASSERT_ENABLE
549 #ifdef RTE_LIBRTE_ENIC_ASSERT_ENABLE
550 #define ASSERT(x) do { \
551@@ -209,5 +233,6 @@ extern int enic_clsf_init(struct enic *enic);
552 extern void enic_clsf_destroy(struct enic *enic);
553 uint16_t enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
554 uint16_t nb_pkts);
555-
556+uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
557+ uint16_t nb_pkts);
558 #endif /* _ENIC_H_ */
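The enic_ring_add/sub/incr helpers added to enic.h above do ring arithmetic
with a single compare instead of a modulo. A quick standalone check of the
wraparound cases (same logic as the patch, wrapped in a tiny test harness
that is not part of the driver):

    /* Standalone check of the ring arithmetic added to enic.h. */
    #include <stdio.h>
    #include <stdint.h>

    static uint32_t ring_add(uint32_t n, uint32_t i0, uint32_t i1)
    {
            uint32_t d = i0 + i1;
            d -= (d >= n) ? n : 0;
            return d;
    }

    static uint32_t ring_sub(uint32_t n, uint32_t i0, uint32_t i1)
    {
            int32_t d = (int32_t)i1 - (int32_t)i0;
            return (uint32_t)((d < 0) ? ((int32_t)n + d) : d);
    }

    int main(void)
    {
            /* 8-entry ring: posting 5 descriptors from index 6 wraps to index 3 */
            printf("add(8, 6, 5) = %u\n", ring_add(8, 6, 5));
            /* distance from tail index 6 to head index 3 across the wrap is 5 */
            printf("sub(8, 6, 3) = %u\n", ring_sub(8, 6, 3));
            return 0;
    }

enic_ring_sub(count, tail, head) is how the Tx cleanup later computes how many
descriptors have completed, even when the head index has wrapped past the end
of the ring.
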
559diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c
560index 6bea940..697ff82 100644
561--- a/drivers/net/enic/enic_ethdev.c
562+++ b/drivers/net/enic/enic_ethdev.c
563@@ -519,71 +519,6 @@ static void enicpmd_remove_mac_addr(struct rte_eth_dev *eth_dev, __rte_unused ui
564 enic_del_mac_address(enic);
565 }
566
567-
568-static uint16_t enicpmd_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
569- uint16_t nb_pkts)
570-{
571- uint16_t index;
572- unsigned int frags;
573- unsigned int pkt_len;
574- unsigned int seg_len;
575- unsigned int inc_len;
576- unsigned int nb_segs;
577- struct rte_mbuf *tx_pkt, *next_tx_pkt;
578- struct vnic_wq *wq = (struct vnic_wq *)tx_queue;
579- struct enic *enic = vnic_dev_priv(wq->vdev);
580- unsigned short vlan_id;
581- unsigned short ol_flags;
582- uint8_t last_seg, eop;
583- unsigned int host_tx_descs = 0;
584-
585- for (index = 0; index < nb_pkts; index++) {
586- tx_pkt = *tx_pkts++;
587- inc_len = 0;
588- nb_segs = tx_pkt->nb_segs;
589- if (nb_segs > vnic_wq_desc_avail(wq)) {
590- if (index > 0)
591- enic_post_wq_index(wq);
592-
593- /* wq cleanup and try again */
594- if (!enic_cleanup_wq(enic, wq) ||
595- (nb_segs > vnic_wq_desc_avail(wq))) {
596- return index;
597- }
598- }
599-
600- pkt_len = tx_pkt->pkt_len;
601- vlan_id = tx_pkt->vlan_tci;
602- ol_flags = tx_pkt->ol_flags;
603- for (frags = 0; inc_len < pkt_len; frags++) {
604- if (!tx_pkt)
605- break;
606- next_tx_pkt = tx_pkt->next;
607- seg_len = tx_pkt->data_len;
608- inc_len += seg_len;
609-
610- host_tx_descs++;
611- last_seg = 0;
612- eop = 0;
613- if ((pkt_len == inc_len) || !next_tx_pkt) {
614- eop = 1;
615- /* post if last packet in batch or > thresh */
616- if ((index == (nb_pkts - 1)) ||
617- (host_tx_descs > ENIC_TX_POST_THRESH)) {
618- last_seg = 1;
619- host_tx_descs = 0;
620- }
621- }
622- enic_send_pkt(enic, wq, tx_pkt, (unsigned short)seg_len,
623- !frags, eop, last_seg, ol_flags, vlan_id);
624- tx_pkt = next_tx_pkt;
625- }
626- }
627-
628- enic_cleanup_wq(enic, wq);
629- return index;
630-}
631-
632 static const struct eth_dev_ops enicpmd_eth_dev_ops = {
633 .dev_configure = enicpmd_dev_configure,
634 .dev_start = enicpmd_dev_start,
635@@ -642,7 +577,7 @@ static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
636 enic->rte_dev = eth_dev;
637 eth_dev->dev_ops = &enicpmd_eth_dev_ops;
638 eth_dev->rx_pkt_burst = &enic_recv_pkts;
639- eth_dev->tx_pkt_burst = &enicpmd_xmit_pkts;
640+ eth_dev->tx_pkt_burst = &enic_xmit_pkts;
641
642 pdev = eth_dev->pci_dev;
643 rte_eth_copy_pci_info(eth_dev, pdev);
644diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
645index b164307..9bfdec1 100644
646--- a/drivers/net/enic/enic_main.c
647+++ b/drivers/net/enic/enic_main.c
648@@ -40,11 +40,11 @@
649 #include <libgen.h>
650
651 #include <rte_pci.h>
652-#include <rte_memzone.h>
653 #include <rte_malloc.h>
654 #include <rte_mbuf.h>
655 #include <rte_string_fns.h>
656 #include <rte_ethdev.h>
657+#include <rte_memzone.h>
658
659 #include "enic_compat.h"
660 #include "enic.h"
661@@ -58,7 +58,6 @@
662 #include "vnic_cq.h"
663 #include "vnic_intr.h"
664 #include "vnic_nic.h"
665-#include "enic_vnic_wq.h"
666
667 static inline struct rte_mbuf *
668 rte_rxmbuf_alloc(struct rte_mempool *mp)
669@@ -109,38 +108,17 @@ enic_rxmbuf_queue_release(struct enic *enic, struct vnic_rq *rq)
670 }
671 }
672
673-
674 void enic_set_hdr_split_size(struct enic *enic, u16 split_hdr_size)
675 {
676 vnic_set_hdr_split_size(enic->vdev, split_hdr_size);
677 }
678
679-static void enic_free_wq_buf(__rte_unused struct vnic_wq *wq, struct vnic_wq_buf *buf)
680+static void enic_free_wq_buf(struct vnic_wq_buf *buf)
681 {
682- struct rte_mbuf *mbuf = (struct rte_mbuf *)buf->os_buf;
683+ struct rte_mbuf *mbuf = (struct rte_mbuf *)buf->mb;
684
685 rte_mempool_put(mbuf->pool, mbuf);
686- buf->os_buf = NULL;
687-}
688-
689-static void enic_wq_free_buf(struct vnic_wq *wq,
690- __rte_unused struct cq_desc *cq_desc,
691- struct vnic_wq_buf *buf,
692- __rte_unused void *opaque)
693-{
694- enic_free_wq_buf(wq, buf);
695-}
696-
697-static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
698- __rte_unused u8 type, u16 q_number, u16 completed_index, void *opaque)
699-{
700- struct enic *enic = vnic_dev_priv(vdev);
701-
702- vnic_wq_service(&enic->wq[q_number], cq_desc,
703- completed_index, enic_wq_free_buf,
704- opaque);
705-
706- return 0;
707+ buf->mb = NULL;
708 }
709
710 static void enic_log_q_error(struct enic *enic)
711@@ -163,64 +141,6 @@ static void enic_log_q_error(struct enic *enic)
712 }
713 }
714
715-unsigned int enic_cleanup_wq(struct enic *enic, struct vnic_wq *wq)
716-{
717- unsigned int cq = enic_cq_wq(enic, wq->index);
718-
719- /* Return the work done */
720- return vnic_cq_service(&enic->cq[cq],
721- -1 /*wq_work_to_do*/, enic_wq_service, NULL);
722-}
723-
724-void enic_post_wq_index(struct vnic_wq *wq)
725-{
726- enic_vnic_post_wq_index(wq);
727-}
728-
729-void enic_send_pkt(struct enic *enic, struct vnic_wq *wq,
730- struct rte_mbuf *tx_pkt, unsigned short len,
731- uint8_t sop, uint8_t eop, uint8_t cq_entry,
732- uint16_t ol_flags, uint16_t vlan_tag)
733-{
734- struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
735- uint16_t mss = 0;
736- uint8_t vlan_tag_insert = 0;
737- uint64_t bus_addr = (dma_addr_t)
738- (tx_pkt->buf_physaddr + tx_pkt->data_off);
739-
740- if (sop) {
741- if (ol_flags & PKT_TX_VLAN_PKT)
742- vlan_tag_insert = 1;
743-
744- if (enic->hw_ip_checksum) {
745- if (ol_flags & PKT_TX_IP_CKSUM)
746- mss |= ENIC_CALC_IP_CKSUM;
747-
748- if (ol_flags & PKT_TX_TCP_UDP_CKSUM)
749- mss |= ENIC_CALC_TCP_UDP_CKSUM;
750- }
751- }
752-
753- wq_enet_desc_enc(desc,
754- bus_addr,
755- len,
756- mss,
757- 0 /* header_length */,
758- 0 /* offload_mode WQ_ENET_OFFLOAD_MODE_CSUM */,
759- eop,
760- cq_entry,
761- 0 /* fcoe_encap */,
762- vlan_tag_insert,
763- vlan_tag,
764- 0 /* loopback */);
765-
766- enic_vnic_post_wq(wq, (void *)tx_pkt, bus_addr, len,
767- sop,
768- 1 /*desc_skip_cnt*/,
769- cq_entry,
770- 0 /*compressed send*/,
771- 0 /*wrid*/);
772-}
773
774 void enic_dev_stats_clear(struct enic *enic)
775 {
Damjan Marion19414de2016-06-07 12:46:55 +0200776@@ -298,12 +218,28 @@ void enic_init_vnic_resources(struct enic *enic)
John Loacc22d72016-04-27 16:43:44 -0400777 unsigned int error_interrupt_enable = 1;
778 unsigned int error_interrupt_offset = 0;
779 unsigned int index = 0;
780+ unsigned int cq_idx;
781+
782+ vnic_dev_stats_clear(enic->vdev);
783
784 for (index = 0; index < enic->rq_count; index++) {
785 vnic_rq_init(&enic->rq[index],
786 enic_cq_rq(enic, index),
787 error_interrupt_enable,
788 error_interrupt_offset);
789+
790+ cq_idx = enic_cq_rq(enic, index);
791+ vnic_cq_init(&enic->cq[cq_idx],
792+ 0 /* flow_control_enable */,
793+ 1 /* color_enable */,
794+ 0 /* cq_head */,
795+ 0 /* cq_tail */,
796+ 1 /* cq_tail_color */,
797+ 0 /* interrupt_enable */,
798+ 1 /* cq_entry_enable */,
799+ 0 /* cq_message_enable */,
800+ 0 /* interrupt offset */,
801+ 0 /* cq_message_addr */);
802 }
803
804 for (index = 0; index < enic->wq_count; index++) {
Damjan Marion19414de2016-06-07 12:46:55 +0200805@@ -311,22 +247,19 @@ void enic_init_vnic_resources(struct enic *enic)
John Loacc22d72016-04-27 16:43:44 -0400806 enic_cq_wq(enic, index),
807 error_interrupt_enable,
808 error_interrupt_offset);
809- }
810-
811- vnic_dev_stats_clear(enic->vdev);
812
813- for (index = 0; index < enic->cq_count; index++) {
814- vnic_cq_init(&enic->cq[index],
815+ cq_idx = enic_cq_wq(enic, index);
816+ vnic_cq_init(&enic->cq[cq_idx],
817 0 /* flow_control_enable */,
818 1 /* color_enable */,
819 0 /* cq_head */,
820 0 /* cq_tail */,
821 1 /* cq_tail_color */,
822 0 /* interrupt_enable */,
823- 1 /* cq_entry_enable */,
824- 0 /* cq_message_enable */,
825+ 0 /* cq_entry_enable */,
826+ 1 /* cq_message_enable */,
827 0 /* interrupt offset */,
828- 0 /* cq_message_addr */);
829+ (u64)enic->wq[index].cqmsg_rz->phys_addr);
830 }
831
832 vnic_intr_init(&enic->intr,
Damjan Marion19414de2016-06-07 12:46:55 +0200833@@ -570,6 +503,7 @@ void enic_free_wq(void *txq)
John Loacc22d72016-04-27 16:43:44 -0400834 struct vnic_wq *wq = (struct vnic_wq *)txq;
835 struct enic *enic = vnic_dev_priv(wq->vdev);
836
837+ rte_memzone_free(wq->cqmsg_rz);
838 vnic_wq_free(wq);
839 vnic_cq_free(&enic->cq[enic->rq_count + wq->index]);
840 }
Damjan Marion19414de2016-06-07 12:46:55 +0200841@@ -580,6 +514,8 @@ int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
John Loacc22d72016-04-27 16:43:44 -0400842 int err;
843 struct vnic_wq *wq = &enic->wq[queue_idx];
844 unsigned int cq_index = enic_cq_wq(enic, queue_idx);
845+ char name[NAME_MAX];
846+ static int instance;
847
848 wq->socket_id = socket_id;
849 if (nb_desc) {
Damjan Marion19414de2016-06-07 12:46:55 +0200850@@ -615,6 +551,18 @@ int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
John Loacc22d72016-04-27 16:43:44 -0400851 dev_err(enic, "error in allocation of cq for wq\n");
852 }
853
854+ /* set up CQ message */
855+ snprintf((char *)name, sizeof(name),
856+ "vnic_cqmsg-%s-%d-%d", enic->bdf_name, queue_idx,
857+ instance++);
858+
859+ wq->cqmsg_rz = rte_memzone_reserve_aligned((const char *)name,
860+ sizeof(uint32_t),
861+ SOCKET_ID_ANY, 0,
862+ ENIC_ALIGN);
863+ if (!wq->cqmsg_rz)
864+ return -ENOMEM;
865+
866 return err;
867 }
868
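With cq_entry_enable off and cq_message_enable on (the vnic_cq_init change
above), the adapter no longer writes a completion descriptor per Tx packet;
it writes the 16-bit completed index into the small memzone allocated in
enic_alloc_wq() (wq->cqmsg_rz). Tx cleanup then reduces to comparing that
word against the last index processed. A simulated sketch of that polling
(an ordinary variable stands in for the DMA-written memzone; this is not
driver code):

    /* Sketch of polling a completion index word (simulated; in the driver the
     * word lives at wq->cqmsg_rz->addr and is written by the adapter via DMA). */
    #include <stdio.h>
    #include <stdint.h>

    static volatile uint32_t cqmsg;            /* stand-in for the cqmsg memzone */
    static uint16_t last_completed;            /* per-WQ state kept by the driver */

    static void cleanup(void)
    {
            uint16_t completed = (uint16_t)(cqmsg & 0xffff);

            if (completed != last_completed) {
                    printf("reclaim descriptors up to index %u\n", completed);
                    last_completed = completed;
            }
    }

    int main(void)
    {
            cleanup();      /* nothing reported yet, nothing to do */
            cqmsg = 42;     /* pretend the adapter reported completion of slot 42 */
            cleanup();      /* reclaims up to slot 42 */
            cleanup();      /* index unchanged, returns immediately */
            return 0;
    }
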
869diff --git a/drivers/net/enic/enic_res.h b/drivers/net/enic/enic_res.h
870index 00fa71d..3e1bdf5 100644
871--- a/drivers/net/enic/enic_res.h
872+++ b/drivers/net/enic/enic_res.h
873@@ -53,89 +53,10 @@
874
875 #define ENIC_NON_TSO_MAX_DESC 16
876 #define ENIC_DEFAULT_RX_FREE_THRESH 32
877-#define ENIC_TX_POST_THRESH (ENIC_MIN_WQ_DESCS / 2)
878+#define ENIC_TX_XMIT_MAX 64
879
880 #define ENIC_SETTING(enic, f) ((enic->config.flags & VENETF_##f) ? 1 : 0)
881
882-static inline void enic_queue_wq_desc_ex(struct vnic_wq *wq,
883- void *os_buf, dma_addr_t dma_addr, unsigned int len,
884- unsigned int mss_or_csum_offset, unsigned int hdr_len,
885- int vlan_tag_insert, unsigned int vlan_tag,
886- int offload_mode, int cq_entry, int sop, int eop, int loopback)
887-{
888- struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
889- u8 desc_skip_cnt = 1;
890- u8 compressed_send = 0;
891- u64 wrid = 0;
892-
893- wq_enet_desc_enc(desc,
894- (u64)dma_addr | VNIC_PADDR_TARGET,
895- (u16)len,
896- (u16)mss_or_csum_offset,
897- (u16)hdr_len, (u8)offload_mode,
898- (u8)eop, (u8)cq_entry,
899- 0, /* fcoe_encap */
900- (u8)vlan_tag_insert,
901- (u16)vlan_tag,
902- (u8)loopback);
903-
904- vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop, desc_skip_cnt,
905- (u8)cq_entry, compressed_send, wrid);
906-}
907-
908-static inline void enic_queue_wq_desc_cont(struct vnic_wq *wq,
909- void *os_buf, dma_addr_t dma_addr, unsigned int len,
910- int eop, int loopback)
911-{
912- enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
913- 0, 0, 0, 0, 0,
914- eop, 0 /* !SOP */, eop, loopback);
915-}
916-
917-static inline void enic_queue_wq_desc(struct vnic_wq *wq, void *os_buf,
918- dma_addr_t dma_addr, unsigned int len, int vlan_tag_insert,
919- unsigned int vlan_tag, int eop, int loopback)
920-{
921- enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
922- 0, 0, vlan_tag_insert, vlan_tag,
923- WQ_ENET_OFFLOAD_MODE_CSUM,
924- eop, 1 /* SOP */, eop, loopback);
925-}
926-
927-static inline void enic_queue_wq_desc_csum(struct vnic_wq *wq,
928- void *os_buf, dma_addr_t dma_addr, unsigned int len,
929- int ip_csum, int tcpudp_csum, int vlan_tag_insert,
930- unsigned int vlan_tag, int eop, int loopback)
931-{
932- enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
933- (ip_csum ? 1 : 0) + (tcpudp_csum ? 2 : 0),
934- 0, vlan_tag_insert, vlan_tag,
935- WQ_ENET_OFFLOAD_MODE_CSUM,
936- eop, 1 /* SOP */, eop, loopback);
937-}
938-
939-static inline void enic_queue_wq_desc_csum_l4(struct vnic_wq *wq,
940- void *os_buf, dma_addr_t dma_addr, unsigned int len,
941- unsigned int csum_offset, unsigned int hdr_len,
942- int vlan_tag_insert, unsigned int vlan_tag, int eop, int loopback)
943-{
944- enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
945- csum_offset, hdr_len, vlan_tag_insert, vlan_tag,
946- WQ_ENET_OFFLOAD_MODE_CSUM_L4,
947- eop, 1 /* SOP */, eop, loopback);
948-}
949-
950-static inline void enic_queue_wq_desc_tso(struct vnic_wq *wq,
951- void *os_buf, dma_addr_t dma_addr, unsigned int len,
952- unsigned int mss, unsigned int hdr_len, int vlan_tag_insert,
953- unsigned int vlan_tag, int eop, int loopback)
954-{
955- enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
956- mss, hdr_len, vlan_tag_insert, vlan_tag,
957- WQ_ENET_OFFLOAD_MODE_TSO,
958- eop, 1 /* SOP */, eop, loopback);
959-}
960-
961 struct enic;
962
963 int enic_get_vnic_config(struct enic *);
964diff --git a/drivers/net/enic/enic_rx.c b/drivers/net/enic/enic_rx.c
965deleted file mode 100644
966index 39bb55c..0000000
967--- a/drivers/net/enic/enic_rx.c
968+++ /dev/null
969@@ -1,361 +0,0 @@
970-/*
971- * Copyright 2008-2014 Cisco Systems, Inc. All rights reserved.
972- * Copyright 2007 Nuova Systems, Inc. All rights reserved.
973- *
974- * Copyright (c) 2014, Cisco Systems, Inc.
975- * All rights reserved.
976- *
977- * Redistribution and use in source and binary forms, with or without
978- * modification, are permitted provided that the following conditions
979- * are met:
980- *
981- * 1. Redistributions of source code must retain the above copyright
982- * notice, this list of conditions and the following disclaimer.
983- *
984- * 2. Redistributions in binary form must reproduce the above copyright
985- * notice, this list of conditions and the following disclaimer in
986- * the documentation and/or other materials provided with the
987- * distribution.
988- *
989- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
990- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
991- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
992- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
993- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
994- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
995- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
996- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
997- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
998- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
999- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
1000- * POSSIBILITY OF SUCH DAMAGE.
1001- *
1002- */
1003-
1004-#include <rte_mbuf.h>
1005-#include <rte_ethdev.h>
1006-#include <rte_prefetch.h>
1007-
1008-#include "enic_compat.h"
1009-#include "rq_enet_desc.h"
1010-#include "enic.h"
1011-
1012-#define RTE_PMD_USE_PREFETCH
1013-
1014-#ifdef RTE_PMD_USE_PREFETCH
1015-/*
1016- * Prefetch a cache line into all cache levels.
1017- */
1018-#define rte_enic_prefetch(p) rte_prefetch0(p)
1019-#else
1020-#define rte_enic_prefetch(p) do {} while (0)
1021-#endif
1022-
1023-#ifdef RTE_PMD_PACKET_PREFETCH
1024-#define rte_packet_prefetch(p) rte_prefetch1(p)
1025-#else
1026-#define rte_packet_prefetch(p) do {} while (0)
1027-#endif
1028-
1029-static inline struct rte_mbuf *
1030-rte_rxmbuf_alloc(struct rte_mempool *mp)
1031-{
1032- struct rte_mbuf *m;
1033-
1034- m = __rte_mbuf_raw_alloc(mp);
1035- __rte_mbuf_sanity_check_raw(m, 0);
1036- return m;
1037-}
1038-
1039-static inline uint16_t
1040-enic_cq_rx_desc_ciflags(struct cq_enet_rq_desc *crd)
1041-{
1042- return le16_to_cpu(crd->completed_index_flags) & ~CQ_DESC_COMP_NDX_MASK;
1043-}
1044-
1045-static inline uint16_t
1046-enic_cq_rx_desc_bwflags(struct cq_enet_rq_desc *crd)
1047-{
1048- return(le16_to_cpu(crd->bytes_written_flags) &
1049- ~CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK);
1050-}
1051-
1052-static inline uint8_t
1053-enic_cq_rx_desc_packet_error(uint16_t bwflags)
1054-{
1055- return((bwflags & CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ==
1056- CQ_ENET_RQ_DESC_FLAGS_TRUNCATED);
1057-}
1058-
1059-static inline uint8_t
1060-enic_cq_rx_desc_eop(uint16_t ciflags)
1061-{
1062- return (ciflags & CQ_ENET_RQ_DESC_FLAGS_EOP)
1063- == CQ_ENET_RQ_DESC_FLAGS_EOP;
1064-}
1065-
1066-static inline uint8_t
1067-enic_cq_rx_desc_csum_not_calc(struct cq_enet_rq_desc *cqrd)
1068-{
1069- return ((le16_to_cpu(cqrd->q_number_rss_type_flags) &
1070- CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ==
1071- CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC);
1072-}
1073-
1074-static inline uint8_t
1075-enic_cq_rx_desc_ipv4_csum_ok(struct cq_enet_rq_desc *cqrd)
1076-{
1077- return ((cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ==
1078- CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK);
1079-}
1080-
1081-static inline uint8_t
1082-enic_cq_rx_desc_tcp_udp_csum_ok(struct cq_enet_rq_desc *cqrd)
1083-{
1084- return((cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ==
1085- CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK);
1086-}
1087-
1088-static inline uint8_t
1089-enic_cq_rx_desc_rss_type(struct cq_enet_rq_desc *cqrd)
1090-{
1091- return (uint8_t)((le16_to_cpu(cqrd->q_number_rss_type_flags) >>
1092- CQ_DESC_Q_NUM_BITS) & CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
1093-}
1094-
1095-static inline uint32_t
1096-enic_cq_rx_desc_rss_hash(struct cq_enet_rq_desc *cqrd)
1097-{
1098- return le32_to_cpu(cqrd->rss_hash);
1099-}
1100-
1101-static inline uint16_t
1102-enic_cq_rx_desc_vlan(struct cq_enet_rq_desc *cqrd)
1103-{
1104- return le16_to_cpu(cqrd->vlan);
1105-}
1106-
1107-static inline uint16_t
1108-enic_cq_rx_desc_n_bytes(struct cq_desc *cqd)
1109-{
1110- struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
1111- return le16_to_cpu(cqrd->bytes_written_flags) &
1112- CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
1113-}
1114-
1115-static inline uint8_t
1116-enic_cq_rx_to_pkt_err_flags(struct cq_desc *cqd, uint64_t *pkt_err_flags_out)
1117-{
1118- struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
1119- uint16_t bwflags;
1120- int ret = 0;
1121- uint64_t pkt_err_flags = 0;
1122-
1123- bwflags = enic_cq_rx_desc_bwflags(cqrd);
1124- if (unlikely(enic_cq_rx_desc_packet_error(bwflags))) {
1125- pkt_err_flags = PKT_RX_MAC_ERR;
1126- ret = 1;
1127- }
1128- *pkt_err_flags_out = pkt_err_flags;
1129- return ret;
1130-}
1131-
1132-/*
1133- * Lookup table to translate RX CQ flags to mbuf flags.
1134- */
1135-static inline uint32_t
1136-enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd)
1137-{
1138- struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
1139- uint8_t cqrd_flags = cqrd->flags;
1140- static const uint32_t cq_type_table[128] __rte_cache_aligned = {
1141- [32] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4,
1142- [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
1143- | RTE_PTYPE_L4_UDP,
1144- [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
1145- | RTE_PTYPE_L4_TCP,
1146- [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
1147- | RTE_PTYPE_L4_FRAG,
1148- [16] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6,
1149- [18] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6
1150- | RTE_PTYPE_L4_UDP,
1151- [20] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6
1152- | RTE_PTYPE_L4_TCP,
1153- [80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6
1154- | RTE_PTYPE_L4_FRAG,
1155- /* All others reserved */
1156- };
1157- cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT
1158- | CQ_ENET_RQ_DESC_FLAGS_IPV4 | CQ_ENET_RQ_DESC_FLAGS_IPV6
1159- | CQ_ENET_RQ_DESC_FLAGS_TCP | CQ_ENET_RQ_DESC_FLAGS_UDP;
1160- return cq_type_table[cqrd_flags];
1161-}
1162-
1163-static inline void
1164-enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
1165-{
1166- struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
1167- uint16_t ciflags, bwflags, pkt_flags = 0;
1168- ciflags = enic_cq_rx_desc_ciflags(cqrd);
1169- bwflags = enic_cq_rx_desc_bwflags(cqrd);
1170-
1171- mbuf->ol_flags = 0;
1172-
1173- /* flags are meaningless if !EOP */
1174- if (unlikely(!enic_cq_rx_desc_eop(ciflags)))
1175- goto mbuf_flags_done;
1176-
1177- /* VLAN stripping */
1178- if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) {
1179- pkt_flags |= PKT_RX_VLAN_PKT;
1180- mbuf->vlan_tci = enic_cq_rx_desc_vlan(cqrd);
1181- } else {
1182- mbuf->vlan_tci = 0;
1183- }
1184-
1185- /* RSS flag */
1186- if (enic_cq_rx_desc_rss_type(cqrd)) {
1187- pkt_flags |= PKT_RX_RSS_HASH;
1188- mbuf->hash.rss = enic_cq_rx_desc_rss_hash(cqrd);
1189- }
1190-
1191- /* checksum flags */
1192- if (!enic_cq_rx_desc_csum_not_calc(cqrd) &&
1193- (mbuf->packet_type & RTE_PTYPE_L3_IPV4)) {
1194- if (unlikely(!enic_cq_rx_desc_ipv4_csum_ok(cqrd)))
1195- pkt_flags |= PKT_RX_IP_CKSUM_BAD;
1196- if (mbuf->packet_type & (RTE_PTYPE_L4_UDP | RTE_PTYPE_L4_TCP)) {
1197- if (unlikely(!enic_cq_rx_desc_tcp_udp_csum_ok(cqrd)))
1198- pkt_flags |= PKT_RX_L4_CKSUM_BAD;
1199- }
1200- }
1201-
1202- mbuf_flags_done:
1203- mbuf->ol_flags = pkt_flags;
1204-}
1205-
1206-static inline uint32_t
1207-enic_ring_add(uint32_t n_descriptors, uint32_t i0, uint32_t i1)
1208-{
1209- uint32_t d = i0 + i1;
1210- ASSERT(i0 < n_descriptors);
1211- ASSERT(i1 < n_descriptors);
1212- d -= (d >= n_descriptors) ? n_descriptors : 0;
1213- return d;
1214-}
1215-
1216-
1217-uint16_t
1218-enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1219- uint16_t nb_pkts)
1220-{
1221- struct vnic_rq *rq = rx_queue;
1222- struct enic *enic = vnic_dev_priv(rq->vdev);
1223- unsigned int rx_id;
1224- struct rte_mbuf *nmb, *rxmb;
1225- uint16_t nb_rx = 0;
1226- uint16_t nb_hold;
1227- struct vnic_cq *cq;
1228- volatile struct cq_desc *cqd_ptr;
1229- uint8_t color;
1230-
1231- cq = &enic->cq[enic_cq_rq(enic, rq->index)];
1232- rx_id = cq->to_clean; /* index of cqd, rqd, mbuf_table */
1233- cqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id;
1234-
1235- nb_hold = rq->rx_nb_hold; /* mbufs held by software */
1236-
1237- while (nb_rx < nb_pkts) {
1238- volatile struct rq_enet_desc *rqd_ptr;
1239- dma_addr_t dma_addr;
1240- struct cq_desc cqd;
1241- uint64_t ol_err_flags;
1242- uint8_t packet_error;
1243-
1244- /* Check for pkts available */
1245- color = (cqd_ptr->type_color >> CQ_DESC_COLOR_SHIFT)
1246- & CQ_DESC_COLOR_MASK;
1247- if (color == cq->last_color)
1248- break;
1249-
1250- /* Get the cq descriptor and rq pointer */
1251- cqd = *cqd_ptr;
1252- rqd_ptr = (struct rq_enet_desc *)(rq->ring.descs) + rx_id;
1253-
1254- /* allocate a new mbuf */
1255- nmb = rte_rxmbuf_alloc(rq->mp);
1256- if (nmb == NULL) {
1257- dev_err(enic, "RX mbuf alloc failed port=%u qid=%u",
1258- enic->port_id, (unsigned)rq->index);
1259- rte_eth_devices[enic->port_id].
1260- data->rx_mbuf_alloc_failed++;
1261- break;
1262- }
1263-
1264- /* A packet error means descriptor and data are untrusted */
1265- packet_error = enic_cq_rx_to_pkt_err_flags(&cqd, &ol_err_flags);
1266-
1267- /* Get the mbuf to return and replace with one just allocated */
1268- rxmb = rq->mbuf_ring[rx_id];
1269- rq->mbuf_ring[rx_id] = nmb;
1270-
1271- /* Increment cqd, rqd, mbuf_table index */
1272- rx_id++;
1273- if (unlikely(rx_id == rq->ring.desc_count)) {
1274- rx_id = 0;
1275- cq->last_color = cq->last_color ? 0 : 1;
1276- }
1277-
1278- /* Prefetch next mbuf & desc while processing current one */
1279- cqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id;
1280- rte_enic_prefetch(cqd_ptr);
1281- rte_enic_prefetch(rq->mbuf_ring[rx_id]);
1282- rte_enic_prefetch((struct rq_enet_desc *)(rq->ring.descs)
1283- + rx_id);
1284-
1285- /* Push descriptor for newly allocated mbuf */
1286- dma_addr = (dma_addr_t)(nmb->buf_physaddr
1287- + RTE_PKTMBUF_HEADROOM);
1288- rqd_ptr->address = rte_cpu_to_le_64(dma_addr);
1289- rqd_ptr->length_type = cpu_to_le16(nmb->buf_len
1290- - RTE_PKTMBUF_HEADROOM);
1291-
1292- /* Fill in the rest of the mbuf */
1293- rxmb->data_off = RTE_PKTMBUF_HEADROOM;
1294- rxmb->nb_segs = 1;
1295- rxmb->next = NULL;
1296- rxmb->port = enic->port_id;
1297- if (!packet_error) {
1298- rxmb->pkt_len = enic_cq_rx_desc_n_bytes(&cqd);
1299- rxmb->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd);
1300- enic_cq_rx_to_pkt_flags(&cqd, rxmb);
1301- } else {
1302- rxmb->pkt_len = 0;
1303- rxmb->packet_type = 0;
1304- rxmb->ol_flags = 0;
1305- }
1306- rxmb->data_len = rxmb->pkt_len;
1307-
1308- /* prefetch mbuf data for caller */
1309- rte_packet_prefetch(RTE_PTR_ADD(rxmb->buf_addr,
1310- RTE_PKTMBUF_HEADROOM));
1311-
1312- /* store the mbuf address into the next entry of the array */
1313- rx_pkts[nb_rx++] = rxmb;
1314- }
1315-
1316- nb_hold += nb_rx;
1317- cq->to_clean = rx_id;
1318-
1319- if (nb_hold > rq->rx_free_thresh) {
1320- rq->posted_index = enic_ring_add(rq->ring.desc_count,
1321- rq->posted_index, nb_hold);
1322- nb_hold = 0;
1323- rte_mb();
1324- iowrite32(rq->posted_index, &rq->ctrl->posted_index);
1325- }
1326-
1327- rq->rx_nb_hold = nb_hold;
1328-
1329- return nb_rx;
1330-}
1331diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c
1332new file mode 100644
1333index 0000000..71ca34e
1334--- /dev/null
1335+++ b/drivers/net/enic/enic_rxtx.c
1336@@ -0,0 +1,505 @@
1337+/*
1338+ * Copyright 2008-2016 Cisco Systems, Inc. All rights reserved.
1339+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
1340+ *
1341+ * Copyright (c) 2016, Cisco Systems, Inc.
1342+ * All rights reserved.
1343+ *
1344+ * Redistribution and use in source and binary forms, with or without
1345+ * modification, are permitted provided that the following conditions
1346+ * are met:
1347+ *
1348+ * 1. Redistributions of source code must retain the above copyright
1349+ * notice, this list of conditions and the following disclaimer.
1350+ *
1351+ * 2. Redistributions in binary form must reproduce the above copyright
1352+ * notice, this list of conditions and the following disclaimer in
1353+ * the documentation and/or other materials provided with the
1354+ * distribution.
1355+ *
1356+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1357+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1358+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
1359+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
1360+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
1361+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
1362+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
1363+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
1364+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
1365+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
1366+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
1367+ * POSSIBILITY OF SUCH DAMAGE.
1368+ *
1369+ */
1370+
1371+#include <rte_mbuf.h>
1372+#include <rte_ethdev.h>
1373+#include <rte_prefetch.h>
1374+#include <rte_memzone.h>
1375+
1376+#include "enic_compat.h"
1377+#include "rq_enet_desc.h"
1378+#include "enic.h"
1379+
1380+#define RTE_PMD_USE_PREFETCH
1381+
1382+#ifdef RTE_PMD_USE_PREFETCH
1383+/*
1384+ * Prefetch a cache line into all cache levels.
1385+ */
1386+#define rte_enic_prefetch(p) rte_prefetch0(p)
1387+#else
1388+#define rte_enic_prefetch(p) do {} while (0)
1389+#endif
1390+
1391+#ifdef RTE_PMD_PACKET_PREFETCH
1392+#define rte_packet_prefetch(p) rte_prefetch1(p)
1393+#else
1394+#define rte_packet_prefetch(p) do {} while (0)
1395+#endif
1396+
1397+static inline struct rte_mbuf *
1398+rte_rxmbuf_alloc(struct rte_mempool *mp)
1399+{
1400+ struct rte_mbuf *m;
1401+
1402+ m = __rte_mbuf_raw_alloc(mp);
1403+ __rte_mbuf_sanity_check_raw(m, 0);
1404+ return m;
1405+}
1406+
1407+static inline uint16_t
1408+enic_cq_rx_desc_ciflags(struct cq_enet_rq_desc *crd)
1409+{
1410+ return le16_to_cpu(crd->completed_index_flags) & ~CQ_DESC_COMP_NDX_MASK;
1411+}
1412+
1413+static inline uint16_t
1414+enic_cq_rx_desc_bwflags(struct cq_enet_rq_desc *crd)
1415+{
1416+ return(le16_to_cpu(crd->bytes_written_flags) &
1417+ ~CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK);
1418+}
1419+
1420+static inline uint8_t
1421+enic_cq_rx_desc_packet_error(uint16_t bwflags)
1422+{
1423+ return((bwflags & CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ==
1424+ CQ_ENET_RQ_DESC_FLAGS_TRUNCATED);
1425+}
1426+
1427+static inline uint8_t
1428+enic_cq_rx_desc_eop(uint16_t ciflags)
1429+{
1430+ return (ciflags & CQ_ENET_RQ_DESC_FLAGS_EOP)
1431+ == CQ_ENET_RQ_DESC_FLAGS_EOP;
1432+}
1433+
1434+static inline uint8_t
1435+enic_cq_rx_desc_csum_not_calc(struct cq_enet_rq_desc *cqrd)
1436+{
1437+ return ((le16_to_cpu(cqrd->q_number_rss_type_flags) &
1438+ CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ==
1439+ CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC);
1440+}
1441+
1442+static inline uint8_t
1443+enic_cq_rx_desc_ipv4_csum_ok(struct cq_enet_rq_desc *cqrd)
1444+{
1445+ return ((cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ==
1446+ CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK);
1447+}
1448+
1449+static inline uint8_t
1450+enic_cq_rx_desc_tcp_udp_csum_ok(struct cq_enet_rq_desc *cqrd)
1451+{
1452+ return((cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ==
1453+ CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK);
1454+}
1455+
1456+static inline uint8_t
1457+enic_cq_rx_desc_rss_type(struct cq_enet_rq_desc *cqrd)
1458+{
1459+ return (uint8_t)((le16_to_cpu(cqrd->q_number_rss_type_flags) >>
1460+ CQ_DESC_Q_NUM_BITS) & CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
1461+}
1462+
1463+static inline uint32_t
1464+enic_cq_rx_desc_rss_hash(struct cq_enet_rq_desc *cqrd)
1465+{
1466+ return le32_to_cpu(cqrd->rss_hash);
1467+}
1468+
1469+static inline uint16_t
1470+enic_cq_rx_desc_vlan(struct cq_enet_rq_desc *cqrd)
1471+{
1472+ return le16_to_cpu(cqrd->vlan);
1473+}
1474+
1475+static inline uint16_t
1476+enic_cq_rx_desc_n_bytes(struct cq_desc *cqd)
1477+{
1478+ struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
1479+ return le16_to_cpu(cqrd->bytes_written_flags) &
1480+ CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
1481+}
1482+
1483+static inline uint8_t
1484+enic_cq_rx_to_pkt_err_flags(struct cq_desc *cqd, uint64_t *pkt_err_flags_out)
1485+{
1486+ struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
1487+ uint16_t bwflags;
1488+ int ret = 0;
1489+ uint64_t pkt_err_flags = 0;
1490+
1491+ bwflags = enic_cq_rx_desc_bwflags(cqrd);
1492+ if (unlikely(enic_cq_rx_desc_packet_error(bwflags))) {
1493+ pkt_err_flags = PKT_RX_MAC_ERR;
1494+ ret = 1;
1495+ }
1496+ *pkt_err_flags_out = pkt_err_flags;
1497+ return ret;
1498+}
1499+
1500+/*
1501+ * Lookup table to translate RX CQ flags to mbuf flags.
1502+ */
1503+static inline uint32_t
1504+enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd)
1505+{
1506+ struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
1507+ uint8_t cqrd_flags = cqrd->flags;
1508+ static const uint32_t cq_type_table[128] __rte_cache_aligned = {
1509+ [32] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4,
1510+ [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
1511+ | RTE_PTYPE_L4_UDP,
1512+ [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
1513+ | RTE_PTYPE_L4_TCP,
1514+ [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
1515+ | RTE_PTYPE_L4_FRAG,
1516+ [16] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6,
1517+ [18] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6
1518+ | RTE_PTYPE_L4_UDP,
1519+ [20] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6
1520+ | RTE_PTYPE_L4_TCP,
1521+ [80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6
1522+ | RTE_PTYPE_L4_FRAG,
1523+ /* All others reserved */
1524+ };
1525+ cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT
1526+ | CQ_ENET_RQ_DESC_FLAGS_IPV4 | CQ_ENET_RQ_DESC_FLAGS_IPV6
1527+ | CQ_ENET_RQ_DESC_FLAGS_TCP | CQ_ENET_RQ_DESC_FLAGS_UDP;
1528+ return cq_type_table[cqrd_flags];
1529+}
1530+
1531+static inline void
1532+enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
1533+{
1534+ struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
1535+ uint16_t ciflags, bwflags, pkt_flags = 0;
1536+ ciflags = enic_cq_rx_desc_ciflags(cqrd);
1537+ bwflags = enic_cq_rx_desc_bwflags(cqrd);
1538+
1539+ mbuf->ol_flags = 0;
1540+
1541+ /* flags are meaningless if !EOP */
1542+ if (unlikely(!enic_cq_rx_desc_eop(ciflags)))
1543+ goto mbuf_flags_done;
1544+
1545+ /* VLAN stripping */
1546+ if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) {
1547+ pkt_flags |= PKT_RX_VLAN_PKT;
1548+ mbuf->vlan_tci = enic_cq_rx_desc_vlan(cqrd);
1549+ } else {
1550+ mbuf->vlan_tci = 0;
1551+ }
1552+
1553+ /* RSS flag */
1554+ if (enic_cq_rx_desc_rss_type(cqrd)) {
1555+ pkt_flags |= PKT_RX_RSS_HASH;
1556+ mbuf->hash.rss = enic_cq_rx_desc_rss_hash(cqrd);
1557+ }
1558+
1559+ /* checksum flags */
1560+ if (!enic_cq_rx_desc_csum_not_calc(cqrd) &&
1561+ (mbuf->packet_type & RTE_PTYPE_L3_IPV4)) {
1562+ if (unlikely(!enic_cq_rx_desc_ipv4_csum_ok(cqrd)))
1563+ pkt_flags |= PKT_RX_IP_CKSUM_BAD;
1564+ if (mbuf->packet_type & (RTE_PTYPE_L4_UDP | RTE_PTYPE_L4_TCP)) {
1565+ if (unlikely(!enic_cq_rx_desc_tcp_udp_csum_ok(cqrd)))
1566+ pkt_flags |= PKT_RX_L4_CKSUM_BAD;
1567+ }
1568+ }
1569+
1570+ mbuf_flags_done:
1571+ mbuf->ol_flags = pkt_flags;
1572+}
1573+
1574+uint16_t
1575+enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1576+ uint16_t nb_pkts)
1577+{
1578+ struct vnic_rq *rq = rx_queue;
1579+ struct enic *enic = vnic_dev_priv(rq->vdev);
1580+ unsigned int rx_id;
1581+ struct rte_mbuf *nmb, *rxmb;
1582+ uint16_t nb_rx = 0;
1583+ uint16_t nb_hold;
1584+ struct vnic_cq *cq;
1585+ volatile struct cq_desc *cqd_ptr;
1586+ uint8_t color;
1587+
1588+ cq = &enic->cq[enic_cq_rq(enic, rq->index)];
1589+ rx_id = cq->to_clean; /* index of cqd, rqd, mbuf_table */
1590+ cqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id;
1591+
1592+ nb_hold = rq->rx_nb_hold; /* mbufs held by software */
1593+
1594+ while (nb_rx < nb_pkts) {
1595+ volatile struct rq_enet_desc *rqd_ptr;
1596+ dma_addr_t dma_addr;
1597+ struct cq_desc cqd;
1598+ uint64_t ol_err_flags;
1599+ uint8_t packet_error;
1600+
1601+ /* Check for pkts available */
1602+ color = (cqd_ptr->type_color >> CQ_DESC_COLOR_SHIFT)
1603+ & CQ_DESC_COLOR_MASK;
1604+ if (color == cq->last_color)
1605+ break;
1606+
1607+ /* Get the cq descriptor and rq pointer */
1608+ cqd = *cqd_ptr;
1609+ rqd_ptr = (struct rq_enet_desc *)(rq->ring.descs) + rx_id;
1610+
1611+ /* allocate a new mbuf */
1612+ nmb = rte_rxmbuf_alloc(rq->mp);
1613+ if (nmb == NULL) {
1614+ dev_err(enic, "RX mbuf alloc failed port=%u qid=%u",
1615+ enic->port_id, (unsigned)rq->index);
1616+ rte_eth_devices[enic->port_id].
1617+ data->rx_mbuf_alloc_failed++;
1618+ break;
1619+ }
1620+
1621+ /* A packet error means descriptor and data are untrusted */
1622+ packet_error = enic_cq_rx_to_pkt_err_flags(&cqd, &ol_err_flags);
1623+
1624+ /* Get the mbuf to return and replace with one just allocated */
1625+ rxmb = rq->mbuf_ring[rx_id];
1626+ rq->mbuf_ring[rx_id] = nmb;
1627+
1628+ /* Increment cqd, rqd, mbuf_table index */
1629+ rx_id++;
1630+ if (unlikely(rx_id == rq->ring.desc_count)) {
1631+ rx_id = 0;
1632+ cq->last_color = cq->last_color ? 0 : 1;
1633+ }
1634+
1635+ /* Prefetch next mbuf & desc while processing current one */
1636+ cqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id;
1637+ rte_enic_prefetch(cqd_ptr);
1638+ rte_enic_prefetch(rq->mbuf_ring[rx_id]);
1639+ rte_enic_prefetch((struct rq_enet_desc *)(rq->ring.descs)
1640+ + rx_id);
1641+
1642+ /* Push descriptor for newly allocated mbuf */
1643+ dma_addr = (dma_addr_t)(nmb->buf_physaddr
1644+ + RTE_PKTMBUF_HEADROOM);
1645+ rqd_ptr->address = rte_cpu_to_le_64(dma_addr);
1646+ rqd_ptr->length_type = cpu_to_le16(nmb->buf_len
1647+ - RTE_PKTMBUF_HEADROOM);
1648+
1649+ /* Fill in the rest of the mbuf */
1650+ rxmb->data_off = RTE_PKTMBUF_HEADROOM;
1651+ rxmb->nb_segs = 1;
1652+ rxmb->next = NULL;
1653+ rxmb->port = enic->port_id;
1654+ if (!packet_error) {
1655+ rxmb->pkt_len = enic_cq_rx_desc_n_bytes(&cqd);
1656+ rxmb->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd);
1657+ enic_cq_rx_to_pkt_flags(&cqd, rxmb);
1658+ } else {
1659+ rxmb->pkt_len = 0;
1660+ rxmb->packet_type = 0;
1661+ rxmb->ol_flags = 0;
1662+ }
1663+ rxmb->data_len = rxmb->pkt_len;
1664+
1665+ /* prefetch mbuf data for caller */
1666+ rte_packet_prefetch(RTE_PTR_ADD(rxmb->buf_addr,
1667+ RTE_PKTMBUF_HEADROOM));
1668+
1669+ /* store the mbuf address into the next entry of the array */
1670+ rx_pkts[nb_rx++] = rxmb;
1671+ }
1672+
1673+ nb_hold += nb_rx;
1674+ cq->to_clean = rx_id;
1675+
1676+ if (nb_hold > rq->rx_free_thresh) {
1677+ rq->posted_index = enic_ring_add(rq->ring.desc_count,
1678+ rq->posted_index, nb_hold);
1679+ nb_hold = 0;
1680+ rte_mb();
1681+ iowrite32(rq->posted_index, &rq->ctrl->posted_index);
1682+ }
1683+
1684+ rq->rx_nb_hold = nb_hold;
1685+
1686+ return nb_rx;
1687+}
1688+
1689+static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
1690+{
1691+ struct vnic_wq_buf *buf;
1692+ struct rte_mbuf *m, *free[ENIC_MAX_WQ_DESCS];
1693+ unsigned int nb_to_free, nb_free = 0, i;
1694+ struct rte_mempool *pool;
1695+ unsigned int tail_idx;
1696+ unsigned int desc_count = wq->ring.desc_count;
1697+
1698+ nb_to_free = enic_ring_sub(desc_count, wq->tail_idx, completed_index)
1699+ + 1;
1700+ tail_idx = wq->tail_idx;
1701+ buf = &wq->bufs[tail_idx];
1702+ pool = ((struct rte_mbuf *)buf->mb)->pool;
1703+ for (i = 0; i < nb_to_free; i++) {
1704+ buf = &wq->bufs[tail_idx];
1705+ m = (struct rte_mbuf *)(buf->mb);
1706+ if (likely(m->pool == pool)) {
1707+ ASSERT(nb_free < ENIC_MAX_WQ_DESCS);
1708+ free[nb_free++] = m;
1709+ } else {
1710+ rte_mempool_put_bulk(pool, (void *)free, nb_free);
1711+ free[0] = m;
1712+ nb_free = 1;
1713+ pool = m->pool;
1714+ }
1715+ tail_idx = enic_ring_incr(desc_count, tail_idx);
1716+ buf->mb = NULL;
1717+ }
1718+
1719+ rte_mempool_put_bulk(pool, (void **)free, nb_free);
1720+
1721+ wq->tail_idx = tail_idx;
1722+ wq->ring.desc_avail += nb_to_free;
1723+}
1724+
1725+unsigned int enic_cleanup_wq(__rte_unused struct enic *enic, struct vnic_wq *wq)
1726+{
1727+ u16 completed_index;
1728+
1729+ completed_index = *((uint32_t *)wq->cqmsg_rz->addr) & 0xffff;
1730+
1731+ if (wq->last_completed_index != completed_index) {
1732+ enic_free_wq_bufs(wq, completed_index);
1733+ wq->last_completed_index = completed_index;
1734+ }
1735+ return 0;
1736+}
1737+
1738+uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
1739+ uint16_t nb_pkts)
1740+{
1741+ uint16_t index;
1742+ unsigned int pkt_len, data_len;
1743+ unsigned int nb_segs;
1744+ struct rte_mbuf *tx_pkt;
1745+ struct vnic_wq *wq = (struct vnic_wq *)tx_queue;
1746+ struct enic *enic = vnic_dev_priv(wq->vdev);
1747+ unsigned short vlan_id;
1748+ unsigned short ol_flags;
1749+ unsigned int wq_desc_avail;
1750+ int head_idx;
1751+ struct vnic_wq_buf *buf;
1752+ unsigned int hw_ip_cksum_enabled;
1753+ unsigned int desc_count;
1754+ struct wq_enet_desc *descs, *desc_p, desc_tmp;
1755+ uint16_t mss;
1756+ uint8_t vlan_tag_insert;
1757+ uint8_t eop;
1758+ uint64_t bus_addr;
1759+
1760+ enic_cleanup_wq(enic, wq);
1761+ wq_desc_avail = vnic_wq_desc_avail(wq);
1762+ head_idx = wq->head_idx;
1763+ desc_count = wq->ring.desc_count;
1764+
1765+ nb_pkts = RTE_MIN(nb_pkts, ENIC_TX_XMIT_MAX);
1766+
1767+ hw_ip_cksum_enabled = enic->hw_ip_checksum;
1768+ for (index = 0; index < nb_pkts; index++) {
1769+ tx_pkt = *tx_pkts++;
1770+ nb_segs = tx_pkt->nb_segs;
1771+ if (nb_segs > wq_desc_avail) {
1772+ if (index > 0)
1773+ goto post;
1774+ goto done;
1775+ }
1776+
1777+ pkt_len = tx_pkt->pkt_len;
1778+ data_len = tx_pkt->data_len;
1779+ vlan_id = tx_pkt->vlan_tci;
1780+ ol_flags = tx_pkt->ol_flags;
1781+
1782+ mss = 0;
1783+ vlan_tag_insert = 0;
1784+ bus_addr = (dma_addr_t)
1785+ (tx_pkt->buf_physaddr + tx_pkt->data_off);
1786+
1787+ descs = (struct wq_enet_desc *)wq->ring.descs;
1788+ desc_p = descs + head_idx;
1789+
1790+ eop = (data_len == pkt_len);
1791+
1792+ if (ol_flags & PKT_TX_VLAN_PKT)
1793+ vlan_tag_insert = 1;
1794+
1795+ if (hw_ip_cksum_enabled && (ol_flags & PKT_TX_IP_CKSUM))
1796+ mss |= ENIC_CALC_IP_CKSUM;
1797+
1798+ if (hw_ip_cksum_enabled && (ol_flags & PKT_TX_TCP_UDP_CKSUM))
1799+ mss |= ENIC_CALC_TCP_UDP_CKSUM;
1800+
1801+ wq_enet_desc_enc(&desc_tmp, bus_addr, data_len, mss, 0, 0, eop,
1802+ eop, 0, vlan_tag_insert, vlan_id, 0);
1803+
1804+ *desc_p = desc_tmp;
1805+ buf = &wq->bufs[head_idx];
1806+ buf->mb = (void *)tx_pkt;
1807+ head_idx = enic_ring_incr(desc_count, head_idx);
1808+ wq_desc_avail--;
1809+
1810+ if (!eop) {
1811+ for (tx_pkt = tx_pkt->next; tx_pkt; tx_pkt =
1812+ tx_pkt->next) {
1813+ data_len = tx_pkt->data_len;
1814+
1815+ if (tx_pkt->next == NULL)
1816+ eop = 1;
1817+ desc_p = descs + head_idx;
1818+ bus_addr = (dma_addr_t)(tx_pkt->buf_physaddr
1819+ + tx_pkt->data_off);
1820+ wq_enet_desc_enc((struct wq_enet_desc *)
1821+ &desc_tmp, bus_addr, data_len,
1822+ mss, 0, 0, eop, eop, 0,
1823+ vlan_tag_insert, vlan_id, 0);
1824+
1825+ *desc_p = desc_tmp;
1826+ buf = &wq->bufs[head_idx];
1827+ buf->mb = (void *)tx_pkt;
1828+ head_idx = enic_ring_incr(desc_count, head_idx);
1829+ wq_desc_avail--;
1830+ }
1831+ }
1832+ }
1833+ post:
1834+ rte_wmb();
1835+ iowrite32(head_idx, &wq->ctrl->posted_index);
1836+ done:
1837+ wq->ring.desc_avail = wq_desc_avail;
1838+ wq->head_idx = head_idx;
1839+
1840+ return index;
1841+}
1842--
18432.7.4
1844
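One pattern worth calling out from the new enic_rxtx.c above:
enic_free_wq_bufs() returns completed mbufs with rte_mempool_put_bulk(),
building a batch for as long as consecutive mbufs share a mempool and
flushing whenever the pool changes. A minimal standalone sketch of that
batching pattern (plain ints stand in for mbufs and mempools; this is not
driver code):

    /* Sketch of the "batch by pool, flush on change" free pattern used by
     * enic_free_wq_bufs(); ints stand in for mbufs and their mempools. */
    #include <stdio.h>

    #define MAX_BATCH 8

    static void put_bulk(int pool, int n)
    {
            printf("returning %d buffers to pool %d\n", n, pool);
    }

    int main(void)
    {
            /* pool id of each completed buffer, in completion order */
            int pools[] = { 1, 1, 1, 2, 2, 1 };
            int n = sizeof(pools) / sizeof(pools[0]);
            int batch = 0, cur_pool = pools[0], i;

            for (i = 0; i < n; i++) {
                    if (pools[i] == cur_pool && batch < MAX_BATCH) {
                            batch++;                   /* keep batching same-pool buffers */
                    } else {
                            put_bulk(cur_pool, batch); /* flush the finished batch */
                            cur_pool = pools[i];
                            batch = 1;
                    }
            }
            put_bulk(cur_pool, batch);                 /* flush the final batch */
            return 0;
    }

In the common case of a single mempool per port this collapses the old path's
per-packet rte_mempool_put() calls into one bulk operation per cleanup pass.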