/*
 * AMD 10Gb Ethernet driver
 *
 * This file is available to you under your choice of the following two
 * licenses:
 *
 * License 1: GPLv2
 *
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 *
 * This file is free software; you may copy, redistribute and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or (at
 * your option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * License 2: Modified BSD
 *
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Advanced Micro Devices, Inc. nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "xgbe.h"
#include "xgbe-common.h"

static void xgbe_unmap_rdata(struct xgbe_prv_data *, struct xgbe_ring_data *);

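/* Free a single ring: unmap and release every per-descriptor data entry,
 * drop any cached Rx page allocations, and free the coherent descriptor
 * memory.
 */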
static void xgbe_free_ring(struct xgbe_prv_data *pdata,
			   struct xgbe_ring *ring)
{
	struct xgbe_ring_data *rdata;
	unsigned int i;

	if (!ring)
		return;

	if (ring->rdata) {
		for (i = 0; i < ring->rdesc_count; i++) {
			rdata = XGBE_GET_DESC_DATA(ring, i);
			xgbe_unmap_rdata(pdata, rdata);
		}

		kfree(ring->rdata);
		ring->rdata = NULL;
	}

	if (ring->rx_hdr_pa.pages) {
		dma_unmap_page(pdata->dev, ring->rx_hdr_pa.pages_dma,
			       ring->rx_hdr_pa.pages_len, DMA_FROM_DEVICE);
		put_page(ring->rx_hdr_pa.pages);

		ring->rx_hdr_pa.pages = NULL;
		ring->rx_hdr_pa.pages_len = 0;
		ring->rx_hdr_pa.pages_offset = 0;
		ring->rx_hdr_pa.pages_dma = 0;
	}

	if (ring->rx_buf_pa.pages) {
		dma_unmap_page(pdata->dev, ring->rx_buf_pa.pages_dma,
			       ring->rx_buf_pa.pages_len, DMA_FROM_DEVICE);
		put_page(ring->rx_buf_pa.pages);

		ring->rx_buf_pa.pages = NULL;
		ring->rx_buf_pa.pages_len = 0;
		ring->rx_buf_pa.pages_offset = 0;
		ring->rx_buf_pa.pages_dma = 0;
	}

	if (ring->rdesc) {
		dma_free_coherent(pdata->dev,
				  (sizeof(struct xgbe_ring_desc) *
				   ring->rdesc_count),
				  ring->rdesc, ring->rdesc_dma);
		ring->rdesc = NULL;
	}
}

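/* Release the Tx and Rx rings of every DMA channel. */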
static void xgbe_free_ring_resources(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	DBGPR("-->xgbe_free_ring_resources\n");

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		xgbe_free_ring(pdata, channel->tx_ring);
		xgbe_free_ring(pdata, channel->rx_ring);
	}

	DBGPR("<--xgbe_free_ring_resources\n");
}

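/* Allocate the coherent hardware descriptor array and the parallel rdata
 * array (one struct xgbe_ring_data per descriptor) for a single ring.
 */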
static int xgbe_init_ring(struct xgbe_prv_data *pdata,
			  struct xgbe_ring *ring, unsigned int rdesc_count)
{
	DBGPR("-->xgbe_init_ring\n");

	if (!ring)
		return 0;

	/* Descriptors */
	ring->rdesc_count = rdesc_count;
	ring->rdesc = dma_alloc_coherent(pdata->dev,
					 (sizeof(struct xgbe_ring_desc) *
					  rdesc_count), &ring->rdesc_dma,
					 GFP_KERNEL);
	if (!ring->rdesc)
		return -ENOMEM;

	/* Descriptor information */
	ring->rdata = kcalloc(rdesc_count, sizeof(struct xgbe_ring_data),
			      GFP_KERNEL);
	if (!ring->rdata)
		return -ENOMEM;

	netif_dbg(pdata, drv, pdata->netdev,
		  "rdesc=%p, rdesc_dma=%pad, rdata=%p\n",
		  ring->rdesc, &ring->rdesc_dma, ring->rdata);

	DBGPR("<--xgbe_init_ring\n");

	return 0;
}

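/* Allocate descriptor memory for the Tx and Rx rings of every channel;
 * on any failure, everything allocated so far is released.
 */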
static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;
	int ret;

	DBGPR("-->xgbe_alloc_ring_resources\n");

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		netif_dbg(pdata, drv, pdata->netdev, "%s - Tx ring:\n",
			  channel->name);

		ret = xgbe_init_ring(pdata, channel->tx_ring,
				     pdata->tx_desc_count);
		if (ret) {
			netdev_alert(pdata->netdev,
				     "error initializing Tx ring\n");
			goto err_ring;
		}

		netif_dbg(pdata, drv, pdata->netdev, "%s - Rx ring:\n",
			  channel->name);

		ret = xgbe_init_ring(pdata, channel->rx_ring,
				     pdata->rx_desc_count);
		if (ret) {
			netdev_alert(pdata->netdev,
				     "error initializing Rx ring\n");
			goto err_ring;
		}
	}

	DBGPR("<--xgbe_alloc_ring_resources\n");

	return 0;

err_ring:
	xgbe_free_ring_resources(pdata);

	return ret;
}

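/* Allocate a compound page block for Rx buffers, falling back to smaller
 * orders under memory pressure, and DMA-map it for device writes.
 */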
static int xgbe_alloc_pages(struct xgbe_prv_data *pdata,
			    struct xgbe_page_alloc *pa, gfp_t gfp, int order)
{
	struct page *pages = NULL;
	dma_addr_t pages_dma;
	int ret;

	/* Try to obtain pages, decreasing order if necessary */
	gfp |= __GFP_COLD | __GFP_COMP | __GFP_NOWARN;
	while (order >= 0) {
		pages = alloc_pages(gfp, order);
		if (pages)
			break;

		order--;
	}
	if (!pages)
		return -ENOMEM;

	/* Map the pages */
	pages_dma = dma_map_page(pdata->dev, pages, 0,
				 PAGE_SIZE << order, DMA_FROM_DEVICE);
	ret = dma_mapping_error(pdata->dev, pages_dma);
	if (ret) {
		put_page(pages);
		return ret;
	}

	pa->pages = pages;
	pa->pages_len = PAGE_SIZE << order;
	pa->pages_offset = 0;
	pa->pages_dma = pages_dma;

	return 0;
}

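/* Carve a buffer of @len bytes out of the current page allocation.  The
 * page reference is shared via get_page(); whichever descriptor consumes
 * the final slice inherits responsibility for unmapping the page(s)
 * through pa_unmap.
 */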
static void xgbe_set_buffer_data(struct xgbe_buffer_data *bd,
				 struct xgbe_page_alloc *pa,
				 unsigned int len)
{
	get_page(pa->pages);
	bd->pa = *pa;

	bd->dma_base = pa->pages_dma;
	bd->dma_off = pa->pages_offset;
	bd->dma_len = len;

	pa->pages_offset += len;
	if ((pa->pages_offset + len) > pa->pages_len) {
		/* This data descriptor is responsible for unmapping page(s) */
		bd->pa_unmap = *pa;

		/* Get a new allocation next time */
		pa->pages = NULL;
		pa->pages_len = 0;
		pa->pages_offset = 0;
		pa->pages_dma = 0;
	}
}

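/* Attach header and data buffers to one Rx descriptor entry, replenishing
 * the ring's cached page allocations when they have been used up.
 */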
static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
			      struct xgbe_ring *ring,
			      struct xgbe_ring_data *rdata)
{
	int order, ret;

	if (!ring->rx_hdr_pa.pages) {
		ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, GFP_ATOMIC, 0);
		if (ret)
			return ret;
	}

	if (!ring->rx_buf_pa.pages) {
		order = max_t(int, PAGE_ALLOC_COSTLY_ORDER - 1, 0);
		ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa, GFP_ATOMIC,
				       order);
		if (ret)
			return ret;
	}

	/* Set up the header page info */
	xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
			     XGBE_SKB_ALLOC_SIZE);

	/* Set up the buffer page info */
	xgbe_set_buffer_data(&rdata->rx.buf, &ring->rx_buf_pa,
			     pdata->rx_buf_size);

	return 0;
}

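/* Point each Tx rdata entry at its hardware descriptor, reset the ring
 * state and let the hardware layer initialize the Tx descriptors.
 */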
static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;
	dma_addr_t rdesc_dma;
	unsigned int i, j;

	DBGPR("-->xgbe_wrapper_tx_descriptor_init\n");

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		ring = channel->tx_ring;
		if (!ring)
			break;

		rdesc = ring->rdesc;
		rdesc_dma = ring->rdesc_dma;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);

			rdata->rdesc = rdesc;
			rdata->rdesc_dma = rdesc_dma;

			rdesc++;
			rdesc_dma += sizeof(struct xgbe_ring_desc);
		}

		ring->cur = 0;
		ring->dirty = 0;
		memset(&ring->tx, 0, sizeof(ring->tx));

		hw_if->tx_desc_init(channel);
	}

	DBGPR("<--xgbe_wrapper_tx_descriptor_init\n");
}

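/* Point each Rx rdata entry at its hardware descriptor, attach Rx buffers
 * to it, reset the ring state and let the hardware layer initialize the
 * Rx descriptors.
 */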
static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_ring_desc *rdesc;
	struct xgbe_ring_data *rdata;
	dma_addr_t rdesc_dma;
	unsigned int i, j;

	DBGPR("-->xgbe_wrapper_rx_descriptor_init\n");

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		ring = channel->rx_ring;
		if (!ring)
			break;

		rdesc = ring->rdesc;
		rdesc_dma = ring->rdesc_dma;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);

			rdata->rdesc = rdesc;
			rdata->rdesc_dma = rdesc_dma;

			if (xgbe_map_rx_buffer(pdata, ring, rdata))
				break;

			rdesc++;
			rdesc_dma += sizeof(struct xgbe_ring_desc);
		}

		ring->cur = 0;
		ring->dirty = 0;

		hw_if->rx_desc_init(channel);
	}

	DBGPR("<--xgbe_wrapper_rx_descriptor_init\n");
}

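/* Undo everything attached to one descriptor entry: Tx skb mappings and
 * the skb itself, Rx page references and any page mappings this entry was
 * responsible for, plus any saved receive-processing state.
 */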
static void xgbe_unmap_rdata(struct xgbe_prv_data *pdata,
			     struct xgbe_ring_data *rdata)
{
	if (rdata->skb_dma) {
		if (rdata->mapped_as_page) {
			dma_unmap_page(pdata->dev, rdata->skb_dma,
				       rdata->skb_dma_len, DMA_TO_DEVICE);
		} else {
			dma_unmap_single(pdata->dev, rdata->skb_dma,
					 rdata->skb_dma_len, DMA_TO_DEVICE);
		}
		rdata->skb_dma = 0;
		rdata->skb_dma_len = 0;
	}

	if (rdata->skb) {
		dev_kfree_skb_any(rdata->skb);
		rdata->skb = NULL;
	}

	if (rdata->rx.hdr.pa.pages)
		put_page(rdata->rx.hdr.pa.pages);

	if (rdata->rx.hdr.pa_unmap.pages) {
		dma_unmap_page(pdata->dev, rdata->rx.hdr.pa_unmap.pages_dma,
			       rdata->rx.hdr.pa_unmap.pages_len,
			       DMA_FROM_DEVICE);
		put_page(rdata->rx.hdr.pa_unmap.pages);
	}

	if (rdata->rx.buf.pa.pages)
		put_page(rdata->rx.buf.pa.pages);

	if (rdata->rx.buf.pa_unmap.pages) {
		dma_unmap_page(pdata->dev, rdata->rx.buf.pa_unmap.pages_dma,
			       rdata->rx.buf.pa_unmap.pages_len,
			       DMA_FROM_DEVICE);
		put_page(rdata->rx.buf.pa_unmap.pages);
	}

	memset(&rdata->tx, 0, sizeof(rdata->tx));
	memset(&rdata->rx, 0, sizeof(rdata->rx));

	rdata->mapped_as_page = 0;

	if (rdata->state_saved) {
		rdata->state_saved = 0;
		rdata->state.skb = NULL;
		rdata->state.len = 0;
		rdata->state.error = 0;
	}
}

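/* DMA-map an skb for transmit across one or more descriptor entries: a
 * slot is reserved for a context descriptor when needed (new TSO MSS or
 * VLAN tag), the TSO header is mapped separately, and the linear data and
 * each page fragment are mapped in chunks of at most XGBE_TX_MAX_BUF_SIZE.
 * Returns the number of descriptor entries used, or 0 on failure (all
 * mappings made so far are undone).
 */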
static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_packet_data *packet;
	struct skb_frag_struct *frag;
	dma_addr_t skb_dma;
	unsigned int start_index, cur_index;
	unsigned int offset, tso, vlan, datalen, len;
	unsigned int i;

	DBGPR("-->xgbe_map_tx_skb: cur = %d\n", ring->cur);

	offset = 0;
	start_index = ring->cur;
	cur_index = ring->cur;

	packet = &ring->packet_data;
	packet->rdesc_count = 0;
	packet->length = 0;

	tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			     TSO_ENABLE);
	vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			      VLAN_CTAG);

	/* Save space for a context descriptor if needed */
	if ((tso && (packet->mss != ring->tx.cur_mss)) ||
	    (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag)))
		cur_index++;
	rdata = XGBE_GET_DESC_DATA(ring, cur_index);

	if (tso) {
		/* Map the TSO header */
		skb_dma = dma_map_single(pdata->dev, skb->data,
					 packet->header_len, DMA_TO_DEVICE);
		if (dma_mapping_error(pdata->dev, skb_dma)) {
			netdev_alert(pdata->netdev, "dma_map_single failed\n");
			goto err_out;
		}
		rdata->skb_dma = skb_dma;
		rdata->skb_dma_len = packet->header_len;
		netif_dbg(pdata, tx_queued, pdata->netdev,
			  "skb header: index=%u, dma=%pad, len=%u\n",
			  cur_index, &skb_dma, packet->header_len);

		offset = packet->header_len;

		packet->length += packet->header_len;

		cur_index++;
		rdata = XGBE_GET_DESC_DATA(ring, cur_index);
	}

	/* Map the (remainder of the) packet */
	for (datalen = skb_headlen(skb) - offset; datalen; ) {
		len = min_t(unsigned int, datalen, XGBE_TX_MAX_BUF_SIZE);

		skb_dma = dma_map_single(pdata->dev, skb->data + offset, len,
					 DMA_TO_DEVICE);
		if (dma_mapping_error(pdata->dev, skb_dma)) {
			netdev_alert(pdata->netdev, "dma_map_single failed\n");
			goto err_out;
		}
		rdata->skb_dma = skb_dma;
		rdata->skb_dma_len = len;
		netif_dbg(pdata, tx_queued, pdata->netdev,
			  "skb data: index=%u, dma=%pad, len=%u\n",
			  cur_index, &skb_dma, len);

		datalen -= len;
		offset += len;

		packet->length += len;

		cur_index++;
		rdata = XGBE_GET_DESC_DATA(ring, cur_index);
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		netif_dbg(pdata, tx_queued, pdata->netdev,
			  "mapping frag %u\n", i);

		frag = &skb_shinfo(skb)->frags[i];
		offset = 0;

		for (datalen = skb_frag_size(frag); datalen; ) {
			len = min_t(unsigned int, datalen,
				    XGBE_TX_MAX_BUF_SIZE);

			skb_dma = skb_frag_dma_map(pdata->dev, frag, offset,
						   len, DMA_TO_DEVICE);
			if (dma_mapping_error(pdata->dev, skb_dma)) {
				netdev_alert(pdata->netdev,
					     "skb_frag_dma_map failed\n");
				goto err_out;
			}
			rdata->skb_dma = skb_dma;
			rdata->skb_dma_len = len;
			rdata->mapped_as_page = 1;
			netif_dbg(pdata, tx_queued, pdata->netdev,
				  "skb frag: index=%u, dma=%pad, len=%u\n",
				  cur_index, &skb_dma, len);

			datalen -= len;
			offset += len;

			packet->length += len;

			cur_index++;
			rdata = XGBE_GET_DESC_DATA(ring, cur_index);
		}
	}

	/* Save the skb address in the last entry. We always have some data
	 * that has been mapped so rdata is always advanced past the last
	 * piece of mapped data - use the entry pointed to by cur_index - 1.
	 */
	rdata = XGBE_GET_DESC_DATA(ring, cur_index - 1);
	rdata->skb = skb;

	/* Save the number of descriptor entries used */
	packet->rdesc_count = cur_index - start_index;

	DBGPR("<--xgbe_map_tx_skb: count=%u\n", packet->rdesc_count);

	return packet->rdesc_count;

err_out:
	while (start_index < cur_index) {
		rdata = XGBE_GET_DESC_DATA(ring, start_index++);
		xgbe_unmap_rdata(pdata, rdata);
	}

	DBGPR("<--xgbe_map_tx_skb: count=0\n");

	return 0;
}

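/* Populate the descriptor interface with the routines defined above. */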
void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
{
	DBGPR("-->xgbe_init_function_ptrs_desc\n");

	desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
	desc_if->free_ring_resources = xgbe_free_ring_resources;
	desc_if->map_tx_skb = xgbe_map_tx_skb;
	desc_if->map_rx_buffer = xgbe_map_rx_buffer;
	desc_if->unmap_rdata = xgbe_unmap_rdata;
	desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
	desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;

	DBGPR("<--xgbe_init_function_ptrs_desc\n");
}