/*
 * Copyright (c) 2006, 2009, 2010 QLogic, Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/types.h>
#include <linux/scatterlist.h>

#include "qib_verbs.h"

#define BAD_DMA_ADDRESS ((u64) 0)

/*
 * The following functions implement driver-specific replacements
 * for the ib_dma_*() functions.
 *
 * These functions return kernel virtual addresses instead of
 * device bus addresses since the driver uses the CPU to copy
 * data instead of using hardware DMA.
 */
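
/*
 * For example (a sketch of a typical caller, not code in this file),
 * a verbs consumer doing
 *
 *	addr = ib_dma_map_single(ibdev, buf, len, DMA_TO_DEVICE);
 *	if (ib_dma_mapping_error(ibdev, addr))
 *		return -ENOMEM;
 *
 * is dispatched through qib_dma_mapping_ops (defined at the bottom of
 * this file), so addr ends up holding the kernel virtual address of
 * buf rather than a bus address.
 */

/* A "mapping" can only fail by producing BAD_DMA_ADDRESS, i.e. NULL. */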
static int qib_mapping_error(struct ib_device *dev, u64 dma_addr)
{
	return dma_addr == BAD_DMA_ADDRESS;
}
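
/*
 * "Mapping" a single buffer just hands back its kernel virtual
 * address; no IOMMU or bounce buffering is involved.
 */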
static u64 qib_dma_map_single(struct ib_device *dev, void *cpu_addr,
			      size_t size, enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
	return (u64) cpu_addr;
}
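
/* Nothing was really mapped, so there is nothing to undo. */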
static void qib_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size,
				 enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}
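
/*
 * Map one page: the result is page_address() plus the offset.  The
 * mapping fails (returns BAD_DMA_ADDRESS) if the range would cross
 * the page boundary, or if the page is in highmem, where
 * page_address() yields NULL.
 */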
static u64 qib_dma_map_page(struct ib_device *dev, struct page *page,
			    unsigned long offset, size_t size,
			    enum dma_data_direction direction)
{
	u64 addr;

	BUG_ON(!valid_dma_direction(direction));

	if (offset + size > PAGE_SIZE) {
		addr = BAD_DMA_ADDRESS;
		goto done;
	}

	addr = (u64) page_address(page);
	if (addr)
		addr += offset;
	/* TODO: handle highmem pages */

done:
	return addr;
}
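
/* No-op: see qib_dma_unmap_single(). */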
static void qib_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size,
			       enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}
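
/*
 * "Map" a scatterlist by storing each entry's kernel virtual address
 * in sg->dma_address.  Returns the number of entries mapped, or 0 if
 * any entry is a highmem page (page_address() returned NULL).
 */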
static int qib_map_sg(struct ib_device *dev, struct scatterlist *sgl,
		      int nents, enum dma_data_direction direction)
{
	struct scatterlist *sg;
	u64 addr;
	int i;
	int ret = nents;

	BUG_ON(!valid_dma_direction(direction));

	for_each_sg(sgl, sg, nents, i) {
		addr = (u64) page_address(sg_page(sg));
		/* TODO: handle highmem pages */
		if (!addr) {
			ret = 0;
			break;
		}
		sg->dma_address = addr + sg->offset;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		sg->dma_length = sg->length;
#endif
	}
	return ret;
}
118
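
/* No-op: qib_map_sg() did not reserve any resources. */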
static void qib_unmap_sg(struct ib_device *dev,
			 struct scatterlist *sg, int nents,
			 enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}
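
/*
 * A no-op: the CPU owns the buffer at all times, so there is never
 * anything to sync.
 */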
static void qib_sync_single_for_cpu(struct ib_device *dev, u64 addr,
				    size_t size, enum dma_data_direction dir)
{
}
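
/* Likewise a no-op; there is no device-side view to make coherent. */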
static void qib_sync_single_for_device(struct ib_device *dev, u64 addr,
				       size_t size,
				       enum dma_data_direction dir)
{
}
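
/*
 * "Coherent" memory is ordinary kernel memory here, allocated with
 * alloc_pages() (and therefore rounded up to a power-of-two number of
 * pages).  The DMA handle is again the kernel virtual address.
 */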
static void *qib_dma_alloc_coherent(struct ib_device *dev, size_t size,
				    u64 *dma_handle, gfp_t flag)
{
	struct page *p;
	void *addr = NULL;

	p = alloc_pages(flag, get_order(size));
	if (p)
		addr = page_address(p);
	if (dma_handle)
		*dma_handle = (u64) addr;
	return addr;
}
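
/* Free pages obtained from qib_dma_alloc_coherent(). */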
static void qib_dma_free_coherent(struct ib_device *dev, size_t size,
				  void *cpu_addr, u64 dma_handle)
{
	free_pages((unsigned long) cpu_addr, get_order(size));
}
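
/*
 * Dispatch table installed in struct ib_device's dma_ops when the
 * device is registered, so the core ib_dma_*() helpers call the
 * functions above instead of the real DMA API.
 */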
struct ib_dma_mapping_ops qib_dma_mapping_ops = {
	.mapping_error = qib_mapping_error,
	.map_single = qib_dma_map_single,
	.unmap_single = qib_dma_unmap_single,
	.map_page = qib_dma_map_page,
	.unmap_page = qib_dma_unmap_page,
	.map_sg = qib_map_sg,
	.unmap_sg = qib_unmap_sg,
	.sync_single_for_cpu = qib_sync_single_for_cpu,
	.sync_single_for_device = qib_sync_single_for_device,
	.alloc_coherent = qib_dma_alloc_coherent,
	.free_coherent = qib_dma_free_coherent
};