/*
 * SWIOTLB-based DMA API implementation
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <asm/cacheflush.h>
struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);
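/*
 * Coherent allocations come from the CMA area when CONFIG_DMA_CMA is
 * enabled, otherwise from the swiotlb pool.  Devices whose coherent mask
 * covers no more than 32 bits are steered into ZONE_DMA via GFP_DMA.
 */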
static void *__dma_alloc_coherent(struct device *dev, size_t size,
                                  dma_addr_t *dma_handle, gfp_t flags,
                                  struct dma_attrs *attrs)
{
        if (dev == NULL) {
                WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
                return NULL;
        }

        if (IS_ENABLED(CONFIG_ZONE_DMA) &&
            dev->coherent_dma_mask <= DMA_BIT_MASK(32))
                flags |= GFP_DMA;
        if (IS_ENABLED(CONFIG_DMA_CMA)) {
                struct page *page;

                size = PAGE_ALIGN(size);
                page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
                                                 get_order(size));
                if (!page)
                        return NULL;

                *dma_handle = phys_to_dma(dev, page_to_phys(page));
                return page_address(page);
        } else {
                return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
        }
}
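/*
 * Mirror of __dma_alloc_coherent(): CMA pages go back to the contiguous
 * pool, swiotlb buffers back to the swiotlb allocator.
 */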
static void __dma_free_coherent(struct device *dev, size_t size,
                                void *vaddr, dma_addr_t dma_handle,
                                struct dma_attrs *attrs)
{
        if (dev == NULL) {
                WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
                return;
        }

        if (IS_ENABLED(CONFIG_DMA_CMA)) {
                phys_addr_t paddr = dma_to_phys(dev, dma_handle);

                dma_release_from_contiguous(dev,
                                            phys_to_page(paddr),
                                            size >> PAGE_SHIFT);
        } else {
                swiotlb_free_coherent(dev, size, vaddr, dma_handle);
        }
}
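/*
 * For non-coherent devices the backing pages are remapped with vmap()
 * using non-cacheable (DMA-coherent) attributes so that the CPU and the
 * device see the same data.  Dirty lines on the cacheable kernel linear
 * alias are flushed first so no stale write-back can race with the device.
 */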
static void *__dma_alloc_noncoherent(struct device *dev, size_t size,
                                     dma_addr_t *dma_handle, gfp_t flags,
                                     struct dma_attrs *attrs)
{
        struct page *page, **map;
        void *ptr, *coherent_ptr;
        int order, i;

        size = PAGE_ALIGN(size);
        order = get_order(size);

        ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
        if (!ptr)
                goto no_mem;
        map = kmalloc(sizeof(struct page *) << order,
                      flags & ~(GFP_DMA | GFP_DMA32));
        if (!map)
                goto no_map;

        /* remove any dirty cache lines on the kernel alias */
        __dma_flush_range(ptr, ptr + size);

        /* create a coherent mapping */
        page = virt_to_page(ptr);
        for (i = 0; i < (size >> PAGE_SHIFT); i++)
                map[i] = page + i;
        coherent_ptr = vmap(map, size >> PAGE_SHIFT, VM_MAP,
                            pgprot_dmacoherent(pgprot_default));
        kfree(map);
        if (!coherent_ptr)
                goto no_map;

        return coherent_ptr;

no_map:
        __dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
no_mem:
        *dma_handle = ~0;
        return NULL;
}
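/*
 * vaddr is the vmap() alias created above; the linear-map address to pass
 * down to __dma_free_coherent() is recovered from the dma_handle.
 */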
static void __dma_free_noncoherent(struct device *dev, size_t size,
                                   void *vaddr, dma_addr_t dma_handle,
                                   struct dma_attrs *attrs)
{
        void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));

        vunmap(vaddr);
        __dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
}
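/*
 * Streaming DMA: wrappers around the generic swiotlb routines that add the
 * CPU cache maintenance arm64 needs on non-coherent systems.
 * __dma_map_area() makes CPU-side writes visible to the device before it
 * touches the buffer; __dma_unmap_area() removes potentially stale cache
 * lines before the CPU reads data written by the device.  Maintenance is
 * performed on the kernel alias of the (possibly bounced) buffer, hence
 * the phys_to_virt(dma_to_phys()) round trip.
 */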
static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
                                     unsigned long offset, size_t size,
                                     enum dma_data_direction dir,
                                     struct dma_attrs *attrs)
{
        dma_addr_t dev_addr;

        dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
        __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);

        return dev_addr;
}
static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
                                 size_t size, enum dma_data_direction dir,
                                 struct dma_attrs *attrs)
{
        __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
        swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
}
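/*
 * Scatter-gather variants: the same maintenance, applied per segment using
 * each scatterlist entry's dma_address and length.
 */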
static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
                                  int nelems, enum dma_data_direction dir,
                                  struct dma_attrs *attrs)
{
        struct scatterlist *sg;
        int i, ret;

        ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
        for_each_sg(sgl, sg, ret, i)
                __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
                               sg->length, dir);

        return ret;
}
static void __swiotlb_unmap_sg_attrs(struct device *dev,
                                     struct scatterlist *sgl, int nelems,
                                     enum dma_data_direction dir,
                                     struct dma_attrs *attrs)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nelems, i)
                __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
                                 sg->length, dir);
        swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs);
}
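/*
 * The sync_*_for_cpu hooks hand buffer ownership back to the CPU
 * (maintenance before the CPU reads); sync_*_for_device hands it to the
 * device (maintenance before the device accesses the buffer).
 */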
static void __swiotlb_sync_single_for_cpu(struct device *dev,
                                          dma_addr_t dev_addr, size_t size,
                                          enum dma_data_direction dir)
{
        __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
        swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir);
}
static void __swiotlb_sync_single_for_device(struct device *dev,
                                             dma_addr_t dev_addr, size_t size,
                                             enum dma_data_direction dir)
{
        swiotlb_sync_single_for_device(dev, dev_addr, size, dir);
        __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
}
static void __swiotlb_sync_sg_for_cpu(struct device *dev,
                                      struct scatterlist *sgl, int nelems,
                                      enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nelems, i)
                __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
                                 sg->length, dir);
        swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir);
}
static void __swiotlb_sync_sg_for_device(struct device *dev,
                                         struct scatterlist *sgl, int nelems,
                                         enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        swiotlb_sync_sg_for_device(dev, sgl, nelems, dir);
        for_each_sg(sgl, sg, nelems, i)
                __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
                               sg->length, dir);
}
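/*
 * Two ops tables are exported: the non-coherent one routes through the
 * cache-maintaining wrappers above, while the coherent one maps straight
 * onto the generic swiotlb helpers.  The appropriate table is presumably
 * selected per device by platform/bus code.
 */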
struct dma_map_ops noncoherent_swiotlb_dma_ops = {
        .alloc = __dma_alloc_noncoherent,
        .free = __dma_free_noncoherent,
        .map_page = __swiotlb_map_page,
        .unmap_page = __swiotlb_unmap_page,
        .map_sg = __swiotlb_map_sg_attrs,
        .unmap_sg = __swiotlb_unmap_sg_attrs,
        .sync_single_for_cpu = __swiotlb_sync_single_for_cpu,
        .sync_single_for_device = __swiotlb_sync_single_for_device,
        .sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
        .sync_sg_for_device = __swiotlb_sync_sg_for_device,
        .dma_supported = swiotlb_dma_supported,
        .mapping_error = swiotlb_dma_mapping_error,
};
EXPORT_SYMBOL(noncoherent_swiotlb_dma_ops);
struct dma_map_ops coherent_swiotlb_dma_ops = {
        .alloc = __dma_alloc_coherent,
        .free = __dma_free_coherent,
        .map_page = swiotlb_map_page,
        .unmap_page = swiotlb_unmap_page,
        .map_sg = swiotlb_map_sg_attrs,
        .unmap_sg = swiotlb_unmap_sg_attrs,
        .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
        .sync_single_for_device = swiotlb_sync_single_for_device,
        .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
        .sync_sg_for_device = swiotlb_sync_sg_for_device,
        .dma_supported = swiotlb_dma_supported,
        .mapping_error = swiotlb_dma_mapping_error,
};
EXPORT_SYMBOL(coherent_swiotlb_dma_ops);
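/*
 * Default to the non-coherent ops at boot: safe on any system, at the
 * cost of explicit cache maintenance.  swiotlb_init() sets up the bounce
 * buffer pool used for devices with addressing restrictions.
 */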
void __init arm64_swiotlb_init(void)
{
        dma_ops = &noncoherent_swiotlb_dma_ops;
        swiotlb_init(1);
}
#define PREALLOC_DMA_DEBUG_ENTRIES      4096

static int __init dma_debug_do_init(void)
{
        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
        return 0;
}
fs_initcall(dma_debug_do_init);