/*
 * SWIOTLB-based DMA API implementation
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <asm/cacheflush.h>

struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

static void *__dma_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags,
				  struct dma_attrs *attrs)
{
	if (dev == NULL) {
		WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
		return NULL;
	}

	if (IS_ENABLED(CONFIG_ZONE_DMA) &&
	    dev->coherent_dma_mask <= DMA_BIT_MASK(32))
		flags |= GFP_DMA;
	if (IS_ENABLED(CONFIG_DMA_CMA)) {
		struct page *page;

		size = PAGE_ALIGN(size);
		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
						 get_order(size));
		if (!page)
			return NULL;

		*dma_handle = phys_to_dma(dev, page_to_phys(page));
		return page_address(page);
	} else {
		return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
	}
}
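
/*
 * Illustrative sketch (added commentary, not part of the original file):
 * this is roughly how a driver reaches the ->alloc/->free hooks above
 * through the generic DMA API.  The device pointer is assumed to be a real,
 * probed device; the PAGE_SIZE buffer length is a hypothetical example.
 */
static void __maybe_unused example_coherent_usage(struct device *dev)
{
	dma_addr_t dma_handle;
	void *cpu_addr;

	/* dma_alloc_coherent() dispatches to ->alloc in the installed ops */
	cpu_addr = dma_alloc_coherent(dev, PAGE_SIZE, &dma_handle, GFP_KERNEL);
	if (!cpu_addr)
		return;

	/* ... program the device with dma_handle; the CPU uses cpu_addr ... */

	dma_free_coherent(dev, PAGE_SIZE, cpu_addr, dma_handle);
}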

static void __dma_free_coherent(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle,
				struct dma_attrs *attrs)
{
	if (dev == NULL) {
		WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
		return;
	}

	if (IS_ENABLED(CONFIG_DMA_CMA)) {
		phys_addr_t paddr = dma_to_phys(dev, dma_handle);

		dma_release_from_contiguous(dev,
					    phys_to_page(paddr),
					    size >> PAGE_SHIFT);
	} else {
		swiotlb_free_coherent(dev, size, vaddr, dma_handle);
	}
}

static void *__dma_alloc_noncoherent(struct device *dev, size_t size,
				     dma_addr_t *dma_handle, gfp_t flags,
				     struct dma_attrs *attrs)
{
	struct page *page, **map;
	void *ptr, *coherent_ptr;
	int order, i;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
	if (!ptr)
		goto no_mem;
	map = kmalloc(sizeof(struct page *) << order,
		      flags & ~(GFP_DMA | GFP_DMA32));
	if (!map)
		goto no_map;

	/* remove any dirty cache lines on the kernel alias */
	__dma_flush_range(ptr, ptr + size);

	/* create a coherent mapping */
	page = virt_to_page(ptr);
	for (i = 0; i < (size >> PAGE_SHIFT); i++)
		map[i] = page + i;
	coherent_ptr = vmap(map, size >> PAGE_SHIFT, VM_MAP,
			    pgprot_dmacoherent(pgprot_default));
	kfree(map);
	if (!coherent_ptr)
		goto no_map;

	return coherent_ptr;

no_map:
	__dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
no_mem:
	*dma_handle = ~0;
	return NULL;
}
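
/*
 * Note (added commentary, not in the original file): the non-coherent path
 * above allocates ordinary memory, cleans the cacheable kernel linear alias
 * with __dma_flush_range(), and then builds a second vmap() alias of the
 * same pages using pgprot_dmacoherent() attributes.  CPU accesses through
 * the returned pointer bypass the data caches, so no further maintenance is
 * needed while the buffer is in use.
 */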

static void __dma_free_noncoherent(struct device *dev, size_t size,
				   void *vaddr, dma_addr_t dma_handle,
				   struct dma_attrs *attrs)
{
	void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));

	vunmap(vaddr);
	__dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
}

static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction dir,
				     struct dma_attrs *attrs)
{
	dma_addr_t dev_addr;

	dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
	__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);

	return dev_addr;
}

static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
				 size_t size, enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
	swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
}
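
/*
 * Illustrative sketch (added commentary, not part of the original file):
 * a typical streaming mapping of a single kernel buffer, which lands in the
 * map/unmap page hooks above on the non-coherent path.  "buf" and "len" are
 * hypothetical placeholders supplied by the caller.
 */
static void __maybe_unused example_streaming_usage(struct device *dev,
						   void *buf, size_t len)
{
	dma_addr_t dev_addr;

	/* CPU-written data must reach the device: map for DMA_TO_DEVICE */
	dev_addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dev_addr))
		return;

	/* ... hand dev_addr to the device and wait for completion ... */

	dma_unmap_single(dev, dev_addr, len, DMA_TO_DEVICE);
}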

static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
				  int nelems, enum dma_data_direction dir,
				  struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i, ret;

	ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
	for_each_sg(sgl, sg, ret, i)
		__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
			       sg->length, dir);

	return ret;
}

static void __swiotlb_unmap_sg_attrs(struct device *dev,
				     struct scatterlist *sgl, int nelems,
				     enum dma_data_direction dir,
				     struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i)
		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
				 sg->length, dir);
	swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs);
}
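
/*
 * Illustrative sketch (added commentary, not part of the original file):
 * mapping a small two-entry scatterlist, which exercises the sg hooks
 * above.  Assumes <linux/scatterlist.h> is available; the two buffers and
 * their length are hypothetical.
 */
static void __maybe_unused example_sg_usage(struct device *dev,
					    void *a, void *b, size_t len)
{
	struct scatterlist sgl[2];
	int nents;

	sg_init_table(sgl, 2);
	sg_set_buf(&sgl[0], a, len);
	sg_set_buf(&sgl[1], b, len);

	/* maps (and cache-maintains) every entry; returns 0 on failure */
	nents = dma_map_sg(dev, sgl, 2, DMA_FROM_DEVICE);
	if (!nents)
		return;

	/* ... program the device with sg_dma_address()/sg_dma_len() pairs ... */

	dma_unmap_sg(dev, sgl, 2, DMA_FROM_DEVICE);
}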

static void __swiotlb_sync_single_for_cpu(struct device *dev,
					  dma_addr_t dev_addr, size_t size,
					  enum dma_data_direction dir)
{
	__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
	swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir);
}

static void __swiotlb_sync_single_for_device(struct device *dev,
					     dma_addr_t dev_addr, size_t size,
					     enum dma_data_direction dir)
{
	swiotlb_sync_single_for_device(dev, dev_addr, size, dir);
	__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
}
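
/*
 * Illustrative sketch (added commentary, not part of the original file):
 * re-using a long-lived streaming mapping with explicit ownership
 * transfers, which is what the sync hooks above implement.  "dev_addr" and
 * "len" are hypothetical, assumed to come from an earlier dma_map_single().
 */
static void __maybe_unused example_sync_usage(struct device *dev,
					      dma_addr_t dev_addr, size_t len)
{
	/* give the buffer back to the CPU before reading DMA'd data */
	dma_sync_single_for_cpu(dev, dev_addr, len, DMA_FROM_DEVICE);

	/* ... CPU inspects the received data ... */

	/* hand ownership back to the device for the next transfer */
	dma_sync_single_for_device(dev, dev_addr, len, DMA_FROM_DEVICE);
}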

static void __swiotlb_sync_sg_for_cpu(struct device *dev,
				      struct scatterlist *sgl, int nelems,
				      enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i)
		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
				 sg->length, dir);
	swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir);
}

static void __swiotlb_sync_sg_for_device(struct device *dev,
					 struct scatterlist *sgl, int nelems,
					 enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	swiotlb_sync_sg_for_device(dev, sgl, nelems, dir);
	for_each_sg(sgl, sg, nelems, i)
		__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
			       sg->length, dir);
}
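
/*
 * Note (added commentary, not in the original file): the ordering in the
 * sync helpers is deliberately symmetric.  On the CPU-ownership side, cache
 * maintenance (__dma_unmap_area) runs before the swiotlb call so that any
 * bounce-buffer copy back to the caller reads fresh data from memory; on
 * the device-ownership side, swiotlb copies into the bounce buffer first
 * and the cache is cleaned (__dma_map_area) afterwards, so the device
 * always observes the final memory contents.
 */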

struct dma_map_ops noncoherent_swiotlb_dma_ops = {
	.alloc = __dma_alloc_noncoherent,
	.free = __dma_free_noncoherent,
	.map_page = __swiotlb_map_page,
	.unmap_page = __swiotlb_unmap_page,
	.map_sg = __swiotlb_map_sg_attrs,
	.unmap_sg = __swiotlb_unmap_sg_attrs,
	.sync_single_for_cpu = __swiotlb_sync_single_for_cpu,
	.sync_single_for_device = __swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = __swiotlb_sync_sg_for_device,
	.dma_supported = swiotlb_dma_supported,
	.mapping_error = swiotlb_dma_mapping_error,
};
EXPORT_SYMBOL(noncoherent_swiotlb_dma_ops);

struct dma_map_ops coherent_swiotlb_dma_ops = {
	.alloc = __dma_alloc_coherent,
	.free = __dma_free_coherent,
	.map_page = swiotlb_map_page,
	.unmap_page = swiotlb_unmap_page,
	.map_sg = swiotlb_map_sg_attrs,
	.unmap_sg = swiotlb_unmap_sg_attrs,
	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
	.sync_single_for_device = swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = swiotlb_sync_sg_for_device,
	.dma_supported = swiotlb_dma_supported,
	.mapping_error = swiotlb_dma_mapping_error,
};
EXPORT_SYMBOL(coherent_swiotlb_dma_ops);
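
/*
 * Note (added commentary, not in the original file): two complete ops
 * tables are exported.  The non-coherent table wraps every swiotlb
 * operation with explicit cache maintenance; the coherent table calls the
 * swiotlb helpers directly and suits systems whose DMA masters are
 * hardware cache-coherent.  arm64_swiotlb_init() below installs the
 * non-coherent table as the conservative default.
 */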

void __init arm64_swiotlb_init(void)
{
	dma_ops = &noncoherent_swiotlb_dma_ops;
	swiotlb_init(1);
}

#define PREALLOC_DMA_DEBUG_ENTRIES	4096

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);