/*
 *  Copyright 2010
 *  by Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 *
 *  This code provides an IOMMU for Xen PV guests with PCI passthrough.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License v2.0 as published by
 *  the Free Software Foundation
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 * PV guests under Xen are running in a non-contiguous memory architecture.
 *
 * When PCI pass-through is utilized, this necessitates an IOMMU for
 * translating bus (DMA) addresses to virtual addresses and vice versa, and
 * also a mechanism to provide contiguous pages for device driver
 * operations (say DMA operations).
 *
 * Specifically, under Xen the Linux idea of pages is an illusion. It
 * assumes that pages start at zero and go up to the available memory. To
 * help with that, the Linux Xen MMU provides a lookup mechanism to
 * translate the page frame numbers (PFN) to machine frame numbers (MFN)
 * and vice versa. The MFNs are the "real" frame numbers. Furthermore,
 * memory is not contiguous: the Xen hypervisor stitches memory for guests
 * from different pools, which means there is no guarantee that PFN==MFN
 * and PFN+1==MFN+1. Lastly, with Xen 4.0, pages (in debug mode) are
 * allocated in descending order (high to low), meaning the guest might
 * never get any MFNs under the 4GB mark.
 *
 */

#include <linux/bootmem.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <xen/swiotlb-xen.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/hvc-console.h>
/*
 * Used to do a quick range check in swiotlb_tbl_unmap_single and
 * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */

static char *xen_io_tlb_start, *xen_io_tlb_end;
static unsigned long xen_io_tlb_nslabs;
/*
 * Quick lookup value of the bus address of the IOTLB.
 */

static u64 start_dma_addr;

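/*
 * Helpers to translate between guest pseudo-physical addresses and
 * machine (bus) addresses, using the PFN<->MFN lookup machinery described
 * in the header comment above.
 */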
static dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
{
        return phys_to_machine(XPADDR(paddr)).maddr;
}

static phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
{
        return machine_to_phys(XMADDR(baddr)).paddr;
}

static dma_addr_t xen_virt_to_bus(void *address)
{
        return xen_phys_to_bus(virt_to_phys(address));
}

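/*
 * A buffer that is contiguous in pseudo-physical (PFN) space is not
 * necessarily contiguous in machine (MFN) space.  The two helpers below
 * check whether every guest page covered by [p, p + size) maps to
 * consecutive MFNs, so callers can tell if a multi-page buffer is safe
 * to hand to a device as-is.
 */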
static int check_pages_physically_contiguous(unsigned long pfn,
                                             unsigned int offset,
                                             size_t length)
{
        unsigned long next_mfn;
        int i;
        int nr_pages;

        next_mfn = pfn_to_mfn(pfn);
        nr_pages = (offset + length + PAGE_SIZE - 1) >> PAGE_SHIFT;

        for (i = 1; i < nr_pages; i++) {
                if (pfn_to_mfn(++pfn) != ++next_mfn)
                        return 0;
        }
        return 1;
}

static int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
        unsigned long pfn = PFN_DOWN(p);
        unsigned int offset = p & ~PAGE_MASK;

        if (offset + size <= PAGE_SIZE)
                return 0;
        if (check_pages_physically_contiguous(pfn, offset, size))
                return 0;
        return 1;
}

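/*
 * Returns non-zero if the bus address falls inside our bounce buffer.
 * The MFN is first translated back to a local PFN so that foreign pages
 * (which may alias addresses in this domain) are never mis-detected.
 */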
static int is_xen_swiotlb_buffer(dma_addr_t dma_addr)
{
        unsigned long mfn = PFN_DOWN(dma_addr);
        unsigned long pfn = mfn_to_local_pfn(mfn);
        phys_addr_t paddr;

        /* If the address is outside our domain, it CAN
         * have the same virtual address as another address
         * in our domain. Therefore _only_ check address within our domain.
         */
        if (pfn_valid(pfn)) {
                paddr = PFN_PHYS(pfn);
                return paddr >= virt_to_phys(xen_io_tlb_start) &&
                       paddr < virt_to_phys(xen_io_tlb_end);
        }
        return 0;
}

static int max_dma_bits = 32;

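/*
 * Exchange the memory backing the IO TLB, one IO_TLB_SEGSIZE-slab chunk
 * at a time, for machine pages that are physically contiguous and below
 * the requested DMA mask.  If the hypervisor cannot satisfy the initial
 * mask, the address width is widened one bit at a time up to max_dma_bits
 * before giving up.
 */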
static int
xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
{
        int i, rc;
        int dma_bits;

        dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;

        i = 0;
        do {
                int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE);

                do {
                        rc = xen_create_contiguous_region(
                                (unsigned long)buf + (i << IO_TLB_SHIFT),
                                get_order(slabs << IO_TLB_SHIFT),
                                dma_bits);
                } while (rc && dma_bits++ < max_dma_bits);
                if (rc)
                        return rc;

                i += slabs;
        } while (i < nslabs);
        return 0;
}
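/*
 * Pick the number of IO TLB slabs.  A non-zero value passed by the caller
 * (e.g. the result of swiotlb_nr_tbl()) is used as-is; otherwise default
 * to 64MB worth of slabs, rounded up to IO_TLB_SEGSIZE.  Returns the
 * resulting table size in bytes.
 */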
static unsigned long xen_set_nslabs(unsigned long nr_tbl)
{
        if (!nr_tbl) {
                xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
                xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
        } else
                xen_io_tlb_nslabs = nr_tbl;

        return xen_io_tlb_nslabs << IO_TLB_SHIFT;
}

enum xen_swiotlb_err {
        XEN_SWIOTLB_UNKNOWN = 0,
        XEN_SWIOTLB_ENOMEM,
        XEN_SWIOTLB_EFIXUP
};

static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
{
        switch (err) {
        case XEN_SWIOTLB_ENOMEM:
                return "Cannot allocate Xen-SWIOTLB buffer\n";
        case XEN_SWIOTLB_EFIXUP:
                return "Failed to get contiguous memory for DMA from Xen!\n"
                       "You either: don't have the permissions, do not have"
                       " enough free memory under 4GB, or the hypervisor memory"
                       " is too fragmented!";
        default:
                break;
        }
        return "";
}
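/*
 * Allocate and publish the Xen software IO TLB.  Memory comes from bootmem
 * when called early in boot and from the page allocator otherwise; it is
 * then exchanged for machine memory below 4GB via xen_swiotlb_fixup().
 * On failure the requested size is halved (down to a 2MB minimum) and the
 * whole sequence is retried a few times before giving up.
 */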
int __ref xen_swiotlb_init(int verbose, bool early)
{
        unsigned long bytes, order;
        int rc = -ENOMEM;
        enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
        unsigned int repeat = 3;

        xen_io_tlb_nslabs = swiotlb_nr_tbl();
retry:
        bytes = xen_set_nslabs(xen_io_tlb_nslabs);
        order = get_order(xen_io_tlb_nslabs << IO_TLB_SHIFT);
        /*
         * Get IO TLB memory from any location.
         */
        if (early)
                xen_io_tlb_start = alloc_bootmem_pages(PAGE_ALIGN(bytes));
        else {
#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
                while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
                        xen_io_tlb_start = (void *)__get_free_pages(__GFP_NOWARN, order);
                        if (xen_io_tlb_start)
                                break;
                        order--;
                }
                if (order != get_order(bytes)) {
                        pr_warn("Warning: only able to allocate %ld MB "
                                "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
                        xen_io_tlb_nslabs = SLABS_PER_PAGE << order;
                        bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
                }
        }
        if (!xen_io_tlb_start) {
                m_ret = XEN_SWIOTLB_ENOMEM;
                goto error;
        }
        xen_io_tlb_end = xen_io_tlb_start + bytes;
        /*
         * And replace that memory with pages under 4GB.
         */
        rc = xen_swiotlb_fixup(xen_io_tlb_start,
                               bytes,
                               xen_io_tlb_nslabs);
        if (rc) {
                if (early)
                        free_bootmem(__pa(xen_io_tlb_start), PAGE_ALIGN(bytes));
                else {
                        free_pages((unsigned long)xen_io_tlb_start, order);
                        xen_io_tlb_start = NULL;
                }
                m_ret = XEN_SWIOTLB_EFIXUP;
                goto error;
        }
        start_dma_addr = xen_virt_to_bus(xen_io_tlb_start);
        if (early) {
                if (swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs,
                                          verbose))
                        panic("Cannot allocate SWIOTLB buffer");
                rc = 0;
        } else
                rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs);
        return rc;
error:
        if (repeat--) {
                xen_io_tlb_nslabs = max(1024UL, /* Min is 2MB */
                                        (xen_io_tlb_nslabs >> 1));
                printk(KERN_INFO "Xen-SWIOTLB: Lowering to %luMB\n",
                       (xen_io_tlb_nslabs << IO_TLB_SHIFT) >> 20);
                goto retry;
        }
        pr_err("%s (rc:%d)", xen_swiotlb_error(m_ret), rc);
        if (early)
                panic("%s (rc:%d)", xen_swiotlb_error(m_ret), rc);
        else
                free_pages((unsigned long)xen_io_tlb_start, order);
        return rc;
}
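/*
 * Coherent allocations: grab pages from the kernel allocator and, if the
 * resulting machine address does not fit the device's coherent DMA mask
 * or is not machine-contiguous, exchange it with Xen for a contiguous
 * region that does.
 */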
void *
xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                           dma_addr_t *dma_handle, gfp_t flags,
                           struct dma_attrs *attrs)
{
        void *ret;
        int order = get_order(size);
        u64 dma_mask = DMA_BIT_MASK(32);
        unsigned long vstart;
        phys_addr_t phys;
        dma_addr_t dev_addr;

        /*
         * Ignore region specifiers - the kernel's idea of
         * pseudo-phys memory layout has nothing to do with the
         * machine physical layout.  We can't allocate highmem
         * because we can't return a pointer to it.
         */
        flags &= ~(__GFP_DMA | __GFP_HIGHMEM);

        if (dma_alloc_from_coherent(hwdev, size, dma_handle, &ret))
                return ret;

        vstart = __get_free_pages(flags, order);
        ret = (void *)vstart;

        if (!ret)
                return ret;

        if (hwdev && hwdev->coherent_dma_mask)
                dma_mask = dma_alloc_coherent_mask(hwdev, flags);

        phys = virt_to_phys(ret);
        dev_addr = xen_phys_to_bus(phys);
        if ((dev_addr + size - 1 <= dma_mask) &&
            !range_straddles_page_boundary(phys, size))
                *dma_handle = dev_addr;
        else {
                if (xen_create_contiguous_region(vstart, order,
                                                 fls64(dma_mask)) != 0) {
                        free_pages(vstart, order);
                        return NULL;
                }
                *dma_handle = virt_to_machine(ret).maddr;
        }
        memset(ret, 0, size);
        return ret;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_alloc_coherent);

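/*
 * Free a coherent allocation.  If the buffer had to be exchanged with Xen
 * at allocation time, the contiguous machine region is handed back before
 * the underlying pages are freed.
 */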
void
xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
                          dma_addr_t dev_addr, struct dma_attrs *attrs)
{
        int order = get_order(size);
        phys_addr_t phys;
        u64 dma_mask = DMA_BIT_MASK(32);

        if (dma_release_from_coherent(hwdev, order, vaddr))
                return;

        if (hwdev && hwdev->coherent_dma_mask)
                dma_mask = hwdev->coherent_dma_mask;

        phys = virt_to_phys(vaddr);

        if ((dev_addr + size - 1 > dma_mask) ||
            range_straddles_page_boundary(phys, size))
                xen_destroy_contiguous_region((unsigned long)vaddr, order);

        free_pages((unsigned long)vaddr, order);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_free_coherent);


/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either xen_swiotlb_unmap_page or xen_swiotlb_dma_sync_single is performed.
 */
dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
                                unsigned long offset, size_t size,
                                enum dma_data_direction dir,
                                struct dma_attrs *attrs)
{
        phys_addr_t map, phys = page_to_phys(page) + offset;
        dma_addr_t dev_addr = xen_phys_to_bus(phys);

        BUG_ON(dir == DMA_NONE);
        /*
         * If the address happens to be in the device's DMA window,
         * we can safely return the device addr and not worry about bounce
         * buffering it.
         */
        if (dma_capable(dev, dev_addr, size) &&
            !range_straddles_page_boundary(phys, size) && !swiotlb_force)
                return dev_addr;

        /*
         * Oh well, have to allocate and map a bounce buffer.
         */
        map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir);
        if (map == SWIOTLB_MAP_ERROR)
                return DMA_ERROR_CODE;

        dev_addr = xen_phys_to_bus(map);

        /*
         * Ensure that the address returned is DMA'ble.
         */
        if (!dma_capable(dev, dev_addr, size)) {
                swiotlb_tbl_unmap_single(dev, map, size, dir);
                dev_addr = 0;
        }
        return dev_addr;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_map_page);

/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided for in a previous xen_swiotlb_map_page call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
                             size_t size, enum dma_data_direction dir)
{
        phys_addr_t paddr = xen_bus_to_phys(dev_addr);

        BUG_ON(dir == DMA_NONE);

        /* NOTE: We use dev_addr here, not paddr! */
        if (is_xen_swiotlb_buffer(dev_addr)) {
                swiotlb_tbl_unmap_single(hwdev, paddr, size, dir);
                return;
        }

        if (dir != DMA_FROM_DEVICE)
                return;

        /*
         * phys_to_virt doesn't work with highmem pages, but we could
         * call dma_mark_clean() with a highmem page here. However, we
         * are fine since dma_mark_clean() is null on POWERPC. We can
         * make dma_mark_clean() take a physical address if necessary.
         */
        dma_mark_clean(phys_to_virt(paddr), size);
}

void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
                            size_t size, enum dma_data_direction dir,
                            struct dma_attrs *attrs)
{
        xen_unmap_single(hwdev, dev_addr, size, dir);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_page);

/*
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a xen_swiotlb_map_page() but wish to interrogate the buffer
 * using the cpu, yet do not wish to tear down the dma mapping, you must
 * call this function before doing so.  At the next point you give the dma
 * address back to the card, you must first perform a
 * xen_swiotlb_dma_sync_for_device, and then the device again owns the buffer.
 */
static void
xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
                        size_t size, enum dma_data_direction dir,
                        enum dma_sync_target target)
{
        phys_addr_t paddr = xen_bus_to_phys(dev_addr);

        BUG_ON(dir == DMA_NONE);

        /* NOTE: We use dev_addr here, not paddr! */
        if (is_xen_swiotlb_buffer(dev_addr)) {
                swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);
                return;
        }

        if (dir != DMA_FROM_DEVICE)
                return;

        dma_mark_clean(phys_to_virt(paddr), size);
}

void
xen_swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
                                size_t size, enum dma_data_direction dir)
{
        xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_cpu);

void
xen_swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
                                   size_t size, enum dma_data_direction dir)
{
        xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_device);

/*
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the above xen_swiotlb_map_page
 * interface.  Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for xen_swiotlb_map_page are the
 * same here.
 */
int
xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
                         int nelems, enum dma_data_direction dir,
                         struct dma_attrs *attrs)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(dir == DMA_NONE);

        for_each_sg(sgl, sg, nelems, i) {
                phys_addr_t paddr = sg_phys(sg);
                dma_addr_t dev_addr = xen_phys_to_bus(paddr);

                if (swiotlb_force ||
                    !dma_capable(hwdev, dev_addr, sg->length) ||
                    range_straddles_page_boundary(paddr, sg->length)) {
                        phys_addr_t map = swiotlb_tbl_map_single(hwdev,
                                                                 start_dma_addr,
                                                                 sg_phys(sg),
                                                                 sg->length,
                                                                 dir);
                        if (map == SWIOTLB_MAP_ERROR) {
                                /* Don't panic here, we expect map_sg users
                                   to do proper error handling. */
                                xen_swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
                                                           attrs);
                                sgl[0].dma_length = 0;
                                return DMA_ERROR_CODE;
                        }
                        sg->dma_address = xen_phys_to_bus(map);
                } else
                        sg->dma_address = dev_addr;
                sg->dma_length = sg->length;
        }
        return nelems;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_map_sg_attrs);

/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_page() above.
 */
void
xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
                           int nelems, enum dma_data_direction dir,
                           struct dma_attrs *attrs)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(dir == DMA_NONE);

        for_each_sg(sgl, sg, nelems, i)
                xen_unmap_single(hwdev, sg->dma_address, sg->dma_length, dir);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_sg_attrs);

/*
 * Make physical memory consistent for a set of streaming mode DMA translations
 * after a transfer.
 *
 * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
 * and usage.
 */
static void
xen_swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
                    int nelems, enum dma_data_direction dir,
                    enum dma_sync_target target)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nelems, i)
                xen_swiotlb_sync_single(hwdev, sg->dma_address,
                                        sg->dma_length, dir, target);
}

void
xen_swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
                            int nelems, enum dma_data_direction dir)
{
        xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_cpu);

void
xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
                               int nelems, enum dma_data_direction dir)
{
        xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_device);

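/*
 * xen_swiotlb_map_page() returns 0 when a bounce buffer could not be made
 * DMA'ble, so a zero handle is the error indicator here.
 */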
int
xen_swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
        return !dma_addr;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_dma_mapping_error);

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
        return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_dma_supported);
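
/*
 * Usage sketch (illustrative only, not part of this file): the architecture
 * code is expected to wire these entry points into a struct dma_map_ops for
 * devices in a Xen PV domain, roughly along these lines (field names assumed
 * from the 3.x DMA API):
 *
 *	static struct dma_map_ops xen_swiotlb_dma_ops = {
 *		.alloc			= xen_swiotlb_alloc_coherent,
 *		.free			= xen_swiotlb_free_coherent,
 *		.map_page		= xen_swiotlb_map_page,
 *		.unmap_page		= xen_swiotlb_unmap_page,
 *		.map_sg			= xen_swiotlb_map_sg_attrs,
 *		.unmap_sg		= xen_swiotlb_unmap_sg_attrs,
 *		.sync_single_for_cpu	= xen_swiotlb_sync_single_for_cpu,
 *		.sync_single_for_device	= xen_swiotlb_sync_single_for_device,
 *		.sync_sg_for_cpu	= xen_swiotlb_sync_sg_for_cpu,
 *		.sync_sg_for_device	= xen_swiotlb_sync_sg_for_device,
 *		.mapping_error		= xen_swiotlb_dma_mapping_error,
 *		.dma_supported		= xen_swiotlb_dma_supported,
 *	};
 */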