x86: dma-mapping: fix GFP_ATOMIC macro usage
arch/x86/kernel/pci-dma.c (GitHub/mt8127/android_kernel_alcatel_ttab.git)
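The commit subject refers to the CMA gate in dma_generic_alloc_coherent() below. GFP_ATOMIC is not a single gfp bit but a macro (effectively __GFP_HIGH in this kernel generation), so testing flag & GFP_ATOMIC cannot tell whether the caller is allowed to sleep; the presence of __GFP_WAIT is what marks a sleepable context. A minimal before/after sketch of the fix (the "before" line is reconstructed from the commit subject rather than quoted from the diff):

	/* Before (broken): GFP_ATOMIC is not one flag bit, so this test
	 * does not detect atomic callers reliably. */
	if (!(flag & GFP_ATOMIC))
		page = dma_alloc_from_contiguous(dev, count, get_order(size));

	/* After (fixed): __GFP_WAIT is the flag that actually marks a
	 * context which permits sleeping, as CMA requires. */
	if (flag & __GFP_WAIT)
		page = dma_alloc_from_contiguous(dev, count, get_order(size));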
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/dmar.h>
#include <linux/export.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/kmemleak.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/calgary.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>

static int forbid_dac __read_mostly;

struct dma_map_ops *dma_ops = &nommu_dma_ops;
EXPORT_SYMBOL(dma_ops);

static int iommu_sac_force __read_mostly;

#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

/*
 * This variable becomes 1 if iommu=pt is passed on the kernel command line.
 * If this variable is 1, IOMMU implementations do no DMA translation for
 * devices and allow every device to access the whole of physical memory.
 * This is useful if a user wants to use an IOMMU only for KVM device
 * assignment to guests and not for driver DMA translation.
 */
int iommu_pass_through __read_mostly;

extern struct iommu_table_entry __iommu_table[], __iommu_table_end[];

/* Dummy device used for NULL arguments (normally ISA). */
struct device x86_dma_fallback_dev = {
	.init_name = "fallback device",
	.coherent_dma_mask = ISA_DMA_BIT_MASK,
	.dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
};
EXPORT_SYMBOL(x86_dma_fallback_dev);

/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES 65536

int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
EXPORT_SYMBOL(dma_set_mask);

void __init pci_iommu_alloc(void)
{
	struct iommu_table_entry *p;

	sort_iommu_table(__iommu_table, __iommu_table_end);
	check_iommu_entries(__iommu_table, __iommu_table_end);

	for (p = __iommu_table; p < __iommu_table_end; p++) {
		if (p && p->detect && p->detect() > 0) {
			p->flags |= IOMMU_DETECTED;
			if (p->early_init)
				p->early_init();
			if (p->flags & IOMMU_FINISH_IF_DETECTED)
				break;
		}
	}
}
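
/*
 * Generic coherent DMA allocator. Prefers CMA when the caller may sleep
 * (__GFP_WAIT set), falls back to the page allocator otherwise, and
 * retries with GFP_DMA when the first allocation lands above the
 * device's coherent DMA mask.
 */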
void *dma_generic_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t flag,
				 struct dma_attrs *attrs)
{
	unsigned long dma_mask;
	struct page *page;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	dma_addr_t addr;

	dma_mask = dma_alloc_coherent_mask(dev, flag);

	flag |= __GFP_ZERO;
again:
	page = NULL;
	/* CMA can be used only in a context which permits sleeping */
	if (flag & __GFP_WAIT)
		page = dma_alloc_from_contiguous(dev, count, get_order(size));
	/* fallback */
	if (!page)
		page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
	if (!page)
		return NULL;

	addr = page_to_phys(page);
	if (addr + size > dma_mask) {
		__free_pages(page, get_order(size));

		if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) {
			flag = (flag & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}

		return NULL;
	}

	*dma_addr = addr;
	return page_address(page);
}

void dma_generic_free_coherent(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_addr, struct dma_attrs *attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page *page = virt_to_page(vaddr);

	if (!dma_release_from_contiguous(dev, page, count))
		free_pages((unsigned long)vaddr, get_order(size));
}

/*
 * See <Documentation/x86/x86_64/boot-options.txt> for the iommu kernel
 * parameter documentation.
 */
static __init int iommu_setup(char *p)
{
	iommu_merge = 1;

	if (!p)
		return -EINVAL;

	while (*p) {
		if (!strncmp(p, "off", 3))
			no_iommu = 1;
		/* gart_parse_options has more force support */
		if (!strncmp(p, "force", 5))
			force_iommu = 1;
		if (!strncmp(p, "noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}

		if (!strncmp(p, "biomerge", 8)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!strncmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!strncmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "nomerge", 7))
			iommu_merge = 0;
		if (!strncmp(p, "forcesac", 8))
			iommu_sac_force = 1;
		if (!strncmp(p, "allowdac", 8))
			forbid_dac = 0;
		if (!strncmp(p, "nodac", 5))
			forbid_dac = 1;
		if (!strncmp(p, "usedac", 6)) {
			forbid_dac = -1;
			return 1;
		}
#ifdef CONFIG_SWIOTLB
		if (!strncmp(p, "soft", 4))
			swiotlb = 1;
#endif
		if (!strncmp(p, "pt", 2))
			iommu_pass_through = 1;

		gart_parse_options(p);

#ifdef CONFIG_CALGARY_IOMMU
		if (!strncmp(p, "calgary", 7))
			use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

		p += strcspn(p, ",");
		if (*p == ',')
			++p;
	}
	return 0;
}
early_param("iommu", iommu_setup);

int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

#ifdef CONFIG_PCI
	if (mask > 0xffffffff && forbid_dac > 0) {
		dev_info(dev, "PCI: Disallowing DAC for device\n");
		return 0;
	}
#endif

	if (ops->dma_supported)
		return ops->dma_supported(dev, mask);

	/*
	 * Copied from i386. Doesn't make much sense, because it will
	 * only work for pci_alloc_coherent.
	 * The caller just has to use GFP_DMA in this case.
	 */
	if (mask < DMA_BIT_MASK(24))
		return 0;

	/*
	 * Tell the device to use SAC when IOMMU force is on. This
	 * allows the driver to use cheaper accesses in some cases.
	 *
	 * Problem with this is that if we overflow the IOMMU area and
	 * return DAC as fallback address the device may not handle it
	 * correctly.
	 *
	 * As a special case some controllers have a 39bit address
	 * mode that is as efficient as 32bit (aic79xx). Don't force
	 * SAC for these. Assume all masks <= 40 bits are of this
	 * type. Normally this doesn't make any difference, but gives
	 * more gentle handling of IOMMU overflow.
	 */
	if (iommu_sac_force && (mask >= DMA_BIT_MASK(40))) {
		dev_info(dev, "Force SAC with mask %Lx\n", mask);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(dma_supported);

static int __init pci_iommu_init(void)
{
	struct iommu_table_entry *p;

	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

#ifdef CONFIG_PCI
	dma_debug_add_bus(&pci_bus_type);
#endif
	x86_init.iommu.iommu_init();

	for (p = __iommu_table; p < __iommu_table_end; p++) {
		if (p && (p->flags & IOMMU_DETECTED) && p->late_init)
			p->late_init();
	}

	return 0;
}
/* Must execute after PCI subsystem */
rootfs_initcall(pci_iommu_init);

#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here. */

static void via_no_dac(struct pci_dev *dev)
{
	if (forbid_dac == 0) {
		dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n");
		forbid_dac = 1;
	}
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID,
			      PCI_CLASS_BRIDGE_PCI, 8, via_no_dac);
#endif
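
For readers tracing the allocator above, a minimal illustrative driver fragment follows (not part of this file; setup_dma_buffer and its parameters are hypothetical) showing how the generic DMA API reaches dma_generic_alloc_coherent():

#include <linux/dma-mapping.h>

/* Hypothetical probe-time helper; dev is an assumed struct device. */
static void *setup_dma_buffer(struct device *dev, size_t size,
			      dma_addr_t *handle)
{
	/* Negotiate a 32-bit mask first; dma_set_mask() is defined above. */
	if (dma_set_mask(dev, DMA_BIT_MASK(32)))
		return NULL;

	/*
	 * GFP_KERNEL includes __GFP_WAIT in this kernel generation, so
	 * the allocation is allowed to take the sleeping CMA path.
	 */
	return dma_alloc_coherent(dev, size, handle, GFP_KERNEL);
}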