Merge branch 'linus' into tracing/mmiotrace
arch/x86/mm/ioremap.c
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#ifdef CONFIG_X86_64

unsigned long __phys_addr(unsigned long x)
{
	if (x >= __START_KERNEL_map)
		return x - __START_KERNEL_map + phys_base;
	return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);

static inline int phys_addr_valid(unsigned long addr)
{
	return addr < (1UL << boot_cpu_data.x86_phys_bits);
}

#else

static inline int phys_addr_valid(unsigned long addr)
{
	return 1;
}

#endif
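
/*
 * Example (illustrative): on 64-bit the same physical memory is reachable
 * through two virtual windows, and __phys_addr() handles both.  For a
 * symbol inside the kernel image,
 *
 *	__phys_addr((unsigned long)_text)
 *		== (unsigned long)_text - __START_KERNEL_map + phys_base
 *
 * which stays correct even for a relocated kernel, while a pointer into
 * the direct mapping (say, one returned by kmalloc()) is translated by
 * simply subtracting PAGE_OFFSET, which is what __pa() computes.
 */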

int page_is_ram(unsigned long pagenr)
{
	resource_size_t addr, end;
	int i;

	/*
	 * A special case is the first 4Kb of memory;
	 * This is a BIOS owned area, not kernel ram, but generally
	 * not listed as such in the E820 table.
	 */
	if (pagenr == 0)
		return 0;

	/*
	 * Second special case: Some BIOSen report the PC BIOS
	 * area (640->1Mb) as ram even though it is not.
	 */
	if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
		    pagenr < (BIOS_END >> PAGE_SHIFT))
		return 0;

	for (i = 0; i < e820.nr_map; i++) {
		/*
		 * Not usable memory:
		 */
		if (e820.map[i].type != E820_RAM)
			continue;
		addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
		end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;


		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}
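
/*
 * Example (illustrative): page_is_ram() takes a page frame number, not a
 * physical address, so callers shift by PAGE_SHIFT first.  A hypothetical
 * helper asking whether any part of a physical range is E820 RAM might
 * look like:
 *
 *	static int range_touches_ram(resource_size_t start, resource_size_t size)
 *	{
 *		unsigned long pfn;
 *
 *		for (pfn = start >> PAGE_SHIFT;
 *		     pfn < ((start + size + PAGE_SIZE - 1) >> PAGE_SHIFT);
 *		     pfn++)
 *			if (page_is_ram(pfn))
 *				return 1;
 *		return 0;
 *	}
 */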

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			       unsigned long prot_val)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, unsigned long prot_val, void *caller)
{
	unsigned long pfn, offset, vaddr;
	resource_size_t last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct vm_struct *area;
	unsigned long new_prot_val;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	for (pfn = phys_addr >> PAGE_SHIFT;
				(pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
				pfn++) {

		int is_ram = page_is_ram(pfn);

		if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
			return NULL;
		WARN_ON_ONCE(is_ram);
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = reserve_memtype(phys_addr, phys_addr + size,
						prot_val, &new_prot_val);
	if (retval) {
		pr_debug("Warning: reserve_memtype returned %d\n", retval);
		return NULL;
	}

	if (prot_val != new_prot_val) {
		/*
		 * Do not fallback to certain memory types with certain
		 * requested type:
		 * - request is uc-, return cannot be write-back
		 * - request is uc-, return cannot be write-combine
		 * - request is write-combine, return cannot be write-back
		 */
		if ((prot_val == _PAGE_CACHE_UC_MINUS &&
		     (new_prot_val == _PAGE_CACHE_WB ||
		      new_prot_val == _PAGE_CACHE_WC)) ||
		    (prot_val == _PAGE_CACHE_WC &&
		     new_prot_val == _PAGE_CACHE_WB)) {
			pr_debug(
		"ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				prot_val, new_prot_val);
			free_memtype(phys_addr, phys_addr + size);
			return NULL;
		}
		prot_val = new_prot_val;
	}

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		prot = PAGE_KERNEL_NOCACHE;
		break;
	case _PAGE_CACHE_UC_MINUS:
		prot = PAGE_KERNEL_UC_MINUS;
		break;
	case _PAGE_CACHE_WC:
		prot = PAGE_KERNEL_WC;
		break;
	case _PAGE_CACHE_WB:
		prot = PAGE_KERNEL;
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;
	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
		free_memtype(phys_addr, phys_addr + size);
		free_vm_area(area);
		return NULL;
	}

	if (ioremap_change_attr(vaddr, size, prot_val) < 0) {
		free_memtype(phys_addr, phys_addr + size);
		vunmap(area->addr);
		return NULL;
	}

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	return ret_addr;
}
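
/*
 * Example (illustrative): callers may pass a physical address that is not
 * page aligned.  For a hypothetical request such as
 *
 *	ioremap_nocache(0xfed00040, 0x10);
 *
 * the code above records offset = 0x40, rounds phys_addr down to
 * 0xfed00000, maps the single page covering the request, and returns the
 * new virtual address plus 0x40, so the caller sees a pointer to exactly
 * the bytes it asked for while the page tables only ever deal in whole
 * pages.
 */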

/**
 * ioremap_nocache - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_wc_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS.
	 */
	unsigned long val = _PAGE_CACHE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, val,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
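
/*
 * Example (illustrative): a typical driver maps its register window once,
 * uses the mmio accessors on the cookie it gets back, and balances the
 * mapping with iounmap().  The base address and register offsets below are
 * hypothetical; a probe routine might do roughly:
 *
 *	void __iomem *regs;
 *	u32 status;
 *
 *	regs = ioremap_nocache(MYDEV_MMIO_BASE, MYDEV_MMIO_LEN);
 *	if (!regs)
 *		return -ENOMEM;
 *	status = readl(regs + MYDEV_REG_STATUS);
 *	writel(status | MYDEV_IRQ_ACK, regs + MYDEV_REG_STATUS);
 *	iounmap(regs);
 */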

/**
 * ioremap_wc - map memory into CPU space write combined
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
{
	if (pat_wc_enabled)
		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
					__builtin_return_address(0));
	else
		return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);
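
/*
 * Example (illustrative): write combining is typically requested for large
 * linear apertures such as a graphics framebuffer, where streaming write
 * throughput matters far more than strict ordering of individual stores:
 *
 *	fb = ioremap_wc(fb_phys_base, fb_len);
 *
 * (fb_phys_base and fb_len are hypothetical and would normally come from a
 * PCI BAR.)  When PAT is not available the call degrades to an uncached
 * mapping via ioremap_nocache(), which is still correct, merely slower.
 */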

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	mmiotrace_iounmap(addr);

	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);
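
/*
 * Example (illustrative): because the ISA range is special-cased in both
 * directions, a round trip such as
 *
 *	p = ioremap(0xc0000, 0x1000);	(legacy video BIOS area)
 *	...
 *	iounmap(p);
 *
 * never touches the vmalloc area: __ioremap_caller() hands back the
 * existing kernel mapping via phys_to_virt(), and iounmap() recognises
 * such a low address and returns early instead of searching the vmlist.
 */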

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
	void *addr;
	unsigned long start = phys & PAGE_MASK;

	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
	if (page_is_ram(start >> PAGE_SHIFT))
		return __va(phys);

	addr = (void *)ioremap(start, PAGE_SIZE);
	if (addr)
		addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

	return addr;
}

void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
	if (page_is_ram(phys >> PAGE_SHIFT))
		return;

	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
	return;
}
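
/*
 * Example (illustrative): the /dev/mem read and write paths use these two
 * as a bracketing pair around each access, roughly:
 *
 *	ptr = xlate_dev_mem_ptr(p);
 *	if (!ptr)
 *		return -EFAULT;
 *	copy_to_user(buf, ptr, sz);	(error handling omitted)
 *	unxlate_dev_mem_ptr(p, ptr);
 *
 * For RAM pages this is just __va() and a no-op; for everything else a
 * temporary single-page ioremap() is created and torn down around the copy.
 */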

#ifdef CONFIG_X86_32

int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)]
		__section(.bss.page_aligned);

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

void __init early_ioremap_init(void)
{
	pmd_t *pmd;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_init()\n");

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
		       fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END): %08lx\n",
		       fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init early_ioremap_clear(void)
{
	pmd_t *pmd;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_clear()\n");

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	pmd_clear(pmd);
	paravirt_release_pte(__pa(bm_pte) >> PAGE_SHIFT);
	__flush_tlb_all();
}

void __init early_ioremap_reset(void)
{
	enum fixed_addresses idx;
	unsigned long addr, phys;
	pte_t *pte;

	after_paging_init = 1;
	for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
		addr = fix_to_virt(idx);
		pte = early_ioremap_pte(addr);
		if (pte_present(*pte)) {
			phys = pte_val(*pte) & PAGE_MASK;
			set_fixmap(idx, phys);
		}
	}
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
				      unsigned long phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);
	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(NULL, addr, pte);
	__flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
					   unsigned long phys)
{
	if (after_paging_init)
		set_fixmap(idx, phys);
	else
		__early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}


int __initdata early_ioremap_nested;

static int __init check_early_ioremap_leak(void)
{
	if (!early_ioremap_nested)
		return 0;

	printk(KERN_WARNING
	       "Debug warning: early ioremap leak of %d areas detected.\n",
	       early_ioremap_nested);
	printk(KERN_WARNING
	       "please boot with early_ioremap_debug and report the dmesg.\n");
	WARN_ON(1);

	return 1;
}
late_initcall(check_early_ioremap_leak);

void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
	unsigned long offset, last_addr;
	unsigned int nrpages, nesting;
	enum fixed_addresses idx0, idx;

	WARN_ON(system_state != SYSTEM_BOOTING);

	nesting = early_ioremap_nested;
	if (early_ioremap_debug) {
		printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
		       phys_addr, size, nesting);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	if (nesting >= FIX_BTMAPS_NESTING) {
		WARN_ON(1);
		return NULL;
	}
	early_ioremap_nested++;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

	return (void *) (offset + fix_to_virt(idx0));
}

void __init early_iounmap(void *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int nesting;

	nesting = --early_ioremap_nested;
	if (WARN_ON(nesting < 0))
		return;

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, nesting);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
}
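
/*
 * Example (illustrative): early_ioremap() exists for boot code that must
 * look at firmware data before the normal ioremap() machinery is usable.
 * The mapping comes out of a small fixmap window (at most NR_FIX_BTMAPS
 * pages per mapping, FIX_BTMAPS_NESTING mappings outstanding at once), so
 * it has to be short-lived and balanced with early_iounmap():
 *
 *	p = early_ioremap(table_phys, 64);	(table_phys is hypothetical)
 *	if (p) {
 *		memcpy(&header, p, sizeof(header));
 *		early_iounmap(p, 64);
 *	}
 */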

void __this_fixmap_does_not_exist(void)
{
	WARN_ON(1);
}

#endif /* CONFIG_X86_32 */