/*
 * arch/sh/mm/ioremap.c
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2005 - 2010 Paul Mundt
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file "COPYING" in the main directory of this
 * archive for more details.
 */
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/addrspace.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mmu.h>

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem * __init_refok
__ioremap_caller(unsigned long phys_addr, unsigned long size,
		 pgprot_t pgprot, void *caller)
{
	struct vm_struct *area;
	unsigned long offset, last_addr, addr, orig_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;
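	/*
	 * Illustrative example (not part of the original source): with a
	 * hypothetical 4 KiB PAGE_SIZE and a request for phys_addr =
	 * 0xfd000123, size = 0x10, the offset becomes 0x123, phys_addr is
	 * rounded down to 0xfd000000, and size is rounded up to 0x1000, so
	 * a whole page is mapped while the caller still gets back a cookie
	 * pointing at the originally requested byte.
	 */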

	/*
	 * If we can't yet use the regular approach, go the fixmap route.
	 */
	if (!mem_init_done)
		return ioremap_fixed(phys_addr, offset, size, pgprot);

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	orig_addr = addr = (unsigned long)area->addr;

#ifdef CONFIG_PMB
	/*
	 * First try to remap through the PMB once a valid VMA has been
	 * established. Smaller allocations (or the rest of the size
	 * remaining after a PMB mapping due to the size not being
	 * perfectly aligned on a PMB size boundary) are then mapped
	 * through the UTLB using conventional page tables.
	 *
	 * PMB entries are all pre-faulted.
	 */
	if (unlikely(phys_addr >= P1SEG)) {
		unsigned long mapped;

		mapped = pmb_remap(addr, phys_addr, size, pgprot);
		if (likely(mapped)) {
			addr += mapped;
			phys_addr += mapped;
			size -= mapped;
		}
	}
#endif
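
	/*
	 * Illustrative note (not part of the original source): assuming the
	 * smallest PMB entry covers 16 MiB, a hypothetical 17 MiB request
	 * might see its first 16 MiB handled by pmb_remap() above, leaving
	 * addr/phys_addr advanced by 16 MiB and size reduced to 1 MiB for
	 * the ioremap_page_range() call below.
	 */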

	if (likely(size))
		if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) {
			vunmap((void *)orig_addr);
			return NULL;
		}

	return (void __iomem *)(offset + (char *)orig_addr);
}
EXPORT_SYMBOL(__ioremap_caller);
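
/*
 * Usage sketch (illustrative, not part of the original file): drivers do
 * not call __ioremap_caller() directly; they reach it through ioremap()
 * or ioremap_nocache() from <asm/io.h>. The device base address and
 * register offset below are hypothetical.
 *
 *	void __iomem *regs;
 *	u32 status;
 *
 *	regs = ioremap_nocache(0xfd000000, 0x100);	// hypothetical device
 *	if (!regs)
 *		return -ENOMEM;
 *	status = readl(regs + 0x04);			// hypothetical register
 *	iounmap(regs);
 */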

/*
 * Simple checks for non-translatable mappings.
 */
static inline int iomapping_nontranslatable(unsigned long offset)
{
#ifdef CONFIG_29BIT
	/*
	 * In 29-bit mode this includes the fixed P1/P2 areas, as well as
	 * parts of P3.
	 */
	if (PXSEG(offset) < P3SEG || offset >= P3_ADDR_MAX)
		return 1;
#endif

	return 0;
}
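
/*
 * Background note (illustrative, not from the original source): in the
 * legacy 29-bit SuperH address map, P1 (0x80000000, cached) and P2
 * (0xa0000000, uncached) are identity-mapped segments that never go
 * through the TLB, so an ioremap() cookie falling below P3SEG needs no
 * VMA or page-table teardown in __iounmap() below.
 */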

void __iounmap(void __iomem *addr)
{
	unsigned long vaddr = (unsigned long __force)addr;
	struct vm_struct *p;

	/*
	 * Nothing to do if there is no translatable mapping.
	 */
	if (iomapping_nontranslatable(vaddr))
		return;

	/*
	 * There's no VMA if it's from an early fixed mapping.
	 */
	if (iounmap_fixed(addr) == 0)
		return;

#ifdef CONFIG_PMB
	/*
	 * Purge any PMB entries that may have been established for this
	 * mapping, then proceed with conventional VMA teardown.
	 *
	 * XXX: Note that due to the way that remove_vm_area() does
	 * matching of the resultant VMA, we aren't able to fast-forward
	 * the address past the PMB space until the end of the VMA where
	 * the page tables reside. As such, unmap_vm_area() will be
	 * forced to linearly scan over the area until it finds the page
	 * tables where PTEs that need to be unmapped actually reside,
	 * which is far from optimal. Perhaps we need to use a separate
	 * VMA for the PMB mappings?
	 *	-- PFM.
	 */
	pmb_unmap(vaddr);
#endif

	p = remove_vm_area((void *)(vaddr & PAGE_MASK));
	if (!p) {
		printk(KERN_ERR "%s: bad address %p\n", __func__, addr);
		return;
	}

	kfree(p);
}
EXPORT_SYMBOL(__iounmap);
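
/*
 * Teardown note (illustrative, not part of the original file): the cookie
 * handed back by __ioremap_caller() may point into the middle of a page
 * when the original request was not page-aligned, which is why the call
 * to remove_vm_area() above is given vaddr & PAGE_MASK. A driver simply
 * passes back the same cookie it got from ioremap(), e.g.:
 *
 *	regs = ioremap(0xfd000123, 0x10);	// hypothetical, unaligned
 *	...
 *	iounmap(regs);				// same cookie; masked above
 */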