/*
 * This file contains ioremap and related functions for 64-bit machines.
 *
 * Derived from arch/ppc64/mm/init.c
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *   Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 *   Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Dave Engebretsen <engebret@us.ibm.com>
 *   Rework for PPC64 port.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/firmware.h>

#include "mmu_decl.h"

/* Some sanity checking */
#if TASK_SIZE_USER64 > PGTABLE_RANGE
#error TASK_SIZE_USER64 exceeds pagetable range
#endif

#ifdef CONFIG_PPC_STD_MMU_64
#if TASK_SIZE_USER64 > (1UL << (ESID_BITS + SID_SHIFT))
#error TASK_SIZE_USER64 exceeds user VSID range
#endif
#endif

unsigned long ioremap_bot = IOREMAP_BASE;

#ifdef CONFIG_PPC_MMU_NOHASH
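/*
 * Allocate a zeroed, size-aligned page table fragment during early boot:
 * from bootmem once it is initialized, otherwise straight from memblock.
 * Callers below assume the allocation cannot fail.
 */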
static void *early_alloc_pgtable(unsigned long size)
{
	void *pt;

	if (init_bootmem_done)
		pt = __alloc_bootmem(size, size, __pa(MAX_DMA_ADDRESS));
	else
		pt = __va(memblock_alloc_base(size, size,
					      __pa(MAX_DMA_ADDRESS)));
	memset(pt, 0, size);

	return pt;
}
#endif /* CONFIG_PPC_MMU_NOHASH */

/*
 * map_kernel_page() is currently only called by __ioremap(); it adds an
 * entry to the ioremap page table and an entry to the HPT, possibly
 * bolting it.
 */
int map_kernel_page(unsigned long ea, unsigned long pa, int flags)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	if (slab_is_available()) {
		pgdp = pgd_offset_k(ea);
		pudp = pud_alloc(&init_mm, pgdp, ea);
		if (!pudp)
			return -ENOMEM;
		pmdp = pmd_alloc(&init_mm, pudp, ea);
		if (!pmdp)
			return -ENOMEM;
		ptep = pte_alloc_kernel(pmdp, ea);
		if (!ptep)
			return -ENOMEM;
		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
						       __pgprot(flags)));
	} else {
#ifdef CONFIG_PPC_MMU_NOHASH
		/* Warning! This will blow up if bootmem is not initialized,
		 * which our ppc64 code is keen to do. We'll need to fix it
		 * and/or be more careful.
		 */
		pgdp = pgd_offset_k(ea);
#ifdef PUD_TABLE_SIZE
		if (pgd_none(*pgdp)) {
			pudp = early_alloc_pgtable(PUD_TABLE_SIZE);
			BUG_ON(pudp == NULL);
			pgd_populate(&init_mm, pgdp, pudp);
		}
#endif /* PUD_TABLE_SIZE */
		pudp = pud_offset(pgdp, ea);
		if (pud_none(*pudp)) {
			pmdp = early_alloc_pgtable(PMD_TABLE_SIZE);
			BUG_ON(pmdp == NULL);
			pud_populate(&init_mm, pudp, pmdp);
		}
		pmdp = pmd_offset(pudp, ea);
		if (!pmd_present(*pmdp)) {
			ptep = early_alloc_pgtable(PAGE_SIZE);
			BUG_ON(ptep == NULL);
			pmd_populate_kernel(&init_mm, pmdp, ptep);
		}
		ptep = pte_offset_kernel(pmdp, ea);
		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
						       __pgprot(flags)));
#else /* CONFIG_PPC_MMU_NOHASH */
		/*
		 * If the mm subsystem is not fully up, we cannot create a
		 * linux page table entry for this mapping.  Simply bolt an
		 * entry in the hardware page table.
		 */
		if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, flags,
				      mmu_io_psize, mmu_kernel_ssize)) {
			printk(KERN_ERR "Failed to do bolted mapping IO "
			       "memory at %016lx !\n", pa);
			return -ENOMEM;
		}
#endif /* !CONFIG_PPC_MMU_NOHASH */
	}
	return 0;
}
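
/*
 * A minimal sketch (hypothetical addresses) of what a call looks like:
 * map one uncached, guarded page of MMIO space at a chosen ioremap VA.
 * __ioremap_at() below is the real caller and does this in a loop.
 *
 *	unsigned long va = IOREMAP_BASE;
 *	unsigned long pa = 0xc0000000UL;
 *	int rc = map_kernel_page(va, pa,
 *				 _PAGE_NO_CACHE | _PAGE_GUARDED |
 *				 pgprot_val(PAGE_KERNEL));
 *	if (rc)
 *		return rc;
 */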


/**
 * __ioremap_at - Low level function to establish the page tables
 *                for an IO mapping
 */
void __iomem * __ioremap_at(phys_addr_t pa, void *ea, unsigned long size,
			    unsigned long flags)
{
	unsigned long i;

	/* Make sure we have the base flags */
	if ((flags & _PAGE_PRESENT) == 0)
		flags |= pgprot_val(PAGE_KERNEL);

	/* Non-cacheable page cannot be coherent */
	if (flags & _PAGE_NO_CACHE)
		flags &= ~_PAGE_COHERENT;

	/* We don't support the 4K PFN hack with ioremap */
	if (flags & _PAGE_4K_PFN)
		return NULL;

	WARN_ON(pa & ~PAGE_MASK);
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	for (i = 0; i < size; i += PAGE_SIZE)
		if (map_kernel_page((unsigned long)ea+i, pa+i, flags))
			return NULL;

	return (void __iomem *)ea;
}

/**
 * __iounmap_at - Low level function to tear down the page tables
 *                for an IO mapping. This is used for mappings that
 *                are manipulated manually, like partial unmapping of
 *                PCI IOs or ISA space.
 */
void __iounmap_at(void *ea, unsigned long size)
{
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	unmap_kernel_range((unsigned long)ea, size);
}
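
/*
 * Sketch of the manual pairing described above (hose_io_phys and
 * hose_io_virt are made-up names for a hypothetical PCI hose): carve
 * page tables for a fixed virtual range, then tear down only part of
 * it later. Both address and size must be page aligned, as the
 * WARN_ONs above enforce.
 *
 *	void __iomem *base = __ioremap_at(hose_io_phys, hose_io_virt,
 *					  0x10000,
 *					  _PAGE_NO_CACHE | _PAGE_GUARDED);
 *	...
 *	__iounmap_at(hose_io_virt + 0x8000, 0x8000);
 */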

void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
				unsigned long flags, void *caller)
{
	phys_addr_t paligned;
	void __iomem *ret;

	/*
	 * Choose an address to map it to. Once the mm subsystem is up,
	 * we use the vmalloc area allocator (__get_vm_area_caller());
	 * before that, we hand out addresses going up from ioremap_bot.
	 * The allocator then uses addresses from ioremap_bot through
	 * IOREMAP_END.
	 */
	paligned = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - paligned;

	if ((size == 0) || (paligned == 0))
		return NULL;

	if (mem_init_done) {
		struct vm_struct *area;

		area = __get_vm_area_caller(size, VM_IOREMAP,
					    ioremap_bot, IOREMAP_END,
					    caller);
		if (area == NULL)
			return NULL;

		area->phys_addr = paligned;
		ret = __ioremap_at(paligned, area->addr, size, flags);
		if (!ret)
			vunmap(area->addr);
	} else {
		ret = __ioremap_at(paligned, (void *)ioremap_bot, size, flags);
		if (ret)
			ioremap_bot += size;
	}

	if (ret)
		ret += addr & ~PAGE_MASK;
	return ret;
}
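
/*
 * Worked example of the alignment above (hypothetical numbers, 4K pages):
 * a caller asking for 8 bytes at addr 0x1000f004 gets
 *
 *	paligned = 0x1000f000
 *	size     = PAGE_ALIGN(0x1000f00c) - 0x1000f000 = 0x1000
 *
 * and the returned cookie is the mapped page plus the 0x004 offset.
 */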

void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
			 unsigned long flags)
{
	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
}

void __iomem * ioremap(phys_addr_t addr, unsigned long size)
{
	unsigned long flags = _PAGE_NO_CACHE | _PAGE_GUARDED;
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}
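
/*
 * Typical driver-side use of ioremap()/iounmap() (a sketch; the resource
 * and the 0x10 register offset belong to a hypothetical device):
 *
 *	void __iomem *regs = ioremap(res->start, resource_size(res));
 *	u32 status;
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	status = in_be32(regs + 0x10);
 *	iounmap(regs);
 */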

void __iomem * ioremap_wc(phys_addr_t addr, unsigned long size)
{
	unsigned long flags = _PAGE_NO_CACHE;
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}

void __iomem * ioremap_prot(phys_addr_t addr, unsigned long size,
			    unsigned long flags)
{
	void *caller = __builtin_return_address(0);

	/* writeable implies dirty for kernel addresses */
	if (flags & _PAGE_RW)
		flags |= _PAGE_DIRTY;

	/* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
	flags &= ~(_PAGE_USER | _PAGE_EXEC);

#ifdef _PAGE_BAP_SR
	/* _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE format
	 * which means that we just cleared supervisor access... oops ;-) This
	 * restores it
	 */
	flags |= _PAGE_BAP_SR;
#endif

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}
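
/*
 * Example of the flag mangling above (hash MMU, so no _PAGE_BAP_SR):
 * a caller passing
 *
 *	_PAGE_NO_CACHE | _PAGE_RW | _PAGE_USER
 *
 * ends up mapping with
 *
 *	_PAGE_NO_CACHE | _PAGE_RW | _PAGE_DIRTY
 *
 * i.e. dirty is forced for a writeable kernel mapping and the user bit
 * is stripped before the PTE is built.
 */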


/*
 * Unmap an IO region and remove it from the vmalloc'd mapping list.
 * Access to IO memory should be serialized by the driver.
 */
void __iounmap(volatile void __iomem *token)
{
	void *addr;

	if (!mem_init_done)
		return;

	addr = (void *) ((unsigned long __force)
			 PCI_FIX_ADDR(token) & PAGE_MASK);
	if ((unsigned long)addr < ioremap_bot) {
		printk(KERN_WARNING "Attempt to iounmap early bolted mapping"
		       " at 0x%p\n", addr);
		return;
	}
	vunmap(addr);
}

void iounmap(volatile void __iomem *token)
{
	if (ppc_md.iounmap)
		ppc_md.iounmap(token);
	else
		__iounmap(token);
}

EXPORT_SYMBOL(ioremap);
EXPORT_SYMBOL(ioremap_wc);
EXPORT_SYMBOL(ioremap_prot);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(__ioremap_at);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(__iounmap);
EXPORT_SYMBOL(__iounmap_at);