x86: make early_ioremap_debug early_param
/*
 * arch/x86/mm/ioremap_32.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/io.h>

#include <asm/fixmap.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>

#define ISA_START_ADDRESS 0xa0000
#define ISA_END_ADDRESS 0x100000

/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
			unsigned long flags)
{
	void __iomem *addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;
	pgprot_t prot;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return (void __iomem *) phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr <= virt_to_phys(high_memory - 1)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;
	}

	prot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY
			| _PAGE_ACCESSED | flags);

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP | (flags << 20));
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = (void __iomem *) area->addr;
	if (ioremap_page_range((unsigned long) addr,
			       (unsigned long) addr + size, phys_addr, prot)) {
		vunmap((void __force *) addr);
		return NULL;
	}
	return (void __iomem *) (offset + (char __iomem *)addr);
}
EXPORT_SYMBOL(__ioremap);
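
/*
 * Usage sketch (illustrative only, not part of this file's interface):
 * the page offset of a non-page-aligned request survives the round
 * trip, so callers may pass any physical address. The address, length
 * and flags below are made up:
 *
 *	void __iomem *regs = __ioremap(0xfeb00080, 0x40, 0);
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(0x1, regs);	writel() hits physical 0xfeb00080
 *	iounmap(regs);		iounmap() masks the offset back off
 *
 * Most code should go through ioremap()/ioremap_nocache() rather than
 * calling __ioremap() directly.
 */
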
/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses, so in particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
	unsigned long last_addr;
	void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD | _PAGE_PWT);
	if (!p)
		return p;

	/* Guaranteed to be > phys_addr, as per __ioremap() */
	last_addr = phys_addr + size - 1;

	if (last_addr < virt_to_phys(high_memory) - 1) {
		struct page *ppage = virt_to_page(__va(phys_addr));
		unsigned long npages;

		phys_addr &= PAGE_MASK;

		/* This might overflow and become zero.. */
		last_addr = PAGE_ALIGN(last_addr + 1);

		/* .. but that's ok, because modulo-2**n arithmetic will make
		 * the page-aligned "last - first" come out right.
		 */
		npages = (last_addr - phys_addr) >> PAGE_SHIFT;

		if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) {
			iounmap(p);
			p = NULL;
		}
		global_flush_tlb();
	}

	return p;
}
EXPORT_SYMBOL(ioremap_nocache);
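
/*
 * Typical driver usage, as a minimal sketch; the PCI device pdev, BAR 0
 * and the 0x10 register offset are hypothetical:
 *
 *	void __iomem *mmio;
 *
 *	mmio = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (!mmio)
 *		return -ENOMEM;
 *	writel(1, mmio + 0x10);		uncached MMIO write
 *	(void) readl(mmio + 0x10);	read back to flush the posted write
 *	iounmap(mmio);			exactly one iounmap per mapping
 */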
/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long __force)addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there
	 * isn't another iounmap for the same address in parallel. Reuse
	 * of the virtual address is prevented by leaving it in the
	 * global lists until we're done with it. cpa takes care of the
	 * direct mappings.
	 */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk("iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	/* Reset the direct mapping. Can block */
	if ((p->flags >> 20) && p->phys_addr < virt_to_phys(high_memory) - 1) {
		change_page_attr(virt_to_page(__va(p->phys_addr)),
				 get_vm_area_size(p) >> PAGE_SHIFT,
				 PAGE_KERNEL);
		global_flush_tlb();
	}

	/* Finally remove it */
	o = remove_vm_area((void *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);
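
/*
 * Enabled from the kernel command line by booting with the bare flag:
 *
 *	early_ioremap_debug
 *
 * Using early_param() rather than __setup() matters here: early
 * parameters are parsed during parse_early_param(), well before
 * regular __setup() handlers run, so the flag can take effect in time
 * for the earliest early_ioremap() users.
 */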
static __initdata int after_paging_init;
static __initdata unsigned long bm_pte[1024]
			__attribute__((aligned(PAGE_SIZE)));

static inline unsigned long * __init early_ioremap_pgd(unsigned long addr)
{
	return (unsigned long *)swapper_pg_dir + ((addr >> 22) & 1023);
}

static inline unsigned long * __init early_ioremap_pte(unsigned long addr)
{
	return bm_pte + ((addr >> PAGE_SHIFT) & 1023);
}
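
/*
 * Worked example of the 2-level (non-PAE) split these helpers assume:
 * a 32-bit virtual address is a 10-bit pgd index, a 10-bit pte index
 * and a 12-bit page offset. For a made-up addr of 0xffd01234:
 *
 *	pgd index:  (0xffd01234 >> 22) & 1023	== 1023
 *	pte index:  (0xffd01234 >> 12) & 1023	== 257
 *	offset:	    0xffd01234 & ~PAGE_MASK	== 0x234
 *
 * The real FIX_BTMAP addresses depend on the layout in asm/fixmap.h.
 */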

void __init early_ioremap_init(void)
{
	unsigned long *pgd;

	if (early_ioremap_debug)
		printk("early_ioremap_init()\n");

	/* Clear the pte page before hooking it into the page table */
	memset(bm_pte, 0, sizeof(bm_pte));
	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
	*pgd = __pa(bm_pte) | _PAGE_TABLE;
	BUG_ON(pgd != early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END)));
}
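
/*
 * bm_pte is a single page of ptes, i.e. it spans exactly one pgd slot
 * (4MB of virtual space on the 2-level split), so the whole FIX_BTMAP
 * range must sit inside one pgd entry. That is what the BUG_ON() above
 * verifies; roughly (sketch):
 *
 *	(fix_to_virt(FIX_BTMAP_BEGIN) >> 22)
 *		== (fix_to_virt(FIX_BTMAP_END) >> 22)
 */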

void __init early_ioremap_clear(void)
{
	unsigned long *pgd;

	if (early_ioremap_debug)
		printk("early_ioremap_clear()\n");

	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
	*pgd = 0;
	__flush_tlb_all();
}

void __init early_ioremap_reset(void)
{
	enum fixed_addresses idx;
	unsigned long *pte, phys, addr;

	after_paging_init = 1;
	for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
		addr = fix_to_virt(idx);
		pte = early_ioremap_pte(addr);
		/* Re-establish live boot mappings via set_fixmap() */
		if (*pte & _PAGE_PRESENT) {
			phys = *pte & PAGE_MASK;
			set_fixmap(idx, phys);
		}
	}
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
				      unsigned long phys, pgprot_t flags)
{
	unsigned long *pte, addr = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);
	if (pgprot_val(flags))
		*pte = (phys & PAGE_MASK) | pgprot_val(flags);
	else
		*pte = 0;
	__flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
					   unsigned long phys)
{
	if (after_paging_init)
		set_fixmap(idx, phys);
	else
		__early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}

int __initdata early_ioremap_nested;

void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
	unsigned long offset, last_addr;
	unsigned int nrpages, nesting;
	enum fixed_addresses idx0, idx;

	WARN_ON(system_state != SYSTEM_BOOTING);

	nesting = early_ioremap_nested;
	if (early_ioremap_debug) {
		printk("early_ioremap(%08lx, %08lx) [%d] => ",
		       phys_addr, size, nesting);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	if (nesting >= FIX_BTMAPS_NESTING) {
		WARN_ON(1);
		return NULL;
	}
	early_ioremap_nested++;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

	return (void *) (offset + fix_to_virt(idx0));
}

void __init early_iounmap(void *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int nesting;

	nesting = --early_ioremap_nested;
	WARN_ON(nesting < 0);

	if (early_ioremap_debug) {
		printk("early_iounmap(%p, %08lx) [%d]\n", addr, size, nesting);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
}
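
/*
 * Nesting sketch (addresses made up): up to FIX_BTMAPS_NESTING
 * mappings may be live at once, each owning its own slice of the
 * FIX_BTMAP area, and because the slice is picked by nesting depth,
 * teardown must be strictly LIFO:
 *
 *	void *a = early_ioremap(0x000e0000, 0x1000);	nesting 0
 *	void *b = early_ioremap(0x000f0000, 0x2000);	nesting 1
 *
 *	... use b, then a ...
 *
 *	early_iounmap(b, 0x2000);	must come first
 *	early_iounmap(a, 0x1000);
 */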
void __this_fixmap_does_not_exist(void)
{
	WARN_ON(1);
}