x86: revert ucminus change
[GitHub/mt8127/android_kernel_alcatel_ttab.git] arch/x86/mm/ioremap.c
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

enum ioremap_mode {
	IOR_MODE_UNCACHED,
	IOR_MODE_CACHED,
};

#ifdef CONFIG_X86_64

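/*
 * Convert a kernel virtual address to a physical address: addresses in
 * the kernel text mapping (at or above __START_KERNEL_map) are offset by
 * phys_base, everything else is assumed to lie in the direct mapping.
 */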
unsigned long __phys_addr(unsigned long x)
{
	if (x >= __START_KERNEL_map)
		return x - __START_KERNEL_map + phys_base;
	return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);

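/*
 * The CPU advertises how many physical address bits it implements;
 * anything at or beyond that limit cannot be a valid physical address.
 */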
static inline int phys_addr_valid(unsigned long addr)
{
	return addr < (1UL << boot_cpu_data.x86_phys_bits);
}

#else

static inline int phys_addr_valid(unsigned long addr)
{
	return 1;
}

#endif

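/*
 * Check whether a page frame is usable RAM according to the E820 map.
 * The BIOS-owned first page and the 640k-1Mb legacy BIOS area are never
 * treated as RAM, regardless of what the firmware reports.
 */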
int page_is_ram(unsigned long pagenr)
{
	unsigned long addr, end;
	int i;

	/*
	 * A special case is the first 4Kb of memory;
	 * This is a BIOS owned area, not kernel ram, but generally
	 * not listed as such in the E820 table.
	 */
	if (pagenr == 0)
		return 0;

	/*
	 * Second special case: Some BIOSen report the PC BIOS
	 * area (640->1Mb) as ram even though it is not.
	 */
	if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
		    pagenr < (BIOS_END >> PAGE_SHIFT))
		return 0;

	for (i = 0; i < e820.nr_map; i++) {
		/*
		 * Not usable memory:
		 */
		if (e820.map[i].type != E820_RAM)
			continue;
		addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
		end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
static int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			       enum ioremap_mode mode)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (mode) {
	case IOR_MODE_UNCACHED:
	default:
		err = set_memory_uc(vaddr, nrpages);
		break;
	case IOR_MODE_CACHED:
		err = set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
			       enum ioremap_mode mode)
{
	unsigned long pfn, offset, last_addr, vaddr;
	struct vm_struct *area;
	pgprot_t prot;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %lx\n",
		       phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	for (pfn = phys_addr >> PAGE_SHIFT;
	     (pfn << PAGE_SHIFT) < last_addr; pfn++) {

		int is_ram = page_is_ram(pfn);

		if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
			return NULL;
		WARN_ON_ONCE(is_ram);
	}

	switch (mode) {
	case IOR_MODE_UNCACHED:
	default:
		prot = PAGE_KERNEL_NOCACHE;
		break;
	case IOR_MODE_CACHED:
		prot = PAGE_KERNEL;
		break;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;
	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
		free_vm_area(area);
		return NULL;
	}

	if (ioremap_change_attr(vaddr, size, mode) < 0) {
		vunmap(area->addr);
		return NULL;
	}

	return (void __iomem *) (vaddr + offset);
}

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, IOR_MODE_UNCACHED);
}
EXPORT_SYMBOL(ioremap_nocache);

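/**
 * ioremap_cache - map bus memory into CPU space, cacheable
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * Like ioremap_nocache, but the mapping is created with the normal
 * cacheable kernel page protections (PAGE_KERNEL).
 */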
void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, IOR_MODE_CACHED);
}
EXPORT_SYMBOL(ioremap_cache);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there isn't
	 * another iounmap for the same address in parallel. Reuse of the
	 * virtual address is prevented by leaving it in the global lists
	 * until we're done with it. cpa takes care of the direct mappings.
	 */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	/* Finally remove it */
	o = remove_vm_area((void *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

#ifdef CONFIG_X86_32

int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)]
		__section(.bss.page_aligned);

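/*
 * Return the pmd entry covering @addr by walking the page tables that are
 * currently active; this early in boot CR3 does not necessarily point at
 * swapper_pg_dir yet.
 */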
static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

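/*
 * Hook the statically allocated bm_pte page into the pmd covering the
 * boot-time fixmap range, so that early_ioremap() can install mappings
 * before the normal page-table allocators are up.
 */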
void __init early_ioremap_init(void)
{
	pmd_t *pmd;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_init()\n");

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
		       fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
		       fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:   %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

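/*
 * Undo early_ioremap_init(): unhook bm_pte from the pmd, tell the paravirt
 * code the page is no longer used as a page table, and flush the TLB.
 */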
void __init early_ioremap_clear(void)
{
	pmd_t *pmd;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_clear()\n");

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	pmd_clear(pmd);
	paravirt_release_pt(__pa(bm_pte) >> PAGE_SHIFT);
	__flush_tlb_all();
}

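/*
 * After paging_init() the fixmap is managed through set_fixmap(): replay
 * any boot-time mappings that are still present into the permanent page
 * tables and switch later fixmap updates over to the normal path.
 */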
void __init early_ioremap_reset(void)
{
	enum fixed_addresses idx;
	unsigned long addr, phys;
	pte_t *pte;

	after_paging_init = 1;
	for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
		addr = fix_to_virt(idx);
		pte = early_ioremap_pte(addr);
		if (pte_present(*pte)) {
			phys = pte_val(*pte) & PAGE_MASK;
			set_fixmap(idx, phys);
		}
	}
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
				      unsigned long phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);
	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(NULL, addr, pte);
	__flush_tlb_one(addr);
}

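/*
 * Before paging_init() the fixmap entries have to be written into bm_pte
 * directly; afterwards the regular set_fixmap()/clear_fixmap() helpers
 * do the job.
 */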
static inline void __init early_set_fixmap(enum fixed_addresses idx,
					   unsigned long phys)
{
	if (after_paging_init)
		set_fixmap(idx, phys);
	else
		__early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}

int __initdata early_ioremap_nested;

static int __init check_early_ioremap_leak(void)
{
	if (!early_ioremap_nested)
		return 0;

	printk(KERN_WARNING
	       "Debug warning: early ioremap leak of %d areas detected.\n",
	       early_ioremap_nested);
	printk(KERN_WARNING
	       "please boot with early_ioremap_debug and report the dmesg.\n");
	WARN_ON(1);

	return 1;
}
late_initcall(check_early_ioremap_leak);

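/*
 * Map a physical range into the boot-time fixmap slots.  Up to
 * FIX_BTMAPS_NESTING mappings of at most NR_FIX_BTMAPS pages each can be
 * live at once; they are expected to be released with early_iounmap() in
 * reverse (stack) order.
 */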
void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
	unsigned long offset, last_addr;
	unsigned int nrpages, nesting;
	enum fixed_addresses idx0, idx;

	WARN_ON(system_state != SYSTEM_BOOTING);

	nesting = early_ioremap_nested;
	if (early_ioremap_debug) {
		printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
		       phys_addr, size, nesting);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	if (nesting >= FIX_BTMAPS_NESTING) {
		WARN_ON(1);
		return NULL;
	}
	early_ioremap_nested++;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

	return (void *) (offset + fix_to_virt(idx0));
}

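/*
 * Release the most recent early_ioremap(): clear the fixmap slots that
 * belong to the nesting level being dropped.
 */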
void __init early_iounmap(void *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	unsigned int nesting;

	nesting = --early_ioremap_nested;
	WARN_ON(nesting < 0);

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, nesting);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
}
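
/*
 * Referenced from fix_to_virt() when it is handed an out-of-range fixmap
 * index.
 */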
void __this_fixmap_does_not_exist(void)
{
	WARN_ON(1);
}

#endif /* CONFIG_X86_32 */