/*
 *  linux/arch/arm/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space.  One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because the ARM only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once.  PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped.  (This isn't fully implemented yet.)
 */
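/*
 * Illustrative driver-side usage (a sketch only, not taken from this file;
 * REG_BASE, REG_SIZE and REG_CTRL are hypothetical device constants):
 *
 *	void __iomem *regs = ioremap(REG_BASE, REG_SIZE);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + REG_CTRL);
 *	iounmap(regs);
 */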
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>
/*
 * Used by ioremap() and iounmap() code to mark section-mapped I/O regions
 * in vm_struct->flags field.
 */
#define VM_ARM_SECTION_MAPPING	0x80000000
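/*
 * The helpers below implement the PTE-based mapping path: remap_area_pages()
 * walks the range one pgd entry at a time, remap_area_pmd() covers one pmd's
 * worth and allocates the pte tables, and remap_area_pte() writes the
 * individual page table entries.
 */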
static inline void
remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
	       unsigned long phys_addr, pgprot_t pgprot)
{
	unsigned long end;

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	BUG_ON(address >= end);
	do {
		if (!pte_none(*pte))
			goto bad;

		set_pte(pte, pfn_pte(phys_addr >> PAGE_SHIFT, pgprot));
		address += PAGE_SIZE;
		phys_addr += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
	return;

 bad:
	printk("remap_area_pte: page already exists\n");
	BUG();
}
static inline int
remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
	       unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;
	pgprot_t pgprot;

	address &= ~PGDIR_MASK;
	end = address + size;

	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;

	phys_addr -= address;
	BUG_ON(address >= end);

	pgprot = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_WRITE | flags);
	do {
		pte_t * pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address, address + phys_addr, pgprot);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}
static int
remap_area_pages(unsigned long start, unsigned long pfn,
		 unsigned long size, unsigned long flags)
{
	unsigned long address = start;
	unsigned long end = start + size;
	unsigned long phys_addr = __pfn_to_phys(pfn);
	int err = 0;
	pgd_t * dir;

	phys_addr -= address;
	dir = pgd_offset(&init_mm, address);
	BUG_ON(address >= end);
	do {
		pmd_t *pmd = pmd_alloc(&init_mm, dir, address);
		if (!pmd) {
			err = -ENOMEM;
			break;
		}
		if (remap_area_pmd(pmd, address, end - address,
				   phys_addr + address, flags)) {
			err = -ENOMEM;
			break;
		}

		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));

	return err;
}
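/*
 * Note on the walkers above: 'phys_addr -= address' stores the physical
 * minus virtual delta, so that 'address + phys_addr' later recovers the
 * physical address for whichever virtual address the walk has reached
 * (e.g. mapping physical 0x40000000 at virtual 0xc8800000 gives a delta
 * such that 0xc8800000 + delta == 0x40000000).  The 'address &&
 * (address < end)' condition also stops the loops if 'address' wraps
 * around the top of the address space to zero.
 */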
/*
 * Re-copy the kernel's vmalloc/ioremap pgd entries into 'mm', retrying
 * until the copy was made against a stable init_mm.context.kvm_seq.
 */
void __check_kvm_seq(struct mm_struct *mm)
{
	unsigned int seq;

	do {
		seq = init_mm.context.kvm_seq;
		memcpy(pgd_offset(mm, VMALLOC_START),
		       pgd_offset_k(VMALLOC_START),
		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
					pgd_index(VMALLOC_START)));
		mm->context.kvm_seq = seq;
	} while (seq != init_mm.context.kvm_seq);
}
#ifndef CONFIG_SMP
/*
 * Section support is unsafe on SMP - If you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area() allocates a guard 4K page, so we need to mask
 * the size back to 1MB aligned or we will overflow in the loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~SZ_1M);
	pgd_t *pgd;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	do {
		pmd_t pmd, *pmdp = pmd_offset(pgd, addr);

		pmd = *pmdp;
		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the kvm sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);
			init_mm.context.kvm_seq++;

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(pmd_page_kernel(pmd));
		}

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	/*
	 * Ensure that the active_mm is up to date - we want to
	 * catch any use-after-iounmap cases.
	 */
	if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
		__check_kvm_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}
static int
remap_area_sections(unsigned long virt, unsigned long pfn,
		    unsigned long size, unsigned long flags)
{
	unsigned long prot, addr = virt, end = virt + size;
	pgd_t *pgd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_DOMAIN(DOMAIN_IO) |
	       (flags & (L_PTE_CACHEABLE | L_PTE_BUFFERABLE));

	/*
	 * ARMv6 and above need XN set to prevent speculative prefetches
	 * hitting IO.
	 */
	if (cpu_architecture() >= CPU_ARCH_ARMv6)
		prot |= PMD_SECT_XN;

	pgd = pgd_offset_k(addr);
	do {
		pmd_t *pmd = pmd_offset(pgd, addr);

		pmd[0] = __pmd(__pfn_to_phys(pfn) | prot);
		pfn += SZ_1M >> PAGE_SHIFT;
		pmd[1] = __pmd(__pfn_to_phys(pfn) | prot);
		pfn += SZ_1M >> PAGE_SHIFT;
		flush_pmd_entry(pmd);

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	return 0;
}
#endif
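/*
 * Granularity note for remap_area_sections() above: each pgd entry here
 * covers PGDIR_SIZE (2MB), i.e. a pair of 1MB hardware section entries,
 * which is why both pmd[0] and pmd[1] are written on every iteration.
 * With 4K pages, SZ_1M >> PAGE_SHIFT is 256, so pfn advances by 256 page
 * frames per 1MB section.
 */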
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 *
 * 'flags' are the extra L_PTE_ flags that you want to specify for this
 * mapping.  See include/asm-arm/proc-armv/pgtable.h for more information.
 */
void __iomem *
__ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
	      unsigned long flags)
{
	int err;
	unsigned long addr;
	struct vm_struct * area;

	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;

#ifndef CONFIG_SMP
	if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, flags);
	} else
#endif
		err = remap_area_pages(addr, pfn, size, flags);

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}
EXPORT_SYMBOL(__ioremap_pfn);
void __iomem *
__ioremap(unsigned long phys_addr, size_t size, unsigned long flags)
{
	unsigned long last_addr;
	unsigned long offset = phys_addr & ~PAGE_MASK;
	unsigned long pfn = __phys_to_pfn(phys_addr);

	/*
	 * Don't allow wraparound or zero size
	 */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Page align the mapping size
	 */
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	return __ioremap_pfn(pfn, offset, size, flags);
}
EXPORT_SYMBOL(__ioremap);
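/*
 * Worked example for the alignment handling above (illustrative values):
 * __ioremap(0x10001004, 8, 0) keeps offset 0x004, uses pfn 0x10001, and
 * grows 'size' so the mapping reaches the end of the last page touched
 * (0x10002000).  The returned cookie is the new virtual base plus 0x004,
 * so it still points at the requested physical location.
 */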
void __iounmap(void __iomem *addr)
{
	struct vm_struct **p, *tmp;
	unsigned int section_mapping = 0;

	addr = (void __iomem *)(PAGE_MASK & (unsigned long)addr);

#ifndef CONFIG_SMP
	/*
	 * If this is a section based mapping we need to handle it
	 * specially as the VM subsystem does not know how to handle
	 * such a beast. We need the lock here b/c we need to clear
	 * all the mappings before the area can be reclaimed
	 * by someone else.
	 */
	write_lock(&vmlist_lock);
	for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
		if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
			if (tmp->flags & VM_ARM_SECTION_MAPPING) {
				*p = tmp->next;
				unmap_area_sections((unsigned long)tmp->addr,
						    tmp->size);
				kfree(tmp);
				section_mapping = 1;
			}
			break;
		}
	}
	write_unlock(&vmlist_lock);
#endif

	if (!section_mapping)
		vunmap(addr);
}
EXPORT_SYMBOL(__iounmap);