#include <linux/highmem.h>
#include <linux/module.h>

void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}

void kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
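
/*
 * Illustrative usage sketch (not part of the original file): a typical
 * kmap()/kunmap() pair. kmap() may sleep waiting for a free slot in the
 * kmap pool, so it is usable from process context only; the mapping
 * stays valid across sleeps until kunmap(). The helper name is
 * hypothetical.
 */
static void zero_highpage_sketch(struct page *page)
{
	char *kaddr = kmap(page);	/* may sleep */

	memset(kaddr, 0, PAGE_SIZE);
	kunmap(page);			/* note: takes the page, not kaddr */
}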

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However, when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
void *kmap_atomic(struct page *page, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();

	if (!PageHighMem(page))
		return page_address(page);

	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	if (!pte_none(*(kmap_pte-idx)))
		BUG();
	set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));

	return (void*) vaddr;
}

void kunmap_atomic(void *kvaddr, enum km_type type)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

#ifdef CONFIG_DEBUG_HIGHMEM
	if (vaddr >= PAGE_OFFSET && vaddr < (unsigned long)high_memory) {
		pagefault_enable();
		return;
	}

	if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
		BUG();
#endif
	/*
	 * Force other mappings to Oops if they'll try to access this pte
	 * without first remapping it. Keeping stale mappings around is a
	 * bad idea too, in case the page changes cacheability attributes
	 * or becomes a protected page in a hypervisor.
	 */
	kpte_clear_flush(kmap_pte-idx, vaddr);

	pagefault_enable();
}
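
/*
 * Illustrative usage sketch (not part of the original file): the
 * canonical kmap_atomic()/kunmap_atomic() pair. KM_USER0 is one of the
 * per-CPU fixmap slots reserved for process-context users; no sleeping
 * is allowed between map and unmap. The helper name is hypothetical.
 */
static void copy_from_highpage_sketch(void *dst, struct page *page)
{
	void *kaddr = kmap_atomic(page, KM_USER0);

	memcpy(dst, kaddr, PAGE_SIZE);
	kunmap_atomic(kaddr, KM_USER0);	/* takes the vaddr, not the page */
}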

/* This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	pagefault_disable();

	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));

	return (void*) vaddr;
}
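
/*
 * Illustrative usage sketch (not part of the original file): reading a
 * word from a raw page frame that may have no struct page (e.g. memory
 * outside mem_map). There is no kunmap_atomic_pfn(); the mapping is
 * released with kunmap_atomic() as usual. The helper name is
 * hypothetical.
 */
static u32 read_word_at_pfn_sketch(unsigned long pfn)
{
	u32 *kaddr = kmap_atomic_pfn(pfn, KM_USER0);
	u32 val = *kaddr;

	kunmap_atomic(kaddr, KM_USER0);
	return val;
}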

struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}
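
/*
 * Illustrative usage sketch (not part of the original file):
 * kmap_atomic_to_page() recovers the struct page behind either a fixmap
 * address from an atomic kmap or a plain lowmem address, so a mapped
 * page round-trips through it. The helper name is hypothetical.
 */
static int kmap_round_trip_sketch(struct page *page)
{
	void *kaddr = kmap_atomic(page, KM_USER0);
	int same = (kmap_atomic_to_page(kaddr) == page);

	kunmap_atomic(kaddr, KM_USER0);
	return same;
}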

EXPORT_SYMBOL(kmap);
EXPORT_SYMBOL(kunmap);
EXPORT_SYMBOL(kmap_atomic);
EXPORT_SYMBOL(kunmap_atomic);
EXPORT_SYMBOL(kmap_atomic_to_page);