/*
 * linux/arch/i386/mm/pgtable.c
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

void show_mem(void)
{
	int total = 0, reserved = 0;
	int shared = 0, cached = 0;
	int highmem = 0;
	struct page *page;
	pg_data_t *pgdat;
	unsigned long i;
	unsigned long flags;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
	printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
	for_each_online_pgdat(pgdat) {
		pgdat_resize_lock(pgdat, &flags);
		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
			page = pgdat_page_nr(pgdat, i);
			total++;
			if (PageHighMem(page))
				highmem++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
		pgdat_resize_unlock(pgdat, &flags);
	}
	printk(KERN_INFO "%d pages of RAM\n", total);
	printk(KERN_INFO "%d pages of HIGHMEM\n", highmem);
	printk(KERN_INFO "%d reserved pages\n", reserved);
	printk(KERN_INFO "%d pages shared\n", shared);
	printk(KERN_INFO "%d pages swap cached\n", cached);

	printk(KERN_INFO "%lu pages dirty\n", global_page_state(NR_FILE_DIRTY));
	printk(KERN_INFO "%lu pages writeback\n",
		global_page_state(NR_WRITEBACK));
	printk(KERN_INFO "%lu pages mapped\n", global_page_state(NR_FILE_MAPPED));
	printk(KERN_INFO "%lu pages slab\n",
		global_page_state(NR_SLAB_RECLAIMABLE) +
		global_page_state(NR_SLAB_UNRECLAIMABLE));
	printk(KERN_INFO "%lu pages pagetables\n",
		global_page_state(NR_PAGETABLE));
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		BUG();
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		BUG();
		return;
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		BUG();
		return;
	}
	pte = pte_offset_kernel(pmd, vaddr);
	if (pgprot_val(flags))
		/* <pfn,flags> stored as-is, to permit clearing entries */
		set_pte(pte, pfn_pte(pfn, flags));
	else
		pte_clear(&init_mm, vaddr, pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

/*
 * Associate a large virtual page frame with a given physical page frame
 * and protection flags for that frame. pfn is for the base of the page,
 * vaddr is what the page gets mapped to - both must be properly aligned.
 * The pmd must already be instantiated. Assumes PAE mode.
 */
void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	if (vaddr & (PMD_SIZE-1)) {		/* vaddr is misaligned */
		printk(KERN_WARNING "set_pmd_pfn: vaddr misaligned\n");
		return; /* BUG(); */
	}
	if (pfn & (PTRS_PER_PTE-1)) {		/* pfn is misaligned */
		printk(KERN_WARNING "set_pmd_pfn: pfn misaligned\n");
		return; /* BUG(); */
	}
	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		printk(KERN_WARNING "set_pmd_pfn: pgd_none\n");
		return; /* BUG(); */
	}
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	set_pmd(pmd, pfn_pmd(pfn, flags));
	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}
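
/*
 * Illustrative sketch (not part of the original file, kept out of the
 * build): a hypothetical caller of set_pmd_pfn().  Both vaddr and pfn
 * must be large-page aligned or the checks above reject the request;
 * PAGE_KERNEL_LARGE carries _PAGE_PSE so the entry maps a whole 2MB
 * (PAE) large page.  The addresses below are made-up example values.
 */
#if 0
static void example_map_large_page(void)
{
	unsigned long vaddr = 0xffa00000UL;		/* PMD-aligned (hypothetical) */
	unsigned long pfn = 0x10000000UL >> PAGE_SHIFT;	/* 256MB phys, PMD-aligned */

	set_pmd_pfn(vaddr, pfn, PAGE_KERNEL_LARGE);
}
#endif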

static int fixmaps;
unsigned long __FIXADDR_TOP = 0xfffff000;
EXPORT_SYMBOL(__FIXADDR_TOP);

void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
	fixmaps++;
}

/**
 * reserve_top_address - reserves a hole in the top of kernel address space
 * @reserve: size of hole to reserve
 *
 * Can be used to relocate the fixmap area and poke a hole in the top
 * of kernel address space to make room for a hypervisor.
 */
void reserve_top_address(unsigned long reserve)
{
	BUG_ON(fixmaps > 0);
	printk(KERN_INFO "Reserving virtual address space above 0x%08x\n",
	       (int)-reserve);
	__FIXADDR_TOP = -reserve - PAGE_SIZE;
	__VMALLOC_RESERVE += reserve;
}
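
/*
 * Illustrative sketch (not part of the original file, kept out of the
 * build): a hypervisor wanting the top 64MB of virtual address space to
 * itself would call this before any fixmap entry is established (note
 * the BUG_ON above), pushing __FIXADDR_TOP and everything below it down
 * by 64MB plus one guard page.  The 64MB figure is a made-up example.
 */
#if 0
static void __init example_reserve_hypervisor_hole(void)
{
	reserve_top_address(64 << 20);	/* hypothetical 64MB hole */
}
#endif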

pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
}

struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

#ifdef CONFIG_HIGHPTE
	pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
#else
	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
#endif
	return pte;
}
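
/*
 * Note (editorial addition, not in the original file): with
 * CONFIG_HIGHPTE the pte page may live in highmem and so has no
 * permanent kernel mapping; callers must reach it through
 * pte_offset_map()/pte_unmap() (kmap_atomic under the covers) rather
 * than a direct pointer.  pte_alloc_one_kernel() above stays in lowmem
 * because kernel page tables are walked with pte_offset_kernel().
 */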

/* kmem_cache constructor: a fresh pmd page starts out fully cleared */
void pmd_ctor(void *pmd, struct kmem_cache *cache, unsigned long flags)
{
	memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * -- wli
 */
DEFINE_SPINLOCK(pgd_lock);
struct page *pgd_list;

static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);
	page->index = (unsigned long)pgd_list;
	if (pgd_list)
		set_page_private(pgd_list, (unsigned long)&page->index);
	pgd_list = page;
	set_page_private(page, (unsigned long)&pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *next, **pprev, *page = virt_to_page(pgd);
	next = (struct page *)page->index;
	pprev = (struct page **)page_private(page);
	*pprev = next;
	if (next)
		set_page_private(next, (unsigned long)pprev);
}
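
/*
 * Note (editorial addition, not in the original file): the list above is
 * intrusive and doubly linked with no separate node type.  page->index
 * holds the next page, while page_private() holds the address of
 * whatever pointer currently points at this page (either &pgd_list or
 * the previous page's ->index).  That is why pgd_list_del() can unlink
 * in O(1) without walking from the list head.
 */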

#if (PTRS_PER_PMD == 1)
/* Non-PAE pgd constructor */
void pgd_ctor(void *pgd)
{
	unsigned long flags;

	/* !PAE, no pagetable sharing */
	memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));

	spin_lock_irqsave(&pgd_lock, flags);

	/* must happen under lock */
	clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
			swapper_pg_dir + USER_PTRS_PER_PGD,
			KERNEL_PGD_PTRS);
	paravirt_alloc_pd_clone(__pa(pgd) >> PAGE_SHIFT,
				__pa(swapper_pg_dir) >> PAGE_SHIFT,
				USER_PTRS_PER_PGD,
				KERNEL_PGD_PTRS);
	pgd_list_add(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}
#else  /* PTRS_PER_PMD > 1 */
/* PAE pgd constructor */
void pgd_ctor(void *pgd)
{
	/* PAE, kernel PMD may be shared */

	if (SHARED_KERNEL_PMD) {
		clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
				swapper_pg_dir + USER_PTRS_PER_PGD,
				KERNEL_PGD_PTRS);
	} else {
		unsigned long flags;

		memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
		spin_lock_irqsave(&pgd_lock, flags);
		pgd_list_add(pgd);
		spin_unlock_irqrestore(&pgd_lock, flags);
	}
}
#endif	/* PTRS_PER_PMD */

void pgd_dtor(void *pgd)
{
	unsigned long flags;	/* can be called from interrupt context */

	if (SHARED_KERNEL_PMD)
		return;

	paravirt_release_pd(__pa(pgd) >> PAGE_SHIFT);
	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}

#define UNSHARED_PTRS_PER_PGD \
	(SHARED_KERNEL_PMD ? USER_PTRS_PER_PGD : PTRS_PER_PGD)

/* If we allocate a pmd for part of the kernel address space, then
   make sure it's initialized with the appropriate kernel mappings.
   Otherwise use a cached zeroed pmd.  */
static pmd_t *pmd_cache_alloc(int idx)
{
	pmd_t *pmd;

	if (idx >= USER_PTRS_PER_PGD) {
		pmd = (pmd_t *)__get_free_page(GFP_KERNEL);

		if (pmd)
			memcpy(pmd,
			       (void *)pgd_page_vaddr(swapper_pg_dir[idx]),
			       sizeof(pmd_t) * PTRS_PER_PMD);
	} else
		pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);

	return pmd;
}

static void pmd_cache_free(pmd_t *pmd, int idx)
{
	if (idx >= USER_PTRS_PER_PGD)
		free_page((unsigned long)pmd);
	else
		kmem_cache_free(pmd_cache, pmd);
}

pgd_t *pgd_alloc(struct mm_struct *mm)
{
	int i;
	pgd_t *pgd = quicklist_alloc(0, GFP_KERNEL, pgd_ctor);

	if (PTRS_PER_PMD == 1 || !pgd)
		return pgd;

	for (i = 0; i < UNSHARED_PTRS_PER_PGD; ++i) {
		pmd_t *pmd = pmd_cache_alloc(i);

		if (!pmd)
			goto out_oom;

		paravirt_alloc_pd(__pa(pmd) >> PAGE_SHIFT);
		/* the added low bit is _PAGE_PRESENT */
		set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
	}
	return pgd;

out_oom:
	for (i--; i >= 0; i--) {
		pgd_t pgdent = pgd[i];
		void *pmd = (void *)__va(pgd_val(pgdent) - 1);
		paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
		pmd_cache_free(pmd, i);
	}
	quicklist_free(0, pgd_dtor, pgd);
	return NULL;
}
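
/*
 * Note (editorial addition, not in the original file): the pgd entries
 * installed above encode the pmd as __pgd(1 + __pa(pmd)), i.e. the
 * physical address with _PAGE_PRESENT in bit 0; the cleanup paths here
 * and in pgd_free() below recover the virtual address with
 * __va(pgd_val(pgdent) - 1), subtracting that same bit.
 */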

void pgd_free(pgd_t *pgd)
{
	int i;

	/* in the PAE case user pgd entries are overwritten before usage */
	if (PTRS_PER_PMD > 1)
		for (i = 0; i < UNSHARED_PTRS_PER_PGD; ++i) {
			pgd_t pgdent = pgd[i];
			void *pmd = (void *)__va(pgd_val(pgdent) - 1);
			paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
			pmd_cache_free(pmd, i);
		}
	/* in the non-PAE case, free_pgtables() clears user pgd entries */
	quicklist_free(0, pgd_dtor, pgd);
}

/* shrink the pgd quicklist: keep up to 25 cached pages, freeing at most 16 per call */
void check_pgt_cache(void)
{
	quicklist_trim(0, pgd_dtor, 25, 16);
}