/*
 * Xen mmu operations
 *
 * This file contains the various mmu fetch and update operations.
 * The most important job they must perform is the mapping between the
 * domain's pfn and the overall machine mfns.
 *
 * Xen allows guests to directly update the pagetable, in a controlled
 * fashion. In other words, the guest modifies the same pagetable
 * that the CPU actually uses, which eliminates the overhead of having
 * a separate shadow pagetable.
 *
 * In order to allow this, it falls on the guest domain to map its
 * notion of a "physical" pfn - which is just a domain-local linear
 * address - into a real "machine address" which the CPU's MMU can
 * use.
 *
 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
 * inserted directly into the pagetable. When creating a new
 * pte/pmd/pgd, it converts the passed pfn into an mfn. Conversely,
 * when reading the content back with __(pgd|pmd|pte)_val, it converts
 * the mfn back into a pfn.
 *
 * The other constraint is that all pages which make up a pagetable
 * must be mapped read-only in the guest. This prevents uncontrolled
 * guest updates to the pagetable. Xen strictly enforces this, and
 * will disallow any pagetable update which will end up mapping a
 * pagetable page RW, and will disallow using any writable page as a
 * pagetable.
 *
 * Naively, when loading %cr3 with the base of a new pagetable, Xen
 * would need to validate the whole pagetable before going on.
 * Naturally, this is quite slow. The solution is to "pin" a
 * pagetable, which enforces all the constraints on the pagetable even
 * when it is not actively in use. This means that Xen can be assured
 * that it is still valid when you do load it into %cr3, and doesn't
 * need to revalidate it.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/debugfs.h>
#include <linux/bug.h>
#include <linux/module.h>
#include <linux/gfp.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/mmu_context.h>
#include <asm/setup.h>
#include <asm/paravirt.h>
#include <asm/linkage.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/version.h>
#include <xen/hvc-console.h>

#include "multicalls.h"
#include "mmu.h"
#include "debugfs.h"

#define MMU_UPDATE_HISTO	30

#ifdef CONFIG_XEN_DEBUG_FS

static struct {
	u32 pgd_update;
	u32 pgd_update_pinned;
	u32 pgd_update_batched;

	u32 pud_update;
	u32 pud_update_pinned;
	u32 pud_update_batched;

	u32 pmd_update;
	u32 pmd_update_pinned;
	u32 pmd_update_batched;

	u32 pte_update;
	u32 pte_update_pinned;
	u32 pte_update_batched;

	u32 mmu_update;
	u32 mmu_update_extended;
	u32 mmu_update_histo[MMU_UPDATE_HISTO];

	u32 prot_commit;
	u32 prot_commit_batched;

	u32 set_pte_at;
	u32 set_pte_at_batched;
	u32 set_pte_at_pinned;
	u32 set_pte_at_current;
	u32 set_pte_at_kernel;
} mmu_stats;

static u8 zero_stats;

static inline void check_zero(void)
{
	if (unlikely(zero_stats)) {
		memset(&mmu_stats, 0, sizeof(mmu_stats));
		zero_stats = 0;
	}
}

#define ADD_STATS(elem, val)			\
	do { check_zero(); mmu_stats.elem += (val); } while(0)

#else /* !CONFIG_XEN_DEBUG_FS */

#define ADD_STATS(elem, val) do { (void)(val); } while(0)

#endif /* CONFIG_XEN_DEBUG_FS */


/*
 * Identity map, in addition to plain kernel map. This needs to be
 * large enough to allocate page table pages to allocate the rest.
 * Each page can map 2MB.
 */
static pte_t level1_ident_pgt[PTRS_PER_PTE * 4] __page_aligned_bss;

#ifdef CONFIG_X86_64
/* l3 pud for userspace vsyscall mapping */
static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
#endif /* CONFIG_X86_64 */

/*
 * Note about cr3 (pagetable base) values:
 *
 * xen_cr3 contains the current logical cr3 value; it contains the
 * last set cr3. This may not be the current effective cr3, because
 * its update may be being lazily deferred. However, a vcpu looking
 * at its own cr3 can use this value knowing that everything will
 * be self-consistent.
 *
 * xen_current_cr3 contains the actual vcpu cr3; it is set once the
 * hypercall to set the vcpu cr3 is complete (so it may be a little
 * out of date, but it will never be set early). If one vcpu is
 * looking at another vcpu's cr3 value, it should use this variable.
 */
DEFINE_PER_CPU(unsigned long, xen_cr3);		 /* cr3 stored as physaddr */
DEFINE_PER_CPU(unsigned long, xen_current_cr3);	 /* actual vcpu cr3 */


/*
 * Just beyond the highest usermode address. STACK_TOP_MAX has a
 * redzone above it, so round it up to a PGD boundary.
 */
#define USER_LIMIT	((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)


#define P2M_ENTRIES_PER_PAGE	(PAGE_SIZE / sizeof(unsigned long))
#define TOP_ENTRIES		(MAX_DOMAIN_PAGES / P2M_ENTRIES_PER_PAGE)

/* Placeholder for holes in the address space */
static unsigned long p2m_missing[P2M_ENTRIES_PER_PAGE] __page_aligned_data =
		{ [ 0 ... P2M_ENTRIES_PER_PAGE-1 ] = ~0UL };

/* Array of pointers to pages containing p2m entries */
static unsigned long *p2m_top[TOP_ENTRIES] __page_aligned_data =
		{ [ 0 ... TOP_ENTRIES - 1] = &p2m_missing[0] };

/* Arrays of p2m arrays expressed in mfns used for save/restore */
static unsigned long p2m_top_mfn[TOP_ENTRIES] __page_aligned_bss;

static unsigned long p2m_top_mfn_list[TOP_ENTRIES / P2M_ENTRIES_PER_PAGE]
	__page_aligned_bss;

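/*
 * The p2m table is a simple two-level radix tree: p2m_top[] holds one
 * pointer per page of entries, and each leaf page holds
 * P2M_ENTRIES_PER_PAGE translations. On x86-64 with 4K pages, for
 * example, that's 4096/8 = 512 entries per leaf, so pfn N lives at
 * p2m_top[N / 512][N % 512]. Slots with no translations all share the
 * single p2m_missing page, whose entries are ~0UL (INVALID_P2M_ENTRY).
 */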
static inline unsigned p2m_top_index(unsigned long pfn)
{
	BUG_ON(pfn >= MAX_DOMAIN_PAGES);
	return pfn / P2M_ENTRIES_PER_PAGE;
}

static inline unsigned p2m_index(unsigned long pfn)
{
	return pfn % P2M_ENTRIES_PER_PAGE;
}

/* Build the parallel p2m_top_mfn structures */
void xen_build_mfn_list_list(void)
{
	unsigned pfn, idx;

	for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_ENTRIES_PER_PAGE) {
		unsigned topidx = p2m_top_index(pfn);

		p2m_top_mfn[topidx] = virt_to_mfn(p2m_top[topidx]);
	}

	for (idx = 0; idx < ARRAY_SIZE(p2m_top_mfn_list); idx++) {
		unsigned topidx = idx * P2M_ENTRIES_PER_PAGE;
		p2m_top_mfn_list[idx] = virt_to_mfn(&p2m_top_mfn[topidx]);
	}
}

void xen_setup_mfn_list_list(void)
{
	BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);

	HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
		virt_to_mfn(p2m_top_mfn_list);
	HYPERVISOR_shared_info->arch.max_pfn = xen_start_info->nr_pages;
}

/* Set up p2m_top to point to the domain-builder provided p2m pages */
void __init xen_build_dynamic_phys_to_machine(void)
{
	unsigned long *mfn_list = (unsigned long *)xen_start_info->mfn_list;
	unsigned long max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages);
	unsigned pfn;

	for (pfn = 0; pfn < max_pfn; pfn += P2M_ENTRIES_PER_PAGE) {
		unsigned topidx = p2m_top_index(pfn);

		p2m_top[topidx] = &mfn_list[pfn];
	}

	xen_build_mfn_list_list();
}

unsigned long get_phys_to_machine(unsigned long pfn)
{
	unsigned topidx, idx;

	if (unlikely(pfn >= MAX_DOMAIN_PAGES))
		return INVALID_P2M_ENTRY;

	topidx = p2m_top_index(pfn);
	idx = p2m_index(pfn);
	return p2m_top[topidx][idx];
}
EXPORT_SYMBOL_GPL(get_phys_to_machine);

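/*
 * Leaf pages are populated lazily: every p2m_top slot starts out
 * pointing at p2m_missing and only gets a private page the first time
 * a valid entry must be stored under it. The cmpxchg below makes a
 * racing double-install harmless - the loser's page is simply freed
 * again by alloc_p2m().
 */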
/* install a new p2m_top page */
bool install_p2mtop_page(unsigned long pfn, unsigned long *p)
{
	unsigned topidx = p2m_top_index(pfn);
	unsigned long **pfnp, *mfnp;
	unsigned i;

	pfnp = &p2m_top[topidx];
	mfnp = &p2m_top_mfn[topidx];

	for (i = 0; i < P2M_ENTRIES_PER_PAGE; i++)
		p[i] = INVALID_P2M_ENTRY;

	if (cmpxchg(pfnp, p2m_missing, p) == p2m_missing) {
		*mfnp = virt_to_mfn(p);
		return true;
	}

	return false;
}

static void alloc_p2m(unsigned long pfn)
{
	unsigned long *p;

	p = (void *)__get_free_page(GFP_KERNEL | __GFP_NOFAIL);
	BUG_ON(p == NULL);

	if (!install_p2mtop_page(pfn, p))
		free_page((unsigned long)p);
}

/* Try to install p2m mapping; fail if intermediate bits missing */
bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	unsigned topidx, idx;

	if (unlikely(pfn >= MAX_DOMAIN_PAGES)) {
		BUG_ON(mfn != INVALID_P2M_ENTRY);
		return true;
	}

	topidx = p2m_top_index(pfn);
	if (p2m_top[topidx] == p2m_missing) {
		if (mfn == INVALID_P2M_ENTRY)
			return true;
		return false;
	}

	idx = p2m_index(pfn);
	p2m_top[topidx][idx] = mfn;

	return true;
}

void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
		BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
		return;
	}

	if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
		alloc_p2m(pfn);

		if (!__set_phys_to_machine(pfn, mfn))
			BUG();
	}
}

unsigned long arbitrary_virt_to_mfn(void *vaddr)
{
	xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);

	return PFN_DOWN(maddr.maddr);
}

xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;
	pte_t *pte;
	unsigned offset;

	/*
	 * if the PFN is in the linear mapped vaddr range, we can just use
	 * the (quick) virt_to_machine() p2m lookup
	 */
	if (virt_addr_valid(vaddr))
		return virt_to_machine(vaddr);

	/* otherwise we have to do a (slower) full page-table walk */

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);
	offset = address & ~PAGE_MASK;
	return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
}

void make_lowmem_page_readonly(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);

	ptev = pte_wrprotect(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}

void make_lowmem_page_readwrite(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);

	ptev = pte_mkwrite(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}


static bool xen_page_pinned(void *ptr)
{
	struct page *page = virt_to_page(ptr);

	return PagePinned(page);
}

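/*
 * Add one mmu_update to the pending multicall batch. If the tail of
 * the batch is already an mmu_update hypercall, we just append another
 * struct mmu_update to its argument list and bump its count
 * (mcs.mc->args[1]); otherwise we start a new MULTI_mmu_update entry.
 * Either way nothing is issued to Xen until xen_mc_issue() decides to
 * flush, so a run of pte/pmd/pud writes under PARAVIRT_LAZY_MMU
 * collapses into a single hypercall.
 */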
static void xen_extend_mmu_update(const struct mmu_update *update)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));

	if (mcs.mc != NULL) {
		ADD_STATS(mmu_update_extended, 1);
		ADD_STATS(mmu_update_histo[mcs.mc->args[1]], -1);

		mcs.mc->args[1]++;

		if (mcs.mc->args[1] < MMU_UPDATE_HISTO)
			ADD_STATS(mmu_update_histo[mcs.mc->args[1]], 1);
		else
			ADD_STATS(mmu_update_histo[0], 1);
	} else {
		ADD_STATS(mmu_update, 1);
		mcs = __xen_mc_entry(sizeof(*u));
		MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
		ADD_STATS(mmu_update_histo[1], 1);
	}

	u = mcs.args;
	*u = *update;
}

void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pmd_val_ma(val);
	xen_extend_mmu_update(&u);

	ADD_STATS(pmd_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
	ADD_STATS(pmd_update, 1);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	ADD_STATS(pmd_update_pinned, 1);

	xen_set_pmd_hyper(ptr, val);
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
{
	set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
}

454 pte_t *ptep, pte_t pteval)
455{
994025ca
JF
456 ADD_STATS(set_pte_at, 1);
457// ADD_STATS(set_pte_at_pinned, xen_page_pinned(ptep));
458 ADD_STATS(set_pte_at_current, mm == current->mm);
459 ADD_STATS(set_pte_at_kernel, mm == &init_mm);
460
d66bf8fc 461 if (mm == current->mm || mm == &init_mm) {
8965c1c0 462 if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
d66bf8fc
JF
463 struct multicall_space mcs;
464 mcs = xen_mc_entry(0);
465
466 MULTI_update_va_mapping(mcs.mc, addr, pteval, 0);
994025ca 467 ADD_STATS(set_pte_at_batched, 1);
d66bf8fc 468 xen_mc_issue(PARAVIRT_LAZY_MMU);
2bd50036 469 goto out;
d66bf8fc
JF
470 } else
471 if (HYPERVISOR_update_va_mapping(addr, pteval, 0) == 0)
2bd50036 472 goto out;
d66bf8fc
JF
473 }
474 xen_set_pte(ptep, pteval);
2bd50036 475
2829b449 476out: return;
3b827c1b
JF
477}

pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
				 unsigned long addr, pte_t *ptep)
{
	/* Just return the pte as-is. We preserve the bits on commit */
	return *ptep;
}

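/*
 * The start/commit pair may race with the hardware setting the
 * accessed/dirty bits in the pte. Committing with
 * MMU_PT_UPDATE_PRESERVE_AD tells Xen to fold whatever A/D bits have
 * appeared since modify_prot_start into the new value, rather than
 * overwriting them.
 */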
void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
				 pte_t *ptep, pte_t pte)
{
	struct mmu_update u;

	xen_mc_batch();

	u.ptr = arbitrary_virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
	u.val = pte_val_ma(pte);
	xen_extend_mmu_update(&u);

	ADD_STATS(prot_commit, 1);
	ADD_STATS(prot_commit_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}

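/*
 * Converting between Xen's view of a pagetable entry (mfn in the
 * frame field) and the kernel's view (pfn) is a pure bit-slice
 * operation: the frame number sits under PTE_PFN_MASK and the flag
 * bits pass through untouched. Non-present entries are left alone,
 * since their payload (e.g. a swap entry) is not a frame number.
 */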
/* Assume pteval_t is equivalent to all the other *val_t types. */
static pteval_t pte_mfn_to_pfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;
		val = ((pteval_t)mfn_to_pfn(mfn) << PAGE_SHIFT) | flags;
	}

	return val;
}

static pteval_t pte_pfn_to_mfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;
		val = ((pteval_t)pfn_to_mfn(pfn) << PAGE_SHIFT) | flags;
	}

	return val;
}

pteval_t xen_pte_val(pte_t pte)
{
	return pte_mfn_to_pfn(pte.pte);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);

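/*
 * These conversion helpers are hot paths, so they are exposed through
 * PV_CALLEE_SAVE_REGS_THUNK: the generated thunk preserves all
 * registers, which lets the paravirt patching machinery call them
 * from sites that haven't spilled the usual caller-saved registers.
 */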
pgdval_t xen_pgd_val(pgd_t pgd)
{
	return pte_mfn_to_pfn(pgd.pgd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);

pte_t xen_make_pte(pteval_t pte)
{
	pte = pte_pfn_to_mfn(pte);
	return native_make_pte(pte);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);

pgd_t xen_make_pgd(pgdval_t pgd)
{
	pgd = pte_pfn_to_mfn(pgd);
	return native_make_pgd(pgd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);

pmdval_t xen_pmd_val(pmd_t pmd)
{
	return pte_mfn_to_pfn(pmd.pmd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);

void xen_set_pud_hyper(pud_t *ptr, pud_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pud_val_ma(val);
	xen_extend_mmu_update(&u);

	ADD_STATS(pud_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pud(pud_t *ptr, pud_t val)
{
	ADD_STATS(pud_update, 1);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	ADD_STATS(pud_update_pinned, 1);

	xen_set_pud_hyper(ptr, val);
}

void xen_set_pte(pte_t *ptep, pte_t pte)
{
	ADD_STATS(pte_update, 1);
//	ADD_STATS(pte_update_pinned, xen_page_pinned(ptep));
	ADD_STATS(pte_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

#ifdef CONFIG_X86_PAE
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
#else
	*ptep = pte;
#endif
}

#ifdef CONFIG_X86_PAE
void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_64bit((u64 *)ptep, native_pte_val(pte));
}

void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	ptep->pte_low = 0;
	smp_wmb();		/* make sure low gets written first */
	ptep->pte_high = 0;
}

void xen_pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}
#endif	/* CONFIG_X86_PAE */

pmd_t xen_make_pmd(pmdval_t pmd)
{
	pmd = pte_pfn_to_mfn(pmd);
	return native_make_pmd(pmd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);

#if PAGETABLE_LEVELS == 4
pudval_t xen_pud_val(pud_t pud)
{
	return pte_mfn_to_pfn(pud.pud);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);

pud_t xen_make_pud(pudval_t pud)
{
	pud = pte_pfn_to_mfn(pud);

	return native_make_pud(pud);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);

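/*
 * 64-bit PV guests keep two pgds per process: the kernel one, and a
 * user pgd which Xen switches to for the return to usermode. The user
 * pgd is stashed in the page->private field of the kernel pgd's
 * struct page (see xen_pgd_alloc), and only the usermode slots below
 * USER_LIMIT are meaningful in it.
 */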
pgd_t *xen_get_user_pgd(pgd_t *pgd)
{
	pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
	unsigned offset = pgd - pgd_page;
	pgd_t *user_ptr = NULL;

	if (offset < pgd_index(USER_LIMIT)) {
		struct page *page = virt_to_page(pgd_page);
		user_ptr = (pgd_t *)page->private;
		if (user_ptr)
			user_ptr += offset;
	}

	return user_ptr;
}

static void __xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
{
	struct mmu_update u;

	u.ptr = virt_to_machine(ptr).maddr;
	u.val = pgd_val_ma(val);
	xen_extend_mmu_update(&u);
}

/*
 * Raw hypercall-based set_pgd, intended for use in early boot before
 * there's a page structure. This implies:
 *  1. The only existing pagetable is the kernel's
 *  2. It is always pinned
 *  3. It has no user pagetable attached to it
 */
void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
{
	preempt_disable();

	xen_mc_batch();

	__xen_set_pgd_hyper(ptr, val);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pgd(pgd_t *ptr, pgd_t val)
{
	pgd_t *user_ptr = xen_get_user_pgd(ptr);

	ADD_STATS(pgd_update, 1);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		if (user_ptr) {
			WARN_ON(xen_page_pinned(user_ptr));
			*user_ptr = val;
		}
		return;
	}

	ADD_STATS(pgd_update_pinned, 1);
	ADD_STATS(pgd_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

	/* If it's pinned, then we can at least batch the kernel and
	   user updates together. */
	xen_mc_batch();

	__xen_set_pgd_hyper(ptr, val);
	if (user_ptr)
		__xen_set_pgd_hyper(user_ptr, val);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
#endif	/* PAGETABLE_LEVELS == 4 */

/*
 * (Yet another) pagetable walker. This one is intended for pinning a
 * pagetable. This means that it walks a pagetable and calls the
 * callback function on each page it finds making up the page table,
 * at every level. It walks the entire pagetable, but it only bothers
 * pinning pte pages which are below limit. In the normal case this
 * will be STACK_TOP_MAX, but at boot we need to pin up to
 * FIXADDR_TOP.
 *
 * For 32-bit the important bit is that we don't pin beyond there,
 * because then we start getting into Xen's ptes.
 *
 * For 64-bit, we must skip the Xen hole in the middle of the address
 * space, just after the big x86-64 virtual hole.
 */
static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
			  int (*func)(struct mm_struct *mm, struct page *,
				      enum pt_level),
			  unsigned long limit)
{
	int flush = 0;
	unsigned hole_low, hole_high;
	unsigned pgdidx_limit, pudidx_limit, pmdidx_limit;
	unsigned pgdidx, pudidx, pmdidx;

	/* The limit is the last byte to be touched */
	limit--;
	BUG_ON(limit >= FIXADDR_TOP);

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	/*
	 * 64-bit has a great big hole in the middle of the address
	 * space, which contains the Xen mappings. On 32-bit these
	 * will end up making a zero-sized hole, so this is a no-op.
	 */
	hole_low = pgd_index(USER_LIMIT);
	hole_high = pgd_index(PAGE_OFFSET);

	pgdidx_limit = pgd_index(limit);
#if PTRS_PER_PUD > 1
	pudidx_limit = pud_index(limit);
#else
	pudidx_limit = 0;
#endif
#if PTRS_PER_PMD > 1
	pmdidx_limit = pmd_index(limit);
#else
	pmdidx_limit = 0;
#endif

	for (pgdidx = 0; pgdidx <= pgdidx_limit; pgdidx++) {
		pud_t *pud;

		if (pgdidx >= hole_low && pgdidx < hole_high)
			continue;

		if (!pgd_val(pgd[pgdidx]))
			continue;

		pud = pud_offset(&pgd[pgdidx], 0);

		if (PTRS_PER_PUD > 1) /* not folded */
			flush |= (*func)(mm, virt_to_page(pud), PT_PUD);

		for (pudidx = 0; pudidx < PTRS_PER_PUD; pudidx++) {
			pmd_t *pmd;

			if (pgdidx == pgdidx_limit &&
			    pudidx > pudidx_limit)
				goto out;

			if (pud_none(pud[pudidx]))
				continue;

			pmd = pmd_offset(&pud[pudidx], 0);

			if (PTRS_PER_PMD > 1) /* not folded */
				flush |= (*func)(mm, virt_to_page(pmd), PT_PMD);

			for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++) {
				struct page *pte;

				if (pgdidx == pgdidx_limit &&
				    pudidx == pudidx_limit &&
				    pmdidx > pmdidx_limit)
					goto out;

				if (pmd_none(pmd[pmdidx]))
					continue;

				pte = pmd_page(pmd[pmdidx]);
				flush |= (*func)(mm, pte, PT_PTE);
			}
		}
	}

out:
	/* Do the top level last, so that the callbacks can use it as
	   a cue to do final things like tlb flushes. */
	flush |= (*func)(mm, virt_to_page(pgd), PT_PGD);

	return flush;
}

static int xen_pgd_walk(struct mm_struct *mm,
			int (*func)(struct mm_struct *mm, struct page *,
				    enum pt_level),
			unsigned long limit)
{
	return __xen_pgd_walk(mm, mm->pgd, func, limit);
}

/* If we're using split pte locks, then take the page's lock and
   return a pointer to it. Otherwise return NULL. */
static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
{
	spinlock_t *ptl = NULL;

#if USE_SPLIT_PTLOCKS
	ptl = __pte_lockptr(page);
	spin_lock_nest_lock(ptl, &mm->page_table_lock);
#endif

	return ptl;
}

static void xen_pte_unlock(void *v)
{
	spinlock_t *ptl = v;
	spin_unlock(ptl);
}

static void xen_do_pin(unsigned level, unsigned long pfn)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	mcs = __xen_mc_entry(sizeof(*op));
	op = mcs.args;
	op->cmd = level;
	op->arg1.mfn = pfn_to_mfn(pfn);
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
}

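/*
 * Pinning proceeds bottom-up: each constituent page is remapped
 * read-only with a batched update_va_mapping, pte pages are
 * additionally pinned as L1 tables, and finally the root is pinned
 * with MMUEXT_PIN_L4_TABLE (L3 on 32-bit), at which point Xen
 * validates the whole tree once and thereafter enforces the
 * invariants described at the top of this file.
 */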
static int xen_pin_page(struct mm_struct *mm, struct page *page,
			enum pt_level level)
{
	unsigned pgfl = TestSetPagePinned(page);
	int flush;

	if (pgfl)
		flush = 0;		/* already pinned */
	else if (PageHighMem(page))
		/* kmaps need flushing if we found an unpinned
		   highpage */
		flush = 1;
	else {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		struct multicall_space mcs = __xen_mc_entry(0);
		spinlock_t *ptl;

		flush = 0;

		/*
		 * We need to hold the pagetable lock between the time
		 * we make the pagetable RO and when we actually pin
		 * it. If we don't, then other users may come in and
		 * attempt to update the pagetable by writing it,
		 * which will fail because the memory is RO but not
		 * pinned, so Xen won't do the trap'n'emulate.
		 *
		 * If we're using split pte locks, we can't hold the
		 * entire pagetable's worth of locks during the
		 * traverse, because we may wrap the preempt count (8
		 * bits). The solution is to mark RO and pin each PTE
		 * page while holding the lock. This means the number
		 * of locks we end up holding is never more than a
		 * batch size (~32 entries, at present).
		 *
		 * If we're not using split pte locks, we needn't pin
		 * the PTE pages independently, because we're
		 * protected by the overall pagetable lock.
		 */
		ptl = NULL;
		if (level == PT_PTE)
			ptl = xen_pte_lock(page, mm);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL_RO),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);

			/* Queue a deferred unlock for when this batch
			   is completed. */
			xen_mc_callback(xen_pte_unlock, ptl);
		}
	}

	return flush;
}

/* This is called just after a mm has been created, but it has not
   been used yet. We need to make sure that its pagetable is all
   read-only, and can be pinned. */
static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
{
	vm_unmap_aliases();

	xen_mc_batch();

	if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
		/* re-enable interrupts for flushing */
		xen_mc_issue(0);

		kmap_flush_unused();

		xen_mc_batch();
	}

#ifdef CONFIG_X86_64
	{
		pgd_t *user_pgd = xen_get_user_pgd(pgd);

		xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));

		if (user_pgd) {
			xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
			xen_do_pin(MMUEXT_PIN_L4_TABLE,
				   PFN_DOWN(__pa(user_pgd)));
		}
	}
#else /* CONFIG_X86_32 */
#ifdef CONFIG_X86_PAE
	/* Need to make sure unshared kernel PMD is pinnable */
	xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
		     PT_PMD);
#endif
	xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
#endif /* CONFIG_X86_64 */
	xen_mc_issue(0);
}

static void xen_pgd_pin(struct mm_struct *mm)
{
	__xen_pgd_pin(mm, mm->pgd);
}

/*
 * On save, we need to pin all pagetables to make sure they get their
 * mfns turned into pfns. Search the list for any unpinned pgds and pin
 * them (unpinned pgds are not currently in use, probably because the
 * process is under construction or destruction).
 *
 * Expected to be called in stop_machine() ("equivalent to taking
 * every spinlock in the system"), so the locking doesn't really
 * matter all that much.
 */
void xen_mm_pin_all(void)
{
	unsigned long flags;
	struct page *page;

	spin_lock_irqsave(&pgd_lock, flags);

	list_for_each_entry(page, &pgd_list, lru) {
		if (!PagePinned(page)) {
			__xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
			SetPageSavePinned(page);
		}
	}

	spin_unlock_irqrestore(&pgd_lock, flags);
}

/*
 * The init_mm pagetable is really pinned as soon as it's created, but
 * that's before we have page structures to store the bits. So do all
 * the book-keeping now.
 */
static __init int xen_mark_pinned(struct mm_struct *mm, struct page *page,
				  enum pt_level level)
{
	SetPagePinned(page);
	return 0;
}

static void __init xen_mark_init_mm_pinned(void)
{
	xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
}

static int xen_unpin_page(struct mm_struct *mm, struct page *page,
			  enum pt_level level)
{
	unsigned pgfl = TestClearPagePinned(page);

	if (pgfl && !PageHighMem(page)) {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		spinlock_t *ptl = NULL;
		struct multicall_space mcs;

		/*
		 * Do the converse to pin_page. If we're using split
		 * pte locks, we must be holding the lock while the
		 * pte page is unpinned but still RO to prevent
		 * concurrent updates from seeing it in this
		 * partially-pinned state.
		 */
		if (level == PT_PTE) {
			ptl = xen_pte_lock(page, mm);

			if (ptl)
				xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
		}

		mcs = __xen_mc_entry(0);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			/* unlock when batch completed */
			xen_mc_callback(xen_pte_unlock, ptl);
		}
	}

	return 0;		/* never need to flush on unpin */
}

/* Release a pagetable's pages back as normal RW */
static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
{
	xen_mc_batch();

	xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

#ifdef CONFIG_X86_64
	{
		pgd_t *user_pgd = xen_get_user_pgd(pgd);

		if (user_pgd) {
			xen_do_pin(MMUEXT_UNPIN_TABLE,
				   PFN_DOWN(__pa(user_pgd)));
			xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
		}
	}
#endif

#ifdef CONFIG_X86_PAE
	/* Need to make sure unshared kernel PMD is unpinned */
	xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
		       PT_PMD);
#endif

	__xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);

	xen_mc_issue(0);
}

static void xen_pgd_unpin(struct mm_struct *mm)
{
	__xen_pgd_unpin(mm, mm->pgd);
}

/*
 * On resume, undo any pinning done at save, so that the rest of the
 * kernel doesn't see any unexpected pinned pagetables.
 */
void xen_mm_unpin_all(void)
{
	unsigned long flags;
	struct page *page;

	spin_lock_irqsave(&pgd_lock, flags);

	list_for_each_entry(page, &pgd_list, lru) {
		if (PageSavePinned(page)) {
			BUG_ON(!PagePinned(page));
			__xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
			ClearPageSavePinned(page);
		}
	}

	spin_unlock_irqrestore(&pgd_lock, flags);
}

void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	spin_lock(&next->page_table_lock);
	xen_pgd_pin(next);
	spin_unlock(&next->page_table_lock);
}

void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	spin_lock(&mm->page_table_lock);
	xen_pgd_pin(mm);
	spin_unlock(&mm->page_table_lock);
}

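/*
 * A pagetable gets pinned when its mm is first activated (exec) or
 * when it is copied at fork time (dup_mmap), and stays pinned until
 * xen_exit_mmap() tears the process down. Both paths hold
 * page_table_lock so the pinning walk cannot race with other
 * pagetable updates.
 */
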
#ifdef CONFIG_SMP
/* Another cpu may still have its %cr3 pointing at the pagetable, so
   we need to repoint it somewhere else before we can unpin it. */
static void drop_other_mm_ref(void *info)
{
	struct mm_struct *mm = info;
	struct mm_struct *active_mm;

	active_mm = percpu_read(cpu_tlbstate.active_mm);

	if (active_mm == mm)
		leave_mm(smp_processor_id());

	/* If this cpu still has a stale cr3 reference, then make sure
	   it has been flushed. */
	if (percpu_read(xen_current_cr3) == __pa(mm->pgd))
		load_cr3(swapper_pg_dir);
}

static void xen_drop_mm_ref(struct mm_struct *mm)
{
	cpumask_var_t mask;
	unsigned cpu;

	if (current->active_mm == mm) {
		if (current->mm == mm)
			load_cr3(swapper_pg_dir);
		else
			leave_mm(smp_processor_id());
	}

	/* Get the "official" set of cpus referring to our pagetable. */
	if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
		for_each_online_cpu(cpu) {
			if (!cpumask_test_cpu(cpu, mm_cpumask(mm))
			    && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
				continue;
			smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
		}
		return;
	}
	cpumask_copy(mask, mm_cpumask(mm));

	/* It's possible that a vcpu may have a stale reference to our
	   cr3, because it's in lazy mode and hasn't yet flushed its
	   set of pending hypercalls. In this case, we can look at its
	   actual current cr3 value, and force it to flush if
	   needed. */
	for_each_online_cpu(cpu) {
		if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
			cpumask_set_cpu(cpu, mask);
	}

	if (!cpumask_empty(mask))
		smp_call_function_many(mask, drop_other_mm_ref, mm, 1);
	free_cpumask_var(mask);
}
#else
static void xen_drop_mm_ref(struct mm_struct *mm)
{
	if (current->active_mm == mm)
		load_cr3(swapper_pg_dir);
}
#endif

/*
 * While a process runs, Xen pins its pagetable, which means that the
 * hypervisor forces it to be read-only and controls all updates to
 * it. This means that all pagetable updates have to go via the
 * hypervisor, which is moderately expensive.
 *
 * Since we're pulling the pagetable down, we switch over to init_mm,
 * unpin the old process's pagetable and mark it all read-write, which
 * allows further operations on it to be simple memory accesses.
 *
 * The only subtle point is that another CPU may still be using the
 * pagetable because of lazy tlb flushing. This means we need to
 * switch all CPUs off this pagetable before we can unpin it.
 */
void xen_exit_mmap(struct mm_struct *mm)
{
	get_cpu();		/* make sure we don't move around */
	xen_drop_mm_ref(mm);
	put_cpu();

	spin_lock(&mm->page_table_lock);

	/* pgd may not be pinned in the error exit path of execve */
	if (xen_page_pinned(mm->pgd))
		xen_pgd_unpin(mm);

	spin_unlock(&mm->page_table_lock);
}

static __init void xen_pagetable_setup_start(pgd_t *base)
{
}

static void xen_post_allocator_init(void);

static __init void xen_pagetable_setup_done(pgd_t *base)
{
	xen_setup_shared_info();
	xen_post_allocator_init();
}

static void xen_write_cr2(unsigned long cr2)
{
	percpu_read(xen_vcpu)->arch.cr2 = cr2;
}

static unsigned long xen_read_cr2(void)
{
	return percpu_read(xen_vcpu)->arch.cr2;
}

unsigned long xen_read_cr2_direct(void)
{
	return percpu_read(xen_vcpu_info.arch.cr2);
}

static void xen_flush_tlb(void)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*op));

	op = mcs.args;
	op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_flush_tlb_single(unsigned long addr)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*op));
	op = mcs.args;
	op->cmd = MMUEXT_INVLPG_LOCAL;
	op->arg1.linear_addr = addr & PAGE_MASK;
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_flush_tlb_others(const struct cpumask *cpus,
				 struct mm_struct *mm, unsigned long va)
{
	struct {
		struct mmuext_op op;
		DECLARE_BITMAP(mask, NR_CPUS);
	} *args;
	struct multicall_space mcs;

	if (cpumask_empty(cpus))
		return;		/* nothing to do */

	mcs = xen_mc_entry(sizeof(*args));
	args = mcs.args;
	args->op.arg2.vcpumask = to_cpumask(args->mask);

	/* Remove us, and any offline CPUS. */
	cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));

	if (va == TLB_FLUSH_ALL) {
		args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
	} else {
		args->op.cmd = MMUEXT_INVLPG_MULTI;
		args->op.arg1.linear_addr = va;
	}

	MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}

static unsigned long xen_read_cr3(void)
{
	return percpu_read(xen_cr3);
}

static void set_current_cr3(void *v)
{
	percpu_write(xen_current_cr3, (unsigned long)v);
}

static void __xen_write_cr3(bool kernel, unsigned long cr3)
{
	struct mmuext_op *op;
	struct multicall_space mcs;
	unsigned long mfn;

	if (cr3)
		mfn = pfn_to_mfn(PFN_DOWN(cr3));
	else
		mfn = 0;

	WARN_ON(mfn == 0 && kernel);

	mcs = __xen_mc_entry(sizeof(*op));

	op = mcs.args;
	op->cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
	op->arg1.mfn = mfn;

	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	if (kernel) {
		percpu_write(xen_cr3, cr3);

		/* Update xen_current_cr3 once the batch has actually
		   been submitted. */
		xen_mc_callback(set_current_cr3, (void *)cr3);
	}
}

static void xen_write_cr3(unsigned long cr3)
{
	BUG_ON(preemptible());

	xen_mc_batch();			/* disables interrupts */

	/* Update while interrupts are disabled, so it's atomic with
	   respect to ipis */
	percpu_write(xen_cr3, cr3);

	__xen_write_cr3(true, cr3);

#ifdef CONFIG_X86_64
	{
		pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
		if (user_pgd)
			__xen_write_cr3(false, __pa(user_pgd));
		else
			__xen_write_cr3(false, 0);
	}
#endif

	xen_mc_issue(PARAVIRT_LAZY_CPU);	/* interrupts restored */
}

static int xen_pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = mm->pgd;
	int ret = 0;

	BUG_ON(PagePinned(virt_to_page(pgd)));

#ifdef CONFIG_X86_64
	{
		struct page *page = virt_to_page(pgd);
		pgd_t *user_pgd;

		BUG_ON(page->private != 0);

		ret = -ENOMEM;

		user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
		page->private = (unsigned long)user_pgd;

		if (user_pgd != NULL) {
			user_pgd[pgd_index(VSYSCALL_START)] =
				__pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
			ret = 0;
		}

		BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
	}
#endif

	return ret;
}

static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#ifdef CONFIG_X86_64
	pgd_t *user_pgd = xen_get_user_pgd(pgd);

	if (user_pgd)
		free_page((unsigned long)user_pgd);
#endif
}

#ifdef CONFIG_X86_32
static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
{
	/* If there's an existing pte, then don't allow _PAGE_RW to be set.
	   The mask below is all-ones except for the _PAGE_RW bit, which
	   is copied from the existing pte - so the AND clears _PAGE_RW
	   from the new value iff the old pte was read-only. */
	if (pte_val_ma(*ptep) & _PAGE_PRESENT)
		pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
			       pte_val_ma(pte));

	return pte;
}

/* Init-time set_pte while constructing initial pagetables, which
   doesn't allow RO pagetable pages to be remapped RW */
static __init void xen_set_pte_init(pte_t *ptep, pte_t pte)
{
	pte = mask_rw_pte(ptep, pte);

	xen_set_pte(ptep, pte);
}
#endif

static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
{
	struct mmuext_op op;
	op.cmd = cmd;
	op.arg1.mfn = pfn_to_mfn(pfn);
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
		BUG();
}

/* Early in boot, while setting up the initial pagetable, assume
   everything is pinned. */
static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
{
#ifdef CONFIG_FLATMEM
	BUG_ON(mem_map);	/* should only be used early */
#endif
	make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
	pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
}

/* Used for pmd and pud */
static __init void xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
{
#ifdef CONFIG_FLATMEM
	BUG_ON(mem_map);	/* should only be used early */
#endif
	make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
}

/* Early release_pte assumes that all pts are pinned, since there's
   only init_mm and anything attached to that is pinned. */
static __init void xen_release_pte_init(unsigned long pfn)
{
	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
	make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
}

static __init void xen_release_pmd_init(unsigned long pfn)
{
	make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
}

/* This needs to make sure the new pte page is pinned iff it's being
   attached to a pinned pagetable. */
static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, unsigned level)
{
	struct page *page = pfn_to_page(pfn);

	if (PagePinned(virt_to_page(mm->pgd))) {
		SetPagePinned(page);

		vm_unmap_aliases();
		if (!PageHighMem(page)) {
			make_lowmem_page_readonly(__va(PFN_PHYS((unsigned long)pfn)));
			if (level == PT_PTE && USE_SPLIT_PTLOCKS)
				pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
		} else {
			/* make sure there are no stray mappings of
			   this page */
			kmap_flush_unused();
		}
	}
}

static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
	xen_alloc_ptpage(mm, pfn, PT_PTE);
}

static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
	xen_alloc_ptpage(mm, pfn, PT_PMD);
}

/* This should never happen until we're OK to use struct page */
static void xen_release_ptpage(unsigned long pfn, unsigned level)
{
	struct page *page = pfn_to_page(pfn);

	if (PagePinned(page)) {
		if (!PageHighMem(page)) {
			if (level == PT_PTE && USE_SPLIT_PTLOCKS)
				pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
			make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
		}
		ClearPagePinned(page);
	}
}

static void xen_release_pte(unsigned long pfn)
{
	xen_release_ptpage(pfn, PT_PTE);
}

static void xen_release_pmd(unsigned long pfn)
{
	xen_release_ptpage(pfn, PT_PMD);
}

#if PAGETABLE_LEVELS == 4
static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
	xen_alloc_ptpage(mm, pfn, PT_PUD);
}

static void xen_release_pud(unsigned long pfn)
{
	xen_release_ptpage(pfn, PT_PUD);
}
#endif

void __init xen_reserve_top(void)
{
#ifdef CONFIG_X86_32
	unsigned long top = HYPERVISOR_VIRT_START;
	struct xen_platform_parameters pp;

	if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
		top = pp.virt_start;

	reserve_top_address(-top);
#endif	/* CONFIG_X86_32 */
}

/*
 * Like __va(), but returns address in the kernel mapping (which is
 * all we have until the physical memory mapping has been set up).
 */
static void *__ka(phys_addr_t paddr)
{
#ifdef CONFIG_X86_64
	return (void *)(paddr + __START_KERNEL_map);
#else
	return __va(paddr);
#endif
}

/* Convert a machine address to physical address */
static unsigned long m2p(phys_addr_t maddr)
{
	phys_addr_t paddr;

	maddr &= PTE_PFN_MASK;
	paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;

	return paddr;
}

/* Convert a machine address to kernel virtual */
static void *m2v(phys_addr_t maddr)
{
	return __ka(m2p(maddr));
}

static void set_page_prot(void *addr, pgprot_t prot)
{
	unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
	pte_t pte = pfn_pte(pfn, prot);

	if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0))
		BUG();
}

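/*
 * Build a 1:1 pfn mapping under 'pmd' for the first max_pfn pages,
 * reusing any pte pages the domain builder already installed and
 * otherwise carving new ones out of the static level1_ident_pgt
 * pool. That pool is only four pages, and each pte page maps 2MB
 * (512 * 4K), so at most ~8MB can be mapped this early; the loop
 * simply stops when the pool runs dry.
 */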
static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
{
	unsigned pmdidx, pteidx;
	unsigned ident_pte;
	unsigned long pfn;

	ident_pte = 0;
	pfn = 0;
	for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
		pte_t *pte_page;

		/* Reuse or allocate a page of ptes */
		if (pmd_present(pmd[pmdidx]))
			pte_page = m2v(pmd[pmdidx].pmd);
		else {
			/* Check for free pte pages */
			if (ident_pte == ARRAY_SIZE(level1_ident_pgt))
				break;

			pte_page = &level1_ident_pgt[ident_pte];
			ident_pte += PTRS_PER_PTE;

			pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
		}

		/* Install mappings */
		for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
			pte_t pte;

			if (pfn > max_pfn_mapped)
				max_pfn_mapped = pfn;

			if (!pte_none(pte_page[pteidx]))
				continue;

			pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
			pte_page[pteidx] = pte;
		}
	}

	for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
		set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);

	set_page_prot(pmd, PAGE_KERNEL_RO);
}

#ifdef CONFIG_X86_64
static void convert_pfn_mfn(void *v)
{
	pte_t *pte = v;
	int i;

	/* All levels are converted the same way, so just treat them
	   as ptes. */
	for (i = 0; i < PTRS_PER_PTE; i++)
		pte[i] = xen_make_pte(pte[i].pte);
}

/*
 * Set up the initial kernel pagetable.
 *
 * We can construct this by grafting the Xen provided pagetable into
 * head_64.S's preconstructed pagetables. We copy the Xen L2's into
 * level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt. This
 * means that only the kernel has a physical mapping to start with -
 * but that's enough to get __va working. We need to fill in the rest
 * of the physical mapping once some sort of allocator has been set
 * up.
 */
__init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
					 unsigned long max_pfn)
{
	pud_t *l3;
	pmd_t *l2;

	/* Zap identity mapping */
	init_level4_pgt[0] = __pgd(0);

	/* Pre-constructed entries are in pfn, so convert to mfn */
	convert_pfn_mfn(init_level4_pgt);
	convert_pfn_mfn(level3_ident_pgt);
	convert_pfn_mfn(level3_kernel_pgt);

	l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
	l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);

	memcpy(level2_ident_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
	memcpy(level2_kernel_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);

	l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd);
	l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud);
	memcpy(level2_fixmap_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);

	/* Set up identity map */
	xen_map_identity_early(level2_ident_pgt, max_pfn);

	/* Make pagetable pieces RO */
	set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
	set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
	set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
	set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
	set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
	set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);

	/* Pin down new L4 */
	pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
			  PFN_DOWN(__pa_symbol(init_level4_pgt)));

	/* Unpin Xen-provided one */
	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

	/* Switch over */
	pgd = init_level4_pgt;

	/*
	 * At this stage there can be no user pgd, and no page
	 * structure to attach it to, so make sure we just set the
	 * kernel pgd.
	 */
	xen_mc_batch();
	__xen_write_cr3(true, __pa(pgd));
	xen_mc_issue(PARAVIRT_LAZY_CPU);

	reserve_early(__pa(xen_start_info->pt_base),
		      __pa(xen_start_info->pt_base +
			   xen_start_info->nr_pt_frames * PAGE_SIZE),
		      "XEN PAGETABLES");

	return pgd;
}
#else	/* !CONFIG_X86_64 */
static pmd_t level2_kernel_pgt[PTRS_PER_PMD] __page_aligned_bss;

__init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
					 unsigned long max_pfn)
{
	pmd_t *kernel_pmd;

	max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
				  xen_start_info->nr_pt_frames * PAGE_SIZE +
				  512*1024);

	kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
	memcpy(level2_kernel_pgt, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);

	xen_map_identity_early(level2_kernel_pgt, max_pfn);

	memcpy(swapper_pg_dir, pgd, sizeof(pgd_t) * PTRS_PER_PGD);
	set_pgd(&swapper_pg_dir[KERNEL_PGD_BOUNDARY],
			__pgd(__pa(level2_kernel_pgt) | _PAGE_PRESENT));

	set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
	set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
	set_page_prot(empty_zero_page, PAGE_KERNEL_RO);

	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

	xen_write_cr3(__pa(swapper_pg_dir));

	pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(swapper_pg_dir)));

	reserve_early(__pa(xen_start_info->pt_base),
		      __pa(xen_start_info->pt_base +
			   xen_start_info->nr_pt_frames * PAGE_SIZE),
		      "XEN PAGETABLES");

	return swapper_pg_dir;
}
#endif	/* CONFIG_X86_64 */

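/*
 * Fixmap slots fall into two classes: those naming one of the guest's
 * own pages (kmap, vsyscall, text_poke, ...) take a pfn and must be
 * translated through the p2m via pfn_pte(), while the rest (e.g. the
 * dummy local APIC) already name a machine frame and are installed
 * verbatim with mfn_pte().
 */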
static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
{
	pte_t pte;

	phys >>= PAGE_SHIFT;

	switch (idx) {
	case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
#ifdef CONFIG_X86_F00F_BUG
	case FIX_F00F_IDT:
#endif
#ifdef CONFIG_X86_32
	case FIX_WP_TEST:
	case FIX_VDSO:
# ifdef CONFIG_HIGHMEM
	case FIX_KMAP_BEGIN ... FIX_KMAP_END:
# endif
#else
	case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE:
#endif
#ifdef CONFIG_X86_LOCAL_APIC
	case FIX_APIC_BASE:	/* maps dummy local APIC */
#endif
	case FIX_TEXT_POKE0:
	case FIX_TEXT_POKE1:
		/* All local page mappings */
		pte = pfn_pte(phys, prot);
		break;

	default:
		pte = mfn_pte(phys, prot);
		break;
	}

	__native_set_fixmap(idx, pte);

#ifdef CONFIG_X86_64
	/* Replicate changes to map the vsyscall page into the user
	   pagetable vsyscall mapping. */
	if (idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE) {
		unsigned long vaddr = __fix_to_virt(idx);
		set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
	}
#endif
}

static __init void xen_post_allocator_init(void)
{
	pv_mmu_ops.set_pte = xen_set_pte;
	pv_mmu_ops.set_pmd = xen_set_pmd;
	pv_mmu_ops.set_pud = xen_set_pud;
#if PAGETABLE_LEVELS == 4
	pv_mmu_ops.set_pgd = xen_set_pgd;
#endif

	/* This will work as long as patching hasn't happened yet
	   (which it hasn't) */
	pv_mmu_ops.alloc_pte = xen_alloc_pte;
	pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
	pv_mmu_ops.release_pte = xen_release_pte;
	pv_mmu_ops.release_pmd = xen_release_pmd;
#if PAGETABLE_LEVELS == 4
	pv_mmu_ops.alloc_pud = xen_alloc_pud;
	pv_mmu_ops.release_pud = xen_release_pud;
#endif

#ifdef CONFIG_X86_64
	SetPagePinned(virt_to_page(level3_user_vsyscall));
#endif
	xen_mark_init_mm_pinned();
}

static void xen_leave_lazy_mmu(void)
{
	preempt_disable();
	xen_mc_flush();
	paravirt_leave_lazy_mmu();
	preempt_enable();
}

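/*
 * These are the early-boot mmu ops: the alloc/release hooks assume
 * every pagetable page is pinned (only init_mm exists), and the
 * 32-bit set_pte refuses to remap RO pagetable pages RW. Once the
 * page allocator is up, xen_post_allocator_init() above swaps in the
 * struct-page-aware versions.
 */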
static const struct pv_mmu_ops xen_mmu_ops __initdata = {
	.read_cr2 = xen_read_cr2,
	.write_cr2 = xen_write_cr2,

	.read_cr3 = xen_read_cr3,
	.write_cr3 = xen_write_cr3,

	.flush_tlb_user = xen_flush_tlb,
	.flush_tlb_kernel = xen_flush_tlb,
	.flush_tlb_single = xen_flush_tlb_single,
	.flush_tlb_others = xen_flush_tlb_others,

	.pte_update = paravirt_nop,
	.pte_update_defer = paravirt_nop,

	.pgd_alloc = xen_pgd_alloc,
	.pgd_free = xen_pgd_free,

	.alloc_pte = xen_alloc_pte_init,
	.release_pte = xen_release_pte_init,
	.alloc_pmd = xen_alloc_pmd_init,
	.alloc_pmd_clone = paravirt_nop,
	.release_pmd = xen_release_pmd_init,

#ifdef CONFIG_X86_64
	.set_pte = xen_set_pte,
#else
	.set_pte = xen_set_pte_init,
#endif
	.set_pte_at = xen_set_pte_at,
	.set_pmd = xen_set_pmd_hyper,

	.ptep_modify_prot_start = __ptep_modify_prot_start,
	.ptep_modify_prot_commit = __ptep_modify_prot_commit,

	.pte_val = PV_CALLEE_SAVE(xen_pte_val),
	.pgd_val = PV_CALLEE_SAVE(xen_pgd_val),

	.make_pte = PV_CALLEE_SAVE(xen_make_pte),
	.make_pgd = PV_CALLEE_SAVE(xen_make_pgd),

#ifdef CONFIG_X86_PAE
	.set_pte_atomic = xen_set_pte_atomic,
	.pte_clear = xen_pte_clear,
	.pmd_clear = xen_pmd_clear,
#endif	/* CONFIG_X86_PAE */
	.set_pud = xen_set_pud_hyper,

	.make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
	.pmd_val = PV_CALLEE_SAVE(xen_pmd_val),

#if PAGETABLE_LEVELS == 4
	.pud_val = PV_CALLEE_SAVE(xen_pud_val),
	.make_pud = PV_CALLEE_SAVE(xen_make_pud),
	.set_pgd = xen_set_pgd_hyper,

	.alloc_pud = xen_alloc_pmd_init,
	.release_pud = xen_release_pmd_init,
#endif	/* PAGETABLE_LEVELS == 4 */

	.activate_mm = xen_activate_mm,
	.dup_mmap = xen_dup_mmap,
	.exit_mmap = xen_exit_mmap,

	.lazy_mode = {
		.enter = paravirt_enter_lazy_mmu,
		.leave = xen_leave_lazy_mmu,
	},

	.set_fixmap = xen_set_fixmap,
};

void __init xen_init_mmu_ops(void)
{
	x86_init.paging.pagetable_setup_start = xen_pagetable_setup_start;
	x86_init.paging.pagetable_setup_done = xen_pagetable_setup_done;
	pv_mmu_ops = xen_mmu_ops;
}

#ifdef CONFIG_XEN_DEBUG_FS

static struct dentry *d_mmu_debug;

static int __init xen_mmu_debugfs(void)
{
	struct dentry *d_xen = xen_init_debugfs();

	if (d_xen == NULL)
		return -ENOMEM;

	d_mmu_debug = debugfs_create_dir("mmu", d_xen);

	debugfs_create_u8("zero_stats", 0644, d_mmu_debug, &zero_stats);

	debugfs_create_u32("pgd_update", 0444, d_mmu_debug, &mmu_stats.pgd_update);
	debugfs_create_u32("pgd_update_pinned", 0444, d_mmu_debug,
			   &mmu_stats.pgd_update_pinned);
	debugfs_create_u32("pgd_update_batched", 0444, d_mmu_debug,
			   &mmu_stats.pgd_update_batched);

	debugfs_create_u32("pud_update", 0444, d_mmu_debug, &mmu_stats.pud_update);
	debugfs_create_u32("pud_update_pinned", 0444, d_mmu_debug,
			   &mmu_stats.pud_update_pinned);
	debugfs_create_u32("pud_update_batched", 0444, d_mmu_debug,
			   &mmu_stats.pud_update_batched);

	debugfs_create_u32("pmd_update", 0444, d_mmu_debug, &mmu_stats.pmd_update);
	debugfs_create_u32("pmd_update_pinned", 0444, d_mmu_debug,
			   &mmu_stats.pmd_update_pinned);
	debugfs_create_u32("pmd_update_batched", 0444, d_mmu_debug,
			   &mmu_stats.pmd_update_batched);

	debugfs_create_u32("pte_update", 0444, d_mmu_debug, &mmu_stats.pte_update);
//	debugfs_create_u32("pte_update_pinned", 0444, d_mmu_debug,
//			   &mmu_stats.pte_update_pinned);
	debugfs_create_u32("pte_update_batched", 0444, d_mmu_debug,
			   &mmu_stats.pte_update_batched);

	debugfs_create_u32("mmu_update", 0444, d_mmu_debug, &mmu_stats.mmu_update);
	debugfs_create_u32("mmu_update_extended", 0444, d_mmu_debug,
			   &mmu_stats.mmu_update_extended);
	xen_debugfs_create_u32_array("mmu_update_histo", 0444, d_mmu_debug,
				     mmu_stats.mmu_update_histo, 20);

	debugfs_create_u32("set_pte_at", 0444, d_mmu_debug, &mmu_stats.set_pte_at);
	debugfs_create_u32("set_pte_at_batched", 0444, d_mmu_debug,
			   &mmu_stats.set_pte_at_batched);
	debugfs_create_u32("set_pte_at_current", 0444, d_mmu_debug,
			   &mmu_stats.set_pte_at_current);
	debugfs_create_u32("set_pte_at_kernel", 0444, d_mmu_debug,
			   &mmu_stats.set_pte_at_kernel);

	debugfs_create_u32("prot_commit", 0444, d_mmu_debug, &mmu_stats.prot_commit);
	debugfs_create_u32("prot_commit_batched", 0444, d_mmu_debug,
			   &mmu_stats.prot_commit_batched);

	return 0;
}
fs_initcall(xen_mmu_debugfs);

#endif	/* CONFIG_XEN_DEBUG_FS */