1 /*
2 * Xen mmu operations
3 *
4 * This file contains the various mmu fetch and update operations.
5 * The most important job they must perform is the mapping between the
6 * domain's pfn and the overall machine mfns.
7 *
8 * Xen allows guests to directly update the pagetable, in a controlled
9 * fashion. In other words, the guest modifies the same pagetable
10 * that the CPU actually uses, which eliminates the overhead of having
11 * a separate shadow pagetable.
12 *
13 * In order to allow this, it falls on the guest domain to map its
14 * notion of a "physical" pfn - which is just a domain-local linear
15 * address - into a real "machine address" which the CPU's MMU can
16 * use.
17 *
18 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
19 * inserted directly into the pagetable. When creating a new
20 * pte/pmd/pgd, it converts the passed pfn into an mfn. Conversely,
21 * when reading the content back with __(pgd|pmd|pte)_val, it converts
22 * the mfn back into a pfn.
23 *
24 * The other constraint is that all pages which make up a pagetable
25 * must be mapped read-only in the guest. This prevents uncontrolled
26 * guest updates to the pagetable. Xen strictly enforces this, and
27 * will disallow any pagetable update which will end up mapping a
28 * pagetable page RW, and will disallow using any writable page as a
29 * pagetable.
30 *
31 * Naively, when loading %cr3 with the base of a new pagetable, Xen
32 * would need to validate the whole pagetable before going on.
33 * Naturally, this is quite slow. The solution is to "pin" a
34 * pagetable, which enforces all the constraints on the pagetable even
35 * when it is not actively in use. This means that Xen can be assured
36 * that it is still valid when you do load it into %cr3, and doesn't
37 * need to revalidate it.
38 *
39 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
40 */
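/*
 * A rough sketch of the conversion described above, using the p2m/m2p
 * helpers this file relies on (illustrative only, not part of the build):
 *
 *	mfn = pfn_to_mfn(pfn);			p2m: guest pfn -> machine frame
 *	pte = mfn_pte(mfn, PAGE_KERNEL);	build a pte the MMU can use
 *	pfn = mfn_to_pfn(pte_mfn(pte));		m2p: back to a pfn when reading
 */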
41 #include <linux/sched.h>
42 #include <linux/highmem.h>
43 #include <linux/debugfs.h>
44 #include <linux/bug.h>
45 #include <linux/vmalloc.h>
46 #include <linux/module.h>
47 #include <linux/gfp.h>
48 #include <linux/memblock.h>
49 #include <linux/seq_file.h>
50 #include <linux/crash_dump.h>
51
52 #include <trace/events/xen.h>
53
54 #include <asm/pgtable.h>
55 #include <asm/tlbflush.h>
56 #include <asm/fixmap.h>
57 #include <asm/mmu_context.h>
58 #include <asm/setup.h>
59 #include <asm/paravirt.h>
60 #include <asm/e820.h>
61 #include <asm/linkage.h>
62 #include <asm/page.h>
63 #include <asm/init.h>
64 #include <asm/pat.h>
65 #include <asm/smp.h>
66
67 #include <asm/xen/hypercall.h>
68 #include <asm/xen/hypervisor.h>
69
70 #include <xen/xen.h>
71 #include <xen/page.h>
72 #include <xen/interface/xen.h>
73 #include <xen/interface/hvm/hvm_op.h>
74 #include <xen/interface/version.h>
75 #include <xen/interface/memory.h>
76 #include <xen/hvc-console.h>
77
78 #include "multicalls.h"
79 #include "mmu.h"
80 #include "debugfs.h"
81
82 /*
83 * Protects atomic reservation decrease/increase against concurrent increases.
84 * Also protects non-atomic updates of current_pages and balloon lists.
85 */
86 DEFINE_SPINLOCK(xen_reservation_lock);
87
88 #ifdef CONFIG_X86_32
89 /*
90 * Identity map, in addition to plain kernel map. This needs to be
91 * large enough to allocate page table pages to allocate the rest.
92 * Each page can map 2MB.
93 */
94 #define LEVEL1_IDENT_ENTRIES (PTRS_PER_PTE * 4)
95 static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES);
96 #endif
97 #ifdef CONFIG_X86_64
98 /* l3 pud for userspace vsyscall mapping */
99 static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
100 #endif /* CONFIG_X86_64 */
101
102 /*
103 * Note about cr3 (pagetable base) values:
104 *
105 * xen_cr3 contains the current logical cr3 value; it contains the
106 * last set cr3. This may not be the current effective cr3, because
107 * its update may be being lazily deferred. However, a vcpu looking
108 * at its own cr3 can use this value knowing that everything will
109 * be self-consistent.
110 *
111 * xen_current_cr3 contains the actual vcpu cr3; it is set once the
112 * hypercall to set the vcpu cr3 is complete (so it may be a little
113 * out of date, but it will never be set early). If one vcpu is
114 * looking at another vcpu's cr3 value, it should use this variable.
115 */
116 DEFINE_PER_CPU(unsigned long, xen_cr3); /* cr3 stored as physaddr */
117 DEFINE_PER_CPU(unsigned long, xen_current_cr3); /* actual vcpu cr3 */
118
119
120 /*
121 * Just beyond the highest usermode address. STACK_TOP_MAX has a
122 * redzone above it, so round it up to a PGD boundary.
123 */
124 #define USER_LIMIT ((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)
125
126 unsigned long arbitrary_virt_to_mfn(void *vaddr)
127 {
128 xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);
129
130 return PFN_DOWN(maddr.maddr);
131 }
132
133 xmaddr_t arbitrary_virt_to_machine(void *vaddr)
134 {
135 unsigned long address = (unsigned long)vaddr;
136 unsigned int level;
137 pte_t *pte;
138 unsigned offset;
139
140 /*
141 * if the PFN is in the linear mapped vaddr range, we can just use
142 * the (quick) virt_to_machine() p2m lookup
143 */
144 if (virt_addr_valid(vaddr))
145 return virt_to_machine(vaddr);
146
147 /* otherwise we have to do a (slower) full page-table walk */
148
149 pte = lookup_address(address, &level);
150 BUG_ON(pte == NULL);
151 offset = address & ~PAGE_MASK;
152 return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
153 }
154 EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);
155
156 void make_lowmem_page_readonly(void *vaddr)
157 {
158 pte_t *pte, ptev;
159 unsigned long address = (unsigned long)vaddr;
160 unsigned int level;
161
162 pte = lookup_address(address, &level);
163 if (pte == NULL)
164 return; /* vaddr missing */
165
166 ptev = pte_wrprotect(*pte);
167
168 if (HYPERVISOR_update_va_mapping(address, ptev, 0))
169 BUG();
170 }
171
172 void make_lowmem_page_readwrite(void *vaddr)
173 {
174 pte_t *pte, ptev;
175 unsigned long address = (unsigned long)vaddr;
176 unsigned int level;
177
178 pte = lookup_address(address, &level);
179 if (pte == NULL)
180 return; /* vaddr missing */
181
182 ptev = pte_mkwrite(*pte);
183
184 if (HYPERVISOR_update_va_mapping(address, ptev, 0))
185 BUG();
186 }
187
188
189 static bool xen_page_pinned(void *ptr)
190 {
191 struct page *page = virt_to_page(ptr);
192
193 return PagePinned(page);
194 }
195
196 void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
197 {
198 struct multicall_space mcs;
199 struct mmu_update *u;
200
201 trace_xen_mmu_set_domain_pte(ptep, pteval, domid);
202
203 mcs = xen_mc_entry(sizeof(*u));
204 u = mcs.args;
205
206 /* ptep might be kmapped when using 32-bit HIGHPTE */
207 u->ptr = virt_to_machine(ptep).maddr;
208 u->val = pte_val_ma(pteval);
209
210 MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid);
211
212 xen_mc_issue(PARAVIRT_LAZY_MMU);
213 }
214 EXPORT_SYMBOL_GPL(xen_set_domain_pte);
215
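/*
 * Append another mmu_update to the multicall batch currently being built.
 * If the most recent entry in the batch is already an mmu_update hypercall
 * with room for another argument, just bump its count; otherwise start a
 * new MULTI_mmu_update entry.  This lets a run of pagetable updates issued
 * under PARAVIRT_LAZY_MMU collapse into a single hypercall.
 */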
216 static void xen_extend_mmu_update(const struct mmu_update *update)
217 {
218 struct multicall_space mcs;
219 struct mmu_update *u;
220
221 mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));
222
223 if (mcs.mc != NULL) {
224 mcs.mc->args[1]++;
225 } else {
226 mcs = __xen_mc_entry(sizeof(*u));
227 MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
228 }
229
230 u = mcs.args;
231 *u = *update;
232 }
233
234 static void xen_extend_mmuext_op(const struct mmuext_op *op)
235 {
236 struct multicall_space mcs;
237 struct mmuext_op *u;
238
239 mcs = xen_mc_extend_args(__HYPERVISOR_mmuext_op, sizeof(*u));
240
241 if (mcs.mc != NULL) {
242 mcs.mc->args[1]++;
243 } else {
244 mcs = __xen_mc_entry(sizeof(*u));
245 MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
246 }
247
248 u = mcs.args;
249 *u = *op;
250 }
251
252 static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
253 {
254 struct mmu_update u;
255
256 preempt_disable();
257
258 xen_mc_batch();
259
260 /* ptr may be ioremapped for 64-bit pagetable setup */
261 u.ptr = arbitrary_virt_to_machine(ptr).maddr;
262 u.val = pmd_val_ma(val);
263 xen_extend_mmu_update(&u);
264
265 xen_mc_issue(PARAVIRT_LAZY_MMU);
266
267 preempt_enable();
268 }
269
270 static void xen_set_pmd(pmd_t *ptr, pmd_t val)
271 {
272 trace_xen_mmu_set_pmd(ptr, val);
273
274 /* If page is not pinned, we can just update the entry
275 directly */
276 if (!xen_page_pinned(ptr)) {
277 *ptr = val;
278 return;
279 }
280
281 xen_set_pmd_hyper(ptr, val);
282 }
283
284 /*
285 * Associate a virtual page frame with a given physical page frame
286 * and protection flags for that frame.
287 */
288 void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
289 {
290 set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
291 }
292
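/*
 * If we're inside a lazy MMU section, queue the pte write as an mmu_update
 * in the current multicall batch and report success; otherwise return false
 * so the caller falls back to a direct update or a single hypercall.
 */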
293 static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
294 {
295 struct mmu_update u;
296
297 if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU)
298 return false;
299
300 xen_mc_batch();
301
302 u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
303 u.val = pte_val_ma(pteval);
304 xen_extend_mmu_update(&u);
305
306 xen_mc_issue(PARAVIRT_LAZY_MMU);
307
308 return true;
309 }
310
311 static inline void __xen_set_pte(pte_t *ptep, pte_t pteval)
312 {
313 if (!xen_batched_set_pte(ptep, pteval)) {
314 /*
315 * Could call native_set_pte() here and trap and
316 * emulate the PTE write but with 32-bit guests this
317 * needs two traps (one for each of the two 32-bit
318 * words in the PTE) so do one hypercall directly
319 * instead.
320 */
321 struct mmu_update u;
322
323 u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
324 u.val = pte_val_ma(pteval);
325 HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF);
326 }
327 }
328
329 static void xen_set_pte(pte_t *ptep, pte_t pteval)
330 {
331 trace_xen_mmu_set_pte(ptep, pteval);
332 __xen_set_pte(ptep, pteval);
333 }
334
335 static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
336 pte_t *ptep, pte_t pteval)
337 {
338 trace_xen_mmu_set_pte_at(mm, addr, ptep, pteval);
339 __xen_set_pte(ptep, pteval);
340 }
341
342 pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
343 unsigned long addr, pte_t *ptep)
344 {
345 /* Just return the pte as-is. We preserve the bits on commit */
346 trace_xen_mmu_ptep_modify_prot_start(mm, addr, ptep, *ptep);
347 return *ptep;
348 }
349
350 void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
351 pte_t *ptep, pte_t pte)
352 {
353 struct mmu_update u;
354
355 trace_xen_mmu_ptep_modify_prot_commit(mm, addr, ptep, pte);
356 xen_mc_batch();
357
358 u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
359 u.val = pte_val_ma(pte);
360 xen_extend_mmu_update(&u);
361
362 xen_mc_issue(PARAVIRT_LAZY_MMU);
363 }
364
365 /* Assume pteval_t is equivalent to all the other *val_t types. */
366 static pteval_t pte_mfn_to_pfn(pteval_t val)
367 {
368 if (val & _PAGE_PRESENT) {
369 unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
370 unsigned long pfn = mfn_to_pfn(mfn);
371
372 pteval_t flags = val & PTE_FLAGS_MASK;
373 if (unlikely(pfn == ~0))
374 val = flags & ~_PAGE_PRESENT;
375 else
376 val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
377 }
378
379 return val;
380 }
381
382 static pteval_t pte_pfn_to_mfn(pteval_t val)
383 {
384 if (val & _PAGE_PRESENT) {
385 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
386 pteval_t flags = val & PTE_FLAGS_MASK;
387 unsigned long mfn;
388
389 if (!xen_feature(XENFEAT_auto_translated_physmap))
390 mfn = get_phys_to_machine(pfn);
391 else
392 mfn = pfn;
393 /*
394 * If there's no mfn for the pfn, then just create an
395 * empty non-present pte. Unfortunately this loses
396 * information about the original pfn, so
397 * pte_mfn_to_pfn is asymmetric.
398 */
399 if (unlikely(mfn == INVALID_P2M_ENTRY)) {
400 mfn = 0;
401 flags = 0;
402 } else {
403 /*
404 * Paramount to do this test _after_ the
405 * INVALID_P2M_ENTRY as INVALID_P2M_ENTRY &
406 * IDENTITY_FRAME_BIT resolves to true.
407 */
408 mfn &= ~FOREIGN_FRAME_BIT;
409 if (mfn & IDENTITY_FRAME_BIT) {
410 mfn &= ~IDENTITY_FRAME_BIT;
411 flags |= _PAGE_IOMAP;
412 }
413 }
414 val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
415 }
416
417 return val;
418 }
419
420 static pteval_t iomap_pte(pteval_t val)
421 {
422 if (val & _PAGE_PRESENT) {
423 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
424 pteval_t flags = val & PTE_FLAGS_MASK;
425
426 /* We assume the pte frame number is an MFN, so
427 just use it as-is. */
428 val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
429 }
430
431 return val;
432 }
433
434 static pteval_t xen_pte_val(pte_t pte)
435 {
436 pteval_t pteval = pte.pte;
437 #if 0
438 /* If this is a WC pte, convert back from Xen WC to Linux WC */
439 if ((pteval & (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)) == _PAGE_PAT) {
440 WARN_ON(!pat_enabled);
441 pteval = (pteval & ~_PAGE_PAT) | _PAGE_PWT;
442 }
443 #endif
444 if (xen_initial_domain() && (pteval & _PAGE_IOMAP))
445 return pteval;
446
447 return pte_mfn_to_pfn(pteval);
448 }
449 PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);
450
451 static pgdval_t xen_pgd_val(pgd_t pgd)
452 {
453 return pte_mfn_to_pfn(pgd.pgd);
454 }
455 PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);
456
457 /*
458 * Xen's PAT setup is part of its ABI, though I assume entries 6 & 7
459 * are reserved for now, to correspond to the Intel-reserved PAT
460 * types.
461 *
462 * We expect Linux's PAT to be set as follows:
463 *
464 * Idx PTE flags Linux Xen Default
465 * 0 WB WB WB
466 * 1 PWT WC WT WT
467 * 2 PCD UC- UC- UC-
468 * 3 PCD PWT UC UC UC
469 * 4 PAT WB WC WB
470 * 5 PAT PWT WC WP WT
471 * 6 PAT PCD UC- UC UC-
472 * 7 PAT PCD PWT UC UC UC
473 */
474
475 void xen_set_pat(u64 pat)
476 {
477 /* We expect Linux to use a PAT setting of
478 * UC UC- WC WB (ignoring the PAT flag) */
479 WARN_ON(pat != 0x0007010600070106ull);
480 }
481
482 static pte_t xen_make_pte(pteval_t pte)
483 {
484 phys_addr_t addr = (pte & PTE_PFN_MASK);
485 #if 0
486 /* If Linux is trying to set a WC pte, then map to the Xen WC.
487 * If _PAGE_PAT is set, then it probably means it is really
488 * _PAGE_PSE, so avoid fiddling with the PAT mapping and hope
489 * things work out OK...
490 *
491 * (We should never see kernel mappings with _PAGE_PSE set,
492 * but we could see hugetlbfs mappings, I think.).
493 */
494 if (pat_enabled && !WARN_ON(pte & _PAGE_PAT)) {
495 if ((pte & (_PAGE_PCD | _PAGE_PWT)) == _PAGE_PWT)
496 pte = (pte & ~(_PAGE_PCD | _PAGE_PWT)) | _PAGE_PAT;
497 }
498 #endif
499 /*
500 * Unprivileged domains are allowed to do IOMAPpings for
501 * PCI passthrough, but not map ISA space. The ISA
502 * mappings are just dummy local mappings to keep other
503 * parts of the kernel happy.
504 */
505 if (unlikely(pte & _PAGE_IOMAP) &&
506 (xen_initial_domain() || addr >= ISA_END_ADDRESS)) {
507 pte = iomap_pte(pte);
508 } else {
509 pte &= ~_PAGE_IOMAP;
510 pte = pte_pfn_to_mfn(pte);
511 }
512
513 return native_make_pte(pte);
514 }
515 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);
516
517 static pgd_t xen_make_pgd(pgdval_t pgd)
518 {
519 pgd = pte_pfn_to_mfn(pgd);
520 return native_make_pgd(pgd);
521 }
522 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);
523
524 static pmdval_t xen_pmd_val(pmd_t pmd)
525 {
526 return pte_mfn_to_pfn(pmd.pmd);
527 }
528 PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);
529
530 static void xen_set_pud_hyper(pud_t *ptr, pud_t val)
531 {
532 struct mmu_update u;
533
534 preempt_disable();
535
536 xen_mc_batch();
537
538 /* ptr may be ioremapped for 64-bit pagetable setup */
539 u.ptr = arbitrary_virt_to_machine(ptr).maddr;
540 u.val = pud_val_ma(val);
541 xen_extend_mmu_update(&u);
542
543 xen_mc_issue(PARAVIRT_LAZY_MMU);
544
545 preempt_enable();
546 }
547
548 static void xen_set_pud(pud_t *ptr, pud_t val)
549 {
550 trace_xen_mmu_set_pud(ptr, val);
551
552 /* If page is not pinned, we can just update the entry
553 directly */
554 if (!xen_page_pinned(ptr)) {
555 *ptr = val;
556 return;
557 }
558
559 xen_set_pud_hyper(ptr, val);
560 }
561
562 #ifdef CONFIG_X86_PAE
563 static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
564 {
565 trace_xen_mmu_set_pte_atomic(ptep, pte);
566 set_64bit((u64 *)ptep, native_pte_val(pte));
567 }
568
569 static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
570 {
571 trace_xen_mmu_pte_clear(mm, addr, ptep);
572 if (!xen_batched_set_pte(ptep, native_make_pte(0)))
573 native_pte_clear(mm, addr, ptep);
574 }
575
576 static void xen_pmd_clear(pmd_t *pmdp)
577 {
578 trace_xen_mmu_pmd_clear(pmdp);
579 set_pmd(pmdp, __pmd(0));
580 }
581 #endif /* CONFIG_X86_PAE */
582
583 static pmd_t xen_make_pmd(pmdval_t pmd)
584 {
585 pmd = pte_pfn_to_mfn(pmd);
586 return native_make_pmd(pmd);
587 }
588 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);
589
590 #if PAGETABLE_LEVELS == 4
591 static pudval_t xen_pud_val(pud_t pud)
592 {
593 return pte_mfn_to_pfn(pud.pud);
594 }
595 PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);
596
597 static pud_t xen_make_pud(pudval_t pud)
598 {
599 pud = pte_pfn_to_mfn(pud);
600
601 return native_make_pud(pud);
602 }
603 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);
604
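/*
 * On 64-bit each kernel pgd may have a companion user pgd (kernel and
 * userspace both run in ring 3 under Xen, so they use separate base
 * pointers); its address is stashed in the pgd page's page->private by
 * xen_pgd_alloc().  Return the matching entry in the user pgd, or NULL
 * if there is none or the entry lies outside the user address range.
 */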
605 static pgd_t *xen_get_user_pgd(pgd_t *pgd)
606 {
607 pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
608 unsigned offset = pgd - pgd_page;
609 pgd_t *user_ptr = NULL;
610
611 if (offset < pgd_index(USER_LIMIT)) {
612 struct page *page = virt_to_page(pgd_page);
613 user_ptr = (pgd_t *)page->private;
614 if (user_ptr)
615 user_ptr += offset;
616 }
617
618 return user_ptr;
619 }
620
621 static void __xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
622 {
623 struct mmu_update u;
624
625 u.ptr = virt_to_machine(ptr).maddr;
626 u.val = pgd_val_ma(val);
627 xen_extend_mmu_update(&u);
628 }
629
630 /*
631 * Raw hypercall-based set_pgd, intended for use in early boot before
632 * there's a page structure. This implies:
633 * 1. The only existing pagetable is the kernel's
634 * 2. It is always pinned
635 * 3. It has no user pagetable attached to it
636 */
637 static void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
638 {
639 preempt_disable();
640
641 xen_mc_batch();
642
643 __xen_set_pgd_hyper(ptr, val);
644
645 xen_mc_issue(PARAVIRT_LAZY_MMU);
646
647 preempt_enable();
648 }
649
650 static void xen_set_pgd(pgd_t *ptr, pgd_t val)
651 {
652 pgd_t *user_ptr = xen_get_user_pgd(ptr);
653
654 trace_xen_mmu_set_pgd(ptr, user_ptr, val);
655
656 /* If page is not pinned, we can just update the entry
657 directly */
658 if (!xen_page_pinned(ptr)) {
659 *ptr = val;
660 if (user_ptr) {
661 WARN_ON(xen_page_pinned(user_ptr));
662 *user_ptr = val;
663 }
664 return;
665 }
666
667 /* If it's pinned, then we can at least batch the kernel and
668 user updates together. */
669 xen_mc_batch();
670
671 __xen_set_pgd_hyper(ptr, val);
672 if (user_ptr)
673 __xen_set_pgd_hyper(user_ptr, val);
674
675 xen_mc_issue(PARAVIRT_LAZY_MMU);
676 }
677 #endif /* PAGETABLE_LEVELS == 4 */
678
679 /*
680 * (Yet another) pagetable walker. This one is intended for pinning a
681 * pagetable. This means that it walks a pagetable and calls the
682 * callback function on each page it finds making up the page table,
683 * at every level. It walks the entire pagetable, but it only bothers
684 * pinning pte pages which are below limit. In the normal case this
685 * will be STACK_TOP_MAX, but at boot we need to pin up to
686 * FIXADDR_TOP.
687 *
688 * For 32-bit the important bit is that we don't pin beyond there,
689 * because then we start getting into Xen's ptes.
690 *
691 * For 64-bit, we must skip the Xen hole in the middle of the address
692 * space, just after the big x86-64 virtual hole.
693 */
694 static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
695 int (*func)(struct mm_struct *mm, struct page *,
696 enum pt_level),
697 unsigned long limit)
698 {
699 int flush = 0;
700 unsigned hole_low, hole_high;
701 unsigned pgdidx_limit, pudidx_limit, pmdidx_limit;
702 unsigned pgdidx, pudidx, pmdidx;
703
704 /* The limit is the last byte to be touched */
705 limit--;
706 BUG_ON(limit >= FIXADDR_TOP);
707
708 if (xen_feature(XENFEAT_auto_translated_physmap))
709 return 0;
710
711 /*
712 * 64-bit has a great big hole in the middle of the address
713 * space, which contains the Xen mappings. On 32-bit these
714 * will end up making a zero-sized hole and so is a no-op.
715 */
716 hole_low = pgd_index(USER_LIMIT);
717 hole_high = pgd_index(PAGE_OFFSET);
718
719 pgdidx_limit = pgd_index(limit);
720 #if PTRS_PER_PUD > 1
721 pudidx_limit = pud_index(limit);
722 #else
723 pudidx_limit = 0;
724 #endif
725 #if PTRS_PER_PMD > 1
726 pmdidx_limit = pmd_index(limit);
727 #else
728 pmdidx_limit = 0;
729 #endif
730
731 for (pgdidx = 0; pgdidx <= pgdidx_limit; pgdidx++) {
732 pud_t *pud;
733
734 if (pgdidx >= hole_low && pgdidx < hole_high)
735 continue;
736
737 if (!pgd_val(pgd[pgdidx]))
738 continue;
739
740 pud = pud_offset(&pgd[pgdidx], 0);
741
742 if (PTRS_PER_PUD > 1) /* not folded */
743 flush |= (*func)(mm, virt_to_page(pud), PT_PUD);
744
745 for (pudidx = 0; pudidx < PTRS_PER_PUD; pudidx++) {
746 pmd_t *pmd;
747
748 if (pgdidx == pgdidx_limit &&
749 pudidx > pudidx_limit)
750 goto out;
751
752 if (pud_none(pud[pudidx]))
753 continue;
754
755 pmd = pmd_offset(&pud[pudidx], 0);
756
757 if (PTRS_PER_PMD > 1) /* not folded */
758 flush |= (*func)(mm, virt_to_page(pmd), PT_PMD);
759
760 for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++) {
761 struct page *pte;
762
763 if (pgdidx == pgdidx_limit &&
764 pudidx == pudidx_limit &&
765 pmdidx > pmdidx_limit)
766 goto out;
767
768 if (pmd_none(pmd[pmdidx]))
769 continue;
770
771 pte = pmd_page(pmd[pmdidx]);
772 flush |= (*func)(mm, pte, PT_PTE);
773 }
774 }
775 }
776
777 out:
778 /* Do the top level last, so that the callbacks can use it as
779 a cue to do final things like tlb flushes. */
780 flush |= (*func)(mm, virt_to_page(pgd), PT_PGD);
781
782 return flush;
783 }
784
785 static int xen_pgd_walk(struct mm_struct *mm,
786 int (*func)(struct mm_struct *mm, struct page *,
787 enum pt_level),
788 unsigned long limit)
789 {
790 return __xen_pgd_walk(mm, mm->pgd, func, limit);
791 }
792
793 /* If we're using split pte locks, then take the page's lock and
794 return a pointer to it. Otherwise return NULL. */
795 static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
796 {
797 spinlock_t *ptl = NULL;
798
799 #if USE_SPLIT_PTLOCKS
800 ptl = __pte_lockptr(page);
801 spin_lock_nest_lock(ptl, &mm->page_table_lock);
802 #endif
803
804 return ptl;
805 }
806
807 static void xen_pte_unlock(void *v)
808 {
809 spinlock_t *ptl = v;
810 spin_unlock(ptl);
811 }
812
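/*
 * Queue a pin/unpin mmuext op for the pagetable page at @pfn in the
 * current multicall batch; @level is one of the MMUEXT_PIN_L*_TABLE or
 * MMUEXT_UNPIN_TABLE commands.
 */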
813 static void xen_do_pin(unsigned level, unsigned long pfn)
814 {
815 struct mmuext_op op;
816
817 op.cmd = level;
818 op.arg1.mfn = pfn_to_mfn(pfn);
819
820 xen_extend_mmuext_op(&op);
821 }
822
823 static int xen_pin_page(struct mm_struct *mm, struct page *page,
824 enum pt_level level)
825 {
826 unsigned pgfl = TestSetPagePinned(page);
827 int flush;
828
829 if (pgfl)
830 flush = 0; /* already pinned */
831 else if (PageHighMem(page))
832 /* kmaps need flushing if we found an unpinned
833 highpage */
834 flush = 1;
835 else {
836 void *pt = lowmem_page_address(page);
837 unsigned long pfn = page_to_pfn(page);
838 struct multicall_space mcs = __xen_mc_entry(0);
839 spinlock_t *ptl;
840
841 flush = 0;
842
843 /*
844 * We need to hold the pagetable lock between the time
845 * we make the pagetable RO and when we actually pin
846 * it. If we don't, then other users may come in and
847 * attempt to update the pagetable by writing it,
848 * which will fail because the memory is RO but not
849 * pinned, so Xen won't do the trap'n'emulate.
850 *
851 * If we're using split pte locks, we can't hold the
852 * entire pagetable's worth of locks during the
853 * traverse, because we may wrap the preempt count (8
854 * bits). The solution is to mark RO and pin each PTE
855 * page while holding the lock. This means the number
856 * of locks we end up holding is never more than a
857 * batch size (~32 entries, at present).
858 *
859 * If we're not using split pte locks, we needn't pin
860 * the PTE pages independently, because we're
861 * protected by the overall pagetable lock.
862 */
863 ptl = NULL;
864 if (level == PT_PTE)
865 ptl = xen_pte_lock(page, mm);
866
867 MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
868 pfn_pte(pfn, PAGE_KERNEL_RO),
869 level == PT_PGD ? UVMF_TLB_FLUSH : 0);
870
871 if (ptl) {
872 xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);
873
874 /* Queue a deferred unlock for when this batch
875 is completed. */
876 xen_mc_callback(xen_pte_unlock, ptl);
877 }
878 }
879
880 return flush;
881 }
882
883 /* This is called just after a mm has been created, but it has not
884 been used yet. We need to make sure that its pagetable is all
885 read-only, and can be pinned. */
886 static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
887 {
888 trace_xen_mmu_pgd_pin(mm, pgd);
889
890 xen_mc_batch();
891
892 if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
893 /* re-enable interrupts for flushing */
894 xen_mc_issue(0);
895
896 kmap_flush_unused();
897
898 xen_mc_batch();
899 }
900
901 #ifdef CONFIG_X86_64
902 {
903 pgd_t *user_pgd = xen_get_user_pgd(pgd);
904
905 xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));
906
907 if (user_pgd) {
908 xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
909 xen_do_pin(MMUEXT_PIN_L4_TABLE,
910 PFN_DOWN(__pa(user_pgd)));
911 }
912 }
913 #else /* CONFIG_X86_32 */
914 #ifdef CONFIG_X86_PAE
915 /* Need to make sure unshared kernel PMD is pinnable */
916 xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
917 PT_PMD);
918 #endif
919 xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
920 #endif /* CONFIG_X86_64 */
921 xen_mc_issue(0);
922 }
923
924 static void xen_pgd_pin(struct mm_struct *mm)
925 {
926 __xen_pgd_pin(mm, mm->pgd);
927 }
928
929 /*
930 * On save, we need to pin all pagetables to make sure they get their
931 * mfns turned into pfns. Search the list for any unpinned pgds and pin
932 * them (unpinned pgds are not currently in use, probably because the
933 * process is under construction or destruction).
934 *
935 * Expected to be called in stop_machine() ("equivalent to taking
936 * every spinlock in the system"), so the locking doesn't really
937 * matter all that much.
938 */
939 void xen_mm_pin_all(void)
940 {
941 struct page *page;
942
943 spin_lock(&pgd_lock);
944
945 list_for_each_entry(page, &pgd_list, lru) {
946 if (!PagePinned(page)) {
947 __xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
948 SetPageSavePinned(page);
949 }
950 }
951
952 spin_unlock(&pgd_lock);
953 }
954
955 /*
956 * The init_mm pagetable is really pinned as soon as it's created, but
957 * that's before we have page structures to store the bits. So do all
958 * the book-keeping now.
959 */
960 static int __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
961 enum pt_level level)
962 {
963 SetPagePinned(page);
964 return 0;
965 }
966
967 static void __init xen_mark_init_mm_pinned(void)
968 {
969 xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
970 }
971
972 static int xen_unpin_page(struct mm_struct *mm, struct page *page,
973 enum pt_level level)
974 {
975 unsigned pgfl = TestClearPagePinned(page);
976
977 if (pgfl && !PageHighMem(page)) {
978 void *pt = lowmem_page_address(page);
979 unsigned long pfn = page_to_pfn(page);
980 spinlock_t *ptl = NULL;
981 struct multicall_space mcs;
982
983 /*
984 * Do the converse to pin_page. If we're using split
986 * pte locks, we must be holding the lock while
986 * the pte page is unpinned but still RO to prevent
987 * concurrent updates from seeing it in this
988 * partially-pinned state.
989 */
990 if (level == PT_PTE) {
991 ptl = xen_pte_lock(page, mm);
992
993 if (ptl)
994 xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
995 }
996
997 mcs = __xen_mc_entry(0);
998
999 MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
1000 pfn_pte(pfn, PAGE_KERNEL),
1001 level == PT_PGD ? UVMF_TLB_FLUSH : 0);
1002
1003 if (ptl) {
1004 /* unlock when batch completed */
1005 xen_mc_callback(xen_pte_unlock, ptl);
1006 }
1007 }
1008
1009 return 0; /* never need to flush on unpin */
1010 }
1011
1012 /* Release a pagetable's pages back as normal RW */
1013 static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
1014 {
1015 trace_xen_mmu_pgd_unpin(mm, pgd);
1016
1017 xen_mc_batch();
1018
1019 xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
1020
1021 #ifdef CONFIG_X86_64
1022 {
1023 pgd_t *user_pgd = xen_get_user_pgd(pgd);
1024
1025 if (user_pgd) {
1026 xen_do_pin(MMUEXT_UNPIN_TABLE,
1027 PFN_DOWN(__pa(user_pgd)));
1028 xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
1029 }
1030 }
1031 #endif
1032
1033 #ifdef CONFIG_X86_PAE
1034 /* Need to make sure unshared kernel PMD is unpinned */
1035 xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
1036 PT_PMD);
1037 #endif
1038
1039 __xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);
1040
1041 xen_mc_issue(0);
1042 }
1043
1044 static void xen_pgd_unpin(struct mm_struct *mm)
1045 {
1046 __xen_pgd_unpin(mm, mm->pgd);
1047 }
1048
1049 /*
1050 * On resume, undo any pinning done at save, so that the rest of the
1051 * kernel doesn't see any unexpected pinned pagetables.
1052 */
1053 void xen_mm_unpin_all(void)
1054 {
1055 struct page *page;
1056
1057 spin_lock(&pgd_lock);
1058
1059 list_for_each_entry(page, &pgd_list, lru) {
1060 if (PageSavePinned(page)) {
1061 BUG_ON(!PagePinned(page));
1062 __xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
1063 ClearPageSavePinned(page);
1064 }
1065 }
1066
1067 spin_unlock(&pgd_lock);
1068 }
1069
1070 static void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
1071 {
1072 spin_lock(&next->page_table_lock);
1073 xen_pgd_pin(next);
1074 spin_unlock(&next->page_table_lock);
1075 }
1076
1077 static void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
1078 {
1079 spin_lock(&mm->page_table_lock);
1080 xen_pgd_pin(mm);
1081 spin_unlock(&mm->page_table_lock);
1082 }
1083
1084
1085 #ifdef CONFIG_SMP
1086 /* Another cpu may still have its %cr3 pointing at the pagetable, so
1087 we need to repoint it somewhere else before we can unpin it. */
1088 static void drop_other_mm_ref(void *info)
1089 {
1090 struct mm_struct *mm = info;
1091 struct mm_struct *active_mm;
1092
1093 active_mm = this_cpu_read(cpu_tlbstate.active_mm);
1094
1095 if (active_mm == mm && this_cpu_read(cpu_tlbstate.state) != TLBSTATE_OK)
1096 leave_mm(smp_processor_id());
1097
1098 /* If this cpu still has a stale cr3 reference, then make sure
1099 it has been flushed. */
1100 if (this_cpu_read(xen_current_cr3) == __pa(mm->pgd))
1101 load_cr3(swapper_pg_dir);
1102 }
1103
1104 static void xen_drop_mm_ref(struct mm_struct *mm)
1105 {
1106 cpumask_var_t mask;
1107 unsigned cpu;
1108
1109 if (current->active_mm == mm) {
1110 if (current->mm == mm)
1111 load_cr3(swapper_pg_dir);
1112 else
1113 leave_mm(smp_processor_id());
1114 }
1115
1116 /* Get the "official" set of cpus referring to our pagetable. */
1117 if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
1118 for_each_online_cpu(cpu) {
1119 if (!cpumask_test_cpu(cpu, mm_cpumask(mm))
1120 && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
1121 continue;
1122 smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
1123 }
1124 return;
1125 }
1126 cpumask_copy(mask, mm_cpumask(mm));
1127
1128 /* It's possible that a vcpu may have a stale reference to our
1129 cr3, because it's in lazy mode, and it hasn't yet flushed
1130 its set of pending hypercalls. In this case, we can
1131 look at its actual current cr3 value, and force it to flush
1132 if needed. */
1133 for_each_online_cpu(cpu) {
1134 if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
1135 cpumask_set_cpu(cpu, mask);
1136 }
1137
1138 if (!cpumask_empty(mask))
1139 smp_call_function_many(mask, drop_other_mm_ref, mm, 1);
1140 free_cpumask_var(mask);
1141 }
1142 #else
1143 static void xen_drop_mm_ref(struct mm_struct *mm)
1144 {
1145 if (current->active_mm == mm)
1146 load_cr3(swapper_pg_dir);
1147 }
1148 #endif
1149
1150 /*
1151 * While a process runs, Xen pins its pagetables, which means that the
1152 * hypervisor forces it to be read-only, and it controls all updates
1153 * to it. This means that all pagetable updates have to go via the
1154 * hypervisor, which is moderately expensive.
1155 *
1156 * Since we're pulling the pagetable down, we switch to init_mm,
1157 * unpin the old process pagetable and mark it all read-write, which
1158 * allows further operations on it to be simple memory accesses.
1159 *
1160 * The only subtle point is that another CPU may be still using the
1161 * pagetable because of lazy tlb flushing. This means we need to
1162 * switch all CPUs off this pagetable before we can unpin it.
1163 */
1164 static void xen_exit_mmap(struct mm_struct *mm)
1165 {
1166 get_cpu(); /* make sure we don't move around */
1167 xen_drop_mm_ref(mm);
1168 put_cpu();
1169
1170 spin_lock(&mm->page_table_lock);
1171
1172 /* pgd may not be pinned in the error exit path of execve */
1173 if (xen_page_pinned(mm->pgd))
1174 xen_pgd_unpin(mm);
1175
1176 spin_unlock(&mm->page_table_lock);
1177 }
1178
1179 static void xen_post_allocator_init(void);
1180
1181 #ifdef CONFIG_X86_64
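/*
 * Clear the kernel-mapping (__ka) PMD entries covering [vaddr, vaddr_end]
 * that fall outside the kernel image itself, removing stale aliases of the
 * early Xen-provided structures.
 */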
1182 static void __init xen_cleanhighmap(unsigned long vaddr,
1183 unsigned long vaddr_end)
1184 {
1185 unsigned long kernel_end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
1186 pmd_t *pmd = level2_kernel_pgt + pmd_index(vaddr);
1187
1188 /* NOTE: The loop is more greedy than the cleanup_highmap variant.
1189 * We include the PMD passed in on _both_ boundaries. */
1190 for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PAGE_SIZE));
1191 pmd++, vaddr += PMD_SIZE) {
1192 if (pmd_none(*pmd))
1193 continue;
1194 if (vaddr < (unsigned long) _text || vaddr > kernel_end)
1195 set_pmd(pmd, __pmd(0));
1196 }
1197 /* In case we did something silly, we should crash in this function
1198 * instead of somewhere later and be confusing. */
1199 xen_mc_flush();
1200 }
1201 #endif
1202 static void __init xen_pagetable_init(void)
1203 {
1204 #ifdef CONFIG_X86_64
1205 unsigned long size;
1206 unsigned long addr;
1207 #endif
1208 paging_init();
1209 xen_setup_shared_info();
1210 #ifdef CONFIG_X86_64
1211 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
1212 unsigned long new_mfn_list;
1213
1214 size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
1215
1216 /* On 32-bit, we get zero so this never gets executed. */
1217 new_mfn_list = xen_revector_p2m_tree();
1218 if (new_mfn_list && new_mfn_list != xen_start_info->mfn_list) {
1219 /* using __ka address and sticking INVALID_P2M_ENTRY! */
1220 memset((void *)xen_start_info->mfn_list, 0xff, size);
1221
1222 /* We should be in __ka space. */
1223 BUG_ON(xen_start_info->mfn_list < __START_KERNEL_map);
1224 addr = xen_start_info->mfn_list;
1225 /* We round up to the PMD, which means that if anybody at this stage is
1226 * using the __ka address of xen_start_info or xen_start_info->shared_info
1227 * they are going to crash. Fortunately we have already revectored
1228 * in xen_setup_kernel_pagetable and in xen_setup_shared_info. */
1229 size = roundup(size, PMD_SIZE);
1230 xen_cleanhighmap(addr, addr + size);
1231
1232 size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
1233 memblock_free(__pa(xen_start_info->mfn_list), size);
1234 /* And revector! Bye bye old array */
1235 xen_start_info->mfn_list = new_mfn_list;
1236 } else
1237 goto skip;
1238 }
1239 /* At this stage, cleanup_highmap has already cleaned __ka space
1240 * from _brk_limit way up to the max_pfn_mapped (which is the end of
1241 * the ramdisk). We continue on, erasing PMD entries that point to page
1242 * tables - do note that they are accessible at this stage via __va.
1243 * For good measure we also round up to the PMD - which means that if
1244 * anybody is using the __ka address of the initial boot-stack - and tries
1245 * to use it - they are going to crash. The xen_start_info has been
1246 * taken care of already in xen_setup_kernel_pagetable. */
1247 addr = xen_start_info->pt_base;
1248 size = roundup(xen_start_info->nr_pt_frames * PAGE_SIZE, PMD_SIZE);
1249
1250 xen_cleanhighmap(addr, addr + size);
1251 xen_start_info->pt_base = (unsigned long)__va(__pa(xen_start_info->pt_base));
1252 #ifdef DEBUG
1253 /* This is superfluous and not necessary, but you know what,
1254 * let's do it. The MODULES_VADDR -> MODULES_END should be clear of
1255 * anything at this stage. */
1256 xen_cleanhighmap(MODULES_VADDR, roundup(MODULES_VADDR, PUD_SIZE) - 1);
1257 #endif
1258 skip:
1259 #endif
1260 xen_post_allocator_init();
1261 }
1262 static void xen_write_cr2(unsigned long cr2)
1263 {
1264 this_cpu_read(xen_vcpu)->arch.cr2 = cr2;
1265 }
1266
1267 static unsigned long xen_read_cr2(void)
1268 {
1269 return this_cpu_read(xen_vcpu)->arch.cr2;
1270 }
1271
1272 unsigned long xen_read_cr2_direct(void)
1273 {
1274 return this_cpu_read(xen_vcpu_info.arch.cr2);
1275 }
1276
1277 void xen_flush_tlb_all(void)
1278 {
1279 struct mmuext_op *op;
1280 struct multicall_space mcs;
1281
1282 trace_xen_mmu_flush_tlb_all(0);
1283
1284 preempt_disable();
1285
1286 mcs = xen_mc_entry(sizeof(*op));
1287
1288 op = mcs.args;
1289 op->cmd = MMUEXT_TLB_FLUSH_ALL;
1290 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1291
1292 xen_mc_issue(PARAVIRT_LAZY_MMU);
1293
1294 preempt_enable();
1295 }
1296 static void xen_flush_tlb(void)
1297 {
1298 struct mmuext_op *op;
1299 struct multicall_space mcs;
1300
1301 trace_xen_mmu_flush_tlb(0);
1302
1303 preempt_disable();
1304
1305 mcs = xen_mc_entry(sizeof(*op));
1306
1307 op = mcs.args;
1308 op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
1309 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1310
1311 xen_mc_issue(PARAVIRT_LAZY_MMU);
1312
1313 preempt_enable();
1314 }
1315
1316 static void xen_flush_tlb_single(unsigned long addr)
1317 {
1318 struct mmuext_op *op;
1319 struct multicall_space mcs;
1320
1321 trace_xen_mmu_flush_tlb_single(addr);
1322
1323 preempt_disable();
1324
1325 mcs = xen_mc_entry(sizeof(*op));
1326 op = mcs.args;
1327 op->cmd = MMUEXT_INVLPG_LOCAL;
1328 op->arg1.linear_addr = addr & PAGE_MASK;
1329 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1330
1331 xen_mc_issue(PARAVIRT_LAZY_MMU);
1332
1333 preempt_enable();
1334 }
1335
1336 static void xen_flush_tlb_others(const struct cpumask *cpus,
1337 struct mm_struct *mm, unsigned long start,
1338 unsigned long end)
1339 {
1340 struct {
1341 struct mmuext_op op;
1342 #ifdef CONFIG_SMP
1343 DECLARE_BITMAP(mask, num_processors);
1344 #else
1345 DECLARE_BITMAP(mask, NR_CPUS);
1346 #endif
1347 } *args;
1348 struct multicall_space mcs;
1349
1350 trace_xen_mmu_flush_tlb_others(cpus, mm, start, end);
1351
1352 if (cpumask_empty(cpus))
1353 return; /* nothing to do */
1354
1355 mcs = xen_mc_entry(sizeof(*args));
1356 args = mcs.args;
1357 args->op.arg2.vcpumask = to_cpumask(args->mask);
1358
1359 /* Remove us, and any offline CPUS. */
1360 cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
1361 cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
1362
1363 args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
1364 if (end != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) {
1365 args->op.cmd = MMUEXT_INVLPG_MULTI;
1366 args->op.arg1.linear_addr = start;
1367 }
1368
1369 MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
1370
1371 xen_mc_issue(PARAVIRT_LAZY_MMU);
1372 }
1373
1374 static unsigned long xen_read_cr3(void)
1375 {
1376 return this_cpu_read(xen_cr3);
1377 }
1378
1379 static void set_current_cr3(void *v)
1380 {
1381 this_cpu_write(xen_current_cr3, (unsigned long)v);
1382 }
1383
1384 static void __xen_write_cr3(bool kernel, unsigned long cr3)
1385 {
1386 struct mmuext_op op;
1387 unsigned long mfn;
1388
1389 trace_xen_mmu_write_cr3(kernel, cr3);
1390
1391 if (cr3)
1392 mfn = pfn_to_mfn(PFN_DOWN(cr3));
1393 else
1394 mfn = 0;
1395
1396 WARN_ON(mfn == 0 && kernel);
1397
1398 op.cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
1399 op.arg1.mfn = mfn;
1400
1401 xen_extend_mmuext_op(&op);
1402
1403 if (kernel) {
1404 this_cpu_write(xen_cr3, cr3);
1405
1406 /* Update xen_current_cr3 once the batch has actually
1407 been submitted. */
1408 xen_mc_callback(set_current_cr3, (void *)cr3);
1409 }
1410 }
1411 static void xen_write_cr3(unsigned long cr3)
1412 {
1413 BUG_ON(preemptible());
1414
1415 xen_mc_batch(); /* disables interrupts */
1416
1417 /* Update while interrupts are disabled, so it's atomic with
1418 respect to ipis */
1419 this_cpu_write(xen_cr3, cr3);
1420
1421 __xen_write_cr3(true, cr3);
1422
1423 #ifdef CONFIG_X86_64
1424 {
1425 pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
1426 if (user_pgd)
1427 __xen_write_cr3(false, __pa(user_pgd));
1428 else
1429 __xen_write_cr3(false, 0);
1430 }
1431 #endif
1432
1433 xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
1434 }
1435
1436 #ifdef CONFIG_X86_64
1437 /*
1438 * At the start of the day - when Xen launches a guest, it has already
1439 * built pagetables for the guest. We diligently look over them
1440 * in xen_setup_kernel_pagetable and graft them as appropriate into the
1441 * init_level4_pgt and its friends. Then when we are happy we load
1442 * the new init_level4_pgt - and continue on.
1443 *
1444 * The generic code starts (start_kernel) and 'init_mem_mapping' sets
1445 * up the rest of the pagetables. When it has completed it loads the cr3.
1446 * N.B. that baremetal would start at 'start_kernel' (and the early
1447 * #PF handler would create bootstrap pagetables) - so we are running
1448 * with the same assumptions as what to do when write_cr3 is executed
1449 * at this point.
1450 *
1451 * Since there are no user-page tables at all, we have two variants
1452 * of xen_write_cr3 - the early bootup (this one), and the late one
1453 * (xen_write_cr3). The reason we have to do that is that in 64-bit
1454 * the Linux kernel and user-space are both in ring 3 while the
1455 * hypervisor is in ring 0.
1456 */
1457 static void __init xen_write_cr3_init(unsigned long cr3)
1458 {
1459 BUG_ON(preemptible());
1460
1461 xen_mc_batch(); /* disables interrupts */
1462
1463 /* Update while interrupts are disabled, so it's atomic with
1464 respect to ipis */
1465 this_cpu_write(xen_cr3, cr3);
1466
1467 __xen_write_cr3(true, cr3);
1468
1469 xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
1470 }
1471 #endif
1472
1473 static int xen_pgd_alloc(struct mm_struct *mm)
1474 {
1475 pgd_t *pgd = mm->pgd;
1476 int ret = 0;
1477
1478 BUG_ON(PagePinned(virt_to_page(pgd)));
1479
1480 #ifdef CONFIG_X86_64
1481 {
1482 struct page *page = virt_to_page(pgd);
1483 pgd_t *user_pgd;
1484
1485 BUG_ON(page->private != 0);
1486
1487 ret = -ENOMEM;
1488
1489 user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
1490 page->private = (unsigned long)user_pgd;
1491
1492 if (user_pgd != NULL) {
1493 user_pgd[pgd_index(VSYSCALL_START)] =
1494 __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
1495 ret = 0;
1496 }
1497
1498 BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
1499 }
1500 #endif
1501
1502 return ret;
1503 }
1504
1505 static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
1506 {
1507 #ifdef CONFIG_X86_64
1508 pgd_t *user_pgd = xen_get_user_pgd(pgd);
1509
1510 if (user_pgd)
1511 free_page((unsigned long)user_pgd);
1512 #endif
1513 }
1514
1515 #ifdef CONFIG_X86_32
1516 static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
1517 {
1518 /* If there's an existing pte, then don't allow _PAGE_RW to be set */
1519 if (pte_val_ma(*ptep) & _PAGE_PRESENT)
1520 pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
1521 pte_val_ma(pte));
1522
1523 return pte;
1524 }
1525 #else /* CONFIG_X86_64 */
1526 static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
1527 {
1528 return pte;
1529 }
1530 #endif /* CONFIG_X86_64 */
1531
1532 /*
1533 * Init-time set_pte while constructing initial pagetables, which
1534 * doesn't allow RO page table pages to be remapped RW.
1535 *
1536 * If there is no MFN for this PFN then this page is initially
1537 * ballooned out so clear the PTE (as in decrease_reservation() in
1538 * drivers/xen/balloon.c).
1539 *
1540 * Many of these PTE updates are done on unpinned and writable pages
1541 * and doing a hypercall for these is unnecessary and expensive. At
1542 * this point it is not possible to tell if a page is pinned or not,
1543 * so always write the PTE directly and rely on Xen trapping and
1544 * emulating any updates as necessary.
1545 */
1546 static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
1547 {
1548 if (pte_mfn(pte) != INVALID_P2M_ENTRY)
1549 pte = mask_rw_pte(ptep, pte);
1550 else
1551 pte = __pte_ma(0);
1552
1553 native_set_pte(ptep, pte);
1554 }
1555
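/*
 * Issue a single pin/unpin hypercall immediately, outside the multicall
 * batching, for one pagetable frame.  Used while the initial pagetables
 * are being constructed.
 */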
1556 static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1557 {
1558 struct mmuext_op op;
1559 op.cmd = cmd;
1560 op.arg1.mfn = pfn_to_mfn(pfn);
1561 if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
1562 BUG();
1563 }
1564
1565 /* Early in boot, while setting up the initial pagetable, assume
1566 everything is pinned. */
1567 static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
1568 {
1569 #ifdef CONFIG_FLATMEM
1570 BUG_ON(mem_map); /* should only be used early */
1571 #endif
1572 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
1573 pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1574 }
1575
1576 /* Used for pmd and pud */
1577 static void __init xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
1578 {
1579 #ifdef CONFIG_FLATMEM
1580 BUG_ON(mem_map); /* should only be used early */
1581 #endif
1582 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
1583 }
1584
1585 /* Early release_pte assumes that all pts are pinned, since there's
1586 only init_mm and anything attached to that is pinned. */
1587 static void __init xen_release_pte_init(unsigned long pfn)
1588 {
1589 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
1590 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1591 }
1592
1593 static void __init xen_release_pmd_init(unsigned long pfn)
1594 {
1595 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1596 }
1597
1598 static inline void __pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1599 {
1600 struct multicall_space mcs;
1601 struct mmuext_op *op;
1602
1603 mcs = __xen_mc_entry(sizeof(*op));
1604 op = mcs.args;
1605 op->cmd = cmd;
1606 op->arg1.mfn = pfn_to_mfn(pfn);
1607
1608 MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
1609 }
1610
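/*
 * Queue an update_va_mapping in the current multicall batch to change the
 * protection of the kernel linear mapping of @pfn, e.g. to make a freshly
 * allocated pagetable page read-only before pinning it.
 */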
1611 static inline void __set_pfn_prot(unsigned long pfn, pgprot_t prot)
1612 {
1613 struct multicall_space mcs;
1614 unsigned long addr = (unsigned long)__va(pfn << PAGE_SHIFT);
1615
1616 mcs = __xen_mc_entry(0);
1617 MULTI_update_va_mapping(mcs.mc, (unsigned long)addr,
1618 pfn_pte(pfn, prot), 0);
1619 }
1620
1621 /* This needs to make sure the new pte page is pinned iff it's being
1622 attached to a pinned pagetable. */
1623 static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
1624 unsigned level)
1625 {
1626 bool pinned = PagePinned(virt_to_page(mm->pgd));
1627
1628 trace_xen_mmu_alloc_ptpage(mm, pfn, level, pinned);
1629
1630 if (pinned) {
1631 struct page *page = pfn_to_page(pfn);
1632
1633 SetPagePinned(page);
1634
1635 if (!PageHighMem(page)) {
1636 xen_mc_batch();
1637
1638 __set_pfn_prot(pfn, PAGE_KERNEL_RO);
1639
1640 if (level == PT_PTE && USE_SPLIT_PTLOCKS)
1641 __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1642
1643 xen_mc_issue(PARAVIRT_LAZY_MMU);
1644 } else {
1645 /* make sure there are no stray mappings of
1646 this page */
1647 kmap_flush_unused();
1648 }
1649 }
1650 }
1651
1652 static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
1653 {
1654 xen_alloc_ptpage(mm, pfn, PT_PTE);
1655 }
1656
1657 static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
1658 {
1659 xen_alloc_ptpage(mm, pfn, PT_PMD);
1660 }
1661
1662 /* This should never happen until we're OK to use struct page */
1663 static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
1664 {
1665 struct page *page = pfn_to_page(pfn);
1666 bool pinned = PagePinned(page);
1667
1668 trace_xen_mmu_release_ptpage(pfn, level, pinned);
1669
1670 if (pinned) {
1671 if (!PageHighMem(page)) {
1672 xen_mc_batch();
1673
1674 if (level == PT_PTE && USE_SPLIT_PTLOCKS)
1675 __pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
1676
1677 __set_pfn_prot(pfn, PAGE_KERNEL);
1678
1679 xen_mc_issue(PARAVIRT_LAZY_MMU);
1680 }
1681 ClearPagePinned(page);
1682 }
1683 }
1684
1685 static void xen_release_pte(unsigned long pfn)
1686 {
1687 xen_release_ptpage(pfn, PT_PTE);
1688 }
1689
1690 static void xen_release_pmd(unsigned long pfn)
1691 {
1692 xen_release_ptpage(pfn, PT_PMD);
1693 }
1694
1695 #if PAGETABLE_LEVELS == 4
1696 static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
1697 {
1698 xen_alloc_ptpage(mm, pfn, PT_PUD);
1699 }
1700
1701 static void xen_release_pud(unsigned long pfn)
1702 {
1703 xen_release_ptpage(pfn, PT_PUD);
1704 }
1705 #endif
1706
1707 void __init xen_reserve_top(void)
1708 {
1709 #ifdef CONFIG_X86_32
1710 unsigned long top = HYPERVISOR_VIRT_START;
1711 struct xen_platform_parameters pp;
1712
1713 if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
1714 top = pp.virt_start;
1715
1716 reserve_top_address(-top);
1717 #endif /* CONFIG_X86_32 */
1718 }
1719
1720 /*
1721 * Like __va(), but returns the address in the kernel mapping (which is
1722 * all we have until the physical memory mapping has been set up).
1723 */
1724 static void *__ka(phys_addr_t paddr)
1725 {
1726 #ifdef CONFIG_X86_64
1727 return (void *)(paddr + __START_KERNEL_map);
1728 #else
1729 return __va(paddr);
1730 #endif
1731 }
1732
1733 /* Convert a machine address to physical address */
1734 static unsigned long m2p(phys_addr_t maddr)
1735 {
1736 phys_addr_t paddr;
1737
1738 maddr &= PTE_PFN_MASK;
1739 paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;
1740
1741 return paddr;
1742 }
1743
1744 /* Convert a machine address to kernel virtual */
1745 static void *m2v(phys_addr_t maddr)
1746 {
1747 return __ka(m2p(maddr));
1748 }
1749
1750 /* Set the page permissions on identity-mapped pages */
1751 static void set_page_prot_flags(void *addr, pgprot_t prot, unsigned long flags)
1752 {
1753 unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
1754 pte_t pte = pfn_pte(pfn, prot);
1755
1756 if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags))
1757 BUG();
1758 }
1759 static void set_page_prot(void *addr, pgprot_t prot)
1760 {
1761 return set_page_prot_flags(addr, prot, UVMF_NONE);
1762 }
1763 #ifdef CONFIG_X86_32
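/*
 * Fill the given pmd so that successive ptes map successive pfns starting
 * from 0 (the identity map referred to at the top of this file), reusing
 * any pte pages Xen has already provided and taking the rest from the
 * brk-reserved level1_ident_pgt array; the pte pages are then made
 * read-only so they can serve as pagetables.
 */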
1764 static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
1765 {
1766 unsigned pmdidx, pteidx;
1767 unsigned ident_pte;
1768 unsigned long pfn;
1769
1770 level1_ident_pgt = extend_brk(sizeof(pte_t) * LEVEL1_IDENT_ENTRIES,
1771 PAGE_SIZE);
1772
1773 ident_pte = 0;
1774 pfn = 0;
1775 for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
1776 pte_t *pte_page;
1777
1778 /* Reuse or allocate a page of ptes */
1779 if (pmd_present(pmd[pmdidx]))
1780 pte_page = m2v(pmd[pmdidx].pmd);
1781 else {
1782 /* Check for free pte pages */
1783 if (ident_pte == LEVEL1_IDENT_ENTRIES)
1784 break;
1785
1786 pte_page = &level1_ident_pgt[ident_pte];
1787 ident_pte += PTRS_PER_PTE;
1788
1789 pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
1790 }
1791
1792 /* Install mappings */
1793 for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
1794 pte_t pte;
1795
1796 #ifdef CONFIG_X86_32
1797 if (pfn > max_pfn_mapped)
1798 max_pfn_mapped = pfn;
1799 #endif
1800
1801 if (!pte_none(pte_page[pteidx]))
1802 continue;
1803
1804 pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
1805 pte_page[pteidx] = pte;
1806 }
1807 }
1808
1809 for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
1810 set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);
1811
1812 set_page_prot(pmd, PAGE_KERNEL_RO);
1813 }
1814 #endif
1815 void __init xen_setup_machphys_mapping(void)
1816 {
1817 struct xen_machphys_mapping mapping;
1818
1819 if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
1820 machine_to_phys_mapping = (unsigned long *)mapping.v_start;
1821 machine_to_phys_nr = mapping.max_mfn + 1;
1822 } else {
1823 machine_to_phys_nr = MACH2PHYS_NR_ENTRIES;
1824 }
1825 #ifdef CONFIG_X86_32
1826 WARN_ON((machine_to_phys_mapping + (machine_to_phys_nr - 1))
1827 < machine_to_phys_mapping);
1828 #endif
1829 }
1830
1831 #ifdef CONFIG_X86_64
1832 static void convert_pfn_mfn(void *v)
1833 {
1834 pte_t *pte = v;
1835 int i;
1836
1837 /* All levels are converted the same way, so just treat them
1838 as ptes. */
1839 for (i = 0; i < PTRS_PER_PTE; i++)
1840 pte[i] = xen_make_pte(pte[i].pte);
1841 }
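/*
 * If the frame at @addr is the first or last remaining frame of the
 * Xen-provided pagetable region, make it writable again, clear it and
 * shrink the [pt_base, pt_end) range so that frame no longer needs to be
 * reserved.
 */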
1842 static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,
1843 unsigned long addr)
1844 {
1845 if (*pt_base == PFN_DOWN(__pa(addr))) {
1846 set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
1847 clear_page((void *)addr);
1848 (*pt_base)++;
1849 }
1850 if (*pt_end == PFN_DOWN(__pa(addr))) {
1851 set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
1852 clear_page((void *)addr);
1853 (*pt_end)--;
1854 }
1855 }
1856 /*
1857 * Set up the initial kernel pagetable.
1858 *
1859 * We can construct this by grafting the Xen provided pagetable into
1860 * head_64.S's preconstructed pagetables. We copy the Xen L2's into
1861 * level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt. This
1862 * means that only the kernel has a physical mapping to start with -
1863 * but that's enough to get __va working. We need to fill in the rest
1864 * of the physical mapping once some sort of allocator has been set
1865 * up.
1866 */
1867 void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
1868 {
1869 pud_t *l3;
1870 pmd_t *l2;
1871 unsigned long addr[3];
1872 unsigned long pt_base, pt_end;
1873 unsigned i;
1874
1875 /* max_pfn_mapped is the last pfn mapped in the initial memory
1876 * mappings. Considering that on Xen after the kernel mappings we
1877 * have the mappings of some pages that don't exist in pfn space, we
1878 * set max_pfn_mapped to the last real pfn mapped. */
1879 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
1880
1881 pt_base = PFN_DOWN(__pa(xen_start_info->pt_base));
1882 pt_end = pt_base + xen_start_info->nr_pt_frames;
1883
1884 /* Zap identity mapping */
1885 init_level4_pgt[0] = __pgd(0);
1886
1887 /* Pre-constructed entries are in pfn, so convert to mfn */
1888 /* L4[272] -> level3_ident_pgt
1889 * L4[511] -> level3_kernel_pgt */
1890 convert_pfn_mfn(init_level4_pgt);
1891
1892 /* L3_i[0] -> level2_ident_pgt */
1893 convert_pfn_mfn(level3_ident_pgt);
1894 /* L3_k[510] -> level2_kernel_pgt
1895 * L3_k[511] -> level2_fixmap_pgt */
1896 convert_pfn_mfn(level3_kernel_pgt);
1897
1898 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
1899 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
1900 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
1901
1902 addr[0] = (unsigned long)pgd;
1903 addr[1] = (unsigned long)l3;
1904 addr[2] = (unsigned long)l2;
1905 /* Graft it onto L4[272][0]. Note that we are creating an aliasing problem:
1906 * Both L4[272][0] and L4[511][511] have entries that point to the same
1907 * L2 (PMD) tables. Meaning that if you modify it in __va space
1908 * it will also be modified in the __ka space! (But if you just
1909 * modify the PMD table to point to other PTE's or none, then you
1910 * are OK - which is what cleanup_highmap does) */
1911 copy_page(level2_ident_pgt, l2);
1912 /* Graft it onto L4[511][511] */
1913 copy_page(level2_kernel_pgt, l2);
1914
1915 /* Get [511][510] and graft that in level2_fixmap_pgt */
1916 l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd);
1917 l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud);
1918 copy_page(level2_fixmap_pgt, l2);
1919 /* Note that we don't do anything with level1_fixmap_pgt which
1920 * we don't need. */
1921
1922 /* Make pagetable pieces RO */
1923 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
1924 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
1925 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
1926 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
1927 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
1928 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
1929 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
1930
1931 /* Pin down new L4 */
1932 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
1933 PFN_DOWN(__pa_symbol(init_level4_pgt)));
1934
1935 /* Unpin Xen-provided one */
1936 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
1937
1938 /*
1939 * At this stage there can be no user pgd, and no page
1940 * structure to attach it to, so make sure we just set kernel
1941 * pgd.
1942 */
1943 xen_mc_batch();
1944 __xen_write_cr3(true, __pa(init_level4_pgt));
1945 xen_mc_issue(PARAVIRT_LAZY_CPU);
1946
1947 /* We can't easily rip out the L3 and L2, as the Xen pagetables are
1948 * set out this way: [L4], [L1], [L2], [L3], [L1], [L1] ... for
1949 * the initial domain. For guests using the toolstack, they are in:
1950 * [L4], [L3], [L2], [L1], [L1] order. So for dom0 we can only
1951 * rip out the [L4] (pgd), but for guests we shave off three pages.
1952 */
1953 for (i = 0; i < ARRAY_SIZE(addr); i++)
1954 check_pt_base(&pt_base, &pt_end, addr[i]);
1955
1956 /* Reserve what remains of the Xen-provided pagetables that we are still using */
1957 memblock_reserve(PFN_PHYS(pt_base), (pt_end - pt_base) * PAGE_SIZE);
1958 /* Revector the xen_start_info */
1959 xen_start_info = (struct start_info *)__va(__pa(xen_start_info));
1960 }
1961 #else /* !CONFIG_X86_64 */
1962 static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD);
1963 static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD);
1964
1965 static void __init xen_write_cr3_init(unsigned long cr3)
1966 {
1967 unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir));
1968
1969 BUG_ON(read_cr3() != __pa(initial_page_table));
1970 BUG_ON(cr3 != __pa(swapper_pg_dir));
1971
1972 /*
1973 * We are switching to swapper_pg_dir for the first time (from
1974 * initial_page_table) and therefore need to mark that page
1975 * read-only and then pin it.
1976 *
1977 * Xen disallows sharing of kernel PMDs for PAE
1978 * guests. Therefore we must copy the kernel PMD from
1979 * initial_page_table into a new kernel PMD to be used in
1980 * swapper_pg_dir.
1981 */
1982 swapper_kernel_pmd =
1983 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
1984 copy_page(swapper_kernel_pmd, initial_kernel_pmd);
1985 swapper_pg_dir[KERNEL_PGD_BOUNDARY] =
1986 __pgd(__pa(swapper_kernel_pmd) | _PAGE_PRESENT);
1987 set_page_prot(swapper_kernel_pmd, PAGE_KERNEL_RO);
1988
1989 set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
1990 xen_write_cr3(cr3);
1991 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, pfn);
1992
1993 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE,
1994 PFN_DOWN(__pa(initial_page_table)));
1995 set_page_prot(initial_page_table, PAGE_KERNEL);
1996 set_page_prot(initial_kernel_pmd, PAGE_KERNEL);
1997
1998 pv_mmu_ops.write_cr3 = &xen_write_cr3;
1999 }
2000
2001 void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
2002 {
2003 pmd_t *kernel_pmd;
2004
2005 initial_kernel_pmd =
2006 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
2007
2008 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
2009 xen_start_info->nr_pt_frames * PAGE_SIZE +
2010 512*1024);
2011
2012 kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
2013 copy_page(initial_kernel_pmd, kernel_pmd);
2014
2015 xen_map_identity_early(initial_kernel_pmd, max_pfn);
2016
2017 copy_page(initial_page_table, pgd);
2018 initial_page_table[KERNEL_PGD_BOUNDARY] =
2019 __pgd(__pa(initial_kernel_pmd) | _PAGE_PRESENT);
2020
2021 set_page_prot(initial_kernel_pmd, PAGE_KERNEL_RO);
2022 set_page_prot(initial_page_table, PAGE_KERNEL_RO);
2023 set_page_prot(empty_zero_page, PAGE_KERNEL_RO);
2024
2025 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
2026
2027 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE,
2028 PFN_DOWN(__pa(initial_page_table)));
2029 xen_write_cr3(__pa(initial_page_table));
2030
2031 memblock_reserve(__pa(xen_start_info->pt_base),
2032 xen_start_info->nr_pt_frames * PAGE_SIZE);
2033 }
2034 #endif /* CONFIG_X86_64 */
2035
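/* A page of 0xff bytes used by xen_set_fixmap() to back fixmap slots
 * (the local APIC and IO APIC) that are not mapped to real hardware
 * under Xen PV. */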
2036 static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss;
2037
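/* Xen's set_fixmap: most slots are plain local RAM mappings, the (IO)APIC
 * slots are redirected to dummy_mapping, and anything not listed below is
 * treated as a machine (hardware) mapping. */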
2038 static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
2039 {
2040 pte_t pte;
2041
2042 phys >>= PAGE_SHIFT;
2043
2044 switch (idx) {
2045 case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
2046 #ifdef CONFIG_X86_F00F_BUG
2047 case FIX_F00F_IDT:
2048 #endif
2049 #ifdef CONFIG_X86_32
2050 case FIX_WP_TEST:
2051 case FIX_VDSO:
2052 # ifdef CONFIG_HIGHMEM
2053 case FIX_KMAP_BEGIN ... FIX_KMAP_END:
2054 # endif
2055 #else
2056 case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE:
2057 case VVAR_PAGE:
2058 #endif
2059 case FIX_TEXT_POKE0:
2060 case FIX_TEXT_POKE1:
2061 /* All local page mappings */
2062 pte = pfn_pte(phys, prot);
2063 break;
2064
2065 #ifdef CONFIG_X86_LOCAL_APIC
2066 case FIX_APIC_BASE: /* maps dummy local APIC */
2067 pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
2068 break;
2069 #endif
2070
2071 #ifdef CONFIG_X86_IO_APIC
2072 case FIX_IO_APIC_BASE_0 ... FIX_IO_APIC_BASE_END:
2073 /*
2074 * We just don't map the IO APIC - all access is via
2075 * hypercalls. Point the pte at the dummy mapping instead.
2076 */
2077 pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
2078 break;
2079 #endif
2080
2081 case FIX_PARAVIRT_BOOTMAP:
2082 /* This is an MFN, but it isn't an IO mapping from the
2083 IO domain */
2084 pte = mfn_pte(phys, prot);
2085 break;
2086
2087 default:
2088 /* By default, set_fixmap is used for hardware mappings */
2089 pte = mfn_pte(phys, __pgprot(pgprot_val(prot) | _PAGE_IOMAP));
2090 break;
2091 }
2092
2093 __native_set_fixmap(idx, pte);
2094
2095 #ifdef CONFIG_X86_64
2096 /* Replicate changes to map the vsyscall page into the user
2097 pagetable vsyscall mapping. */
2098 if ((idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE) ||
2099 idx == VVAR_PAGE) {
2100 unsigned long vaddr = __fix_to_virt(idx);
2101 set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
2102 }
2103 #endif
2104 }
2105
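/* Called after the page allocator has been set up: switch from the boot-time
 * pv_mmu_ops variants to the final ones and mark init_mm as pinned. */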
2106 static void __init xen_post_allocator_init(void)
2107 {
2108 pv_mmu_ops.set_pte = xen_set_pte;
2109 pv_mmu_ops.set_pmd = xen_set_pmd;
2110 pv_mmu_ops.set_pud = xen_set_pud;
2111 #if PAGETABLE_LEVELS == 4
2112 pv_mmu_ops.set_pgd = xen_set_pgd;
2113 #endif
2114
2115 /* This will work as long as patching hasn't happened yet
2116 (which it hasn't) */
2117 pv_mmu_ops.alloc_pte = xen_alloc_pte;
2118 pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
2119 pv_mmu_ops.release_pte = xen_release_pte;
2120 pv_mmu_ops.release_pmd = xen_release_pmd;
2121 #if PAGETABLE_LEVELS == 4
2122 pv_mmu_ops.alloc_pud = xen_alloc_pud;
2123 pv_mmu_ops.release_pud = xen_release_pud;
2124 #endif
2125
2126 #ifdef CONFIG_X86_64
2127 pv_mmu_ops.write_cr3 = &xen_write_cr3;
2128 SetPagePinned(virt_to_page(level3_user_vsyscall));
2129 #endif
2130 xen_mark_init_mm_pinned();
2131 }
2132
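/* Flush any batched multicalls before actually leaving lazy MMU mode. */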
2133 static void xen_leave_lazy_mmu(void)
2134 {
2135 preempt_disable();
2136 xen_mc_flush();
2137 paravirt_leave_lazy_mmu();
2138 preempt_enable();
2139 }
2140
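/* The paravirt MMU operations installed at boot. Note that write_cr3 starts
 * out as xen_write_cr3_init and is later switched to xen_write_cr3 once the
 * initial pagetables have been handed over. */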
2141 static const struct pv_mmu_ops xen_mmu_ops __initconst = {
2142 .read_cr2 = xen_read_cr2,
2143 .write_cr2 = xen_write_cr2,
2144
2145 .read_cr3 = xen_read_cr3,
2146 .write_cr3 = xen_write_cr3_init,
2147
2148 .flush_tlb_user = xen_flush_tlb,
2149 .flush_tlb_kernel = xen_flush_tlb,
2150 .flush_tlb_single = xen_flush_tlb_single,
2151 .flush_tlb_others = xen_flush_tlb_others,
2152
2153 .pte_update = paravirt_nop,
2154 .pte_update_defer = paravirt_nop,
2155
2156 .pgd_alloc = xen_pgd_alloc,
2157 .pgd_free = xen_pgd_free,
2158
2159 .alloc_pte = xen_alloc_pte_init,
2160 .release_pte = xen_release_pte_init,
2161 .alloc_pmd = xen_alloc_pmd_init,
2162 .release_pmd = xen_release_pmd_init,
2163
2164 .set_pte = xen_set_pte_init,
2165 .set_pte_at = xen_set_pte_at,
2166 .set_pmd = xen_set_pmd_hyper,
2167
2168 .ptep_modify_prot_start = __ptep_modify_prot_start,
2169 .ptep_modify_prot_commit = __ptep_modify_prot_commit,
2170
2171 .pte_val = PV_CALLEE_SAVE(xen_pte_val),
2172 .pgd_val = PV_CALLEE_SAVE(xen_pgd_val),
2173
2174 .make_pte = PV_CALLEE_SAVE(xen_make_pte),
2175 .make_pgd = PV_CALLEE_SAVE(xen_make_pgd),
2176
2177 #ifdef CONFIG_X86_PAE
2178 .set_pte_atomic = xen_set_pte_atomic,
2179 .pte_clear = xen_pte_clear,
2180 .pmd_clear = xen_pmd_clear,
2181 #endif /* CONFIG_X86_PAE */
2182 .set_pud = xen_set_pud_hyper,
2183
2184 .make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
2185 .pmd_val = PV_CALLEE_SAVE(xen_pmd_val),
2186
2187 #if PAGETABLE_LEVELS == 4
2188 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
2189 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
2190 .set_pgd = xen_set_pgd_hyper,
2191
2192 .alloc_pud = xen_alloc_pmd_init,
2193 .release_pud = xen_release_pmd_init,
2194 #endif /* PAGETABLE_LEVELS == 4 */
2195
2196 .activate_mm = xen_activate_mm,
2197 .dup_mmap = xen_dup_mmap,
2198 .exit_mmap = xen_exit_mmap,
2199
2200 .lazy_mode = {
2201 .enter = paravirt_enter_lazy_mmu,
2202 .leave = xen_leave_lazy_mmu,
2203 },
2204
2205 .set_fixmap = xen_set_fixmap,
2206 };
2207
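/* Install the Xen PV MMU operations and fill dummy_mapping with 0xff. */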
2208 void __init xen_init_mmu_ops(void)
2209 {
2210 x86_init.paging.pagetable_init = xen_pagetable_init;
2211 pv_mmu_ops = xen_mmu_ops;
2212
2213 memset(dummy_mapping, 0xff, PAGE_SIZE);
2214 }
2215
2216 /* Protected by xen_reservation_lock. */
2217 #define MAX_CONTIG_ORDER 9 /* 2MB */
2218 static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
2219
2220 #define VOID_PTE (mfn_pte(0, __pgprot(0)))
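/* Unmap 1<<order pages starting at vaddr by installing void PTEs, recording
 * the old MFNs in in_frames and/or the PFNs in out_frames (either may be
 * NULL), and marking the corresponding p2m entries invalid. */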
2221 static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
2222 unsigned long *in_frames,
2223 unsigned long *out_frames)
2224 {
2225 int i;
2226 struct multicall_space mcs;
2227
2228 xen_mc_batch();
2229 for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
2230 mcs = __xen_mc_entry(0);
2231
2232 if (in_frames)
2233 in_frames[i] = virt_to_mfn(vaddr);
2234
2235 MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
2236 __set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);
2237
2238 if (out_frames)
2239 out_frames[i] = virt_to_pfn(vaddr);
2240 }
2241 xen_mc_issue(0);
2242 }
2243
2244 /*
2245 * Update the pfn-to-mfn mappings for a virtual address range, either to
2246 * point to an array of mfns, or contiguously from a single starting
2247 * mfn.
2248 */
2249 static void xen_remap_exchanged_ptes(unsigned long vaddr, int order,
2250 unsigned long *mfns,
2251 unsigned long first_mfn)
2252 {
2253 unsigned i, limit;
2254 unsigned long mfn;
2255
2256 xen_mc_batch();
2257
2258 limit = 1u << order;
2259 for (i = 0; i < limit; i++, vaddr += PAGE_SIZE) {
2260 struct multicall_space mcs;
2261 unsigned flags;
2262
2263 mcs = __xen_mc_entry(0);
2264 if (mfns)
2265 mfn = mfns[i];
2266 else
2267 mfn = first_mfn + i;
2268
2269 if (i < (limit - 1))
2270 flags = 0;
2271 else {
2272 if (order == 0)
2273 flags = UVMF_INVLPG | UVMF_ALL;
2274 else
2275 flags = UVMF_TLB_FLUSH | UVMF_ALL;
2276 }
2277
2278 MULTI_update_va_mapping(mcs.mc, vaddr,
2279 mfn_pte(mfn, PAGE_KERNEL), flags);
2280
2281 set_phys_to_machine(virt_to_pfn(vaddr), mfn);
2282 }
2283
2284 xen_mc_issue(0);
2285 }
2286
2287 /*
2288 * Perform the hypercall to exchange a region of our pfns to point to
2289 * memory with the required contiguous alignment. Takes the pfns as
2290 * input, and populates mfns as output.
2291 *
2292 * Returns a success code indicating whether the hypervisor was able to
2293 * satisfy the request or not.
2294 */
2295 static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
2296 unsigned long *pfns_in,
2297 unsigned long extents_out,
2298 unsigned int order_out,
2299 unsigned long *mfns_out,
2300 unsigned int address_bits)
2301 {
2302 long rc;
2303 int success;
2304
2305 struct xen_memory_exchange exchange = {
2306 .in = {
2307 .nr_extents = extents_in,
2308 .extent_order = order_in,
2309 .extent_start = pfns_in,
2310 .domid = DOMID_SELF
2311 },
2312 .out = {
2313 .nr_extents = extents_out,
2314 .extent_order = order_out,
2315 .extent_start = mfns_out,
2316 .address_bits = address_bits,
2317 .domid = DOMID_SELF
2318 }
2319 };
2320
2321 BUG_ON(extents_in << order_in != extents_out << order_out);
2322
2323 rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
2324 success = (exchange.nr_exchanged == extents_in);
2325
2326 BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
2327 BUG_ON(success && (rc != 0));
2328
2329 return success;
2330 }
2331
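/* Exchange the frames backing [vstart, vstart + (PAGE_SIZE << order)) for a
 * single machine-contiguous extent satisfying the address_bits restriction
 * and remap the virtual range onto it. Returns 0 on success, -ENOMEM otherwise. */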
2332 int xen_create_contiguous_region(unsigned long vstart, unsigned int order,
2333 unsigned int address_bits)
2334 {
2335 unsigned long *in_frames = discontig_frames, out_frame;
2336 unsigned long flags;
2337 int success;
2338
2339 /*
2340 * Currently an auto-translated guest will not perform I/O, nor will
2341 * it require PAE page directories below 4GB. Therefore any calls to
2342 * this function are redundant and can be ignored.
2343 */
2344
2345 if (xen_feature(XENFEAT_auto_translated_physmap))
2346 return 0;
2347
2348 if (unlikely(order > MAX_CONTIG_ORDER))
2349 return -ENOMEM;
2350
2351 memset((void *) vstart, 0, PAGE_SIZE << order);
2352
2353 spin_lock_irqsave(&xen_reservation_lock, flags);
2354
2355 /* 1. Zap current PTEs, remembering MFNs. */
2356 xen_zap_pfn_range(vstart, order, in_frames, NULL);
2357
2358 /* 2. Get a new contiguous memory extent. */
2359 out_frame = virt_to_pfn(vstart);
2360 success = xen_exchange_memory(1UL << order, 0, in_frames,
2361 1, order, &out_frame,
2362 address_bits);
2363
2364 /* 3. Map the new extent in place of old pages. */
2365 if (success)
2366 xen_remap_exchanged_ptes(vstart, order, NULL, out_frame);
2367 else
2368 xen_remap_exchanged_ptes(vstart, order, in_frames, 0);
2369
2370 spin_unlock_irqrestore(&xen_reservation_lock, flags);
2371
2372 return success ? 0 : -ENOMEM;
2373 }
2374 EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
2375
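/* Undo xen_create_contiguous_region(): give back the machine-contiguous
 * extent backing [vstart, vstart + (PAGE_SIZE << order)) in exchange for
 * arbitrary frames and remap the virtual range onto them. */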
2376 void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
2377 {
2378 unsigned long *out_frames = discontig_frames, in_frame;
2379 unsigned long flags;
2380 int success;
2381
2382 if (xen_feature(XENFEAT_auto_translated_physmap))
2383 return;
2384
2385 if (unlikely(order > MAX_CONTIG_ORDER))
2386 return;
2387
2388 memset((void *) vstart, 0, PAGE_SIZE << order);
2389
2390 spin_lock_irqsave(&xen_reservation_lock, flags);
2391
2392 /* 1. Find start MFN of contiguous extent. */
2393 in_frame = virt_to_mfn(vstart);
2394
2395 /* 2. Zap current PTEs. */
2396 xen_zap_pfn_range(vstart, order, NULL, out_frames);
2397
2398 /* 3. Do the exchange for non-contiguous MFNs. */
2399 success = xen_exchange_memory(1, order, &in_frame, 1UL << order,
2400 0, out_frames, 0);
2401
2402 /* 4. Map new pages in place of old pages. */
2403 if (success)
2404 xen_remap_exchanged_ptes(vstart, order, out_frames, 0);
2405 else
2406 xen_remap_exchanged_ptes(vstart, order, NULL, in_frame);
2407
2408 spin_unlock_irqrestore(&xen_reservation_lock, flags);
2409 }
2410 EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
2411
2412 #ifdef CONFIG_XEN_PVHVM
2413 #ifdef CONFIG_PROC_VMCORE
2414 /*
2415 * This function is used in two contexts:
2416 * - the kdump kernel has to check whether a pfn of the crashed kernel
2417 * was a ballooned page. vmcore uses this function to decide
2418 * whether to access a pfn of the crashed kernel.
2419 * - the kexec kernel has to check whether a pfn was ballooned by the
2420 * previous kernel. If the pfn is ballooned, handle it properly.
2421 * Returns 0 if the pfn is not backed by a RAM page; the caller may
2422 * then handle the pfn specially.
2423 */
2424 static int xen_oldmem_pfn_is_ram(unsigned long pfn)
2425 {
2426 struct xen_hvm_get_mem_type a = {
2427 .domid = DOMID_SELF,
2428 .pfn = pfn,
2429 };
2430 int ram;
2431
2432 if (HYPERVISOR_hvm_op(HVMOP_get_mem_type, &a))
2433 return -ENXIO;
2434
2435 switch (a.mem_type) {
2436 case HVMMEM_mmio_dm:
2437 ram = 0;
2438 break;
2439 case HVMMEM_ram_rw:
2440 case HVMMEM_ram_ro:
2441 default:
2442 ram = 1;
2443 break;
2444 }
2445
2446 return ram;
2447 }
2448 #endif
2449
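/* Notify the hypervisor (HVMOP_pagetable_dying) that this mm's pagetable is
 * about to be torn down. */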
2450 static void xen_hvm_exit_mmap(struct mm_struct *mm)
2451 {
2452 struct xen_hvm_pagetable_dying a;
2453 int rc;
2454
2455 a.domid = DOMID_SELF;
2456 a.gpa = __pa(mm->pgd);
2457 rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
2458 WARN_ON_ONCE(rc < 0);
2459 }
2460
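/* Probe for HVMOP_pagetable_dying support by issuing it with a dummy GPA. */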
2461 static int is_pagetable_dying_supported(void)
2462 {
2463 struct xen_hvm_pagetable_dying a;
2464 int rc = 0;
2465
2466 a.domid = DOMID_SELF;
2467 a.gpa = 0x00;
2468 rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
2469 if (rc < 0) {
2470 printk(KERN_DEBUG "HVMOP_pagetable_dying not supported\n");
2471 return 0;
2472 }
2473 return 1;
2474 }
2475
2476 void __init xen_hvm_init_mmu_ops(void)
2477 {
2478 if (is_pagetable_dying_supported())
2479 pv_mmu_ops.exit_mmap = xen_hvm_exit_mmap;
2480 #ifdef CONFIG_PROC_VMCORE
2481 register_oldmem_pfn_is_ram(&xen_oldmem_pfn_is_ram);
2482 #endif
2483 }
2484 #endif
2485
2486 #define REMAP_BATCH_SIZE 16
2487
2488 struct remap_data {
2489 unsigned long mfn;
2490 pgprot_t prot;
2491 struct mmu_update *mmu_update;
2492 };
2493
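/* apply_to_page_range() callback: queue one mmu_update entry mapping the
 * next MFN in the range at this PTE with the requested protection. */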
2494 static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
2495 unsigned long addr, void *data)
2496 {
2497 struct remap_data *rmd = data;
2498 pte_t pte = pte_mkspecial(pfn_pte(rmd->mfn++, rmd->prot));
2499
2500 rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
2501 rmd->mmu_update->val = pte_val_ma(pte);
2502 rmd->mmu_update++;
2503
2504 return 0;
2505 }
2506
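/* Map nr machine frames starting at mfn, owned by domid, into vma at addr,
 * batching the PTE updates REMAP_BATCH_SIZE at a time through
 * HYPERVISOR_mmu_update. Only for non-auto-translated (PV) guests. */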
2507 int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
2508 unsigned long addr,
2509 xen_pfn_t mfn, int nr,
2510 pgprot_t prot, unsigned domid,
2511 struct page **pages)
2512
2513 {
2514 struct remap_data rmd;
2515 struct mmu_update mmu_update[REMAP_BATCH_SIZE];
2516 int batch;
2517 unsigned long range;
2518 int err = 0;
2519
2520 if (xen_feature(XENFEAT_auto_translated_physmap))
2521 return -EINVAL;
2522
2523 prot = __pgprot(pgprot_val(prot) | _PAGE_IOMAP);
2524
2525 BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
2526
2527 rmd.mfn = mfn;
2528 rmd.prot = prot;
2529
2530 while (nr) {
2531 batch = min(REMAP_BATCH_SIZE, nr);
2532 range = (unsigned long)batch << PAGE_SHIFT;
2533
2534 rmd.mmu_update = mmu_update;
2535 err = apply_to_page_range(vma->vm_mm, addr, range,
2536 remap_area_mfn_pte_fn, &rmd);
2537 if (err)
2538 goto out;
2539
2540 err = HYPERVISOR_mmu_update(mmu_update, batch, NULL, domid);
2541 if (err < 0)
2542 goto out;
2543
2544 nr -= batch;
2545 addr += range;
2546 }
2547
2548 err = 0;
2549 out:
2550
2551 xen_flush_tlb_all();
2552
2553 return err;
2554 }
2555 EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
2556
2557 /* Returns 0 on success; there is nothing to unmap for PV guests, and auto-translated guests are not handled here. */
2558 int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
2559 int numpgs, struct page **pages)
2560 {
2561 if (!pages || !xen_feature(XENFEAT_auto_translated_physmap))
2562 return 0;
2563
2564 return -EINVAL;
2565 }
2566 EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range);