static void xen_flush_tlb(void)
{
- struct mmuext_op op;
+ struct mmuext_op *op;
+ /* Reserve room for one mmuext_op in the multicall argument area. */
+ struct multicall_space mcs = xen_mc_entry(sizeof(*op));
- op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
- if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
- BUG();
+ op = mcs.args;
+ op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
+ /* Queue the flush as a batched multicall instead of issuing a
+  * synchronous HYPERVISOR_mmuext_op hypercall immediately. */
+ MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
+
+ /* NOTE(review): xen_mc_issue() presumably dispatches the batch now
+  * unless we are inside a PARAVIRT_LAZY_MMU region -- confirm against
+  * multicalls.c. */
+ xen_mc_issue(PARAVIRT_LAZY_MMU);
}
static void xen_flush_tlb_single(unsigned long addr)
{
- struct mmuext_op op;
+ struct mmuext_op *op;
+ /* Reserve room for one mmuext_op in the multicall argument area. */
+ struct multicall_space mcs = xen_mc_entry(sizeof(*op));
- op.cmd = MMUEXT_INVLPG_LOCAL;
- op.arg1.linear_addr = addr & PAGE_MASK;
- if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
- BUG();
+ op = mcs.args;
+ op->cmd = MMUEXT_INVLPG_LOCAL;
+ /* INVLPG operates on a page; mask off the sub-page offset bits. */
+ op->arg1.linear_addr = addr & PAGE_MASK;
+ /* Queue the single-page invalidation as a batched multicall. */
+ MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
+
+ xen_mc_issue(PARAVIRT_LAZY_MMU);
}
static void xen_flush_tlb_others(const cpumask_t *cpus, struct mm_struct *mm,
unsigned long va)
{
- struct mmuext_op op;
+ struct {
+ struct mmuext_op op;
+ cpumask_t mask;
+ } *args;
cpumask_t cpumask = *cpus;
+ struct multicall_space mcs;
/*
* A couple of (to be removed) sanity checks:
if (cpus_empty(cpumask))
return;
+ mcs = xen_mc_entry(sizeof(*args));
+ args = mcs.args;
+ /* Copy the cpumask into the multicall argument area alongside the
+  * op, and point arg2.vcpumask at that copy.  The old code pointed
+  * vcpumask at caller-owned memory (cpus), which need not stay valid
+  * until the (now deferred) batch actually executes. */
+ args->mask = cpumask;
+ args->op.arg2.vcpumask = &args->mask;
+
if (va == TLB_FLUSH_ALL) {
- op.cmd = MMUEXT_TLB_FLUSH_MULTI;
- op.arg2.vcpumask = (void *)cpus;
+ args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
} else {
- op.cmd = MMUEXT_INVLPG_MULTI;
- op.arg1.linear_addr = va;
- op.arg2.vcpumask = (void *)cpus;
+ args->op.cmd = MMUEXT_INVLPG_MULTI;
+ args->op.arg1.linear_addr = va;
}
- if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
- BUG();
+ /* Queue the cross-CPU flush/invalidate as a batched multicall. */
+ MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
+
+ xen_mc_issue(PARAVIRT_LAZY_MMU);
}
static unsigned long xen_read_cr2(void)
void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
- struct mmu_update u;
+ struct multicall_space mcs;
+ struct mmu_update *u;
- u.ptr = virt_to_machine(ptr).maddr;
- u.val = pmd_val_ma(val);
- if (HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0)
- BUG();
+ /* NOTE(review): preemption is disabled across the mc_entry/mc_issue
+  * pair -- presumably because the multicall buffer is per-cpu; confirm
+  * against xen_mc_entry()'s implementation. */
+ preempt_disable();
+
+ mcs = xen_mc_entry(sizeof(*u));
+ u = mcs.args;
+ /* The hypervisor wants the machine address of the pmd slot. */
+ u->ptr = virt_to_machine(ptr).maddr;
+ u->val = pmd_val_ma(val);
+ /* Queue the pmd write as a batched mmu_update multicall. */
+ MULTI_mmu_update(mcs.mc, u, 1, NULL, DOMID_SELF);
+
+ xen_mc_issue(PARAVIRT_LAZY_MMU);
+
+ preempt_enable();
}
/*
void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pteval)
{
- if ((mm != current->mm && mm != &init_mm) ||
- HYPERVISOR_update_va_mapping(addr, pteval, 0) != 0)
- xen_set_pte(ptep, pteval);
+ /* update_va_mapping only applies to the current or kernel address
+  * space; for any other mm fall through to xen_set_pte(). */
+ if (mm == current->mm || mm == &init_mm) {
+ /* When batching (lazy MMU mode), queue the update as a
+  * multicall instead of calling into the hypervisor now. */
+ if (xen_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
+ struct multicall_space mcs;
+ mcs = xen_mc_entry(0);
+
+ MULTI_update_va_mapping(mcs.mc, addr, pteval, 0);
+ xen_mc_issue(PARAVIRT_LAZY_MMU);
+ return;
+ } else
+ if (HYPERVISOR_update_va_mapping(addr, pteval, 0) == 0)
+ return;
+ }
+ /* Fallback path: write the pte via xen_set_pte(). */
+ xen_set_pte(ptep, pteval);
}
#ifdef CONFIG_X86_PAE
void xen_set_pud(pud_t *ptr, pud_t val)
{
- struct mmu_update u;
+ struct multicall_space mcs;
+ struct mmu_update *u;
- u.ptr = virt_to_machine(ptr).maddr;
- u.val = pud_val_ma(val);
- if (HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0)
- BUG();
+ /* Mirrors xen_set_pmd(): disable preemption around the batched
+  * mmu_update (the multicall buffer is presumably per-cpu -- TODO
+  * confirm). */
+ preempt_disable();
+
+ mcs = xen_mc_entry(sizeof(*u));
+ u = mcs.args;
+ /* The hypervisor wants the machine address of the pud slot. */
+ u->ptr = virt_to_machine(ptr).maddr;
+ u->val = pud_val_ma(val);
+ MULTI_mmu_update(mcs.mc, u, 1, NULL, DOMID_SELF);
+
+ xen_mc_issue(PARAVIRT_LAZY_MMU);
+
+ preempt_enable();
}
void xen_set_pte(pte_t *ptep, pte_t pte)
#include "multicalls.h"
-#define MC_BATCH 8
-#define MC_ARGS (MC_BATCH * 32 / sizeof(u64))
+/* Grow the batch from 8 to 32 queued multicalls; MC_ARGS is the shared
+ * argument area measured in u64 slots (32 entries * 16 bytes each = 64
+ * u64s, vs. the old 8 * 32 = 32 u64s). */
+#define MC_BATCH 32
+#define MC_ARGS (MC_BATCH * 16 / sizeof(u64))
struct mc_buffer {
struct multicall_entry entries[MC_BATCH];