}
static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
- unsigned pt_access, unsigned pte_access,
- int write_fault, int *emulate, int level, gfn_t gfn,
- pfn_t pfn, bool speculative, bool host_writable)
+ unsigned pte_access, int write_fault, int *emulate,
+ int level, gfn_t gfn, pfn_t pfn, bool speculative,
+ bool host_writable)
{
int was_rmapped = 0;
int rmap_count;
- pgprintk("%s: spte %llx access %x write_fault %d gfn %llx\n",
- __func__, *sptep, pt_access,
- write_fault, gfn);
+ pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__,
+ *sptep, write_fault, gfn);
if (is_rmap_spte(*sptep)) {
/*
@@ ... @@
return -1;
for (i = 0; i < ret; i++, gfn++, start++)
- mmu_set_spte(vcpu, start, ACC_ALL, access, 0, NULL,
+ mmu_set_spte(vcpu, start, access, 0, NULL,
sp->role.level, gfn, page_to_pfn(pages[i]),
true, true);
@@ ... @@
for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
if (iterator.level == level) {
- unsigned pte_access = ACC_ALL;
-
- mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, pte_access,
+ mmu_set_spte(vcpu, iterator.sptep, ACC_ALL,
write, &emulate, level, gfn, pfn,
prefault, map_writable);
direct_pte_prefetch(vcpu, iterator.sptep);
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ ... @@
* we call mmu_set_spte() with host_writable = true because
* pte_prefetch_gfn_to_pfn always gets a writable pfn.
*/
- mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0,
- NULL, PT_PAGE_TABLE_LEVEL, gfn, pfn, true, true);
+ mmu_set_spte(vcpu, spte, pte_access, 0, NULL, PT_PAGE_TABLE_LEVEL,
+ gfn, pfn, true, true);
return true;
}
@@ ... @@
}
clear_sp_write_flooding_count(it.sptep);
- mmu_set_spte(vcpu, it.sptep, access, gw->pte_access,
- write_fault, &emulate, it.level,
- gw->gfn, pfn, prefault, map_writable);
+ mmu_set_spte(vcpu, it.sptep, gw->pte_access, write_fault, &emulate,
+ it.level, gw->gfn, pfn, prefault, map_writable);
FNAME(pte_prefetch)(vcpu, gw, it.sptep);
return emulate;
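
For reference, the hunks above leave mmu_set_spte() with the prototype below. This is only a restatement assembled from the diff, not an additional change: callers now pass just the effective pte_access they computed (ACC_ALL on the direct-map paths, gw->pte_access or the prefetched pte_access on the shadow-paging paths).

/*
 * Post-patch signature: the pt_access argument is dropped; it was
 * only consumed by the pgprintk debug output removed above.
 */
static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
			 unsigned pte_access, int write_fault, int *emulate,
			 int level, gfn_t gfn, pfn_t pfn, bool speculative,
			 bool host_writable);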