#define PFERR_PRESENT_MASK (1U << 0)
#define PFERR_WRITE_MASK (1U << 1)
#define PFERR_USER_MASK (1U << 2)
+#define PFERR_FETCH_MASK (1U << 4)
#define PT64_ROOT_LEVEL 4
#define PT32_ROOT_LEVEL 2
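These error-code masks mirror the architectural x86 #PF error code: bit 0 is set when the fault was a protection violation on a present page, bit 1 for a write access, bit 2 for a user-mode access, and the new bit 4 for an instruction fetch. Bit 3 is skipped on purpose: the hardware reserves it for reserved-bit faults, which this patch does not touch. A minimal decode sketch (the helper name is illustrative, not part of the patch):

	/* Sketch only: split a #PF error code into its architectural bits. */
	static void pferr_decode(u32 error_code)
	{
		int present = !!(error_code & PFERR_PRESENT_MASK);	/* bit 0 */
		int write = !!(error_code & PFERR_WRITE_MASK);		/* bit 1 */
		int user = !!(error_code & PFERR_USER_MASK);		/* bit 2 */
		int fetch = !!(error_code & PFERR_FETCH_MASK);		/* bit 4 */

		printk("#PF: present=%d write=%d user=%d fetch=%d\n",
		       present, write, user, fetch);
	}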
return 1;
}
+static int is_nx(struct kvm_vcpu *vcpu)
+{
+ return vcpu->shadow_efer & EFER_NX;
+}
+
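is_nx() tests the guest's EFER.NXE bit, which KVM caches in vcpu->shadow_efer; the fetch-fault check added below is only meaningful once the guest has enabled no-execute. For reference, EFER.NXE is bit 11 of the EFER MSR (0xc0000080), so the definition pulled in from the existing headers is along these lines (assumed here, not added by this patch):

	/* Assumption: mirrors the tree's existing EFER definitions. */
	#define EFER_NX (1ULL << 11)	/* EFER.NXE, MSR 0xc0000080 bit 11 */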
static int is_present_pte(unsigned long pte)
{
return pte & PT_PRESENT_MASK;
*/
static int FNAME(walk_addr)(struct guest_walker *walker,
struct kvm_vcpu *vcpu, gva_t addr,
- int write_fault, int user_fault)
+ int write_fault, int user_fault, int fetch_fault)
{
hpa_t hpa;
struct kvm_memory_slot *slot;
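For reference, the guest_walker fields this patch touches are roughly the following; this is a partial sketch reconstructed from the uses below (field types assumed), not the full definition:

	struct guest_walker {
		pt_element_t *ptep;	/* guest pte currently under inspection */
		u32 error_code;		/* #PF error code to report to the guest */
		/* ... level, table gfn, etc., unchanged ... */
	};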
if (user_fault && !(*ptep & PT_USER_MASK))
goto access_error;
+#if PTTYPE == 64
+ if (fetch_fault && is_nx(vcpu) && (*ptep & PT64_NX_MASK))
+ goto access_error;
+#endif
+
if (!(*ptep & PT_ACCESSED_MASK))
*ptep |= PT_ACCESSED_MASK; /* avoid rmw */
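The NX test is compiled only into the 64-bit instantiation of this template because non-PAE 32-bit ptes simply have no NX bit; PAE and long mode both instantiate the PTTYPE == 64 walker, so both pick up the check. PT64_NX_MASK is the top bit of a 64-bit pte, so a definition along these lines is assumed from the existing headers:

	/* Assumption: NX is bit 63 of a 64-bit (or PAE) pte. */
	#define PT64_NX_MASK (1ULL << 63)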
if (write_fault)
walker->error_code |= PFERR_WRITE_MASK;
if (user_fault)
walker->error_code |= PFERR_USER_MASK;
+ if (fetch_fault)
+ walker->error_code |= PFERR_FETCH_MASK;
return 0;
}
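The error code assembled here is exactly what the hardware would have pushed had the guest been running on bare metal. A worked example, using only the masks defined above: a refused user-mode instruction fetch yields PFERR_USER_MASK | PFERR_FETCH_MASK = 0x4 | 0x10 = 0x14, and 0x15 once the (elided) code also reports the page as present via PFERR_PRESENT_MASK.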
{
int write_fault = error_code & PFERR_WRITE_MASK;
int user_fault = error_code & PFERR_USER_MASK;
+ int fetch_fault = error_code & PFERR_FETCH_MASK;
struct guest_walker walker;
u64 *shadow_pte;
int fixed;
/*
* Look up the shadow pte for the faulting address.
*/
- r = FNAME(walk_addr)(&walker, vcpu, addr, write_fault, user_fault);
+ r = FNAME(walk_addr)(&walker, vcpu, addr, write_fault, user_fault,
+ fetch_fault);
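If the walk fails (r == 0, matching the error exit in walk_addr() above), the error code it assembled is reflected straight back into the guest, as the comment below notes. A hedged sketch of that elided path, assuming the inject_page_fault() helper provided by the surrounding mmu code:

	/* Sketch only: hand the fault, with the new error code, to the guest. */
	if (!r)
		inject_page_fault(vcpu, addr, walker.error_code);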
/*
* The page is not mapped by the guest. Let the guest handle it.
pt_element_t guest_pte;
gpa_t gpa;
- FNAME(walk_addr)(&walker, vcpu, vaddr, 0, 0);
+ FNAME(walk_addr)(&walker, vcpu, vaddr, 0, 0, 0);
guest_pte = *walker.ptep;
FNAME(release_walker)(&walker);
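gva_to_gpa() passes zero for all three fault qualifiers, so none of the permission tests in walk_addr() can fire (each is guarded by its qualifier) and the walk degenerates into a pure address translation. For illustration, the new NX guard repeated from above:

	/* fetch_fault == 0 here, so this test, like the write and user
	 * tests, is skipped during plain translation. */
	if (fetch_fault && is_nx(vcpu) && (*ptep & PT64_NX_MASK))
		goto access_error;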