At present, kvmppc_ld calls kvmppc_xlate, and if kvmppc_xlate returns
any error indication, it returns -ENOENT, which is taken to mean an
HPTE not found error. However, the error could have been a segment
fault (no SLB entry found) or a permission error. Similarly,
kvmppc_pte_to_hva currently does permission checking, but any error
from it is taken by kvmppc_ld to mean that the access is an emulated
MMIO access. Also, kvmppc_ld does no execute permission checking.
This fixes these problems by (a) returning any error from kvmppc_xlate
directly, (b) moving the permission check from kvmppc_pte_to_hva
into kvmppc_ld, and (c) adding an execute permission check to kvmppc_ld.
This is similar to what was done for kvmppc_st() by commit 82ff911317c3
("KVM: PPC: Deflect page write faults properly in kvmppc_st").
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
return PAGE_OFFSET;
}
-static hva_t kvmppc_pte_to_hva(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte,
- bool read)
+static hva_t kvmppc_pte_to_hva(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
hva_t hpage;
- if (read && !pte->may_read)
- goto err;
-
- if (!read && !pte->may_write)
- goto err;
-
hpage = gfn_to_hva(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
if (kvm_is_error_hva(hpage))
goto err;
{
struct kvmppc_pte pte;
hva_t hva = *eaddr;
+ int rc;
vcpu->stat.ld++;
- if (kvmppc_xlate(vcpu, *eaddr, data, false, &pte))
- goto nopte;
+ rc = kvmppc_xlate(vcpu, *eaddr, data, false, &pte);
+ if (rc)
+ return rc;
*eaddr = pte.raddr;
- hva = kvmppc_pte_to_hva(vcpu, &pte, true);
+ if (!pte.may_read)
+ return -EPERM;
+
+ if (!data && !pte.may_execute)
+ return -ENOEXEC;
+
+ hva = kvmppc_pte_to_hva(vcpu, &pte);
if (kvm_is_error_hva(hva))
goto mmio;
return EMULATE_DONE;
-nopte:
- return -ENOENT;
mmio:
return EMULATE_DO_MMIO;
}
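
With kvmppc_ld now propagating distinct error values, callers can react to
each failure mode instead of folding everything into -ENOENT or MMIO
emulation. The sketch below is purely illustrative and not part of this
patch: fetch_guest_insn is a hypothetical helper, and the action taken for
each code is only an assumed example of how the new return values could be
consumed on an instruction fetch (data == false).

	/* Hypothetical caller, for illustration only. */
	static int fetch_guest_insn(struct kvm_vcpu *vcpu, ulong eaddr, u32 *insn)
	{
		int rc = kvmppc_ld(vcpu, &eaddr, sizeof(*insn), insn, false);

		switch (rc) {
		case EMULATE_DONE:      /* translated and copied from guest memory */
			return 0;
		case EMULATE_DO_MMIO:   /* backing address is emulated MMIO */
			return -EAGAIN; /* assumed: defer to an MMIO path */
		case -ENOEXEC:          /* execute permission denied on the fetch */
		case -EPERM:            /* read permission denied */
		case -ENOENT:           /* no HPTE found for the address */
		default:                /* other kvmppc_xlate errors, e.g. no SLB entry */
			return rc;      /* e.g. reflect a fault to the guest */
		}
	}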