 #define __KVM_HANDLER(area, h, n)					\
 do_kvm_##n:								\
+	BEGIN_FTR_SECTION_NESTED(947)					\
+	ld	r10,area+EX_CFAR(r13);					\
+	std	r10,HSTATE_CFAR(r13);					\
+	END_FTR_SECTION_NESTED(CPU_FTR_CFAR,CPU_FTR_CFAR,947);		\
 	ld	r10,area+EX_R10(r13);					\
-	stw	r9,HSTATE_SCRATCH1(r13);			\
+	stw	r9,HSTATE_SCRATCH1(r13);				\
 	ld	r9,area+EX_R9(r13);					\
-	std	r12,HSTATE_SCRATCH0(r13);			\
+	std	r12,HSTATE_SCRATCH0(r13);				\
 	li	r12,n;							\
 	b	kvmppc_interrupt
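
The hunk above extends the first-level KVM interrupt stub generated for each exception vector. Any taken branch overwrites the CFAR (Come-From Address Register), so the exception entry code has already parked its value in the per-vector save area at EX_CFAR; the stub now relays that value into the PACA's KVM host-state area before branching to kvmppc_interrupt, where the guest exit path can pick it up. The nested feature section (947 is just a locally unique label) lets the boot-time fixup code turn both new instructions into no-ops on CPUs without CPU_FTR_CFAR. Note that the two stw/std pairs marked removed and re-added change only their continuation-backslash alignment.

HSTATE_CFAR here, and the VCPU_CFAR offset added below, presuppose two new structure fields. A minimal sketch, assuming the field names match the offsetof() expressions in the asm-offsets hunk and the types follow kernel convention:

/* --- sketch only, not part of the patch: the fields the new offsets
 * refer to, with all surrounding members elided --- */
typedef unsigned long long u64;
typedef unsigned long ulong;

struct kvmppc_host_state {
	/* ... existing members ... */
	u64 cfar;	/* CFAR captured by __KVM_HANDLER on guest exit */
};

struct kvm_vcpu_arch {
	/* ... existing members ... */
	ulong cfar;	/* guest CFAR: saved on exit, restored on entry */
};
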
 	DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
 	DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap));
 	DEFINE(VCPU_PTID, offsetof(struct kvm_vcpu, arch.ptid));
+	DEFINE(VCPU_CFAR, offsetof(struct kvm_vcpu, arch.cfar));
 	DEFINE(VCORE_ENTRY_EXIT, offsetof(struct kvmppc_vcore, entry_exit_count));
 	DEFINE(VCORE_NAP_COUNT, offsetof(struct kvmppc_vcore, nap_count));
 	DEFINE(VCORE_IN_GUEST, offsetof(struct kvmppc_vcore, in_guest));
 	DEFINE(IPI_PRIORITY, IPI_PRIORITY);
 #endif /* CONFIG_KVM_BOOK3S_64_HV */
+#ifdef CONFIG_PPC_BOOK3S_64
+	HSTATE_FIELD(HSTATE_CFAR, cfar);
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
 #else /* CONFIG_PPC_BOOK3S */
 	DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
 	DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
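
asm-offsets.c exists solely to export C structure layouts to assembly: each DEFINE() forces the compiler to print a constant into the generated .s file, which the build then turns into #define lines in asm-offsets.h, and HSTATE_FIELD() is a wrapper that computes the offset of a kvm_hstate member within the PACA. A self-contained illustration of the mechanism (the _SKETCH names are made up for the example; compile with gcc -S only, since the emitted text is deliberately not valid assembler input and the kernel never assembles this file):

/* --- illustrative sketch, not part of the patch --- */
#include <stddef.h>

struct host_state_sketch {
	unsigned long scratch0;
	unsigned long cfar;	/* stand-in for paca->kvm_hstate.cfar */
};

/* Same trick as the kernel's DEFINE(): the "i" constraint makes the
 * compiler print the constant into the assembly listing, where a
 * build script rewrites each "->SYM value" line into "#define SYM value". */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

int main(void)
{
	DEFINE(HSTATE_CFAR_SKETCH, offsetof(struct host_state_sketch, cfar));
	return 0;
}
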
 	/* Enter guest */
+BEGIN_FTR_SECTION
+	ld	r5, VCPU_CFAR(r4)
+	mtspr	SPRN_CFAR, r5
+END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
+
 	ld	r5, VCPU_LR(r4)
 	lwz	r6, VCPU_CR(r4)
 	mtlr	r5
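
On the guest-entry side above, BEGIN_FTR_SECTION/END_FTR_SECTION_IFSET bracket instructions that the boot-time feature fixup replaces with nops when the CPU lacks CPU_FTR_CFAR, and the mtspr sits in the final stretch of the entry path so that no later taken branch clobbers the just-restored value. In conceptual C (a sketch: the real test is patched once at boot rather than evaluated on every entry, and cpu_has_feature()/mtspr() are the kernel's existing helpers):

/* --- conceptual sketch, not part of the patch --- */
static inline void restore_guest_cfar(struct kvm_vcpu *vcpu)
{
	if (cpu_has_feature(CPU_FTR_CFAR))	/* nop-patched at boot in the real code */
		mtspr(SPRN_CFAR, vcpu->arch.cfar);
}

The exit path below, in kvmppc_interrupt, performs the mirror operation: once CR and R12 have been recovered from the scratch slots, it moves the CFAR value that __KVM_HANDLER parked in HSTATE_CFAR into the vcpu.
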
 	lwz	r4, HSTATE_SCRATCH1(r13)
 	std	r3, VCPU_GPR(R12)(r9)
 	stw	r4, VCPU_CR(r9)
+BEGIN_FTR_SECTION
+	ld	r3, HSTATE_CFAR(r13)
+	std	r3, VCPU_CFAR(r9)
+END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
 	/* Restore R1/R2 so we can handle faults */
 	ld	r1, HSTATE_HOST_R1(r13)
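
With the value round-tripped through HSTATE_CFAR and vcpu->arch.cfar, the guest observes a stable CFAR across hypervisor interrupts, and the host has the guest's CFAR available for debugging. As a usage illustration only, assuming the separate one-reg plumbing for KVM_REG_PPC_CFAR from the same kernel era (it is not part of this diff), userspace could read the saved value like so:

/* --- userspace sketch; assumes KVM_REG_PPC_CFAR one-reg support --- */
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static uint64_t read_guest_cfar(int vcpu_fd)
{
	uint64_t cfar = 0;
	struct kvm_one_reg reg = {
		.id   = KVM_REG_PPC_CFAR,
		.addr = (uintptr_t)&cfar,
	};

	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
		perror("KVM_GET_ONE_REG");
	return cfar;
}
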