extern u32 kvmppc_trampoline_lowmem;
extern u32 kvmppc_trampoline_enter;
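+/* Call a function in real mode: branches to srr0 with MSR srr1; r5 is swapped into CTR */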
+extern void kvmppc_rmcall(ulong srr0, ulong srr1);
static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
{
ulong scratch0;
ulong scratch1;
ulong vmhandler;
- ulong rmhandler;
};
#endif /*__ASSEMBLY__ */
ulong trampoline_lowmem;
ulong trampoline_enter;
ulong highmem_handler;
+ ulong rmcall;
ulong host_paca_phys;
struct kvmppc_mmu mmu;
#endif
DEFINE(PACA_KVM_HOST_R2, offsetof(struct paca_struct, shadow_vcpu.host_r2));
DEFINE(PACA_KVM_VMHANDLER, offsetof(struct paca_struct,
shadow_vcpu.vmhandler));
- DEFINE(PACA_KVM_RMHANDLER, offsetof(struct paca_struct,
- shadow_vcpu.rmhandler));
DEFINE(PACA_KVM_SCRATCH0, offsetof(struct paca_struct,
shadow_vcpu.scratch0));
DEFINE(PACA_KVM_SCRATCH1, offsetof(struct paca_struct,
shadow_vcpu.scratch1));
DEFINE(VCPU_TRAMPOLINE_LOWMEM, offsetof(struct kvm_vcpu, arch.trampoline_lowmem));
DEFINE(VCPU_TRAMPOLINE_ENTER, offsetof(struct kvm_vcpu, arch.trampoline_enter));
DEFINE(VCPU_HIGHMEM_HANDLER, offsetof(struct kvm_vcpu, arch.highmem_handler));
+ DEFINE(VCPU_RMCALL, offsetof(struct kvm_vcpu, arch.rmcall));
DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags));
#else
DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
vcpu->arch.trampoline_lowmem = kvmppc_trampoline_lowmem;
vcpu->arch.trampoline_enter = kvmppc_trampoline_enter;
vcpu->arch.highmem_handler = (ulong)kvmppc_handler_highmem;
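+ /* dereference the ppc64 function descriptor to get the real entry address */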
+ vcpu->arch.rmcall = *(ulong*)kvmppc_rmcall;
vcpu->arch.shadow_msr = MSR_USER64;
EXPORT_SYMBOL_GPL(kvmppc_trampoline_enter);
EXPORT_SYMBOL_GPL(kvmppc_trampoline_lowmem);
+EXPORT_SYMBOL_GPL(kvmppc_rmcall);
ld r3, VCPU_HIGHMEM_HANDLER(r4)
std r3, PACA_KVM_VMHANDLER(r13)
- ld r3, VCPU_TRAMPOLINE_ENTER(r4)
- std r3, PACA_KVM_RMHANDLER(r13)
-
kvm_start_lightweight:
ld r9, VCPU_PC(r4) /* r9 = vcpu->arch.pc */
ld r10, VCPU_SHADOW_MSR(r4) /* r10 = vcpu->arch.shadow_msr */
/* Load some guest state in the respective registers */
- ld r3, VCPU_CTR(r4) /* r3 = vcpu->arch.ctr */
- mtctr r3 /* CTR = r3 */
+ ld r5, VCPU_CTR(r4) /* r5 = vcpu->arch.ctr */
+ /* will be swapped in by rmcall */
ld r3, VCPU_LR(r4) /* r3 = vcpu->arch.lr */
mtlr r3 /* LR = r3 */
no_dcbz32_on:
- /* This sets the Magic value for the trampoline */
-
- /* XXX this needs to move into a safe function, so we can
- be sure we don't get any interrupts */
-
- li r11, 1
- stb r11, PACA_KVM_IN_GUEST(r13)
-
- ld r3, PACA_KVM_RMHANDLER(r13)
- mtsrr0 r3
+ ld r6, VCPU_RMCALL(r4)
+ mtctr r6
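+ /* bctr below enters kvmppc_rmcall, which swaps the guest CTR (r5) back in */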
- LOAD_REG_IMMEDIATE(r3, MSR_KERNEL & ~(MSR_IR | MSR_DR))
- mtsrr1 r3
+ ld r3, VCPU_TRAMPOLINE_ENTER(r4)
+ LOAD_REG_IMMEDIATE(r4, MSR_KERNEL & ~(MSR_IR | MSR_DR))
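+ /* r3/r4 are the srr0/srr1 arguments: enter the trampoline with translation off */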
/* Jump to SLB patching handler and into our guest */
- RFI
+ bctr
/*
* This is the handler in module memory. It is jumped to from the
* lowmem trampoline code, so it is basically the guest exit code.
*/
blr
kvmppc_handler_lowmem_trampoline_end:
+/*
+ * Call a function in real mode
+ *
+ * Input Registers:
+ *
+ * R3 = function
+ * R4 = MSR
+ * R5 = CTR
+ *
+ */
+_GLOBAL(kvmppc_rmcall)
+ mtmsr r4 /* Disable relocation and interrupts, so the
+ mtsrr/RFI sequence can't be interrupted */
+ mtctr r5 /* CTR = caller-provided value (the guest CTR on entry) */
+ mtsrr0 r3 /* SRR0 = function to jump to */
+ mtsrr1 r4 /* SRR1 = MSR to run it with */
+ RFI
+
.global kvmppc_trampoline_lowmem
kvmppc_trampoline_lowmem:
.long kvmppc_handler_lowmem_trampoline - _stext
mtsrr0 r9
mtsrr1 r10
+ /* Activate guest mode, so faults get handled by KVM */
+ li r11, KVM_GUEST_MODE_GUEST
+ stb r11, PACA_KVM_IN_GUEST(r13)
+
/* Remove LPAR shadow entries */
#if SLB_NUM_BOLTED == 3