+/*
+ * Per-CPU callback (run via on_each_cpu(); see the caller below): asks the
+ * host to map the KVM magic page for this CPU and reports the host's
+ * feature word back through *data.
+ *
+ * data points to a u32 (the caller passes &features) that receives out[0]
+ * from the hypercall — presumably a feature bitmap; verify against the
+ * host-side handler.
+ */
static void kvm_map_magic_page(void *data)
{
- kvm_hypercall2(KVM_HC_PPC_MAP_MAGIC_PAGE,
- KVM_MAGIC_PAGE, /* Physical Address */
- KVM_MAGIC_PAGE); /* Effective Address */
+ u32 *features = data;
+
+ /* in[]/out[] mirror the hypercall argument/return registers. */
+ ulong in[8];
+ ulong out[8];
+
+ /* in[0] = physical address, in[1] = effective address of the magic
+  * page — same two KVM_MAGIC_PAGE arguments the removed
+  * kvm_hypercall2() call passed above. */
+ in[0] = KVM_MAGIC_PAGE;
+ in[1] = KVM_MAGIC_PAGE;
+
+ kvm_hypercall(in, out, HC_VENDOR_KVM | KVM_HC_PPC_MAP_MAGIC_PAGE);
+
+ /* Hand the host's first return value back to the caller. */
+ *features = out[0];
}
-static void kvm_check_ins(u32 *inst)
+static void kvm_check_ins(u32 *inst, u32 features)
{
u32 _inst = *inst;
u32 inst_no_rt = _inst & ~KVM_MASK_RT;
u32 *p;
u32 *start, *end;
u32 tmp;
+ u32 features;
/* Tell the host to map the magic page to -4096 on all CPUs */
- on_each_cpu(kvm_map_magic_page, NULL, 1);
+ on_each_cpu(kvm_map_magic_page, &features, 1);
/* Quick self-test to see if the mapping works */
if (__get_user(tmp, (u32*)KVM_MAGIC_PAGE)) {
end = (void*)_etext;
for (p = start; p < end; p++)
- kvm_check_ins(p);
+ kvm_check_ins(p, features);
printk(KERN_INFO "KVM: Live patching for a fast VM %s\n",
kvm_patching_worked ? "worked" : "failed");
vcpu->arch.magic_page_pa = param1;
vcpu->arch.magic_page_ea = param2;
+ r2 = 0;
+
r = HC_EV_SUCCESS;
break;
}
#endif
/* Second return value is in r4 */
- kvmppc_set_gpr(vcpu, 4, r2);
break;
default:
r = HC_EV_UNIMPLEMENTED;
break;
}
+ kvmppc_set_gpr(vcpu, 4, r2);
+
return r;
}