KVM: ARM: Power State Coordination Interface implementation
author		Marc Zyngier <marc.zyngier@arm.com>
		Sun, 20 Jan 2013 23:28:13 +0000 (18:28 -0500)
committer	Christoffer Dall <c.dall@virtualopensystems.com>
		Wed, 23 Jan 2013 18:29:18 +0000 (13:29 -0500)
Implement the PSCI specification (ARM DEN 0022A) to control
virtual CPUs being "powered" on or off.

KVM's PSCI support is detected using the KVM_CAP_ARM_PSCI capability.
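
As a minimal illustration (not part of this patch), userspace could
probe for the capability with KVM_CHECK_EXTENSION; kvm_fd is assumed
to be an open /dev/kvm file descriptor:

	/* A positive return value means KVM will handle guest PSCI calls. */
	if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_ARM_PSCI) <= 0)
		fprintf(stderr, "PSCI emulation not supported by this kernel\n");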

A virtual CPU can now be initialized in a "powered off" state,
using the KVM_ARM_VCPU_POWER_OFF feature flag.
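
A sketch of how userspace might request this for a secondary vCPU via
KVM_ARM_VCPU_INIT (vcpu_fd is assumed to come from KVM_CREATE_VCPU,
and error handling is elided):

	struct kvm_vcpu_init init = {
		.target = KVM_ARM_TARGET_CORTEX_A15,
	};

	/* KVM_ARM_VCPU_POWER_OFF is bit 0 of features[0]. */
	init.features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF;
	if (ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init) < 0)
		perror("KVM_ARM_VCPU_INIT");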

The guest can use either SMC or HVC to execute a PSCI function.
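
For example, a guest could issue CPU_ON with the register convention
used by psci.c (function number in r0, target CPU id in r1, entry
point in r2, return code back in r0); this is only a sketch of a
possible guest-side helper, not code added by this patch:

	static long psci_cpu_on(unsigned long cpu_id, unsigned long entry)
	{
		register unsigned long r0 asm("r0") = KVM_PSCI_FN_CPU_ON;
		register unsigned long r1 asm("r1") = cpu_id;
		register unsigned long r2 asm("r2") = entry;

		/* HVC traps to KVM; an SMC would be handled the same way. */
		asm volatile(".arch_extension virt\n\t"
			     "hvc	#0"
			     : "+r" (r0)
			     : "r" (r1), "r" (r2)
			     : "memory");

		return r0;	/* KVM_PSCI_RET_SUCCESS on success */
	}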

Reviewed-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <c.dall@virtualopensystems.com>
Documentation/virtual/kvm/api.txt
arch/arm/include/asm/kvm_emulate.h
arch/arm/include/asm/kvm_host.h
arch/arm/include/asm/kvm_psci.h [new file with mode: 0644]
arch/arm/include/uapi/asm/kvm.h
arch/arm/kvm/Makefile
arch/arm/kvm/arm.c
arch/arm/kvm/psci.c [new file with mode: 0644]
include/uapi/linux/kvm.h

index 38066a7a74e1617b595647f3ba49d166169c22a9..c25439a5827414435e5a7597ec60697437fc0894 100644 (file)
@@ -2185,6 +2185,10 @@ return ENOEXEC for that vcpu.
 Note that because some registers reflect machine topology, all vcpus
 should be created before this ioctl is invoked.
 
+Possible features:
+       - KVM_ARM_VCPU_POWER_OFF: Starts the CPU in a power-off state.
+         Depends on KVM_CAP_ARM_PSCI.
+
 
 4.78 KVM_GET_REG_LIST
 
index 4c1a073280bebc0789b1530aaa848c28c03c9176..fd611996bfb5c15f53ae4b11107fc7af32087432 100644 (file)
@@ -32,6 +32,11 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu);
 void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
 void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
 
+static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
+{
+       return 1;
+}
+
 static inline u32 *vcpu_pc(struct kvm_vcpu *vcpu)
 {
        return (u32 *)&vcpu->arch.regs.usr_regs.ARM_pc;
@@ -42,6 +47,11 @@ static inline u32 *vcpu_cpsr(struct kvm_vcpu *vcpu)
        return (u32 *)&vcpu->arch.regs.usr_regs.ARM_cpsr;
 }
 
+static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
+{
+       *vcpu_cpsr(vcpu) |= PSR_T_BIT;
+}
+
 static inline bool mode_has_spsr(struct kvm_vcpu *vcpu)
 {
        unsigned long cpsr_mode = vcpu->arch.regs.usr_regs.ARM_cpsr & MODE_MASK;
index e65fc967a71db541dc369e562581c0393223806c..98b4d1a729234ba563115fd6d153104b87c93b4b 100644 (file)
@@ -30,7 +30,7 @@
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1
 #define KVM_HAVE_ONE_REG
 
-#define KVM_VCPU_MAX_FEATURES 0
+#define KVM_VCPU_MAX_FEATURES 1
 
 /* We don't currently support large pages. */
 #define KVM_HPAGE_GFN_SHIFT(x) 0
@@ -100,6 +100,9 @@ struct kvm_vcpu_arch {
        int last_pcpu;
        cpumask_t require_dcache_flush;
 
+       /* Don't run the guest on this vcpu */
+       bool pause;
+
        /* IO related fields */
        struct kvm_decode mmio_decode;
 
diff --git a/arch/arm/include/asm/kvm_psci.h b/arch/arm/include/asm/kvm_psci.h
new file mode 100644 (file)
index 0000000..9a83d98
--- /dev/null
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2012 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARM_KVM_PSCI_H__
+#define __ARM_KVM_PSCI_H__
+
+bool kvm_psci_call(struct kvm_vcpu *vcpu);
+
+#endif /* __ARM_KVM_PSCI_H__ */
index bbb6b232800496cbaa569dd87b46638ea5642b5b..3303ff5adbf3528fa155605d724bb48c32355ca3 100644 (file)
@@ -65,6 +65,8 @@ struct kvm_regs {
 #define KVM_ARM_TARGET_CORTEX_A15      0
 #define KVM_ARM_NUM_TARGETS            1
 
+#define KVM_ARM_VCPU_POWER_OFF         0 /* CPU is started in OFF state */
+
 struct kvm_vcpu_init {
        __u32 target;
        __u32 features[7];
@@ -145,4 +147,18 @@ struct kvm_arch_memory_slot {
 /* Highest supported SPI, from VGIC_NR_IRQS */
 #define KVM_ARM_IRQ_GIC_MAX            127
 
+/* PSCI interface */
+#define KVM_PSCI_FN_BASE               0x95c1ba5e
+#define KVM_PSCI_FN(n)                 (KVM_PSCI_FN_BASE + (n))
+
+#define KVM_PSCI_FN_CPU_SUSPEND                KVM_PSCI_FN(0)
+#define KVM_PSCI_FN_CPU_OFF            KVM_PSCI_FN(1)
+#define KVM_PSCI_FN_CPU_ON             KVM_PSCI_FN(2)
+#define KVM_PSCI_FN_MIGRATE            KVM_PSCI_FN(3)
+
+#define KVM_PSCI_RET_SUCCESS           0
+#define KVM_PSCI_RET_NI                        ((unsigned long)-1)
+#define KVM_PSCI_RET_INVAL             ((unsigned long)-2)
+#define KVM_PSCI_RET_DENIED            ((unsigned long)-3)
+
 #endif /* __ARM_KVM_H__ */
index 1e45cd97a7fcec2bcf741670574aa91feef02a73..ea27987bd07f0e00e27aef2bfeaf7040a686ed52 100644 (file)
@@ -18,4 +18,4 @@ kvm-arm-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
 
 obj-y += kvm-arm.o init.o interrupts.o
 obj-y += arm.o guest.o mmu.o emulate.o reset.o
-obj-y += coproc.o coproc_a15.o mmio.o
+obj-y += coproc.o coproc_a15.o mmio.o psci.o
index 8680b9ffd2aea6d2dfe9c5dd76257daf0afd884f..2d30e3afdaf99a01c764b0ac1e557c63f7a639b2 100644 (file)
@@ -43,6 +43,7 @@
 #include <asm/kvm_mmu.h>
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_coproc.h>
+#include <asm/kvm_psci.h>
 #include <asm/opcodes.h>
 
 #ifdef REQUIRES_VIRT
@@ -160,6 +161,7 @@ int kvm_dev_ioctl_check_extension(long ext)
        case KVM_CAP_SYNC_MMU:
        case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
        case KVM_CAP_ONE_REG:
+       case KVM_CAP_ARM_PSCI:
                r = 1;
                break;
        case KVM_CAP_COALESCED_MMIO:
@@ -443,14 +445,18 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
        trace_kvm_hvc(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0),
                      vcpu->arch.hsr & HSR_HVC_IMM_MASK);
 
+       if (kvm_psci_call(vcpu))
+               return 1;
+
        kvm_inject_undefined(vcpu);
        return 1;
 }
 
 static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
-       /* We don't support SMC; don't do that. */
-       kvm_debug("smc: at %08x", *vcpu_pc(vcpu));
+       if (kvm_psci_call(vcpu))
+               return 1;
+
        kvm_inject_undefined(vcpu);
        return 1;
 }
@@ -589,9 +595,26 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
                return 0;
 
        vcpu->arch.has_run_once = true;
+
+       /*
+        * Handle the "start in power-off" case by calling into the
+        * PSCI code.
+        */
+       if (test_and_clear_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features)) {
+               *vcpu_reg(vcpu, 0) = KVM_PSCI_FN_CPU_OFF;
+               kvm_psci_call(vcpu);
+       }
+
        return 0;
 }
 
+static void vcpu_pause(struct kvm_vcpu *vcpu)
+{
+       wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu);
+
+       wait_event_interruptible(*wq, !vcpu->arch.pause);
+}
+
 /**
  * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
  * @vcpu:      The VCPU pointer
@@ -635,6 +658,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 
                update_vttbr(vcpu->kvm);
 
+               if (vcpu->arch.pause)
+                       vcpu_pause(vcpu);
+
                local_irq_disable();
 
                /*
diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
new file mode 100644 (file)
index 0000000..7ee5bb7
--- /dev/null
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2012 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kvm_host.h>
+#include <linux/wait.h>
+
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_psci.h>
+
+/*
+ * This is an implementation of the Power State Coordination Interface
+ * as described in ARM document number ARM DEN 0022A.
+ */
+
+static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
+{
+       vcpu->arch.pause = true;
+}
+
+static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
+{
+       struct kvm *kvm = source_vcpu->kvm;
+       struct kvm_vcpu *vcpu;
+       wait_queue_head_t *wq;
+       unsigned long cpu_id;
+       phys_addr_t target_pc;
+
+       cpu_id = *vcpu_reg(source_vcpu, 1);
+       if (vcpu_mode_is_32bit(source_vcpu))
+               cpu_id &= ~((u32) 0);
+
+       if (cpu_id >= atomic_read(&kvm->online_vcpus))
+               return KVM_PSCI_RET_INVAL;
+
+       target_pc = *vcpu_reg(source_vcpu, 2);
+
+       vcpu = kvm_get_vcpu(kvm, cpu_id);
+
+       wq = kvm_arch_vcpu_wq(vcpu);
+       if (!waitqueue_active(wq))
+               return KVM_PSCI_RET_INVAL;
+
+       kvm_reset_vcpu(vcpu);
+
+       /* Gracefully handle Thumb2 entry point */
+       if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
+               target_pc &= ~((phys_addr_t) 1);
+               vcpu_set_thumb(vcpu);
+       }
+
+       *vcpu_pc(vcpu) = target_pc;
+       vcpu->arch.pause = false;
+       smp_mb();               /* Make sure the above is visible */
+
+       wake_up_interruptible(wq);
+
+       return KVM_PSCI_RET_SUCCESS;
+}
+
+/**
+ * kvm_psci_call - handle PSCI call if r0 value is in range
+ * @vcpu: Pointer to the VCPU struct
+ *
+ * Handle PSCI calls from guests through traps from HVC or SMC instructions.
+ * The calling convention is similar to SMC calls to the secure world:
+ * the function number is placed in r0.  This function returns true if
+ * the function number specified in r0 is within the PSCI range, and
+ * false otherwise.
+ */
+bool kvm_psci_call(struct kvm_vcpu *vcpu)
+{
+       unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0);
+       unsigned long val;
+
+       switch (psci_fn) {
+       case KVM_PSCI_FN_CPU_OFF:
+               kvm_psci_vcpu_off(vcpu);
+               val = KVM_PSCI_RET_SUCCESS;
+               break;
+       case KVM_PSCI_FN_CPU_ON:
+               val = kvm_psci_vcpu_on(vcpu);
+               break;
+       case KVM_PSCI_FN_CPU_SUSPEND:
+       case KVM_PSCI_FN_MIGRATE:
+               val = KVM_PSCI_RET_NI;
+               break;
+
+       default:
+               return false;
+       }
+
+       *vcpu_reg(vcpu, 0) = val;
+       return true;
+}
index dc63665e73ad9ffc63754c10df862400c6ebd573..7f2360a46fc2ab691a13f4c65ae1a92a4757aec2 100644 (file)
@@ -636,6 +636,7 @@ struct kvm_ppc_smmu_info {
 #define KVM_CAP_IRQFD_RESAMPLE 82
 #define KVM_CAP_PPC_BOOKE_WATCHDOG 83
 #define KVM_CAP_PPC_HTAB_FD 84
+#define KVM_CAP_ARM_PSCI 87
 
 #ifdef KVM_CAP_IRQ_ROUTING