/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */
19 #include <linux/errno.h>
20 #include <linux/err.h>
21 #include <linux/kvm_host.h>
22 #include <linux/module.h>
23 #include <linux/vmalloc.h>
25 #include <linux/mman.h>
26 #include <linux/sched.h>
27 #include <linux/kvm.h>
28 #include <trace/events/kvm.h>
30 #define CREATE_TRACE_POINTS
33 #include <asm/unified.h>
34 #include <asm/uaccess.h>
35 #include <asm/ptrace.h>
37 #include <asm/cputype.h>
38 #include <asm/tlbflush.h>
39 #include <asm/cacheflush.h>
41 #include <asm/kvm_arm.h>
42 #include <asm/kvm_asm.h>
43 #include <asm/kvm_mmu.h>
44 #include <asm/kvm_emulate.h>
45 #include <asm/kvm_coproc.h>
46 #include <asm/kvm_psci.h>
47 #include <asm/opcodes.h>
50 __asm__(".arch_extension virt");
53 static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page
);
54 static struct vfp_hard_struct __percpu
*kvm_host_vfp_state
;
55 static unsigned long hyp_default_vectors
;
57 /* Per-CPU variable containing the currently running vcpu. */
58 static DEFINE_PER_CPU(struct kvm_vcpu
*, kvm_arm_running_vcpu
);
60 /* The VMID used in the VTTBR */
61 static atomic64_t kvm_vmid_gen
= ATOMIC64_INIT(1);
62 static u8 kvm_next_vmid
;
63 static DEFINE_SPINLOCK(kvm_vmid_lock
);
65 static bool vgic_present
;
67 static void kvm_arm_set_running_vcpu(struct kvm_vcpu
*vcpu
)
69 BUG_ON(preemptible());
70 __get_cpu_var(kvm_arm_running_vcpu
) = vcpu
;
74 * kvm_arm_get_running_vcpu - get the vcpu running on the current CPU.
75 * Must be called from non-preemptible context
77 struct kvm_vcpu
*kvm_arm_get_running_vcpu(void)
79 BUG_ON(preemptible());
80 return __get_cpu_var(kvm_arm_running_vcpu
);
84 * kvm_arm_get_running_vcpus - get the per-CPU array of currently running vcpus.
86 struct kvm_vcpu __percpu
**kvm_get_running_vcpus(void)
88 return &kvm_arm_running_vcpu
;
91 int kvm_arch_hardware_enable(void *garbage
)
96 int kvm_arch_vcpu_should_kick(struct kvm_vcpu
*vcpu
)
98 return kvm_vcpu_exiting_guest_mode(vcpu
) == IN_GUEST_MODE
;
101 void kvm_arch_hardware_disable(void *garbage
)
105 int kvm_arch_hardware_setup(void)
110 void kvm_arch_hardware_unsetup(void)
114 void kvm_arch_check_processor_compat(void *rtn
)
119 void kvm_arch_sync_events(struct kvm
*kvm
)
124 * kvm_arch_init_vm - initializes a VM data structure
125 * @kvm: pointer to the KVM struct
127 int kvm_arch_init_vm(struct kvm
*kvm
, unsigned long type
)
134 ret
= kvm_alloc_stage2_pgd(kvm
);
138 ret
= create_hyp_mappings(kvm
, kvm
+ 1);
140 goto out_free_stage2_pgd
;
142 /* Mark the initial VMID generation invalid */
143 kvm
->arch
.vmid_gen
= 0;
147 kvm_free_stage2_pgd(kvm
);
152 int kvm_arch_vcpu_fault(struct kvm_vcpu
*vcpu
, struct vm_fault
*vmf
)
154 return VM_FAULT_SIGBUS
;
157 void kvm_arch_free_memslot(struct kvm_memory_slot
*free
,
158 struct kvm_memory_slot
*dont
)
162 int kvm_arch_create_memslot(struct kvm_memory_slot
*slot
, unsigned long npages
)
168 * kvm_arch_destroy_vm - destroy the VM data structure
169 * @kvm: pointer to the KVM struct
171 void kvm_arch_destroy_vm(struct kvm
*kvm
)
175 kvm_free_stage2_pgd(kvm
);
177 for (i
= 0; i
< KVM_MAX_VCPUS
; ++i
) {
179 kvm_arch_vcpu_free(kvm
->vcpus
[i
]);
180 kvm
->vcpus
[i
] = NULL
;
185 int kvm_dev_ioctl_check_extension(long ext
)
189 case KVM_CAP_IRQCHIP
:
192 case KVM_CAP_USER_MEMORY
:
193 case KVM_CAP_SYNC_MMU
:
194 case KVM_CAP_DESTROY_MEMORY_REGION_WORKS
:
195 case KVM_CAP_ONE_REG
:
196 case KVM_CAP_ARM_PSCI
:
199 case KVM_CAP_COALESCED_MMIO
:
200 r
= KVM_COALESCED_MMIO_PAGE_OFFSET
;
202 case KVM_CAP_ARM_SET_DEVICE_ADDR
:
205 case KVM_CAP_NR_VCPUS
:
206 r
= num_online_cpus();
208 case KVM_CAP_MAX_VCPUS
:
218 long kvm_arch_dev_ioctl(struct file
*filp
,
219 unsigned int ioctl
, unsigned long arg
)
224 int kvm_arch_set_memory_region(struct kvm
*kvm
,
225 struct kvm_userspace_memory_region
*mem
,
226 struct kvm_memory_slot old
,
232 int kvm_arch_prepare_memory_region(struct kvm
*kvm
,
233 struct kvm_memory_slot
*memslot
,
234 struct kvm_memory_slot old
,
235 struct kvm_userspace_memory_region
*mem
,
241 void kvm_arch_commit_memory_region(struct kvm
*kvm
,
242 struct kvm_userspace_memory_region
*mem
,
243 struct kvm_memory_slot old
,
248 void kvm_arch_flush_shadow_all(struct kvm
*kvm
)
252 void kvm_arch_flush_shadow_memslot(struct kvm
*kvm
,
253 struct kvm_memory_slot
*slot
)
257 struct kvm_vcpu
*kvm_arch_vcpu_create(struct kvm
*kvm
, unsigned int id
)
260 struct kvm_vcpu
*vcpu
;
262 vcpu
= kmem_cache_zalloc(kvm_vcpu_cache
, GFP_KERNEL
);
268 err
= kvm_vcpu_init(vcpu
, kvm
, id
);
272 err
= create_hyp_mappings(vcpu
, vcpu
+ 1);
278 kvm_vcpu_uninit(vcpu
);
280 kmem_cache_free(kvm_vcpu_cache
, vcpu
);
285 int kvm_arch_vcpu_postcreate(struct kvm_vcpu
*vcpu
)
290 void kvm_arch_vcpu_free(struct kvm_vcpu
*vcpu
)
292 kvm_mmu_free_memory_caches(vcpu
);
293 kvm_timer_vcpu_terminate(vcpu
);
294 kmem_cache_free(kvm_vcpu_cache
, vcpu
);
297 void kvm_arch_vcpu_destroy(struct kvm_vcpu
*vcpu
)
299 kvm_arch_vcpu_free(vcpu
);
302 int kvm_cpu_has_pending_timer(struct kvm_vcpu
*vcpu
)
307 int __attribute_const__
kvm_target_cpu(void)
309 unsigned long implementor
= read_cpuid_implementor();
310 unsigned long part_number
= read_cpuid_part_number();
312 if (implementor
!= ARM_CPU_IMP_ARM
)
315 switch (part_number
) {
316 case ARM_CPU_PART_CORTEX_A15
:
317 return KVM_ARM_TARGET_CORTEX_A15
;
323 int kvm_arch_vcpu_init(struct kvm_vcpu
*vcpu
)
327 /* Force users to call KVM_ARM_VCPU_INIT */
328 vcpu
->arch
.target
= -1;
331 ret
= kvm_vgic_vcpu_init(vcpu
);
335 /* Set up the timer */
336 kvm_timer_vcpu_init(vcpu
);
341 void kvm_arch_vcpu_uninit(struct kvm_vcpu
*vcpu
)
345 void kvm_arch_vcpu_load(struct kvm_vcpu
*vcpu
, int cpu
)
348 vcpu
->arch
.vfp_host
= this_cpu_ptr(kvm_host_vfp_state
);
351 * Check whether this vcpu requires the cache to be flushed on
352 * this physical CPU. This is a consequence of doing dcache
353 * operations by set/way on this vcpu. We do it here to be in
354 * a non-preemptible section.
356 if (cpumask_test_and_clear_cpu(cpu
, &vcpu
->arch
.require_dcache_flush
))
357 flush_cache_all(); /* We'd really want v7_flush_dcache_all() */
359 kvm_arm_set_running_vcpu(vcpu
);
362 void kvm_arch_vcpu_put(struct kvm_vcpu
*vcpu
)
364 kvm_arm_set_running_vcpu(NULL
);
367 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu
*vcpu
,
368 struct kvm_guest_debug
*dbg
)
374 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu
*vcpu
,
375 struct kvm_mp_state
*mp_state
)
380 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu
*vcpu
,
381 struct kvm_mp_state
*mp_state
)
387 * kvm_arch_vcpu_runnable - determine if the vcpu can be scheduled
388 * @v: The VCPU pointer
390 * If the guest CPU is not waiting for interrupts or an interrupt line is
391 * asserted, the CPU is by definition runnable.
393 int kvm_arch_vcpu_runnable(struct kvm_vcpu
*v
)
395 return !!v
->arch
.irq_lines
|| kvm_vgic_vcpu_pending_irq(v
);
398 /* Just ensure a guest exit from a particular CPU */
399 static void exit_vm_noop(void *info
)
403 void force_vm_exit(const cpumask_t
*mask
)
405 smp_call_function_many(mask
, exit_vm_noop
, NULL
, true);
409 * need_new_vmid_gen - check that the VMID is still valid
410 * @kvm: The VM's VMID to checkt
412 * return true if there is a new generation of VMIDs being used
414 * The hardware supports only 256 values with the value zero reserved for the
415 * host, so we check if an assigned value belongs to a previous generation,
416 * which which requires us to assign a new value. If we're the first to use a
417 * VMID for the new generation, we must flush necessary caches and TLBs on all
420 static bool need_new_vmid_gen(struct kvm
*kvm
)
422 return unlikely(kvm
->arch
.vmid_gen
!= atomic64_read(&kvm_vmid_gen
));
426 * update_vttbr - Update the VTTBR with a valid VMID before the guest runs
427 * @kvm The guest that we are about to run
429 * Called from kvm_arch_vcpu_ioctl_run before entering the guest to ensure the
430 * VM has a valid VMID, otherwise assigns a new one and flushes corresponding
433 static void update_vttbr(struct kvm
*kvm
)
435 phys_addr_t pgd_phys
;
438 if (!need_new_vmid_gen(kvm
))
441 spin_lock(&kvm_vmid_lock
);
444 * We need to re-check the vmid_gen here to ensure that if another vcpu
445 * already allocated a valid vmid for this vm, then this vcpu should
448 if (!need_new_vmid_gen(kvm
)) {
449 spin_unlock(&kvm_vmid_lock
);
453 /* First user of a new VMID generation? */
454 if (unlikely(kvm_next_vmid
== 0)) {
455 atomic64_inc(&kvm_vmid_gen
);
459 * On SMP we know no other CPUs can use this CPU's or each
460 * other's VMID after force_vm_exit returns since the
461 * kvm_vmid_lock blocks them from reentry to the guest.
463 force_vm_exit(cpu_all_mask
);
465 * Now broadcast TLB + ICACHE invalidation over the inner
466 * shareable domain to make sure all data structures are
469 kvm_call_hyp(__kvm_flush_vm_context
);
472 kvm
->arch
.vmid_gen
= atomic64_read(&kvm_vmid_gen
);
473 kvm
->arch
.vmid
= kvm_next_vmid
;
476 /* update vttbr to be used with the new vmid */
477 pgd_phys
= virt_to_phys(kvm
->arch
.pgd
);
478 vmid
= ((u64
)(kvm
->arch
.vmid
) << VTTBR_VMID_SHIFT
) & VTTBR_VMID_MASK
;
479 kvm
->arch
.vttbr
= pgd_phys
& VTTBR_BADDR_MASK
;
480 kvm
->arch
.vttbr
|= vmid
;
482 spin_unlock(&kvm_vmid_lock
);
485 static int handle_svc_hyp(struct kvm_vcpu
*vcpu
, struct kvm_run
*run
)
487 /* SVC called from Hyp mode should never get here */
488 kvm_debug("SVC called from Hyp mode shouldn't go here\n");
490 return -EINVAL
; /* Squash warning */
493 static int handle_hvc(struct kvm_vcpu
*vcpu
, struct kvm_run
*run
)
495 trace_kvm_hvc(*vcpu_pc(vcpu
), *vcpu_reg(vcpu
, 0),
496 vcpu
->arch
.hsr
& HSR_HVC_IMM_MASK
);
498 if (kvm_psci_call(vcpu
))
501 kvm_inject_undefined(vcpu
);
505 static int handle_smc(struct kvm_vcpu
*vcpu
, struct kvm_run
*run
)
507 if (kvm_psci_call(vcpu
))
510 kvm_inject_undefined(vcpu
);
514 static int handle_pabt_hyp(struct kvm_vcpu
*vcpu
, struct kvm_run
*run
)
516 /* The hypervisor should never cause aborts */
517 kvm_err("Prefetch Abort taken from Hyp mode at %#08x (HSR: %#08x)\n",
518 vcpu
->arch
.hxfar
, vcpu
->arch
.hsr
);
522 static int handle_dabt_hyp(struct kvm_vcpu
*vcpu
, struct kvm_run
*run
)
524 /* This is either an error in the ws. code or an external abort */
525 kvm_err("Data Abort taken from Hyp mode at %#08x (HSR: %#08x)\n",
526 vcpu
->arch
.hxfar
, vcpu
->arch
.hsr
);
530 typedef int (*exit_handle_fn
)(struct kvm_vcpu
*, struct kvm_run
*);
531 static exit_handle_fn arm_exit_handlers
[] = {
532 [HSR_EC_WFI
] = kvm_handle_wfi
,
533 [HSR_EC_CP15_32
] = kvm_handle_cp15_32
,
534 [HSR_EC_CP15_64
] = kvm_handle_cp15_64
,
535 [HSR_EC_CP14_MR
] = kvm_handle_cp14_access
,
536 [HSR_EC_CP14_LS
] = kvm_handle_cp14_load_store
,
537 [HSR_EC_CP14_64
] = kvm_handle_cp14_access
,
538 [HSR_EC_CP_0_13
] = kvm_handle_cp_0_13_access
,
539 [HSR_EC_CP10_ID
] = kvm_handle_cp10_id
,
540 [HSR_EC_SVC_HYP
] = handle_svc_hyp
,
541 [HSR_EC_HVC
] = handle_hvc
,
542 [HSR_EC_SMC
] = handle_smc
,
543 [HSR_EC_IABT
] = kvm_handle_guest_abort
,
544 [HSR_EC_IABT_HYP
] = handle_pabt_hyp
,
545 [HSR_EC_DABT
] = kvm_handle_guest_abort
,
546 [HSR_EC_DABT_HYP
] = handle_dabt_hyp
,
550 * A conditional instruction is allowed to trap, even though it
551 * wouldn't be executed. So let's re-implement the hardware, in
554 static bool kvm_condition_valid(struct kvm_vcpu
*vcpu
)
556 unsigned long cpsr
, cond
, insn
;
559 * Exception Code 0 can only happen if we set HCR.TGE to 1, to
560 * catch undefined instructions, and then we won't get past
561 * the arm_exit_handlers test anyway.
563 BUG_ON(((vcpu
->arch
.hsr
& HSR_EC
) >> HSR_EC_SHIFT
) == 0);
565 /* Top two bits non-zero? Unconditional. */
566 if (vcpu
->arch
.hsr
>> 30)
569 cpsr
= *vcpu_cpsr(vcpu
);
571 /* Is condition field valid? */
572 if ((vcpu
->arch
.hsr
& HSR_CV
) >> HSR_CV_SHIFT
)
573 cond
= (vcpu
->arch
.hsr
& HSR_COND
) >> HSR_COND_SHIFT
;
575 /* This can happen in Thumb mode: examine IT state. */
578 it
= ((cpsr
>> 8) & 0xFC) | ((cpsr
>> 25) & 0x3);
580 /* it == 0 => unconditional. */
584 /* The cond for this insn works out as the top 4 bits. */
588 /* Shift makes it look like an ARM-mode instruction */
590 return arm_check_condition(insn
, cpsr
) != ARM_OPCODE_CONDTEST_FAIL
;
594 * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
595 * proper exit to QEMU.
597 static int handle_exit(struct kvm_vcpu
*vcpu
, struct kvm_run
*run
,
600 unsigned long hsr_ec
;
602 switch (exception_index
) {
603 case ARM_EXCEPTION_IRQ
:
605 case ARM_EXCEPTION_UNDEFINED
:
606 kvm_err("Undefined exception in Hyp mode at: %#08x\n",
609 panic("KVM: Hypervisor undefined exception!\n");
610 case ARM_EXCEPTION_DATA_ABORT
:
611 case ARM_EXCEPTION_PREF_ABORT
:
612 case ARM_EXCEPTION_HVC
:
613 hsr_ec
= (vcpu
->arch
.hsr
& HSR_EC
) >> HSR_EC_SHIFT
;
615 if (hsr_ec
>= ARRAY_SIZE(arm_exit_handlers
)
616 || !arm_exit_handlers
[hsr_ec
]) {
617 kvm_err("Unknown exception class: %#08lx, "
618 "hsr: %#08x\n", hsr_ec
,
619 (unsigned int)vcpu
->arch
.hsr
);
624 * See ARM ARM B1.14.1: "Hyp traps on instructions
625 * that fail their condition code check"
627 if (!kvm_condition_valid(vcpu
)) {
628 bool is_wide
= vcpu
->arch
.hsr
& HSR_IL
;
629 kvm_skip_instr(vcpu
, is_wide
);
633 return arm_exit_handlers
[hsr_ec
](vcpu
, run
);
635 kvm_pr_unimpl("Unsupported exception type: %d",
637 run
->exit_reason
= KVM_EXIT_INTERNAL_ERROR
;
642 static int kvm_vcpu_first_run_init(struct kvm_vcpu
*vcpu
)
644 if (likely(vcpu
->arch
.has_run_once
))
647 vcpu
->arch
.has_run_once
= true;
650 * Initialize the VGIC before running a vcpu the first time on
653 if (irqchip_in_kernel(vcpu
->kvm
) &&
654 unlikely(!vgic_initialized(vcpu
->kvm
))) {
655 int ret
= kvm_vgic_init(vcpu
->kvm
);
661 * Handle the "start in power-off" case by calling into the
664 if (test_and_clear_bit(KVM_ARM_VCPU_POWER_OFF
, vcpu
->arch
.features
)) {
665 *vcpu_reg(vcpu
, 0) = KVM_PSCI_FN_CPU_OFF
;
672 static void vcpu_pause(struct kvm_vcpu
*vcpu
)
674 wait_queue_head_t
*wq
= kvm_arch_vcpu_wq(vcpu
);
676 wait_event_interruptible(*wq
, !vcpu
->arch
.pause
);
680 * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
681 * @vcpu: The VCPU pointer
682 * @run: The kvm_run structure pointer used for userspace state exchange
684 * This function is called through the VCPU_RUN ioctl called from user space. It
685 * will execute VM code in a loop until the time slice for the process is used
686 * or some emulation is needed from user space in which case the function will
687 * return with return value 0 and with the kvm_run structure filled in with the
688 * required data for the requested emulation.
690 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu
*vcpu
, struct kvm_run
*run
)
695 /* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */
696 if (unlikely(vcpu
->arch
.target
< 0))
699 ret
= kvm_vcpu_first_run_init(vcpu
);
703 if (run
->exit_reason
== KVM_EXIT_MMIO
) {
704 ret
= kvm_handle_mmio_return(vcpu
, vcpu
->run
);
709 if (vcpu
->sigset_active
)
710 sigprocmask(SIG_SETMASK
, &vcpu
->sigset
, &sigsaved
);
713 run
->exit_reason
= KVM_EXIT_UNKNOWN
;
716 * Check conditions before entering the guest
720 update_vttbr(vcpu
->kvm
);
722 if (vcpu
->arch
.pause
)
725 kvm_vgic_flush_hwstate(vcpu
);
726 kvm_timer_flush_hwstate(vcpu
);
731 * Re-check atomic conditions
733 if (signal_pending(current
)) {
735 run
->exit_reason
= KVM_EXIT_INTR
;
738 if (ret
<= 0 || need_new_vmid_gen(vcpu
->kvm
)) {
740 kvm_timer_sync_hwstate(vcpu
);
741 kvm_vgic_sync_hwstate(vcpu
);
745 /**************************************************************
748 trace_kvm_entry(*vcpu_pc(vcpu
));
750 vcpu
->mode
= IN_GUEST_MODE
;
752 ret
= kvm_call_hyp(__kvm_vcpu_run
, vcpu
);
754 vcpu
->mode
= OUTSIDE_GUEST_MODE
;
755 vcpu
->arch
.last_pcpu
= smp_processor_id();
757 trace_kvm_exit(*vcpu_pc(vcpu
));
759 * We may have taken a host interrupt in HYP mode (ie
760 * while executing the guest). This interrupt is still
761 * pending, as we haven't serviced it yet!
763 * We're now back in SVC mode, with interrupts
764 * disabled. Enabling the interrupts now will have
765 * the effect of taking the interrupt again, in SVC
772 *************************************************************/
774 kvm_timer_sync_hwstate(vcpu
);
775 kvm_vgic_sync_hwstate(vcpu
);
777 ret
= handle_exit(vcpu
, run
, ret
);
780 if (vcpu
->sigset_active
)
781 sigprocmask(SIG_SETMASK
, &sigsaved
, NULL
);
785 static int vcpu_interrupt_line(struct kvm_vcpu
*vcpu
, int number
, bool level
)
791 if (number
== KVM_ARM_IRQ_CPU_IRQ
)
792 bit_index
= __ffs(HCR_VI
);
793 else /* KVM_ARM_IRQ_CPU_FIQ */
794 bit_index
= __ffs(HCR_VF
);
796 ptr
= (unsigned long *)&vcpu
->arch
.irq_lines
;
798 set
= test_and_set_bit(bit_index
, ptr
);
800 set
= test_and_clear_bit(bit_index
, ptr
);
803 * If we didn't change anything, no need to wake up or kick other CPUs
809 * The vcpu irq_lines field was updated, wake up sleeping VCPUs and
810 * trigger a world-switch round on the running physical CPU to set the
811 * virtual IRQ/FIQ fields in the HCR appropriately.
818 int kvm_vm_ioctl_irq_line(struct kvm
*kvm
, struct kvm_irq_level
*irq_level
)
820 u32 irq
= irq_level
->irq
;
821 unsigned int irq_type
, vcpu_idx
, irq_num
;
822 int nrcpus
= atomic_read(&kvm
->online_vcpus
);
823 struct kvm_vcpu
*vcpu
= NULL
;
824 bool level
= irq_level
->level
;
826 irq_type
= (irq
>> KVM_ARM_IRQ_TYPE_SHIFT
) & KVM_ARM_IRQ_TYPE_MASK
;
827 vcpu_idx
= (irq
>> KVM_ARM_IRQ_VCPU_SHIFT
) & KVM_ARM_IRQ_VCPU_MASK
;
828 irq_num
= (irq
>> KVM_ARM_IRQ_NUM_SHIFT
) & KVM_ARM_IRQ_NUM_MASK
;
830 trace_kvm_irq_line(irq_type
, vcpu_idx
, irq_num
, irq_level
->level
);
833 case KVM_ARM_IRQ_TYPE_CPU
:
834 if (irqchip_in_kernel(kvm
))
837 if (vcpu_idx
>= nrcpus
)
840 vcpu
= kvm_get_vcpu(kvm
, vcpu_idx
);
844 if (irq_num
> KVM_ARM_IRQ_CPU_FIQ
)
847 return vcpu_interrupt_line(vcpu
, irq_num
, level
);
848 case KVM_ARM_IRQ_TYPE_PPI
:
849 if (!irqchip_in_kernel(kvm
))
852 if (vcpu_idx
>= nrcpus
)
855 vcpu
= kvm_get_vcpu(kvm
, vcpu_idx
);
859 if (irq_num
< VGIC_NR_SGIS
|| irq_num
>= VGIC_NR_PRIVATE_IRQS
)
862 return kvm_vgic_inject_irq(kvm
, vcpu
->vcpu_id
, irq_num
, level
);
863 case KVM_ARM_IRQ_TYPE_SPI
:
864 if (!irqchip_in_kernel(kvm
))
867 if (irq_num
< VGIC_NR_PRIVATE_IRQS
||
868 irq_num
> KVM_ARM_IRQ_GIC_MAX
)
871 return kvm_vgic_inject_irq(kvm
, 0, irq_num
, level
);
877 long kvm_arch_vcpu_ioctl(struct file
*filp
,
878 unsigned int ioctl
, unsigned long arg
)
880 struct kvm_vcpu
*vcpu
= filp
->private_data
;
881 void __user
*argp
= (void __user
*)arg
;
884 case KVM_ARM_VCPU_INIT
: {
885 struct kvm_vcpu_init init
;
887 if (copy_from_user(&init
, argp
, sizeof(init
)))
890 return kvm_vcpu_set_target(vcpu
, &init
);
893 case KVM_SET_ONE_REG
:
894 case KVM_GET_ONE_REG
: {
895 struct kvm_one_reg reg
;
896 if (copy_from_user(®
, argp
, sizeof(reg
)))
898 if (ioctl
== KVM_SET_ONE_REG
)
899 return kvm_arm_set_reg(vcpu
, ®
);
901 return kvm_arm_get_reg(vcpu
, ®
);
903 case KVM_GET_REG_LIST
: {
904 struct kvm_reg_list __user
*user_list
= argp
;
905 struct kvm_reg_list reg_list
;
908 if (copy_from_user(®_list
, user_list
, sizeof(reg_list
)))
911 reg_list
.n
= kvm_arm_num_regs(vcpu
);
912 if (copy_to_user(user_list
, ®_list
, sizeof(reg_list
)))
916 return kvm_arm_copy_reg_indices(vcpu
, user_list
->reg
);
923 int kvm_vm_ioctl_get_dirty_log(struct kvm
*kvm
, struct kvm_dirty_log
*log
)
928 static int kvm_vm_ioctl_set_device_addr(struct kvm
*kvm
,
929 struct kvm_arm_device_addr
*dev_addr
)
931 unsigned long dev_id
, type
;
933 dev_id
= (dev_addr
->id
& KVM_ARM_DEVICE_ID_MASK
) >>
934 KVM_ARM_DEVICE_ID_SHIFT
;
935 type
= (dev_addr
->id
& KVM_ARM_DEVICE_TYPE_MASK
) >>
936 KVM_ARM_DEVICE_TYPE_SHIFT
;
939 case KVM_ARM_DEVICE_VGIC_V2
:
942 return kvm_vgic_set_addr(kvm
, type
, dev_addr
->addr
);
948 long kvm_arch_vm_ioctl(struct file
*filp
,
949 unsigned int ioctl
, unsigned long arg
)
951 struct kvm
*kvm
= filp
->private_data
;
952 void __user
*argp
= (void __user
*)arg
;
955 case KVM_CREATE_IRQCHIP
: {
957 return kvm_vgic_create(kvm
);
961 case KVM_ARM_SET_DEVICE_ADDR
: {
962 struct kvm_arm_device_addr dev_addr
;
964 if (copy_from_user(&dev_addr
, argp
, sizeof(dev_addr
)))
966 return kvm_vm_ioctl_set_device_addr(kvm
, &dev_addr
);
973 static void cpu_init_hyp_mode(void *vector
)
975 unsigned long long pgd_ptr
;
976 unsigned long pgd_low
, pgd_high
;
977 unsigned long hyp_stack_ptr
;
978 unsigned long stack_page
;
979 unsigned long vector_ptr
;
981 /* Switch from the HYP stub to our own HYP init vector */
982 __hyp_set_vectors((unsigned long)vector
);
984 pgd_ptr
= (unsigned long long)kvm_mmu_get_httbr();
985 pgd_low
= (pgd_ptr
& ((1ULL << 32) - 1));
986 pgd_high
= (pgd_ptr
>> 32ULL);
987 stack_page
= __get_cpu_var(kvm_arm_hyp_stack_page
);
988 hyp_stack_ptr
= stack_page
+ PAGE_SIZE
;
989 vector_ptr
= (unsigned long)__kvm_hyp_vector
;
992 * Call initialization code, and switch to the full blown
993 * HYP code. The init code doesn't need to preserve these registers as
994 * r1-r3 and r12 are already callee save according to the AAPCS.
995 * Note that we slightly misuse the prototype by casing the pgd_low to
998 kvm_call_hyp((void *)pgd_low
, pgd_high
, hyp_stack_ptr
, vector_ptr
);
1002 * Inits Hyp-mode on all online CPUs
1004 static int init_hyp_mode(void)
1006 phys_addr_t init_phys_addr
;
1011 * Allocate Hyp PGD and setup Hyp identity mapping
1013 err
= kvm_mmu_init();
1018 * It is probably enough to obtain the default on one
1019 * CPU. It's unlikely to be different on the others.
1021 hyp_default_vectors
= __hyp_get_vectors();
1024 * Allocate stack pages for Hypervisor-mode
1026 for_each_possible_cpu(cpu
) {
1027 unsigned long stack_page
;
1029 stack_page
= __get_free_page(GFP_KERNEL
);
1032 goto out_free_stack_pages
;
1035 per_cpu(kvm_arm_hyp_stack_page
, cpu
) = stack_page
;
1039 * Execute the init code on each CPU.
1041 * Note: The stack is not mapped yet, so don't do anything else than
1042 * initializing the hypervisor mode on each CPU using a local stack
1043 * space for temporary storage.
1045 init_phys_addr
= virt_to_phys(__kvm_hyp_init
);
1046 for_each_online_cpu(cpu
) {
1047 smp_call_function_single(cpu
, cpu_init_hyp_mode
,
1048 (void *)(long)init_phys_addr
, 1);
1052 * Unmap the identity mapping
1054 kvm_clear_hyp_idmap();
1057 * Map the Hyp-code called directly from the host
1059 err
= create_hyp_mappings(__kvm_hyp_code_start
, __kvm_hyp_code_end
);
1061 kvm_err("Cannot map world-switch code\n");
1062 goto out_free_mappings
;
1066 * Map the Hyp stack pages
1068 for_each_possible_cpu(cpu
) {
1069 char *stack_page
= (char *)per_cpu(kvm_arm_hyp_stack_page
, cpu
);
1070 err
= create_hyp_mappings(stack_page
, stack_page
+ PAGE_SIZE
);
1073 kvm_err("Cannot map hyp stack\n");
1074 goto out_free_mappings
;
1079 * Map the host VFP structures
1081 kvm_host_vfp_state
= alloc_percpu(struct vfp_hard_struct
);
1082 if (!kvm_host_vfp_state
) {
1084 kvm_err("Cannot allocate host VFP state\n");
1085 goto out_free_mappings
;
1088 for_each_possible_cpu(cpu
) {
1089 struct vfp_hard_struct
*vfp
;
1091 vfp
= per_cpu_ptr(kvm_host_vfp_state
, cpu
);
1092 err
= create_hyp_mappings(vfp
, vfp
+ 1);
1095 kvm_err("Cannot map host VFP state: %d\n", err
);
1101 * Init HYP view of VGIC
1103 err
= kvm_vgic_hyp_init();
1107 #ifdef CONFIG_KVM_ARM_VGIC
1108 vgic_present
= true;
1112 * Init HYP architected timer support
1114 err
= kvm_timer_hyp_init();
1116 goto out_free_mappings
;
1118 kvm_info("Hyp mode initialized successfully\n");
1121 free_percpu(kvm_host_vfp_state
);
1124 out_free_stack_pages
:
1125 for_each_possible_cpu(cpu
)
1126 free_page(per_cpu(kvm_arm_hyp_stack_page
, cpu
));
1128 kvm_err("error initializing Hyp mode: %d\n", err
);
1133 * Initialize Hyp-mode and memory mappings on all CPUs.
1135 int kvm_arch_init(void *opaque
)
1139 if (!is_hyp_mode_available()) {
1140 kvm_err("HYP mode not available\n");
1144 if (kvm_target_cpu() < 0) {
1145 kvm_err("Target CPU not supported!\n");
1149 err
= init_hyp_mode();
1153 kvm_coproc_table_init();
1159 /* NOP: Compiling as a module not supported */
1160 void kvm_arch_exit(void)
1164 static int arm_init(void)
1166 int rc
= kvm_init(NULL
, sizeof(struct kvm_vcpu
), 0, THIS_MODULE
);
1170 module_init(arm_init
);