KVM: arm/arm64: Move shared files to virt/kvm/arm
author	Christoffer Dall <cdall@linaro.org>
	Thu, 4 May 2017 11:54:17 +0000 (13:54 +0200)
committer	Christoffer Dall <cdall@linaro.org>
	Thu, 4 May 2017 11:57:26 +0000 (13:57 +0200)
For some time now, much of the functionality shared between the arm and
arm64 KVM support has lived in arch/arm, which not only required a
horrible inter-arch reference from the Makefile in arch/arm64/kvm, but
also created confusion for newcomers to the code base, as was recently
seen on the mailing list.

Further, it confuses tools like cscope, which need special attention to
index the shared files for arm64 from the arm tree.

Move the shared files into virt/kvm/arm and move the tracepoints along
with them.  When moving the tracepoints we have to modify the way the
vgic creates its tracepoint definitions, so we take the chance to give
the VGIC tracepoints their very own vgic trace.h file (see the sketch
below).

Signed-off-by: Christoffer Dall <cdall@linaro.org>
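
The new virt/kvm/arm/vgic/trace.h itself is not reproduced in the hunks
below, so here is a minimal sketch of the standard kernel
tracepoint-header pattern such a file follows, assuming the usual
TRACE_SYSTEM / TRACE_INCLUDE_PATH boilerplate.  The
vgic_update_irq_pending event, its fields, and the exact relative
include path are illustrative, not taken verbatim from the patch.

/*
 * Sketch of a dedicated vgic trace.h (illustrative, not the verbatim file).
 */
#if !defined(_TRACE_VGIC_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_VGIC_H

#include <linux/tracepoint.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm

/* Example event; the real header defines the VGIC's own tracepoints. */
TRACE_EVENT(vgic_update_irq_pending,
	TP_PROTO(unsigned long vcpu_id, __u32 irq, bool level),
	TP_ARGS(vcpu_id, irq, level),

	TP_STRUCT__entry(
		__field(unsigned long,	vcpu_id)
		__field(__u32,		irq)
		__field(bool,		level)
	),

	TP_fast_assign(
		__entry->vcpu_id	= vcpu_id;
		__entry->irq		= irq;
		__entry->level		= level;
	),

	TP_printk("VCPU: %lu, IRQ %u, level: %d",
		  __entry->vcpu_id, __entry->irq, __entry->level)
);

#endif /* _TRACE_VGIC_H */

/* This part must be outside protection; the include path is an assumption. */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH ../../../virt/kvm/arm/vgic
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace

#include <trace/define_trace.h>

Exactly one translation unit then defines CREATE_TRACE_POINTS before
including "trace.h" to emit the tracepoint definitions; presumably that
is what the change to virt/kvm/arm/vgic/vgic.c in the file list below
covers, mirroring the CREATE_TRACE_POINTS / #include "trace.h" pair
visible in the deleted arm.c.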
16 files changed:
arch/arm/kvm/Makefile
arch/arm/kvm/arm.c [deleted file]
arch/arm/kvm/mmio.c [deleted file]
arch/arm/kvm/mmu.c [deleted file]
arch/arm/kvm/perf.c [deleted file]
arch/arm/kvm/psci.c [deleted file]
arch/arm/kvm/trace.h
arch/arm64/kvm/Makefile
virt/kvm/arm/arm.c [new file with mode: 0644]
virt/kvm/arm/mmio.c [new file with mode: 0644]
virt/kvm/arm/mmu.c [new file with mode: 0644]
virt/kvm/arm/perf.c [new file with mode: 0644]
virt/kvm/arm/psci.c [new file with mode: 0644]
virt/kvm/arm/trace.h
virt/kvm/arm/vgic/trace.h [new file with mode: 0644]
virt/kvm/arm/vgic/vgic.c

index 7b3670c2ae7bdf8165652281c7ee023ad0b5146c..d9beee652d36cbba48d0900e896f46b82df5c39e 100644 (file)
@@ -18,9 +18,12 @@ KVM := ../../../virt/kvm
 kvm-arm-y = $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o $(KVM)/vfio.o
 
 obj-$(CONFIG_KVM_ARM_HOST) += hyp/
+
 obj-y += kvm-arm.o init.o interrupts.o
-obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o
-obj-y += coproc.o coproc_a15.o coproc_a7.o mmio.o psci.o perf.o vgic-v3-coproc.o
+obj-y += handle_exit.o guest.o emulate.o reset.o
+obj-y += coproc.o coproc_a15.o coproc_a7.o   vgic-v3-coproc.o
+obj-y += $(KVM)/arm/arm.o $(KVM)/arm/mmu.o $(KVM)/arm/mmio.o
+obj-y += $(KVM)/arm/psci.o $(KVM)/arm/perf.o
 obj-y += $(KVM)/arm/aarch32.o
 
 obj-y += $(KVM)/arm/vgic/vgic.o
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
deleted file mode 100644 (file)
index 7941699..0000000
+++ /dev/null
@@ -1,1480 +0,0 @@
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
- */
-
-#include <linux/cpu_pm.h>
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/kvm_host.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/vmalloc.h>
-#include <linux/fs.h>
-#include <linux/mman.h>
-#include <linux/sched.h>
-#include <linux/kvm.h>
-#include <trace/events/kvm.h>
-#include <kvm/arm_pmu.h>
-
-#define CREATE_TRACE_POINTS
-#include "trace.h"
-
-#include <linux/uaccess.h>
-#include <asm/ptrace.h>
-#include <asm/mman.h>
-#include <asm/tlbflush.h>
-#include <asm/cacheflush.h>
-#include <asm/virt.h>
-#include <asm/kvm_arm.h>
-#include <asm/kvm_asm.h>
-#include <asm/kvm_mmu.h>
-#include <asm/kvm_emulate.h>
-#include <asm/kvm_coproc.h>
-#include <asm/kvm_psci.h>
-#include <asm/sections.h>
-
-#ifdef REQUIRES_VIRT
-__asm__(".arch_extension       virt");
-#endif
-
-static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
-static kvm_cpu_context_t __percpu *kvm_host_cpu_state;
-
-/* Per-CPU variable containing the currently running vcpu. */
-static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
-
-/* The VMID used in the VTTBR */
-static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
-static u32 kvm_next_vmid;
-static unsigned int kvm_vmid_bits __read_mostly;
-static DEFINE_SPINLOCK(kvm_vmid_lock);
-
-static bool vgic_present;
-
-static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled);
-
-static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
-{
-       BUG_ON(preemptible());
-       __this_cpu_write(kvm_arm_running_vcpu, vcpu);
-}
-
-/**
- * kvm_arm_get_running_vcpu - get the vcpu running on the current CPU.
- * Must be called from non-preemptible context
- */
-struct kvm_vcpu *kvm_arm_get_running_vcpu(void)
-{
-       BUG_ON(preemptible());
-       return __this_cpu_read(kvm_arm_running_vcpu);
-}
-
-/**
- * kvm_get_running_vcpus - get the per-CPU array of currently running vcpus.
- */
-struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void)
-{
-       return &kvm_arm_running_vcpu;
-}
-
-int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
-{
-       return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
-}
-
-int kvm_arch_hardware_setup(void)
-{
-       return 0;
-}
-
-void kvm_arch_check_processor_compat(void *rtn)
-{
-       *(int *)rtn = 0;
-}
-
-
-/**
- * kvm_arch_init_vm - initializes a VM data structure
- * @kvm:       pointer to the KVM struct
- */
-int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
-{
-       int ret, cpu;
-
-       if (type)
-               return -EINVAL;
-
-       kvm->arch.last_vcpu_ran = alloc_percpu(typeof(*kvm->arch.last_vcpu_ran));
-       if (!kvm->arch.last_vcpu_ran)
-               return -ENOMEM;
-
-       for_each_possible_cpu(cpu)
-               *per_cpu_ptr(kvm->arch.last_vcpu_ran, cpu) = -1;
-
-       ret = kvm_alloc_stage2_pgd(kvm);
-       if (ret)
-               goto out_fail_alloc;
-
-       ret = create_hyp_mappings(kvm, kvm + 1, PAGE_HYP);
-       if (ret)
-               goto out_free_stage2_pgd;
-
-       kvm_vgic_early_init(kvm);
-
-       /* Mark the initial VMID generation invalid */
-       kvm->arch.vmid_gen = 0;
-
-       /* The maximum number of VCPUs is limited by the host's GIC model */
-       kvm->arch.max_vcpus = vgic_present ?
-                               kvm_vgic_get_max_vcpus() : KVM_MAX_VCPUS;
-
-       return ret;
-out_free_stage2_pgd:
-       kvm_free_stage2_pgd(kvm);
-out_fail_alloc:
-       free_percpu(kvm->arch.last_vcpu_ran);
-       kvm->arch.last_vcpu_ran = NULL;
-       return ret;
-}
-
-bool kvm_arch_has_vcpu_debugfs(void)
-{
-       return false;
-}
-
-int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
-{
-       return 0;
-}
-
-int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
-{
-       return VM_FAULT_SIGBUS;
-}
-
-
-/**
- * kvm_arch_destroy_vm - destroy the VM data structure
- * @kvm:       pointer to the KVM struct
- */
-void kvm_arch_destroy_vm(struct kvm *kvm)
-{
-       int i;
-
-       free_percpu(kvm->arch.last_vcpu_ran);
-       kvm->arch.last_vcpu_ran = NULL;
-
-       for (i = 0; i < KVM_MAX_VCPUS; ++i) {
-               if (kvm->vcpus[i]) {
-                       kvm_arch_vcpu_free(kvm->vcpus[i]);
-                       kvm->vcpus[i] = NULL;
-               }
-       }
-
-       kvm_vgic_destroy(kvm);
-}
-
-int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
-{
-       int r;
-       switch (ext) {
-       case KVM_CAP_IRQCHIP:
-               r = vgic_present;
-               break;
-       case KVM_CAP_IOEVENTFD:
-       case KVM_CAP_DEVICE_CTRL:
-       case KVM_CAP_USER_MEMORY:
-       case KVM_CAP_SYNC_MMU:
-       case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
-       case KVM_CAP_ONE_REG:
-       case KVM_CAP_ARM_PSCI:
-       case KVM_CAP_ARM_PSCI_0_2:
-       case KVM_CAP_READONLY_MEM:
-       case KVM_CAP_MP_STATE:
-       case KVM_CAP_IMMEDIATE_EXIT:
-               r = 1;
-               break;
-       case KVM_CAP_COALESCED_MMIO:
-               r = KVM_COALESCED_MMIO_PAGE_OFFSET;
-               break;
-       case KVM_CAP_ARM_SET_DEVICE_ADDR:
-               r = 1;
-               break;
-       case KVM_CAP_NR_VCPUS:
-               r = num_online_cpus();
-               break;
-       case KVM_CAP_MAX_VCPUS:
-               r = KVM_MAX_VCPUS;
-               break;
-       case KVM_CAP_NR_MEMSLOTS:
-               r = KVM_USER_MEM_SLOTS;
-               break;
-       case KVM_CAP_MSI_DEVID:
-               if (!kvm)
-                       r = -EINVAL;
-               else
-                       r = kvm->arch.vgic.msis_require_devid;
-               break;
-       case KVM_CAP_ARM_USER_IRQ:
-               /*
-                * 1: EL1_VTIMER, EL1_PTIMER, and PMU.
-                * (bump this number if adding more devices)
-                */
-               r = 1;
-               break;
-       default:
-               r = kvm_arch_dev_ioctl_check_extension(kvm, ext);
-               break;
-       }
-       return r;
-}
-
-long kvm_arch_dev_ioctl(struct file *filp,
-                       unsigned int ioctl, unsigned long arg)
-{
-       return -EINVAL;
-}
-
-
-struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
-{
-       int err;
-       struct kvm_vcpu *vcpu;
-
-       if (irqchip_in_kernel(kvm) && vgic_initialized(kvm)) {
-               err = -EBUSY;
-               goto out;
-       }
-
-       if (id >= kvm->arch.max_vcpus) {
-               err = -EINVAL;
-               goto out;
-       }
-
-       vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
-       if (!vcpu) {
-               err = -ENOMEM;
-               goto out;
-       }
-
-       err = kvm_vcpu_init(vcpu, kvm, id);
-       if (err)
-               goto free_vcpu;
-
-       err = create_hyp_mappings(vcpu, vcpu + 1, PAGE_HYP);
-       if (err)
-               goto vcpu_uninit;
-
-       return vcpu;
-vcpu_uninit:
-       kvm_vcpu_uninit(vcpu);
-free_vcpu:
-       kmem_cache_free(kvm_vcpu_cache, vcpu);
-out:
-       return ERR_PTR(err);
-}
-
-void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
-{
-       kvm_vgic_vcpu_early_init(vcpu);
-}
-
-void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
-{
-       kvm_mmu_free_memory_caches(vcpu);
-       kvm_timer_vcpu_terminate(vcpu);
-       kvm_vgic_vcpu_destroy(vcpu);
-       kvm_pmu_vcpu_destroy(vcpu);
-       kvm_vcpu_uninit(vcpu);
-       kmem_cache_free(kvm_vcpu_cache, vcpu);
-}
-
-void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
-{
-       kvm_arch_vcpu_free(vcpu);
-}
-
-int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
-{
-       return kvm_timer_should_fire(vcpu_vtimer(vcpu)) ||
-              kvm_timer_should_fire(vcpu_ptimer(vcpu));
-}
-
-void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
-{
-       kvm_timer_schedule(vcpu);
-}
-
-void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
-{
-       kvm_timer_unschedule(vcpu);
-}
-
-int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
-{
-       /* Force users to call KVM_ARM_VCPU_INIT */
-       vcpu->arch.target = -1;
-       bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
-
-       /* Set up the timer */
-       kvm_timer_vcpu_init(vcpu);
-
-       kvm_arm_reset_debug_ptr(vcpu);
-
-       return 0;
-}
-
-void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
-{
-       int *last_ran;
-
-       last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran);
-
-       /*
-        * We might get preempted before the vCPU actually runs, but
-        * over-invalidation doesn't affect correctness.
-        */
-       if (*last_ran != vcpu->vcpu_id) {
-               kvm_call_hyp(__kvm_tlb_flush_local_vmid, vcpu);
-               *last_ran = vcpu->vcpu_id;
-       }
-
-       vcpu->cpu = cpu;
-       vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);
-
-       kvm_arm_set_running_vcpu(vcpu);
-
-       kvm_vgic_load(vcpu);
-}
-
-void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
-{
-       kvm_vgic_put(vcpu);
-
-       vcpu->cpu = -1;
-
-       kvm_arm_set_running_vcpu(NULL);
-       kvm_timer_vcpu_put(vcpu);
-}
-
-int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
-                                   struct kvm_mp_state *mp_state)
-{
-       if (vcpu->arch.power_off)
-               mp_state->mp_state = KVM_MP_STATE_STOPPED;
-       else
-               mp_state->mp_state = KVM_MP_STATE_RUNNABLE;
-
-       return 0;
-}
-
-int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
-                                   struct kvm_mp_state *mp_state)
-{
-       switch (mp_state->mp_state) {
-       case KVM_MP_STATE_RUNNABLE:
-               vcpu->arch.power_off = false;
-               break;
-       case KVM_MP_STATE_STOPPED:
-               vcpu->arch.power_off = true;
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-/**
- * kvm_arch_vcpu_runnable - determine if the vcpu can be scheduled
- * @v:         The VCPU pointer
- *
- * If the guest CPU is not waiting for interrupts or an interrupt line is
- * asserted, the CPU is by definition runnable.
- */
-int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
-{
-       return ((!!v->arch.irq_lines || kvm_vgic_vcpu_pending_irq(v))
-               && !v->arch.power_off && !v->arch.pause);
-}
-
-/* Just ensure a guest exit from a particular CPU */
-static void exit_vm_noop(void *info)
-{
-}
-
-void force_vm_exit(const cpumask_t *mask)
-{
-       preempt_disable();
-       smp_call_function_many(mask, exit_vm_noop, NULL, true);
-       preempt_enable();
-}
-
-/**
- * need_new_vmid_gen - check that the VMID is still valid
- * @kvm: The VM's VMID to check
- *
- * return true if there is a new generation of VMIDs being used
- *
- * The hardware supports only 256 values with the value zero reserved for the
- * host, so we check if an assigned value belongs to a previous generation,
- * which requires us to assign a new value. If we're the first to use a
- * VMID for the new generation, we must flush necessary caches and TLBs on all
- * CPUs.
- */
-static bool need_new_vmid_gen(struct kvm *kvm)
-{
-       return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
-}
-
-/**
- * update_vttbr - Update the VTTBR with a valid VMID before the guest runs
- * @kvm:       The guest that we are about to run
- *
- * Called from kvm_arch_vcpu_ioctl_run before entering the guest to ensure the
- * VM has a valid VMID, otherwise assigns a new one and flushes corresponding
- * caches and TLBs.
- */
-static void update_vttbr(struct kvm *kvm)
-{
-       phys_addr_t pgd_phys;
-       u64 vmid;
-
-       if (!need_new_vmid_gen(kvm))
-               return;
-
-       spin_lock(&kvm_vmid_lock);
-
-       /*
-        * We need to re-check the vmid_gen here to ensure that if another vcpu
-        * already allocated a valid vmid for this vm, then this vcpu should
-        * use the same vmid.
-        */
-       if (!need_new_vmid_gen(kvm)) {
-               spin_unlock(&kvm_vmid_lock);
-               return;
-       }
-
-       /* First user of a new VMID generation? */
-       if (unlikely(kvm_next_vmid == 0)) {
-               atomic64_inc(&kvm_vmid_gen);
-               kvm_next_vmid = 1;
-
-               /*
-                * On SMP we know no other CPUs can use this CPU's or each
-                * other's VMID after force_vm_exit returns since the
-                * kvm_vmid_lock blocks them from reentry to the guest.
-                */
-               force_vm_exit(cpu_all_mask);
-               /*
-                * Now broadcast TLB + ICACHE invalidation over the inner
-                * shareable domain to make sure all data structures are
-                * clean.
-                */
-               kvm_call_hyp(__kvm_flush_vm_context);
-       }
-
-       kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
-       kvm->arch.vmid = kvm_next_vmid;
-       kvm_next_vmid++;
-       kvm_next_vmid &= (1 << kvm_vmid_bits) - 1;
-
-       /* update vttbr to be used with the new vmid */
-       pgd_phys = virt_to_phys(kvm->arch.pgd);
-       BUG_ON(pgd_phys & ~VTTBR_BADDR_MASK);
-       vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits);
-       kvm->arch.vttbr = pgd_phys | vmid;
-
-       spin_unlock(&kvm_vmid_lock);
-}
-
-static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
-{
-       struct kvm *kvm = vcpu->kvm;
-       int ret = 0;
-
-       if (likely(vcpu->arch.has_run_once))
-               return 0;
-
-       vcpu->arch.has_run_once = true;
-
-       /*
-        * Map the VGIC hardware resources before running a vcpu the first
-        * time on this VM.
-        */
-       if (unlikely(irqchip_in_kernel(kvm) && !vgic_ready(kvm))) {
-               ret = kvm_vgic_map_resources(kvm);
-               if (ret)
-                       return ret;
-       }
-
-       ret = kvm_timer_enable(vcpu);
-
-       return ret;
-}
-
-bool kvm_arch_intc_initialized(struct kvm *kvm)
-{
-       return vgic_initialized(kvm);
-}
-
-void kvm_arm_halt_guest(struct kvm *kvm)
-{
-       int i;
-       struct kvm_vcpu *vcpu;
-
-       kvm_for_each_vcpu(i, vcpu, kvm)
-               vcpu->arch.pause = true;
-       kvm_make_all_cpus_request(kvm, KVM_REQ_VCPU_EXIT);
-}
-
-void kvm_arm_halt_vcpu(struct kvm_vcpu *vcpu)
-{
-       vcpu->arch.pause = true;
-       kvm_vcpu_kick(vcpu);
-}
-
-void kvm_arm_resume_vcpu(struct kvm_vcpu *vcpu)
-{
-       struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);
-
-       vcpu->arch.pause = false;
-       swake_up(wq);
-}
-
-void kvm_arm_resume_guest(struct kvm *kvm)
-{
-       int i;
-       struct kvm_vcpu *vcpu;
-
-       kvm_for_each_vcpu(i, vcpu, kvm)
-               kvm_arm_resume_vcpu(vcpu);
-}
-
-static void vcpu_sleep(struct kvm_vcpu *vcpu)
-{
-       struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);
-
-       swait_event_interruptible(*wq, ((!vcpu->arch.power_off) &&
-                                      (!vcpu->arch.pause)));
-}
-
-static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
-{
-       return vcpu->arch.target >= 0;
-}
-
-/**
- * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
- * @vcpu:      The VCPU pointer
- * @run:       The kvm_run structure pointer used for userspace state exchange
- *
- * This function is called through the VCPU_RUN ioctl called from user space. It
- * will execute VM code in a loop until the time slice for the process is used
- * or some emulation is needed from user space in which case the function will
- * return with return value 0 and with the kvm_run structure filled in with the
- * required data for the requested emulation.
- */
-int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
-       int ret;
-       sigset_t sigsaved;
-
-       if (unlikely(!kvm_vcpu_initialized(vcpu)))
-               return -ENOEXEC;
-
-       ret = kvm_vcpu_first_run_init(vcpu);
-       if (ret)
-               return ret;
-
-       if (run->exit_reason == KVM_EXIT_MMIO) {
-               ret = kvm_handle_mmio_return(vcpu, vcpu->run);
-               if (ret)
-                       return ret;
-       }
-
-       if (run->immediate_exit)
-               return -EINTR;
-
-       if (vcpu->sigset_active)
-               sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
-
-       ret = 1;
-       run->exit_reason = KVM_EXIT_UNKNOWN;
-       while (ret > 0) {
-               /*
-                * Check conditions before entering the guest
-                */
-               cond_resched();
-
-               update_vttbr(vcpu->kvm);
-
-               if (vcpu->arch.power_off || vcpu->arch.pause)
-                       vcpu_sleep(vcpu);
-
-               /*
-                * Preparing the interrupts to be injected also
-                * involves poking the GIC, which must be done in a
-                * non-preemptible context.
-                */
-               preempt_disable();
-
-               kvm_pmu_flush_hwstate(vcpu);
-
-               kvm_timer_flush_hwstate(vcpu);
-               kvm_vgic_flush_hwstate(vcpu);
-
-               local_irq_disable();
-
-               /*
-                * If we have a signal pending, or need to notify a userspace
-                * irqchip about timer or PMU level changes, then we exit (and
-                * update the timer level state in kvm_timer_update_run
-                * below).
-                */
-               if (signal_pending(current) ||
-                   kvm_timer_should_notify_user(vcpu) ||
-                   kvm_pmu_should_notify_user(vcpu)) {
-                       ret = -EINTR;
-                       run->exit_reason = KVM_EXIT_INTR;
-               }
-
-               if (ret <= 0 || need_new_vmid_gen(vcpu->kvm) ||
-                       vcpu->arch.power_off || vcpu->arch.pause) {
-                       local_irq_enable();
-                       kvm_pmu_sync_hwstate(vcpu);
-                       kvm_timer_sync_hwstate(vcpu);
-                       kvm_vgic_sync_hwstate(vcpu);
-                       preempt_enable();
-                       continue;
-               }
-
-               kvm_arm_setup_debug(vcpu);
-
-               /**************************************************************
-                * Enter the guest
-                */
-               trace_kvm_entry(*vcpu_pc(vcpu));
-               guest_enter_irqoff();
-               vcpu->mode = IN_GUEST_MODE;
-
-               ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
-
-               vcpu->mode = OUTSIDE_GUEST_MODE;
-               vcpu->stat.exits++;
-               /*
-                * Back from guest
-                *************************************************************/
-
-               kvm_arm_clear_debug(vcpu);
-
-               /*
-                * We may have taken a host interrupt in HYP mode (ie
-                * while executing the guest). This interrupt is still
-                * pending, as we haven't serviced it yet!
-                *
-                * We're now back in SVC mode, with interrupts
-                * disabled.  Enabling the interrupts now will have
-                * the effect of taking the interrupt again, in SVC
-                * mode this time.
-                */
-               local_irq_enable();
-
-               /*
-                * We do local_irq_enable() before calling guest_exit() so
-                * that if a timer interrupt hits while running the guest we
-                * account that tick as being spent in the guest.  We enable
-                * preemption after calling guest_exit() so that if we get
-                * preempted we make sure ticks after that are not counted as
-                * guest time.
-                */
-               guest_exit();
-               trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));
-
-               /*
-                * We must sync the PMU and timer state before the vgic state so
-                * that the vgic can properly sample the updated state of the
-                * interrupt line.
-                */
-               kvm_pmu_sync_hwstate(vcpu);
-               kvm_timer_sync_hwstate(vcpu);
-
-               kvm_vgic_sync_hwstate(vcpu);
-
-               preempt_enable();
-
-               ret = handle_exit(vcpu, run, ret);
-       }
-
-       /* Tell userspace about in-kernel device output levels */
-       if (unlikely(!irqchip_in_kernel(vcpu->kvm))) {
-               kvm_timer_update_run(vcpu);
-               kvm_pmu_update_run(vcpu);
-       }
-
-       if (vcpu->sigset_active)
-               sigprocmask(SIG_SETMASK, &sigsaved, NULL);
-       return ret;
-}
-
-static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level)
-{
-       int bit_index;
-       bool set;
-       unsigned long *ptr;
-
-       if (number == KVM_ARM_IRQ_CPU_IRQ)
-               bit_index = __ffs(HCR_VI);
-       else /* KVM_ARM_IRQ_CPU_FIQ */
-               bit_index = __ffs(HCR_VF);
-
-       ptr = (unsigned long *)&vcpu->arch.irq_lines;
-       if (level)
-               set = test_and_set_bit(bit_index, ptr);
-       else
-               set = test_and_clear_bit(bit_index, ptr);
-
-       /*
-        * If we didn't change anything, no need to wake up or kick other CPUs
-        */
-       if (set == level)
-               return 0;
-
-       /*
-        * The vcpu irq_lines field was updated, wake up sleeping VCPUs and
-        * trigger a world-switch round on the running physical CPU to set the
-        * virtual IRQ/FIQ fields in the HCR appropriately.
-        */
-       kvm_vcpu_kick(vcpu);
-
-       return 0;
-}
-
-int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
-                         bool line_status)
-{
-       u32 irq = irq_level->irq;
-       unsigned int irq_type, vcpu_idx, irq_num;
-       int nrcpus = atomic_read(&kvm->online_vcpus);
-       struct kvm_vcpu *vcpu = NULL;
-       bool level = irq_level->level;
-
-       irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK;
-       vcpu_idx = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK;
-       irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK;
-
-       trace_kvm_irq_line(irq_type, vcpu_idx, irq_num, irq_level->level);
-
-       switch (irq_type) {
-       case KVM_ARM_IRQ_TYPE_CPU:
-               if (irqchip_in_kernel(kvm))
-                       return -ENXIO;
-
-               if (vcpu_idx >= nrcpus)
-                       return -EINVAL;
-
-               vcpu = kvm_get_vcpu(kvm, vcpu_idx);
-               if (!vcpu)
-                       return -EINVAL;
-
-               if (irq_num > KVM_ARM_IRQ_CPU_FIQ)
-                       return -EINVAL;
-
-               return vcpu_interrupt_line(vcpu, irq_num, level);
-       case KVM_ARM_IRQ_TYPE_PPI:
-               if (!irqchip_in_kernel(kvm))
-                       return -ENXIO;
-
-               if (vcpu_idx >= nrcpus)
-                       return -EINVAL;
-
-               vcpu = kvm_get_vcpu(kvm, vcpu_idx);
-               if (!vcpu)
-                       return -EINVAL;
-
-               if (irq_num < VGIC_NR_SGIS || irq_num >= VGIC_NR_PRIVATE_IRQS)
-                       return -EINVAL;
-
-               return kvm_vgic_inject_irq(kvm, vcpu->vcpu_id, irq_num, level);
-       case KVM_ARM_IRQ_TYPE_SPI:
-               if (!irqchip_in_kernel(kvm))
-                       return -ENXIO;
-
-               if (irq_num < VGIC_NR_PRIVATE_IRQS)
-                       return -EINVAL;
-
-               return kvm_vgic_inject_irq(kvm, 0, irq_num, level);
-       }
-
-       return -EINVAL;
-}
-
-static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
-                              const struct kvm_vcpu_init *init)
-{
-       unsigned int i;
-       int phys_target = kvm_target_cpu();
-
-       if (init->target != phys_target)
-               return -EINVAL;
-
-       /*
-        * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must
-        * use the same target.
-        */
-       if (vcpu->arch.target != -1 && vcpu->arch.target != init->target)
-               return -EINVAL;
-
-       /* -ENOENT for unknown features, -EINVAL for invalid combinations. */
-       for (i = 0; i < sizeof(init->features) * 8; i++) {
-               bool set = (init->features[i / 32] & (1 << (i % 32)));
-
-               if (set && i >= KVM_VCPU_MAX_FEATURES)
-                       return -ENOENT;
-
-               /*
-                * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must
-                * use the same feature set.
-                */
-               if (vcpu->arch.target != -1 && i < KVM_VCPU_MAX_FEATURES &&
-                   test_bit(i, vcpu->arch.features) != set)
-                       return -EINVAL;
-
-               if (set)
-                       set_bit(i, vcpu->arch.features);
-       }
-
-       vcpu->arch.target = phys_target;
-
-       /* Now we know what it is, we can reset it. */
-       return kvm_reset_vcpu(vcpu);
-}
-
-
-static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
-                                        struct kvm_vcpu_init *init)
-{
-       int ret;
-
-       ret = kvm_vcpu_set_target(vcpu, init);
-       if (ret)
-               return ret;
-
-       /*
-        * Ensure a rebooted VM will fault in RAM pages and detect if the
-        * guest MMU is turned off and flush the caches as needed.
-        */
-       if (vcpu->arch.has_run_once)
-               stage2_unmap_vm(vcpu->kvm);
-
-       vcpu_reset_hcr(vcpu);
-
-       /*
-        * Handle the "start in power-off" case.
-        */
-       if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
-               vcpu->arch.power_off = true;
-       else
-               vcpu->arch.power_off = false;
-
-       return 0;
-}
-
-static int kvm_arm_vcpu_set_attr(struct kvm_vcpu *vcpu,
-                                struct kvm_device_attr *attr)
-{
-       int ret = -ENXIO;
-
-       switch (attr->group) {
-       default:
-               ret = kvm_arm_vcpu_arch_set_attr(vcpu, attr);
-               break;
-       }
-
-       return ret;
-}
-
-static int kvm_arm_vcpu_get_attr(struct kvm_vcpu *vcpu,
-                                struct kvm_device_attr *attr)
-{
-       int ret = -ENXIO;
-
-       switch (attr->group) {
-       default:
-               ret = kvm_arm_vcpu_arch_get_attr(vcpu, attr);
-               break;
-       }
-
-       return ret;
-}
-
-static int kvm_arm_vcpu_has_attr(struct kvm_vcpu *vcpu,
-                                struct kvm_device_attr *attr)
-{
-       int ret = -ENXIO;
-
-       switch (attr->group) {
-       default:
-               ret = kvm_arm_vcpu_arch_has_attr(vcpu, attr);
-               break;
-       }
-
-       return ret;
-}
-
-long kvm_arch_vcpu_ioctl(struct file *filp,
-                        unsigned int ioctl, unsigned long arg)
-{
-       struct kvm_vcpu *vcpu = filp->private_data;
-       void __user *argp = (void __user *)arg;
-       struct kvm_device_attr attr;
-
-       switch (ioctl) {
-       case KVM_ARM_VCPU_INIT: {
-               struct kvm_vcpu_init init;
-
-               if (copy_from_user(&init, argp, sizeof(init)))
-                       return -EFAULT;
-
-               return kvm_arch_vcpu_ioctl_vcpu_init(vcpu, &init);
-       }
-       case KVM_SET_ONE_REG:
-       case KVM_GET_ONE_REG: {
-               struct kvm_one_reg reg;
-
-               if (unlikely(!kvm_vcpu_initialized(vcpu)))
-                       return -ENOEXEC;
-
-               if (copy_from_user(&reg, argp, sizeof(reg)))
-                       return -EFAULT;
-               if (ioctl == KVM_SET_ONE_REG)
-                       return kvm_arm_set_reg(vcpu, &reg);
-               else
-                       return kvm_arm_get_reg(vcpu, &reg);
-       }
-       case KVM_GET_REG_LIST: {
-               struct kvm_reg_list __user *user_list = argp;
-               struct kvm_reg_list reg_list;
-               unsigned n;
-
-               if (unlikely(!kvm_vcpu_initialized(vcpu)))
-                       return -ENOEXEC;
-
-               if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
-                       return -EFAULT;
-               n = reg_list.n;
-               reg_list.n = kvm_arm_num_regs(vcpu);
-               if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
-                       return -EFAULT;
-               if (n < reg_list.n)
-                       return -E2BIG;
-               return kvm_arm_copy_reg_indices(vcpu, user_list->reg);
-       }
-       case KVM_SET_DEVICE_ATTR: {
-               if (copy_from_user(&attr, argp, sizeof(attr)))
-                       return -EFAULT;
-               return kvm_arm_vcpu_set_attr(vcpu, &attr);
-       }
-       case KVM_GET_DEVICE_ATTR: {
-               if (copy_from_user(&attr, argp, sizeof(attr)))
-                       return -EFAULT;
-               return kvm_arm_vcpu_get_attr(vcpu, &attr);
-       }
-       case KVM_HAS_DEVICE_ATTR: {
-               if (copy_from_user(&attr, argp, sizeof(attr)))
-                       return -EFAULT;
-               return kvm_arm_vcpu_has_attr(vcpu, &attr);
-       }
-       default:
-               return -EINVAL;
-       }
-}
-
-/**
- * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
- * @kvm: kvm instance
- * @log: slot id and address to which we copy the log
- *
- * Steps 1-4 below provide general overview of dirty page logging. See
- * kvm_get_dirty_log_protect() function description for additional details.
- *
- * We call kvm_get_dirty_log_protect() to handle steps 1-3, upon return we
- * always flush the TLB (step 4) even if previous step failed  and the dirty
- * bitmap may be corrupt. Regardless of previous outcome the KVM logging API
- * does not preclude user space subsequent dirty log read. Flushing TLB ensures
- * writes will be marked dirty for next log read.
- *
- *   1. Take a snapshot of the bit and clear it if needed.
- *   2. Write protect the corresponding page.
- *   3. Copy the snapshot to the userspace.
- *   4. Flush TLB's if needed.
- */
-int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
-{
-       bool is_dirty = false;
-       int r;
-
-       mutex_lock(&kvm->slots_lock);
-
-       r = kvm_get_dirty_log_protect(kvm, log, &is_dirty);
-
-       if (is_dirty)
-               kvm_flush_remote_tlbs(kvm);
-
-       mutex_unlock(&kvm->slots_lock);
-       return r;
-}
-
-static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
-                                       struct kvm_arm_device_addr *dev_addr)
-{
-       unsigned long dev_id, type;
-
-       dev_id = (dev_addr->id & KVM_ARM_DEVICE_ID_MASK) >>
-               KVM_ARM_DEVICE_ID_SHIFT;
-       type = (dev_addr->id & KVM_ARM_DEVICE_TYPE_MASK) >>
-               KVM_ARM_DEVICE_TYPE_SHIFT;
-
-       switch (dev_id) {
-       case KVM_ARM_DEVICE_VGIC_V2:
-               if (!vgic_present)
-                       return -ENXIO;
-               return kvm_vgic_addr(kvm, type, &dev_addr->addr, true);
-       default:
-               return -ENODEV;
-       }
-}
-
-long kvm_arch_vm_ioctl(struct file *filp,
-                      unsigned int ioctl, unsigned long arg)
-{
-       struct kvm *kvm = filp->private_data;
-       void __user *argp = (void __user *)arg;
-
-       switch (ioctl) {
-       case KVM_CREATE_IRQCHIP: {
-               int ret;
-               if (!vgic_present)
-                       return -ENXIO;
-               mutex_lock(&kvm->lock);
-               ret = kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
-               mutex_unlock(&kvm->lock);
-               return ret;
-       }
-       case KVM_ARM_SET_DEVICE_ADDR: {
-               struct kvm_arm_device_addr dev_addr;
-
-               if (copy_from_user(&dev_addr, argp, sizeof(dev_addr)))
-                       return -EFAULT;
-               return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr);
-       }
-       case KVM_ARM_PREFERRED_TARGET: {
-               int err;
-               struct kvm_vcpu_init init;
-
-               err = kvm_vcpu_preferred_target(&init);
-               if (err)
-                       return err;
-
-               if (copy_to_user(argp, &init, sizeof(init)))
-                       return -EFAULT;
-
-               return 0;
-       }
-       default:
-               return -EINVAL;
-       }
-}
-
-static void cpu_init_hyp_mode(void *dummy)
-{
-       phys_addr_t pgd_ptr;
-       unsigned long hyp_stack_ptr;
-       unsigned long stack_page;
-       unsigned long vector_ptr;
-
-       /* Switch from the HYP stub to our own HYP init vector */
-       __hyp_set_vectors(kvm_get_idmap_vector());
-
-       pgd_ptr = kvm_mmu_get_httbr();
-       stack_page = __this_cpu_read(kvm_arm_hyp_stack_page);
-       hyp_stack_ptr = stack_page + PAGE_SIZE;
-       vector_ptr = (unsigned long)kvm_ksym_ref(__kvm_hyp_vector);
-
-       __cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr);
-       __cpu_init_stage2();
-
-       if (is_kernel_in_hyp_mode())
-               kvm_timer_init_vhe();
-
-       kvm_arm_init_debug();
-}
-
-static void cpu_hyp_reset(void)
-{
-       if (!is_kernel_in_hyp_mode())
-               __hyp_reset_vectors();
-}
-
-static void cpu_hyp_reinit(void)
-{
-       cpu_hyp_reset();
-
-       if (is_kernel_in_hyp_mode()) {
-               /*
-                * __cpu_init_stage2() is safe to call even if the PM
-                * event was cancelled before the CPU was reset.
-                */
-               __cpu_init_stage2();
-       } else {
-               cpu_init_hyp_mode(NULL);
-       }
-}
-
-static void _kvm_arch_hardware_enable(void *discard)
-{
-       if (!__this_cpu_read(kvm_arm_hardware_enabled)) {
-               cpu_hyp_reinit();
-               __this_cpu_write(kvm_arm_hardware_enabled, 1);
-       }
-}
-
-int kvm_arch_hardware_enable(void)
-{
-       _kvm_arch_hardware_enable(NULL);
-       return 0;
-}
-
-static void _kvm_arch_hardware_disable(void *discard)
-{
-       if (__this_cpu_read(kvm_arm_hardware_enabled)) {
-               cpu_hyp_reset();
-               __this_cpu_write(kvm_arm_hardware_enabled, 0);
-       }
-}
-
-void kvm_arch_hardware_disable(void)
-{
-       _kvm_arch_hardware_disable(NULL);
-}
-
-#ifdef CONFIG_CPU_PM
-static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
-                                   unsigned long cmd,
-                                   void *v)
-{
-       /*
-        * kvm_arm_hardware_enabled is left with its old value over
-        * PM_ENTER->PM_EXIT. It is used to indicate PM_EXIT should
-        * re-enable hyp.
-        */
-       switch (cmd) {
-       case CPU_PM_ENTER:
-               if (__this_cpu_read(kvm_arm_hardware_enabled))
-                       /*
-                        * don't update kvm_arm_hardware_enabled here
-                        * so that the hardware will be re-enabled
-                        * when we resume. See below.
-                        */
-                       cpu_hyp_reset();
-
-               return NOTIFY_OK;
-       case CPU_PM_EXIT:
-               if (__this_cpu_read(kvm_arm_hardware_enabled))
-                       /* The hardware was enabled before suspend. */
-                       cpu_hyp_reinit();
-
-               return NOTIFY_OK;
-
-       default:
-               return NOTIFY_DONE;
-       }
-}
-
-static struct notifier_block hyp_init_cpu_pm_nb = {
-       .notifier_call = hyp_init_cpu_pm_notifier,
-};
-
-static void __init hyp_cpu_pm_init(void)
-{
-       cpu_pm_register_notifier(&hyp_init_cpu_pm_nb);
-}
-static void __init hyp_cpu_pm_exit(void)
-{
-       cpu_pm_unregister_notifier(&hyp_init_cpu_pm_nb);
-}
-#else
-static inline void hyp_cpu_pm_init(void)
-{
-}
-static inline void hyp_cpu_pm_exit(void)
-{
-}
-#endif
-
-static void teardown_common_resources(void)
-{
-       free_percpu(kvm_host_cpu_state);
-}
-
-static int init_common_resources(void)
-{
-       kvm_host_cpu_state = alloc_percpu(kvm_cpu_context_t);
-       if (!kvm_host_cpu_state) {
-               kvm_err("Cannot allocate host CPU state\n");
-               return -ENOMEM;
-       }
-
-       /* set size of VMID supported by CPU */
-       kvm_vmid_bits = kvm_get_vmid_bits();
-       kvm_info("%d-bit VMID\n", kvm_vmid_bits);
-
-       return 0;
-}
-
-static int init_subsystems(void)
-{
-       int err = 0;
-
-       /*
-        * Enable hardware so that subsystem initialisation can access EL2.
-        */
-       on_each_cpu(_kvm_arch_hardware_enable, NULL, 1);
-
-       /*
-        * Register CPU lower-power notifier
-        */
-       hyp_cpu_pm_init();
-
-       /*
-        * Init HYP view of VGIC
-        */
-       err = kvm_vgic_hyp_init();
-       switch (err) {
-       case 0:
-               vgic_present = true;
-               break;
-       case -ENODEV:
-       case -ENXIO:
-               vgic_present = false;
-               err = 0;
-               break;
-       default:
-               goto out;
-       }
-
-       /*
-        * Init HYP architected timer support
-        */
-       err = kvm_timer_hyp_init();
-       if (err)
-               goto out;
-
-       kvm_perf_init();
-       kvm_coproc_table_init();
-
-out:
-       on_each_cpu(_kvm_arch_hardware_disable, NULL, 1);
-
-       return err;
-}
-
-static void teardown_hyp_mode(void)
-{
-       int cpu;
-
-       if (is_kernel_in_hyp_mode())
-               return;
-
-       free_hyp_pgds();
-       for_each_possible_cpu(cpu)
-               free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
-       hyp_cpu_pm_exit();
-}
-
-static int init_vhe_mode(void)
-{
-       kvm_info("VHE mode initialized successfully\n");
-       return 0;
-}
-
-/**
- * Inits Hyp-mode on all online CPUs
- */
-static int init_hyp_mode(void)
-{
-       int cpu;
-       int err = 0;
-
-       /*
-        * Allocate Hyp PGD and setup Hyp identity mapping
-        */
-       err = kvm_mmu_init();
-       if (err)
-               goto out_err;
-
-       /*
-        * Allocate stack pages for Hypervisor-mode
-        */
-       for_each_possible_cpu(cpu) {
-               unsigned long stack_page;
-
-               stack_page = __get_free_page(GFP_KERNEL);
-               if (!stack_page) {
-                       err = -ENOMEM;
-                       goto out_err;
-               }
-
-               per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
-       }
-
-       /*
-        * Map the Hyp-code called directly from the host
-        */
-       err = create_hyp_mappings(kvm_ksym_ref(__hyp_text_start),
-                                 kvm_ksym_ref(__hyp_text_end), PAGE_HYP_EXEC);
-       if (err) {
-               kvm_err("Cannot map world-switch code\n");
-               goto out_err;
-       }
-
-       err = create_hyp_mappings(kvm_ksym_ref(__start_rodata),
-                                 kvm_ksym_ref(__end_rodata), PAGE_HYP_RO);
-       if (err) {
-               kvm_err("Cannot map rodata section\n");
-               goto out_err;
-       }
-
-       err = create_hyp_mappings(kvm_ksym_ref(__bss_start),
-                                 kvm_ksym_ref(__bss_stop), PAGE_HYP_RO);
-       if (err) {
-               kvm_err("Cannot map bss section\n");
-               goto out_err;
-       }
-
-       /*
-        * Map the Hyp stack pages
-        */
-       for_each_possible_cpu(cpu) {
-               char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
-               err = create_hyp_mappings(stack_page, stack_page + PAGE_SIZE,
-                                         PAGE_HYP);
-
-               if (err) {
-                       kvm_err("Cannot map hyp stack\n");
-                       goto out_err;
-               }
-       }
-
-       for_each_possible_cpu(cpu) {
-               kvm_cpu_context_t *cpu_ctxt;
-
-               cpu_ctxt = per_cpu_ptr(kvm_host_cpu_state, cpu);
-               err = create_hyp_mappings(cpu_ctxt, cpu_ctxt + 1, PAGE_HYP);
-
-               if (err) {
-                       kvm_err("Cannot map host CPU state: %d\n", err);
-                       goto out_err;
-               }
-       }
-
-       kvm_info("Hyp mode initialized successfully\n");
-
-       return 0;
-
-out_err:
-       teardown_hyp_mode();
-       kvm_err("error initializing Hyp mode: %d\n", err);
-       return err;
-}
-
-static void check_kvm_target_cpu(void *ret)
-{
-       *(int *)ret = kvm_target_cpu();
-}
-
-struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
-{
-       struct kvm_vcpu *vcpu;
-       int i;
-
-       mpidr &= MPIDR_HWID_BITMASK;
-       kvm_for_each_vcpu(i, vcpu, kvm) {
-               if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu))
-                       return vcpu;
-       }
-       return NULL;
-}
-
-/**
- * Initialize Hyp-mode and memory mappings on all CPUs.
- */
-int kvm_arch_init(void *opaque)
-{
-       int err;
-       int ret, cpu;
-
-       if (!is_hyp_mode_available()) {
-               kvm_err("HYP mode not available\n");
-               return -ENODEV;
-       }
-
-       for_each_online_cpu(cpu) {
-               smp_call_function_single(cpu, check_kvm_target_cpu, &ret, 1);
-               if (ret < 0) {
-                       kvm_err("Error, CPU %d not supported!\n", cpu);
-                       return -ENODEV;
-               }
-       }
-
-       err = init_common_resources();
-       if (err)
-               return err;
-
-       if (is_kernel_in_hyp_mode())
-               err = init_vhe_mode();
-       else
-               err = init_hyp_mode();
-       if (err)
-               goto out_err;
-
-       err = init_subsystems();
-       if (err)
-               goto out_hyp;
-
-       return 0;
-
-out_hyp:
-       teardown_hyp_mode();
-out_err:
-       teardown_common_resources();
-       return err;
-}
-
-/* NOP: Compiling as a module not supported */
-void kvm_arch_exit(void)
-{
-       kvm_perf_teardown();
-}
-
-static int arm_init(void)
-{
-       int rc = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
-       return rc;
-}
-
-module_init(arm_init);
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
deleted file mode 100644 (file)
index b6e715f..0000000
+++ /dev/null
@@ -1,217 +0,0 @@
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
- */
-
-#include <linux/kvm_host.h>
-#include <asm/kvm_mmio.h>
-#include <asm/kvm_emulate.h>
-#include <trace/events/kvm.h>
-
-#include "trace.h"
-
-void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data)
-{
-       void *datap = NULL;
-       union {
-               u8      byte;
-               u16     hword;
-               u32     word;
-               u64     dword;
-       } tmp;
-
-       switch (len) {
-       case 1:
-               tmp.byte        = data;
-               datap           = &tmp.byte;
-               break;
-       case 2:
-               tmp.hword       = data;
-               datap           = &tmp.hword;
-               break;
-       case 4:
-               tmp.word        = data;
-               datap           = &tmp.word;
-               break;
-       case 8:
-               tmp.dword       = data;
-               datap           = &tmp.dword;
-               break;
-       }
-
-       memcpy(buf, datap, len);
-}
-
-unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len)
-{
-       unsigned long data = 0;
-       union {
-               u16     hword;
-               u32     word;
-               u64     dword;
-       } tmp;
-
-       switch (len) {
-       case 1:
-               data = *(u8 *)buf;
-               break;
-       case 2:
-               memcpy(&tmp.hword, buf, len);
-               data = tmp.hword;
-               break;
-       case 4:
-               memcpy(&tmp.word, buf, len);
-               data = tmp.word;
-               break;
-       case 8:
-               memcpy(&tmp.dword, buf, len);
-               data = tmp.dword;
-               break;
-       }
-
-       return data;
-}
-
-/**
- * kvm_handle_mmio_return -- Handle MMIO loads after user space emulation
- *                          or in-kernel IO emulation
- *
- * @vcpu: The VCPU pointer
- * @run:  The VCPU run struct containing the mmio data
- */
-int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
-       unsigned long data;
-       unsigned int len;
-       int mask;
-
-       if (!run->mmio.is_write) {
-               len = run->mmio.len;
-               if (len > sizeof(unsigned long))
-                       return -EINVAL;
-
-               data = kvm_mmio_read_buf(run->mmio.data, len);
-
-               if (vcpu->arch.mmio_decode.sign_extend &&
-                   len < sizeof(unsigned long)) {
-                       mask = 1U << ((len * 8) - 1);
-                       data = (data ^ mask) - mask;
-               }
-
-               trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
-                              data);
-               data = vcpu_data_host_to_guest(vcpu, data, len);
-               vcpu_set_reg(vcpu, vcpu->arch.mmio_decode.rt, data);
-       }
-
-       return 0;
-}
-
-static int decode_hsr(struct kvm_vcpu *vcpu, bool *is_write, int *len)
-{
-       unsigned long rt;
-       int access_size;
-       bool sign_extend;
-
-       if (kvm_vcpu_dabt_iss1tw(vcpu)) {
-               /* page table accesses IO mem: tell guest to fix its TTBR */
-               kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
-               return 1;
-       }
-
-       access_size = kvm_vcpu_dabt_get_as(vcpu);
-       if (unlikely(access_size < 0))
-               return access_size;
-
-       *is_write = kvm_vcpu_dabt_iswrite(vcpu);
-       sign_extend = kvm_vcpu_dabt_issext(vcpu);
-       rt = kvm_vcpu_dabt_get_rd(vcpu);
-
-       *len = access_size;
-       vcpu->arch.mmio_decode.sign_extend = sign_extend;
-       vcpu->arch.mmio_decode.rt = rt;
-
-       /*
-        * The MMIO instruction is emulated and should not be re-executed
-        * in the guest.
-        */
-       kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
-       return 0;
-}
-
-int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
-                phys_addr_t fault_ipa)
-{
-       unsigned long data;
-       unsigned long rt;
-       int ret;
-       bool is_write;
-       int len;
-       u8 data_buf[8];
-
-       /*
-        * Prepare MMIO operation. First decode the syndrome data we get
-        * from the CPU. Then try if some in-kernel emulation feels
-        * responsible, otherwise let user space do its magic.
-        */
-       if (kvm_vcpu_dabt_isvalid(vcpu)) {
-               ret = decode_hsr(vcpu, &is_write, &len);
-               if (ret)
-                       return ret;
-       } else {
-               kvm_err("load/store instruction decoding not implemented\n");
-               return -ENOSYS;
-       }
-
-       rt = vcpu->arch.mmio_decode.rt;
-
-       if (is_write) {
-               data = vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rt),
-                                              len);
-
-               trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, data);
-               kvm_mmio_write_buf(data_buf, len, data);
-
-               ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, fault_ipa, len,
-                                      data_buf);
-       } else {
-               trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, len,
-                              fault_ipa, 0);
-
-               ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_ipa, len,
-                                     data_buf);
-       }
-
-       /* Now prepare kvm_run for the potential return to userland. */
-       run->mmio.is_write      = is_write;
-       run->mmio.phys_addr     = fault_ipa;
-       run->mmio.len           = len;
-
-       if (!ret) {
-               /* We handled the access successfully in the kernel. */
-               if (!is_write)
-                       memcpy(run->mmio.data, data_buf, len);
-               vcpu->stat.mmio_exit_kernel++;
-               kvm_handle_mmio_return(vcpu, run);
-               return 1;
-       }
-
-       if (is_write)
-               memcpy(run->mmio.data, data_buf, len);
-       vcpu->stat.mmio_exit_user++;
-       run->exit_reason        = KVM_EXIT_MMIO;
-       return 0;
-}
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
deleted file mode 100644 (file)
index efb4335..0000000
+++ /dev/null
@@ -1,1958 +0,0 @@
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
- */
-
-#include <linux/mman.h>
-#include <linux/kvm_host.h>
-#include <linux/io.h>
-#include <linux/hugetlb.h>
-#include <trace/events/kvm.h>
-#include <asm/pgalloc.h>
-#include <asm/cacheflush.h>
-#include <asm/kvm_arm.h>
-#include <asm/kvm_mmu.h>
-#include <asm/kvm_mmio.h>
-#include <asm/kvm_asm.h>
-#include <asm/kvm_emulate.h>
-#include <asm/virt.h>
-
-#include "trace.h"
-
-static pgd_t *boot_hyp_pgd;
-static pgd_t *hyp_pgd;
-static pgd_t *merged_hyp_pgd;
-static DEFINE_MUTEX(kvm_hyp_pgd_mutex);
-
-static unsigned long hyp_idmap_start;
-static unsigned long hyp_idmap_end;
-static phys_addr_t hyp_idmap_vector;
-
-#define S2_PGD_SIZE    (PTRS_PER_S2_PGD * sizeof(pgd_t))
-#define hyp_pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t))
-
-#define KVM_S2PTE_FLAG_IS_IOMAP                (1UL << 0)
-#define KVM_S2_FLAG_LOGGING_ACTIVE     (1UL << 1)
-
-static bool memslot_is_logging(struct kvm_memory_slot *memslot)
-{
-       return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
-}
-
-/**
- * kvm_flush_remote_tlbs() - flush all VM TLB entries for v7/8
- * @kvm:       pointer to kvm structure.
- *
- * Interface to HYP function to flush all VM TLB entries
- */
-void kvm_flush_remote_tlbs(struct kvm *kvm)
-{
-       kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
-}
-
-static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
-{
-       kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
-}
-
-/*
- * D-Cache management functions. They take the page table entries by
- * value, as they are flushing the cache using the kernel mapping (or
- * kmap on 32bit).
- */
-static void kvm_flush_dcache_pte(pte_t pte)
-{
-       __kvm_flush_dcache_pte(pte);
-}
-
-static void kvm_flush_dcache_pmd(pmd_t pmd)
-{
-       __kvm_flush_dcache_pmd(pmd);
-}
-
-static void kvm_flush_dcache_pud(pud_t pud)
-{
-       __kvm_flush_dcache_pud(pud);
-}
-
-static bool kvm_is_device_pfn(unsigned long pfn)
-{
-       return !pfn_valid(pfn);
-}
-
-/**
- * stage2_dissolve_pmd() - clear and flush huge PMD entry
- * @kvm:       pointer to kvm structure.
- * @addr:      IPA
- * @pmd:       pmd pointer for IPA
- *
- * Function clears a PMD entry, flushes addr 1st and 2nd stage TLBs. Marks all
- * pages in the range dirty.
- */
-static void stage2_dissolve_pmd(struct kvm *kvm, phys_addr_t addr, pmd_t *pmd)
-{
-       if (!pmd_thp_or_huge(*pmd))
-               return;
-
-       pmd_clear(pmd);
-       kvm_tlb_flush_vmid_ipa(kvm, addr);
-       put_page(virt_to_page(pmd));
-}
-
-static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
-                                 int min, int max)
-{
-       void *page;
-
-       BUG_ON(max > KVM_NR_MEM_OBJS);
-       if (cache->nobjs >= min)
-               return 0;
-       while (cache->nobjs < max) {
-               page = (void *)__get_free_page(PGALLOC_GFP);
-               if (!page)
-                       return -ENOMEM;
-               cache->objects[cache->nobjs++] = page;
-       }
-       return 0;
-}
-
-static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
-{
-       while (mc->nobjs)
-               free_page((unsigned long)mc->objects[--mc->nobjs]);
-}
-
-static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
-{
-       void *p;
-
-       BUG_ON(!mc || !mc->nobjs);
-       p = mc->objects[--mc->nobjs];
-       return p;
-}
-
-static void clear_stage2_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr)
-{
-       pud_t *pud_table __maybe_unused = stage2_pud_offset(pgd, 0UL);
-       stage2_pgd_clear(pgd);
-       kvm_tlb_flush_vmid_ipa(kvm, addr);
-       stage2_pud_free(pud_table);
-       put_page(virt_to_page(pgd));
-}
-
-static void clear_stage2_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
-{
-       pmd_t *pmd_table __maybe_unused = stage2_pmd_offset(pud, 0);
-       VM_BUG_ON(stage2_pud_huge(*pud));
-       stage2_pud_clear(pud);
-       kvm_tlb_flush_vmid_ipa(kvm, addr);
-       stage2_pmd_free(pmd_table);
-       put_page(virt_to_page(pud));
-}
-
-static void clear_stage2_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
-{
-       pte_t *pte_table = pte_offset_kernel(pmd, 0);
-       VM_BUG_ON(pmd_thp_or_huge(*pmd));
-       pmd_clear(pmd);
-       kvm_tlb_flush_vmid_ipa(kvm, addr);
-       pte_free_kernel(NULL, pte_table);
-       put_page(virt_to_page(pmd));
-}
-
-/*
- * Unmapping vs dcache management:
- *
- * If a guest maps certain memory pages as uncached, all writes will
- * bypass the data cache and go directly to RAM.  However, the CPUs
- * can still speculate reads (not writes) and fill cache lines with
- * data.
- *
- * Those cache lines will be *clean* cache lines though, so a
- * clean+invalidate operation is equivalent to an invalidate
- * operation, because no cache lines are marked dirty.
- *
- * Those clean cache lines could be filled prior to an uncached write
- * by the guest, and the cache coherent IO subsystem would therefore
- * end up writing old data to disk.
- *
- * This is why right after unmapping a page/section and invalidating
- * the corresponding TLBs, we call kvm_flush_dcache_p*() to make sure
- * the IO subsystem will never hit in the cache.
- */
-static void unmap_stage2_ptes(struct kvm *kvm, pmd_t *pmd,
-                      phys_addr_t addr, phys_addr_t end)
-{
-       phys_addr_t start_addr = addr;
-       pte_t *pte, *start_pte;
-
-       start_pte = pte = pte_offset_kernel(pmd, addr);
-       do {
-               if (!pte_none(*pte)) {
-                       pte_t old_pte = *pte;
-
-                       kvm_set_pte(pte, __pte(0));
-                       kvm_tlb_flush_vmid_ipa(kvm, addr);
-
-                       /* No need to invalidate the cache for device mappings */
-                       if (!kvm_is_device_pfn(pte_pfn(old_pte)))
-                               kvm_flush_dcache_pte(old_pte);
-
-                       put_page(virt_to_page(pte));
-               }
-       } while (pte++, addr += PAGE_SIZE, addr != end);
-
-       if (stage2_pte_table_empty(start_pte))
-               clear_stage2_pmd_entry(kvm, pmd, start_addr);
-}
-
-static void unmap_stage2_pmds(struct kvm *kvm, pud_t *pud,
-                      phys_addr_t addr, phys_addr_t end)
-{
-       phys_addr_t next, start_addr = addr;
-       pmd_t *pmd, *start_pmd;
-
-       start_pmd = pmd = stage2_pmd_offset(pud, addr);
-       do {
-               next = stage2_pmd_addr_end(addr, end);
-               if (!pmd_none(*pmd)) {
-                       if (pmd_thp_or_huge(*pmd)) {
-                               pmd_t old_pmd = *pmd;
-
-                               pmd_clear(pmd);
-                               kvm_tlb_flush_vmid_ipa(kvm, addr);
-
-                               kvm_flush_dcache_pmd(old_pmd);
-
-                               put_page(virt_to_page(pmd));
-                       } else {
-                               unmap_stage2_ptes(kvm, pmd, addr, next);
-                       }
-               }
-       } while (pmd++, addr = next, addr != end);
-
-       if (stage2_pmd_table_empty(start_pmd))
-               clear_stage2_pud_entry(kvm, pud, start_addr);
-}
-
-static void unmap_stage2_puds(struct kvm *kvm, pgd_t *pgd,
-                      phys_addr_t addr, phys_addr_t end)
-{
-       phys_addr_t next, start_addr = addr;
-       pud_t *pud, *start_pud;
-
-       start_pud = pud = stage2_pud_offset(pgd, addr);
-       do {
-               next = stage2_pud_addr_end(addr, end);
-               if (!stage2_pud_none(*pud)) {
-                       if (stage2_pud_huge(*pud)) {
-                               pud_t old_pud = *pud;
-
-                               stage2_pud_clear(pud);
-                               kvm_tlb_flush_vmid_ipa(kvm, addr);
-                               kvm_flush_dcache_pud(old_pud);
-                               put_page(virt_to_page(pud));
-                       } else {
-                               unmap_stage2_pmds(kvm, pud, addr, next);
-                       }
-               }
-       } while (pud++, addr = next, addr != end);
-
-       if (stage2_pud_table_empty(start_pud))
-               clear_stage2_pgd_entry(kvm, pgd, start_addr);
-}
-
-/**
- * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
- * @kvm:   The VM pointer
- * @start: The intermediate physical base address of the range to unmap
- * @size:  The size of the area to unmap
- *
- * Clear a range of stage-2 mappings, lowering the various ref-counts.  Must
- * be called while holding mmu_lock (unless for freeing the stage2 pgd before
- * destroying the VM), otherwise another faulting VCPU may come in and mess
- * with things behind our backs.
- */
-static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
-{
-       pgd_t *pgd;
-       phys_addr_t addr = start, end = start + size;
-       phys_addr_t next;
-
-       pgd = kvm->arch.pgd + stage2_pgd_index(addr);
-       do {
-               next = stage2_pgd_addr_end(addr, end);
-               if (!stage2_pgd_none(*pgd))
-                       unmap_stage2_puds(kvm, pgd, addr, next);
-       } while (pgd++, addr = next, addr != end);
-}
-
-static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
-                             phys_addr_t addr, phys_addr_t end)
-{
-       pte_t *pte;
-
-       pte = pte_offset_kernel(pmd, addr);
-       do {
-               if (!pte_none(*pte) && !kvm_is_device_pfn(pte_pfn(*pte)))
-                       kvm_flush_dcache_pte(*pte);
-       } while (pte++, addr += PAGE_SIZE, addr != end);
-}
-
-static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
-                             phys_addr_t addr, phys_addr_t end)
-{
-       pmd_t *pmd;
-       phys_addr_t next;
-
-       pmd = stage2_pmd_offset(pud, addr);
-       do {
-               next = stage2_pmd_addr_end(addr, end);
-               if (!pmd_none(*pmd)) {
-                       if (pmd_thp_or_huge(*pmd))
-                               kvm_flush_dcache_pmd(*pmd);
-                       else
-                               stage2_flush_ptes(kvm, pmd, addr, next);
-               }
-       } while (pmd++, addr = next, addr != end);
-}
-
-static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
-                             phys_addr_t addr, phys_addr_t end)
-{
-       pud_t *pud;
-       phys_addr_t next;
-
-       pud = stage2_pud_offset(pgd, addr);
-       do {
-               next = stage2_pud_addr_end(addr, end);
-               if (!stage2_pud_none(*pud)) {
-                       if (stage2_pud_huge(*pud))
-                               kvm_flush_dcache_pud(*pud);
-                       else
-                               stage2_flush_pmds(kvm, pud, addr, next);
-               }
-       } while (pud++, addr = next, addr != end);
-}
-
-static void stage2_flush_memslot(struct kvm *kvm,
-                                struct kvm_memory_slot *memslot)
-{
-       phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
-       phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
-       phys_addr_t next;
-       pgd_t *pgd;
-
-       pgd = kvm->arch.pgd + stage2_pgd_index(addr);
-       do {
-               next = stage2_pgd_addr_end(addr, end);
-               stage2_flush_puds(kvm, pgd, addr, next);
-       } while (pgd++, addr = next, addr != end);
-}
-
-/**
- * stage2_flush_vm - Invalidate cache for pages mapped in stage 2
- * @kvm: The struct kvm pointer
- *
- * Go through the stage 2 page tables and invalidate any cache lines
- * backing memory already mapped to the VM.
- */
-static void stage2_flush_vm(struct kvm *kvm)
-{
-       struct kvm_memslots *slots;
-       struct kvm_memory_slot *memslot;
-       int idx;
-
-       idx = srcu_read_lock(&kvm->srcu);
-       spin_lock(&kvm->mmu_lock);
-
-       slots = kvm_memslots(kvm);
-       kvm_for_each_memslot(memslot, slots)
-               stage2_flush_memslot(kvm, memslot);
-
-       spin_unlock(&kvm->mmu_lock);
-       srcu_read_unlock(&kvm->srcu, idx);
-}
-
-static void clear_hyp_pgd_entry(pgd_t *pgd)
-{
-       pud_t *pud_table __maybe_unused = pud_offset(pgd, 0UL);
-       pgd_clear(pgd);
-       pud_free(NULL, pud_table);
-       put_page(virt_to_page(pgd));
-}
-
-static void clear_hyp_pud_entry(pud_t *pud)
-{
-       pmd_t *pmd_table __maybe_unused = pmd_offset(pud, 0);
-       VM_BUG_ON(pud_huge(*pud));
-       pud_clear(pud);
-       pmd_free(NULL, pmd_table);
-       put_page(virt_to_page(pud));
-}
-
-static void clear_hyp_pmd_entry(pmd_t *pmd)
-{
-       pte_t *pte_table = pte_offset_kernel(pmd, 0);
-       VM_BUG_ON(pmd_thp_or_huge(*pmd));
-       pmd_clear(pmd);
-       pte_free_kernel(NULL, pte_table);
-       put_page(virt_to_page(pmd));
-}
-
-static void unmap_hyp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end)
-{
-       pte_t *pte, *start_pte;
-
-       start_pte = pte = pte_offset_kernel(pmd, addr);
-       do {
-               if (!pte_none(*pte)) {
-                       kvm_set_pte(pte, __pte(0));
-                       put_page(virt_to_page(pte));
-               }
-       } while (pte++, addr += PAGE_SIZE, addr != end);
-
-       if (hyp_pte_table_empty(start_pte))
-               clear_hyp_pmd_entry(pmd);
-}
-
-static void unmap_hyp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end)
-{
-       phys_addr_t next;
-       pmd_t *pmd, *start_pmd;
-
-       start_pmd = pmd = pmd_offset(pud, addr);
-       do {
-               next = pmd_addr_end(addr, end);
-               /* Hyp doesn't use huge pmds */
-               if (!pmd_none(*pmd))
-                       unmap_hyp_ptes(pmd, addr, next);
-       } while (pmd++, addr = next, addr != end);
-
-       if (hyp_pmd_table_empty(start_pmd))
-               clear_hyp_pud_entry(pud);
-}
-
-static void unmap_hyp_puds(pgd_t *pgd, phys_addr_t addr, phys_addr_t end)
-{
-       phys_addr_t next;
-       pud_t *pud, *start_pud;
-
-       start_pud = pud = pud_offset(pgd, addr);
-       do {
-               next = pud_addr_end(addr, end);
-               /* Hyp doesn't use huge puds */
-               if (!pud_none(*pud))
-                       unmap_hyp_pmds(pud, addr, next);
-       } while (pud++, addr = next, addr != end);
-
-       if (hyp_pud_table_empty(start_pud))
-               clear_hyp_pgd_entry(pgd);
-}
-
-static void unmap_hyp_range(pgd_t *pgdp, phys_addr_t start, u64 size)
-{
-       pgd_t *pgd;
-       phys_addr_t addr = start, end = start + size;
-       phys_addr_t next;
-
-       /*
-        * We don't unmap anything from HYP, except at the hyp tear down.
-        * Hence, we don't have to invalidate the TLBs here.
-        */
-       pgd = pgdp + pgd_index(addr);
-       do {
-               next = pgd_addr_end(addr, end);
-               if (!pgd_none(*pgd))
-                       unmap_hyp_puds(pgd, addr, next);
-       } while (pgd++, addr = next, addr != end);
-}
-
-/**
- * free_hyp_pgds - free Hyp-mode page tables
- *
- * Assumes hyp_pgd is a page table used strictly in Hyp-mode and
- * therefore contains either mappings in the kernel memory area (above
- * PAGE_OFFSET), or device mappings in the vmalloc range (from
- * VMALLOC_START to VMALLOC_END).
- *
- * boot_hyp_pgd should only map two pages for the init code.
- */
-void free_hyp_pgds(void)
-{
-       unsigned long addr;
-
-       mutex_lock(&kvm_hyp_pgd_mutex);
-
-       if (boot_hyp_pgd) {
-               unmap_hyp_range(boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
-               free_pages((unsigned long)boot_hyp_pgd, hyp_pgd_order);
-               boot_hyp_pgd = NULL;
-       }
-
-       if (hyp_pgd) {
-               unmap_hyp_range(hyp_pgd, hyp_idmap_start, PAGE_SIZE);
-               for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
-                       unmap_hyp_range(hyp_pgd, kern_hyp_va(addr), PGDIR_SIZE);
-               for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
-                       unmap_hyp_range(hyp_pgd, kern_hyp_va(addr), PGDIR_SIZE);
-
-               free_pages((unsigned long)hyp_pgd, hyp_pgd_order);
-               hyp_pgd = NULL;
-       }
-       if (merged_hyp_pgd) {
-               clear_page(merged_hyp_pgd);
-               free_page((unsigned long)merged_hyp_pgd);
-               merged_hyp_pgd = NULL;
-       }
-
-       mutex_unlock(&kvm_hyp_pgd_mutex);
-}
-
-static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
-                                   unsigned long end, unsigned long pfn,
-                                   pgprot_t prot)
-{
-       pte_t *pte;
-       unsigned long addr;
-
-       addr = start;
-       do {
-               pte = pte_offset_kernel(pmd, addr);
-               kvm_set_pte(pte, pfn_pte(pfn, prot));
-               get_page(virt_to_page(pte));
-               kvm_flush_dcache_to_poc(pte, sizeof(*pte));
-               pfn++;
-       } while (addr += PAGE_SIZE, addr != end);
-}
-
-static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
-                                  unsigned long end, unsigned long pfn,
-                                  pgprot_t prot)
-{
-       pmd_t *pmd;
-       pte_t *pte;
-       unsigned long addr, next;
-
-       addr = start;
-       do {
-               pmd = pmd_offset(pud, addr);
-
-               BUG_ON(pmd_sect(*pmd));
-
-               if (pmd_none(*pmd)) {
-                       pte = pte_alloc_one_kernel(NULL, addr);
-                       if (!pte) {
-                               kvm_err("Cannot allocate Hyp pte\n");
-                               return -ENOMEM;
-                       }
-                       pmd_populate_kernel(NULL, pmd, pte);
-                       get_page(virt_to_page(pmd));
-                       kvm_flush_dcache_to_poc(pmd, sizeof(*pmd));
-               }
-
-               next = pmd_addr_end(addr, end);
-
-               create_hyp_pte_mappings(pmd, addr, next, pfn, prot);
-               pfn += (next - addr) >> PAGE_SHIFT;
-       } while (addr = next, addr != end);
-
-       return 0;
-}
-
-static int create_hyp_pud_mappings(pgd_t *pgd, unsigned long start,
-                                  unsigned long end, unsigned long pfn,
-                                  pgprot_t prot)
-{
-       pud_t *pud;
-       pmd_t *pmd;
-       unsigned long addr, next;
-       int ret;
-
-       addr = start;
-       do {
-               pud = pud_offset(pgd, addr);
-
-               if (pud_none_or_clear_bad(pud)) {
-                       pmd = pmd_alloc_one(NULL, addr);
-                       if (!pmd) {
-                               kvm_err("Cannot allocate Hyp pmd\n");
-                               return -ENOMEM;
-                       }
-                       pud_populate(NULL, pud, pmd);
-                       get_page(virt_to_page(pud));
-                       kvm_flush_dcache_to_poc(pud, sizeof(*pud));
-               }
-
-               next = pud_addr_end(addr, end);
-               ret = create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
-               if (ret)
-                       return ret;
-               pfn += (next - addr) >> PAGE_SHIFT;
-       } while (addr = next, addr != end);
-
-       return 0;
-}
-
-static int __create_hyp_mappings(pgd_t *pgdp,
-                                unsigned long start, unsigned long end,
-                                unsigned long pfn, pgprot_t prot)
-{
-       pgd_t *pgd;
-       pud_t *pud;
-       unsigned long addr, next;
-       int err = 0;
-
-       mutex_lock(&kvm_hyp_pgd_mutex);
-       addr = start & PAGE_MASK;
-       end = PAGE_ALIGN(end);
-       do {
-               pgd = pgdp + pgd_index(addr);
-
-               if (pgd_none(*pgd)) {
-                       pud = pud_alloc_one(NULL, addr);
-                       if (!pud) {
-                               kvm_err("Cannot allocate Hyp pud\n");
-                               err = -ENOMEM;
-                               goto out;
-                       }
-                       pgd_populate(NULL, pgd, pud);
-                       get_page(virt_to_page(pgd));
-                       kvm_flush_dcache_to_poc(pgd, sizeof(*pgd));
-               }
-
-               next = pgd_addr_end(addr, end);
-               err = create_hyp_pud_mappings(pgd, addr, next, pfn, prot);
-               if (err)
-                       goto out;
-               pfn += (next - addr) >> PAGE_SHIFT;
-       } while (addr = next, addr != end);
-out:
-       mutex_unlock(&kvm_hyp_pgd_mutex);
-       return err;
-}
-
-static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
-{
-       if (!is_vmalloc_addr(kaddr)) {
-               BUG_ON(!virt_addr_valid(kaddr));
-               return __pa(kaddr);
-       } else {
-               return page_to_phys(vmalloc_to_page(kaddr)) +
-                      offset_in_page(kaddr);
-       }
-}
-
-/**
- * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
- * @from:      The virtual kernel start address of the range
- * @to:                The virtual kernel end address of the range (exclusive)
- * @prot:      The protection to be applied to this range
- *
- * The same virtual address as the kernel virtual address is also used
- * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying
- * physical pages.
- */
-int create_hyp_mappings(void *from, void *to, pgprot_t prot)
-{
-       phys_addr_t phys_addr;
-       unsigned long virt_addr;
-       unsigned long start = kern_hyp_va((unsigned long)from);
-       unsigned long end = kern_hyp_va((unsigned long)to);
-
-       if (is_kernel_in_hyp_mode())
-               return 0;
-
-       start = start & PAGE_MASK;
-       end = PAGE_ALIGN(end);
-
-       for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
-               int err;
-
-               phys_addr = kvm_kaddr_to_phys(from + virt_addr - start);
-               err = __create_hyp_mappings(hyp_pgd, virt_addr,
-                                           virt_addr + PAGE_SIZE,
-                                           __phys_to_pfn(phys_addr),
-                                           prot);
-               if (err)
-                       return err;
-       }
-
-       return 0;
-}
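As a concrete illustration of create_hyp_mappings(), here is a minimal sketch of the kind of caller found in the KVM init code, which duplicates a kernel range into the Hyp page tables with execute permission. The section symbols used below are hypothetical placeholders, not real kernel symbols.

/*
 * Sketch only: map an executable range into Hyp. The extern symbols are
 * made-up stand-ins for whatever range the caller needs at EL2/HYP.
 */
extern char example_hyp_text_start[], example_hyp_text_end[];

static int example_map_hyp_text(void)
{
	int err;

	err = create_hyp_mappings(example_hyp_text_start,
				  example_hyp_text_end, PAGE_HYP_EXEC);
	if (err)
		kvm_err("Cannot map example hyp text section\n");

	return err;
}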
-
-/**
- * create_hyp_io_mappings - duplicate a kernel IO mapping into Hyp mode
- * @from:      The kernel start VA of the range
- * @to:                The kernel end VA of the range (exclusive)
- * @phys_addr: The physical start address which gets mapped
- *
- * The resulting HYP VA is the same as the kernel VA, modulo
- * HYP_PAGE_OFFSET.
- */
-int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
-{
-       unsigned long start = kern_hyp_va((unsigned long)from);
-       unsigned long end = kern_hyp_va((unsigned long)to);
-
-       if (is_kernel_in_hyp_mode())
-               return 0;
-
-       /* Check for a valid kernel IO mapping */
-       if (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1))
-               return -EINVAL;
-
-       return __create_hyp_mappings(hyp_pgd, start, end,
-                                    __phys_to_pfn(phys_addr), PAGE_HYP_DEVICE);
-}
-
-/**
- * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
- * @kvm:       The KVM struct pointer for the VM.
- *
- * Allocates only the stage-2 HW PGD level table(s) (can support either full
- * 40-bit input addresses or limited to 32-bit input addresses). Clears the
- * allocated pages.
- *
- * Note we don't need locking here as this is only called when the VM is
- * created, which can only be done once.
- */
-int kvm_alloc_stage2_pgd(struct kvm *kvm)
-{
-       pgd_t *pgd;
-
-       if (kvm->arch.pgd != NULL) {
-               kvm_err("kvm_arch already initialized?\n");
-               return -EINVAL;
-       }
-
-       /* Allocate the HW PGD, making sure that each page gets its own refcount */
-       pgd = alloc_pages_exact(S2_PGD_SIZE, GFP_KERNEL | __GFP_ZERO);
-       if (!pgd)
-               return -ENOMEM;
-
-       kvm->arch.pgd = pgd;
-       return 0;
-}
-
-static void stage2_unmap_memslot(struct kvm *kvm,
-                                struct kvm_memory_slot *memslot)
-{
-       hva_t hva = memslot->userspace_addr;
-       phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
-       phys_addr_t size = PAGE_SIZE * memslot->npages;
-       hva_t reg_end = hva + size;
-
-       /*
-        * A memory region could potentially cover multiple VMAs, and any holes
-        * between them, so iterate over all of them to find out if we should
-        * unmap any of them.
-        *
-        *     +--------------------------------------------+
-        * +---------------+----------------+   +----------------+
-        * |   : VMA 1     |      VMA 2     |   |    VMA 3  :    |
-        * +---------------+----------------+   +----------------+
-        *     |               memory region                |
-        *     +--------------------------------------------+
-        */
-       do {
-               struct vm_area_struct *vma = find_vma(current->mm, hva);
-               hva_t vm_start, vm_end;
-
-               if (!vma || vma->vm_start >= reg_end)
-                       break;
-
-               /*
-                * Take the intersection of this VMA with the memory region
-                */
-               vm_start = max(hva, vma->vm_start);
-               vm_end = min(reg_end, vma->vm_end);
-
-               if (!(vma->vm_flags & VM_PFNMAP)) {
-                       gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
-                       unmap_stage2_range(kvm, gpa, vm_end - vm_start);
-               }
-               hva = vm_end;
-       } while (hva < reg_end);
-}
-
-/**
- * stage2_unmap_vm - Unmap Stage-2 RAM mappings
- * @kvm: The struct kvm pointer
- *
- * Go through the memregions and unmap any regular RAM
- * backing memory already mapped to the VM.
- */
-void stage2_unmap_vm(struct kvm *kvm)
-{
-       struct kvm_memslots *slots;
-       struct kvm_memory_slot *memslot;
-       int idx;
-
-       idx = srcu_read_lock(&kvm->srcu);
-       spin_lock(&kvm->mmu_lock);
-
-       slots = kvm_memslots(kvm);
-       kvm_for_each_memslot(memslot, slots)
-               stage2_unmap_memslot(kvm, memslot);
-
-       spin_unlock(&kvm->mmu_lock);
-       srcu_read_unlock(&kvm->srcu, idx);
-}
-
-/**
- * kvm_free_stage2_pgd - free all stage-2 tables
- * @kvm:       The KVM struct pointer for the VM.
- *
- * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
- * underlying level-2 and level-3 tables before freeing the actual level-1 table
- * and setting the struct pointer to NULL.
- *
- * Note we don't need locking here as this is only called when the VM is
- * destroyed, which can only be done once.
- */
-void kvm_free_stage2_pgd(struct kvm *kvm)
-{
-       if (kvm->arch.pgd == NULL)
-               return;
-
-       unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
-       /* Free the HW pgd, one page at a time */
-       free_pages_exact(kvm->arch.pgd, S2_PGD_SIZE);
-       kvm->arch.pgd = NULL;
-}
-
-static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
-                            phys_addr_t addr)
-{
-       pgd_t *pgd;
-       pud_t *pud;
-
-       pgd = kvm->arch.pgd + stage2_pgd_index(addr);
-       if (WARN_ON(stage2_pgd_none(*pgd))) {
-               if (!cache)
-                       return NULL;
-               pud = mmu_memory_cache_alloc(cache);
-               stage2_pgd_populate(pgd, pud);
-               get_page(virt_to_page(pgd));
-       }
-
-       return stage2_pud_offset(pgd, addr);
-}
-
-static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
-                            phys_addr_t addr)
-{
-       pud_t *pud;
-       pmd_t *pmd;
-
-       pud = stage2_get_pud(kvm, cache, addr);
-       if (stage2_pud_none(*pud)) {
-               if (!cache)
-                       return NULL;
-               pmd = mmu_memory_cache_alloc(cache);
-               stage2_pud_populate(pud, pmd);
-               get_page(virt_to_page(pud));
-       }
-
-       return stage2_pmd_offset(pud, addr);
-}
-
-static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
-                              *cache, phys_addr_t addr, const pmd_t *new_pmd)
-{
-       pmd_t *pmd, old_pmd;
-
-       pmd = stage2_get_pmd(kvm, cache, addr);
-       VM_BUG_ON(!pmd);
-
-       /*
-        * Mapping in huge pages should only happen through a fault.  If a
-        * page is merged into a transparent huge page, the individual
-        * subpages of that huge page should be unmapped through MMU
-        * notifiers before we get here.
-        *
- * Merging of CompoundPages is not supported; they should be split
- * first, unmapped, merged, and mapped back in on demand.
-        */
-       VM_BUG_ON(pmd_present(*pmd) && pmd_pfn(*pmd) != pmd_pfn(*new_pmd));
-
-       old_pmd = *pmd;
-       if (pmd_present(old_pmd)) {
-               pmd_clear(pmd);
-               kvm_tlb_flush_vmid_ipa(kvm, addr);
-       } else {
-               get_page(virt_to_page(pmd));
-       }
-
-       kvm_set_pmd(pmd, *new_pmd);
-       return 0;
-}
-
-static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
-                         phys_addr_t addr, const pte_t *new_pte,
-                         unsigned long flags)
-{
-       pmd_t *pmd;
-       pte_t *pte, old_pte;
-       bool iomap = flags & KVM_S2PTE_FLAG_IS_IOMAP;
-       bool logging_active = flags & KVM_S2_FLAG_LOGGING_ACTIVE;
-
-       VM_BUG_ON(logging_active && !cache);
-
-       /* Create stage-2 page table mapping - Levels 0 and 1 */
-       pmd = stage2_get_pmd(kvm, cache, addr);
-       if (!pmd) {
-               /*
-                * Ignore calls from kvm_set_spte_hva for unallocated
-                * address ranges.
-                */
-               return 0;
-       }
-
-       /*
-        * While dirty page logging is active, dissolve a huge PMD and then
-        * continue on to allocate a page.
-        */
-       if (logging_active)
-               stage2_dissolve_pmd(kvm, addr, pmd);
-
-       /* Create stage-2 page mappings - Level 2 */
-       if (pmd_none(*pmd)) {
-               if (!cache)
-                       return 0; /* ignore calls from kvm_set_spte_hva */
-               pte = mmu_memory_cache_alloc(cache);
-               pmd_populate_kernel(NULL, pmd, pte);
-               get_page(virt_to_page(pmd));
-       }
-
-       pte = pte_offset_kernel(pmd, addr);
-
-       if (iomap && pte_present(*pte))
-               return -EFAULT;
-
-       /* Create 2nd stage page table mapping - Level 3 */
-       old_pte = *pte;
-       if (pte_present(old_pte)) {
-               kvm_set_pte(pte, __pte(0));
-               kvm_tlb_flush_vmid_ipa(kvm, addr);
-       } else {
-               get_page(virt_to_page(pte));
-       }
-
-       kvm_set_pte(pte, *new_pte);
-       return 0;
-}
-
-#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-static int stage2_ptep_test_and_clear_young(pte_t *pte)
-{
-       if (pte_young(*pte)) {
-               *pte = pte_mkold(*pte);
-               return 1;
-       }
-       return 0;
-}
-#else
-static int stage2_ptep_test_and_clear_young(pte_t *pte)
-{
-       return __ptep_test_and_clear_young(pte);
-}
-#endif
-
-static int stage2_pmdp_test_and_clear_young(pmd_t *pmd)
-{
-       return stage2_ptep_test_and_clear_young((pte_t *)pmd);
-}
-
-/**
- * kvm_phys_addr_ioremap - map a device range to guest IPA
- *
- * @kvm:       The KVM pointer
- * @guest_ipa: The IPA at which to insert the mapping
- * @pa:                The physical address of the device
- * @size:      The size of the mapping
- * @writable:  Whether the mapping should be writable by the guest
- */
-int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
-                         phys_addr_t pa, unsigned long size, bool writable)
-{
-       phys_addr_t addr, end;
-       int ret = 0;
-       unsigned long pfn;
-       struct kvm_mmu_memory_cache cache = { 0, };
-
-       end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
-       pfn = __phys_to_pfn(pa);
-
-       for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
-               pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE);
-
-               if (writable)
-                       pte = kvm_s2pte_mkwrite(pte);
-
-               ret = mmu_topup_memory_cache(&cache, KVM_MMU_CACHE_MIN_PAGES,
-                                               KVM_NR_MEM_OBJS);
-               if (ret)
-                       goto out;
-               spin_lock(&kvm->mmu_lock);
-               ret = stage2_set_pte(kvm, &cache, addr, &pte,
-                                               KVM_S2PTE_FLAG_IS_IOMAP);
-               spin_unlock(&kvm->mmu_lock);
-               if (ret)
-                       goto out;
-
-               pfn++;
-       }
-
-out:
-       mmu_free_memory_cache(&cache);
-       return ret;
-}
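A hedged usage sketch for kvm_phys_addr_ioremap(): mapping a small MMIO region into the guest at a fixed IPA. Both addresses below are made up for illustration; real callers (such as the vGIC setup code) pass addresses discovered from the host.

/*
 * Sketch only: back an 8KiB device region at an illustrative guest IPA
 * with an illustrative host physical address, writable by the guest.
 */
static int example_map_device(struct kvm *kvm)
{
	const phys_addr_t guest_ipa = 0x08010000;	/* made-up IPA */
	const phys_addr_t host_pa   = 0x2c020000;	/* made-up PA  */

	return kvm_phys_addr_ioremap(kvm, guest_ipa, host_pa,
				     2 * PAGE_SIZE, true);
}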
-
-static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap)
-{
-       kvm_pfn_t pfn = *pfnp;
-       gfn_t gfn = *ipap >> PAGE_SHIFT;
-
-       if (PageTransCompoundMap(pfn_to_page(pfn))) {
-               unsigned long mask;
-               /*
-                * The address we faulted on is backed by a transparent huge
-                * page.  However, because we map the compound huge page and
-                * not the individual tail page, we need to transfer the
-                * refcount to the head page.  We have to be careful that the
-                * THP doesn't start to split while we are adjusting the
-                * refcounts.
-                *
-                * We are sure this doesn't happen, because mmu_notifier_retry
-                * was successful and we are holding the mmu_lock, so if this
-                * THP is trying to split, it will be blocked in the mmu
-                * notifier before touching any of the pages, specifically
-                * before being able to call __split_huge_page_refcount().
-                *
-                * We can therefore safely transfer the refcount from PG_tail
-                * to PG_head and switch the pfn from a tail page to the head
-                * page accordingly.
-                */
-               mask = PTRS_PER_PMD - 1;
-               VM_BUG_ON((gfn & mask) != (pfn & mask));
-               if (pfn & mask) {
-                       *ipap &= PMD_MASK;
-                       kvm_release_pfn_clean(pfn);
-                       pfn &= ~mask;
-                       kvm_get_pfn(pfn);
-                       *pfnp = pfn;
-               }
-
-               return true;
-       }
-
-       return false;
-}
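The mask arithmetic in transparent_hugepage_adjust() is easiest to see with numbers. A short worked example, assuming 4K pages so that PTRS_PER_PMD is 512 and the mask is 0x1ff; the pfn/gfn values are made up.

/*
 * Worked example (4K pages, mask == 0x1ff):
 *
 *   pfn = 0x12345 (a tail page of a THP), gfn = 0x89345
 *
 *   Both share the same offset within the 2MiB block (0x145), so the
 *   VM_BUG_ON() holds. The adjustment then rounds both down:
 *
 *     *ipap &= PMD_MASK   ->  IPA aligned to the 2MiB block
 *     pfn   &= ~0x1ff     ->  0x12200, the head page of the THP
 *
 *   and the reference is moved from the tail page to the head page via
 *   kvm_release_pfn_clean()/kvm_get_pfn().
 */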
-
-static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
-{
-       if (kvm_vcpu_trap_is_iabt(vcpu))
-               return false;
-
-       return kvm_vcpu_dabt_iswrite(vcpu);
-}
-
-/**
- * stage2_wp_ptes - write protect PMD range
- * @pmd:       pointer to pmd entry
- * @addr:      range start address
- * @end:       range end address
- */
-static void stage2_wp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end)
-{
-       pte_t *pte;
-
-       pte = pte_offset_kernel(pmd, addr);
-       do {
-               if (!pte_none(*pte)) {
-                       if (!kvm_s2pte_readonly(pte))
-                               kvm_set_s2pte_readonly(pte);
-               }
-       } while (pte++, addr += PAGE_SIZE, addr != end);
-}
-
-/**
- * stage2_wp_pmds - write protect PUD range
- * @pud:       pointer to pud entry
- * @addr:      range start address
- * @end:       range end address
- */
-static void stage2_wp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end)
-{
-       pmd_t *pmd;
-       phys_addr_t next;
-
-       pmd = stage2_pmd_offset(pud, addr);
-
-       do {
-               next = stage2_pmd_addr_end(addr, end);
-               if (!pmd_none(*pmd)) {
-                       if (pmd_thp_or_huge(*pmd)) {
-                               if (!kvm_s2pmd_readonly(pmd))
-                                       kvm_set_s2pmd_readonly(pmd);
-                       } else {
-                               stage2_wp_ptes(pmd, addr, next);
-                       }
-               }
-       } while (pmd++, addr = next, addr != end);
-}
-
-/**
- * stage2_wp_puds - write protect PGD range
- * @pgd:       pointer to pgd entry
- * @addr:      range start address
- * @end:       range end address
- *
- * Process PUD entries; a huge PUD triggers a BUG(), since stage-2 huge
- * PUDs are not supported yet.
- */
-static void  stage2_wp_puds(pgd_t *pgd, phys_addr_t addr, phys_addr_t end)
-{
-       pud_t *pud;
-       phys_addr_t next;
-
-       pud = stage2_pud_offset(pgd, addr);
-       do {
-               next = stage2_pud_addr_end(addr, end);
-               if (!stage2_pud_none(*pud)) {
-                       /* TODO:PUD not supported, revisit later if supported */
-                       BUG_ON(stage2_pud_huge(*pud));
-                       stage2_wp_pmds(pud, addr, next);
-               }
-       } while (pud++, addr = next, addr != end);
-}
-
-/**
- * stage2_wp_range() - write protect stage2 memory region range
- * @kvm:       The KVM pointer
- * @addr:      Start address of range
- * @end:       End address of range
- */
-static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
-{
-       pgd_t *pgd;
-       phys_addr_t next;
-
-       pgd = kvm->arch.pgd + stage2_pgd_index(addr);
-       do {
-               /*
-                * Release kvm_mmu_lock periodically if the memory region is
-                * large. Otherwise, we may see kernel panics with
-                * CONFIG_DETECT_HUNG_TASK, CONFIG_LOCKUP_DETECTOR,
-                * CONFIG_LOCKDEP. Additionally, holding the lock too long
-                * will also starve other vCPUs.
-                */
-               if (need_resched() || spin_needbreak(&kvm->mmu_lock))
-                       cond_resched_lock(&kvm->mmu_lock);
-
-               next = stage2_pgd_addr_end(addr, end);
-               if (stage2_pgd_present(*pgd))
-                       stage2_wp_puds(pgd, addr, next);
-       } while (pgd++, addr = next, addr != end);
-}
-
-/**
- * kvm_mmu_wp_memory_region() - write protect stage 2 entries for memory slot
- * @kvm:       The KVM pointer
- * @slot:      The memory slot to write protect
- *
- * Called to start logging dirty pages after memory region
- * KVM_MEM_LOG_DIRTY_PAGES operation is called. After this function returns
- * all present PMD and PTEs are write protected in the memory region.
- * Afterwards read of dirty page log can be called.
- *
- * Acquires kvm_mmu_lock. Called with kvm->slots_lock mutex acquired,
- * serializing operations for VM memory regions.
- */
-void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
-{
-       struct kvm_memslots *slots = kvm_memslots(kvm);
-       struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
-       phys_addr_t start = memslot->base_gfn << PAGE_SHIFT;
-       phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
-
-       spin_lock(&kvm->mmu_lock);
-       stage2_wp_range(kvm, start, end);
-       spin_unlock(&kvm->mmu_lock);
-       kvm_flush_remote_tlbs(kvm);
-}
-
-/**
- * kvm_mmu_write_protect_pt_masked() - write protect dirty pages
- * @kvm:       The KVM pointer
- * @slot:      The memory slot associated with mask
- * @gfn_offset:        The gfn offset in memory slot
- * @mask:      The mask of dirty pages at offset 'gfn_offset' in this memory
- *             slot to be write protected
- *
- * Walks the bits set in mask and write protects the associated PTEs. The
- * caller must acquire kvm_mmu_lock.
- */
-static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
-               struct kvm_memory_slot *slot,
-               gfn_t gfn_offset, unsigned long mask)
-{
-       phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
-       phys_addr_t start = (base_gfn +  __ffs(mask)) << PAGE_SHIFT;
-       phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;
-
-       stage2_wp_range(kvm, start, end);
-}
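A short worked example of the range computation above, with illustrative values:

/*
 * Worked example: gfn_offset == 0, mask == 0x28 (bits 3 and 5 set).
 *
 *   __ffs(mask) == 3, __fls(mask) == 5
 *   start = (base_gfn + 3) << PAGE_SHIFT
 *   end   = (base_gfn + 6) << PAGE_SHIFT
 *
 * Pages 3..5 are write protected as one contiguous range. Page 4 is
 * included even though its bit is clear, which is harmless: write
 * protecting an already write-protected or clean page changes nothing.
 */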
-
-/*
- * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
- * dirty pages.
- *
- * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
- * enable dirty logging for them.
- */
-void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
-               struct kvm_memory_slot *slot,
-               gfn_t gfn_offset, unsigned long mask)
-{
-       kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
-}
-
-static void coherent_cache_guest_page(struct kvm_vcpu *vcpu, kvm_pfn_t pfn,
-                                     unsigned long size)
-{
-       __coherent_cache_guest_page(vcpu, pfn, size);
-}
-
-static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
-                         struct kvm_memory_slot *memslot, unsigned long hva,
-                         unsigned long fault_status)
-{
-       int ret;
-       bool write_fault, writable, hugetlb = false, force_pte = false;
-       unsigned long mmu_seq;
-       gfn_t gfn = fault_ipa >> PAGE_SHIFT;
-       struct kvm *kvm = vcpu->kvm;
-       struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
-       struct vm_area_struct *vma;
-       kvm_pfn_t pfn;
-       pgprot_t mem_type = PAGE_S2;
-       bool logging_active = memslot_is_logging(memslot);
-       unsigned long flags = 0;
-
-       write_fault = kvm_is_write_fault(vcpu);
-       if (fault_status == FSC_PERM && !write_fault) {
-               kvm_err("Unexpected L2 read permission error\n");
-               return -EFAULT;
-       }
-
-       /* Let's check if we will get back a huge page backed by hugetlbfs */
-       down_read(&current->mm->mmap_sem);
-       vma = find_vma_intersection(current->mm, hva, hva + 1);
-       if (unlikely(!vma)) {
-               kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
-               up_read(&current->mm->mmap_sem);
-               return -EFAULT;
-       }
-
-       if (is_vm_hugetlb_page(vma) && !logging_active) {
-               hugetlb = true;
-               gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
-       } else {
-               /*
-                * Pages belonging to memslots that don't have the same
-                * alignment for userspace and IPA cannot be mapped using
-                * block descriptors even if the pages belong to a THP for
-                * the process, because the stage-2 block descriptor will
-                * cover more than a single THP and we lose atomicity for
-                * unmapping, updates, and splits of the THP or other pages
-                * in the stage-2 block range.
-                */
-               if ((memslot->userspace_addr & ~PMD_MASK) !=
-                   ((memslot->base_gfn << PAGE_SHIFT) & ~PMD_MASK))
-                       force_pte = true;
-       }
-       up_read(&current->mm->mmap_sem);
-
-       /* We need minimum second+third level pages */
-       ret = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES,
-                                    KVM_NR_MEM_OBJS);
-       if (ret)
-               return ret;
-
-       mmu_seq = vcpu->kvm->mmu_notifier_seq;
-       /*
-        * Ensure the read of mmu_notifier_seq happens before we call
-        * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
-        * the page we just got a reference to gets unmapped before we have a
-        * chance to grab the mmu_lock, which ensure that if the page gets
-        * unmapped afterwards, the call to kvm_unmap_hva will take it away
-        * from us again properly. This smp_rmb() interacts with the smp_wmb()
-        * in kvm_mmu_notifier_invalidate_<page|range_end>.
-        */
-       smp_rmb();
-
-       pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
-       if (is_error_noslot_pfn(pfn))
-               return -EFAULT;
-
-       if (kvm_is_device_pfn(pfn)) {
-               mem_type = PAGE_S2_DEVICE;
-               flags |= KVM_S2PTE_FLAG_IS_IOMAP;
-       } else if (logging_active) {
-               /*
-                * Faults on pages in a memslot with logging enabled
-                * should not be mapped with huge pages (it introduces churn
-                * and performance degradation), so force a pte mapping.
-                */
-               force_pte = true;
-               flags |= KVM_S2_FLAG_LOGGING_ACTIVE;
-
-               /*
-                * Only actually map the page as writable if this was a write
-                * fault.
-                */
-               if (!write_fault)
-                       writable = false;
-       }
-
-       spin_lock(&kvm->mmu_lock);
-       if (mmu_notifier_retry(kvm, mmu_seq))
-               goto out_unlock;
-
-       if (!hugetlb && !force_pte)
-               hugetlb = transparent_hugepage_adjust(&pfn, &fault_ipa);
-
-       if (hugetlb) {
-               pmd_t new_pmd = pfn_pmd(pfn, mem_type);
-               new_pmd = pmd_mkhuge(new_pmd);
-               if (writable) {
-                       new_pmd = kvm_s2pmd_mkwrite(new_pmd);
-                       kvm_set_pfn_dirty(pfn);
-               }
-               coherent_cache_guest_page(vcpu, pfn, PMD_SIZE);
-               ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
-       } else {
-               pte_t new_pte = pfn_pte(pfn, mem_type);
-
-               if (writable) {
-                       new_pte = kvm_s2pte_mkwrite(new_pte);
-                       kvm_set_pfn_dirty(pfn);
-                       mark_page_dirty(kvm, gfn);
-               }
-               coherent_cache_guest_page(vcpu, pfn, PAGE_SIZE);
-               ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, flags);
-       }
-
-out_unlock:
-       spin_unlock(&kvm->mmu_lock);
-       kvm_set_pfn_accessed(pfn);
-       kvm_release_pfn_clean(pfn);
-       return ret;
-}
-
-/*
- * Resolve the access fault by making the page young again.
- * Note that because the faulting entry is guaranteed not to be
- * cached in the TLB, we don't need to invalidate anything.
- * Only the HW Access Flag updates are supported for Stage 2 (no DBM),
- * so there is no need for atomic (pte|pmd)_mkyoung operations.
- */
-static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
-{
-       pmd_t *pmd;
-       pte_t *pte;
-       kvm_pfn_t pfn;
-       bool pfn_valid = false;
-
-       trace_kvm_access_fault(fault_ipa);
-
-       spin_lock(&vcpu->kvm->mmu_lock);
-
-       pmd = stage2_get_pmd(vcpu->kvm, NULL, fault_ipa);
-       if (!pmd || pmd_none(*pmd))     /* Nothing there */
-               goto out;
-
-       if (pmd_thp_or_huge(*pmd)) {    /* THP, HugeTLB */
-               *pmd = pmd_mkyoung(*pmd);
-               pfn = pmd_pfn(*pmd);
-               pfn_valid = true;
-               goto out;
-       }
-
-       pte = pte_offset_kernel(pmd, fault_ipa);
-       if (pte_none(*pte))             /* Nothing there either */
-               goto out;
-
-       *pte = pte_mkyoung(*pte);       /* Just a page... */
-       pfn = pte_pfn(*pte);
-       pfn_valid = true;
-out:
-       spin_unlock(&vcpu->kvm->mmu_lock);
-       if (pfn_valid)
-               kvm_set_pfn_accessed(pfn);
-}
-
-/**
- * kvm_handle_guest_abort - handles all 2nd stage aborts
- * @vcpu:      the VCPU pointer
- * @run:       the kvm_run structure
- *
- * Any abort that gets to the host is almost guaranteed to be caused by a
- * missing second stage translation table entry, which can mean either that the
- * guest simply needs more memory and we must allocate an appropriate page, or
- * that the guest tried to access I/O memory, which is emulated by user
- * space. The distinction is based on the IPA causing the fault and whether this
- * memory region has been registered as standard RAM by user space.
- */
-int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
-       unsigned long fault_status;
-       phys_addr_t fault_ipa;
-       struct kvm_memory_slot *memslot;
-       unsigned long hva;
-       bool is_iabt, write_fault, writable;
-       gfn_t gfn;
-       int ret, idx;
-
-       is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
-       if (unlikely(!is_iabt && kvm_vcpu_dabt_isextabt(vcpu))) {
-               kvm_inject_vabt(vcpu);
-               return 1;
-       }
-
-       fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
-
-       trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
-                             kvm_vcpu_get_hfar(vcpu), fault_ipa);
-
-       /* Check whether this is a translation, permission or access fault */
-       fault_status = kvm_vcpu_trap_get_fault_type(vcpu);
-       if (fault_status != FSC_FAULT && fault_status != FSC_PERM &&
-           fault_status != FSC_ACCESS) {
-               kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
-                       kvm_vcpu_trap_get_class(vcpu),
-                       (unsigned long)kvm_vcpu_trap_get_fault(vcpu),
-                       (unsigned long)kvm_vcpu_get_hsr(vcpu));
-               return -EFAULT;
-       }
-
-       idx = srcu_read_lock(&vcpu->kvm->srcu);
-
-       gfn = fault_ipa >> PAGE_SHIFT;
-       memslot = gfn_to_memslot(vcpu->kvm, gfn);
-       hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
-       write_fault = kvm_is_write_fault(vcpu);
-       if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
-               if (is_iabt) {
-                       /* Prefetch Abort on I/O address */
-                       kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
-                       ret = 1;
-                       goto out_unlock;
-               }
-
-               /*
-                * Check for a cache maintenance operation. Since we
-                * ended-up here, we know it is outside of any memory
-                * slot. But we can't find out if that is for a device,
-                * or if the guest is just being stupid. The only thing
-                * we know for sure is that this range cannot be cached.
-                *
-                * So let's assume that the guest is just being
-                * cautious, and skip the instruction.
-                */
-               if (kvm_vcpu_dabt_is_cm(vcpu)) {
-                       kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
-                       ret = 1;
-                       goto out_unlock;
-               }
-
-               /*
-                * The IPA is reported as [MAX:12], so we need to
-                * complement it with the bottom 12 bits from the
-                * faulting VA. This is always 12 bits, irrespective
-                * of the page size.
-                */
-               fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
-               ret = io_mem_abort(vcpu, run, fault_ipa);
-               goto out_unlock;
-       }
-
-       /* Userspace should not be able to register out-of-bounds IPAs */
-       VM_BUG_ON(fault_ipa >= KVM_PHYS_SIZE);
-
-       if (fault_status == FSC_ACCESS) {
-               handle_access_fault(vcpu, fault_ipa);
-               ret = 1;
-               goto out_unlock;
-       }
-
-       ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
-       if (ret == 0)
-               ret = 1;
-out_unlock:
-       srcu_read_unlock(&vcpu->kvm->srcu, idx);
-       return ret;
-}
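The IPA completion step inside kvm_handle_guest_abort() (OR-ing in the low 12 bits of the faulting VA) is clearer with concrete, made-up numbers:

/*
 * Worked example: the hardware reports fault_ipa == 0x09001000 and the
 * faulting VA in HFAR ends in 0x234. Then
 *
 *   fault_ipa |= 0x234   ->  0x09001234
 *
 * which is the byte-accurate address passed to io_mem_abort() for MMIO
 * emulation. The low 12 bits always come from the VA, whatever the page
 * size in use.
 */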
-
-static int handle_hva_to_gpa(struct kvm *kvm,
-                            unsigned long start,
-                            unsigned long end,
-                            int (*handler)(struct kvm *kvm,
-                                           gpa_t gpa, u64 size,
-                                           void *data),
-                            void *data)
-{
-       struct kvm_memslots *slots;
-       struct kvm_memory_slot *memslot;
-       int ret = 0;
-
-       slots = kvm_memslots(kvm);
-
-       /* we only care about the pages that the guest sees */
-       kvm_for_each_memslot(memslot, slots) {
-               unsigned long hva_start, hva_end;
-               gfn_t gpa;
-
-               hva_start = max(start, memslot->userspace_addr);
-               hva_end = min(end, memslot->userspace_addr +
-                                       (memslot->npages << PAGE_SHIFT));
-               if (hva_start >= hva_end)
-                       continue;
-
-               gpa = hva_to_gfn_memslot(hva_start, memslot) << PAGE_SHIFT;
-               ret |= handler(kvm, gpa, (u64)(hva_end - hva_start), data);
-       }
-
-       return ret;
-}
-
-static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
-{
-       unmap_stage2_range(kvm, gpa, size);
-       return 0;
-}
-
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
-{
-       unsigned long end = hva + PAGE_SIZE;
-
-       if (!kvm->arch.pgd)
-               return 0;
-
-       trace_kvm_unmap_hva(hva);
-       handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);
-       return 0;
-}
-
-int kvm_unmap_hva_range(struct kvm *kvm,
-                       unsigned long start, unsigned long end)
-{
-       if (!kvm->arch.pgd)
-               return 0;
-
-       trace_kvm_unmap_hva_range(start, end);
-       handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
-       return 0;
-}
-
-static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
-{
-       pte_t *pte = (pte_t *)data;
-
-       WARN_ON(size != PAGE_SIZE);
-       /*
-        * We can always call stage2_set_pte with KVM_S2_FLAG_LOGGING_ACTIVE
-        * flag clear because MMU notifiers will have unmapped a huge PMD before
-        * calling ->change_pte() (which in turn calls kvm_set_spte_hva()) and
-        * therefore stage2_set_pte() never needs to clear out a huge PMD
-        * through this calling path.
-        */
-       stage2_set_pte(kvm, NULL, gpa, pte, 0);
-       return 0;
-}
-
-
-void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
-{
-       unsigned long end = hva + PAGE_SIZE;
-       pte_t stage2_pte;
-
-       if (!kvm->arch.pgd)
-               return;
-
-       trace_kvm_set_spte_hva(hva);
-       stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2);
-       handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
-}
-
-static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
-{
-       pmd_t *pmd;
-       pte_t *pte;
-
-       WARN_ON(size != PAGE_SIZE && size != PMD_SIZE);
-       pmd = stage2_get_pmd(kvm, NULL, gpa);
-       if (!pmd || pmd_none(*pmd))     /* Nothing there */
-               return 0;
-
-       if (pmd_thp_or_huge(*pmd))      /* THP, HugeTLB */
-               return stage2_pmdp_test_and_clear_young(pmd);
-
-       pte = pte_offset_kernel(pmd, gpa);
-       if (pte_none(*pte))
-               return 0;
-
-       return stage2_ptep_test_and_clear_young(pte);
-}
-
-static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
-{
-       pmd_t *pmd;
-       pte_t *pte;
-
-       WARN_ON(size != PAGE_SIZE && size != PMD_SIZE);
-       pmd = stage2_get_pmd(kvm, NULL, gpa);
-       if (!pmd || pmd_none(*pmd))     /* Nothing there */
-               return 0;
-
-       if (pmd_thp_or_huge(*pmd))              /* THP, HugeTLB */
-               return pmd_young(*pmd);
-
-       pte = pte_offset_kernel(pmd, gpa);
-       if (!pte_none(*pte))            /* Just a page... */
-               return pte_young(*pte);
-
-       return 0;
-}
-
-int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
-{
-       trace_kvm_age_hva(start, end);
-       return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
-}
-
-int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
-{
-       trace_kvm_test_age_hva(hva);
-       return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL);
-}
-
-void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
-{
-       mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
-}
-
-phys_addr_t kvm_mmu_get_httbr(void)
-{
-       if (__kvm_cpu_uses_extended_idmap())
-               return virt_to_phys(merged_hyp_pgd);
-       else
-               return virt_to_phys(hyp_pgd);
-}
-
-phys_addr_t kvm_get_idmap_vector(void)
-{
-       return hyp_idmap_vector;
-}
-
-static int kvm_map_idmap_text(pgd_t *pgd)
-{
-       int err;
-
-       /* Create the idmap in the boot page tables */
-       err =   __create_hyp_mappings(pgd,
-                                     hyp_idmap_start, hyp_idmap_end,
-                                     __phys_to_pfn(hyp_idmap_start),
-                                     PAGE_HYP_EXEC);
-       if (err)
-               kvm_err("Failed to idmap %lx-%lx\n",
-                       hyp_idmap_start, hyp_idmap_end);
-
-       return err;
-}
-
-int kvm_mmu_init(void)
-{
-       int err;
-
-       hyp_idmap_start = kvm_virt_to_phys(__hyp_idmap_text_start);
-       hyp_idmap_end = kvm_virt_to_phys(__hyp_idmap_text_end);
-       hyp_idmap_vector = kvm_virt_to_phys(__kvm_hyp_init);
-
-       /*
-        * We rely on the linker script to ensure at build time that the HYP
-        * init code does not cross a page boundary.
-        */
-       BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK);
-
-       kvm_info("IDMAP page: %lx\n", hyp_idmap_start);
-       kvm_info("HYP VA range: %lx:%lx\n",
-                kern_hyp_va(PAGE_OFFSET), kern_hyp_va(~0UL));
-
-       if (hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) &&
-           hyp_idmap_start <  kern_hyp_va(~0UL) &&
-           hyp_idmap_start != (unsigned long)__hyp_idmap_text_start) {
-               /*
-                * The idmap page is intersecting with the VA space,
-                * it is not safe to continue further.
-                */
-               kvm_err("IDMAP intersecting with HYP VA, unable to continue\n");
-               err = -EINVAL;
-               goto out;
-       }
-
-       hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order);
-       if (!hyp_pgd) {
-               kvm_err("Hyp mode PGD not allocated\n");
-               err = -ENOMEM;
-               goto out;
-       }
-
-       if (__kvm_cpu_uses_extended_idmap()) {
-               boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
-                                                        hyp_pgd_order);
-               if (!boot_hyp_pgd) {
-                       kvm_err("Hyp boot PGD not allocated\n");
-                       err = -ENOMEM;
-                       goto out;
-               }
-
-               err = kvm_map_idmap_text(boot_hyp_pgd);
-               if (err)
-                       goto out;
-
-               merged_hyp_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
-               if (!merged_hyp_pgd) {
-                       kvm_err("Failed to allocate extra HYP pgd\n");
-                       goto out;
-               }
-               __kvm_extend_hypmap(boot_hyp_pgd, hyp_pgd, merged_hyp_pgd,
-                                   hyp_idmap_start);
-       } else {
-               err = kvm_map_idmap_text(hyp_pgd);
-               if (err)
-                       goto out;
-       }
-
-       return 0;
-out:
-       free_hyp_pgds();
-       return err;
-}
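The BUG_ON() near the top of kvm_mmu_init() relies on an XOR trick: if the first and last byte of the Hyp init range differ in any bit covered by PAGE_MASK, the range crosses a page boundary. A worked example with illustrative addresses, assuming 4K pages:

/*
 * Worked example (PAGE_MASK == ~0xfffUL):
 *
 *   start = 0x40211800, end = 0x40211a00
 *     (0x40211800 ^ 0x402119ff) & PAGE_MASK == 0        -> same page, OK
 *
 *   start = 0x40211f00, end = 0x40212100
 *     (0x40211f00 ^ 0x402120ff) & PAGE_MASK == 0x3000   -> crosses a page,
 *                                                          BUG_ON() fires
 */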
-
-void kvm_arch_commit_memory_region(struct kvm *kvm,
-                                  const struct kvm_userspace_memory_region *mem,
-                                  const struct kvm_memory_slot *old,
-                                  const struct kvm_memory_slot *new,
-                                  enum kvm_mr_change change)
-{
-       /*
-        * At this point memslot has been committed and there is an
-        * allocated dirty_bitmap[], dirty pages will be tracked while the
-        * memory slot is write protected.
-        */
-       if (change != KVM_MR_DELETE && mem->flags & KVM_MEM_LOG_DIRTY_PAGES)
-               kvm_mmu_wp_memory_region(kvm, mem->slot);
-}
-
-int kvm_arch_prepare_memory_region(struct kvm *kvm,
-                                  struct kvm_memory_slot *memslot,
-                                  const struct kvm_userspace_memory_region *mem,
-                                  enum kvm_mr_change change)
-{
-       hva_t hva = mem->userspace_addr;
-       hva_t reg_end = hva + mem->memory_size;
-       bool writable = !(mem->flags & KVM_MEM_READONLY);
-       int ret = 0;
-
-       if (change != KVM_MR_CREATE && change != KVM_MR_MOVE &&
-                       change != KVM_MR_FLAGS_ONLY)
-               return 0;
-
-       /*
-        * Prevent userspace from creating a memory region outside of the IPA
-        * space addressable by the KVM guest.
-        */
-       if (memslot->base_gfn + memslot->npages >=
-           (KVM_PHYS_SIZE >> PAGE_SHIFT))
-               return -EFAULT;
-
-       /*
-        * A memory region could potentially cover multiple VMAs, and any holes
-        * between them, so iterate over all of them to find out if we can map
-        * any of them right now.
-        *
-        *     +--------------------------------------------+
-        * +---------------+----------------+   +----------------+
-        * |   : VMA 1     |      VMA 2     |   |    VMA 3  :    |
-        * +---------------+----------------+   +----------------+
-        *     |               memory region                |
-        *     +--------------------------------------------+
-        */
-       do {
-               struct vm_area_struct *vma = find_vma(current->mm, hva);
-               hva_t vm_start, vm_end;
-
-               if (!vma || vma->vm_start >= reg_end)
-                       break;
-
-               /*
-                * Mapping a read-only VMA is only allowed if the
-                * memory region is configured as read-only.
-                */
-               if (writable && !(vma->vm_flags & VM_WRITE)) {
-                       ret = -EPERM;
-                       break;
-               }
-
-               /*
-                * Take the intersection of this VMA with the memory region
-                */
-               vm_start = max(hva, vma->vm_start);
-               vm_end = min(reg_end, vma->vm_end);
-
-               if (vma->vm_flags & VM_PFNMAP) {
-                       gpa_t gpa = mem->guest_phys_addr +
-                                   (vm_start - mem->userspace_addr);
-                       phys_addr_t pa;
-
-                       pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
-                       pa += vm_start - vma->vm_start;
-
-                       /* IO region dirty page logging not allowed */
-                       if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES)
-                               return -EINVAL;
-
-                       ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
-                                                   vm_end - vm_start,
-                                                   writable);
-                       if (ret)
-                               break;
-               }
-               hva = vm_end;
-       } while (hva < reg_end);
-
-       if (change == KVM_MR_FLAGS_ONLY)
-               return ret;
-
-       spin_lock(&kvm->mmu_lock);
-       if (ret)
-               unmap_stage2_range(kvm, mem->guest_phys_addr, mem->memory_size);
-       else
-               stage2_flush_memslot(kvm, memslot);
-       spin_unlock(&kvm->mmu_lock);
-       return ret;
-}
-
-void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
-                          struct kvm_memory_slot *dont)
-{
-}
-
-int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
-                           unsigned long npages)
-{
-       return 0;
-}
-
-void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots)
-{
-}
-
-void kvm_arch_flush_shadow_all(struct kvm *kvm)
-{
-       kvm_free_stage2_pgd(kvm);
-}
-
-void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
-                                  struct kvm_memory_slot *slot)
-{
-       gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
-       phys_addr_t size = slot->npages << PAGE_SHIFT;
-
-       spin_lock(&kvm->mmu_lock);
-       unmap_stage2_range(kvm, gpa, size);
-       spin_unlock(&kvm->mmu_lock);
-}
-
-/*
- * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
- *
- * Main problems:
- * - S/W ops are local to a CPU (not broadcast)
- * - We have line migration behind our back (speculation)
- * - System caches don't support S/W at all (damn!)
- *
- * In the face of the above, the best we can do is to try and convert
- * S/W ops to VA ops. Because the guest is not allowed to infer the
- * S/W to PA mapping, it can only use S/W to nuke the whole cache,
- * which is a rather good thing for us.
- *
- * Also, it is only used when turning caches on/off ("The expected
- * usage of the cache maintenance instructions that operate by set/way
- * is associated with the cache maintenance instructions associated
- * with the powerdown and powerup of caches, if this is required by
- * the implementation.").
- *
- * We use the following policy:
- *
- * - If we trap a S/W operation, we enable VM trapping to detect
- *   caches being turned on/off, and do a full clean.
- *
- * - We flush the caches both when the caches are turned on and when
- *   they are turned off.
- *
- * - Once the caches are enabled, we stop trapping VM ops.
- */
-void kvm_set_way_flush(struct kvm_vcpu *vcpu)
-{
-       unsigned long hcr = vcpu_get_hcr(vcpu);
-
-       /*
-        * If this is the first time we do a S/W operation
-        * (i.e. HCR_TVM not set), flush the caches for the whole of
-        * guest memory and enable VM trapping.
-        *
-        * Otherwise, rely on the VM trapping to wait for the MMU +
-        * Caches to be turned off. At that point, we'll be able to
-        * clean the caches again.
-        */
-       if (!(hcr & HCR_TVM)) {
-               trace_kvm_set_way_flush(*vcpu_pc(vcpu),
-                                       vcpu_has_cache_enabled(vcpu));
-               stage2_flush_vm(vcpu->kvm);
-               vcpu_set_hcr(vcpu, hcr | HCR_TVM);
-       }
-}
-
-void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
-{
-       bool now_enabled = vcpu_has_cache_enabled(vcpu);
-
-       /*
-        * If switching the MMU+caches on, need to invalidate the caches.
-        * If switching it off, need to clean the caches.
-        * Clean + invalidate does the trick always.
-        */
-       if (now_enabled != was_enabled)
-               stage2_flush_vm(vcpu->kvm);
-
-       /* Caches are now on, stop trapping VM ops (until a S/W op) */
-       if (now_enabled)
-               vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) & ~HCR_TVM);
-
-       trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled);
-}
diff --git a/arch/arm/kvm/perf.c b/arch/arm/kvm/perf.c
deleted file mode 100644 (file)
index 1a3849d..0000000
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Based on the x86 implementation.
- *
- * Copyright (C) 2012 ARM Ltd.
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/perf_event.h>
-#include <linux/kvm_host.h>
-
-#include <asm/kvm_emulate.h>
-
-static int kvm_is_in_guest(void)
-{
-        return kvm_arm_get_running_vcpu() != NULL;
-}
-
-static int kvm_is_user_mode(void)
-{
-       struct kvm_vcpu *vcpu;
-
-       vcpu = kvm_arm_get_running_vcpu();
-
-       if (vcpu)
-               return !vcpu_mode_priv(vcpu);
-
-       return 0;
-}
-
-static unsigned long kvm_get_guest_ip(void)
-{
-       struct kvm_vcpu *vcpu;
-
-       vcpu = kvm_arm_get_running_vcpu();
-
-       if (vcpu)
-               return *vcpu_pc(vcpu);
-
-       return 0;
-}
-
-static struct perf_guest_info_callbacks kvm_guest_cbs = {
-       .is_in_guest    = kvm_is_in_guest,
-       .is_user_mode   = kvm_is_user_mode,
-       .get_guest_ip   = kvm_get_guest_ip,
-};
-
-int kvm_perf_init(void)
-{
-       return perf_register_guest_info_callbacks(&kvm_guest_cbs);
-}
-
-int kvm_perf_teardown(void)
-{
-       return perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
-}
diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
deleted file mode 100644 (file)
index a08d7a9..0000000
+++ /dev/null
@@ -1,332 +0,0 @@
-/*
- * Copyright (C) 2012 - ARM Ltd
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/preempt.h>
-#include <linux/kvm_host.h>
-#include <linux/wait.h>
-
-#include <asm/cputype.h>
-#include <asm/kvm_emulate.h>
-#include <asm/kvm_psci.h>
-#include <asm/kvm_host.h>
-
-#include <uapi/linux/psci.h>
-
-/*
- * This is an implementation of the Power State Coordination Interface
- * as described in ARM document number ARM DEN 0022A.
- */
-
-#define AFFINITY_MASK(level)   ~((0x1UL << ((level) * MPIDR_LEVEL_BITS)) - 1)
-
-static unsigned long psci_affinity_mask(unsigned long affinity_level)
-{
-       if (affinity_level <= 3)
-               return MPIDR_HWID_BITMASK & AFFINITY_MASK(affinity_level);
-
-       return 0;
-}
-
-static unsigned long kvm_psci_vcpu_suspend(struct kvm_vcpu *vcpu)
-{
-       /*
-        * NOTE: For simplicity, we treat VCPU suspend emulation the
-        * same as WFI (Wait-for-interrupt) emulation.
-        *
-        * This means that for KVM the wakeup events are interrupts,
-        * which is consistent with the intended use of StateID as
-        * described in section 5.4.1 of the PSCI v0.2 specification
-        * (ARM DEN 0022A).
-        *
-        * Further, we also treat a power-down request the same as a
-        * stand-by request, as per section 5.4.2 clause 3 of the PSCI
-        * v0.2 specification (ARM DEN 0022A). This means all suspend
-        * states for KVM will preserve the register state.
-        */
-       kvm_vcpu_block(vcpu);
-
-       return PSCI_RET_SUCCESS;
-}
-
-static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
-{
-       vcpu->arch.power_off = true;
-}
-
-static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
-{
-       struct kvm *kvm = source_vcpu->kvm;
-       struct kvm_vcpu *vcpu = NULL;
-       struct swait_queue_head *wq;
-       unsigned long cpu_id;
-       unsigned long context_id;
-       phys_addr_t target_pc;
-
-       cpu_id = vcpu_get_reg(source_vcpu, 1) & MPIDR_HWID_BITMASK;
-       if (vcpu_mode_is_32bit(source_vcpu))
-               cpu_id &= ~((u32) 0);
-
-       vcpu = kvm_mpidr_to_vcpu(kvm, cpu_id);
-
-       /*
-        * Make sure the caller requested a valid CPU and that the CPU is
-        * turned off.
-        */
-       if (!vcpu)
-               return PSCI_RET_INVALID_PARAMS;
-       if (!vcpu->arch.power_off) {
-               if (kvm_psci_version(source_vcpu) != KVM_ARM_PSCI_0_1)
-                       return PSCI_RET_ALREADY_ON;
-               else
-                       return PSCI_RET_INVALID_PARAMS;
-       }
-
-       target_pc = vcpu_get_reg(source_vcpu, 2);
-       context_id = vcpu_get_reg(source_vcpu, 3);
-
-       kvm_reset_vcpu(vcpu);
-
-       /* Gracefully handle Thumb2 entry point */
-       if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
-               target_pc &= ~((phys_addr_t) 1);
-               vcpu_set_thumb(vcpu);
-       }
-
-       /* Propagate caller endianness */
-       if (kvm_vcpu_is_be(source_vcpu))
-               kvm_vcpu_set_be(vcpu);
-
-       *vcpu_pc(vcpu) = target_pc;
-       /*
-        * NOTE: We always update r0 (or x0) because for PSCI v0.1
-        * the general purpose registers are undefined upon CPU_ON.
-        */
-       vcpu_set_reg(vcpu, 0, context_id);
-       vcpu->arch.power_off = false;
-       smp_mb();               /* Make sure the above is visible */
-
-       wq = kvm_arch_vcpu_wq(vcpu);
-       swake_up(wq);
-
-       return PSCI_RET_SUCCESS;
-}
-
-static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
-{
-       int i, matching_cpus = 0;
-       unsigned long mpidr;
-       unsigned long target_affinity;
-       unsigned long target_affinity_mask;
-       unsigned long lowest_affinity_level;
-       struct kvm *kvm = vcpu->kvm;
-       struct kvm_vcpu *tmp;
-
-       target_affinity = vcpu_get_reg(vcpu, 1);
-       lowest_affinity_level = vcpu_get_reg(vcpu, 2);
-
-       /* Determine target affinity mask */
-       target_affinity_mask = psci_affinity_mask(lowest_affinity_level);
-       if (!target_affinity_mask)
-               return PSCI_RET_INVALID_PARAMS;
-
-       /* Ignore other bits of target affinity */
-       target_affinity &= target_affinity_mask;
-
-       /*
-        * If one or more VCPUs matching the target affinity are
-        * running, report ON; otherwise report OFF.
-        */
-       kvm_for_each_vcpu(i, tmp, kvm) {
-               mpidr = kvm_vcpu_get_mpidr_aff(tmp);
-               if ((mpidr & target_affinity_mask) == target_affinity) {
-                       matching_cpus++;
-                       if (!tmp->arch.power_off)
-                               return PSCI_0_2_AFFINITY_LEVEL_ON;
-               }
-       }
-
-       if (!matching_cpus)
-               return PSCI_RET_INVALID_PARAMS;
-
-       return PSCI_0_2_AFFINITY_LEVEL_OFF;
-}
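As an aside, a small self-contained sketch of the affinity matching performed above, assuming MPIDR_LEVEL_BITS is 8 and a 32-bit Aff2:Aff1:Aff0 layout; the macros and values here are local to the example, not the kernel definitions.

#include <stdio.h>

#define LEVEL_BITS	8
#define HWID_BITMASK	0xFFFFFFUL
#define AFF_MASK(level)	(~((0x1UL << ((level) * LEVEL_BITS)) - 1))

int main(void)
{
	/* AFFINITY_INFO on cluster 1, i.e. lowest_affinity_level == 1 */
	unsigned long target = 0x000100UL;		   /* Aff1 == 1 */
	unsigned long mask = HWID_BITMASK & AFF_MASK(1);   /* 0xFFFF00 */
	unsigned long mpidr = 0x000103UL;		   /* CPU 3 in cluster 1 */

	/* Matches: Aff0 is masked out, Aff2/Aff1 must be identical */
	printf("mask=%#lx match=%d\n", mask,
	       (mpidr & mask) == (target & mask));
	return 0;
}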
-
-static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type)
-{
-       int i;
-       struct kvm_vcpu *tmp;
-
-       /*
-        * The KVM ABI specifies that after a system event exit, userspace
-        * may call KVM_RUN again and perform the shutdown/reboot at a
-        * later time than when the actual request was made.  Since we are
-        * implementing PSCI, and a caller of PSCI SYSTEM_OFF or
-        * SYSTEM_RESET expects the system to shut down or reboot
-        * immediately, make sure that the VCPUs are not run after this
-        * call is handled and before they have been re-initialized.
-        */
-       kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
-               tmp->arch.power_off = true;
-               kvm_vcpu_kick(tmp);
-       }
-
-       memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
-       vcpu->run->system_event.type = type;
-       vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
-}
-
-static void kvm_psci_system_off(struct kvm_vcpu *vcpu)
-{
-       kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_SHUTDOWN);
-}
-
-static void kvm_psci_system_reset(struct kvm_vcpu *vcpu)
-{
-       kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_RESET);
-}
-
-int kvm_psci_version(struct kvm_vcpu *vcpu)
-{
-       if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features))
-               return KVM_ARM_PSCI_0_2;
-
-       return KVM_ARM_PSCI_0_1;
-}
-
-static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
-{
-       struct kvm *kvm = vcpu->kvm;
-       unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
-       unsigned long val;
-       int ret = 1;
-
-       switch (psci_fn) {
-       case PSCI_0_2_FN_PSCI_VERSION:
-               /*
-                * Bits[31:16] = Major Version = 0
-                * Bits[15:0] = Minor Version = 2
-                */
-               val = 2;
-               break;
-       case PSCI_0_2_FN_CPU_SUSPEND:
-       case PSCI_0_2_FN64_CPU_SUSPEND:
-               val = kvm_psci_vcpu_suspend(vcpu);
-               break;
-       case PSCI_0_2_FN_CPU_OFF:
-               kvm_psci_vcpu_off(vcpu);
-               val = PSCI_RET_SUCCESS;
-               break;
-       case PSCI_0_2_FN_CPU_ON:
-       case PSCI_0_2_FN64_CPU_ON:
-               mutex_lock(&kvm->lock);
-               val = kvm_psci_vcpu_on(vcpu);
-               mutex_unlock(&kvm->lock);
-               break;
-       case PSCI_0_2_FN_AFFINITY_INFO:
-       case PSCI_0_2_FN64_AFFINITY_INFO:
-               val = kvm_psci_vcpu_affinity_info(vcpu);
-               break;
-       case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
-               /*
-                * Either the Trusted OS is multiprocessor-capable and
-                * hence does not require migration, or no Trusted OS
-                * is present at all.
-                */
-               val = PSCI_0_2_TOS_MP;
-               break;
-       case PSCI_0_2_FN_SYSTEM_OFF:
-               kvm_psci_system_off(vcpu);
-               /*
-                * We shouldn't be going back to the guest VCPU after
-                * receiving a SYSTEM_OFF request.
-                *
-                * If user space accidentally or deliberately resumes the
-                * guest VCPU after a SYSTEM_OFF request, the guest should
-                * see an internal failure as the PSCI return value. To
-                * achieve this, we preload r0 (or x0) with the PSCI
-                * return value INTERNAL_FAILURE.
-                */
-               val = PSCI_RET_INTERNAL_FAILURE;
-               ret = 0;
-               break;
-       case PSCI_0_2_FN_SYSTEM_RESET:
-               kvm_psci_system_reset(vcpu);
-               /*
-                * Same reason as SYSTEM_OFF for preloading r0 (or x0)
-                * with PSCI return value INTERNAL_FAILURE.
-                */
-               val = PSCI_RET_INTERNAL_FAILURE;
-               ret = 0;
-               break;
-       default:
-               val = PSCI_RET_NOT_SUPPORTED;
-               break;
-       }
-
-       vcpu_set_reg(vcpu, 0, val);
-       return ret;
-}
-
-static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
-{
-       struct kvm *kvm = vcpu->kvm;
-       unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
-       unsigned long val;
-
-       switch (psci_fn) {
-       case KVM_PSCI_FN_CPU_OFF:
-               kvm_psci_vcpu_off(vcpu);
-               val = PSCI_RET_SUCCESS;
-               break;
-       case KVM_PSCI_FN_CPU_ON:
-               mutex_lock(&kvm->lock);
-               val = kvm_psci_vcpu_on(vcpu);
-               mutex_unlock(&kvm->lock);
-               break;
-       default:
-               val = PSCI_RET_NOT_SUPPORTED;
-               break;
-       }
-
-       vcpu_set_reg(vcpu, 0, val);
-       return 1;
-}
-
-/**
- * kvm_psci_call - handle PSCI call if r0 value is in range
- * @vcpu: Pointer to the VCPU struct
- *
- * Handle PSCI calls from guests through traps from HVC instructions.
- * The calling convention is similar to SMC calls to the secure world
- * where the function number is placed in r0.
- *
- * This function returns: > 0 (success), 0 (success but exit to user
- * space), and < 0 (errors)
- *
- * Errors:
- * -EINVAL: Unrecognized PSCI function
- */
-int kvm_psci_call(struct kvm_vcpu *vcpu)
-{
-       switch (kvm_psci_version(vcpu)) {
-       case KVM_ARM_PSCI_0_2:
-               return kvm_psci_0_2_call(vcpu);
-       case KVM_ARM_PSCI_0_1:
-               return kvm_psci_0_1_call(vcpu);
-       default:
-               return -EINVAL;
-       };
-}
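For clarity, a hedged sketch of how a guest would decode the PSCI_VERSION value returned by kvm_psci_0_2_call() above (val == 2 encodes major 0, minor 2); the helper macros below are illustrative only and not taken from any header.

#include <stdio.h>

#define EXAMPLE_PSCI_MAJOR(v)	(((v) >> 16) & 0xffff)
#define EXAMPLE_PSCI_MINOR(v)	((v) & 0xffff)

int main(void)
{
	unsigned long val = 2;	/* value returned in r0/x0 by the code above */

	/* Prints "PSCIv0.2" */
	printf("PSCIv%lu.%lu\n", EXAMPLE_PSCI_MAJOR(val), EXAMPLE_PSCI_MINOR(val));
	return 0;
}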
index c25a88598eb04d02dde1085a20427eb123a18248..fc0943776db2d821a5b3931a9d4e217a8cf747d6 100644 (file)
@@ -6,133 +6,6 @@
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM kvm
 
-/*
- * Tracepoints for entry/exit to guest
- */
-TRACE_EVENT(kvm_entry,
-       TP_PROTO(unsigned long vcpu_pc),
-       TP_ARGS(vcpu_pc),
-
-       TP_STRUCT__entry(
-               __field(        unsigned long,  vcpu_pc         )
-       ),
-
-       TP_fast_assign(
-               __entry->vcpu_pc                = vcpu_pc;
-       ),
-
-       TP_printk("PC: 0x%08lx", __entry->vcpu_pc)
-);
-
-TRACE_EVENT(kvm_exit,
-       TP_PROTO(int idx, unsigned int exit_reason, unsigned long vcpu_pc),
-       TP_ARGS(idx, exit_reason, vcpu_pc),
-
-       TP_STRUCT__entry(
-               __field(        int,            idx             )
-               __field(        unsigned int,   exit_reason     )
-               __field(        unsigned long,  vcpu_pc         )
-       ),
-
-       TP_fast_assign(
-               __entry->idx                    = idx;
-               __entry->exit_reason            = exit_reason;
-               __entry->vcpu_pc                = vcpu_pc;
-       ),
-
-       TP_printk("%s: HSR_EC: 0x%04x (%s), PC: 0x%08lx",
-                 __print_symbolic(__entry->idx, kvm_arm_exception_type),
-                 __entry->exit_reason,
-                 __print_symbolic(__entry->exit_reason, kvm_arm_exception_class),
-                 __entry->vcpu_pc)
-);
-
-TRACE_EVENT(kvm_guest_fault,
-       TP_PROTO(unsigned long vcpu_pc, unsigned long hsr,
-                unsigned long hxfar,
-                unsigned long long ipa),
-       TP_ARGS(vcpu_pc, hsr, hxfar, ipa),
-
-       TP_STRUCT__entry(
-               __field(        unsigned long,  vcpu_pc         )
-               __field(        unsigned long,  hsr             )
-               __field(        unsigned long,  hxfar           )
-               __field(   unsigned long long,  ipa             )
-       ),
-
-       TP_fast_assign(
-               __entry->vcpu_pc                = vcpu_pc;
-               __entry->hsr                    = hsr;
-               __entry->hxfar                  = hxfar;
-               __entry->ipa                    = ipa;
-       ),
-
-       TP_printk("ipa %#llx, hsr %#08lx, hxfar %#08lx, pc %#08lx",
-                 __entry->ipa, __entry->hsr,
-                 __entry->hxfar, __entry->vcpu_pc)
-);
-
-TRACE_EVENT(kvm_access_fault,
-       TP_PROTO(unsigned long ipa),
-       TP_ARGS(ipa),
-
-       TP_STRUCT__entry(
-               __field(        unsigned long,  ipa             )
-       ),
-
-       TP_fast_assign(
-               __entry->ipa            = ipa;
-       ),
-
-       TP_printk("IPA: %lx", __entry->ipa)
-);
-
-TRACE_EVENT(kvm_irq_line,
-       TP_PROTO(unsigned int type, int vcpu_idx, int irq_num, int level),
-       TP_ARGS(type, vcpu_idx, irq_num, level),
-
-       TP_STRUCT__entry(
-               __field(        unsigned int,   type            )
-               __field(        int,            vcpu_idx        )
-               __field(        int,            irq_num         )
-               __field(        int,            level           )
-       ),
-
-       TP_fast_assign(
-               __entry->type           = type;
-               __entry->vcpu_idx       = vcpu_idx;
-               __entry->irq_num        = irq_num;
-               __entry->level          = level;
-       ),
-
-       TP_printk("Inject %s interrupt (%d), vcpu->idx: %d, num: %d, level: %d",
-                 (__entry->type == KVM_ARM_IRQ_TYPE_CPU) ? "CPU" :
-                 (__entry->type == KVM_ARM_IRQ_TYPE_PPI) ? "VGIC PPI" :
-                 (__entry->type == KVM_ARM_IRQ_TYPE_SPI) ? "VGIC SPI" : "UNKNOWN",
-                 __entry->type, __entry->vcpu_idx, __entry->irq_num, __entry->level)
-);
-
-TRACE_EVENT(kvm_mmio_emulate,
-       TP_PROTO(unsigned long vcpu_pc, unsigned long instr,
-                unsigned long cpsr),
-       TP_ARGS(vcpu_pc, instr, cpsr),
-
-       TP_STRUCT__entry(
-               __field(        unsigned long,  vcpu_pc         )
-               __field(        unsigned long,  instr           )
-               __field(        unsigned long,  cpsr            )
-       ),
-
-       TP_fast_assign(
-               __entry->vcpu_pc                = vcpu_pc;
-               __entry->instr                  = instr;
-               __entry->cpsr                   = cpsr;
-       ),
-
-       TP_printk("Emulate MMIO at: 0x%08lx (instr: %08lx, cpsr: %08lx)",
-                 __entry->vcpu_pc, __entry->instr, __entry->cpsr)
-);
-
 /* Architecturally implementation defined CP15 register access */
 TRACE_EVENT(kvm_emulate_cp15_imp,
        TP_PROTO(unsigned long Op1, unsigned long Rt1, unsigned long CRn,
@@ -181,87 +54,6 @@ TRACE_EVENT(kvm_wfx,
                __entry->is_wfe ? 'e' : 'i', __entry->vcpu_pc)
 );
 
-TRACE_EVENT(kvm_unmap_hva,
-       TP_PROTO(unsigned long hva),
-       TP_ARGS(hva),
-
-       TP_STRUCT__entry(
-               __field(        unsigned long,  hva             )
-       ),
-
-       TP_fast_assign(
-               __entry->hva            = hva;
-       ),
-
-       TP_printk("mmu notifier unmap hva: %#08lx", __entry->hva)
-);
-
-TRACE_EVENT(kvm_unmap_hva_range,
-       TP_PROTO(unsigned long start, unsigned long end),
-       TP_ARGS(start, end),
-
-       TP_STRUCT__entry(
-               __field(        unsigned long,  start           )
-               __field(        unsigned long,  end             )
-       ),
-
-       TP_fast_assign(
-               __entry->start          = start;
-               __entry->end            = end;
-       ),
-
-       TP_printk("mmu notifier unmap range: %#08lx -- %#08lx",
-                 __entry->start, __entry->end)
-);
-
-TRACE_EVENT(kvm_set_spte_hva,
-       TP_PROTO(unsigned long hva),
-       TP_ARGS(hva),
-
-       TP_STRUCT__entry(
-               __field(        unsigned long,  hva             )
-       ),
-
-       TP_fast_assign(
-               __entry->hva            = hva;
-       ),
-
-       TP_printk("mmu notifier set pte hva: %#08lx", __entry->hva)
-);
-
-TRACE_EVENT(kvm_age_hva,
-       TP_PROTO(unsigned long start, unsigned long end),
-       TP_ARGS(start, end),
-
-       TP_STRUCT__entry(
-               __field(        unsigned long,  start           )
-               __field(        unsigned long,  end             )
-       ),
-
-       TP_fast_assign(
-               __entry->start          = start;
-               __entry->end            = end;
-       ),
-
-       TP_printk("mmu notifier age hva: %#08lx -- %#08lx",
-                 __entry->start, __entry->end)
-);
-
-TRACE_EVENT(kvm_test_age_hva,
-       TP_PROTO(unsigned long hva),
-       TP_ARGS(hva),
-
-       TP_STRUCT__entry(
-               __field(        unsigned long,  hva             )
-       ),
-
-       TP_fast_assign(
-               __entry->hva            = hva;
-       ),
-
-       TP_printk("mmu notifier test age hva: %#08lx", __entry->hva)
-);
-
 TRACE_EVENT(kvm_hvc,
        TP_PROTO(unsigned long vcpu_pc, unsigned long r0, unsigned long imm),
        TP_ARGS(vcpu_pc, r0, imm),
@@ -282,45 +74,6 @@ TRACE_EVENT(kvm_hvc,
                  __entry->vcpu_pc, __entry->r0, __entry->imm)
 );
 
-TRACE_EVENT(kvm_set_way_flush,
-           TP_PROTO(unsigned long vcpu_pc, bool cache),
-           TP_ARGS(vcpu_pc, cache),
-
-           TP_STRUCT__entry(
-                   __field(    unsigned long,  vcpu_pc         )
-                   __field(    bool,           cache           )
-           ),
-
-           TP_fast_assign(
-                   __entry->vcpu_pc            = vcpu_pc;
-                   __entry->cache              = cache;
-           ),
-
-           TP_printk("S/W flush at 0x%016lx (cache %s)",
-                     __entry->vcpu_pc, __entry->cache ? "on" : "off")
-);
-
-TRACE_EVENT(kvm_toggle_cache,
-           TP_PROTO(unsigned long vcpu_pc, bool was, bool now),
-           TP_ARGS(vcpu_pc, was, now),
-
-           TP_STRUCT__entry(
-                   __field(    unsigned long,  vcpu_pc         )
-                   __field(    bool,           was             )
-                   __field(    bool,           now             )
-           ),
-
-           TP_fast_assign(
-                   __entry->vcpu_pc            = vcpu_pc;
-                   __entry->was                = was;
-                   __entry->now                = now;
-           ),
-
-           TP_printk("VM op at 0x%016lx (cache was %s, now %s)",
-                     __entry->vcpu_pc, __entry->was ? "on" : "off",
-                     __entry->now ? "on" : "off")
-);
-
 #endif /* _TRACE_KVM_H */
 
 #undef TRACE_INCLUDE_PATH
index afd51bebb9c500442ac2de68562b11cb019c3dc3..5d9810086c254defd58046c6a34da6aabfa42fa3 100644 (file)
@@ -7,14 +7,13 @@ CFLAGS_arm.o := -I.
 CFLAGS_mmu.o := -I.
 
 KVM=../../../virt/kvm
-ARM=../../../arch/arm/kvm
 
 obj-$(CONFIG_KVM_ARM_HOST) += kvm.o
 obj-$(CONFIG_KVM_ARM_HOST) += hyp/
 
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o $(KVM)/vfio.o
-kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/arm.o $(ARM)/mmu.o $(ARM)/mmio.o
-kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/psci.o $(ARM)/perf.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/arm.o $(KVM)/arm/mmu.o $(KVM)/arm/mmio.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/psci.o $(KVM)/arm/perf.o
 
 kvm-$(CONFIG_KVM_ARM_HOST) += inject_fault.o regmap.o
 kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
new file mode 100644 (file)
index 0000000..7941699
--- /dev/null
@@ -0,0 +1,1480 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/cpu_pm.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/kvm_host.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/mman.h>
+#include <linux/sched.h>
+#include <linux/kvm.h>
+#include <trace/events/kvm.h>
+#include <kvm/arm_pmu.h>
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+#include <linux/uaccess.h>
+#include <asm/ptrace.h>
+#include <asm/mman.h>
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+#include <asm/virt.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_mmu.h>
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_coproc.h>
+#include <asm/kvm_psci.h>
+#include <asm/sections.h>
+
+#ifdef REQUIRES_VIRT
+__asm__(".arch_extension       virt");
+#endif
+
+static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
+static kvm_cpu_context_t __percpu *kvm_host_cpu_state;
+
+/* Per-CPU variable containing the currently running vcpu. */
+static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
+
+/* The VMID used in the VTTBR */
+static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
+static u32 kvm_next_vmid;
+static unsigned int kvm_vmid_bits __read_mostly;
+static DEFINE_SPINLOCK(kvm_vmid_lock);
+
+static bool vgic_present;
+
+static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled);
+
+static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
+{
+       BUG_ON(preemptible());
+       __this_cpu_write(kvm_arm_running_vcpu, vcpu);
+}
+
+/**
+ * kvm_arm_get_running_vcpu - get the vcpu running on the current CPU.
+ * Must be called from a non-preemptible context.
+ */
+struct kvm_vcpu *kvm_arm_get_running_vcpu(void)
+{
+       BUG_ON(preemptible());
+       return __this_cpu_read(kvm_arm_running_vcpu);
+}
+
+/**
+ * kvm_get_running_vcpus - get the per-CPU array of currently running vcpus.
+ */
+struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void)
+{
+       return &kvm_arm_running_vcpu;
+}
+
+int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
+{
+       return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
+}
+
+int kvm_arch_hardware_setup(void)
+{
+       return 0;
+}
+
+void kvm_arch_check_processor_compat(void *rtn)
+{
+       *(int *)rtn = 0;
+}
+
+
+/**
+ * kvm_arch_init_vm - initializes a VM data structure
+ * @kvm:       pointer to the KVM struct
+ */
+int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
+{
+       int ret, cpu;
+
+       if (type)
+               return -EINVAL;
+
+       kvm->arch.last_vcpu_ran = alloc_percpu(typeof(*kvm->arch.last_vcpu_ran));
+       if (!kvm->arch.last_vcpu_ran)
+               return -ENOMEM;
+
+       for_each_possible_cpu(cpu)
+               *per_cpu_ptr(kvm->arch.last_vcpu_ran, cpu) = -1;
+
+       ret = kvm_alloc_stage2_pgd(kvm);
+       if (ret)
+               goto out_fail_alloc;
+
+       ret = create_hyp_mappings(kvm, kvm + 1, PAGE_HYP);
+       if (ret)
+               goto out_free_stage2_pgd;
+
+       kvm_vgic_early_init(kvm);
+
+       /* Mark the initial VMID generation invalid */
+       kvm->arch.vmid_gen = 0;
+
+       /* The maximum number of VCPUs is limited by the host's GIC model */
+       kvm->arch.max_vcpus = vgic_present ?
+                               kvm_vgic_get_max_vcpus() : KVM_MAX_VCPUS;
+
+       return ret;
+out_free_stage2_pgd:
+       kvm_free_stage2_pgd(kvm);
+out_fail_alloc:
+       free_percpu(kvm->arch.last_vcpu_ran);
+       kvm->arch.last_vcpu_ran = NULL;
+       return ret;
+}
+
+bool kvm_arch_has_vcpu_debugfs(void)
+{
+       return false;
+}
+
+int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
+{
+       return 0;
+}
+
+int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
+{
+       return VM_FAULT_SIGBUS;
+}
+
+
+/**
+ * kvm_arch_destroy_vm - destroy the VM data structure
+ * @kvm:       pointer to the KVM struct
+ */
+void kvm_arch_destroy_vm(struct kvm *kvm)
+{
+       int i;
+
+       free_percpu(kvm->arch.last_vcpu_ran);
+       kvm->arch.last_vcpu_ran = NULL;
+
+       for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+               if (kvm->vcpus[i]) {
+                       kvm_arch_vcpu_free(kvm->vcpus[i]);
+                       kvm->vcpus[i] = NULL;
+               }
+       }
+
+       kvm_vgic_destroy(kvm);
+}
+
+int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
+{
+       int r;
+       switch (ext) {
+       case KVM_CAP_IRQCHIP:
+               r = vgic_present;
+               break;
+       case KVM_CAP_IOEVENTFD:
+       case KVM_CAP_DEVICE_CTRL:
+       case KVM_CAP_USER_MEMORY:
+       case KVM_CAP_SYNC_MMU:
+       case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
+       case KVM_CAP_ONE_REG:
+       case KVM_CAP_ARM_PSCI:
+       case KVM_CAP_ARM_PSCI_0_2:
+       case KVM_CAP_READONLY_MEM:
+       case KVM_CAP_MP_STATE:
+       case KVM_CAP_IMMEDIATE_EXIT:
+               r = 1;
+               break;
+       case KVM_CAP_COALESCED_MMIO:
+               r = KVM_COALESCED_MMIO_PAGE_OFFSET;
+               break;
+       case KVM_CAP_ARM_SET_DEVICE_ADDR:
+               r = 1;
+               break;
+       case KVM_CAP_NR_VCPUS:
+               r = num_online_cpus();
+               break;
+       case KVM_CAP_MAX_VCPUS:
+               r = KVM_MAX_VCPUS;
+               break;
+       case KVM_CAP_NR_MEMSLOTS:
+               r = KVM_USER_MEM_SLOTS;
+               break;
+       case KVM_CAP_MSI_DEVID:
+               if (!kvm)
+                       r = -EINVAL;
+               else
+                       r = kvm->arch.vgic.msis_require_devid;
+               break;
+       case KVM_CAP_ARM_USER_IRQ:
+               /*
+                * 1: EL1_VTIMER, EL1_PTIMER, and PMU.
+                * (bump this number if adding more devices)
+                */
+               r = 1;
+               break;
+       default:
+               r = kvm_arch_dev_ioctl_check_extension(kvm, ext);
+               break;
+       }
+       return r;
+}
+
+long kvm_arch_dev_ioctl(struct file *filp,
+                       unsigned int ioctl, unsigned long arg)
+{
+       return -EINVAL;
+}
+
+
+struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
+{
+       int err;
+       struct kvm_vcpu *vcpu;
+
+       if (irqchip_in_kernel(kvm) && vgic_initialized(kvm)) {
+               err = -EBUSY;
+               goto out;
+       }
+
+       if (id >= kvm->arch.max_vcpus) {
+               err = -EINVAL;
+               goto out;
+       }
+
+       vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
+       if (!vcpu) {
+               err = -ENOMEM;
+               goto out;
+       }
+
+       err = kvm_vcpu_init(vcpu, kvm, id);
+       if (err)
+               goto free_vcpu;
+
+       err = create_hyp_mappings(vcpu, vcpu + 1, PAGE_HYP);
+       if (err)
+               goto vcpu_uninit;
+
+       return vcpu;
+vcpu_uninit:
+       kvm_vcpu_uninit(vcpu);
+free_vcpu:
+       kmem_cache_free(kvm_vcpu_cache, vcpu);
+out:
+       return ERR_PTR(err);
+}
+
+void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
+{
+       kvm_vgic_vcpu_early_init(vcpu);
+}
+
+void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
+{
+       kvm_mmu_free_memory_caches(vcpu);
+       kvm_timer_vcpu_terminate(vcpu);
+       kvm_vgic_vcpu_destroy(vcpu);
+       kvm_pmu_vcpu_destroy(vcpu);
+       kvm_vcpu_uninit(vcpu);
+       kmem_cache_free(kvm_vcpu_cache, vcpu);
+}
+
+void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
+{
+       kvm_arch_vcpu_free(vcpu);
+}
+
+int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
+{
+       return kvm_timer_should_fire(vcpu_vtimer(vcpu)) ||
+              kvm_timer_should_fire(vcpu_ptimer(vcpu));
+}
+
+void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
+{
+       kvm_timer_schedule(vcpu);
+}
+
+void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
+{
+       kvm_timer_unschedule(vcpu);
+}
+
+int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+{
+       /* Force users to call KVM_ARM_VCPU_INIT */
+       vcpu->arch.target = -1;
+       bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
+
+       /* Set up the timer */
+       kvm_timer_vcpu_init(vcpu);
+
+       kvm_arm_reset_debug_ptr(vcpu);
+
+       return 0;
+}
+
+void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+       int *last_ran;
+
+       last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran);
+
+       /*
+        * We might get preempted before the vCPU actually runs, but
+        * over-invalidation doesn't affect correctness.
+        */
+       if (*last_ran != vcpu->vcpu_id) {
+               kvm_call_hyp(__kvm_tlb_flush_local_vmid, vcpu);
+               *last_ran = vcpu->vcpu_id;
+       }
+
+       vcpu->cpu = cpu;
+       vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);
+
+       kvm_arm_set_running_vcpu(vcpu);
+
+       kvm_vgic_load(vcpu);
+}
+
+void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
+{
+       kvm_vgic_put(vcpu);
+
+       vcpu->cpu = -1;
+
+       kvm_arm_set_running_vcpu(NULL);
+       kvm_timer_vcpu_put(vcpu);
+}
+
+int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
+                                   struct kvm_mp_state *mp_state)
+{
+       if (vcpu->arch.power_off)
+               mp_state->mp_state = KVM_MP_STATE_STOPPED;
+       else
+               mp_state->mp_state = KVM_MP_STATE_RUNNABLE;
+
+       return 0;
+}
+
+int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
+                                   struct kvm_mp_state *mp_state)
+{
+       switch (mp_state->mp_state) {
+       case KVM_MP_STATE_RUNNABLE:
+               vcpu->arch.power_off = false;
+               break;
+       case KVM_MP_STATE_STOPPED:
+               vcpu->arch.power_off = true;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/**
+ * kvm_arch_vcpu_runnable - determine if the vcpu can be scheduled
+ * @v:         The VCPU pointer
+ *
+ * If the guest CPU is not waiting for interrupts or an interrupt line is
+ * asserted, the CPU is by definition runnable.
+ */
+int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
+{
+       return ((!!v->arch.irq_lines || kvm_vgic_vcpu_pending_irq(v))
+               && !v->arch.power_off && !v->arch.pause);
+}
+
+/* Just ensure a guest exit from a particular CPU */
+static void exit_vm_noop(void *info)
+{
+}
+
+void force_vm_exit(const cpumask_t *mask)
+{
+       preempt_disable();
+       smp_call_function_many(mask, exit_vm_noop, NULL, true);
+       preempt_enable();
+}
+
+/**
+ * need_new_vmid_gen - check that the VMID is still valid
+ * @kvm: The VM's VMID to check
+ *
+ * return true if there is a new generation of VMIDs being used
+ *
+ * The hardware supports only 256 values with the value zero reserved for the
+ * host, so we check if an assigned value belongs to a previous generation,
+ * which requires us to assign a new value. If we're the first to use a
+ * VMID for the new generation, we must flush necessary caches and TLBs on all
+ * CPUs.
+ */
+static bool need_new_vmid_gen(struct kvm *kvm)
+{
+       return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
+}
+
+/**
+ * update_vttbr - Update the VTTBR with a valid VMID before the guest runs
+ * @kvm:       The guest that we are about to run
+ *
+ * Called from kvm_arch_vcpu_ioctl_run before entering the guest to ensure the
+ * VM has a valid VMID, otherwise assigns a new one and flushes corresponding
+ * caches and TLBs.
+ */
+static void update_vttbr(struct kvm *kvm)
+{
+       phys_addr_t pgd_phys;
+       u64 vmid;
+
+       if (!need_new_vmid_gen(kvm))
+               return;
+
+       spin_lock(&kvm_vmid_lock);
+
+       /*
+        * We need to re-check vmid_gen here to ensure that if another vcpu
+        * already allocated a valid vmid for this vm, this vcpu uses the
+        * same vmid.
+        */
+       if (!need_new_vmid_gen(kvm)) {
+               spin_unlock(&kvm_vmid_lock);
+               return;
+       }
+
+       /* First user of a new VMID generation? */
+       if (unlikely(kvm_next_vmid == 0)) {
+               atomic64_inc(&kvm_vmid_gen);
+               kvm_next_vmid = 1;
+
+               /*
+                * On SMP we know no other CPUs can use this CPU's or each
+                * other's VMID after force_vm_exit returns since the
+                * kvm_vmid_lock blocks them from reentry to the guest.
+                */
+               force_vm_exit(cpu_all_mask);
+               /*
+                * Now broadcast TLB + ICACHE invalidation over the inner
+                * shareable domain to make sure all data structures are
+                * clean.
+                */
+               kvm_call_hyp(__kvm_flush_vm_context);
+       }
+
+       kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
+       kvm->arch.vmid = kvm_next_vmid;
+       kvm_next_vmid++;
+       kvm_next_vmid &= (1 << kvm_vmid_bits) - 1;
+
+       /* update vttbr to be used with the new vmid */
+       pgd_phys = virt_to_phys(kvm->arch.pgd);
+       BUG_ON(pgd_phys & ~VTTBR_BADDR_MASK);
+       vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits);
+       kvm->arch.vttbr = pgd_phys | vmid;
+
+       spin_unlock(&kvm_vmid_lock);
+}
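As a worked illustration of the VTTBR composition above, assuming 8 VMID bits and a VMID shift of 48 (illustrative values only, not taken from the kernel headers):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t pgd_phys = 0x81000000ULL;	/* hypothetical stage-2 PGD address */
	uint64_t vmid = 5;			/* hypothetical 8-bit VMID */
	uint64_t vttbr = pgd_phys | (vmid << 48);

	/* Prints 0x5000081000000: VMID in the top bits, PGD base below */
	printf("vttbr=%#llx\n", (unsigned long long)vttbr);
	return 0;
}

When kvm_next_vmid wraps to zero, the generation counter is bumped and every VM takes the slow path again on its next entry, so a stale TLB entry tagged with a recycled VMID can never be reused by a new VM.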
+
+static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
+{
+       struct kvm *kvm = vcpu->kvm;
+       int ret = 0;
+
+       if (likely(vcpu->arch.has_run_once))
+               return 0;
+
+       vcpu->arch.has_run_once = true;
+
+       /*
+        * Map the VGIC hardware resources before running a vcpu the first
+        * time on this VM.
+        */
+       if (unlikely(irqchip_in_kernel(kvm) && !vgic_ready(kvm))) {
+               ret = kvm_vgic_map_resources(kvm);
+               if (ret)
+                       return ret;
+       }
+
+       ret = kvm_timer_enable(vcpu);
+
+       return ret;
+}
+
+bool kvm_arch_intc_initialized(struct kvm *kvm)
+{
+       return vgic_initialized(kvm);
+}
+
+void kvm_arm_halt_guest(struct kvm *kvm)
+{
+       int i;
+       struct kvm_vcpu *vcpu;
+
+       kvm_for_each_vcpu(i, vcpu, kvm)
+               vcpu->arch.pause = true;
+       kvm_make_all_cpus_request(kvm, KVM_REQ_VCPU_EXIT);
+}
+
+void kvm_arm_halt_vcpu(struct kvm_vcpu *vcpu)
+{
+       vcpu->arch.pause = true;
+       kvm_vcpu_kick(vcpu);
+}
+
+void kvm_arm_resume_vcpu(struct kvm_vcpu *vcpu)
+{
+       struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);
+
+       vcpu->arch.pause = false;
+       swake_up(wq);
+}
+
+void kvm_arm_resume_guest(struct kvm *kvm)
+{
+       int i;
+       struct kvm_vcpu *vcpu;
+
+       kvm_for_each_vcpu(i, vcpu, kvm)
+               kvm_arm_resume_vcpu(vcpu);
+}
+
+static void vcpu_sleep(struct kvm_vcpu *vcpu)
+{
+       struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);
+
+       swait_event_interruptible(*wq, ((!vcpu->arch.power_off) &&
+                                      (!vcpu->arch.pause)));
+}
+
+static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
+{
+       return vcpu->arch.target >= 0;
+}
+
+/**
+ * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
+ * @vcpu:      The VCPU pointer
+ * @run:       The kvm_run structure pointer used for userspace state exchange
+ *
+ * This function is called through the KVM_RUN ioctl from user space. It
+ * executes VM code in a loop until the time slice for the process is used
+ * up or some emulation is needed from user space, in which case it
+ * returns 0 with the kvm_run structure filled in with the data required
+ * for the requested emulation.
+ */
+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+       int ret;
+       sigset_t sigsaved;
+
+       if (unlikely(!kvm_vcpu_initialized(vcpu)))
+               return -ENOEXEC;
+
+       ret = kvm_vcpu_first_run_init(vcpu);
+       if (ret)
+               return ret;
+
+       if (run->exit_reason == KVM_EXIT_MMIO) {
+               ret = kvm_handle_mmio_return(vcpu, vcpu->run);
+               if (ret)
+                       return ret;
+       }
+
+       if (run->immediate_exit)
+               return -EINTR;
+
+       if (vcpu->sigset_active)
+               sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+
+       ret = 1;
+       run->exit_reason = KVM_EXIT_UNKNOWN;
+       while (ret > 0) {
+               /*
+                * Check conditions before entering the guest
+                */
+               cond_resched();
+
+               update_vttbr(vcpu->kvm);
+
+               if (vcpu->arch.power_off || vcpu->arch.pause)
+                       vcpu_sleep(vcpu);
+
+               /*
+                * Preparing the interrupts to be injected also
+                * involves poking the GIC, which must be done in a
+                * non-preemptible context.
+                */
+               preempt_disable();
+
+               kvm_pmu_flush_hwstate(vcpu);
+
+               kvm_timer_flush_hwstate(vcpu);
+               kvm_vgic_flush_hwstate(vcpu);
+
+               local_irq_disable();
+
+               /*
+                * If we have a signal pending, or need to notify a userspace
+                * irqchip about timer or PMU level changes, then we exit (and
+                * update the timer level state in kvm_timer_update_run
+                * below).
+                */
+               if (signal_pending(current) ||
+                   kvm_timer_should_notify_user(vcpu) ||
+                   kvm_pmu_should_notify_user(vcpu)) {
+                       ret = -EINTR;
+                       run->exit_reason = KVM_EXIT_INTR;
+               }
+
+               if (ret <= 0 || need_new_vmid_gen(vcpu->kvm) ||
+                       vcpu->arch.power_off || vcpu->arch.pause) {
+                       local_irq_enable();
+                       kvm_pmu_sync_hwstate(vcpu);
+                       kvm_timer_sync_hwstate(vcpu);
+                       kvm_vgic_sync_hwstate(vcpu);
+                       preempt_enable();
+                       continue;
+               }
+
+               kvm_arm_setup_debug(vcpu);
+
+               /**************************************************************
+                * Enter the guest
+                */
+               trace_kvm_entry(*vcpu_pc(vcpu));
+               guest_enter_irqoff();
+               vcpu->mode = IN_GUEST_MODE;
+
+               ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
+
+               vcpu->mode = OUTSIDE_GUEST_MODE;
+               vcpu->stat.exits++;
+               /*
+                * Back from guest
+                *************************************************************/
+
+               kvm_arm_clear_debug(vcpu);
+
+               /*
+                * We may have taken a host interrupt in HYP mode (i.e.
+                * while executing the guest). This interrupt is still
+                * pending, as we haven't serviced it yet!
+                *
+                * We're now back in SVC mode, with interrupts
+                * disabled.  Enabling the interrupts now will have
+                * the effect of taking the interrupt again, in SVC
+                * mode this time.
+                */
+               local_irq_enable();
+
+               /*
+                * We do local_irq_enable() before calling guest_exit() so
+                * that if a timer interrupt hits while running the guest we
+                * account that tick as being spent in the guest.  We enable
+                * preemption after calling guest_exit() so that if we get
+                * preempted we make sure ticks after that are not counted as
+                * guest time.
+                */
+               guest_exit();
+               trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));
+
+               /*
+                * We must sync the PMU and timer state before the vgic state so
+                * that the vgic can properly sample the updated state of the
+                * interrupt line.
+                */
+               kvm_pmu_sync_hwstate(vcpu);
+               kvm_timer_sync_hwstate(vcpu);
+
+               kvm_vgic_sync_hwstate(vcpu);
+
+               preempt_enable();
+
+               ret = handle_exit(vcpu, run, ret);
+       }
+
+       /* Tell userspace about in-kernel device output levels */
+       if (unlikely(!irqchip_in_kernel(vcpu->kvm))) {
+               kvm_timer_update_run(vcpu);
+               kvm_pmu_update_run(vcpu);
+       }
+
+       if (vcpu->sigset_active)
+               sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+       return ret;
+}
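To show the other side of the ioctl documented above, here is a minimal userspace sketch of a run loop; error handling is mostly omitted, and vcpu_fd plus the mmap'ed kvm_run region are assumed to have been set up elsewhere.

#include <sys/ioctl.h>
#include <linux/kvm.h>

static void run_vcpu(int vcpu_fd, struct kvm_run *run)
{
	for (;;) {
		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
			return;

		switch (run->exit_reason) {
		case KVM_EXIT_MMIO:
			/*
			 * Emulate the access described by run->mmio, then
			 * loop: the kernel completes it on the next KVM_RUN
			 * (see the kvm_handle_mmio_return() call above).
			 */
			break;
		case KVM_EXIT_SYSTEM_EVENT:
			/* PSCI SYSTEM_OFF/SYSTEM_RESET from the guest */
			return;
		default:
			return;
		}
	}
}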
+
+static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level)
+{
+       int bit_index;
+       bool set;
+       unsigned long *ptr;
+
+       if (number == KVM_ARM_IRQ_CPU_IRQ)
+               bit_index = __ffs(HCR_VI);
+       else /* KVM_ARM_IRQ_CPU_FIQ */
+               bit_index = __ffs(HCR_VF);
+
+       ptr = (unsigned long *)&vcpu->arch.irq_lines;
+       if (level)
+               set = test_and_set_bit(bit_index, ptr);
+       else
+               set = test_and_clear_bit(bit_index, ptr);
+
+       /*
+        * If we didn't change anything, no need to wake up or kick other CPUs
+        */
+       if (set == level)
+               return 0;
+
+       /*
+        * The vcpu irq_lines field was updated; wake up sleeping VCPUs and
+        * trigger a world-switch round on the running physical CPU to set the
+        * virtual IRQ/FIQ fields in the HCR appropriately.
+        */
+       kvm_vcpu_kick(vcpu);
+
+       return 0;
+}
+
+int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
+                         bool line_status)
+{
+       u32 irq = irq_level->irq;
+       unsigned int irq_type, vcpu_idx, irq_num;
+       int nrcpus = atomic_read(&kvm->online_vcpus);
+       struct kvm_vcpu *vcpu = NULL;
+       bool level = irq_level->level;
+
+       irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK;
+       vcpu_idx = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK;
+       irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK;
+
+       trace_kvm_irq_line(irq_type, vcpu_idx, irq_num, irq_level->level);
+
+       switch (irq_type) {
+       case KVM_ARM_IRQ_TYPE_CPU:
+               if (irqchip_in_kernel(kvm))
+                       return -ENXIO;
+
+               if (vcpu_idx >= nrcpus)
+                       return -EINVAL;
+
+               vcpu = kvm_get_vcpu(kvm, vcpu_idx);
+               if (!vcpu)
+                       return -EINVAL;
+
+               if (irq_num > KVM_ARM_IRQ_CPU_FIQ)
+                       return -EINVAL;
+
+               return vcpu_interrupt_line(vcpu, irq_num, level);
+       case KVM_ARM_IRQ_TYPE_PPI:
+               if (!irqchip_in_kernel(kvm))
+                       return -ENXIO;
+
+               if (vcpu_idx >= nrcpus)
+                       return -EINVAL;
+
+               vcpu = kvm_get_vcpu(kvm, vcpu_idx);
+               if (!vcpu)
+                       return -EINVAL;
+
+               if (irq_num < VGIC_NR_SGIS || irq_num >= VGIC_NR_PRIVATE_IRQS)
+                       return -EINVAL;
+
+               return kvm_vgic_inject_irq(kvm, vcpu->vcpu_id, irq_num, level);
+       case KVM_ARM_IRQ_TYPE_SPI:
+               if (!irqchip_in_kernel(kvm))
+                       return -ENXIO;
+
+               if (irq_num < VGIC_NR_PRIVATE_IRQS)
+                       return -EINVAL;
+
+               return kvm_vgic_inject_irq(kvm, 0, irq_num, level);
+       }
+
+       return -EINVAL;
+}
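For reference, a hedged userspace sketch of building the packed irq field decoded above, here raising an SPI on the in-kernel VGIC; the constants come from <linux/kvm.h> and the arm KVM uapi headers, and error handling is left to the caller.

#include <sys/ioctl.h>
#include <linux/kvm.h>

static int inject_spi(int vm_fd, unsigned int spi, int level)
{
	struct kvm_irq_level irq_level = {
		/* type in the top bits, the interrupt number at the bottom */
		.irq = (KVM_ARM_IRQ_TYPE_SPI << KVM_ARM_IRQ_TYPE_SHIFT) | spi,
		.level = level,
	};

	return ioctl(vm_fd, KVM_IRQ_LINE, &irq_level);
}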
+
+static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
+                              const struct kvm_vcpu_init *init)
+{
+       unsigned int i;
+       int phys_target = kvm_target_cpu();
+
+       if (init->target != phys_target)
+               return -EINVAL;
+
+       /*
+        * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must
+        * use the same target.
+        */
+       if (vcpu->arch.target != -1 && vcpu->arch.target != init->target)
+               return -EINVAL;
+
+       /* -ENOENT for unknown features, -EINVAL for invalid combinations. */
+       for (i = 0; i < sizeof(init->features) * 8; i++) {
+               bool set = (init->features[i / 32] & (1 << (i % 32)));
+
+               if (set && i >= KVM_VCPU_MAX_FEATURES)
+                       return -ENOENT;
+
+               /*
+                * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must
+                * use the same feature set.
+                */
+               if (vcpu->arch.target != -1 && i < KVM_VCPU_MAX_FEATURES &&
+                   test_bit(i, vcpu->arch.features) != set)
+                       return -EINVAL;
+
+               if (set)
+                       set_bit(i, vcpu->arch.features);
+       }
+
+       vcpu->arch.target = phys_target;
+
+       /* Now we know what it is, we can reset it. */
+       return kvm_reset_vcpu(vcpu);
+}
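A short userspace sketch of the feature bitmap parsed above: ask KVM for the preferred target, then request PSCI 0.2 before KVM_ARM_VCPU_INIT. Constants come from the arm KVM uapi headers; error handling is omitted.

#include <sys/ioctl.h>
#include <linux/kvm.h>

static int init_vcpu(int vm_fd, int vcpu_fd)
{
	struct kvm_vcpu_init init;

	/* Let KVM pick the target; this also zeroes the feature bitmap */
	if (ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init) < 0)
		return -1;

	/* Same i/32 and i%32 layout as the parsing loop above */
	init.features[KVM_ARM_VCPU_PSCI_0_2 / 32] |=
		1U << (KVM_ARM_VCPU_PSCI_0_2 % 32);

	return ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
}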
+
+
+static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
+                                        struct kvm_vcpu_init *init)
+{
+       int ret;
+
+       ret = kvm_vcpu_set_target(vcpu, init);
+       if (ret)
+               return ret;
+
+       /*
+        * Ensure a rebooted VM will fault in RAM pages and detect if the
+        * guest MMU is turned off and flush the caches as needed.
+        */
+       if (vcpu->arch.has_run_once)
+               stage2_unmap_vm(vcpu->kvm);
+
+       vcpu_reset_hcr(vcpu);
+
+       /*
+        * Handle the "start in power-off" case.
+        */
+       if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
+               vcpu->arch.power_off = true;
+       else
+               vcpu->arch.power_off = false;
+
+       return 0;
+}
+
+static int kvm_arm_vcpu_set_attr(struct kvm_vcpu *vcpu,
+                                struct kvm_device_attr *attr)
+{
+       int ret = -ENXIO;
+
+       switch (attr->group) {
+       default:
+               ret = kvm_arm_vcpu_arch_set_attr(vcpu, attr);
+               break;
+       }
+
+       return ret;
+}
+
+static int kvm_arm_vcpu_get_attr(struct kvm_vcpu *vcpu,
+                                struct kvm_device_attr *attr)
+{
+       int ret = -ENXIO;
+
+       switch (attr->group) {
+       default:
+               ret = kvm_arm_vcpu_arch_get_attr(vcpu, attr);
+               break;
+       }
+
+       return ret;
+}
+
+static int kvm_arm_vcpu_has_attr(struct kvm_vcpu *vcpu,
+                                struct kvm_device_attr *attr)
+{
+       int ret = -ENXIO;
+
+       switch (attr->group) {
+       default:
+               ret = kvm_arm_vcpu_arch_has_attr(vcpu, attr);
+               break;
+       }
+
+       return ret;
+}
+
+long kvm_arch_vcpu_ioctl(struct file *filp,
+                        unsigned int ioctl, unsigned long arg)
+{
+       struct kvm_vcpu *vcpu = filp->private_data;
+       void __user *argp = (void __user *)arg;
+       struct kvm_device_attr attr;
+
+       switch (ioctl) {
+       case KVM_ARM_VCPU_INIT: {
+               struct kvm_vcpu_init init;
+
+               if (copy_from_user(&init, argp, sizeof(init)))
+                       return -EFAULT;
+
+               return kvm_arch_vcpu_ioctl_vcpu_init(vcpu, &init);
+       }
+       case KVM_SET_ONE_REG:
+       case KVM_GET_ONE_REG: {
+               struct kvm_one_reg reg;
+
+               if (unlikely(!kvm_vcpu_initialized(vcpu)))
+                       return -ENOEXEC;
+
+               if (copy_from_user(&reg, argp, sizeof(reg)))
+                       return -EFAULT;
+               if (ioctl == KVM_SET_ONE_REG)
+                       return kvm_arm_set_reg(vcpu, &reg);
+               else
+                       return kvm_arm_get_reg(vcpu, &reg);
+       }
+       case KVM_GET_REG_LIST: {
+               struct kvm_reg_list __user *user_list = argp;
+               struct kvm_reg_list reg_list;
+               unsigned n;
+
+               if (unlikely(!kvm_vcpu_initialized(vcpu)))
+                       return -ENOEXEC;
+
+               if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
+                       return -EFAULT;
+               n = reg_list.n;
+               reg_list.n = kvm_arm_num_regs(vcpu);
+               if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
+                       return -EFAULT;
+               if (n < reg_list.n)
+                       return -E2BIG;
+               return kvm_arm_copy_reg_indices(vcpu, user_list->reg);
+       }
+       case KVM_SET_DEVICE_ATTR: {
+               if (copy_from_user(&attr, argp, sizeof(attr)))
+                       return -EFAULT;
+               return kvm_arm_vcpu_set_attr(vcpu, &attr);
+       }
+       case KVM_GET_DEVICE_ATTR: {
+               if (copy_from_user(&attr, argp, sizeof(attr)))
+                       return -EFAULT;
+               return kvm_arm_vcpu_get_attr(vcpu, &attr);
+       }
+       case KVM_HAS_DEVICE_ATTR: {
+               if (copy_from_user(&attr, argp, sizeof(attr)))
+                       return -EFAULT;
+               return kvm_arm_vcpu_has_attr(vcpu, &attr);
+       }
+       default:
+               return -EINVAL;
+       }
+}
+
+/**
+ * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
+ * @kvm: kvm instance
+ * @log: slot id and address to which we copy the log
+ *
+ * Steps 1-4 below provide general overview of dirty page logging. See
+ * kvm_get_dirty_log_protect() function description for additional details.
+ *
+ * We call kvm_get_dirty_log_protect() to handle steps 1-3; upon return we
+ * always flush the TLBs (step 4), even if a previous step failed and the
+ * dirty bitmap may be corrupt. Regardless of the outcome, the KVM logging
+ * API does not preclude a subsequent dirty log read from user space.
+ * Flushing the TLBs ensures that writes will be marked dirty for the next
+ * log read.
+ *
+ *   1. Take a snapshot of the bit and clear it if needed.
+ *   2. Write protect the corresponding page.
+ *   3. Copy the snapshot to userspace.
+ *   4. Flush the TLBs if needed.
+ */
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
+{
+       bool is_dirty = false;
+       int r;
+
+       mutex_lock(&kvm->slots_lock);
+
+       r = kvm_get_dirty_log_protect(kvm, log, &is_dirty);
+
+       if (is_dirty)
+               kvm_flush_remote_tlbs(kvm);
+
+       mutex_unlock(&kvm->slots_lock);
+       return r;
+}
+
+static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
+                                       struct kvm_arm_device_addr *dev_addr)
+{
+       unsigned long dev_id, type;
+
+       dev_id = (dev_addr->id & KVM_ARM_DEVICE_ID_MASK) >>
+               KVM_ARM_DEVICE_ID_SHIFT;
+       type = (dev_addr->id & KVM_ARM_DEVICE_TYPE_MASK) >>
+               KVM_ARM_DEVICE_TYPE_SHIFT;
+
+       switch (dev_id) {
+       case KVM_ARM_DEVICE_VGIC_V2:
+               if (!vgic_present)
+                       return -ENXIO;
+               return kvm_vgic_addr(kvm, type, &dev_addr->addr, true);
+       default:
+               return -ENODEV;
+       }
+}
+
+long kvm_arch_vm_ioctl(struct file *filp,
+                      unsigned int ioctl, unsigned long arg)
+{
+       struct kvm *kvm = filp->private_data;
+       void __user *argp = (void __user *)arg;
+
+       switch (ioctl) {
+       case KVM_CREATE_IRQCHIP: {
+               int ret;
+               if (!vgic_present)
+                       return -ENXIO;
+               mutex_lock(&kvm->lock);
+               ret = kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
+               mutex_unlock(&kvm->lock);
+               return ret;
+       }
+       case KVM_ARM_SET_DEVICE_ADDR: {
+               struct kvm_arm_device_addr dev_addr;
+
+               if (copy_from_user(&dev_addr, argp, sizeof(dev_addr)))
+                       return -EFAULT;
+               return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr);
+       }
+       case KVM_ARM_PREFERRED_TARGET: {
+               int err;
+               struct kvm_vcpu_init init;
+
+               err = kvm_vcpu_preferred_target(&init);
+               if (err)
+                       return err;
+
+               if (copy_to_user(argp, &init, sizeof(init)))
+                       return -EFAULT;
+
+               return 0;
+       }
+       default:
+               return -EINVAL;
+       }
+}
+
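+/*
+ * Per-CPU EL2 setup: replace the HYP stub vectors with KVM's init vector,
+ * then install the HYP page tables, stack and exception vectors, and
+ * initialise stage-2 translation and debug state for this CPU.
+ */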
+static void cpu_init_hyp_mode(void *dummy)
+{
+       phys_addr_t pgd_ptr;
+       unsigned long hyp_stack_ptr;
+       unsigned long stack_page;
+       unsigned long vector_ptr;
+
+       /* Switch from the HYP stub to our own HYP init vector */
+       __hyp_set_vectors(kvm_get_idmap_vector());
+
+       pgd_ptr = kvm_mmu_get_httbr();
+       stack_page = __this_cpu_read(kvm_arm_hyp_stack_page);
+       hyp_stack_ptr = stack_page + PAGE_SIZE;
+       vector_ptr = (unsigned long)kvm_ksym_ref(__kvm_hyp_vector);
+
+       __cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr);
+       __cpu_init_stage2();
+
+       if (is_kernel_in_hyp_mode())
+               kvm_timer_init_vhe();
+
+       kvm_arm_init_debug();
+}
+
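+/* Return this CPU's EL2 vectors to the HYP stub (not needed with VHE). */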
+static void cpu_hyp_reset(void)
+{
+       if (!is_kernel_in_hyp_mode())
+               __hyp_reset_vectors();
+}
+
+static void cpu_hyp_reinit(void)
+{
+       cpu_hyp_reset();
+
+       if (is_kernel_in_hyp_mode()) {
+               /*
+                * __cpu_init_stage2() is safe to call even if the PM
+                * event was cancelled before the CPU was reset.
+                */
+               __cpu_init_stage2();
+       } else {
+               cpu_init_hyp_mode(NULL);
+       }
+}
+
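+/* Enable EL2 on this CPU if it is not already enabled. */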
+static void _kvm_arch_hardware_enable(void *discard)
+{
+       if (!__this_cpu_read(kvm_arm_hardware_enabled)) {
+               cpu_hyp_reinit();
+               __this_cpu_write(kvm_arm_hardware_enabled, 1);
+       }
+}
+
+int kvm_arch_hardware_enable(void)
+{
+       _kvm_arch_hardware_enable(NULL);
+       return 0;
+}
+
+static void _kvm_arch_hardware_disable(void *discard)
+{
+       if (__this_cpu_read(kvm_arm_hardware_enabled)) {
+               cpu_hyp_reset();
+               __this_cpu_write(kvm_arm_hardware_enabled, 0);
+       }
+}
+
+void kvm_arch_hardware_disable(void)
+{
+       _kvm_arch_hardware_disable(NULL);
+}
+
+#ifdef CONFIG_CPU_PM
+static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
+                                   unsigned long cmd,
+                                   void *v)
+{
+       /*
+        * kvm_arm_hardware_enabled is left with its old value over
+        * PM_ENTER->PM_EXIT. It is used to indicate PM_EXIT should
+        * re-enable hyp.
+        */
+       switch (cmd) {
+       case CPU_PM_ENTER:
+               if (__this_cpu_read(kvm_arm_hardware_enabled))
+                       /*
+                        * don't update kvm_arm_hardware_enabled here
+                        * so that the hardware will be re-enabled
+                        * when we resume. See below.
+                        */
+                       cpu_hyp_reset();
+
+               return NOTIFY_OK;
+       case CPU_PM_EXIT:
+               if (__this_cpu_read(kvm_arm_hardware_enabled))
+                       /* The hardware was enabled before suspend. */
+                       cpu_hyp_reinit();
+
+               return NOTIFY_OK;
+
+       default:
+               return NOTIFY_DONE;
+       }
+}
+
+static struct notifier_block hyp_init_cpu_pm_nb = {
+       .notifier_call = hyp_init_cpu_pm_notifier,
+};
+
+static void __init hyp_cpu_pm_init(void)
+{
+       cpu_pm_register_notifier(&hyp_init_cpu_pm_nb);
+}
+static void __init hyp_cpu_pm_exit(void)
+{
+       cpu_pm_unregister_notifier(&hyp_init_cpu_pm_nb);
+}
+#else
+static inline void hyp_cpu_pm_init(void)
+{
+}
+static inline void hyp_cpu_pm_exit(void)
+{
+}
+#endif
+
+static void teardown_common_resources(void)
+{
+       free_percpu(kvm_host_cpu_state);
+}
+
+static int init_common_resources(void)
+{
+       kvm_host_cpu_state = alloc_percpu(kvm_cpu_context_t);
+       if (!kvm_host_cpu_state) {
+               kvm_err("Cannot allocate host CPU state\n");
+               return -ENOMEM;
+       }
+
+       /* set size of VMID supported by CPU */
+       kvm_vmid_bits = kvm_get_vmid_bits();
+       kvm_info("%d-bit VMID\n", kvm_vmid_bits);
+
+       return 0;
+}
+
+static int init_subsystems(void)
+{
+       int err = 0;
+
+       /*
+        * Enable hardware so that subsystem initialisation can access EL2.
+        */
+       on_each_cpu(_kvm_arch_hardware_enable, NULL, 1);
+
+       /*
+        * Register the CPU low-power notifier
+        */
+       hyp_cpu_pm_init();
+
+       /*
+        * Init HYP view of VGIC
+        */
+       err = kvm_vgic_hyp_init();
+       switch (err) {
+       case 0:
+               vgic_present = true;
+               break;
+       case -ENODEV:
+       case -ENXIO:
+               vgic_present = false;
+               err = 0;
+               break;
+       default:
+               goto out;
+       }
+
+       /*
+        * Init HYP architected timer support
+        */
+       err = kvm_timer_hyp_init();
+       if (err)
+               goto out;
+
+       kvm_perf_init();
+       kvm_coproc_table_init();
+
+out:
+       on_each_cpu(_kvm_arch_hardware_disable, NULL, 1);
+
+       return err;
+}
+
+static void teardown_hyp_mode(void)
+{
+       int cpu;
+
+       if (is_kernel_in_hyp_mode())
+               return;
+
+       free_hyp_pgds();
+       for_each_possible_cpu(cpu)
+               free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
+       hyp_cpu_pm_exit();
+}
+
+static int init_vhe_mode(void)
+{
+       kvm_info("VHE mode initialized successfully\n");
+       return 0;
+}
+
+/**
+ * Initialize Hyp mode: allocate the Hyp page tables and per-CPU stacks and
+ * create the Hyp mappings needed to run on all CPUs.
+ */
+static int init_hyp_mode(void)
+{
+       int cpu;
+       int err = 0;
+
+       /*
+        * Allocate Hyp PGD and setup Hyp identity mapping
+        */
+       err = kvm_mmu_init();
+       if (err)
+               goto out_err;
+
+       /*
+        * Allocate stack pages for Hypervisor-mode
+        */
+       for_each_possible_cpu(cpu) {
+               unsigned long stack_page;
+
+               stack_page = __get_free_page(GFP_KERNEL);
+               if (!stack_page) {
+                       err = -ENOMEM;
+                       goto out_err;
+               }
+
+               per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
+       }
+
+       /*
+        * Map the Hyp-code called directly from the host
+        */
+       err = create_hyp_mappings(kvm_ksym_ref(__hyp_text_start),
+                                 kvm_ksym_ref(__hyp_text_end), PAGE_HYP_EXEC);
+       if (err) {
+               kvm_err("Cannot map world-switch code\n");
+               goto out_err;
+       }
+
+       err = create_hyp_mappings(kvm_ksym_ref(__start_rodata),
+                                 kvm_ksym_ref(__end_rodata), PAGE_HYP_RO);
+       if (err) {
+               kvm_err("Cannot map rodata section\n");
+               goto out_err;
+       }
+
+       err = create_hyp_mappings(kvm_ksym_ref(__bss_start),
+                                 kvm_ksym_ref(__bss_stop), PAGE_HYP_RO);
+       if (err) {
+               kvm_err("Cannot map bss section\n");
+               goto out_err;
+       }
+
+       /*
+        * Map the Hyp stack pages
+        */
+       for_each_possible_cpu(cpu) {
+               char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
+               err = create_hyp_mappings(stack_page, stack_page + PAGE_SIZE,
+                                         PAGE_HYP);
+
+               if (err) {
+                       kvm_err("Cannot map hyp stack\n");
+                       goto out_err;
+               }
+       }
+
+       for_each_possible_cpu(cpu) {
+               kvm_cpu_context_t *cpu_ctxt;
+
+               cpu_ctxt = per_cpu_ptr(kvm_host_cpu_state, cpu);
+               err = create_hyp_mappings(cpu_ctxt, cpu_ctxt + 1, PAGE_HYP);
+
+               if (err) {
+                       kvm_err("Cannot map host CPU state: %d\n", err);
+                       goto out_err;
+               }
+       }
+
+       kvm_info("Hyp mode initialized successfully\n");
+
+       return 0;
+
+out_err:
+       teardown_hyp_mode();
+       kvm_err("error initializing Hyp mode: %d\n", err);
+       return err;
+}
+
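+/* smp_call_function_single() helper: report kvm_target_cpu() for this CPU. */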
+static void check_kvm_target_cpu(void *ret)
+{
+       *(int *)ret = kvm_target_cpu();
+}
+
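+/*
+ * Find the VCPU whose MPIDR affinity fields match @mpidr, or return NULL if
+ * no such VCPU exists in this VM.
+ */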
+struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
+{
+       struct kvm_vcpu *vcpu;
+       int i;
+
+       mpidr &= MPIDR_HWID_BITMASK;
+       kvm_for_each_vcpu(i, vcpu, kvm) {
+               if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu))
+                       return vcpu;
+       }
+       return NULL;
+}
+
+/**
+ * Initialize Hyp-mode and memory mappings on all CPUs.
+ */
+int kvm_arch_init(void *opaque)
+{
+       int err;
+       int ret, cpu;
+
+       if (!is_hyp_mode_available()) {
+               kvm_err("HYP mode not available\n");
+               return -ENODEV;
+       }
+
+       for_each_online_cpu(cpu) {
+               smp_call_function_single(cpu, check_kvm_target_cpu, &ret, 1);
+               if (ret < 0) {
+                       kvm_err("Error, CPU %d not supported!\n", cpu);
+                       return -ENODEV;
+               }
+       }
+
+       err = init_common_resources();
+       if (err)
+               return err;
+
+       if (is_kernel_in_hyp_mode())
+               err = init_vhe_mode();
+       else
+               err = init_hyp_mode();
+       if (err)
+               goto out_err;
+
+       err = init_subsystems();
+       if (err)
+               goto out_hyp;
+
+       return 0;
+
+out_hyp:
+       teardown_hyp_mode();
+out_err:
+       teardown_common_resources();
+       return err;
+}
+
+/* NOP: Compiling as a module not supported */
+void kvm_arch_exit(void)
+{
+       kvm_perf_teardown();
+}
+
+static int arm_init(void)
+{
+       int rc = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
+       return rc;
+}
+
+module_init(arm_init);
diff --git a/virt/kvm/arm/mmio.c b/virt/kvm/arm/mmio.c
new file mode 100644 (file)
index 0000000..b6e715f
--- /dev/null
@@ -0,0 +1,217 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/kvm_host.h>
+#include <asm/kvm_mmio.h>
+#include <asm/kvm_emulate.h>
+#include <trace/events/kvm.h>
+
+#include "trace.h"
+
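+/*
+ * Store the low @len bytes (1, 2, 4 or 8) of @data into @buf, in host
+ * byte order.
+ */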
+void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data)
+{
+       void *datap = NULL;
+       union {
+               u8      byte;
+               u16     hword;
+               u32     word;
+               u64     dword;
+       } tmp;
+
+       switch (len) {
+       case 1:
+               tmp.byte        = data;
+               datap           = &tmp.byte;
+               break;
+       case 2:
+               tmp.hword       = data;
+               datap           = &tmp.hword;
+               break;
+       case 4:
+               tmp.word        = data;
+               datap           = &tmp.word;
+               break;
+       case 8:
+               tmp.dword       = data;
+               datap           = &tmp.dword;
+               break;
+       }
+
+       memcpy(buf, datap, len);
+}
+
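+/*
+ * Load a @len byte (1, 2, 4 or 8) value from @buf, in host byte order, and
+ * zero-extend it to an unsigned long.
+ */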
+unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len)
+{
+       unsigned long data = 0;
+       union {
+               u16     hword;
+               u32     word;
+               u64     dword;
+       } tmp;
+
+       switch (len) {
+       case 1:
+               data = *(u8 *)buf;
+               break;
+       case 2:
+               memcpy(&tmp.hword, buf, len);
+               data = tmp.hword;
+               break;
+       case 4:
+               memcpy(&tmp.word, buf, len);
+               data = tmp.word;
+               break;
+       case 8:
+               memcpy(&tmp.dword, buf, len);
+               data = tmp.dword;
+               break;
+       }
+
+       return data;
+}
+
+/**
+ * kvm_handle_mmio_return -- Handle MMIO loads after user space emulation
+ *                          or in-kernel IO emulation
+ *
+ * @vcpu: The VCPU pointer
+ * @run:  The VCPU run struct containing the mmio data
+ */
+int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+       unsigned long data;
+       unsigned int len;
+       int mask;
+
+       if (!run->mmio.is_write) {
+               len = run->mmio.len;
+               if (len > sizeof(unsigned long))
+                       return -EINVAL;
+
+               data = kvm_mmio_read_buf(run->mmio.data, len);
+
+               if (vcpu->arch.mmio_decode.sign_extend &&
+                   len < sizeof(unsigned long)) {
+                       mask = 1U << ((len * 8) - 1);
+                       data = (data ^ mask) - mask;
+               }
+
+               trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
+                              data);
+               data = vcpu_data_host_to_guest(vcpu, data, len);
+               vcpu_set_reg(vcpu, vcpu->arch.mmio_decode.rt, data);
+       }
+
+       return 0;
+}
+
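+/*
+ * Decode the data abort syndrome into access size, direction, sign extension
+ * and destination register, record it in vcpu->arch.mmio_decode and skip the
+ * faulting instruction. A stage-1 page table walk hitting I/O memory gets a
+ * data abort injected back into the guest instead.
+ */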
+static int decode_hsr(struct kvm_vcpu *vcpu, bool *is_write, int *len)
+{
+       unsigned long rt;
+       int access_size;
+       bool sign_extend;
+
+       if (kvm_vcpu_dabt_iss1tw(vcpu)) {
+               /* page table accesses IO mem: tell guest to fix its TTBR */
+               kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
+               return 1;
+       }
+
+       access_size = kvm_vcpu_dabt_get_as(vcpu);
+       if (unlikely(access_size < 0))
+               return access_size;
+
+       *is_write = kvm_vcpu_dabt_iswrite(vcpu);
+       sign_extend = kvm_vcpu_dabt_issext(vcpu);
+       rt = kvm_vcpu_dabt_get_rd(vcpu);
+
+       *len = access_size;
+       vcpu->arch.mmio_decode.sign_extend = sign_extend;
+       vcpu->arch.mmio_decode.rt = rt;
+
+       /*
+        * The MMIO instruction is emulated and should not be re-executed
+        * in the guest.
+        */
+       kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+       return 0;
+}
+
+int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
+                phys_addr_t fault_ipa)
+{
+       unsigned long data;
+       unsigned long rt;
+       int ret;
+       bool is_write;
+       int len;
+       u8 data_buf[8];
+
+       /*
+        * Prepare MMIO operation. First decode the syndrome data we get
+        * from the CPU. Then check whether some in-kernel emulation can
+        * handle it; otherwise let user space do its magic.
+        */
+       if (kvm_vcpu_dabt_isvalid(vcpu)) {
+               ret = decode_hsr(vcpu, &is_write, &len);
+               if (ret)
+                       return ret;
+       } else {
+               kvm_err("load/store instruction decoding not implemented\n");
+               return -ENOSYS;
+       }
+
+       rt = vcpu->arch.mmio_decode.rt;
+
+       if (is_write) {
+               data = vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rt),
+                                              len);
+
+               trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, data);
+               kvm_mmio_write_buf(data_buf, len, data);
+
+               ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, fault_ipa, len,
+                                      data_buf);
+       } else {
+               trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, len,
+                              fault_ipa, 0);
+
+               ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_ipa, len,
+                                     data_buf);
+       }
+
+       /* Now prepare kvm_run for the potential return to userland. */
+       run->mmio.is_write      = is_write;
+       run->mmio.phys_addr     = fault_ipa;
+       run->mmio.len           = len;
+
+       if (!ret) {
+               /* We handled the access successfully in the kernel. */
+               if (!is_write)
+                       memcpy(run->mmio.data, data_buf, len);
+               vcpu->stat.mmio_exit_kernel++;
+               kvm_handle_mmio_return(vcpu, run);
+               return 1;
+       }
+
+       if (is_write)
+               memcpy(run->mmio.data, data_buf, len);
+       vcpu->stat.mmio_exit_user++;
+       run->exit_reason        = KVM_EXIT_MMIO;
+       return 0;
+}
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
new file mode 100644 (file)
index 0000000..efb4335
--- /dev/null
@@ -0,0 +1,1958 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/mman.h>
+#include <linux/kvm_host.h>
+#include <linux/io.h>
+#include <linux/hugetlb.h>
+#include <trace/events/kvm.h>
+#include <asm/pgalloc.h>
+#include <asm/cacheflush.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_mmu.h>
+#include <asm/kvm_mmio.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_emulate.h>
+#include <asm/virt.h>
+
+#include "trace.h"
+
+static pgd_t *boot_hyp_pgd;
+static pgd_t *hyp_pgd;
+static pgd_t *merged_hyp_pgd;
+static DEFINE_MUTEX(kvm_hyp_pgd_mutex);
+
+static unsigned long hyp_idmap_start;
+static unsigned long hyp_idmap_end;
+static phys_addr_t hyp_idmap_vector;
+
+#define S2_PGD_SIZE    (PTRS_PER_S2_PGD * sizeof(pgd_t))
+#define hyp_pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t))
+
+#define KVM_S2PTE_FLAG_IS_IOMAP                (1UL << 0)
+#define KVM_S2_FLAG_LOGGING_ACTIVE     (1UL << 1)
+
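+/* Dirty page logging is active for writable memslots with a dirty bitmap. */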
+static bool memslot_is_logging(struct kvm_memory_slot *memslot)
+{
+       return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
+}
+
+/**
+ * kvm_flush_remote_tlbs() - flush all VM TLB entries for v7/8
+ * @kvm:       pointer to kvm structure.
+ *
+ * Interface to HYP function to flush all VM TLB entries
+ */
+void kvm_flush_remote_tlbs(struct kvm *kvm)
+{
+       kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
+}
+
+static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
+{
+       kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
+}
+
+/*
+ * D-Cache management functions. They take the page table entries by
+ * value, as they are flushing the cache using the kernel mapping (or
+ * kmap on 32bit).
+ */
+static void kvm_flush_dcache_pte(pte_t pte)
+{
+       __kvm_flush_dcache_pte(pte);
+}
+
+static void kvm_flush_dcache_pmd(pmd_t pmd)
+{
+       __kvm_flush_dcache_pmd(pmd);
+}
+
+static void kvm_flush_dcache_pud(pud_t pud)
+{
+       __kvm_flush_dcache_pud(pud);
+}
+
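+/* Treat anything without a valid struct page (!pfn_valid) as device memory. */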
+static bool kvm_is_device_pfn(unsigned long pfn)
+{
+       return !pfn_valid(pfn);
+}
+
+/**
+ * stage2_dissolve_pmd() - clear and flush huge PMD entry
+ * @kvm:       pointer to kvm structure.
+ * @addr:      IPA
+ * @pmd:       pmd pointer for IPA
+ *
+ * Clears a huge PMD entry and flushes the 1st and 2nd stage TLBs for @addr,
+ * so that the pages in the range can be mapped and tracked as dirty at page
+ * granularity.
+ */
+static void stage2_dissolve_pmd(struct kvm *kvm, phys_addr_t addr, pmd_t *pmd)
+{
+       if (!pmd_thp_or_huge(*pmd))
+               return;
+
+       pmd_clear(pmd);
+       kvm_tlb_flush_vmid_ipa(kvm, addr);
+       put_page(virt_to_page(pmd));
+}
+
+static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
+                                 int min, int max)
+{
+       void *page;
+
+       BUG_ON(max > KVM_NR_MEM_OBJS);
+       if (cache->nobjs >= min)
+               return 0;
+       while (cache->nobjs < max) {
+               page = (void *)__get_free_page(PGALLOC_GFP);
+               if (!page)
+                       return -ENOMEM;
+               cache->objects[cache->nobjs++] = page;
+       }
+       return 0;
+}
+
+static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
+{
+       while (mc->nobjs)
+               free_page((unsigned long)mc->objects[--mc->nobjs]);
+}
+
+static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
+{
+       void *p;
+
+       BUG_ON(!mc || !mc->nobjs);
+       p = mc->objects[--mc->nobjs];
+       return p;
+}
+
+static void clear_stage2_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr)
+{
+       pud_t *pud_table __maybe_unused = stage2_pud_offset(pgd, 0UL);
+       stage2_pgd_clear(pgd);
+       kvm_tlb_flush_vmid_ipa(kvm, addr);
+       stage2_pud_free(pud_table);
+       put_page(virt_to_page(pgd));
+}
+
+static void clear_stage2_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
+{
+       pmd_t *pmd_table __maybe_unused = stage2_pmd_offset(pud, 0);
+       VM_BUG_ON(stage2_pud_huge(*pud));
+       stage2_pud_clear(pud);
+       kvm_tlb_flush_vmid_ipa(kvm, addr);
+       stage2_pmd_free(pmd_table);
+       put_page(virt_to_page(pud));
+}
+
+static void clear_stage2_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
+{
+       pte_t *pte_table = pte_offset_kernel(pmd, 0);
+       VM_BUG_ON(pmd_thp_or_huge(*pmd));
+       pmd_clear(pmd);
+       kvm_tlb_flush_vmid_ipa(kvm, addr);
+       pte_free_kernel(NULL, pte_table);
+       put_page(virt_to_page(pmd));
+}
+
+/*
+ * Unmapping vs dcache management:
+ *
+ * If a guest maps certain memory pages as uncached, all writes will
+ * bypass the data cache and go directly to RAM.  However, the CPUs
+ * can still speculate reads (not writes) and fill cache lines with
+ * data.
+ *
+ * Those cache lines will be *clean* cache lines though, so a
+ * clean+invalidate operation is equivalent to an invalidate
+ * operation, because no cache lines are marked dirty.
+ *
+ * Those clean cache lines could be filled prior to an uncached write
+ * by the guest, and the cache coherent IO subsystem would therefore
+ * end up writing old data to disk.
+ *
+ * This is why right after unmapping a page/section and invalidating
+ * the corresponding TLBs, we call kvm_flush_dcache_p*() to make sure
+ * the IO subsystem will never hit in the cache.
+ */
+static void unmap_stage2_ptes(struct kvm *kvm, pmd_t *pmd,
+                      phys_addr_t addr, phys_addr_t end)
+{
+       phys_addr_t start_addr = addr;
+       pte_t *pte, *start_pte;
+
+       start_pte = pte = pte_offset_kernel(pmd, addr);
+       do {
+               if (!pte_none(*pte)) {
+                       pte_t old_pte = *pte;
+
+                       kvm_set_pte(pte, __pte(0));
+                       kvm_tlb_flush_vmid_ipa(kvm, addr);
+
+                       /* No need to invalidate the cache for device mappings */
+                       if (!kvm_is_device_pfn(pte_pfn(old_pte)))
+                               kvm_flush_dcache_pte(old_pte);
+
+                       put_page(virt_to_page(pte));
+               }
+       } while (pte++, addr += PAGE_SIZE, addr != end);
+
+       if (stage2_pte_table_empty(start_pte))
+               clear_stage2_pmd_entry(kvm, pmd, start_addr);
+}
+
+static void unmap_stage2_pmds(struct kvm *kvm, pud_t *pud,
+                      phys_addr_t addr, phys_addr_t end)
+{
+       phys_addr_t next, start_addr = addr;
+       pmd_t *pmd, *start_pmd;
+
+       start_pmd = pmd = stage2_pmd_offset(pud, addr);
+       do {
+               next = stage2_pmd_addr_end(addr, end);
+               if (!pmd_none(*pmd)) {
+                       if (pmd_thp_or_huge(*pmd)) {
+                               pmd_t old_pmd = *pmd;
+
+                               pmd_clear(pmd);
+                               kvm_tlb_flush_vmid_ipa(kvm, addr);
+
+                               kvm_flush_dcache_pmd(old_pmd);
+
+                               put_page(virt_to_page(pmd));
+                       } else {
+                               unmap_stage2_ptes(kvm, pmd, addr, next);
+                       }
+               }
+       } while (pmd++, addr = next, addr != end);
+
+       if (stage2_pmd_table_empty(start_pmd))
+               clear_stage2_pud_entry(kvm, pud, start_addr);
+}
+
+static void unmap_stage2_puds(struct kvm *kvm, pgd_t *pgd,
+                      phys_addr_t addr, phys_addr_t end)
+{
+       phys_addr_t next, start_addr = addr;
+       pud_t *pud, *start_pud;
+
+       start_pud = pud = stage2_pud_offset(pgd, addr);
+       do {
+               next = stage2_pud_addr_end(addr, end);
+               if (!stage2_pud_none(*pud)) {
+                       if (stage2_pud_huge(*pud)) {
+                               pud_t old_pud = *pud;
+
+                               stage2_pud_clear(pud);
+                               kvm_tlb_flush_vmid_ipa(kvm, addr);
+                               kvm_flush_dcache_pud(old_pud);
+                               put_page(virt_to_page(pud));
+                       } else {
+                               unmap_stage2_pmds(kvm, pud, addr, next);
+                       }
+               }
+       } while (pud++, addr = next, addr != end);
+
+       if (stage2_pud_table_empty(start_pud))
+               clear_stage2_pgd_entry(kvm, pgd, start_addr);
+}
+
+/**
+ * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
+ * @kvm:   The VM pointer
+ * @start: The intermediate physical base address of the range to unmap
+ * @size:  The size of the area to unmap
+ *
+ * Clear a range of stage-2 mappings, lowering the various ref-counts.  Must
+ * be called while holding mmu_lock (unless for freeing the stage2 pgd before
+ * destroying the VM), otherwise another faulting VCPU may come in and mess
+ * with things behind our backs.
+ */
+static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
+{
+       pgd_t *pgd;
+       phys_addr_t addr = start, end = start + size;
+       phys_addr_t next;
+
+       pgd = kvm->arch.pgd + stage2_pgd_index(addr);
+       do {
+               next = stage2_pgd_addr_end(addr, end);
+               if (!stage2_pgd_none(*pgd))
+                       unmap_stage2_puds(kvm, pgd, addr, next);
+       } while (pgd++, addr = next, addr != end);
+}
+
+static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
+                             phys_addr_t addr, phys_addr_t end)
+{
+       pte_t *pte;
+
+       pte = pte_offset_kernel(pmd, addr);
+       do {
+               if (!pte_none(*pte) && !kvm_is_device_pfn(pte_pfn(*pte)))
+                       kvm_flush_dcache_pte(*pte);
+       } while (pte++, addr += PAGE_SIZE, addr != end);
+}
+
+static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
+                             phys_addr_t addr, phys_addr_t end)
+{
+       pmd_t *pmd;
+       phys_addr_t next;
+
+       pmd = stage2_pmd_offset(pud, addr);
+       do {
+               next = stage2_pmd_addr_end(addr, end);
+               if (!pmd_none(*pmd)) {
+                       if (pmd_thp_or_huge(*pmd))
+                               kvm_flush_dcache_pmd(*pmd);
+                       else
+                               stage2_flush_ptes(kvm, pmd, addr, next);
+               }
+       } while (pmd++, addr = next, addr != end);
+}
+
+static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
+                             phys_addr_t addr, phys_addr_t end)
+{
+       pud_t *pud;
+       phys_addr_t next;
+
+       pud = stage2_pud_offset(pgd, addr);
+       do {
+               next = stage2_pud_addr_end(addr, end);
+               if (!stage2_pud_none(*pud)) {
+                       if (stage2_pud_huge(*pud))
+                               kvm_flush_dcache_pud(*pud);
+                       else
+                               stage2_flush_pmds(kvm, pud, addr, next);
+               }
+       } while (pud++, addr = next, addr != end);
+}
+
+static void stage2_flush_memslot(struct kvm *kvm,
+                                struct kvm_memory_slot *memslot)
+{
+       phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
+       phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
+       phys_addr_t next;
+       pgd_t *pgd;
+
+       pgd = kvm->arch.pgd + stage2_pgd_index(addr);
+       do {
+               next = stage2_pgd_addr_end(addr, end);
+               stage2_flush_puds(kvm, pgd, addr, next);
+       } while (pgd++, addr = next, addr != end);
+}
+
+/**
+ * stage2_flush_vm - Invalidate cache for pages mapped in stage 2
+ * @kvm: The struct kvm pointer
+ *
+ * Go through the stage 2 page tables and invalidate any cache lines
+ * backing memory already mapped to the VM.
+ */
+static void stage2_flush_vm(struct kvm *kvm)
+{
+       struct kvm_memslots *slots;
+       struct kvm_memory_slot *memslot;
+       int idx;
+
+       idx = srcu_read_lock(&kvm->srcu);
+       spin_lock(&kvm->mmu_lock);
+
+       slots = kvm_memslots(kvm);
+       kvm_for_each_memslot(memslot, slots)
+               stage2_flush_memslot(kvm, memslot);
+
+       spin_unlock(&kvm->mmu_lock);
+       srcu_read_unlock(&kvm->srcu, idx);
+}
+
+static void clear_hyp_pgd_entry(pgd_t *pgd)
+{
+       pud_t *pud_table __maybe_unused = pud_offset(pgd, 0UL);
+       pgd_clear(pgd);
+       pud_free(NULL, pud_table);
+       put_page(virt_to_page(pgd));
+}
+
+static void clear_hyp_pud_entry(pud_t *pud)
+{
+       pmd_t *pmd_table __maybe_unused = pmd_offset(pud, 0);
+       VM_BUG_ON(pud_huge(*pud));
+       pud_clear(pud);
+       pmd_free(NULL, pmd_table);
+       put_page(virt_to_page(pud));
+}
+
+static void clear_hyp_pmd_entry(pmd_t *pmd)
+{
+       pte_t *pte_table = pte_offset_kernel(pmd, 0);
+       VM_BUG_ON(pmd_thp_or_huge(*pmd));
+       pmd_clear(pmd);
+       pte_free_kernel(NULL, pte_table);
+       put_page(virt_to_page(pmd));
+}
+
+static void unmap_hyp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end)
+{
+       pte_t *pte, *start_pte;
+
+       start_pte = pte = pte_offset_kernel(pmd, addr);
+       do {
+               if (!pte_none(*pte)) {
+                       kvm_set_pte(pte, __pte(0));
+                       put_page(virt_to_page(pte));
+               }
+       } while (pte++, addr += PAGE_SIZE, addr != end);
+
+       if (hyp_pte_table_empty(start_pte))
+               clear_hyp_pmd_entry(pmd);
+}
+
+static void unmap_hyp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end)
+{
+       phys_addr_t next;
+       pmd_t *pmd, *start_pmd;
+
+       start_pmd = pmd = pmd_offset(pud, addr);
+       do {
+               next = pmd_addr_end(addr, end);
+               /* Hyp doesn't use huge pmds */
+               if (!pmd_none(*pmd))
+                       unmap_hyp_ptes(pmd, addr, next);
+       } while (pmd++, addr = next, addr != end);
+
+       if (hyp_pmd_table_empty(start_pmd))
+               clear_hyp_pud_entry(pud);
+}
+
+static void unmap_hyp_puds(pgd_t *pgd, phys_addr_t addr, phys_addr_t end)
+{
+       phys_addr_t next;
+       pud_t *pud, *start_pud;
+
+       start_pud = pud = pud_offset(pgd, addr);
+       do {
+               next = pud_addr_end(addr, end);
+               /* Hyp doesn't use huge puds */
+               if (!pud_none(*pud))
+                       unmap_hyp_pmds(pud, addr, next);
+       } while (pud++, addr = next, addr != end);
+
+       if (hyp_pud_table_empty(start_pud))
+               clear_hyp_pgd_entry(pgd);
+}
+
+static void unmap_hyp_range(pgd_t *pgdp, phys_addr_t start, u64 size)
+{
+       pgd_t *pgd;
+       phys_addr_t addr = start, end = start + size;
+       phys_addr_t next;
+
+       /*
+        * We don't unmap anything from HYP, except at the hyp tear down.
+        * Hence, we don't have to invalidate the TLBs here.
+        */
+       pgd = pgdp + pgd_index(addr);
+       do {
+               next = pgd_addr_end(addr, end);
+               if (!pgd_none(*pgd))
+                       unmap_hyp_puds(pgd, addr, next);
+       } while (pgd++, addr = next, addr != end);
+}
+
+/**
+ * free_hyp_pgds - free Hyp-mode page tables
+ *
+ * Assumes hyp_pgd is a page table used strictly in Hyp-mode and
+ * therefore contains either mappings in the kernel memory area (above
+ * PAGE_OFFSET), or device mappings in the vmalloc range (from
+ * VMALLOC_START to VMALLOC_END).
+ *
+ * boot_hyp_pgd should only map two pages for the init code.
+ */
+void free_hyp_pgds(void)
+{
+       unsigned long addr;
+
+       mutex_lock(&kvm_hyp_pgd_mutex);
+
+       if (boot_hyp_pgd) {
+               unmap_hyp_range(boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
+               free_pages((unsigned long)boot_hyp_pgd, hyp_pgd_order);
+               boot_hyp_pgd = NULL;
+       }
+
+       if (hyp_pgd) {
+               unmap_hyp_range(hyp_pgd, hyp_idmap_start, PAGE_SIZE);
+               for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
+                       unmap_hyp_range(hyp_pgd, kern_hyp_va(addr), PGDIR_SIZE);
+               for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
+                       unmap_hyp_range(hyp_pgd, kern_hyp_va(addr), PGDIR_SIZE);
+
+               free_pages((unsigned long)hyp_pgd, hyp_pgd_order);
+               hyp_pgd = NULL;
+       }
+       if (merged_hyp_pgd) {
+               clear_page(merged_hyp_pgd);
+               free_page((unsigned long)merged_hyp_pgd);
+               merged_hyp_pgd = NULL;
+       }
+
+       mutex_unlock(&kvm_hyp_pgd_mutex);
+}
+
+static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
+                                   unsigned long end, unsigned long pfn,
+                                   pgprot_t prot)
+{
+       pte_t *pte;
+       unsigned long addr;
+
+       addr = start;
+       do {
+               pte = pte_offset_kernel(pmd, addr);
+               kvm_set_pte(pte, pfn_pte(pfn, prot));
+               get_page(virt_to_page(pte));
+               kvm_flush_dcache_to_poc(pte, sizeof(*pte));
+               pfn++;
+       } while (addr += PAGE_SIZE, addr != end);
+}
+
+static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
+                                  unsigned long end, unsigned long pfn,
+                                  pgprot_t prot)
+{
+       pmd_t *pmd;
+       pte_t *pte;
+       unsigned long addr, next;
+
+       addr = start;
+       do {
+               pmd = pmd_offset(pud, addr);
+
+               BUG_ON(pmd_sect(*pmd));
+
+               if (pmd_none(*pmd)) {
+                       pte = pte_alloc_one_kernel(NULL, addr);
+                       if (!pte) {
+                               kvm_err("Cannot allocate Hyp pte\n");
+                               return -ENOMEM;
+                       }
+                       pmd_populate_kernel(NULL, pmd, pte);
+                       get_page(virt_to_page(pmd));
+                       kvm_flush_dcache_to_poc(pmd, sizeof(*pmd));
+               }
+
+               next = pmd_addr_end(addr, end);
+
+               create_hyp_pte_mappings(pmd, addr, next, pfn, prot);
+               pfn += (next - addr) >> PAGE_SHIFT;
+       } while (addr = next, addr != end);
+
+       return 0;
+}
+
+static int create_hyp_pud_mappings(pgd_t *pgd, unsigned long start,
+                                  unsigned long end, unsigned long pfn,
+                                  pgprot_t prot)
+{
+       pud_t *pud;
+       pmd_t *pmd;
+       unsigned long addr, next;
+       int ret;
+
+       addr = start;
+       do {
+               pud = pud_offset(pgd, addr);
+
+               if (pud_none_or_clear_bad(pud)) {
+                       pmd = pmd_alloc_one(NULL, addr);
+                       if (!pmd) {
+                               kvm_err("Cannot allocate Hyp pmd\n");
+                               return -ENOMEM;
+                       }
+                       pud_populate(NULL, pud, pmd);
+                       get_page(virt_to_page(pud));
+                       kvm_flush_dcache_to_poc(pud, sizeof(*pud));
+               }
+
+               next = pud_addr_end(addr, end);
+               ret = create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
+               if (ret)
+                       return ret;
+               pfn += (next - addr) >> PAGE_SHIFT;
+       } while (addr = next, addr != end);
+
+       return 0;
+}
+
+static int __create_hyp_mappings(pgd_t *pgdp,
+                                unsigned long start, unsigned long end,
+                                unsigned long pfn, pgprot_t prot)
+{
+       pgd_t *pgd;
+       pud_t *pud;
+       unsigned long addr, next;
+       int err = 0;
+
+       mutex_lock(&kvm_hyp_pgd_mutex);
+       addr = start & PAGE_MASK;
+       end = PAGE_ALIGN(end);
+       do {
+               pgd = pgdp + pgd_index(addr);
+
+               if (pgd_none(*pgd)) {
+                       pud = pud_alloc_one(NULL, addr);
+                       if (!pud) {
+                               kvm_err("Cannot allocate Hyp pud\n");
+                               err = -ENOMEM;
+                               goto out;
+                       }
+                       pgd_populate(NULL, pgd, pud);
+                       get_page(virt_to_page(pgd));
+                       kvm_flush_dcache_to_poc(pgd, sizeof(*pgd));
+               }
+
+               next = pgd_addr_end(addr, end);
+               err = create_hyp_pud_mappings(pgd, addr, next, pfn, prot);
+               if (err)
+                       goto out;
+               pfn += (next - addr) >> PAGE_SHIFT;
+       } while (addr = next, addr != end);
+out:
+       mutex_unlock(&kvm_hyp_pgd_mutex);
+       return err;
+}
+
+static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
+{
+       if (!is_vmalloc_addr(kaddr)) {
+               BUG_ON(!virt_addr_valid(kaddr));
+               return __pa(kaddr);
+       } else {
+               return page_to_phys(vmalloc_to_page(kaddr)) +
+                      offset_in_page(kaddr);
+       }
+}
+
+/**
+ * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
+ * @from:      The virtual kernel start address of the range
+ * @to:                The virtual kernel end address of the range (exclusive)
+ * @prot:      The protection to be applied to this range
+ *
+ * The same virtual address as the kernel virtual address is also used
+ * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying
+ * physical pages.
+ */
+int create_hyp_mappings(void *from, void *to, pgprot_t prot)
+{
+       phys_addr_t phys_addr;
+       unsigned long virt_addr;
+       unsigned long start = kern_hyp_va((unsigned long)from);
+       unsigned long end = kern_hyp_va((unsigned long)to);
+
+       if (is_kernel_in_hyp_mode())
+               return 0;
+
+       start = start & PAGE_MASK;
+       end = PAGE_ALIGN(end);
+
+       for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
+               int err;
+
+               phys_addr = kvm_kaddr_to_phys(from + virt_addr - start);
+               err = __create_hyp_mappings(hyp_pgd, virt_addr,
+                                           virt_addr + PAGE_SIZE,
+                                           __phys_to_pfn(phys_addr),
+                                           prot);
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
+
+/**
+ * create_hyp_io_mappings - duplicate a kernel IO mapping into Hyp mode
+ * @from:      The kernel start VA of the range
+ * @to:                The kernel end VA of the range (exclusive)
+ * @phys_addr: The physical start address which gets mapped
+ *
+ * The resulting HYP VA is the same as the kernel VA, modulo
+ * HYP_PAGE_OFFSET.
+ */
+int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
+{
+       unsigned long start = kern_hyp_va((unsigned long)from);
+       unsigned long end = kern_hyp_va((unsigned long)to);
+
+       if (is_kernel_in_hyp_mode())
+               return 0;
+
+       /* Check for a valid kernel IO mapping */
+       if (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1))
+               return -EINVAL;
+
+       return __create_hyp_mappings(hyp_pgd, start, end,
+                                    __phys_to_pfn(phys_addr), PAGE_HYP_DEVICE);
+}
+
+/**
+ * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
+ * @kvm:       The KVM struct pointer for the VM.
+ *
+ * Allocates only the stage-2 HW PGD level table(s) (can support either full
+ * 40-bit input addresses or limited to 32-bit input addresses). Clears the
+ * allocated pages.
+ *
+ * Note we don't need locking here as this is only called when the VM is
+ * created, which can only be done once.
+ */
+int kvm_alloc_stage2_pgd(struct kvm *kvm)
+{
+       pgd_t *pgd;
+
+       if (kvm->arch.pgd != NULL) {
+               kvm_err("kvm_arch already initialized?\n");
+               return -EINVAL;
+       }
+
+       /* Allocate the HW PGD, making sure that each page gets its own refcount */
+       pgd = alloc_pages_exact(S2_PGD_SIZE, GFP_KERNEL | __GFP_ZERO);
+       if (!pgd)
+               return -ENOMEM;
+
+       kvm->arch.pgd = pgd;
+       return 0;
+}
+
+static void stage2_unmap_memslot(struct kvm *kvm,
+                                struct kvm_memory_slot *memslot)
+{
+       hva_t hva = memslot->userspace_addr;
+       phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
+       phys_addr_t size = PAGE_SIZE * memslot->npages;
+       hva_t reg_end = hva + size;
+
+       /*
+        * A memory region could potentially cover multiple VMAs, and any holes
+        * between them, so iterate over all of them to find out if we should
+        * unmap any of them.
+        *
+        *     +--------------------------------------------+
+        * +---------------+----------------+   +----------------+
+        * |   : VMA 1     |      VMA 2     |   |    VMA 3  :    |
+        * +---------------+----------------+   +----------------+
+        *     |               memory region                |
+        *     +--------------------------------------------+
+        */
+       do {
+               struct vm_area_struct *vma = find_vma(current->mm, hva);
+               hva_t vm_start, vm_end;
+
+               if (!vma || vma->vm_start >= reg_end)
+                       break;
+
+               /*
+                * Take the intersection of this VMA with the memory region
+                */
+               vm_start = max(hva, vma->vm_start);
+               vm_end = min(reg_end, vma->vm_end);
+
+               if (!(vma->vm_flags & VM_PFNMAP)) {
+                       gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
+                       unmap_stage2_range(kvm, gpa, vm_end - vm_start);
+               }
+               hva = vm_end;
+       } while (hva < reg_end);
+}
+
+/**
+ * stage2_unmap_vm - Unmap Stage-2 RAM mappings
+ * @kvm: The struct kvm pointer
+ *
+ * Go through the memory regions and unmap any regular RAM
+ * backing memory already mapped to the VM.
+ */
+void stage2_unmap_vm(struct kvm *kvm)
+{
+       struct kvm_memslots *slots;
+       struct kvm_memory_slot *memslot;
+       int idx;
+
+       idx = srcu_read_lock(&kvm->srcu);
+       spin_lock(&kvm->mmu_lock);
+
+       slots = kvm_memslots(kvm);
+       kvm_for_each_memslot(memslot, slots)
+               stage2_unmap_memslot(kvm, memslot);
+
+       spin_unlock(&kvm->mmu_lock);
+       srcu_read_unlock(&kvm->srcu, idx);
+}
+
+/**
+ * kvm_free_stage2_pgd - free all stage-2 tables
+ * @kvm:       The KVM struct pointer for the VM.
+ *
+ * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
+ * underlying level-2 and level-3 tables before freeing the actual level-1 table
+ * and setting the struct pointer to NULL.
+ *
+ * Note we don't need locking here as this is only called when the VM is
+ * destroyed, which can only be done once.
+ */
+void kvm_free_stage2_pgd(struct kvm *kvm)
+{
+       if (kvm->arch.pgd == NULL)
+               return;
+
+       unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
+       /* Free the HW pgd, one page at a time */
+       free_pages_exact(kvm->arch.pgd, S2_PGD_SIZE);
+       kvm->arch.pgd = NULL;
+}
+
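+/*
+ * Walk the stage-2 tables to the PUD level for @addr, allocating a missing
+ * PUD table from @cache if needed (or returning NULL when no cache is given).
+ */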
+static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
+                            phys_addr_t addr)
+{
+       pgd_t *pgd;
+       pud_t *pud;
+
+       pgd = kvm->arch.pgd + stage2_pgd_index(addr);
+       if (WARN_ON(stage2_pgd_none(*pgd))) {
+               if (!cache)
+                       return NULL;
+               pud = mmu_memory_cache_alloc(cache);
+               stage2_pgd_populate(pgd, pud);
+               get_page(virt_to_page(pgd));
+       }
+
+       return stage2_pud_offset(pgd, addr);
+}
+
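+/*
+ * Walk down to the PMD level for @addr, allocating a missing PMD table from
+ * @cache if needed (or returning NULL when no cache is given).
+ */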
+static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
+                            phys_addr_t addr)
+{
+       pud_t *pud;
+       pmd_t *pmd;
+
+       pud = stage2_get_pud(kvm, cache, addr);
+       if (stage2_pud_none(*pud)) {
+               if (!cache)
+                       return NULL;
+               pmd = mmu_memory_cache_alloc(cache);
+               stage2_pud_populate(pud, pmd);
+               get_page(virt_to_page(pud));
+       }
+
+       return stage2_pmd_offset(pud, addr);
+}
+
+static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
+                              *cache, phys_addr_t addr, const pmd_t *new_pmd)
+{
+       pmd_t *pmd, old_pmd;
+
+       pmd = stage2_get_pmd(kvm, cache, addr);
+       VM_BUG_ON(!pmd);
+
+       /*
+        * Mapping in huge pages should only happen through a fault.  If a
+        * page is merged into a transparent huge page, the individual
+        * subpages of that huge page should be unmapped through MMU
+        * notifiers before we get here.
+        *
+        * Merging of CompoundPages is not supported; they should instead be
+        * split first, unmapped, merged, and mapped back in on demand.
+        */
+       VM_BUG_ON(pmd_present(*pmd) && pmd_pfn(*pmd) != pmd_pfn(*new_pmd));
+
+       old_pmd = *pmd;
+       if (pmd_present(old_pmd)) {
+               pmd_clear(pmd);
+               kvm_tlb_flush_vmid_ipa(kvm, addr);
+       } else {
+               get_page(virt_to_page(pmd));
+       }
+
+       kvm_set_pmd(pmd, *new_pmd);
+       return 0;
+}
+
+static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
+                         phys_addr_t addr, const pte_t *new_pte,
+                         unsigned long flags)
+{
+       pmd_t *pmd;
+       pte_t *pte, old_pte;
+       bool iomap = flags & KVM_S2PTE_FLAG_IS_IOMAP;
+       bool logging_active = flags & KVM_S2_FLAG_LOGGING_ACTIVE;
+
+       VM_BUG_ON(logging_active && !cache);
+
+       /* Create stage-2 page table mapping - Levels 0 and 1 */
+       pmd = stage2_get_pmd(kvm, cache, addr);
+       if (!pmd) {
+               /*
+                * Ignore calls from kvm_set_spte_hva for unallocated
+                * address ranges.
+                */
+               return 0;
+       }
+
+       /*
+        * While dirty page logging is active, dissolve a huge PMD first and
+        * then continue on to allocate a page-level mapping.
+        */
+       if (logging_active)
+               stage2_dissolve_pmd(kvm, addr, pmd);
+
+       /* Create stage-2 page mappings - Level 2 */
+       if (pmd_none(*pmd)) {
+               if (!cache)
+                       return 0; /* ignore calls from kvm_set_spte_hva */
+               pte = mmu_memory_cache_alloc(cache);
+               pmd_populate_kernel(NULL, pmd, pte);
+               get_page(virt_to_page(pmd));
+       }
+
+       pte = pte_offset_kernel(pmd, addr);
+
+       if (iomap && pte_present(*pte))
+               return -EFAULT;
+
+       /* Create 2nd stage page table mapping - Level 3 */
+       old_pte = *pte;
+       if (pte_present(old_pte)) {
+               kvm_set_pte(pte, __pte(0));
+               kvm_tlb_flush_vmid_ipa(kvm, addr);
+       } else {
+               get_page(virt_to_page(pte));
+       }
+
+       kvm_set_pte(pte, *new_pte);
+       return 0;
+}
+
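+/*
+ * Test and clear the access flag in a stage-2 PTE, using the architecture's
+ * __ptep_test_and_clear_young() helper when it is available.
+ */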
+#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+static int stage2_ptep_test_and_clear_young(pte_t *pte)
+{
+       if (pte_young(*pte)) {
+               *pte = pte_mkold(*pte);
+               return 1;
+       }
+       return 0;
+}
+#else
+static int stage2_ptep_test_and_clear_young(pte_t *pte)
+{
+       return __ptep_test_and_clear_young(pte);
+}
+#endif
+
+static int stage2_pmdp_test_and_clear_young(pmd_t *pmd)
+{
+       return stage2_ptep_test_and_clear_young((pte_t *)pmd);
+}
+
+/**
+ * kvm_phys_addr_ioremap - map a device range to guest IPA
+ *
+ * @kvm:       The KVM pointer
+ * @guest_ipa: The IPA at which to insert the mapping
+ * @pa:                The physical address of the device
+ * @size:      The size of the mapping
+ * @writable:  Whether the mapping should be writable
+ */
+int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
+                         phys_addr_t pa, unsigned long size, bool writable)
+{
+       phys_addr_t addr, end;
+       int ret = 0;
+       unsigned long pfn;
+       struct kvm_mmu_memory_cache cache = { 0, };
+
+       end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
+       pfn = __phys_to_pfn(pa);
+
+       for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
+               pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE);
+
+               if (writable)
+                       pte = kvm_s2pte_mkwrite(pte);
+
+               ret = mmu_topup_memory_cache(&cache, KVM_MMU_CACHE_MIN_PAGES,
+                                               KVM_NR_MEM_OBJS);
+               if (ret)
+                       goto out;
+               spin_lock(&kvm->mmu_lock);
+               ret = stage2_set_pte(kvm, &cache, addr, &pte,
+                                               KVM_S2PTE_FLAG_IS_IOMAP);
+               spin_unlock(&kvm->mmu_lock);
+               if (ret)
+                       goto out;
+
+               pfn++;
+       }
+
+out:
+       mmu_free_memory_cache(&cache);
+       return ret;
+}
+
+static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap)
+{
+       kvm_pfn_t pfn = *pfnp;
+       gfn_t gfn = *ipap >> PAGE_SHIFT;
+
+       if (PageTransCompoundMap(pfn_to_page(pfn))) {
+               unsigned long mask;
+               /*
+                * The address we faulted on is backed by a transparent huge
+                * page.  However, because we map the compound huge page and
+                * not the individual tail page, we need to transfer the
+                * refcount to the head page.  We have to be careful that the
+                * THP doesn't start to split while we are adjusting the
+                * refcounts.
+                *
+                * We are sure this doesn't happen, because mmu_notifier_retry
+                * was successful and we are holding the mmu_lock, so if this
+                * THP is trying to split, it will be blocked in the mmu
+                * notifier before touching any of the pages, specifically
+                * before being able to call __split_huge_page_refcount().
+                *
+                * We can therefore safely transfer the refcount from PG_tail
+                * to PG_head and switch the pfn from a tail page to the head
+                * page accordingly.
+                */
+               mask = PTRS_PER_PMD - 1;
+               VM_BUG_ON((gfn & mask) != (pfn & mask));
+               if (pfn & mask) {
+                       *ipap &= PMD_MASK;
+                       kvm_release_pfn_clean(pfn);
+                       pfn &= ~mask;
+                       kvm_get_pfn(pfn);
+                       *pfnp = pfn;
+               }
+
+               return true;
+       }
+
+       return false;
+}
+
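+/*
+ * An instruction abort can never be a write fault; for a data abort, check
+ * whether the faulting access was a write.
+ */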
+static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
+{
+       if (kvm_vcpu_trap_is_iabt(vcpu))
+               return false;
+
+       return kvm_vcpu_dabt_iswrite(vcpu);
+}
+
+/**
+ * stage2_wp_ptes - write protect PMD range
+ * @pmd:       pointer to pmd entry
+ * @addr:      range start address
+ * @end:       range end address
+ */
+static void stage2_wp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end)
+{
+       pte_t *pte;
+
+       pte = pte_offset_kernel(pmd, addr);
+       do {
+               if (!pte_none(*pte)) {
+                       if (!kvm_s2pte_readonly(pte))
+                               kvm_set_s2pte_readonly(pte);
+               }
+       } while (pte++, addr += PAGE_SIZE, addr != end);
+}
+
+/**
+ * stage2_wp_pmds - write protect PUD range
+ * @pud:       pointer to pud entry
+ * @addr:      range start address
+ * @end:       range end address
+ */
+static void stage2_wp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end)
+{
+       pmd_t *pmd;
+       phys_addr_t next;
+
+       pmd = stage2_pmd_offset(pud, addr);
+
+       do {
+               next = stage2_pmd_addr_end(addr, end);
+               if (!pmd_none(*pmd)) {
+                       if (pmd_thp_or_huge(*pmd)) {
+                               if (!kvm_s2pmd_readonly(pmd))
+                                       kvm_set_s2pmd_readonly(pmd);
+                       } else {
+                               stage2_wp_ptes(pmd, addr, next);
+                       }
+               }
+       } while (pmd++, addr = next, addr != end);
+}
+
+/**
+ * stage2_wp_puds - write protect PGD range
+ * @pgd:       pointer to pgd entry
+ * @addr:      range start address
+ * @end:       range end address
+ *
+ * Process PUD entries; huge PUDs are not supported, so we BUG() on them.
+ */
+static void stage2_wp_puds(pgd_t *pgd, phys_addr_t addr, phys_addr_t end)
+{
+       pud_t *pud;
+       phys_addr_t next;
+
+       pud = stage2_pud_offset(pgd, addr);
+       do {
+               next = stage2_pud_addr_end(addr, end);
+               if (!stage2_pud_none(*pud)) {
+                       /* TODO: PUD not supported, revisit later if supported */
+                       BUG_ON(stage2_pud_huge(*pud));
+                       stage2_wp_pmds(pud, addr, next);
+               }
+       } while (pud++, addr = next, addr != end);
+}
+
+/**
+ * stage2_wp_range() - write protect stage2 memory region range
+ * @kvm:       The KVM pointer
+ * @addr:      Start address of range
+ * @end:       End address of range
+ */
+static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
+{
+       pgd_t *pgd;
+       phys_addr_t next;
+
+       pgd = kvm->arch.pgd + stage2_pgd_index(addr);
+       do {
+               /*
+                * Release kvm_mmu_lock periodically if the memory region is
+                * large. Otherwise, we may see kernel panics with
+                * CONFIG_DETECT_HUNG_TASK, CONFIG_LOCKUP_DETECTOR,
+                * CONFIG_LOCKDEP. Additionally, holding the lock too long
+                * will also starve other vCPUs.
+                */
+               if (need_resched() || spin_needbreak(&kvm->mmu_lock))
+                       cond_resched_lock(&kvm->mmu_lock);
+
+               next = stage2_pgd_addr_end(addr, end);
+               if (stage2_pgd_present(*pgd))
+                       stage2_wp_puds(pgd, addr, next);
+       } while (pgd++, addr = next, addr != end);
+}
+
+/**
+ * kvm_mmu_wp_memory_region() - write protect stage 2 entries for memory slot
+ * @kvm:       The KVM pointer
+ * @slot:      The memory slot to write protect
+ *
+ * Called to start logging dirty pages after the memory region has been set up
+ * with the KVM_MEM_LOG_DIRTY_PAGES flag. After this function returns, all
+ * present PMDs and PTEs in the memory region are write protected, and the
+ * dirty page log can then be read.
+ *
+ * Acquires kvm_mmu_lock. Called with the kvm->slots_lock mutex held,
+ * serializing operations on VM memory regions.
+ */
+void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
+{
+       struct kvm_memslots *slots = kvm_memslots(kvm);
+       struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
+       phys_addr_t start = memslot->base_gfn << PAGE_SHIFT;
+       phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
+
+       spin_lock(&kvm->mmu_lock);
+       stage2_wp_range(kvm, start, end);
+       spin_unlock(&kvm->mmu_lock);
+       kvm_flush_remote_tlbs(kvm);
+}
+
+/**
+ * kvm_mmu_write_protect_pt_masked() - write protect dirty pages
+ * @kvm:       The KVM pointer
+ * @slot:      The memory slot associated with mask
+ * @gfn_offset:        The gfn offset in memory slot
+ * @mask:      The mask of dirty pages at offset 'gfn_offset' in this memory
+ *             slot to be write protected
+ *
+ * Walks the bits set in @mask and write protects the associated PTEs. The
+ * caller must hold kvm_mmu_lock.
+ */
+static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
+               struct kvm_memory_slot *slot,
+               gfn_t gfn_offset, unsigned long mask)
+{
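+       /*
+        * Write protect the whole IPA span covered by the set bits in @mask,
+        * from the lowest to the highest set bit.
+        */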
+       phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
+       phys_addr_t start = (base_gfn +  __ffs(mask)) << PAGE_SHIFT;
+       phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;
+
+       stage2_wp_range(kvm, start, end);
+}
+
+/*
+ * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
+ * dirty pages.
+ *
+ * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
+ * enable dirty logging for them.
+ */
+void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
+               struct kvm_memory_slot *slot,
+               gfn_t gfn_offset, unsigned long mask)
+{
+       kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
+}
+
+static void coherent_cache_guest_page(struct kvm_vcpu *vcpu, kvm_pfn_t pfn,
+                                     unsigned long size)
+{
+       __coherent_cache_guest_page(vcpu, pfn, size);
+}
+
+static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
+                         struct kvm_memory_slot *memslot, unsigned long hva,
+                         unsigned long fault_status)
+{
+       int ret;
+       bool write_fault, writable, hugetlb = false, force_pte = false;
+       unsigned long mmu_seq;
+       gfn_t gfn = fault_ipa >> PAGE_SHIFT;
+       struct kvm *kvm = vcpu->kvm;
+       struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
+       struct vm_area_struct *vma;
+       kvm_pfn_t pfn;
+       pgprot_t mem_type = PAGE_S2;
+       bool logging_active = memslot_is_logging(memslot);
+       unsigned long flags = 0;
+
+       write_fault = kvm_is_write_fault(vcpu);
+       if (fault_status == FSC_PERM && !write_fault) {
+               kvm_err("Unexpected L2 read permission error\n");
+               return -EFAULT;
+       }
+
+       /* Let's check if we will get back a huge page backed by hugetlbfs */
+       down_read(&current->mm->mmap_sem);
+       vma = find_vma_intersection(current->mm, hva, hva + 1);
+       if (unlikely(!vma)) {
+               kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
+               up_read(&current->mm->mmap_sem);
+               return -EFAULT;
+       }
+
+       if (is_vm_hugetlb_page(vma) && !logging_active) {
+               hugetlb = true;
+               gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
+       } else {
+               /*
+                * Pages belonging to memslots that don't have the same
+                * alignment for userspace and IPA cannot be mapped using
+                * block descriptors even if the pages belong to a THP for
+                * the process, because the stage-2 block descriptor will
+                * cover more than a single THP and we lose atomicity for
+                * unmapping, updates, and splits of the THP or other pages
+                * in the stage-2 block range.
+                */
+               if ((memslot->userspace_addr & ~PMD_MASK) !=
+                   ((memslot->base_gfn << PAGE_SHIFT) & ~PMD_MASK))
+                       force_pte = true;
+       }
+       up_read(&current->mm->mmap_sem);
+
+       /* We need minimum second+third level pages */
+       ret = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES,
+                                    KVM_NR_MEM_OBJS);
+       if (ret)
+               return ret;
+
+       mmu_seq = vcpu->kvm->mmu_notifier_seq;
+       /*
+        * Ensure the read of mmu_notifier_seq happens before we call
+        * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
+        * the page we just got a reference to getting unmapped before we have
+        * a chance to grab the mmu_lock, which ensures that if the page gets
+        * unmapped afterwards, the call to kvm_unmap_hva will take it away
+        * from us again properly. This smp_rmb() interacts with the smp_wmb()
+        * in kvm_mmu_notifier_invalidate_<page|range_end>.
+        */
+       smp_rmb();
+
+       pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
+       if (is_error_noslot_pfn(pfn))
+               return -EFAULT;
+
+       if (kvm_is_device_pfn(pfn)) {
+               mem_type = PAGE_S2_DEVICE;
+               flags |= KVM_S2PTE_FLAG_IS_IOMAP;
+       } else if (logging_active) {
+               /*
+                * Faults on pages in a memslot with logging enabled
+                * should not be mapped with huge pages (it introduces churn
+                * and performance degradation), so force a pte mapping.
+                */
+               force_pte = true;
+               flags |= KVM_S2_FLAG_LOGGING_ACTIVE;
+
+               /*
+                * Only actually map the page as writable if this was a write
+                * fault.
+                */
+               if (!write_fault)
+                       writable = false;
+       }
+
+       spin_lock(&kvm->mmu_lock);
+       if (mmu_notifier_retry(kvm, mmu_seq))
+               goto out_unlock;
+
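+       /*
+        * If this is not a hugetlbfs mapping and PTE mappings are not being
+        * forced, check whether the page is backed by a transparent huge
+        * page and, if so, upgrade to a PMD-sized mapping.
+        */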
+       if (!hugetlb && !force_pte)
+               hugetlb = transparent_hugepage_adjust(&pfn, &fault_ipa);
+
+       if (hugetlb) {
+               pmd_t new_pmd = pfn_pmd(pfn, mem_type);
+               new_pmd = pmd_mkhuge(new_pmd);
+               if (writable) {
+                       new_pmd = kvm_s2pmd_mkwrite(new_pmd);
+                       kvm_set_pfn_dirty(pfn);
+               }
+               coherent_cache_guest_page(vcpu, pfn, PMD_SIZE);
+               ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
+       } else {
+               pte_t new_pte = pfn_pte(pfn, mem_type);
+
+               if (writable) {
+                       new_pte = kvm_s2pte_mkwrite(new_pte);
+                       kvm_set_pfn_dirty(pfn);
+                       mark_page_dirty(kvm, gfn);
+               }
+               coherent_cache_guest_page(vcpu, pfn, PAGE_SIZE);
+               ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, flags);
+       }
+
+out_unlock:
+       spin_unlock(&kvm->mmu_lock);
+       kvm_set_pfn_accessed(pfn);
+       kvm_release_pfn_clean(pfn);
+       return ret;
+}
+
+/*
+ * Resolve the access fault by making the page young again.
+ * Note that because the faulting entry is guaranteed not to be
+ * cached in the TLB, we don't need to invalidate anything.
+ * Only the HW Access Flag updates are supported for Stage 2 (no DBM),
+ * so there is no need for atomic (pte|pmd)_mkyoung operations.
+ */
+static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
+{
+       pmd_t *pmd;
+       pte_t *pte;
+       kvm_pfn_t pfn;
+       bool pfn_valid = false;
+
+       trace_kvm_access_fault(fault_ipa);
+
+       spin_lock(&vcpu->kvm->mmu_lock);
+
+       pmd = stage2_get_pmd(vcpu->kvm, NULL, fault_ipa);
+       if (!pmd || pmd_none(*pmd))     /* Nothing there */
+               goto out;
+
+       if (pmd_thp_or_huge(*pmd)) {    /* THP, HugeTLB */
+               *pmd = pmd_mkyoung(*pmd);
+               pfn = pmd_pfn(*pmd);
+               pfn_valid = true;
+               goto out;
+       }
+
+       pte = pte_offset_kernel(pmd, fault_ipa);
+       if (pte_none(*pte))             /* Nothing there either */
+               goto out;
+
+       *pte = pte_mkyoung(*pte);       /* Just a page... */
+       pfn = pte_pfn(*pte);
+       pfn_valid = true;
+out:
+       spin_unlock(&vcpu->kvm->mmu_lock);
+       if (pfn_valid)
+               kvm_set_pfn_accessed(pfn);
+}
+
+/**
+ * kvm_handle_guest_abort - handles all 2nd stage aborts
+ * @vcpu:      the VCPU pointer
+ * @run:       the kvm_run structure
+ *
+ * Any abort that gets to the host is almost guaranteed to be caused by a
+ * missing second stage translation table entry, which can mean either that the
+ * guest simply needs more memory and we must allocate an appropriate page, or
+ * that the guest tried to access I/O memory, which is emulated by user space.
+ * The distinction is based on the IPA causing the fault and whether this
+ * memory region has been registered as standard RAM by user space.
+ */
+int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+       unsigned long fault_status;
+       phys_addr_t fault_ipa;
+       struct kvm_memory_slot *memslot;
+       unsigned long hva;
+       bool is_iabt, write_fault, writable;
+       gfn_t gfn;
+       int ret, idx;
+
+       is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
+       if (unlikely(!is_iabt && kvm_vcpu_dabt_isextabt(vcpu))) {
+               kvm_inject_vabt(vcpu);
+               return 1;
+       }
+
+       fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
+
+       trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
+                             kvm_vcpu_get_hfar(vcpu), fault_ipa);
+
+       /* Check the stage-2 fault is a translation, permission or access fault */
+       fault_status = kvm_vcpu_trap_get_fault_type(vcpu);
+       if (fault_status != FSC_FAULT && fault_status != FSC_PERM &&
+           fault_status != FSC_ACCESS) {
+               kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
+                       kvm_vcpu_trap_get_class(vcpu),
+                       (unsigned long)kvm_vcpu_trap_get_fault(vcpu),
+                       (unsigned long)kvm_vcpu_get_hsr(vcpu));
+               return -EFAULT;
+       }
+
+       idx = srcu_read_lock(&vcpu->kvm->srcu);
+
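+       /*
+        * Translate the faulting IPA to a memslot and a userspace address
+        * to decide between RAM and MMIO handling.
+        */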
+       gfn = fault_ipa >> PAGE_SHIFT;
+       memslot = gfn_to_memslot(vcpu->kvm, gfn);
+       hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
+       write_fault = kvm_is_write_fault(vcpu);
+       if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
+               if (is_iabt) {
+                       /* Prefetch Abort on I/O address */
+                       kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
+                       ret = 1;
+                       goto out_unlock;
+               }
+
+               /*
+                * Check for a cache maintenance operation. Since we
+                * ended-up here, we know it is outside of any memory
+                * slot. But we can't find out if that is for a device,
+                * or if the guest is just being stupid. The only thing
+                * we know for sure is that this range cannot be cached.
+                *
+                * So let's assume that the guest is just being
+                * cautious, and skip the instruction.
+                */
+               if (kvm_vcpu_dabt_is_cm(vcpu)) {
+                       kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+                       ret = 1;
+                       goto out_unlock;
+               }
+
+               /*
+                * The IPA is reported as [MAX:12], so we need to
+                * complement it with the bottom 12 bits from the
+                * faulting VA. This is always 12 bits, irrespective
+                * of the page size.
+                */
+               fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
+               ret = io_mem_abort(vcpu, run, fault_ipa);
+               goto out_unlock;
+       }
+
+       /* Userspace should not be able to register out-of-bounds IPAs */
+       VM_BUG_ON(fault_ipa >= KVM_PHYS_SIZE);
+
+       if (fault_status == FSC_ACCESS) {
+               handle_access_fault(vcpu, fault_ipa);
+               ret = 1;
+               goto out_unlock;
+       }
+
+       ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
+       if (ret == 0)
+               ret = 1;
+out_unlock:
+       srcu_read_unlock(&vcpu->kvm->srcu, idx);
+       return ret;
+}
+
+static int handle_hva_to_gpa(struct kvm *kvm,
+                            unsigned long start,
+                            unsigned long end,
+                            int (*handler)(struct kvm *kvm,
+                                           gpa_t gpa, u64 size,
+                                           void *data),
+                            void *data)
+{
+       struct kvm_memslots *slots;
+       struct kvm_memory_slot *memslot;
+       int ret = 0;
+
+       slots = kvm_memslots(kvm);
+
+       /* we only care about the pages that the guest sees */
+       kvm_for_each_memslot(memslot, slots) {
+               unsigned long hva_start, hva_end;
+               gfn_t gpa;
+
+               hva_start = max(start, memslot->userspace_addr);
+               hva_end = min(end, memslot->userspace_addr +
+                                       (memslot->npages << PAGE_SHIFT));
+               if (hva_start >= hva_end)
+                       continue;
+
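+               /*
+                * Convert the clamped HVA range to a guest physical range
+                * and invoke the handler; results from all memslots are
+                * OR-ed together.
+                */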
+               gpa = hva_to_gfn_memslot(hva_start, memslot) << PAGE_SHIFT;
+               ret |= handler(kvm, gpa, (u64)(hva_end - hva_start), data);
+       }
+
+       return ret;
+}
+
+static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
+{
+       unmap_stage2_range(kvm, gpa, size);
+       return 0;
+}
+
+int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
+{
+       unsigned long end = hva + PAGE_SIZE;
+
+       if (!kvm->arch.pgd)
+               return 0;
+
+       trace_kvm_unmap_hva(hva);
+       handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);
+       return 0;
+}
+
+int kvm_unmap_hva_range(struct kvm *kvm,
+                       unsigned long start, unsigned long end)
+{
+       if (!kvm->arch.pgd)
+               return 0;
+
+       trace_kvm_unmap_hva_range(start, end);
+       handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
+       return 0;
+}
+
+static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
+{
+       pte_t *pte = (pte_t *)data;
+
+       WARN_ON(size != PAGE_SIZE);
+       /*
+        * We can always call stage2_set_pte with the KVM_S2_FLAG_LOGGING_ACTIVE
+        * flag clear because MMU notifiers will have unmapped a huge PMD before
+        * calling ->change_pte() (which in turn calls kvm_set_spte_hva()) and
+        * therefore stage2_set_pte() never needs to clear out a huge PMD
+        * through this calling path.
+        */
+       stage2_set_pte(kvm, NULL, gpa, pte, 0);
+       return 0;
+}
+
+
+void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+{
+       unsigned long end = hva + PAGE_SIZE;
+       pte_t stage2_pte;
+
+       if (!kvm->arch.pgd)
+               return;
+
+       trace_kvm_set_spte_hva(hva);
+       stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2);
+       handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
+}
+
+static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
+{
+       pmd_t *pmd;
+       pte_t *pte;
+
+       WARN_ON(size != PAGE_SIZE && size != PMD_SIZE);
+       pmd = stage2_get_pmd(kvm, NULL, gpa);
+       if (!pmd || pmd_none(*pmd))     /* Nothing there */
+               return 0;
+
+       if (pmd_thp_or_huge(*pmd))      /* THP, HugeTLB */
+               return stage2_pmdp_test_and_clear_young(pmd);
+
+       pte = pte_offset_kernel(pmd, gpa);
+       if (pte_none(*pte))
+               return 0;
+
+       return stage2_ptep_test_and_clear_young(pte);
+}
+
+static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
+{
+       pmd_t *pmd;
+       pte_t *pte;
+
+       WARN_ON(size != PAGE_SIZE && size != PMD_SIZE);
+       pmd = stage2_get_pmd(kvm, NULL, gpa);
+       if (!pmd || pmd_none(*pmd))     /* Nothing there */
+               return 0;
+
+       if (pmd_thp_or_huge(*pmd))              /* THP, HugeTLB */
+               return pmd_young(*pmd);
+
+       pte = pte_offset_kernel(pmd, gpa);
+       if (!pte_none(*pte))            /* Just a page... */
+               return pte_young(*pte);
+
+       return 0;
+}
+
+int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
+{
+       trace_kvm_age_hva(start, end);
+       return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
+}
+
+int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+{
+       trace_kvm_test_age_hva(hva);
+       return handle_hva_to_gpa(kvm, hva, hva + PAGE_SIZE, kvm_test_age_hva_handler, NULL);
+}
+
+void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
+{
+       mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
+}
+
+phys_addr_t kvm_mmu_get_httbr(void)
+{
+       if (__kvm_cpu_uses_extended_idmap())
+               return virt_to_phys(merged_hyp_pgd);
+       else
+               return virt_to_phys(hyp_pgd);
+}
+
+phys_addr_t kvm_get_idmap_vector(void)
+{
+       return hyp_idmap_vector;
+}
+
+static int kvm_map_idmap_text(pgd_t *pgd)
+{
+       int err;
+
+       /* Create the idmap in the boot page tables */
+       err =   __create_hyp_mappings(pgd,
+                                     hyp_idmap_start, hyp_idmap_end,
+                                     __phys_to_pfn(hyp_idmap_start),
+                                     PAGE_HYP_EXEC);
+       if (err)
+               kvm_err("Failed to idmap %lx-%lx\n",
+                       hyp_idmap_start, hyp_idmap_end);
+
+       return err;
+}
+
+int kvm_mmu_init(void)
+{
+       int err;
+
+       hyp_idmap_start = kvm_virt_to_phys(__hyp_idmap_text_start);
+       hyp_idmap_end = kvm_virt_to_phys(__hyp_idmap_text_end);
+       hyp_idmap_vector = kvm_virt_to_phys(__kvm_hyp_init);
+
+       /*
+        * We rely on the linker script to ensure at build time that the HYP
+        * init code does not cross a page boundary.
+        */
+       BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK);
+
+       kvm_info("IDMAP page: %lx\n", hyp_idmap_start);
+       kvm_info("HYP VA range: %lx:%lx\n",
+                kern_hyp_va(PAGE_OFFSET), kern_hyp_va(~0UL));
+
+       if (hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) &&
+           hyp_idmap_start <  kern_hyp_va(~0UL) &&
+           hyp_idmap_start != (unsigned long)__hyp_idmap_text_start) {
+               /*
+                * The idmap page intersects with the HYP VA space; it is
+                * not safe to continue further.
+                */
+               kvm_err("IDMAP intersecting with HYP VA, unable to continue\n");
+               err = -EINVAL;
+               goto out;
+       }
+
+       hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order);
+       if (!hyp_pgd) {
+               kvm_err("Hyp mode PGD not allocated\n");
+               err = -ENOMEM;
+               goto out;
+       }
+
+       if (__kvm_cpu_uses_extended_idmap()) {
+               boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+                                                        hyp_pgd_order);
+               if (!boot_hyp_pgd) {
+                       kvm_err("Hyp boot PGD not allocated\n");
+                       err = -ENOMEM;
+                       goto out;
+               }
+
+               err = kvm_map_idmap_text(boot_hyp_pgd);
+               if (err)
+                       goto out;
+
+               merged_hyp_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+               if (!merged_hyp_pgd) {
+                       kvm_err("Failed to allocate extra HYP pgd\n");
+                       err = -ENOMEM;
+                       goto out;
+               }
+               __kvm_extend_hypmap(boot_hyp_pgd, hyp_pgd, merged_hyp_pgd,
+                                   hyp_idmap_start);
+       } else {
+               err = kvm_map_idmap_text(hyp_pgd);
+               if (err)
+                       goto out;
+       }
+
+       return 0;
+out:
+       free_hyp_pgds();
+       return err;
+}
+
+void kvm_arch_commit_memory_region(struct kvm *kvm,
+                                  const struct kvm_userspace_memory_region *mem,
+                                  const struct kvm_memory_slot *old,
+                                  const struct kvm_memory_slot *new,
+                                  enum kvm_mr_change change)
+{
+       /*
+        * At this point memslot has been committed and there is an
+        * allocated dirty_bitmap[], dirty pages will be tracked while the
+        * memory slot is write protected.
+        */
+       if (change != KVM_MR_DELETE && mem->flags & KVM_MEM_LOG_DIRTY_PAGES)
+               kvm_mmu_wp_memory_region(kvm, mem->slot);
+}
+
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+                                  struct kvm_memory_slot *memslot,
+                                  const struct kvm_userspace_memory_region *mem,
+                                  enum kvm_mr_change change)
+{
+       hva_t hva = mem->userspace_addr;
+       hva_t reg_end = hva + mem->memory_size;
+       bool writable = !(mem->flags & KVM_MEM_READONLY);
+       int ret = 0;
+
+       if (change != KVM_MR_CREATE && change != KVM_MR_MOVE &&
+                       change != KVM_MR_FLAGS_ONLY)
+               return 0;
+
+       /*
+        * Prevent userspace from creating a memory region outside of the
+        * IPA space addressable by the KVM guest.
+        */
+       if (memslot->base_gfn + memslot->npages >=
+           (KVM_PHYS_SIZE >> PAGE_SHIFT))
+               return -EFAULT;
+
+       /*
+        * A memory region could potentially cover multiple VMAs, and any holes
+        * between them, so iterate over all of them to find out if we can map
+        * any of them right now.
+        *
+        *     +--------------------------------------------+
+        * +---------------+----------------+   +----------------+
+        * |   : VMA 1     |      VMA 2     |   |    VMA 3  :    |
+        * +---------------+----------------+   +----------------+
+        *     |               memory region                |
+        *     +--------------------------------------------+
+        */
+       do {
+               struct vm_area_struct *vma = find_vma(current->mm, hva);
+               hva_t vm_start, vm_end;
+
+               if (!vma || vma->vm_start >= reg_end)
+                       break;
+
+               /*
+                * Mapping a read-only VMA is only allowed if the
+                * memory region is configured as read-only.
+                */
+               if (writable && !(vma->vm_flags & VM_WRITE)) {
+                       ret = -EPERM;
+                       break;
+               }
+
+               /*
+                * Take the intersection of this VMA with the memory region
+                */
+               vm_start = max(hva, vma->vm_start);
+               vm_end = min(reg_end, vma->vm_end);
+
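+               /*
+                * VM_PFNMAP VMAs (e.g. device MMIO) are mapped into the
+                * guest's stage 2 page tables up front as device memory.
+                */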
+               if (vma->vm_flags & VM_PFNMAP) {
+                       gpa_t gpa = mem->guest_phys_addr +
+                                   (vm_start - mem->userspace_addr);
+                       phys_addr_t pa;
+
+                       pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
+                       pa += vm_start - vma->vm_start;
+
+                       /* IO region dirty page logging not allowed */
+                       if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES)
+                               return -EINVAL;
+
+                       ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
+                                                   vm_end - vm_start,
+                                                   writable);
+                       if (ret)
+                               break;
+               }
+               hva = vm_end;
+       } while (hva < reg_end);
+
+       if (change == KVM_MR_FLAGS_ONLY)
+               return ret;
+
+       spin_lock(&kvm->mmu_lock);
+       if (ret)
+               unmap_stage2_range(kvm, mem->guest_phys_addr, mem->memory_size);
+       else
+               stage2_flush_memslot(kvm, memslot);
+       spin_unlock(&kvm->mmu_lock);
+       return ret;
+}
+
+void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
+                          struct kvm_memory_slot *dont)
+{
+}
+
+int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
+                           unsigned long npages)
+{
+       return 0;
+}
+
+void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots)
+{
+}
+
+void kvm_arch_flush_shadow_all(struct kvm *kvm)
+{
+       kvm_free_stage2_pgd(kvm);
+}
+
+void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
+                                  struct kvm_memory_slot *slot)
+{
+       gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
+       phys_addr_t size = slot->npages << PAGE_SHIFT;
+
+       spin_lock(&kvm->mmu_lock);
+       unmap_stage2_range(kvm, gpa, size);
+       spin_unlock(&kvm->mmu_lock);
+}
+
+/*
+ * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
+ *
+ * Main problems:
+ * - S/W ops are local to a CPU (not broadcast)
+ * - We have line migration behind our back (speculation)
+ * - System caches don't support S/W at all (damn!)
+ *
+ * In the face of the above, the best we can do is to try and convert
+ * S/W ops to VA ops. Because the guest is not allowed to infer the
+ * S/W to PA mapping, it can only use S/W to nuke the whole cache,
+ * which is a rather good thing for us.
+ *
+ * Also, it is only used when turning caches on/off ("The expected
+ * usage of the cache maintenance instructions that operate by set/way
+ * is associated with the cache maintenance instructions associated
+ * with the powerdown and powerup of caches, if this is required by
+ * the implementation.").
+ *
+ * We use the following policy:
+ *
+ * - If we trap a S/W operation, we enable VM trapping to detect
+ *   caches being turned on/off, and do a full clean.
+ *
+ * - We flush the caches both when they are turned on and when turned off.
+ *
+ * - Once the caches are enabled, we stop trapping VM ops.
+ */
+void kvm_set_way_flush(struct kvm_vcpu *vcpu)
+{
+       unsigned long hcr = vcpu_get_hcr(vcpu);
+
+       /*
+        * If this is the first time we do a S/W operation
+        * (i.e. HCR_TVM not set) flush the whole memory, and set the
+        * VM trapping.
+        *
+        * Otherwise, rely on the VM trapping to wait for the MMU +
+        * Caches to be turned off. At that point, we'll be able to
+        * clean the caches again.
+        */
+       if (!(hcr & HCR_TVM)) {
+               trace_kvm_set_way_flush(*vcpu_pc(vcpu),
+                                       vcpu_has_cache_enabled(vcpu));
+               stage2_flush_vm(vcpu->kvm);
+               vcpu_set_hcr(vcpu, hcr | HCR_TVM);
+       }
+}
+
+void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
+{
+       bool now_enabled = vcpu_has_cache_enabled(vcpu);
+
+       /*
+        * If switching the MMU+caches on, need to invalidate the caches.
+        * If switching it off, need to clean the caches.
+        * Clean + invalidate does the trick always.
+        */
+       if (now_enabled != was_enabled)
+               stage2_flush_vm(vcpu->kvm);
+
+       /* Caches are now on, stop trapping VM ops (until a S/W op) */
+       if (now_enabled)
+               vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) & ~HCR_TVM);
+
+       trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled);
+}
diff --git a/virt/kvm/arm/perf.c b/virt/kvm/arm/perf.c
new file mode 100644 (file)
index 0000000..1a3849d
--- /dev/null
@@ -0,0 +1,68 @@
+/*
+ * Based on the x86 implementation.
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/perf_event.h>
+#include <linux/kvm_host.h>
+
+#include <asm/kvm_emulate.h>
+
+static int kvm_is_in_guest(void)
+{
+       return kvm_arm_get_running_vcpu() != NULL;
+}
+
+static int kvm_is_user_mode(void)
+{
+       struct kvm_vcpu *vcpu;
+
+       vcpu = kvm_arm_get_running_vcpu();
+
+       if (vcpu)
+               return !vcpu_mode_priv(vcpu);
+
+       return 0;
+}
+
+static unsigned long kvm_get_guest_ip(void)
+{
+       struct kvm_vcpu *vcpu;
+
+       vcpu = kvm_arm_get_running_vcpu();
+
+       if (vcpu)
+               return *vcpu_pc(vcpu);
+
+       return 0;
+}
+
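+/*
+ * Callbacks used by the core perf code to attribute samples taken while a
+ * guest vcpu was running.
+ */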
+static struct perf_guest_info_callbacks kvm_guest_cbs = {
+       .is_in_guest    = kvm_is_in_guest,
+       .is_user_mode   = kvm_is_user_mode,
+       .get_guest_ip   = kvm_get_guest_ip,
+};
+
+int kvm_perf_init(void)
+{
+       return perf_register_guest_info_callbacks(&kvm_guest_cbs);
+}
+
+int kvm_perf_teardown(void)
+{
+       return perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
+}
diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c
new file mode 100644 (file)
index 0000000..a08d7a9
--- /dev/null
@@ -0,0 +1,332 @@
+/*
+ * Copyright (C) 2012 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/preempt.h>
+#include <linux/kvm_host.h>
+#include <linux/wait.h>
+
+#include <asm/cputype.h>
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_psci.h>
+#include <asm/kvm_host.h>
+
+#include <uapi/linux/psci.h>
+
+/*
+ * This is an implementation of the Power State Coordination Interface
+ * as described in ARM document number ARM DEN 0022A.
+ */
+
+#define AFFINITY_MASK(level)   ~((0x1UL << ((level) * MPIDR_LEVEL_BITS)) - 1)
+
+static unsigned long psci_affinity_mask(unsigned long affinity_level)
+{
+       if (affinity_level <= 3)
+               return MPIDR_HWID_BITMASK & AFFINITY_MASK(affinity_level);
+
+       return 0;
+}
+
+static unsigned long kvm_psci_vcpu_suspend(struct kvm_vcpu *vcpu)
+{
+       /*
+        * NOTE: For simplicity, we make VCPU suspend emulation the
+        * same as WFI (Wait-for-interrupt) emulation.
+        *
+        * This means for KVM the wakeup events are interrupts and
+        * this is consistent with intended use of StateID as described
+        * in section 5.4.1 of PSCI v0.2 specification (ARM DEN 0022A).
+        *
+        * Further, we also treat a power-down request the same as a
+        * stand-by request, as per section 5.4.2 clause 3 of the PSCI v0.2
+        * specification (ARM DEN 0022A). This means all suspend states
+        * for KVM will preserve the register state.
+        */
+       kvm_vcpu_block(vcpu);
+
+       return PSCI_RET_SUCCESS;
+}
+
+static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
+{
+       vcpu->arch.power_off = true;
+}
+
+static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
+{
+       struct kvm *kvm = source_vcpu->kvm;
+       struct kvm_vcpu *vcpu = NULL;
+       struct swait_queue_head *wq;
+       unsigned long cpu_id;
+       unsigned long context_id;
+       phys_addr_t target_pc;
+
+       cpu_id = vcpu_get_reg(source_vcpu, 1) & MPIDR_HWID_BITMASK;
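+       /*
+        * An AArch32 caller can only provide a 32-bit MPIDR; keep only the
+        * lower 32 bits.
+        */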
+       if (vcpu_mode_is_32bit(source_vcpu))
+               cpu_id &= ~((u32) 0);
+
+       vcpu = kvm_mpidr_to_vcpu(kvm, cpu_id);
+
+       /*
+        * Make sure the caller requested a valid CPU and that the CPU is
+        * turned off.
+        */
+       if (!vcpu)
+               return PSCI_RET_INVALID_PARAMS;
+       if (!vcpu->arch.power_off) {
+               if (kvm_psci_version(source_vcpu) != KVM_ARM_PSCI_0_1)
+                       return PSCI_RET_ALREADY_ON;
+               else
+                       return PSCI_RET_INVALID_PARAMS;
+       }
+
+       target_pc = vcpu_get_reg(source_vcpu, 2);
+       context_id = vcpu_get_reg(source_vcpu, 3);
+
+       kvm_reset_vcpu(vcpu);
+
+       /* Gracefully handle Thumb2 entry point */
+       if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
+               target_pc &= ~((phys_addr_t) 1);
+               vcpu_set_thumb(vcpu);
+       }
+
+       /* Propagate caller endianness */
+       if (kvm_vcpu_is_be(source_vcpu))
+               kvm_vcpu_set_be(vcpu);
+
+       *vcpu_pc(vcpu) = target_pc;
+       /*
+        * NOTE: We always update r0 (or x0) because for PSCI v0.1
+        * the general purpose registers are undefined upon CPU_ON.
+        */
+       vcpu_set_reg(vcpu, 0, context_id);
+       vcpu->arch.power_off = false;
+       smp_mb();               /* Make sure the above is visible */
+
+       wq = kvm_arch_vcpu_wq(vcpu);
+       swake_up(wq);
+
+       return PSCI_RET_SUCCESS;
+}
+
+static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
+{
+       int i, matching_cpus = 0;
+       unsigned long mpidr;
+       unsigned long target_affinity;
+       unsigned long target_affinity_mask;
+       unsigned long lowest_affinity_level;
+       struct kvm *kvm = vcpu->kvm;
+       struct kvm_vcpu *tmp;
+
+       target_affinity = vcpu_get_reg(vcpu, 1);
+       lowest_affinity_level = vcpu_get_reg(vcpu, 2);
+
+       /* Determine target affinity mask */
+       target_affinity_mask = psci_affinity_mask(lowest_affinity_level);
+       if (!target_affinity_mask)
+               return PSCI_RET_INVALID_PARAMS;
+
+       /* Ignore other bits of target affinity */
+       target_affinity &= target_affinity_mask;
+
+       /*
+        * If one or more VCPUs matching the target affinity are running,
+        * return ON, otherwise return OFF.
+        */
+       kvm_for_each_vcpu(i, tmp, kvm) {
+               mpidr = kvm_vcpu_get_mpidr_aff(tmp);
+               if ((mpidr & target_affinity_mask) == target_affinity) {
+                       matching_cpus++;
+                       if (!tmp->arch.power_off)
+                               return PSCI_0_2_AFFINITY_LEVEL_ON;
+               }
+       }
+
+       if (!matching_cpus)
+               return PSCI_RET_INVALID_PARAMS;
+
+       return PSCI_0_2_AFFINITY_LEVEL_OFF;
+}
+
+static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type)
+{
+       int i;
+       struct kvm_vcpu *tmp;
+
+       /*
+        * The KVM ABI specifies that a system event exit may call KVM_RUN
+        * again and may perform shutdown/reboot at a later time than when the
+        * actual request is made.  Since we are implementing PSCI and a
+        * caller of PSCI reboot and shutdown expects that the system shuts
+        * down or reboots immediately, let's make sure that VCPUs are not run
+        * after this call is handled and before the VCPUs have been
+        * re-initialized.
+        */
+       kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
+               tmp->arch.power_off = true;
+               kvm_vcpu_kick(tmp);
+       }
+
+       memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
+       vcpu->run->system_event.type = type;
+       vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
+}
+
+static void kvm_psci_system_off(struct kvm_vcpu *vcpu)
+{
+       kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_SHUTDOWN);
+}
+
+static void kvm_psci_system_reset(struct kvm_vcpu *vcpu)
+{
+       kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_RESET);
+}
+
+int kvm_psci_version(struct kvm_vcpu *vcpu)
+{
+       if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features))
+               return KVM_ARM_PSCI_0_2;
+
+       return KVM_ARM_PSCI_0_1;
+}
+
+static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
+{
+       struct kvm *kvm = vcpu->kvm;
+       unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
+       unsigned long val;
+       int ret = 1;
+
+       switch (psci_fn) {
+       case PSCI_0_2_FN_PSCI_VERSION:
+               /*
+                * Bits[31:16] = Major Version = 0
+                * Bits[15:0] = Minor Version = 2
+                */
+               val = 2;
+               break;
+       case PSCI_0_2_FN_CPU_SUSPEND:
+       case PSCI_0_2_FN64_CPU_SUSPEND:
+               val = kvm_psci_vcpu_suspend(vcpu);
+               break;
+       case PSCI_0_2_FN_CPU_OFF:
+               kvm_psci_vcpu_off(vcpu);
+               val = PSCI_RET_SUCCESS;
+               break;
+       case PSCI_0_2_FN_CPU_ON:
+       case PSCI_0_2_FN64_CPU_ON:
+               mutex_lock(&kvm->lock);
+               val = kvm_psci_vcpu_on(vcpu);
+               mutex_unlock(&kvm->lock);
+               break;
+       case PSCI_0_2_FN_AFFINITY_INFO:
+       case PSCI_0_2_FN64_AFFINITY_INFO:
+               val = kvm_psci_vcpu_affinity_info(vcpu);
+               break;
+       case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
+               /*
+                * Either the Trusted OS is MP and hence does not require
+                * migration, or no Trusted OS is present.
+                */
+               val = PSCI_0_2_TOS_MP;
+               break;
+       case PSCI_0_2_FN_SYSTEM_OFF:
+               kvm_psci_system_off(vcpu);
+               /*
+                * We shouldn't be going back to the guest VCPU after
+                * receiving a SYSTEM_OFF request.
+                *
+                * If user space accidentally or deliberately resumes the
+                * guest VCPU after a SYSTEM_OFF request, the guest VCPU
+                * should see an internal failure from the PSCI return
+                * value. To achieve this, we preload r0 (or x0) with
+                * PSCI return value INTERNAL_FAILURE.
+                */
+               val = PSCI_RET_INTERNAL_FAILURE;
+               ret = 0;
+               break;
+       case PSCI_0_2_FN_SYSTEM_RESET:
+               kvm_psci_system_reset(vcpu);
+               /*
+                * Same reason as SYSTEM_OFF for preloading r0 (or x0)
+                * with PSCI return value INTERNAL_FAILURE.
+                */
+               val = PSCI_RET_INTERNAL_FAILURE;
+               ret = 0;
+               break;
+       default:
+               val = PSCI_RET_NOT_SUPPORTED;
+               break;
+       }
+
+       vcpu_set_reg(vcpu, 0, val);
+       return ret;
+}
+
+static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
+{
+       struct kvm *kvm = vcpu->kvm;
+       unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
+       unsigned long val;
+
+       switch (psci_fn) {
+       case KVM_PSCI_FN_CPU_OFF:
+               kvm_psci_vcpu_off(vcpu);
+               val = PSCI_RET_SUCCESS;
+               break;
+       case KVM_PSCI_FN_CPU_ON:
+               mutex_lock(&kvm->lock);
+               val = kvm_psci_vcpu_on(vcpu);
+               mutex_unlock(&kvm->lock);
+               break;
+       default:
+               val = PSCI_RET_NOT_SUPPORTED;
+               break;
+       }
+
+       vcpu_set_reg(vcpu, 0, val);
+       return 1;
+}
+
+/**
+ * kvm_psci_call - handle PSCI call if r0 value is in range
+ * @vcpu: Pointer to the VCPU struct
+ *
+ * Handle PSCI calls from guests through traps from HVC instructions.
+ * The calling convention is similar to SMC calls to the secure world
+ * where the function number is placed in r0.
+ *
+ * This function returns: > 0 (success), 0 (success but exit to user
+ * space), and < 0 (errors)
+ *
+ * Errors:
+ * -EINVAL: Unrecognized PSCI function
+ */
+int kvm_psci_call(struct kvm_vcpu *vcpu)
+{
+       switch (kvm_psci_version(vcpu)) {
+       case KVM_ARM_PSCI_0_2:
+               return kvm_psci_0_2_call(vcpu);
+       case KVM_ARM_PSCI_0_1:
+               return kvm_psci_0_1_call(vcpu);
+       default:
+               return -EINVAL;
+       }
+}
index 37d8b98867d5a04fd814ac89be30c1a44e0f213c..f7dc5ddd6847ba09a6ced07403cba502bb4f8e4b 100644 (file)
 #define TRACE_SYSTEM kvm
 
 /*
- * Tracepoints for vgic
+ * Tracepoints for entry/exit to guest
  */
-TRACE_EVENT(vgic_update_irq_pending,
-       TP_PROTO(unsigned long vcpu_id, __u32 irq, bool level),
-       TP_ARGS(vcpu_id, irq, level),
+TRACE_EVENT(kvm_entry,
+       TP_PROTO(unsigned long vcpu_pc),
+       TP_ARGS(vcpu_pc),
 
        TP_STRUCT__entry(
-               __field(        unsigned long,  vcpu_id )
-               __field(        __u32,          irq     )
-               __field(        bool,           level   )
+               __field(        unsigned long,  vcpu_pc         )
        ),
 
        TP_fast_assign(
-               __entry->vcpu_id        = vcpu_id;
-               __entry->irq            = irq;
+               __entry->vcpu_pc                = vcpu_pc;
+       ),
+
+       TP_printk("PC: 0x%08lx", __entry->vcpu_pc)
+);
+
+TRACE_EVENT(kvm_exit,
+       TP_PROTO(int idx, unsigned int exit_reason, unsigned long vcpu_pc),
+       TP_ARGS(idx, exit_reason, vcpu_pc),
+
+       TP_STRUCT__entry(
+               __field(        int,            idx             )
+               __field(        unsigned int,   exit_reason     )
+               __field(        unsigned long,  vcpu_pc         )
+       ),
+
+       TP_fast_assign(
+               __entry->idx                    = idx;
+               __entry->exit_reason            = exit_reason;
+               __entry->vcpu_pc                = vcpu_pc;
+       ),
+
+       TP_printk("%s: HSR_EC: 0x%04x (%s), PC: 0x%08lx",
+                 __print_symbolic(__entry->idx, kvm_arm_exception_type),
+                 __entry->exit_reason,
+                 __print_symbolic(__entry->exit_reason, kvm_arm_exception_class),
+                 __entry->vcpu_pc)
+);
+
+TRACE_EVENT(kvm_guest_fault,
+       TP_PROTO(unsigned long vcpu_pc, unsigned long hsr,
+                unsigned long hxfar,
+                unsigned long long ipa),
+       TP_ARGS(vcpu_pc, hsr, hxfar, ipa),
+
+       TP_STRUCT__entry(
+               __field(        unsigned long,  vcpu_pc         )
+               __field(        unsigned long,  hsr             )
+               __field(        unsigned long,  hxfar           )
+               __field(   unsigned long long,  ipa             )
+       ),
+
+       TP_fast_assign(
+               __entry->vcpu_pc                = vcpu_pc;
+               __entry->hsr                    = hsr;
+               __entry->hxfar                  = hxfar;
+               __entry->ipa                    = ipa;
+       ),
+
+       TP_printk("ipa %#llx, hsr %#08lx, hxfar %#08lx, pc %#08lx",
+                 __entry->ipa, __entry->hsr,
+                 __entry->hxfar, __entry->vcpu_pc)
+);
+
+TRACE_EVENT(kvm_access_fault,
+       TP_PROTO(unsigned long ipa),
+       TP_ARGS(ipa),
+
+       TP_STRUCT__entry(
+               __field(        unsigned long,  ipa             )
+       ),
+
+       TP_fast_assign(
+               __entry->ipa            = ipa;
+       ),
+
+       TP_printk("IPA: %lx", __entry->ipa)
+);
+
+TRACE_EVENT(kvm_irq_line,
+       TP_PROTO(unsigned int type, int vcpu_idx, int irq_num, int level),
+       TP_ARGS(type, vcpu_idx, irq_num, level),
+
+       TP_STRUCT__entry(
+               __field(        unsigned int,   type            )
+               __field(        int,            vcpu_idx        )
+               __field(        int,            irq_num         )
+               __field(        int,            level           )
+       ),
+
+       TP_fast_assign(
+               __entry->type           = type;
+               __entry->vcpu_idx       = vcpu_idx;
+               __entry->irq_num        = irq_num;
                __entry->level          = level;
        ),
 
-       TP_printk("VCPU: %ld, IRQ %d, level: %d",
-                 __entry->vcpu_id, __entry->irq, __entry->level)
+       TP_printk("Inject %s interrupt (%d), vcpu->idx: %d, num: %d, level: %d",
+                 (__entry->type == KVM_ARM_IRQ_TYPE_CPU) ? "CPU" :
+                 (__entry->type == KVM_ARM_IRQ_TYPE_PPI) ? "VGIC PPI" :
+                 (__entry->type == KVM_ARM_IRQ_TYPE_SPI) ? "VGIC SPI" : "UNKNOWN",
+                 __entry->type, __entry->vcpu_idx, __entry->irq_num, __entry->level)
+);
+
+TRACE_EVENT(kvm_mmio_emulate,
+       TP_PROTO(unsigned long vcpu_pc, unsigned long instr,
+                unsigned long cpsr),
+       TP_ARGS(vcpu_pc, instr, cpsr),
+
+       TP_STRUCT__entry(
+               __field(        unsigned long,  vcpu_pc         )
+               __field(        unsigned long,  instr           )
+               __field(        unsigned long,  cpsr            )
+       ),
+
+       TP_fast_assign(
+               __entry->vcpu_pc                = vcpu_pc;
+               __entry->instr                  = instr;
+               __entry->cpsr                   = cpsr;
+       ),
+
+       TP_printk("Emulate MMIO at: 0x%08lx (instr: %08lx, cpsr: %08lx)",
+                 __entry->vcpu_pc, __entry->instr, __entry->cpsr)
+);
+
+TRACE_EVENT(kvm_unmap_hva,
+       TP_PROTO(unsigned long hva),
+       TP_ARGS(hva),
+
+       TP_STRUCT__entry(
+               __field(        unsigned long,  hva             )
+       ),
+
+       TP_fast_assign(
+               __entry->hva            = hva;
+       ),
+
+       TP_printk("mmu notifier unmap hva: %#08lx", __entry->hva)
+);
+
+TRACE_EVENT(kvm_unmap_hva_range,
+       TP_PROTO(unsigned long start, unsigned long end),
+       TP_ARGS(start, end),
+
+       TP_STRUCT__entry(
+               __field(        unsigned long,  start           )
+               __field(        unsigned long,  end             )
+       ),
+
+       TP_fast_assign(
+               __entry->start          = start;
+               __entry->end            = end;
+       ),
+
+       TP_printk("mmu notifier unmap range: %#08lx -- %#08lx",
+                 __entry->start, __entry->end)
+);
+
+TRACE_EVENT(kvm_set_spte_hva,
+       TP_PROTO(unsigned long hva),
+       TP_ARGS(hva),
+
+       TP_STRUCT__entry(
+               __field(        unsigned long,  hva             )
+       ),
+
+       TP_fast_assign(
+               __entry->hva            = hva;
+       ),
+
+       TP_printk("mmu notifier set pte hva: %#08lx", __entry->hva)
+);
+
+TRACE_EVENT(kvm_age_hva,
+       TP_PROTO(unsigned long start, unsigned long end),
+       TP_ARGS(start, end),
+
+       TP_STRUCT__entry(
+               __field(        unsigned long,  start           )
+               __field(        unsigned long,  end             )
+       ),
+
+       TP_fast_assign(
+               __entry->start          = start;
+               __entry->end            = end;
+       ),
+
+       TP_printk("mmu notifier age hva: %#08lx -- %#08lx",
+                 __entry->start, __entry->end)
+);
+
+TRACE_EVENT(kvm_test_age_hva,
+       TP_PROTO(unsigned long hva),
+       TP_ARGS(hva),
+
+       TP_STRUCT__entry(
+               __field(        unsigned long,  hva             )
+       ),
+
+       TP_fast_assign(
+               __entry->hva            = hva;
+       ),
+
+       TP_printk("mmu notifier test age hva: %#08lx", __entry->hva)
+);
+
+TRACE_EVENT(kvm_set_way_flush,
+           TP_PROTO(unsigned long vcpu_pc, bool cache),
+           TP_ARGS(vcpu_pc, cache),
+
+           TP_STRUCT__entry(
+                   __field(    unsigned long,  vcpu_pc         )
+                   __field(    bool,           cache           )
+           ),
+
+           TP_fast_assign(
+                   __entry->vcpu_pc            = vcpu_pc;
+                   __entry->cache              = cache;
+           ),
+
+           TP_printk("S/W flush at 0x%016lx (cache %s)",
+                     __entry->vcpu_pc, __entry->cache ? "on" : "off")
+);
+
+TRACE_EVENT(kvm_toggle_cache,
+           TP_PROTO(unsigned long vcpu_pc, bool was, bool now),
+           TP_ARGS(vcpu_pc, was, now),
+
+           TP_STRUCT__entry(
+                   __field(    unsigned long,  vcpu_pc         )
+                   __field(    bool,           was             )
+                   __field(    bool,           now             )
+           ),
+
+           TP_fast_assign(
+                   __entry->vcpu_pc            = vcpu_pc;
+                   __entry->was                = was;
+                   __entry->now                = now;
+           ),
+
+           TP_printk("VM op at 0x%016lx (cache was %s, now %s)",
+                     __entry->vcpu_pc, __entry->was ? "on" : "off",
+                     __entry->now ? "on" : "off")
 );
 
 /*
diff --git a/virt/kvm/arm/vgic/trace.h b/virt/kvm/arm/vgic/trace.h
new file mode 100644 (file)
index 0000000..ed32292
--- /dev/null
@@ -0,0 +1,37 @@
+#if !defined(_TRACE_VGIC_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_VGIC_H
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kvm
+
+TRACE_EVENT(vgic_update_irq_pending,
+       TP_PROTO(unsigned long vcpu_id, __u32 irq, bool level),
+       TP_ARGS(vcpu_id, irq, level),
+
+       TP_STRUCT__entry(
+               __field(        unsigned long,  vcpu_id )
+               __field(        __u32,          irq     )
+               __field(        bool,           level   )
+       ),
+
+       TP_fast_assign(
+               __entry->vcpu_id        = vcpu_id;
+               __entry->irq            = irq;
+               __entry->level          = level;
+       ),
+
+       TP_printk("VCPU: %ld, IRQ %d, level: %d",
+                 __entry->vcpu_id, __entry->irq, __entry->level)
+);
+
+#endif /* _TRACE_VGIC_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../../virt/kvm/arm/vgic
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
index 3d0979c307212cd1f17f3aec9295bb150bf64c1b..d40210ae947486dd7d7741ddc4350701422d4ea0 100644 (file)
@@ -21,7 +21,7 @@
 #include "vgic.h"
 
 #define CREATE_TRACE_POINTS
-#include "../trace.h"
+#include "trace.h"
 
 #ifdef CONFIG_DEBUG_SPINLOCK
 #define DEBUG_SPINLOCK_BUG_ON(p) BUG_ON(p)