#include <asm/pgalloc.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
+#include <asm/kvm_asm.h>
#include <asm/mach/map.h>
+#include <trace/events/kvm.h>
+
+#include "trace.h"
extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];
static DEFINE_MUTEX(kvm_hyp_pgd_mutex);
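+/*
+ * Call into Hyp mode to invalidate the TLB entries for this VM's stage-2
+ * translations; used whenever an existing stage-2 mapping is changed or
+ * removed.
+ */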
+static void kvm_tlb_flush_vmid(struct kvm *kvm)
+{
+	kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
+}
+
static void kvm_set_pte(pte_t *pte, pte_t new_pte)
{
	pte_val(*pte) = new_pte;
	flush_pmd_entry(pte);
}
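+/*
+ * Cache of pre-allocated page table pages. The cache is topped up before
+ * mmu_lock is taken, so that intermediate stage-2 tables can be allocated
+ * from it without sleeping while the lock is held.
+ */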
+static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
+				  int min, int max)
+{
+	void *page;
+
+	BUG_ON(max > KVM_NR_MEM_OBJS);
+	if (cache->nobjs >= min)
+		return 0;
+	while (cache->nobjs < max) {
+		page = (void *)__get_free_page(PGALLOC_GFP);
+		if (!page)
+			return -ENOMEM;
+		cache->objects[cache->nobjs++] = page;
+	}
+	return 0;
+}
+
+static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
+{
+	while (mc->nobjs)
+		free_page((unsigned long)mc->objects[--mc->nobjs]);
+}
+
+static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
+{
+	void *p;
+
+	BUG_ON(!mc || !mc->nobjs);
+	p = mc->objects[--mc->nobjs];
+	return p;
+}
+
static void free_ptes(pmd_t *pmd, unsigned long addr)
{
	pte_t *pte;
	return __create_hyp_mappings(from, to, &pfn);
}
+/**
+ * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
+ * @kvm: The KVM struct pointer for the VM.
+ *
+ * Allocates only the 1st-level table, whose size is defined by S2_PGD_ORDER
+ * (it can support either the full 40-bit input address range or be limited
+ * to 32-bit input addresses). Clears the allocated pages.
+ *
+ * Note we don't need locking here as this is only called when the VM is
+ * created, which can only be done once.
+ */
+int kvm_alloc_stage2_pgd(struct kvm *kvm)
+{
+	pgd_t *pgd;
+
+	if (kvm->arch.pgd != NULL) {
+		kvm_err("kvm_arch already initialized?\n");
+		return -EINVAL;
+	}
+
+	pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, S2_PGD_ORDER);
+	if (!pgd)
+		return -ENOMEM;
+
+	/* stage-2 pgd must be aligned to its size */
+	VM_BUG_ON((unsigned long)pgd & (S2_PGD_SIZE - 1));
+
+	memset(pgd, 0, PTRS_PER_S2_PGD * sizeof(pgd_t));
+	clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
+	kvm->arch.pgd = pgd;
+
+	return 0;
+}
+
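+/*
+ * Helpers for tearing down stage-2 table entries: clearing an entry drops
+ * the reference it held on its containing table page, and clearing a
+ * pud/pmd entry also frees the next-level table it pointed to.
+ */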
+static void clear_pud_entry(pud_t *pud)
+{
+	pmd_t *pmd_table = pmd_offset(pud, 0);
+	pud_clear(pud);
+	pmd_free(NULL, pmd_table);
+	put_page(virt_to_page(pud));
+}
+
+static void clear_pmd_entry(pmd_t *pmd)
+{
+	pte_t *pte_table = pte_offset_kernel(pmd, 0);
+	pmd_clear(pmd);
+	pte_free_kernel(NULL, pte_table);
+	put_page(virt_to_page(pmd));
+}
+
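+/*
+ * A table page's refcount is 1 when freshly allocated and is incremented
+ * for every entry installed in it, so a count of 1 means the table holds
+ * no entries.
+ */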
+static bool pmd_empty(pmd_t *pmd)
+{
+	struct page *pmd_page = virt_to_page(pmd);
+	return page_count(pmd_page) == 1;
+}
+
+static void clear_pte_entry(pte_t *pte)
+{
+	if (pte_present(*pte)) {
+		kvm_set_pte(pte, __pte(0));
+		put_page(virt_to_page(pte));
+	}
+}
+
+static bool pte_empty(pte_t *pte)
+{
+	struct page *pte_page = virt_to_page(pte);
+	return page_count(pte_page) == 1;
+}
+
+/**
+ * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
+ * @kvm: The VM pointer
+ * @start: The intermediate physical base address of the range to unmap
+ * @size: The size of the area to unmap
+ *
+ * Clear a range of stage-2 mappings, lowering the various ref-counts. Must
+ * be called while holding mmu_lock (except when freeing the entire stage-2
+ * pgd as the VM is destroyed), otherwise another faulting VCPU may come in
+ * and mess with things behind our backs.
+ */
+static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte;
+	phys_addr_t addr = start, end = start + size;
+	u64 range;
+
+	while (addr < end) {
+		pgd = kvm->arch.pgd + pgd_index(addr);
+		pud = pud_offset(pgd, addr);
+		if (pud_none(*pud)) {
+			addr += PUD_SIZE;
+			continue;
+		}
+
+		pmd = pmd_offset(pud, addr);
+		if (pmd_none(*pmd)) {
+			addr += PMD_SIZE;
+			continue;
+		}
+
+		pte = pte_offset_kernel(pmd, addr);
+		clear_pte_entry(pte);
+		range = PAGE_SIZE;
+
+		/* If we emptied the pte, walk back up the ladder */
+		if (pte_empty(pte)) {
+			clear_pmd_entry(pmd);
+			range = PMD_SIZE;
+			if (pmd_empty(pmd)) {
+				clear_pud_entry(pud);
+				range = PUD_SIZE;
+			}
+		}
+
+		addr += range;
+	}
+}
+
+/**
+ * kvm_free_stage2_pgd - free all stage-2 tables
+ * @kvm: The KVM struct pointer for the VM.
+ *
+ * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
+ * underlying level-2 and level-3 tables before freeing the actual level-1 table
+ * and setting the struct pointer to NULL.
+ *
+ * Note we don't need locking here as this is only called when the VM is
+ * destroyed, which can only be done once.
+ */
+void kvm_free_stage2_pgd(struct kvm *kvm)
+{
+ if (kvm->arch.pgd == NULL)
+ return;
+
+ unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
+ free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER);
+ kvm->arch.pgd = NULL;
+}
+
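+/*
+ * Install *new_pte in the stage-2 tables at the IPA @addr, allocating
+ * intermediate tables from @cache as needed. With a NULL cache, missing
+ * levels are not allocated and unmapped addresses are left untouched.
+ * When @iomap is set, refuse to overwrite an existing mapping.
+ */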
+static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
+			  phys_addr_t addr, const pte_t *new_pte, bool iomap)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte, old_pte;
+
+	/* Create 2nd stage page table mapping - Level 1 */
+	pgd = kvm->arch.pgd + pgd_index(addr);
+	pud = pud_offset(pgd, addr);
+	if (pud_none(*pud)) {
+		if (!cache)
+			return 0; /* ignore calls from kvm_set_spte_hva */
+		pmd = mmu_memory_cache_alloc(cache);
+		pud_populate(NULL, pud, pmd);
+		pmd += pmd_index(addr);
+		get_page(virt_to_page(pud));
+	} else
+		pmd = pmd_offset(pud, addr);
+
+	/* Create 2nd stage page table mapping - Level 2 */
+	if (pmd_none(*pmd)) {
+		if (!cache)
+			return 0; /* ignore calls from kvm_set_spte_hva */
+		pte = mmu_memory_cache_alloc(cache);
+		clean_pte_table(pte);
+		pmd_populate_kernel(NULL, pmd, pte);
+		pte += pte_index(addr);
+		get_page(virt_to_page(pmd));
+	} else
+		pte = pte_offset_kernel(pmd, addr);
+
+	if (iomap && pte_present(*pte))
+		return -EFAULT;
+
+	/* Create 2nd stage page table mapping - Level 3 */
+	old_pte = *pte;
+	kvm_set_pte(pte, *new_pte);
+	if (pte_present(old_pte))
+		kvm_tlb_flush_vmid(kvm);
+	else
+		get_page(virt_to_page(pte));
+
+	return 0;
+}
+
+/**
+ * kvm_phys_addr_ioremap - map a device range to guest IPA
+ *
+ * @kvm: The KVM pointer
+ * @guest_ipa: The IPA at which to insert the mapping
+ * @pa: The physical address of the device
+ * @size: The size of the mapping
+ */
+int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
+			  phys_addr_t pa, unsigned long size)
+{
+	phys_addr_t addr, end;
+	int ret = 0;
+	unsigned long pfn;
+	struct kvm_mmu_memory_cache cache = { 0, };
+
+	end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
+	pfn = __phys_to_pfn(pa);
+
+	for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
+		pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE | L_PTE_S2_RDWR);
+
+		ret = mmu_topup_memory_cache(&cache, 2, 2);
+		if (ret)
+			goto out;
+		spin_lock(&kvm->mmu_lock);
+		ret = stage2_set_pte(kvm, &cache, addr, &pte, true);
+		spin_unlock(&kvm->mmu_lock);
+		if (ret)
+			goto out;
+
+		pfn++;
+	}
+
+out:
+	mmu_free_memory_cache(&cache);
+	return ret;
+}
+
int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	return -EINVAL;
}
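+/*
+ * Iterate over every memslot that intersects the host virtual address
+ * range [start, end) and invoke @handler on the guest physical address of
+ * each page in the intersection.
+ */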
+static void handle_hva_to_gpa(struct kvm *kvm,
+			      unsigned long start,
+			      unsigned long end,
+			      void (*handler)(struct kvm *kvm,
+					      gpa_t gpa, void *data),
+			      void *data)
+{
+	struct kvm_memslots *slots;
+	struct kvm_memory_slot *memslot;
+
+	slots = kvm_memslots(kvm);
+
+	/* we only care about the pages that the guest sees */
+	kvm_for_each_memslot(memslot, slots) {
+		unsigned long hva_start, hva_end;
+		gfn_t gfn, gfn_end;
+
+		hva_start = max(start, memslot->userspace_addr);
+		hva_end = min(end, memslot->userspace_addr +
+					(memslot->npages << PAGE_SHIFT));
+		if (hva_start >= hva_end)
+			continue;
+
+		/*
+		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
+		 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
+		 */
+		gfn = hva_to_gfn_memslot(hva_start, memslot);
+		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
+
+		for (; gfn < gfn_end; ++gfn) {
+			gpa_t gpa = gfn << PAGE_SHIFT;
+			handler(kvm, gpa, data);
+		}
+	}
+}
+
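+/* Remove the stage-2 mapping of a single page and flush the VM's TLB. */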
+static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
+{
+	unmap_stage2_range(kvm, gpa, PAGE_SIZE);
+	kvm_tlb_flush_vmid(kvm);
+}
+
+int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
+{
+	unsigned long end = hva + PAGE_SIZE;
+
+	if (!kvm->arch.pgd)
+		return 0;
+
+	trace_kvm_unmap_hva(hva);
+	handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);
+	return 0;
+}
+
+int kvm_unmap_hva_range(struct kvm *kvm,
+			unsigned long start, unsigned long end)
+{
+	if (!kvm->arch.pgd)
+		return 0;
+
+	trace_kvm_unmap_hva_range(start, end);
+	handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
+	return 0;
+}
+
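+/*
+ * Update an existing stage-2 mapping when the host changes the page
+ * backing an HVA. Passing a NULL cache means stage2_set_pte allocates no
+ * intermediate tables, so GPAs whose intermediate tables do not exist are
+ * simply skipped.
+ */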
+static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
+{
+	pte_t *pte = (pte_t *)data;
+
+	stage2_set_pte(kvm, NULL, gpa, pte, false);
+}
+
+void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+{
+	unsigned long end = hva + PAGE_SIZE;
+	pte_t stage2_pte;
+
+	if (!kvm->arch.pgd)
+		return;
+
+	trace_kvm_set_spte_hva(hva);
+	stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2);
+	handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
+}
+
+void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
+{
+	mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
+}
+
phys_addr_t kvm_mmu_get_httbr(void)
{
	VM_BUG_ON(!virt_addr_valid(hyp_pgd));
int kvm_mmu_init(void)
{
-	return hyp_pgd ? 0 : -ENOMEM;
+	if (!hyp_pgd) {
+		kvm_err("Hyp mode PGD not allocated\n");
+		return -ENOMEM;
+	}
+
+	return 0;
}
/**