From: Aneesh Kumar K.V
Date: Tue, 2 Jul 2013 05:45:18 +0000 (+0530)
Subject: powerpc/kvm: Use 256K chunk to track both RMA and hash page table allocation.
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=990978e99359e1f3a843563b9f96f9dc7bb7c05a;p=GitHub%2Fmoto-9609%2Fandroid_kernel_motorola_exynos9610.git

powerpc/kvm: Use 256K chunk to track both RMA and hash page table allocation.

Both RMA and hash page table requests will be a multiple of 256K. We can
therefore use a chunk size of 256K and track each free/used 256K chunk
with a single bit in the bitmap. This helps to reduce the bitmap size.

Signed-off-by: Aneesh Kumar K.V
Acked-by: Paul Mackerras
Signed-off-by: Alexander Graf
---

diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 354f4bb21f5c..7eb5ddab1203 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -37,6 +37,8 @@
 #include
 #include
 
+#include "book3s_hv_cma.h"
+
 /* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */
 #define MAX_LPID_970	63
 
@@ -71,6 +73,7 @@ long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
 
 	/* Next try to allocate from the preallocated pool */
 	if (!hpt) {
+		VM_BUG_ON(order < KVM_CMA_CHUNK_ORDER);
 		page = kvm_alloc_hpt(1 << (order - PAGE_SHIFT));
 		if (page) {
 			hpt = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
diff --git a/arch/powerpc/kvm/book3s_hv_cma.c b/arch/powerpc/kvm/book3s_hv_cma.c
index e04b269b9c5b..d9d3d8553d51 100644
--- a/arch/powerpc/kvm/book3s_hv_cma.c
+++ b/arch/powerpc/kvm/book3s_hv_cma.c
@@ -24,6 +24,8 @@
 #include
 #include
 
+#include "book3s_hv_cma.h"
+
 struct kvm_cma {
 	unsigned long	base_pfn;
 	unsigned long	count;
@@ -96,6 +98,7 @@ struct page *kvm_alloc_cma(unsigned long nr_pages, unsigned long align_pages)
 	int ret;
 	struct page *page = NULL;
 	struct kvm_cma *cma = &kvm_cma_area;
+	unsigned long chunk_count, nr_chunk;
 	unsigned long mask, pfn, pageno, start = 0;
 
 
@@ -107,21 +110,27 @@ struct page *kvm_alloc_cma(unsigned long nr_pages, unsigned long align_pages)
 
 	if (!nr_pages)
 		return NULL;
-
+	/*
+	 * align mask with chunk size. The bit tracks pages in chunk size
+	 */
 	VM_BUG_ON(!is_power_of_2(align_pages));
-	mask = align_pages - 1;
+	mask = (align_pages >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT)) - 1;
+	BUILD_BUG_ON(PAGE_SHIFT > KVM_CMA_CHUNK_ORDER);
+
+	chunk_count = cma->count >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
+	nr_chunk = nr_pages >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
 
 	mutex_lock(&kvm_cma_mutex);
 	for (;;) {
-		pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
-						    start, nr_pages, mask);
-		if (pageno >= cma->count)
+		pageno = bitmap_find_next_zero_area(cma->bitmap, chunk_count,
+						    start, nr_chunk, mask);
+		if (pageno >= chunk_count)
 			break;
 
-		pfn = cma->base_pfn + pageno;
+		pfn = cma->base_pfn + (pageno << (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT));
 		ret = alloc_contig_range(pfn, pfn + nr_pages, MIGRATE_CMA);
 		if (ret == 0) {
-			bitmap_set(cma->bitmap, pageno, nr_pages);
+			bitmap_set(cma->bitmap, pageno, nr_chunk);
 			page = pfn_to_page(pfn);
 			memset(pfn_to_kaddr(pfn), 0, nr_pages << PAGE_SHIFT);
 			break;
@@ -150,9 +159,9 @@ struct page *kvm_alloc_cma(unsigned long nr_pages, unsigned long align_pages)
 bool kvm_release_cma(struct page *pages, unsigned long nr_pages)
 {
 	unsigned long pfn;
+	unsigned long nr_chunk;
 	struct kvm_cma *cma = &kvm_cma_area;
 
-
 	if (!cma || !pages)
 		return false;
 
@@ -164,9 +173,12 @@ bool kvm_release_cma(struct page *pages, unsigned long nr_pages)
 		return false;
 
 	VM_BUG_ON(pfn + nr_pages > cma->base_pfn + cma->count);
+	nr_chunk = nr_pages >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
 
 	mutex_lock(&kvm_cma_mutex);
-	bitmap_clear(cma->bitmap, pfn - cma->base_pfn, nr_pages);
+	bitmap_clear(cma->bitmap,
+		     (pfn - cma->base_pfn) >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT),
+		     nr_chunk);
 	free_contig_range(pfn, nr_pages);
 	mutex_unlock(&kvm_cma_mutex);
 
@@ -204,13 +216,14 @@ static int __init kvm_cma_activate_area(unsigned long base_pfn,
 static int __init kvm_cma_init_reserved_areas(void)
 {
 	int bitmap_size, ret;
+	unsigned long chunk_count;
 	struct kvm_cma *cma = &kvm_cma_area;
 
 	pr_debug("%s()\n", __func__);
 	if (!cma->count)
 		return 0;
-
-	bitmap_size = BITS_TO_LONGS(cma->count) * sizeof(long);
+	chunk_count = cma->count >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
+	bitmap_size = BITS_TO_LONGS(chunk_count) * sizeof(long);
 	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
 	if (!cma->bitmap)
 		return -ENOMEM;
diff --git a/arch/powerpc/kvm/book3s_hv_cma.h b/arch/powerpc/kvm/book3s_hv_cma.h
index 788bc3b73104..655144f75fa5 100644
--- a/arch/powerpc/kvm/book3s_hv_cma.h
+++ b/arch/powerpc/kvm/book3s_hv_cma.h
@@ -14,6 +14,11 @@
 #ifndef __POWERPC_KVM_CMA_ALLOC_H__
 #define __POWERPC_KVM_CMA_ALLOC_H__
+/*
+ * Both RMA and Hash page allocation will be multiple of 256K.
+ */
+#define KVM_CMA_CHUNK_ORDER	18
+
 extern struct page *kvm_alloc_cma(unsigned long nr_pages,
 				  unsigned long align_pages);
 extern bool kvm_release_cma(struct page *pages, unsigned long nr_pages);
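A note on the chunk arithmetic used throughout the patch: every page count is
converted to a chunk count by shifting right by (KVM_CMA_CHUNK_ORDER -
PAGE_SHIFT), the alignment mask shrinks by the same shift, and the chunk index
is converted back to a pfn offset by shifting left, as in kvm_alloc_cma()
above. The standalone sketch below (not part of the patch) illustrates the
bitmap saving; it assumes 64K pages (PAGE_SHIFT = 16), and the 1GB region,
16MB request, and main() harness are made up for illustration.

/*
 * Standalone illustration of the chunk arithmetic (not kernel code).
 * Assumed for the example: 64K pages (PAGE_SHIFT = 16) and a 1GB CMA
 * region; the sizes and the main() harness are hypothetical.
 */
#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT		16	/* 64K pages */
#define KVM_CMA_CHUNK_ORDER	18	/* 256K chunks, as in the patch */
#define CHUNK_SHIFT		(KVM_CMA_CHUNK_ORDER - PAGE_SHIFT)

int main(void)
{
	/* 1GB region: 16384 pages of 64K each */
	unsigned long region_pages = 1UL << (30 - PAGE_SHIFT);
	/* one bit per 256K chunk instead of one bit per page */
	unsigned long chunk_count = region_pages >> CHUNK_SHIFT;

	/* a 16MB hash page table request: 256 pages -> 64 chunks */
	unsigned long nr_pages = 1UL << (24 - PAGE_SHIFT);
	unsigned long nr_chunk = nr_pages >> CHUNK_SHIFT;

	/* requests are multiples of 256K, so no pages fall between chunks */
	assert((nr_pages & ((1UL << CHUNK_SHIFT) - 1)) == 0);

	printf("bitmap bits: %lu per-page vs %lu per-chunk (%ux smaller)\n",
	       region_pages, chunk_count, 1U << CHUNK_SHIFT);
	printf("request: %lu pages tracked as %lu chunks\n",
	       nr_pages, nr_chunk);
	return 0;
}

With 64K pages the shift is 2, so the bitmap shrinks by 4x; with 4K pages the
same define would shrink it by 64x.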