From: Oliver O'Halloran
Date: Wed, 28 Jun 2017 01:32:32 +0000 (+1000)
Subject: powerpc/vmemmap: Reshuffle vmemmap_free()
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=d7d9b612f1b0ac154071bafc02bc9b7b879d7363;p=GitHub%2FLineageOS%2Fandroid_kernel_motorola_exynos9610.git

powerpc/vmemmap: Reshuffle vmemmap_free()

Removes an indentation level and shuffles some code around to make the
following patch cleaner. No functional changes.

Reviewed-by: Balbir Singh
Signed-off-by: Oliver O'Halloran
Signed-off-by: Michael Ellerman
---

diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index f1a4964f9317..f855a2442c97 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -255,13 +255,15 @@ static unsigned long vmemmap_list_free(unsigned long start)
 void __ref vmemmap_free(unsigned long start, unsigned long end)
 {
 	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
+	unsigned long page_order = get_order(page_size);
 
 	start = _ALIGN_DOWN(start, page_size);
 
 	pr_debug("vmemmap_free %lx...%lx\n", start, end);
 
 	for (; start < end; start += page_size) {
-		unsigned long addr;
+		unsigned long nr_pages, addr;
+		struct page *page;
 
 		/*
 		 * the section has already be marked as invalid, so
@@ -272,29 +274,29 @@ void __ref vmemmap_free(unsigned long start, unsigned long end)
 			continue;
 
 		addr = vmemmap_list_free(start);
-		if (addr) {
-			struct page *page = pfn_to_page(addr >> PAGE_SHIFT);
-
-			if (PageReserved(page)) {
-				/* allocated from bootmem */
-				if (page_size < PAGE_SIZE) {
-					/*
-					 * this shouldn't happen, but if it is
-					 * the case, leave the memory there
-					 */
-					WARN_ON_ONCE(1);
-				} else {
-					unsigned int nr_pages =
-						1 << get_order(page_size);
-					while (nr_pages--)
-						free_reserved_page(page++);
-				}
-			} else
-				free_pages((unsigned long)(__va(addr)),
-					   get_order(page_size));
-
-			vmemmap_remove_mapping(start, page_size);
+		if (!addr)
+			continue;
+
+		page = pfn_to_page(addr >> PAGE_SHIFT);
+		nr_pages = 1 << page_order;
+
+		if (PageReserved(page)) {
+			/* allocated from bootmem */
+			if (page_size < PAGE_SIZE) {
+				/*
+				 * this shouldn't happen, but if it is
+				 * the case, leave the memory there
+				 */
+				WARN_ON_ONCE(1);
+			} else {
+				while (nr_pages--)
+					free_reserved_page(page++);
+			}
+		} else {
+			free_pages((unsigned long)(__va(addr)), page_order);
 		}
+
+		vmemmap_remove_mapping(start, page_size);
 	}
 }
 #endif
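
[Editor's note, not part of the patch] For readers skimming the diff, below is a rough consolidated view of vmemmap_free() as it reads once this reshuffle is applied. It is stitched together from the '+' and context lines above rather than copied from the tree; the check between the two hunks (the one that skips sections still marked as populated) is not shown in the diff, so it is represented only by a placeholder comment here.

void __ref vmemmap_free(unsigned long start, unsigned long end)
{
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
	/* page_order is hoisted out of the loop so both free paths can share it */
	unsigned long page_order = get_order(page_size);

	start = _ALIGN_DOWN(start, page_size);

	pr_debug("vmemmap_free %lx...%lx\n", start, end);

	for (; start < end; start += page_size) {
		unsigned long nr_pages, addr;
		struct page *page;

		/* ... populated-section check elided (not visible in the diff) ... */

		addr = vmemmap_list_free(start);
		if (!addr)
			continue;	/* early continue replaces the old if (addr) { ... } nesting */

		page = pfn_to_page(addr >> PAGE_SHIFT);
		nr_pages = 1 << page_order;

		if (PageReserved(page)) {
			/* allocated from bootmem */
			if (page_size < PAGE_SIZE) {
				/* shouldn't happen; if it does, leave the memory there */
				WARN_ON_ONCE(1);
			} else {
				while (nr_pages--)
					free_reserved_page(page++);
			}
		} else {
			free_pages((unsigned long)(__va(addr)), page_order);
		}

		vmemmap_remove_mapping(start, page_size);
	}
}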