void __ref vmemmap_free(unsigned long start, unsigned long end)
{
unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
+ unsigned long page_order = get_order(page_size);
start = _ALIGN_DOWN(start, page_size);
pr_debug("vmemmap_free %lx...%lx\n", start, end);
for (; start < end; start += page_size) {
- unsigned long addr;
+ unsigned long nr_pages, addr;
+ struct page *page;
/*
* the section has already been marked as invalid, so
* vmemmap_populated() true means some other sections still
* in this page, then skip it.
*/
if (vmemmap_populated(start, page_size))
continue;
addr = vmemmap_list_free(start);
- if (addr) {
- struct page *page = pfn_to_page(addr >> PAGE_SHIFT);
-
- if (PageReserved(page)) {
- /* allocated from bootmem */
- if (page_size < PAGE_SIZE) {
- /*
- * this shouldn't happen, but if it is
- * the case, leave the memory there
- */
- WARN_ON_ONCE(1);
- } else {
- unsigned int nr_pages =
- 1 << get_order(page_size);
- while (nr_pages--)
- free_reserved_page(page++);
- }
- } else
- free_pages((unsigned long)(__va(addr)),
- get_order(page_size));
-
- vmemmap_remove_mapping(start, page_size);
+ if (!addr)
+ continue;
+
+ page = pfn_to_page(addr >> PAGE_SHIFT);
+ nr_pages = 1 << page_order;
+
+ if (PageReserved(page)) {
+ /* allocated from bootmem */
+ if (page_size < PAGE_SIZE) {
+ /*
+ * this shouldn't happen, but if it is
+ * the case, leave the memory there
+ */
+ WARN_ON_ONCE(1);
+ } else {
+ while (nr_pages--)
+ free_reserved_page(page++);
+ }
+ } else {
+ free_pages((unsigned long)(__va(addr)), page_order);
}
+
+ vmemmap_remove_mapping(start, page_size);
}
}
#endif
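
A side note on the arithmetic this patch hoists out of the loop: 1 << get_order(page_size) is the number of base pages backing one vmemmap block, so computing page_order and nr_pages once avoids the repeated get_order() calls of the old code. Below is a minimal userspace sketch of that relationship; my_get_order(), the PAGE_SHIFT value, and the 16M example size are illustrative stand-ins, not the kernel's definitions:

#include <stdio.h>

#define PAGE_SHIFT 16UL                 /* assume 64K base pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/*
 * stand-in for the kernel's get_order(): the smallest order such
 * that PAGE_SIZE << order >= size
 */
static unsigned long my_get_order(unsigned long size)
{
	unsigned long order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned long page_size = 1UL << 24;  /* e.g. a 16M vmemmap backing page */
	unsigned long page_order = my_get_order(page_size);
	unsigned long nr_pages = 1UL << page_order;

	/* 16M / 64K = 256 base pages to free per vmemmap block */
	printf("order=%lu nr_pages=%lu\n", page_order, nr_pages);
	return 0;
}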