vunmap_page_range(va->va_start, va->va_end);
}
+static void vmap_debug_free_range(unsigned long start, unsigned long end)
+{
+ /*
+ * Unmap page tables and force a TLB flush immediately if
+ * CONFIG_DEBUG_PAGEALLOC is set. This catches use-after-free
+ * bugs similarly to those caught in the linear kernel virtual
+ * address space after a page has been freed.
+ *
+ * All the lazy freeing logic is still retained, in order to
+ * minimise intrusiveness of this debugging feature.
+ *
+ * This is going to be *slow* (linear kernel virtual address
+ * debugging doesn't do a broadcast TLB flush, so it is a lot
+ * faster).
+ */
+#ifdef CONFIG_DEBUG_PAGEALLOC
+ vunmap_page_range(start, end);
+ flush_tlb_kernel_range(start, end);
+#endif
+}
+
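/*
 * Illustrative sketch, not part of the patch above: with
 * CONFIG_DEBUG_PAGEALLOC=y the mapping is torn down and the TLB is
 * flushed at free time, so a stale access through a vm_map_ram()
 * address faults immediately instead of silently reading freed memory.
 * The demo function and its page argument are hypothetical;
 * vm_map_ram() and vm_unmap_ram() are the interfaces the patch
 * instruments, assuming the signatures they have in this kernel.
 */
#include <linux/mm.h>
#include <linux/vmalloc.h>

static void stale_vmap_access_demo(struct page *page)
{
	struct page *pages[1] = { page };
	void *mem = vm_map_ram(pages, 1, -1, PAGE_KERNEL);

	if (!mem)
		return;

	vm_unmap_ram(mem, 1);	/* now also runs vmap_debug_free_range() */

	/*
	 * The PTEs are cleared and flush_tlb_kernel_range() has run, so
	 * this dereference oopses under CONFIG_DEBUG_PAGEALLOC rather
	 * than quietly returning stale data.
	 */
	(void)*(volatile int *)mem;
}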
/*
* lazy_max_pages is the maximum amount of virtual address space we gather up
* before attempting to purge with a TLB flush.
BUG_ON(addr & (PAGE_SIZE-1));
debug_check_no_locks_freed(mem, size);
+ vmap_debug_free_range(addr, addr + size);
if (likely(count <= VMAP_MAX_ALLOC))
vb_free(mem, size);
if (va && va->flags & VM_VM_AREA) {
struct vm_struct *vm = va->private;
struct vm_struct *tmp, **p;
+
+ vmap_debug_free_range(va->va_start, va->va_end);
free_unmap_vmap_area(va);
vm->size -= PAGE_SIZE;