binder: fix race between munmap() and direct reclaim
author Todd Kjos <tkjos@android.com>
Wed, 5 Jun 2019 16:37:46 +0000 (09:37 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 9 Jun 2019 07:18:20 +0000 (09:18 +0200)
commit 5cec2d2e5839f9c0fec319c523a911e0a7fd299f upstream.

An munmap() on a binder device causes binder_vma_close() to be called
which clears the alloc->vma pointer.

If direct reclaim causes binder_alloc_free_page() to be called, there
is a race where alloc->vma is read into a local vma pointer and then
used later after the mm->mmap_sem is acquired. This can result in
calling zap_page_range() with an invalid vma which manifests as a
use-after-free in zap_page_range().

The fix is to check alloc->vma after acquiring the mmap_sem (which we
were acquiring anyway) and skip zap_page_range() if it has changed
to NULL.

Cc: Ben Hutchings <ben.hutchings@codethink.co.uk>
Signed-off-by: Todd Kjos <tkjos@google.com>
Reviewed-by: Joel Fernandes (Google) <joel@joelfernandes.org>
Cc: stable <stable@vger.kernel.org> # 4.14
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/android/binder_alloc.c

index b9281f2725a6a07e0cd195be7d9ff4d8e9b9d710..e0b0399ff7ec81e2382245dca318b82a82c52209 100644 (file)
@@ -945,14 +945,13 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 
        index = page - alloc->pages;
        page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
+
+       mm = alloc->vma_vm_mm;
+       if (!mmget_not_zero(mm))
+               goto err_mmget;
+       if (!down_write_trylock(&mm->mmap_sem))
+               goto err_down_write_mmap_sem_failed;
        vma = binder_alloc_get_vma(alloc);
-       if (vma) {
-               if (!mmget_not_zero(alloc->vma_vm_mm))
-                       goto err_mmget;
-               mm = alloc->vma_vm_mm;
-               if (!down_write_trylock(&mm->mmap_sem))
-                       goto err_down_write_mmap_sem_failed;
-       }
 
        list_lru_isolate(lru, item);
        spin_unlock(lock);
@@ -965,10 +964,9 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
                               PAGE_SIZE);
 
                trace_binder_unmap_user_end(alloc, index);
-
-               up_write(&mm->mmap_sem);
-               mmput(mm);
        }
+       up_write(&mm->mmap_sem);
+       mmput(mm);
 
        trace_binder_unmap_kernel_start(alloc, index);