/*
*
- * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2021 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
prev_needed = (KBASE_REG_DONT_NEED & reg->flags) == KBASE_REG_DONT_NEED;
new_needed = (BASE_MEM_DONT_NEED & flags) == BASE_MEM_DONT_NEED;
if (prev_needed != new_needed) {
- /* Aliased allocations can't be made ephemeral */
+ /* Aliased allocations can't be shrunk as the code doesn't
+ * support looking up:
+ * - all physical pages assigned to different GPU VAs
+ * - CPU mappings for the physical pages at different vm_pgoff
+ * (==GPU VA) locations.
+ */
if (atomic_read(&reg->cpu_alloc->gpu_mappings) > 1)
goto out_unlock;
u32 cache_line_alignment = kbase_get_cache_line_alignment(kctx);
struct kbase_alloc_import_user_buf *user_buf;
struct page **pages = NULL;
+ int write;
if ((address & (cache_line_alignment - 1)) != 0 ||
(size & (cache_line_alignment - 1)) != 0) {
down_read(&current->mm->mmap_sem);
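+ /* Pin the user buffer pages for writing whenever the region allows
+ * either CPU or GPU writes.
+ */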
+ write = reg->flags & (KBASE_REG_CPU_WR | KBASE_REG_GPU_WR);
+
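+ /* get_user_pages() dropped the explicit task/mm parameters for the
+ * current process in kernel 4.6 and switched from separate write/force
+ * flags to FOLL_* gup_flags in 4.9, hence the version-specific calls
+ * below.
+ */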
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
faulted_pages = get_user_pages(current, current->mm, address, *va_pages,
- reg->flags & KBASE_REG_GPU_WR, 0, pages, NULL);
+ write, 0, pages, NULL);
#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)
faulted_pages = get_user_pages(address, *va_pages,
- reg->flags & KBASE_REG_GPU_WR, 0, pages, NULL);
+ write, 0, pages, NULL);
#else
faulted_pages = get_user_pages(address, *va_pages,
- reg->flags & KBASE_REG_GPU_WR ? FOLL_WRITE : 0,
+ write ? FOLL_WRITE : 0,
pages, NULL);
#endif
goto bad_handle; /* Free region */
if (aliasing_reg->flags & KBASE_REG_DONT_NEED)
goto bad_handle; /* Ephemeral region */
+ if (aliasing_reg->flags & KBASE_REG_NO_USER_FREE)
+ goto bad_handle; /* JIT regions can't be
+ * aliased. The NO_USER_FREE
+ * flag covers the entire
+ * lifetime of JIT regions;
+ * other region types covered
+ * by this flag must not be
+ * aliased either.
+ */
if (!aliasing_reg->gpu_alloc)
goto bad_handle; /* No alloc */
if (aliasing_reg->gpu_alloc->type != KBASE_MEM_TYPE_NATIVE)
reg->gpu_alloc->imported.alias.aliased[i].alloc = kbase_mem_phy_alloc_get(alloc);
reg->gpu_alloc->imported.alias.aliased[i].length = ai[i].length;
reg->gpu_alloc->imported.alias.aliased[i].offset = ai[i].offset;
+
+ /* Ensure the underlying alloc is marked as being
+ * mapped at >1 different GPU VA immediately, even
+ * though mapping might not happen until later.
+ *
+ * Otherwise, we would (incorrectly) allow shrinking of
+ * the source region (aliasing_reg) and so freeing the
+ * physical pages (without freeing the entire alloc)
+ * whilst we still hold an implicit reference on those
+ * physical pages.
+ */
+ kbase_mem_phy_alloc_gpu_mapped(alloc);
}
}
#endif
no_mmap:
bad_handle:
+ /* Marking the source allocs as not being mapped on the GPU and putting
+ * them is handled by putting reg's allocs, so no rollback of those
+ * actions is done here.
+ */
kbase_gpu_vm_unlock(kctx);
no_aliased_array:
invalid_flags:
if (new_pages > reg->nr_pages)
goto out_unlock;
- /* can't be mapped more than once on the GPU */
+ /* Can't shrink when physical pages are mapped to different GPU
+ * VAs. The code doesn't support looking up:
+ * - all physical pages assigned to different GPU VAs
+ * - CPU mappings for the physical pages at different vm_pgoff
+ * (==GPU VA) locations.
+ *
+ * Note that for Native allocs mapped at multiple GPU VAs, growth of
+ * such allocs is not a supported use-case.
+ */
if (atomic_read(&reg->gpu_alloc->gpu_mappings) > 1)
goto out_unlock;
/* can't grow regions which are ephemeral */