Import G955FXXUCDUD1/G950FXXUCDUD1+N950FXXSGDUG7 kernel source
Repository: exynos8895/android_kernel_samsung_universal8895.git

diff --git a/drivers/gpu/arm/tMIx/r9p0/mali_kbase_mem_linux.c b/drivers/gpu/arm/tMIx/r9p0/mali_kbase_mem_linux.c
index 78c2e76f2cf897d9c57826093d32b7f4917ce2ff..0815ebbeb63e4513f423fd390033464afd309632 100644
--- a/drivers/gpu/arm/tMIx/r9p0/mali_kbase_mem_linux.c
+++ b/drivers/gpu/arm/tMIx/r9p0/mali_kbase_mem_linux.c
@@ -1,6 +1,6 @@
 /*
  *
- * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2021 ARM Limited. All rights reserved.
  *
  * This program is free software and is provided to you under the terms of the
  * GNU General Public License version 2 as published by the Free Software
@@ -603,7 +603,12 @@ int kbase_mem_flags_change(struct kbase_context *kctx, u64 gpu_addr, unsigned in
        prev_needed = (KBASE_REG_DONT_NEED & reg->flags) == KBASE_REG_DONT_NEED;
        new_needed = (BASE_MEM_DONT_NEED & flags) == BASE_MEM_DONT_NEED;
        if (prev_needed != new_needed) {
-               /* Aliased allocations can't be made ephemeral */
+               /* Aliased allocations can't be shrunk as the code doesn't
+                * support looking up:
+                * - all physical pages assigned to different GPU VAs
+                * - CPU mappings for the physical pages at different vm_pgoff
+                *   (==GPU VA) locations.
+                */
                if (atomic_read(&reg->cpu_alloc->gpu_mappings) > 1)
                        goto out_unlock;
 
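For orientation, the block this comment guards only runs when the ephemeral (DONT_NEED) state actually toggles; re-asserting the current state falls through untouched. A minimal standalone C model of that entry test (the bit positions here are made up for illustration and are not the driver's):

    #include <stdbool.h>
    #include <stdio.h>

    #define BASE_MEM_DONT_NEED  (1u << 6)  /* hypothetical bit, not the real value */
    #define KBASE_REG_DONT_NEED (1u << 6)

    static bool toggles_ephemeral(unsigned long reg_flags, unsigned long req_flags)
    {
        bool prev_needed = (reg_flags & KBASE_REG_DONT_NEED) == KBASE_REG_DONT_NEED;
        bool new_needed = (req_flags & BASE_MEM_DONT_NEED) == BASE_MEM_DONT_NEED;

        /* Only a transition needs work; same state in and out is a no-op. */
        return prev_needed != new_needed;
    }

    int main(void)
    {
        printf("%d\n", toggles_ephemeral(0, BASE_MEM_DONT_NEED));  /* 1: becomes ephemeral */
        printf("%d\n", toggles_ephemeral(KBASE_REG_DONT_NEED,
                                         BASE_MEM_DONT_NEED));     /* 0: unchanged */
        return 0;
    }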
@@ -924,6 +929,7 @@ static struct kbase_va_region *kbase_mem_from_user_buffer(
        u32 cache_line_alignment = kbase_get_cache_line_alignment(kctx);
        struct kbase_alloc_import_user_buf *user_buf;
        struct page **pages = NULL;
+       int write;
 
        if ((address & (cache_line_alignment - 1)) != 0 ||
                        (size & (cache_line_alignment - 1)) != 0) {
@@ -1017,15 +1023,17 @@ static struct kbase_va_region *kbase_mem_from_user_buffer(
 
        down_read(&current->mm->mmap_sem);
 
+       write = reg->flags & (KBASE_REG_CPU_WR | KBASE_REG_GPU_WR);
+
 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
        faulted_pages = get_user_pages(current, current->mm, address, *va_pages,
-                       reg->flags & KBASE_REG_GPU_WR, 0, pages, NULL);
+                       write, 0, pages, NULL);
 #elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)
        faulted_pages = get_user_pages(address, *va_pages,
-                       reg->flags & KBASE_REG_GPU_WR, 0, pages, NULL);
+                       write, 0, pages, NULL);
 #else
        faulted_pages = get_user_pages(address, *va_pages,
-                       reg->flags & KBASE_REG_GPU_WR ? FOLL_WRITE : 0,
+                       write ? FOLL_WRITE : 0,
                        pages, NULL);
 #endif
 
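This hunk is the substance of the permission fix: previously only KBASE_REG_GPU_WR was consulted, so an import that was CPU-writable but GPU-read-only was pinned without FOLL_WRITE. Pinning without FOLL_WRITE does not break copy-on-write, so later writes could reach pages still shared with their original owner. A minimal userspace model of the old and new predicates (the flag bits below are illustrative; the real KBASE_REG_* definitions live in mali_kbase_mem.h):

    #include <stdio.h>

    #define KBASE_REG_CPU_WR (1u << 0)  /* illustrative bit values */
    #define KBASE_REG_GPU_WR (1u << 1)

    /* Old predicate: pages were pinned writable only for GPU-writable
     * regions, so a CPU_WR-only import was pinned without FOLL_WRITE. */
    static int old_gup_write(unsigned long flags)
    {
        return (flags & KBASE_REG_GPU_WR) != 0;
    }

    /* Fixed predicate: any writable mapping, CPU or GPU, forces FOLL_WRITE,
     * which also breaks copy-on-write at pin time. */
    static int new_gup_write(unsigned long flags)
    {
        return (flags & (KBASE_REG_CPU_WR | KBASE_REG_GPU_WR)) != 0;
    }

    int main(void)
    {
        unsigned long cpu_only = KBASE_REG_CPU_WR;
        printf("CPU_WR-only import: old=%d new=%d\n",
               old_gup_write(cpu_only), new_gup_write(cpu_only));
        return 0;
    }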
@@ -1197,6 +1205,15 @@ u64 kbase_mem_alias(struct kbase_context *kctx, u64 *flags, u64 stride,
                                goto bad_handle; /* Free region */
                        if (aliasing_reg->flags & KBASE_REG_DONT_NEED)
                                goto bad_handle; /* Ephemeral region */
+                       if (aliasing_reg->flags & KBASE_REG_NO_USER_FREE)
+                               goto bad_handle; /* JIT regions can't be
+                                                 * aliased. NO_USER_FREE flag
+                                                 * covers the entire lifetime
+                                                 * of JIT regions. The other
+                                                 * types of regions covered
+                                                 * by this flag also shall
+                                                 * not be aliased.
+                                                 */
                        if (!aliasing_reg->gpu_alloc)
                                goto bad_handle; /* No alloc */
                        if (aliasing_reg->gpu_alloc->type != KBASE_MEM_TYPE_NATIVE)
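The new NO_USER_FREE guard joins an existing chain of checks that every alias source must pass before kbase_mem_phy_alloc_get() takes a reference on it. A reduced, compilable model of that chain (hypothetical flag values and helper name, not driver code):

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical reduced flag bits; the real values are in mali_kbase_mem.h. */
    #define KBASE_REG_FREE         (1u << 0)
    #define KBASE_REG_DONT_NEED    (1u << 1)
    #define KBASE_REG_NO_USER_FREE (1u << 2)

    /* Every test must pass before a reference is taken on the source alloc. */
    static bool may_alias(unsigned long flags)
    {
        if (flags & KBASE_REG_FREE)
            return false;  /* free region: nothing backing it */
        if (flags & KBASE_REG_DONT_NEED)
            return false;  /* ephemeral: pages may be reclaimed under us */
        if (flags & KBASE_REG_NO_USER_FREE)
            return false;  /* JIT and similar kernel-managed regions */
        return true;
    }

    int main(void)
    {
        printf("JIT region may alias: %d\n",
               may_alias(KBASE_REG_NO_USER_FREE));  /* 0: alias refused */
        return 0;
    }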
@@ -1224,6 +1241,18 @@ u64 kbase_mem_alias(struct kbase_context *kctx, u64 *flags, u64 stride,
                        reg->gpu_alloc->imported.alias.aliased[i].alloc = kbase_mem_phy_alloc_get(alloc);
                        reg->gpu_alloc->imported.alias.aliased[i].length = ai[i].length;
                        reg->gpu_alloc->imported.alias.aliased[i].offset = ai[i].offset;
+
+                       /* Ensure the underlying alloc is marked as being
+                        * mapped at >1 different GPU VA immediately, even
+                        * though mapping might not happen until later.
+                        *
+                        * Otherwise, we would (incorrectly) allow shrinking of
+                        * the source region (aliasing_reg) and so freeing the
+                        * physical pages (without freeing the entire alloc)
+                        * whilst we still hold an implicit reference on those
+                        * physical pages.
+                        */
+                       kbase_mem_phy_alloc_gpu_mapped(alloc);
                }
        }
 
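kbase_mem_phy_alloc_gpu_mapped() records the alias's implicit reference in the alloc's gpu_mappings counter, which in the driver is an atomic counter held in struct kbase_mem_phy_alloc. A reduced userspace sketch of that bookkeeping, under those assumptions:

    #include <stdatomic.h>
    #include <stdio.h>

    /* Reduced model: one counter per physical allocation, counting how many
     * distinct GPU VAs the same pages are mapped (or promised) at. */
    struct phy_alloc { atomic_int gpu_mappings; };

    /* Model of kbase_mem_phy_alloc_gpu_mapped(): bumped immediately when the
     * alias entry is recorded, before any real GPU mapping exists. */
    static void phy_alloc_gpu_mapped(struct phy_alloc *a)
    {
        atomic_fetch_add(&a->gpu_mappings, 1);
    }

    int main(void)
    {
        struct phy_alloc src = { .gpu_mappings = 1 };  /* mapped at its own GPU VA */
        phy_alloc_gpu_mapped(&src);                    /* alias takes its reference */
        printf("gpu_mappings = %d\n",
               atomic_load(&src.gpu_mappings));        /* 2: shrink now refused */
        return 0;
    }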
@@ -1267,6 +1296,10 @@ no_cookie:
 #endif
 no_mmap:
 bad_handle:
+       /* Marking the source allocs as not being mapped on the GPU and putting
+        * them is handled by putting reg's allocs, so no rollback of those
+        * actions is done here.
+        */
        kbase_gpu_vm_unlock(kctx);
 no_aliased_array:
 invalid_flags:
@@ -1512,7 +1545,15 @@ int kbase_mem_commit(struct kbase_context *kctx, u64 gpu_addr, u64 new_pages)
        if (new_pages > reg->nr_pages)
                goto out_unlock;
 
-       /* can't be mapped more than once on the GPU */
+       /* Can't shrink when physical pages are mapped to different GPU
+        * VAs. The code doesn't support looking up:
+        * - all physical pages assigned to different GPU VAs
+        * - CPU mappings for the physical pages at different vm_pgoff
+        *   (==GPU VA) locations.
+        *
+        * Note that for Native allocs mapped at multiple GPU VAs, growth of
+        * such allocs is not a supported use-case.
+        */
        if (atomic_read(&reg->gpu_alloc->gpu_mappings) > 1)
                goto out_unlock;
        /* can't grow regions which are ephemeral */
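kbase_mem_commit() applies the same gpu_mappings gate as kbase_mem_flags_change() above, so once an alias holds its implicit reference, a resize that would free the shared pages is refused. Continuing the reduced model (again a sketch, not driver code):

    #include <stdatomic.h>
    #include <stdio.h>

    struct phy_alloc { atomic_int gpu_mappings; };

    /* Model of the commit-path gate: mirrors the goto out_unlock above. */
    static int commit_shrink(struct phy_alloc *a)
    {
        if (atomic_load(&a->gpu_mappings) > 1)
            return -1;  /* pages visible at another GPU VA: refuse */
        return 0;       /* shrink would proceed */
    }

    int main(void)
    {
        struct phy_alloc a = { .gpu_mappings = 1 };
        atomic_fetch_add(&a.gpu_mappings, 1);          /* alias created */
        printf("shrink rc: %d\n", commit_shrink(&a));  /* -1: refused */
        return 0;
    }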