[9610] drivers: gpu: add SECURE_RENDERING support
author     Yoojin Park <yoojin1.park@samsung.com>
           Mon, 5 Aug 2019 12:45:19 +0000 (21:45 +0900)
committer  Kim Gunho <gunho.kim@samsung.com>
           Fri, 30 Aug 2019 07:58:56 +0000 (16:58 +0900)
Change-Id: I3fbfaf65a4b3ea592efb2dabc5281e61f093343c
Signed-off-by: Yoojin Park <yoojin1.park@samsung.com>
drivers/gpu/arm/b_r19p0/backend/gpu/mali_kbase_jm_rb.c
drivers/gpu/arm/b_r19p0/backend/gpu/mali_kbase_pm_driver.c
drivers/gpu/arm/b_r19p0/mali_kbase_core_linux.c
drivers/gpu/arm/b_r19p0/mali_kbase_mem.h
drivers/gpu/arm/b_r19p0/mali_kbase_mem_linux.c

diff --git a/drivers/gpu/arm/b_r19p0/backend/gpu/mali_kbase_jm_rb.c b/drivers/gpu/arm/b_r19p0/backend/gpu/mali_kbase_jm_rb.c
index 7cdaf98bfa54e34050008be9b87d860fd6d46250..9b4e793e2b316b732e798dfbd58aa723661b7b1f 100644
@@ -479,6 +479,8 @@ static inline bool kbase_gpu_in_protected_mode(struct kbase_device *kbdev)
        return kbdev->protected_mode;
 }
 
+/* MALI_SEC_SECURE_RENDERING */
+#ifndef CONFIG_MALI_EXYNOS_SECURE_RENDERING
 static void kbase_gpu_disable_coherent(struct kbase_device *kbdev)
 {
        lockdep_assert_held(&kbdev->hwaccess_lock);
@@ -532,7 +534,28 @@ static int kbase_gpu_protected_mode_reset(struct kbase_device *kbdev)
         */
        return kbase_reset_gpu_silent(kbdev);
 }
+#endif /* !CONFIG_MALI_EXYNOS_SECURE_RENDERING */
+
+#ifdef CONFIG_MALI_EXYNOS_SECURE_RENDERING
+void kbasep_js_cacheclean(struct kbase_device *kbdev)
+{
+    /* Limit the number of loops to avoid a hang if the interrupt is missed */
+    u32 max_loops = KBASE_CLEAN_CACHE_MAX_LOOPS;
+
+    GPU_LOG(DVFS_INFO, LSI_SECURE_CACHE, 0u, 0u, "GPU CACHE WORKING for Secure Rendering\n");
+    /* use GPU_COMMAND completion solution */
+    /* clean the caches */
+    kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), GPU_COMMAND_CLEAN_CACHES);
+
+    /* wait for cache flush to complete before continuing */
+    while (--max_loops && (kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_RAWSTAT)) & CLEAN_CACHES_COMPLETED) == 0)
+        ;
 
+    /* clear the CLEAN_CACHES_COMPLETED irq */
+    kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), CLEAN_CACHES_COMPLETED);
+    GPU_LOG(DVFS_INFO, LSI_SECURE_CACHE_END, 0u, 0u, "GPU CACHE WORKING for Secure Rendering: done\n");
+}
+#else
 static int kbase_jm_protected_entry(struct kbase_device *kbdev,
                                struct kbase_jd_atom **katom, int idx, int js)
 {
@@ -595,11 +618,30 @@ static int kbase_jm_protected_entry(struct kbase_device *kbdev,
 
        return err;
 }
+#endif /* CONFIG_MALI_EXYNOS_SECURE_RENDERING */
 
 static int kbase_jm_enter_protected_mode(struct kbase_device *kbdev,
                struct kbase_jd_atom **katom, int idx, int js)
 {
        int err = 0;
+#ifdef CONFIG_MALI_EXYNOS_SECURE_RENDERING
+    if (kbase_gpu_atoms_submitted_any(kbdev))
+        return -EAGAIN;
+
+    if (kbdev->protected_ops) {
+        /* Switch GPU to protected mode */
+        kbasep_js_cacheclean(kbdev);
+        err = kbdev->protected_ops->protected_mode_enable(
+                kbdev->protected_dev);
+
+        if (err)
+            dev_warn(kbdev->dev, "Failed to enable protected mode: %d\n",
+                    err);
+        else
+            kbdev->protected_mode = true;
+    }
+    return 0;
+#else
 
        lockdep_assert_held(&kbdev->hwaccess_lock);
 
@@ -748,12 +790,31 @@ static int kbase_jm_enter_protected_mode(struct kbase_device *kbdev,
        }
 
        return 0;
+#endif  /* CONFIG_MALI_EXYNOS_SECURE_RENDERING */
 }
 
 static int kbase_jm_exit_protected_mode(struct kbase_device *kbdev,
                struct kbase_jd_atom **katom, int idx, int js)
 {
        int err = 0;
+#ifdef CONFIG_MALI_EXYNOS_SECURE_RENDERING
+    if (kbase_gpu_atoms_submitted_any(kbdev))
+        return -EAGAIN;
+
+    if (kbdev->protected_ops) {
+        /* Switch GPU out of protected mode */
+        kbasep_js_cacheclean(kbdev);
+        err = kbdev->protected_ops->protected_mode_disable(
+                kbdev->protected_dev);
+
+        if (err)
+            dev_warn(kbdev->dev, "Failed to disable protected mode: %d\n",
+                    err);
+        else
+            kbdev->protected_mode = false;
+    }
+    return 0;
+#else
 
        lockdep_assert_held(&kbdev->hwaccess_lock);
 
@@ -846,6 +907,7 @@ static int kbase_jm_exit_protected_mode(struct kbase_device *kbdev,
        }
 
        return 0;
+#endif  /* CONFIG_MALI_EXYNOS_SECURE_RENDERING */
 }
 
 void kbase_backend_slot_update(struct kbase_device *kbdev)
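kbasep_js_cacheclean() above issues GPU_COMMAND_CLEAN_CACHES and then busy-waits on the CLEAN_CACHES_COMPLETED raw-status bit, capped by KBASE_CLEAN_CACHE_MAX_LOOPS so a missed completion cannot hang the job path. Below is a minimal, compilable sketch of that bounded-poll pattern; the register model, bit values and loop limit are illustrative stand-ins, not the kbase register interface.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define GPU_COMMAND_CLEAN_CACHES 0x01u       /* illustrative values only */
#define CLEAN_CACHES_COMPLETED   (1u << 17)
#define MAX_CLEAN_LOOPS          100000u     /* plays the role of KBASE_CLEAN_CACHE_MAX_LOOPS */

static uint32_t fake_irq_rawstat;

static void gpu_command_write(uint32_t cmd)
{
        /* a real GPU raises CLEAN_CACHES_COMPLETED later; fake it immediately */
        if (cmd == GPU_COMMAND_CLEAN_CACHES)
                fake_irq_rawstat |= CLEAN_CACHES_COMPLETED;
}

static uint32_t irq_rawstat_read(void)
{
        return fake_irq_rawstat;
}

static bool clean_caches(void)
{
        uint32_t loops = MAX_CLEAN_LOOPS;

        gpu_command_write(GPU_COMMAND_CLEAN_CACHES);

        /* poll for completion, but never spin forever */
        while (--loops && !(irq_rawstat_read() & CLEAN_CACHES_COMPLETED))
                ;

        fake_irq_rawstat &= ~CLEAN_CACHES_COMPLETED;   /* "clear the irq" */
        return loops != 0;                             /* false: timed out */
}

int main(void)
{
        printf("cache clean %s\n", clean_caches() ? "completed" : "timed out");
        return 0;
}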
diff --git a/drivers/gpu/arm/b_r19p0/backend/gpu/mali_kbase_pm_driver.c b/drivers/gpu/arm/b_r19p0/backend/gpu/mali_kbase_pm_driver.c
index be3012c3352e4204c1f5caeed4d75b0608afb95d..d189619b000573da8357a7b3bc53b9d43ddbdd8e 100644
@@ -1491,6 +1491,27 @@ bool kbase_pm_clock_off(struct kbase_device *kbdev, bool is_suspend)
 
        KBASE_TRACE_ADD(kbdev, PM_GPU_OFF, NULL, NULL, 0u, 0u);
 
+    /* MALI_SEC_SECURE_RENDERING */
+#ifdef CONFIG_MALI_EXYNOS_SECURE_RENDERING
+    if (kbdev->protected_mode) {
+        int err = 0;
+
+        WARN_ONCE(!kbdev->protected_ops,
+                "Cannot disable secure mode: secure callbacks not specified.\n");
+
+        if (kbdev->protected_ops) {
+            /* Switch GPU to non-secure mode */
+            err = kbdev->protected_ops->protected_mode_disable(
+                kbdev->protected_dev);
+
+            if (err)
+                dev_warn(kbdev->dev, "Failed to disable secure mode: %d\n", err);
+            else
+                kbdev->protected_mode = false;
+        }
+    }
+#endif
+
        /* Disable interrupts. This also clears any outstanding interrupts */
        kbase_pm_disable_interrupts(kbdev);
        /* Ensure that any IRQ handlers have finished */
@@ -1829,6 +1850,8 @@ static int kbase_pm_do_reset(struct kbase_device *kbdev)
        return -EINVAL;
 }
 
+/* MALI_SEC_SECURE_RENDERING */
+#ifndef CONFIG_MALI_EXYNOS_SECURE_RENDERING
 static int kbasep_protected_mode_enable(struct protected_mode_device *pdev)
 {
        struct kbase_device *kbdev = pdev->data;
@@ -1851,6 +1874,7 @@ struct protected_mode_ops kbase_native_protected_ops = {
        .protected_mode_enable = kbasep_protected_mode_enable,
        .protected_mode_disable = kbasep_protected_mode_disable
 };
+#endif
 
 int kbase_pm_init_hw(struct kbase_device *kbdev, unsigned int flags)
 {
@@ -1884,7 +1908,12 @@ int kbase_pm_init_hw(struct kbase_device *kbdev, unsigned int flags)
        spin_unlock_irqrestore(&kbdev->hwaccess_lock, irq_flags);
 
        /* Soft reset the GPU */
+       /* MALI_SEC_SECURE_RENDERING */
+#ifdef CONFIG_MALI_EXYNOS_SECURE_RENDERING
+    if (kbdev->protected_mode_support && kbdev->protected_mode)
+#else
        if (kbdev->protected_mode_support)
+#endif
                err = kbdev->protected_ops->protected_mode_disable(
                                kbdev->protected_dev);
        else
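The kbase_pm_clock_off() hunk above only drops out of protected mode when the GPU is actually in it and protected_ops callbacks were registered, warning rather than failing power-off if the disable call errors. A small stand-alone sketch of that guard; struct pm_dev, struct pm_ops and the demo_* callbacks are simplified stand-ins for kbase's protected_mode_device/protected_mode_ops, not driver symbols.

#include <stdbool.h>
#include <stdio.h>

struct pm_dev {
        bool protected_mode;
};

struct pm_ops {
        int (*enable)(struct pm_dev *dev);
        int (*disable)(struct pm_dev *dev);
};

static int demo_enable(struct pm_dev *dev)  { dev->protected_mode = true;  return 0; }
static int demo_disable(struct pm_dev *dev) { dev->protected_mode = false; return 0; }

static const struct pm_ops demo_ops = {
        .enable  = demo_enable,
        .disable = demo_disable,
};

static void clock_off(struct pm_dev *dev, const struct pm_ops *ops)
{
        /* mirror the added kbase_pm_clock_off() logic: leave protected mode
         * before power-off, but only if we are actually in it and callbacks
         * were registered; warn instead of aborting the power-off on error */
        if (dev->protected_mode && ops && ops->disable) {
                if (ops->disable(dev))
                        fprintf(stderr, "failed to leave protected mode\n");
        }
}

int main(void)
{
        struct pm_dev dev = { .protected_mode = false };

        demo_ops.enable(&dev);
        clock_off(&dev, &demo_ops);
        printf("protected_mode=%d\n", dev.protected_mode);
        return 0;
}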
diff --git a/drivers/gpu/arm/b_r19p0/mali_kbase_core_linux.c b/drivers/gpu/arm/b_r19p0/mali_kbase_core_linux.c
index f408bbb01cf2083693208b63427fd0ea0e5cc3d8..3b2339a66327062ca07fcf047621f6027274282c 100644
@@ -3245,7 +3245,7 @@ static const struct file_operations kbasep_serialize_jobs_debugfs_fops = {
 
 #endif /* CONFIG_DEBUG_FS */
 #endif /* MALI_KBASE_BUILD */
-
+#if !defined(CONFIG_MALI_EXYNOS_SECURE_RENDERING)
 static void kbasep_protected_mode_hwcnt_disable_worker(struct work_struct *data)
 {
        struct kbase_device *kbdev = container_of(data, struct kbase_device,
@@ -3360,6 +3360,27 @@ static void kbasep_protected_mode_term(struct kbase_device *kbdev)
                kfree(kbdev->protected_dev);
        }
 }
+#else /* if defined(CONFIG_MALI_EXYNOS_SECURE_RENDERING) */
+static int kbasep_protected_mode_init(struct kbase_device *kbdev)
+{
+    dev_info(kbdev->dev, "Support Secure Rendering with Exynos SoC\n");
+    /* Use the Exynos protected mode ops instead of the native ones */
+    kbdev->protected_dev = kzalloc(sizeof(*kbdev->protected_dev),
+            GFP_KERNEL);
+    if (!kbdev->protected_dev)
+        return -ENOMEM;
+    kbdev->protected_dev->data = kbdev;
+    kbdev->protected_ops = &exynos_protected_ops;
+    kbdev->protected_mode_support = true;
+    return 0;
+}
+
+static void kbasep_protected_mode_term(struct kbase_device *kbdev)
+{
+    kfree(kbdev->protected_dev);
+    kbdev->protected_mode_support = false;
+}
+#endif /* CONFIG_MALI_EXYNOS_SECURE_RENDERING */
 
 #ifdef CONFIG_MALI_NO_MALI
 static int kbase_common_reg_map(struct kbase_device *kbdev)
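Under CONFIG_MALI_EXYNOS_SECURE_RENDERING, kbasep_protected_mode_init()/_term() above reduce to an allocate-and-install / free-and-clear pairing around the exynos_protected_ops table. A compilable sketch of that lifecycle; every type and symbol name below (kbase_device_demo, exynos_protected_ops_demo) is a placeholder, not a kbase definition.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct protected_mode_device { void *data; };

struct kbase_device_demo {
        struct protected_mode_device *protected_dev;
        const void *protected_ops;          /* stands in for &exynos_protected_ops */
        bool protected_mode_support;
};

static const int exynos_protected_ops_demo = 0;   /* placeholder ops table */

static int protected_mode_init(struct kbase_device_demo *kbdev)
{
        kbdev->protected_dev = calloc(1, sizeof(*kbdev->protected_dev));
        if (!kbdev->protected_dev)
                return -1;                       /* -ENOMEM in the driver */
        kbdev->protected_dev->data = kbdev;      /* back-pointer used by the callbacks */
        kbdev->protected_ops = &exynos_protected_ops_demo;
        kbdev->protected_mode_support = true;
        return 0;
}

static void protected_mode_term(struct kbase_device_demo *kbdev)
{
        free(kbdev->protected_dev);              /* kfree() in the driver */
        kbdev->protected_dev = NULL;
        kbdev->protected_mode_support = false;
}

int main(void)
{
        struct kbase_device_demo kbdev = { 0 };

        if (!protected_mode_init(&kbdev))
                printf("support=%d\n", kbdev.protected_mode_support);
        protected_mode_term(&kbdev);
        return 0;
}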
diff --git a/drivers/gpu/arm/b_r19p0/mali_kbase_mem.h b/drivers/gpu/arm/b_r19p0/mali_kbase_mem.h
index 81e2c3723d0dde51791ebeb381835ab8d3805cb4..d5c271d6d566d44247fbb4fe24aa6d711a4c78b4 100644
@@ -356,6 +356,14 @@ struct kbase_va_region {
 #define KBASE_REG_ZONE_EXEC_VA           KBASE_REG_ZONE(2)
 #define KBASE_REG_ZONE_EXEC_VA_MAX_PAGES ((1ULL << 32) >> PAGE_SHIFT) /* 4 GB */
 
+/* MALI_SEC_SECURE_RENDERING */
+#ifdef CONFIG_MALI_EXYNOS_SECURE_RENDERING
+#define ION_HPA_DEFAULT_ORDER 4
+#define ION_HPA_DEFAULT_PAGE_ORDER (ION_HPA_DEFAULT_ORDER + PAGE_SHIFT)
+#define ION_HPA_DEFAULT_SIZE  (PAGE_SIZE << ION_HPA_DEFAULT_ORDER)
+#define ION_HPA_PAGE_COUNT(len) \
+        (ALIGN(len, ION_HPA_DEFAULT_SIZE) / ION_HPA_DEFAULT_SIZE)
+#endif
 
        unsigned long flags;
 
diff --git a/drivers/gpu/arm/b_r19p0/mali_kbase_mem_linux.c b/drivers/gpu/arm/b_r19p0/mali_kbase_mem_linux.c
index 9e121f06bf7b57168d53ec64306872c241630ca5..d2019fb8d917d78fe6235069db17b8da1b4e8d48 100644
@@ -1333,7 +1333,17 @@ static struct kbase_va_region *kbase_mem_from_umm(struct kbase_context *kctx,
                return NULL;
        }
 
+#ifdef CONFIG_MALI_EXYNOS_SECURE_RENDERING
+#ifdef CONFIG_HPA
+    if (*flags & BASE_MEM_SECURE)
+        *va_pages = (PAGE_ALIGN((ION_HPA_PAGE_COUNT(dma_buf->size) * ION_HPA_DEFAULT_SIZE)) >> PAGE_SHIFT) + padding;
+    else
+#endif
+               *va_pages = (PAGE_ALIGN(dma_buf->size) >> PAGE_SHIFT) + padding;
+#else /* !CONFIG_MALI_EXYNOS_SECURE_RENDERING: original code below */
        *va_pages = (PAGE_ALIGN(dma_buf->size) >> PAGE_SHIFT) + padding;
+#endif
+
        if (!*va_pages) {
                dma_buf_detach(dma_buf, dma_attachment);
                dma_buf_put(dma_buf);
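In the secure (BASE_MEM_SECURE) import path above, the dma-buf size is first rounded up to whole ION HPA granules and only then converted to GPU pages, so va_pages for a secure buffer can exceed that of a plain import of the same dma-buf. A stand-alone sketch of that arithmetic, assuming 4 KiB pages and zero padding; ALIGN/PAGE_ALIGN mirror the kernel macros and the ION_HPA_* values are copied from the mali_kbase_mem.h hunk.

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))
#define PAGE_ALIGN(x) ALIGN(x, PAGE_SIZE)

#define ION_HPA_DEFAULT_ORDER 4
#define ION_HPA_DEFAULT_SIZE  (PAGE_SIZE << ION_HPA_DEFAULT_ORDER)   /* 64 KiB granule */
#define ION_HPA_PAGE_COUNT(len) \
        (ALIGN(len, ION_HPA_DEFAULT_SIZE) / ION_HPA_DEFAULT_SIZE)

int main(void)
{
        unsigned long len = 100 * 1024;   /* a 100 KiB dma-buf */

        /* plain import: round the buffer up to 4 KiB pages */
        unsigned long plain_pages = PAGE_ALIGN(len) >> PAGE_SHIFT;                 /* 25 */

        /* secure import: round up to whole 64 KiB HPA granules first */
        unsigned long granules = ION_HPA_PAGE_COUNT(len);                          /* 2  */
        unsigned long secure_pages =
                PAGE_ALIGN(granules * ION_HPA_DEFAULT_SIZE) >> PAGE_SHIFT;         /* 32 */

        printf("plain=%lu pages, secure=%lu pages (%lu granules)\n",
               plain_pages, secure_pages, granules);
        return 0;
}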