return kbdev->protected_mode;
}
+/* MALI_SEC_SECURE_RENDERING */
+#ifndef CONFIG_MALI_EXYNOS_SECURE_RENDERING
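+/*
+ * The stock implementation below is only compiled when the Exynos
+ * secure-rendering path is disabled.
+ */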
static void kbase_gpu_disable_coherent(struct kbase_device *kbdev)
{
lockdep_assert_held(&kbdev->hwaccess_lock);
*/
return kbase_reset_gpu_silent(kbdev);
}
+#endif
+
+#ifdef CONFIG_MALI_EXYNOS_SECURE_RENDERING
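+/*
+ * Clean the GPU caches before a protected-mode transition: issue
+ * GPU_COMMAND_CLEAN_CACHES and poll GPU_IRQ_RAWSTAT for
+ * CLEAN_CACHES_COMPLETED (bounded by KBASE_CLEAN_CACHE_MAX_LOOPS),
+ * then clear the interrupt.
+ */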
+void kbasep_js_cacheclean(struct kbase_device *kbdev)
+{
+ /* Limit the number of loops to avoid a hang if the interrupt is missed */
+ u32 max_loops = KBASE_CLEAN_CACHE_MAX_LOOPS;
+
+ GPU_LOG(DVFS_INFO, LSI_SECURE_CACHE, 0u, 0u, "GPU CACHE WORKING for Secure Rendering\n");
+	/* Clean the caches using the GPU_COMMAND completion mechanism */
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), GPU_COMMAND_CLEAN_CACHES);
+
+ /* wait for cache flush to complete before continuing */
+ while (--max_loops && (kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_RAWSTAT)) & CLEAN_CACHES_COMPLETED) == 0)
+ ;
+ /* clear the CLEAN_CACHES_COMPLETED irq */
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), CLEAN_CACHES_COMPLETED);
+ GPU_LOG(DVFS_INFO, LSI_SECURE_CACHE_END, 0u, 0u, "GPU CACHE WORKING for Secure Rendering\n");
+}
+#else
static int kbase_jm_protected_entry(struct kbase_device *kbdev,
struct kbase_jd_atom **katom, int idx, int js)
{
return err;
}
+#endif
static int kbase_jm_enter_protected_mode(struct kbase_device *kbdev,
struct kbase_jd_atom **katom, int idx, int js)
{
int err = 0;
+#ifdef CONFIG_MALI_EXYNOS_SECURE_RENDERING
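+	/*
+	 * Exynos secure-rendering path: back off with -EAGAIN while atoms are
+	 * still submitted to the hardware, otherwise clean the caches and ask
+	 * the SoC protected-mode callback to enter protected mode.
+	 */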
+ if (kbase_gpu_atoms_submitted_any(kbdev))
+ return -EAGAIN;
+
+ if (kbdev->protected_ops) {
+ /* Switch GPU to protected mode */
+ kbasep_js_cacheclean(kbdev);
+ err = kbdev->protected_ops->protected_mode_enable(
+ kbdev->protected_dev);
+
+ if (err)
+ dev_warn(kbdev->dev, "Failed to enable protected mode: %d\n",
+ err);
+ else
+ kbdev->protected_mode = true;
+ }
+ return 0;
+#else
lockdep_assert_held(&kbdev->hwaccess_lock);
}
return 0;
+#endif /* CONFIG_MALI_EXYNOS_SECURE_RENDERING */
}
static int kbase_jm_exit_protected_mode(struct kbase_device *kbdev,
struct kbase_jd_atom **katom, int idx, int js)
{
int err = 0;
+#ifdef CONFIG_MALI_EXYNOS_SECURE_RENDERING
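+	/*
+	 * Mirror of the enter path: once no atoms are submitted, clean the
+	 * caches and call the SoC callback to leave protected mode.
+	 */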
+ if (kbase_gpu_atoms_submitted_any(kbdev))
+ return -EAGAIN;
+
+ if (kbdev->protected_ops) {
+		/* Switch GPU out of protected mode */
+ kbasep_js_cacheclean(kbdev);
+ err = kbdev->protected_ops->protected_mode_disable(
+ kbdev->protected_dev);
+
+ if (err)
+			dev_warn(kbdev->dev, "Failed to disable protected mode: %d\n",
+ err);
+ else
+ kbdev->protected_mode = false;
+ }
+ return 0;
+#else
lockdep_assert_held(&kbdev->hwaccess_lock);
}
return 0;
+#endif /* CONFIG_MALI_EXYNOS_SECURE_RENDERING */
}
void kbase_backend_slot_update(struct kbase_device *kbdev)
KBASE_TRACE_ADD(kbdev, PM_GPU_OFF, NULL, NULL, 0u, 0u);
+ /* MALI_SEC_SECURE_RENDERING */
+#ifdef CONFIG_MALI_EXYNOS_SECURE_RENDERING
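+	/*
+	 * The GPU is about to be powered off; if it is still in protected
+	 * mode, ask the SoC callback to switch it back to non-secure mode
+	 * first.
+	 */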
+	if (kbdev->protected_mode) {
+ int err = 0;
+
+ WARN_ONCE(!kbdev->protected_ops,
+ "Cannot disable secure mode: secure callbacks not specified.\n");
+
+ if (kbdev->protected_ops) {
+ /* Switch GPU to non-secure mode */
+ err = kbdev->protected_ops->protected_mode_disable(
+ kbdev->protected_dev);
+
+ if (err)
+ dev_warn(kbdev->dev, "Failed to disable secure mode: %d\n", err);
+ else
+ kbdev->protected_mode = false;
+ }
+ }
+#endif
+
/* Disable interrupts. This also clears any outstanding interrupts */
kbase_pm_disable_interrupts(kbdev);
/* Ensure that any IRQ handlers have finished */
return -EINVAL;
}
+/* MALI_SEC_SECURE_RENDERING */
+#ifndef CONFIG_MALI_EXYNOS_SECURE_RENDERING
static int kbasep_protected_mode_enable(struct protected_mode_device *pdev)
{
struct kbase_device *kbdev = pdev->data;
.protected_mode_enable = kbasep_protected_mode_enable,
.protected_mode_disable = kbasep_protected_mode_disable
};
+#endif
int kbase_pm_init_hw(struct kbase_device *kbdev, unsigned int flags)
{
spin_unlock_irqrestore(&kbdev->hwaccess_lock, irq_flags);
/* Soft reset the GPU */
+ /* MALI_SEC_SECURE_RENDERING */
+#ifdef CONFIG_MALI_EXYNOS_SECURE_RENDERING
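+	/*
+	 * On Exynos, only ask the SoC to leave protected mode if the GPU
+	 * actually entered it, instead of on every reset.
+	 */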
+ if (kbdev->protected_mode_support && kbdev->protected_mode)
+#else
if (kbdev->protected_mode_support)
+#endif
err = kbdev->protected_ops->protected_mode_disable(
kbdev->protected_dev);
else
#endif /* CONFIG_DEBUG_FS */
#endif /* MALI_KBASE_BUILD */
-
+#ifndef CONFIG_MALI_EXYNOS_SECURE_RENDERING
static void kbasep_protected_mode_hwcnt_disable_worker(struct work_struct *data)
{
struct kbase_device *kbdev = container_of(data, struct kbase_device,
kfree(kbdev->protected_dev);
}
}
+#else /* if defined(CONFIG_MALI_EXYNOS_SECURE_RENDERING) */
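+/*
+ * Exynos replacement for the default protected-mode setup: allocate the
+ * protected_mode_device wrapper and point it at the Exynos callbacks.
+ */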
+static int kbasep_protected_mode_init(struct kbase_device *kbdev)
+{
+	dev_info(kbdev->dev, "Secure rendering supported with Exynos SoC\n");
+	/* Use the Exynos protected ops */
+ kbdev->protected_dev = kzalloc(sizeof(*kbdev->protected_dev),
+ GFP_KERNEL);
+ if (!kbdev->protected_dev)
+ return -ENOMEM;
+ kbdev->protected_dev->data = kbdev;
+ kbdev->protected_ops = &exynos_protected_ops;
+ kbdev->protected_mode_support = true;
+ return 0;
+}
+
+static void kbasep_protected_mode_term(struct kbase_device *kbdev)
+{
+ kfree(kbdev->protected_dev);
+ kbdev->protected_mode_support = false;
+}
+#endif
#ifdef CONFIG_MALI_NO_MALI
static int kbase_common_reg_map(struct kbase_device *kbdev)
#define KBASE_REG_ZONE_EXEC_VA KBASE_REG_ZONE(2)
#define KBASE_REG_ZONE_EXEC_VA_MAX_PAGES ((1ULL << 32) >> PAGE_SHIFT) /* 4 GB */
+/* MALI_SEC_INTEGRATION */
+#ifdef CONFIG_MALI_EXYNOS_SECURE_RENDERING
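+/*
+ * Sizing helpers for ION HPA buffers: ION_HPA_DEFAULT_SIZE is one order-4
+ * chunk (16 pages, 64KiB with 4KiB pages), and ION_HPA_PAGE_COUNT() returns
+ * the number of such chunks needed to hold len bytes (despite its name, it
+ * counts chunks, not pages).
+ */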
+#define ION_HPA_DEFAULT_ORDER 4
+#define ION_HPA_DEFAULT_PAGE_ORDER (4 + PAGE_SHIFT)
+#define ION_HPA_DEFAULT_SIZE (PAGE_SIZE << ION_HPA_DEFAULT_ORDER)
+#define ION_HPA_PAGE_COUNT(len) \
+ (ALIGN(len, ION_HPA_DEFAULT_SIZE) / ION_HPA_DEFAULT_SIZE)
+#endif
unsigned long flags;
return NULL;
}
+#ifdef CONFIG_MALI_EXYNOS_SECURE_RENDERING
+#ifdef CONFIG_HPA
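+	/*
+	 * For secure imports the GPU VA reservation is rounded up to whole
+	 * HPA chunks (ION_HPA_DEFAULT_SIZE) rather than just page-aligned.
+	 */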
+ if (*flags & BASE_MEM_SECURE)
+		*va_pages = (PAGE_ALIGN((ION_HPA_PAGE_COUNT(dma_buf->size) *
+				ION_HPA_DEFAULT_SIZE)) >> PAGE_SHIFT) + padding;
+ else
+#endif
+ *va_pages = (PAGE_ALIGN(dma_buf->size) >> PAGE_SHIFT) + padding;
+#else /* !CONFIG_MALI_EXYNOS_SECURE_RENDERING: original code below */
*va_pages = (PAGE_ALIGN(dma_buf->size) >> PAGE_SHIFT) + padding;
+#endif
+
if (!*va_pages) {
dma_buf_detach(dma_buf, dma_attachment);
dma_buf_put(dma_buf);