[COMMON] g2d: support secure mode
author	hyesoo.yu <hyesoo.yu@samsung.com>
Wed, 24 May 2017 04:33:58 +0000 (13:33 +0900)
committer	Seungchul Kim <sc377.kim@samsung.com>
Mon, 28 May 2018 05:27:18 +0000 (14:27 +0900)
Support blending of secure layers. To access a secure layer, the
secure mode of G2D must be enabled from the secure world, and the
command area must be mapped through the secure MMU.

When normal and secure tasks are running on the H/W queue at the same
time, turning the secure mode on and off could cause conflicts,
because it changes whether the command area is read through the
secure or the normal sysmmu.

So, always turn on the secure mode and map the command area through
the secure MMU.

Change-Id: Ia15796e611a9b6f98181183c556d4f4848fa455e
Signed-off-by: hyesoo.yu <hyesoo.yu@samsung.com>
drivers/gpu/exynos/g2d/g2d_task.c
drivers/gpu/exynos/g2d/g2d_task.h

diff --git a/drivers/gpu/exynos/g2d/g2d_task.c b/drivers/gpu/exynos/g2d/g2d_task.c
index b013a5a4b55ab70e45a6e8f4940b0b37b8dd9628..03fae9778e15afbcbf37cd41d110f5642fe9b9b7 100644
 #include "g2d_command.h"
 #include "g2d_fence.h"
 
+#ifdef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION
+#include <linux/smc.h>
+
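+/*
+ * G2D_SECURE_DMA_BASE: base of the device virtual addresses at which
+ * command buffers are mapped for the secure sysmmu.
+ * G2D_SEC_COMMAND_BUF: protection flag set for command buffers.
+ * G2D_ALWAYS_S: protection ID of G2D used with SMC_PROTECTION_SET.
+ */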
+#define G2D_SECURE_DMA_BASE    0x8000000
+#define G2D_SEC_COMMAND_BUF 12
+#define G2D_ALWAYS_S 37
+
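+/*
+ * Describe the command page to the secure world so that G2D fetches its
+ * commands through the secure sysmmu. Each task gets a fixed device
+ * virtual address derived from its job ID.
+ */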
+static int g2d_map_cmd_data(struct g2d_task *task)
+{
+       struct g2d_buffer_prot_info *prot = &task->prot_info;
+       int ret;
+
+       prot->chunk_count = 1;
+       prot->flags = G2D_SEC_COMMAND_BUF;
+       prot->chunk_size = G2D_CMD_LIST_SIZE;
+       prot->bus_address = page_to_phys(task->cmd_page);
+       prot->dma_addr = G2D_SECURE_DMA_BASE + G2D_CMD_LIST_SIZE * task->job_id;
+
+       __flush_dcache_area(prot, sizeof(struct g2d_buffer_prot_info));
+       ret = exynos_smc(SMC_DRM_PPMP_PROT, virt_to_phys(prot), 0, 0);
+
+       if (ret) {
+               dev_err(task->g2d_dev->dev,
+                       "Failed to map secure page tbl (%d) %x %x %lx\n", ret,
+                       prot->dma_addr, prot->flags, prot->bus_address);
+               return ret;
+       }
+
+       task->cmd_addr = prot->dma_addr;
+
+       return 0;
+}
+
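+/*
+ * The secure mode of G2D is enabled before a task is pushed to the H/W
+ * queue and disabled when the task finishes, so the command area is
+ * always read through the secure sysmmu while G2D is running.
+ */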
+static void g2d_secure_enable(void)
+{
+       exynos_smc(SMC_PROTECTION_SET, 0, G2D_ALWAYS_S, 1);
+}
+
+static void g2d_secure_disable(void)
+{
+       exynos_smc(SMC_PROTECTION_SET, 0, G2D_ALWAYS_S, 0);
+}
+#else
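+/* Without content path protection, map the command page through the normal IOVMM. */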
+static int g2d_map_cmd_data(struct g2d_task *task)
+{
+       struct scatterlist sgl;
+
+       /* mapping the command data */
+       sg_init_table(&sgl, 1);
+       sg_set_page(&sgl, task->cmd_page, G2D_CMD_LIST_SIZE, 0);
+       task->cmd_addr = iovmm_map(task->g2d_dev->dev, &sgl, 0,
+                                  G2D_CMD_LIST_SIZE, DMA_TO_DEVICE,
+                                  IOMMU_READ | IOMMU_CACHE);
+
+       if (IS_ERR_VALUE(task->cmd_addr)) {
+               dev_err(task->g2d_dev->dev,
+                       "%s: Unable to allocate IOVA for cmd data\n", __func__);
+               return task->cmd_addr;
+       }
+
+       return 0;
+}
+
+#define g2d_secure_enable() do { } while (0)
+#define g2d_secure_disable() do { } while (0)
+#endif
+
 struct g2d_task *g2d_get_active_task_from_id(struct g2d_device *g2d_dev,
                                             unsigned int id)
 {
@@ -75,6 +142,8 @@ static void g2d_finish_task(struct g2d_device *g2d_dev,
 
        del_timer(&task->timer);
 
+       g2d_secure_disable();
+
        clk_disable(g2d_dev->clock);
 
        pm_runtime_put(g2d_dev->dev);
@@ -199,6 +268,8 @@ static void g2d_schedule_task(struct g2d_task *task)
                goto err_clk;
        }
 
+       g2d_secure_enable();
+
        spin_lock_irqsave(&g2d_dev->lock_task, flags);
 
        list_add_tail(&task->node, &g2d_dev->tasks_prepared);
@@ -324,11 +395,10 @@ void g2d_destroy_tasks(struct g2d_device *g2d_dev)
        destroy_workqueue(g2d_dev->schedule_workq);
 }
 
-static struct g2d_task *g2d_create_task(struct g2d_device *g2d_dev)
+static struct g2d_task *g2d_create_task(struct g2d_device *g2d_dev, int id)
 {
        struct g2d_task *task;
-       struct scatterlist sgl;
-       int i;
+       int i, ret;
 
        task = kzalloc(sizeof(*task), GFP_KERNEL);
        if (!task)
@@ -337,29 +407,33 @@ static struct g2d_task *g2d_create_task(struct g2d_device *g2d_dev)
        INIT_LIST_HEAD(&task->node);
 
        task->cmd_page = alloc_pages(GFP_KERNEL, get_order(G2D_CMD_LIST_SIZE));
-       if (!task->cmd_page)
+       if (!task->cmd_page) {
+               ret = -ENOMEM;
                goto err_page;
+       }
 
-       /* mapping the command data */
-       sg_init_table(&sgl, 1);
-       sg_set_page(&sgl, task->cmd_page, G2D_CMD_LIST_SIZE, 0);
-       task->cmd_addr = iovmm_map(g2d_dev->dev, &sgl, 0, G2D_CMD_LIST_SIZE,
-                                  DMA_TO_DEVICE, IOMMU_READ | IOMMU_CACHE);
+       task->job_id = id;
+       task->g2d_dev = g2d_dev;
+
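+       /* map the command data to either the secure or the normal sysmmu */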
+       ret = g2d_map_cmd_data(task);
+       if (ret)
+               goto err_map;
 
        for (i = 0; i < G2D_MAX_IMAGES; i++)
                task->source[i].task = task;
        task->target.task = task;
 
-       task->g2d_dev = g2d_dev;
-
        init_completion(&task->completion);
        spin_lock_init(&task->fence_timeout_lock);
 
        return task;
+
+err_map:
+       __free_pages(task->cmd_page, get_order(G2D_CMD_LIST_SIZE));
 err_page:
        kfree(task);
 
-       return ERR_PTR(-ENOMEM);
+       return ERR_PTR(ret);
 }
 
 int g2d_create_tasks(struct g2d_device *g2d_dev)
@@ -372,15 +446,13 @@ int g2d_create_tasks(struct g2d_device *g2d_dev)
                return -ENOMEM;
 
        for (i = 0; i < G2D_MAX_JOBS; i++) {
-               task = g2d_create_task(g2d_dev);
+               task = g2d_create_task(g2d_dev, i);
 
                if (IS_ERR(task)) {
                        g2d_destroy_tasks(g2d_dev);
                        return PTR_ERR(task);
                }
 
-               task->job_id = i;
-
                task->next = g2d_dev->tasks;
                g2d_dev->tasks = task;
                list_add(&task->node, &g2d_dev->tasks_free);
diff --git a/drivers/gpu/exynos/g2d/g2d_task.h b/drivers/gpu/exynos/g2d/g2d_task.h
index 4e07ae1e95b545b437b884d627382f545841ef6b..b8139febc2f53bde0bee7945d57d8291bebaebca 100644
 #define G2D_MAX_JOBS           16
 #define G2D_CMD_LIST_SIZE      8192
 
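+/*
+ * Description of the command buffer passed to the secure world by
+ * exynos_smc(SMC_DRM_PPMP_PROT) to map it into the secure sysmmu.
+ */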
+struct g2d_buffer_prot_info {
+       unsigned int chunk_count;
+       unsigned int dma_addr;
+       unsigned int flags;
+       unsigned int chunk_size;
+       unsigned long bus_address;
+};
+
 struct g2d_buffer {
        union {
                struct {
@@ -102,6 +110,9 @@ struct g2d_task {
        unsigned int            total_cached_len;
        unsigned int            total_hwrender_len;
        spinlock_t              fence_timeout_lock;
+#ifdef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION
+       struct g2d_buffer_prot_info prot_info;
+#endif
 };
 
 /* The below macros should be called with g2d_device.lock_tasks held */