[COMMON] g2d: get userdata
author    Cho KyongHo <pullip.cho@samsung.com>
Thu, 27 Apr 2017 08:42:29 +0000 (17:42 +0900)
committer Cosmin Tanislav <demonsingur@gmail.com>
Mon, 22 Apr 2024 17:22:13 +0000 (20:22 +0300)
This patch defines the interface between userspace and the driver. The
G2D driver does not define a formal structure for the information about
jobs to be processed in H/W. Instead, it receives a set of commands for
H/W. The driver then only checks whether the commands contain an
invalid value that may harm H/W. It does not check whether the values
in the commands match the user's expectations.

However, the driver only allows users to specify commands in a
restricted form. This restriction removes ambiguity about the
specification of commands in userspace because it makes clear to users
what information needs to be specified in the command list. It also
relieves the driver of heavy command validation work: the driver does
not need to validate the offset and the value of every single command
in the command list.
The driver defines the number and the order of the commands. Even if
the commands are not specified in the order the driver expects, it
pushes them to H/W as long as they are not harmful.
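
As a rough illustration of the restricted form, userspace describes
each layer with a plain array of register values whose meaning is fixed
by its index, defined by enum g2dsfr_img_register and enum
g2dsfr_src_register in g2d_uapi.h. The sketch below fills the
fixed-order command list of one source layer; the 1280x720 ARGB8888
values are example numbers only, and the availability of the
G2D_FMT_ARGB8888 color-mode value to userspace is an assumption since
its header is not part of this patch:

    #include <string.h>
    #include <linux/types.h>

    #include "g2d_uapi.h"

    /* example only: fill the fixed-order command list of one source layer */
    static void fill_source_commands(__u32 cmd[G2DSFR_SRC_FIELD_COUNT])
    {
            memset(cmd, 0, sizeof(__u32) * G2DSFR_SRC_FIELD_COUNT);

            cmd[G2DSFR_IMG_COLORMODE] = G2D_FMT_ARGB8888; /* assumed visible here */
            cmd[G2DSFR_IMG_STRIDE]    = 1280 * 4;   /* bytes per row of ARGB8888 */
            cmd[G2DSFR_IMG_WIDTH]     = 1280;
            cmd[G2DSFR_IMG_HEIGHT]    = 720;
            cmd[G2DSFR_IMG_LEFT]      = 0;
            cmd[G2DSFR_IMG_TOP]       = 0;
            cmd[G2DSFR_IMG_RIGHT]     = 1280;
            cmd[G2DSFR_IMG_BOTTOM]    = 720;
            /* the remaining G2DSFR_SRC_* entries stay zero in this sketch */
    }

The driver maps each index to an SFR offset through its internal
checker tables and rejects any value outside the per-command mask.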

With this restriction, the driver does not allow users to specify some
sensitive commands, including security management, H/W control and
layer management. The information related to the sensitive commands is
delivered to the driver in a well-defined data structure. The driver
then validates the information in the data structure and generates the
corresponding commands after the validation.
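
The per-task information is therefore passed through struct
g2d_task_data and the G2D_IOC_PROCESS ioctl declared in g2d_uapi.h. A
minimal sketch of a blocking submission with one source layer follows;
the "/dev/g2d" device node path is an assumption (it is not defined by
this patch), and the two g2d_layer_data arguments are expected to be
filled with valid buffer descriptions by the caller:

    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/types.h>

    #include "g2d_uapi.h"

    /* example only: submit one source layer onto the target */
    static int submit_one_layer(struct g2d_layer_data *src_layer,
                                __u32 src_cmd[G2DSFR_SRC_FIELD_COUNT],
                                struct g2d_layer_data *target_layer,
                                __u32 dst_cmd[G2DSFR_DST_FIELD_COUNT])
    {
            struct g2d_task_data data;
            int fd, ret;

            memset(&data, 0, sizeof(data));
            data.num_source = 1;
            data.source = src_layer;        /* buffers of the source layer */
            data.target = *target_layer;    /* buffers of the target layer */
            data.commands.source[0] = src_cmd;
            memcpy(data.commands.target, dst_cmd, sizeof(data.commands.target));
            /* no extra registers in this sketch: commands.extra stays NULL */

            fd = open("/dev/g2d", O_RDWR);  /* hypothetical device node path */
            if (fd < 0)
                    return -1;

            ret = ioctl(fd, G2D_IOC_PROCESS, &data);
            close(fd);

            return ret;
    }

The driver copies this structure into the kernel, validates it, builds
the commands for the sensitive information itself, and only then
appends the user-supplied command lists after checking them against the
per-command masks.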

Change-Id: I4a792adb084eecdeb3f890a701178ddb1ca069af
Signed-off-by: Cho KyongHo <pullip.cho@samsung.com>
drivers/gpu/exynos/g2d/Makefile
drivers/gpu/exynos/g2d/g2d_command.c [new file with mode: 0644]
drivers/gpu/exynos/g2d/g2d_command.h [new file with mode: 0644]
drivers/gpu/exynos/g2d/g2d_drv.c
drivers/gpu/exynos/g2d/g2d_task.c
drivers/gpu/exynos/g2d/g2d_task.h
drivers/gpu/exynos/g2d/g2d_uapi.h
drivers/gpu/exynos/g2d/g2d_uapi_process.c [new file with mode: 0644]
drivers/gpu/exynos/g2d/g2d_uapi_process.h [new file with mode: 0644]

index 9eb028b9b8abae894ef709a6cc9bbabdaa33b8af..5d03ed83ca338d94e9ae32b34ae3ea10cd13ce1b 100644 (file)
@@ -1 +1,2 @@
 obj-$(CONFIG_EXYNOS_GRAPHICS_G2D) += g2d_drv.o g2d_task.o g2d_regs.o
+obj-$(CONFIG_EXYNOS_GRAPHICS_G2D) += g2d_uapi_process.o g2d_command.o
diff --git a/drivers/gpu/exynos/g2d/g2d_command.c b/drivers/gpu/exynos/g2d/g2d_command.c
new file mode 100644 (file)
index 0000000..651790d
--- /dev/null
@@ -0,0 +1,763 @@
+/*
+ * linux/drivers/gpu/exynos/g2d/g2d_command.c
+ *
+ * Copyright (C) 2017 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/uaccess.h>
+#include <linux/device.h>
+
+#include "g2d.h"
+#include "g2d_task.h"
+#include "g2d_uapi.h"
+#include "g2d_command.h"
+
+#define NV12N_Y_SIZE(w, h)     (ALIGN((w), 16) * ALIGN((h), 16) + 256)
+#define NV12N_CBCR_SIZE(w, h)          \
+               (ALIGN((ALIGN((w), 16) * (ALIGN((h), 16) / 2) + 256), 16))
+#define NV12N_CBCR_BASE(base, w, h)    ((base) + NV12N_Y_SIZE((w), (h)))
+
+#define NV12N10B_Y_2B_SIZE(w, h)    ((ALIGN((w) / 4, 16) * ALIGN((h), 16) + 64))
+#define NV12N10B_CBCR_2B_SIZE(w, h)     \
+                       ((ALIGN((w) / 4, 16) * (ALIGN((h), 16) / 2) + 64))
+#define NV12N10B_CBCR_BASE(base, w, h) \
+               ((base) + NV12N_Y_SIZE((w), (h)) + NV12N10B_Y_2B_SIZE((w), (h)))
+
+static const struct g2d_fmt g2d_formats[] = {
+       {
+               .name           = "ARGB8888",
+               .fmtvalue       = G2D_FMT_ARGB8888,     /* [31:0] ARGB */
+               .bpp            = { 32 },
+               .num_planes     = 1,
+       }, {
+               .name           = "ABGR8888",
+               .fmtvalue       = G2D_FMT_ABGR8888,     /* [31:0] ABGR */
+               .bpp            = { 32 },
+               .num_planes     = 1,
+       }, {
+               .name           = "XBGR8888",
+               .fmtvalue       = G2D_FMT_XBGR8888,     /* [31:0] XBGR */
+               .bpp            = { 32 },
+               .num_planes     = 1,
+       }, {
+               .name           = "XRGB8888",
+               .fmtvalue       = G2D_FMT_XRGB8888,     /* [31:0] XBGR */
+               .bpp            = { 32 },
+               .num_planes     = 1,
+       }, {
+               .name           = "RGB888",
+               .fmtvalue       = G2D_FMT_RGB888,       /* [23:0] RGB */
+               .bpp            = { 24 },
+               .num_planes     = 1,
+       }, {
+               .name           = "ARGB4444",
+               .fmtvalue       = G2D_FMT_ARGB4444,     /* [15:0] ARGB */
+               .bpp            = { 16 },
+               .num_planes     = 1,
+       }, {
+               .name           = "ARGB1555",
+               .fmtvalue       = G2D_FMT_ARGB1555,     /* [15:0] ARGB */
+               .bpp            = { 16 },
+               .num_planes     = 1,
+       }, {
+               .name           = "RGB565",
+               .fmtvalue       = G2D_FMT_RGB565,       /* [15:0] RGB */
+               .bpp            = { 16 },
+               .num_planes     = 1,
+       }, {
+               .name           = "NV12",
+               .fmtvalue       = G2D_FMT_NV12,
+               .bpp            = { 8, 4 },
+               .num_planes     = 2,
+       }, {
+               .name           = "NV21",
+               .fmtvalue       = G2D_FMT_NV21,
+               .bpp            = { 8, 4 },
+               .num_planes     = 2,
+       }, {
+               .name           = "NV12_8+2",
+               .fmtvalue       = G2D_FMT_NV12_82,
+               .bpp            = { 8, 2, 4, 1 },
+               .num_planes     = 4,
+       }, {
+               .name           = "NV21_8+2",
+               .fmtvalue       = G2D_FMT_NV21_82,
+               .bpp            = { 8, 2, 4, 1 },
+               .num_planes     = 4,
+       }, {
+               .name           = "NV12_P010",
+               .fmtvalue       = G2D_FMT_NV12_P010,
+               .bpp            = { 16, 8},
+               .num_planes     = 2,
+       }, {
+               .name           = "NV21N_P010",
+               .fmtvalue       = G2D_FMT_NV21_P010,
+               .bpp            = { 16, 8},
+               .num_planes     = 2,
+       }, {
+               .name           = "YUYV",
+               .fmtvalue       = G2D_FMT_YUYV,
+               .bpp            = { 16 },
+               .num_planes     = 1,
+       }, {
+               .name           = "YVYU",
+               .fmtvalue       = G2D_FMT_YVYU,
+               .bpp            = { 16 },
+               .num_planes     = 1,
+       }, {
+               .name           = "UYVY",
+               .fmtvalue       = G2D_FMT_UYVY,
+               .bpp            = { 16 },
+               .num_planes     = 1,
+       }, {
+               .name           = "VYUY",
+               .fmtvalue       = G2D_FMT_VYUY,
+               .bpp            = { 16 },
+               .num_planes     = 1,
+       }, {
+               .name           = "NV16",
+               .fmtvalue       = G2D_FMT_NV16,
+               .bpp            = { 8, 8 },
+               .num_planes     = 2,
+       }, {
+               .name           = "NV61",
+               .fmtvalue       = G2D_FMT_NV61,
+               .bpp            = { 8, 8 },
+               .num_planes     = 2,
+       },
+};
+
+const struct g2d_fmt *g2d_find_format(u32 fmtval)
+{
+       size_t i;
+
+       for (i = 0; i < ARRAY_SIZE(g2d_formats); i++)
+               if (g2d_formats[i].fmtvalue == G2D_IMGFMT(fmtval))
+                       return &g2d_formats[i];
+
+       return NULL;
+}
+
+#define layer_width(layer)     ((layer)->commands[G2DSFR_IMG_WIDTH].value)
+#define layer_height(layer)    ((layer)->commands[G2DSFR_IMG_HEIGHT].value)
+#define layer_pixelcount(layer)        (layer_width(layer) * layer_height(layer))
+#define YUV82_BASE_ALIGNED(addr, idx) IS_ALIGNED((addr), 32 >> ((idx) / 2))
+#define YUV82_BASE_ALIGN(addr, idx)   ALIGN((addr), 32 >> ((idx) / 2))
+
+static unsigned char src_base_reg_offset[4] = {0x1C, 0x80, 0x64, 0x68};
+static unsigned char src_base_reg_offset_yuv82[4] = {0x1C, 0x64, 0x80, 0x68};
+static unsigned char dst_base_reg_offset[4] = {0x00, 0x50, 0x30, 0x34};
+#define BASE_REG_OFFSET(base, offsets, buf_idx)        ((base) + (offsets)[(buf_idx)])
+
+#define AFBC_HEADER_SIZE(cmd)                                          \
+               ((ALIGN((cmd)[G2DSFR_IMG_WIDTH].value, 16) / 16) *      \
+                (ALIGN((cmd)[G2DSFR_IMG_HEIGHT].value, 16) / 16) * 16)
+
+size_t g2d_get_payload_index(struct g2d_reg cmd[], const struct g2d_fmt *fmt,
+                            unsigned int idx)
+{
+       /*
+        * TODO: consider NV12N and similar image formats
+        *       with alignment restriction
+        */
+       BUG_ON(!IS_YUV(cmd[G2DSFR_IMG_COLORMODE].value));
+
+       return ((cmd[G2DSFR_IMG_WIDTH].value * fmt->bpp[idx]) / 8) *
+                                               cmd[G2DSFR_IMG_BOTTOM].value;
+}
+
+size_t g2d_get_payload(struct g2d_reg cmd[], const struct g2d_fmt *fmt,
+                      u32 flags)
+{
+       /*
+        * TODO: consider NV12N and similar image formats
+        *       with alignment restriction
+        */
+       size_t payload;
+       u32 mode = cmd[G2DSFR_IMG_COLORMODE].value;
+       u32 width = cmd[G2DSFR_IMG_WIDTH].value;
+       u32 height = cmd[G2DSFR_IMG_BOTTOM].value;
+       size_t pixcount = width * height;
+
+       if (IS_YUV420_82(mode)) {
+               payload  = YUV82_BASE_ALIGN((pixcount * fmt->bpp[0]) / 8, 1);
+               payload += YUV82_BASE_ALIGN((pixcount * fmt->bpp[1]) / 8, 2);
+               payload += YUV82_BASE_ALIGN((pixcount * fmt->bpp[2]) / 8, 3);
+               payload += (pixcount * fmt->bpp[3]) / 8;
+       } else if (IS_YUV(mode)) {
+               unsigned int i;
+
+               payload = 0;
+               if (((flags & G2D_LAYERFLAG_MFC_STRIDE) != 0) &&
+                               (fmt->fmtvalue == G2D_FMT_NV12)) {
+                       payload += NV12N_Y_SIZE(width, height);
+                       payload += NV12N_CBCR_SIZE(width, height);
+               } else {
+                       for (i = 0; i < fmt->num_planes; i++)
+                               payload += (pixcount * fmt->bpp[i]) / 8;
+               }
+       } else if (IS_AFBC(mode)) {
+               payload = AFBC_HEADER_SIZE(cmd) + (pixcount * fmt->bpp[0]) / 8;
+       } else {
+               payload = cmd[G2DSFR_IMG_STRIDE].value *
+                               cmd[G2DSFR_IMG_BOTTOM].value;
+       }
+
+       return payload;
+}
+
+static bool check_width_height(u32 value)
+{
+       return (value > 0) && (value <= G2D_MAX_SIZE);
+}
+
+/* 8bpp(grayscale) format is not supported */
+static bool check_srccolor_mode(u32 value)
+{
+       u32 fmt = ((value) & G2D_DATAFMT_MASK) >> G2D_DATAFMT_SHIFT;
+
+       if ((fmt > 13) || (fmt == 6) || (fmt == 7) || (fmt == 9))
+               return false;
+
+       if (IS_YUV(value) && (value & G2D_DATAFORMAT_AFBC))
+               return false;
+
+       return true;
+}
+
+static bool check_dstcolor_mode(u32 value)
+{
+       u32 fmt = ((value) & G2D_DATAFMT_MASK) >> G2D_DATAFMT_SHIFT;
+
+       /* src + YCbCr420 3p, - YCbCr420 2p 8.2 */
+       if ((fmt > 12) || (fmt == 6) || (fmt == 7))
+               return false;
+
+       /* AFBC and UORDER should not be set together */
+       if ((value & (G2D_DATAFORMAT_AFBC | G2D_DATAFORMAT_UORDER)) ==
+                       (G2D_DATAFORMAT_AFBC | G2D_DATAFORMAT_UORDER))
+               return false;
+
+       if (IS_YUV(value) &&
+                       (value & (G2D_DATAFORMAT_AFBC | G2D_DATAFORMAT_UORDER)))
+               return false;
+
+       return true;
+}
+
+static bool check_blend_mode(u32 value)
+{
+       int i = 0;
+
+       for (i = 0; i < 2; i++) { /* for each source and destination */
+               if ((value & 0xF) > 7) /* Coeff */
+                       return false;
+               value >>= 4;
+               if ((value & 0x3) > 2) /* CoeffSA */
+                       return false;
+               value >>= 2;
+               if ((value & 0x3) > 2) /* CoeffDA */
+                       return false;
+               value >>= 2;
+       }
+
+       return true;
+}
+
+static bool check_scale_control(u32 value)
+{
+       return value != 3;
+}
+
+struct command_checker {
+       const char *cmdname;
+       u32 offset;
+       u32 mask;
+       bool (*checker)(u32 value);
+};
+
+static struct command_checker source_command_checker[G2DSFR_SRC_FIELD_COUNT] = {
+       {"STRIDE",      0x0020, 0x0001FFFF, NULL,},
+       {"COLORMODE",   0x0028, 0x031FFFFF, check_srccolor_mode,},
+       {"LEFT",        0x002C, 0x00001FFF, NULL,},
+       {"TOP",         0x0030, 0x00001FFF, NULL,},
+       {"RIGHT",       0x0034, 0x00003FFF, check_width_height,},
+       {"BOTTOM",      0x0038, 0x00003FFF, check_width_height,},
+       {"WIDTH",       0x0070, 0x00003FFF, check_width_height,},
+       {"HEIGHT",      0x0074, 0x00003FFF, check_width_height,},
+       {"COMMAND",     0x0000, 0x03100001, NULL,},
+       {"SELECT",      0x0004, 0x00000001, NULL,},
+       {"ROTATE",      0x0008, 0x00000031, NULL,},
+       {"DSTLEFT",     0x000C, 0x00001FFF, NULL,},
+       {"DSTTOP",      0x0010, 0x00001FFF, NULL,},
+       {"DSTRIGHT",    0x0014, 0x00003FFF, check_width_height,},
+       {"DSTBOTTOM",   0x0018, 0x00003FFF, check_width_height,},
+       {"SCALECONTROL", 0x048, 0x00000003, check_scale_control,},
+       {"XSCALE",      0x004C, 0x3FFFFFFF, NULL,},
+       {"YSCALE",      0x0050, 0x3FFFFFFF, NULL,},
+       {"XPHASE",      0x0054, 0x0000FFFF, NULL,},
+       {"YPHASE",      0x0058, 0x0000FFFF, NULL,},
+       {"COLOR",       0x005C, 0xFFFFFFFF, NULL,},
+       {"ALPHA",       0x0060, 0xFFFFFFFF, NULL,},
+       {"BLEND",       0x003C, 0x0035FFFF, check_blend_mode,},
+       {"YCBCRMODE",   0x0088, 0x10000013, NULL,},
+       {"HDRMODE",     0x0090, 0x000011B3, NULL,},
+};
+
+static struct command_checker target_command_checker[G2DSFR_DST_FIELD_COUNT] = {
+       /* BASE OFFSET: 0x0120 */
+       {"STRIDE",      0x0004, 0x0001FFFF, NULL,},
+       {"COLORMODE",   0x000C, 0x033FFFFF, check_dstcolor_mode,},
+       {"LEFT",        0x0010, 0x00001FFF, NULL,},
+       {"TOP",         0x0014, 0x00001FFF, NULL,},
+       {"RIGHT",       0x0018, 0x00003FFF, check_width_height,},
+       {"BOTTOM",      0x001C, 0x00003FFF, check_width_height,},
+       {"WIDTH",       0x0040, 0x00003FFF, check_width_height,},
+       {"HEIGHT",      0x0044, 0x00003FFF, check_width_height,},
+       /* TODO: check csc */
+       {"YCBCRMODE",   0x0058, 0x0000F714, NULL,},
+};
+
+static u16 extra_cmd_range[][2] = { /* {first, last} */
+       {0x2000, 0x208C}, /* SRC CSC Coefficients */
+       {0x2100, 0x2120}, /* DST CSC Coefficients */
+       {0x3000, 0x3100}, /* HDR EOTF Coefficients */
+       {0x3200, 0x3300}, /* Degamma Coefficients */
+       {0x3400, 0x3420}, /* HDR Gamut Mapping Coefficients */
+       {0x3500, 0x3520}, /* Degamma 2.2 Coefficients */
+       {0x3600, 0x3680}, /* HDR Tone Mapping Coefficients */
+       {0x3700, 0x3780}, /* Degamma Tone Mapping Coefficients */
+};
+
+#define TARGET_OFFSET          0x120
+#define LAYER_OFFSET(idx)      ((2 + (idx)) << 8)
+
+static int g2d_copy_commands(struct g2d_device *g2d_dev, int index,
+                             struct g2d_reg regs[], __u32 cmd[],
+                             struct command_checker checker[],
+                             unsigned int num_cmds)
+{
+       unsigned int base = (index < 0) ? TARGET_OFFSET : LAYER_OFFSET(index);
+       int i;
+
+       for (i = 0; i < num_cmds; i++) {
+               if (((cmd[i] & ~checker[i].mask) != 0) ||
+                               (checker[i].checker &&
+                                       !checker[i].checker(cmd[i]))) {
+                       dev_err(g2d_dev->dev,
+                               "%s: Invalid %s[%d] SFR '%s' value %#x\n",
+                               __func__, (index < 0) ? "target" : "source",
+                               index, checker[i].cmdname, cmd[i]);
+                       return -EINVAL;
+               }
+
+               regs[i].offset = base + checker[i].offset;
+               regs[i].value = cmd[i];
+       }
+
+       return num_cmds;
+}
+
+static bool g2d_validate_image_dimension(struct g2d_device *g2d_dev,
+                                        u32 width, u32 height,
+                                        u32 left, u32 top,
+                                        u32 right, u32 bottom)
+{
+       if ((left >= right) || (top >= bottom) ||
+                       (width < (right - left)) || (height < (bottom - top))) {
+               dev_err(g2d_dev->dev,
+                       "%s: Invalid dimension [%ux%u, %ux%u) / %ux%u\n",
+                       __func__, left, top, right, bottom, width, height);
+               return false;
+       }
+
+       return true;
+}
+
+#define IS_EVEN(value) (((value) & 1) == 0)
+
+static bool g2d_validate_image_format(struct g2d_device *g2d_dev,
+                                     struct g2d_reg commands[], bool dst)
+{
+       struct device *dev = g2d_dev->dev;
+       u32 stride = commands[G2DSFR_IMG_STRIDE].value;
+       u32 mode   = commands[G2DSFR_IMG_COLORMODE].value;
+       u32 width  = commands[G2DSFR_IMG_WIDTH].value;
+       u32 height = commands[G2DSFR_IMG_HEIGHT].value;
+       u32 Bpp = 0;
+       const struct g2d_fmt *fmt;
+
+       if (IS_AFBC(mode) && !dst) {
+               width++;
+               height++;
+       }
+
+       if (!g2d_validate_image_dimension(g2d_dev, width, height,
+                                       commands[G2DSFR_IMG_LEFT].value,
+                                       commands[G2DSFR_IMG_TOP].value,
+                                       commands[G2DSFR_IMG_RIGHT].value,
+                                       commands[G2DSFR_IMG_BOTTOM].value))
+               return false;
+
+       fmt = g2d_find_format(mode);
+       if (fmt == NULL) {
+               dev_err(dev, "%s: Color mode %#x is not supported\n",
+                       __func__, mode);
+               return false;
+       }
+
+       Bpp = fmt->bpp[0] / 8;
+
+       if (stride) {
+               int err = 0;
+
+               if (IS_AFBC(mode) || IS_YUV(mode))
+                       err |= 1 << 1;
+               if (IS_UORDER(mode) & (stride != ALIGN(width * Bpp, 16)))
+                       err |= 1 << 2;
+               if (stride < (width * Bpp))
+                       err |= 1 << 4;
+               if (stride > (G2D_MAX_SIZE * Bpp))
+                       err |= 1 << 5;
+
+               if (err) {
+                       dev_err(dev,
+                       "%s: Invalid[%#x] stride %u with mode %#x size %ux%u\n",
+                               __func__, err, stride, mode, width, height);
+                       return false;
+               }
+       } else if (IS_YUV(mode)) {
+               /* TODO: Y8 handling if required */
+               if (!IS_EVEN(width) || (!IS_YUV422(mode) && !IS_EVEN(height)))
+                       goto err_align;
+       } else if (!IS_AFBC(mode)) {
+               dev_err(dev, "%s: Non AFBC RGB requires valid stride\n",
+                       __func__);
+               return false;
+       }
+
+       if (!dst) {
+               if (IS_AFBC(mode) && (!IS_AFBC_WIDTH_ALIGNED(width) ||
+                                       !IS_AFBC_HEIGHT_ALIGNED(height)))
+                       goto err_align;
+
+               if (IS_YUV420_82(mode) && !IS_ALIGNED(width, 64))
+                       goto err_align;
+
+               return true;
+       }
+
+       width = commands[G2DSFR_IMG_LEFT].value |
+                               commands[G2DSFR_IMG_RIGHT].value;
+       height = commands[G2DSFR_IMG_TOP].value |
+                                       commands[G2DSFR_IMG_BOTTOM].value;
+
+       if (IS_AFBC(mode) && !IS_AFBC_WIDTH_ALIGNED(width | height))
+               goto err_align;
+
+       if (IS_YUV(mode)) {
+               /*
+                * DST clip region has alignment restrictions
+                * according to the chroma subsampling
+                */
+               if (!IS_EVEN(width) || (!IS_YUV422(mode) && !IS_EVEN(height)))
+                       goto err_align;
+       }
+
+       return true;
+err_align:
+       dev_err(dev,
+               "%s: Unaligned size %ux%u or crop [%ux%u, %ux%u) for %s %s\n",
+               __func__,
+               commands[G2DSFR_IMG_WIDTH].value,
+               commands[G2DSFR_IMG_HEIGHT].value,
+               commands[G2DSFR_IMG_LEFT].value,
+               commands[G2DSFR_IMG_TOP].value,
+               commands[G2DSFR_IMG_RIGHT].value,
+               commands[G2DSFR_IMG_BOTTOM].value,
+               IS_AFBC(mode) ? "AFBC" :
+                       IS_YUV422(mode) ? "YUV422" : "YUV420",
+               IS_YUV420_82(mode) ? "8+2" : "");
+
+       return false;
+}
+
+bool g2d_validate_source_commands(struct g2d_device *g2d_dev,
+                                 unsigned int i, struct g2d_layer *source,
+                                 struct g2d_layer *target)
+{
+       u32 colormode = source->commands[G2DSFR_IMG_COLORMODE].value;
+       u32 width, height;
+
+       if (!g2d_validate_image_format(g2d_dev, source->commands, false)) {
+               dev_err(g2d_dev->dev,
+                       "%s: Failed to validate source[%d] commands\n",
+                       __func__, i);
+               return false;
+       }
+
+       if (((source->flags & G2D_LAYERFLAG_COLORFILL) != 0) &&
+                                                       !IS_RGB(colormode)) {
+               dev_err(g2d_dev->dev,
+                       "%s: Image type should be RGB for Solid color layer\n",
+                       __func__);
+               dev_err(g2d_dev->dev,
+                       "%s: layer index: %d, flags %#x, colormode: %#010x\n",
+                       __func__, i, source->flags, colormode);
+               return false;
+       }
+
+       width = target->commands[G2DSFR_IMG_WIDTH].value;
+       height = target->commands[G2DSFR_IMG_HEIGHT].value;
+
+       if (IS_AFBC(colormode)) {
+               width++;
+               height++;
+       }
+
+       if (!g2d_validate_image_dimension(g2d_dev, width, height,
+                               source->commands[G2DSFR_SRC_DSTLEFT].value,
+                               source->commands[G2DSFR_SRC_DSTTOP].value,
+                               source->commands[G2DSFR_SRC_DSTRIGHT].value,
+                               source->commands[G2DSFR_SRC_DSTBOTTOM].value)) {
+               dev_err(g2d_dev->dev,
+                       "%s: Window of source[%d] floods the target\n",
+                       __func__, i);
+               return false;
+       }
+
+       return true;
+}
+
+bool g2d_validate_target_commands(struct g2d_device *g2d_dev,
+                                 struct g2d_task *task)
+{
+       if (!g2d_validate_image_format(g2d_dev, task->target.commands, true)) {
+               dev_err(g2d_dev->dev,
+                       "%s: Failed to validate target commands\n", __func__);
+               return false;
+       }
+
+       return true;
+}
+
+static bool g2d_validate_extra_command(struct g2d_device *g2d_dev,
+                                      struct g2d_reg extra[],
+                                      unsigned int num_regs)
+{
+       unsigned int n, i;
+       /*
+        * TODO: NxM loop ==> up to 2008 comparisons are required in the worst case.
+        * Consider whether we can simplify this with a single range [0x2000, 0x4000)
+        */
+       for (n = 0; n < num_regs; n++) {
+               for (i = 0; i < ARRAY_SIZE(extra_cmd_range); i++) {
+                       if ((extra[n].offset >= extra_cmd_range[i][0]) &&
+                               (extra[n].offset <= extra_cmd_range[i][1]))
+                               break;
+               }
+
+               if (i == ARRAY_SIZE(extra_cmd_range)) {
+                       dev_err(g2d_dev->dev,
+                               "%s: Invalid offset %#x @ extra command[%d]\n",
+                               __func__, extra[n].offset, n);
+                       return false;
+               }
+       }
+
+       return true;
+}
+
+int g2d_import_commands(struct g2d_device *g2d_dev, struct g2d_task *task,
+                       struct g2d_task_data *data, unsigned int num_sources)
+{
+       struct device *dev = g2d_dev->dev;
+       struct g2d_reg *cmdaddr = page_address(task->cmd_page);
+       struct g2d_commands *cmds = &data->commands;
+       unsigned int i;
+       int copied;
+
+       task->cmd_count = 0;
+
+       copied = g2d_copy_commands(g2d_dev, -1, cmdaddr, cmds->target,
+                               target_command_checker, G2DSFR_DST_FIELD_COUNT);
+       if (copied < 0)
+               return -EINVAL;
+
+       task->target.commands = cmdaddr;
+
+       cmdaddr += copied;
+       task->cmd_count += copied;
+
+       for (i = 0; i < num_sources; i++) {
+               u32 srccmds[G2DSFR_SRC_FIELD_COUNT];
+
+               if (copy_from_user(srccmds, cmds->source[i], sizeof(srccmds))) {
+                       dev_err(dev, "%s: Failed to get source[%d] commands\n",
+                               __func__, i);
+                       return -EFAULT;
+               }
+
+               copied = g2d_copy_commands(g2d_dev, i, cmdaddr, srccmds,
+                               source_command_checker, G2DSFR_SRC_FIELD_COUNT);
+               if (copied < 0)
+                       return -EINVAL;
+
+               task->source[i].commands = cmdaddr;
+               cmdaddr += copied;
+               task->cmd_count += copied;
+       }
+
+       if (copy_from_user(cmdaddr, cmds->extra,
+                          cmds->num_extra_regs * sizeof(struct g2d_reg))) {
+               dev_err(dev, "%s: Failed to get %u extra commands\n", __func__,
+                       cmds->num_extra_regs);
+               return -EFAULT;
+       }
+
+       if (!g2d_validate_extra_command(g2d_dev, cmdaddr, cmds->num_extra_regs))
+               return -EINVAL;
+
+       task->cmd_count += cmds->num_extra_regs;
+
+       return 0;
+}
+
+static unsigned int g2d_set_image_buffer(struct g2d_task *task,
+                                        struct g2d_layer *layer, u32 colormode,
+                                        unsigned char offsets[], u32 base)
+{
+       const struct g2d_fmt *fmt = g2d_find_format(colormode);
+       struct g2d_reg *reg = (struct g2d_reg *)page_address(task->cmd_page);
+       unsigned int cmd_count = task->cmd_count;
+       unsigned int i;
+       dma_addr_t addr;
+
+       if (fmt->num_planes == 4) {
+               unsigned int nbufs = min_t(unsigned int,
+                                          layer->num_buffers, fmt->num_planes);
+
+               for (i = 0; i < nbufs; i++) {
+                       if (!YUV82_BASE_ALIGNED(layer->buffer[i].dma_addr, i)) {
+                               dev_err(task->g2d_dev->dev,
+                               "%s: Plane %d Addr isn't aligned for YUV 8+2\n",
+                                       __func__, i);
+                               return 0;
+                       }
+               }
+       } else if (!IS_ALIGNED(layer->buffer[0].dma_addr, 4)) {
+               dev_err(task->g2d_dev->dev,
+                       "%s: Plane 0 address isn't aligned by 4.\n", __func__);
+               return 0;
+       }
+
+       for (i = 0; i < layer->num_buffers; i++) {
+               reg[cmd_count].offset = BASE_REG_OFFSET(base, offsets, i);
+               reg[cmd_count].value = layer->buffer[i].dma_addr;
+               cmd_count++;
+       }
+
+       if (layer->num_buffers == fmt->num_planes)
+               return cmd_count;
+
+       if (fmt->num_planes == 2) {
+               /* YCbCr semi-planar in a single buffer */
+               reg[cmd_count].offset = BASE_REG_OFFSET(base, offsets, 1);
+               if (((layer->flags & G2D_LAYERFLAG_MFC_STRIDE) != 0) &&
+                                       (fmt->fmtvalue == G2D_FMT_NV12)) {
+                       reg[cmd_count].value =
+                               NV12N_CBCR_BASE(layer->buffer[0].dma_addr,
+                                               layer_width(layer),
+                                               layer_height(layer));
+               } else {
+                       reg[cmd_count].value = layer_pixelcount(layer);
+                       reg[cmd_count].value *= fmt->bpp[0] / 8;
+                       reg[cmd_count].value += layer->buffer[0].dma_addr;
+                       reg[cmd_count].value = ALIGN(reg[cmd_count].value, 2);
+               }
+               cmd_count++;
+               return cmd_count;
+       }
+
+       addr = layer->buffer[0].dma_addr;
+       /* YCbCr semi-planar 8+2 in a single buffer */
+       for (i = 1; i < 4; i++) {
+               addr += (layer_pixelcount(layer) * fmt->bpp[i - 1]) / 8;
+               addr = YUV82_BASE_ALIGN(addr, 32);
+               reg[cmd_count].value = addr;
+               reg[cmd_count].offset = BASE_REG_OFFSET(base, offsets, i);
+               cmd_count++;
+       }
+
+       return cmd_count;
+}
+
+static unsigned int g2d_set_afbc_buffer(struct g2d_task *task,
+                                       struct g2d_layer *layer,
+                                       u32 base_offset)
+{
+       struct g2d_reg *reg = (struct g2d_reg *)page_address(task->cmd_page);
+       u32 align = (base_offset == TARGET_OFFSET) ? 64 : 16;
+       unsigned char *reg_offset = (base_offset == TARGET_OFFSET) ?
+                               dst_base_reg_offset : src_base_reg_offset;
+
+       if (!IS_ALIGNED(layer->buffer[0].dma_addr, align)) {
+               dev_err(task->g2d_dev->dev,
+                       "%s: AFBC base %#llx is not aligned by %u\n",
+                       __func__, layer->buffer[0].dma_addr, align);
+               return 0;
+       }
+
+       reg[task->cmd_count].offset = base_offset + reg_offset[2];
+       reg[task->cmd_count].value = layer->buffer[0].dma_addr;
+       reg[task->cmd_count + 1].offset = base_offset + reg_offset[3];
+       if (base_offset == TARGET_OFFSET)
+               reg[task->cmd_count + 1].value =
+                       ALIGN(AFBC_HEADER_SIZE(layer->commands)
+                                       + layer->buffer[0].dma_addr, align);
+       else
+               reg[task->cmd_count + 1].value = layer->buffer[0].dma_addr;
+
+       return task->cmd_count + 2;
+}
+
+bool g2d_prepare_source(struct g2d_task *task,
+                       struct g2d_layer *layer, int index)
+{
+       u32 colormode = layer->commands[G2DSFR_IMG_COLORMODE].value;
+       unsigned char *offsets = IS_YUV420_82(colormode) ?
+                               src_base_reg_offset_yuv82 : src_base_reg_offset;
+
+       task->cmd_count = ((colormode & G2D_DATAFORMAT_AFBC) != 0)
+                       ? g2d_set_afbc_buffer(task, layer, LAYER_OFFSET(index))
+                       : g2d_set_image_buffer(task, layer, colormode,
+                               offsets, LAYER_OFFSET(index));
+       /*
+        * It is all right for task->cmd_count to become 0 here
+        * because such a task is discarded.
+        */
+       return task->cmd_count != 0;
+}
+
+bool g2d_prepare_target(struct g2d_task *task)
+{
+       u32 colormode = task->target.commands[G2DSFR_IMG_COLORMODE].value;
+
+       task->cmd_count = ((colormode & G2D_DATAFORMAT_AFBC) != 0)
+                       ? g2d_set_afbc_buffer(task, &task->target,
+                                             TARGET_OFFSET)
+                       : g2d_set_image_buffer(task, &task->target, colormode,
+                                       dst_base_reg_offset, TARGET_OFFSET);
+
+       return task->cmd_count != 0;
+}
diff --git a/drivers/gpu/exynos/g2d/g2d_command.h b/drivers/gpu/exynos/g2d/g2d_command.h
new file mode 100644 (file)
index 0000000..3f2f9b8
--- /dev/null
@@ -0,0 +1,49 @@
+/*
+ * linux/drivers/gpu/exynos/g2d/g2d_command.h
+ *
+ * Copyright (C) 2017 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef _G2D_COMMAND_H_
+#define _G2D_COMMAND_H_
+
+#include "g2d_format.h"
+
+struct g2d_device;
+struct g2d_task;
+struct g2d_layer;
+struct g2d_task_data;
+
+struct g2d_fmt {
+       const char      *name;
+       u32             fmtvalue;
+       u8              bpp[G2D_MAX_PLANES];
+       u8              num_planes;
+};
+
+const struct g2d_fmt *g2d_find_format(u32 fmtval);
+int g2d_import_commands(struct g2d_device *g2d_dev, struct g2d_task *task,
+                       struct g2d_task_data *data, unsigned int num_sources);
+bool g2d_validate_source_commands(struct g2d_device *g2d_dev,
+                                 unsigned int i, struct g2d_layer *source,
+                                 struct g2d_layer *target);
+bool g2d_validate_target_commands(struct g2d_device *g2d_dev,
+                                 struct g2d_task *task);
+size_t g2d_get_payload(struct g2d_reg cmd[], const struct g2d_fmt *fmt,
+                      u32 flags);
+size_t g2d_get_payload_index(struct g2d_reg cmd[], const struct g2d_fmt *fmt,
+                            unsigned int idx);
+bool g2d_prepare_source(struct g2d_task *task,
+                       struct g2d_layer *layer, int index);
+bool g2d_prepare_target(struct g2d_task *task);
+
+#endif /* _G2D_COMMAND_H_ */
index 360529a0e8c57f0857afb0026db3532ad5807958..077228ee61f703d6837a1cd5a9c487a4f52582c2 100644 (file)
 #include <linux/interrupt.h>
 #include <linux/slab.h>
 #include <linux/suspend.h>
+#include <linux/uaccess.h>
 #include <linux/module.h>
 #include <linux/slab.h>
 
 #include "g2d.h"
 #include "g2d_regs.h"
 #include "g2d_task.h"
-#include "g2d_uapi.h"
+#include "g2d_uapi_process.h"
 
 #define MODULE_NAME "exynos-g2d"
 
@@ -202,22 +203,44 @@ static long g2d_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
        struct g2d_context *ctx = filp->private_data;
        struct g2d_device *g2d_dev = ctx->g2d_dev;
+       int ret = 0;
 
        switch (cmd) {
        case G2D_IOC_PROCESS:
        {
+               struct g2d_task_data __user *uptr =
+                               (struct g2d_task_data __user *)arg;
+               struct g2d_task_data data;
                struct g2d_task *task;
 
+               if (copy_from_user(&data, uptr, sizeof(data))) {
+                       dev_err(g2d_dev->dev,
+                               "%s: Failed to read g2d_task_data\n", __func__);
+                       ret = -EFAULT;
+                       break;
+               }
+
                task = g2d_get_free_task(g2d_dev);
-               if (task == NULL)
-                       return -EBUSY;
+               if (task == NULL) {
+                       ret = -EBUSY;
+                       break;
+               }
 
                kref_init(&task->starter);
 
+               ret = g2d_get_userdata(g2d_dev, task, &data);
+               if (ret < 0) {
+                       g2d_put_free_task(g2d_dev, task);
+                       break;
+               }
+
                g2d_start_task(task);
+
+               ret = g2d_wait_put_user(g2d_dev, task, uptr, data.flags);
        }
        }
-       return 0;
+
+       return ret;
 }
 
 static long g2d_compat_ioctl(struct file *filp,
index 8f51fbecab9e7f11a22df8b2f7ce7a5599319dcf..57751b961949719a44e0f3be85ba1892c1e8cbec 100644 (file)
@@ -20,6 +20,7 @@
 
 #include "g2d.h"
 #include "g2d_task.h"
+#include "g2d_uapi.h"
 
 struct g2d_task *g2d_get_active_task_from_id(struct g2d_device *g2d_dev,
                                             unsigned int id)
@@ -345,6 +346,8 @@ void g2d_dump_task(struct g2d_device *g2d_dev, unsigned int job_id)
 {
        struct g2d_task *task;
        unsigned long flags;
+       unsigned int i;
+       struct g2d_reg *regs;
 
        spin_lock_irqsave(&g2d_dev->lock_task, flags);
 
@@ -355,5 +358,11 @@ void g2d_dump_task(struct g2d_device *g2d_dev, unsigned int job_id)
 
        /* TODO: more dump task */
 
+       regs = page_address(task->cmd_page);
+
+       for (i = 0; i < task->cmd_count; i++)
+               pr_info("G2D: CMD[%03d] %#06x, %#010x\n",
+                       i, regs[i].offset, regs[i].value);
+
        spin_unlock_irqrestore(&g2d_dev->lock_task, flags);
 }
index c8fad74afd3d622bd5561661454a567bf9e8f1cc..49bc4499be7b94cf548c1cde0bda05c50dd490e7 100644 (file)
@@ -53,6 +53,7 @@ struct g2d_layer {
        int                     buffer_type;
        int                     num_buffers;
        struct g2d_buffer       buffer[G2D_MAX_PLANES];
+       struct g2d_reg          *commands;
 };
 
 #define G2D_TASKSTATE_WAITING          (1 << 1)
@@ -72,6 +73,7 @@ struct g2d_task {
        struct g2d_task         *next;
        struct g2d_device       *g2d_dev;
 
+       unsigned int            flags;
        unsigned int            job_id;
        unsigned long           state;
        struct kref             starter;
@@ -134,6 +136,13 @@ static inline void clear_task_state(struct g2d_task *task)
 
 #define is_task_state_active(task) (((task)->state & G2D_TASKSTATE_ACTIVE) != 0)
 #define is_task_state_killed(task) (((task)->state & G2D_TASKSTATE_KILLED) != 0)
+#define is_task_state_error(task)  (((task)->state & G2D_TASKSTATE_ERROR) != 0)
+
+static inline bool g2d_task_wait_completion(struct g2d_task *task)
+{
+       wait_for_completion(&task->completion);
+       return !is_task_state_error(task);
+}
 
 #define G2D_HW_TIMEOUT_MSEC    500
 
index e3ab7e5d9fb029cd19fbd9d265f3bf3863659f3b..29ce32ead6a2e83a0ccca0723f9ab2c4c1edb009 100644 (file)
 #ifndef _G2D_UAPI_H_
 #define _G2D_UAPI_H_
 
+enum g2dsfr_img_register {
+       G2DSFR_IMG_STRIDE,
+       G2DSFR_IMG_COLORMODE,
+       G2DSFR_IMG_LEFT,
+       G2DSFR_IMG_TOP,
+       G2DSFR_IMG_RIGHT,
+       G2DSFR_IMG_BOTTOM,
+       G2DSFR_IMG_WIDTH,
+       G2DSFR_IMG_HEIGHT,
+
+       G2DSFR_IMG_FIELD_COUNT,
+};
+
+enum g2dsfr_src_register {
+       G2DSFR_SRC_COMMAND = G2DSFR_IMG_FIELD_COUNT,
+       G2DSFR_SRC_SELECT,
+       G2DSFR_SRC_ROTATE,
+       G2DSFR_SRC_DSTLEFT,
+       G2DSFR_SRC_DSTTOP,
+       G2DSFR_SRC_DSTRIGHT,
+       G2DSFR_SRC_DSTBOTTOM,
+       G2DSFR_SRC_SCALECONTROL,
+       G2DSFR_SRC_XSCALE,
+       G2DSFR_SRC_YSCALE,
+       G2DSFR_SRC_XPHASE,
+       G2DSFR_SRC_YPHASE,
+       G2DSFR_SRC_COLOR,
+       G2DSFR_SRC_ALPHA,
+       G2DSFR_SRC_BLEND,
+       G2DSFR_SRC_YCBCRMODE,
+       G2DSFR_SRC_HDRMODE,
+
+       G2DSFR_SRC_FIELD_COUNT
+};
+
+enum g2dsfr_dst_register {
+       G2DSFR_DST_YCBCRMODE = G2DSFR_IMG_FIELD_COUNT,
+
+       G2DSFR_DST_FIELD_COUNT,
+};
+
+struct g2d_reg {
+       __u32 offset;
+       __u32 value;
+};
+
+#define G2D_MAX_PLANES         4
+#define G2D_MAX_SFR_COUNT      1024
+#define G2D_MAX_BUFFERS                4
+#define G2D_MAX_IMAGES         16
+#define G2D_MAX_PRIORITY       3
+#define G2D_MAX_RELEASE_FENCES (G2D_MAX_IMAGES + 1)
+
+struct g2d_commands {
+       __u32           target[G2DSFR_DST_FIELD_COUNT];
+       __u32           *source[G2D_MAX_IMAGES];
+       struct g2d_reg  *extra;
+       __u32           num_extra_regs;
+};
+
+/*
+ * The buffer types
+ * - G2D_BUFTYPE_NONE    - Invalid or no buffer is configured
+ * - G2D_BUFTYPE_EMPTY   - The layer has no buffer given from userspace
+ * - G2D_BUFTYPE_USERPTR - The buffer is identified by user address
+ * - G2D_BUFTYPE_DMABUF  - The buffer is identified by fd and offset
+ *
+ * G2D_BUFTYPE_EMPTY is only valid for a source layer that is solid fill, or
+ * for the target when its buffer is prepared in the kernel space with HWFC
+ * enabled.
+ */
+#define G2D_BUFTYPE_NONE       0
+#define G2D_BUFTYPE_EMPTY      1
+#define G2D_BUFTYPE_USERPTR    2
+#define G2D_BUFTYPE_DMABUF     3
+
+#define G2D_BUFTYPE_VALID(type)        !(((type) & G2D_BUFTYPE_DMABUF) == 0)
+
+/*
+ * struct g2d_buffer_data - Layer buffer description
+ *
+ * @userptr    : the buffer address in the address space of the user. It is
+ *               only valid if G2D_BUFTYPE_USERPTR is set in
+ *               g2d_layer_data.buffer_type.
+ * @fd         : the open file descriptor of a buffer valid in the address
+ *               space of the user. It is only valid if
+ *               G2D_BUFTYPE_DMABUF is set in g2d_layer_data.buffer_type.
+ * @offset     : the offset where the effective data starts from in the given
+ *               buffer. It is only valid if g2d_layer_data.buffer_type is
+ *               G2D_BUFTYPE_DMABUF.
+ * @length     : Length in bytes of the buffer.
+ * @reserved   : reserved for later use or for the purpose of the client
+ *               driver's private use.
+ */
+struct g2d_buffer_data {
+       union {
+               unsigned long userptr;
+               struct {
+                       __s32 fd;
+                       __u32 offset;
+               } dmabuf;
+       };
+       __u32           length;
+};
+
+/* informs the driver that it should wait for the given fence to be signaled */
+#define G2D_LAYERFLAG_ACQUIRE_FENCE    (1 << 1)
+/* informs the driver that it should protect the buffer */
+#define G2D_LAYERFLAG_SECURE           (1 << 2)
+/* informs the driver that the layer should be filled with one color */
+#define G2D_LAYERFLAG_COLORFILL                (1 << 3)
+/* Informs that the image has stride restrictions for MFC */
+#define G2D_LAYERFLAG_MFC_STRIDE       (1 << 4)
+/* cleaning of the CPU caches is unnecessary before processing */
+#define G2D_LAYERFLAG_NO_CACHECLEAN    (1 << 16)
+/* invalidation of the CPU caches is unnecessary after processing */
+#define G2D_LAYERFLAG_NO_CACHEINV      (1 << 17)
+
+/*
+ * struct g2d_layer_data - description of a layer
+ *
+ * @flags      : flags to determine if an attribute of the layer is valid or
+ *               not. The value in @flags is a combination of
+ *               G2D_LAYERFLAG_XXX.
+ * @fence      : an open file descriptor of the acquire fence if
+ *               G2D_LAYERFLAG_ACQUIRE_FENCE is set in
+ *               g2d_layer_data.flags.
+ * @buffer_type : determines whether g2d_buffer_data.fd or
+ *               g2d_buffer_data.userptr is valid. It should be one of
+ *               G2D_BUFTYPE_USERPTR, G2D_BUFTYPE_DMABUF, G2D_BUFTYPE_EMPTY.
+ * @num_buffers        : the number of valid elements in @buffer array.
+ * @buffer      : the description of the buffers.
+ */
+struct g2d_layer_data {
+       __u32                   flags;
+       __s32                   fence;
+       __u32                   buffer_type;
+       __u32                   num_buffers;
+       struct g2d_buffer_data  buffer[G2D_MAX_BUFFERS];
+};
+
+/* g2d_task.flags */
+/* dithering required at the end of the layer processing */
+#define G2D_FLAG_DITHER                (1 << 1)
+/*
+ * The G2D_IOC_PROCESS ioctl returns immediately after validation of the
+ * values in the parameters finishes. The user should then call ioctl with
+ * the G2D_IOC_WAIT_PROCESS command to get the result.
+ */
+#define G2D_FLAG_NONBLOCK      (1 << 2)
+/*
+ * indicates that the task should be processed using HWFC between the
+ * G2D and MFC devices.
+ */
+#define G2D_FLAG_HWFC          (1 << 3)
+/*
+ * indicates that the task will be processed one task at a time through
+ * the APB interface mode.
+ */
+#define G2D_FLAG_APB           (1 << 4)
+/*
+ * indicates that an error occurred during layer processing. This is set by
+ * the framework; users should check whether an error occurred when the
+ * ioctl returns.
+ */
+#define G2D_FLAG_ERROR         (1 << 5)
+
 /*
  * struct g2d_task - description of a layer processing task
  * @version: the version of the format of struct g2d_task_data
+ * @flags: the flags to indicate the status or the attributes of the layer
+ *         processing. It should be the combination of the values defined by
+ *         G2D_FLAG_XXX.
+ * @laptime_in_usec: the time taken to process the task, in microseconds
+ * @priority: the task's priority for H/W job manager.
+ * @num_source : the number of valid source layers.
+ * @num_release_fences: the number of requested release fences.
+ * @release_fences: pointer to an array of release fences with
+ *                  @num_release_fences elements
+ * @target: the description of the target layer that is the result of
+ *          the layer processing
+ * @source: the descriptions of source layers.
+ * @commands: command data
  */
 struct g2d_task_data {
        __u32                   version;
+       __u32                   flags;
+       __u32                   laptime_in_usec;
+       __u32                   priority;
+       __u32                   num_source;
+       __u32                   num_release_fences;
+       __s32                   *release_fences;
+       struct g2d_layer_data   target;
+       struct g2d_layer_data   *source;
+       struct g2d_commands     commands;
 };
 
 #define G2D_IOC_PROCESS                _IOWR('M', 4, struct g2d_task_data)
diff --git a/drivers/gpu/exynos/g2d/g2d_uapi_process.c b/drivers/gpu/exynos/g2d/g2d_uapi_process.c
new file mode 100644 (file)
index 0000000..2685349
--- /dev/null
@@ -0,0 +1,613 @@
+/*
+ * linux/drivers/gpu/exynos/g2d/g2d_uapi_process.c
+ *
+ * Copyright (C) 2017 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/uaccess.h>
+#include <linux/dma-buf.h>
+#include <linux/iommu.h>
+#include <linux/ion.h>
+#include <linux/slab.h>
+#include <linux/sched/mm.h>
+#include <linux/exynos_iovmm.h>
+
+#include "g2d.h"
+#include "g2d_task.h"
+#include "g2d_uapi_process.h"
+#include "g2d_command.h"
+
+static int g2d_prepare_buffer(struct g2d_device *g2d_dev,
+                             struct g2d_layer *layer,
+                             struct g2d_layer_data *data)
+{
+       struct device *dev = g2d_dev->dev;
+       const struct g2d_fmt *fmt = NULL;
+       struct g2d_reg *cmd = layer->commands;
+       unsigned int payload;
+       int i;
+
+       fmt = g2d_find_format(cmd[G2DSFR_IMG_COLORMODE].value);
+
+       BUG_ON(!fmt);
+
+       if ((data->num_buffers > 1) && (data->num_buffers != fmt->num_planes)) {
+               dev_err(dev, "%s: Invalid number of buffers %u for %s\n",
+                       __func__, data->num_buffers, fmt->name);
+               return -EINVAL;
+       }
+
+       if (data->num_buffers > 1) {
+               for (i = 0; i < data->num_buffers; i++) {
+                       payload = g2d_get_payload_index(cmd, fmt, i);
+                       if (data->buffer[i].length < payload) {
+                               dev_err(dev,
+                                       "%s: Too small buffer[%d]: expected %uB"
+                                       " for %ux%u(b%u)/%s but %uB is given\n",
+                                       __func__, i, payload,
+                                       cmd[G2DSFR_IMG_WIDTH].value,
+                                       cmd[G2DSFR_IMG_HEIGHT].value,
+                                       cmd[G2DSFR_IMG_BOTTOM].value,
+                                       fmt->name, data->buffer[i].length);
+                               return -EINVAL;
+                       }
+
+                       layer->buffer[i].payload = payload;
+               }
+       } else {
+               payload = g2d_get_payload(cmd, fmt, layer->flags);
+               if (data->buffer[0].length < payload) {
+                       dev_err(dev, "%s: Too small buffer: expected %uB"
+                               " for %ux%u(bt %u)/%s but %uB is given\n",
+                               __func__, payload,
+                               cmd[G2DSFR_IMG_WIDTH].value,
+                               cmd[G2DSFR_IMG_HEIGHT].value,
+                               cmd[G2DSFR_IMG_BOTTOM].value,
+                               fmt->name, data->buffer[0].length);
+                       return -EINVAL;
+               }
+
+               layer->buffer[0].payload = payload;
+       }
+
+       layer->num_buffers = data->num_buffers;
+
+       return 0;
+}
+
+static int g2d_get_dmabuf(struct g2d_task *task,
+                         struct g2d_buffer *buffer,
+                         struct g2d_buffer_data *data,
+                         enum dma_data_direction dir)
+{
+       struct device *dev = task->g2d_dev->dev;
+       struct dma_buf *dmabuf;
+       struct dma_buf_attachment *attachment;
+       struct sg_table *sgt;
+       dma_addr_t dma_addr;
+       int ret = -EINVAL;
+       int prot = IOMMU_READ | IOMMU_CACHE;
+
+       dmabuf = dma_buf_get(data->dmabuf.fd);
+       if (IS_ERR(dmabuf)) {
+               dev_err(dev, "%s: Failed to get dmabuf from fd %d\n",
+                       __func__, data->dmabuf.fd);
+               return PTR_ERR(dmabuf);
+       }
+
+       if (dmabuf->size < data->dmabuf.offset) {
+               dev_err(dev, "%s: too large offset %u for dmabuf of %zu\n",
+                       __func__, data->dmabuf.offset, dmabuf->size);
+               goto err;
+       }
+
+       if ((dmabuf->size - data->dmabuf.offset) < buffer->payload) {
+               dev_err(dev, "%s: too small dmabuf %zu/%u but reqiured %u\n",
+                       __func__, dmabuf->size,
+                       data->dmabuf.offset, buffer->payload);
+               goto err;
+       }
+
+       if (dir != DMA_TO_DEVICE)
+               prot |= IOMMU_WRITE;
+
+       attachment = dma_buf_attach(dmabuf, dev);
+       if (IS_ERR(attachment)) {
+               ret = PTR_ERR(attachment);
+               dev_err(dev,
+                       "%s: failed to attach to dmabuf (%d)\n", __func__, ret);
+               goto err;
+       }
+
+       sgt = dma_buf_map_attachment(attachment, dir);
+       if (IS_ERR(sgt)) {
+               ret = PTR_ERR(sgt);
+               dev_err(dev, "%s: failed to map dmabuf (%d)\n", __func__, ret);
+               goto err_map;
+       }
+
+       dma_addr = ion_iovmm_map(attachment, 0, buffer->payload, dir, prot);
+       if (IS_ERR_VALUE(dma_addr)) {
+               ret = (int)dma_addr;
+               dev_err(dev, "%s: failed to iovmm map for dmabuf (%d)\n",
+                       __func__, ret);
+               goto err_iovmmmap;
+       }
+
+       buffer->dmabuf.dmabuf = dmabuf;
+       buffer->dmabuf.attachment = attachment;
+       buffer->dmabuf.sgt = sgt;
+       buffer->dmabuf.offset = data->dmabuf.offset;
+       buffer->dma_addr = dma_addr + data->dmabuf.offset;
+
+       return 0;
+err_iovmmmap:
+       dma_buf_unmap_attachment(attachment, sgt, dir);
+err_map:
+       dma_buf_detach(dmabuf, attachment);
+err:
+       dma_buf_put(dmabuf);
+       return ret;
+}
+
+static int g2d_put_dmabuf(struct g2d_device *g2d_dev, struct g2d_buffer *buffer,
+                         enum dma_data_direction dir)
+{
+       ion_iovmm_unmap(buffer->dmabuf.attachment,
+                       buffer->dma_addr - buffer->dmabuf.offset);
+       dma_buf_unmap_attachment(buffer->dmabuf.attachment,
+                                buffer->dmabuf.sgt, dir);
+       dma_buf_detach(buffer->dmabuf.dmabuf, buffer->dmabuf.attachment);
+       dma_buf_put(buffer->dmabuf.dmabuf);
+
+       memset(buffer, 0, sizeof(*buffer));
+
+       return 0;
+}
+
+#define is_vma_cached(vma)                                             \
+       ((pgprot_val(pgprot_noncached((vma)->vm_page_prot)) !=          \
+                         pgprot_val((vma)->vm_page_prot)) &&           \
+          (pgprot_val(pgprot_writecombine((vma)->vm_page_prot)) !=     \
+           pgprot_val((vma)->vm_page_prot)))
+
+static int g2d_get_userptr(struct g2d_task *task,
+                          struct g2d_buffer *buffer,
+                          struct g2d_buffer_data *data,
+                          enum dma_data_direction dir)
+{
+       struct device *dev = task->g2d_dev->dev;
+       unsigned long end = data->userptr + data->length;
+       struct vm_area_struct *vma;
+       struct vm_area_struct *tvma;
+       struct mm_struct *mm;
+       int ret = -EINVAL;
+       int prot = IOMMU_READ;
+
+       mm = get_task_mm(current);
+
+       down_read(&mm->mmap_sem);
+
+       vma = find_vma(mm, data->userptr);
+       if (!vma || (data->userptr < vma->vm_start)) {
+               dev_err(dev, "%s: invalid address %#lx\n",
+                       __func__, data->userptr);
+               goto err_novma;
+       }
+
+       tvma = kmemdup(vma, sizeof(*vma), GFP_KERNEL);
+       if (!tvma) {
+               ret = -ENOMEM;
+               goto err_novma;
+       }
+
+       if (dir != DMA_TO_DEVICE)
+               prot |= IOMMU_WRITE;
+       if (is_vma_cached(vma))
+               prot |= IOMMU_CACHE;
+
+       buffer->userptr.vma = tvma;
+
+       tvma->vm_next = NULL;
+       tvma->vm_prev = NULL;
+       tvma->vm_mm = mm;
+
+       while (end > vma->vm_end) {
+               if (!!(vma->vm_flags & VM_PFNMAP)) {
+                       dev_err(dev, "%s: non-linear pfnmap is not supported\n",
+                               __func__);
+                       goto err_vma;
+               }
+
+               if ((vma->vm_next == NULL) ||
+                               (vma->vm_end != vma->vm_next->vm_start)) {
+                       dev_err(dev, "%s: invalid size %u bytes\n",
+                                       __func__, data->length);
+                       goto err_vma;
+               }
+
+               vma = vma->vm_next;
+               tvma->vm_next = kmemdup(vma, sizeof(*vma), GFP_KERNEL);
+               if (tvma->vm_next == NULL) {
+                       dev_err(dev, "%s: failed to allocate vma\n", __func__);
+                       ret = -ENOMEM;
+                       goto err_vma;
+               }
+               tvma->vm_next->vm_prev = tvma;
+               tvma->vm_next->vm_next = NULL;
+               tvma = tvma->vm_next;
+       }
+
+       for (vma = buffer->userptr.vma; vma != NULL; vma = vma->vm_next) {
+               if (vma->vm_file)
+                       get_file(vma->vm_file);
+               if (vma->vm_ops && vma->vm_ops->open)
+                       vma->vm_ops->open(vma);
+       }
+
+       buffer->dma_addr = exynos_iovmm_map_userptr(dev, data->userptr,
+                                                   data->length, prot);
+       if (IS_ERR_VALUE(buffer->dma_addr)) {
+               ret = (int)buffer->dma_addr;
+               dev_err(dev, "%s: failed to iovmm map userptr\n", __func__);
+               goto err_map;
+       }
+
+       up_read(&mm->mmap_sem);
+
+       buffer->userptr.addr = data->userptr;
+
+       return 0;
+err_map:
+       buffer->dma_addr = 0;
+
+       for (vma = buffer->userptr.vma; vma != NULL; vma = vma->vm_next) {
+               if (vma->vm_file)
+                       fput(vma->vm_file);
+               if (vma->vm_ops && vma->vm_ops->close)
+                       vma->vm_ops->close(vma);
+       }
+err_vma:
+       while (tvma) {
+               vma = tvma;
+               tvma = tvma->vm_prev;
+               kfree(vma);
+       }
+
+       buffer->userptr.vma = NULL;
+err_novma:
+       up_read(&mm->mmap_sem);
+
+       mmput(mm);
+
+       return ret;
+}
+
+static int g2d_put_userptr(struct g2d_device *g2d_dev,
+                       struct g2d_buffer *buffer, enum dma_data_direction dir)
+{
+       struct vm_area_struct *vma = buffer->userptr.vma;
+       struct mm_struct *mm;
+
+       BUG_ON((vma == NULL) || (buffer->userptr.addr == 0));
+
+       mm = vma->vm_mm;
+
+       exynos_iovmm_unmap_userptr(g2d_dev->dev, buffer->dma_addr);
+
+       /*
+        * Calling vm_ops->close() does not strictly require mmap_sem, but
+        * some drivers expect it to be held in their close() callback.
+        */
+       down_read(&mm->mmap_sem);
+
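+       /*
+        * Release the references taken in g2d_get_userptr() and free the
+        * duplicated vma descriptors.
+        */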
+       while (vma) {
+               struct vm_area_struct *tvma;
+
+               if (vma->vm_ops && vma->vm_ops->close)
+                       vma->vm_ops->close(vma);
+
+               if (vma->vm_file)
+                       fput(vma->vm_file);
+
+               tvma = vma;
+               vma = vma->vm_next;
+
+               kfree(tvma);
+       }
+
+       up_read(&mm->mmap_sem);
+
+       mmput(mm);
+
+       memset(buffer, 0, sizeof(*buffer));
+
+       return 0;
+}
+
+static int g2d_get_buffer(struct g2d_device *g2d_dev,
+                               struct g2d_layer *layer,
+                               struct g2d_layer_data *data,
+                               enum dma_data_direction dir)
+{
+       int ret = 0;
+       unsigned int i;
+       int (*get_func)(struct g2d_task *, struct g2d_buffer *,
+                       struct g2d_buffer_data *, enum dma_data_direction);
+       int (*put_func)(struct g2d_device *, struct g2d_buffer *,
+                       enum dma_data_direction);
+
+       if (layer->buffer_type == G2D_BUFTYPE_DMABUF) {
+               get_func = g2d_get_dmabuf;
+               put_func = g2d_put_dmabuf;
+       } else if (layer->buffer_type == G2D_BUFTYPE_USERPTR) {
+               get_func = g2d_get_userptr;
+               put_func = g2d_put_userptr;
+       } else {
+               BUG();
+       }
+
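+       /*
+        * Import every buffer of the layer. If one of them fails, the
+        * buffers imported so far are put back.
+        */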
+       for (i = 0; i < layer->num_buffers; i++) {
+               ret = get_func(layer->task, &layer->buffer[i],
+                              &data->buffer[i], dir);
+               if (ret) {
+                       while (i-- > 0)
+                               put_func(g2d_dev, &layer->buffer[i], dir);
+                       return ret;
+               }
+
+               layer->buffer[i].length = data->buffer[i].length;
+       }
+
+       return 0;
+}
+
+static void g2d_put_buffer(struct g2d_device *g2d_dev,
+                       u32 buffer_type, struct g2d_buffer buffer[],
+                       unsigned int num_buffer, enum dma_data_direction dir)
+{
+       unsigned int i;
+       int (*put_func)(struct g2d_device *, struct g2d_buffer *,
+                       enum dma_data_direction);
+
+       switch (buffer_type) {
+       case G2D_BUFTYPE_DMABUF:
+               put_func = g2d_put_dmabuf;
+               break;
+       case G2D_BUFTYPE_USERPTR:
+               put_func = g2d_put_userptr;
+               break;
+       case G2D_BUFTYPE_EMPTY:
+               return;
+       default:
+               BUG();
+       }
+
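+       /* release every buffer of the layer */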
+       for (i = 0; i < num_buffer; i++)
+               put_func(g2d_dev, &buffer[i], dir);
+}
+
+static void g2d_put_image(struct g2d_device *g2d_dev, struct g2d_layer *layer,
+                         enum dma_data_direction dir)
+{
+       g2d_put_buffer(g2d_dev, layer->buffer_type,
+                       layer->buffer, layer->num_buffers, dir);
+
+       layer->buffer_type = G2D_BUFTYPE_NONE;
+}
+
+static int g2d_get_source(struct g2d_device *g2d_dev, struct g2d_task *task,
+                         struct g2d_layer *layer, struct g2d_layer_data *data,
+                         int index)
+{
+       struct device *dev = g2d_dev->dev;
+       int ret;
+
+       layer->flags = data->flags;
+       layer->buffer_type = data->buffer_type;
+
+       if (!G2D_BUFTYPE_VALID(layer->buffer_type)) {
+               dev_err(dev, "%s: invalid buffer type %u specified\n",
+                       __func__, layer->buffer_type);
+               return -EINVAL;
+       }
+
+       /* color fill has no buffer */
+       if (layer->buffer_type == G2D_BUFTYPE_EMPTY) {
+               if (layer->flags & G2D_LAYERFLAG_COLORFILL) {
+                       layer->num_buffers = 0;
+                       layer->flags &= ~G2D_LAYERFLAG_ACQUIRE_FENCE;
+                       return 0;
+               }
+
+               dev_err(dev, "%s: DMA layer %d has no buffer - flags: %#x\n",
+                       __func__, index, layer->flags);
+               return -EINVAL;
+       }
+
+       if (!g2d_validate_source_commands(g2d_dev, index, layer, &task->target))
+               return -EINVAL;
+
+       ret = g2d_prepare_buffer(g2d_dev, layer, data);
+       if (ret)
+               return ret;
+
+       ret = g2d_get_buffer(g2d_dev, layer, data, DMA_TO_DEVICE);
+       if (ret)
+               return ret;
+
+       if (!g2d_prepare_source(task, layer, index)) {
+               dev_err(dev, "%s: Failed to prepare source layer %d\n",
+                       __func__, index);
+               g2d_put_buffer(g2d_dev, layer->buffer_type, layer->buffer,
+                               layer->num_buffers, DMA_TO_DEVICE);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int g2d_get_sources(struct g2d_device *g2d_dev, struct g2d_task *task,
+                          struct g2d_layer_data __user *src)
+{
+       struct device *dev = g2d_dev->dev;
+       unsigned int i;
+       int ret;
+
+       for (i = 0; i < task->num_source; i++, src++) {
+               struct g2d_layer_data data;
+
+               if (copy_from_user(&data, src, sizeof(*src))) {
+                       dev_err(dev,
+                               "%s: Failed to read source image data %d/%d\n",
+                               __func__, i, task->num_source);
+                       ret = -EFAULT;
+                       break;
+               }
+
+               ret = g2d_get_source(g2d_dev, task,
+                                    &task->source[i], &data, i);
+               if (ret)
+                       break;
+       }
+
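+       /* an error occurred above: release the sources already imported */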
+       if (i < task->num_source) {
+               while (i-- > 0)
+                       g2d_put_image(g2d_dev, &task->source[i], DMA_TO_DEVICE);
+       }
+
+       return ret;
+}
+
+static int g2d_get_target(struct g2d_device *g2d_dev,
+                         struct g2d_task *task, struct g2d_layer_data *data)
+{
+       struct device *dev = g2d_dev->dev;
+       struct g2d_layer *target = &task->target;
+       int ret;
+
+       target->flags = data->flags;
+       target->buffer_type = data->buffer_type;
+
+       if (!G2D_BUFTYPE_VALID(target->buffer_type)) {
+               dev_err(dev,
+                       "%s: invalid buffer type %u specified for target\n",
+                       __func__, target->buffer_type);
+               return -EINVAL;
+       }
+
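+       /*
+        * An empty target is only allowed for HWFC tasks whose buffers are
+        * provided by the driver itself.
+        */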
+       if (target->buffer_type == G2D_BUFTYPE_EMPTY) {
+               if (!!(task->flags & G2D_FLAG_HWFC)) {
+                       /* TODO: set the hwfc buffers from g2d_dev */
+               } else {
+                       dev_err(dev, "%s: target has no buffer - flags: %#x\n",
+                               __func__, task->flags);
+                       return -EINVAL;
+               }
+       }
+
+       if (!g2d_validate_target_commands(g2d_dev, task))
+               return -EINVAL;
+
+       ret = g2d_prepare_buffer(g2d_dev, target, data);
+       if (ret)
+               return ret;
+
+       ret = g2d_get_buffer(g2d_dev, target, data, DMA_FROM_DEVICE);
+       if (ret)
+               return ret;
+
+       if (!g2d_prepare_target(task)) {
+               dev_err(dev, "%s: Failed to prepare target layer\n", __func__);
+
+               g2d_put_buffer(g2d_dev, target->buffer_type, target->buffer,
+                               target->num_buffers, DMA_FROM_DEVICE);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+int g2d_get_userdata(struct g2d_device *g2d_dev,
+                    struct g2d_task *task, struct g2d_task_data *data)
+{
+       struct device *dev = g2d_dev->dev;
+       int ret;
+
+       /* invalid range check */
+       if ((data->num_source < 1) || (data->num_source > G2D_MAX_IMAGES)) {
+               dev_err(dev, "%s: Invalid number of source images %u\n",
+                       __func__, data->num_source);
+               return -EINVAL;
+       }
+
+       if (data->priority > G2D_MAX_PRIORITY) {
+               dev_err(dev, "%s: Invalid priority %u\n",
+                       __func__, data->priority);
+               return -EINVAL;
+       }
+
+       task->flags = data->flags;
+       task->priority = data->priority;
+       task->num_source = data->num_source;
+
+       ret = g2d_import_commands(g2d_dev, task, data, task->num_source);
+       if (ret < 0)
+               return ret;
+
+       ret = g2d_get_target(g2d_dev, task, &data->target);
+       if (ret)
+               return ret;
+
+       ret = g2d_get_sources(g2d_dev, task, data->source);
+       if (ret)
+               goto err_src;
+
+       return 0;
+err_src:
+       g2d_put_image(g2d_dev, &task->target, DMA_FROM_DEVICE);
+
+       return ret;
+}
+
+static void g2d_put_images(struct g2d_device *g2d_dev, struct g2d_task *task)
+{
+       unsigned int i;
+
+       for (i = 0; i < task->num_source; i++)
+               g2d_put_image(g2d_dev, &task->source[i], DMA_TO_DEVICE);
+
+       g2d_put_image(g2d_dev, &task->target, DMA_FROM_DEVICE);
+       task->num_source = 0;
+}
+
+int g2d_wait_put_user(struct g2d_device *g2d_dev, struct g2d_task *task,
+                     struct g2d_task_data __user *uptr, u32 userflag)
+{
+       int ret;
+
+       if (!g2d_task_wait_completion(task)) {
+               userflag |= G2D_FLAG_ERROR;
+               ret = put_user(userflag, &uptr->flags);
+       } else {
+               u32 laptime_in_usec;
+
+               /* the lap time is only valid once the task has completed */
+               laptime_in_usec = (u32)ktime_us_delta(task->ktime_end,
+                                                     task->ktime_begin);
+
+               ret = put_user(laptime_in_usec, &uptr->laptime_in_usec);
+       }
+
+       g2d_put_images(g2d_dev, task);
+
+       g2d_put_free_task(g2d_dev, task);
+
+       return ret;
+}
diff --git a/drivers/gpu/exynos/g2d/g2d_uapi_process.h b/drivers/gpu/exynos/g2d/g2d_uapi_process.h
new file mode 100644 (file)
index 0000000..c2c78fa
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * linux/drivers/gpu/exynos/g2d/g2d_uapi_process.h
+ *
+ * Copyright (C) 2017 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef _G2D_UAPI_PROCESS_H_
+#define _G2D_UAPI_PROCESS_H_
+
+#include "g2d_uapi.h"
+
+struct g2d_device;
+struct g2d_task;
+
+int g2d_get_userdata(struct g2d_device *g2d_dev,
+                    struct g2d_task *task, struct g2d_task_data *data);
+int g2d_wait_put_user(struct g2d_device *g2d_dev, struct g2d_task *task,
+                     struct g2d_task_data __user *uptr, u32 userflag);
+
+#endif /* _G2D_UAPI_PROCESS_H_ */