on various Qualcomm SoCs.
To compile this driver as a module choose m here.
+config VIDEO_EXYNOS_SCALER
+ bool "EXYNOS Scaler Driver"
+ depends on VIDEO_DEV && VIDEO_V4L2 && HAS_DMA
+ depends on ARCH_EXYNOS
+ select V4L2_MEM2MEM_DEV
+ select MEDIA_M2M1SHOT
+ select VIDEOBUF2_DMA_SG
+ default n
+ ---help---
+ This is a v4l2 driver for EXYNOS Scaler (MSCL) device.
+
endif # V4L_MEM2MEM_DRIVERS
# TI VIDEO PORT Helper Modules
obj-$(CONFIG_VIDEO_QCOM_VENUS) += qcom/venus/
obj-y += meson/
+
+obj-$(CONFIG_VIDEO_EXYNOS_SCALER) += exynos/scaler/
--- /dev/null
+#
+# Copyright (c) 2012 Samsung Electronics Co., Ltd.
+# http://www.samsung.com
+#
+# Licensed under GPLv2
+#
+
+scaler-objs := scaler-core.o scaler-regs.o
+obj-$(CONFIG_VIDEO_EXYNOS_SCALER) += scaler.o
--- /dev/null
+/*
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Core file for Samsung EXYNOS Scaler driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/version.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/exynos_iovmm.h>
+#include <linux/smc.h>
+#include <linux/exynos_ion.h>
+
+#include <media/v4l2-ioctl.h>
+#include <media/m2m1shot.h>
+#include <media/m2m1shot-helper.h>
+#include <media/videobuf2-dma-sg.h>
+
+#include "scaler.h"
+#include "scaler-regs.h"
+
+/* Protection IDs of Scaler are 1 and 2. */
+#define SC_SMC_PROTECTION_ID(instance) (1 + (instance))
+
+int sc_log_level;
+module_param_named(sc_log_level, sc_log_level, int, 0644);
+
+int sc_set_blur;
+module_param_named(sc_set_blur, sc_set_blur, int, 0644);
+
+/*
+ * If true, the latency of the H/W operation is written to
+ * v4l2_buffer.reserved2 in nanoseconds. This must not be enabled in
+ * real use-cases because v4l2_buffer.reserved2 may be used for other
+ * purposes. The latency is written to the destination buffer.
+ */
+int __measure_hw_latency;
+module_param_named(measure_hw_latency, __measure_hw_latency, int, 0644);
+
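+/*
+ * Driver-private wrapper of the m2m buffer. @ktime is a timestamp used
+ * to compute the H/W latency reported when measure_hw_latency is set.
+ */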
+struct vb2_sc_buffer {
+ struct v4l2_m2m_buffer mb;
+ struct sc_ctx *ctx;
+ ktime_t ktime;
+};
+
+static const struct sc_fmt sc_formats[] = {
+ {
+ .name = "RGB565",
+ .pixelformat = V4L2_PIX_FMT_RGB565,
+ .cfg_val = SCALER_CFG_FMT_RGB565,
+ .bitperpixel = { 16 },
+ .num_planes = 1,
+ .num_comp = 1,
+ .is_rgb = 1,
+ }, {
+ .name = "RGB1555",
+ .pixelformat = V4L2_PIX_FMT_RGB555X,
+ .cfg_val = SCALER_CFG_FMT_ARGB1555,
+ .bitperpixel = { 16 },
+ .num_planes = 1,
+ .num_comp = 1,
+ .is_rgb = 1,
+ }, {
+ .name = "ARGB4444",
+ .pixelformat = V4L2_PIX_FMT_RGB444,
+ .cfg_val = SCALER_CFG_FMT_ARGB4444,
+ .bitperpixel = { 16 },
+ .num_planes = 1,
+ .num_comp = 1,
+ .is_rgb = 1,
+ }, { /* ARGB32 with byte swap in half-words and half-word swap in words */
+ .name = "RGBA8888",
+ .pixelformat = V4L2_PIX_FMT_RGB32,
+ .cfg_val = SCALER_CFG_FMT_RGBA8888 |
+ SCALER_CFG_BYTE_HWORD_SWAP,
+ .bitperpixel = { 32 },
+ .num_planes = 1,
+ .num_comp = 1,
+ .is_rgb = 1,
+ }, {
+ .name = "BGRA8888",
+ .pixelformat = V4L2_PIX_FMT_BGR32,
+ .cfg_val = SCALER_CFG_FMT_ARGB8888,
+ .bitperpixel = { 32 },
+ .num_planes = 1,
+ .num_comp = 1,
+ .is_rgb = 1,
+ }, {
+ .name = "YUV 4:2:0 contiguous 2-planar, Y/CbCr",
+ .pixelformat = V4L2_PIX_FMT_NV12,
+ .cfg_val = SCALER_CFG_FMT_YCBCR420_2P,
+ .bitperpixel = { 12 },
+ .num_planes = 1,
+ .num_comp = 2,
+ .h_shift = 1,
+ .v_shift = 1,
+ }, {
+ .name = "YUV 4:2:0 contiguous 2-planar, Y/CrCb",
+ .pixelformat = V4L2_PIX_FMT_NV21,
+ .cfg_val = SCALER_CFG_FMT_YCRCB420_2P,
+ .bitperpixel = { 12 },
+ .num_planes = 1,
+ .num_comp = 2,
+ .h_shift = 1,
+ .v_shift = 1,
+ }, {
+ .name = "YUV 4:2:0 non-contiguous 2-planar, Y/CbCr",
+ .pixelformat = V4L2_PIX_FMT_NV12M,
+ .cfg_val = SCALER_CFG_FMT_YCBCR420_2P,
+ .bitperpixel = { 8, 4 },
+ .num_planes = 2,
+ .num_comp = 2,
+ .h_shift = 1,
+ .v_shift = 1,
+ }, {
+ .name = "YUV 4:2:0 non-contiguous 2-planar, Y/CrCb",
+ .pixelformat = V4L2_PIX_FMT_NV21M,
+ .cfg_val = SCALER_CFG_FMT_YCRCB420_2P,
+ .bitperpixel = { 8, 4 },
+ .num_planes = 2,
+ .num_comp = 2,
+ .h_shift = 1,
+ .v_shift = 1,
+ }, {
+ .name = "YUV 4:2:0 non-contiguous 2-planar, Y/CbCr, tiled",
+ .pixelformat = V4L2_PIX_FMT_NV12MT_16X16,
+ .cfg_val = SCALER_CFG_FMT_YCBCR420_2P |
+ SCALER_CFG_TILE_EN,
+ .bitperpixel = { 8, 4 },
+ .num_planes = 2,
+ .num_comp = 2,
+ .h_shift = 1,
+ .v_shift = 1,
+ }, {
+ .name = "YUV 4:2:0 contiguous 3-planar, Y/Cb/Cr",
+ .pixelformat = V4L2_PIX_FMT_YUV420, /* I420 */
+ .cfg_val = SCALER_CFG_FMT_YCBCR420_3P,
+ .bitperpixel = { 12 },
+ .num_planes = 1,
+ .num_comp = 3,
+ .h_shift = 1,
+ .v_shift = 1,
+ }, {
+ .name = "YVU 4:2:0 contiguous 3-planar, Y/Cr/Cb",
+ .pixelformat = V4L2_PIX_FMT_YVU420, /* YV12 */
+ .cfg_val = SCALER_CFG_FMT_YCBCR420_3P,
+ .bitperpixel = { 12 },
+ .num_planes = 1,
+ .num_comp = 3,
+ .h_shift = 1,
+ .v_shift = 1,
+ }, {
+ .name = "YUV 4:2:0 non-contiguous 3-planar, Y/Cb/Cr",
+ .pixelformat = V4L2_PIX_FMT_YUV420M,
+ .cfg_val = SCALER_CFG_FMT_YCBCR420_3P,
+ .bitperpixel = { 8, 2, 2 },
+ .num_planes = 3,
+ .num_comp = 3,
+ .h_shift = 1,
+ .v_shift = 1,
+ }, {
+ .name = "YVU 4:2:0 non-contiguous 3-planar, Y/Cr/Cb",
+ .pixelformat = V4L2_PIX_FMT_YVU420M,
+ .cfg_val = SCALER_CFG_FMT_YCBCR420_3P,
+ .bitperpixel = { 8, 2, 2 },
+ .num_planes = 3,
+ .num_comp = 3,
+ .h_shift = 1,
+ .v_shift = 1,
+ }, {
+ .name = "YUV 4:2:2 packed, YCbYCr",
+ .pixelformat = V4L2_PIX_FMT_YUYV,
+ .cfg_val = SCALER_CFG_FMT_YUYV,
+ .bitperpixel = { 16 },
+ .num_planes = 1,
+ .num_comp = 1,
+ .h_shift = 1,
+ }, {
+ .name = "YUV 4:2:2 packed, CbYCrY",
+ .pixelformat = V4L2_PIX_FMT_UYVY,
+ .cfg_val = SCALER_CFG_FMT_UYVY,
+ .bitperpixel = { 16 },
+ .num_planes = 1,
+ .num_comp = 1,
+ .h_shift = 1,
+ }, {
+ .name = "YUV 4:2:2 packed, YCrYCb",
+ .pixelformat = V4L2_PIX_FMT_YVYU,
+ .cfg_val = SCALER_CFG_FMT_YVYU,
+ .bitperpixel = { 16 },
+ .num_planes = 1,
+ .num_comp = 1,
+ .h_shift = 1,
+ }, {
+ .name = "YUV 4:2:2 contiguous 2-planar, Y/CbCr",
+ .pixelformat = V4L2_PIX_FMT_NV16,
+ .cfg_val = SCALER_CFG_FMT_YCBCR422_2P,
+ .bitperpixel = { 16 },
+ .num_planes = 1,
+ .num_comp = 2,
+ .h_shift = 1,
+ }, {
+ .name = "YUV 4:2:2 contiguous 2-planar, Y/CrCb",
+ .pixelformat = V4L2_PIX_FMT_NV61,
+ .cfg_val = SCALER_CFG_FMT_YCRCB422_2P,
+ .bitperpixel = { 16 },
+ .num_planes = 1,
+ .num_comp = 2,
+ .h_shift = 1,
+ }, {
+ .name = "YUV 4:2:2 contiguous 3-planar, Y/Cb/Cr",
+ .pixelformat = V4L2_PIX_FMT_YUV422P,
+ .cfg_val = SCALER_CFG_FMT_YCBCR422_3P,
+ .bitperpixel = { 16 },
+ .num_planes = 1,
+ .num_comp = 3,
+ .h_shift = 1,
+ }, {
+ .name = "YUV 4:2:0 contiguous Y/CbCr",
+ .pixelformat = V4L2_PIX_FMT_NV12N,
+ .cfg_val = SCALER_CFG_FMT_YCBCR420_2P,
+ .bitperpixel = { 12 },
+ .num_planes = 1,
+ .num_comp = 2,
+ .h_shift = 1,
+ .v_shift = 1,
+ }, {
+ .name = "YUV 4:2:0 contiguous Y/CbCr 10-bit",
+ .pixelformat = V4L2_PIX_FMT_NV12N_10B,
+ .cfg_val = SCALER_CFG_FMT_YCBCR420_2P,
+ .bitperpixel = { 15 },
+ .num_planes = 1,
+ .num_comp = 2,
+ .h_shift = 1,
+ .v_shift = 1,
+ }, {
+ .name = "YUV 4:2:0 contiguous 3-planar Y/Cb/Cr",
+ .pixelformat = V4L2_PIX_FMT_YUV420N,
+ .cfg_val = SCALER_CFG_FMT_YCBCR420_3P,
+ .bitperpixel = { 12 },
+ .num_planes = 1,
+ .num_comp = 3,
+ .h_shift = 1,
+ .v_shift = 1,
+ },
+};
+
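+/*
+ * Scaling ratios are kept as 32-bit fixed-point values:
+ * ratio = (2^20 * source) / destination. SCALE_RATIO_CONST(1, 8) thus
+ * means an 8x up-scale and SCALE_RATIO_CONST(4, 1) a 1/4 down-scale.
+ * SCALE_RATIO() below avoids the 64-bit division whenever 2^20 * (x)
+ * still fits in 32 bits, i.e. for (x) < 2048.
+ */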
+#define SCALE_RATIO_CONST(x, y) (u32)((1048576ULL * (x)) / (y))
+
+#define SCALE_RATIO(x, y) \
+({ \
+ u32 ratio; \
+ if (__builtin_constant_p(x) && __builtin_constant_p(y)) {\
+ ratio = SCALE_RATIO_CONST(x, y); \
+ } else if ((x) < 2048) { \
+ ratio = (u32)((1048576UL * (x)) / (y)); \
+ } else { \
+ unsigned long long dividend = 1048576ULL; \
+ dividend *= (x); \
+ do_div(dividend, (y)); \
+ ratio = (u32)dividend; \
+ } \
+ ratio; \
+})
+
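+/*
+ * Scaling ratio that includes the fractional part of the source length
+ * extracted by sc_get_fract_val() from the crop rectangle.
+ */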
+#define SCALE_RATIO_FRACT(x, y, z) (u32)((((x) << 20) + SCALER_FRACT_VAL(y)) / (z))
+
+/* entries must be listed in descending order of SCALER_VERSION(x, y, z) */
+static const u32 sc_version_table[][2] = {
+ { 0x80060007, SCALER_VERSION(4, 2, 0) }, /* SC_BI */
+ { 0xA0000013, SCALER_VERSION(4, 0, 1) },
+ { 0xA0000012, SCALER_VERSION(4, 0, 1) },
+ { 0x80050007, SCALER_VERSION(4, 0, 0) }, /* SC_POLY */
+ { 0xA000000B, SCALER_VERSION(3, 0, 2) },
+ { 0xA000000A, SCALER_VERSION(3, 0, 2) },
+ { 0x8000006D, SCALER_VERSION(3, 0, 1) },
+ { 0x80000068, SCALER_VERSION(3, 0, 0) },
+ { 0x8004000C, SCALER_VERSION(2, 2, 0) },
+ { 0x80000008, SCALER_VERSION(2, 1, 1) },
+ { 0x80000048, SCALER_VERSION(2, 1, 0) },
+ { 0x80010000, SCALER_VERSION(2, 0, 1) },
+ { 0x80000047, SCALER_VERSION(2, 0, 0) },
+};
+
+static const struct sc_variant sc_variant[] = {
+ {
+ .limit_input = {
+ .min_w = 16,
+ .min_h = 16,
+ .max_w = 8192,
+ .max_h = 8192,
+ },
+ .limit_output = {
+ .min_w = 4,
+ .min_h = 4,
+ .max_w = 8192,
+ .max_h = 8192,
+ },
+ .version = SCALER_VERSION(4, 2, 0),
+ .sc_up_max = SCALE_RATIO_CONST(1, 8),
+ .sc_down_min = SCALE_RATIO_CONST(4, 1),
+ .sc_down_swmin = SCALE_RATIO_CONST(16, 1),
+ .blending = 1,
+ .prescale = 0,
+ .ratio_20bit = 1,
+ .initphase = 1,
+ }, {
+ .limit_input = {
+ .min_w = 16,
+ .min_h = 16,
+ .max_w = 8192,
+ .max_h = 8192,
+ },
+ .limit_output = {
+ .min_w = 4,
+ .min_h = 4,
+ .max_w = 8192,
+ .max_h = 8192,
+ },
+ .version = SCALER_VERSION(4, 0, 1),
+ .sc_up_max = SCALE_RATIO_CONST(1, 8),
+ .sc_down_min = SCALE_RATIO_CONST(4, 1),
+ .sc_down_swmin = SCALE_RATIO_CONST(16, 1),
+ .blending = 0,
+ .prescale = 0,
+ .ratio_20bit = 1,
+ .initphase = 1,
+ }, {
+ .limit_input = {
+ .min_w = 16,
+ .min_h = 16,
+ .max_w = 8192,
+ .max_h = 8192,
+ },
+ .limit_output = {
+ .min_w = 4,
+ .min_h = 4,
+ .max_w = 8192,
+ .max_h = 8192,
+ },
+ .version = SCALER_VERSION(4, 0, 0),
+ .sc_up_max = SCALE_RATIO_CONST(1, 8),
+ .sc_down_min = SCALE_RATIO_CONST(4, 1),
+ .sc_down_swmin = SCALE_RATIO_CONST(16, 1),
+ .blending = 0,
+ .prescale = 0,
+ .ratio_20bit = 0,
+ .initphase = 0,
+ }, {
+ .limit_input = {
+ .min_w = 16,
+ .min_h = 16,
+ .max_w = 8192,
+ .max_h = 8192,
+ },
+ .limit_output = {
+ .min_w = 4,
+ .min_h = 4,
+ .max_w = 8192,
+ .max_h = 8192,
+ },
+ .version = SCALER_VERSION(3, 0, 0),
+ .sc_up_max = SCALE_RATIO_CONST(1, 8),
+ .sc_down_min = SCALE_RATIO_CONST(16, 1),
+ .sc_down_swmin = SCALE_RATIO_CONST(16, 1),
+ .blending = 0,
+ .prescale = 1,
+ .ratio_20bit = 1,
+ .initphase = 1,
+ }, {
+ .limit_input = {
+ .min_w = 16,
+ .min_h = 16,
+ .max_w = 8192,
+ .max_h = 8192,
+ },
+ .limit_output = {
+ .min_w = 4,
+ .min_h = 4,
+ .max_w = 8192,
+ .max_h = 8192,
+ },
+ .version = SCALER_VERSION(2, 2, 0),
+ .sc_up_max = SCALE_RATIO_CONST(1, 8),
+ .sc_down_min = SCALE_RATIO_CONST(4, 1),
+ .sc_down_swmin = SCALE_RATIO_CONST(16, 1),
+ .blending = 1,
+ .prescale = 0,
+ .ratio_20bit = 0,
+ .initphase = 0,
+ }, {
+ .limit_input = {
+ .min_w = 16,
+ .min_h = 16,
+ .max_w = 8192,
+ .max_h = 8192,
+ },
+ .limit_output = {
+ .min_w = 4,
+ .min_h = 4,
+ .max_w = 8192,
+ .max_h = 8192,
+ },
+ .version = SCALER_VERSION(2, 0, 1),
+ .sc_up_max = SCALE_RATIO_CONST(1, 8),
+ .sc_down_min = SCALE_RATIO_CONST(4, 1),
+ .sc_down_swmin = SCALE_RATIO_CONST(16, 1),
+ .blending = 0,
+ .prescale = 0,
+ .ratio_20bit = 0,
+ .initphase = 0,
+ }, {
+ .limit_input = {
+ .min_w = 16,
+ .min_h = 16,
+ .max_w = 8192,
+ .max_h = 8192,
+ },
+ .limit_output = {
+ .min_w = 4,
+ .min_h = 4,
+ .max_w = 4096,
+ .max_h = 4096,
+ },
+ .version = SCALER_VERSION(2, 0, 0),
+ .sc_up_max = SCALE_RATIO_CONST(1, 8),
+ .sc_down_min = SCALE_RATIO_CONST(4, 1),
+ .sc_down_swmin = SCALE_RATIO_CONST(16, 1),
+ .blending = 0,
+ .prescale = 0,
+ .ratio_20bit = 0,
+ .initphase = 0,
+ },
+};
+
+/* Find the matching format */
+static const struct sc_fmt *sc_find_format(struct sc_dev *sc,
+ u32 pixfmt, bool output_buf)
+{
+ const struct sc_fmt *sc_fmt;
+ unsigned long i;
+
+ for (i = 0; i < ARRAY_SIZE(sc_formats); ++i) {
+ sc_fmt = &sc_formats[i];
+ if (sc_fmt->pixelformat == pixfmt) {
+ if (!!(sc_fmt->cfg_val & SCALER_CFG_TILE_EN)) {
+ /* tile mode is supported only for the source and before v3.0.0 */
+ if (sc->version >= SCALER_VERSION(3, 0, 0))
+ return NULL;
+ if (!output_buf)
+ return NULL;
+ }
+ /* byte swap is not supported before v2.1.0 */
+ if (!!(sc_fmt->cfg_val & SCALER_CFG_SWAP_MASK) &&
+ (sc->version < SCALER_VERSION(2, 1, 0)))
+ return NULL;
+ return &sc_formats[i];
+ }
+ }
+
+ return NULL;
+}
+
+static int sc_v4l2_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ strncpy(cap->driver, MODULE_NAME, sizeof(cap->driver) - 1);
+ strncpy(cap->card, MODULE_NAME, sizeof(cap->card) - 1);
+
+ cap->capabilities = V4L2_CAP_STREAMING |
+ V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_VIDEO_OUTPUT_MPLANE;
+ cap->capabilities |= V4L2_CAP_DEVICE_CAPS;
+ cap->device_caps = 0x0100;
+
+ return 0;
+}
+
+static int sc_v4l2_enum_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_fmtdesc *f)
+{
+ const struct sc_fmt *sc_fmt;
+
+ if (f->index >= ARRAY_SIZE(sc_formats))
+ return -EINVAL;
+
+ sc_fmt = &sc_formats[f->index];
+ strncpy(f->description, sc_fmt->name, sizeof(f->description) - 1);
+ f->pixelformat = sc_fmt->pixelformat;
+
+ return 0;
+}
+
+static int sc_v4l2_g_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct sc_ctx *ctx = fh_to_sc_ctx(fh);
+ const struct sc_fmt *sc_fmt;
+ struct sc_frame *frame;
+ struct v4l2_pix_format_mplane *pixm = &f->fmt.pix_mp;
+ int i;
+
+ frame = ctx_get_frame(ctx, f->type);
+ if (IS_ERR(frame))
+ return PTR_ERR(frame);
+
+ sc_fmt = frame->sc_fmt;
+
+ pixm->width = frame->width;
+ pixm->height = frame->height;
+ pixm->pixelformat = frame->pixelformat;
+ pixm->field = V4L2_FIELD_NONE;
+ pixm->num_planes = frame->sc_fmt->num_planes;
+ pixm->colorspace = 0;
+
+ for (i = 0; i < pixm->num_planes; ++i) {
+ pixm->plane_fmt[i].bytesperline = (pixm->width *
+ sc_fmt->bitperpixel[i]) >> 3;
+ if (sc_fmt_is_ayv12(sc_fmt->pixelformat)) {
+ unsigned int y_size, c_span;
+ y_size = pixm->width * pixm->height;
+ c_span = ALIGN(pixm->width >> 1, 16);
+ pixm->plane_fmt[i].sizeimage =
+ y_size + (c_span * pixm->height >> 1) * 2;
+ } else {
+ pixm->plane_fmt[i].sizeimage =
+ pixm->plane_fmt[i].bytesperline * pixm->height;
+ }
+
+ v4l2_dbg(1, sc_log_level, &ctx->sc_dev->m2m.v4l2_dev,
+ "[%d] plane: bytesperline %d, sizeimage %d\n",
+ i, pixm->plane_fmt[i].bytesperline,
+ pixm->plane_fmt[i].sizeimage);
+ }
+
+ return 0;
+}
+
+static int sc_v4l2_try_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct sc_ctx *ctx = fh_to_sc_ctx(fh);
+ const struct sc_fmt *sc_fmt;
+ struct v4l2_pix_format_mplane *pixm = &f->fmt.pix_mp;
+ const struct sc_size_limit *limit;
+ int i;
+ int h_align = 0;
+ int w_align = 0;
+
+ if (!V4L2_TYPE_IS_MULTIPLANAR(f->type)) {
+ v4l2_err(&ctx->sc_dev->m2m.v4l2_dev,
+ "not supported v4l2 type\n");
+ return -EINVAL;
+ }
+
+ sc_fmt = sc_find_format(ctx->sc_dev, f->fmt.pix_mp.pixelformat,
+ V4L2_TYPE_IS_OUTPUT(f->type));
+ if (!sc_fmt) {
+ v4l2_err(&ctx->sc_dev->m2m.v4l2_dev,
+ "not supported format type\n");
+ return -EINVAL;
+ }
+
+ if (V4L2_TYPE_IS_OUTPUT(f->type))
+ limit = &ctx->sc_dev->variant->limit_input;
+ else
+ limit = &ctx->sc_dev->variant->limit_output;
+
+ /*
+ * Y_SPAN - must be even for interleaved YCbCr422
+ * C_SPAN - must be even for YCbCr420 and YCbCr422
+ */
+ if (sc_fmt_is_yuv422(sc_fmt->pixelformat) ||
+ sc_fmt_is_yuv420(sc_fmt->pixelformat))
+ w_align = 1;
+
+ /* Bound an image to have width and height in limit */
+ v4l_bound_align_image(&pixm->width, limit->min_w, limit->max_w,
+ w_align, &pixm->height, limit->min_h,
+ limit->max_h, h_align, 0);
+
+ pixm->num_planes = sc_fmt->num_planes;
+ pixm->colorspace = 0;
+
+ for (i = 0; i < pixm->num_planes; ++i) {
+ pixm->plane_fmt[i].bytesperline = (pixm->width *
+ sc_fmt->bitperpixel[i]) >> 3;
+ if (sc_fmt_is_ayv12(sc_fmt->pixelformat)) {
+ unsigned int y_size, c_span;
+ y_size = pixm->width * pixm->height;
+ c_span = ALIGN(pixm->width >> 1, 16);
+ pixm->plane_fmt[i].sizeimage =
+ y_size + (c_span * pixm->height >> 1) * 2;
+ } else {
+ pixm->plane_fmt[i].sizeimage =
+ pixm->plane_fmt[i].bytesperline * pixm->height;
+ }
+
+ v4l2_dbg(1, sc_log_level, &ctx->sc_dev->m2m.v4l2_dev,
+ "[%d] plane: bytesperline %d, sizeimage %d\n",
+ i, pixm->plane_fmt[i].bytesperline,
+ pixm->plane_fmt[i].sizeimage);
+ }
+
+ return 0;
+}
+
+static int sc_v4l2_s_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct sc_ctx *ctx = fh_to_sc_ctx(fh);
+ struct vb2_queue *vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+ struct sc_frame *frame;
+ struct v4l2_pix_format_mplane *pixm = &f->fmt.pix_mp;
+ const struct sc_size_limit *limitout =
+ &ctx->sc_dev->variant->limit_input;
+ const struct sc_size_limit *limitcap =
+ &ctx->sc_dev->variant->limit_output;
+ int i, ret = 0;
+
+ if (vb2_is_streaming(vq)) {
+ v4l2_err(&ctx->sc_dev->m2m.v4l2_dev, "device is busy\n");
+ return -EBUSY;
+ }
+
+ ret = sc_v4l2_try_fmt_mplane(file, fh, f);
+ if (ret < 0)
+ return ret;
+
+ frame = ctx_get_frame(ctx, f->type);
+ if (IS_ERR(frame))
+ return PTR_ERR(frame);
+
+ set_bit(CTX_PARAMS, &ctx->flags);
+
+ frame->sc_fmt = sc_find_format(ctx->sc_dev, f->fmt.pix_mp.pixelformat,
+ V4L2_TYPE_IS_OUTPUT(f->type));
+ if (!frame->sc_fmt) {
+ v4l2_err(&ctx->sc_dev->m2m.v4l2_dev,
+ "not supported format values\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < frame->sc_fmt->num_planes; i++)
+ frame->bytesused[i] = pixm->plane_fmt[i].sizeimage;
+
+ if (V4L2_TYPE_IS_OUTPUT(f->type) &&
+ ((pixm->width > limitout->max_w) ||
+ (pixm->height > limitout->max_h))) {
+ v4l2_err(&ctx->sc_dev->m2m.v4l2_dev,
+ "%dx%d of source image is not supported: too large\n",
+ pixm->width, pixm->height);
+ return -EINVAL;
+ }
+
+ if (!V4L2_TYPE_IS_OUTPUT(f->type) &&
+ ((pixm->width > limitcap->max_w) ||
+ (pixm->height > limitcap->max_h))) {
+ v4l2_err(&ctx->sc_dev->m2m.v4l2_dev,
+ "%dx%d of target image is not supported: too large\n",
+ pixm->width, pixm->height);
+ return -EINVAL;
+ }
+
+ if (V4L2_TYPE_IS_OUTPUT(f->type) &&
+ ((pixm->width < limitout->min_w) ||
+ (pixm->height < limitout->min_h))) {
+ v4l2_err(&ctx->sc_dev->m2m.v4l2_dev,
+ "%dx%d of source image is not supported: too small\n",
+ pixm->width, pixm->height);
+ return -EINVAL;
+ }
+
+ if (!V4L2_TYPE_IS_OUTPUT(f->type) &&
+ ((pixm->width < limitcap->min_w) ||
+ (pixm->height < limitcap->min_h))) {
+ v4l2_err(&ctx->sc_dev->m2m.v4l2_dev,
+ "%dx%d of target image is not supported: too small\n",
+ pixm->width, pixm->height);
+ return -EINVAL;
+ }
+
+ if ((pixm->flags & V4L2_PIX_FMT_FLAG_PREMUL_ALPHA) &&
+ ctx->sc_dev->version != SCALER_VERSION(4, 0, 0))
+ frame->pre_multi = true;
+ else
+ frame->pre_multi = false;
+
+ frame->width = pixm->width;
+ frame->height = pixm->height;
+ frame->pixelformat = pixm->pixelformat;
+
+ frame->crop.width = pixm->width;
+ frame->crop.height = pixm->height;
+
+ return 0;
+}
+
+static int sc_v4l2_reqbufs(struct file *file, void *fh,
+ struct v4l2_requestbuffers *reqbufs)
+{
+ struct sc_ctx *ctx = fh_to_sc_ctx(fh);
+
+ return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
+}
+
+static int sc_v4l2_querybuf(struct file *file, void *fh,
+ struct v4l2_buffer *buf)
+{
+ struct sc_ctx *ctx = fh_to_sc_ctx(fh);
+ return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
+}
+
+static int sc_v4l2_qbuf(struct file *file, void *fh,
+ struct v4l2_buffer *buf)
+{
+ struct sc_ctx *ctx = fh_to_sc_ctx(fh);
+
+ return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
+}
+
+static int sc_v4l2_dqbuf(struct file *file, void *fh,
+ struct v4l2_buffer *buf)
+{
+ struct sc_ctx *ctx = fh_to_sc_ctx(fh);
+ return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
+}
+
+static int sc_v4l2_streamon(struct file *file, void *fh,
+ enum v4l2_buf_type type)
+{
+ struct sc_ctx *ctx = fh_to_sc_ctx(fh);
+ return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
+}
+
+static int sc_v4l2_streamoff(struct file *file, void *fh,
+ enum v4l2_buf_type type)
+{
+ struct sc_ctx *ctx = fh_to_sc_ctx(fh);
+ return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
+}
+
+static int sc_v4l2_cropcap(struct file *file, void *fh,
+ struct v4l2_cropcap *cr)
+{
+ struct sc_ctx *ctx = fh_to_sc_ctx(fh);
+ struct sc_frame *frame;
+
+ frame = ctx_get_frame(ctx, cr->type);
+ if (IS_ERR(frame))
+ return PTR_ERR(frame);
+
+ cr->bounds.left = 0;
+ cr->bounds.top = 0;
+ cr->bounds.width = frame->width;
+ cr->bounds.height = frame->height;
+ cr->defrect = cr->bounds;
+
+ return 0;
+}
+
+static int sc_v4l2_g_crop(struct file *file, void *fh, struct v4l2_crop *cr)
+{
+ struct sc_ctx *ctx = fh_to_sc_ctx(fh);
+ struct sc_frame *frame;
+
+ frame = ctx_get_frame(ctx, cr->type);
+ if (IS_ERR(frame))
+ return PTR_ERR(frame);
+
+ cr->c.left = SC_CROP_MAKE_FR_VAL(frame->crop.left, ctx->init_phase.yh);
+ cr->c.top = SC_CROP_MAKE_FR_VAL(frame->crop.top, ctx->init_phase.yv);
+ cr->c.width = SC_CROP_MAKE_FR_VAL(frame->crop.width, ctx->init_phase.w);
+ cr->c.height = SC_CROP_MAKE_FR_VAL(frame->crop.height, ctx->init_phase.h);
+
+ return 0;
+}
+
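+/*
+ * Extract the fractional parts encoded in the crop rectangle and store
+ * them as initial phases of the polyphase filter. A width or height
+ * with a fractional part is rounded up to the next integer.
+ */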
+static int sc_get_fract_val(struct v4l2_rect *rect, struct sc_ctx *ctx)
+{
+ ctx->init_phase.yh = SC_CROP_GET_FR_VAL(rect->left);
+ if (ctx->init_phase.yh)
+ rect->left &= SC_CROP_INT_MASK;
+
+ ctx->init_phase.yv = SC_CROP_GET_FR_VAL(rect->top);
+ if (ctx->init_phase.yv)
+ rect->top &= SC_CROP_INT_MASK;
+
+ ctx->init_phase.w = SC_CROP_GET_FR_VAL(rect->width);
+ if (ctx->init_phase.w) {
+ rect->width &= SC_CROP_INT_MASK;
+ rect->width += 1;
+ }
+
+ ctx->init_phase.h = SC_CROP_GET_FR_VAL(rect->height);
+ if (ctx->init_phase.h) {
+ rect->height &= SC_CROP_INT_MASK;
+ rect->height += 1;
+ }
+
+ if (sc_fmt_is_yuv420(ctx->s_frame.sc_fmt->pixelformat)) {
+ ctx->init_phase.ch = ctx->init_phase.yh / 2;
+ ctx->init_phase.cv = ctx->init_phase.yv / 2;
+ } else {
+ ctx->init_phase.ch = ctx->init_phase.yh;
+ ctx->init_phase.cv = ctx->init_phase.yv;
+ }
+
+ if ((ctx->init_phase.yh || ctx->init_phase.yv || ctx->init_phase.w
+ || ctx->init_phase.h) &&
+ (!(sc_fmt_is_yuv420(ctx->s_frame.sc_fmt->pixelformat) ||
+ sc_fmt_is_rgb888(ctx->s_frame.sc_fmt->pixelformat)))) {
+ v4l2_err(&ctx->sc_dev->m2m.v4l2_dev,
+ "%s format on real number is not supported",
+ ctx->s_frame.sc_fmt->name);
+ return -EINVAL;
+ }
+
+ v4l2_dbg(1, sc_log_level, &ctx->sc_dev->m2m.v4l2_dev,
+ "src crop position (x,y,w,h) = (%d.%d, %d.%d, %d.%d, %d.%d) %d, %d\n",
+ rect->left, ctx->init_phase.yh,
+ rect->top, ctx->init_phase.yv,
+ rect->width, ctx->init_phase.w,
+ rect->height, ctx->init_phase.h,
+ ctx->init_phase.ch, ctx->init_phase.cv);
+ return 0;
+}
+
+static int sc_v4l2_s_crop(struct file *file, void *fh,
+ const struct v4l2_crop *cr)
+{
+ struct sc_ctx *ctx = fh_to_sc_ctx(fh);
+ struct sc_dev *sc = ctx->sc_dev;
+ struct sc_frame *frame;
+ struct v4l2_rect rect = cr->c;
+ const struct sc_size_limit *limit = NULL;
+ int x_align = 0, y_align = 0;
+ int w_align = 0;
+ int h_align = 0;
+ int ret = 0;
+
+ frame = ctx_get_frame(ctx, cr->type);
+ if (IS_ERR(frame))
+ return PTR_ERR(frame);
+
+ if (!test_bit(CTX_PARAMS, &ctx->flags)) {
+ v4l2_err(&ctx->sc_dev->m2m.v4l2_dev,
+ "color format is not set\n");
+ return -EINVAL;
+ }
+
+ if (cr->c.left < 0 || cr->c.top < 0) {
+ v4l2_err(&ctx->sc_dev->m2m.v4l2_dev,
+ "crop position is negative\n");
+ return -EINVAL;
+ }
+
+ if (V4L2_TYPE_IS_OUTPUT(cr->type)) {
+ ret = sc_get_fract_val(&rect, ctx);
+ if (ret < 0)
+ return ret;
+ limit = &sc->variant->limit_input;
+ set_bit(CTX_SRC_FMT, &ctx->flags);
+ } else {
+ limit = &sc->variant->limit_output;
+ set_bit(CTX_DST_FMT, &ctx->flags);
+ }
+
+ if (sc_fmt_is_yuv422(frame->sc_fmt->pixelformat)) {
+ w_align = 1;
+ } else if (sc_fmt_is_yuv420(frame->sc_fmt->pixelformat)) {
+ w_align = 1;
+ h_align = 1;
+ }
+
+ /* Bound an image to have crop width and height in limit */
+ v4l_bound_align_image(&rect.width, limit->min_w, limit->max_w,
+ w_align, &rect.height, limit->min_h,
+ limit->max_h, h_align, 0);
+
+ if (V4L2_TYPE_IS_OUTPUT(cr->type)) {
+ if (sc_fmt_is_yuv422(frame->sc_fmt->pixelformat))
+ x_align = 1;
+ } else {
+ if (sc_fmt_is_yuv422(frame->sc_fmt->pixelformat)) {
+ x_align = 1;
+ } else if (sc_fmt_is_yuv420(frame->sc_fmt->pixelformat)) {
+ x_align = 1;
+ y_align = 1;
+ }
+ }
+
+ /* Bound an image to have crop position in limit */
+ v4l_bound_align_image(&rect.left, 0, frame->width - rect.width,
+ x_align, &rect.top, 0, frame->height - rect.height,
+ y_align, 0);
+
+ if ((rect.height > frame->height) || (rect.top > frame->height) ||
+ (rect.width > frame->width) || (rect.left > frame->width)) {
+ v4l2_err(&ctx->sc_dev->m2m.v4l2_dev,
+ "Out of crop range: (%d,%d,%d,%d) from %dx%d\n",
+ rect.left, rect.top, rect.width, rect.height,
+ frame->width, frame->height);
+ return -EINVAL;
+ }
+
+ frame->crop.top = rect.top;
+ frame->crop.left = rect.left;
+ frame->crop.height = rect.height;
+ frame->crop.width = rect.width;
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops sc_v4l2_ioctl_ops = {
+ .vidioc_querycap = sc_v4l2_querycap,
+
+ .vidioc_enum_fmt_vid_cap_mplane = sc_v4l2_enum_fmt_mplane,
+ .vidioc_enum_fmt_vid_out_mplane = sc_v4l2_enum_fmt_mplane,
+
+ .vidioc_g_fmt_vid_cap_mplane = sc_v4l2_g_fmt_mplane,
+ .vidioc_g_fmt_vid_out_mplane = sc_v4l2_g_fmt_mplane,
+
+ .vidioc_try_fmt_vid_cap_mplane = sc_v4l2_try_fmt_mplane,
+ .vidioc_try_fmt_vid_out_mplane = sc_v4l2_try_fmt_mplane,
+
+ .vidioc_s_fmt_vid_cap_mplane = sc_v4l2_s_fmt_mplane,
+ .vidioc_s_fmt_vid_out_mplane = sc_v4l2_s_fmt_mplane,
+
+ .vidioc_reqbufs = sc_v4l2_reqbufs,
+ .vidioc_querybuf = sc_v4l2_querybuf,
+
+ .vidioc_qbuf = sc_v4l2_qbuf,
+ .vidioc_dqbuf = sc_v4l2_dqbuf,
+
+ .vidioc_streamon = sc_v4l2_streamon,
+ .vidioc_streamoff = sc_v4l2_streamoff,
+
+ .vidioc_g_crop = sc_v4l2_g_crop,
+ .vidioc_s_crop = sc_v4l2_s_crop,
+ .vidioc_cropcap = sc_v4l2_cropcap,
+};
+
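+/* Wait until the H/W finishes the job running on the given context. */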
+static int sc_ctx_stop_req(struct sc_ctx *ctx)
+{
+ struct sc_ctx *curr_ctx;
+ struct sc_dev *sc = ctx->sc_dev;
+ int ret = 0;
+
+ curr_ctx = v4l2_m2m_get_curr_priv(sc->m2m.m2m_dev);
+ if (!test_bit(CTX_RUN, &ctx->flags) || (curr_ctx != ctx))
+ return 0;
+
+ set_bit(CTX_ABORT, &ctx->flags);
+
+ ret = wait_event_timeout(sc->wait,
+ !test_bit(CTX_RUN, &ctx->flags), SC_TIMEOUT);
+
+ /* TODO: decide how to handle a timeout event */
+ if (ret == 0) {
+ dev_err(sc->dev, "device failed to stop request\n");
+ ret = -EBUSY;
+ }
+
+ return ret;
+}
+
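+/* Compute the per-plane sizes of the intermediate frame buffers. */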
+static void sc_calc_intbufsize(struct sc_dev *sc, struct sc_int_frame *int_frame)
+{
+ struct sc_frame *frame = &int_frame->frame;
+ unsigned int pixsize, bytesize;
+
+ pixsize = frame->width * frame->height;
+ bytesize = (pixsize * frame->sc_fmt->bitperpixel[0]) >> 3;
+
+ switch (frame->sc_fmt->num_comp) {
+ case 1:
+ frame->addr.ysize = bytesize;
+ break;
+ case 2:
+ if (frame->sc_fmt->num_planes == 1) {
+ frame->addr.ysize = pixsize;
+ frame->addr.cbsize = bytesize - pixsize;
+ } else if (frame->sc_fmt->num_planes == 2) {
+ frame->addr.ysize =
+ (pixsize * frame->sc_fmt->bitperpixel[0]) / 8;
+ frame->addr.cbsize =
+ (pixsize * frame->sc_fmt->bitperpixel[1]) / 8;
+ }
+ break;
+ case 3:
+ if (frame->sc_fmt->num_planes == 1) {
+ if (sc_fmt_is_ayv12(frame->sc_fmt->pixelformat)) {
+ unsigned int c_span;
+ c_span = ALIGN(frame->width >> 1, 16);
+ frame->addr.ysize = pixsize;
+ frame->addr.cbsize = c_span * (frame->height >> 1);
+ frame->addr.crsize = frame->addr.cbsize;
+ } else {
+ frame->addr.ysize = pixsize;
+ frame->addr.cbsize = (bytesize - pixsize) / 2;
+ frame->addr.crsize = frame->addr.cbsize;
+ }
+ } else if (frame->sc_fmt->num_planes == 3) {
+ frame->addr.ysize =
+ (pixsize * frame->sc_fmt->bitperpixel[0]) / 8;
+ frame->addr.cbsize =
+ (pixsize * frame->sc_fmt->bitperpixel[1]) / 8;
+ frame->addr.crsize =
+ (pixsize * frame->sc_fmt->bitperpixel[2]) / 8;
+ } else {
+ dev_err(sc->dev, "invalid number of planes for 3 components\n");
+ }
+
+ break;
+ default:
+ break;
+ }
+
+ memcpy(&int_frame->src_addr, &frame->addr, sizeof(int_frame->src_addr));
+ memcpy(&int_frame->dst_addr, &frame->addr, sizeof(int_frame->dst_addr));
+}
+
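+/*
+ * Release the ION buffers and IOVA mappings of the intermediate frame
+ * but keep the sc_int_frame itself so that it can be reused.
+ */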
+static void free_intermediate_frame(struct sc_ctx *ctx)
+{
+ if (ctx->i_frame == NULL)
+ return;
+
+ if (!ctx->i_frame->handle[0])
+ return;
+
+ ion_free(ctx->i_frame->client, ctx->i_frame->handle[0]);
+
+ if (ctx->i_frame->handle[1])
+ ion_free(ctx->i_frame->client, ctx->i_frame->handle[1]);
+ if (ctx->i_frame->handle[2])
+ ion_free(ctx->i_frame->client, ctx->i_frame->handle[2]);
+
+ if (ctx->i_frame->src_addr.y)
+ iovmm_unmap(ctx->sc_dev->dev, ctx->i_frame->src_addr.y);
+ if (ctx->i_frame->src_addr.cb)
+ iovmm_unmap(ctx->sc_dev->dev, ctx->i_frame->src_addr.cb);
+ if (ctx->i_frame->src_addr.cr)
+ iovmm_unmap(ctx->sc_dev->dev, ctx->i_frame->src_addr.cr);
+ if (ctx->i_frame->dst_addr.y)
+ iovmm_unmap(ctx->sc_dev->dev, ctx->i_frame->dst_addr.y);
+ if (ctx->i_frame->dst_addr.cb)
+ iovmm_unmap(ctx->sc_dev->dev, ctx->i_frame->dst_addr.cb);
+ if (ctx->i_frame->dst_addr.cr)
+ iovmm_unmap(ctx->sc_dev->dev, ctx->i_frame->dst_addr.cr);
+
+ memset(&ctx->i_frame->handle, 0, sizeof(struct ion_handle *) * 3);
+ memset(&ctx->i_frame->src_addr, 0, sizeof(ctx->i_frame->src_addr));
+ memset(&ctx->i_frame->dst_addr, 0, sizeof(ctx->i_frame->dst_addr));
+}
+
+static void destroy_intermediate_frame(struct sc_ctx *ctx)
+{
+ if (ctx->i_frame) {
+ free_intermediate_frame(ctx);
+ ion_client_destroy(ctx->i_frame->client);
+ kfree(ctx->i_frame);
+ ctx->i_frame = NULL;
+ clear_bit(CTX_INT_FRAME, &ctx->flags);
+ }
+}
+
+static bool initialize_intermediate_frame(struct sc_ctx *ctx)
+{
+ struct sc_frame *frame;
+ struct sc_dev *sc = ctx->sc_dev;
+ struct sg_table *sgt;
+
+ frame = &ctx->i_frame->frame;
+
+ frame->crop.top = 0;
+ frame->crop.left = 0;
+ frame->width = frame->crop.width;
+ frame->height = frame->crop.height;
+
+ /*
+ * Check if the intermediate frame was already initialized by a
+ * previous frame. If so, the intermediate buffer does not need to
+ * be initialized again because the image settings never change
+ * while streaming continues.
+ */
+ if (ctx->i_frame->handle[0])
+ return true;
+
+ sc_calc_intbufsize(sc, ctx->i_frame);
+
+ if (frame->addr.ysize) {
+ ctx->i_frame->handle[0] = ion_alloc(ctx->i_frame->client,
+ frame->addr.ysize, 0, ION_HEAP_SYSTEM_MASK, 0);
+ if (IS_ERR(ctx->i_frame->handle[0])) {
+ dev_err(sc->dev,
+ "Failed to allocate intermediate y buffer (err %ld)",
+ PTR_ERR(ctx->i_frame->handle[0]));
+ ctx->i_frame->handle[0] = NULL;
+ goto err_ion_alloc;
+ }
+
+ sgt = ion_sg_table(ctx->i_frame->client,
+ ctx->i_frame->handle[0]);
+ if (IS_ERR(sgt)) {
+ dev_err(sc->dev,
+ "Failed to get sg_table from ion_handle of y (err %ld)",
+ PTR_ERR(sgt));
+ goto err_ion_alloc;
+ }
+
+ ctx->i_frame->src_addr.y = iovmm_map(sc->dev, sgt->sgl, 0,
+ frame->addr.ysize, DMA_TO_DEVICE, 0);
+ if (IS_ERR_VALUE(ctx->i_frame->src_addr.y)) {
+ dev_err(sc->dev,
+ "Failed to allocate iova of y (err %pa)",
+ &ctx->i_frame->src_addr.y);
+ ctx->i_frame->src_addr.y = 0;
+ goto err_ion_alloc;
+ }
+
+ ctx->i_frame->dst_addr.y = iovmm_map(sc->dev, sgt->sgl, 0,
+ frame->addr.ysize, DMA_FROM_DEVICE, 0);
+ if (IS_ERR_VALUE(ctx->i_frame->dst_addr.y)) {
+ dev_err(sc->dev,
+ "Failed to allocate iova of y (err %pa)",
+ &ctx->i_frame->dst_addr.y);
+ ctx->i_frame->dst_addr.y = 0;
+ goto err_ion_alloc;
+ }
+
+ frame->addr.y = ctx->i_frame->dst_addr.y;
+ }
+
+ if (frame->addr.cbsize) {
+ ctx->i_frame->handle[1] = ion_alloc(ctx->i_frame->client,
+ frame->addr.cbsize, 0, ION_HEAP_SYSTEM_MASK, 0);
+ if (IS_ERR(ctx->i_frame->handle[1])) {
+ dev_err(sc->dev,
+ "Failed to allocate intermediate cb buffer (err %ld)",
+ PTR_ERR(ctx->i_frame->handle[1]));
+ ctx->i_frame->handle[1] = NULL;
+ goto err_ion_alloc;
+ }
+
+ sgt = ion_sg_table(ctx->i_frame->client,
+ ctx->i_frame->handle[1]);
+ if (IS_ERR(sgt)) {
+ dev_err(sc->dev,
+ "Failed to get sg_table from ion_handle of cb(err %ld)",
+ PTR_ERR(sgt));
+ goto err_ion_alloc;
+ }
+
+ ctx->i_frame->src_addr.cb = iovmm_map(sc->dev, sgt->sgl, 0,
+ frame->addr.cbsize, DMA_TO_DEVICE, 0);
+ if (IS_ERR_VALUE(ctx->i_frame->src_addr.cb)) {
+ dev_err(sc->dev,
+ "Failed to allocate iova of cb (err %pa)",
+ &ctx->i_frame->src_addr.cb);
+ ctx->i_frame->src_addr.cb = 0;
+ goto err_ion_alloc;
+ }
+
+ ctx->i_frame->dst_addr.cb = iovmm_map(sc->dev, sgt->sgl, 0,
+ frame->addr.cbsize, DMA_FROM_DEVICE, 0);
+ if (IS_ERR_VALUE(ctx->i_frame->dst_addr.cb)) {
+ dev_err(sc->dev,
+ "Failed to allocate iova of cb (err %pa)",
+ &ctx->i_frame->dst_addr.cb);
+ ctx->i_frame->dst_addr.cb = 0;
+ goto err_ion_alloc;
+ }
+
+ frame->addr.cb = ctx->i_frame->dst_addr.cb;
+ }
+
+ if (frame->addr.crsize) {
+ ctx->i_frame->handle[2] = ion_alloc(ctx->i_frame->client,
+ frame->addr.crsize, 0, ION_HEAP_SYSTEM_MASK, 0);
+ if (IS_ERR(ctx->i_frame->handle[2])) {
+ dev_err(sc->dev,
+ "Failed to allocate intermediate cr buffer (err %ld)",
+ PTR_ERR(ctx->i_frame->handle[2]));
+ ctx->i_frame->handle[2] = NULL;
+ goto err_ion_alloc;
+ }
+
+ sgt = ion_sg_table(ctx->i_frame->client,
+ ctx->i_frame->handle[2]);
+ if (IS_ERR(sgt)) {
+ dev_err(sc->dev,
+ "Failed to get sg_table from ion_handle of cr(err %ld)",
+ PTR_ERR(sgt));
+ goto err_ion_alloc;
+ }
+
+ ctx->i_frame->src_addr.cr = iovmm_map(sc->dev, sgt->sgl, 0,
+ frame->addr.crsize, DMA_TO_DEVICE, 0);
+ if (IS_ERR_VALUE(ctx->i_frame->src_addr.cr)) {
+ dev_err(sc->dev,
+ "Failed to allocate iova of cr (err %pa)",
+ &ctx->i_frame->src_addr.cr);
+ ctx->i_frame->src_addr.cr = 0;
+ goto err_ion_alloc;
+ }
+
+ ctx->i_frame->dst_addr.cr = iovmm_map(sc->dev, sgt->sgl, 0,
+ frame->addr.crsize, DMA_FROM_DEVICE, 0);
+ if (IS_ERR_VALUE(ctx->i_frame->dst_addr.cr)) {
+ dev_err(sc->dev,
+ "Failed to allocate iova of cr (err %pa)",
+ &ctx->i_frame->dst_addr.cr);
+ ctx->i_frame->dst_addr.cr = 0;
+ goto err_ion_alloc;
+ }
+
+ frame->addr.cr = ctx->i_frame->dst_addr.cr;
+ }
+
+ return true;
+
+err_ion_alloc:
+ free_intermediate_frame(ctx);
+ return false;
+}
+
+static bool allocate_intermediate_frame(struct sc_ctx *ctx)
+{
+ if (ctx->i_frame == NULL) {
+ ctx->i_frame = kzalloc(sizeof(*ctx->i_frame), GFP_KERNEL);
+ if (ctx->i_frame == NULL) {
+ dev_err(ctx->sc_dev->dev,
+ "Failed to allocate intermediate frame\n");
+ return false;
+ }
+
+ ctx->i_frame->client = exynos_ion_client_create("scaler-int");
+ if (IS_ERR(ctx->i_frame->client)) {
+ dev_err(ctx->sc_dev->dev,
+ "Failed to create ION client for int.buf.(err %ld)\n",
+ PTR_ERR(ctx->i_frame->client));
+ ctx->i_frame->client = NULL;
+ kfree(ctx->i_frame);
+ ctx->i_frame = NULL;
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/* Zoom-out range: (x1/4, x1/16] */
+static int sc_prepare_2nd_scaling(struct sc_ctx *ctx,
+ __s32 src_width, __s32 src_height,
+ unsigned int *h_ratio, unsigned int *v_ratio)
+{
+ struct sc_dev *sc = ctx->sc_dev;
+ struct v4l2_rect crop = ctx->d_frame.crop;
+ const struct sc_size_limit *limit;
+ unsigned int halign = 0, walign = 0;
+ __u32 pixfmt;
+ const struct sc_fmt *target_fmt = ctx->d_frame.sc_fmt;
+
+ if (!allocate_intermediate_frame(ctx))
+ return -ENOMEM;
+
+ limit = &sc->variant->limit_input;
+ if (*v_ratio > SCALE_RATIO_CONST(4, 1)) {
+ crop.height = ((src_height + 7) / 8) * 2;
+ if (crop.height < limit->min_h) {
+ if (SCALE_RATIO(limit->min_h,
+ ctx->d_frame.crop.height) >
+ SCALE_RATIO_CONST(4, 1)) {
+ dev_err(sc->dev,
+ "Failed height scale down %d -> %d\n",
+ src_height,
+ ctx->d_frame.crop.height);
+
+ free_intermediate_frame(ctx);
+ return -EINVAL;
+ }
+
+ crop.height = limit->min_h;
+ }
+ }
+
+ if (*h_ratio > SCALE_RATIO_CONST(4, 1)) {
+ crop.width = ((src_width + 7) / 8) * 2;
+ if (crop.width < limit->min_w) {
+ if (SCALE_RATIO(limit->min_w,
+ ctx->d_frame.crop.width) >
+ SCALE_RATIO_CONST(4, 1)) {
+ dev_err(sc->dev,
+ "Failed width scale down %d -> %d\n",
+ src_width,
+ ctx->d_frame.crop.width);
+
+ free_intermediate_frame(ctx);
+ return -EINVAL;
+ }
+
+ crop.width = limit->min_w;
+ }
+ }
+
+ pixfmt = target_fmt->pixelformat;
+
+ if (sc_fmt_is_yuv422(pixfmt)) {
+ walign = 1;
+ } else if (sc_fmt_is_yuv420(pixfmt)) {
+ walign = 1;
+ halign = 1;
+ }
+
+ limit = &sc->variant->limit_output;
+ v4l_bound_align_image(&crop.width, limit->min_w, limit->max_w,
+ walign, &crop.height, limit->min_h,
+ limit->max_h, halign, 0);
+
+ *h_ratio = SCALE_RATIO(src_width, crop.width);
+ *v_ratio = SCALE_RATIO(src_height, crop.height);
+
+ if ((ctx->i_frame->frame.sc_fmt != ctx->d_frame.sc_fmt) ||
+ memcmp(&crop, &ctx->i_frame->frame.crop, sizeof(crop))) {
+ memcpy(&ctx->i_frame->frame, &ctx->d_frame,
+ sizeof(ctx->d_frame));
+ memcpy(&ctx->i_frame->frame.crop, &crop, sizeof(crop));
+ free_intermediate_frame(ctx);
+ if (!initialize_intermediate_frame(ctx)) {
+ free_intermediate_frame(ctx);
+ return -ENOMEM;
+ }
+ }
+ return 0;
+}
+
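+/*
+ * Target sizes of the denoising filter for each strength level. Width
+ * and height are swapped for portrait sources in sc_find_filter_size().
+ */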
+static const struct sc_dnoise_filter sc_filter_tab[] = {
+ {SC_FT_240, 426, 240},
+ {SC_FT_480, 854, 480},
+ {SC_FT_720, 1280, 720},
+ {SC_FT_1080, 1920, 1080},
+};
+
+static int sc_find_filter_size(struct sc_ctx *ctx)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sc_filter_tab); i++) {
+ if (sc_filter_tab[i].strength == ctx->dnoise_ft.strength) {
+ if (ctx->s_frame.width >= ctx->s_frame.height) {
+ ctx->dnoise_ft.w = sc_filter_tab[i].w;
+ ctx->dnoise_ft.h = sc_filter_tab[i].h;
+ } else {
+ ctx->dnoise_ft.w = sc_filter_tab[i].h;
+ ctx->dnoise_ft.h = sc_filter_tab[i].w;
+ }
+ break;
+ }
+ }
+
+ if (i == ARRAY_SIZE(sc_filter_tab)) {
+ dev_err(ctx->sc_dev->dev,
+ "%s: can't find filter size\n", __func__);
+ return -EINVAL;
+ }
+
+ if (ctx->s_frame.crop.width < ctx->dnoise_ft.w ||
+ ctx->s_frame.crop.height < ctx->dnoise_ft.h) {
+ dev_err(ctx->sc_dev->dev,
+ "%s: filter is over source size.(%dx%d -> %dx%d)\n",
+ __func__, ctx->s_frame.crop.width,
+ ctx->s_frame.crop.height, ctx->dnoise_ft.w,
+ ctx->dnoise_ft.h);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int sc_prepare_denoise_filter(struct sc_ctx *ctx)
+{
+ unsigned int sc_down_min = ctx->sc_dev->variant->sc_down_min;
+
+ if (ctx->dnoise_ft.strength <= SC_FT_BLUR)
+ return 0;
+
+ if (sc_find_filter_size(ctx))
+ return -EINVAL;
+
+ if (!allocate_intermediate_frame(ctx))
+ return -ENOMEM;
+
+ memcpy(&ctx->i_frame->frame, &ctx->d_frame, sizeof(ctx->d_frame));
+ ctx->i_frame->frame.crop.width = ctx->dnoise_ft.w;
+ ctx->i_frame->frame.crop.height = ctx->dnoise_ft.h;
+
+ free_intermediate_frame(ctx);
+ if (!initialize_intermediate_frame(ctx)) {
+ free_intermediate_frame(ctx);
+ dev_err(ctx->sc_dev->dev,
+ "%s: failed to initialize int_frame\n", __func__);
+ return -ENOMEM;
+ }
+
+ ctx->h_ratio = SCALE_RATIO(ctx->s_frame.crop.width, ctx->dnoise_ft.w);
+ ctx->v_ratio = SCALE_RATIO(ctx->s_frame.crop.height, ctx->dnoise_ft.h);
+
+ if ((ctx->h_ratio > sc_down_min) ||
+ (ctx->h_ratio < ctx->sc_dev->variant->sc_up_max)) {
+ dev_err(ctx->sc_dev->dev,
+ "filter can't support width scaling(%d -> %d)\n",
+ ctx->s_frame.crop.width, ctx->dnoise_ft.w);
+ goto err_ft;
+ }
+
+ if ((ctx->v_ratio > sc_down_min) ||
+ (ctx->v_ratio < ctx->sc_dev->variant->sc_up_max)) {
+ dev_err(ctx->sc_dev->dev,
+ "filter can't support height scaling(%d -> %d)\n",
+ ctx->s_frame.crop.height, ctx->dnoise_ft.h);
+ goto err_ft;
+ }
+
+ if (ctx->sc_dev->variant->prescale) {
+ BUG_ON(sc_down_min != SCALE_RATIO_CONST(16, 1));
+
+ if (ctx->h_ratio > SCALE_RATIO_CONST(8, 1))
+ ctx->pre_h_ratio = 2;
+ else if (ctx->h_ratio > SCALE_RATIO_CONST(4, 1))
+ ctx->pre_h_ratio = 1;
+ else
+ ctx->pre_h_ratio = 0;
+
+ if (ctx->v_ratio > SCALE_RATIO_CONST(8, 1))
+ ctx->pre_v_ratio = 2;
+ else if (ctx->v_ratio > SCALE_RATIO_CONST(4, 1))
+ ctx->pre_v_ratio = 1;
+ else
+ ctx->pre_v_ratio = 0;
+
+ if (ctx->pre_h_ratio || ctx->pre_v_ratio) {
+ if (!IS_ALIGNED(ctx->s_frame.crop.width,
+ 1 << (ctx->pre_h_ratio +
+ ctx->s_frame.sc_fmt->h_shift))) {
+ dev_err(ctx->sc_dev->dev,
+ "filter can't support not-aligned source(%d -> %d)\n",
+ ctx->s_frame.crop.width, ctx->dnoise_ft.w);
+ goto err_ft;
+ } else if (!IS_ALIGNED(ctx->s_frame.crop.height,
+ 1 << (ctx->pre_v_ratio +
+ ctx->s_frame.sc_fmt->v_shift))) {
+ dev_err(ctx->sc_dev->dev,
+ "filter can't support not-aligned source(%d -> %d)\n",
+ ctx->s_frame.crop.height, ctx->dnoise_ft.h);
+ goto err_ft;
+ } else {
+ ctx->h_ratio >>= ctx->pre_h_ratio;
+ ctx->v_ratio >>= ctx->pre_v_ratio;
+ }
+ }
+ }
+
+ return 0;
+
+err_ft:
+ free_intermediate_frame(ctx);
+ return -EINVAL;
+}
+
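+/*
+ * Compute the horizontal and vertical scaling ratios and decide whether
+ * the pre-scaler or a second polyphase scaling pass is required to stay
+ * within the H/W down-scaling limit.
+ */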
+static int sc_find_scaling_ratio(struct sc_ctx *ctx)
+{
+ __s32 src_width, src_height;
+ unsigned int h_ratio, v_ratio;
+ struct sc_dev *sc = ctx->sc_dev;
+ unsigned int sc_down_min = sc->variant->sc_down_min;
+
+ if ((ctx->s_frame.crop.width == 0) ||
+ (ctx->d_frame.crop.width == 0))
+ return 0; /* s_fmt is not complete */
+
+ src_width = ctx->s_frame.crop.width;
+ src_height = ctx->s_frame.crop.height;
+ if (!!(ctx->flip_rot_cfg & SCALER_ROT_90))
+ swap(src_width, src_height);
+
+ h_ratio = SCALE_RATIO(src_width, ctx->d_frame.crop.width);
+ v_ratio = SCALE_RATIO(src_height, ctx->d_frame.crop.height);
+
+ /*
+ * If the source crop width or height is fractional value
+ * calculate scaling ratio including it and calculate with original
+ * crop.width and crop.height value because they were rounded up.
+ */
+ if (ctx->init_phase.w)
+ h_ratio = SCALE_RATIO_FRACT((src_width - 1), ctx->init_phase.w,
+ ctx->d_frame.crop.width);
+ if (ctx->init_phase.h)
+ v_ratio = SCALE_RATIO_FRACT((src_height - 1), ctx->init_phase.h,
+ ctx->d_frame.crop.height);
+ sc_dbg("Scaling ratio h_ratio %d, v_ratio %d\n", h_ratio, v_ratio);
+
+ if ((h_ratio > sc->variant->sc_down_swmin) ||
+ (h_ratio < sc->variant->sc_up_max)) {
+ dev_err(sc->dev, "Width scaling is out of range(%d -> %d)\n",
+ src_width, ctx->d_frame.crop.width);
+ return -EINVAL;
+ }
+
+ if ((v_ratio > sc->variant->sc_down_swmin) ||
+ (v_ratio < sc->variant->sc_up_max)) {
+ dev_err(sc->dev, "Height scaling is out of range(%d -> %d)\n",
+ src_height, ctx->d_frame.crop.height);
+ return -EINVAL;
+ }
+
+ if (sc->variant->prescale) {
+ BUG_ON(sc_down_min != SCALE_RATIO_CONST(16, 1));
+
+ if (h_ratio > SCALE_RATIO_CONST(8, 1)) {
+ ctx->pre_h_ratio = 2;
+ } else if (h_ratio > SCALE_RATIO_CONST(4, 1)) {
+ ctx->pre_h_ratio = 1;
+ } else {
+ ctx->pre_h_ratio = 0;
+ }
+
+ if (v_ratio > SCALE_RATIO_CONST(8, 1)) {
+ ctx->pre_v_ratio = 2;
+ } else if (v_ratio > SCALE_RATIO_CONST(4, 1)) {
+ ctx->pre_v_ratio = 1;
+ } else {
+ ctx->pre_v_ratio = 0;
+ }
+
+ /*
+ * If the source image resolution violates the constraints of
+ * pre-scaler, then performs poly-phase scaling twice
+ */
+ if (ctx->pre_h_ratio || ctx->pre_v_ratio) {
+ if (!IS_ALIGNED(src_width, 1 << (ctx->pre_h_ratio +
+ ctx->s_frame.sc_fmt->h_shift)) ||
+ !IS_ALIGNED(src_height, 1 << (ctx->pre_v_ratio +
+ ctx->s_frame.sc_fmt->v_shift))) {
+ sc_down_min = SCALE_RATIO_CONST(4, 1);
+ ctx->pre_h_ratio = 0;
+ ctx->pre_v_ratio = 0;
+ } else {
+ h_ratio >>= ctx->pre_h_ratio;
+ v_ratio >>= ctx->pre_v_ratio;
+ }
+ }
+
+ if (sc_down_min == SCALE_RATIO_CONST(4, 1)) {
+ dev_info(sc->dev,
+ "%s: Prepared 2nd polyphase scaler (%dx%d->%dx%d)\n",
+ __func__,
+ ctx->s_frame.crop.width, ctx->s_frame.crop.height,
+ ctx->d_frame.crop.width, ctx->d_frame.crop.height);
+ }
+ }
+
+ if ((ctx->cp_enabled) && (h_ratio > sc_down_min)) {
+ dev_err(sc->dev, "Out of width range on protect(%d->%d)\n",
+ src_width, ctx->d_frame.crop.width);
+ return -EINVAL;
+ }
+ if ((ctx->cp_enabled) && (v_ratio > sc_down_min)) {
+ dev_err(sc->dev, "Out of height range on protect(%d->%d)\n",
+ src_height, ctx->d_frame.crop.height);
+ return -EINVAL;
+ }
+
+ if ((h_ratio > sc_down_min) || (v_ratio > sc_down_min)) {
+ int ret;
+
+ ret = sc_prepare_2nd_scaling(ctx, src_width, src_height,
+ &h_ratio, &v_ratio);
+ if (ret)
+ return ret;
+ } else {
+ destroy_intermediate_frame(ctx);
+ }
+
+ ctx->h_ratio = h_ratio;
+ ctx->v_ratio = v_ratio;
+
+ return 0;
+}
+
+static int sc_vb2_queue_setup(struct vb2_queue *vq,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct sc_ctx *ctx = vb2_get_drv_priv(vq);
+ struct sc_frame *frame;
+ int ret;
+ int i;
+
+ frame = ctx_get_frame(ctx, vq->type);
+ if (IS_ERR(frame))
+ return PTR_ERR(frame);
+
+ /* Get number of planes from format_list in driver */
+ *num_planes = frame->sc_fmt->num_planes;
+ for (i = 0; i < frame->sc_fmt->num_planes; i++) {
+ sizes[i] = frame->bytesused[i];
+ alloc_devs[i] = ctx->sc_dev->dev;
+ }
+
+ ret = sc_find_scaling_ratio(ctx);
+ if (ret)
+ return ret;
+
+ ret = sc_prepare_denoise_filter(ctx);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int sc_vb2_buf_prepare(struct vb2_buffer *vb)
+{
+ struct sc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct sc_frame *frame;
+ int i;
+
+ frame = ctx_get_frame(ctx, vb->vb2_queue->type);
+ if (IS_ERR(frame))
+ return PTR_ERR(frame);
+
+ if (!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
+ for (i = 0; i < frame->sc_fmt->num_planes; i++)
+ vb2_set_plane_payload(vb, i, frame->bytesused[i]);
+ }
+
+ return 0;
+}
+
+static void sc_vb2_buf_finish(struct vb2_buffer *vb)
+{
+}
+
+static void sc_vb2_buf_queue(struct vb2_buffer *vb)
+{
+ struct sc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *v4l2_buf = to_vb2_v4l2_buffer(vb);
+
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, v4l2_buf);
+}
+
+static void sc_vb2_buf_cleanup(struct vb2_buffer *vb)
+{
+}
+
+static void sc_vb2_lock(struct vb2_queue *vq)
+{
+ struct sc_ctx *ctx = vb2_get_drv_priv(vq);
+ mutex_lock(&ctx->sc_dev->lock);
+}
+
+static void sc_vb2_unlock(struct vb2_queue *vq)
+{
+ struct sc_ctx *ctx = vb2_get_drv_priv(vq);
+ mutex_unlock(&ctx->sc_dev->lock);
+}
+
+static int sc_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct sc_ctx *ctx = vb2_get_drv_priv(vq);
+ set_bit(CTX_STREAMING, &ctx->flags);
+
+ return 0;
+}
+
+static void sc_vb2_stop_streaming(struct vb2_queue *vq)
+{
+ struct sc_ctx *ctx = vb2_get_drv_priv(vq);
+ int ret;
+
+ ret = sc_ctx_stop_req(ctx);
+ if (ret < 0)
+ dev_err(ctx->sc_dev->dev, "wait timeout\n");
+
+ clear_bit(CTX_STREAMING, &ctx->flags);
+}
+
+static const struct vb2_ops sc_vb2_ops = {
+ .queue_setup = sc_vb2_queue_setup,
+ .buf_prepare = sc_vb2_buf_prepare,
+ .buf_finish = sc_vb2_buf_finish,
+ .buf_queue = sc_vb2_buf_queue,
+ .buf_cleanup = sc_vb2_buf_cleanup,
+ .wait_finish = sc_vb2_lock,
+ .wait_prepare = sc_vb2_unlock,
+ .start_streaming = sc_vb2_start_streaming,
+ .stop_streaming = sc_vb2_stop_streaming,
+};
+
+static int queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct sc_ctx *ctx = priv;
+ int ret;
+
+ memset(src_vq, 0, sizeof(*src_vq));
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ src_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ src_vq->ops = &sc_vb2_ops;
+ src_vq->mem_ops = &vb2_dma_sg_memops;
+ src_vq->drv_priv = ctx;
+ src_vq->buf_struct_size = sizeof(struct vb2_sc_buffer);
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ memset(dst_vq, 0, sizeof(*dst_vq));
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ dst_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ dst_vq->ops = &sc_vb2_ops;
+ dst_vq->mem_ops = &vb2_dma_sg_memops;
+ dst_vq->drv_priv = ctx;
+ dst_vq->buf_struct_size = sizeof(struct vb2_sc_buffer);
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+
+ return vb2_queue_init(dst_vq);
+}
+
+static bool sc_configure_rotation_degree(struct sc_ctx *ctx, int degree)
+{
+ ctx->flip_rot_cfg &= ~SCALER_ROT_MASK;
+
+ /*
+ * We expect the direction of rotation to be clockwise, but the
+ * Scaler rotates counter-clockwise. Since the G-Scaler rotates
+ * clockwise, the mapping below makes the Scaler's rotation
+ * clockwise as well.
+ */
+ if (degree == 270) {
+ ctx->flip_rot_cfg |= SCALER_ROT_90;
+ } else if (degree == 180) {
+ ctx->flip_rot_cfg |= SCALER_ROT_180;
+ } else if (degree == 90) {
+ ctx->flip_rot_cfg |= SCALER_ROT_270;
+ } else if (degree != 0) {
+ dev_err(ctx->sc_dev->dev,
+ "%s: Rotation of %d is not supported\n",
+ __func__, degree);
+ return false;
+ }
+
+ return true;
+}
+
+static int sc_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct sc_ctx *ctx;
+ int ret = 0;
+
+ sc_dbg("ctrl ID:%d, value:%d\n", ctrl->id, ctrl->val);
+ ctx = container_of(ctrl->handler, struct sc_ctx, ctrl_handler);
+
+ switch (ctrl->id) {
+ case V4L2_CID_VFLIP:
+ if (ctrl->val)
+ ctx->flip_rot_cfg |= SCALER_FLIP_X_EN;
+ else
+ ctx->flip_rot_cfg &= ~SCALER_FLIP_X_EN;
+ break;
+ case V4L2_CID_HFLIP:
+ if (ctrl->val)
+ ctx->flip_rot_cfg |= SCALER_FLIP_Y_EN;
+ else
+ ctx->flip_rot_cfg &= ~SCALER_FLIP_Y_EN;
+ break;
+ case V4L2_CID_ROTATE:
+ if (!sc_configure_rotation_degree(ctx, ctrl->val))
+ return -EINVAL;
+ break;
+ case V4L2_CID_GLOBAL_ALPHA:
+ ctx->g_alpha = ctrl->val;
+ break;
+ case V4L2_CID_2D_BLEND_OP:
+ if (!ctx->sc_dev->variant->blending && (ctrl->val > 0)) {
+ dev_err(ctx->sc_dev->dev,
+ "%s: blending is not supported from v2.2.0\n",
+ __func__);
+ return -EINVAL;
+ }
+ ctx->bl_op = ctrl->val;
+ break;
+ case V4L2_CID_2D_FMT_PREMULTI:
+ ctx->pre_multi = ctrl->val;
+ break;
+ case V4L2_CID_2D_DITH:
+ ctx->dith = ctrl->val;
+ break;
+ case V4L2_CID_CSC_EQ:
+ ctx->csc.csc_eq = ctrl->val;
+ break;
+ case V4L2_CID_CSC_RANGE:
+ ctx->csc.csc_range = ctrl->val;
+ break;
+ case V4L2_CID_CONTENT_PROTECTION:
+ ctx->cp_enabled = !!ctrl->val;
+ break;
+ case SC_CID_DNOISE_FT:
+ ctx->dnoise_ft.strength = ctrl->val;
+ break;
+ }
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops sc_ctrl_ops = {
+ .s_ctrl = sc_s_ctrl,
+};
+
+static const struct v4l2_ctrl_config sc_custom_ctrl[] = {
+ {
+ .ops = &sc_ctrl_ops,
+ .id = V4L2_CID_GLOBAL_ALPHA,
+ .name = "Set constant src alpha",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .flags = V4L2_CTRL_FLAG_SLIDER,
+ .step = 1,
+ .min = 0,
+ .max = 255,
+ .def = 255,
+ }, {
+ .ops = &sc_ctrl_ops,
+ .id = V4L2_CID_2D_BLEND_OP,
+ .name = "set blend op",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .flags = V4L2_CTRL_FLAG_SLIDER,
+ .step = 1,
+ .min = 0,
+ .max = BL_OP_ADD,
+ .def = 0,
+ }, {
+ .ops = &sc_ctrl_ops,
+ .id = V4L2_CID_2D_DITH,
+ .name = "set dithering",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .flags = V4L2_CTRL_FLAG_SLIDER,
+ .step = 1,
+ .min = false,
+ .max = true,
+ .def = false,
+ }, {
+ .ops = &sc_ctrl_ops,
+ .id = V4L2_CID_2D_FMT_PREMULTI,
+ .name = "set pre-multiplied format",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .flags = V4L2_CTRL_FLAG_SLIDER,
+ .step = 1,
+ .min = false,
+ .max = true,
+ .def = false,
+ }, {
+ .ops = &sc_ctrl_ops,
+ .id = V4L2_CID_CSC_EQ,
+ .name = "Set CSC equation",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .flags = V4L2_CTRL_FLAG_SLIDER,
+ .step = 1,
+ .min = V4L2_COLORSPACE_DEFAULT,
+ .max = V4L2_COLORSPACE_DCI_P3,
+ .def = V4L2_COLORSPACE_DEFAULT,
+ }, {
+ .ops = &sc_ctrl_ops,
+ .id = V4L2_CID_CSC_RANGE,
+ .name = "Set CSC range",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .flags = V4L2_CTRL_FLAG_SLIDER,
+ .step = 1,
+ .min = SC_CSC_NARROW,
+ .max = SC_CSC_WIDE,
+ .def = SC_CSC_NARROW,
+ }, {
+ .ops = &sc_ctrl_ops,
+ .id = V4L2_CID_CONTENT_PROTECTION,
+ .name = "Enable contents protection",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .flags = V4L2_CTRL_FLAG_SLIDER,
+ .step = 1,
+ .min = 0,
+ .max = 1,
+ .def = 0,
+ }, {
+ .ops = &sc_ctrl_ops,
+ .id = SC_CID_DNOISE_FT,
+ .name = "Enable denoising filter",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .step = 1,
+ .min = 0,
+ .max = SC_FT_MAX,
+ .def = 0,
+ }
+};
+
+static int sc_add_ctrls(struct sc_ctx *ctx)
+{
+ unsigned long i;
+
+ v4l2_ctrl_handler_init(&ctx->ctrl_handler, SC_MAX_CTRL_NUM);
+ v4l2_ctrl_new_std(&ctx->ctrl_handler, &sc_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(&ctx->ctrl_handler, &sc_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(&ctx->ctrl_handler, &sc_ctrl_ops,
+ V4L2_CID_ROTATE, 0, 270, 90, 0);
+
+ for (i = 0; i < ARRAY_SIZE(sc_custom_ctrl); i++)
+ v4l2_ctrl_new_custom(&ctx->ctrl_handler,
+ &sc_custom_ctrl[i], NULL);
+ if (ctx->ctrl_handler.error) {
+ int err = ctx->ctrl_handler.error;
+ v4l2_err(&ctx->sc_dev->m2m.v4l2_dev,
+ "v4l2_ctrl_handler_init failed %d\n", err);
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ return err;
+ }
+
+ v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
+
+ return 0;
+}
+
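+/*
+ * pm_runtime_get_sync() may sleep, so fall back to the asynchronous
+ * pm_runtime_get() when called from interrupt context.
+ */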
+static int sc_power_clk_enable(struct sc_dev *sc)
+{
+ int ret;
+
+ if (in_interrupt())
+ ret = pm_runtime_get(sc->dev);
+ else
+ ret = pm_runtime_get_sync(sc->dev);
+
+ if (ret < 0) {
+ dev_err(sc->dev,
+ "%s=%d: Failed to enable local power\n", __func__, ret);
+ return ret;
+ }
+
+ if (!IS_ERR(sc->pclk)) {
+ ret = clk_enable(sc->pclk);
+ if (ret) {
+ dev_err(sc->dev, "%s: Failed to enable PCLK (err %d)\n",
+ __func__, ret);
+ goto err_pclk;
+ }
+ }
+
+ if (!IS_ERR(sc->aclk)) {
+ ret = clk_enable(sc->aclk);
+ if (ret) {
+ dev_err(sc->dev, "%s: Failed to enable ACLK (err %d)\n",
+ __func__, ret);
+ goto err_aclk;
+ }
+ }
+
+ return 0;
+err_aclk:
+ if (!IS_ERR(sc->pclk))
+ clk_disable(sc->pclk);
+err_pclk:
+ pm_runtime_put(sc->dev);
+ return ret;
+}
+
+static void sc_clk_power_disable(struct sc_dev *sc)
+{
+ sc_clear_aux_power_cfg(sc);
+
+ if (!IS_ERR(sc->aclk))
+ clk_disable(sc->aclk);
+
+ if (!IS_ERR(sc->pclk))
+ clk_disable(sc->pclk);
+
+ pm_runtime_put(sc->dev);
+}
+
+static int sc_open(struct file *file)
+{
+ struct sc_dev *sc = video_drvdata(file);
+ struct sc_ctx *ctx;
+ int ret;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ dev_err(sc->dev, "no memory for open context\n");
+ return -ENOMEM;
+ }
+
+ atomic_inc(&sc->m2m.in_use);
+
+ ctx->context_type = SC_CTX_V4L2_TYPE;
+ INIT_LIST_HEAD(&ctx->node);
+ ctx->sc_dev = sc;
+
+ v4l2_fh_init(&ctx->fh, sc->m2m.vfd);
+ ret = sc_add_ctrls(ctx);
+ if (ret)
+ goto err_fh;
+
+ ctx->fh.ctrl_handler = &ctx->ctrl_handler;
+ file->private_data = &ctx->fh;
+ v4l2_fh_add(&ctx->fh);
+
+ /* Default color format */
+ ctx->s_frame.sc_fmt = &sc_formats[0];
+ ctx->d_frame.sc_fmt = &sc_formats[0];
+
+ if (!IS_ERR(sc->pclk)) {
+ ret = clk_prepare(sc->pclk);
+ if (ret) {
+ dev_err(sc->dev, "%s: failed to prepare PCLK(err %d)\n",
+ __func__, ret);
+ goto err_pclk_prepare;
+ }
+ }
+
+ if (!IS_ERR(sc->aclk)) {
+ ret = clk_prepare(sc->aclk);
+ if (ret) {
+ dev_err(sc->dev, "%s: failed to prepare ACLK(err %d)\n",
+ __func__, ret);
+ goto err_aclk_prepare;
+ }
+ }
+
+ /* Setup the device context for mem2mem mode. */
+ ctx->m2m_ctx = v4l2_m2m_ctx_init(sc->m2m.m2m_dev, ctx, queue_init);
+ if (IS_ERR(ctx->m2m_ctx)) {
+ ret = -EINVAL;
+ goto err_ctx;
+ }
+
+ return 0;
+
+err_ctx:
+ if (!IS_ERR(sc->aclk))
+ clk_unprepare(sc->aclk);
+err_aclk_prepare:
+ if (!IS_ERR(sc->pclk))
+ clk_unprepare(sc->pclk);
+err_pclk_prepare:
+ v4l2_fh_del(&ctx->fh);
+err_fh:
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ v4l2_fh_exit(&ctx->fh);
+ atomic_dec(&sc->m2m.in_use);
+ kfree(ctx);
+
+ return ret;
+}
+
+static int sc_release(struct file *file)
+{
+ struct sc_ctx *ctx = fh_to_sc_ctx(file->private_data);
+ struct sc_dev *sc = ctx->sc_dev;
+
+ sc_dbg("refcnt= %d", atomic_read(&sc->m2m.in_use));
+
+ atomic_dec(&sc->m2m.in_use);
+
+ destroy_intermediate_frame(ctx);
+ v4l2_m2m_ctx_release(ctx->m2m_ctx);
+ if (!IS_ERR(sc->aclk))
+ clk_unprepare(sc->aclk);
+ if (!IS_ERR(sc->pclk))
+ clk_unprepare(sc->pclk);
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+
+ return 0;
+}
+
+static unsigned int sc_poll(struct file *file,
+ struct poll_table_struct *wait)
+{
+ struct sc_ctx *ctx = fh_to_sc_ctx(file->private_data);
+
+ return v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
+}
+
+static int sc_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct sc_ctx *ctx = fh_to_sc_ctx(file->private_data);
+
+ return v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
+}
+
+static const struct v4l2_file_operations sc_v4l2_fops = {
+ .owner = THIS_MODULE,
+ .open = sc_open,
+ .release = sc_release,
+ .poll = sc_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = sc_mmap,
+};
+
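+/*
+ * Return the buffers of an aborted job with an error state and finish
+ * the job in the owning framework (v4l2-mem2mem or m2m1shot).
+ */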
+static void sc_job_finish(struct sc_dev *sc, struct sc_ctx *ctx)
+{
+ unsigned long flags;
+ struct vb2_v4l2_buffer *src_vb, *dst_vb;
+
+ spin_lock_irqsave(&sc->slock, flags);
+
+ if (ctx->context_type == SC_CTX_V4L2_TYPE) {
+ ctx = v4l2_m2m_get_curr_priv(sc->m2m.m2m_dev);
+ if (!ctx || !ctx->m2m_ctx) {
+ dev_err(sc->dev, "current ctx is NULL\n");
+ spin_unlock_irqrestore(&sc->slock, flags);
+ return;
+ }
+ clear_bit(CTX_RUN, &ctx->flags);
+
+#ifdef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION
+ if (test_bit(DEV_CP, &sc->state)) {
+ sc_ctrl_protection(sc, ctx, false);
+ clear_bit(DEV_CP, &sc->state);
+ }
+#endif
+ src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+
+ BUG_ON(!src_vb || !dst_vb);
+
+ v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_ERROR);
+ v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_ERROR);
+
+ v4l2_m2m_job_finish(sc->m2m.m2m_dev, ctx->m2m_ctx);
+ } else {
+ struct m2m1shot_task *task =
+ m2m1shot_get_current_task(sc->m21dev);
+
+ BUG_ON(ctx->context_type != SC_CTX_M2M1SHOT_TYPE);
+
+ m2m1shot_task_finish(sc->m21dev, task, true);
+ }
+
+ spin_unlock_irqrestore(&sc->slock, flags);
+}
+
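+/*
+ * Watchdog timer handler: dumps the H/W state while a job is stuck and
+ * forces a soft reset after SC_WDT_CNT expirations.
+ */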
+static void sc_watchdog(unsigned long arg)
+{
+ struct sc_dev *sc = (struct sc_dev *)arg;
+ struct sc_ctx *ctx;
+ unsigned long flags;
+
+ sc_dbg("timeout watchdog\n");
+ if (atomic_read(&sc->wdt.cnt) >= SC_WDT_CNT) {
+ sc_hwset_soft_reset(sc);
+
+ atomic_set(&sc->wdt.cnt, 0);
+ clear_bit(DEV_RUN, &sc->state);
+
+ spin_lock_irqsave(&sc->ctxlist_lock, flags);
+ ctx = sc->current_ctx;
+ sc->current_ctx = NULL;
+ spin_unlock_irqrestore(&sc->ctxlist_lock, flags);
+
+ BUG_ON(!ctx);
+ sc_job_finish(sc, ctx);
+ sc_clk_power_disable(sc);
+ return;
+ }
+
+ if (test_bit(DEV_RUN, &sc->state)) {
+ sc_hwregs_dump(sc);
+ exynos_sysmmu_show_status(sc->dev);
+ atomic_inc(&sc->wdt.cnt);
+ dev_err(sc->dev, "scaler is still running\n");
+ mod_timer(&sc->wdt.timer, jiffies + SC_TIMEOUT);
+ } else {
+ sc_dbg("scaler finished job\n");
+ }
+}
+
+static void sc_set_csc_coef(struct sc_ctx *ctx)
+{
+ struct sc_frame *s_frame, *d_frame;
+ struct sc_dev *sc;
+ enum sc_csc_idx idx;
+
+ sc = ctx->sc_dev;
+ s_frame = &ctx->s_frame;
+ d_frame = &ctx->d_frame;
+
+ if (s_frame->sc_fmt->is_rgb == d_frame->sc_fmt->is_rgb)
+ idx = NO_CSC;
+ else if (s_frame->sc_fmt->is_rgb)
+ idx = CSC_R2Y;
+ else
+ idx = CSC_Y2R;
+
+ sc_hwset_csc_coef(sc, idx, &ctx->csc);
+}
+
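+/*
+ * Second pass of two-stage scaling: read the intermediate frame written
+ * by the first pass and scale it into the real destination. Returns
+ * true if a second H/W run was started, false if no intermediate frame
+ * is pending.
+ */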
+static bool sc_process_2nd_stage(struct sc_dev *sc, struct sc_ctx *ctx)
+{
+ struct sc_frame *s_frame, *d_frame;
+ const struct sc_size_limit *limit;
+ unsigned int halign = 0, walign = 0;
+ unsigned int pre_h_ratio = 0;
+ unsigned int pre_v_ratio = 0;
+ unsigned int h_ratio = SCALE_RATIO(1, 1);
+ unsigned int v_ratio = SCALE_RATIO(1, 1);
+
+ if (!test_bit(CTX_INT_FRAME, &ctx->flags))
+ return false;
+
+ s_frame = &ctx->i_frame->frame;
+ d_frame = &ctx->d_frame;
+
+ s_frame->addr.y = ctx->i_frame->src_addr.y;
+ s_frame->addr.cb = ctx->i_frame->src_addr.cb;
+ s_frame->addr.cr = ctx->i_frame->src_addr.cr;
+
+ if (sc_fmt_is_yuv422(d_frame->sc_fmt->pixelformat)) {
+ walign = 1;
+ } else if (sc_fmt_is_yuv420(d_frame->sc_fmt->pixelformat)) {
+ walign = 1;
+ halign = 1;
+ }
+
+ limit = &sc->variant->limit_input;
+ v4l_bound_align_image(&s_frame->crop.width, limit->min_w, limit->max_w,
+ walign, &s_frame->crop.height, limit->min_h,
+ limit->max_h, halign, 0);
+
+ sc_hwset_src_image_format(sc, s_frame->sc_fmt);
+ sc_hwset_dst_image_format(sc, d_frame->sc_fmt);
+ sc_hwset_src_imgsize(sc, s_frame);
+ sc_hwset_dst_imgsize(sc, d_frame);
+
+ if ((ctx->flip_rot_cfg & SCALER_ROT_90) &&
+ (ctx->dnoise_ft.strength > SC_FT_BLUR)) {
+ h_ratio = SCALE_RATIO(s_frame->crop.height, d_frame->crop.width);
+ v_ratio = SCALE_RATIO(s_frame->crop.width, d_frame->crop.height);
+ } else {
+ h_ratio = SCALE_RATIO(s_frame->crop.width, d_frame->crop.width);
+ v_ratio = SCALE_RATIO(s_frame->crop.height, d_frame->crop.height);
+ }
+
+ pre_h_ratio = 0;
+ pre_v_ratio = 0;
+
+ if (!sc->variant->ratio_20bit) {
+ /* No prescaler, 1/4 precision */
+ BUG_ON(h_ratio > SCALE_RATIO(4, 1));
+ BUG_ON(v_ratio > SCALE_RATIO(4, 1));
+
+ h_ratio >>= 4;
+ v_ratio >>= 4;
+ }
+
+ /* no rotation */
+
+ sc_hwset_hratio(sc, h_ratio, pre_h_ratio);
+ sc_hwset_vratio(sc, v_ratio, pre_v_ratio);
+
+ sc_hwset_polyphase_hcoef(sc, h_ratio, h_ratio, 0);
+ sc_hwset_polyphase_vcoef(sc, v_ratio, v_ratio, 0);
+
+ sc_hwset_src_pos(sc, s_frame->crop.left, s_frame->crop.top,
+ s_frame->sc_fmt->h_shift, s_frame->sc_fmt->v_shift);
+ sc_hwset_src_wh(sc, s_frame->crop.width, s_frame->crop.height,
+ pre_h_ratio, pre_v_ratio,
+ s_frame->sc_fmt->h_shift, s_frame->sc_fmt->v_shift);
+
+ sc_hwset_dst_pos(sc, d_frame->crop.left, d_frame->crop.top);
+ sc_hwset_dst_wh(sc, d_frame->crop.width, d_frame->crop.height);
+
+ sc_hwset_src_addr(sc, &s_frame->addr);
+ sc_hwset_dst_addr(sc, &d_frame->addr);
+
+ if ((ctx->flip_rot_cfg & SCALER_ROT_MASK) &&
+ (ctx->dnoise_ft.strength > SC_FT_BLUR))
+ sc_hwset_flip_rotation(sc, ctx->flip_rot_cfg);
+ else
+ sc_hwset_flip_rotation(sc, 0);
+
+ sc_hwset_start(sc);
+
+ clear_bit(CTX_INT_FRAME, &ctx->flags);
+
+ return true;
+}
+
+static void sc_set_dithering(struct sc_ctx *ctx)
+{
+ struct sc_dev *sc = ctx->sc_dev;
+ unsigned int val = 0;
+
+ if (ctx->dith)
+ val = sc_dith_val(1, 1, 1);
+
+ sc_dbg("dither value is 0x%x\n", val);
+ sc_hwset_dith(sc, val);
+}
+
+/*
+ * 'Prefetch' is not required by the Scaler
+ * because fetching a larger region is more beneficial for rotation.
+ */
+#define SC_SRC_PBCONFIG (SYSMMU_PBUFCFG_TLB_UPDATE | \
+ SYSMMU_PBUFCFG_ASCENDING | SYSMMU_PBUFCFG_READ)
+#define SC_DST_PBCONFIG (SYSMMU_PBUFCFG_TLB_UPDATE | \
+ SYSMMU_PBUFCFG_ASCENDING | SYSMMU_PBUFCFG_WRITE)
+
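+/*
+ * Collect one prefetch buffer descriptor per DMA plane of the source
+ * and destination frames. Note that pb_reg[] is only assembled here;
+ * no System MMU call consumes it in this version of the driver.
+ */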
+static void sc_set_prefetch_buffers(struct device *dev, struct sc_ctx *ctx)
+{
+ struct sc_frame *s_frame = &ctx->s_frame;
+ struct sc_frame *d_frame = &ctx->d_frame;
+ struct sysmmu_prefbuf pb_reg[6];
+ unsigned int i = 0;
+
+ pb_reg[i].base = s_frame->addr.y;
+ pb_reg[i].size = s_frame->addr.ysize;
+ pb_reg[i++].config = SC_SRC_PBCONFIG;
+ if (s_frame->sc_fmt->num_comp >= 2) {
+ pb_reg[i].base = s_frame->addr.cb;
+ pb_reg[i].size = s_frame->addr.cbsize;
+ pb_reg[i++].config = SC_SRC_PBCONFIG;
+ }
+ if (s_frame->sc_fmt->num_comp >= 3) {
+ pb_reg[i].base = s_frame->addr.cr;
+ pb_reg[i].size = s_frame->addr.crsize;
+ pb_reg[i++].config = SC_SRC_PBCONFIG;
+ }
+
+ pb_reg[i].base = d_frame->addr.y;
+ pb_reg[i].size = d_frame->addr.ysize;
+ pb_reg[i++].config = SC_DST_PBCONFIG;
+ if (d_frame->sc_fmt->num_comp >= 2) {
+ pb_reg[i].base = d_frame->addr.cb;
+ pb_reg[i].size = d_frame->addr.cbsize;
+ pb_reg[i++].config = SC_DST_PBCONFIG;
+ }
+ if (d_frame->sc_fmt->num_comp >= 3) {
+ pb_reg[i].base = d_frame->addr.cr;
+ pb_reg[i].size = d_frame->addr.crsize;
+ pb_reg[i++].config = SC_DST_PBCONFIG;
+ }
+}
+
+static void sc_set_initial_phase(struct sc_ctx *ctx)
+{
+ struct sc_dev *sc = ctx->sc_dev;
+
+ /* TODO: check scaling, CSC and rotation against the H/W guide */
+ sc_hwset_src_init_phase(sc, &ctx->init_phase);
+}
+
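+/*
+ * Pick the first context off sc->context_list and program the whole
+ * pipeline (formats, scaling ratios, filter coefficients, addresses,
+ * blending and rotation) before starting the H/W. Returns immediately
+ * if a job is already running or no job is pending.
+ */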
+static int sc_run_next_job(struct sc_dev *sc)
+{
+ unsigned long flags;
+ struct sc_ctx *ctx;
+ struct sc_frame *d_frame, *s_frame;
+ unsigned int pre_h_ratio = 0;
+ unsigned int pre_v_ratio = 0;
+ unsigned int h_ratio = SCALE_RATIO(1, 1);
+ unsigned int v_ratio = SCALE_RATIO(1, 1);
+ unsigned int ch_ratio = SCALE_RATIO(1, 1);
+ unsigned int cv_ratio = SCALE_RATIO(1, 1);
+ unsigned int h_shift, v_shift;
+ int ret;
+
+ spin_lock_irqsave(&sc->ctxlist_lock, flags);
+
+ if (sc->current_ctx || list_empty(&sc->context_list)) {
+ /* a job is currently being processed, or there is no job to run */
+ spin_unlock_irqrestore(&sc->ctxlist_lock, flags);
+ return 0;
+ }
+
+ ctx = list_first_entry(&sc->context_list, struct sc_ctx, node);
+
+ list_del_init(&ctx->node);
+
+ sc->current_ctx = ctx;
+
+ spin_unlock_irqrestore(&sc->ctxlist_lock, flags);
+
+ /*
+ * sc_run_next_job() must not reenter while sc->state is DEV_RUN.
+ * DEV_RUN is cleared when an operation is finished.
+ */
+ BUG_ON(test_bit(DEV_RUN, &sc->state));
+
+ s_frame = &ctx->s_frame;
+ d_frame = &ctx->d_frame;
+
+ ret = sc_power_clk_enable(sc);
+ if (ret) {
+ pm_runtime_put(sc->dev);
+ return ret;
+ }
+
+ sc_hwset_init(sc);
+
+ if (ctx->i_frame) {
+ set_bit(CTX_INT_FRAME, &ctx->flags);
+ d_frame = &ctx->i_frame->frame;
+ }
+
+ sc_set_csc_coef(ctx);
+
+ sc_hwset_src_image_format(sc, s_frame->sc_fmt);
+ sc_hwset_dst_image_format(sc, d_frame->sc_fmt);
+
+ sc_hwset_pre_multi_format(sc, s_frame->pre_multi, d_frame->pre_multi);
+
+ sc_hwset_src_imgsize(sc, s_frame);
+ sc_hwset_dst_imgsize(sc, d_frame);
+
+ h_ratio = ctx->h_ratio;
+ v_ratio = ctx->v_ratio;
+ pre_h_ratio = ctx->pre_h_ratio;
+ pre_v_ratio = ctx->pre_v_ratio;
+
+ if (!sc->variant->ratio_20bit) {
+ /* No prescaler, 1/4 precision */
+ BUG_ON(h_ratio > SCALE_RATIO(4, 1));
+ BUG_ON(v_ratio > SCALE_RATIO(4, 1));
+
+ h_ratio >>= 4;
+ v_ratio >>= 4;
+ }
+
+ h_shift = s_frame->sc_fmt->h_shift;
+ v_shift = s_frame->sc_fmt->v_shift;
+
+ if (ctx->flip_rot_cfg & SCALER_ROT_90) {
+ swap(pre_h_ratio, pre_v_ratio);
+ swap(h_shift, v_shift);
+ }
+
+ if (h_shift < d_frame->sc_fmt->h_shift)
+ ch_ratio = h_ratio * 2; /* chroma scaling down */
+ else if (h_shift > d_frame->sc_fmt->h_shift)
+ ch_ratio = h_ratio / 2; /* chroma scaling up */
+ else
+ ch_ratio = h_ratio;
+
+ if (v_shift < d_frame->sc_fmt->v_shift)
+ cv_ratio = v_ratio * 2; /* chroma scaling down */
+ else if (v_shift > d_frame->sc_fmt->v_shift)
+ cv_ratio = v_ratio / 2; /* chroma scaling up */
+ else
+ cv_ratio = v_ratio;
+
+ sc_hwset_hratio(sc, h_ratio, pre_h_ratio);
+ sc_hwset_vratio(sc, v_ratio, pre_v_ratio);
+
+ sc_hwset_polyphase_hcoef(sc, h_ratio, ch_ratio,
+ ctx->dnoise_ft.strength);
+ sc_hwset_polyphase_vcoef(sc, v_ratio, cv_ratio,
+ ctx->dnoise_ft.strength);
+
+ sc_hwset_src_pos(sc, s_frame->crop.left, s_frame->crop.top,
+ s_frame->sc_fmt->h_shift, s_frame->sc_fmt->v_shift);
+ sc_hwset_src_wh(sc, s_frame->crop.width, s_frame->crop.height,
+ pre_h_ratio, pre_v_ratio,
+ s_frame->sc_fmt->h_shift, s_frame->sc_fmt->v_shift);
+
+ sc_hwset_dst_pos(sc, d_frame->crop.left, d_frame->crop.top);
+ sc_hwset_dst_wh(sc, d_frame->crop.width, d_frame->crop.height);
+
+ if (sc->variant->initphase)
+ sc_set_initial_phase(ctx);
+
+ sc_hwset_src_addr(sc, &s_frame->addr);
+ sc_hwset_dst_addr(sc, &d_frame->addr);
+
+ sc_set_dithering(ctx);
+
+ if (ctx->bl_op)
+ sc_hwset_blend(sc, ctx->bl_op, ctx->pre_multi, ctx->g_alpha);
+
+ if (ctx->dnoise_ft.strength > SC_FT_BLUR)
+ sc_hwset_flip_rotation(sc, 0);
+ else
+ sc_hwset_flip_rotation(sc, ctx->flip_rot_cfg);
+
+ sc_hwset_int_en(sc);
+
+ set_bit(DEV_RUN, &sc->state);
+ set_bit(CTX_RUN, &ctx->flags);
+
+ sc_set_prefetch_buffers(sc->dev, ctx);
+
+ mod_timer(&sc->wdt.timer, jiffies + SC_TIMEOUT);
+
+ if (__measure_hw_latency) {
+ if (ctx->context_type == SC_CTX_V4L2_TYPE) {
+ struct vb2_v4l2_buffer *vb =
+ v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+ struct v4l2_m2m_buffer *mb =
+ container_of(vb, typeof(*mb), vb);
+ struct vb2_sc_buffer *svb =
+ container_of(mb, typeof(*svb), mb);
+
+ svb->ktime = ktime_get();
+ } else {
+ ctx->ktime_m2m1shot = ktime_get();
+ }
+ }
+#ifdef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION
+ if (sc->cfw) {
+ ret = exynos_smc(MC_FC_SET_CFW_PROT,
+ MC_FC_DRM_SET_CFW_PROT,
+ SC_SMC_PROTECTION_ID(sc->dev_id), 0);
+ if (ret != SMC_TZPC_OK)
+ dev_err(sc->dev,
+ "fail to set cfw protection (%d)\n", ret);
+ }
+ if (ctx->cp_enabled) {
+ ret = sc_ctrl_protection(sc, ctx, true);
+ if (!ret)
+ set_bit(DEV_CP, &sc->state);
+ }
+#endif
+ sc_hwset_start(sc);
+
+ return 0;
+}
+
+static int sc_add_context_and_run(struct sc_dev *sc, struct sc_ctx *ctx)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&sc->ctxlist_lock, flags);
+ list_add_tail(&ctx->node, &sc->context_list);
+ spin_unlock_irqrestore(&sc->ctxlist_lock, flags);
+
+ return sc_run_next_job(sc);
+}
+
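+/*
+ * Frame-done interrupt handler: either start the second stage of a
+ * two-stage job or complete the current one, record the H/W latency if
+ * requested, then schedule the next pending job.
+ */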
+static irqreturn_t sc_irq_handler(int irq, void *priv)
+{
+ struct sc_dev *sc = priv;
+ struct sc_ctx *ctx;
+ struct vb2_v4l2_buffer *src_vb, *dst_vb;
+ u32 irq_status;
+
+ spin_lock(&sc->slock);
+
+ clear_bit(DEV_RUN, &sc->state);
+
+ /*
+ * It is safe to access sc->current_ctx without ctxlist_lock held
+ * because it is not modified until sc_run_next_job() is called.
+ */
+ ctx = sc->current_ctx;
+
+ BUG_ON(!ctx);
+
+ irq_status = sc_hwget_and_clear_irq_status(sc);
+
+ if (SCALER_INT_OK(irq_status) && sc_process_2nd_stage(sc, ctx))
+ goto isr_unlock;
+
+ del_timer(&sc->wdt.timer);
+
+#ifdef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION
+ if (test_bit(DEV_CP, &sc->state)) {
+ sc_ctrl_protection(sc, ctx, false);
+ clear_bit(DEV_CP, &sc->state);
+ }
+#endif
+ sc_clk_power_disable(sc);
+
+ clear_bit(CTX_RUN, &ctx->flags);
+
+ if (ctx->context_type == SC_CTX_V4L2_TYPE) {
+ BUG_ON(ctx != v4l2_m2m_get_curr_priv(sc->m2m.m2m_dev));
+
+ src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+
+ BUG_ON(!src_vb || !dst_vb);
+
+ if (__measure_hw_latency) {
+ struct v4l2_m2m_buffer *mb =
+ container_of(dst_vb, typeof(*mb), vb);
+ struct vb2_sc_buffer *svb =
+ container_of(mb, typeof(*svb), mb);
+
+ dst_vb->vb2_buf.timestamp =
+ (__u32)ktime_us_delta(ktime_get(), svb->ktime);
+ }
+
+ v4l2_m2m_buf_done(src_vb,
+ SCALER_INT_OK(irq_status) ?
+ VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR);
+ v4l2_m2m_buf_done(dst_vb,
+ SCALER_INT_OK(irq_status) ?
+ VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR);
+
+ if (test_bit(DEV_SUSPEND, &sc->state)) {
+ sc_dbg("wake up blocked process by suspend\n");
+ wake_up(&sc->wait);
+ } else {
+ v4l2_m2m_job_finish(sc->m2m.m2m_dev, ctx->m2m_ctx);
+ }
+
+ /* Wake up from CTX_ABORT state */
+ if (test_and_clear_bit(CTX_ABORT, &ctx->flags))
+ wake_up(&sc->wait);
+ } else {
+ struct m2m1shot_task *task =
+ m2m1shot_get_current_task(sc->m21dev);
+
+ BUG_ON(ctx->context_type != SC_CTX_M2M1SHOT_TYPE);
+
+ if (__measure_hw_latency)
+ task->task.reserved[1] =
+ (unsigned long)ktime_us_delta(
+ ktime_get(), ctx->ktime_m2m1shot);
+
+ m2m1shot_task_finish(sc->m21dev, task,
+ SCALER_INT_OK(irq_status));
+ }
+
+ spin_lock(&sc->ctxlist_lock);
+ sc->current_ctx = NULL;
+ spin_unlock(&sc->ctxlist_lock);
+
+ sc_run_next_job(sc);
+
+isr_unlock:
+ spin_unlock(&sc->slock);
+
+ return IRQ_HANDLED;
+}
+
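+/*
+ * Compute the per-plane DMA addresses and sizes of a vb2 buffer from
+ * the pixel format. Single-allocation multi-plane formats (NV12N,
+ * YUV420N, AYV12) derive the chroma addresses from the luma base.
+ */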
+static int sc_get_bufaddr(struct sc_dev *sc, struct vb2_buffer *vb2buf,
+ struct sc_frame *frame)
+{
+ unsigned int pixsize, bytesize;
+
+ pixsize = frame->width * frame->height;
+ bytesize = (pixsize * frame->sc_fmt->bitperpixel[0]) >> 3;
+
+ frame->addr.y = vb2_dma_sg_plane_dma_addr(vb2buf, 0);
+ frame->addr.cb = 0;
+ frame->addr.cr = 0;
+ frame->addr.cbsize = 0;
+ frame->addr.crsize = 0;
+
+ switch (frame->sc_fmt->num_comp) {
+ case 1: /* rgb, yuyv */
+ frame->addr.ysize = bytesize;
+ break;
+ case 2:
+ if (frame->sc_fmt->num_planes == 1) {
+ if (frame->sc_fmt->pixelformat == V4L2_PIX_FMT_NV12N) {
+ unsigned int w = frame->width;
+ unsigned int h = frame->height;
+ frame->addr.cb =
+ NV12N_CBCR_BASE(frame->addr.y, w, h);
+ frame->addr.ysize = NV12N_Y_SIZE(w, h);
+ frame->addr.cbsize = NV12N_CBCR_SIZE(w, h);
+ } else if (frame->sc_fmt->pixelformat == V4L2_PIX_FMT_NV12N_10B) {
+ unsigned int w = frame->width;
+ unsigned int h = frame->height;
+ frame->addr.cb =
+ NV12N_10B_CBCR_BASE(frame->addr.y, w, h);
+ frame->addr.ysize = NV12N_Y_SIZE(w, h);
+ frame->addr.cbsize = NV12N_CBCR_SIZE(w, h);
+ } else {
+ frame->addr.cb = frame->addr.y + pixsize;
+ frame->addr.ysize = pixsize;
+ frame->addr.cbsize = bytesize - pixsize;
+ }
+ } else if (frame->sc_fmt->num_planes == 2) {
+ frame->addr.cb = vb2_dma_sg_plane_dma_addr(vb2buf, 1);
+ frame->addr.ysize =
+ pixsize * frame->sc_fmt->bitperpixel[0] >> 3;
+ frame->addr.cbsize =
+ pixsize * frame->sc_fmt->bitperpixel[1] >> 3;
+ }
+ break;
+ case 3:
+ if (frame->sc_fmt->num_planes == 1) {
+ if (sc_fmt_is_ayv12(frame->sc_fmt->pixelformat)) {
+ unsigned int c_span;
+ c_span = ALIGN(frame->width >> 1, 16);
+ frame->addr.ysize = pixsize;
+ frame->addr.cbsize = c_span * (frame->height >> 1);
+ frame->addr.crsize = frame->addr.cbsize;
+ frame->addr.cb = frame->addr.y + pixsize;
+ frame->addr.cr = frame->addr.cb + frame->addr.cbsize;
+ } else if (frame->sc_fmt->pixelformat ==
+ V4L2_PIX_FMT_YUV420N) {
+ unsigned int w = frame->width;
+ unsigned int h = frame->height;
+ frame->addr.ysize = YUV420N_Y_SIZE(w, h);
+ frame->addr.cbsize = YUV420N_CB_SIZE(w, h);
+ frame->addr.crsize = YUV420N_CR_SIZE(w, h);
+ frame->addr.cb =
+ YUV420N_CB_BASE(frame->addr.y, w, h);
+ frame->addr.cr =
+ YUV420N_CR_BASE(frame->addr.y, w, h);
+ } else {
+ frame->addr.ysize = pixsize;
+ frame->addr.cbsize = (bytesize - pixsize) / 2;
+ frame->addr.crsize = frame->addr.cbsize;
+ frame->addr.cb = frame->addr.y + pixsize;
+ frame->addr.cr = frame->addr.cb + frame->addr.cbsize;
+ }
+ } else if (frame->sc_fmt->num_planes == 3) {
+ frame->addr.cb = vb2_dma_sg_plane_dma_addr(vb2buf, 1);
+ frame->addr.cr = vb2_dma_sg_plane_dma_addr(vb2buf, 2);
+ frame->addr.ysize =
+ pixsize * frame->sc_fmt->bitperpixel[0] >> 3;
+ frame->addr.cbsize =
+ pixsize * frame->sc_fmt->bitperpixel[1] >> 3;
+ frame->addr.crsize =
+ pixsize * frame->sc_fmt->bitperpixel[2] >> 3;
+ } else {
+ dev_err(sc->dev, "Please check the num of comp\n");
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (frame->sc_fmt->pixelformat == V4L2_PIX_FMT_YVU420 ||
+ frame->sc_fmt->pixelformat == V4L2_PIX_FMT_YVU420M) {
+ u32 t_cb = frame->addr.cb;
+ frame->addr.cb = frame->addr.cr;
+ frame->addr.cr = t_cb;
+ }
+
+ sc_dbg("y addr %pa y size %#x\n", &frame->addr.y, frame->addr.ysize);
+ sc_dbg("cb addr %pa cb size %#x\n", &frame->addr.cb, frame->addr.cbsize);
+ sc_dbg("cr addr %pa cr size %#x\n", &frame->addr.cr, frame->addr.crsize);
+
+ return 0;
+}
+
+static void sc_m2m_device_run(void *priv)
+{
+ struct sc_ctx *ctx = priv;
+ struct sc_dev *sc = ctx->sc_dev;
+ struct sc_frame *s_frame, *d_frame;
+
+ if (test_bit(DEV_SUSPEND, &sc->state)) {
+ dev_err(sc->dev, "Scaler is in suspend state\n");
+ return;
+ }
+
+ if (test_bit(CTX_ABORT, &ctx->flags)) {
+ dev_err(sc->dev, "aborted scaler device run\n");
+ return;
+ }
+
+ s_frame = &ctx->s_frame;
+ d_frame = &ctx->d_frame;
+
+ sc_get_bufaddr(sc, v4l2_m2m_next_src_buf(ctx->m2m_ctx), s_frame);
+ sc_get_bufaddr(sc, v4l2_m2m_next_dst_buf(ctx->m2m_ctx), d_frame);
+
+ sc_add_context_and_run(sc, ctx);
+}
+
+static void sc_m2m_job_abort(void *priv)
+{
+ struct sc_ctx *ctx = priv;
+ int ret;
+
+ ret = sc_ctx_stop_req(ctx);
+ if (ret < 0)
+ dev_err(ctx->sc_dev->dev, "wait timeout\n");
+}
+
+static struct v4l2_m2m_ops sc_m2m_ops = {
+ .device_run = sc_m2m_device_run,
+ .job_abort = sc_m2m_job_abort,
+};
+
+static void sc_unregister_m2m_device(struct sc_dev *sc)
+{
+ v4l2_m2m_release(sc->m2m.m2m_dev);
+ video_device_release(sc->m2m.vfd);
+ v4l2_device_unregister(&sc->m2m.v4l2_dev);
+}
+
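+/*
+ * Register the V4L2 mem2mem interface: a v4l2_device, a video node
+ * (video50 + instance id) and the mem2mem framework instance.
+ */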
+static int sc_register_m2m_device(struct sc_dev *sc, int dev_id)
+{
+ struct v4l2_device *v4l2_dev;
+ struct device *dev;
+ struct video_device *vfd;
+ int ret = 0;
+
+ dev = sc->dev;
+ v4l2_dev = &sc->m2m.v4l2_dev;
+
+ scnprintf(v4l2_dev->name, sizeof(v4l2_dev->name), "%s.m2m",
+ MODULE_NAME);
+
+ ret = v4l2_device_register(dev, v4l2_dev);
+ if (ret) {
+ dev_err(sc->dev, "failed to register v4l2 device\n");
+ return ret;
+ }
+
+ vfd = video_device_alloc();
+ if (!vfd) {
+ dev_err(sc->dev, "failed to allocate video device\n");
+ ret = -ENOMEM;
+ goto err_v4l2_dev;
+ }
+
+ vfd->fops = &sc_v4l2_fops;
+ vfd->ioctl_ops = &sc_v4l2_ioctl_ops;
+ vfd->release = video_device_release;
+ vfd->lock = &sc->lock;
+ vfd->vfl_dir = VFL_DIR_M2M;
+ vfd->v4l2_dev = v4l2_dev;
+ scnprintf(vfd->name, sizeof(vfd->name), "%s:m2m", MODULE_NAME);
+
+ video_set_drvdata(vfd, sc);
+
+ sc->m2m.vfd = vfd;
+ sc->m2m.m2m_dev = v4l2_m2m_init(&sc_m2m_ops);
+ if (IS_ERR(sc->m2m.m2m_dev)) {
+ dev_err(sc->dev, "failed to initialize v4l2-m2m device\n");
+ ret = PTR_ERR(sc->m2m.m2m_dev);
+ goto err_dev_alloc;
+ }
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, 50 + dev_id);
+ if (ret) {
+ dev_err(sc->dev, "failed to register video device (video%d)\n",
+ 50 + dev_id);
+ goto err_m2m_dev;
+ }
+
+ return 0;
+
+err_m2m_dev:
+ v4l2_m2m_release(sc->m2m.m2m_dev);
+err_dev_alloc:
+ video_device_release(sc->m2m.vfd);
+err_v4l2_dev:
+ v4l2_device_unregister(v4l2_dev);
+
+ return ret;
+}
+
+static int sc_m2m1shot_init_context(struct m2m1shot_context *m21ctx)
+{
+ struct sc_dev *sc = dev_get_drvdata(m21ctx->m21dev->dev);
+ struct sc_ctx *ctx;
+ int ret;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ atomic_inc(&sc->m2m.in_use);
+
+ if (!IS_ERR(sc->pclk)) {
+ ret = clk_prepare(sc->pclk);
+ if (ret) {
+ dev_err(sc->dev, "%s: failed to prepare PCLK(err %d)\n",
+ __func__, ret);
+ goto err_pclk;
+ }
+ }
+
+ if (!IS_ERR(sc->aclk)) {
+ ret = clk_prepare(sc->aclk);
+ if (ret) {
+ dev_err(sc->dev, "%s: failed to prepare ACLK(err %d)\n",
+ __func__, ret);
+ goto err_aclk;
+ }
+ }
+
+ ctx->context_type = SC_CTX_M2M1SHOT_TYPE;
+ INIT_LIST_HEAD(&ctx->node);
+ ctx->sc_dev = sc;
+
+ ctx->s_frame.sc_fmt = &sc_formats[0];
+ ctx->d_frame.sc_fmt = &sc_formats[0];
+
+ m21ctx->priv = ctx;
+ ctx->m21_ctx = m21ctx;
+
+ return 0;
+err_aclk:
+ if (!IS_ERR(sc->pclk))
+ clk_unprepare(sc->pclk);
+err_pclk:
+ atomic_dec(&sc->m2m.in_use);
+ kfree(ctx);
+ return ret;
+}
+
+static int sc_m2m1shot_free_context(struct m2m1shot_context *m21ctx)
+{
+ struct sc_ctx *ctx = m21ctx->priv;
+
+ atomic_dec(&ctx->sc_dev->m2m.in_use);
+ if (!IS_ERR(ctx->sc_dev->aclk))
+ clk_unprepare(ctx->sc_dev->aclk);
+ if (!IS_ERR(ctx->sc_dev->pclk))
+ clk_unprepare(ctx->sc_dev->pclk);
+ BUG_ON(!list_empty(&ctx->node));
+ destroy_intermediate_frame(ctx);
+ kfree(ctx);
+ return 0;
+}
+
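+/*
+ * Validate the requested format and geometry for one direction and fill
+ * bytes_used[] with the payload size of each plane. Returns the number
+ * of planes on success or a negative error code.
+ */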
+static int sc_m2m1shot_prepare_format(struct m2m1shot_context *m21ctx,
+ struct m2m1shot_pix_format *fmt,
+ enum dma_data_direction dir,
+ size_t bytes_used[])
+{
+ struct sc_ctx *ctx = m21ctx->priv;
+ struct sc_frame *frame = (dir == DMA_TO_DEVICE) ?
+ &ctx->s_frame : &ctx->d_frame;
+ s32 size_min = (dir == DMA_TO_DEVICE) ? 16 : 4;
+ int i;
+
+ frame->sc_fmt = sc_find_format(ctx->sc_dev, fmt->fmt,
+ (dir == DMA_TO_DEVICE));
+ if (!frame->sc_fmt) {
+ dev_err(ctx->sc_dev->dev,
+ "%s: Pixel format %#x is not supported for %s\n",
+ __func__, fmt->fmt,
+ (dir == DMA_TO_DEVICE) ? "output" : "capture");
+ return -EINVAL;
+ }
+
+ if (!fmt->crop.width)
+ fmt->crop.width = fmt->width;
+ if (!fmt->crop.height)
+ fmt->crop.height = fmt->height;
+
+ if (!fmt->width || !fmt->height ||
+ !fmt->crop.width || !fmt->crop.height) {
+ dev_err(ctx->sc_dev->dev,
+ "%s: neither width nor height can be zero\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if ((fmt->width > 8192) || (fmt->height > 8192)) {
+ dev_err(ctx->sc_dev->dev,
+ "%s: requested image size %dx%d exceed 8192x8192\n",
+ __func__, fmt->width, fmt->height);
+ return -EINVAL;
+ }
+
+ if ((fmt->crop.width < size_min) || (fmt->crop.height < size_min)) {
+ dev_err(ctx->sc_dev->dev,
+ "%s: image size %dx%d must not less than %dx%d\n",
+ __func__, fmt->width, fmt->height, size_min, size_min);
+ return -EINVAL;
+ }
+
+ if ((fmt->crop.left < 0) || (fmt->crop.top < 0)) {
+ dev_err(ctx->sc_dev->dev,
+ "%s: negative crop offset(%d, %d) is not supported\n",
+ __func__, fmt->crop.left, fmt->crop.top);
+ return -EINVAL;
+ }
+
+ if ((fmt->width < (fmt->crop.width + fmt->crop.left)) ||
+ (fmt->height < (fmt->crop.height + fmt->crop.top))) {
+ dev_err(ctx->sc_dev->dev,
+ "%s: crop region(%d,%d,%d,%d) is larger than image\n",
+ __func__, fmt->crop.left, fmt->crop.top,
+ fmt->crop.width, fmt->crop.height);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < frame->sc_fmt->num_planes; i++) {
+ if (sc_fmt_is_ayv12(fmt->fmt)) {
+ unsigned int y_size, c_span;
+ y_size = fmt->width * fmt->height;
+ c_span = ALIGN(fmt->width / 2, 16);
+ bytes_used[i] = y_size + (c_span * fmt->height / 2) * 2;
+ } else {
+ bytes_used[i] = fmt->width * fmt->height;
+ bytes_used[i] *= frame->sc_fmt->bitperpixel[i];
+ bytes_used[i] /= 8;
+ }
+ }
+
+ frame->width = fmt->width;
+ frame->height = fmt->height;
+ frame->crop = fmt->crop;
+
+ return frame->sc_fmt->num_planes;
+}
+
+static int sc_m2m1shot_prepare_operation(struct m2m1shot_context *m21ctx,
+ struct m2m1shot_task *task)
+{
+ struct sc_ctx *ctx = m21ctx->priv;
+ struct m2m1shot *shot = &task->task;
+ int ret;
+
+ if (!sc_configure_rotation_degree(ctx, shot->op.rotate))
+ return -EINVAL;
+
+ ctx->flip_rot_cfg &= ~(SCALER_FLIP_X_EN | SCALER_FLIP_Y_EN);
+
+ if (shot->op.op & M2M1SHOT_OP_FLIP_VIRT)
+ ctx->flip_rot_cfg |= SCALER_FLIP_X_EN;
+
+ if (shot->op.op & M2M1SHOT_OP_FLIP_HORI)
+ ctx->flip_rot_cfg |= SCALER_FLIP_Y_EN;
+
+ ctx->csc.csc_eq = !(shot->op.op & M2M1SHOT_OP_CSC_709) ?
+ V4L2_COLORSPACE_SMPTE170M : V4L2_COLORSPACE_REC709;
+ ctx->csc.csc_range = !(shot->op.op & M2M1SHOT_OP_CSC_WIDE) ?
+ SC_CSC_NARROW : SC_CSC_WIDE;
+ ctx->pre_multi = !!(shot->op.op & M2M1SHOT_OP_PREMULTIPLIED_ALPHA);
+
+ ctx->dnoise_ft.strength = (shot->op.op & SC_M2M1SHOT_OP_FILTER_MASK) >>
+ SC_M2M1SHOT_OP_FILTER_SHIFT;
+
+ ret = sc_find_scaling_ratio(m21ctx->priv);
+ if (ret)
+ return ret;
+
+ return sc_prepare_denoise_filter(m21ctx->priv);
+}
+
+static int sc_m2m1shot_prepare_buffer(struct m2m1shot_context *m21ctx,
+ struct m2m1shot_buffer_dma *buf_dma,
+ int plane,
+ enum dma_data_direction dir)
+{
+ int ret;
+
+ ret = m2m1shot_map_dma_buf(m21ctx->m21dev->dev,
+ &buf_dma->plane[plane], dir);
+ if (ret)
+ return ret;
+
+ ret = m2m1shot_dma_addr_map(m21ctx->m21dev->dev, buf_dma, plane, dir);
+ if (ret) {
+ m2m1shot_unmap_dma_buf(m21ctx->m21dev->dev,
+ &buf_dma->plane[plane], dir);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void sc_m2m1shot_finish_buffer(struct m2m1shot_context *m21ctx,
+ struct m2m1shot_buffer_dma *buf_dma,
+ int plane,
+ enum dma_data_direction dir)
+{
+ m2m1shot_dma_addr_unmap(m21ctx->m21dev->dev, buf_dma, plane);
+ m2m1shot_unmap_dma_buf(m21ctx->m21dev->dev,
+ &buf_dma->plane[plane], dir);
+}
+
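+/*
+ * m2m1shot counterpart of sc_get_bufaddr(): the same per-plane
+ * address/size computation, but taking the DMA addresses from an
+ * m2m1shot_buffer_dma instead of a vb2 buffer.
+ */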
+static void sc_m2m1shot_get_bufaddr(struct sc_dev *sc,
+ struct m2m1shot_buffer_dma *buf, struct sc_frame *frame)
+{
+ unsigned int pixsize, bytesize;
+
+ pixsize = frame->width * frame->height;
+ bytesize = (pixsize * frame->sc_fmt->bitperpixel[0]) >> 3;
+
+ frame->addr.y = buf->plane[0].dma_addr;
+
+ frame->addr.cb = 0;
+ frame->addr.cr = 0;
+ frame->addr.cbsize = 0;
+ frame->addr.crsize = 0;
+
+ switch (frame->sc_fmt->num_comp) {
+ case 1: /* rgb, yuyv */
+ frame->addr.ysize = bytesize;
+ break;
+ case 2:
+ if (frame->sc_fmt->num_planes == 1) {
+ if (frame->sc_fmt->pixelformat == V4L2_PIX_FMT_NV12N) {
+ unsigned int w = frame->width;
+ unsigned int h = frame->height;
+ frame->addr.cb =
+ NV12N_CBCR_BASE(frame->addr.y, w, h);
+ frame->addr.ysize = NV12N_Y_SIZE(w, h);
+ frame->addr.cbsize = NV12N_CBCR_SIZE(w, h);
+ } else if (frame->sc_fmt->pixelformat == V4L2_PIX_FMT_NV12N_10B) {
+ unsigned int w = frame->width;
+ unsigned int h = frame->height;
+ frame->addr.cb =
+ NV12N_10B_CBCR_BASE(frame->addr.y, w, h);
+ frame->addr.ysize = NV12N_Y_SIZE(w, h);
+ frame->addr.cbsize = NV12N_CBCR_SIZE(w, h);
+ } else {
+ frame->addr.cb = frame->addr.y + pixsize;
+ frame->addr.ysize = pixsize;
+ frame->addr.cbsize = bytesize - pixsize;
+ }
+ } else if (frame->sc_fmt->num_planes == 2) {
+ frame->addr.cb = buf->plane[1].dma_addr;
+
+ frame->addr.ysize =
+ pixsize * frame->sc_fmt->bitperpixel[0] >> 3;
+ frame->addr.cbsize =
+ pixsize * frame->sc_fmt->bitperpixel[1] >> 3;
+ }
+ break;
+ case 3:
+ if (frame->sc_fmt->num_planes == 1) {
+ if (sc_fmt_is_ayv12(frame->sc_fmt->pixelformat)) {
+ unsigned int c_span;
+ c_span = ALIGN(frame->width >> 1, 16);
+ frame->addr.ysize = pixsize;
+ frame->addr.cbsize =
+ c_span * (frame->height >> 1);
+ frame->addr.crsize = frame->addr.cbsize;
+ frame->addr.cb = frame->addr.y + pixsize;
+ frame->addr.cr =
+ frame->addr.cb + frame->addr.cbsize;
+ } else if (frame->sc_fmt->pixelformat ==
+ V4L2_PIX_FMT_YUV420N) {
+ unsigned int w = frame->width;
+ unsigned int h = frame->height;
+ frame->addr.ysize = YUV420N_Y_SIZE(w, h);
+ frame->addr.cbsize = YUV420N_CB_SIZE(w, h);
+ frame->addr.crsize = YUV420N_CR_SIZE(w, h);
+ frame->addr.cb =
+ YUV420N_CB_BASE(frame->addr.y, w, h);
+ frame->addr.cr =
+ YUV420N_CR_BASE(frame->addr.y, w, h);
+ } else {
+ frame->addr.ysize = pixsize;
+ frame->addr.cbsize = (bytesize - pixsize) / 2;
+ frame->addr.crsize = frame->addr.cbsize;
+ frame->addr.cb = frame->addr.y + pixsize;
+ frame->addr.cr =
+ frame->addr.cb + frame->addr.cbsize;
+ }
+ } else if (frame->sc_fmt->num_planes == 3) {
+ frame->addr.cb = buf->plane[1].dma_addr;
+ frame->addr.cr = buf->plane[2].dma_addr;
+
+ frame->addr.ysize =
+ pixsize * frame->sc_fmt->bitperpixel[0] >> 3;
+ frame->addr.cbsize =
+ pixsize * frame->sc_fmt->bitperpixel[1] >> 3;
+ frame->addr.crsize =
+ pixsize * frame->sc_fmt->bitperpixel[2] >> 3;
+ } else {
+ dev_err(sc->dev, "Please check the num of comp\n");
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (frame->sc_fmt->pixelformat == V4L2_PIX_FMT_YVU420 ||
+ frame->sc_fmt->pixelformat == V4L2_PIX_FMT_YVU420M) {
+ u32 t_cb = frame->addr.cb;
+ frame->addr.cb = frame->addr.cr;
+ frame->addr.cr = t_cb;
+ }
+}
+
+static int sc_m2m1shot_device_run(struct m2m1shot_context *m21ctx,
+ struct m2m1shot_task *task)
+{
+ struct sc_ctx *ctx = m21ctx->priv;
+ struct sc_dev *sc = ctx->sc_dev;
+ struct sc_frame *s_frame, *d_frame;
+
+ if (test_bit(DEV_SUSPEND, &sc->state)) {
+ dev_err(sc->dev, "Scaler is in suspend state\n");
+ return -EAGAIN;
+ }
+
+ /* no abort-state handling is required for m2m1shot */
+
+ s_frame = &ctx->s_frame;
+ d_frame = &ctx->d_frame;
+
+ sc_m2m1shot_get_bufaddr(sc, &task->dma_buf_out, s_frame);
+ sc_m2m1shot_get_bufaddr(sc, &task->dma_buf_cap, d_frame);
+
+ return sc_add_context_and_run(sc, ctx);
+}
+
+static void sc_m2m1shot_timeout_task(struct m2m1shot_context *m21ctx,
+ struct m2m1shot_task *task)
+{
+ struct sc_ctx *ctx = m21ctx->priv;
+ struct sc_dev *sc = ctx->sc_dev;
+ unsigned long flags;
+
+ sc_hwregs_dump(sc);
+ exynos_sysmmu_show_status(sc->dev);
+
+ sc_hwset_soft_reset(sc);
+
+ sc_clk_power_disable(sc);
+
+ spin_lock_irqsave(&sc->ctxlist_lock, flags);
+ sc->current_ctx = NULL;
+ spin_unlock_irqrestore(&sc->ctxlist_lock, flags);
+
+ clear_bit(DEV_RUN, &sc->state);
+ clear_bit(CTX_RUN, &ctx->flags);
+
+ sc_run_next_job(sc);
+}
+
+static const struct m2m1shot_devops sc_m2m1shot_ops = {
+ .init_context = sc_m2m1shot_init_context,
+ .free_context = sc_m2m1shot_free_context,
+ .prepare_format = sc_m2m1shot_prepare_format,
+ .prepare_operation = sc_m2m1shot_prepare_operation,
+ .prepare_buffer = sc_m2m1shot_prepare_buffer,
+ .finish_buffer = sc_m2m1shot_finish_buffer,
+ .device_run = sc_m2m1shot_device_run,
+ .timeout_task = sc_m2m1shot_timeout_task,
+};
+
+static int __attribute__((unused)) sc_sysmmu_fault_handler(struct iommu_domain *domain,
+ struct device *dev, unsigned long iova, int flags, void *token)
+{
+ struct sc_dev *sc = dev_get_drvdata(dev);
+
+ if (test_bit(DEV_RUN, &sc->state)) {
+ dev_info(dev, "System MMU fault called for IOVA %#lx\n", iova);
+ sc_hwregs_dump(sc);
+ }
+
+ return 0;
+}
+
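+/*
+ * Look up the (optional) clocks. A missing clock (-ENOENT) is tolerated
+ * and reported; any other error is fatal. The mux parent is only looked
+ * up when the child mux exists. Note that the clocks are devm-managed,
+ * so the explicit clk_put() calls in sc_clk_put() below appear to be
+ * redundant (devm releases them automatically).
+ */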
+static int sc_clk_get(struct sc_dev *sc)
+{
+ sc->aclk = devm_clk_get(sc->dev, "gate");
+ if (IS_ERR(sc->aclk)) {
+ if (PTR_ERR(sc->aclk) != -ENOENT) {
+ dev_err(sc->dev, "Failed to get 'gate' clock: %ld",
+ PTR_ERR(sc->aclk));
+ return PTR_ERR(sc->aclk);
+ }
+ dev_info(sc->dev, "'gate' clock is not present\n");
+ }
+
+ sc->pclk = devm_clk_get(sc->dev, "gate2");
+ if (IS_ERR(sc->pclk)) {
+ if (PTR_ERR(sc->pclk) != -ENOENT) {
+ dev_err(sc->dev, "Failed to get 'gate2' clock: %ld",
+ PTR_ERR(sc->pclk));
+ return PTR_ERR(sc->pclk);
+ }
+ dev_info(sc->dev, "'gate2' clock is not present\n");
+ }
+
+ sc->clk_chld = devm_clk_get(sc->dev, "mux_user");
+ if (IS_ERR(sc->clk_chld)) {
+ if (PTR_ERR(sc->clk_chld) != -ENOENT) {
+ dev_err(sc->dev, "Failed to get 'mux_user' clock: %ld",
+ PTR_ERR(sc->clk_chld));
+ return PTR_ERR(sc->clk_chld);
+ }
+ dev_info(sc->dev, "'mux_user' clock is not present\n");
+ }
+
+ if (!IS_ERR(sc->clk_chld)) {
+ sc->clk_parn = devm_clk_get(sc->dev, "mux_src");
+ if (IS_ERR(sc->clk_parn)) {
+ dev_err(sc->dev, "Failed to get 'mux_src' clock: %ld",
+ PTR_ERR(sc->clk_parn));
+ return PTR_ERR(sc->clk_parn);
+ }
+ } else {
+ sc->clk_parn = ERR_PTR(-ENOENT);
+ }
+
+ return 0;
+}
+
+static void sc_clk_put(struct sc_dev *sc)
+{
+ if (!IS_ERR(sc->clk_parn))
+ clk_put(sc->clk_parn);
+
+ if (!IS_ERR(sc->clk_chld))
+ clk_put(sc->clk_chld);
+
+ if (!IS_ERR(sc->pclk))
+ clk_put(sc->pclk);
+
+ if (!IS_ERR(sc->aclk))
+ clk_put(sc->aclk);
+}
+
+#ifdef CONFIG_PM_SLEEP
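+/*
+ * System sleep: mark the device as suspending and wait, with a timeout,
+ * until the job currently running on the H/W has finished.
+ */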
+static int sc_suspend(struct device *dev)
+{
+ struct sc_dev *sc = dev_get_drvdata(dev);
+ int ret;
+
+ set_bit(DEV_SUSPEND, &sc->state);
+
+ ret = wait_event_timeout(sc->wait,
+ !test_bit(DEV_RUN, &sc->state), SC_TIMEOUT);
+ if (ret == 0)
+ dev_err(sc->dev, "wait timeout\n");
+
+ return 0;
+}
+
+static int sc_resume(struct device *dev)
+{
+ struct sc_dev *sc = dev_get_drvdata(dev);
+
+ clear_bit(DEV_SUSPEND, &sc->state);
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM
+static int sc_runtime_resume(struct device *dev)
+{
+ struct sc_dev *sc = dev_get_drvdata(dev);
+
+ if (!IS_ERR(sc->clk_chld) && !IS_ERR(sc->clk_parn)) {
+ int ret = clk_set_parent(sc->clk_chld, sc->clk_parn);
+ if (ret) {
+ dev_err(sc->dev, "%s: Failed to setup MUX: %d\n",
+ __func__, ret);
+ return ret;
+ }
+ }
+
+ if (sc->qosreq_int_level > 0)
+ pm_qos_update_request(&sc->qosreq_int, sc->qosreq_int_level);
+
+ return 0;
+}
+
+static int sc_runtime_suspend(struct device *dev)
+{
+ struct sc_dev *sc = dev_get_drvdata(dev);
+ if (sc->qosreq_int_level > 0)
+ pm_qos_update_request(&sc->qosreq_int, 0);
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops sc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(sc_suspend, sc_resume)
+ SET_RUNTIME_PM_OPS(NULL, sc_runtime_resume, sc_runtime_suspend)
+};
+
+static int sc_probe(struct platform_device *pdev)
+{
+ struct sc_dev *sc;
+ struct resource *res;
+ int ret = 0;
+ size_t ivar;
+ u32 hwver;
+
+ sc = devm_kzalloc(&pdev->dev, sizeof(struct sc_dev), GFP_KERNEL);
+ if (!sc) {
+ dev_err(&pdev->dev, "no memory for scaler device\n");
+ return -ENOMEM;
+ }
+
+ sc->dev = &pdev->dev;
+
+ spin_lock_init(&sc->ctxlist_lock);
+ INIT_LIST_HEAD(&sc->context_list);
+ spin_lock_init(&sc->slock);
+ mutex_init(&sc->lock);
+ init_waitqueue_head(&sc->wait);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ sc->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(sc->regs))
+ return PTR_ERR(sc->regs);
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to get IRQ resource\n");
+ return -ENOENT;
+ }
+
+ ret = devm_request_irq(&pdev->dev, res->start, sc_irq_handler, 0,
+ pdev->name, sc);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to install irq\n");
+ return ret;
+ }
+
+ atomic_set(&sc->wdt.cnt, 0);
+ setup_timer(&sc->wdt.timer, sc_watchdog, (unsigned long)sc);
+
+ ret = sc_clk_get(sc);
+ if (ret)
+ return ret;
+
+ if (pdev->dev.of_node)
+ sc->dev_id = of_alias_get_id(pdev->dev.of_node, "scaler");
+ else
+ sc->dev_id = pdev->id;
+
+ sc->m21dev = m2m1shot_create_device(&pdev->dev, &sc_m2m1shot_ops,
+ "scaler", sc->dev_id, -1);
+ if (IS_ERR(sc->m21dev)) {
+ dev_err(&pdev->dev, "%s: Failed to create m2m1shot_device\n",
+ __func__);
+ return PTR_ERR(sc->m21dev);
+ }
+
+ platform_set_drvdata(pdev, sc);
+
+ pm_runtime_enable(&pdev->dev);
+
+ ret = sc_register_m2m_device(sc, sc->dev_id);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register m2m device\n");
+ goto err_m2m;
+ }
+
+#if defined(CONFIG_PM_DEVFREQ) && defined(CONFIG_NEVER_DEFINED_FIX_ME)
+ if (!of_property_read_u32(pdev->dev.of_node, "mscl,int_qos_minlock",
+ (u32 *)&sc->qosreq_int_level)) {
+ if (sc->qosreq_int_level > 0) {
+ pm_qos_add_request(&sc->qosreq_int,
+ PM_QOS_DEVICE_THROUGHPUT, 0);
+ dev_info(&pdev->dev, "INT Min.Lock Freq. = %u\n",
+ sc->qosreq_int_level);
+ }
+ }
+#endif
+ if (of_property_read_u32(pdev->dev.of_node, "mscl,cfw",
+ (u32 *)&sc->cfw))
+ sc->cfw = 0;
+
+ ret = iovmm_activate(sc->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to activate iommu\n");
+ goto err_iommu;
+ }
+
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "%s: failed to local power on (err %d)\n",
+ __func__, ret);
+ goto err_ver_rpm_get;
+ }
+
+ if (!IS_ERR(sc->pclk)) {
+ ret = clk_prepare_enable(sc->pclk);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "%s: failed to enable PCLK (err %d)\n",
+ __func__, ret);
+ goto err_ver_pclk_get;
+ }
+ }
+
+ if (!IS_ERR(sc->aclk)) {
+ ret = clk_prepare_enable(sc->aclk);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "%s: failed to enable ACLK (err %d)\n",
+ __func__, ret);
+ goto err_ver_aclk_get;
+ }
+ }
+
+ sc->version = SCALER_VERSION(2, 0, 0);
+
+ hwver = __raw_readl(sc->regs + SCALER_VER);
+
+ /* selects the lowest version number if no version is matched */
+ for (ivar = 0; ivar < ARRAY_SIZE(sc_version_table); ivar++) {
+ sc->version = sc_version_table[ivar][1];
+ if (hwver == sc_version_table[ivar][0])
+ break;
+ }
+
+ for (ivar = 0; ivar < ARRAY_SIZE(sc_variant); ivar++) {
+ if (sc->version >= sc_variant[ivar].version) {
+ sc->variant = &sc_variant[ivar];
+ break;
+ }
+ }
+
+ if (!IS_ERR(sc->aclk))
+ clk_disable_unprepare(sc->aclk);
+ if (!IS_ERR(sc->pclk))
+ clk_disable_unprepare(sc->pclk);
+ pm_runtime_put(&pdev->dev);
+
+ iovmm_set_fault_handler(&pdev->dev, sc_sysmmu_fault_handler, sc);
+
+ dev_info(&pdev->dev,
+ "Driver probed successfully(version: %08x(%x))\n",
+ hwver, sc->version);
+
+ return 0;
+err_ver_aclk_get:
+ if (!IS_ERR(sc->pclk))
+ clk_disable_unprepare(sc->pclk);
+err_ver_pclk_get:
+ pm_runtime_put(&pdev->dev);
+err_ver_rpm_get:
+ iovmm_deactivate(sc->dev);
+err_iommu:
+ if (sc->qosreq_int_level > 0)
+ pm_qos_remove_request(&sc->qosreq_int);
+ sc_unregister_m2m_device(sc);
+err_m2m:
+ m2m1shot_destroy_device(sc->m21dev);
+
+ return ret;
+}
+
+static int sc_remove(struct platform_device *pdev)
+{
+ struct sc_dev *sc = platform_get_drvdata(pdev);
+
+ iovmm_deactivate(sc->dev);
+
+ sc_clk_put(sc);
+
+ if (timer_pending(&sc->wdt.timer))
+ del_timer(&sc->wdt.timer);
+
+ m2m1shot_destroy_device(sc->m21dev);
+
+ if (sc->qosreq_int_level > 0)
+ pm_qos_remove_request(&sc->qosreq_int);
+
+ return 0;
+}
+
+static const struct of_device_id exynos_sc_match[] = {
+ {
+ .compatible = "samsung,exynos5-scaler",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, exynos_sc_match);
+
+static struct platform_driver sc_driver = {
+ .probe = sc_probe,
+ .remove = sc_remove,
+ .driver = {
+ .name = MODULE_NAME,
+ .owner = THIS_MODULE,
+ .pm = &sc_pm_ops,
+ .of_match_table = of_match_ptr(exynos_sc_match),
+ }
+};
+
+module_platform_driver(sc_driver);
+
+MODULE_AUTHOR("Sunyoung, Kang <sy0816.kang@samsung.com>");
+MODULE_DESCRIPTION("EXYNOS m2m scaler driver");
+MODULE_LICENSE("GPL");
--- /dev/null
+/*
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Register interface file for Exynos Scaler driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include "scaler.h"
+#include "scaler-regs.h"
+
+#define COEF(val_l, val_h) ((((val_h) & 0x1FF) << 16) | ((val_l) & 0x1FF))
+extern int sc_set_blur;
+
+/* Scaling coefficient value */
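+/*
+ * COEF() packs two signed 9-bit filter coefficients into one register
+ * word: the first tap in bits [8:0] and the second in bits [24:16],
+ * e.g. COEF(-2, 7) == (7 << 16) | (-2 & 0x1FF) == 0x000701FE.
+ * The tables below hold 7 scaling-ratio bands x 16 phases each; the
+ * taps of one phase nominally sum to 128, i.e. unity gain with 7
+ * fractional bits.
+ */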
+static const __u32 sc_coef_8t_org[7][16][4] = {
+ { /* 8:8 or zoom-in */
+ {COEF( 0, 0), COEF( 0, 0), COEF(128, 0), COEF( 0, 0)},
+ {COEF( 0, 1), COEF( -2, 7), COEF(127, -6), COEF( 2, -1)},
+ {COEF( 0, 1), COEF( -5, 16), COEF(125, -12), COEF( 4, -1)},
+ {COEF( 0, 2), COEF( -8, 25), COEF(120, -15), COEF( 5, -1)},
+ {COEF(-1, 3), COEF(-10, 35), COEF(114, -18), COEF( 6, -1)},
+ {COEF(-1, 4), COEF(-13, 46), COEF(107, -20), COEF( 6, -1)},
+ {COEF(-1, 5), COEF(-16, 57), COEF( 99, -21), COEF( 7, -2)},
+ {COEF(-1, 5), COEF(-18, 68), COEF( 89, -20), COEF( 6, -1)},
+ {COEF(-1, 6), COEF(-20, 79), COEF( 79, -20), COEF( 6, -1)},
+ {COEF(-1, 6), COEF(-20, 89), COEF( 68, -18), COEF( 5, -1)},
+ {COEF(-2, 7), COEF(-21, 99), COEF( 57, -16), COEF( 5, -1)},
+ {COEF(-1, 6), COEF(-20, 107), COEF( 46, -13), COEF( 4, -1)},
+ {COEF(-1, 6), COEF(-18, 114), COEF( 35, -10), COEF( 3, -1)},
+ {COEF(-1, 5), COEF(-15, 120), COEF( 25, -8), COEF( 2, 0)},
+ {COEF(-1, 4), COEF(-12, 125), COEF( 16, -5), COEF( 1, 0)},
+ {COEF(-1, 2), COEF( -6, 127), COEF( 7, -2), COEF( 1, 0)}
+ }, { /* 8:7 Zoom-out */
+ {COEF( 0, 3), COEF( -8, 13), COEF(111, 14), COEF( -8, 3)},
+ {COEF(-1, 3), COEF(-10, 21), COEF(112, 7), COEF( -6, 2)},
+ {COEF(-1, 4), COEF(-12, 28), COEF(110, 1), COEF( -4, 2)},
+ {COEF(-1, 4), COEF(-13, 36), COEF(106, -3), COEF( -2, 1)},
+ {COEF(-1, 4), COEF(-15, 44), COEF(103, -7), COEF( -1, 1)},
+ {COEF(-1, 4), COEF(-16, 53), COEF( 97, -11), COEF( 1, 1)},
+ {COEF(-1, 4), COEF(-16, 61), COEF( 91, -13), COEF( 2, 0)},
+ {COEF(-1, 4), COEF(-17, 69), COEF( 85, -15), COEF( 3, 0)},
+ {COEF( 0, 3), COEF(-16, 77), COEF( 77, -16), COEF( 3, 0)},
+ {COEF( 0, 3), COEF(-15, 85), COEF( 69, -17), COEF( 4, -1)},
+ {COEF( 0, 2), COEF(-13, 91), COEF( 61, -16), COEF( 4, -1)},
+ {COEF( 1, 1), COEF(-11, 97), COEF( 53, -16), COEF( 4, -1)},
+ {COEF( 1, -1), COEF( -7, 103), COEF( 44, -15), COEF( 4, -1)},
+ {COEF( 1, -2), COEF( -3, 106), COEF( 36, -13), COEF( 4, -1)},
+ {COEF( 2, -4), COEF( 1, 110), COEF( 28, -12), COEF( 4, -1)},
+ {COEF( 2, -6), COEF( 7, 112), COEF( 21, -10), COEF( 3, -1)}
+ }, { /* 8:6 Zoom-out */
+ {COEF( 0, 2), COEF(-11, 25), COEF( 96, 25), COEF(-11, 2)},
+ {COEF( 0, 2), COEF(-12, 31), COEF( 96, 19), COEF(-10, 2)},
+ {COEF( 0, 2), COEF(-12, 37), COEF( 94, 14), COEF( -9, 2)},
+ {COEF( 0, 1), COEF(-12, 43), COEF( 92, 10), COEF( -8, 2)},
+ {COEF( 0, 1), COEF(-12, 49), COEF( 90, 5), COEF( -7, 2)},
+ {COEF( 1, 0), COEF(-12, 55), COEF( 86, 1), COEF( -5, 2)},
+ {COEF( 1, -1), COEF(-11, 61), COEF( 82, -2), COEF( -4, 2)},
+ {COEF( 1, -1), COEF( -9, 67), COEF( 77, -5), COEF( -3, 1)},
+ {COEF( 1, -2), COEF( -7, 72), COEF( 72, -7), COEF( -2, 1)},
+ {COEF( 1, -3), COEF( -5, 77), COEF( 67, -9), COEF( -1, 1)},
+ {COEF( 2, -4), COEF( -2, 82), COEF( 61, -11), COEF( -1, 1)},
+ {COEF( 2, -5), COEF( 1, 86), COEF( 55, -12), COEF( 0, 1)},
+ {COEF( 2, -7), COEF( 5, 90), COEF( 49, -12), COEF( 1, 0)},
+ {COEF( 2, -8), COEF( 10, 92), COEF( 43, -12), COEF( 1, 0)},
+ {COEF( 2, -9), COEF( 14, 94), COEF( 37, -12), COEF( 2, 0)},
+ {COEF( 2, -10), COEF( 19, 96), COEF( 31, -12), COEF( 2, 0)}
+ }, { /* 8:5 Zoom-out */
+ {COEF( 0, -1), COEF( -8, 33), COEF( 80, 33), COEF( -8, -1)},
+ {COEF( 1, -2), COEF( -7, 37), COEF( 80, 28), COEF( -8, -1)},
+ {COEF( 1, -2), COEF( -7, 41), COEF( 79, 24), COEF( -8, 0)},
+ {COEF( 1, -3), COEF( -6, 46), COEF( 78, 20), COEF( -8, 0)},
+ {COEF( 1, -3), COEF( -4, 50), COEF( 76, 16), COEF( -8, 0)},
+ {COEF( 1, -4), COEF( -3, 54), COEF( 74, 13), COEF( -7, 0)},
+ {COEF( 1, -5), COEF( -1, 58), COEF( 71, 10), COEF( -7, 1)},
+ {COEF( 1, -5), COEF( 1, 62), COEF( 68, 6), COEF( -6, 1)},
+ {COEF( 1, -6), COEF( 4, 65), COEF( 65, 4), COEF( -6, 1)},
+ {COEF( 1, -6), COEF( 6, 68), COEF( 62, 1), COEF( -5, 1)},
+ {COEF( 1, -7), COEF( 10, 71), COEF( 58, -1), COEF( -5, 1)},
+ {COEF( 0, -7), COEF( 13, 74), COEF( 54, -3), COEF( -4, 1)},
+ {COEF( 0, -8), COEF( 16, 76), COEF( 50, -4), COEF( -3, 1)},
+ {COEF( 0, -8), COEF( 20, 78), COEF( 46, -6), COEF( -3, 1)},
+ {COEF( 0, -8), COEF( 24, 79), COEF( 41, -7), COEF( -2, 1)},
+ {COEF(-1, -8), COEF( 28, 80), COEF( 37, -7), COEF( -2, 1)}
+ }, { /* 8:4 Zoom-out */
+ {COEF( 0, -3), COEF( 0, 35), COEF( 64, 35), COEF( 0, -3)},
+ {COEF( 0, -3), COEF( 1, 38), COEF( 64, 32), COEF( -1, -3)},
+ {COEF( 0, -3), COEF( 2, 41), COEF( 63, 29), COEF( -2, -2)},
+ {COEF( 0, -4), COEF( 4, 43), COEF( 63, 27), COEF( -3, -2)},
+ {COEF( 0, -4), COEF( 6, 46), COEF( 61, 24), COEF( -3, -2)},
+ {COEF( 0, -4), COEF( 7, 49), COEF( 60, 21), COEF( -3, -2)},
+ {COEF(-1, -4), COEF( 9, 51), COEF( 59, 19), COEF( -4, -1)},
+ {COEF(-1, -4), COEF( 12, 53), COEF( 57, 16), COEF( -4, -1)},
+ {COEF(-1, -4), COEF( 14, 55), COEF( 55, 14), COEF( -4, -1)},
+ {COEF(-1, -4), COEF( 16, 57), COEF( 53, 12), COEF( -4, -1)},
+ {COEF(-1, -4), COEF( 19, 59), COEF( 51, 9), COEF( -4, -1)},
+ {COEF(-2, -3), COEF( 21, 60), COEF( 49, 7), COEF( -4, 0)},
+ {COEF(-2, -3), COEF( 24, 61), COEF( 46, 6), COEF( -4, 0)},
+ {COEF(-2, -3), COEF( 27, 63), COEF( 43, 4), COEF( -4, 0)},
+ {COEF(-2, -2), COEF( 29, 63), COEF( 41, 2), COEF( -3, 0)},
+ {COEF(-3, -1), COEF( 32, 64), COEF( 38, 1), COEF( -3, 0)}
+ }, { /* 8:3 Zoom-out */
+ {COEF( 0, -1), COEF( 8, 33), COEF( 48, 33), COEF( 8, -1)},
+ {COEF(-1, -1), COEF( 9, 35), COEF( 49, 31), COEF( 7, -1)},
+ {COEF(-1, -1), COEF( 10, 36), COEF( 49, 30), COEF( 6, -1)},
+ {COEF(-1, -1), COEF( 12, 38), COEF( 48, 28), COEF( 5, -1)},
+ {COEF(-1, 0), COEF( 13, 39), COEF( 48, 26), COEF( 4, -1)},
+ {COEF(-1, 0), COEF( 15, 41), COEF( 47, 24), COEF( 3, -1)},
+ {COEF(-1, 0), COEF( 16, 42), COEF( 47, 23), COEF( 2, -1)},
+ {COEF(-1, 1), COEF( 18, 43), COEF( 45, 21), COEF( 2, -1)},
+ {COEF(-1, 1), COEF( 19, 45), COEF( 45, 19), COEF( 1, -1)},
+ {COEF(-1, 2), COEF( 21, 45), COEF( 43, 18), COEF( 1, -1)},
+ {COEF(-1, 2), COEF( 23, 47), COEF( 42, 16), COEF( 0, -1)},
+ {COEF(-1, 3), COEF( 24, 47), COEF( 41, 15), COEF( 0, -1)},
+ {COEF(-1, 4), COEF( 26, 48), COEF( 39, 13), COEF( 0, -1)},
+ {COEF(-1, 5), COEF( 28, 48), COEF( 38, 12), COEF( -1, -1)},
+ {COEF(-1, 6), COEF( 30, 49), COEF( 36, 10), COEF( -1, -1)},
+ {COEF(-1, 7), COEF( 31, 49), COEF( 35, 9), COEF( -1, -1)}
+ }, { /* 8:2 Zoom-out */
+ {COEF( 0, 2), COEF( 13, 30), COEF( 38, 30), COEF( 13, 2)},
+ {COEF( 0, 3), COEF( 14, 30), COEF( 38, 29), COEF( 12, 2)},
+ {COEF( 0, 3), COEF( 15, 31), COEF( 38, 28), COEF( 11, 2)},
+ {COEF( 0, 4), COEF( 16, 32), COEF( 38, 26), COEF( 10, 2)},
+ {COEF( 0, 4), COEF( 17, 33), COEF( 37, 26), COEF( 10, 1)},
+ {COEF( 0, 5), COEF( 18, 34), COEF( 37, 24), COEF( 9, 1)},
+ {COEF( 0, 5), COEF( 19, 34), COEF( 37, 24), COEF( 8, 1)},
+ {COEF( 1, 6), COEF( 20, 35), COEF( 36, 22), COEF( 7, 1)},
+ {COEF( 1, 6), COEF( 21, 36), COEF( 36, 21), COEF( 6, 1)},
+ {COEF( 1, 7), COEF( 22, 36), COEF( 35, 20), COEF( 6, 1)},
+ {COEF( 1, 8), COEF( 24, 37), COEF( 34, 19), COEF( 5, 0)},
+ {COEF( 1, 9), COEF( 24, 37), COEF( 34, 18), COEF( 5, 0)},
+ {COEF( 1, 10), COEF( 26, 37), COEF( 33, 17), COEF( 4, 0)},
+ {COEF( 2, 10), COEF( 26, 38), COEF( 32, 16), COEF( 4, 0)},
+ {COEF( 2, 11), COEF( 28, 38), COEF( 31, 15), COEF( 3, 0)},
+ {COEF( 2, 12), COEF( 29, 38), COEF( 30, 14), COEF( 3, 0)}
+ }
+};
+
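+/*
+ * 4-tap counterpart of the 8-tap table above with the same 7-band x
+ * 16-phase layout. Presumably this shorter filter serves the path for
+ * which the H/W provides fewer taps (the vertical/chroma filter); the
+ * polyphase register writers select the proper set.
+ */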
+static const __u32 sc_coef_4t_org[7][16][2] = {
+ { /* 8:8 or zoom-in */
+ {COEF( 0, 0), COEF(128, 0)},
+ {COEF( 0, 5), COEF(127, -4)},
+ {COEF(-1, 11), COEF(124, -6)},
+ {COEF(-1, 19), COEF(118, -8)},
+ {COEF(-2, 27), COEF(111, -8)},
+ {COEF(-3, 37), COEF(102, -8)},
+ {COEF(-4, 48), COEF( 92, -8)},
+ {COEF(-5, 59), COEF( 81, -7)},
+ {COEF(-6, 70), COEF( 70, -6)},
+ {COEF(-7, 81), COEF( 59, -5)},
+ {COEF(-8, 92), COEF( 48, -4)},
+ {COEF(-8, 102), COEF( 37, -3)},
+ {COEF(-8, 111), COEF( 27, -2)},
+ {COEF(-8, 118), COEF( 19, -1)},
+ {COEF(-6, 124), COEF( 11, -1)},
+ {COEF(-4, 127), COEF( 5, 0)}
+ }, { /* 8:7 Zoom-out */
+ {COEF( 0, 8), COEF(112, 8)},
+ {COEF(-1, 14), COEF(111, 4)},
+ {COEF(-2, 20), COEF(109, 1)},
+ {COEF(-2, 27), COEF(105, -2)},
+ {COEF(-3, 34), COEF(100, -3)},
+ {COEF(-3, 43), COEF( 93, -5)},
+ {COEF(-4, 51), COEF( 86, -5)},
+ {COEF(-4, 60), COEF( 77, -5)},
+ {COEF(-5, 69), COEF( 69, -5)},
+ {COEF(-5, 77), COEF( 60, -4)},
+ {COEF(-5, 86), COEF( 51, -4)},
+ {COEF(-5, 93), COEF( 43, -3)},
+ {COEF(-3, 100), COEF( 34, -3)},
+ {COEF(-2, 105), COEF( 27, -2)},
+ {COEF( 1, 109), COEF( 20, -2)},
+ {COEF( 4, 111), COEF( 14, -1)}
+ }, { /* 8:6 Zoom-out */
+ {COEF( 0, 16), COEF( 96, 16)},
+ {COEF(-2, 21), COEF( 97, 12)},
+ {COEF(-2, 26), COEF( 96, 8)},
+ {COEF(-2, 32), COEF( 93, 5)},
+ {COEF(-2, 39), COEF( 89, 2)},
+ {COEF(-2, 46), COEF( 84, 0)},
+ {COEF(-3, 53), COEF( 79, -1)},
+ {COEF(-2, 59), COEF( 73, -2)},
+ {COEF(-2, 66), COEF( 66, -2)},
+ {COEF(-2, 73), COEF( 59, -2)},
+ {COEF(-1, 79), COEF( 53, -3)},
+ {COEF( 0, 84), COEF( 46, -2)},
+ {COEF( 2, 89), COEF( 39, -2)},
+ {COEF( 5, 93), COEF( 32, -2)},
+ {COEF( 8, 96), COEF( 26, -2)},
+ {COEF(12, 97), COEF( 21, -2)}
+ }, { /* 8:5 Zoom-out */
+ {COEF( 0, 22), COEF( 84, 22)},
+ {COEF(-1, 26), COEF( 85, 18)},
+ {COEF(-1, 31), COEF( 84, 14)},
+ {COEF(-1, 36), COEF( 82, 11)},
+ {COEF(-1, 42), COEF( 79, 8)},
+ {COEF(-1, 47), COEF( 76, 6)},
+ {COEF( 0, 52), COEF( 72, 4)},
+ {COEF( 0, 58), COEF( 68, 2)},
+ {COEF( 1, 63), COEF( 63, 1)},
+ {COEF( 2, 68), COEF( 58, 0)},
+ {COEF( 4, 72), COEF( 52, 0)},
+ {COEF( 6, 76), COEF( 47, -1)},
+ {COEF( 8, 79), COEF( 42, -1)},
+ {COEF(11, 82), COEF( 36, -1)},
+ {COEF(14, 84), COEF( 31, -1)},
+ {COEF(18, 85), COEF( 26, -1)}
+ }, { /* 8:4 Zoom-out */
+ {COEF( 0, 26), COEF( 76, 26)},
+ {COEF( 0, 30), COEF( 76, 22)},
+ {COEF( 0, 34), COEF( 75, 19)},
+ {COEF( 1, 38), COEF( 73, 16)},
+ {COEF( 1, 43), COEF( 71, 13)},
+ {COEF( 2, 47), COEF( 69, 10)},
+ {COEF( 3, 51), COEF( 66, 8)},
+ {COEF( 4, 55), COEF( 63, 6)},
+ {COEF( 5, 59), COEF( 59, 5)},
+ {COEF( 6, 63), COEF( 55, 4)},
+ {COEF( 8, 66), COEF( 51, 3)},
+ {COEF(10, 69), COEF( 47, 2)},
+ {COEF(13, 71), COEF( 43, 1)},
+ {COEF(16, 73), COEF( 38, 1)},
+ {COEF(19, 75), COEF( 34, 0)},
+ {COEF(22, 76), COEF( 30, 0)}
+ }, { /* 8:3 Zoom-out */
+ {COEF( 0, 29), COEF( 70, 29)},
+ {COEF( 2, 32), COEF( 68, 26)},
+ {COEF( 2, 36), COEF( 67, 23)},
+ {COEF( 3, 39), COEF( 66, 20)},
+ {COEF( 3, 43), COEF( 65, 17)},
+ {COEF( 4, 46), COEF( 63, 15)},
+ {COEF( 5, 50), COEF( 61, 12)},
+ {COEF( 7, 53), COEF( 58, 10)},
+ {COEF( 8, 56), COEF( 56, 8)},
+ {COEF(10, 58), COEF( 53, 7)},
+ {COEF(12, 61), COEF( 50, 5)},
+ {COEF(15, 63), COEF( 46, 4)},
+ {COEF(17, 65), COEF( 43, 3)},
+ {COEF(20, 66), COEF( 39, 3)},
+ {COEF(23, 67), COEF( 36, 2)},
+ {COEF(26, 68), COEF( 32, 2)}
+ }, { /* 8:2 Zoom-out */
+ {COEF( 0, 32), COEF( 64, 32)},
+ {COEF( 3, 34), COEF( 63, 28)},
+ {COEF( 4, 37), COEF( 62, 25)},
+ {COEF( 4, 40), COEF( 62, 22)},
+ {COEF( 5, 43), COEF( 61, 19)},
+ {COEF( 6, 46), COEF( 59, 17)},
+ {COEF( 7, 48), COEF( 58, 15)},
+ {COEF( 9, 51), COEF( 55, 13)},
+ {COEF(11, 53), COEF( 53, 11)},
+ {COEF(13, 55), COEF( 51, 9)},
+ {COEF(15, 58), COEF( 48, 7)},
+ {COEF(17, 59), COEF( 46, 6)},
+ {COEF(19, 61), COEF( 43, 5)},
+ {COEF(22, 62), COEF( 40, 4)},
+ {COEF(25, 62), COEF( 37, 4)},
+ {COEF(28, 63), COEF( 34, 3)}
+ },
+};
+
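+/*
+ * Softer, low-pass variants of the tables above, selected when blur or
+ * the denoise filter is requested (see the sc_set_blur module parameter
+ * and dnoise_ft.strength in the core); the layout is identical.
+ */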
+static const __u32 sc_coef_8t_blur1[7][16][4] = {
+ { /* 8:8 or zoom-in */
+ {COEF( 0, -3), COEF( 0, 35), COEF( 64, 35), COEF( 0, -3)},
+ {COEF( 0, -3), COEF( 1, 38), COEF( 64, 31), COEF( -1, -2)},
+ {COEF( 0, -3), COEF( 2, 41), COEF( 63, 29), COEF( -2, -2)},
+ {COEF( 0, -4), COEF( 4, 44), COEF( 62, 26), COEF( -2, -2)},
+ {COEF( 0, -4), COEF( 6, 46), COEF( 62, 23), COEF( -3, -2)},
+ {COEF( 0, -4), COEF( 7, 49), COEF( 60, 20), COEF( -3, -1)},
+ {COEF(-1, -4), COEF( 9, 52), COEF( 59, 18), COEF( -4, -1)},
+ {COEF(-1, -4), COEF( 12, 53), COEF( 57, 16), COEF( -4, -1)},
+ {COEF(-1, -4), COEF( 14, 56), COEF( 55, 13), COEF( -4, -1)},
+ {COEF(-1, -4), COEF( 12, 53), COEF( 57, 16), COEF( -4, -1)},
+ {COEF(-1, -4), COEF( 9, 52), COEF( 59, 18), COEF( -4, -1)},
+ {COEF( 0, -4), COEF( 7, 49), COEF( 60, 20), COEF( -3, -1)},
+ {COEF( 0, -4), COEF( 6, 46), COEF( 62, 23), COEF( -3, -2)},
+ {COEF( 0, -4), COEF( 4, 44), COEF( 62, 26), COEF( -2, -2)},
+ {COEF( 0, -3), COEF( 2, 41), COEF( 63, 29), COEF( -2, -2)},
+ {COEF( 0, -3), COEF( 1, 38), COEF( 64, 31), COEF( -1, -2)}
+ }, { /* 8:7 Zoom-out */
+ {COEF(-1, -2), COEF( 4, 35), COEF( 56, 34), COEF( 4, -2)},
+ {COEF(-1, -3), COEF( 6, 37), COEF( 56, 32), COEF( 3, -2)},
+ {COEF(-1, -3), COEF( 7, 39), COEF( 56, 30), COEF( 2, -2)},
+ {COEF(-1, -3), COEF( 8, 42), COEF( 55, 28), COEF( 1, -2)},
+ {COEF(-1, -3), COEF( 10, 44), COEF( 54, 26), COEF( 0, -2)},
+ {COEF(-1, -2), COEF( 12, 45), COEF( 53, 23), COEF( 0, -2)},
+ {COEF(-1, -2), COEF( 14, 47), COEF( 52, 21), COEF( -1, -2)},
+ {COEF(-1, -2), COEF( 15, 48), COEF( 51, 19), COEF( -1, -1)},
+ {COEF(-1, -2), COEF( 17, 50), COEF( 50, 17), COEF( -2, -1)},
+ {COEF(-1, -2), COEF( 15, 48), COEF( 51, 19), COEF( -1, -1)},
+ {COEF(-1, -2), COEF( 14, 47), COEF( 52, 21), COEF( -1, -2)},
+ {COEF(-1, -2), COEF( 12, 45), COEF( 53, 23), COEF( 0, -2)},
+ {COEF(-1, -3), COEF( 10, 44), COEF( 54, 26), COEF( 0, -2)},
+ {COEF(-1, -3), COEF( 8, 42), COEF( 55, 28), COEF( 1, -2)},
+ {COEF(-1, -3), COEF( 7, 39), COEF( 56, 30), COEF( 2, -2)},
+ {COEF(-1, -3), COEF( 6, 37), COEF( 56, 32), COEF( 3, -2)}
+ }, { /* 8:6 Zoom-out */
+ {COEF( 0, -1), COEF( 8, 33), COEF( 49, 32), COEF( 8, -1)},
+ {COEF(-1, -1), COEF( 9, 35), COEF( 49, 31), COEF( 7, -1)},
+ {COEF(-1, -1), COEF( 11, 36), COEF( 49, 29), COEF( 6, -1)},
+ {COEF(-1, -1), COEF( 12, 38), COEF( 48, 28), COEF( 5, -1)},
+ {COEF(-1, 0), COEF( 12, 40), COEF( 48, 26), COEF( 4, -1)},
+ {COEF(-1, 0), COEF( 15, 41), COEF( 47, 24), COEF( 3, -1)},
+ {COEF(-1, 0), COEF( 17, 42), COEF( 46, 22), COEF( 3, -1)},
+ {COEF(-1, 1), COEF( 18, 43), COEF( 45, 21), COEF( 2, -1)},
+ {COEF(-1, 1), COEF( 20, 45), COEF( 44, 19), COEF( 1, -1)},
+ {COEF(-1, 1), COEF( 18, 43), COEF( 45, 21), COEF( 2, -1)},
+ {COEF(-1, 0), COEF( 17, 42), COEF( 46, 22), COEF( 3, -1)},
+ {COEF(-1, 0), COEF( 15, 41), COEF( 47, 24), COEF( 3, -1)},
+ {COEF(-1, 0), COEF( 12, 40), COEF( 48, 26), COEF( 4, -1)},
+ {COEF(-1, -1), COEF( 12, 38), COEF( 48, 28), COEF( 5, -1)},
+ {COEF(-1, -1), COEF( 11, 36), COEF( 49, 29), COEF( 6, -1)},
+ {COEF(-1, -1), COEF( 9, 35), COEF( 49, 31), COEF( 7, -1)}
+ }, { /* 8:5 Zoom-out */
+ {COEF( 0, 1), COEF( 11, 32), COEF( 42, 30), COEF( 11, 1)},
+ {COEF(-1, 1), COEF( 12, 33), COEF( 43, 30), COEF( 10, 0)},
+ {COEF(-1, 1), COEF( 13, 34), COEF( 43, 29), COEF( 9, 0)},
+ {COEF(-1, 2), COEF( 14, 35), COEF( 43, 27), COEF( 8, 0)},
+ {COEF(-1, 2), COEF( 16, 36), COEF( 42, 26), COEF( 7, 0)},
+ {COEF(-1, 2), COEF( 17, 37), COEF( 42, 25), COEF( 6, 0)},
+ {COEF( 0, 3), COEF( 18, 38), COEF( 41, 23), COEF( 5, 0)},
+ {COEF( 0, 3), COEF( 20, 39), COEF( 40, 22), COEF( 4, 0)},
+ {COEF( 0, 4), COEF( 21, 39), COEF( 40, 20), COEF( 4, 0)},
+ {COEF( 0, 3), COEF( 20, 39), COEF( 40, 22), COEF( 4, 0)},
+ {COEF( 0, 3), COEF( 18, 38), COEF( 41, 23), COEF( 5, 0)},
+ {COEF(-1, 2), COEF( 17, 37), COEF( 42, 25), COEF( 6, 0)},
+ {COEF(-1, 2), COEF( 16, 36), COEF( 42, 26), COEF( 7, 0)},
+ {COEF(-1, 2), COEF( 14, 35), COEF( 43, 27), COEF( 8, 0)},
+ {COEF(-1, 1), COEF( 13, 34), COEF( 43, 29), COEF( 9, 0)},
+ {COEF(-1, 1), COEF( 12, 33), COEF( 43, 30), COEF( 10, 0)}
+ }, { /* 8:4 Zoom-out */
+ {COEF( 0, 2), COEF( 13, 30), COEF( 39, 29), COEF( 13, 2)},
+ {COEF( 0, 3), COEF( 14, 31), COEF( 38, 28), COEF( 12, 2)},
+ {COEF( 0, 3), COEF( 15, 32), COEF( 38, 27), COEF( 11, 2)},
+ {COEF( 0, 4), COEF( 16, 33), COEF( 38, 26), COEF( 10, 1)},
+ {COEF( 0, 4), COEF( 17, 34), COEF( 37, 26), COEF( 9, 1)},
+ {COEF( 0, 6), COEF( 18, 34), COEF( 37, 24), COEF( 8, 1)},
+ {COEF( 0, 5), COEF( 19, 35), COEF( 37, 23), COEF( 8, 1)},
+ {COEF( 0, 6), COEF( 21, 35), COEF( 36, 22), COEF( 7, 1)},
+ {COEF( 1, 6), COEF( 22, 36), COEF( 35, 21), COEF( 6, 1)},
+ {COEF( 0, 6), COEF( 21, 35), COEF( 36, 22), COEF( 7, 1)},
+ {COEF( 0, 5), COEF( 19, 35), COEF( 37, 23), COEF( 8, 1)},
+ {COEF( 0, 6), COEF( 18, 34), COEF( 37, 24), COEF( 8, 1)},
+ {COEF( 0, 4), COEF( 17, 34), COEF( 37, 26), COEF( 9, 1)},
+ {COEF( 0, 4), COEF( 16, 33), COEF( 38, 26), COEF( 10, 1)},
+ {COEF( 0, 3), COEF( 15, 32), COEF( 38, 27), COEF( 11, 2)},
+ {COEF( 0, 3), COEF( 14, 31), COEF( 38, 28), COEF( 12, 2)}
+ }, { /* 8:3 Zoom-out */
+ {COEF( 0, 4), COEF( 15, 28), COEF( 35, 28), COEF( 14, 4)},
+ {COEF( 1, 5), COEF( 16, 29), COEF( 34, 27), COEF( 13, 3)},
+ {COEF( 1, 6), COEF( 16, 30), COEF( 34, 26), COEF( 12, 3)},
+ {COEF( 1, 6), COEF( 17, 30), COEF( 34, 25), COEF( 12, 3)},
+ {COEF( 1, 6), COEF( 18, 31), COEF( 34, 25), COEF( 11, 2)},
+ {COEF( 1, 7), COEF( 19, 31), COEF( 34, 24), COEF( 10, 2)},
+ {COEF( 2, 7), COEF( 20, 32), COEF( 33, 23), COEF( 9, 2)},
+ {COEF( 2, 8), COEF( 21, 32), COEF( 33, 22), COEF( 8, 2)},
+ {COEF( 2, 8), COEF( 22, 32), COEF( 33, 22), COEF( 8, 2)},
+ {COEF( 2, 8), COEF( 21, 32), COEF( 33, 23), COEF( 8, 2)},
+ {COEF( 2, 7), COEF( 20, 32), COEF( 33, 24), COEF( 9, 2)},
+ {COEF( 1, 7), COEF( 19, 31), COEF( 34, 18), COEF( 10, 2)},
+ {COEF( 1, 6), COEF( 18, 31), COEF( 34, 25), COEF( 11, 2)},
+ {COEF( 1, 6), COEF( 17, 30), COEF( 34, 25), COEF( 12, 3)},
+ {COEF( 1, 6), COEF( 16, 30), COEF( 34, 26), COEF( 12, 3)},
+ {COEF( 1, 5), COEF( 16, 29), COEF( 34, 27), COEF( 13, 3)}
+ }, { /* 8:2 Zoom-out */
+ {COEF( 0, 5), COEF( 16, 27), COEF( 33, 27), COEF( 15, 5)},
+ {COEF( 2, 6), COEF( 16, 27), COEF( 32, 26), COEF( 14, 5)},
+ {COEF( 2, 6), COEF( 17, 28), COEF( 32, 25), COEF( 13, 5)},
+ {COEF( 2, 6), COEF( 18, 29), COEF( 32, 25), COEF( 12, 4)},
+ {COEF( 2, 7), COEF( 19, 29), COEF( 31, 24), COEF( 12, 4)},
+ {COEF( 2, 8), COEF( 20, 30), COEF( 31, 23), COEF( 11, 3)},
+ {COEF( 2, 8), COEF( 20, 30), COEF( 31, 23), COEF( 11, 3)},
+ {COEF( 2, 9), COEF( 21, 30), COEF( 31, 22), COEF( 10, 3)},
+ {COEF( 3, 10), COEF( 22, 31), COEF( 30, 21), COEF( 9, 2)},
+ {COEF( 2, 9), COEF( 21, 30), COEF( 31, 22), COEF( 10, 3)},
+ {COEF( 2, 8), COEF( 20, 30), COEF( 31, 23), COEF( 11, 3)},
+ {COEF( 2, 8), COEF( 20, 30), COEF( 31, 23), COEF( 11, 3)},
+ {COEF( 2, 7), COEF( 19, 29), COEF( 31, 24), COEF( 12, 4)},
+ {COEF( 2, 6), COEF( 18, 29), COEF( 32, 25), COEF( 12, 4)},
+ {COEF( 2, 6), COEF( 17, 28), COEF( 32, 25), COEF( 13, 5)},
+ {COEF( 2, 6), COEF( 16, 27), COEF( 32, 26), COEF( 14, 5)}
+ }
+};
+
+static const __u32 sc_coef_4t_blur1[7][16][2] = {
+ { /* 8:8 or zoom-in */
+ {COEF( 0, 27), COEF( 76, 25)},
+ {COEF( 0, 31), COEF( 76, 21)},
+ {COEF( 0, 35), COEF( 75, 18)},
+ {COEF( 1, 39), COEF( 73, 15)},
+ {COEF( 1, 44), COEF( 71, 12)},
+ {COEF( 2, 48), COEF( 69, 9)},
+ {COEF( 3, 53), COEF( 65, 7)},
+ {COEF( 4, 57), COEF( 61, 6)},
+ {COEF( 5, 61), COEF( 58, 4)},
+ {COEF( 6, 61), COEF( 57, 4)},
+ {COEF( 7, 65), COEF( 53, 3)},
+ {COEF( 9, 69), COEF( 48, 2)},
+ {COEF(12, 71), COEF( 44, 1)},
+ {COEF(15, 73), COEF( 39, 1)},
+ {COEF(18, 75), COEF( 35, 0)},
+ {COEF(21, 76), COEF( 31, 0)}
+ }, { /* 8:7 Zoom-out */
+ {COEF( 0, 29), COEF( 73, 26)},
+ {COEF( 1, 32), COEF( 72, 23)},
+ {COEF( 1, 36), COEF( 72, 19)},
+ {COEF( 2, 40), COEF( 70, 16)},
+ {COEF( 2, 44), COEF( 68, 14)},
+ {COEF( 3, 48), COEF( 66, 11)},
+ {COEF( 4, 52), COEF( 63, 9)},
+ {COEF( 5, 56), COEF( 60, 7)},
+ {COEF( 7, 58), COEF( 57, 6)},
+ {COEF( 7, 60), COEF( 56, 5)},
+ {COEF( 9, 63), COEF( 52, 4)},
+ {COEF(11, 66), COEF( 48, 3)},
+ {COEF(14, 68), COEF( 44, 2)},
+ {COEF(16, 70), COEF( 40, 2)},
+ {COEF(19, 72), COEF( 36, 1)},
+ {COEF(23, 72), COEF( 32, 1)}
+ }, { /* 8:6 Zoom-out */
+ {COEF( 0, 30), COEF( 69, 29)},
+ {COEF( 2, 33), COEF( 69, 24)},
+ {COEF( 2, 37), COEF( 68, 21)},
+ {COEF( 3, 41), COEF( 66, 18)},
+ {COEF( 3, 44), COEF( 65, 16)},
+ {COEF( 4, 48), COEF( 63, 13)},
+ {COEF( 5, 51), COEF( 61, 11)},
+ {COEF( 7, 54), COEF( 58, 9)},
+ {COEF( 8, 58), COEF( 55, 7)},
+ {COEF( 9, 58), COEF( 54, 7)},
+ {COEF(11, 61), COEF( 51, 5)},
+ {COEF(13, 63), COEF( 48, 4)},
+ {COEF(16, 65), COEF( 44, 3)},
+ {COEF(18, 66), COEF( 41, 3)},
+ {COEF(21, 68), COEF( 37, 2)},
+ {COEF(24, 69), COEF( 33, 2)}
+ }, { /* 8:5 Zoom-out */
+ {COEF( 0, 32), COEF( 66, 30)},
+ {COEF( 3, 34), COEF( 66, 25)},
+ {COEF( 3, 38), COEF( 65, 22)},
+ {COEF( 4, 41), COEF( 64, 19)},
+ {COEF( 4, 44), COEF( 63, 17)},
+ {COEF( 5, 48), COEF( 61, 14)},
+ {COEF( 7, 50), COEF( 59, 12)},
+ {COEF( 8, 54), COEF( 56, 10)},
+ {COEF(10, 56), COEF( 54, 8)},
+ {COEF(10, 56), COEF( 54, 8)},
+ {COEF(12, 59), COEF( 50, 7)},
+ {COEF(14, 61), COEF( 48, 5)},
+ {COEF(17, 63), COEF( 44, 4)},
+ {COEF(19, 64), COEF( 41, 4)},
+ {COEF(22, 65), COEF( 38, 3)},
+ {COEF(25, 66), COEF( 34, 3)}
+ }, { /* 8:4 Zoom-out */
+ {COEF( 0, 34), COEF( 64, 30)},
+ {COEF( 3, 35), COEF( 64, 26)},
+ {COEF( 4, 38), COEF( 63, 23)},
+ {COEF( 4, 41), COEF( 62, 21)},
+ {COEF( 5, 44), COEF( 61, 18)},
+ {COEF( 6, 47), COEF( 60, 15)},
+ {COEF( 8, 50), COEF( 57, 13)},
+ {COEF( 9, 53), COEF( 55, 11)},
+ {COEF(11, 55), COEF( 53, 9)},
+	{COEF(11, 55), COEF( 53, 9)},
+ {COEF(13, 57), COEF( 50, 8)},
+ {COEF(15, 60), COEF( 47, 6)},
+ {COEF(18, 61), COEF( 44, 5)},
+ {COEF(21, 62), COEF( 41, 4)},
+ {COEF(23, 63), COEF( 38, 4)},
+ {COEF(26, 64), COEF( 35, 3)}
+ }, { /* 8:3 Zoom-out */
+	{COEF( 0, 34), COEF( 62, 32)},
+ {COEF( 4, 35), COEF( 62, 27)},
+ {COEF( 4, 38), COEF( 61, 25)},
+ {COEF( 5, 41), COEF( 60, 22)},
+ {COEF( 6, 44), COEF( 59, 19)},
+ {COEF( 7, 47), COEF( 58, 16)},
+ {COEF( 9, 49), COEF( 56, 14)},
+ {COEF(10, 52), COEF( 54, 12)},
+ {COEF(12, 54), COEF( 52, 10)},
+ {COEF(12, 54), COEF( 52, 10)},
+ {COEF(14, 56), COEF( 49, 9)},
+ {COEF(16, 58), COEF( 47, 7)},
+ {COEF(19, 59), COEF( 44, 6)},
+ {COEF(22, 60), COEF( 41, 5)},
+ {COEF(25, 61), COEF( 38, 4)},
+ {COEF(27, 62), COEF( 35, 4)}
+ }, { /* 8:2 Zoom-out */
+ {COEF( 2, 33), COEF( 61, 32)},
+ {COEF( 5, 36), COEF( 61, 26)},
+ {COEF( 5, 38), COEF( 60, 25)},
+ {COEF( 6, 41), COEF( 59, 22)},
+ {COEF( 7, 44), COEF( 58, 19)},
+ {COEF( 8, 46), COEF( 57, 17)},
+ {COEF( 9, 49), COEF( 55, 15)},
+ {COEF(11, 51), COEF( 53, 13)},
+ {COEF(13, 53), COEF( 51, 11)},
+ {COEF(13, 53), COEF( 51, 11)},
+ {COEF(15, 55), COEF( 49, 9)},
+ {COEF(17, 57), COEF( 46, 8)},
+ {COEF(19, 58), COEF( 44, 7)},
+ {COEF(22, 59), COEF( 41, 6)},
+ {COEF(25, 60), COEF( 38, 5)},
+ {COEF(26, 61), COEF( 36, 5)}
+ },
+};
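+
+/*
+ * NOTE: the 8-tap tables feed the horizontal polyphase filter and the
+ * 4-tap tables the vertical one (see sc_hwset_polyphase_hcoef() and
+ * sc_hwset_polyphase_vcoef() below). The first index selects the table
+ * for 8:8 down to 8:2 scaling, the second the phase, and each row of
+ * taps sums to (nearly) 128, i.e. unity DC gain in 7-bit fixed point.
+ */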
+
+/* CSC (Color Space Conversion) coefficient tables */
+static struct sc_csc_tab sc_no_csc = {
+ { 0x200, 0x000, 0x000, 0x000, 0x200, 0x000, 0x000, 0x000, 0x200 },
+};
+
+static struct sc_csc_tab sc_y2r = {
+ /* REC.601 Narrow */
+ { 0x254, 0x000, 0x331, 0x254, 0xF38, 0xE60, 0x254, 0x409, 0x000 },
+ /* REC.601 Wide */
+ { 0x200, 0x000, 0x2BE, 0x200, 0xF54, 0xE9B, 0x200, 0x377, 0x000 },
+ /* REC.709 Narrow */
+ { 0x254, 0x000, 0x331, 0x254, 0xF38, 0xE60, 0x254, 0x409, 0x000 },
+ /* REC.709 Wide */
+ { 0x200, 0x000, 0x314, 0x200, 0xFA2, 0xF15, 0x200, 0x3A2, 0x000 },
+ /* BT.2020 Narrow */
+ { 0x254, 0x000, 0x36F, 0x254, 0xF9E, 0xEAC, 0x254, 0x461, 0x000 },
+ /* BT.2020 Wide */
+ { 0x200, 0x000, 0x2F3, 0x200, 0xFAC, 0xEDB, 0x200, 0x3C3, 0x000 },
+ /* DCI-P3 Narrow */
+ { 0x254, 0x000, 0x3AE, 0x254, 0xF96, 0xEEE, 0x254, 0x456, 0x000 },
+ /* DCI-P3 Wide */
+ { 0x200, 0x000, 0x329, 0x200, 0xFA5, 0xF15, 0x200, 0x3B9, 0x000 },
+};
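+
+/*
+ * NOTE: each table entry is a 3x3 matrix in row-major order applied to
+ * (Y, Cb, Cr) or (R, G, B), with 12-bit two's-complement coefficients in
+ * Q9 fixed point (0x200 == 1.0). E.g. the REC.601 narrow Y2R row encodes
+ * R = 1.164 * Y + 1.596 * Cr: 0x254 == round(1.164 * 512) and
+ * 0x331 == round(1.596 * 512), while the Cb term of G is
+ * 0xF38 == -200 == round(-0.391 * 512).
+ */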
+
+static struct sc_csc_tab sc_r2y = {
+ /* REC.601 Narrow */
+ { 0x084, 0x102, 0x032, 0xFB4, 0xF6B, 0x0E1, 0x0E1, 0xF44, 0xFDC },
+ /* REC.601 Wide */
+ { 0x099, 0x12D, 0x03A, 0xFA8, 0xF52, 0x106, 0x106, 0xF25, 0xFD6 },
+ /* REC.709 Narrow */
+ { 0x05E, 0x13A, 0x020, 0xFCC, 0xF53, 0x0E1, 0x0E1, 0xF34, 0xFEC },
+ /* REC.709 Wide */
+ { 0x06D, 0x16E, 0x025, 0xFC4, 0xF36, 0x106, 0x106, 0xF12, 0xFE8 },
+ /* TODO: BT.2020 Narrow */
+ { 0x087, 0x15B, 0x01E, 0xFB9, 0xF47, 0x100, 0x100, 0xF15, 0xFEB },
+ /* BT.2020 Wide */
+ { 0x087, 0x15B, 0x01E, 0xFB9, 0xF47, 0x100, 0x100, 0xF15, 0xFEB },
+ /* TODO: DCI-P3 Narrow */
+ { 0x06B, 0x171, 0x023, 0xFC6, 0xF3A, 0x100, 0x100, 0xF16, 0xFEA },
+ /* DCI-P3 Wide */
+ { 0x06B, 0x171, 0x023, 0xFC6, 0xF3A, 0x100, 0x100, 0xF16, 0xFEA },
+};
+
+static struct sc_csc_tab *sc_csc_list[] = {
+ [0] = &sc_no_csc,
+ [1] = &sc_y2r,
+ [2] = &sc_r2y,
+};
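+/* indexed by enum sc_csc_idx (NO_CSC, CSC_Y2R, CSC_R2Y) declared in scaler.h */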
+
+static struct sc_bl_op_val sc_bl_op_tbl[] = {
+ /* Sc, Sa, Dc, Da */
+ {ZERO, ZERO, ZERO, ZERO}, /* CLEAR */
+ { ONE, ONE, ZERO, ZERO}, /* SRC */
+ {ZERO, ZERO, ONE, ONE}, /* DST */
+ { ONE, ONE, INV_SA, INV_SA}, /* SRC_OVER */
+ {INV_DA, ONE, ONE, INV_SA}, /* DST_OVER */
+ {DST_A, DST_A, ZERO, ZERO}, /* SRC_IN */
+ {ZERO, ZERO, SRC_A, SRC_A}, /* DST_IN */
+ {INV_DA, INV_DA, ZERO, ZERO}, /* SRC_OUT */
+ {ZERO, ZERO, INV_SA, INV_SA}, /* DST_OUT */
+ {DST_A, ZERO, INV_SA, ONE}, /* SRC_ATOP */
+ {INV_DA, ONE, SRC_A, ZERO}, /* DST_ATOP */
+	{INV_DA, ONE, INV_SA, ONE},	/* XOR: needs a workaround */
+ {INV_DA, ONE, INV_SA, INV_SA}, /* DARKEN */
+ {INV_DA, ONE, INV_SA, INV_SA}, /* LIGHTEN */
+ {INV_DA, ONE, INV_SA, INV_SA}, /* MULTIPLY */
+ {ONE, ONE, INV_SC, INV_SA}, /* SCREEN */
+ {ONE, ONE, ONE, ONE}, /* ADD */
+};
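+
+/*
+ * NOTE: each row above lists the coefficients applied to the source and
+ * destination color/alpha terms. E.g. the SRC_OVER row {ONE, ONE, INV_SA,
+ * INV_SA} realizes Co = 1 * Sc + (1 - Sa) * Dc and
+ * Ao = 1 * Sa + (1 - Sa) * Da, the Porter-Duff "source over" equations
+ * documented with enum sc_blend_op in scaler.h.
+ */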
+
+int sc_hwset_src_image_format(struct sc_dev *sc, const struct sc_fmt *fmt)
+{
+ writel(fmt->cfg_val, sc->regs + SCALER_SRC_CFG);
+ return 0;
+}
+
+int sc_hwset_dst_image_format(struct sc_dev *sc, const struct sc_fmt *fmt)
+{
+ writel(fmt->cfg_val, sc->regs + SCALER_DST_CFG);
+
+ /*
+ * When output format is RGB,
+ * CSC_Y_OFFSET_DST_EN should be 0
+ * to avoid color distortion
+ */
+ if (fmt->is_rgb) {
+ writel(readl(sc->regs + SCALER_CFG) &
+ ~SCALER_CFG_CSC_Y_OFFSET_DST,
+ sc->regs + SCALER_CFG);
+ }
+
+ return 0;
+}
+
+void sc_hwset_pre_multi_format(struct sc_dev *sc, bool src, bool dst)
+{
+ unsigned long cfg = readl(sc->regs + SCALER_SRC_CFG);
+
+ if (sc->version == SCALER_VERSION(4, 0, 1)) {
+ if (src != dst)
+ dev_err(sc->dev,
+ "pre-multi fmt should be same between src and dst\n");
+ return;
+ }
+
+ if (src && ((cfg & SCALER_CFG_FMT_MASK) == SCALER_CFG_FMT_ARGB8888)) {
+ cfg &= ~SCALER_CFG_FMT_MASK;
+ cfg |= SCALER_CFG_FMT_P_ARGB8888;
+ writel(cfg, sc->regs + SCALER_SRC_CFG);
+ }
+
+ cfg = readl(sc->regs + SCALER_DST_CFG);
+ if (dst && ((cfg & SCALER_CFG_FMT_MASK) == SCALER_CFG_FMT_ARGB8888)) {
+ cfg &= ~SCALER_CFG_FMT_MASK;
+ cfg |= SCALER_CFG_FMT_P_ARGB8888;
+ writel(cfg, sc->regs + SCALER_DST_CFG);
+ }
+}
+
+void get_blend_value(unsigned int *cfg, u32 val, bool pre_multi)
+{
+ unsigned int tmp;
+
+ *cfg &= ~(SCALER_SEL_INV_MASK | SCALER_SEL_MASK |
+ SCALER_OP_SEL_INV_MASK | SCALER_OP_SEL_MASK);
+
+ if (val == 0xff) {
+ *cfg |= (1 << SCALER_SEL_INV_SHIFT);
+ } else {
+ if (pre_multi)
+ *cfg |= (1 << SCALER_SEL_SHIFT);
+ else
+ *cfg |= (2 << SCALER_SEL_SHIFT);
+ tmp = val & 0xf;
+ *cfg |= (tmp << SCALER_OP_SEL_SHIFT);
+ }
+
+ if (val >= BL_INV_BIT_OFFSET)
+ *cfg |= (1 << SCALER_OP_SEL_INV_SHIFT);
+}
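+
+/*
+ * NOTE: in the encoding consumed above, bits [3:0] of an enum sc_bl_comp
+ * value select the coefficient (OP_SEL), bit 4 (BL_INV_BIT_OFFSET)
+ * requests the inverted coefficient (OP_SEL_INV), and the special value
+ * ZERO (0xff) disables the term entirely via SEL_INV; pre_multi merely
+ * selects SEL mode 1 (pre-multiplied alpha) instead of 2.
+ */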
+
+void sc_hwset_blend(struct sc_dev *sc, enum sc_blend_op bl_op, bool pre_multi,
+ unsigned char g_alpha)
+{
+ unsigned int cfg = readl(sc->regs + SCALER_CFG);
+ int idx = bl_op - 1;
+
+ cfg |= SCALER_CFG_BLEND_EN;
+ writel(cfg, sc->regs + SCALER_CFG);
+
+ cfg = readl(sc->regs + SCALER_SRC_BLEND_COLOR);
+ get_blend_value(&cfg, sc_bl_op_tbl[idx].src_color, pre_multi);
+ if (g_alpha < 0xff)
+ cfg |= (SRC_GA << SCALER_OP_SEL_SHIFT);
+ writel(cfg, sc->regs + SCALER_SRC_BLEND_COLOR);
+ sc_dbg("src_blend_color is 0x%x, %d\n", cfg, pre_multi);
+
+ cfg = readl(sc->regs + SCALER_SRC_BLEND_ALPHA);
+ get_blend_value(&cfg, sc_bl_op_tbl[idx].src_alpha, 1);
+ if (g_alpha < 0xff)
+ cfg |= (SRC_GA << SCALER_OP_SEL_SHIFT) | (g_alpha << 0);
+ writel(cfg, sc->regs + SCALER_SRC_BLEND_ALPHA);
+ sc_dbg("src_blend_alpha is 0x%x\n", cfg);
+
+ cfg = readl(sc->regs + SCALER_DST_BLEND_COLOR);
+ get_blend_value(&cfg, sc_bl_op_tbl[idx].dst_color, pre_multi);
+ if (g_alpha < 0xff)
+ cfg |= ((INV_SAGA & 0xf) << SCALER_OP_SEL_SHIFT);
+ writel(cfg, sc->regs + SCALER_DST_BLEND_COLOR);
+ sc_dbg("dst_blend_color is 0x%x\n", cfg);
+
+ cfg = readl(sc->regs + SCALER_DST_BLEND_ALPHA);
+ get_blend_value(&cfg, sc_bl_op_tbl[idx].dst_alpha, 1);
+ if (g_alpha < 0xff)
+ cfg |= ((INV_SAGA & 0xf) << SCALER_OP_SEL_SHIFT);
+ writel(cfg, sc->regs + SCALER_DST_BLEND_ALPHA);
+ sc_dbg("dst_blend_alpha is 0x%x\n", cfg);
+
+ /*
+ * If dst format is non-premultiplied format
+ * and blending operation is enabled,
+ * result image should be divided by alpha value
+ * because the result is always pre-multiplied.
+ */
+ if (!pre_multi) {
+ cfg = readl(sc->regs + SCALER_CFG);
+ cfg |= SCALER_CFG_BL_DIV_ALPHA_EN;
+ writel(cfg, sc->regs + SCALER_CFG);
+ }
+}
+
+void sc_hwset_color_fill(struct sc_dev *sc, unsigned int val)
+{
+ unsigned int cfg = readl(sc->regs + SCALER_CFG);
+
+ cfg |= SCALER_CFG_FILL_EN;
+ writel(cfg, sc->regs + SCALER_CFG);
+
+	/* the previous contents of the register are irrelevant */
+	writel(val, sc->regs + SCALER_FILL_COLOR);
+	sc_dbg("fill color is 0x%08x\n", val);
+}
+
+void sc_hwset_dith(struct sc_dev *sc, unsigned int val)
+{
+ unsigned int cfg = readl(sc->regs + SCALER_DITH_CFG);
+
+ cfg &= ~(SCALER_DITH_R_MASK | SCALER_DITH_G_MASK | SCALER_DITH_B_MASK);
+ cfg |= val;
+ writel(cfg, sc->regs + SCALER_DITH_CFG);
+}
+
+void sc_hwset_csc_coef(struct sc_dev *sc, enum sc_csc_idx idx,
+ struct sc_csc *csc)
+{
+	unsigned int i, j;
+ unsigned long cfg;
+ int *csc_eq_val;
+
+ if (idx == NO_CSC) {
+ csc_eq_val = sc_csc_list[idx]->narrow_601;
+ } else {
+ if (csc->csc_eq == V4L2_COLORSPACE_REC709) {
+ if (csc->csc_range == SC_CSC_NARROW)
+ csc_eq_val = sc_csc_list[idx]->narrow_709;
+ else
+ csc_eq_val = sc_csc_list[idx]->wide_709;
+ } else if (csc->csc_eq == V4L2_COLORSPACE_BT2020) {
+ if (csc->csc_range == SC_CSC_NARROW)
+ csc_eq_val = sc_csc_list[idx]->narrow_2020;
+ else
+ csc_eq_val = sc_csc_list[idx]->wide_2020;
+ } else if (csc->csc_eq == V4L2_COLORSPACE_DCI_P3) {
+ if (csc->csc_range == SC_CSC_NARROW)
+ csc_eq_val = sc_csc_list[idx]->narrow_p3;
+ else
+ csc_eq_val = sc_csc_list[idx]->wide_p3;
+ } else {
+ if (csc->csc_range == SC_CSC_NARROW)
+ csc_eq_val = sc_csc_list[idx]->narrow_601;
+ else
+ csc_eq_val = sc_csc_list[idx]->wide_601;
+ }
+ }
+
+
+ for (i = 0, j = 0; i < 9; i++, j += 4) {
+ cfg = readl(sc->regs + SCALER_CSC_COEF00 + j);
+ cfg &= ~SCALER_CSC_COEF_MASK;
+ cfg |= csc_eq_val[i];
+ writel(cfg, sc->regs + SCALER_CSC_COEF00 + j);
+ sc_dbg("csc value %d - %d\n", i, csc_eq_val[i]);
+ }
+
+ /* set CSC_Y_OFFSET_EN */
+ cfg = readl(sc->regs + SCALER_CFG);
+ if (idx == CSC_Y2R) {
+ if (csc->csc_range == SC_CSC_WIDE)
+ cfg &= ~SCALER_CFG_CSC_Y_OFFSET_SRC;
+ else
+ cfg |= SCALER_CFG_CSC_Y_OFFSET_SRC;
+ } else if (idx == CSC_R2Y) {
+ if (csc->csc_range == SC_CSC_WIDE)
+ cfg &= ~SCALER_CFG_CSC_Y_OFFSET_DST;
+ else
+ cfg |= SCALER_CFG_CSC_Y_OFFSET_DST;
+ }
+ writel(cfg, sc->regs + SCALER_CFG);
+}
+
+static const __u32 sc_scaling_ratio[] = {
+	1048576,	/* 0: 8:8 scaling or zoom-in */
+ 1198372, /* 1: 8:7 zoom-out */
+ 1398101, /* 2: 8:6 zoom-out */
+ 1677721, /* 3: 8:5 zoom-out */
+ 2097152, /* 4: 8:4 zoom-out */
+ 2796202, /* 5: 8:3 zoom-out */
+ /* higher ratio -> 6: 8:2 zoom-out */
+};
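+
+/*
+ * NOTE: these thresholds are scaling ratios in 20-bit fixed point,
+ * matching (src << 20) / dst: e.g. the 8:3 entry is (8 << 20) / 3
+ * == 8388608 / 3 == 2796202 (truncated).
+ */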
+
+static unsigned int sc_get_scale_filter(unsigned int ratio)
+{
+ unsigned int filter;
+
+ for (filter = 0; filter < ARRAY_SIZE(sc_scaling_ratio); filter++)
+ if (ratio <= sc_scaling_ratio[filter])
+ return filter;
+
+ return filter;
+}
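+
+/*
+ * NOTE: a ratio beyond the last entry falls through the loop above and
+ * returns ARRAY_SIZE(sc_scaling_ratio) == 6, which selects the 8:2
+ * zoom-out coefficient set, the strongest low-pass filter available.
+ */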
+
+void sc_hwset_polyphase_hcoef(struct sc_dev *sc,
+ unsigned int yratio, unsigned int cratio,
+ unsigned int filter)
+{
+ unsigned int phase;
+ unsigned int yfilter = sc_get_scale_filter(yratio);
+ unsigned int cfilter = sc_get_scale_filter(cratio);
+ const __u32 (*sc_coef_8t)[16][4] = sc_coef_8t_org;
+
+ if (sc_set_blur || filter == SC_FT_BLUR)
+ sc_coef_8t = sc_coef_8t_blur1;
+
+ BUG_ON(yfilter >= ARRAY_SIZE(sc_coef_8t_org));
+ BUG_ON(cfilter >= ARRAY_SIZE(sc_coef_8t_org));
+
+ BUILD_BUG_ON(ARRAY_SIZE(sc_coef_8t_org[yfilter]) < 9);
+ BUILD_BUG_ON(ARRAY_SIZE(sc_coef_8t_org[cfilter]) < 9);
+
+ for (phase = 0; phase < 9; phase++) {
+ __raw_writel(sc_coef_8t[yfilter][phase][3],
+ sc->regs + SCALER_YHCOEF + phase * 16);
+ __raw_writel(sc_coef_8t[yfilter][phase][2],
+ sc->regs + SCALER_YHCOEF + phase * 16 + 4);
+ __raw_writel(sc_coef_8t[yfilter][phase][1],
+ sc->regs + SCALER_YHCOEF + phase * 16 + 8);
+ __raw_writel(sc_coef_8t[yfilter][phase][0],
+ sc->regs + SCALER_YHCOEF + phase * 16 + 12);
+ }
+
+ for (phase = 0; phase < 9; phase++) {
+ __raw_writel(sc_coef_8t[cfilter][phase][3],
+ sc->regs + SCALER_CHCOEF + phase * 16);
+ __raw_writel(sc_coef_8t[cfilter][phase][2],
+ sc->regs + SCALER_CHCOEF + phase * 16 + 4);
+ __raw_writel(sc_coef_8t[cfilter][phase][1],
+ sc->regs + SCALER_CHCOEF + phase * 16 + 8);
+ __raw_writel(sc_coef_8t[cfilter][phase][0],
+ sc->regs + SCALER_CHCOEF + phase * 16 + 12);
+ }
+}
+
+void sc_hwset_polyphase_vcoef(struct sc_dev *sc,
+ unsigned int yratio, unsigned int cratio,
+ unsigned int filter)
+{
+ unsigned int phase;
+ unsigned int yfilter = sc_get_scale_filter(yratio);
+ unsigned int cfilter = sc_get_scale_filter(cratio);
+ const __u32 (*sc_coef_4t)[16][2] = sc_coef_4t_org;
+
+ if (sc_set_blur || filter == SC_FT_BLUR)
+ sc_coef_4t = sc_coef_4t_blur1;
+
+ BUG_ON(yfilter >= ARRAY_SIZE(sc_coef_4t_org));
+ BUG_ON(cfilter >= ARRAY_SIZE(sc_coef_4t_org));
+
+ BUILD_BUG_ON(ARRAY_SIZE(sc_coef_4t_org[yfilter]) < 9);
+ BUILD_BUG_ON(ARRAY_SIZE(sc_coef_4t_org[cfilter]) < 9);
+
+	/* the reset value of the coefficient registers is the 8:8 table */
+ for (phase = 0; phase < 9; phase++) {
+ __raw_writel(sc_coef_4t[yfilter][phase][1],
+ sc->regs + SCALER_YVCOEF + phase * 8);
+ __raw_writel(sc_coef_4t[yfilter][phase][0],
+ sc->regs + SCALER_YVCOEF + phase * 8 + 4);
+ }
+
+ for (phase = 0; phase < 9; phase++) {
+ __raw_writel(sc_coef_4t[cfilter][phase][1],
+ sc->regs + SCALER_CVCOEF + phase * 8);
+ __raw_writel(sc_coef_4t[cfilter][phase][0],
+ sc->regs + SCALER_CVCOEF + phase * 8 + 4);
+ }
+}
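+
+/*
+ * NOTE: a minimal sketch of the usual programming order (the ratio and
+ * filter variables are illustrative; the real call site lives elsewhere
+ * in this driver):
+ *
+ *	sc_hwset_hratio(sc, h_ratio, pre_h_ratio);
+ *	sc_hwset_vratio(sc, v_ratio, pre_v_ratio);
+ *	sc_hwset_polyphase_hcoef(sc, h_ratio, c_h_ratio, filter);
+ *	sc_hwset_polyphase_vcoef(sc, v_ratio, c_v_ratio, filter);
+ *
+ * The luma and chroma ratios can differ because the chroma planes of
+ * subsampled YUV formats are scaled by a different factor.
+ */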
+
+void sc_hwset_src_imgsize(struct sc_dev *sc, struct sc_frame *frame)
+{
+ unsigned long cfg = 0;
+
+ cfg &= ~(SCALER_SRC_CSPAN_MASK | SCALER_SRC_YSPAN_MASK);
+ cfg |= frame->width;
+
+	/*
+	 * TODO: the C width should be half of the Y width, but how do
+	 * we get a C width that differs from that assumption from
+	 * userspace, as with the AYV12 format?
+	 */
+ if (frame->sc_fmt->num_comp == 2)
+ cfg |= (frame->width << frame->sc_fmt->cspan) << 16;
+ if (frame->sc_fmt->num_comp == 3) {
+ if (sc_fmt_is_ayv12(frame->sc_fmt->pixelformat))
+ cfg |= ALIGN(frame->width >> 1, 16) << 16;
+ else if (frame->sc_fmt->cspan) /* YUV444 */
+ cfg |= frame->width << 16;
+ else
+ cfg |= (frame->width >> 1) << 16;
+ }
+
+ writel(cfg, sc->regs + SCALER_SRC_SPAN);
+}
+
+void sc_hwset_intsrc_imgsize(struct sc_dev *sc, int num_comp, __u32 width)
+{
+ unsigned long cfg = 0;
+
+ cfg &= ~(SCALER_SRC_CSPAN_MASK | SCALER_SRC_YSPAN_MASK);
+ cfg |= width;
+
+	/*
+	 * TODO: the C width should be half of the Y width, but how do
+	 * we get a C width that differs from that assumption from
+	 * userspace, as with the AYV12 format?
+	 */
+ if (num_comp == 2)
+ cfg |= width << 16;
+ if (num_comp == 3)
+ cfg |= (width >> 1) << 16;
+
+ writel(cfg, sc->regs + SCALER_SRC_SPAN);
+}
+
+void sc_hwset_dst_imgsize(struct sc_dev *sc, struct sc_frame *frame)
+{
+ unsigned long cfg = 0;
+
+ cfg &= ~(SCALER_DST_CSPAN_MASK | SCALER_DST_YSPAN_MASK);
+ cfg |= frame->width;
+
+	/*
+	 * TODO: the C width should be half of the Y width, but how do
+	 * we get a C width that differs from that assumption from
+	 * userspace, as with the AYV12 format?
+	 */
+ if (frame->sc_fmt->num_comp == 2)
+ cfg |= (frame->width << frame->sc_fmt->cspan) << 16;
+ if (frame->sc_fmt->num_comp == 3) {
+ if (sc_fmt_is_ayv12(frame->sc_fmt->pixelformat))
+ cfg |= ALIGN(frame->width >> 1, 16) << 16;
+ else if (frame->sc_fmt->cspan) /* YUV444 */
+ cfg |= frame->width << 16;
+ else
+ cfg |= (frame->width >> 1) << 16;
+ }
+
+ writel(cfg, sc->regs + SCALER_DST_SPAN);
+}
+
+void sc_hwset_src_addr(struct sc_dev *sc, struct sc_addr *addr)
+{
+ writel(addr->y, sc->regs + SCALER_SRC_Y_BASE);
+ writel(addr->cb, sc->regs + SCALER_SRC_CB_BASE);
+ writel(addr->cr, sc->regs + SCALER_SRC_CR_BASE);
+}
+
+void sc_hwset_dst_addr(struct sc_dev *sc, struct sc_addr *addr)
+{
+ writel(addr->y, sc->regs + SCALER_DST_Y_BASE);
+ writel(addr->cb, sc->regs + SCALER_DST_CB_BASE);
+ writel(addr->cr, sc->regs + SCALER_DST_CR_BASE);
+}
+
+void sc_hwregs_dump(struct sc_dev *sc)
+{
+ dev_notice(sc->dev, "Dumping control registers...\n");
+ pr_notice("------------------------------------------------\n");
+
+ print_hex_dump(KERN_NOTICE, "", DUMP_PREFIX_ADDRESS, 16, 4,
+ sc->regs + 0x000, 0x044 - 0x000 + 4, false);
+ print_hex_dump(KERN_NOTICE, "", DUMP_PREFIX_ADDRESS, 16, 4,
+ sc->regs + 0x050, 0x058 - 0x050 + 4, false);
+ print_hex_dump(KERN_NOTICE, "", DUMP_PREFIX_ADDRESS, 16, 4,
+ sc->regs + 0x060, 0x134 - 0x060 + 4, false);
+ print_hex_dump(KERN_NOTICE, "", DUMP_PREFIX_ADDRESS, 16, 4,
+ sc->regs + 0x140, 0x214 - 0x140 + 4, false);
+ print_hex_dump(KERN_NOTICE, "", DUMP_PREFIX_ADDRESS, 16, 4,
+ sc->regs + 0x220, 0x240 - 0x220 + 4, false);
+ print_hex_dump(KERN_NOTICE, "", DUMP_PREFIX_ADDRESS, 16, 4,
+ sc->regs + 0x250, 4, false);
+ print_hex_dump(KERN_NOTICE, "", DUMP_PREFIX_ADDRESS, 16, 4,
+ sc->regs + 0x260, 4, false);
+ print_hex_dump(KERN_NOTICE, "", DUMP_PREFIX_ADDRESS, 16, 4,
+ sc->regs + 0x278, 4, false);
+ if (sc->version <= SCALER_VERSION(2, 1, 1))
+ print_hex_dump(KERN_NOTICE, "", DUMP_PREFIX_ADDRESS, 16, 4,
+ sc->regs + 0x280, 0x28C - 0x280 + 4, false);
+ print_hex_dump(KERN_NOTICE, "", DUMP_PREFIX_ADDRESS, 16, 4,
+ sc->regs + 0x290, 0x298 - 0x290 + 4, false);
+ if (sc->version <= SCALER_VERSION(2, 1, 1))
+ print_hex_dump(KERN_NOTICE, "", DUMP_PREFIX_ADDRESS, 16, 4,
+				sc->regs + 0x2A0, 0x2A8 - 0x2A0 + 4, false);
+ print_hex_dump(KERN_NOTICE, "", DUMP_PREFIX_ADDRESS, 16, 4,
+ sc->regs + 0x2B0, 0x2C4 - 0x2B0 + 4, false);
+ if (sc->version >= SCALER_VERSION(3, 0, 0))
+ print_hex_dump(KERN_NOTICE, "", DUMP_PREFIX_ADDRESS, 16, 4,
+ sc->regs + 0x2D0, 0x2DC - 0x2D0 + 4, false);
+
+ /* shadow registers */
+ print_hex_dump(KERN_NOTICE, "", DUMP_PREFIX_ADDRESS, 16, 4,
+			sc->regs + 0x1004, 4, false);
+ print_hex_dump(KERN_NOTICE, "", DUMP_PREFIX_ADDRESS, 16, 4,
+ sc->regs + 0x1010, 0x1044 - 0x1010 + 4, false);
+
+ print_hex_dump(KERN_NOTICE, "", DUMP_PREFIX_ADDRESS, 16, 4,
+ sc->regs + 0x1050, 0x1058 - 0x1050 + 4, false);
+ print_hex_dump(KERN_NOTICE, "", DUMP_PREFIX_ADDRESS, 16, 4,
+ sc->regs + 0x1060, 0x1134 - 0x1060 + 4, false);
+ print_hex_dump(KERN_NOTICE, "", DUMP_PREFIX_ADDRESS, 16, 4,
+ sc->regs + 0x1140, 0x1214 - 0x1140 + 4, false);
+ print_hex_dump(KERN_NOTICE, "", DUMP_PREFIX_ADDRESS, 16, 4,
+ sc->regs + 0x1220, 0x1240 - 0x1220 + 4, false);
+ print_hex_dump(KERN_NOTICE, "", DUMP_PREFIX_ADDRESS, 16, 4,
+ sc->regs + 0x1250, 4, false);
+	if (sc->version <= SCALER_VERSION(2, 1, 1) ||
+			sc->version == SCALER_VERSION(4, 2, 0))
+ print_hex_dump(KERN_NOTICE, "", DUMP_PREFIX_ADDRESS, 16, 4,
+ sc->regs + 0x1280, 0x128C - 0x1280 + 4, false);
+ print_hex_dump(KERN_NOTICE, "", DUMP_PREFIX_ADDRESS, 16, 4,
+ sc->regs + 0x1290, 0x1298 - 0x1290 + 4, false);
+ if (sc->version >= SCALER_VERSION(3, 0, 0))
+ print_hex_dump(KERN_NOTICE, "", DUMP_PREFIX_ADDRESS, 16, 4,
+ sc->regs + 0x12D0, 0x12DC - 0x12D0 + 4, false);
+ if (sc->version >= SCALER_VERSION(4, 2, 0)) {
+ print_hex_dump(KERN_NOTICE, "", DUMP_PREFIX_ADDRESS, 16, 4,
+ sc->regs + 0x1300, 0x1304 - 0x1300 + 4, false);
+ print_hex_dump(KERN_NOTICE, "", DUMP_PREFIX_ADDRESS, 16, 4,
+ sc->regs + 0x1310, 0x1318 - 0x1310 + 4, false);
+ }
+
+ pr_notice("------------------------------------------------\n");
+}
+
+/* starts from the second status bit, which is the beginning of the error statuses */
+static const char *sc_irq_err_status[] = {
+	[ 0] = "illegal src color",
+	[ 1] = "illegal src Y base",
+	[ 2] = "illegal src Cb base",
+	[ 3] = "illegal src Cr base",
+	[ 4] = "illegal src Y span",
+	[ 5] = "illegal src C span",
+	[ 6] = "illegal src YH pos",
+	[ 7] = "illegal src YV pos",
+	[ 8] = "illegal src CH pos",
+	[ 9] = "illegal src CV pos",
+	[10] = "illegal src width",
+	[11] = "illegal src height",
+	[12] = "illegal dst color",
+	[13] = "illegal dst Y base",
+	[14] = "illegal dst Cb base",
+	[15] = "illegal dst Cr base",
+	[16] = "illegal dst Y span",
+	[17] = "illegal dst C span",
+	[18] = "illegal dst H pos",
+	[19] = "illegal dst V pos",
+	[20] = "illegal dst width",
+	[21] = "illegal dst height",
+	[23] = "illegal scaling ratio",
+	[25] = "illegal pre-scaler width/height",
+	[28] = "AXI Write Error Response",
+	[29] = "AXI Read Error Response",
+	[31] = "timeout",
+};
+
+static void sc_print_irq_err_status(struct sc_dev *sc, u32 status)
+{
+ unsigned int i = 0;
+
+ status >>= 1; /* ignore the INT_STATUS_FRAME_END */
+ if (status) {
+ sc_hwregs_dump(sc);
+
+ while (status) {
+ if (status & 1)
+ dev_err(sc->dev,
+ "Scaler reported error %u: %s\n",
+ i + 1, sc_irq_err_status[i]);
+ i++;
+ status >>= 1;
+ }
+ }
+}
+
+u32 sc_hwget_and_clear_irq_status(struct sc_dev *sc)
+{
+ u32 val = __raw_readl(sc->regs + SCALER_INT_STATUS);
+ sc_print_irq_err_status(sc, val);
+ __raw_writel(val, sc->regs + SCALER_INT_STATUS);
+ return val;
+}
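+
+/*
+ * NOTE: a minimal sketch of how the helper above is meant to be used from
+ * the interrupt handler; the handler name is illustrative and the real
+ * one lives elsewhere in this driver:
+ *
+ *	static irqreturn_t sc_irq_handler(int irq, void *priv)
+ *	{
+ *		struct sc_dev *sc = priv;
+ *		u32 irq_status = sc_hwget_and_clear_irq_status(sc);
+ *
+ *		if (!SCALER_INT_OK(irq_status))
+ *			sc_hwset_soft_reset(sc);
+ *
+ *		return IRQ_HANDLED;
+ *	}
+ */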
--- /dev/null
+/*
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Register header file for Exynos Scaler driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __SCALER_REGS_H__
+#define __SCALER_REGS_H__
+
+#include "scaler.h"
+
+/* Status */
+#define SCALER_STATUS 0x00
+
+/* Configuration */
+#define SCALER_CFG 0x04
+#define SCALER_CFG_DRCG_EN (1 << 31)
+#define SCALER_CFG_CORE_BYP_EN (1 << 29)
+#define SCALER_CFG_SRAM_CG_EN (1 << 28)
+#define SCALER_CFG_FILL_EN (1 << 24)
+#define SCALER_CFG_BL_DIV_ALPHA_EN (1 << 17)
+#define SCALER_CFG_BLEND_EN (1 << 16)
+#define SCALER_CFG_CSC_Y_OFFSET_SRC	(1 << 10)
+#define SCALER_CFG_CSC_Y_OFFSET_DST	(1 << 9)
+#define SCALER_CFG_BURST_WR		(1 << 8)
+#define SCALER_CFG_BURST_RD		(1 << 7)
+#define SCALER_CFG_SOFT_RST (1 << 1)
+#define SCALER_CFG_START_CMD (1 << 0)
+
+/* Interrupt */
+#define SCALER_INT_EN 0x08
+#define SCALER_INT_EN_FRAME_END (1 << 0)
+#define SCALER_INT_EN_ALL 0x807fffff
+#define SCALER_INT_EN_ALL_v3 0x82ffffff
+#define SCALER_INT_EN_ALL_v4 0xb2ffffff
+#define SCALER_INT_OK(status) ((status) == SCALER_INT_EN_FRAME_END)
+
+#define SCALER_INT_STATUS 0x0c
+#define SCALER_INT_STATUS_FRAME_END (1 << 0)
+
+#define SCALER_SRC_CFG 0x10
+
+/* Source Image Configuration */
+#define SCALER_CFG_TILE_EN (1 << 10)
+#define SCALER_CFG_VHALF_PHASE_EN (1 << 9)
+#define SCALER_CFG_BIG_ENDIAN (1 << 8)
+#define SCALER_CFG_SWAP_MASK (3 << 5)
+#define SCALER_CFG_BYTE_SWAP (1 << 5)
+#define SCALER_CFG_HWORD_SWAP (2 << 5)
+#define SCALER_CFG_BYTE_HWORD_SWAP (3 << 5)
+#define SCALER_CFG_FMT_MASK (0x1f << 0)
+#define SCALER_CFG_FMT_YCBCR420_2P (0 << 0)
+#define SCALER_CFG_FMT_YUYV (0xa << 0)
+#define SCALER_CFG_FMT_UYVY (0xb << 0)
+#define SCALER_CFG_FMT_YVYU (9 << 0)
+#define SCALER_CFG_FMT_YCBCR422_2P (2 << 0)
+#define SCALER_CFG_FMT_YCBCR444_2P (3 << 0)
+#define SCALER_CFG_FMT_RGB565 (4 << 0)
+#define SCALER_CFG_FMT_ARGB1555 (5 << 0)
+#define SCALER_CFG_FMT_ARGB4444 (0xc << 0)
+#define SCALER_CFG_FMT_ARGB8888 (6 << 0)
+#define SCALER_CFG_FMT_RGBA8888 (0xe << 0)
+#define SCALER_CFG_FMT_P_ARGB8888 (7 << 0)
+#define SCALER_CFG_FMT_L8A8 (0xd << 0)
+#define SCALER_CFG_FMT_L8 (0xf << 0)
+#define SCALER_CFG_FMT_YCRCB420_2P (0x10 << 0)
+#define SCALER_CFG_FMT_YCRCB422_2P (0x12 << 0)
+#define SCALER_CFG_FMT_YCRCB444_2P (0x13 << 0)
+#define SCALER_CFG_FMT_YCBCR420_3P (0x14 << 0)
+#define SCALER_CFG_FMT_YCBCR422_3P (0x16 << 0)
+#define SCALER_CFG_FMT_YCBCR444_3P (0x17 << 0)
+
+/* Source Y Base Address */
+#define SCALER_SRC_Y_BASE 0x14
+#define SCALER_SRC_CB_BASE 0x18
+#define SCALER_SRC_CR_BASE 0x294
+#define SCALER_SRC_SPAN 0x1c
+#define SCALER_SRC_CSPAN_MASK (0xffff << 16)
+#define SCALER_SRC_YSPAN_MASK (0xffff << 0)
+
+#define SCALER_SRC_Y_POS 0x20
+#define SCALER_SRC_WH 0x24
+#define SCALER_SRC_PRESC_WH 0x2C
+
+#define SCALER_SRC_C_POS 0x28
+
+#define SCALER_DST_CFG 0x30
+#define SCALER_DST_Y_BASE 0x34
+#define SCALER_DST_CB_BASE 0x38
+#define SCALER_DST_CR_BASE 0x298
+#define SCALER_DST_SPAN 0x3c
+#define SCALER_DST_CSPAN_MASK (0xffff << 16)
+#define SCALER_DST_YSPAN_MASK (0xffff << 0)
+
+#define SCALER_DST_WH 0x40
+
+#define SCALER_DST_POS 0x44
+
+#define SCALER_H_RATIO 0x50
+#define SCALER_V_RATIO 0x54
+
+#define SCALER_ROT_CFG 0x58
+#define SCALER_ROT_MASK (3 << 0)
+#define SCALER_FLIP_MASK (3 << 2)
+#define SCALER_FLIP_X_EN (1 << 3)
+#define SCALER_FLIP_Y_EN (1 << 2)
+#define SCALER_ROT_90 (1 << 0)
+#define SCALER_ROT_180 (2 << 0)
+#define SCALER_ROT_270 (3 << 0)
+
+#define SCALER_LAT_CON 0x5c
+
+#define SCALER_YHCOEF 0x60
+#define SCALER_YVCOEF 0xf0
+#define SCALER_CHCOEF 0x140
+#define SCALER_CVCOEF 0x1d0
+
+#define SCALER_CSC_COEF00 0x220
+#define SCALER_CSC_COEF10 0x224
+#define SCALER_CSC_COEF20 0x228
+#define SCALER_CSC_COEF01 0x22c
+#define SCALER_CSC_COEF11 0x230
+#define SCALER_CSC_COEF21 0x234
+#define SCALER_CSC_COEF02 0x238
+#define SCALER_CSC_COEF12 0x23c
+#define SCALER_CSC_COEF22 0x240
+#define SCALER_CSC_COEF_MASK (0xfff << 0)
+
+#define SCALER_DITH_CFG 0x250
+#define SCALER_DITH_R_MASK (7 << 6)
+#define SCALER_DITH_G_MASK (7 << 3)
+#define SCALER_DITH_B_MASK (7 << 0)
+#define SCALER_DITH_R_SHIFT (6)
+#define SCALER_DITH_G_SHIFT (3)
+#define SCALER_DITH_B_SHIFT (0)
+
+#define SCALER_VER 0x260
+
+#define SCALER_CRC_COLOR01 0x270
+#define SCALER_CRC_COLOR23 0x274
+#define SCALER_CYCLE_COUNT 0x278
+
+#define SCALER_SRC_BLEND_COLOR 0x280
+#define SCALER_SRC_BLEND_ALPHA 0x284
+#define SCALER_DST_BLEND_COLOR 0x288
+#define SCALER_DST_BLEND_ALPHA 0x28c
+#define SCALER_SEL_INV_MASK (1 << 31)
+#define SCALER_SEL_MASK			(3 << 29)	/* 2-bit field at bit 29 */
+#define SCALER_OP_SEL_INV_MASK (1 << 28)
+#define SCALER_OP_SEL_MASK (0xf << 24)
+#define SCALER_SEL_INV_SHIFT (31)
+#define SCALER_SEL_SHIFT (29)
+#define SCALER_OP_SEL_INV_SHIFT (28)
+#define SCALER_OP_SEL_SHIFT (24)
+
+#define SCALER_FILL_COLOR 0x290
+
+#define SCALER_TIMEOUT_CTRL 0x2c0
+#define SCALER_TIMEOUT_CNT 0x2c4
+
+#define SCALER_SRC_YH_INIT_PHASE 0x2d0
+#define SCALER_SRC_YV_INIT_PHASE 0x2d4
+#define SCALER_SRC_CH_INIT_PHASE 0x2d8
+#define SCALER_SRC_CV_INIT_PHASE 0x2dc
+
+/* macros to make words to SFR */
+#define SCALER_VAL_WH(w, h)	((((w) & 0x3FFF) << 16) | ((h) & 0x3FFF))
+#define SCALER_VAL_SRC_POS(l, t)	((((l) & 0x3FFF) << 18) | (((t) & 0x3FFF) << 2))
+#define SCALER_VAL_DST_POS(l, t)	((((l) & 0x3FFF) << 16) | ((t) & 0x3FFF))
+
+static inline void sc_hwset_src_pos(struct sc_dev *sc, __s32 left, __s32 top,
+ unsigned int chshift, unsigned int cvshift)
+{
+	/* the SRC position registers have a 2-bit fractional part, unused here */
+ __raw_writel(SCALER_VAL_SRC_POS(left, top),
+ sc->regs + SCALER_SRC_Y_POS);
+ __raw_writel(SCALER_VAL_SRC_POS(left >> chshift, top >> cvshift),
+ sc->regs + SCALER_SRC_C_POS);
+}
+
+static inline void sc_hwset_src_wh(struct sc_dev *sc, __s32 width, __s32 height,
+ unsigned int pre_h_ratio, unsigned int pre_v_ratio,
+ unsigned int chshift, unsigned int cvshift)
+{
+ __s32 pre_width = round_down(width >> pre_h_ratio, 1 << chshift);
+ __s32 pre_height = round_down(height >> pre_v_ratio, 1 << cvshift);
+ sc_dbg("width %d, height %d\n", pre_width, pre_height);
+
+ if (sc->variant->prescale) {
+		/*
+		 * Crop the width and height if the pre-scaling result
+		 * violates the width/height constraints:
+		 * - the resulting width or height is not a natural number
+		 * - the resulting width or height violates the constraints
+		 *   of YUV420/422
+		 */
+ width = pre_width << pre_h_ratio;
+ height = pre_height << pre_v_ratio;
+ __raw_writel(SCALER_VAL_WH(width, height),
+ sc->regs + SCALER_SRC_PRESC_WH);
+ }
+
+ __raw_writel(SCALER_VAL_WH(pre_width, pre_height),
+ sc->regs + SCALER_SRC_WH);
+}
+
+static inline void sc_hwset_dst_pos(struct sc_dev *sc, __s32 left, __s32 top)
+{
+ __raw_writel(SCALER_VAL_DST_POS(left, top), sc->regs + SCALER_DST_POS);
+}
+
+static inline void sc_hwset_dst_wh(struct sc_dev *sc, __s32 width, __s32 height)
+{
+ __raw_writel(SCALER_VAL_WH(width, height), sc->regs + SCALER_DST_WH);
+}
+
+static inline void sc_hwset_hratio(struct sc_dev *sc, u32 ratio, u32 pre_ratio)
+{
+ __raw_writel((pre_ratio << 28) | ratio, sc->regs + SCALER_H_RATIO);
+}
+
+static inline void sc_hwset_vratio(struct sc_dev *sc, u32 ratio, u32 pre_ratio)
+{
+ __raw_writel((pre_ratio << 28) | ratio, sc->regs + SCALER_V_RATIO);
+}
+
+static inline void sc_hwset_flip_rotation(struct sc_dev *sc, u32 flip_rot_cfg)
+{
+ __raw_writel(flip_rot_cfg & 0xF, sc->regs + SCALER_ROT_CFG);
+}
+
+static inline void sc_hwset_int_en(struct sc_dev *sc)
+{
+ unsigned int val;
+
+ if (sc->version < SCALER_VERSION(3, 0, 0))
+ val = SCALER_INT_EN_ALL;
+ else if (sc->version < SCALER_VERSION(4, 0, 1) ||
+ sc->version == SCALER_VERSION(4, 2, 0))
+ val = SCALER_INT_EN_ALL_v3;
+ else
+ val = SCALER_INT_EN_ALL_v4;
+ __raw_writel(val, sc->regs + SCALER_INT_EN);
+}
+
+static inline void sc_clear_aux_power_cfg(struct sc_dev *sc)
+{
+ /* Clearing all power saving features */
+ __raw_writel(__raw_readl(sc->regs + SCALER_CFG) & ~SCALER_CFG_DRCG_EN,
+ sc->regs + SCALER_CFG);
+}
+
+static inline void sc_hwset_init(struct sc_dev *sc)
+{
+ unsigned long cfg;
+
+#ifdef SC_NO_SOFTRST
+ cfg = (SCALER_CFG_CSC_Y_OFFSET_SRC | SCALER_CFG_CSC_Y_OFFSET_DST);
+#else
+ cfg = SCALER_CFG_SOFT_RST;
+#endif
+ writel(cfg, sc->regs + SCALER_CFG);
+
+ if (sc->version >= SCALER_VERSION(3, 0, 1))
+ __raw_writel(
+ __raw_readl(sc->regs + SCALER_CFG) | SCALER_CFG_DRCG_EN,
+ sc->regs + SCALER_CFG);
+ if (sc->version >= SCALER_VERSION(4, 0, 1) &&
+ sc->version != SCALER_VERSION(4, 2, 0)) {
+ __raw_writel(
+ __raw_readl(sc->regs + SCALER_CFG) | SCALER_CFG_BURST_RD,
+ sc->regs + SCALER_CFG);
+ __raw_writel(
+ __raw_readl(sc->regs + SCALER_CFG) & ~SCALER_CFG_BURST_WR,
+ sc->regs + SCALER_CFG);
+ }
+}
+
+static inline void sc_hwset_soft_reset(struct sc_dev *sc)
+{
+ writel(SCALER_CFG_SOFT_RST, sc->regs + SCALER_CFG);
+}
+
+static inline void sc_hwset_start(struct sc_dev *sc)
+{
+ unsigned long cfg = __raw_readl(sc->regs + SCALER_CFG);
+
+ cfg |= SCALER_CFG_START_CMD;
+ if (sc->version >= SCALER_VERSION(3, 0, 1)) {
+ cfg |= SCALER_CFG_CORE_BYP_EN;
+ cfg |= SCALER_CFG_SRAM_CG_EN;
+ }
+ writel(cfg, sc->regs + SCALER_CFG);
+}
+
+u32 sc_hwget_and_clear_irq_status(struct sc_dev *sc);
+
+#define SCALER_FRACT_VAL(x)	((x) << (20 - SC_CROP_FRACT_MULTI))
+#define SCALER_INIT_PHASE_VAL(i, f)	((((i) & 0xf) << 20) | \
+					((SCALER_FRACT_VAL(f)) & 0xfffff))
+static inline void sc_hwset_src_init_phase(struct sc_dev *sc, struct sc_init_phase *ip)
+{
+ if (ip->yh) {
+ __raw_writel(SCALER_INIT_PHASE_VAL(0, ip->yh),
+ sc->regs + SCALER_SRC_YH_INIT_PHASE);
+ __raw_writel(SCALER_INIT_PHASE_VAL(0, ip->ch),
+ sc->regs + SCALER_SRC_CH_INIT_PHASE);
+ sc_dbg("initial phase value is yh 0x%x, ch 0x%x\n",
+ SCALER_FRACT_VAL(ip->yh), SCALER_FRACT_VAL(ip->ch));
+ }
+
+ if (ip->yv) {
+ __raw_writel(SCALER_INIT_PHASE_VAL(0, ip->yv),
+ sc->regs + SCALER_SRC_YV_INIT_PHASE);
+ __raw_writel(SCALER_INIT_PHASE_VAL(0, ip->cv),
+ sc->regs + SCALER_SRC_CV_INIT_PHASE);
+ sc_dbg("initial phase value is yv 0x%x, cv 0x%x\n",
+ SCALER_FRACT_VAL(ip->yv), SCALER_FRACT_VAL(ip->cv));
+ }
+}
+
+void sc_hwset_polyphase_hcoef(struct sc_dev *sc,
+ unsigned int yratio, unsigned int cratio, unsigned int filter);
+void sc_hwset_polyphase_vcoef(struct sc_dev *sc,
+ unsigned int yratio, unsigned int cratio, unsigned int filter);
+
+#endif /*__SCALER_REGS_H__*/
--- /dev/null
+/*
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Header file for Exynos Scaler driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef SCALER__H_
+#define SCALER__H_
+
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/videodev2.h>
+#include <linux/videodev2_exynos_media.h>
+#include <linux/io.h>
+#include <linux/pm_qos.h>
+#include <media/videobuf2-core.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-ctrls.h>
+
+extern int sc_log_level;
+#define sc_dbg(fmt, args...) \
+ do { \
+ if (sc_log_level) \
+ pr_debug("[%s:%d] " \
+ fmt, __func__, __LINE__, ##args); \
+ } while (0)
+
+#define MODULE_NAME "exynos5-scaler"
+#define SC_MAX_DEVS 1
+#define SC_TIMEOUT (2 * HZ) /* 2 seconds */
+#define SC_WDT_CNT 3
+#define SC_MAX_CTRL_NUM 11
+
+#define SC_MAX_PLANES 3
+/* Address index */
+#define SC_ADDR_RGB 0
+#define SC_ADDR_Y 0
+#define SC_ADDR_CB 1
+#define SC_ADDR_CBCR 1
+#define SC_ADDR_CR 2
+
+/* Scaler hardware device state */
+#define DEV_RUN 1
+#define DEV_SUSPEND 2
+#define DEV_CP 4 /* contents path protection */
+
+/* Scaler m2m context state */
+#define CTX_PARAMS 1
+#define CTX_STREAMING 2
+#define CTX_RUN 3
+#define CTX_ABORT 4
+#define CTX_SRC_FMT 5
+#define CTX_DST_FMT 6
+#define CTX_INT_FRAME 7 /* intermediate frame available */
+
+/* CSC equation */
+#define SC_CSC_NARROW 0
+#define SC_CSC_WIDE 1
+
+/* Scaler Crop Fixed Point value */
+#define SC_CROP_FRACT_SHIFT 15
+#define SC_CROP_FRACT_MULTI 16
+#define SC_CROP_FRACT_MASK ((1 << SC_CROP_FRACT_MULTI) - 1)
+#define SC_CROP_INT_MASK ((1 << SC_CROP_FRACT_SHIFT) - 1)
+#define SC_CROP_GET_FR_VAL(x)	(((x) >> SC_CROP_FRACT_SHIFT) & SC_CROP_FRACT_MASK)
+#define SC_CROP_MAKE_FR_VAL(i, f) ((((f) & SC_CROP_FRACT_MASK) \
+ << SC_CROP_FRACT_SHIFT) | ((i) & SC_CROP_INT_MASK))
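+
+/*
+ * Example: a crop coordinate of 100.5 pixels is encoded as
+ * SC_CROP_MAKE_FR_VAL(100, 0x8000), since 0x8000 / 2^16 == 0.5;
+ * SC_CROP_GET_FR_VAL() recovers the 16-bit fraction, which
+ * SCALER_FRACT_VAL() in scaler-regs.h widens to the 20-bit
+ * initial-phase register field.
+ */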
+
+#define fh_to_sc_ctx(__fh) container_of(__fh, struct sc_ctx, fh)
+#define sc_fmt_is_rgb888(x)	(((x) == V4L2_PIX_FMT_RGB32) || \
+		((x) == V4L2_PIX_FMT_BGR32))
+#define sc_fmt_is_yuv422(x)	(((x) == V4L2_PIX_FMT_YUYV) || \
+		((x) == V4L2_PIX_FMT_UYVY) || ((x) == V4L2_PIX_FMT_YVYU) || \
+		((x) == V4L2_PIX_FMT_YUV422P) || ((x) == V4L2_PIX_FMT_NV16) || \
+		((x) == V4L2_PIX_FMT_NV61))
+#define sc_fmt_is_yuv420(x)	(((x) == V4L2_PIX_FMT_YUV420) || \
+		((x) == V4L2_PIX_FMT_YVU420) || ((x) == V4L2_PIX_FMT_NV12) || \
+		((x) == V4L2_PIX_FMT_NV21) || ((x) == V4L2_PIX_FMT_NV12M) || \
+		((x) == V4L2_PIX_FMT_NV21M) || ((x) == V4L2_PIX_FMT_YUV420M) || \
+		((x) == V4L2_PIX_FMT_YVU420M) || ((x) == V4L2_PIX_FMT_NV12MT_16X16))
+#define sc_fmt_is_ayv12(x)	((x) == V4L2_PIX_FMT_YVU420)
+#define sc_dith_val(a, b, c)	(((a) << SCALER_DITH_R_SHIFT) |	\
+		((b) << SCALER_DITH_G_SHIFT) | ((c) << SCALER_DITH_B_SHIFT))
+
+#define SCALER_VERSION(x, y, z) (((x) << 16) | ((y) << 8) | (z))
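+/* e.g. SCALER_VERSION(4, 2, 0) == 0x040200, so versions compare as plain integers */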
+
+#define SC_FMT_PREMULTI_FLAG 10
+
+/* for blend operation */
+#define V4L2_CID_GLOBAL_ALPHA (V4L2_CID_EXYNOS_BASE + 1)
+#define V4L2_CID_2D_BLEND_OP (V4L2_CID_EXYNOS_BASE + 103)
+#define V4L2_CID_2D_COLOR_FILL (V4L2_CID_EXYNOS_BASE + 104)
+#define V4L2_CID_2D_DITH (V4L2_CID_EXYNOS_BASE + 105)
+#define V4L2_CID_2D_FMT_PREMULTI (V4L2_CID_EXYNOS_BASE + 106)
+
+/* for denoising filter */
+#define SC_CID_DNOISE_FT (V4L2_CID_EXYNOS_BASE + 150)
+#define SC_M2M1SHOT_OP_FILTER_SHIFT (28)
+#define SC_M2M1SHOT_OP_FILTER_MASK (0xf << 28)
+
+enum sc_csc_idx {
+ NO_CSC,
+ CSC_Y2R,
+ CSC_R2Y,
+};
+
+struct sc_csc_tab {
+ int narrow_601[9];
+ int wide_601[9];
+ int narrow_709[9];
+ int wide_709[9];
+ int narrow_2020[9];
+ int wide_2020[9];
+ int narrow_p3[9];
+ int wide_p3[9];
+};
+
+enum sc_clk_status {
+ SC_CLK_ON,
+ SC_CLK_OFF,
+};
+
+enum sc_clocks {
+ SC_GATE_CLK,
+ SC_CHLD_CLK,
+ SC_PARN_CLK
+};
+
+enum sc_dith {
+ SC_DITH_NO,
+ SC_DITH_8BIT,
+ SC_DITH_6BIT,
+ SC_DITH_5BIT,
+ SC_DITH_4BIT,
+};
+
+/*
+ * blending operation
+ * The order is from Android PorterDuff.java
+ */
+enum sc_blend_op {
+ /* [0, 0] */
+ BL_OP_CLR = 1,
+ /* [Sa, Sc] */
+ BL_OP_SRC,
+ /* [Da, Dc] */
+ BL_OP_DST,
+ /* [Sa + (1 - Sa)*Da, Rc = Sc + (1 - Sa)*Dc] */
+ BL_OP_SRC_OVER,
+ /* [Sa + (1 - Sa)*Da, Rc = Dc + (1 - Da)*Sc] */
+ BL_OP_DST_OVER,
+ /* [Sa * Da, Sc * Da] */
+ BL_OP_SRC_IN,
+ /* [Sa * Da, Sa * Dc] */
+ BL_OP_DST_IN,
+ /* [Sa * (1 - Da), Sc * (1 - Da)] */
+ BL_OP_SRC_OUT,
+ /* [Da * (1 - Sa), Dc * (1 - Sa)] */
+ BL_OP_DST_OUT,
+ /* [Da, Sc * Da + (1 - Sa) * Dc] */
+ BL_OP_SRC_ATOP,
+ /* [Sa, Sc * (1 - Da) + Sa * Dc ] */
+ BL_OP_DST_ATOP,
+ /* [-(Sa * Da), Sc * (1 - Da) + (1 - Sa) * Dc] */
+ BL_OP_XOR,
+ /* [Sa + Da - Sa*Da, Sc*(1 - Da) + Dc*(1 - Sa) + min(Sc, Dc)] */
+ BL_OP_DARKEN,
+ /* [Sa + Da - Sa*Da, Sc*(1 - Da) + Dc*(1 - Sa) + max(Sc, Dc)] */
+ BL_OP_LIGHTEN,
+	/* [Sa * Da, Sc * Dc] */
+ BL_OP_MULTIPLY,
+ /* [Sa + Da - Sa * Da, Sc + Dc - Sc * Dc] */
+ BL_OP_SCREEN,
+ /* Saturate(S + D) */
+ BL_OP_ADD,
+};
+
+/*
+ * Co = <src_color_op> * Cs + <dst_color_op> * Cd
+ * Ao = <src_alpha_op> * As + <dst_alpha_op> * Ad
+ */
+#define BL_INV_BIT_OFFSET 0x10
+
+enum sc_bl_comp {
+ ONE = 0,
+ SRC_A,
+ SRC_C,
+ DST_A,
+ SRC_GA = 0x5,
+ INV_SA = 0x11,
+ INV_SC,
+ INV_DA,
+ INV_SAGA = 0x17,
+ ZERO = 0xff,
+};
+
+struct sc_bl_op_val {
+ u32 src_color;
+ u32 src_alpha;
+ u32 dst_color;
+ u32 dst_alpha;
+};
+
+/*
+ * struct sc_size_limit - Scaler variant size information
+ *
+ * @min_w: minimum pixel width size
+ * @min_h: minimum pixel height size
+ * @max_w: maximum pixel width size
+ * @max_h: maximum pixel height size
+ */
+struct sc_size_limit {
+ u16 min_w;
+ u16 min_h;
+ u16 max_w;
+ u16 max_h;
+};
+
+struct sc_variant {
+ struct sc_size_limit limit_input;
+ struct sc_size_limit limit_output;
+ u32 version;
+ u32 sc_up_max;
+ u32 sc_down_min;
+ u32 sc_down_swmin;
+ u8 blending:1;
+ u8 prescale:1;
+ u8 ratio_20bit:1;
+ u8 initphase:1;
+};
+
+/*
+ * struct sc_fmt - the driver's internal color format data
+ * @name: format description
+ * @pixelformat: the fourcc code for this format, 0 if not applicable
+ * @cfg_val: the format value for the SCALER_SRC_CFG/SCALER_DST_CFG registers
+ * @num_planes: number of physically non-contiguous data planes
+ * @num_comp: number of color components (e.g. RGB or Y/Cb/Cr)
+ * @h_shift: horizontal subsampling shift of C against Y
+ * @v_shift: vertical subsampling shift of C against Y
+ * @bitperpixel: bits per pixel for each plane
+ * @is_rgb: 1 if the format is an RGB variant
+ * @cspan: chroma-span flag that keeps the C span from being halved
+ *	against the Y span (see sc_hwset_src_imgsize())
+ */
+struct sc_fmt {
+ char *name;
+ u32 pixelformat;
+ u32 cfg_val;
+ u8 bitperpixel[SC_MAX_PLANES];
+ u8 num_planes:2;
+ u8 num_comp:2;
+ u8 h_shift:1;
+ u8 v_shift:1;
+ u8 is_rgb:1;
+ u8 cspan:1;
+};
+
+struct sc_addr {
+ dma_addr_t y;
+ dma_addr_t cb;
+ dma_addr_t cr;
+ unsigned int ysize;
+ unsigned int cbsize;
+ unsigned int crsize;
+};
+
+/*
+ * struct sc_frame - source/target frame properties
+ * @fmt: buffer format(like virtual screen)
+ * @crop: image size / position
+ * @addr: buffer start address(access using SC_ADDR_XXX)
+ * @bytesused: image size in bytes (w x h x bpp)
+ */
+struct sc_frame {
+ const struct sc_fmt *sc_fmt;
+ unsigned short width;
+ unsigned short height;
+ __u32 pixelformat;
+ struct v4l2_rect crop;
+
+ struct sc_addr addr;
+ __u32 bytesused[SC_MAX_PLANES];
+ bool pre_multi;
+};
+
+struct sc_int_frame {
+ struct sc_frame frame;
+ struct ion_client *client;
+ struct ion_handle *handle[3];
+ struct sc_addr src_addr;
+ struct sc_addr dst_addr;
+};
+
+/*
+ * struct sc_m2m_device - v4l2 memory-to-memory device data
+ * @v4l2_dev: v4l2 device
+ * @vfd: the video device node
+ * @m2m_dev: v4l2 memory-to-memory device data
+ * @in_use: the open count
+ */
+struct sc_m2m_device {
+ struct v4l2_device v4l2_dev;
+ struct video_device *vfd;
+ struct v4l2_m2m_dev *m2m_dev;
+ atomic_t in_use;
+};
+
+struct sc_wdt {
+ struct timer_list timer;
+ atomic_t cnt;
+};
+
+struct sc_csc {
+ unsigned int csc_eq;
+ bool csc_range;
+};
+
+struct sc_init_phase {
+ u32 yh;
+ u32 yv;
+ u32 ch;
+ u32 cv;
+ u32 w;
+ u32 h;
+};
+
+enum sc_ft {
+ SC_FT_NONE = 0,
+ SC_FT_BLUR,
+ SC_FT_240,
+ SC_FT_480,
+ SC_FT_720,
+ SC_FT_1080,
+ SC_FT_MAX,
+};
+
+struct sc_dnoise_filter {
+ u32 strength;
+ u32 w;
+ u32 h;
+};
+
+struct sc_ctx;
+
+/*
+ * struct sc_dev - the abstraction for the Scaler device
+ * @dev:	pointer to the Scaler device
+ * @variant: the IP variant information
+ * @m2m: memory-to-memory V4L2 device information
+ * @aclk: aclk required for scaler operation
+ * @pclk: pclk required for scaler operation
+ * @clk_chld: child clk of mux required for scaler operation
+ * @clk_parn: parent clk of mux required for scaler operation
+ * @regs: the mapped hardware registers
+ * @regs_res: the resource claimed for IO registers
+ * @wait: interrupt handler waitqueue
+ * @ws: work struct
+ * @state: device state flags
+ * @alloc_ctx: videobuf2 memory allocator context
+ * @slock:	the spinlock protecting this data structure
+ * @lock:	the mutex protecting this data structure
+ * @wdt: watchdog timer information
+ * @version: IP version number
+ * @cfw: cfw flag
+ * @pb_disable: prefetch-buffer disable flag
+ */
+struct sc_dev {
+ struct device *dev;
+ const struct sc_variant *variant;
+ struct sc_m2m_device m2m;
+ struct m2m1shot_device *m21dev;
+ struct clk *aclk;
+ struct clk *pclk;
+ struct clk *clk_chld;
+ struct clk *clk_parn;
+ void __iomem *regs;
+ struct resource *regs_res;
+ struct workqueue_struct *qosclr_int_wq;
+ wait_queue_head_t wait;
+ unsigned long state;
+ struct vb2_alloc_ctx *alloc_ctx;
+ spinlock_t slock;
+ struct mutex lock;
+ struct sc_wdt wdt;
+ spinlock_t ctxlist_lock;
+ struct sc_ctx *current_ctx;
+ struct list_head context_list; /* for sc_ctx_abs.node */
+ struct pm_qos_request qosreq_int;
+ s32 qosreq_int_level;
+ int dev_id;
+ u32 version;
+ bool pb_disable;
+ u32 cfw;
+};
+
+enum SC_CONTEXT_TYPE {
+ SC_CTX_V4L2_TYPE,
+ SC_CTX_M2M1SHOT_TYPE
+};
+
+/*
+ * sc_ctx - the abstraction for a Scaler open context
+ * @node:	list to be added to sc_dev.context_list
+ * @context_type: determines if the context is @m2m_ctx or @m21_ctx
+ * @sc_dev:	the Scaler device this context applies to
+ * @m2m_ctx: memory-to-memory device context
+ * @m21_ctx: m2m1shot context
+ * @s_frame:	source frame properties
+ * @i_frame:	intermediate frame properties
+ * @d_frame:	destination frame properties
+ * @ctrl_handler: v4l2 controls handler
+ * @fh: v4l2 file handle
+ * @ktime: start time of a task of m2m1shot
+ * @flip_rot_cfg: rotation and flip configuration
+ * @bl_op: image blend mode
+ * @dith: image dithering mode
+ * @g_alpha: global alpha value
+ * @color_fill: enable color fill
+ * @flags: context state flags
+ * @pre_multi: pre-multiplied format
+ * @csc: csc equation value
+ */
+struct sc_ctx {
+ struct list_head node;
+ enum SC_CONTEXT_TYPE context_type;
+ struct sc_dev *sc_dev;
+ union {
+ struct v4l2_m2m_ctx *m2m_ctx;
+ struct m2m1shot_context *m21_ctx;
+ };
+ struct sc_frame s_frame;
+ struct sc_int_frame *i_frame;
+ struct sc_frame d_frame;
+ struct v4l2_ctrl_handler ctrl_handler;
+ union {
+ struct v4l2_fh fh;
+ ktime_t ktime_m2m1shot;
+ };
+ u32 flip_rot_cfg; /* SCALER_ROT_CFG */
+ enum sc_blend_op bl_op;
+ u32 dith;
+ u32 g_alpha;
+ bool color_fill;
+ unsigned int h_ratio;
+ unsigned int v_ratio;
+ unsigned int pre_h_ratio;
+ unsigned int pre_v_ratio;
+ unsigned long flags;
+ bool pre_multi;
+ bool cp_enabled;
+ struct sc_csc csc;
+ struct sc_init_phase init_phase;
+ struct sc_dnoise_filter dnoise_ft;
+};
+
+static inline struct sc_frame *ctx_get_frame(struct sc_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ struct sc_frame *frame;
+
+ if (V4L2_TYPE_IS_MULTIPLANAR(type)) {
+ if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ frame = &ctx->s_frame;
+ else
+ frame = &ctx->d_frame;
+ } else {
+ dev_err(ctx->sc_dev->dev,
+ "Wrong V4L2 buffer type %d\n", type);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return frame;
+}
+
+int sc_hwset_src_image_format(struct sc_dev *sc, const struct sc_fmt *);
+int sc_hwset_dst_image_format(struct sc_dev *sc, const struct sc_fmt *);
+void sc_hwset_pre_multi_format(struct sc_dev *sc, bool src, bool dst);
+void sc_hwset_blend(struct sc_dev *sc, enum sc_blend_op bl_op, bool pre_multi,
+ unsigned char g_alpha);
+void sc_hwset_color_fill(struct sc_dev *sc, unsigned int val);
+void sc_hwset_dith(struct sc_dev *sc, unsigned int val);
+void sc_hwset_csc_coef(struct sc_dev *sc, enum sc_csc_idx idx,
+ struct sc_csc *csc);
+void sc_hwset_src_imgsize(struct sc_dev *sc, struct sc_frame *frame);
+void sc_hwset_dst_imgsize(struct sc_dev *sc, struct sc_frame *frame);
+void sc_hwset_src_crop(struct sc_dev *sc, struct v4l2_rect *rect,
+ const struct sc_fmt *fmt,
+ unsigned int pre_h_ratio, unsigned int pre_v_ratio);
+void sc_hwset_dst_crop(struct sc_dev *sc, struct v4l2_rect *rect);
+void sc_hwset_src_addr(struct sc_dev *sc, struct sc_addr *addr);
+void sc_hwset_dst_addr(struct sc_dev *sc, struct sc_addr *addr);
+void sc_hwset_hcoef(struct sc_dev *sc, unsigned int coef);
+void sc_hwset_vcoef(struct sc_dev *sc, unsigned int coef);
+
+void sc_hwregs_dump(struct sc_dev *sc);
+
+#endif /* SCALER__H_ */