source "drivers/media/cec/Kconfig"
+config MEDIA_M2M1SHOT
+ bool "Non-streaming m2m media processing (m2m1shot) API"
+ ---help---
+ Enables support for non-streaming m2m media processing API for some
+ device drivers like JPEG codec.
+
+config MEDIA_M2M1SHOT_TESTDEV
+ depends on MEDIA_M2M1SHOT
+ bool "Test driver for non-streaming m2m media processing (m2m1shot) API"
+
#
# Media controller
# Selectable only for webcam/grabbers, as other drivers don't use it
obj-$(CONFIG_VIDEO_DEV) += v4l2-core/
obj-$(CONFIG_DVB_CORE) += dvb-core/
+obj-$(CONFIG_MEDIA_M2M1SHOT) += m2m1shot.o m2m1shot-helper.o
+obj-$(CONFIG_MEDIA_M2M1SHOT_TESTDEV) += m2m1shot-testdev.o
+
# There are both core and drivers at RC subtree - merge before drivers
obj-y += rc/
--- /dev/null
+/*
+ * drivers/media/m2m1shot-helper.c
+ *
+ * Copyright (C) 2014 Samsung Electronics Co., Ltd.
+ *
+ * Contact: Cho KyongHo <pullip.cho@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/exynos_iovmm.h>
+#include <linux/exynos_ion.h>
+
+#include <media/m2m1shot-helper.h>
+
+/*
+ * m2m1shot_map_dma_buf - map a plane for DMA and sync caches for the device
+ * @dev:   device that will perform DMA on the plane
+ * @plane: plane descriptor; plane->dmabuf distinguishes dmabuf vs. userptr
+ * @dir:   DMA direction of the upcoming access
+ *
+ * For dmabuf planes, maps the previously created attachment and performs an
+ * ION cache sync for the device. For userptr planes, plane->sgt is assumed to
+ * have been built already and only the cache sync is performed.
+ * Returns 0 on success or a negative error from dma_buf_map_attachment().
+ */
+int m2m1shot_map_dma_buf(struct device *dev,
+ struct m2m1shot_buffer_plane_dma *plane,
+ enum dma_data_direction dir)
+{
+ if (plane->dmabuf) {
+ plane->sgt = dma_buf_map_attachment(plane->attachment, dir);
+ if (IS_ERR(plane->sgt)) {
+ dev_err(dev, "%s: failed to map attachment of dma_buf\n",
+ __func__);
+ return PTR_ERR(plane->sgt);
+ }
+
+ exynos_ion_sync_dmabuf_for_device(dev, plane->dmabuf,
+ plane->bytes_used, dir);
+ } else { /* userptr */
+ exynos_ion_sync_sg_for_device(dev, plane->bytes_used,
+ plane->sgt, dir);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(m2m1shot_map_dma_buf);
+
+/*
+ * m2m1shot_unmap_dma_buf - reverse of m2m1shot_map_dma_buf()
+ *
+ * Syncs caches back for the CPU and, for dmabuf planes, unmaps the
+ * attachment. Userptr planes only need the CPU cache sync here.
+ */
+void m2m1shot_unmap_dma_buf(struct device *dev,
+ struct m2m1shot_buffer_plane_dma *plane,
+ enum dma_data_direction dir)
+{
+ if (plane->dmabuf) {
+ exynos_ion_sync_dmabuf_for_cpu(dev, plane->dmabuf,
+ plane->bytes_used, dir);
+ dma_buf_unmap_attachment(plane->attachment, plane->sgt, dir);
+ } else {
+ exynos_ion_sync_sg_for_cpu(dev, plane->bytes_used,
+ plane->sgt, dir);
+ }
+}
+EXPORT_SYMBOL(m2m1shot_unmap_dma_buf);
+
+/*
+ * m2m1shot_dma_addr_map - obtain an IOVA for one plane of a buffer
+ *
+ * Maps through ion_iovmm_map() for dmabuf planes and iovmm_map() for
+ * userptr planes, then stores iova + plane->offset in dma_addr.
+ * NOTE(review): relies on IS_ERR_VALUE() working on dma_addr_t and on the
+ * error fitting in an int when returned — matches the era of this kernel
+ * tree, but verify on 64-bit configurations.
+ */
+int m2m1shot_dma_addr_map(struct device *dev,
+ struct m2m1shot_buffer_dma *buf,
+ int plane_idx, enum dma_data_direction dir)
+{
+ struct m2m1shot_buffer_plane_dma *plane = &buf->plane[plane_idx];
+ dma_addr_t iova;
+
+ if (plane->dmabuf) {
+ iova = ion_iovmm_map(plane->attachment, 0,
+ plane->bytes_used, dir, 0);
+ } else {
+ iova = iovmm_map(dev, plane->sgt->sgl, 0,
+ plane->bytes_used, dir, 0);
+ }
+
+ if (IS_ERR_VALUE(iova))
+ return (int)iova;
+
+ buf->plane[plane_idx].dma_addr = iova + plane->offset;
+
+ return 0;
+}
+
+/*
+ * m2m1shot_dma_addr_unmap - release the IOVA obtained by
+ * m2m1shot_dma_addr_map(). The plane offset added during mapping is
+ * subtracted back before unmapping; dma_addr is cleared afterwards.
+ */
+void m2m1shot_dma_addr_unmap(struct device *dev,
+ struct m2m1shot_buffer_dma *buf, int plane_idx)
+{
+ struct m2m1shot_buffer_plane_dma *plane = &buf->plane[plane_idx];
+ dma_addr_t dma_addr = plane->dma_addr - plane->offset;
+
+ if (plane->dmabuf)
+ ion_iovmm_unmap(plane->attachment, dma_addr);
+ else
+ iovmm_unmap(dev, dma_addr);
+
+ plane->dma_addr = 0;
+}
--- /dev/null
+/*
+ * drivers/media/m2m1shot-testdev.c
+ *
+ * Copyright (C) 2014 Samsung Electronics Co., Ltd.
+ *
+ * Contact: Cho KyongHo <pullip.cho@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/jiffies.h>
+#include <linux/videodev2.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+
+#include <media/m2m1shot.h>
+#include <media/m2m1shot-helper.h>
+
+/*
+ * Per-device state of the test driver:
+ * @m21dev:    m2m1shot core device handle
+ * @thread:    worker kthread that simulates H/W processing
+ * @waitqueue: wakes the worker when a task is queued
+ * @task_list: pending work items (the test device handles one at a time)
+ * @lock:      protects @task_list
+ */
+struct m2m1shot_testdev_drvdata {
+ struct m2m1shot_device *m21dev;
+ struct task_struct *thread;
+ wait_queue_head_t waitqueue;
+ struct list_head task_list;
+ spinlock_t lock;
+};
+
+#define M2M1SHOT_TESTDEV_IOC_TIMEOUT _IOW('T', 0, unsigned long)
+
+/*
+ * Pixel format descriptor. "mbpp" values are bytes-per-pixel scaled by 4
+ * (see TO_MBPP/TO_BPP) so fractional bpp of subsampled YUV planes can be
+ * expressed in integers.
+ */
+struct m2m1shot_testdev_fmt {
+ __u32 fmt;
+ const char *fmt_name;
+ __u8 mbpp[3]; /* bytes * 2 per pixel: should be divided by 2 */
+ __u8 planes;
+ __u8 buf_mbpp[3]; /* bytes * 2 per pixel: should be divided by 2 */
+ __u8 buf_planes;
+};
+
+#define TO_MBPP(bpp) ((bpp) << 2)
+#define TO_MBPPDIV(bpp_diven, bpp_diver) (((bpp_diven) << 2) / (bpp_diver))
+#define TO_BPP(mbpp) ((mbpp) >> 2)
+
+/*
+ * Formats accepted by the test device. 'planes' describes the color planes
+ * of the format; 'buf_planes'/'buf_mbpp' describe how those planes are laid
+ * out in the user-provided buffers (contiguous formats collapse to one
+ * buffer plane).
+ */
+static struct m2m1shot_testdev_fmt m2m1shot_testdev_fmtlist[] = {
+ {
+ .fmt = V4L2_PIX_FMT_RGB565,
+ .fmt_name = "RGB565",
+ .mbpp = { TO_MBPP(2), 0, 0},
+ .planes = 1,
+ .buf_mbpp = { TO_MBPP(2), 0, 0},
+ .buf_planes = 1,
+ }, {
+ .fmt = V4L2_PIX_FMT_BGR32,
+ .fmt_name = "BGR32",
+ .mbpp = { TO_MBPP(4), 0, 0 },
+ .planes = 1,
+ .buf_mbpp = { TO_MBPP(4), 0, 0 },
+ .buf_planes = 1,
+ }, {
+ .fmt = V4L2_PIX_FMT_RGB32,
+ .fmt_name = "RGB32",
+ .mbpp = { TO_MBPP(4), 0, 0 },
+ .planes = 1,
+ .buf_mbpp = { TO_MBPP(4), 0, 0 },
+ .buf_planes = 1,
+ }, {
+ .fmt = V4L2_PIX_FMT_YUV420,
+ .fmt_name = "YUV4:2:0",
+ .mbpp = { TO_MBPP(1), TO_MBPPDIV(1, 4), TO_MBPPDIV(1, 4) },
+ .planes = 3,
+ .buf_mbpp = { TO_MBPP(1) + TO_MBPPDIV(1, 4) + TO_MBPPDIV(1, 4),
+ 0, 0 },
+ .buf_planes = 1,
+ }, {
+ .fmt = V4L2_PIX_FMT_NV12,
+ .fmt_name = "Y/CbCr4:2:0(NV12)",
+ .mbpp = { TO_MBPP(1), TO_MBPPDIV(1, 4) + TO_MBPPDIV(1, 4), 0 },
+ .planes = 2,
+ .buf_mbpp = { TO_MBPP(1) + TO_MBPPDIV(1, 4) + TO_MBPPDIV(1, 4),
+ 0, 0 },
+ .buf_planes = 1,
+ }, {
+ .fmt = V4L2_PIX_FMT_NV21,
+ .fmt_name = "Y/CrCb4:2:0(NV21)",
+ .mbpp = { TO_MBPP(1), TO_MBPPDIV(1, 4) + TO_MBPPDIV(1, 4), 0 },
+ .planes = 2,
+ .buf_mbpp = { TO_MBPP(1) + TO_MBPPDIV(1, 4) + TO_MBPPDIV(1, 4),
+ 0, 0 },
+ .buf_planes = 1,
+ }, {
+ .fmt = V4L2_PIX_FMT_NV12M,
+ .fmt_name = "Y/CbCr4:2:0(NV12M)",
+ .mbpp = { TO_MBPP(1), TO_MBPPDIV(1, 4) + TO_MBPPDIV(1, 4), 0 },
+ .planes = 2,
+ .buf_mbpp = { TO_MBPP(1), TO_MBPPDIV(1, 4) + TO_MBPPDIV(1, 4),
+ 0 },
+ .buf_planes = 2,
+ }, {
+ .fmt = V4L2_PIX_FMT_NV21M,
+ .fmt_name = "Y/CrCb4:2:0(NV21M)",
+ .mbpp = { TO_MBPP(1), TO_MBPPDIV(1, 4) + TO_MBPPDIV(1, 4), 0 },
+ .planes = 2,
+ .buf_mbpp = { TO_MBPP(1), TO_MBPPDIV(1, 4) + TO_MBPPDIV(1, 4),
+ 0 },
+ .buf_planes = 2,
+ }, {
+ .fmt = V4L2_PIX_FMT_YUV422P,
+ .fmt_name = "YUV4:2:2",
+ .mbpp = { TO_MBPP(1), TO_MBPPDIV(1, 2), TO_MBPPDIV(1, 2) },
+ .planes = 3,
+ .buf_mbpp = { TO_MBPP(1) + TO_MBPPDIV(1, 2) + TO_MBPPDIV(1, 2),
+ 0, 0 },
+ .buf_planes = 1,
+ }, {
+ .fmt = V4L2_PIX_FMT_NV16,
+ .fmt_name = "Y/CbCr4:2:2(NV16)",
+ .mbpp = { TO_MBPP(1), TO_MBPPDIV(1, 2) + TO_MBPPDIV(1, 2), 0 },
+ .planes = 2,
+ .buf_mbpp = { TO_MBPP(1) + TO_MBPPDIV(1, 2) + TO_MBPPDIV(1, 2),
+ 0, 0 },
+ .buf_planes = 1,
+ }, {
+ .fmt = V4L2_PIX_FMT_NV61,
+ .fmt_name = "Y/CrCb4:2:2(NV61)",
+ .mbpp = { TO_MBPP(1), TO_MBPPDIV(1, 2) + TO_MBPPDIV(1, 2), 0 },
+ .planes = 2,
+ .buf_mbpp = { TO_MBPP(1) + TO_MBPPDIV(1, 2) + TO_MBPPDIV(1, 2),
+ 0, 0 },
+ .buf_planes = 1,
+ }, {
+ .fmt = V4L2_PIX_FMT_YUYV,
+ .fmt_name = "YUV4:2:2(YUYV)",
+ .mbpp = { TO_MBPP(1) + TO_MBPPDIV(1, 2) + TO_MBPPDIV(1, 2),
+ 0, 0},
+ .planes = 1,
+ .buf_mbpp = { TO_MBPP(1) + TO_MBPPDIV(1, 2) + TO_MBPPDIV(1, 2),
+ 0, 0 },
+ .buf_planes = 1,
+ }, {
+ .fmt = V4L2_PIX_FMT_YUV444,
+ .fmt_name = "YUV4:4:4",
+ .mbpp = { TO_MBPP(1), TO_MBPP(1), TO_MBPP(1) },
+ .planes = 3,
+ .buf_mbpp = { TO_MBPP(1) + TO_MBPP(1) + TO_MBPP(1), 0, 0 },
+ .buf_planes = 1,
+ },
+};
+
+/*
+ * .init_context callback: ctx->priv doubles as the "generate a timeout"
+ * flag, toggled later by the custom ioctl; start with it cleared.
+ */
+static int m2m1shot_testdev_init_context(struct m2m1shot_context *ctx)
+{
+ ctx->priv = NULL; /* no timeout generated */
+
+ return 0;
+}
+
+/* .free_context callback: nothing to release for the test device. */
+static int m2m1shot_testdev_free_context(struct m2m1shot_context *ctx)
+{
+ return 0;
+}
+
+/*
+ * .prepare_format callback: validates fmt->fmt against the format table,
+ * fills bytes_used[] per buffer plane (TO_BPP undoes the x4 scaling after
+ * the width*height multiply) and returns the number of buffer planes, or
+ * -EINVAL for an unknown fourcc.
+ */
+static int m2m1shot_testdev_prepare_format(struct m2m1shot_context *ctx,
+ struct m2m1shot_pix_format *fmt,
+ enum dma_data_direction dir,
+ size_t bytes_used[])
+{
+ size_t i, j;
+ for (i = 0; i < ARRAY_SIZE(m2m1shot_testdev_fmtlist); i++) {
+ if (fmt->fmt == m2m1shot_testdev_fmtlist[i].fmt)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(m2m1shot_testdev_fmtlist)) {
+ dev_err(ctx->m21dev->dev, "Unknown format %#x\n", fmt->fmt);
+ return -EINVAL;
+ }
+
+ for (j = 0; j < m2m1shot_testdev_fmtlist[i].buf_planes; j++)
+ bytes_used[j] = TO_BPP(m2m1shot_testdev_fmtlist[i].buf_mbpp[j] *
+ fmt->width * fmt->height);
+ return m2m1shot_testdev_fmtlist[i].buf_planes;
+}
+
+/* .prepare_buffer callback: delegate to the shared helper for DMA mapping. */
+static int m2m1shot_testdev_prepare_buffer(struct m2m1shot_context *ctx,
+ struct m2m1shot_buffer_dma *dma_buffer,
+ int plane,
+ enum dma_data_direction dir)
+{
+ return m2m1shot_map_dma_buf(ctx->m21dev->dev,
+ &dma_buffer->plane[plane], dir);
+}
+
+/* .finish_buffer callback: undo m2m1shot_testdev_prepare_buffer(). */
+static void m2m1shot_testdev_finish_buffer(struct m2m1shot_context *ctx,
+ struct m2m1shot_buffer_dma *dma_buffer,
+ int plane,
+ enum dma_data_direction dir)
+{
+ m2m1shot_unmap_dma_buf(ctx->m21dev->dev, &dma_buffer->plane[plane], dir);
+}
+
+/* Work item handed from device_run() to the worker kthread. */
+struct m2m1shot_testdev_work {
+ struct list_head node;
+ struct m2m1shot_context *ctx;
+ struct m2m1shot_task *task;
+};
+
+/*
+ * Worker kthread that fakes the hardware: sleeps 20ms per task, then
+ * completes it. The BUG_ON()s assert the test device's single-outstanding-
+ * task invariant (the core dispatches one task at a time; device_run()
+ * queues at most one work item).
+ */
+static int m2m1shot_testdev_worker(void *data)
+{
+ struct m2m1shot_testdev_drvdata *drvdata = data;
+
+ while (true) {
+ struct m2m1shot_testdev_work *work;
+ struct m2m1shot_task *task;
+
+ wait_event_freezable(drvdata->waitqueue,
+ !list_empty(&drvdata->task_list));
+
+ spin_lock(&drvdata->lock);
+ BUG_ON(list_empty(&drvdata->task_list));
+ work = list_first_entry(&drvdata->task_list,
+ struct m2m1shot_testdev_work, node);
+ list_del(&work->node);
+ BUG_ON(!list_empty(&drvdata->task_list));
+ spin_unlock(&drvdata->lock);
+
+ /* simulate 20 msec of processing time */
+ msleep(20);
+
+ task = m2m1shot_get_current_task(drvdata->m21dev);
+ BUG_ON(!task);
+
+ BUG_ON(task != work->task);
+ BUG_ON(task->ctx != work->ctx);
+
+ kfree(work);
+
+ m2m1shot_task_finish(drvdata->m21dev, task, true);
+ }
+
+ return 0;
+
+}
+
+/*
+ * .device_run callback: queue a work item for the worker kthread.
+ * If the context is in timeout-generation mode (ctx->priv set via the
+ * custom ioctl), do nothing so the core's timeout path is exercised.
+ */
+static int m2m1shot_testdev_device_run(struct m2m1shot_context *ctx,
+ struct m2m1shot_task *task)
+{
+ struct m2m1shot_testdev_work *work;
+ struct device *dev = ctx->m21dev->dev;
+ struct m2m1shot_testdev_drvdata *drvdata = dev_get_drvdata(dev);
+
+ if (ctx->priv) /* timeout generated */
+ return 0;
+
+ work = kmalloc(sizeof(*work), GFP_KERNEL);
+ if (!work) {
+ pr_err("%s: Failed to allocate work struct\n", __func__);
+ return -ENOMEM;
+ }
+
+ INIT_LIST_HEAD(&work->node);
+ work->ctx = ctx;
+ work->task = task;
+
+ spin_lock(&drvdata->lock);
+ BUG_ON(!list_empty(&drvdata->task_list));
+ list_add_tail(&work->node, &drvdata->task_list);
+ spin_unlock(&drvdata->lock);
+
+ /*
+ * Fixed: the original had a stray semicolon after this condition
+ * ("if (...);") which made the guard a no-op. Only wake the worker
+ * when we are not already running in its context.
+ */
+ if (current != drvdata->thread)
+ wake_up(&drvdata->waitqueue);
+
+ return 0;
+}
+
+/* .timeout_task callback: the test device just logs the event. */
+static void m2m1shot_testdev_timeout_task(struct m2m1shot_context *ctx,
+ struct m2m1shot_task *task)
+{
+ dev_info(ctx->m21dev->dev, "%s: Timeout occurred\n", __func__);
+}
+
+/*
+ * .custom_ioctl callback: M2M1SHOT_TESTDEV_IOC_TIMEOUT reads an unsigned
+ * long from user space; nonzero arms timeout generation for this context
+ * (device_run() then silently drops the task), zero disarms it.
+ */
+static long m2m1shot_testdev_ioctl(struct m2m1shot_context *ctx,
+ unsigned int cmd, unsigned long arg)
+{
+ unsigned long timeout;
+
+ if (cmd != M2M1SHOT_TESTDEV_IOC_TIMEOUT) {
+ dev_err(ctx->m21dev->dev, "%s: Unknown ioctl cmd %#x\n",
+ __func__, cmd);
+ return -ENOSYS;
+ }
+
+ if (get_user(timeout, (unsigned long __user *)arg)) {
+ dev_err(ctx->m21dev->dev,
+ "%s: Failed to read user data\n", __func__);
+ return -EFAULT;
+ }
+
+ if (timeout)
+ ctx->priv = (void *)1; /* timeout generated */
+ else
+ ctx->priv = NULL; /* timeout not generated */
+
+ /* fixed log message: "geration" typo and missing newline */
+ dev_info(ctx->m21dev->dev, "%s: Timeout generation is %s\n",
+ __func__, timeout ? "set" : "unset");
+
+ return 0;
+}
+
+/* Callback table registered with the m2m1shot core. */
+static const struct m2m1shot_devops m2m1shot_testdev_ops = {
+ .init_context = m2m1shot_testdev_init_context,
+ .free_context = m2m1shot_testdev_free_context,
+ .prepare_format = m2m1shot_testdev_prepare_format,
+ .prepare_buffer = m2m1shot_testdev_prepare_buffer,
+ .finish_buffer = m2m1shot_testdev_finish_buffer,
+ .device_run = m2m1shot_testdev_device_run,
+ .timeout_task = m2m1shot_testdev_timeout_task,
+ .custom_ioctl = m2m1shot_testdev_ioctl,
+};
+
+/* Statically defined platform device; no matching platform driver needed. */
+static struct platform_device m2m1shot_testdev_pdev = {
+ .name = "m2m1shot_testdev",
+};
+
+/*
+ * Module init: register the platform device, create the m2m1shot device
+ * with a 500 msec task timeout, and start the worker kthread.
+ *
+ * Fixed an initialization-order race: kthread_run() starts the worker
+ * immediately, so drvdata's waitqueue/list/lock and m21dev pointer (and the
+ * platform device's drvdata) must be fully set up BEFORE the thread is
+ * created — the original initialized them afterwards, letting the worker
+ * wait on an uninitialized waitqueue. Also fixed the "tesdev" typo in the
+ * thread name.
+ */
+static int m2m1shot_testdev_init(void)
+{
+ int ret = 0;
+ struct m2m1shot_device *m21dev;
+ struct m2m1shot_testdev_drvdata *drvdata;
+
+ drvdata = kzalloc(sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata) {
+ pr_err("%s: Failed allocate drvdata\n", __func__);
+ return -ENOMEM;
+ }
+
+ INIT_LIST_HEAD(&drvdata->task_list);
+ spin_lock_init(&drvdata->lock);
+ init_waitqueue_head(&drvdata->waitqueue);
+
+ ret = platform_device_register(&m2m1shot_testdev_pdev);
+ if (ret) {
+ pr_err("%s: Failed to register platform device\n", __func__);
+ goto err_register;
+ }
+
+ m21dev = m2m1shot_create_device(&m2m1shot_testdev_pdev.dev,
+ &m2m1shot_testdev_ops, "testdev", -1, msecs_to_jiffies(500));
+ if (IS_ERR(m21dev)) {
+ pr_err("%s: Failed to create m2m1shot device\n", __func__);
+ ret = PTR_ERR(m21dev);
+ goto err_create;
+ }
+
+ drvdata->m21dev = m21dev;
+ dev_set_drvdata(&m2m1shot_testdev_pdev.dev, drvdata);
+
+ drvdata->thread = kthread_run(m2m1shot_testdev_worker, drvdata,
+ "%s", "m2m1shot_testdev_worker");
+ if (IS_ERR(drvdata->thread)) {
+ pr_err("%s: Failed create worker thread\n", __func__);
+ ret = PTR_ERR(drvdata->thread);
+ goto err_worker;
+ }
+
+ return 0;
+
+err_worker:
+ m2m1shot_destroy_device(m21dev);
+err_create:
+ platform_device_unregister(&m2m1shot_testdev_pdev);
+err_register:
+ kfree(drvdata);
+ return ret;
+}
+module_init(m2m1shot_testdev_init);
--- /dev/null
+/*
+ * drivers/media/m2m1shot.c
+ *
+ * Copyright (C) 2014 Samsung Electronics Co., Ltd.
+ *
+ * Contact: Cho KyongHo <pullip.cho@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/vmalloc.h>
+#include <linux/sched.h>
+
+#include <media/m2m1shot.h>
+
+#define M2M1SHOT_DEVNODE_PREFIX "m2m1shot_"
+#define M2M1SHOT_DEVNODE_PREFIX_LEN 9
+
+/*
+ * Pop the next READY task and hand it to the driver's device_run().
+ * Only one task runs at a time (current_task acts as the busy flag).
+ * If device_run() fails, the task is completed with ERROR state and the
+ * next queued task is tried (the goto forms the retry loop).
+ */
+static void m2m1shot_task_schedule(struct m2m1shot_device *m21dev)
+{
+ struct m2m1shot_task *task;
+ unsigned long flags;
+
+next_task:
+ spin_lock_irqsave(&m21dev->lock_task, flags);
+ if (list_empty(&m21dev->tasks)) {
+ /* No task to run */
+ spin_unlock_irqrestore(&m21dev->lock_task, flags);
+ return;
+ }
+
+ if (m21dev->current_task) {
+ /* H/W is working */
+ spin_unlock_irqrestore(&m21dev->lock_task, flags);
+ return;
+ }
+
+ task = list_first_entry(&m21dev->tasks,
+ struct m2m1shot_task, task_node);
+ list_del(&task->task_node);
+
+ m21dev->current_task = task;
+
+ spin_unlock_irqrestore(&m21dev->lock_task, flags);
+
+ task->state = M2M1SHOT_BUFSTATE_PROCESSING;
+
+ if (m21dev->ops->device_run(task->ctx, task)) {
+ task->state = M2M1SHOT_BUFSTATE_ERROR;
+
+ spin_lock_irqsave(&m21dev->lock_task, flags);
+ m21dev->current_task = NULL;
+ spin_unlock_irqrestore(&m21dev->lock_task, flags);
+
+ complete(&task->complete);
+
+ goto next_task;
+ }
+}
+
+/*
+ * Called by drivers when H/W finished @task. Clears current_task, records
+ * DONE/ERROR, wakes the waiter in m2m1shot_process() and schedules the
+ * next queued task. @task must be the task currently being processed.
+ */
+void m2m1shot_task_finish(struct m2m1shot_device *m21dev,
+ struct m2m1shot_task *task, bool success)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&m21dev->lock_task, flags);
+ BUG_ON(!m21dev->current_task);
+ BUG_ON(m21dev->current_task != task);
+ m21dev->current_task = NULL;
+ spin_unlock_irqrestore(&m21dev->lock_task, flags);
+
+ task->state = success ?
+ M2M1SHOT_BUFSTATE_DONE : M2M1SHOT_BUFSTATE_ERROR;
+
+ complete(&task->complete);
+
+ m2m1shot_task_schedule(m21dev);
+}
+EXPORT_SYMBOL(m2m1shot_task_finish);
+
+/*
+ * Cancel the in-flight @task with @reason (e.g. TIMEDOUT). Unlike
+ * m2m1shot_task_finish() there is no check that @task is current_task;
+ * the caller (timeout path) guarantees it.
+ */
+void m2m1shot_task_cancel(struct m2m1shot_device *m21dev,
+ struct m2m1shot_task *task,
+ enum m2m1shot_state reason)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&m21dev->lock_task, flags);
+ m21dev->current_task = NULL;
+ spin_unlock_irqrestore(&m21dev->lock_task, flags);
+
+ task->state = reason;
+
+ complete(&task->complete);
+
+ m2m1shot_task_schedule(m21dev);
+}
+EXPORT_SYMBOL(m2m1shot_task_cancel);
+
+/* Detach and drop the dmabuf reference of a single plane. */
+static void m2m1shot_buffer_put_dma_buf_plane(
+ struct m2m1shot_buffer_plane_dma *plane)
+{
+ dma_buf_detach(plane->dmabuf, plane->attachment);
+ dma_buf_put(plane->dmabuf);
+ plane->dmabuf = NULL;
+}
+
+/* Release the dmabuf references of every plane of @buffer. */
+static void m2m1shot_buffer_put_dma_buf(struct m2m1shot_buffer *buffer,
+ struct m2m1shot_buffer_dma *dma_buffer)
+{
+ int i;
+
+ for (i = 0; i < buffer->num_planes; i++)
+ m2m1shot_buffer_put_dma_buf_plane(&dma_buffer->plane[i]);
+}
+
+/*
+ * Acquire dmabuf references and attachments for every plane of @buffer.
+ * On failure, releases the dmabuf of the failing iteration (which has a
+ * reference but no attachment yet) and then rolls back all fully set up
+ * planes before it.
+ */
+static int m2m1shot_buffer_get_dma_buf(struct m2m1shot_device *m21dev,
+ struct m2m1shot_buffer *buffer,
+ struct m2m1shot_buffer_dma *dma_buffer)
+{
+ struct m2m1shot_buffer_plane_dma *plane;
+ int i, ret;
+
+ for (i = 0; i < buffer->num_planes; i++) {
+ plane = &dma_buffer->plane[i];
+
+ plane->dmabuf = dma_buf_get(buffer->plane[i].fd);
+ if (IS_ERR(plane->dmabuf)) {
+ dev_err(m21dev->dev,
+ "%s: failed to get dmabuf of fd %d\n",
+ __func__, buffer->plane[i].fd);
+ ret = PTR_ERR(plane->dmabuf);
+ goto err;
+ }
+
+ if (plane->dmabuf->size < plane->bytes_used) {
+ dev_err(m21dev->dev,
+ "%s: needs %zx bytes but dmabuf is %zx\n",
+ __func__, plane->bytes_used,
+ plane->dmabuf->size);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ plane->attachment = dma_buf_attach(plane->dmabuf, m21dev->dev);
+ if (IS_ERR(plane->attachment)) {
+ dev_err(m21dev->dev,
+ "%s: Failed to attach dmabuf\n", __func__);
+ ret = PTR_ERR(plane->attachment);
+ goto err;
+ }
+ }
+
+ return 0;
+err:
+ if (!IS_ERR(plane->dmabuf)) /* release dmabuf of the last iteration */
+ dma_buf_put(plane->dmabuf);
+
+ while (i-- > 0)
+ m2m1shot_buffer_put_dma_buf_plane(&dma_buffer->plane[i]);
+
+ return ret;
+}
+
+/*
+ * Pin the user pages backing [start, start+len) and build plane->sgt over
+ * them. The first sg entry carries the sub-page offset of @start; the last
+ * entry carries the tail length. sg_dma_address is preset to the physical
+ * address of each page.
+ * NOTE(review): uses the pre-4.x get_user_pages_fast(write) signature and
+ * manual sg fill — matches the era of this tree.
+ */
+static int m2m1shot_buffer_get_userptr_plane(struct m2m1shot_device *m21dev,
+ struct m2m1shot_buffer_plane_dma *plane,
+ unsigned long start, size_t len, int write)
+{
+ size_t last_size = 0;
+ struct page **pages;
+ int nr_pages = 0;
+ int ret = 0, i;
+ off_t start_off;
+ struct scatterlist *sgl;
+
+ last_size = (start + len) & ~PAGE_MASK;
+ if (last_size == 0)
+ last_size = PAGE_SIZE;
+
+ start_off = offset_in_page(start);
+
+ start = round_down(start, PAGE_SIZE);
+
+ nr_pages = PFN_DOWN(PAGE_ALIGN(len + start_off));
+
+ pages = vmalloc(nr_pages * sizeof(*pages));
+ if (!pages)
+ return -ENOMEM;
+
+ ret = get_user_pages_fast(start, nr_pages, write, pages);
+ if (ret != nr_pages) {
+ dev_err(m21dev->dev,
+ "%s: failed to pin user pages in %#lx ~ %#lx\n",
+ __func__, start, start + len);
+
+ if (ret < 0)
+ goto err_get;
+
+ /* partial pin: release only the pages actually pinned */
+ nr_pages = ret;
+ ret = -EFAULT;
+ goto err_pin;
+ }
+
+ plane->sgt = kmalloc(sizeof(*plane->sgt), GFP_KERNEL);
+ if (!plane->sgt) {
+ dev_err(m21dev->dev,
+ "%s: failed to allocate sgtable\n", __func__);
+ ret = -ENOMEM;
+ goto err_sgtable;
+ }
+
+ ret = sg_alloc_table(plane->sgt, nr_pages, GFP_KERNEL);
+ if (ret) {
+ dev_err(m21dev->dev,
+ "%s: failed to allocate sglist\n", __func__);
+ goto err_sg;
+ }
+
+ sgl = plane->sgt->sgl;
+
+ sg_set_page(sgl, pages[0],
+ (nr_pages == 1) ? len : PAGE_SIZE - start_off,
+ start_off);
+ sg_dma_address(sgl) = page_to_phys(sg_page(sgl));
+
+ sgl = sg_next(sgl);
+
+ /* nr_pages == 1 if sgl == NULL here */
+ for (i = 1; i < (nr_pages - 1); i++) {
+ sg_set_page(sgl, pages[i], PAGE_SIZE, 0);
+ sg_dma_address(sgl) = page_to_phys(sg_page(sgl));
+ sgl = sg_next(sgl);
+ }
+
+ if (sgl) {
+ sg_set_page(sgl, pages[i], last_size, 0);
+ sg_dma_address(sgl) = page_to_phys(sg_page(sgl));
+ }
+
+ vfree(pages);
+
+ return 0;
+err_sg:
+ kfree(plane->sgt);
+err_sgtable:
+err_pin:
+ for (i = 0; i < nr_pages; i++)
+ put_page(pages[i]);
+err_get:
+ vfree(pages);
+
+ return ret;
+}
+
+/*
+ * Release a plane acquired via the userptr path. A userptr region backed
+ * by a dmabuf mapping takes the dmabuf branch; otherwise the pinned pages
+ * are dirtied (if written by the device) and unpinned, and the sg table
+ * freed.
+ */
+static void m2m1shot_buffer_put_userptr_plane(
+ struct m2m1shot_buffer_plane_dma *plane, int write)
+{
+ if (plane->dmabuf) {
+ m2m1shot_buffer_put_dma_buf_plane(plane);
+ } else {
+ struct sg_table *sgt = plane->sgt;
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
+ if (write)
+ set_page_dirty_lock(sg_page(sg));
+ put_page(sg_page(sg));
+ }
+
+ sg_free_table(sgt);
+ kfree(sgt);
+ }
+}
+
+/*
+ * Check whether the user address range is backed by a dmabuf mapping.
+ * Returns the dmabuf (with *out_offset set to the offset of @start within
+ * the VMA), NULL when the range is plain anonymous/file memory, or an
+ * ERR_PTR when the address is not mapped at all.
+ *
+ * Fixed: "&current" had been mangled into the "&curren;" HTML entity
+ * (rendering as "¤t") in both mmap_sem accesses; restored the
+ * original identifier so the code compiles.
+ */
+static struct dma_buf *m2m1shot_buffer_check_userptr(
+ struct m2m1shot_device *m21dev, unsigned long start, size_t len,
+ off_t *out_offset)
+{
+ struct dma_buf *dmabuf = NULL;
+ struct vm_area_struct *vma;
+
+ down_read(&current->mm->mmap_sem);
+ vma = find_vma(current->mm, start);
+ if (!vma || (start < vma->vm_start)) {
+ dev_err(m21dev->dev, "%s: Incorrect user buffer @ %#lx/%#zx\n",
+ __func__, start, len);
+ dmabuf = ERR_PTR(-EINVAL);
+ goto finish;
+ }
+
+ if (!vma->vm_file)
+ goto finish;
+
+ dmabuf = get_dma_buf_file(vma->vm_file);
+ if (dmabuf != NULL)
+ *out_offset = start - vma->vm_start;
+finish:
+ up_read(&current->mm->mmap_sem);
+ return dmabuf;
+}
+
+/*
+ * Acquire every plane of a userptr buffer. Each plane is first probed for
+ * an underlying dmabuf mapping (preferred path: attach to the dmabuf);
+ * otherwise the user pages are pinned directly. On any failure, previously
+ * acquired planes are rolled back.
+ */
+static int m2m1shot_buffer_get_userptr(struct m2m1shot_device *m21dev,
+ struct m2m1shot_buffer *buffer,
+ struct m2m1shot_buffer_dma *dma_buffer,
+ int write)
+{
+ int i, ret = 0;
+ struct dma_buf *dmabuf;
+ off_t offset;
+
+ for (i = 0; i < buffer->num_planes; i++) {
+ dmabuf = m2m1shot_buffer_check_userptr(m21dev,
+ buffer->plane[i].userptr, buffer->plane[i].len,
+ &offset);
+ if (IS_ERR(dmabuf)) {
+ ret = PTR_ERR(dmabuf);
+ goto err;
+ } else if (dmabuf) {
+ if (dmabuf->size < dma_buffer->plane[i].bytes_used) {
+ dev_err(m21dev->dev,
+ "%s: needs %zu bytes but dmabuf is %zu\n",
+ __func__,
+ dma_buffer->plane[i].bytes_used,
+ dmabuf->size);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ dma_buffer->plane[i].dmabuf = dmabuf;
+ dma_buffer->plane[i].attachment = dma_buf_attach(
+ dmabuf, m21dev->dev);
+ dma_buffer->plane[i].offset = offset;
+ if (IS_ERR(dma_buffer->plane[i].attachment)) {
+ dev_err(m21dev->dev,
+ "%s: Failed to attach dmabuf\n",
+ __func__);
+ ret = PTR_ERR(dma_buffer->plane[i].attachment);
+ dma_buf_put(dmabuf);
+ goto err;
+ }
+ } else {
+ ret = m2m1shot_buffer_get_userptr_plane(m21dev,
+ &dma_buffer->plane[i],
+ buffer->plane[i].userptr,
+ buffer->plane[i].len,
+ write);
+ }
+
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+err:
+ while (i-- > 0)
+ m2m1shot_buffer_put_userptr_plane(&dma_buffer->plane[i], write);
+
+ return ret;
+}
+
+/* Release every plane acquired by m2m1shot_buffer_get_userptr(). */
+static void m2m1shot_buffer_put_userptr(struct m2m1shot_buffer *buffer,
+ struct m2m1shot_buffer_dma *dma_buffer,
+ int write)
+{
+ int i;
+
+ for (i = 0; i < buffer->num_planes; i++)
+ m2m1shot_buffer_put_userptr_plane(&dma_buffer->plane[i], write);
+}
+
+/*
+ * Validate plane sizes, acquire the buffer (dmabuf or userptr) and call
+ * the driver's prepare_buffer() per plane. Planes with bytes_used == 0
+ * inherit the user-given length (content-dependent sizes such as encoded
+ * JPEG streams). Rolls everything back on failure.
+ */
+static int m2m1shot_prepare_get_buffer(struct m2m1shot_context *ctx,
+ struct m2m1shot_buffer *buffer,
+ struct m2m1shot_buffer_dma *dma_buffer,
+ enum dma_data_direction dir)
+{
+ struct m2m1shot_device *m21dev = ctx->m21dev;
+ int i, ret;
+
+ for (i = 0; i < buffer->num_planes; i++) {
+ struct m2m1shot_buffer_plane_dma *plane;
+
+ plane = &dma_buffer->plane[i];
+
+ if (plane->bytes_used == 0) {
+ /*
+ * bytes_used = 0 means that the size of the plane is
+ * not able to be decided by the driver because it is
+ * dependent upon the content in the buffer.
+ * The best example of the buffer is the buffer of JPEG
+ * encoded stream for decompression.
+ */
+ plane->bytes_used = buffer->plane[i].len;
+ } else if (buffer->plane[i].len < plane->bytes_used) {
+ dev_err(m21dev->dev,
+ "%s: needs %zx bytes but %zx is given\n",
+ __func__, plane->bytes_used,
+ buffer->plane[i].len);
+ return -EINVAL;
+ }
+ }
+
+ if ((buffer->type != M2M1SHOT_BUFFER_USERPTR) &&
+ (buffer->type != M2M1SHOT_BUFFER_DMABUF)) {
+ dev_err(m21dev->dev, "%s: unknown buffer type %u\n",
+ __func__, buffer->type);
+ return -EINVAL;
+ }
+
+ if (buffer->type == M2M1SHOT_BUFFER_DMABUF)
+ ret = m2m1shot_buffer_get_dma_buf(m21dev, buffer, dma_buffer);
+ else
+ ret = m2m1shot_buffer_get_userptr(m21dev, buffer, dma_buffer,
+ (dir == DMA_TO_DEVICE) ? 0 : 1);
+
+ if (ret)
+ return ret;
+
+ dma_buffer->buffer = buffer;
+
+ for (i = 0; i < buffer->num_planes; i++) {
+ /* the callback function should fill 'dma_addr' field */
+ ret = m21dev->ops->prepare_buffer(ctx, dma_buffer, i, dir);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+err:
+ dev_err(m21dev->dev, "%s: Failed to prepare plane %u", __func__, i);
+
+ while (i-- > 0)
+ m21dev->ops->finish_buffer(ctx, dma_buffer, i, dir);
+
+ if (buffer->type == M2M1SHOT_BUFFER_DMABUF)
+ m2m1shot_buffer_put_dma_buf(buffer, dma_buffer);
+ else
+ m2m1shot_buffer_put_userptr(buffer, dma_buffer,
+ (dir == DMA_TO_DEVICE) ? 0 : 1);
+
+ return ret;
+}
+
+/*
+ * Undo m2m1shot_prepare_get_buffer(): driver finish_buffer() per plane,
+ * then release the dmabuf or userptr references.
+ */
+static void m2m1shot_finish_buffer(struct m2m1shot_device *m21dev,
+ struct m2m1shot_context *ctx,
+ struct m2m1shot_buffer *buffer,
+ struct m2m1shot_buffer_dma *dma_buffer,
+ enum dma_data_direction dir)
+{
+ int i;
+
+ for (i = 0; i < buffer->num_planes; i++)
+ m21dev->ops->finish_buffer(ctx, dma_buffer, i, dir);
+
+ if (buffer->type == M2M1SHOT_BUFFER_DMABUF)
+ m2m1shot_buffer_put_dma_buf(buffer, dma_buffer);
+ else
+ m2m1shot_buffer_put_userptr(buffer, dma_buffer,
+ (dir == DMA_TO_DEVICE) ? 0 : 1);
+}
+
+/*
+ * Ask the driver to validate both formats and fill per-plane sizes.
+ * The output buffer must supply exactly the plane count the driver
+ * reports; the capture buffer may supply more planes than required
+ * (note the '<' comparison). Optionally calls prepare_operation().
+ */
+static int m2m1shot_prepare_format(struct m2m1shot_device *m21dev,
+ struct m2m1shot_context *ctx,
+ struct m2m1shot_task *task)
+{
+ int i, ret;
+ size_t out_sizes[M2M1SHOT_MAX_PLANES] = { 0 };
+ size_t cap_sizes[M2M1SHOT_MAX_PLANES] = { 0 };
+
+ if (task->task.buf_out.num_planes > M2M1SHOT_MAX_PLANES) {
+ dev_err(m21dev->dev, "Invalid number of output planes %u.\n",
+ task->task.buf_out.num_planes);
+ return -EINVAL;
+ }
+
+ if (task->task.buf_cap.num_planes > M2M1SHOT_MAX_PLANES) {
+ dev_err(m21dev->dev, "Invalid number of capture planes %u.\n",
+ task->task.buf_cap.num_planes);
+ return -EINVAL;
+ }
+
+ ret = m21dev->ops->prepare_format(ctx, &task->task.fmt_out,
+ DMA_TO_DEVICE, out_sizes);
+ if (ret < 0)
+ return ret;
+
+ if (task->task.buf_out.num_planes != ret) {
+ dev_err(m21dev->dev,
+ "%s: needs %u output planes but %u is given\n",
+ __func__, ret, task->task.buf_out.num_planes);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < task->task.buf_out.num_planes; i++)
+ task->dma_buf_out.plane[i].bytes_used = out_sizes[i];
+
+ ret = m21dev->ops->prepare_format(ctx, &task->task.fmt_cap,
+ DMA_FROM_DEVICE, cap_sizes);
+ if (ret < 0)
+ return ret;
+
+ if (task->task.buf_cap.num_planes < ret) {
+ dev_err(m21dev->dev,
+ "%s: needs %u capture planes but %u is given\n",
+ __func__, ret, task->task.buf_cap.num_planes);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < task->task.buf_cap.num_planes; i++)
+ task->dma_buf_cap.plane[i].bytes_used = cap_sizes[i];
+
+ if (m21dev->ops->prepare_operation) {
+ ret = m21dev->ops->prepare_operation(ctx, task);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Prepare formats then acquire both buffers for a task. The output buffer
+ * is released again if acquiring the capture buffer fails.
+ */
+static int m2m1shot_prepare_task(struct m2m1shot_device *m21dev,
+ struct m2m1shot_context *ctx,
+ struct m2m1shot_task *task)
+{
+ int ret;
+
+ ret = m2m1shot_prepare_format(m21dev, ctx, task);
+ if (ret)
+ return ret;
+
+ ret = m2m1shot_prepare_get_buffer(ctx, &task->task.buf_out,
+ &task->dma_buf_out, DMA_TO_DEVICE);
+ if (ret) {
+ dev_err(m21dev->dev, "%s: Failed to get output buffer\n",
+ __func__);
+ return ret;
+ }
+
+ ret = m2m1shot_prepare_get_buffer(ctx, &task->task.buf_cap,
+ &task->dma_buf_cap, DMA_FROM_DEVICE);
+ if (ret) {
+ m2m1shot_finish_buffer(m21dev, ctx,
+ &task->task.buf_out, &task->dma_buf_out, DMA_TO_DEVICE);
+ dev_err(m21dev->dev, "%s: Failed to get capture buffer\n",
+ __func__);
+ return ret;
+ }
+
+ return 0;
+}
+
+/* Release both buffers of a completed (or cancelled) task. */
+static void m2m1shot_finish_task(struct m2m1shot_device *m21dev,
+ struct m2m1shot_context *ctx,
+ struct m2m1shot_task *task)
+{
+ m2m1shot_finish_buffer(m21dev, ctx,
+ &task->task.buf_cap, &task->dma_buf_cap,
+ DMA_FROM_DEVICE);
+ m2m1shot_finish_buffer(m21dev, ctx,
+ &task->task.buf_out, &task->dma_buf_out, DMA_TO_DEVICE);
+}
+
+/*
+ * kref release callback: invoked when the last reference to a context is
+ * dropped (file release and all in-flight tasks done). Notifies the
+ * driver, unlinks the context from the device list and frees it.
+ */
+static void m2m1shot_destroy_context(struct kref *kref)
+{
+ struct m2m1shot_context *ctx = container_of(kref,
+ struct m2m1shot_context, kref);
+
+ ctx->m21dev->ops->free_context(ctx);
+
+ spin_lock(&ctx->m21dev->lock_ctx);
+ list_del(&ctx->node);
+ spin_unlock(&ctx->m21dev->lock_ctx);
+
+ kfree(ctx);
+}
+
+/*
+ * Synchronously process one task: prepare it, queue it, kick the
+ * scheduler and block on task->complete until the driver finishes or the
+ * per-device timeout (timeout_jiffies == -1 disables it) expires. The
+ * per-context mutex serializes tasks of one context; the context kref is
+ * held across the call so release() cannot free it underneath us.
+ * Returns 0 only when the task ended in DONE state.
+ */
+static int m2m1shot_process(struct m2m1shot_context *ctx,
+ struct m2m1shot_task *task)
+{
+ struct m2m1shot_device *m21dev = ctx->m21dev;
+ unsigned long flags;
+ int ret;
+
+ INIT_LIST_HEAD(&task->task_node);
+ init_completion(&task->complete);
+
+ kref_get(&ctx->kref);
+
+ mutex_lock(&ctx->mutex);
+
+ ret = m2m1shot_prepare_task(m21dev, ctx, task);
+ if (ret)
+ goto err;
+
+ task->ctx = ctx;
+ task->state = M2M1SHOT_BUFSTATE_READY;
+
+ spin_lock_irqsave(&m21dev->lock_task, flags);
+ list_add_tail(&task->task_node, &m21dev->tasks);
+ spin_unlock_irqrestore(&m21dev->lock_task, flags);
+
+ m2m1shot_task_schedule(m21dev);
+
+ if (m21dev->timeout_jiffies != -1) {
+ unsigned long elapsed;
+ elapsed = wait_for_completion_timeout(&task->complete,
+ m21dev->timeout_jiffies);
+ if (!elapsed) { /* timed out */
+ m2m1shot_task_cancel(m21dev, task,
+ M2M1SHOT_BUFSTATE_TIMEDOUT);
+
+ m21dev->ops->timeout_task(ctx, task);
+
+ m2m1shot_finish_task(m21dev, ctx, task);
+
+ dev_notice(m21dev->dev, "%s: %u msecs timed out\n",
+ __func__,
+ jiffies_to_msecs(m21dev->timeout_jiffies));
+ ret = -ETIMEDOUT;
+ goto err;
+ }
+ } else {
+ wait_for_completion(&task->complete);
+ }
+
+ BUG_ON(task->state == M2M1SHOT_BUFSTATE_READY);
+
+ m2m1shot_finish_task(m21dev, ctx, task);
+err:
+
+ mutex_unlock(&ctx->mutex);
+
+ kref_put(&ctx->kref, m2m1shot_destroy_context);
+
+ if (ret)
+ return ret;
+ return (task->state == M2M1SHOT_BUFSTATE_DONE) ? 0 : -EINVAL;
+}
+
+/*
+ * File open: allocate a context, link it to the device and let the driver
+ * initialize its private state.
+ *
+ * Fixed the init_context() error path: the context was kfree'd while it
+ * was still linked on m21dev->contexts and still stored in
+ * filp->private_data, corrupting the list and leaving a dangling pointer.
+ * It is now unlinked and private_data cleared before freeing. kfree() is
+ * used instead of kref_put() on purpose so .free_context() is not called
+ * for a context that was never successfully initialized.
+ */
+static int m2m1shot_open(struct inode *inode, struct file *filp)
+{
+ struct m2m1shot_device *m21dev = container_of(filp->private_data,
+ struct m2m1shot_device, misc);
+ struct m2m1shot_context *ctx;
+ int ret;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&ctx->node);
+ kref_init(&ctx->kref);
+ mutex_init(&ctx->mutex);
+
+ ctx->m21dev = m21dev;
+
+ spin_lock(&m21dev->lock_ctx);
+ list_add_tail(&ctx->node, &m21dev->contexts);
+ spin_unlock(&m21dev->lock_ctx);
+
+ filp->private_data = ctx;
+
+ ret = m21dev->ops->init_context(ctx);
+ if (ret) { /* kref_put() is not called not to call .free_context() */
+ spin_lock(&m21dev->lock_ctx);
+ list_del(&ctx->node);
+ spin_unlock(&m21dev->lock_ctx);
+
+ filp->private_data = NULL;
+ kfree(ctx);
+ }
+
+ return ret;
+}
+
+/*
+ * File release: drop the open() reference. The context is destroyed here
+ * unless m2m1shot_process() still holds a transient reference.
+ */
+static int m2m1shot_release(struct inode *inode, struct file *filp)
+{
+ struct m2m1shot_context *ctx = filp->private_data;
+
+ kref_put(&ctx->kref, m2m1shot_destroy_context);
+
+ return 0;
+}
+
+/*
+ * ioctl entry point.
+ * M2M1SHOT_IOC_PROCESS: copy the task description in, run it to
+ * completion, copy the (possibly updated) description back — the copy_out
+ * happens even when processing failed so user space sees the final state.
+ * M2M1SHOT_IOC_CUSTOM: forwarded to the driver's custom_ioctl callback.
+ */
+static long m2m1shot_ioctl(struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ struct m2m1shot_context *ctx = filp->private_data;
+ struct m2m1shot_device *m21dev = ctx->m21dev;
+
+ switch (cmd) {
+ case M2M1SHOT_IOC_PROCESS:
+ {
+ struct m2m1shot_task data;
+ int ret;
+
+ memset(&data, 0, sizeof(data));
+
+ if (copy_from_user(&data.task,
+ (void __user *)arg, sizeof(data.task))) {
+ dev_err(m21dev->dev,
+ "%s: Failed to read userdata\n", __func__);
+ return -EFAULT;
+ }
+
+ /*
+ * m2m1shot_process() does not wake up
+ * until the given task finishes
+ */
+ ret = m2m1shot_process(ctx, &data);
+
+ if (copy_to_user((void __user *)arg, &data.task,
+ sizeof(data.task))) {
+ dev_err(m21dev->dev,
+ "%s: Failed to read userdata\n", __func__);
+ return -EFAULT;
+ }
+
+ return ret;
+ }
+ case M2M1SHOT_IOC_CUSTOM:
+ {
+ struct m2m1shot_custom_data data;
+
+ if (!m21dev->ops->custom_ioctl) {
+ dev_err(m21dev->dev,
+ "%s: custom_ioctl not defined\n", __func__);
+ return -ENOSYS;
+ }
+
+ if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
+ dev_err(m21dev->dev,
+ "%s: Failed to read custom data\n", __func__);
+ return -EFAULT;
+ }
+
+ return m21dev->ops->custom_ioctl(ctx, data.cmd, data.arg);
+ }
+ default:
+ dev_err(m21dev->dev, "%s: Unknown ioctl cmd %x\n",
+ __func__, cmd);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_COMPAT
+/*
+ * 32-bit (compat) mirrors of the UAPI structures in uapi/linux/m2m1shot.h.
+ * The UAPI structs use size_t and unsigned long, whose widths differ
+ * between 32-bit userspace and a 64-bit kernel, so each field is
+ * re-declared here with its fixed compat_* counterpart.
+ */
+struct compat_m2m1shot_rect {
+	compat_short_t left;
+	compat_short_t top;
+	compat_ushort_t width;
+	compat_ushort_t height;
+};
+
+struct compat_m2m1shot_pix_format {
+	compat_uint_t fmt;
+	compat_uint_t width;
+	compat_uint_t height;
+	struct v4l2_rect crop;
+};
+
+struct compat_m2m1shot_buffer_plane {
+	union {
+		compat_int_t fd;
+		compat_ulong_t userptr;
+	};
+	compat_size_t len;
+};
+
+struct compat_m2m1shot_buffer {
+	struct compat_m2m1shot_buffer_plane plane[M2M1SHOT_MAX_PLANES];
+	__u8 type;
+	__u8 num_planes;
+};
+
+struct compat_m2m1shot_operation {
+	compat_short_t quality_level;
+	compat_short_t rotate;
+	compat_uint_t op; /* or-ing M2M1SHOT_OP_FLIP_VIRT/HORI */
+};
+
+struct compat_m2m1shot {
+	struct compat_m2m1shot_pix_format fmt_out;
+	struct compat_m2m1shot_pix_format fmt_cap;
+	struct compat_m2m1shot_buffer buf_out;
+	struct compat_m2m1shot_buffer buf_cap;
+	struct compat_m2m1shot_operation op;
+	compat_ulong_t reserved[2];
+};
+
+struct compat_m2m1shot_custom_data {
+	compat_uint_t cmd;
+	compat_ulong_t arg;
+};
+
+/* compat command numbers differ from the native ones only in struct size */
+#define COMPAT_M2M1SHOT_IOC_PROCESS	_IOWR('M', 0, struct compat_m2m1shot)
+#define COMPAT_M2M1SHOT_IOC_CUSTOM \
+				_IOWR('M', 16, struct compat_m2m1shot_custom_data)
+
+/*
+ * m2m1shot_compat_ioctl32() - ioctl entry for 32-bit userspace
+ *
+ * Translates struct compat_m2m1shot field-by-field into the native
+ * struct m2m1shot_task, runs the task synchronously, and translates the
+ * result back. num_planes of both buffers is validated against
+ * M2M1SHOT_MAX_PLANES before any plane array is read.
+ *
+ * NOTE(review): unlike the native handler, this returns early when
+ * m2m1shot_process() fails, so no results are copied back to userspace
+ * on error — confirm whether userspace relies on the copied-back task
+ * state after a failure, as it does on the native path.
+ */
+static long m2m1shot_compat_ioctl32(struct file *filp,
+				unsigned int cmd, unsigned long arg)
+{
+	struct m2m1shot_context *ctx = filp->private_data;
+	struct m2m1shot_device *m21dev = ctx->m21dev;
+
+	switch (cmd) {
+	case COMPAT_M2M1SHOT_IOC_PROCESS:
+	{
+		struct compat_m2m1shot data;
+		struct m2m1shot_task task;
+		int i, ret;
+
+		memset(&task, 0, sizeof(task));
+
+		if (copy_from_user(&data, compat_ptr(arg), sizeof(data))) {
+			dev_err(m21dev->dev,
+				"%s: Failed to read userdata\n", __func__);
+			return -EFAULT;
+		}
+
+		/* bound-check plane counts before indexing plane arrays */
+		if ((data.buf_out.num_planes > M2M1SHOT_MAX_PLANES) ||
+			(data.buf_cap.num_planes > M2M1SHOT_MAX_PLANES)) {
+			dev_err(m21dev->dev,
+				"%s: Invalid plane number (out %u/cap %u)\n",
+				__func__, data.buf_out.num_planes,
+				data.buf_cap.num_planes);
+			return -EINVAL;
+		}
+
+		/* widen compat fields into the native task description */
+		task.task.fmt_out.fmt = data.fmt_out.fmt;
+		task.task.fmt_out.width = data.fmt_out.width;
+		task.task.fmt_out.height = data.fmt_out.height;
+		task.task.fmt_out.crop.left = data.fmt_out.crop.left;
+		task.task.fmt_out.crop.top = data.fmt_out.crop.top;
+		task.task.fmt_out.crop.width = data.fmt_out.crop.width;
+		task.task.fmt_out.crop.height = data.fmt_out.crop.height;
+		task.task.fmt_cap.fmt = data.fmt_cap.fmt;
+		task.task.fmt_cap.width = data.fmt_cap.width;
+		task.task.fmt_cap.height = data.fmt_cap.height;
+		task.task.fmt_cap.crop.left = data.fmt_cap.crop.left;
+		task.task.fmt_cap.crop.top = data.fmt_cap.crop.top;
+		task.task.fmt_cap.crop.width = data.fmt_cap.crop.width;
+		task.task.fmt_cap.crop.height = data.fmt_cap.crop.height;
+		for (i = 0; i < data.buf_out.num_planes; i++) {
+			task.task.buf_out.plane[i].len =
+						data.buf_out.plane[i].len;
+			if (data.buf_out.type == M2M1SHOT_BUFFER_DMABUF)
+				task.task.buf_out.plane[i].fd =
+						data.buf_out.plane[i].fd;
+			else /* data.buf_out.type == M2M1SHOT_BUFFER_USERPTR */
+				task.task.buf_out.plane[i].userptr =
+						data.buf_out.plane[i].userptr;
+		}
+		task.task.buf_out.type = data.buf_out.type;
+		task.task.buf_out.num_planes = data.buf_out.num_planes;
+		for (i = 0; i < data.buf_cap.num_planes; i++) {
+			task.task.buf_cap.plane[i].len =
+						data.buf_cap.plane[i].len;
+			if (data.buf_cap.type == M2M1SHOT_BUFFER_DMABUF)
+				task.task.buf_cap.plane[i].fd =
+						data.buf_cap.plane[i].fd;
+			else /* data.buf_cap.type == M2M1SHOT_BUFFER_USERPTR */
+				task.task.buf_cap.plane[i].userptr =
+						data.buf_cap.plane[i].userptr;
+		}
+		task.task.buf_cap.type = data.buf_cap.type;
+		task.task.buf_cap.num_planes = data.buf_cap.num_planes;
+		task.task.op.quality_level = data.op.quality_level;
+		task.task.op.rotate = data.op.rotate;
+		task.task.op.op = data.op.op;
+		task.task.reserved[0] = data.reserved[0];
+		task.task.reserved[1] = data.reserved[1];
+
+		/*
+		 * m2m1shot_process() does not wake up
+		 * until the given task finishes
+		 */
+		ret = m2m1shot_process(ctx, &task);
+		if (ret) {
+			dev_err(m21dev->dev,
+				"%s: Failed to process m2m1shot task\n",
+				__func__);
+			return ret;
+		}
+
+		/* narrow the processed task back into the compat layout */
+		data.fmt_out.fmt = task.task.fmt_out.fmt;
+		data.fmt_out.width = task.task.fmt_out.width;
+		data.fmt_out.height = task.task.fmt_out.height;
+		data.fmt_out.crop.left = task.task.fmt_out.crop.left;
+		data.fmt_out.crop.top = task.task.fmt_out.crop.top;
+		data.fmt_out.crop.width = task.task.fmt_out.crop.width;
+		data.fmt_out.crop.height = task.task.fmt_out.crop.height;
+		data.fmt_cap.fmt = task.task.fmt_cap.fmt;
+		data.fmt_cap.width = task.task.fmt_cap.width;
+		data.fmt_cap.height = task.task.fmt_cap.height;
+		data.fmt_cap.crop.left = task.task.fmt_cap.crop.left;
+		data.fmt_cap.crop.top = task.task.fmt_cap.crop.top;
+		data.fmt_cap.crop.width = task.task.fmt_cap.crop.width;
+		data.fmt_cap.crop.height = task.task.fmt_cap.crop.height;
+		for (i = 0; i < task.task.buf_out.num_planes; i++) {
+			data.buf_out.plane[i].len =
+					task.task.buf_out.plane[i].len;
+			if (task.task.buf_out.type == M2M1SHOT_BUFFER_DMABUF)
+				data.buf_out.plane[i].fd =
+					task.task.buf_out.plane[i].fd;
+			else /* buf_out.type == M2M1SHOT_BUFFER_USERPTR */
+				data.buf_out.plane[i].userptr =
+					task.task.buf_out.plane[i].userptr;
+		}
+		data.buf_out.type = task.task.buf_out.type;
+		data.buf_out.num_planes = task.task.buf_out.num_planes;
+		for (i = 0; i < task.task.buf_cap.num_planes; i++) {
+			data.buf_cap.plane[i].len =
+					task.task.buf_cap.plane[i].len;
+			if (task.task.buf_cap.type == M2M1SHOT_BUFFER_DMABUF)
+				data.buf_cap.plane[i].fd =
+					task.task.buf_cap.plane[i].fd;
+			else /* buf_cap.type == M2M1SHOT_BUFFER_USERPTR */
+				data.buf_cap.plane[i].userptr =
+					task.task.buf_cap.plane[i].userptr;
+		}
+		data.buf_cap.type = task.task.buf_cap.type;
+		data.buf_cap.num_planes = task.task.buf_cap.num_planes;
+		data.op.quality_level = task.task.op.quality_level;
+		data.op.rotate = task.task.op.rotate;
+		data.op.op = task.task.op.op;
+		data.reserved[0] = task.task.reserved[0];
+		data.reserved[1] = task.task.reserved[1];
+
+		if (copy_to_user(compat_ptr(arg), &data, sizeof(data))) {
+			dev_err(m21dev->dev,
+				"%s: Failed to copy into userdata\n", __func__);
+			return -EFAULT;
+		}
+
+		return 0;
+	}
+	case COMPAT_M2M1SHOT_IOC_CUSTOM:
+	{
+		struct compat_m2m1shot_custom_data data;
+
+		if (!m21dev->ops->custom_ioctl) {
+			dev_err(m21dev->dev,
+				"%s: custom_ioctl not defined\n", __func__);
+			return -ENOSYS;
+		}
+
+		if (copy_from_user(&data, compat_ptr(arg), sizeof(data))) {
+			dev_err(m21dev->dev,
+				"%s: Failed to read custom data\n", __func__);
+			return -EFAULT;
+		}
+
+		return m21dev->ops->custom_ioctl(ctx, data.cmd, data.arg);
+	}
+	default:
+		dev_err(m21dev->dev, "%s: Unknown ioctl cmd %x\n",
+			__func__, cmd);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+#endif
+
+/* file operations for the m2m1shot misc device nodes */
+static const struct file_operations m2m1shot_fops = {
+	.owner = THIS_MODULE,
+	.open = m2m1shot_open,
+	.release = m2m1shot_release,
+	.unlocked_ioctl = m2m1shot_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = m2m1shot_compat_ioctl32,
+#endif
+};
+
+/*
+ * m2m1shot_create_device() - create an m2m1shot service device
+ *
+ * Builds the device node name "m2m1shot_<suffix>[<id>]", allocates the
+ * m2m1shot_device and registers the misc device. Returns the new device
+ * on success, ERR_PTR() on failure.
+ *
+ * Fixes vs. original: the error labels were ordered so that a
+ * misc_register() failure freed only m21dev and leaked @name; the labels
+ * now fall through so every failure path frees everything it allocated.
+ * The callback check also covers every callback the core invokes
+ * unconditionally, resolving the original "TODO: ops callback check".
+ */
+struct m2m1shot_device *m2m1shot_create_device(struct device *dev,
+					const struct m2m1shot_devops *ops,
+					const char *suffix, int id,
+					unsigned long timeout_jiffies)
+{
+	struct m2m1shot_device *m21dev;
+	char *name;
+	size_t name_size;
+	int ret = -ENOMEM;
+
+	/* all of these are dereferenced without NULL checks by the core */
+	if (!ops || !ops->init_context || !ops->free_context ||
+			!ops->prepare_format || !ops->prepare_buffer ||
+			!ops->finish_buffer || !ops->device_run ||
+			!ops->timeout_task) {
+		dev_err(dev, "%s: m2m1shot_devops is not provided\n", __func__);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (!suffix) {
+		dev_err(dev, "%s: suffix of node name is not specified\n",
+			__func__);
+		return ERR_PTR(-EINVAL);
+	}
+
+	name_size = M2M1SHOT_DEVNODE_PREFIX_LEN + strlen(suffix) + 1;
+
+	if (id >= 0)
+		name_size += 3; /* instance number: maximum 3 digits */
+
+	name = kmalloc(name_size, GFP_KERNEL);
+	if (!name)
+		return ERR_PTR(-ENOMEM);
+
+	/* scnprintf() truncates safely if id ever needs more than 3 digits */
+	if (id < 0)
+		scnprintf(name, name_size,
+				M2M1SHOT_DEVNODE_PREFIX "%s", suffix);
+	else
+		scnprintf(name, name_size,
+				M2M1SHOT_DEVNODE_PREFIX "%s%d", suffix, id);
+
+	m21dev = kzalloc(sizeof(*m21dev), GFP_KERNEL);
+	if (!m21dev)
+		goto err_name;
+
+	m21dev->misc.minor = MISC_DYNAMIC_MINOR;
+	m21dev->misc.name = name;
+	m21dev->misc.fops = &m2m1shot_fops;
+	ret = misc_register(&m21dev->misc);
+	if (ret)
+		goto err_misc;
+
+	INIT_LIST_HEAD(&m21dev->tasks);
+	INIT_LIST_HEAD(&m21dev->contexts);
+
+	spin_lock_init(&m21dev->lock_task);
+	spin_lock_init(&m21dev->lock_ctx);
+
+	m21dev->dev = dev;
+	m21dev->ops = ops;
+	m21dev->timeout_jiffies = timeout_jiffies;
+
+	return m21dev;
+
+err_misc:
+	kfree(m21dev);
+err_name:
+	kfree(name);	/* was leaked on misc_register() failure */
+
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(m2m1shot_create_device);
+EXPORT_SYMBOL(m2m1shot_create_device);
+
+/*
+ * m2m1shot_destroy_device() - tear down a device made by
+ * m2m1shot_create_device()
+ */
+void m2m1shot_destroy_device(struct m2m1shot_device *m21dev)
+{
+	const char *devname = m21dev->misc.name;
+
+	/* unregister first so no new opens can race with the frees below */
+	misc_deregister(&m21dev->misc);
+	kfree(devname);
+	kfree(m21dev);
+}
+EXPORT_SYMBOL(m2m1shot_destroy_device);
--- /dev/null
+/*
+ * include/media/m2m1shot-helper.h
+ *
+ * Copyright (C) 2014 Samsung Electronics Co., Ltd.
+ *
+ * Contact: Cho KyongHo <pullip.cho@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef _M2M1SHOT_HELPER_H_
+#define _M2M1SHOT_HELPER_H_
+#include <linux/kernel.h>
+#include <linux/dma-buf.h>
+
+#include <media/m2m1shot.h>
+
+/* map the dma_buf attachment of @plane for DMA in direction @dir */
+int m2m1shot_map_dma_buf(struct device *dev,
+			struct m2m1shot_buffer_plane_dma *plane,
+			enum dma_data_direction dir);
+
+/* undo m2m1shot_map_dma_buf() */
+void m2m1shot_unmap_dma_buf(struct device *dev,
+			struct m2m1shot_buffer_plane_dma *plane,
+			enum dma_data_direction dir);
+
+/* obtain a device (IOVA) address for plane @plane_idx of @buf */
+int m2m1shot_dma_addr_map(struct device *dev,
+			struct m2m1shot_buffer_dma *buf,
+			int plane_idx, enum dma_data_direction dir);
+
+/* release the device address obtained by m2m1shot_dma_addr_map() */
+void m2m1shot_dma_addr_unmap(struct device *dev,
+			struct m2m1shot_buffer_dma *buf, int plane_idx);
+
+/* return the DMA address previously stored in @plane */
+static inline dma_addr_t m2m1shot_dma_address(
+				struct m2m1shot_buffer_plane_dma *plane)
+{
+	return plane->dma_addr;
+}
+
+#endif /* _M2M1SHOT_HELPER_H_ */
--- /dev/null
+/*
+ * include/media/m2m1shot.h
+ *
+ * Copyright (C) 2014 Samsung Electronics Co., Ltd.
+ *
+ * Contact: Cho KyongHo <pullip.cho@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef _M2M1SHOT_H_
+#define _M2M1SHOT_H_
+
+#include <linux/kernel.h>
+#include <linux/videodev2.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/dma-direction.h>
+#include <linux/kref.h>
+#include <linux/completion.h>
+
+#include <uapi/linux/m2m1shot.h>
+
+struct m2m1shot_devops;
+struct m2m1shot_task;
+
+/**
+ * struct m2m1shot_device
+ *
+ * @misc	: misc device descriptor for user-kernel interface
+ * @tasks	: the list of the tasks that are to be scheduled
+ * @contexts	: the list of the contexts that is created for this device
+ * @dev		: the client device descriptor
+ * @lock_task	: lock to protect the consistency of @tasks list and
+ *                @current_task
+ * @lock_ctx	: lock to protect the consistency of @contexts list
+ * @timeout_jiffies: timeout jiffies for a task. If a task is not finished
+ *                   until @timeout_jiffies elapsed,
+ *                   m2m1shot_devops.timeout_task() is invoked and the task
+ *                   is canceled. The user will get an error.
+ * @current_task: indicate the task that is currently being processed
+ * @ops		: callback functions that the client device driver must
+ *                implement according to the events.
+ */
+struct m2m1shot_device {
+	struct miscdevice misc;
+	struct list_head tasks;		/* pending tasks for processing */
+	struct list_head contexts;	/* created contexts of this device */
+	struct device *dev;
+	spinlock_t lock_task;		/* lock with irqsave for tasks */
+	spinlock_t lock_ctx;		/* lock for contexts */
+	unsigned long timeout_jiffies;	/* timeout jiffies for a task */
+	struct m2m1shot_task *current_task; /* current working task */
+	const struct m2m1shot_devops *ops;
+};
+
+/**
+ * struct m2m1shot_context - context of tasks
+ *
+ * @node	: node entry to m2m1shot_device.contexts
+ * @mutex	: lock to prevent racing between tasks within the same context
+ * @kref	: usage count of the context not to release the context while
+ *                a task is being processed
+ * @m21dev	: the singleton device instance that the context belongs to
+ * @priv	: storage for the client driver's private per-context data
+ */
+struct m2m1shot_context {
+	struct list_head node;
+	struct mutex mutex;
+	struct kref kref;
+	struct m2m1shot_device *m21dev;
+	void *priv;
+};
+
+/**
+ * enum m2m1shot_state - state of a task
+ *
+ * @M2M1SHOT_BUFSTATE_READY	: Task is verified and scheduled for processing
+ * @M2M1SHOT_BUFSTATE_PROCESSING: Task is being processed by H/W
+ * @M2M1SHOT_BUFSTATE_DONE	: Task is completed
+ * @M2M1SHOT_BUFSTATE_TIMEDOUT	: Task is not completed within the timeout
+ * @M2M1SHOT_BUFSTATE_ERROR	: Task is not processed due to verification
+ *                                failure
+ */
+enum m2m1shot_state {
+	M2M1SHOT_BUFSTATE_READY,
+	M2M1SHOT_BUFSTATE_PROCESSING,
+	M2M1SHOT_BUFSTATE_DONE,
+	M2M1SHOT_BUFSTATE_TIMEDOUT,
+	M2M1SHOT_BUFSTATE_ERROR,
+};
+
+/**
+ * struct m2m1shot_buffer_plane_dma - descriptions of a buffer
+ *
+ * @bytes_used	: the size of the buffer that is accessed by H/W. This is
+ *                filled by the client device driver when
+ *                m2m1shot_devops.prepare_format() is called.
+ * @dmabuf	: pointer to dmabuf descriptor if the buffer type is
+ *                M2M1SHOT_BUFFER_DMABUF.
+ * @attachment	: pointer to dmabuf attachment descriptor if the buffer type
+ *                is M2M1SHOT_BUFFER_DMABUF.
+ * @sgt		: scatter-gather list that describes physical memory
+ *                information of the buffer
+ * @dma_addr	: DMA address that is the address of the buffer in the H/W's
+ *                address space.
+ * @priv	: the client device driver's private data
+ * @offset	: NOTE(review): undocumented in the original — presumably the
+ *                byte offset of the payload within the buffer; confirm with
+ *                the client drivers that set it.
+ */
+struct m2m1shot_buffer_plane_dma {
+	size_t bytes_used;
+	struct dma_buf *dmabuf;
+	struct dma_buf_attachment *attachment;
+	struct sg_table *sgt;
+	dma_addr_t dma_addr;
+	void *priv;
+	off_t offset;
+};
+
+/**
+ * struct m2m1shot_buffer_dma - description of buffers for a task
+ *
+ * @buffer	: pointer to m2m1shot.buf_out or m2m1shot.buf_cap that are
+ *                specified by user
+ * @plane	: descriptions of buffers to pin the buffers while the task
+ *                is processed
+ */
+struct m2m1shot_buffer_dma {
+	/* pointer to m2m1shot_task.task.buf_out/cap */
+	const struct m2m1shot_buffer *buffer;
+	struct m2m1shot_buffer_plane_dma plane[M2M1SHOT_MAX_PLANES];
+};
+
+/**
+ * struct m2m1shot_task - describes a task to process
+ *
+ * @task	: descriptions about the frames and format to process
+ * @task_node	: list entry to m2m1shot_device.tasks
+ * @ctx		: pointer to m2m1shot_context that the task is valid under
+ * @complete	: waiter to finish the task
+ * @dma_buf_out	: descriptions of the output (source) buffers
+ * @dma_buf_cap	: descriptions of the capture (destination) buffers
+ * @state	: state of the task
+ */
+struct m2m1shot_task {
+	struct m2m1shot task;
+	struct list_head task_node;
+	struct m2m1shot_context *ctx;
+	struct completion complete;
+	struct m2m1shot_buffer_dma dma_buf_out;
+	struct m2m1shot_buffer_dma dma_buf_cap;
+	enum m2m1shot_state state;
+};
+
+/**
+ * struct m2m1shot_devops - callbacks implemented by the client driver
+ *
+ * @init_context: [MANDATORY]
+ *	called on creation of struct m2m1shot_context to give a chance
+ *	to the driver for registering the driver's private data.
+ *	New m2m1shot_context is created when user opens a device node
+ *	of m2m1shot.
+ * @free_context: [MANDATORY]
+ *	called on destruction of struct m2m1shot_context to inform
+ *	the driver for unregistering the driver's private data.
+ *	m2m1shot_context is destroyed when a user releases all
+ *	references to the open file of the device node of m2m1shot.
+ * @prepare_format: [MANDATORY]
+ *	called on user's request to process a task. The driver
+ *	receives the format and resolutions of the images to
+ *	process, should return the sizes in bytes and the number of
+ *	buffers(planes) that the driver needs to process the images.
+ * @prepare_operation: [OPTIONAL]
+ *	called after format checking is finished. The drivers can
+ *	check or prepare the followings:
+ *	 - scaling constraint check
+ *	 - constructing private or context data
+ * @prepare_buffer: [MANDATORY]
+ *	called after all size validation are passed. The driver
+ *	should complete all procedures for safe access by H/W
+ *	accelerators to process the given buffers.
+ * @finish_buffer: [MANDATORY]
+ *	called after the task is finished to release all references
+ *	to the buffers. The drivers should release all resources
+ *	related to the buffer. This is called for every buffer that
+ *	was given to @prepare_buffer regardless of whether processing
+ *	the task was successful.
+ * @device_run: [MANDATORY]
+ *	called on the time of the processing the given task. The driver
+ *	should run H/W. The driver does not wait for an IRQ.
+ * @timeout_task: [MANDATORY]
+ *	called when processing of the given task timed out. The
+ *	driver can reset the H/W to cancel the current task.
+ * @custom_ioctl: [OPTIONAL]
+ *	The driver can directly interact with this @custom_ioctl.
+ */
+struct m2m1shot_devops {
+	int (*init_context)(struct m2m1shot_context *ctx);
+	int (*free_context)(struct m2m1shot_context *ctx);
+	/* return value: number of planes */
+	int (*prepare_format)(struct m2m1shot_context *ctx,
+				struct m2m1shot_pix_format *fmt,
+				enum dma_data_direction dir,
+				size_t bytes_used[]);
+	int (*prepare_operation)(struct m2m1shot_context *ctx,
+				struct m2m1shot_task *task);
+	int (*prepare_buffer)(struct m2m1shot_context *ctx,
+				struct m2m1shot_buffer_dma *dma_buffer,
+				int plane,
+				enum dma_data_direction dir);
+	void (*finish_buffer)(struct m2m1shot_context *ctx,
+				struct m2m1shot_buffer_dma *dma_buffer,
+				int plane,
+				enum dma_data_direction dir);
+	int (*device_run)(struct m2m1shot_context *ctx,
+				struct m2m1shot_task *task);
+	void (*timeout_task)(struct m2m1shot_context *ctx,
+				struct m2m1shot_task *task);
+	/* optional */
+	long (*custom_ioctl)(struct m2m1shot_context *ctx,
+				unsigned int cmd, unsigned long arg);
+};
+
+/**
+ * m2m1shot_task_finish - notify that a task is finished and schedule new task
+ *
+ * - m21dev: pointer to struct m2m1shot_device
+ * - task: The task that is finished
+ * - error: true if the task failed, false if it was processed successfully
+ *
+ * This function wakes up the process that is waiting for the completion of
+ * @task. An IRQ handler is the best place to call this function. If @error
+ * is true, the user that requested @task will receive an error.
+ */
+void m2m1shot_task_finish(struct m2m1shot_device *m21dev,
+				struct m2m1shot_task *task, bool error);
+
+/**
+ * m2m1shot_task_cancel - cancel a task and schedule the next task
+ *
+ * - m21dev: pointer to struct m2m1shot_device
+ * - task: The task that is finished
+ * - reason: the reason of canceling the task
+ *
+ * This function is called by the driver that wants to cancel @task and
+ * schedule the next task. Most drivers do not need to call this function.
+ * Keep in mind that this function does not wake up the process that is blocked
+ * for the completion of @task.
+ */
+void m2m1shot_task_cancel(struct m2m1shot_device *m21dev,
+				struct m2m1shot_task *task,
+				enum m2m1shot_state reason);
+
+/**
+ * m2m1shot_create_device - create and initialize constructs for m2m1shot
+ *
+ * - dev: the device that m2m1shot provides services
+ * - ops: callbacks to m2m1shot
+ * - suffix: suffix of device node in /dev after 'm2m1shot_'
+ * - id: device instance number if a device has multiple instance. @id will be
+ *      attached after @suffix. If @id is -1, no instance number is attached to
+ *      @suffix.
+ * - timeout_jiffies: timeout jiffies for a task being processed. For 0,
+ *      m2m1shot waits for completion of a task infinitely.
+ *
+ * Returns the pointer to struct m2m1shot_device on success.
+ * Returns -error on failure.
+ * This function is most likely called in probe() of a driver.
+ */
+struct m2m1shot_device *m2m1shot_create_device(struct device *dev,
+					const struct m2m1shot_devops *ops,
+					const char *suffix, int id,
+					unsigned long timeout_jiffies);
+
+/**
+ * m2m1shot_destroy_device - destroy all constructs for m2m1shot
+ *
+ * m21dev - pointer to struct m2m1shot_device that is returned by
+ *          m2m1shot_create_device()
+ *
+ * The driver that has a valid pointer to struct m2m1shot_device returned by
+ * m2m1shot_create_device() must call m2m1shot_destroy_device() before removing
+ * the driver.
+ */
+void m2m1shot_destroy_device(struct m2m1shot_device *m21dev);
+
+/**
+ * m2m1shot_get_current_task - request the current task under processing
+ *
+ * @m21dev - pointer to struct m2m1shot_device
+ *
+ * Returns the pointer to struct m2m1shot_task that is processed by the device
+ * whose driver is mostly the caller. The returned pointer is valid until
+ * m2m1shot_task_finish() which schedules another task.
+ * NULL is returned if no task is currently processed.
+ */
+static inline struct m2m1shot_task *m2m1shot_get_current_task(
+					struct m2m1shot_device *m21dev)
+{
+	return m21dev->current_task;
+}
+
+/**
+ * m2m1shot_set_dma_address - set DMA address
+ *
+ * @buffer_dma	- the per-task buffer description to update
+ * @plane	- index of the plane whose address is being set
+ * @dma_addr	- the DMA address to store for that plane
+ */
+static inline void m2m1shot_set_dma_address(
+		struct m2m1shot_buffer_dma *buffer_dma,
+		int plane, dma_addr_t dma_addr)
+{
+	buffer_dma->plane[plane].dma_addr = dma_addr;
+}
+
+#endif /* _M2M1SHOT_H_ */
--- /dev/null
+/*
+ * M2M One-shot header file
+ * - Handling H/W accelerators for non-streaming media processing
+ *
+ * Copyright (C) 2014 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Alternatively you can redistribute this file under the terms of the
+ * BSD license as stated below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * 3. The names of its contributors may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _UAPI__M2M1SHOT_H_
+#define _UAPI__M2M1SHOT_H_
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+#include <linux/videodev2.h>
+
+#define M2M1SHOT_MAX_PLANES 3
+
+/*
+ * NOTE(review): this struct is not referenced by any other structure in
+ * this header (m2m1shot_pix_format uses struct v4l2_rect instead) —
+ * confirm whether it is used by out-of-tree callers before removing.
+ */
+struct m2m1shot_rect {
+	__s16 left;
+	__s16 top;
+	__u16 width;
+	__u16 height;
+};
+
+/* image format and geometry of one side (source or destination) */
+struct m2m1shot_pix_format {
+	__u32 fmt;
+	__u32 width;
+	__u32 height;
+	struct v4l2_rect crop;
+};
+
+enum m2m1shot_buffer_type {
+	M2M1SHOT_BUFFER_NONE,	/* no buffer is set */
+	M2M1SHOT_BUFFER_DMABUF,
+	M2M1SHOT_BUFFER_USERPTR,
+};
+
+/*
+ * NOTE(review): size_t and unsigned long differ in width between 32-bit
+ * userspace and a 64-bit kernel; 32-bit callers are handled by the
+ * compat_ioctl translation layer in the driver.
+ */
+struct m2m1shot_buffer_plane {
+	union {
+		__s32 fd;	/* dma-buf fd when type is M2M1SHOT_BUFFER_DMABUF */
+		unsigned long userptr;	/* user VA when type is M2M1SHOT_BUFFER_USERPTR */
+	};
+	size_t len;
+};
+
+struct m2m1shot_buffer {
+	struct m2m1shot_buffer_plane plane[M2M1SHOT_MAX_PLANES];
+	__u8 type;		/* enum m2m1shot_buffer_type */
+	__u8 num_planes;	/* number of valid entries in plane[] */
+};
+
+/* flags for m2m1shot_operation.op */
+#define M2M1SHOT_OP_FLIP_VIRT		(1 << 0)
+#define M2M1SHOT_OP_FLIP_HORI		(1 << 1)
+#define M2M1SHOT_OP_CSC_WIDE		(1 << 8)
+#define M2M1SHOT_OP_CSC_NARROW		(1 << 9)
+#define M2M1SHOT_OP_CSC_601		(1 << 10)
+#define M2M1SHOT_OP_CSC_709		(1 << 11)
+#define M2M1SHOT_OP_PREMULTIPLIED_ALPHA	(1 << 16)
+#define M2M1SHOT_OP_DITHERING		(1 << 17)
+
+struct m2m1shot_operation {
+	__s16 quality_level;
+	__s16 rotate;
+	__u32 op; /* or-ing M2M1SHOT_OP_FLIP_VIRT/HORI and M2M1SHOT_OP_* flags */
+};
+
+/* the complete task description passed with M2M1SHOT_IOC_PROCESS */
+struct m2m1shot {
+	struct m2m1shot_pix_format fmt_out;
+	struct m2m1shot_pix_format fmt_cap;
+	struct m2m1shot_buffer buf_out;
+	struct m2m1shot_buffer buf_cap;
+	struct m2m1shot_operation op;
+	unsigned long reserved[2];
+};
+
+/* driver-private command passed with M2M1SHOT_IOC_CUSTOM */
+struct m2m1shot_custom_data {
+	unsigned int cmd;
+	unsigned long arg;
+};
+
+#define M2M1SHOT_IOC_PROCESS	_IOWR('M', 0, struct m2m1shot)
+#define M2M1SHOT_IOC_CUSTOM	_IOWR('M', 16, struct m2m1shot_custom_data)
+
+#endif /* _UAPI__M2M1SHOT_H_ */