/*
 * drivers/media/m2m1shot-testdev.c
 *
 * Copyright (C) 2014 Samsung Electronics Co., Ltd.
 *
 * Contact: Cho KyongHo <pullip.cho@samsung.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 */

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/uaccess.h>
#include <linux/platform_device.h>
#include <linux/jiffies.h>
#include <linux/videodev2.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/kthread.h>
#include <linux/freezer.h>

#include <media/m2m1shot.h>
#include <media/m2m1shot-helper.h>

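/*
 * Per-device state of the m2m1shot test driver. The worker thread stands in
 * for real hardware: m2m1shot_testdev_device_run() queues a work item on
 * task_list (guarded by lock) and wakes the thread through waitqueue, and
 * the thread completes each task after an artificial delay.
 */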
struct m2m1shot_testdev_drvdata {
	struct m2m1shot_device *m21dev;
	struct task_struct *thread;
	wait_queue_head_t waitqueue;
	struct list_head task_list;
	spinlock_t lock;
};

#define M2M1SHOT_TESTDEV_IOC_TIMEOUT _IOW('T', 0, unsigned long)

struct m2m1shot_testdev_fmt {
	__u32 fmt;
	const char *fmt_name;
	__u8 mbpp[3];		/* bytes * 4 per pixel: should be divided by 4 */
	__u8 planes;
	__u8 buf_mbpp[3];	/* bytes * 4 per pixel: should be divided by 4 */
	__u8 buf_planes;
};

#define TO_MBPP(bpp) ((bpp) << 2)
#define TO_MBPPDIV(bpp_diven, bpp_diver) (((bpp_diven) << 2) / (bpp_diver))
#define TO_BPP(mbpp) ((mbpp) >> 2)
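/*
 * mbpp values are bytes-per-pixel in 1/4-byte fixed point so that the
 * subsampled chroma planes of YUV formats can be expressed without
 * fractions. For YUV4:2:0, for example, the luma plane is TO_MBPP(1) = 4
 * and each chroma plane is TO_MBPPDIV(1, 4) = 1 (0.25 bytes per pixel),
 * so the single-buffer size computed in prepare_format() is
 * TO_BPP((4 + 1 + 1) * width * height), i.e. width * height * 3 / 2 bytes
 * for the usual even dimensions.
 */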

static struct m2m1shot_testdev_fmt m2m1shot_testdev_fmtlist[] = {
	{
		.fmt = V4L2_PIX_FMT_RGB565,
		.fmt_name = "RGB565",
		.mbpp = { TO_MBPP(2), 0, 0},
		.planes = 1,
		.buf_mbpp = { TO_MBPP(2), 0, 0},
		.buf_planes = 1,
	}, {
		.fmt = V4L2_PIX_FMT_BGR32,
		.fmt_name = "BGR32",
		.mbpp = { TO_MBPP(4), 0, 0 },
		.planes = 1,
		.buf_mbpp = { TO_MBPP(4), 0, 0 },
		.buf_planes = 1,
	}, {
		.fmt = V4L2_PIX_FMT_RGB32,
		.fmt_name = "RGB32",
		.mbpp = { TO_MBPP(4), 0, 0 },
		.planes = 1,
		.buf_mbpp = { TO_MBPP(4), 0, 0 },
		.buf_planes = 1,
	}, {
		.fmt = V4L2_PIX_FMT_YUV420,
		.fmt_name = "YUV4:2:0",
		.mbpp = { TO_MBPP(1), TO_MBPPDIV(1, 4), TO_MBPPDIV(1, 4) },
		.planes = 3,
		.buf_mbpp = { TO_MBPP(1) + TO_MBPPDIV(1, 4) + TO_MBPPDIV(1, 4),
				0, 0 },
		.buf_planes = 1,
	}, {
		.fmt = V4L2_PIX_FMT_NV12,
		.fmt_name = "Y/CbCr4:2:0(NV12)",
		.mbpp = { TO_MBPP(1), TO_MBPPDIV(1, 4) + TO_MBPPDIV(1, 4), 0 },
		.planes = 2,
		.buf_mbpp = { TO_MBPP(1) + TO_MBPPDIV(1, 4) + TO_MBPPDIV(1, 4),
				0, 0 },
		.buf_planes = 1,
	}, {
		.fmt = V4L2_PIX_FMT_NV21,
		.fmt_name = "Y/CrCb4:2:0(NV21)",
		.mbpp = { TO_MBPP(1), TO_MBPPDIV(1, 4) + TO_MBPPDIV(1, 4), 0 },
		.planes = 2,
		.buf_mbpp = { TO_MBPP(1) + TO_MBPPDIV(1, 4) + TO_MBPPDIV(1, 4),
				0, 0 },
		.buf_planes = 1,
	}, {
		.fmt = V4L2_PIX_FMT_NV12M,
		.fmt_name = "Y/CbCr4:2:0(NV12M)",
		.mbpp = { TO_MBPP(1), TO_MBPPDIV(1, 4) + TO_MBPPDIV(1, 4), 0 },
		.planes = 2,
		.buf_mbpp = { TO_MBPP(1), TO_MBPPDIV(1, 4) + TO_MBPPDIV(1, 4),
				0 },
		.buf_planes = 2,
	}, {
		.fmt = V4L2_PIX_FMT_NV21M,
		.fmt_name = "Y/CrCb4:2:0(NV21M)",
		.mbpp = { TO_MBPP(1), TO_MBPPDIV(1, 4) + TO_MBPPDIV(1, 4), 0 },
		.planes = 2,
		.buf_mbpp = { TO_MBPP(1), TO_MBPPDIV(1, 4) + TO_MBPPDIV(1, 4),
				0 },
		.buf_planes = 2,
	}, {
		.fmt = V4L2_PIX_FMT_YUV422P,
		.fmt_name = "YUV4:2:2",
		.mbpp = { TO_MBPP(1), TO_MBPPDIV(1, 2), TO_MBPPDIV(1, 2) },
		.planes = 3,
		.buf_mbpp = { TO_MBPP(1) + TO_MBPPDIV(1, 2) + TO_MBPPDIV(1, 2),
				0, 0 },
		.buf_planes = 1,
	}, {
		.fmt = V4L2_PIX_FMT_NV16,
		.fmt_name = "Y/CbCr4:2:2(NV16)",
		.mbpp = { TO_MBPP(1), TO_MBPPDIV(1, 2) + TO_MBPPDIV(1, 2), 0 },
		.planes = 2,
		.buf_mbpp = { TO_MBPP(1) + TO_MBPPDIV(1, 2) + TO_MBPPDIV(1, 2),
				0, 0 },
		.buf_planes = 1,
	}, {
		.fmt = V4L2_PIX_FMT_NV61,
		.fmt_name = "Y/CrCb4:2:2(NV61)",
		.mbpp = { TO_MBPP(1), TO_MBPPDIV(1, 2) + TO_MBPPDIV(1, 2), 0 },
		.planes = 2,
		.buf_mbpp = { TO_MBPP(1) + TO_MBPPDIV(1, 2) + TO_MBPPDIV(1, 2),
				0, 0 },
		.buf_planes = 1,
	}, {
		.fmt = V4L2_PIX_FMT_YUYV,
		.fmt_name = "YUV4:2:2(YUYV)",
		.mbpp = { TO_MBPP(1) + TO_MBPPDIV(1, 2) + TO_MBPPDIV(1, 2),
				0, 0},
		.planes = 1,
		.buf_mbpp = { TO_MBPP(1) + TO_MBPPDIV(1, 2) + TO_MBPPDIV(1, 2),
				0, 0 },
		.buf_planes = 1,
	}, {
		.fmt = V4L2_PIX_FMT_YUV444,
		.fmt_name = "YUV4:4:4",
		.mbpp = { TO_MBPP(1), TO_MBPP(1), TO_MBPP(1) },
		.planes = 3,
		.buf_mbpp = { TO_MBPP(1) + TO_MBPP(1) + TO_MBPP(1), 0, 0 },
		.buf_planes = 1,
	},
};

static int m2m1shot_testdev_init_context(struct m2m1shot_context *ctx)
{
	ctx->priv = NULL; /* no timeout generated */

	return 0;
}

static int m2m1shot_testdev_free_context(struct m2m1shot_context *ctx)
{
	return 0;
}

static int m2m1shot_testdev_prepare_format(struct m2m1shot_context *ctx,
					struct m2m1shot_pix_format *fmt,
					enum dma_data_direction dir,
					size_t bytes_used[])
{
	size_t i, j;

	for (i = 0; i < ARRAY_SIZE(m2m1shot_testdev_fmtlist); i++) {
		if (fmt->fmt == m2m1shot_testdev_fmtlist[i].fmt)
			break;
	}

	if (i == ARRAY_SIZE(m2m1shot_testdev_fmtlist)) {
		dev_err(ctx->m21dev->dev, "Unknown format %#x\n", fmt->fmt);
		return -EINVAL;
	}

	for (j = 0; j < m2m1shot_testdev_fmtlist[i].buf_planes; j++)
		bytes_used[j] = TO_BPP(m2m1shot_testdev_fmtlist[i].buf_mbpp[j] *
					fmt->width * fmt->height);

	return m2m1shot_testdev_fmtlist[i].buf_planes;
}

static int m2m1shot_testdev_prepare_buffer(struct m2m1shot_context *ctx,
					struct m2m1shot_buffer_dma *dma_buffer,
					int plane,
					enum dma_data_direction dir)
{
	return m2m1shot_map_dma_buf(ctx->m21dev->dev,
				&dma_buffer->plane[plane], dir);
}

static void m2m1shot_testdev_finish_buffer(struct m2m1shot_context *ctx,
					struct m2m1shot_buffer_dma *dma_buffer,
					int plane,
					enum dma_data_direction dir)
{
	m2m1shot_unmap_dma_buf(ctx->m21dev->dev,
				&dma_buffer->plane[plane], dir);
}

struct m2m1shot_testdev_work {
	struct list_head node;
	struct m2m1shot_context *ctx;
	struct m2m1shot_task *task;
};

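/*
 * Worker thread that emulates the M2M hardware: it sleeps until device_run()
 * queues a work item, dequeues it, waits 20 ms to mimic processing time and
 * then reports the task as successfully finished to the m2m1shot core. The
 * BUG_ON() checks assert that the core never hands the driver more than one
 * task at a time.
 */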
static int m2m1shot_testdev_worker(void *data)
{
	struct m2m1shot_testdev_drvdata *drvdata = data;

	while (true) {
		struct m2m1shot_testdev_work *work;
		struct m2m1shot_task *task;

		wait_event_freezable(drvdata->waitqueue,
					!list_empty(&drvdata->task_list));

		spin_lock(&drvdata->lock);
		BUG_ON(list_empty(&drvdata->task_list));
		work = list_first_entry(&drvdata->task_list,
					struct m2m1shot_testdev_work, node);
		list_del(&work->node);
		BUG_ON(!list_empty(&drvdata->task_list));
		spin_unlock(&drvdata->lock);

		msleep(20);

		task = m2m1shot_get_current_task(drvdata->m21dev);
		BUG_ON(!task);

		BUG_ON(task != work->task);
		BUG_ON(task->ctx != work->ctx);

		kfree(work);

		m2m1shot_task_finish(drvdata->m21dev, task, true);
	}

	return 0;
}

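/*
 * device_run() is called by the m2m1shot core once a task's buffers are
 * prepared. When timeout emulation is armed for the context (ctx->priv is
 * set through M2M1SHOT_TESTDEV_IOC_TIMEOUT), the task is deliberately left
 * unfinished so that the core's task timeout (500 ms here) is expected to
 * fire and invoke timeout_task(). Otherwise the task is handed to the
 * worker thread above.
 */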
static int m2m1shot_testdev_device_run(struct m2m1shot_context *ctx,
					struct m2m1shot_task *task)
{
	struct m2m1shot_testdev_work *work;
	struct device *dev = ctx->m21dev->dev;
	struct m2m1shot_testdev_drvdata *drvdata = dev_get_drvdata(dev);

	if (ctx->priv) /* timeout generated */
		return 0;

	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (!work) {
		pr_err("%s: Failed to allocate work struct\n", __func__);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&work->node);
	work->ctx = ctx;
	work->task = task;

	spin_lock(&drvdata->lock);
	BUG_ON(!list_empty(&drvdata->task_list));
	list_add_tail(&work->node, &drvdata->task_list);
	spin_unlock(&drvdata->lock);

	if (current != drvdata->thread)
		wake_up(&drvdata->waitqueue);

	return 0;
}

static void m2m1shot_testdev_timeout_task(struct m2m1shot_context *ctx,
						struct m2m1shot_task *task)
{
	dev_info(ctx->m21dev->dev, "%s: Timeout occurred\n", __func__);
}

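/*
 * Driver-private ioctl that arms or disarms timeout emulation for the
 * calling context. The argument is a pointer to an unsigned long; any
 * non-zero value arms the emulation. A minimal user-space sketch (the
 * device node path depends on how the m2m1shot core registers the device
 * and is only illustrative here):
 *
 *	int fd = open("/dev/m2m1shot_testdev", O_RDWR);
 *	unsigned long timeout = 1;	// non-zero: arm timeout emulation
 *	ioctl(fd, M2M1SHOT_TESTDEV_IOC_TIMEOUT, &timeout);
 */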
static long m2m1shot_testdev_ioctl(struct m2m1shot_context *ctx,
					unsigned int cmd, unsigned long arg)
{
	unsigned long timeout;

	if (cmd != M2M1SHOT_TESTDEV_IOC_TIMEOUT) {
		dev_err(ctx->m21dev->dev, "%s: Unknown ioctl cmd %#x\n",
			__func__, cmd);
		return -ENOSYS;
	}

	if (get_user(timeout, (unsigned long __user *)arg)) {
		dev_err(ctx->m21dev->dev,
			"%s: Failed to read user data\n", __func__);
		return -EFAULT;
	}

	if (timeout)
		ctx->priv = (void *)1; /* timeout generated */
	else
		ctx->priv = NULL; /* timeout not generated */

	dev_info(ctx->m21dev->dev, "%s: Timeout generation is %s\n",
		 __func__, timeout ? "set" : "unset");

	return 0;
}

static const struct m2m1shot_devops m2m1shot_testdev_ops = {
	.init_context = m2m1shot_testdev_init_context,
	.free_context = m2m1shot_testdev_free_context,
	.prepare_format = m2m1shot_testdev_prepare_format,
	.prepare_buffer = m2m1shot_testdev_prepare_buffer,
	.finish_buffer = m2m1shot_testdev_finish_buffer,
	.device_run = m2m1shot_testdev_device_run,
	.timeout_task = m2m1shot_testdev_timeout_task,
	.custom_ioctl = m2m1shot_testdev_ioctl,
};

static struct platform_device m2m1shot_testdev_pdev = {
	.name = "m2m1shot_testdev",
};

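/*
 * Module initialization: register a dummy platform device to carry the
 * driver data, create the m2m1shot device with a 500 ms task timeout, and
 * start the worker thread that emulates the hardware. The worker touches
 * the wait queue and task list as soon as it runs, so those are set up
 * before kthread_run().
 */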
static int m2m1shot_testdev_init(void)
{
	int ret = 0;
	struct m2m1shot_device *m21dev;
	struct m2m1shot_testdev_drvdata *drvdata;

	drvdata = kzalloc(sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata) {
		pr_err("%s: Failed to allocate drvdata\n", __func__);
		return -ENOMEM;
	}

	ret = platform_device_register(&m2m1shot_testdev_pdev);
	if (ret) {
		pr_err("%s: Failed to register platform device\n", __func__);
		goto err_register;
	}

	m21dev = m2m1shot_create_device(&m2m1shot_testdev_pdev.dev,
		&m2m1shot_testdev_ops, "testdev", -1, msecs_to_jiffies(500));
	if (IS_ERR(m21dev)) {
		pr_err("%s: Failed to create m2m1shot device\n", __func__);
		ret = PTR_ERR(m21dev);
		goto err_create;
	}

	/* the worker thread uses these fields, so set them up before it starts */
	drvdata->m21dev = m21dev;
	INIT_LIST_HEAD(&drvdata->task_list);
	spin_lock_init(&drvdata->lock);
	init_waitqueue_head(&drvdata->waitqueue);

	drvdata->thread = kthread_run(m2m1shot_testdev_worker, drvdata,
					"%s", "m2m1shot_testdev_worker");
	if (IS_ERR(drvdata->thread)) {
		pr_err("%s: Failed to create worker thread\n", __func__);
		ret = PTR_ERR(drvdata->thread);
		goto err_worker;
	}

	dev_set_drvdata(&m2m1shot_testdev_pdev.dev, drvdata);

	return 0;

err_worker:
	m2m1shot_destroy_device(m21dev);
err_create:
	platform_device_unregister(&m2m1shot_testdev_pdev);
err_register:
	kfree(drvdata);
	return ret;
}
module_init(m2m1shot_testdev_init);