/*
 * drivers/media/m2m1shot.c
 *
 * Copyright (C) 2014 Samsung Electronics Co., Ltd.
 *
 * Contact: Cho KyongHo <pullip.cho@samsung.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/compat.h>	/* compat_uint_t etc. used under CONFIG_COMPAT */

#include <media/m2m1shot.h>

#define M2M1SHOT_DEVNODE_PREFIX "m2m1shot_"
#define M2M1SHOT_DEVNODE_PREFIX_LEN 9

static void m2m1shot_task_schedule(struct m2m1shot_device *m21dev)
{
	struct m2m1shot_task *task;
	unsigned long flags;

next_task:
	spin_lock_irqsave(&m21dev->lock_task, flags);
	if (list_empty(&m21dev->tasks)) {
		/* No task to run */
		spin_unlock_irqrestore(&m21dev->lock_task, flags);
		return;
	}

	if (m21dev->current_task) {
		/* H/W is working */
		spin_unlock_irqrestore(&m21dev->lock_task, flags);
		return;
	}

	task = list_first_entry(&m21dev->tasks,
				struct m2m1shot_task, task_node);
	list_del(&task->task_node);

	m21dev->current_task = task;

	spin_unlock_irqrestore(&m21dev->lock_task, flags);

	task->state = M2M1SHOT_BUFSTATE_PROCESSING;

	if (m21dev->ops->device_run(task->ctx, task)) {
		task->state = M2M1SHOT_BUFSTATE_ERROR;

		spin_lock_irqsave(&m21dev->lock_task, flags);
		m21dev->current_task = NULL;
		spin_unlock_irqrestore(&m21dev->lock_task, flags);

		complete(&task->complete);

		goto next_task;
	}
}

void m2m1shot_task_finish(struct m2m1shot_device *m21dev,
			  struct m2m1shot_task *task, bool success)
{
	unsigned long flags;

	spin_lock_irqsave(&m21dev->lock_task, flags);
	BUG_ON(!m21dev->current_task);
	BUG_ON(m21dev->current_task != task);
	m21dev->current_task = NULL;
	spin_unlock_irqrestore(&m21dev->lock_task, flags);

	task->state = success ?
		M2M1SHOT_BUFSTATE_DONE : M2M1SHOT_BUFSTATE_ERROR;

	complete(&task->complete);

	m2m1shot_task_schedule(m21dev);
}
EXPORT_SYMBOL(m2m1shot_task_finish);
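
/*
 * A minimal sketch of how a client driver reports completion of the task
 * that its .device_run() callback started. All my_* names below are
 * hypothetical; the driver is assumed to have stored its m2m1shot_device
 * handle and the task passed to .device_run() in its own private structure:
 *
 *	static irqreturn_t my_m2m_irq_handler(int irq, void *priv)
 *	{
 *		struct my_m2m_dev *my_dev = priv;
 *		struct m2m1shot_task *task = my_dev->current_task;
 *		bool success = my_hw_clear_irq(my_dev);
 *
 *		m2m1shot_task_finish(my_dev->m21dev, task, success);
 *
 *		return IRQ_HANDLED;
 *	}
 *
 * m2m1shot_task_finish() wakes up the sleeping m2m1shot_process() and
 * schedules the next pending task, so the handler needs no further
 * bookkeeping.
 */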

void m2m1shot_task_cancel(struct m2m1shot_device *m21dev,
			  struct m2m1shot_task *task,
			  enum m2m1shot_state reason)
{
	unsigned long flags;

	spin_lock_irqsave(&m21dev->lock_task, flags);
	m21dev->current_task = NULL;
	spin_unlock_irqrestore(&m21dev->lock_task, flags);

	task->state = reason;

	complete(&task->complete);

	m2m1shot_task_schedule(m21dev);
}
EXPORT_SYMBOL(m2m1shot_task_cancel);

static void m2m1shot_buffer_put_dma_buf_plane(
			struct m2m1shot_buffer_plane_dma *plane)
{
	dma_buf_detach(plane->dmabuf, plane->attachment);
	dma_buf_put(plane->dmabuf);
	plane->dmabuf = NULL;
}

static void m2m1shot_buffer_put_dma_buf(struct m2m1shot_buffer *buffer,
					struct m2m1shot_buffer_dma *dma_buffer)
{
	int i;

	for (i = 0; i < buffer->num_planes; i++)
		m2m1shot_buffer_put_dma_buf_plane(&dma_buffer->plane[i]);
}

static int m2m1shot_buffer_get_dma_buf(struct m2m1shot_device *m21dev,
					struct m2m1shot_buffer *buffer,
					struct m2m1shot_buffer_dma *dma_buffer)
{
	struct m2m1shot_buffer_plane_dma *plane;
	int i, ret;

	for (i = 0; i < buffer->num_planes; i++) {
		plane = &dma_buffer->plane[i];

		plane->dmabuf = dma_buf_get(buffer->plane[i].fd);
		if (IS_ERR(plane->dmabuf)) {
			dev_err(m21dev->dev,
				"%s: failed to get dmabuf of fd %d\n",
				__func__, buffer->plane[i].fd);
			ret = PTR_ERR(plane->dmabuf);
			goto err;
		}

		if (plane->dmabuf->size < plane->bytes_used) {
			dev_err(m21dev->dev,
				"%s: needs %zx bytes but dmabuf is %zx\n",
				__func__, plane->bytes_used,
				plane->dmabuf->size);
			ret = -EINVAL;
			goto err;
		}

		plane->attachment = dma_buf_attach(plane->dmabuf, m21dev->dev);
		if (IS_ERR(plane->attachment)) {
			dev_err(m21dev->dev,
				"%s: Failed to attach dmabuf\n", __func__);
			ret = PTR_ERR(plane->attachment);
			goto err;
		}
	}

	return 0;
err:
	if (!IS_ERR(plane->dmabuf)) /* release dmabuf of the last iteration */
		dma_buf_put(plane->dmabuf);

	while (i-- > 0)
		m2m1shot_buffer_put_dma_buf_plane(&dma_buffer->plane[i]);

	return ret;
}

static int m2m1shot_prepare_get_buffer(struct m2m1shot_context *ctx,
					struct m2m1shot_buffer *buffer,
					struct m2m1shot_buffer_dma *dma_buffer,
					enum dma_data_direction dir)
{
	struct m2m1shot_device *m21dev = ctx->m21dev;
	int i, ret;

	for (i = 0; i < buffer->num_planes; i++) {
		struct m2m1shot_buffer_plane_dma *plane;

		plane = &dma_buffer->plane[i];

		if (plane->bytes_used == 0) {
			/*
			 * bytes_used == 0 means that the size of the plane
			 * cannot be decided by the driver because it depends
			 * on the content of the buffer. The best example of
			 * such a buffer is a JPEG-encoded stream that is
			 * about to be decompressed.
			 */
			plane->bytes_used = buffer->plane[i].len;
		} else if (buffer->plane[i].len < plane->bytes_used) {
			dev_err(m21dev->dev,
				"%s: needs %zx bytes but %zx is given\n",
				__func__, plane->bytes_used,
				buffer->plane[i].len);
			return -EINVAL;
		}
	}

	if ((buffer->type != M2M1SHOT_BUFFER_USERPTR) &&
			(buffer->type != M2M1SHOT_BUFFER_DMABUF)) {
		dev_err(m21dev->dev, "%s: unknown buffer type %u\n",
			__func__, buffer->type);
		return -EINVAL;
	}

	if (buffer->type == M2M1SHOT_BUFFER_DMABUF) {
		ret = m2m1shot_buffer_get_dma_buf(m21dev, buffer, dma_buffer);
		if (ret)
			return ret;
	}

	dma_buffer->buffer = buffer;

	for (i = 0; i < buffer->num_planes; i++) {
		/* the callback function should fill the 'dma_addr' field */
		ret = m21dev->ops->prepare_buffer(ctx, dma_buffer, i, dir);
		if (ret)
			goto err;
	}

	return 0;
err:
	dev_err(m21dev->dev, "%s: Failed to prepare plane %u\n", __func__, i);

	while (i-- > 0)
		m21dev->ops->finish_buffer(ctx, dma_buffer, i, dir);

	if (buffer->type == M2M1SHOT_BUFFER_DMABUF)
		m2m1shot_buffer_put_dma_buf(buffer, dma_buffer);

	return ret;
}
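
/*
 * A minimal sketch of a client driver's .prepare_buffer() callback, whose
 * job (see the loop above) is to fill plane->dma_addr for the requested
 * plane. This dma-buf-only sketch is an assumption: a real driver also has
 * to keep the returned sg_table somewhere so that its .finish_buffer()
 * callback can unmap it, and must handle M2M1SHOT_BUFFER_USERPTR buffers:
 *
 *	static int my_prepare_buffer(struct m2m1shot_context *ctx,
 *				     struct m2m1shot_buffer_dma *dma_buffer,
 *				     int plane_idx,
 *				     enum dma_data_direction dir)
 *	{
 *		struct m2m1shot_buffer_plane_dma *plane =
 *					&dma_buffer->plane[plane_idx];
 *		struct sg_table *sgt;
 *
 *		sgt = dma_buf_map_attachment(plane->attachment, dir);
 *		if (IS_ERR(sgt))
 *			return PTR_ERR(sgt);
 *
 *		plane->dma_addr = sg_dma_address(sgt->sgl);
 *
 *		return 0;
 *	}
 */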

static void m2m1shot_finish_buffer(struct m2m1shot_device *m21dev,
				   struct m2m1shot_context *ctx,
				   struct m2m1shot_buffer *buffer,
				   struct m2m1shot_buffer_dma *dma_buffer,
				   enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < buffer->num_planes; i++)
		m21dev->ops->finish_buffer(ctx, dma_buffer, i, dir);

	if (buffer->type == M2M1SHOT_BUFFER_DMABUF)
		m2m1shot_buffer_put_dma_buf(buffer, dma_buffer);
}

static int m2m1shot_prepare_format(struct m2m1shot_device *m21dev,
				   struct m2m1shot_context *ctx,
				   struct m2m1shot_task *task)
{
	int i, ret;
	size_t out_sizes[M2M1SHOT_MAX_PLANES] = { 0 };
	size_t cap_sizes[M2M1SHOT_MAX_PLANES] = { 0 };

	if (task->task.buf_out.num_planes > M2M1SHOT_MAX_PLANES) {
		dev_err(m21dev->dev, "Invalid number of output planes %u.\n",
			task->task.buf_out.num_planes);
		return -EINVAL;
	}

	if (task->task.buf_cap.num_planes > M2M1SHOT_MAX_PLANES) {
		dev_err(m21dev->dev, "Invalid number of capture planes %u.\n",
			task->task.buf_cap.num_planes);
		return -EINVAL;
	}

	ret = m21dev->ops->prepare_format(ctx, &task->task.fmt_out,
					  DMA_TO_DEVICE, out_sizes);
	if (ret < 0)
		return ret;

	if (task->task.buf_out.num_planes != ret) {
		dev_err(m21dev->dev,
			"%s: needs %u output planes but %u is given\n",
			__func__, ret, task->task.buf_out.num_planes);
		return -EINVAL;
	}

	for (i = 0; i < task->task.buf_out.num_planes; i++)
		task->dma_buf_out.plane[i].bytes_used = out_sizes[i];

	ret = m21dev->ops->prepare_format(ctx, &task->task.fmt_cap,
					  DMA_FROM_DEVICE, cap_sizes);
	if (ret < 0)
		return ret;

	if (task->task.buf_cap.num_planes < ret) {
		dev_err(m21dev->dev,
			"%s: needs %u capture planes but %u is given\n",
			__func__, ret, task->task.buf_cap.num_planes);
		return -EINVAL;
	}

	for (i = 0; i < task->task.buf_cap.num_planes; i++)
		task->dma_buf_cap.plane[i].bytes_used = cap_sizes[i];

	if (m21dev->ops->prepare_operation) {
		ret = m21dev->ops->prepare_operation(ctx, task);
		if (ret)
			return ret;
	}

	return 0;
}
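
/*
 * A minimal sketch of a client driver's .prepare_format() callback under
 * the contract enforced above: it returns the number of planes required
 * by the given format (or a negative errno on failure) and stores the
 * required payload of each plane in bytes_used[]. The single-plane RGB32
 * restriction below is hypothetical:
 *
 *	static int my_prepare_format(struct m2m1shot_context *ctx,
 *				     struct m2m1shot_pix_format *fmt,
 *				     enum dma_data_direction dir,
 *				     size_t bytes_used[])
 *	{
 *		if (fmt->fmt != V4L2_PIX_FMT_RGB32)
 *			return -EINVAL;
 *
 *		bytes_used[0] = fmt->width * fmt->height * 4;
 *
 *		return 1;
 *	}
 */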

static int m2m1shot_prepare_task(struct m2m1shot_device *m21dev,
				 struct m2m1shot_context *ctx,
				 struct m2m1shot_task *task)
{
	int ret;

	ret = m2m1shot_prepare_format(m21dev, ctx, task);
	if (ret)
		return ret;

	ret = m2m1shot_prepare_get_buffer(ctx, &task->task.buf_out,
					  &task->dma_buf_out, DMA_TO_DEVICE);
	if (ret) {
		dev_err(m21dev->dev, "%s: Failed to get output buffer\n",
			__func__);
		return ret;
	}

	ret = m2m1shot_prepare_get_buffer(ctx, &task->task.buf_cap,
					  &task->dma_buf_cap, DMA_FROM_DEVICE);
	if (ret) {
		m2m1shot_finish_buffer(m21dev, ctx,
			&task->task.buf_out, &task->dma_buf_out, DMA_TO_DEVICE);
		dev_err(m21dev->dev, "%s: Failed to get capture buffer\n",
			__func__);
		return ret;
	}

	return 0;
}

static void m2m1shot_finish_task(struct m2m1shot_device *m21dev,
				 struct m2m1shot_context *ctx,
				 struct m2m1shot_task *task)
{
	m2m1shot_finish_buffer(m21dev, ctx,
			&task->task.buf_cap, &task->dma_buf_cap,
			DMA_FROM_DEVICE);
	m2m1shot_finish_buffer(m21dev, ctx,
			&task->task.buf_out, &task->dma_buf_out, DMA_TO_DEVICE);
}

static void m2m1shot_destroy_context(struct kref *kref)
{
	struct m2m1shot_context *ctx = container_of(kref,
					struct m2m1shot_context, kref);

	ctx->m21dev->ops->free_context(ctx);

	spin_lock(&ctx->m21dev->lock_ctx);
	list_del(&ctx->node);
	spin_unlock(&ctx->m21dev->lock_ctx);

	kfree(ctx);
}

static int m2m1shot_process(struct m2m1shot_context *ctx,
			    struct m2m1shot_task *task)
{
	struct m2m1shot_device *m21dev = ctx->m21dev;
	unsigned long flags;
	int ret;

	INIT_LIST_HEAD(&task->task_node);
	init_completion(&task->complete);

	kref_get(&ctx->kref);

	mutex_lock(&ctx->mutex);

	ret = m2m1shot_prepare_task(m21dev, ctx, task);
	if (ret)
		goto err;

	task->ctx = ctx;
	task->state = M2M1SHOT_BUFSTATE_READY;

	spin_lock_irqsave(&m21dev->lock_task, flags);
	list_add_tail(&task->task_node, &m21dev->tasks);
	spin_unlock_irqrestore(&m21dev->lock_task, flags);

	m2m1shot_task_schedule(m21dev);

	if (m21dev->timeout_jiffies != -1) {
		unsigned long elapsed;

		elapsed = wait_for_completion_timeout(&task->complete,
						      m21dev->timeout_jiffies);
		if (!elapsed) { /* timed out */
			m2m1shot_task_cancel(m21dev, task,
					     M2M1SHOT_BUFSTATE_TIMEDOUT);

			m21dev->ops->timeout_task(ctx, task);

			m2m1shot_finish_task(m21dev, ctx, task);

			dev_notice(m21dev->dev, "%s: %u msecs timed out\n",
				   __func__,
				   jiffies_to_msecs(m21dev->timeout_jiffies));
			ret = -ETIMEDOUT;
			goto err;
		}
	} else {
		wait_for_completion(&task->complete);
	}

	BUG_ON(task->state == M2M1SHOT_BUFSTATE_READY);

	m2m1shot_finish_task(m21dev, ctx, task);
err:
	mutex_unlock(&ctx->mutex);

	kref_put(&ctx->kref, m2m1shot_destroy_context);

	if (ret)
		return ret;

	return (task->state == M2M1SHOT_BUFSTATE_DONE) ? 0 : -EINVAL;
}

static int m2m1shot_open(struct inode *inode, struct file *filp)
{
	struct m2m1shot_device *m21dev = container_of(filp->private_data,
						struct m2m1shot_device, misc);
	struct m2m1shot_context *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	INIT_LIST_HEAD(&ctx->node);
	kref_init(&ctx->kref);
	mutex_init(&ctx->mutex);

	ctx->m21dev = m21dev;

	spin_lock(&m21dev->lock_ctx);
	list_add_tail(&ctx->node, &m21dev->contexts);
	spin_unlock(&m21dev->lock_ctx);

	filp->private_data = ctx;

	ret = m21dev->ops->init_context(ctx);
	if (ret) {
		/*
		 * kref_put() is deliberately not called here so that
		 * .free_context() is not invoked for a context that failed
		 * to initialize. Unlink the context before freeing it.
		 */
		spin_lock(&m21dev->lock_ctx);
		list_del(&ctx->node);
		spin_unlock(&m21dev->lock_ctx);
		kfree(ctx);
	}

	return ret;
}

static int m2m1shot_release(struct inode *inode, struct file *filp)
{
	struct m2m1shot_context *ctx = filp->private_data;

	kref_put(&ctx->kref, m2m1shot_destroy_context);

	return 0;
}

static long m2m1shot_ioctl(struct file *filp,
			   unsigned int cmd, unsigned long arg)
{
	struct m2m1shot_context *ctx = filp->private_data;
	struct m2m1shot_device *m21dev = ctx->m21dev;

	switch (cmd) {
	case M2M1SHOT_IOC_PROCESS:
	{
		struct m2m1shot_task data;
		int ret;

		memset(&data, 0, sizeof(data));

		if (copy_from_user(&data.task,
				   (void __user *)arg, sizeof(data.task))) {
			dev_err(m21dev->dev,
				"%s: Failed to read userdata\n", __func__);
			return -EFAULT;
		}

		/*
		 * m2m1shot_process() does not wake up
		 * until the given task finishes
		 */
		ret = m2m1shot_process(ctx, &data);

		if (copy_to_user((void __user *)arg, &data.task,
				 sizeof(data.task))) {
			dev_err(m21dev->dev,
				"%s: Failed to write userdata\n", __func__);
			return -EFAULT;
		}

		return ret;
	}
	case M2M1SHOT_IOC_CUSTOM:
	{
		struct m2m1shot_custom_data data;

		if (!m21dev->ops->custom_ioctl) {
			dev_err(m21dev->dev,
				"%s: custom_ioctl not defined\n", __func__);
			return -ENOSYS;
		}

		if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
			dev_err(m21dev->dev,
				"%s: Failed to read custom data\n", __func__);
			return -EFAULT;
		}

		return m21dev->ops->custom_ioctl(ctx, data.cmd, data.arg);
	}
	default:
		dev_err(m21dev->dev, "%s: Unknown ioctl cmd %x\n",
			__func__, cmd);
		return -EINVAL;
	}

	return 0;
}
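
/*
 * A minimal user-space sketch of M2M1SHOT_IOC_PROCESS, assuming a device
 * registered with the suffix "scaler" and two dma-buf fds obtained
 * elsewhere (the node name and all values below are hypothetical):
 *
 *	struct m2m1shot task = {
 *		.buf_out.type = M2M1SHOT_BUFFER_DMABUF,
 *		.buf_out.num_planes = 1,
 *		.buf_out.plane[0].fd = src_fd,
 *		.buf_cap.type = M2M1SHOT_BUFFER_DMABUF,
 *		.buf_cap.num_planes = 1,
 *		.buf_cap.plane[0].fd = dst_fd,
 *	};
 *	int fd = open("/dev/m2m1shot_scaler", O_RDWR);
 *
 *	if (ioctl(fd, M2M1SHOT_IOC_PROCESS, &task) < 0)
 *		perror("M2M1SHOT_IOC_PROCESS");
 *
 * The ioctl blocks in m2m1shot_process() until the task finishes, fails
 * or times out; the updated struct m2m1shot is copied back on return.
 */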

#ifdef CONFIG_COMPAT
struct compat_m2m1shot_rect {
	compat_short_t left;
	compat_short_t top;
	compat_ushort_t width;
	compat_ushort_t height;
};

struct compat_m2m1shot_pix_format {
	compat_uint_t fmt;
	compat_uint_t width;
	compat_uint_t height;
	struct v4l2_rect crop;
};

struct compat_m2m1shot_buffer_plane {
	union {
		compat_int_t fd;
		compat_ulong_t userptr;
	};
	compat_size_t len;
};

struct compat_m2m1shot_buffer {
	struct compat_m2m1shot_buffer_plane plane[M2M1SHOT_MAX_PLANES];
	__u8 type;
	__u8 num_planes;
};

struct compat_m2m1shot_operation {
	compat_short_t quality_level;
	compat_short_t rotate;
	compat_uint_t op; /* or-ing M2M1SHOT_FLIP_VIRT/HORI */
};

struct compat_m2m1shot {
	struct compat_m2m1shot_pix_format fmt_out;
	struct compat_m2m1shot_pix_format fmt_cap;
	struct compat_m2m1shot_buffer buf_out;
	struct compat_m2m1shot_buffer buf_cap;
	struct compat_m2m1shot_operation op;
	compat_ulong_t reserved[2];
};

struct compat_m2m1shot_custom_data {
	compat_uint_t cmd;
	compat_ulong_t arg;
};

#define COMPAT_M2M1SHOT_IOC_PROCESS	_IOWR('M', 0, struct compat_m2m1shot)
#define COMPAT_M2M1SHOT_IOC_CUSTOM \
	_IOWR('M', 16, struct compat_m2m1shot_custom_data)

static long m2m1shot_compat_ioctl32(struct file *filp,
				    unsigned int cmd, unsigned long arg)
{
	struct m2m1shot_context *ctx = filp->private_data;
	struct m2m1shot_device *m21dev = ctx->m21dev;

	switch (cmd) {
	case COMPAT_M2M1SHOT_IOC_PROCESS:
	{
		struct compat_m2m1shot data;
		struct m2m1shot_task task;
		int i, ret;

		memset(&task, 0, sizeof(task));

		if (copy_from_user(&data, compat_ptr(arg), sizeof(data))) {
			dev_err(m21dev->dev,
				"%s: Failed to read userdata\n", __func__);
			return -EFAULT;
		}

		if ((data.buf_out.num_planes > M2M1SHOT_MAX_PLANES) ||
				(data.buf_cap.num_planes > M2M1SHOT_MAX_PLANES)) {
			dev_err(m21dev->dev,
				"%s: Invalid plane number (out %u/cap %u)\n",
				__func__, data.buf_out.num_planes,
				data.buf_cap.num_planes);
			return -EINVAL;
		}

		task.task.fmt_out.fmt = data.fmt_out.fmt;
		task.task.fmt_out.width = data.fmt_out.width;
		task.task.fmt_out.height = data.fmt_out.height;
		task.task.fmt_out.crop.left = data.fmt_out.crop.left;
		task.task.fmt_out.crop.top = data.fmt_out.crop.top;
		task.task.fmt_out.crop.width = data.fmt_out.crop.width;
		task.task.fmt_out.crop.height = data.fmt_out.crop.height;
		task.task.fmt_cap.fmt = data.fmt_cap.fmt;
		task.task.fmt_cap.width = data.fmt_cap.width;
		task.task.fmt_cap.height = data.fmt_cap.height;
		task.task.fmt_cap.crop.left = data.fmt_cap.crop.left;
		task.task.fmt_cap.crop.top = data.fmt_cap.crop.top;
		task.task.fmt_cap.crop.width = data.fmt_cap.crop.width;
		task.task.fmt_cap.crop.height = data.fmt_cap.crop.height;
		for (i = 0; i < data.buf_out.num_planes; i++) {
			task.task.buf_out.plane[i].len =
				data.buf_out.plane[i].len;
			if (data.buf_out.type == M2M1SHOT_BUFFER_DMABUF)
				task.task.buf_out.plane[i].fd =
					data.buf_out.plane[i].fd;
			else /* data.buf_out.type == M2M1SHOT_BUFFER_USERPTR */
				task.task.buf_out.plane[i].userptr =
					data.buf_out.plane[i].userptr;
		}
		task.task.buf_out.type = data.buf_out.type;
		task.task.buf_out.num_planes = data.buf_out.num_planes;
		for (i = 0; i < data.buf_cap.num_planes; i++) {
			task.task.buf_cap.plane[i].len =
				data.buf_cap.plane[i].len;
			if (data.buf_cap.type == M2M1SHOT_BUFFER_DMABUF)
				task.task.buf_cap.plane[i].fd =
					data.buf_cap.plane[i].fd;
			else /* data.buf_cap.type == M2M1SHOT_BUFFER_USERPTR */
				task.task.buf_cap.plane[i].userptr =
					data.buf_cap.plane[i].userptr;
		}
		task.task.buf_cap.type = data.buf_cap.type;
		task.task.buf_cap.num_planes = data.buf_cap.num_planes;
		task.task.op.quality_level = data.op.quality_level;
		task.task.op.rotate = data.op.rotate;
		task.task.op.op = data.op.op;
		task.task.reserved[0] = data.reserved[0];
		task.task.reserved[1] = data.reserved[1];

		/*
		 * m2m1shot_process() does not wake up
		 * until the given task finishes
		 */
		ret = m2m1shot_process(ctx, &task);
		if (ret) {
			dev_err(m21dev->dev,
				"%s: Failed to process m2m1shot task\n",
				__func__);
			return ret;
		}

		data.fmt_out.fmt = task.task.fmt_out.fmt;
		data.fmt_out.width = task.task.fmt_out.width;
		data.fmt_out.height = task.task.fmt_out.height;
		data.fmt_out.crop.left = task.task.fmt_out.crop.left;
		data.fmt_out.crop.top = task.task.fmt_out.crop.top;
		data.fmt_out.crop.width = task.task.fmt_out.crop.width;
		data.fmt_out.crop.height = task.task.fmt_out.crop.height;
		data.fmt_cap.fmt = task.task.fmt_cap.fmt;
		data.fmt_cap.width = task.task.fmt_cap.width;
		data.fmt_cap.height = task.task.fmt_cap.height;
		data.fmt_cap.crop.left = task.task.fmt_cap.crop.left;
		data.fmt_cap.crop.top = task.task.fmt_cap.crop.top;
		data.fmt_cap.crop.width = task.task.fmt_cap.crop.width;
		data.fmt_cap.crop.height = task.task.fmt_cap.crop.height;
		for (i = 0; i < task.task.buf_out.num_planes; i++) {
			data.buf_out.plane[i].len =
				task.task.buf_out.plane[i].len;
			if (task.task.buf_out.type == M2M1SHOT_BUFFER_DMABUF)
				data.buf_out.plane[i].fd =
					task.task.buf_out.plane[i].fd;
			else /* buf_out.type == M2M1SHOT_BUFFER_USERPTR */
				data.buf_out.plane[i].userptr =
					task.task.buf_out.plane[i].userptr;
		}
		data.buf_out.type = task.task.buf_out.type;
		data.buf_out.num_planes = task.task.buf_out.num_planes;
		for (i = 0; i < task.task.buf_cap.num_planes; i++) {
			data.buf_cap.plane[i].len =
				task.task.buf_cap.plane[i].len;
			if (task.task.buf_cap.type == M2M1SHOT_BUFFER_DMABUF)
				data.buf_cap.plane[i].fd =
					task.task.buf_cap.plane[i].fd;
			else /* buf_cap.type == M2M1SHOT_BUFFER_USERPTR */
				data.buf_cap.plane[i].userptr =
					task.task.buf_cap.plane[i].userptr;
		}
		data.buf_cap.type = task.task.buf_cap.type;
		data.buf_cap.num_planes = task.task.buf_cap.num_planes;
		data.op.quality_level = task.task.op.quality_level;
		data.op.rotate = task.task.op.rotate;
		data.op.op = task.task.op.op;
		data.reserved[0] = (compat_ulong_t)task.task.reserved[0];
		data.reserved[1] = (compat_ulong_t)task.task.reserved[1];

		if (copy_to_user(compat_ptr(arg), &data, sizeof(data))) {
			dev_err(m21dev->dev,
				"%s: Failed to write userdata\n", __func__);
			return -EFAULT;
		}

		return 0;
	}
	case COMPAT_M2M1SHOT_IOC_CUSTOM:
	{
		struct compat_m2m1shot_custom_data data;

		if (!m21dev->ops->custom_ioctl) {
			dev_err(m21dev->dev,
				"%s: custom_ioctl not defined\n", __func__);
			return -ENOSYS;
		}

		if (copy_from_user(&data, compat_ptr(arg), sizeof(data))) {
			dev_err(m21dev->dev,
				"%s: Failed to read custom data\n", __func__);
			return -EFAULT;
		}

		return m21dev->ops->custom_ioctl(ctx, data.cmd, data.arg);
	}
	default:
		dev_err(m21dev->dev, "%s: Unknown ioctl cmd %x\n",
			__func__, cmd);
		return -EINVAL;
	}

	return 0;
}
#endif

static const struct file_operations m2m1shot_fops = {
	.owner = THIS_MODULE,
	.open = m2m1shot_open,
	.release = m2m1shot_release,
	.unlocked_ioctl = m2m1shot_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = m2m1shot_compat_ioctl32,
#endif
};

struct m2m1shot_device *m2m1shot_create_device(struct device *dev,
					const struct m2m1shot_devops *ops,
					const char *suffix, int id,
					unsigned long timeout_jiffies)
{
	struct m2m1shot_device *m21dev;
	char *name;
	size_t name_size;
	int ret = -ENOMEM;

	/* TODO: ops callback check */
	if (!ops || !ops->prepare_format || !ops->prepare_buffer) {
		dev_err(dev, "%s: m2m1shot_devops is not provided\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	if (!suffix) {
		dev_err(dev, "%s: suffix of node name is not specified\n",
			__func__);
		return ERR_PTR(-EINVAL);
	}

	name_size = M2M1SHOT_DEVNODE_PREFIX_LEN + strlen(suffix) + 1;

	if (id >= 0)
		name_size += 3; /* instance number: maximum 3 digits */

	name = kmalloc(name_size, GFP_KERNEL);
	if (!name)
		return ERR_PTR(-ENOMEM);

	if (id < 0)
		scnprintf(name, name_size,
			  M2M1SHOT_DEVNODE_PREFIX "%s", suffix);
	else
		scnprintf(name, name_size,
			  M2M1SHOT_DEVNODE_PREFIX "%s%d", suffix, id);

	m21dev = kzalloc(sizeof(*m21dev), GFP_KERNEL);
	if (!m21dev)
		goto err_m21dev;

	m21dev->misc.minor = MISC_DYNAMIC_MINOR;
	m21dev->misc.name = name;
	m21dev->misc.fops = &m2m1shot_fops;
	ret = misc_register(&m21dev->misc);
	if (ret)
		goto err_misc;

	INIT_LIST_HEAD(&m21dev->tasks);
	INIT_LIST_HEAD(&m21dev->contexts);

	spin_lock_init(&m21dev->lock_task);
	spin_lock_init(&m21dev->lock_ctx);

	m21dev->dev = dev;
	m21dev->ops = ops;
	m21dev->timeout_jiffies = timeout_jiffies;

	return m21dev;

err_misc:
	kfree(m21dev);
err_m21dev:
	kfree(name);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL(m2m1shot_create_device);
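
/*
 * A minimal sketch of how a client driver registers itself, assuming
 * hypothetical callbacks my_init_context(), my_free_context(),
 * my_prepare_format(), my_prepare_buffer(), my_finish_buffer(),
 * my_device_run() and my_timeout_task():
 *
 *	static const struct m2m1shot_devops my_m2m1shot_ops = {
 *		.init_context = my_init_context,
 *		.free_context = my_free_context,
 *		.prepare_format = my_prepare_format,
 *		.prepare_buffer = my_prepare_buffer,
 *		.finish_buffer = my_finish_buffer,
 *		.device_run = my_device_run,
 *		.timeout_task = my_timeout_task,
 *	};
 *
 *	my_dev->m21dev = m2m1shot_create_device(&pdev->dev,
 *					&my_m2m1shot_ops, "scaler", -1,
 *					msecs_to_jiffies(500));
 *	if (IS_ERR(my_dev->m21dev))
 *		return PTR_ERR(my_dev->m21dev);
 *
 * This creates the device node /dev/m2m1shot_scaler; passing an id >= 0
 * appends the instance number to the node name instead.
 */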

void m2m1shot_destroy_device(struct m2m1shot_device *m21dev)
{
	misc_deregister(&m21dev->misc);
	kfree(m21dev->misc.name);
	kfree(m21dev);
}
EXPORT_SYMBOL(m2m1shot_destroy_device);