/*
 * drivers/media/m2m1shot.c
 *
 * Copyright (C) 2014 Samsung Electronics Co., Ltd.
 *
 * Contact: Cho KyongHo <pullip.cho@samsung.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/compat.h>

#include <media/m2m1shot.h>

#define M2M1SHOT_DEVNODE_PREFIX "m2m1shot_"
#define M2M1SHOT_DEVNODE_PREFIX_LEN 9

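/*
 * Task scheduling: m2m1shot runs exactly one task on the hardware at a
 * time. Pending tasks are queued on m21dev->tasks and the task that
 * currently owns the hardware is pointed to by m21dev->current_task,
 * both protected by m21dev->lock_task.
 */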
static void m2m1shot_task_schedule(struct m2m1shot_device *m21dev)
{
	struct m2m1shot_task *task;
	unsigned long flags;

next_task:
	spin_lock_irqsave(&m21dev->lock_task, flags);
	if (list_empty(&m21dev->tasks)) {
		/* no task to schedule */
		spin_unlock_irqrestore(&m21dev->lock_task, flags);
		return;
	}

	if (m21dev->current_task) {
		/* H/W is busy */
		spin_unlock_irqrestore(&m21dev->lock_task, flags);
		return;
	}

	task = list_first_entry(&m21dev->tasks,
			struct m2m1shot_task, task_node);
	list_del(&task->task_node);

	m21dev->current_task = task;

	spin_unlock_irqrestore(&m21dev->lock_task, flags);

	task->state = M2M1SHOT_BUFSTATE_PROCESSING;

	if (m21dev->ops->device_run(task->ctx, task)) {
		task->state = M2M1SHOT_BUFSTATE_ERROR;

		spin_lock_irqsave(&m21dev->lock_task, flags);
		m21dev->current_task = NULL;
		spin_unlock_irqrestore(&m21dev->lock_task, flags);

		complete(&task->complete);

		goto next_task;
	}
}

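/**
 * m2m1shot_task_finish() - report completion of the current task
 * @m21dev: the m2m1shot device
 * @task: the task that the hardware has finished processing
 * @success: false if the hardware reported a processing error
 *
 * Called by the client driver (usually from its IRQ handler) when the
 * hardware finishes @task. Wakes up the waiter in m2m1shot_process()
 * and schedules the next pending task, if any.
 */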
void m2m1shot_task_finish(struct m2m1shot_device *m21dev,
			  struct m2m1shot_task *task, bool success)
{
	unsigned long flags;

	spin_lock_irqsave(&m21dev->lock_task, flags);
	BUG_ON(!m21dev->current_task);
	BUG_ON(m21dev->current_task != task);
	m21dev->current_task = NULL;
	spin_unlock_irqrestore(&m21dev->lock_task, flags);

	task->state = success ?
			M2M1SHOT_BUFSTATE_DONE : M2M1SHOT_BUFSTATE_ERROR;

	complete(&task->complete);

	m2m1shot_task_schedule(m21dev);
}
EXPORT_SYMBOL(m2m1shot_task_finish);

void m2m1shot_task_cancel(struct m2m1shot_device *m21dev,
			  struct m2m1shot_task *task,
			  enum m2m1shot_state reason)
{
	unsigned long flags;

	spin_lock_irqsave(&m21dev->lock_task, flags);
	m21dev->current_task = NULL;
	spin_unlock_irqrestore(&m21dev->lock_task, flags);

	task->state = reason;

	complete(&task->complete);

	m2m1shot_task_schedule(m21dev);
}
EXPORT_SYMBOL(m2m1shot_task_cancel);

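/*
 * Buffer import helpers: a buffer plane given by user space is either a
 * dma-buf fd (M2M1SHOT_BUFFER_DMABUF) or a user pointer
 * (M2M1SHOT_BUFFER_USERPTR). Both paths end up with a dma-buf attachment
 * stored in struct m2m1shot_buffer_plane_dma.
 */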
static void m2m1shot_buffer_put_dma_buf_plane(
			struct m2m1shot_buffer_plane_dma *plane)
{
	dma_buf_detach(plane->dmabuf, plane->attachment);
	dma_buf_put(plane->dmabuf);
	plane->dmabuf = NULL;
}

static void m2m1shot_buffer_put_dma_buf(struct m2m1shot_buffer *buffer,
					struct m2m1shot_buffer_dma *dma_buffer)
{
	int i;

	for (i = 0; i < buffer->num_planes; i++)
		m2m1shot_buffer_put_dma_buf_plane(&dma_buffer->plane[i]);
}

static int m2m1shot_buffer_get_dma_buf(struct m2m1shot_device *m21dev,
					struct m2m1shot_buffer *buffer,
					struct m2m1shot_buffer_dma *dma_buffer)
{
	struct m2m1shot_buffer_plane_dma *plane;
	int i, ret;

	for (i = 0; i < buffer->num_planes; i++) {
		plane = &dma_buffer->plane[i];

		plane->dmabuf = dma_buf_get(buffer->plane[i].fd);
		if (IS_ERR(plane->dmabuf)) {
			dev_err(m21dev->dev,
				"%s: failed to get dmabuf of fd %d\n",
				__func__, buffer->plane[i].fd);
			ret = PTR_ERR(plane->dmabuf);
			goto err;
		}

		if (plane->dmabuf->size < plane->bytes_used) {
			dev_err(m21dev->dev,
				"%s: needs %zx bytes but dmabuf is %zx\n",
				__func__, plane->bytes_used,
				plane->dmabuf->size);
			ret = -EINVAL;
			goto err;
		}

		plane->attachment = dma_buf_attach(plane->dmabuf, m21dev->dev);
		if (IS_ERR(plane->attachment)) {
			dev_err(m21dev->dev,
				"%s: Failed to attach dmabuf\n", __func__);
			ret = PTR_ERR(plane->attachment);
			goto err;
		}
	}

	return 0;
err:
	if (!IS_ERR(plane->dmabuf)) /* release dmabuf of the last iteration */
		dma_buf_put(plane->dmabuf);

	while (i-- > 0)
		m2m1shot_buffer_put_dma_buf_plane(&dma_buffer->plane[i]);

	return ret;
}

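/*
 * Userptr handling: instead of pinning anonymous pages, the given address
 * range is required to be mapped from a dma-buf file. The VMA containing
 * the address is looked up and the dma-buf behind vma->vm_file is imported
 * like an M2M1SHOT_BUFFER_DMABUF plane.
 */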
static struct dma_buf *m2m1shot_buffer_check_userptr(
		struct m2m1shot_device *m21dev, unsigned long start, size_t len,
		off_t *out_offset)
{
	struct dma_buf *dmabuf = NULL;
	struct vm_area_struct *vma;

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, start);
	if (!vma || (start < vma->vm_start)) {
		dev_err(m21dev->dev, "%s: Incorrect user buffer @ %#lx/%#zx\n",
			__func__, start, len);
		dmabuf = ERR_PTR(-EINVAL);
		goto finish;
	}

	/* the range must be mapped from a dma-buf file */
	if (vma->vm_file)
		dmabuf = get_dma_buf_file(vma->vm_file);
	if (!dmabuf) {
		dmabuf = ERR_PTR(-EINVAL);
		goto finish;
	}

	*out_offset = start - vma->vm_start;
finish:
	up_read(&current->mm->mmap_sem);

	return dmabuf;
}

static int m2m1shot_buffer_get_userptr(struct m2m1shot_device *m21dev,
					struct m2m1shot_buffer *buffer,
					struct m2m1shot_buffer_dma *dma_buffer,
					int write)
{
	int i, ret = 0;
	struct dma_buf *dmabuf;
	off_t offset;

	for (i = 0; i < buffer->num_planes; i++) {
		dmabuf = m2m1shot_buffer_check_userptr(m21dev,
				buffer->plane[i].userptr, buffer->plane[i].len,
				&offset);
		if (IS_ERR(dmabuf)) {
			ret = PTR_ERR(dmabuf);
			goto err;
		}

		if (dmabuf->size < dma_buffer->plane[i].bytes_used) {
			dev_err(m21dev->dev,
				"%s: needs %zu bytes but dmabuf is %zu\n",
				__func__,
				dma_buffer->plane[i].bytes_used,
				dmabuf->size);
			dma_buf_put(dmabuf);
			ret = -EINVAL;
			goto err;
		}

		dma_buffer->plane[i].dmabuf = dmabuf;
		dma_buffer->plane[i].attachment = dma_buf_attach(
					dmabuf, m21dev->dev);
		dma_buffer->plane[i].offset = offset;
		if (IS_ERR(dma_buffer->plane[i].attachment)) {
			dev_err(m21dev->dev,
				"%s: Failed to attach dmabuf\n", __func__);
			ret = PTR_ERR(dma_buffer->plane[i].attachment);
			dma_buf_put(dmabuf);
			goto err;
		}
	}

	return 0;
err:
	while (i-- > 0)
		m2m1shot_buffer_put_dma_buf_plane(&dma_buffer->plane[i]);

	return ret;
}

static void m2m1shot_buffer_put_userptr(struct m2m1shot_buffer *buffer,
					struct m2m1shot_buffer_dma *dma_buffer,
					int write)
{
	int i;

	for (i = 0; i < buffer->num_planes; i++)
		if (dma_buffer->plane[i].dmabuf)
			m2m1shot_buffer_put_dma_buf_plane(
					&dma_buffer->plane[i]);
}

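/*
 * Prepare a buffer for a task: decide the size of each plane, import the
 * planes as dma-bufs and let the client driver map them for DMA (the
 * .prepare_buffer() callback must fill the 'dma_addr' of each plane).
 */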
static int m2m1shot_prepare_get_buffer(struct m2m1shot_context *ctx,
					struct m2m1shot_buffer *buffer,
					struct m2m1shot_buffer_dma *dma_buffer,
					enum dma_data_direction dir)
{
	struct m2m1shot_device *m21dev = ctx->m21dev;
	int i, ret;

	for (i = 0; i < buffer->num_planes; i++) {
		struct m2m1shot_buffer_plane_dma *plane;

		plane = &dma_buffer->plane[i];

		if (plane->bytes_used == 0) {
			/*
			 * bytes_used = 0 means that the size of the plane is
			 * not able to be decided by the driver because it is
			 * dependent upon the content in the buffer.
			 * The best example of such a buffer is a JPEG
			 * encoded stream for decompression.
			 */
			plane->bytes_used = buffer->plane[i].len;
		} else if (buffer->plane[i].len < plane->bytes_used) {
			dev_err(m21dev->dev,
				"%s: needs %zx bytes but %zx is given\n",
				__func__, plane->bytes_used,
				buffer->plane[i].len);
			return -EINVAL;
		}
	}

	if ((buffer->type != M2M1SHOT_BUFFER_USERPTR) &&
			(buffer->type != M2M1SHOT_BUFFER_DMABUF)) {
		dev_err(m21dev->dev, "%s: unknown buffer type %u\n",
			__func__, buffer->type);
		return -EINVAL;
	}

	if (buffer->type == M2M1SHOT_BUFFER_DMABUF)
		ret = m2m1shot_buffer_get_dma_buf(m21dev, buffer, dma_buffer);
	else
		ret = m2m1shot_buffer_get_userptr(m21dev, buffer, dma_buffer,
					(dir == DMA_TO_DEVICE) ? 0 : 1);

	if (ret)
		return ret;

	dma_buffer->buffer = buffer;

	for (i = 0; i < buffer->num_planes; i++) {
		/* the callback function should fill 'dma_addr' field */
		ret = m21dev->ops->prepare_buffer(ctx, dma_buffer, i, dir);
		if (ret)
			goto err;
	}

	return 0;
err:
	dev_err(m21dev->dev, "%s: Failed to prepare plane %u\n", __func__, i);

	while (i-- > 0)
		m21dev->ops->finish_buffer(ctx, dma_buffer, i, dir);

	if (buffer->type == M2M1SHOT_BUFFER_DMABUF)
		m2m1shot_buffer_put_dma_buf(buffer, dma_buffer);
	else
		m2m1shot_buffer_put_userptr(buffer, dma_buffer,
					(dir == DMA_TO_DEVICE) ? 0 : 1);

	return ret;
}

static void m2m1shot_finish_buffer(struct m2m1shot_device *m21dev,
				   struct m2m1shot_context *ctx,
				   struct m2m1shot_buffer *buffer,
				   struct m2m1shot_buffer_dma *dma_buffer,
				   enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < buffer->num_planes; i++)
		m21dev->ops->finish_buffer(ctx, dma_buffer, i, dir);

	if (buffer->type == M2M1SHOT_BUFFER_DMABUF)
		m2m1shot_buffer_put_dma_buf(buffer, dma_buffer);
	else
		m2m1shot_buffer_put_userptr(buffer, dma_buffer,
					(dir == DMA_TO_DEVICE) ? 0 : 1);
}

static int m2m1shot_prepare_format(struct m2m1shot_device *m21dev,
				   struct m2m1shot_context *ctx,
				   struct m2m1shot_task *task)
{
	int i, ret;
	size_t out_sizes[M2M1SHOT_MAX_PLANES] = { 0 };
	size_t cap_sizes[M2M1SHOT_MAX_PLANES] = { 0 };

	if (task->task.buf_out.num_planes > M2M1SHOT_MAX_PLANES) {
		dev_err(m21dev->dev, "Invalid number of output planes %u.\n",
			task->task.buf_out.num_planes);
		return -EINVAL;
	}

	if (task->task.buf_cap.num_planes > M2M1SHOT_MAX_PLANES) {
		dev_err(m21dev->dev, "Invalid number of capture planes %u.\n",
			task->task.buf_cap.num_planes);
		return -EINVAL;
	}

	/* .prepare_format() returns the number of planes the format needs */
	ret = m21dev->ops->prepare_format(ctx, &task->task.fmt_out,
					DMA_TO_DEVICE, out_sizes);
	if (ret < 0)
		return ret;

	if (task->task.buf_out.num_planes != ret) {
		dev_err(m21dev->dev,
			"%s: needs %u output planes but %u is given\n",
			__func__, ret, task->task.buf_out.num_planes);
		return -EINVAL;
	}

	for (i = 0; i < task->task.buf_out.num_planes; i++)
		task->dma_buf_out.plane[i].bytes_used = out_sizes[i];

	ret = m21dev->ops->prepare_format(ctx, &task->task.fmt_cap,
					DMA_FROM_DEVICE, cap_sizes);
	if (ret < 0)
		return ret;

	if (task->task.buf_cap.num_planes < ret) {
		dev_err(m21dev->dev,
			"%s: needs %u capture planes but %u is given\n",
			__func__, ret, task->task.buf_cap.num_planes);
		return -EINVAL;
	}

	for (i = 0; i < task->task.buf_cap.num_planes; i++)
		task->dma_buf_cap.plane[i].bytes_used = cap_sizes[i];

	if (m21dev->ops->prepare_operation) {
		ret = m21dev->ops->prepare_operation(ctx, task);
		if (ret)
			return ret;
	}

	return 0;
}

static int m2m1shot_prepare_task(struct m2m1shot_device *m21dev,
				 struct m2m1shot_context *ctx,
				 struct m2m1shot_task *task)
{
	int ret;

	ret = m2m1shot_prepare_format(m21dev, ctx, task);
	if (ret)
		return ret;

	ret = m2m1shot_prepare_get_buffer(ctx, &task->task.buf_out,
				&task->dma_buf_out, DMA_TO_DEVICE);
	if (ret) {
		dev_err(m21dev->dev, "%s: Failed to get output buffer\n",
			__func__);
		return ret;
	}

	ret = m2m1shot_prepare_get_buffer(ctx, &task->task.buf_cap,
				&task->dma_buf_cap, DMA_FROM_DEVICE);
	if (ret) {
		m2m1shot_finish_buffer(m21dev, ctx,
			&task->task.buf_out, &task->dma_buf_out, DMA_TO_DEVICE);
		dev_err(m21dev->dev, "%s: Failed to get capture buffer\n",
			__func__);
		return ret;
	}

	return 0;
}

static void m2m1shot_finish_task(struct m2m1shot_device *m21dev,
				 struct m2m1shot_context *ctx,
				 struct m2m1shot_task *task)
{
	m2m1shot_finish_buffer(m21dev, ctx,
			&task->task.buf_cap, &task->dma_buf_cap,
			DMA_FROM_DEVICE);
	m2m1shot_finish_buffer(m21dev, ctx,
			&task->task.buf_out, &task->dma_buf_out, DMA_TO_DEVICE);
}

static void m2m1shot_destroy_context(struct kref *kref)
{
	struct m2m1shot_context *ctx = container_of(kref,
					struct m2m1shot_context, kref);

	ctx->m21dev->ops->free_context(ctx);

	spin_lock(&ctx->m21dev->lock_ctx);
	list_del(&ctx->node);
	spin_unlock(&ctx->m21dev->lock_ctx);

	kfree(ctx);
}

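/*
 * m2m1shot_process() implements the synchronous "one shot" model: it
 * prepares the task, queues it, kicks the scheduler and then sleeps on
 * task->complete until the client driver reports completion, or until
 * the optional per-device timeout expires.
 */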
static int m2m1shot_process(struct m2m1shot_context *ctx,
			    struct m2m1shot_task *task)
{
	struct m2m1shot_device *m21dev = ctx->m21dev;
	unsigned long flags;
	int ret;

	INIT_LIST_HEAD(&task->task_node);
	init_completion(&task->complete);

	kref_get(&ctx->kref);

	mutex_lock(&ctx->mutex);

	ret = m2m1shot_prepare_task(m21dev, ctx, task);
	if (ret)
		goto err;

	task->ctx = ctx;
	task->state = M2M1SHOT_BUFSTATE_READY;

	spin_lock_irqsave(&m21dev->lock_task, flags);
	list_add_tail(&task->task_node, &m21dev->tasks);
	spin_unlock_irqrestore(&m21dev->lock_task, flags);

	m2m1shot_task_schedule(m21dev);

	if (m21dev->timeout_jiffies != -1) {
		unsigned long elapsed;

		elapsed = wait_for_completion_timeout(&task->complete,
					m21dev->timeout_jiffies);
		if (!elapsed) { /* timed out */
			m2m1shot_task_cancel(m21dev, task,
					M2M1SHOT_BUFSTATE_TIMEDOUT);

			m21dev->ops->timeout_task(ctx, task);

			m2m1shot_finish_task(m21dev, ctx, task);

			dev_notice(m21dev->dev, "%s: %u msecs timed out\n",
				__func__,
				jiffies_to_msecs(m21dev->timeout_jiffies));

			ret = -ETIMEDOUT;
			goto err;
		}
	} else {
		wait_for_completion(&task->complete);
	}

	BUG_ON(task->state == M2M1SHOT_BUFSTATE_READY);

	m2m1shot_finish_task(m21dev, ctx, task);
err:
	mutex_unlock(&ctx->mutex);

	kref_put(&ctx->kref, m2m1shot_destroy_context);

	if (ret)
		return ret;

	return (task->state == M2M1SHOT_BUFSTATE_DONE) ? 0 : -EINVAL;
}

static int m2m1shot_open(struct inode *inode, struct file *filp)
{
	struct m2m1shot_device *m21dev = container_of(filp->private_data,
						struct m2m1shot_device, misc);
	struct m2m1shot_context *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	INIT_LIST_HEAD(&ctx->node);
	kref_init(&ctx->kref);
	mutex_init(&ctx->mutex);

	ctx->m21dev = m21dev;

	spin_lock(&m21dev->lock_ctx);
	list_add_tail(&ctx->node, &m21dev->contexts);
	spin_unlock(&m21dev->lock_ctx);

	filp->private_data = ctx;

	ret = m21dev->ops->init_context(ctx);
	if (ret) {
		/* kref_put() is skipped so that .free_context() is not called */
		spin_lock(&m21dev->lock_ctx);
		list_del(&ctx->node);
		spin_unlock(&m21dev->lock_ctx);

		kfree(ctx);
	}

	return ret;
}

static int m2m1shot_release(struct inode *inode, struct file *filp)
{
	struct m2m1shot_context *ctx = filp->private_data;

	kref_put(&ctx->kref, m2m1shot_destroy_context);

	return 0;
}

static long m2m1shot_ioctl(struct file *filp,
			   unsigned int cmd, unsigned long arg)
{
	struct m2m1shot_context *ctx = filp->private_data;
	struct m2m1shot_device *m21dev = ctx->m21dev;

	switch (cmd) {
	case M2M1SHOT_IOC_PROCESS:
	{
		struct m2m1shot_task data;
		int ret;

		memset(&data, 0, sizeof(data));

		if (copy_from_user(&data.task,
				(void __user *)arg, sizeof(data.task))) {
			dev_err(m21dev->dev,
				"%s: Failed to read userdata\n", __func__);
			return -EFAULT;
		}

		/*
		 * m2m1shot_process() does not wake up
		 * until the given task finishes
		 */
		ret = m2m1shot_process(ctx, &data);

		if (copy_to_user((void __user *)arg, &data.task,
				sizeof(data.task))) {
			dev_err(m21dev->dev,
				"%s: Failed to write userdata\n", __func__);
			return -EFAULT;
		}

		return ret;
	}
	case M2M1SHOT_IOC_CUSTOM:
	{
		struct m2m1shot_custom_data data;

		if (!m21dev->ops->custom_ioctl) {
			dev_err(m21dev->dev,
				"%s: custom_ioctl not defined\n", __func__);
			return -ENOSYS;
		}

		if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
			dev_err(m21dev->dev,
				"%s: Failed to read custom data\n", __func__);
			return -EFAULT;
		}

		return m21dev->ops->custom_ioctl(ctx, data.cmd, data.arg);
	}
	default:
		dev_err(m21dev->dev, "%s: Unknown ioctl cmd %x\n",
			__func__, cmd);
		return -EINVAL;
	}

	return 0;
}

#ifdef CONFIG_COMPAT

struct compat_m2m1shot_rect {
	compat_short_t left;
	compat_short_t top;
	compat_ushort_t width;
	compat_ushort_t height;
};

struct compat_m2m1shot_pix_format {
	compat_uint_t fmt;
	compat_uint_t width;
	compat_uint_t height;
	struct v4l2_rect crop;
};

struct compat_m2m1shot_buffer_plane {
	union {
		compat_int_t fd;
		compat_ulong_t userptr;
	};
	compat_size_t len;
};

struct compat_m2m1shot_buffer {
	struct compat_m2m1shot_buffer_plane plane[M2M1SHOT_MAX_PLANES];
	__u8 type;
	__u8 num_planes;
};

struct compat_m2m1shot_operation {
	compat_short_t quality_level;
	compat_short_t rotate;
	compat_uint_t op; /* or-ing M2M1SHOT_FLIP_VIRT/HORI */
};

struct compat_m2m1shot {
	struct compat_m2m1shot_pix_format fmt_out;
	struct compat_m2m1shot_pix_format fmt_cap;
	struct compat_m2m1shot_buffer buf_out;
	struct compat_m2m1shot_buffer buf_cap;
	struct compat_m2m1shot_operation op;
	compat_ulong_t reserved[2];
};

struct compat_m2m1shot_custom_data {
	compat_uint_t cmd;
	compat_ulong_t arg;
};

#define COMPAT_M2M1SHOT_IOC_PROCESS	_IOWR('M', 0, struct compat_m2m1shot)
#define COMPAT_M2M1SHOT_IOC_CUSTOM \
		_IOWR('M', 16, struct compat_m2m1shot_custom_data)

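/*
 * 32-bit compat path: struct m2m1shot contains pointers and longs whose
 * sizes differ between 32-bit user space and a 64-bit kernel, so the
 * whole structure is repacked field by field in both directions.
 */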
static long m2m1shot_compat_ioctl32(struct file *filp,
				    unsigned int cmd, unsigned long arg)
{
	struct m2m1shot_context *ctx = filp->private_data;
	struct m2m1shot_device *m21dev = ctx->m21dev;

	switch (cmd) {
	case COMPAT_M2M1SHOT_IOC_PROCESS:
	{
		struct compat_m2m1shot data;
		struct m2m1shot_task task;
		int i, ret;

		memset(&task, 0, sizeof(task));

		if (copy_from_user(&data, compat_ptr(arg), sizeof(data))) {
			dev_err(m21dev->dev,
				"%s: Failed to read userdata\n", __func__);
			return -EFAULT;
		}

		if ((data.buf_out.num_planes > M2M1SHOT_MAX_PLANES) ||
			(data.buf_cap.num_planes > M2M1SHOT_MAX_PLANES)) {
			dev_err(m21dev->dev,
				"%s: Invalid plane number (out %u/cap %u)\n",
				__func__, data.buf_out.num_planes,
				data.buf_cap.num_planes);
			return -EINVAL;
		}

		task.task.fmt_out.fmt = data.fmt_out.fmt;
		task.task.fmt_out.width = data.fmt_out.width;
		task.task.fmt_out.height = data.fmt_out.height;
		task.task.fmt_out.crop.left = data.fmt_out.crop.left;
		task.task.fmt_out.crop.top = data.fmt_out.crop.top;
		task.task.fmt_out.crop.width = data.fmt_out.crop.width;
		task.task.fmt_out.crop.height = data.fmt_out.crop.height;
		task.task.fmt_cap.fmt = data.fmt_cap.fmt;
		task.task.fmt_cap.width = data.fmt_cap.width;
		task.task.fmt_cap.height = data.fmt_cap.height;
		task.task.fmt_cap.crop.left = data.fmt_cap.crop.left;
		task.task.fmt_cap.crop.top = data.fmt_cap.crop.top;
		task.task.fmt_cap.crop.width = data.fmt_cap.crop.width;
		task.task.fmt_cap.crop.height = data.fmt_cap.crop.height;
		for (i = 0; i < data.buf_out.num_planes; i++) {
			task.task.buf_out.plane[i].len =
						data.buf_out.plane[i].len;
			if (data.buf_out.type == M2M1SHOT_BUFFER_DMABUF)
				task.task.buf_out.plane[i].fd =
						data.buf_out.plane[i].fd;
			else /* data.buf_out.type == M2M1SHOT_BUFFER_USERPTR */
				task.task.buf_out.plane[i].userptr =
						data.buf_out.plane[i].userptr;
		}
		task.task.buf_out.type = data.buf_out.type;
		task.task.buf_out.num_planes = data.buf_out.num_planes;
		for (i = 0; i < data.buf_cap.num_planes; i++) {
			task.task.buf_cap.plane[i].len =
						data.buf_cap.plane[i].len;
			if (data.buf_cap.type == M2M1SHOT_BUFFER_DMABUF)
				task.task.buf_cap.plane[i].fd =
						data.buf_cap.plane[i].fd;
			else /* data.buf_cap.type == M2M1SHOT_BUFFER_USERPTR */
				task.task.buf_cap.plane[i].userptr =
						data.buf_cap.plane[i].userptr;
		}
		task.task.buf_cap.type = data.buf_cap.type;
		task.task.buf_cap.num_planes = data.buf_cap.num_planes;
		task.task.op.quality_level = data.op.quality_level;
		task.task.op.rotate = data.op.rotate;
		task.task.op.op = data.op.op;
		task.task.reserved[0] = data.reserved[0];
		task.task.reserved[1] = data.reserved[1];

		/*
		 * m2m1shot_process() does not wake up
		 * until the given task finishes
		 */
		ret = m2m1shot_process(ctx, &task);
		if (ret) {
			dev_err(m21dev->dev,
				"%s: Failed to process m2m1shot task\n",
				__func__);
			return ret;
		}

		data.fmt_out.fmt = task.task.fmt_out.fmt;
		data.fmt_out.width = task.task.fmt_out.width;
		data.fmt_out.height = task.task.fmt_out.height;
		data.fmt_out.crop.left = task.task.fmt_out.crop.left;
		data.fmt_out.crop.top = task.task.fmt_out.crop.top;
		data.fmt_out.crop.width = task.task.fmt_out.crop.width;
		data.fmt_out.crop.height = task.task.fmt_out.crop.height;
		data.fmt_cap.fmt = task.task.fmt_cap.fmt;
		data.fmt_cap.width = task.task.fmt_cap.width;
		data.fmt_cap.height = task.task.fmt_cap.height;
		data.fmt_cap.crop.left = task.task.fmt_cap.crop.left;
		data.fmt_cap.crop.top = task.task.fmt_cap.crop.top;
		data.fmt_cap.crop.width = task.task.fmt_cap.crop.width;
		data.fmt_cap.crop.height = task.task.fmt_cap.crop.height;
		for (i = 0; i < task.task.buf_out.num_planes; i++) {
			data.buf_out.plane[i].len =
						task.task.buf_out.plane[i].len;
			if (task.task.buf_out.type == M2M1SHOT_BUFFER_DMABUF)
				data.buf_out.plane[i].fd =
						task.task.buf_out.plane[i].fd;
			else /* buf_out.type == M2M1SHOT_BUFFER_USERPTR */
				data.buf_out.plane[i].userptr =
					task.task.buf_out.plane[i].userptr;
		}
		data.buf_out.type = task.task.buf_out.type;
		data.buf_out.num_planes = task.task.buf_out.num_planes;
		for (i = 0; i < task.task.buf_cap.num_planes; i++) {
			data.buf_cap.plane[i].len =
						task.task.buf_cap.plane[i].len;
			if (task.task.buf_cap.type == M2M1SHOT_BUFFER_DMABUF)
				data.buf_cap.plane[i].fd =
						task.task.buf_cap.plane[i].fd;
			else /* buf_cap.type == M2M1SHOT_BUFFER_USERPTR */
				data.buf_cap.plane[i].userptr =
					task.task.buf_cap.plane[i].userptr;
		}
		data.buf_cap.type = task.task.buf_cap.type;
		data.buf_cap.num_planes = task.task.buf_cap.num_planes;
		data.op.quality_level = task.task.op.quality_level;
		data.op.rotate = task.task.op.rotate;
		data.op.op = task.task.op.op;
		data.reserved[0] = task.task.reserved[0];
		data.reserved[1] = task.task.reserved[1];

		if (copy_to_user(compat_ptr(arg), &data, sizeof(data))) {
			dev_err(m21dev->dev,
				"%s: Failed to copy into userdata\n", __func__);
			return -EFAULT;
		}

		return 0;
	}
	case COMPAT_M2M1SHOT_IOC_CUSTOM:
	{
		struct compat_m2m1shot_custom_data data;

		if (!m21dev->ops->custom_ioctl) {
			dev_err(m21dev->dev,
				"%s: custom_ioctl not defined\n", __func__);
			return -ENOSYS;
		}

		if (copy_from_user(&data, compat_ptr(arg), sizeof(data))) {
			dev_err(m21dev->dev,
				"%s: Failed to read custom data\n", __func__);
			return -EFAULT;
		}

		return m21dev->ops->custom_ioctl(ctx, data.cmd, data.arg);
	}
	default:
		dev_err(m21dev->dev, "%s: Unknown ioctl cmd %x\n",
			__func__, cmd);
		return -EINVAL;
	}

	return 0;
}
#endif /* CONFIG_COMPAT */

static const struct file_operations m2m1shot_fops = {
	.owner = THIS_MODULE,
	.open = m2m1shot_open,
	.release = m2m1shot_release,
	.unlocked_ioctl = m2m1shot_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = m2m1shot_compat_ioctl32,
#endif
};

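/**
 * m2m1shot_create_device() - register an m2m1shot device node
 * @dev: the device that performs the actual processing
 * @ops: callbacks into the client driver
 * @suffix: suffix of the device node name after "m2m1shot_"
 * @id: instance number appended to @suffix; ignored if negative
 * @timeout_jiffies: timeout of a task in jiffies, -1 to wait forever
 *
 * Returns the new m2m1shot device on success, or an ERR_PTR() value on
 * failure.
 */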
struct m2m1shot_device *m2m1shot_create_device(struct device *dev,
					const struct m2m1shot_devops *ops,
					const char *suffix, int id,
					unsigned long timeout_jiffies)
{
	struct m2m1shot_device *m21dev;
	char *name;
	size_t name_size;
	int ret = -ENOMEM;

	/* TODO: ops callback check */
	if (!ops || !ops->prepare_format || !ops->prepare_buffer) {
		dev_err(dev, "%s: m2m1shot_devops is not provided\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	if (!suffix) {
		dev_err(dev, "%s: suffix of node name is not specified\n",
			__func__);
		return ERR_PTR(-EINVAL);
	}

	name_size = M2M1SHOT_DEVNODE_PREFIX_LEN + strlen(suffix) + 1;

	if (id >= 0)
		name_size += 3; /* instance number: maximum 3 digits */

	name = kmalloc(name_size, GFP_KERNEL);
	if (!name)
		return ERR_PTR(-ENOMEM);

	if (id < 0)
		scnprintf(name, name_size,
				M2M1SHOT_DEVNODE_PREFIX "%s", suffix);
	else
		scnprintf(name, name_size,
				M2M1SHOT_DEVNODE_PREFIX "%s%d", suffix, id);

	m21dev = kzalloc(sizeof(*m21dev), GFP_KERNEL);
	if (!m21dev)
		goto err_alloc;

	m21dev->misc.minor = MISC_DYNAMIC_MINOR;
	m21dev->misc.name = name;
	m21dev->misc.fops = &m2m1shot_fops;
	ret = misc_register(&m21dev->misc);
	if (ret)
		goto err_misc;

	INIT_LIST_HEAD(&m21dev->tasks);
	INIT_LIST_HEAD(&m21dev->contexts);

	spin_lock_init(&m21dev->lock_task);
	spin_lock_init(&m21dev->lock_ctx);

	m21dev->dev = dev;
	m21dev->ops = ops;
	m21dev->timeout_jiffies = timeout_jiffies;

	return m21dev;

err_misc:
	kfree(m21dev);
err_alloc:
	kfree(name);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL(m2m1shot_create_device);

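/*
 * Typical usage by a client driver, sketched from the API above. The
 * device, the ops table and the "mydrv_*" callback implementations are
 * hypothetical; error handling is elided:
 *
 *	static const struct m2m1shot_devops mydrv_m2m1shot_ops = {
 *		.init_context = mydrv_init_context,
 *		.free_context = mydrv_free_context,
 *		.prepare_format = mydrv_prepare_format,
 *		.prepare_buffer = mydrv_prepare_buffer,
 *		.finish_buffer = mydrv_finish_buffer,
 *		.device_run = mydrv_device_run,
 *		.timeout_task = mydrv_timeout_task,
 *	};
 *
 *	mydrv->m21dev = m2m1shot_create_device(&pdev->dev,
 *			&mydrv_m2m1shot_ops, "mydrv", 0,
 *			msecs_to_jiffies(500));
 *
 * This creates /dev/m2m1shot_mydrv0 with a 500 msec task timeout. The
 * driver then reports each finished task from its IRQ handler with
 * m2m1shot_task_finish(mydrv->m21dev, task, success).
 */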
void m2m1shot_destroy_device(struct m2m1shot_device *m21dev)
{
	misc_deregister(&m21dev->misc);
	kfree(m21dev->misc.name);
	kfree(m21dev);
}
EXPORT_SYMBOL(m2m1shot_destroy_device);