drivers: tty: samsung: fix misleading indentation
/*
 * drivers/media/m2m1shot.c
 *
 * Copyright (C) 2014 Samsung Electronics Co., Ltd.
 *
 * Contact: Cho KyongHo <pullip.cho@samsung.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>

#include <media/m2m1shot.h>

#define M2M1SHOT_DEVNODE_PREFIX "m2m1shot_"
#define M2M1SHOT_DEVNODE_PREFIX_LEN 9

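/*
 * m2m1shot_task_schedule() - pick the first pending task and start it
 *
 * If no task is pending or the H/W is already processing one, this is a
 * no-op. If the driver's device_run() callback fails, the task is
 * completed with M2M1SHOT_BUFSTATE_ERROR and the next pending task is
 * tried in its place.
 */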
static void m2m1shot_task_schedule(struct m2m1shot_device *m21dev)
{
	struct m2m1shot_task *task;
	unsigned long flags;

next_task:
	spin_lock_irqsave(&m21dev->lock_task, flags);
	if (list_empty(&m21dev->tasks)) {
		/* No task to run */
		spin_unlock_irqrestore(&m21dev->lock_task, flags);
		return;
	}

	if (m21dev->current_task) {
		/* H/W is working */
		spin_unlock_irqrestore(&m21dev->lock_task, flags);
		return;
	}

	task = list_first_entry(&m21dev->tasks,
				struct m2m1shot_task, task_node);
	list_del(&task->task_node);

	m21dev->current_task = task;

	spin_unlock_irqrestore(&m21dev->lock_task, flags);

	task->state = M2M1SHOT_BUFSTATE_PROCESSING;

	if (m21dev->ops->device_run(task->ctx, task)) {
		task->state = M2M1SHOT_BUFSTATE_ERROR;

		spin_lock_irqsave(&m21dev->lock_task, flags);
		m21dev->current_task = NULL;
		spin_unlock_irqrestore(&m21dev->lock_task, flags);

		complete(&task->complete);

		goto next_task;
	}
}

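/*
 * m2m1shot_task_finish() - mark the current task done and run the next one
 *
 * Called by the driver when the H/W finishes a task. @success selects
 * between M2M1SHOT_BUFSTATE_DONE and M2M1SHOT_BUFSTATE_ERROR. The waiter
 * in m2m1shot_process() is woken up and the next pending task, if any,
 * is scheduled.
 */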
void m2m1shot_task_finish(struct m2m1shot_device *m21dev,
			  struct m2m1shot_task *task, bool success)
{
	unsigned long flags;

	spin_lock_irqsave(&m21dev->lock_task, flags);
	BUG_ON(!m21dev->current_task);
	BUG_ON(m21dev->current_task != task);
	m21dev->current_task = NULL;
	spin_unlock_irqrestore(&m21dev->lock_task, flags);

	task->state = success ?
		M2M1SHOT_BUFSTATE_DONE : M2M1SHOT_BUFSTATE_ERROR;

	complete(&task->complete);

	m2m1shot_task_schedule(m21dev);
}
EXPORT_SYMBOL(m2m1shot_task_finish);

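/*
 * m2m1shot_task_cancel() - cancel the task being processed by the H/W
 *
 * Unlike m2m1shot_task_finish(), no sanity check against the current
 * task is performed: the caller decides that @task can no longer finish
 * normally (e.g. on timeout) and completes it with @reason.
 */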
void m2m1shot_task_cancel(struct m2m1shot_device *m21dev,
			  struct m2m1shot_task *task,
			  enum m2m1shot_state reason)
{
	unsigned long flags;

	spin_lock_irqsave(&m21dev->lock_task, flags);
	m21dev->current_task = NULL;
	spin_unlock_irqrestore(&m21dev->lock_task, flags);

	task->state = reason;

	complete(&task->complete);

	m2m1shot_task_schedule(m21dev);
}
EXPORT_SYMBOL(m2m1shot_task_cancel);

static void m2m1shot_buffer_put_dma_buf_plane(
			struct m2m1shot_buffer_plane_dma *plane)
{
	dma_buf_detach(plane->dmabuf, plane->attachment);
	dma_buf_put(plane->dmabuf);
	plane->dmabuf = NULL;
}

static void m2m1shot_buffer_put_dma_buf(struct m2m1shot_buffer *buffer,
					struct m2m1shot_buffer_dma *dma_buffer)
{
	int i;

	for (i = 0; i < buffer->num_planes; i++)
		m2m1shot_buffer_put_dma_buf_plane(&dma_buffer->plane[i]);
}

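/*
 * m2m1shot_buffer_get_dma_buf() - import and attach dma-bufs of all planes
 *
 * Translates the user-given fd of every plane into a dma_buf and attaches
 * it to the device. On failure, everything acquired so far is released.
 */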
static int m2m1shot_buffer_get_dma_buf(struct m2m1shot_device *m21dev,
				       struct m2m1shot_buffer *buffer,
				       struct m2m1shot_buffer_dma *dma_buffer)
{
	struct m2m1shot_buffer_plane_dma *plane;
	int i, ret;

	for (i = 0; i < buffer->num_planes; i++) {
		plane = &dma_buffer->plane[i];

		plane->dmabuf = dma_buf_get(buffer->plane[i].fd);
		if (IS_ERR(plane->dmabuf)) {
			dev_err(m21dev->dev,
				"%s: failed to get dmabuf of fd %d\n",
				__func__, buffer->plane[i].fd);
			ret = PTR_ERR(plane->dmabuf);
			goto err;
		}

		if (plane->dmabuf->size < plane->bytes_used) {
			dev_err(m21dev->dev,
				"%s: needs %zx bytes but dmabuf is %zx\n",
				__func__, plane->bytes_used,
				plane->dmabuf->size);
			ret = -EINVAL;
			goto err;
		}

		plane->attachment = dma_buf_attach(plane->dmabuf, m21dev->dev);
		if (IS_ERR(plane->attachment)) {
			dev_err(m21dev->dev,
				"%s: Failed to attach dmabuf\n", __func__);
			ret = PTR_ERR(plane->attachment);
			goto err;
		}
	}

	return 0;
err:
	if (!IS_ERR(plane->dmabuf)) /* release dmabuf of the last iteration */
		dma_buf_put(plane->dmabuf);

	while (i-- > 0)
		m2m1shot_buffer_put_dma_buf_plane(&dma_buffer->plane[i]);

	return ret;
}

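/*
 * m2m1shot_buffer_check_userptr() - look up a dma-buf behind a user address
 *
 * Returns the dma_buf if the VMA containing @start is a mapping of one
 * (with *out_offset set to the offset of @start within that mapping),
 * NULL if it is ordinary user memory, or an ERR_PTR if @start is not a
 * valid user address.
 */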
static struct dma_buf *m2m1shot_buffer_check_userptr(
		struct m2m1shot_device *m21dev, unsigned long start,
		size_t len, off_t *out_offset)
{
	struct dma_buf *dmabuf = NULL;
	struct vm_area_struct *vma;

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, start);
	if (!vma || (start < vma->vm_start)) {
		dev_err(m21dev->dev, "%s: Incorrect user buffer @ %#lx/%#zx\n",
			__func__, start, len);
		dmabuf = ERR_PTR(-EINVAL);
		goto finish;
	}

	if (!vma->vm_file)
		goto finish;

	dmabuf = get_dma_buf_file(vma->vm_file);
	if (dmabuf != NULL)
		*out_offset = start - vma->vm_start;
finish:
	up_read(&current->mm->mmap_sem);
	return dmabuf;
}

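/*
 * m2m1shot_buffer_get_userptr() - import planes given as user pointers
 *
 * Planes whose user address turns out to be a dma-buf mapping are
 * imported like M2M1SHOT_BUFFER_DMABUF planes; planes backed by ordinary
 * user memory are left untouched here.
 */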
static int m2m1shot_buffer_get_userptr(struct m2m1shot_device *m21dev,
				       struct m2m1shot_buffer *buffer,
				       struct m2m1shot_buffer_dma *dma_buffer,
				       int write)
{
	int i, ret = 0;
	struct dma_buf *dmabuf;
	off_t offset;

	for (i = 0; i < buffer->num_planes; i++) {
		dmabuf = m2m1shot_buffer_check_userptr(m21dev,
				buffer->plane[i].userptr, buffer->plane[i].len,
				&offset);
		if (IS_ERR(dmabuf)) {
			ret = PTR_ERR(dmabuf);
			goto err;
		} else if (dmabuf) {
			if (dmabuf->size < dma_buffer->plane[i].bytes_used) {
				dev_err(m21dev->dev,
					"%s: needs %zu bytes but dmabuf is %zu\n",
					__func__,
					dma_buffer->plane[i].bytes_used,
					dmabuf->size);
				dma_buf_put(dmabuf);
				ret = -EINVAL;
				goto err;
			}

			dma_buffer->plane[i].dmabuf = dmabuf;
			dma_buffer->plane[i].attachment = dma_buf_attach(
						dmabuf, m21dev->dev);
			dma_buffer->plane[i].offset = offset;
			if (IS_ERR(dma_buffer->plane[i].attachment)) {
				dev_err(m21dev->dev,
					"%s: Failed to attach dmabuf\n",
					__func__);
				ret = PTR_ERR(dma_buffer->plane[i].attachment);
				dma_buf_put(dmabuf);
				goto err;
			}
		}
	}

	return 0;
err:
	/* planes backed by plain user memory have no dmabuf to release */
	while (i-- > 0)
		if (dma_buffer->plane[i].dmabuf)
			m2m1shot_buffer_put_dma_buf_plane(
					&dma_buffer->plane[i]);

	return ret;
}

static void m2m1shot_buffer_put_userptr(struct m2m1shot_buffer *buffer,
					struct m2m1shot_buffer_dma *dma_buffer,
					int write)
{
	int i;

	for (i = 0; i < buffer->num_planes; i++)
		if (dma_buffer->plane[i].dmabuf)
			m2m1shot_buffer_put_dma_buf_plane(
					&dma_buffer->plane[i]);
}

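/*
 * m2m1shot_prepare_get_buffer() - validate plane sizes and import a buffer
 *
 * Verifies that every user-given plane is large enough for the bytes the
 * driver needs, imports the planes according to the buffer type, then
 * calls the driver's prepare_buffer() callback per plane to set up DMA
 * addresses. Cleans up completely on any failure.
 */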
static int m2m1shot_prepare_get_buffer(struct m2m1shot_context *ctx,
				       struct m2m1shot_buffer *buffer,
				       struct m2m1shot_buffer_dma *dma_buffer,
				       enum dma_data_direction dir)
{
	struct m2m1shot_device *m21dev = ctx->m21dev;
	int i, ret;

	for (i = 0; i < buffer->num_planes; i++) {
		struct m2m1shot_buffer_plane_dma *plane;

		plane = &dma_buffer->plane[i];

		if (plane->bytes_used == 0) {
			/*
			 * bytes_used == 0 means that the size of the plane
			 * cannot be decided by the driver because it depends
			 * on the content in the buffer. The best example of
			 * such a buffer is a JPEG-encoded stream for
			 * decompression.
			 */
			plane->bytes_used = buffer->plane[i].len;
		} else if (buffer->plane[i].len < plane->bytes_used) {
			dev_err(m21dev->dev,
				"%s: needs %zx bytes but %zx is given\n",
				__func__, plane->bytes_used,
				buffer->plane[i].len);
			return -EINVAL;
		}
	}

	if ((buffer->type != M2M1SHOT_BUFFER_USERPTR) &&
			(buffer->type != M2M1SHOT_BUFFER_DMABUF)) {
		dev_err(m21dev->dev, "%s: unknown buffer type %u\n",
			__func__, buffer->type);
		return -EINVAL;
	}

	if (buffer->type == M2M1SHOT_BUFFER_DMABUF)
		ret = m2m1shot_buffer_get_dma_buf(m21dev, buffer, dma_buffer);
	else
		ret = m2m1shot_buffer_get_userptr(m21dev, buffer, dma_buffer,
					(dir == DMA_TO_DEVICE) ? 0 : 1);

	if (ret)
		return ret;

	dma_buffer->buffer = buffer;

	for (i = 0; i < buffer->num_planes; i++) {
		/* the callback function should fill the 'dma_addr' field */
		ret = m21dev->ops->prepare_buffer(ctx, dma_buffer, i, dir);
		if (ret)
			goto err;
	}

	return 0;
err:
	dev_err(m21dev->dev, "%s: Failed to prepare plane %u\n", __func__, i);

	while (i-- > 0)
		m21dev->ops->finish_buffer(ctx, dma_buffer, i, dir);

	if (buffer->type == M2M1SHOT_BUFFER_DMABUF)
		m2m1shot_buffer_put_dma_buf(buffer, dma_buffer);
	else
		m2m1shot_buffer_put_userptr(buffer, dma_buffer,
					(dir == DMA_TO_DEVICE) ? 0 : 1);

	return ret;
}

static void m2m1shot_finish_buffer(struct m2m1shot_device *m21dev,
				   struct m2m1shot_context *ctx,
				   struct m2m1shot_buffer *buffer,
				   struct m2m1shot_buffer_dma *dma_buffer,
				   enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < buffer->num_planes; i++)
		m21dev->ops->finish_buffer(ctx, dma_buffer, i, dir);

	if (buffer->type == M2M1SHOT_BUFFER_DMABUF)
		m2m1shot_buffer_put_dma_buf(buffer, dma_buffer);
	else
		m2m1shot_buffer_put_userptr(buffer, dma_buffer,
					(dir == DMA_TO_DEVICE) ? 0 : 1);
}

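/*
 * m2m1shot_prepare_format() - negotiate formats and per-plane sizes
 *
 * Asks the driver, via prepare_format(), how many planes each of the
 * output and capture buffers needs and how many bytes each plane
 * requires, then stores the sizes in the per-task DMA descriptors.
 */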
static int m2m1shot_prepare_format(struct m2m1shot_device *m21dev,
				   struct m2m1shot_context *ctx,
				   struct m2m1shot_task *task)
{
	int i, ret;
	size_t out_sizes[M2M1SHOT_MAX_PLANES] = { 0 };
	size_t cap_sizes[M2M1SHOT_MAX_PLANES] = { 0 };

	if (task->task.buf_out.num_planes > M2M1SHOT_MAX_PLANES) {
		dev_err(m21dev->dev, "Invalid number of output planes %u.\n",
			task->task.buf_out.num_planes);
		return -EINVAL;
	}

	if (task->task.buf_cap.num_planes > M2M1SHOT_MAX_PLANES) {
		dev_err(m21dev->dev, "Invalid number of capture planes %u.\n",
			task->task.buf_cap.num_planes);
		return -EINVAL;
	}

	ret = m21dev->ops->prepare_format(ctx, &task->task.fmt_out,
					  DMA_TO_DEVICE, out_sizes);
	if (ret < 0)
		return ret;

	if (task->task.buf_out.num_planes != ret) {
		dev_err(m21dev->dev,
			"%s: needs %u output planes but %u is given\n",
			__func__, ret, task->task.buf_out.num_planes);
		return -EINVAL;
	}

	for (i = 0; i < task->task.buf_out.num_planes; i++)
		task->dma_buf_out.plane[i].bytes_used = out_sizes[i];

	ret = m21dev->ops->prepare_format(ctx, &task->task.fmt_cap,
					  DMA_FROM_DEVICE, cap_sizes);
	if (ret < 0)
		return ret;

	if (task->task.buf_cap.num_planes < ret) {
		dev_err(m21dev->dev,
			"%s: needs %u capture planes but %u is given\n",
			__func__, ret, task->task.buf_cap.num_planes);
		return -EINVAL;
	}

	for (i = 0; i < task->task.buf_cap.num_planes; i++)
		task->dma_buf_cap.plane[i].bytes_used = cap_sizes[i];

	if (m21dev->ops->prepare_operation) {
		ret = m21dev->ops->prepare_operation(ctx, task);
		if (ret)
			return ret;
	}

	return 0;
}

static int m2m1shot_prepare_task(struct m2m1shot_device *m21dev,
				 struct m2m1shot_context *ctx,
				 struct m2m1shot_task *task)
{
	int ret;

	ret = m2m1shot_prepare_format(m21dev, ctx, task);
	if (ret)
		return ret;

	ret = m2m1shot_prepare_get_buffer(ctx, &task->task.buf_out,
					  &task->dma_buf_out, DMA_TO_DEVICE);
	if (ret) {
		dev_err(m21dev->dev, "%s: Failed to get output buffer\n",
			__func__);
		return ret;
	}

	ret = m2m1shot_prepare_get_buffer(ctx, &task->task.buf_cap,
					  &task->dma_buf_cap, DMA_FROM_DEVICE);
	if (ret) {
		m2m1shot_finish_buffer(m21dev, ctx,
			&task->task.buf_out, &task->dma_buf_out, DMA_TO_DEVICE);
		dev_err(m21dev->dev, "%s: Failed to get capture buffer\n",
			__func__);
		return ret;
	}

	return 0;
}

static void m2m1shot_finish_task(struct m2m1shot_device *m21dev,
				 struct m2m1shot_context *ctx,
				 struct m2m1shot_task *task)
{
	m2m1shot_finish_buffer(m21dev, ctx,
			&task->task.buf_cap, &task->dma_buf_cap,
			DMA_FROM_DEVICE);
	m2m1shot_finish_buffer(m21dev, ctx,
			&task->task.buf_out, &task->dma_buf_out, DMA_TO_DEVICE);
}

static void m2m1shot_destroy_context(struct kref *kref)
{
	struct m2m1shot_context *ctx = container_of(kref,
					struct m2m1shot_context, kref);

	ctx->m21dev->ops->free_context(ctx);

	spin_lock(&ctx->m21dev->lock_ctx);
	list_del(&ctx->node);
	spin_unlock(&ctx->m21dev->lock_ctx);

	kfree(ctx);
}

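/*
 * m2m1shot_process() - process a single task synchronously
 *
 * Prepares the formats and buffers of @task, queues it, kicks the
 * scheduler and sleeps until the task completes or the device timeout
 * expires. On timeout the task is canceled and the driver's
 * timeout_task() callback is invoked before returning -ETIMEDOUT.
 */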
static int m2m1shot_process(struct m2m1shot_context *ctx,
			    struct m2m1shot_task *task)
{
	struct m2m1shot_device *m21dev = ctx->m21dev;
	unsigned long flags;
	int ret;

	INIT_LIST_HEAD(&task->task_node);
	init_completion(&task->complete);

	kref_get(&ctx->kref);

	mutex_lock(&ctx->mutex);

	ret = m2m1shot_prepare_task(m21dev, ctx, task);
	if (ret)
		goto err;

	task->ctx = ctx;
	task->state = M2M1SHOT_BUFSTATE_READY;

	spin_lock_irqsave(&m21dev->lock_task, flags);
	list_add_tail(&task->task_node, &m21dev->tasks);
	spin_unlock_irqrestore(&m21dev->lock_task, flags);

	m2m1shot_task_schedule(m21dev);

	if (m21dev->timeout_jiffies != -1) {
		unsigned long elapsed;

		elapsed = wait_for_completion_timeout(&task->complete,
						      m21dev->timeout_jiffies);
		if (!elapsed) { /* timed out */
			m2m1shot_task_cancel(m21dev, task,
					     M2M1SHOT_BUFSTATE_TIMEDOUT);

			m21dev->ops->timeout_task(ctx, task);

			m2m1shot_finish_task(m21dev, ctx, task);

			dev_notice(m21dev->dev, "%s: %u msecs timed out\n",
				   __func__,
				   jiffies_to_msecs(m21dev->timeout_jiffies));
			ret = -ETIMEDOUT;
			goto err;
		}
	} else {
		wait_for_completion(&task->complete);
	}

	BUG_ON(task->state == M2M1SHOT_BUFSTATE_READY);

	m2m1shot_finish_task(m21dev, ctx, task);
err:
	mutex_unlock(&ctx->mutex);

	kref_put(&ctx->kref, m2m1shot_destroy_context);

	if (ret)
		return ret;

	return (task->state == M2M1SHOT_BUFSTATE_DONE) ? 0 : -EINVAL;
}

static int m2m1shot_open(struct inode *inode, struct file *filp)
{
	struct m2m1shot_device *m21dev = container_of(filp->private_data,
						struct m2m1shot_device, misc);
	struct m2m1shot_context *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	INIT_LIST_HEAD(&ctx->node);
	kref_init(&ctx->kref);
	mutex_init(&ctx->mutex);

	ctx->m21dev = m21dev;

	spin_lock(&m21dev->lock_ctx);
	list_add_tail(&ctx->node, &m21dev->contexts);
	spin_unlock(&m21dev->lock_ctx);

	filp->private_data = ctx;

	ret = m21dev->ops->init_context(ctx);
	if (ret) {
		/*
		 * kref_put() is not used here so that .free_context() is not
		 * called for a context whose .init_context() failed. The
		 * context must still be removed from the context list before
		 * it is freed.
		 */
		spin_lock(&m21dev->lock_ctx);
		list_del(&ctx->node);
		spin_unlock(&m21dev->lock_ctx);
		kfree(ctx);
	}

	return ret;
}

static int m2m1shot_release(struct inode *inode, struct file *filp)
{
	struct m2m1shot_context *ctx = filp->private_data;

	kref_put(&ctx->kref, m2m1shot_destroy_context);

	return 0;
}

static long m2m1shot_ioctl(struct file *filp,
			   unsigned int cmd, unsigned long arg)
{
	struct m2m1shot_context *ctx = filp->private_data;
	struct m2m1shot_device *m21dev = ctx->m21dev;

	switch (cmd) {
	case M2M1SHOT_IOC_PROCESS:
	{
		struct m2m1shot_task data;
		int ret;

		memset(&data, 0, sizeof(data));

		if (copy_from_user(&data.task,
				   (void __user *)arg, sizeof(data.task))) {
			dev_err(m21dev->dev,
				"%s: Failed to read userdata\n", __func__);
			return -EFAULT;
		}

		/*
		 * m2m1shot_process() does not return
		 * until the given task finishes
		 */
		ret = m2m1shot_process(ctx, &data);

		if (copy_to_user((void __user *)arg, &data.task,
				 sizeof(data.task))) {
			dev_err(m21dev->dev,
				"%s: Failed to write userdata\n", __func__);
			return -EFAULT;
		}

		return ret;
	}
	case M2M1SHOT_IOC_CUSTOM:
	{
		struct m2m1shot_custom_data data;

		if (!m21dev->ops->custom_ioctl) {
			dev_err(m21dev->dev,
				"%s: custom_ioctl not defined\n", __func__);
			return -ENOSYS;
		}

		if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
			dev_err(m21dev->dev,
				"%s: Failed to read custom data\n", __func__);
			return -EFAULT;
		}

		return m21dev->ops->custom_ioctl(ctx, data.cmd, data.arg);
	}
	default:
		dev_err(m21dev->dev, "%s: Unknown ioctl cmd %x\n",
			__func__, cmd);
		return -EINVAL;
	}

	return 0;
}

#ifdef CONFIG_COMPAT
struct compat_m2m1shot_rect {
	compat_short_t left;
	compat_short_t top;
	compat_ushort_t width;
	compat_ushort_t height;
};

struct compat_m2m1shot_pix_format {
	compat_uint_t fmt;
	compat_uint_t width;
	compat_uint_t height;
	struct v4l2_rect crop;
};

struct compat_m2m1shot_buffer_plane {
	union {
		compat_int_t fd;
		compat_ulong_t userptr;
	};
	compat_size_t len;
};

struct compat_m2m1shot_buffer {
	struct compat_m2m1shot_buffer_plane plane[M2M1SHOT_MAX_PLANES];
	__u8 type;
	__u8 num_planes;
};

struct compat_m2m1shot_operation {
	compat_short_t quality_level;
	compat_short_t rotate;
	compat_uint_t op; /* or-ing M2M1SHOT_FLIP_VIRT/HORI */
};

struct compat_m2m1shot {
	struct compat_m2m1shot_pix_format fmt_out;
	struct compat_m2m1shot_pix_format fmt_cap;
	struct compat_m2m1shot_buffer buf_out;
	struct compat_m2m1shot_buffer buf_cap;
	struct compat_m2m1shot_operation op;
	compat_ulong_t reserved[2];
};

struct compat_m2m1shot_custom_data {
	compat_uint_t cmd;
	compat_ulong_t arg;
};

#define COMPAT_M2M1SHOT_IOC_PROCESS	_IOWR('M', 0, struct compat_m2m1shot)
#define COMPAT_M2M1SHOT_IOC_CUSTOM \
	_IOWR('M', 16, struct compat_m2m1shot_custom_data)

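/*
 * m2m1shot_compat_ioctl32() - 32-bit compatibility entry point
 *
 * Converts the 32-bit user layouts of struct m2m1shot and struct
 * m2m1shot_custom_data to their native forms field by field, runs the
 * request, and converts the result back.
 */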
static long m2m1shot_compat_ioctl32(struct file *filp,
				    unsigned int cmd, unsigned long arg)
{
	struct m2m1shot_context *ctx = filp->private_data;
	struct m2m1shot_device *m21dev = ctx->m21dev;

	switch (cmd) {
	case COMPAT_M2M1SHOT_IOC_PROCESS:
	{
		struct compat_m2m1shot data;
		struct m2m1shot_task task;
		int i, ret;

		memset(&task, 0, sizeof(task));

		if (copy_from_user(&data, compat_ptr(arg), sizeof(data))) {
			dev_err(m21dev->dev,
				"%s: Failed to read userdata\n", __func__);
			return -EFAULT;
		}

		if ((data.buf_out.num_planes > M2M1SHOT_MAX_PLANES) ||
		    (data.buf_cap.num_planes > M2M1SHOT_MAX_PLANES)) {
			dev_err(m21dev->dev,
				"%s: Invalid plane number (out %u/cap %u)\n",
				__func__, data.buf_out.num_planes,
				data.buf_cap.num_planes);
			return -EINVAL;
		}

		task.task.fmt_out.fmt = data.fmt_out.fmt;
		task.task.fmt_out.width = data.fmt_out.width;
		task.task.fmt_out.height = data.fmt_out.height;
		task.task.fmt_out.crop.left = data.fmt_out.crop.left;
		task.task.fmt_out.crop.top = data.fmt_out.crop.top;
		task.task.fmt_out.crop.width = data.fmt_out.crop.width;
		task.task.fmt_out.crop.height = data.fmt_out.crop.height;
		task.task.fmt_cap.fmt = data.fmt_cap.fmt;
		task.task.fmt_cap.width = data.fmt_cap.width;
		task.task.fmt_cap.height = data.fmt_cap.height;
		task.task.fmt_cap.crop.left = data.fmt_cap.crop.left;
		task.task.fmt_cap.crop.top = data.fmt_cap.crop.top;
		task.task.fmt_cap.crop.width = data.fmt_cap.crop.width;
		task.task.fmt_cap.crop.height = data.fmt_cap.crop.height;
		for (i = 0; i < data.buf_out.num_planes; i++) {
			task.task.buf_out.plane[i].len =
						data.buf_out.plane[i].len;
			if (data.buf_out.type == M2M1SHOT_BUFFER_DMABUF)
				task.task.buf_out.plane[i].fd =
						data.buf_out.plane[i].fd;
			else /* data.buf_out.type == M2M1SHOT_BUFFER_USERPTR */
				task.task.buf_out.plane[i].userptr =
						data.buf_out.plane[i].userptr;
		}
		task.task.buf_out.type = data.buf_out.type;
		task.task.buf_out.num_planes = data.buf_out.num_planes;
		for (i = 0; i < data.buf_cap.num_planes; i++) {
			task.task.buf_cap.plane[i].len =
						data.buf_cap.plane[i].len;
			if (data.buf_cap.type == M2M1SHOT_BUFFER_DMABUF)
				task.task.buf_cap.plane[i].fd =
						data.buf_cap.plane[i].fd;
			else /* data.buf_cap.type == M2M1SHOT_BUFFER_USERPTR */
				task.task.buf_cap.plane[i].userptr =
						data.buf_cap.plane[i].userptr;
		}
		task.task.buf_cap.type = data.buf_cap.type;
		task.task.buf_cap.num_planes = data.buf_cap.num_planes;
		task.task.op.quality_level = data.op.quality_level;
		task.task.op.rotate = data.op.rotate;
		task.task.op.op = data.op.op;
		task.task.reserved[0] = data.reserved[0];
		task.task.reserved[1] = data.reserved[1];

		/*
		 * m2m1shot_process() does not return
		 * until the given task finishes
		 */
		ret = m2m1shot_process(ctx, &task);
		if (ret) {
			dev_err(m21dev->dev,
				"%s: Failed to process m2m1shot task\n",
				__func__);
			return ret;
		}

		data.fmt_out.fmt = task.task.fmt_out.fmt;
		data.fmt_out.width = task.task.fmt_out.width;
		data.fmt_out.height = task.task.fmt_out.height;
		data.fmt_out.crop.left = task.task.fmt_out.crop.left;
		data.fmt_out.crop.top = task.task.fmt_out.crop.top;
		data.fmt_out.crop.width = task.task.fmt_out.crop.width;
		data.fmt_out.crop.height = task.task.fmt_out.crop.height;
		data.fmt_cap.fmt = task.task.fmt_cap.fmt;
		data.fmt_cap.width = task.task.fmt_cap.width;
		data.fmt_cap.height = task.task.fmt_cap.height;
		data.fmt_cap.crop.left = task.task.fmt_cap.crop.left;
		data.fmt_cap.crop.top = task.task.fmt_cap.crop.top;
		data.fmt_cap.crop.width = task.task.fmt_cap.crop.width;
		data.fmt_cap.crop.height = task.task.fmt_cap.crop.height;
		for (i = 0; i < task.task.buf_out.num_planes; i++) {
			data.buf_out.plane[i].len =
					task.task.buf_out.plane[i].len;
			if (task.task.buf_out.type == M2M1SHOT_BUFFER_DMABUF)
				data.buf_out.plane[i].fd =
					task.task.buf_out.plane[i].fd;
			else /* buf_out.type == M2M1SHOT_BUFFER_USERPTR */
				data.buf_out.plane[i].userptr =
					task.task.buf_out.plane[i].userptr;
		}
		data.buf_out.type = task.task.buf_out.type;
		data.buf_out.num_planes = task.task.buf_out.num_planes;
		for (i = 0; i < task.task.buf_cap.num_planes; i++) {
			data.buf_cap.plane[i].len =
					task.task.buf_cap.plane[i].len;
			if (task.task.buf_cap.type == M2M1SHOT_BUFFER_DMABUF)
				data.buf_cap.plane[i].fd =
					task.task.buf_cap.plane[i].fd;
			else /* buf_cap.type == M2M1SHOT_BUFFER_USERPTR */
				data.buf_cap.plane[i].userptr =
					task.task.buf_cap.plane[i].userptr;
		}
		data.buf_cap.type = task.task.buf_cap.type;
		data.buf_cap.num_planes = task.task.buf_cap.num_planes;
		data.op.quality_level = task.task.op.quality_level;
		data.op.rotate = task.task.op.rotate;
		data.op.op = task.task.op.op;
		data.reserved[0] = task.task.reserved[0];
		data.reserved[1] = task.task.reserved[1];

		if (copy_to_user(compat_ptr(arg), &data, sizeof(data))) {
			dev_err(m21dev->dev,
				"%s: Failed to copy into userdata\n", __func__);
			return -EFAULT;
		}

		return 0;
	}
	case COMPAT_M2M1SHOT_IOC_CUSTOM:
	{
		struct compat_m2m1shot_custom_data data;

		if (!m21dev->ops->custom_ioctl) {
			dev_err(m21dev->dev,
				"%s: custom_ioctl not defined\n", __func__);
			return -ENOSYS;
		}

		if (copy_from_user(&data, compat_ptr(arg), sizeof(data))) {
			dev_err(m21dev->dev,
				"%s: Failed to read custom data\n", __func__);
			return -EFAULT;
		}

		return m21dev->ops->custom_ioctl(ctx, data.cmd, data.arg);
	}
	default:
		dev_err(m21dev->dev, "%s: Unknown ioctl cmd %x\n",
			__func__, cmd);
		return -EINVAL;
	}

	return 0;
}
#endif

static const struct file_operations m2m1shot_fops = {
	.owner = THIS_MODULE,
	.open = m2m1shot_open,
	.release = m2m1shot_release,
	.unlocked_ioctl = m2m1shot_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = m2m1shot_compat_ioctl32,
#endif
};

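/*
 * m2m1shot_create_device() - register an m2m1shot client device
 *
 * Allocates a struct m2m1shot_device, registers a misc device named
 * "m2m1shot_<suffix>[<id>]" (the id is appended only when it is not
 * negative) and returns the new device, or an ERR_PTR on failure.
 * A @timeout_jiffies of -1 makes tasks wait without a timeout.
 */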
struct m2m1shot_device *m2m1shot_create_device(struct device *dev,
					       const struct m2m1shot_devops *ops,
					       const char *suffix, int id,
					       unsigned long timeout_jiffies)
{
	struct m2m1shot_device *m21dev;
	char *name;
	size_t name_size;
	int ret = -ENOMEM;

	/* TODO: ops callback check */
	if (!ops || !ops->prepare_format || !ops->prepare_buffer) {
		dev_err(dev, "%s: m2m1shot_devops is not provided\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	if (!suffix) {
		dev_err(dev, "%s: suffix of node name is not specified\n",
			__func__);
		return ERR_PTR(-EINVAL);
	}

	name_size = M2M1SHOT_DEVNODE_PREFIX_LEN + strlen(suffix) + 1;

	if (id >= 0)
		name_size += 3; /* instance number: maximum 3 digits */

	name = kmalloc(name_size, GFP_KERNEL);
	if (!name)
		return ERR_PTR(-ENOMEM);

	if (id < 0)
		scnprintf(name, name_size,
			  M2M1SHOT_DEVNODE_PREFIX "%s", suffix);
	else
		scnprintf(name, name_size,
			  M2M1SHOT_DEVNODE_PREFIX "%s%d", suffix, id);

	m21dev = kzalloc(sizeof(*m21dev), GFP_KERNEL);
	if (!m21dev)
		goto err_m21dev;

	m21dev->misc.minor = MISC_DYNAMIC_MINOR;
	m21dev->misc.name = name;
	m21dev->misc.fops = &m2m1shot_fops;
	ret = misc_register(&m21dev->misc);
	if (ret)
		goto err_misc;

	INIT_LIST_HEAD(&m21dev->tasks);
	INIT_LIST_HEAD(&m21dev->contexts);

	spin_lock_init(&m21dev->lock_task);
	spin_lock_init(&m21dev->lock_ctx);

	m21dev->dev = dev;
	m21dev->ops = ops;
	m21dev->timeout_jiffies = timeout_jiffies;

	return m21dev;

err_misc:
	kfree(m21dev);
err_m21dev:
	kfree(name);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL(m2m1shot_create_device);
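
/*
 * Typical use by a client driver (a sketch only; the callback prototypes
 * live in <media/m2m1shot.h>, which is the authoritative reference, and
 * all "example_*" names below are hypothetical):
 *
 *	static const struct m2m1shot_devops example_ops = {
 *		.init_context	= example_init_context,
 *		.free_context	= example_free_context,
 *		.prepare_format	= example_prepare_format,
 *		.prepare_buffer	= example_prepare_buffer,
 *		.finish_buffer	= example_finish_buffer,
 *		.device_run	= example_device_run,
 *		.timeout_task	= example_timeout_task,
 *	};
 *
 *	m21dev = m2m1shot_create_device(dev, &example_ops, "example", -1,
 *					msecs_to_jiffies(500));
 *
 * The driver then calls m2m1shot_task_finish() (e.g. from its interrupt
 * handler) when the H/W completes the task started by device_run().
 */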

void m2m1shot_destroy_device(struct m2m1shot_device *m21dev)
{
	misc_deregister(&m21dev->misc);
	kfree(m21dev->misc.name);
	kfree(m21dev);
}
EXPORT_SYMBOL(m2m1shot_destroy_device);