/*
 * Copyright (C) 2012 Samsung Electronics Co.Ltd
 * Authors:
 *	Eunchul Kim <chulspro.kim@samsung.com>
 *	Jinyoung Jeon <jy0.jeon@samsung.com>
 *	Sangmin Lee <lsmin.lee@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <plat/map-base.h>

#include <drm/drmP.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_ipp.h"
#include "exynos_drm_iommu.h"
/*
 * IPP stands for Image Post Processing and
 * supports image scaler/rotator and input/output DMA operations
 * using FIMC, GSC, Rotator, and so on.
 * IPP is an integration driver for h/w blocks with the same attributes.
 */

/*
 * TODO
 * 1. expand command control id.
 * 2. integrate property and config.
 * 3. remove the send_event id check routine.
 * 4. compare send_event id if needed.
 * 5. free the subdrv_remove notifier callback list if needed.
 * 6. check subdrv_open for multi-open.
 * 7. implement power and sysmmu control in power_on.
 */
#define get_ipp_context(dev)	platform_get_drvdata(to_platform_device(dev))
#define ipp_is_m2m_cmd(c)	((c) == IPP_CMD_M2M)
/*
 * A structure of event.
 *
 * @base: base of event.
 * @event: ipp event.
 */
struct drm_exynos_ipp_send_event {
	struct drm_pending_event	base;
	struct drm_exynos_ipp_event	event;
};
/*
 * A structure of memory node.
 *
 * @list: list head to memory queue information.
 * @ops_id: id of operations.
 * @prop_id: id of property.
 * @buf_id: id of buffer.
 * @buf_info: gem objects and dma address, size.
 * @filp: a pointer to drm_file.
 */
struct drm_exynos_ipp_mem_node {
	struct list_head	list;
	enum drm_exynos_ops_id	ops_id;
	u32	prop_id;
	u32	buf_id;
	struct drm_exynos_ipp_buf_info	buf_info;
	struct drm_file		*filp;
};
/*
 * A structure of ipp context.
 *
 * @subdrv: prepare initialization using subdrv.
 * @ipp_lock: lock for synchronization of access to ipp_idr.
 * @prop_lock: lock for synchronization of access to prop_idr.
 * @ipp_idr: ipp driver idr.
 * @prop_idr: property idr.
 * @event_workq: event work queue.
 * @cmd_workq: command work queue.
 */
struct ipp_context {
	struct exynos_drm_subdrv	subdrv;
	struct mutex	ipp_lock;
	struct mutex	prop_lock;
	struct idr	ipp_idr;
	struct idr	prop_idr;
	struct workqueue_struct	*event_workq;
	struct workqueue_struct	*cmd_workq;
};
static LIST_HEAD(exynos_drm_ippdrv_list);
static DEFINE_MUTEX(exynos_drm_ippdrv_lock);
static BLOCKING_NOTIFIER_HEAD(exynos_drm_ippnb_list);
int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
{
	DRM_DEBUG_KMS("%s\n", __func__);

	if (!ippdrv)
		return -EINVAL;

	mutex_lock(&exynos_drm_ippdrv_lock);
	list_add_tail(&ippdrv->drv_list, &exynos_drm_ippdrv_list);
	mutex_unlock(&exynos_drm_ippdrv_lock);

	return 0;
}
int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
{
	DRM_DEBUG_KMS("%s\n", __func__);

	if (!ippdrv)
		return -EINVAL;

	mutex_lock(&exynos_drm_ippdrv_lock);
	list_del(&ippdrv->drv_list);
	mutex_unlock(&exynos_drm_ippdrv_lock);

	return 0;
}
static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj,
		u32 *idp)
{
	int ret;

	DRM_DEBUG_KMS("%s\n", __func__);

again:
	/* ensure there is space available to allocate a handle */
	if (idr_pre_get(id_idr, GFP_KERNEL) == 0) {
		DRM_ERROR("failed to get idr.\n");
		return -ENOMEM;
	}

	/* do the allocation under our mutexlock */
	mutex_lock(lock);
	ret = idr_get_new_above(id_idr, obj, 1, (int *)idp);
	mutex_unlock(lock);
	if (ret == -EAGAIN)
		goto again;

	return ret;
}
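/*
 * Note: the idr_pre_get()/idr_get_new_above() pair above is the legacy
 * two-step idr allocation API. On kernels that provide idr_alloc(), the
 * same allocation collapses into a single call; a minimal sketch, assuming
 * idr_alloc() is available (illustration only):
 *
 *	mutex_lock(lock);
 *	ret = idr_alloc(id_idr, obj, 1, 0, GFP_KERNEL);
 *	mutex_unlock(lock);
 *	if (ret < 0)
 *		return ret;
 *	*idp = ret;
 */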
static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id)
{
	void *obj;

	DRM_DEBUG_KMS("%s:id[%d]\n", __func__, id);

	mutex_lock(lock);

	/* find object using handle */
	obj = idr_find(id_idr, id);
	if (!obj) {
		DRM_ERROR("failed to find object.\n");
		mutex_unlock(lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_unlock(lock);

	return obj;
}
static inline bool ipp_check_dedicated(struct exynos_drm_ippdrv *ippdrv,
		enum drm_exynos_ipp_cmd cmd)
{
	/*
	 * check the dedicated flag and whether a WB or OUTPUT operation
	 * is running with power on.
	 */
	if (ippdrv->dedicated || (!ipp_is_m2m_cmd(cmd) &&
	    !pm_runtime_suspended(ippdrv->dev)))
		return true;

	return false;
}
static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
		struct drm_exynos_ipp_property *property)
{
	struct exynos_drm_ippdrv *ippdrv;
	u32 ipp_id = property->ipp_id;

	DRM_DEBUG_KMS("%s:ipp_id[%d]\n", __func__, ipp_id);

	if (ipp_id) {
		/* find ipp driver using idr */
		ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
			ipp_id);
		if (IS_ERR_OR_NULL(ippdrv)) {
			DRM_ERROR("not found ipp%d driver.\n", ipp_id);
			return ippdrv;
		}

		/*
		 * WB and OUTPUT operations do not support multi-operation,
		 * so mark the driver dedicated in the set-property ioctl
		 * and clear the flag when the driver finishes its operations.
		 */
		if (ipp_check_dedicated(ippdrv, property->cmd)) {
			DRM_ERROR("already used choose device.\n");
			return ERR_PTR(-EBUSY);
		}

		/*
		 * This is necessary to find the correct device among the
		 * ipp drivers: they have different abilities, so the
		 * property has to be checked.
		 */
		if (ippdrv->check_property &&
		    ippdrv->check_property(ippdrv->dev, property)) {
			DRM_ERROR("not support property.\n");
			return ERR_PTR(-EINVAL);
		}

		return ippdrv;
	} else {
		/*
		 * The user application did not set ipp_id, so search the
		 * whole driver list for a driver matching the property.
		 */
		list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
			if (ipp_check_dedicated(ippdrv, property->cmd)) {
				DRM_DEBUG_KMS("%s:used device.\n", __func__);
				continue;
			}

			if (ippdrv->check_property &&
			    ippdrv->check_property(ippdrv->dev, property)) {
				DRM_DEBUG_KMS("%s:not support property.\n",
					__func__);
				continue;
			}

			return ippdrv;
		}

		DRM_ERROR("not support ipp driver operations.\n");
	}

	return ERR_PTR(-ENODEV);
}
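/*
 * Driver selection order: drivers are visited in registration order and
 * the first one that is neither dedicated nor rejecting the property via
 * check_property() wins. An explicit ipp_id skips the search and resolves
 * directly through the idr.
 */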
static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
{
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	int count = 0;

	DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, prop_id);

	if (list_empty(&exynos_drm_ippdrv_list)) {
		DRM_DEBUG_KMS("%s:ippdrv_list is empty.\n", __func__);
		return ERR_PTR(-ENODEV);
	}

	/*
	 * This case searches an ipp driver by the prop_id handle.
	 * The ipp subsystem needs this e.g. in PAUSE state, at queue buf
	 * and at command control.
	 */
	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]\n", __func__,
			count++, (int)ippdrv);

		if (!list_empty(&ippdrv->cmd_list)) {
			list_for_each_entry(c_node, &ippdrv->cmd_list, list)
				if (c_node->property.prop_id == prop_id)
					return ippdrv;
		}
	}

	return ERR_PTR(-ENODEV);
}
int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
	struct device *dev = priv->dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_prop_list *prop_list = data;
	struct exynos_drm_ippdrv *ippdrv;
	int count = 0;

	DRM_DEBUG_KMS("%s\n", __func__);

	if (!ctx) {
		DRM_ERROR("invalid context.\n");
		return -EINVAL;
	}

	if (!prop_list) {
		DRM_ERROR("invalid property parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("%s:ipp_id[%d]\n", __func__, prop_list->ipp_id);

	if (!prop_list->ipp_id) {
		list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list)
			count++;
		/*
		 * Return the ippdrv count to the user application:
		 * first the application reads the count, then it queries
		 * each driver's capability by ipp_id.
		 */
		prop_list->count = count;
	} else {
		/*
		 * Get ippdrv capability by ipp_id. Some devices do not
		 * support the wb/output interface, so the user application
		 * uses this ioctl to detect the correct ipp driver.
		 */
		ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
						prop_list->ipp_id);
		if (IS_ERR_OR_NULL(ippdrv)) {
			DRM_ERROR("not found ipp%d driver.\n",
					prop_list->ipp_id);
			return -EINVAL;
		}

		/* copy the driver's capability list back to the ioctl data */
		*prop_list = *ippdrv->prop_list;
	}

	return 0;
}
static void ipp_print_property(struct drm_exynos_ipp_property *property,
		int idx)
{
	struct drm_exynos_ipp_config *config = &property->config[idx];
	struct drm_exynos_pos *pos = &config->pos;
	struct drm_exynos_sz *sz = &config->sz;

	DRM_DEBUG_KMS("%s:prop_id[%d]ops[%s]fmt[0x%x]\n",
		__func__, property->prop_id, idx ? "dst" : "src", config->fmt);

	DRM_DEBUG_KMS("%s:pos[%d %d %d %d]sz[%d %d]f[%d]r[%d]\n",
		__func__, pos->x, pos->y, pos->w, pos->h,
		sz->hsize, sz->vsize, config->flip, config->degree);
}
static int ipp_find_and_set_property(struct drm_exynos_ipp_property *property)
{
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	u32 prop_id = property->prop_id;

	DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, prop_id);

	ippdrv = ipp_find_drv_by_handle(prop_id);
	if (IS_ERR_OR_NULL(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return -EINVAL;
	}

	/*
	 * Find the command node in the ippdrv command list that uses
	 * this prop_id and store the new property information in it.
	 */
	list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
		if ((c_node->property.prop_id == prop_id) &&
		    (c_node->state == IPP_STATE_STOP)) {
			DRM_DEBUG_KMS("%s:found cmd[%d]ippdrv[0x%x]\n",
				__func__, property->cmd, (int)ippdrv);

			c_node->property = *property;
			return 0;
		}
	}

	DRM_ERROR("failed to search property.\n");

	return -EINVAL;
}
static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void)
{
	struct drm_exynos_ipp_cmd_work *cmd_work;

	DRM_DEBUG_KMS("%s\n", __func__);

	cmd_work = kzalloc(sizeof(*cmd_work), GFP_KERNEL);
	if (!cmd_work) {
		DRM_ERROR("failed to alloc cmd_work.\n");
		return ERR_PTR(-ENOMEM);
	}

	INIT_WORK((struct work_struct *)cmd_work, ipp_sched_cmd);

	return cmd_work;
}
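/*
 * The (struct work_struct *) cast above, in ipp_create_event_work() below
 * and in ipp_handle_cmd_work() assumes that struct drm_exynos_ipp_cmd_work
 * and struct drm_exynos_ipp_event_work keep their work_struct as the first
 * member (as laid out in exynos_drm_ipp.h), so the pointers are
 * interchangeable.
 */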
static struct drm_exynos_ipp_event_work *ipp_create_event_work(void)
{
	struct drm_exynos_ipp_event_work *event_work;

	DRM_DEBUG_KMS("%s\n", __func__);

	event_work = kzalloc(sizeof(*event_work), GFP_KERNEL);
	if (!event_work) {
		DRM_ERROR("failed to alloc event_work.\n");
		return ERR_PTR(-ENOMEM);
	}

	INIT_WORK((struct work_struct *)event_work, ipp_sched_event);

	return event_work;
}
int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
	struct device *dev = priv->dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_property *property = data;
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	int ret, i;

	DRM_DEBUG_KMS("%s\n", __func__);

	if (!ctx) {
		DRM_ERROR("invalid context.\n");
		return -EINVAL;
	}

	if (!property) {
		DRM_ERROR("invalid property parameter.\n");
		return -EINVAL;
	}

	/*
	 * Log the property set by the user application;
	 * applications set various properties.
	 */
	for_each_ipp_ops(i)
		ipp_print_property(property, i);

	/*
	 * The set-property ioctl normally generates a new prop_id, but a
	 * prop_id may already have been assigned by an earlier set property
	 * (e.g. PAUSE state). In that case find the current prop_id and use
	 * it instead of allocating a new one.
	 */
	if (property->prop_id) {
		DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
		return ipp_find_and_set_property(property);
	}

	/* find ipp driver using ipp id */
	ippdrv = ipp_find_driver(ctx, property);
	if (IS_ERR_OR_NULL(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return -EINVAL;
	}

	/* allocate command node */
	c_node = kzalloc(sizeof(*c_node), GFP_KERNEL);
	if (!c_node) {
		DRM_ERROR("failed to allocate map node.\n");
		return -ENOMEM;
	}

	/* create property id */
	ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node,
		&property->prop_id);
	if (ret) {
		DRM_ERROR("failed to create id.\n");
		goto err_clear;
	}

	DRM_DEBUG_KMS("%s:created prop_id[%d]cmd[%d]ippdrv[0x%x]\n",
		__func__, property->prop_id, property->cmd, (int)ippdrv);

	/* stored property information and ippdrv in private data */
	c_node->priv = priv;
	c_node->property = *property;
	c_node->state = IPP_STATE_IDLE;

	c_node->start_work = ipp_create_cmd_work();
	if (IS_ERR_OR_NULL(c_node->start_work)) {
		DRM_ERROR("failed to create start work.\n");
		goto err_clear;
	}

	c_node->stop_work = ipp_create_cmd_work();
	if (IS_ERR_OR_NULL(c_node->stop_work)) {
		DRM_ERROR("failed to create stop work.\n");
		goto err_free_start;
	}

	c_node->event_work = ipp_create_event_work();
	if (IS_ERR_OR_NULL(c_node->event_work)) {
		DRM_ERROR("failed to create event work.\n");
		goto err_free_stop;
	}

	mutex_init(&c_node->cmd_lock);
	mutex_init(&c_node->mem_lock);
	mutex_init(&c_node->event_lock);

	init_completion(&c_node->start_complete);
	init_completion(&c_node->stop_complete);

	for_each_ipp_ops(i)
		INIT_LIST_HEAD(&c_node->mem_list[i]);

	INIT_LIST_HEAD(&c_node->event_list);
	list_splice_init(&priv->event_list, &c_node->event_list);
	list_add_tail(&c_node->list, &ippdrv->cmd_list);

	/* make dedicated state without m2m */
	if (!ipp_is_m2m_cmd(property->cmd))
		ippdrv->dedicated = true;

	return 0;

err_free_stop:
	kfree(c_node->stop_work);
err_free_start:
	kfree(c_node->start_work);
err_clear:
	kfree(c_node);
	return ret;
}
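/*
 * A minimal sketch of the expected userspace flow, assuming a DRM fd and
 * the DRM_IOCTL_EXYNOS_IPP_PROPERTY ioctl from exynos_drm.h (illustration
 * only, not part of this driver):
 *
 *	struct drm_exynos_ipp_property prop = { .cmd = IPP_CMD_M2M };
 *	fill prop.config[EXYNOS_DRM_OPS_SRC] and prop.config[EXYNOS_DRM_OPS_DST],
 *	leave prop.ipp_id as 0 to let the subsystem pick a driver, then:
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_PROPERTY, &prop);
 *	on success, prop.prop_id identifies the new command node.
 */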
static void ipp_clean_cmd_node(struct drm_exynos_ipp_cmd_node *c_node)
{
	DRM_DEBUG_KMS("%s\n", __func__);

	/* delete list */
	list_del(&c_node->list);

	/* destroy mutex */
	mutex_destroy(&c_node->cmd_lock);
	mutex_destroy(&c_node->mem_lock);
	mutex_destroy(&c_node->event_lock);

	/* free command node */
	kfree(c_node->start_work);
	kfree(c_node->stop_work);
	kfree(c_node->event_work);
	kfree(c_node);
}
static int ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
{
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct drm_exynos_ipp_mem_node *m_node;
	struct list_head *head;
	int ret, i, count[EXYNOS_DRM_OPS_MAX] = { 0, };

	DRM_DEBUG_KMS("%s\n", __func__);

	mutex_lock(&c_node->mem_lock);

	for_each_ipp_ops(i) {
		/* source/destination memory list */
		head = &c_node->mem_list[i];

		if (list_empty(head)) {
			DRM_DEBUG_KMS("%s:%s memory empty.\n", __func__,
				i ? "dst" : "src");
			continue;
		}

		/* find memory node entry */
		list_for_each_entry(m_node, head, list) {
			DRM_DEBUG_KMS("%s:%s,count[%d]m_node[0x%x]\n", __func__,
				i ? "dst" : "src", count[i], (int)m_node);
			count[i]++;
		}
	}

	DRM_DEBUG_KMS("%s:min[%d]max[%d]\n", __func__,
		min(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]),
		max(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]));

	/*
	 * M2M operations need paired memory addresses, so take the minimum
	 * of the src and dst counts. The other cases do not use paired
	 * memory, so take the maximum count.
	 */
	if (ipp_is_m2m_cmd(property->cmd))
		ret = min(count[EXYNOS_DRM_OPS_SRC],
			count[EXYNOS_DRM_OPS_DST]);
	else
		ret = max(count[EXYNOS_DRM_OPS_SRC],
			count[EXYNOS_DRM_OPS_DST]);

	mutex_unlock(&c_node->mem_lock);

	return ret;
}
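/*
 * Example: with 3 src buffers and 2 dst buffers queued, an M2M command
 * reports min(3, 2) = 2 runnable operations (buffers are consumed in
 * pairs), while a WB or OUTPUT command reports max(3, 2) = 3 since only
 * one side of the pipeline uses DMA buffers.
 */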
static struct drm_exynos_ipp_mem_node
	*ipp_find_mem_node(struct drm_exynos_ipp_cmd_node *c_node,
	struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_mem_node *m_node;
	struct list_head *head;
	int count = 0;

	DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, qbuf->buf_id);

	/* source/destination memory list */
	head = &c_node->mem_list[qbuf->ops_id];

	/* find memory node from memory list */
	list_for_each_entry(m_node, head, list) {
		DRM_DEBUG_KMS("%s:count[%d]m_node[0x%x]\n",
			__func__, count++, (int)m_node);

		/* compare buffer id */
		if (m_node->buf_id == qbuf->buf_id)
			return m_node;
	}

	return NULL;
}
static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_mem_node *m_node)
{
	struct exynos_drm_ipp_ops *ops = NULL;
	int ret = 0;

	DRM_DEBUG_KMS("%s:node[0x%x]\n", __func__, (int)m_node);

	if (!m_node) {
		DRM_ERROR("invalid queue node.\n");
		return -EFAULT;
	}

	mutex_lock(&c_node->mem_lock);

	DRM_DEBUG_KMS("%s:ops_id[%d]\n", __func__, m_node->ops_id);

	/* get operations callback */
	ops = ippdrv->ops[m_node->ops_id];
	if (!ops) {
		DRM_ERROR("not support ops.\n");
		ret = -EFAULT;
		goto err_unlock;
	}

	/* set address and enable irq */
	if (ops->set_addr) {
		ret = ops->set_addr(ippdrv->dev, &m_node->buf_info,
			m_node->buf_id, IPP_BUF_ENQUEUE);
		if (ret)
			DRM_ERROR("failed to set addr.\n");
	}

err_unlock:
	mutex_unlock(&c_node->mem_lock);
	return ret;
}
static struct drm_exynos_ipp_mem_node
	*ipp_get_mem_node(struct drm_device *drm_dev,
	struct drm_file *file,
	struct drm_exynos_ipp_cmd_node *c_node,
	struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_buf_info buf_info;
	void *addr;
	int i;

	DRM_DEBUG_KMS("%s\n", __func__);

	mutex_lock(&c_node->mem_lock);

	m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
	if (!m_node) {
		DRM_ERROR("failed to allocate queue node.\n");
		goto err_unlock;
	}

	/* clear base address for error handling */
	memset(&buf_info, 0x0, sizeof(buf_info));

	/* operations, buffer id */
	m_node->ops_id = qbuf->ops_id;
	m_node->prop_id = qbuf->prop_id;
	m_node->buf_id = qbuf->buf_id;

	DRM_DEBUG_KMS("%s:m_node[0x%x]ops_id[%d]\n", __func__,
		(int)m_node, qbuf->ops_id);
	DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]\n", __func__,
		qbuf->prop_id, m_node->buf_id);

	for_each_ipp_planar(i) {
		DRM_DEBUG_KMS("%s:i[%d]handle[0x%x]\n", __func__,
			i, qbuf->handle[i]);

		/* get dma address by handle */
		if (qbuf->handle[i]) {
			addr = exynos_drm_gem_get_dma_addr(drm_dev,
					qbuf->handle[i], file);
			if (IS_ERR(addr)) {
				DRM_ERROR("failed to get addr.\n");
				goto err_clear;
			}

			buf_info.handles[i] = qbuf->handle[i];
			buf_info.base[i] = *(dma_addr_t *) addr;
			DRM_DEBUG_KMS("%s:i[%d]base[0x%x]hd[0x%x]\n",
				__func__, i, buf_info.base[i],
				(int)buf_info.handles[i]);
		}
	}

	m_node->filp = file;
	m_node->buf_info = buf_info;
	list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);

	mutex_unlock(&c_node->mem_lock);
	return m_node;

err_clear:
	kfree(m_node);
err_unlock:
	mutex_unlock(&c_node->mem_lock);
	return ERR_PTR(-EFAULT);
}
static int ipp_put_mem_node(struct drm_device *drm_dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_mem_node *m_node)
{
	int i;

	DRM_DEBUG_KMS("%s:node[0x%x]\n", __func__, (int)m_node);

	if (!m_node) {
		DRM_ERROR("invalid dequeue node.\n");
		return -EFAULT;
	}

	if (list_empty(&m_node->list)) {
		DRM_ERROR("empty memory node.\n");
		return -ENOMEM;
	}

	mutex_lock(&c_node->mem_lock);

	DRM_DEBUG_KMS("%s:ops_id[%d]\n", __func__, m_node->ops_id);

	/* put gem buffer */
	for_each_ipp_planar(i) {
		unsigned long handle = m_node->buf_info.handles[i];
		if (handle)
			exynos_drm_gem_put_dma_addr(drm_dev, handle,
							m_node->filp);
	}

	/* delete list in queue */
	list_del(&m_node->list);
	kfree(m_node);

	mutex_unlock(&c_node->mem_lock);

	return 0;
}
static void ipp_free_event(struct drm_pending_event *event)
{
	kfree(event);
}
static int ipp_get_event(struct drm_device *drm_dev,
		struct drm_file *file,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_send_event *e;
	unsigned long flags;

	DRM_DEBUG_KMS("%s:ops_id[%d]buf_id[%d]\n", __func__,
		qbuf->ops_id, qbuf->buf_id);

	e = kzalloc(sizeof(*e), GFP_KERNEL);

	if (!e) {
		DRM_ERROR("failed to allocate event.\n");
		spin_lock_irqsave(&drm_dev->event_lock, flags);
		file->event_space += sizeof(e->event);
		spin_unlock_irqrestore(&drm_dev->event_lock, flags);
		return -ENOMEM;
	}

	/* make event */
	e->event.base.type = DRM_EXYNOS_IPP_EVENT;
	e->event.base.length = sizeof(e->event);
	e->event.user_data = qbuf->user_data;
	e->event.prop_id = qbuf->prop_id;
	e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id;
	e->base.event = &e->event.base;
	e->base.file_priv = file;
	e->base.destroy = ipp_free_event;
	list_add_tail(&e->base.link, &c_node->event_list);

	return 0;
}
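/*
 * Events are created here at enqueue time, one per destination buffer,
 * and parked on the command node's event_list; ipp_send_event() fills in
 * the timestamp and buffer ids and moves the event to the file's event
 * list once the corresponding transfer completes.
 */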
static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_send_event *e, *te;
	int count = 0;

	DRM_DEBUG_KMS("%s\n", __func__);

	if (list_empty(&c_node->event_list)) {
		DRM_DEBUG_KMS("%s:event_list is empty.\n", __func__);
		return;
	}

	list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
		DRM_DEBUG_KMS("%s:count[%d]e[0x%x]\n",
			__func__, count++, (int)e);

		/*
		 * qbuf == NULL means delete all events: stop operations
		 * want the whole event list gone. Otherwise delete only
		 * the event with the same buf id.
		 */
		if (!qbuf) {
			/* delete list */
			list_del(&e->base.link);
			kfree(e);
		}

		/* compare buffer id */
		if (qbuf && (qbuf->buf_id ==
		    e->event.buf_id[EXYNOS_DRM_OPS_DST])) {
			/* delete list */
			list_del(&e->base.link);
			kfree(e);
			return;
		}
	}
}
void ipp_handle_cmd_work(struct device *dev,
		struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_work *cmd_work,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	cmd_work->ippdrv = ippdrv;
	cmd_work->c_node = c_node;
	queue_work(ctx->cmd_workq, (struct work_struct *)cmd_work);
}
static int ipp_queue_buf_with_run(struct device *dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_mem_node *m_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_property *property;
	struct exynos_drm_ipp_ops *ops;
	int ret;

	DRM_DEBUG_KMS("%s\n", __func__);

	ippdrv = ipp_find_drv_by_handle(qbuf->prop_id);
	if (IS_ERR_OR_NULL(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return -EFAULT;
	}

	ops = ippdrv->ops[qbuf->ops_id];
	if (!ops) {
		DRM_ERROR("failed to get ops.\n");
		return -EFAULT;
	}

	property = &c_node->property;

	if (c_node->state != IPP_STATE_START) {
		DRM_DEBUG_KMS("%s:bypass for invalid state.\n", __func__);
		return 0;
	}

	if (!ipp_check_mem_list(c_node)) {
		DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
		return 0;
	}

	/*
	 * If the destination buffer is set and the clock enabled,
	 * m2m operations need to be started here at queue_buf.
	 */
	if (ipp_is_m2m_cmd(property->cmd)) {
		struct drm_exynos_ipp_cmd_work *cmd_work = c_node->start_work;

		cmd_work->ctrl = IPP_CTRL_PLAY;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
	} else {
		ret = ipp_set_mem_node(ippdrv, c_node, m_node);
		if (ret) {
			DRM_ERROR("failed to set m node.\n");
			return ret;
		}
	}

	return 0;
}
static void ipp_clean_queue_buf(struct drm_device *drm_dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_mem_node *m_node, *tm_node;

	DRM_DEBUG_KMS("%s\n", __func__);

	if (!list_empty(&c_node->mem_list[qbuf->ops_id])) {
		/* delete list */
		list_for_each_entry_safe(m_node, tm_node,
			&c_node->mem_list[qbuf->ops_id], list) {
			if (m_node->buf_id == qbuf->buf_id &&
			    m_node->ops_id == qbuf->ops_id)
				ipp_put_mem_node(drm_dev, c_node, m_node);
		}
	}
}
int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
	struct device *dev = priv->dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_queue_buf *qbuf = data;
	struct drm_exynos_ipp_cmd_node *c_node;
	struct drm_exynos_ipp_mem_node *m_node;
	int ret;

	DRM_DEBUG_KMS("%s\n", __func__);

	if (!qbuf) {
		DRM_ERROR("invalid buf parameter.\n");
		return -EINVAL;
	}

	if (qbuf->ops_id >= EXYNOS_DRM_OPS_MAX) {
		DRM_ERROR("invalid ops parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("%s:prop_id[%d]ops_id[%s]buf_id[%d]buf_type[%d]\n",
		__func__, qbuf->prop_id, qbuf->ops_id ? "dst" : "src",
		qbuf->buf_id, qbuf->buf_type);

	/* find command node */
	c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
		qbuf->prop_id);
	if (IS_ERR_OR_NULL(c_node)) {
		DRM_ERROR("failed to get command node.\n");
		return -EFAULT;
	}

	/* buffer control */
	switch (qbuf->buf_type) {
	case IPP_BUF_ENQUEUE:
		/* get memory node */
		m_node = ipp_get_mem_node(drm_dev, file, c_node, qbuf);
		if (IS_ERR(m_node)) {
			DRM_ERROR("failed to get m_node.\n");
			return PTR_ERR(m_node);
		}

		/*
		 * First get an event for the destination buffer; then,
		 * in the M2M case, run with the destination buffer if
		 * needed.
		 */
		if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) {
			/* get event for destination buffer */
			ret = ipp_get_event(drm_dev, file, c_node, qbuf);
			if (ret) {
				DRM_ERROR("failed to get event.\n");
				goto err_clean_node;
			}

			/*
			 * The M2M case runs play control for the streaming
			 * feature; the other cases set the address and wait.
			 */
			ret = ipp_queue_buf_with_run(dev, c_node, m_node, qbuf);
			if (ret) {
				DRM_ERROR("failed to run command.\n");
				goto err_clean_node;
			}
		}
		break;
	case IPP_BUF_DEQUEUE:
		mutex_lock(&c_node->cmd_lock);

		/* put event for destination buffer */
		if (qbuf->ops_id == EXYNOS_DRM_OPS_DST)
			ipp_put_event(c_node, qbuf);

		ipp_clean_queue_buf(drm_dev, c_node, qbuf);

		mutex_unlock(&c_node->cmd_lock);
		break;
	default:
		DRM_ERROR("invalid buffer control.\n");
		return -EINVAL;
	}

	return 0;

err_clean_node:
	DRM_ERROR("clean memory nodes.\n");

	ipp_clean_queue_buf(drm_dev, c_node, qbuf);
	return ret;
}
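/*
 * A minimal sketch of the enqueue side from userspace, assuming
 * DRM_IOCTL_EXYNOS_IPP_QUEUE_BUF from exynos_drm.h and a prop_id returned
 * by the set-property ioctl (illustration only):
 *
 *	struct drm_exynos_ipp_queue_buf q = {
 *		.prop_id  = prop_id,
 *		.ops_id   = EXYNOS_DRM_OPS_DST,
 *		.buf_type = IPP_BUF_ENQUEUE,
 *		.buf_id   = 0,
 *	};
 *	q.handle[EXYNOS_DRM_PLANAR_Y] = gem_handle;
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_QUEUE_BUF, &q);
 */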
static bool exynos_drm_ipp_check_valid(struct device *dev,
		enum drm_exynos_ipp_ctrl ctrl, enum drm_exynos_ipp_state state)
{
	DRM_DEBUG_KMS("%s\n", __func__);

	if (ctrl != IPP_CTRL_PLAY) {
		if (pm_runtime_suspended(dev)) {
			DRM_ERROR("pm:runtime_suspended.\n");
			goto err_status;
		}
	}

	switch (ctrl) {
	case IPP_CTRL_PLAY:
		if (state != IPP_STATE_IDLE)
			goto err_status;
		break;
	case IPP_CTRL_STOP:
		if (state == IPP_STATE_STOP)
			goto err_status;
		break;
	case IPP_CTRL_PAUSE:
		if (state != IPP_STATE_START)
			goto err_status;
		break;
	case IPP_CTRL_RESUME:
		if (state != IPP_STATE_STOP)
			goto err_status;
		break;
	default:
		DRM_ERROR("invalid state.\n");
		goto err_status;
	}

	return true;

err_status:
	DRM_ERROR("invalid status:ctrl[%d]state[%d]\n", ctrl, state);
	return false;
}
int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
	struct exynos_drm_ippdrv *ippdrv = NULL;
	struct device *dev = priv->dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_cmd_ctrl *cmd_ctrl = data;
	struct drm_exynos_ipp_cmd_work *cmd_work;
	struct drm_exynos_ipp_cmd_node *c_node;

	DRM_DEBUG_KMS("%s\n", __func__);

	if (!ctx) {
		DRM_ERROR("invalid context.\n");
		return -EINVAL;
	}

	if (!cmd_ctrl) {
		DRM_ERROR("invalid control parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("%s:ctrl[%d]prop_id[%d]\n", __func__,
		cmd_ctrl->ctrl, cmd_ctrl->prop_id);

	ippdrv = ipp_find_drv_by_handle(cmd_ctrl->prop_id);
	if (IS_ERR(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return PTR_ERR(ippdrv);
	}

	c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
		cmd_ctrl->prop_id);
	if (IS_ERR_OR_NULL(c_node)) {
		DRM_ERROR("invalid command node list.\n");
		return -EINVAL;
	}

	if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
	    c_node->state)) {
		DRM_ERROR("invalid state.\n");
		return -EINVAL;
	}

	switch (cmd_ctrl->ctrl) {
	case IPP_CTRL_PLAY:
		if (pm_runtime_suspended(ippdrv->dev))
			pm_runtime_get_sync(ippdrv->dev);
		c_node->state = IPP_STATE_START;

		cmd_work = c_node->start_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
		break;
	case IPP_CTRL_STOP:
		cmd_work = c_node->stop_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);

		if (!wait_for_completion_timeout(&c_node->stop_complete,
		    msecs_to_jiffies(300))) {
			DRM_ERROR("timeout stop:prop_id[%d]\n",
				c_node->property.prop_id);
		}

		c_node->state = IPP_STATE_STOP;
		ippdrv->dedicated = false;
		ipp_clean_cmd_node(c_node);

		if (list_empty(&ippdrv->cmd_list))
			pm_runtime_put_sync(ippdrv->dev);
		break;
	case IPP_CTRL_PAUSE:
		cmd_work = c_node->stop_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);

		if (!wait_for_completion_timeout(&c_node->stop_complete,
		    msecs_to_jiffies(200))) {
			DRM_ERROR("timeout stop:prop_id[%d]\n",
				c_node->property.prop_id);
		}

		c_node->state = IPP_STATE_STOP;
		break;
	case IPP_CTRL_RESUME:
		c_node->state = IPP_STATE_START;
		cmd_work = c_node->start_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
		break;
	default:
		DRM_ERROR("could not support this state currently.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("%s:done ctrl[%d]prop_id[%d]\n", __func__,
		cmd_ctrl->ctrl, cmd_ctrl->prop_id);

	return 0;
}
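/*
 * Command control state machine, as enforced by
 * exynos_drm_ipp_check_valid() above:
 *
 *	IDLE  --PLAY-->  START --PAUSE--> STOP --RESUME--> START
 *	START --STOP-->  STOP (node cleaned, dedicated flag cleared)
 *
 * PLAY takes a runtime PM reference when the device is suspended;
 * STOP drops it once the driver's command list is empty.
 */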
int exynos_drm_ippnb_register(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(
		&exynos_drm_ippnb_list, nb);
}

int exynos_drm_ippnb_unregister(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(
		&exynos_drm_ippnb_list, nb);
}

int exynos_drm_ippnb_send_event(unsigned long val, void *v)
{
	return blocking_notifier_call_chain(
		&exynos_drm_ippnb_list, val, v);
}
static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_property *property)
{
	struct exynos_drm_ipp_ops *ops = NULL;
	bool swap = false;
	int ret, i;

	if (!property) {
		DRM_ERROR("invalid property parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);

	/* reset h/w block */
	if (ippdrv->reset &&
	    ippdrv->reset(ippdrv->dev)) {
		DRM_ERROR("failed to reset.\n");
		return -EINVAL;
	}

	/* set source,destination operations */
	for_each_ipp_ops(i) {
		struct drm_exynos_ipp_config *config =
			&property->config[i];

		ops = ippdrv->ops[i];
		if (!ops || !config) {
			DRM_ERROR("not support ops and config.\n");
			return -EINVAL;
		}

		/* set format */
		if (ops->set_fmt) {
			ret = ops->set_fmt(ippdrv->dev, config->fmt);
			if (ret) {
				DRM_ERROR("not support format.\n");
				return ret;
			}
		}

		/* set transform for rotation, flip */
		if (ops->set_transf) {
			ret = ops->set_transf(ippdrv->dev, config->degree,
				config->flip, &swap);
			if (ret) {
				DRM_ERROR("not support transform.\n");
				return ret;
			}
		}

		/* set size */
		if (ops->set_size) {
			ret = ops->set_size(ippdrv->dev, swap, &config->pos,
				&config->sz);
			if (ret) {
				DRM_ERROR("not support size.\n");
				return ret;
			}
		}
	}

	return 0;
}
static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct list_head *head;
	int ret, i;

	DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);

	/* store command info in ippdrv */
	ippdrv->cmd = c_node;

	if (!ipp_check_mem_list(c_node)) {
		DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
		return -ENOMEM;
	}

	/* set current property in ippdrv */
	ret = ipp_set_property(ippdrv, property);
	if (ret) {
		DRM_ERROR("failed to set property.\n");
		ippdrv->cmd = NULL;
		return ret;
	}

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			m_node = list_first_entry(head,
				struct drm_exynos_ipp_mem_node, list);
			if (!m_node) {
				DRM_ERROR("failed to get node.\n");
				return -EFAULT;
			}

			DRM_DEBUG_KMS("%s:m_node[0x%x]\n",
				__func__, (int)m_node);

			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				return ret;
			}
		}
		break;
	case IPP_CMD_WB:
		/* destination memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];

		list_for_each_entry(m_node, head, list) {
			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				return ret;
			}
		}
		break;
	case IPP_CMD_OUTPUT:
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		list_for_each_entry(m_node, head, list) {
			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				return ret;
			}
		}
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, property->cmd);

	/* start operations */
	if (ippdrv->start) {
		ret = ippdrv->start(ippdrv->dev, property->cmd);
		if (ret) {
			DRM_ERROR("failed to start ops.\n");
			return ret;
		}
	}

	return 0;
}
static int ipp_stop_property(struct drm_device *drm_dev,
		struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct drm_exynos_ipp_mem_node *m_node, *tm_node;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct list_head *head;
	int ret = 0, i;

	DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);

	/* put event */
	ipp_put_event(c_node, NULL);

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			if (list_empty(head)) {
				DRM_DEBUG_KMS("%s:mem_list is empty.\n",
					__func__);
				break;
			}

			list_for_each_entry_safe(m_node, tm_node,
				head, list) {
				ret = ipp_put_mem_node(drm_dev, c_node,
					m_node);
				if (ret) {
					DRM_ERROR("failed to put m_node.\n");
					goto err_clear;
				}
			}
		}
		break;
	case IPP_CMD_WB:
		/* destination memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];

		if (list_empty(head)) {
			DRM_DEBUG_KMS("%s:mem_list is empty.\n", __func__);
			break;
		}

		list_for_each_entry_safe(m_node, tm_node, head, list) {
			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to put m_node.\n");
				goto err_clear;
			}
		}
		break;
	case IPP_CMD_OUTPUT:
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		if (list_empty(head)) {
			DRM_DEBUG_KMS("%s:mem_list is empty.\n", __func__);
			break;
		}

		list_for_each_entry_safe(m_node, tm_node, head, list) {
			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to put m_node.\n");
				goto err_clear;
			}
		}
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		ret = -EINVAL;
		goto err_clear;
	}

err_clear:
	/* stop operations */
	if (ippdrv->stop)
		ippdrv->stop(ippdrv->dev, property->cmd);

	return ret;
}
void ipp_sched_cmd(struct work_struct *work)
{
	struct drm_exynos_ipp_cmd_work *cmd_work =
		(struct drm_exynos_ipp_cmd_work *)work;
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	struct drm_exynos_ipp_property *property;
	int ret;

	DRM_DEBUG_KMS("%s\n", __func__);

	ippdrv = cmd_work->ippdrv;
	if (!ippdrv) {
		DRM_ERROR("invalid ippdrv list.\n");
		return;
	}

	c_node = cmd_work->c_node;
	if (!c_node) {
		DRM_ERROR("invalid command node list.\n");
		return;
	}

	mutex_lock(&c_node->cmd_lock);

	property = &c_node->property;

	switch (cmd_work->ctrl) {
	case IPP_CTRL_PLAY:
	case IPP_CTRL_RESUME:
		ret = ipp_start_property(ippdrv, c_node);
		if (ret) {
			DRM_ERROR("failed to start property:prop_id[%d]\n",
				c_node->property.prop_id);
			goto err_unlock;
		}

		/*
		 * The M2M case waits for completion of the transfer:
		 * it performs a single unit operation with multiple
		 * queues, so it has to wait until the data transfer
		 * is done.
		 */
		if (ipp_is_m2m_cmd(property->cmd)) {
			if (!wait_for_completion_timeout
			    (&c_node->start_complete, msecs_to_jiffies(200))) {
				DRM_ERROR("timeout event:prop_id[%d]\n",
					c_node->property.prop_id);
				goto err_unlock;
			}
		}
		break;
	case IPP_CTRL_STOP:
	case IPP_CTRL_PAUSE:
		ret = ipp_stop_property(ippdrv->drm_dev, ippdrv,
			c_node);
		if (ret) {
			DRM_ERROR("failed to stop property.\n");
			goto err_unlock;
		}

		complete(&c_node->stop_complete);
		break;
	default:
		DRM_ERROR("unknown control type\n");
		break;
	}

	DRM_DEBUG_KMS("%s:ctrl[%d] done.\n", __func__, cmd_work->ctrl);

err_unlock:
	mutex_unlock(&c_node->cmd_lock);
}
static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node, int *buf_id)
{
	struct drm_device *drm_dev = ippdrv->drm_dev;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_queue_buf qbuf;
	struct drm_exynos_ipp_send_event *e;
	struct list_head *head;
	struct timeval now;
	unsigned long flags;
	u32 tbuf_id[EXYNOS_DRM_OPS_MAX] = {0, };
	int ret, i;

	for_each_ipp_ops(i)
		DRM_DEBUG_KMS("%s:%s buf_id[%d]\n", __func__,
			i ? "dst" : "src", buf_id[i]);

	if (!drm_dev) {
		DRM_ERROR("failed to get drm_dev.\n");
		return -EINVAL;
	}

	if (!property) {
		DRM_ERROR("failed to get property.\n");
		return -EINVAL;
	}

	if (list_empty(&c_node->event_list)) {
		DRM_DEBUG_KMS("%s:event list is empty.\n", __func__);
		return 0;
	}

	if (!ipp_check_mem_list(c_node)) {
		DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
		return 0;
	}

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			m_node = list_first_entry(head,
				struct drm_exynos_ipp_mem_node, list);
			if (!m_node) {
				DRM_ERROR("empty memory node.\n");
				return -ENOMEM;
			}

			tbuf_id[i] = m_node->buf_id;
			DRM_DEBUG_KMS("%s:%s buf_id[%d]\n", __func__,
				i ? "dst" : "src", tbuf_id[i]);

			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
			if (ret)
				DRM_ERROR("failed to put m_node.\n");
		}
		break;
	case IPP_CMD_WB:
		/* clear buf for finding */
		memset(&qbuf, 0x0, sizeof(qbuf));
		qbuf.ops_id = EXYNOS_DRM_OPS_DST;
		qbuf.buf_id = buf_id[EXYNOS_DRM_OPS_DST];

		/* get memory node entry */
		m_node = ipp_find_mem_node(c_node, &qbuf);
		if (!m_node) {
			DRM_ERROR("empty memory node.\n");
			return -ENOMEM;
		}

		tbuf_id[EXYNOS_DRM_OPS_DST] = m_node->buf_id;

		ret = ipp_put_mem_node(drm_dev, c_node, m_node);
		if (ret)
			DRM_ERROR("failed to put m_node.\n");
		break;
	case IPP_CMD_OUTPUT:
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		m_node = list_first_entry(head,
			struct drm_exynos_ipp_mem_node, list);
		if (!m_node) {
			DRM_ERROR("empty memory node.\n");
			return -ENOMEM;
		}

		tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id;

		ret = ipp_put_mem_node(drm_dev, c_node, m_node);
		if (ret)
			DRM_ERROR("failed to put m_node.\n");
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		return -EINVAL;
	}

	if (tbuf_id[EXYNOS_DRM_OPS_DST] != buf_id[EXYNOS_DRM_OPS_DST])
		DRM_ERROR("failed to match buf_id[%d %d]prop_id[%d]\n",
			tbuf_id[1], buf_id[1], property->prop_id);

	/*
	 * The command node keeps an event list for destination buffers:
	 * when a destination buffer is enqueued to the mem list, an event
	 * is created and linked to the tail of the event list, so the
	 * first event corresponds to the first enqueued buffer.
	 */
	e = list_first_entry(&c_node->event_list,
		struct drm_exynos_ipp_send_event, base.link);
	if (!e) {
		DRM_ERROR("empty event.\n");
		return -EINVAL;
	}

	do_gettimeofday(&now);
	DRM_DEBUG_KMS("%s:tv_sec[%ld]tv_usec[%ld]\n"
		, __func__, now.tv_sec, now.tv_usec);
	e->event.tv_sec = now.tv_sec;
	e->event.tv_usec = now.tv_usec;
	e->event.prop_id = property->prop_id;

	/* set buffer id about source destination */
	for_each_ipp_ops(i)
		e->event.buf_id[i] = tbuf_id[i];

	spin_lock_irqsave(&drm_dev->event_lock, flags);
	list_move_tail(&e->base.link, &e->base.file_priv->event_list);
	wake_up_interruptible(&e->base.file_priv->event_wait);
	spin_unlock_irqrestore(&drm_dev->event_lock, flags);

	DRM_DEBUG_KMS("%s:done cmd[%d]prop_id[%d]buf_id[%d]\n", __func__,
		property->cmd, property->prop_id, tbuf_id[EXYNOS_DRM_OPS_DST]);

	return 0;
}
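/*
 * Userspace picks the completed event up through the normal DRM event
 * stream on the device fd. A minimal sketch, assuming the generic
 * struct drm_event header defined in drm.h (illustration only):
 *
 *	char buf[256];
 *	ssize_t len = read(fd, buf, sizeof(buf));
 *	struct drm_event *ev = (struct drm_event *)buf;
 *	if (len > 0 && ev->type == DRM_EXYNOS_IPP_EVENT) {
 *		struct drm_exynos_ipp_event *ipp_ev = (void *)ev;
 *		inspect ipp_ev->prop_id and ipp_ev->buf_id[EXYNOS_DRM_OPS_DST];
 *	}
 */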
void ipp_sched_event(struct work_struct *work)
{
	struct drm_exynos_ipp_event_work *event_work =
		(struct drm_exynos_ipp_event_work *)work;
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	int ret;

	if (!event_work) {
		DRM_ERROR("failed to get event_work.\n");
		return;
	}

	DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__,
		event_work->buf_id[EXYNOS_DRM_OPS_DST]);

	ippdrv = event_work->ippdrv;
	if (!ippdrv) {
		DRM_ERROR("failed to get ipp driver.\n");
		return;
	}

	c_node = ippdrv->cmd;
	if (!c_node) {
		DRM_ERROR("failed to get command node.\n");
		return;
	}

	/*
	 * IPP keeps the command thread and the event thread synchronized.
	 * If userland closes the device immediately, complete the event
	 * anyway so the command thread does not block.
	 */
	if (c_node->state != IPP_STATE_START) {
		DRM_DEBUG_KMS("%s:bypass state[%d]prop_id[%d]\n",
			__func__, c_node->state, c_node->property.prop_id);
		goto err_completion;
	}

	mutex_lock(&c_node->event_lock);

	ret = ipp_send_event(ippdrv, c_node, event_work->buf_id);
	if (ret)
		DRM_ERROR("failed to send event.\n");

	mutex_unlock(&c_node->event_lock);

err_completion:
	if (ipp_is_m2m_cmd(c_node->property.cmd))
		complete(&c_node->start_complete);
}
static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);
	struct exynos_drm_ippdrv *ippdrv;
	int ret, count = 0;

	DRM_DEBUG_KMS("%s\n", __func__);

	/* get ipp driver entry */
	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		ippdrv->drm_dev = drm_dev;

		ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv,
			&ippdrv->ipp_id);
		if (ret) {
			DRM_ERROR("failed to create id.\n");
			goto err_idr;
		}

		DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]ipp_id[%d]\n", __func__,
			count++, (int)ippdrv, ippdrv->ipp_id);

		if (ippdrv->ipp_id == 0) {
			DRM_ERROR("failed to get ipp_id[%d]\n",
				ippdrv->ipp_id);
			goto err_idr;
		}

		/* store parent device for node */
		ippdrv->parent_dev = dev;

		/* store event work queue and handler */
		ippdrv->event_workq = ctx->event_workq;
		ippdrv->sched_event = ipp_sched_event;
		INIT_LIST_HEAD(&ippdrv->cmd_list);

		if (is_drm_iommu_supported(drm_dev)) {
			ret = drm_iommu_attach_device(drm_dev, ippdrv->dev);
			if (ret) {
				DRM_ERROR("failed to activate iommu\n");
				goto err_iommu;
			}
		}
	}

	return 0;

err_iommu:
	/* get ipp driver entry */
	list_for_each_entry_reverse(ippdrv, &exynos_drm_ippdrv_list, drv_list)
		if (is_drm_iommu_supported(drm_dev))
			drm_iommu_detach_device(drm_dev, ippdrv->dev);

err_idr:
	idr_remove_all(&ctx->ipp_idr);
	idr_remove_all(&ctx->prop_idr);
	idr_destroy(&ctx->ipp_idr);
	idr_destroy(&ctx->prop_idr);
	return ret;
}
static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
{
	struct exynos_drm_ippdrv *ippdrv, *t;

	DRM_DEBUG_KMS("%s\n", __func__);

	/*
	 * get ipp driver entry; unregistering removes the entry from the
	 * list, so iterate with the safe variant.
	 */
	list_for_each_entry_safe(ippdrv, t, &exynos_drm_ippdrv_list, drv_list) {
		if (is_drm_iommu_supported(drm_dev))
			drm_iommu_detach_device(drm_dev, ippdrv->dev);

		ippdrv->drm_dev = NULL;
		exynos_drm_ippdrv_unregister(ippdrv);
	}
}
static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv;

	DRM_DEBUG_KMS("%s\n", __func__);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		DRM_ERROR("failed to allocate priv.\n");
		return -ENOMEM;
	}
	priv->dev = dev;
	file_priv->ipp_priv = priv;

	INIT_LIST_HEAD(&priv->event_list);

	DRM_DEBUG_KMS("%s:done priv[0x%x]\n", __func__, (int)priv);

	return 0;
}
static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
	struct exynos_drm_ippdrv *ippdrv = NULL;
	struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
	int count = 0;

	DRM_DEBUG_KMS("%s:for priv[0x%x]\n", __func__, (int)priv);

	if (list_empty(&exynos_drm_ippdrv_list)) {
		DRM_DEBUG_KMS("%s:ippdrv_list is empty.\n", __func__);
		goto err_clear;
	}

	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		if (list_empty(&ippdrv->cmd_list))
			continue;

		list_for_each_entry_safe(c_node, tc_node,
			&ippdrv->cmd_list, list) {
			DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]\n",
				__func__, count++, (int)ippdrv);

			if (c_node->priv == priv) {
				/*
				 * userland went to an abnormal state
				 * (e.g. the process was killed) and closed
				 * the file without a stop cmd ctrl, so
				 * perform the stop operation here.
				 */
				if (c_node->state == IPP_STATE_START) {
					ipp_stop_property(drm_dev, ippdrv,
						c_node);
					c_node->state = IPP_STATE_STOP;
				}

				ippdrv->dedicated = false;
				ipp_clean_cmd_node(c_node);
				if (list_empty(&ippdrv->cmd_list))
					pm_runtime_put_sync(ippdrv->dev);
			}
		}
	}

err_clear:
	kfree(priv);
	return;
}
static int __devinit ipp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ipp_context *ctx;
	struct exynos_drm_subdrv *subdrv;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	DRM_DEBUG_KMS("%s\n", __func__);

	mutex_init(&ctx->ipp_lock);
	mutex_init(&ctx->prop_lock);

	idr_init(&ctx->ipp_idr);
	idr_init(&ctx->prop_idr);

	/*
	 * Create a single thread for ipp events.
	 * IPP supports an event thread for the IPP drivers: a driver
	 * sends its event_work to this thread, and the event thread
	 * sends the event to the user process.
	 */
	ctx->event_workq = create_singlethread_workqueue("ipp_event");
	if (!ctx->event_workq) {
		dev_err(dev, "failed to create event workqueue\n");
		ret = -EINVAL;
		goto err_clear;
	}

	/*
	 * Create a single thread for ipp commands.
	 * IPP supports a command thread for the user process: the process
	 * makes a command node using the set property ioctl, builds a
	 * start_work and sends it to the command thread, which then
	 * starts the property.
	 */
	ctx->cmd_workq = create_singlethread_workqueue("ipp_cmd");
	if (!ctx->cmd_workq) {
		dev_err(dev, "failed to create cmd workqueue\n");
		ret = -EINVAL;
		goto err_event_workq;
	}

	/* set sub driver information */
	subdrv = &ctx->subdrv;
	subdrv->dev = dev;
	subdrv->probe = ipp_subdrv_probe;
	subdrv->remove = ipp_subdrv_remove;
	subdrv->open = ipp_subdrv_open;
	subdrv->close = ipp_subdrv_close;

	platform_set_drvdata(pdev, ctx);

	ret = exynos_drm_subdrv_register(subdrv);
	if (ret < 0) {
		DRM_ERROR("failed to register drm ipp device.\n");
		goto err_cmd_workq;
	}

	dev_info(&pdev->dev, "drm ipp registered successfully.\n");

	return 0;

err_cmd_workq:
	destroy_workqueue(ctx->cmd_workq);
err_event_workq:
	destroy_workqueue(ctx->event_workq);
err_clear:
	kfree(ctx);
	return ret;
}
static int __devexit ipp_remove(struct platform_device *pdev)
{
	struct ipp_context *ctx = platform_get_drvdata(pdev);

	DRM_DEBUG_KMS("%s\n", __func__);

	/* unregister sub driver */
	exynos_drm_subdrv_unregister(&ctx->subdrv);

	/* remove,destroy ipp idr */
	idr_remove_all(&ctx->ipp_idr);
	idr_remove_all(&ctx->prop_idr);
	idr_destroy(&ctx->ipp_idr);
	idr_destroy(&ctx->prop_idr);

	mutex_destroy(&ctx->ipp_lock);
	mutex_destroy(&ctx->prop_lock);

	/* destroy command, event work queue */
	destroy_workqueue(ctx->cmd_workq);
	destroy_workqueue(ctx->event_workq);

	kfree(ctx);

	return 0;
}
static int ipp_power_ctrl(struct ipp_context *ctx, bool enable)
{
	DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);

	return 0;
}
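/*
 * ipp_power_ctrl() is currently a stub: per TODO item 7 at the top of this
 * file, power and sysmmu control still need to be implemented here. Both
 * the system sleep and runtime PM callbacks below funnel into it.
 */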
#ifdef CONFIG_PM_SLEEP
static int ipp_suspend(struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	DRM_DEBUG_KMS("%s\n", __func__);

	if (pm_runtime_suspended(dev))
		return 0;

	return ipp_power_ctrl(ctx, false);
}

static int ipp_resume(struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	DRM_DEBUG_KMS("%s\n", __func__);

	if (!pm_runtime_suspended(dev))
		return ipp_power_ctrl(ctx, true);

	return 0;
}
#endif
#ifdef CONFIG_PM_RUNTIME
static int ipp_runtime_suspend(struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	DRM_DEBUG_KMS("%s\n", __func__);

	return ipp_power_ctrl(ctx, false);
}

static int ipp_runtime_resume(struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	DRM_DEBUG_KMS("%s\n", __func__);

	return ipp_power_ctrl(ctx, true);
}
#endif
static const struct dev_pm_ops ipp_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ipp_suspend, ipp_resume)
	SET_RUNTIME_PM_OPS(ipp_runtime_suspend, ipp_runtime_resume, NULL)
};
struct platform_driver ipp_driver = {
	.probe		= ipp_probe,
	.remove		= __devexit_p(ipp_remove),
	.driver		= {
		.name	= "exynos-drm-ipp",
		.owner	= THIS_MODULE,
		.pm	= &ipp_pm_ops,
	},
};