/*
 * Copyright (C) 2012 Samsung Electronics Co.Ltd
 * Authors:
 *	Eunchul Kim <chulspro.kim@samsung.com>
 *	Jinyoung Jeon <jy0.jeon@samsung.com>
 *	Sangmin Lee <lsmin.lee@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <plat/map-base.h>

#include <drm/drmP.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_ipp.h"
#include "exynos_drm_iommu.h"

/*
 * IPP stands for Image Post Processing and supports image
 * scaler/rotator and input/output DMA operations using FIMC, GSC,
 * Rotator, and so on.
 * IPP is an integrated device driver for hardware blocks that share
 * these attributes.
 */

/*
 * TODO
 * 1. expand command control id.
 * 2. integrate property and config.
 * 3. remove the send_event id check routine.
 * 4. compare send_event id if needed.
 * 5. free the subdrv_remove notifier callback list if needed.
 * 6. check subdrv_open against multiple opens.
 * 7. implement power and sysmmu control in power_on.
 */

#define get_ipp_context(dev)	platform_get_drvdata(to_platform_device(dev))
#define ipp_is_m2m_cmd(c)	(c == IPP_CMD_M2M)

/*
 * A structure of event.
 *
 * @base: base of event.
 * @event: ipp event.
 */
struct drm_exynos_ipp_send_event {
	struct drm_pending_event	base;
	struct drm_exynos_ipp_event	event;
};

/*
 * A structure of memory node.
 *
 * @list: list head to memory queue information.
 * @ops_id: id of operations.
 * @prop_id: id of property.
 * @buf_id: id of buffer.
 * @buf_info: gem objects and dma address, size.
 * @filp: a pointer to drm_file.
 */
struct drm_exynos_ipp_mem_node {
	struct list_head	list;
	enum drm_exynos_ops_id	ops_id;
	u32	prop_id;
	u32	buf_id;
	struct drm_exynos_ipp_buf_info	buf_info;
	struct drm_file		*filp;
};

/*
 * A structure of ipp context.
 *
 * @subdrv: sub driver used to hook into the exynos drm core.
 * @ipp_lock: lock for synchronization of access to ipp_idr.
 * @prop_lock: lock for synchronization of access to prop_idr.
 * @ipp_idr: ipp driver idr.
 * @prop_idr: property idr.
 * @event_workq: event work queue.
 * @cmd_workq: command work queue.
 */
struct ipp_context {
	struct exynos_drm_subdrv	subdrv;
	struct mutex	ipp_lock;
	struct mutex	prop_lock;
	struct idr	ipp_idr;
	struct idr	prop_idr;
	struct workqueue_struct	*event_workq;
	struct workqueue_struct	*cmd_workq;
};

static LIST_HEAD(exynos_drm_ippdrv_list);
static DEFINE_MUTEX(exynos_drm_ippdrv_lock);
static BLOCKING_NOTIFIER_HEAD(exynos_drm_ippnb_list);

int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
{
	DRM_DEBUG_KMS("%s\n", __func__);

	if (!ippdrv)
		return -EINVAL;

	mutex_lock(&exynos_drm_ippdrv_lock);
	list_add_tail(&ippdrv->drv_list, &exynos_drm_ippdrv_list);
	mutex_unlock(&exynos_drm_ippdrv_lock);

	return 0;
}

int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
{
	DRM_DEBUG_KMS("%s\n", __func__);

	if (!ippdrv)
		return -EINVAL;

	mutex_lock(&exynos_drm_ippdrv_lock);
	list_del(&ippdrv->drv_list);
	mutex_unlock(&exynos_drm_ippdrv_lock);

	return 0;
}

static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj,
		u32 *idp)
{
	int ret;

	DRM_DEBUG_KMS("%s\n", __func__);

again:
	/* ensure there is space available to allocate a handle */
	if (idr_pre_get(id_idr, GFP_KERNEL) == 0) {
		DRM_ERROR("failed to get idr.\n");
		return -ENOMEM;
	}

	/* do the allocation under our mutex lock */
	mutex_lock(lock);
	ret = idr_get_new_above(id_idr, obj, 1, (int *)idp);
	mutex_unlock(lock);
	if (ret == -EAGAIN)
		goto again;

	return ret;
}
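
/*
 * Example (sketch): how callers in this file allocate a handle. Ids
 * start at 1, so an id of 0 can be treated as "unset".
 *
 *	u32 prop_id;
 *	int ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock,
 *				c_node, &prop_id);
 *	if (ret)
 *		return ret;	// -ENOMEM if idr_pre_get() failed
 *	// prop_id now maps back to c_node via ipp_find_obj()
 */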

static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id)
{
	void *obj;

	DRM_DEBUG_KMS("%s:id[%d]\n", __func__, id);

	mutex_lock(lock);

	/* find object using handle */
	obj = idr_find(id_idr, id);
	if (!obj) {
		DRM_ERROR("failed to find object.\n");
		mutex_unlock(lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_unlock(lock);

	return obj;
}

static inline bool ipp_check_dedicated(struct exynos_drm_ippdrv *ippdrv,
		enum drm_exynos_ipp_cmd	cmd)
{
	/*
	 * A driver is busy if it is already dedicated, or if it is
	 * running a WB/OUTPUT operation while powered on.
	 */
	if (ippdrv->dedicated || (!ipp_is_m2m_cmd(cmd) &&
	    !pm_runtime_suspended(ippdrv->dev)))
		return true;

	return false;
}

static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
		struct drm_exynos_ipp_property *property)
{
	struct exynos_drm_ippdrv *ippdrv;
	u32 ipp_id = property->ipp_id;

	DRM_DEBUG_KMS("%s:ipp_id[%d]\n", __func__, ipp_id);

	if (ipp_id) {
		/* find ipp driver using idr */
		ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
			ipp_id);
		if (IS_ERR_OR_NULL(ippdrv)) {
			DRM_ERROR("not found ipp%d driver.\n", ipp_id);
			return ippdrv;
		}

		/*
		 * WB and OUTPUT operations do not support multiple
		 * users, so the set property ioctl marks the driver
		 * dedicated. The flag is cleared when the ipp driver
		 * finishes its operations.
		 */
		if (ipp_check_dedicated(ippdrv, property->cmd)) {
			DRM_ERROR("chosen device is already in use.\n");
			return ERR_PTR(-EBUSY);
		}

		/*
		 * ipp drivers have different capabilities, so check
		 * whether this driver can handle the requested property.
		 */
		if (ippdrv->check_property &&
		    ippdrv->check_property(ippdrv->dev, property)) {
			DRM_ERROR("not support property.\n");
			return ERR_PTR(-EINVAL);
		}

		return ippdrv;
	} else {
		/*
		 * The user application did not set ipp_id, so search the
		 * whole driver list for a driver that is free and that
		 * supports the requested property.
		 */
		list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
			if (ipp_check_dedicated(ippdrv, property->cmd)) {
				DRM_DEBUG_KMS("%s:used device.\n", __func__);
				continue;
			}

			if (ippdrv->check_property &&
			    ippdrv->check_property(ippdrv->dev, property)) {
				DRM_DEBUG_KMS("%s:not support property.\n",
					__func__);
				continue;
			}

			return ippdrv;
		}

		DRM_ERROR("no ipp driver supports the requested operations.\n");
	}

	return ERR_PTR(-ENODEV);
}

static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
{
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	int count = 0;

	DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, prop_id);

	if (list_empty(&exynos_drm_ippdrv_list)) {
		DRM_DEBUG_KMS("%s:ippdrv_list is empty.\n", __func__);
		return ERR_PTR(-ENODEV);
	}

	/*
	 * Search for the ipp driver that owns the prop_id handle.
	 * Several paths look drivers up by prop_id, e.g. the PAUSE
	 * state, queue buf, and command control.
	 */
	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]\n", __func__,
			count++, (int)ippdrv);

		if (!list_empty(&ippdrv->cmd_list)) {
			list_for_each_entry(c_node, &ippdrv->cmd_list, list)
				if (c_node->property.prop_id == prop_id)
					return ippdrv;
		}
	}

	return ERR_PTR(-ENODEV);
}

int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
	struct device *dev = priv->dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_prop_list *prop_list = data;
	struct exynos_drm_ippdrv *ippdrv;
	int count = 0;

	DRM_DEBUG_KMS("%s\n", __func__);

	if (!ctx) {
		DRM_ERROR("invalid context.\n");
		return -EINVAL;
	}

	if (!prop_list) {
		DRM_ERROR("invalid property parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("%s:ipp_id[%d]\n", __func__, prop_list->ipp_id);

	if (!prop_list->ipp_id) {
		list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list)
			count++;
		/*
		 * Return the ippdrv count to the user application.
		 * In the first step userspace reads the driver count,
		 * and in the second step it queries each driver's
		 * capability using ipp_id.
		 */
		prop_list->count = count;
	} else {
		/*
		 * Return the capability of the driver identified by
		 * ipp_id. Some devices do not support the wb or output
		 * interfaces, so userspace uses this ioctl to pick the
		 * right ipp driver.
		 */
		ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
						prop_list->ipp_id);
		if (IS_ERR_OR_NULL(ippdrv)) {
			DRM_ERROR("not found ipp%d driver.\n",
					prop_list->ipp_id);
			return -EINVAL;
		}

		/*
		 * copy into the ioctl buffer; a plain pointer assignment
		 * would be lost when this function returns.
		 */
		*prop_list = *ippdrv->prop_list;
	}

	return 0;
}
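
/*
 * Example (sketch, userspace side): the two-step probe described above,
 * assuming the uapi names from exynos_drm.h and that ipp ids are
 * allocated sequentially from 1. Error handling is omitted.
 *
 *	struct drm_exynos_ipp_prop_list plist = { .ipp_id = 0 };
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_GET_PROPERTY, &plist);
 *	for (u32 i = 1; i <= plist.count; i++) {
 *		struct drm_exynos_ipp_prop_list cap = { .ipp_id = i };
 *		ioctl(fd, DRM_IOCTL_EXYNOS_IPP_GET_PROPERTY, &cap);
 *		// inspect the capability fields of driver i
 *	}
 */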

static void ipp_print_property(struct drm_exynos_ipp_property *property,
		int idx)
{
	struct drm_exynos_ipp_config *config = &property->config[idx];
	struct drm_exynos_pos *pos = &config->pos;
	struct drm_exynos_sz *sz = &config->sz;

	DRM_DEBUG_KMS("%s:prop_id[%d]ops[%s]fmt[0x%x]\n",
		__func__, property->prop_id, idx ? "dst" : "src", config->fmt);

	DRM_DEBUG_KMS("%s:pos[%d %d %d %d]sz[%d %d]f[%d]r[%d]\n",
		__func__, pos->x, pos->y, pos->w, pos->h,
		sz->hsize, sz->vsize, config->flip, config->degree);
}

static int ipp_find_and_set_property(struct drm_exynos_ipp_property *property)
{
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	u32 prop_id = property->prop_id;

	DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, prop_id);

	ippdrv = ipp_find_drv_by_handle(prop_id);
	if (IS_ERR_OR_NULL(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return -EINVAL;
	}

	/*
	 * Find the command node in the ippdrv command list using
	 * prop_id. When a stopped node with a matching prop_id is
	 * found, store the new property information in it.
	 */
	list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
		if ((c_node->property.prop_id == prop_id) &&
		    (c_node->state == IPP_STATE_STOP)) {
			DRM_DEBUG_KMS("%s:found cmd[%d]ippdrv[0x%x]\n",
				__func__, property->cmd, (int)ippdrv);

			c_node->property = *property;
			return 0;
		}
	}

	DRM_ERROR("failed to search property.\n");

	return -EINVAL;
}

static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void)
{
	struct drm_exynos_ipp_cmd_work *cmd_work;

	DRM_DEBUG_KMS("%s\n", __func__);

	cmd_work = kzalloc(sizeof(*cmd_work), GFP_KERNEL);
	if (!cmd_work) {
		DRM_ERROR("failed to alloc cmd_work.\n");
		return ERR_PTR(-ENOMEM);
	}

	INIT_WORK((struct work_struct *)cmd_work, ipp_sched_cmd);

	return cmd_work;
}

static struct drm_exynos_ipp_event_work *ipp_create_event_work(void)
{
	struct drm_exynos_ipp_event_work *event_work;

	DRM_DEBUG_KMS("%s\n", __func__);

	event_work = kzalloc(sizeof(*event_work), GFP_KERNEL);
	if (!event_work) {
		DRM_ERROR("failed to alloc event_work.\n");
		return ERR_PTR(-ENOMEM);
	}

	INIT_WORK((struct work_struct *)event_work, ipp_sched_event);

	return event_work;
}

int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
	struct device *dev = priv->dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_property *property = data;
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	int ret, i;

	DRM_DEBUG_KMS("%s\n", __func__);

	if (!ctx) {
		DRM_ERROR("invalid context.\n");
		return -EINVAL;
	}

	if (!property) {
		DRM_ERROR("invalid property parameter.\n");
		return -EINVAL;
	}

	/* log the property handed in by the user application */
	for_each_ipp_ops(i)
		ipp_print_property(property, i);

	/*
	 * The set property ioctl normally generates a new prop_id, but
	 * a prop_id may already have been assigned by an earlier set
	 * property call (e.g. the PAUSE state). In that case find the
	 * current prop_id and reuse it instead of allocating a new one.
	 */
	if (property->prop_id) {
		DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
		return ipp_find_and_set_property(property);
	}

	/* find ipp driver using ipp id */
	ippdrv = ipp_find_driver(ctx, property);
	if (IS_ERR_OR_NULL(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return -EINVAL;
	}

	/* allocate command node */
	c_node = kzalloc(sizeof(*c_node), GFP_KERNEL);
	if (!c_node) {
		DRM_ERROR("failed to allocate command node.\n");
		return -ENOMEM;
	}

	/* create property id */
	ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node,
		&property->prop_id);
	if (ret) {
		DRM_ERROR("failed to create id.\n");
		goto err_clear;
	}

	DRM_DEBUG_KMS("%s:created prop_id[%d]cmd[%d]ippdrv[0x%x]\n",
		__func__, property->prop_id, property->cmd, (int)ippdrv);

	/* store property information and ippdrv in private data */
	c_node->priv = priv;
	c_node->property = *property;
	c_node->state = IPP_STATE_IDLE;

	c_node->start_work = ipp_create_cmd_work();
	if (IS_ERR_OR_NULL(c_node->start_work)) {
		DRM_ERROR("failed to create start work.\n");
		ret = -ENOMEM;
		goto err_clear;
	}

	c_node->stop_work = ipp_create_cmd_work();
	if (IS_ERR_OR_NULL(c_node->stop_work)) {
		DRM_ERROR("failed to create stop work.\n");
		ret = -ENOMEM;
		goto err_free_start;
	}

	c_node->event_work = ipp_create_event_work();
	if (IS_ERR_OR_NULL(c_node->event_work)) {
		DRM_ERROR("failed to create event work.\n");
		ret = -ENOMEM;
		goto err_free_stop;
	}

	mutex_init(&c_node->cmd_lock);
	mutex_init(&c_node->mem_lock);
	mutex_init(&c_node->event_lock);

	init_completion(&c_node->start_complete);
	init_completion(&c_node->stop_complete);

	for_each_ipp_ops(i)
		INIT_LIST_HEAD(&c_node->mem_list[i]);

	INIT_LIST_HEAD(&c_node->event_list);
	list_splice_init(&priv->event_list, &c_node->event_list);
	list_add_tail(&c_node->list, &ippdrv->cmd_list);

	/* make dedicated state without m2m */
	if (!ipp_is_m2m_cmd(property->cmd))
		ippdrv->dedicated = true;

	return 0;

err_free_stop:
	kfree(c_node->stop_work);
err_free_start:
	kfree(c_node->start_work);
err_clear:
	kfree(c_node);
	return ret;
}
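
/*
 * Example (sketch, userspace side): a minimal M2M property, assuming the
 * uapi names from exynos_drm.h. pos/sz setup is elided for brevity.
 *
 *	struct drm_exynos_ipp_property prop = {
 *		.cmd = IPP_CMD_M2M,
 *		.ipp_id = 0,	// 0 = let ipp_find_driver() pick one
 *	};
 *	prop.config[EXYNOS_DRM_OPS_SRC].ops_id = EXYNOS_DRM_OPS_SRC;
 *	prop.config[EXYNOS_DRM_OPS_SRC].fmt = DRM_FORMAT_XRGB8888;
 *	prop.config[EXYNOS_DRM_OPS_DST].ops_id = EXYNOS_DRM_OPS_DST;
 *	prop.config[EXYNOS_DRM_OPS_DST].fmt = DRM_FORMAT_NV12;
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_PROPERTY, &prop);
 *	// on success prop.prop_id holds the new property handle
 */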

static void ipp_clean_cmd_node(struct drm_exynos_ipp_cmd_node *c_node)
{
	DRM_DEBUG_KMS("%s\n", __func__);

	/* delete list */
	list_del(&c_node->list);

	/* destroy mutex */
	mutex_destroy(&c_node->cmd_lock);
	mutex_destroy(&c_node->mem_lock);
	mutex_destroy(&c_node->event_lock);

	/* free command node */
	kfree(c_node->start_work);
	kfree(c_node->stop_work);
	kfree(c_node->event_work);
	kfree(c_node);
}

static int ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
{
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct drm_exynos_ipp_mem_node *m_node;
	struct list_head *head;
	int ret, i, count[EXYNOS_DRM_OPS_MAX] = { 0, };

	DRM_DEBUG_KMS("%s\n", __func__);

	mutex_lock(&c_node->mem_lock);

	for_each_ipp_ops(i) {
		/* source/destination memory list */
		head = &c_node->mem_list[i];

		if (list_empty(head)) {
			DRM_DEBUG_KMS("%s:%s memory empty.\n", __func__,
				i ? "dst" : "src");
			continue;
		}

		/* find memory node entry */
		list_for_each_entry(m_node, head, list) {
			DRM_DEBUG_KMS("%s:%s,count[%d]m_node[0x%x]\n", __func__,
				i ? "dst" : "src", count[i], (int)m_node);
			count[i]++;
		}
	}

	DRM_DEBUG_KMS("%s:min[%d]max[%d]\n", __func__,
		min(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]),
		max(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]));

	/*
	 * M2M operations need paired src/dst memory addresses, so use
	 * the minimum of the src and dst counts. The other commands use
	 * only one side, so the maximum count is the usable one.
	 */
	if (ipp_is_m2m_cmd(property->cmd))
		ret = min(count[EXYNOS_DRM_OPS_SRC],
			count[EXYNOS_DRM_OPS_DST]);
	else
		ret = max(count[EXYNOS_DRM_OPS_SRC],
			count[EXYNOS_DRM_OPS_DST]);

	mutex_unlock(&c_node->mem_lock);

	return ret;
}
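
/*
 * Example: with 2 src buffers and 3 dst buffers queued, an M2M command
 * can run min(2, 3) = 2 paired operations, while a WB or OUTPUT command
 * only touches one side and therefore reports max(2, 3) = 3.
 */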

static struct drm_exynos_ipp_mem_node
	*ipp_find_mem_node(struct drm_exynos_ipp_cmd_node *c_node,
	struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_mem_node *m_node;
	struct list_head *head;
	int count = 0;

	DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, qbuf->buf_id);

	/* source/destination memory list */
	head = &c_node->mem_list[qbuf->ops_id];

	/* find memory node from memory list */
	list_for_each_entry(m_node, head, list) {
		DRM_DEBUG_KMS("%s:count[%d]m_node[0x%x]\n",
			__func__, count++, (int)m_node);

		/* compare buffer id */
		if (m_node->buf_id == qbuf->buf_id)
			return m_node;
	}

	return NULL;
}

static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_mem_node *m_node)
{
	struct exynos_drm_ipp_ops *ops = NULL;
	int ret = 0;

	DRM_DEBUG_KMS("%s:node[0x%x]\n", __func__, (int)m_node);

	if (!m_node) {
		DRM_ERROR("invalid queue node.\n");
		return -EFAULT;
	}

	mutex_lock(&c_node->mem_lock);

	DRM_DEBUG_KMS("%s:ops_id[%d]\n", __func__, m_node->ops_id);

	/* get operations callback */
	ops = ippdrv->ops[m_node->ops_id];
	if (!ops) {
		DRM_ERROR("not support ops.\n");
		ret = -EFAULT;
		goto err_unlock;
	}

	/* set address and enable irq */
	if (ops->set_addr) {
		ret = ops->set_addr(ippdrv->dev, &m_node->buf_info,
			m_node->buf_id, IPP_BUF_ENQUEUE);
		if (ret) {
			DRM_ERROR("failed to set addr.\n");
			goto err_unlock;
		}
	}

err_unlock:
	mutex_unlock(&c_node->mem_lock);
	return ret;
}

static struct drm_exynos_ipp_mem_node
	*ipp_get_mem_node(struct drm_device *drm_dev,
	struct drm_file *file,
	struct drm_exynos_ipp_cmd_node *c_node,
	struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_buf_info buf_info;
	void *addr;
	int i;

	DRM_DEBUG_KMS("%s\n", __func__);

	mutex_lock(&c_node->mem_lock);

	m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
	if (!m_node) {
		DRM_ERROR("failed to allocate queue node.\n");
		goto err_unlock;
	}

	/* clear base address for error handling */
	memset(&buf_info, 0x0, sizeof(buf_info));

	/* operations, buffer id */
	m_node->ops_id = qbuf->ops_id;
	m_node->prop_id = qbuf->prop_id;
	m_node->buf_id = qbuf->buf_id;

	DRM_DEBUG_KMS("%s:m_node[0x%x]ops_id[%d]\n", __func__,
		(int)m_node, qbuf->ops_id);
	DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]\n", __func__,
		qbuf->prop_id, m_node->buf_id);

	for_each_ipp_planar(i) {
		DRM_DEBUG_KMS("%s:i[%d]handle[0x%x]\n", __func__,
			i, qbuf->handle[i]);

		/* get dma address by handle */
		if (qbuf->handle[i]) {
			addr = exynos_drm_gem_get_dma_addr(drm_dev,
					qbuf->handle[i], file);
			if (IS_ERR(addr)) {
				DRM_ERROR("failed to get addr.\n");
				goto err_clear;
			}

			buf_info.handles[i] = qbuf->handle[i];
			buf_info.base[i] = *(dma_addr_t *) addr;
			DRM_DEBUG_KMS("%s:i[%d]base[0x%x]hd[0x%x]\n",
				__func__, i, buf_info.base[i],
				(int)buf_info.handles[i]);
		}
	}

	m_node->filp = file;
	m_node->buf_info = buf_info;
	list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);

	mutex_unlock(&c_node->mem_lock);
	return m_node;

err_clear:
	kfree(m_node);
err_unlock:
	mutex_unlock(&c_node->mem_lock);
	return ERR_PTR(-EFAULT);
}

static int ipp_put_mem_node(struct drm_device *drm_dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_mem_node *m_node)
{
	int i;

	DRM_DEBUG_KMS("%s:node[0x%x]\n", __func__, (int)m_node);

	if (!m_node) {
		DRM_ERROR("invalid dequeue node.\n");
		return -EFAULT;
	}

	if (list_empty(&m_node->list)) {
		DRM_ERROR("empty memory node.\n");
		return -ENOMEM;
	}

	mutex_lock(&c_node->mem_lock);

	DRM_DEBUG_KMS("%s:ops_id[%d]\n", __func__, m_node->ops_id);

	/* put gem buffer */
	for_each_ipp_planar(i) {
		unsigned long handle = m_node->buf_info.handles[i];
		if (handle)
			exynos_drm_gem_put_dma_addr(drm_dev, handle,
				m_node->filp);
	}

	/* delete list in queue */
	list_del(&m_node->list);
	kfree(m_node);

	mutex_unlock(&c_node->mem_lock);

	return 0;
}

static void ipp_free_event(struct drm_pending_event *event)
{
	kfree(event);
}

static int ipp_get_event(struct drm_device *drm_dev,
		struct drm_file *file,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_send_event *e;
	unsigned long flags;

	DRM_DEBUG_KMS("%s:ops_id[%d]buf_id[%d]\n", __func__,
		qbuf->ops_id, qbuf->buf_id);

	e = kzalloc(sizeof(*e), GFP_KERNEL);

	if (!e) {
		DRM_ERROR("failed to allocate event.\n");
		spin_lock_irqsave(&drm_dev->event_lock, flags);
		file->event_space += sizeof(e->event);
		spin_unlock_irqrestore(&drm_dev->event_lock, flags);
		return -ENOMEM;
	}

	/* make event */
	e->event.base.type = DRM_EXYNOS_IPP_EVENT;
	e->event.base.length = sizeof(e->event);
	e->event.user_data = qbuf->user_data;
	e->event.prop_id = qbuf->prop_id;
	e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id;
	e->base.event = &e->event.base;
	e->base.file_priv = file;
	e->base.destroy = ipp_free_event;
	list_add_tail(&e->base.link, &c_node->event_list);

	return 0;
}
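
/*
 * Example (sketch, userspace side): these events arrive on the drm fd
 * like any other drm event and can be read once a buffer completes.
 *
 *	char buf[128];
 *	int len = read(drm_fd, buf, sizeof(buf));
 *	struct drm_event *ev = (struct drm_event *)buf;
 *	if (len > 0 && ev->type == DRM_EXYNOS_IPP_EVENT) {
 *		struct drm_exynos_ipp_event *ipp_ev = (void *)ev;
 *		// ipp_ev->buf_id[EXYNOS_DRM_OPS_DST] is the finished buffer
 *	}
 */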

static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_send_event *e, *te;
	int count = 0;

	DRM_DEBUG_KMS("%s\n", __func__);

	if (list_empty(&c_node->event_list)) {
		DRM_DEBUG_KMS("%s:event_list is empty.\n", __func__);
		return;
	}

	list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
		DRM_DEBUG_KMS("%s:count[%d]e[0x%x]\n",
			__func__, count++, (int)e);

		/*
		 * A NULL qbuf means delete every event: the stop
		 * operation wants to flush the whole event list.
		 * Otherwise delete only the event with the same buf id.
		 */
		if (!qbuf) {
			/* delete list */
			list_del(&e->base.link);
			kfree(e);
		}

		/* compare buffer id */
		if (qbuf && (qbuf->buf_id ==
		    e->event.buf_id[EXYNOS_DRM_OPS_DST])) {
			/* delete list */
			list_del(&e->base.link);
			kfree(e);
			return;
		}
	}
}

void ipp_handle_cmd_work(struct device *dev,
		struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_work *cmd_work,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	cmd_work->ippdrv = ippdrv;
	cmd_work->c_node = c_node;
	queue_work(ctx->cmd_workq, (struct work_struct *)cmd_work);
}

static int ipp_queue_buf_with_run(struct device *dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_mem_node *m_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_property *property;
	struct exynos_drm_ipp_ops *ops;
	int ret;

	DRM_DEBUG_KMS("%s\n", __func__);

	ippdrv = ipp_find_drv_by_handle(qbuf->prop_id);
	if (IS_ERR_OR_NULL(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return -EFAULT;
	}

	ops = ippdrv->ops[qbuf->ops_id];
	if (!ops) {
		DRM_ERROR("failed to get ops.\n");
		return -EFAULT;
	}

	property = &c_node->property;

	if (c_node->state != IPP_STATE_START) {
		DRM_DEBUG_KMS("%s:bypass for invalid state.\n", __func__);
		return 0;
	}

	if (!ipp_check_mem_list(c_node)) {
		DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
		return 0;
	}

	/*
	 * If the destination buffer is set and the clock is enabled,
	 * m2m operations need to be started here at queue_buf time.
	 */
	if (ipp_is_m2m_cmd(property->cmd)) {
		struct drm_exynos_ipp_cmd_work *cmd_work = c_node->start_work;

		cmd_work->ctrl = IPP_CTRL_PLAY;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
	} else {
		ret = ipp_set_mem_node(ippdrv, c_node, m_node);
		if (ret) {
			DRM_ERROR("failed to set m node.\n");
			return ret;
		}
	}

	return 0;
}

static void ipp_clean_queue_buf(struct drm_device *drm_dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_mem_node *m_node, *tm_node;

	DRM_DEBUG_KMS("%s\n", __func__);

	if (!list_empty(&c_node->mem_list[qbuf->ops_id])) {
		/* delete list */
		list_for_each_entry_safe(m_node, tm_node,
			&c_node->mem_list[qbuf->ops_id], list) {
			if (m_node->buf_id == qbuf->buf_id &&
			    m_node->ops_id == qbuf->ops_id)
				ipp_put_mem_node(drm_dev, c_node, m_node);
		}
	}
}

int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
	struct device *dev = priv->dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_queue_buf *qbuf = data;
	struct drm_exynos_ipp_cmd_node *c_node;
	struct drm_exynos_ipp_mem_node *m_node;
	int ret;

	DRM_DEBUG_KMS("%s\n", __func__);

	if (!qbuf) {
		DRM_ERROR("invalid buf parameter.\n");
		return -EINVAL;
	}

	if (qbuf->ops_id >= EXYNOS_DRM_OPS_MAX) {
		DRM_ERROR("invalid ops parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("%s:prop_id[%d]ops_id[%s]buf_id[%d]buf_type[%d]\n",
		__func__, qbuf->prop_id, qbuf->ops_id ? "dst" : "src",
		qbuf->buf_id, qbuf->buf_type);

	/* find command node; ipp_find_obj() returns an ERR_PTR on failure */
	c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
		qbuf->prop_id);
	if (IS_ERR_OR_NULL(c_node)) {
		DRM_ERROR("failed to get command node.\n");
		return -EFAULT;
	}

	/* buffer control */
	switch (qbuf->buf_type) {
	case IPP_BUF_ENQUEUE:
		/* get memory node */
		m_node = ipp_get_mem_node(drm_dev, file, c_node, qbuf);
		if (IS_ERR(m_node)) {
			DRM_ERROR("failed to get m_node.\n");
			return PTR_ERR(m_node);
		}

		/*
		 * First get an event for the destination buffer; then,
		 * in the M2M case, run with the destination buffer if
		 * needed.
		 */
		if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) {
			/* get event for destination buffer */
			ret = ipp_get_event(drm_dev, file, c_node, qbuf);
			if (ret) {
				DRM_ERROR("failed to get event.\n");
				goto err_clean_node;
			}

			/*
			 * The M2M case issues play control to keep the
			 * stream running; the other cases just set the
			 * address and wait.
			 */
			ret = ipp_queue_buf_with_run(dev, c_node, m_node, qbuf);
			if (ret) {
				DRM_ERROR("failed to run command.\n");
				goto err_clean_node;
			}
		}
		break;
	case IPP_BUF_DEQUEUE:
		mutex_lock(&c_node->cmd_lock);

		/* put event for destination buffer */
		if (qbuf->ops_id == EXYNOS_DRM_OPS_DST)
			ipp_put_event(c_node, qbuf);

		ipp_clean_queue_buf(drm_dev, c_node, qbuf);

		mutex_unlock(&c_node->cmd_lock);
		break;
	default:
		DRM_ERROR("invalid buffer control.\n");
		return -EINVAL;
	}

	return 0;

err_clean_node:
	DRM_ERROR("clean memory nodes.\n");

	ipp_clean_queue_buf(drm_dev, c_node, qbuf);
	return ret;
}
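
/*
 * Example (sketch, userspace side): enqueueing one gem buffer per side
 * for an M2M property, assuming the uapi names from exynos_drm.h.
 *
 *	struct drm_exynos_ipp_queue_buf qbuf = {
 *		.prop_id = prop.prop_id,
 *		.ops_id = EXYNOS_DRM_OPS_SRC,
 *		.buf_type = IPP_BUF_ENQUEUE,
 *		.buf_id = 0,
 *	};
 *	qbuf.handle[EXYNOS_DRM_PLANAR_Y] = src_gem_handle;
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_QUEUE_BUF, &qbuf);
 *	// repeat with .ops_id = EXYNOS_DRM_OPS_DST and the dst handle;
 *	// queueing the dst side of an M2M property also kicks the run
 */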

static bool exynos_drm_ipp_check_valid(struct device *dev,
		enum drm_exynos_ipp_ctrl ctrl, enum drm_exynos_ipp_state state)
{
	DRM_DEBUG_KMS("%s\n", __func__);

	if (ctrl != IPP_CTRL_PLAY) {
		if (pm_runtime_suspended(dev)) {
			DRM_ERROR("pm:runtime_suspended.\n");
			goto err_status;
		}
	}

	switch (ctrl) {
	case IPP_CTRL_PLAY:
		if (state != IPP_STATE_IDLE)
			goto err_status;
		break;
	case IPP_CTRL_STOP:
		if (state == IPP_STATE_STOP)
			goto err_status;
		break;
	case IPP_CTRL_PAUSE:
		if (state != IPP_STATE_START)
			goto err_status;
		break;
	case IPP_CTRL_RESUME:
		if (state != IPP_STATE_STOP)
			goto err_status;
		break;
	default:
		DRM_ERROR("invalid state.\n");
		goto err_status;
	}

	return true;

err_status:
	DRM_ERROR("invalid status:ctrl[%d]state[%d]\n", ctrl, state);
	return false;
}
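
/*
 * Summary of the transitions accepted above:
 *
 *	ctrl		required current state
 *	PLAY		IDLE
 *	STOP		anything except STOP
 *	PAUSE		START
 *	RESUME		STOP
 *
 * All controls except PLAY additionally require the device to be
 * runtime-active.
 */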

int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
	struct exynos_drm_ippdrv *ippdrv = NULL;
	struct device *dev = priv->dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_cmd_ctrl *cmd_ctrl = data;
	struct drm_exynos_ipp_cmd_work *cmd_work;
	struct drm_exynos_ipp_cmd_node *c_node;

	DRM_DEBUG_KMS("%s\n", __func__);

	if (!ctx) {
		DRM_ERROR("invalid context.\n");
		return -EINVAL;
	}

	if (!cmd_ctrl) {
		DRM_ERROR("invalid control parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("%s:ctrl[%d]prop_id[%d]\n", __func__,
		cmd_ctrl->ctrl, cmd_ctrl->prop_id);

	ippdrv = ipp_find_drv_by_handle(cmd_ctrl->prop_id);
	if (IS_ERR(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return PTR_ERR(ippdrv);
	}

	/* ipp_find_obj() returns an ERR_PTR on failure, never NULL */
	c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
		cmd_ctrl->prop_id);
	if (IS_ERR_OR_NULL(c_node)) {
		DRM_ERROR("invalid command node list.\n");
		return -EINVAL;
	}

	if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
	    c_node->state)) {
		DRM_ERROR("invalid state.\n");
		return -EINVAL;
	}

	switch (cmd_ctrl->ctrl) {
	case IPP_CTRL_PLAY:
		if (pm_runtime_suspended(ippdrv->dev))
			pm_runtime_get_sync(ippdrv->dev);
		c_node->state = IPP_STATE_START;

		cmd_work = c_node->start_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
		break;
	case IPP_CTRL_STOP:
		cmd_work = c_node->stop_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);

		if (!wait_for_completion_timeout(&c_node->stop_complete,
		    msecs_to_jiffies(300))) {
			DRM_ERROR("timeout stop:prop_id[%d]\n",
				c_node->property.prop_id);
		}

		c_node->state = IPP_STATE_STOP;
		ippdrv->dedicated = false;
		ipp_clean_cmd_node(c_node);

		if (list_empty(&ippdrv->cmd_list))
			pm_runtime_put_sync(ippdrv->dev);
		break;
	case IPP_CTRL_PAUSE:
		cmd_work = c_node->stop_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);

		if (!wait_for_completion_timeout(&c_node->stop_complete,
		    msecs_to_jiffies(200))) {
			DRM_ERROR("timeout stop:prop_id[%d]\n",
				c_node->property.prop_id);
		}

		c_node->state = IPP_STATE_STOP;
		break;
	case IPP_CTRL_RESUME:
		c_node->state = IPP_STATE_START;
		cmd_work = c_node->start_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
		break;
	default:
		DRM_ERROR("could not support this state currently.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("%s:done ctrl[%d]prop_id[%d]\n", __func__,
		cmd_ctrl->ctrl, cmd_ctrl->prop_id);

	return 0;
}
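
/*
 * Example (sketch, userspace side): starting and stopping a property,
 * assuming the uapi names from exynos_drm.h.
 *
 *	struct drm_exynos_ipp_cmd_ctrl ctrl = {
 *		.prop_id = prop.prop_id,
 *		.ctrl = IPP_CTRL_PLAY,
 *	};
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_CMD_CTRL, &ctrl);
 *	// ... queue buffers and consume completion events ...
 *	ctrl.ctrl = IPP_CTRL_STOP;
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_CMD_CTRL, &ctrl);
 */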

int exynos_drm_ippnb_register(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(
		&exynos_drm_ippnb_list, nb);
}

int exynos_drm_ippnb_unregister(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(
		&exynos_drm_ippnb_list, nb);
}

int exynos_drm_ippnb_send_event(unsigned long val, void *v)
{
	return blocking_notifier_call_chain(
		&exynos_drm_ippnb_list, val, v);
}

static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_property *property)
{
	struct exynos_drm_ipp_ops *ops = NULL;
	bool swap = false;
	int ret, i;

	if (!property) {
		DRM_ERROR("invalid property parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);

	/* reset h/w block */
	if (ippdrv->reset &&
	    ippdrv->reset(ippdrv->dev)) {
		DRM_ERROR("failed to reset.\n");
		return -EINVAL;
	}

	/* set source/destination operations */
	for_each_ipp_ops(i) {
		struct drm_exynos_ipp_config *config =
			&property->config[i];

		ops = ippdrv->ops[i];
		if (!ops || !config) {
			DRM_ERROR("not support ops and config.\n");
			return -EINVAL;
		}

		/* set format */
		if (ops->set_fmt) {
			ret = ops->set_fmt(ippdrv->dev, config->fmt);
			if (ret) {
				DRM_ERROR("not support format.\n");
				return ret;
			}
		}

		/* set transform for rotation, flip */
		if (ops->set_transf) {
			ret = ops->set_transf(ippdrv->dev, config->degree,
				config->flip, &swap);
			if (ret) {
				DRM_ERROR("not support transform.\n");
				return -EINVAL;
			}
		}

		/* set size */
		if (ops->set_size) {
			ret = ops->set_size(ippdrv->dev, swap, &config->pos,
				&config->sz);
			if (ret) {
				DRM_ERROR("not support size.\n");
				return ret;
			}
		}
	}

	return 0;
}

static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct list_head *head;
	int ret, i;

	DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);

	/* store command info in ippdrv */
	ippdrv->cmd = c_node;

	if (!ipp_check_mem_list(c_node)) {
		DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
		return -ENOMEM;
	}

	/* set current property in ippdrv */
	ret = ipp_set_property(ippdrv, property);
	if (ret) {
		DRM_ERROR("failed to set property.\n");
		ippdrv->cmd = NULL;
		return ret;
	}

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			m_node = list_first_entry(head,
				struct drm_exynos_ipp_mem_node, list);
			if (!m_node) {
				DRM_ERROR("failed to get node.\n");
				ret = -EFAULT;
				return ret;
			}

			DRM_DEBUG_KMS("%s:m_node[0x%x]\n",
				__func__, (int)m_node);

			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				return ret;
			}
		}
		break;
	case IPP_CMD_WB:
		/* destination memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];

		list_for_each_entry(m_node, head, list) {
			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				return ret;
			}
		}
		break;
	case IPP_CMD_OUTPUT:
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		list_for_each_entry(m_node, head, list) {
			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				return ret;
			}
		}
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, property->cmd);

	/* start operations */
	if (ippdrv->start) {
		ret = ippdrv->start(ippdrv->dev, property->cmd);
		if (ret) {
			DRM_ERROR("failed to start ops.\n");
			return ret;
		}
	}

	return 0;
}

static int ipp_stop_property(struct drm_device *drm_dev,
		struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct drm_exynos_ipp_mem_node *m_node, *tm_node;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct list_head *head;
	int ret = 0, i;

	DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);

	/* put event */
	ipp_put_event(c_node, NULL);

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			if (list_empty(head)) {
				DRM_DEBUG_KMS("%s:mem_list is empty.\n",
					__func__);
				break;
			}

			list_for_each_entry_safe(m_node, tm_node,
				head, list) {
				ret = ipp_put_mem_node(drm_dev, c_node,
					m_node);
				if (ret) {
					DRM_ERROR("failed to put m_node.\n");
					goto err_clear;
				}
			}
		}
		break;
	case IPP_CMD_WB:
		/* destination memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];

		if (list_empty(head)) {
			DRM_DEBUG_KMS("%s:mem_list is empty.\n", __func__);
			break;
		}

		list_for_each_entry_safe(m_node, tm_node, head, list) {
			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to put m_node.\n");
				goto err_clear;
			}
		}
		break;
	case IPP_CMD_OUTPUT:
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		if (list_empty(head)) {
			DRM_DEBUG_KMS("%s:mem_list is empty.\n", __func__);
			break;
		}

		list_for_each_entry_safe(m_node, tm_node, head, list) {
			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to put m_node.\n");
				goto err_clear;
			}
		}
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		ret = -EINVAL;
		goto err_clear;
	}

err_clear:
	/* stop operations */
	if (ippdrv->stop)
		ippdrv->stop(ippdrv->dev, property->cmd);

	return ret;
}

void ipp_sched_cmd(struct work_struct *work)
{
	struct drm_exynos_ipp_cmd_work *cmd_work =
		(struct drm_exynos_ipp_cmd_work *)work;
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	struct drm_exynos_ipp_property *property;
	int ret;

	DRM_DEBUG_KMS("%s\n", __func__);

	ippdrv = cmd_work->ippdrv;
	if (!ippdrv) {
		DRM_ERROR("invalid ippdrv list.\n");
		return;
	}

	c_node = cmd_work->c_node;
	if (!c_node) {
		DRM_ERROR("invalid command node list.\n");
		return;
	}

	mutex_lock(&c_node->cmd_lock);

	property = &c_node->property;
	if (!property) {
		DRM_ERROR("failed to get property:prop_id[%d]\n",
			c_node->property.prop_id);
		goto err_unlock;
	}

	switch (cmd_work->ctrl) {
	case IPP_CTRL_PLAY:
	case IPP_CTRL_RESUME:
		ret = ipp_start_property(ippdrv, c_node);
		if (ret) {
			DRM_ERROR("failed to start property:prop_id[%d]\n",
				c_node->property.prop_id);
			goto err_unlock;
		}

		/*
		 * The M2M case waits for completion of the transfer:
		 * M2M performs a single unit of work per queued pair,
		 * so it must wait for the data transfer to finish
		 * before handling the next one.
		 */
		if (ipp_is_m2m_cmd(property->cmd)) {
			if (!wait_for_completion_timeout
			    (&c_node->start_complete, msecs_to_jiffies(200))) {
				DRM_ERROR("timeout event:prop_id[%d]\n",
					c_node->property.prop_id);
				goto err_unlock;
			}
		}
		break;
	case IPP_CTRL_STOP:
	case IPP_CTRL_PAUSE:
		ret = ipp_stop_property(ippdrv->drm_dev, ippdrv,
			c_node);
		if (ret) {
			DRM_ERROR("failed to stop property.\n");
			goto err_unlock;
		}

		complete(&c_node->stop_complete);
		break;
	default:
		DRM_ERROR("unknown control type\n");
		break;
	}

	DRM_DEBUG_KMS("%s:ctrl[%d] done.\n", __func__, cmd_work->ctrl);

err_unlock:
	mutex_unlock(&c_node->cmd_lock);
}

static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node, int *buf_id)
{
	struct drm_device *drm_dev = ippdrv->drm_dev;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_queue_buf qbuf;
	struct drm_exynos_ipp_send_event *e;
	struct list_head *head;
	struct timeval now;
	unsigned long flags;
	u32 tbuf_id[EXYNOS_DRM_OPS_MAX] = {0, };
	int ret, i;

	for_each_ipp_ops(i)
		DRM_DEBUG_KMS("%s:%s buf_id[%d]\n", __func__,
			i ? "dst" : "src", buf_id[i]);

	if (!drm_dev) {
		DRM_ERROR("failed to get drm_dev.\n");
		return -EINVAL;
	}

	if (!property) {
		DRM_ERROR("failed to get property.\n");
		return -EINVAL;
	}

	if (list_empty(&c_node->event_list)) {
		DRM_DEBUG_KMS("%s:event list is empty.\n", __func__);
		return 0;
	}

	if (!ipp_check_mem_list(c_node)) {
		DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
		return 0;
	}

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			m_node = list_first_entry(head,
				struct drm_exynos_ipp_mem_node, list);
			if (!m_node) {
				DRM_ERROR("empty memory node.\n");
				return -ENOMEM;
			}

			tbuf_id[i] = m_node->buf_id;
			DRM_DEBUG_KMS("%s:%s buf_id[%d]\n", __func__,
				i ? "dst" : "src", tbuf_id[i]);

			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
			if (ret)
				DRM_ERROR("failed to put m_node.\n");
		}
		break;
	case IPP_CMD_WB:
		/* clear buf for finding */
		memset(&qbuf, 0x0, sizeof(qbuf));
		qbuf.ops_id = EXYNOS_DRM_OPS_DST;
		qbuf.buf_id = buf_id[EXYNOS_DRM_OPS_DST];

		/* get memory node entry */
		m_node = ipp_find_mem_node(c_node, &qbuf);
		if (!m_node) {
			DRM_ERROR("empty memory node.\n");
			return -ENOMEM;
		}

		tbuf_id[EXYNOS_DRM_OPS_DST] = m_node->buf_id;

		ret = ipp_put_mem_node(drm_dev, c_node, m_node);
		if (ret)
			DRM_ERROR("failed to put m_node.\n");
		break;
	case IPP_CMD_OUTPUT:
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		m_node = list_first_entry(head,
			struct drm_exynos_ipp_mem_node, list);
		if (!m_node) {
			DRM_ERROR("empty memory node.\n");
			return -ENOMEM;
		}

		tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id;

		ret = ipp_put_mem_node(drm_dev, c_node, m_node);
		if (ret)
			DRM_ERROR("failed to put m_node.\n");
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		return -EINVAL;
	}

	if (tbuf_id[EXYNOS_DRM_OPS_DST] != buf_id[EXYNOS_DRM_OPS_DST])
		DRM_ERROR("failed to match buf_id[%d %d]prop_id[%d]\n",
			tbuf_id[1], buf_id[1], property->prop_id);

	/*
	 * The command node keeps an event list for destination buffers.
	 * When a destination buffer is enqueued to the mem list, an
	 * event is created and linked to the tail of the event list,
	 * so the first event corresponds to the first enqueued buffer.
	 */
	e = list_first_entry(&c_node->event_list,
		struct drm_exynos_ipp_send_event, base.link);

	if (!e) {
		DRM_ERROR("empty event.\n");
		return -EINVAL;
	}

	do_gettimeofday(&now);
	DRM_DEBUG_KMS("%s:tv_sec[%ld]tv_usec[%ld]\n",
		__func__, now.tv_sec, now.tv_usec);
	e->event.tv_sec = now.tv_sec;
	e->event.tv_usec = now.tv_usec;
	e->event.prop_id = property->prop_id;

	/* set buffer id about source destination */
	for_each_ipp_ops(i)
		e->event.buf_id[i] = tbuf_id[i];

	spin_lock_irqsave(&drm_dev->event_lock, flags);
	list_move_tail(&e->base.link, &e->base.file_priv->event_list);
	wake_up_interruptible(&e->base.file_priv->event_wait);
	spin_unlock_irqrestore(&drm_dev->event_lock, flags);

	DRM_DEBUG_KMS("%s:done cmd[%d]prop_id[%d]buf_id[%d]\n", __func__,
		property->cmd, property->prop_id, tbuf_id[EXYNOS_DRM_OPS_DST]);

	return 0;
}

void ipp_sched_event(struct work_struct *work)
{
	struct drm_exynos_ipp_event_work *event_work =
		(struct drm_exynos_ipp_event_work *)work;
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	int ret;

	if (!event_work) {
		DRM_ERROR("failed to get event_work.\n");
		return;
	}

	DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__,
		event_work->buf_id[EXYNOS_DRM_OPS_DST]);

	ippdrv = event_work->ippdrv;
	if (!ippdrv) {
		DRM_ERROR("failed to get ipp driver.\n");
		return;
	}

	c_node = ippdrv->cmd;
	if (!c_node) {
		DRM_ERROR("failed to get command node.\n");
		return;
	}

	/* take the lock up front so the unlock below is always balanced */
	mutex_lock(&c_node->event_lock);

	/*
	 * The command thread and the event thread are synchronized.
	 * If userspace closes the device immediately, the event thread
	 * must still signal completion so that the command thread can
	 * finish; hence the completion is also made when bypassing.
	 */
	if (c_node->state != IPP_STATE_START) {
		DRM_DEBUG_KMS("%s:bypass state[%d]prop_id[%d]\n",
			__func__, c_node->state, c_node->property.prop_id);
		goto err_completion;
	}

	ret = ipp_send_event(ippdrv, c_node, event_work->buf_id);
	if (ret) {
		DRM_ERROR("failed to send event.\n");
		goto err_completion;
	}

err_completion:
	if (ipp_is_m2m_cmd(c_node->property.cmd))
		complete(&c_node->start_complete);

	mutex_unlock(&c_node->event_lock);
}

static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);
	struct exynos_drm_ippdrv *ippdrv;
	int ret, count = 0;

	DRM_DEBUG_KMS("%s\n", __func__);

	/* get ipp driver entry */
	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		ippdrv->drm_dev = drm_dev;

		ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv,
			&ippdrv->ipp_id);
		if (ret) {
			DRM_ERROR("failed to create id.\n");
			goto err_idr;
		}

		DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]ipp_id[%d]\n", __func__,
			count++, (int)ippdrv, ippdrv->ipp_id);

		if (ippdrv->ipp_id == 0) {
			DRM_ERROR("failed to get ipp_id[%d]\n",
				ippdrv->ipp_id);
			ret = -EINVAL;
			goto err_idr;
		}

		/* store parent device for node */
		ippdrv->parent_dev = dev;

		/* store event work queue and handler */
		ippdrv->event_workq = ctx->event_workq;
		ippdrv->sched_event = ipp_sched_event;
		INIT_LIST_HEAD(&ippdrv->cmd_list);

		if (is_drm_iommu_supported(drm_dev)) {
			ret = drm_iommu_attach_device(drm_dev, ippdrv->dev);
			if (ret) {
				DRM_ERROR("failed to activate iommu\n");
				goto err_iommu;
			}
		}
	}

	return 0;

err_iommu:
	/* detach the devices attached so far, in reverse order */
	list_for_each_entry_reverse(ippdrv, &exynos_drm_ippdrv_list, drv_list)
		if (is_drm_iommu_supported(drm_dev))
			drm_iommu_detach_device(drm_dev, ippdrv->dev);

err_idr:
	idr_remove_all(&ctx->ipp_idr);
	idr_remove_all(&ctx->prop_idr);
	idr_destroy(&ctx->ipp_idr);
	idr_destroy(&ctx->prop_idr);
	return ret;
}

static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
{
	struct exynos_drm_ippdrv *ippdrv, *t;

	DRM_DEBUG_KMS("%s\n", __func__);

	/* unregistering deletes entries from the list, so iterate safely */
	list_for_each_entry_safe(ippdrv, t, &exynos_drm_ippdrv_list, drv_list) {
		if (is_drm_iommu_supported(drm_dev))
			drm_iommu_detach_device(drm_dev, ippdrv->dev);

		ippdrv->drm_dev = NULL;
		exynos_drm_ippdrv_unregister(ippdrv);
	}
}

static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv;

	DRM_DEBUG_KMS("%s\n", __func__);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		DRM_ERROR("failed to allocate priv.\n");
		return -ENOMEM;
	}
	priv->dev = dev;
	file_priv->ipp_priv = priv;

	INIT_LIST_HEAD(&priv->event_list);

	DRM_DEBUG_KMS("%s:done priv[0x%x]\n", __func__, (int)priv);

	return 0;
}

static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
	struct exynos_drm_ippdrv *ippdrv = NULL;
	struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
	int count = 0;

	DRM_DEBUG_KMS("%s:for priv[0x%x]\n", __func__, (int)priv);

	if (list_empty(&exynos_drm_ippdrv_list)) {
		DRM_DEBUG_KMS("%s:ippdrv_list is empty.\n", __func__);
		goto err_clear;
	}

	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		if (list_empty(&ippdrv->cmd_list))
			continue;

		list_for_each_entry_safe(c_node, tc_node,
			&ippdrv->cmd_list, list) {
			DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]\n",
				__func__, count++, (int)ippdrv);

			if (c_node->priv == priv) {
				/*
				 * Userspace entered an abnormal state,
				 * e.g. the process was killed and the
				 * file is being closed, so the stop cmd
				 * ctrl was never issued. Perform the
				 * stop operation here instead.
				 */
				if (c_node->state == IPP_STATE_START) {
					ipp_stop_property(drm_dev, ippdrv,
						c_node);
					c_node->state = IPP_STATE_STOP;
				}

				ippdrv->dedicated = false;
				ipp_clean_cmd_node(c_node);
				if (list_empty(&ippdrv->cmd_list))
					pm_runtime_put_sync(ippdrv->dev);
			}
		}
	}

err_clear:
	kfree(priv);
	return;
}

static int __devinit ipp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ipp_context *ctx;
	struct exynos_drm_subdrv *subdrv;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	DRM_DEBUG_KMS("%s\n", __func__);

	mutex_init(&ctx->ipp_lock);
	mutex_init(&ctx->prop_lock);

	idr_init(&ctx->ipp_idr);
	idr_init(&ctx->prop_idr);

	/*
	 * Create a single-threaded workqueue for ipp events: ipp
	 * drivers queue their event_work here, and this thread sends
	 * the resulting events on to the user process.
	 */
	ctx->event_workq = create_singlethread_workqueue("ipp_event");
	if (!ctx->event_workq) {
		dev_err(dev, "failed to create event workqueue\n");
		ret = -EINVAL;
		goto err_clear;
	}

	/*
	 * Create a single-threaded workqueue for ipp commands: the user
	 * process creates a command node with the set property ioctl
	 * and queues start_work here, and this thread then starts the
	 * property.
	 */
	ctx->cmd_workq = create_singlethread_workqueue("ipp_cmd");
	if (!ctx->cmd_workq) {
		dev_err(dev, "failed to create cmd workqueue\n");
		ret = -EINVAL;
		goto err_event_workq;
	}

	/* set sub driver information */
	subdrv = &ctx->subdrv;
	subdrv->dev = dev;
	subdrv->probe = ipp_subdrv_probe;
	subdrv->remove = ipp_subdrv_remove;
	subdrv->open = ipp_subdrv_open;
	subdrv->close = ipp_subdrv_close;

	platform_set_drvdata(pdev, ctx);

	ret = exynos_drm_subdrv_register(subdrv);
	if (ret < 0) {
		DRM_ERROR("failed to register drm ipp device.\n");
		goto err_cmd_workq;
	}

	dev_info(&pdev->dev, "drm ipp registered successfully.\n");

	return 0;

err_cmd_workq:
	destroy_workqueue(ctx->cmd_workq);
err_event_workq:
	destroy_workqueue(ctx->event_workq);
err_clear:
	kfree(ctx);
	return ret;
}

static int __devexit ipp_remove(struct platform_device *pdev)
{
	struct ipp_context *ctx = platform_get_drvdata(pdev);

	DRM_DEBUG_KMS("%s\n", __func__);

	/* unregister sub driver */
	exynos_drm_subdrv_unregister(&ctx->subdrv);

	/* remove and destroy ipp idr */
	idr_remove_all(&ctx->ipp_idr);
	idr_remove_all(&ctx->prop_idr);
	idr_destroy(&ctx->ipp_idr);
	idr_destroy(&ctx->prop_idr);

	mutex_destroy(&ctx->ipp_lock);
	mutex_destroy(&ctx->prop_lock);

	/* destroy command, event work queue */
	destroy_workqueue(ctx->cmd_workq);
	destroy_workqueue(ctx->event_workq);

	kfree(ctx);

	return 0;
}

static int ipp_power_ctrl(struct ipp_context *ctx, bool enable)
{
	DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int ipp_suspend(struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	DRM_DEBUG_KMS("%s\n", __func__);

	if (pm_runtime_suspended(dev))
		return 0;

	return ipp_power_ctrl(ctx, false);
}

static int ipp_resume(struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	DRM_DEBUG_KMS("%s\n", __func__);

	if (!pm_runtime_suspended(dev))
		return ipp_power_ctrl(ctx, true);

	return 0;
}
#endif

#ifdef CONFIG_PM_RUNTIME
static int ipp_runtime_suspend(struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	DRM_DEBUG_KMS("%s\n", __func__);

	return ipp_power_ctrl(ctx, false);
}

static int ipp_runtime_resume(struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	DRM_DEBUG_KMS("%s\n", __func__);

	return ipp_power_ctrl(ctx, true);
}
#endif

static const struct dev_pm_ops ipp_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ipp_suspend, ipp_resume)
	SET_RUNTIME_PM_OPS(ipp_runtime_suspend, ipp_runtime_resume, NULL)
};

struct platform_driver ipp_driver = {
	.probe		= ipp_probe,
	.remove		= __devexit_p(ipp_remove),
	.driver		= {
		.name	= "exynos-drm-ipp",
		.owner	= THIS_MODULE,
		.pm	= &ipp_pm_ops,
	},
};