/*
 * (C) COPYRIGHT 2012-2013, 2017 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 */
18 #include <linux/version.h>
19 #include <asm/uaccess.h>
20 #include <linux/init.h>
21 #include <linux/module.h>
23 #include <linux/cdev.h>
24 #include <linux/device.h>
25 #include <linux/slab.h>
26 #include <linux/atomic.h>
27 #include <linux/reservation.h>
28 #include <linux/dma-buf.h>
29 #include <linux/wait.h>
30 #include <linux/sched.h>
31 #include <linux/poll.h>
32 #include <linux/anon_inodes.h>
33 #include <linux/file.h>
35 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
37 #include <linux/fence.h>
39 #define dma_fence_context_alloc(a) fence_context_alloc(a)
40 #define dma_fence_init(a, b, c, d, e) fence_init(a, b, c, d, e)
41 #define dma_fence_get(a) fence_get(a)
42 #define dma_fence_put(a) fence_put(a)
43 #define dma_fence_signal(a) fence_signal(a)
44 #define dma_fence_is_signaled(a) fence_is_signaled(a)
45 #define dma_fence_add_callback(a, b, c) fence_add_callback(a, b, c)
46 #define dma_fence_remove_callback(a, b) fence_remove_callback(a, b)
47 #define dma_fence_get_status(a) (fence_is_signaled(a) ? (a)->status ?: 1 : 0)
51 #include <linux/dma-fence.h>
53 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0))
54 #define dma_fence_get_status(a) (dma_fence_is_signaled(a) ? \
61 #include "dma_buf_lock.h"
63 /* Maximum number of buffers that a single handle can address */
64 #define DMA_BUF_LOCK_BUF_MAX 32
66 #define DMA_BUF_LOCK_DEBUG 1
68 #define DMA_BUF_LOCK_INIT_BIAS 0xFF
70 static dev_t dma_buf_lock_dev
;
71 static struct cdev dma_buf_lock_cdev
;
72 static struct class *dma_buf_lock_class
;
73 static char dma_buf_lock_dev_name
[] = "dma_buf_lock";
/* Forward declaration of the ioctl entry point; the prototype depends on
 * whether the kernel provides the unlocked_ioctl interface. */
#ifdef HAVE_UNLOCKED_IOCTL
static long dma_buf_lock_ioctl(struct file *filp, unsigned int cmd,
			       unsigned long arg);
#else
static int dma_buf_lock_ioctl(struct inode *inode, struct file *filp,
			      unsigned int cmd, unsigned long arg);
#endif
81 static struct file_operations dma_buf_lock_fops
=
84 #ifdef HAVE_UNLOCKED_IOCTL
85 .unlocked_ioctl
= dma_buf_lock_ioctl
,
87 .ioctl
= dma_buf_lock_ioctl
,
89 .compat_ioctl
= dma_buf_lock_ioctl
,
92 typedef struct dma_buf_lock_resource
94 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
97 struct dma_fence fence
;
99 int *list_of_dma_buf_fds
; /* List of buffers copied from userspace */
100 atomic_t locked
; /* Status of lock */
101 struct dma_buf
**dma_bufs
;
102 unsigned long exclusive
; /* Exclusive access bitmap */
103 atomic_t fence_dep_count
; /* Number of dma-fence dependencies */
104 struct list_head dma_fence_callbacks
; /* list of all callbacks set up to wait on other fences */
105 wait_queue_head_t wait
;
106 struct kref refcount
;
107 struct list_head link
;
109 } dma_buf_lock_resource
;
112 * struct dma_buf_lock_fence_cb - Callback data struct for dma-fence
113 * @fence_cb: Callback function
114 * @fence: Pointer to the fence object on which this callback is waiting
115 * @res: Pointer to dma_buf_lock_resource that is waiting on this callback
116 * @node: List head for linking this callback to the lock resource
118 struct dma_buf_lock_fence_cb
{
119 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
120 struct fence_cb fence_cb
;
123 struct dma_fence_cb fence_cb
;
124 struct dma_fence
*fence
;
126 struct dma_buf_lock_resource
*res
;
127 struct list_head node
;
/* All live lock resources; protected by dma_buf_lock_mutex. */
static LIST_HEAD(dma_buf_lock_resource_list);
static DEFINE_MUTEX(dma_buf_lock_mutex);

static inline int is_dma_buf_lock_file(struct file *);
static void dma_buf_lock_dounlock(struct kref *ref);

/*** dma_buf_lock fence part ***/

/* Spin lock protecting all Mali fences as fence->lock. */
static DEFINE_SPINLOCK(dma_buf_lock_fence_lock);
143 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
144 dma_buf_lock_fence_get_driver_name(struct fence
*fence
)
146 dma_buf_lock_fence_get_driver_name(struct dma_fence
*fence
)
149 return "dma_buf_lock";
153 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
154 dma_buf_lock_fence_get_timeline_name(struct fence
*fence
)
156 dma_buf_lock_fence_get_timeline_name(struct dma_fence
*fence
)
159 return "dma_buf_lock.timeline";
163 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
164 dma_buf_lock_fence_enable_signaling(struct fence
*fence
)
166 dma_buf_lock_fence_enable_signaling(struct dma_fence
*fence
)
172 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
173 const struct fence_ops dma_buf_lock_fence_ops
= {
174 .wait
= fence_default_wait
,
176 const struct dma_fence_ops dma_buf_lock_fence_ops
= {
177 .wait
= dma_fence_default_wait
,
179 .get_driver_name
= dma_buf_lock_fence_get_driver_name
,
180 .get_timeline_name
= dma_buf_lock_fence_get_timeline_name
,
181 .enable_signaling
= dma_buf_lock_fence_enable_signaling
,
185 dma_buf_lock_fence_init(dma_buf_lock_resource
*resource
)
187 dma_fence_init(&resource
->fence
,
188 &dma_buf_lock_fence_ops
,
189 &dma_buf_lock_fence_lock
,
195 dma_buf_lock_fence_free_callbacks(dma_buf_lock_resource
*resource
)
197 struct dma_buf_lock_fence_cb
*cb
, *tmp
;
199 /* Clean up and free callbacks. */
200 list_for_each_entry_safe(cb
, tmp
, &resource
->dma_fence_callbacks
, node
) {
201 /* Cancel callbacks that hasn't been called yet and release the
202 * reference taken in dma_buf_lock_fence_add_callback().
204 dma_fence_remove_callback(cb
->fence
, &cb
->fence_cb
);
205 dma_fence_put(cb
->fence
);
212 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
213 dma_buf_lock_fence_callback(struct fence
*fence
, struct fence_cb
*cb
)
215 dma_buf_lock_fence_callback(struct dma_fence
*fence
, struct dma_fence_cb
*cb
)
218 struct dma_buf_lock_fence_cb
*dma_buf_lock_cb
= container_of(cb
,
219 struct dma_buf_lock_fence_cb
,
221 dma_buf_lock_resource
*resource
= dma_buf_lock_cb
->res
;
223 #if DMA_BUF_LOCK_DEBUG
224 printk(KERN_DEBUG
"dma_buf_lock_fence_callback\n");
227 /* Callback function will be invoked in atomic context. */
229 if (atomic_dec_and_test(&resource
->fence_dep_count
)) {
230 atomic_set(&resource
->locked
, 1);
231 wake_up(&resource
->wait
);
232 /* A work item can be queued at this point to invoke
233 * dma_buf_lock_fence_free_callbacks.
238 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
240 dma_buf_lock_fence_add_callback(dma_buf_lock_resource
*resource
,
242 fence_func_t callback
)
245 dma_buf_lock_fence_add_callback(dma_buf_lock_resource
*resource
,
246 struct dma_fence
*fence
,
247 dma_fence_func_t callback
)
251 struct dma_buf_lock_fence_cb
*fence_cb
;
256 fence_cb
= kmalloc(sizeof(*fence_cb
), GFP_KERNEL
);
260 fence_cb
->fence
= fence
;
261 fence_cb
->res
= resource
;
262 INIT_LIST_HEAD(&fence_cb
->node
);
264 err
= dma_fence_add_callback(fence
, &fence_cb
->fence_cb
,
267 if (err
== -ENOENT
) {
268 /* Fence signaled, get the completion result */
269 err
= dma_fence_get_status(fence
);
271 /* remap success completion to err code */
280 * Get reference to fence that will be kept until callback gets
281 * cleaned up in dma_buf_lock_fence_free_callbacks().
283 dma_fence_get(fence
);
284 atomic_inc(&resource
->fence_dep_count
);
285 /* Add callback to resource's list of callbacks */
286 list_add(&fence_cb
->node
, &resource
->dma_fence_callbacks
);
293 dma_buf_lock_add_fence_reservation_callback(dma_buf_lock_resource
*resource
,
294 struct reservation_object
*resv
,
297 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
298 struct fence
*excl_fence
= NULL
;
299 struct fence
**shared_fences
= NULL
;
301 struct dma_fence
*excl_fence
= NULL
;
302 struct dma_fence
**shared_fences
= NULL
;
304 unsigned int shared_count
= 0;
307 err
= reservation_object_get_fences_rcu(resv
,
315 err
= dma_buf_lock_fence_add_callback(resource
,
317 dma_buf_lock_fence_callback
);
319 /* Release our reference, taken by reservation_object_get_fences_rcu(),
320 * to the fence. We have set up our callback (if that was possible),
321 * and it's the fence's owner is responsible for singling the fence
322 * before allowing it to disappear.
324 dma_fence_put(excl_fence
);
331 for (i
= 0; i
< shared_count
; i
++) {
332 err
= dma_buf_lock_fence_add_callback(resource
,
334 dma_buf_lock_fence_callback
);
340 /* Release all our references to the shared fences, taken by
341 * reservation_object_get_fences_rcu(). We have set up our callback (if
342 * that was possible), and it's the fence's owner is responsible for
343 * signaling the fence before allowing it to disappear.
346 for (i
= 0; i
< shared_count
; i
++)
347 dma_fence_put(shared_fences
[i
]);
348 kfree(shared_fences
);
354 dma_buf_lock_release_fence_reservation(dma_buf_lock_resource
*resource
,
355 struct ww_acquire_ctx
*ctx
)
359 for (r
= 0; r
< resource
->count
; r
++)
360 ww_mutex_unlock(&resource
->dma_bufs
[r
]->resv
->lock
);
361 ww_acquire_fini(ctx
);
365 dma_buf_lock_acquire_fence_reservation(dma_buf_lock_resource
*resource
,
366 struct ww_acquire_ctx
*ctx
)
368 struct reservation_object
*content_resv
= NULL
;
369 unsigned int content_resv_idx
= 0;
373 ww_acquire_init(ctx
, &reservation_ww_class
);
376 for (r
= 0; r
< resource
->count
; r
++) {
377 if (resource
->dma_bufs
[r
]->resv
== content_resv
) {
382 err
= ww_mutex_lock(&resource
->dma_bufs
[r
]->resv
->lock
, ctx
);
387 ww_acquire_done(ctx
);
391 content_resv_idx
= r
;
393 /* Unlock the locked one ones */
395 ww_mutex_unlock(&resource
->dma_bufs
[r
]->resv
->lock
);
398 ww_mutex_unlock(&content_resv
->lock
);
400 /* If we deadlock try with lock_slow and retry */
401 if (err
== -EDEADLK
) {
402 #if DMA_BUF_LOCK_DEBUG
403 printk(KERN_DEBUG
"deadlock at dma_buf fd %i\n",
404 resource
->list_of_dma_buf_fds
[content_resv_idx
]);
406 content_resv
= resource
->dma_bufs
[content_resv_idx
]->resv
;
407 ww_mutex_lock_slow(&content_resv
->lock
, ctx
);
411 /* If we are here the function failed */
412 ww_acquire_fini(ctx
);
416 static int dma_buf_lock_handle_release(struct inode
*inode
, struct file
*file
)
418 dma_buf_lock_resource
*resource
;
420 if (!is_dma_buf_lock_file(file
))
423 resource
= file
->private_data
;
424 #if DMA_BUF_LOCK_DEBUG
425 printk("dma_buf_lock_handle_release\n");
427 mutex_lock(&dma_buf_lock_mutex
);
428 kref_put(&resource
->refcount
, dma_buf_lock_dounlock
);
429 mutex_unlock(&dma_buf_lock_mutex
);
434 static unsigned int dma_buf_lock_handle_poll(struct file
*file
,
435 struct poll_table_struct
*wait
)
437 dma_buf_lock_resource
*resource
;
438 unsigned int ret
= 0;
440 if (!is_dma_buf_lock_file(file
))
443 resource
= file
->private_data
;
444 #if DMA_BUF_LOCK_DEBUG
445 printk("dma_buf_lock_handle_poll\n");
447 if (1 == atomic_read(&resource
->locked
))
449 /* Resources have been locked */
450 ret
= POLLIN
| POLLRDNORM
;
451 if (resource
->exclusive
)
453 ret
|= POLLOUT
| POLLWRNORM
;
458 if (!poll_does_not_wait(wait
))
460 poll_wait(file
, &resource
->wait
, wait
);
463 #if DMA_BUF_LOCK_DEBUG
464 printk("dma_buf_lock_handle_poll : return %i\n", ret
);
469 static const struct file_operations dma_buf_lock_handle_fops
= {
470 .owner
= THIS_MODULE
,
471 .release
= dma_buf_lock_handle_release
,
472 .poll
= dma_buf_lock_handle_poll
,
476 * is_dma_buf_lock_file - Check if struct file* is associated with dma_buf_lock
478 static inline int is_dma_buf_lock_file(struct file
*file
)
480 return file
->f_op
== &dma_buf_lock_handle_fops
;
486 * Start requested lock.
488 * Allocates required memory, copies dma_buf_fd list from userspace,
489 * acquires related reservation objects, and starts the lock.
491 static int dma_buf_lock_dolock(dma_buf_lock_k_request
*request
)
493 dma_buf_lock_resource
*resource
;
494 struct ww_acquire_ctx ww_ctx
;
500 if (NULL
== request
->list_of_dma_buf_fds
)
504 if (request
->count
<= 0)
508 if (request
->count
> DMA_BUF_LOCK_BUF_MAX
)
512 if (request
->exclusive
!= DMA_BUF_LOCK_NONEXCLUSIVE
&&
513 request
->exclusive
!= DMA_BUF_LOCK_EXCLUSIVE
)
518 resource
= kzalloc(sizeof(dma_buf_lock_resource
), GFP_KERNEL
);
519 if (NULL
== resource
)
524 atomic_set(&resource
->locked
, 0);
525 kref_init(&resource
->refcount
);
526 INIT_LIST_HEAD(&resource
->link
);
527 resource
->count
= request
->count
;
529 /* Allocate space to store dma_buf_fds received from user space */
530 size
= request
->count
* sizeof(int);
531 resource
->list_of_dma_buf_fds
= kmalloc(size
, GFP_KERNEL
);
533 if (NULL
== resource
->list_of_dma_buf_fds
)
539 /* Allocate space to store dma_buf pointers associated with dma_buf_fds */
540 size
= sizeof(struct dma_buf
*) * request
->count
;
541 resource
->dma_bufs
= kmalloc(size
, GFP_KERNEL
);
543 if (NULL
== resource
->dma_bufs
)
545 kfree(resource
->list_of_dma_buf_fds
);
550 /* Copy requested list of dma_buf_fds from user space */
551 size
= request
->count
* sizeof(int);
552 if (0 != copy_from_user(resource
->list_of_dma_buf_fds
, (void __user
*)request
->list_of_dma_buf_fds
, size
))
554 kfree(resource
->list_of_dma_buf_fds
);
555 kfree(resource
->dma_bufs
);
559 #if DMA_BUF_LOCK_DEBUG
560 for (i
= 0; i
< request
->count
; i
++)
562 printk("dma_buf %i = %X\n", i
, resource
->list_of_dma_buf_fds
[i
]);
566 /* Initialize the fence associated with dma_buf_lock resource */
567 dma_buf_lock_fence_init(resource
);
569 INIT_LIST_HEAD(&resource
->dma_fence_callbacks
);
571 atomic_set(&resource
->fence_dep_count
, DMA_BUF_LOCK_INIT_BIAS
);
573 /* Add resource to global list */
574 mutex_lock(&dma_buf_lock_mutex
);
576 list_add(&resource
->link
, &dma_buf_lock_resource_list
);
578 mutex_unlock(&dma_buf_lock_mutex
);
580 for (i
= 0; i
< request
->count
; i
++)
582 /* Convert fd into dma_buf structure */
583 resource
->dma_bufs
[i
] = dma_buf_get(resource
->list_of_dma_buf_fds
[i
]);
585 if (IS_ERR_VALUE(PTR_ERR(resource
->dma_bufs
[i
])))
587 mutex_lock(&dma_buf_lock_mutex
);
588 kref_put(&resource
->refcount
, dma_buf_lock_dounlock
);
589 mutex_unlock(&dma_buf_lock_mutex
);
593 /*Check the reservation object associated with dma_buf */
594 if (NULL
== resource
->dma_bufs
[i
]->resv
)
596 mutex_lock(&dma_buf_lock_mutex
);
597 kref_put(&resource
->refcount
, dma_buf_lock_dounlock
);
598 mutex_unlock(&dma_buf_lock_mutex
);
601 #if DMA_BUF_LOCK_DEBUG
602 printk(KERN_DEBUG
"dma_buf_lock_dolock : dma_buf_fd %i dma_buf %p dma_fence reservation %p\n",
603 resource
->list_of_dma_buf_fds
[i
], resource
->dma_bufs
[i
], resource
->dma_bufs
[i
]->resv
);
607 init_waitqueue_head(&resource
->wait
);
609 kref_get(&resource
->refcount
);
611 /* Create file descriptor associated with lock request */
612 fd
= anon_inode_getfd("dma_buf_lock", &dma_buf_lock_handle_fops
,
613 (void *)resource
, 0);
616 mutex_lock(&dma_buf_lock_mutex
);
617 kref_put(&resource
->refcount
, dma_buf_lock_dounlock
);
618 kref_put(&resource
->refcount
, dma_buf_lock_dounlock
);
619 mutex_unlock(&dma_buf_lock_mutex
);
623 resource
->exclusive
= request
->exclusive
;
625 /* Start locking process */
626 ret
= dma_buf_lock_acquire_fence_reservation(resource
, &ww_ctx
);
628 #if DMA_BUF_LOCK_DEBUG
629 printk(KERN_DEBUG
"dma_buf_lock_dolock : Error %d locking reservations.\n", ret
);
632 mutex_lock(&dma_buf_lock_mutex
);
633 kref_put(&resource
->refcount
, dma_buf_lock_dounlock
);
634 kref_put(&resource
->refcount
, dma_buf_lock_dounlock
);
635 mutex_unlock(&dma_buf_lock_mutex
);
639 for (i
= 0; i
< request
->count
; i
++) {
640 struct reservation_object
*resv
= resource
->dma_bufs
[i
]->resv
;
642 if (!test_bit(i
, &resource
->exclusive
)) {
643 ret
= reservation_object_reserve_shared(resv
);
645 #if DMA_BUF_LOCK_DEBUG
646 printk(KERN_DEBUG
"dma_buf_lock_dolock : Error %d reserving space for shared fence.\n", ret
);
651 ret
= dma_buf_lock_add_fence_reservation_callback(resource
,
655 #if DMA_BUF_LOCK_DEBUG
656 printk(KERN_DEBUG
"dma_buf_lock_dolock : Error %d adding reservation to callback.\n", ret
);
661 reservation_object_add_shared_fence(resv
, &resource
->fence
);
663 ret
= dma_buf_lock_add_fence_reservation_callback(resource
,
667 #if DMA_BUF_LOCK_DEBUG
668 printk(KERN_DEBUG
"dma_buf_lock_dolock : Error %d adding reservation to callback.\n", ret
);
673 reservation_object_add_excl_fence(resv
, &resource
->fence
);
677 dma_buf_lock_release_fence_reservation(resource
, &ww_ctx
);
679 if (IS_ERR_VALUE((unsigned long)ret
))
683 mutex_lock(&dma_buf_lock_mutex
);
684 kref_put(&resource
->refcount
, dma_buf_lock_dounlock
);
685 kref_put(&resource
->refcount
, dma_buf_lock_dounlock
);
686 mutex_unlock(&dma_buf_lock_mutex
);
691 /* Test if the callbacks were already triggered */
692 if (!atomic_sub_return(DMA_BUF_LOCK_INIT_BIAS
, &resource
->fence_dep_count
))
693 atomic_set(&resource
->locked
, 1);
695 #if DMA_BUF_LOCK_DEBUG
696 printk("dma_buf_lock_dolock : complete\n");
698 mutex_lock(&dma_buf_lock_mutex
);
699 kref_put(&resource
->refcount
, dma_buf_lock_dounlock
);
700 mutex_unlock(&dma_buf_lock_mutex
);
705 static void dma_buf_lock_dounlock(struct kref
*ref
)
708 dma_buf_lock_resource
*resource
= container_of(ref
, dma_buf_lock_resource
, refcount
);
710 atomic_set(&resource
->locked
, 0);
712 /* Signal the resource's fence. */
713 dma_fence_signal(&resource
->fence
);
715 dma_buf_lock_fence_free_callbacks(resource
);
717 list_del(&resource
->link
);
719 for (i
= 0; i
< resource
->count
; i
++)
721 if (resource
->dma_bufs
[i
])
722 dma_buf_put(resource
->dma_bufs
[i
]);
725 kfree(resource
->dma_bufs
);
726 kfree(resource
->list_of_dma_buf_fds
);
727 dma_fence_put(&resource
->fence
);
730 static int __init
dma_buf_lock_init(void)
733 #if DMA_BUF_LOCK_DEBUG
734 printk("dma_buf_lock_init\n");
736 err
= alloc_chrdev_region(&dma_buf_lock_dev
, 0, 1, dma_buf_lock_dev_name
);
740 cdev_init(&dma_buf_lock_cdev
, &dma_buf_lock_fops
);
742 err
= cdev_add(&dma_buf_lock_cdev
, dma_buf_lock_dev
, 1);
746 dma_buf_lock_class
= class_create(THIS_MODULE
, dma_buf_lock_dev_name
);
747 if (IS_ERR(dma_buf_lock_class
))
749 err
= PTR_ERR(dma_buf_lock_class
);
754 mdev
= device_create(dma_buf_lock_class
, NULL
, dma_buf_lock_dev
, NULL
, dma_buf_lock_dev_name
);
761 class_destroy(dma_buf_lock_class
);
763 cdev_del(&dma_buf_lock_cdev
);
766 unregister_chrdev_region(dma_buf_lock_dev
, 1);
768 #if DMA_BUF_LOCK_DEBUG
769 printk("dma_buf_lock_init failed\n");
774 static void __exit
dma_buf_lock_exit(void)
776 #if DMA_BUF_LOCK_DEBUG
777 printk("dma_buf_lock_exit\n");
780 /* Unlock all outstanding references */
783 mutex_lock(&dma_buf_lock_mutex
);
784 if (list_empty(&dma_buf_lock_resource_list
))
786 mutex_unlock(&dma_buf_lock_mutex
);
791 dma_buf_lock_resource
*resource
= list_entry(dma_buf_lock_resource_list
.next
,
792 dma_buf_lock_resource
, link
);
793 kref_put(&resource
->refcount
, dma_buf_lock_dounlock
);
794 mutex_unlock(&dma_buf_lock_mutex
);
798 device_destroy(dma_buf_lock_class
, dma_buf_lock_dev
);
800 class_destroy(dma_buf_lock_class
);
802 cdev_del(&dma_buf_lock_cdev
);
804 unregister_chrdev_region(dma_buf_lock_dev
, 1);
807 #ifdef HAVE_UNLOCKED_IOCTL
808 static long dma_buf_lock_ioctl(struct file
*filp
, unsigned int cmd
, unsigned long arg
)
810 static int dma_buf_lock_ioctl(struct inode
*inode
, struct file
*filp
, unsigned int cmd
, unsigned long arg
)
813 dma_buf_lock_k_request request
;
814 int size
= _IOC_SIZE(cmd
);
816 if (_IOC_TYPE(cmd
) != DMA_BUF_LOCK_IOC_MAGIC
)
821 if ((_IOC_NR(cmd
) < DMA_BUF_LOCK_IOC_MINNR
) || (_IOC_NR(cmd
) > DMA_BUF_LOCK_IOC_MAXNR
))
828 case DMA_BUF_LOCK_FUNC_LOCK_ASYNC
:
829 if (size
!= sizeof(dma_buf_lock_k_request
))
833 if (copy_from_user(&request
, (void __user
*)arg
, size
))
837 #if DMA_BUF_LOCK_DEBUG
838 printk("DMA_BUF_LOCK_FUNC_LOCK_ASYNC - %i\n", request
.count
);
840 return dma_buf_lock_dolock(&request
);
846 module_init(dma_buf_lock_init
);
847 module_exit(dma_buf_lock_exit
);
849 MODULE_LICENSE("GPL");