gpu: update midgard r21p0 kernel driver
android_hardware_amlogic_kernel-modules_mali-driver: t83x/kernel/drivers/base/dma_buf_lock/src/dma_buf_lock.c
/*
 *
 * (C) COPYRIGHT 2012-2013, 2017 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 *
 */


#include <linux/version.h>
#include <asm/uaccess.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/reservation.h>
#include <linux/dma-buf.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/poll.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>

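/* The kernel's explicit fencing API was renamed from fence to dma_fence in
 * Linux 4.10. On older kernels, map the dma_fence_* names used throughout
 * this file onto the legacy fence_* API.
 */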
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))

#include <linux/fence.h>

#define dma_fence_context_alloc(a) fence_context_alloc(a)
#define dma_fence_init(a, b, c, d, e) fence_init(a, b, c, d, e)
#define dma_fence_get(a) fence_get(a)
#define dma_fence_put(a) fence_put(a)
#define dma_fence_signal(a) fence_signal(a)
#define dma_fence_is_signaled(a) fence_is_signaled(a)
#define dma_fence_add_callback(a, b, c) fence_add_callback(a, b, c)
#define dma_fence_remove_callback(a, b) fence_remove_callback(a, b)
#define dma_fence_get_status(a) (fence_is_signaled(a) ? (a)->status ?: 1 : 0)

#else

#include <linux/dma-fence.h>

#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0))
#define dma_fence_get_status(a) (dma_fence_is_signaled(a) ? \
				 (a)->status ?: 1 \
				 : 0)
#endif

#endif /* < 4.10.0 */

#include "dma_buf_lock.h"

/* Maximum number of buffers that a single handle can address */
#define DMA_BUF_LOCK_BUF_MAX 32

#define DMA_BUF_LOCK_DEBUG 1

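/* Initial bias applied to fence_dep_count while dependency callbacks are
 * still being installed. It keeps the count from reaching zero (and the
 * resource from being marked locked) before all callbacks have been added;
 * dma_buf_lock_dolock() subtracts the bias once set-up is complete.
 */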
#define DMA_BUF_LOCK_INIT_BIAS 0xFF

static dev_t dma_buf_lock_dev;
static struct cdev dma_buf_lock_cdev;
static struct class *dma_buf_lock_class;
static char dma_buf_lock_dev_name[] = "dma_buf_lock";

#ifdef HAVE_UNLOCKED_IOCTL
static long dma_buf_lock_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
#else
static int dma_buf_lock_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg);
#endif

static struct file_operations dma_buf_lock_fops =
{
	.owner = THIS_MODULE,
#ifdef HAVE_UNLOCKED_IOCTL
	.unlocked_ioctl = dma_buf_lock_ioctl,
#else
	.ioctl = dma_buf_lock_ioctl,
#endif
	.compat_ioctl = dma_buf_lock_ioctl,
};

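/* A dma_buf_lock_resource represents a single lock request. It embeds a
 * dma_fence that is attached to the reservation object of every buffer in
 * the request and is only signalled when the lock handle is released.
 */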
typedef struct dma_buf_lock_resource
{
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
	struct fence fence;
#else
	struct dma_fence fence;
#endif
	int *list_of_dma_buf_fds;             /* List of buffers copied from userspace */
	atomic_t locked;                      /* Status of lock */
	struct dma_buf **dma_bufs;
	unsigned long exclusive;              /* Exclusive access bitmap */
	atomic_t fence_dep_count;             /* Number of dma-fence dependencies */
	struct list_head dma_fence_callbacks; /* List of all callbacks set up to wait on other fences */
	wait_queue_head_t wait;
	struct kref refcount;
	struct list_head link;
	int count;
} dma_buf_lock_resource;

/**
 * struct dma_buf_lock_fence_cb - Callback data struct for dma-fence
 * @fence_cb: Callback function
 * @fence:    Pointer to the fence object on which this callback is waiting
 * @res:      Pointer to dma_buf_lock_resource that is waiting on this callback
 * @node:     List head for linking this callback to the lock resource
 */
struct dma_buf_lock_fence_cb {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
	struct fence_cb fence_cb;
	struct fence *fence;
#else
	struct dma_fence_cb fence_cb;
	struct dma_fence *fence;
#endif
	struct dma_buf_lock_resource *res;
	struct list_head node;
};

static LIST_HEAD(dma_buf_lock_resource_list);
static DEFINE_MUTEX(dma_buf_lock_mutex);

static inline int is_dma_buf_lock_file(struct file *);
static void dma_buf_lock_dounlock(struct kref *ref);


/*** dma_buf_lock fence part ***/

/* Spin lock protecting all Mali fences as fence->lock. */
static DEFINE_SPINLOCK(dma_buf_lock_fence_lock);

static const char *
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
dma_buf_lock_fence_get_driver_name(struct fence *fence)
#else
dma_buf_lock_fence_get_driver_name(struct dma_fence *fence)
#endif
{
	return "dma_buf_lock";
}

static const char *
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
dma_buf_lock_fence_get_timeline_name(struct fence *fence)
#else
dma_buf_lock_fence_get_timeline_name(struct dma_fence *fence)
#endif
{
	return "dma_buf_lock.timeline";
}

static bool
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
dma_buf_lock_fence_enable_signaling(struct fence *fence)
#else
dma_buf_lock_fence_enable_signaling(struct dma_fence *fence)
#endif
{
	return true;
}

#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
const struct fence_ops dma_buf_lock_fence_ops = {
	.wait = fence_default_wait,
#else
const struct dma_fence_ops dma_buf_lock_fence_ops = {
	.wait = dma_fence_default_wait,
#endif
	.get_driver_name = dma_buf_lock_fence_get_driver_name,
	.get_timeline_name = dma_buf_lock_fence_get_timeline_name,
	.enable_signaling = dma_buf_lock_fence_enable_signaling,
};

static void
dma_buf_lock_fence_init(dma_buf_lock_resource *resource)
{
	dma_fence_init(&resource->fence,
		       &dma_buf_lock_fence_ops,
		       &dma_buf_lock_fence_lock,
		       0,
		       0);
}

static void
dma_buf_lock_fence_free_callbacks(dma_buf_lock_resource *resource)
{
	struct dma_buf_lock_fence_cb *cb, *tmp;

	/* Clean up and free callbacks. */
	list_for_each_entry_safe(cb, tmp, &resource->dma_fence_callbacks, node) {
		/* Cancel callbacks that haven't been called yet and release
		 * the reference taken in dma_buf_lock_fence_add_callback().
		 */
		dma_fence_remove_callback(cb->fence, &cb->fence_cb);
		dma_fence_put(cb->fence);
		list_del(&cb->node);
		kfree(cb);
	}
}

static void
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
dma_buf_lock_fence_callback(struct fence *fence, struct fence_cb *cb)
#else
dma_buf_lock_fence_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
#endif
{
	struct dma_buf_lock_fence_cb *dma_buf_lock_cb = container_of(cb,
				struct dma_buf_lock_fence_cb,
				fence_cb);
	dma_buf_lock_resource *resource = dma_buf_lock_cb->res;

#if DMA_BUF_LOCK_DEBUG
	printk(KERN_DEBUG "dma_buf_lock_fence_callback\n");
#endif

	/* Callback function will be invoked in atomic context. */

	if (atomic_dec_and_test(&resource->fence_dep_count)) {
		atomic_set(&resource->locked, 1);
		wake_up(&resource->wait);
		/* A work item can be queued at this point to invoke
		 * dma_buf_lock_fence_free_callbacks.
		 */
	}
}

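/* Register dma_buf_lock_fence_callback() on a single fence. If the fence has
 * already signalled, its completion status is returned instead; otherwise a
 * reference to the fence is taken and fence_dep_count is incremented.
 */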
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
static int
dma_buf_lock_fence_add_callback(dma_buf_lock_resource *resource,
				struct fence *fence,
				fence_func_t callback)
#else
static int
dma_buf_lock_fence_add_callback(dma_buf_lock_resource *resource,
				struct dma_fence *fence,
				dma_fence_func_t callback)
#endif
{
	int err = 0;
	struct dma_buf_lock_fence_cb *fence_cb;

	if (!fence)
		return -EINVAL;

	fence_cb = kmalloc(sizeof(*fence_cb), GFP_KERNEL);
	if (!fence_cb)
		return -ENOMEM;

	fence_cb->fence = fence;
	fence_cb->res = resource;
	INIT_LIST_HEAD(&fence_cb->node);

	err = dma_fence_add_callback(fence, &fence_cb->fence_cb,
				     callback);

	if (err == -ENOENT) {
		/* Fence signaled, get the completion result */
		err = dma_fence_get_status(fence);

		/* Remap a successful completion (1) to a zero error code */
		if (err == 1)
			err = 0;

		kfree(fence_cb);
	} else if (err) {
		kfree(fence_cb);
	} else {
		/*
		 * Get reference to fence that will be kept until callback gets
		 * cleaned up in dma_buf_lock_fence_free_callbacks().
		 */
		dma_fence_get(fence);
		atomic_inc(&resource->fence_dep_count);
		/* Add callback to resource's list of callbacks */
		list_add(&fence_cb->node, &resource->dma_fence_callbacks);
	}

	return err;
}

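/* Collect the fences currently attached to a reservation object and register
 * dma_buf_lock_fence_callback() on each of them: always on the exclusive
 * fence, and additionally on every shared fence when an exclusive lock was
 * requested.
 */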
static int
dma_buf_lock_add_fence_reservation_callback(dma_buf_lock_resource *resource,
					     struct reservation_object *resv,
					     bool exclusive)
{
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
	struct fence *excl_fence = NULL;
	struct fence **shared_fences = NULL;
#else
	struct dma_fence *excl_fence = NULL;
	struct dma_fence **shared_fences = NULL;
#endif
	unsigned int shared_count = 0;
	int err, i;

	err = reservation_object_get_fences_rcu(resv,
						&excl_fence,
						&shared_count,
						&shared_fences);
	if (err)
		return err;

	if (excl_fence) {
		err = dma_buf_lock_fence_add_callback(resource,
						      excl_fence,
						      dma_buf_lock_fence_callback);

		/* Release our reference to the fence, taken by
		 * reservation_object_get_fences_rcu(). We have set up our
		 * callback (if that was possible), and the fence's owner is
		 * responsible for signaling the fence before allowing it to
		 * disappear.
		 */
		dma_fence_put(excl_fence);

		if (err)
			goto out;
	}

	if (exclusive) {
		for (i = 0; i < shared_count; i++) {
			err = dma_buf_lock_fence_add_callback(resource,
							      shared_fences[i],
							      dma_buf_lock_fence_callback);
			if (err)
				goto out;
		}
	}

	/* Release all our references to the shared fences, taken by
	 * reservation_object_get_fences_rcu(). We have set up our callback (if
	 * that was possible), and the fence's owner is responsible for
	 * signaling the fence before allowing it to disappear.
	 */
out:
	for (i = 0; i < shared_count; i++)
		dma_fence_put(shared_fences[i]);
	kfree(shared_fences);

	return err;
}

static void
dma_buf_lock_release_fence_reservation(dma_buf_lock_resource *resource,
				       struct ww_acquire_ctx *ctx)
{
	unsigned int r;

	for (r = 0; r < resource->count; r++)
		ww_mutex_unlock(&resource->dma_bufs[r]->resv->lock);
	ww_acquire_fini(ctx);
}

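/* Lock the reservation objects of all buffers in the request using the
 * wait/wound mutex protocol: on -EDEADLK every lock taken so far is dropped,
 * the contended lock is re-acquired with ww_mutex_lock_slow(), and the whole
 * sequence is retried so that lock acquisition cannot deadlock.
 */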
static int
dma_buf_lock_acquire_fence_reservation(dma_buf_lock_resource *resource,
				       struct ww_acquire_ctx *ctx)
{
	struct reservation_object *content_resv = NULL;
	unsigned int content_resv_idx = 0;
	unsigned int r;
	int err = 0;

	ww_acquire_init(ctx, &reservation_ww_class);

retry:
	for (r = 0; r < resource->count; r++) {
		if (resource->dma_bufs[r]->resv == content_resv) {
			content_resv = NULL;
			continue;
		}

		err = ww_mutex_lock(&resource->dma_bufs[r]->resv->lock, ctx);
		if (err)
			goto error;
	}

	ww_acquire_done(ctx);
	return err;

error:
	content_resv_idx = r;

	/* Unlock the ones we have already locked */
	while (r--)
		ww_mutex_unlock(&resource->dma_bufs[r]->resv->lock);

	if (content_resv)
		ww_mutex_unlock(&content_resv->lock);

	/* If we deadlocked, take the contended lock with ww_mutex_lock_slow()
	 * and retry.
	 */
	if (err == -EDEADLK) {
#if DMA_BUF_LOCK_DEBUG
		printk(KERN_DEBUG "deadlock at dma_buf fd %i\n",
		       resource->list_of_dma_buf_fds[content_resv_idx]);
#endif
		content_resv = resource->dma_bufs[content_resv_idx]->resv;
		ww_mutex_lock_slow(&content_resv->lock, ctx);
		goto retry;
	}

	/* If we are here the function failed */
	ww_acquire_fini(ctx);
	return err;
}

static int dma_buf_lock_handle_release(struct inode *inode, struct file *file)
{
	dma_buf_lock_resource *resource;

	if (!is_dma_buf_lock_file(file))
		return -EINVAL;

	resource = file->private_data;
#if DMA_BUF_LOCK_DEBUG
	printk("dma_buf_lock_handle_release\n");
#endif
	mutex_lock(&dma_buf_lock_mutex);
	kref_put(&resource->refcount, dma_buf_lock_dounlock);
	mutex_unlock(&dma_buf_lock_mutex);

	return 0;
}

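/* Poll on a lock handle: the handle becomes readable (POLLIN | POLLRDNORM)
 * once the lock has been acquired, and additionally writable
 * (POLLOUT | POLLWRNORM) if the lock is exclusive.
 */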
static unsigned int dma_buf_lock_handle_poll(struct file *file,
					     struct poll_table_struct *wait)
{
	dma_buf_lock_resource *resource;
	unsigned int ret = 0;

	if (!is_dma_buf_lock_file(file))
		return POLLERR;

	resource = file->private_data;
#if DMA_BUF_LOCK_DEBUG
	printk("dma_buf_lock_handle_poll\n");
#endif
	if (1 == atomic_read(&resource->locked))
	{
		/* Resources have been locked */
		ret = POLLIN | POLLRDNORM;
		if (resource->exclusive)
		{
			ret |= POLLOUT | POLLWRNORM;
		}
	}
	else
	{
		if (!poll_does_not_wait(wait))
		{
			poll_wait(file, &resource->wait, wait);
		}
	}
#if DMA_BUF_LOCK_DEBUG
	printk("dma_buf_lock_handle_poll : return %i\n", ret);
#endif
	return ret;
}

static const struct file_operations dma_buf_lock_handle_fops = {
	.owner = THIS_MODULE,
	.release = dma_buf_lock_handle_release,
	.poll = dma_buf_lock_handle_poll,
};

/*
 * is_dma_buf_lock_file - Check if struct file* is associated with dma_buf_lock
 */
static inline int is_dma_buf_lock_file(struct file *file)
{
	return file->f_op == &dma_buf_lock_handle_fops;
}


/*
 * Start requested lock.
 *
 * Allocates required memory, copies dma_buf_fd list from userspace,
 * acquires related reservation objects, and starts the lock.
 *
 * Returns a file descriptor representing the lock on success, or a
 * negative error code on failure.
 */
static int dma_buf_lock_dolock(dma_buf_lock_k_request *request)
{
	dma_buf_lock_resource *resource;
	struct ww_acquire_ctx ww_ctx;
	int size;
	int fd;
	int i;
	int ret;

	if (NULL == request->list_of_dma_buf_fds)
	{
		return -EINVAL;
	}
	if (request->count <= 0)
	{
		return -EINVAL;
	}
	if (request->count > DMA_BUF_LOCK_BUF_MAX)
	{
		return -EINVAL;
	}
	if (request->exclusive != DMA_BUF_LOCK_NONEXCLUSIVE &&
	    request->exclusive != DMA_BUF_LOCK_EXCLUSIVE)
	{
		return -EINVAL;
	}

	resource = kzalloc(sizeof(dma_buf_lock_resource), GFP_KERNEL);
	if (NULL == resource)
	{
		return -ENOMEM;
	}

	atomic_set(&resource->locked, 0);
	kref_init(&resource->refcount);
	INIT_LIST_HEAD(&resource->link);
	resource->count = request->count;

	/* Allocate space to store dma_buf_fds received from user space */
	size = request->count * sizeof(int);
	resource->list_of_dma_buf_fds = kmalloc(size, GFP_KERNEL);

	if (NULL == resource->list_of_dma_buf_fds)
	{
		kfree(resource);
		return -ENOMEM;
	}

	/* Allocate space to store dma_buf pointers associated with dma_buf_fds.
	 * Use a zeroed allocation so that error paths can tell which entries
	 * hold a valid dma_buf reference.
	 */
	size = sizeof(struct dma_buf *) * request->count;
	resource->dma_bufs = kzalloc(size, GFP_KERNEL);

	if (NULL == resource->dma_bufs)
	{
		kfree(resource->list_of_dma_buf_fds);
		kfree(resource);
		return -ENOMEM;
	}

	/* Copy requested list of dma_buf_fds from user space */
	size = request->count * sizeof(int);
	if (0 != copy_from_user(resource->list_of_dma_buf_fds, (void __user *)request->list_of_dma_buf_fds, size))
	{
		kfree(resource->list_of_dma_buf_fds);
		kfree(resource->dma_bufs);
		kfree(resource);
		return -ENOMEM;
	}
#if DMA_BUF_LOCK_DEBUG
	for (i = 0; i < request->count; i++)
	{
		printk("dma_buf %i = %X\n", i, resource->list_of_dma_buf_fds[i]);
	}
#endif

	/* Initialize the fence associated with dma_buf_lock resource */
	dma_buf_lock_fence_init(resource);

	INIT_LIST_HEAD(&resource->dma_fence_callbacks);

	atomic_set(&resource->fence_dep_count, DMA_BUF_LOCK_INIT_BIAS);

	/* Add resource to global list */
	mutex_lock(&dma_buf_lock_mutex);

	list_add(&resource->link, &dma_buf_lock_resource_list);

	mutex_unlock(&dma_buf_lock_mutex);

	for (i = 0; i < request->count; i++)
	{
		/* Convert fd into dma_buf structure */
		resource->dma_bufs[i] = dma_buf_get(resource->list_of_dma_buf_fds[i]);

		if (IS_ERR_VALUE(PTR_ERR(resource->dma_bufs[i])))
		{
			/* Clear the entry so dma_buf_lock_dounlock() does not
			 * try to put an ERR_PTR value.
			 */
			resource->dma_bufs[i] = NULL;
			mutex_lock(&dma_buf_lock_mutex);
			kref_put(&resource->refcount, dma_buf_lock_dounlock);
			mutex_unlock(&dma_buf_lock_mutex);
			return -EINVAL;
		}

		/* Check the reservation object associated with the dma_buf */
		if (NULL == resource->dma_bufs[i]->resv)
		{
			mutex_lock(&dma_buf_lock_mutex);
			kref_put(&resource->refcount, dma_buf_lock_dounlock);
			mutex_unlock(&dma_buf_lock_mutex);
			return -EINVAL;
		}
#if DMA_BUF_LOCK_DEBUG
		printk(KERN_DEBUG "dma_buf_lock_dolock : dma_buf_fd %i dma_buf %p dma_fence reservation %p\n",
		       resource->list_of_dma_buf_fds[i], resource->dma_bufs[i], resource->dma_bufs[i]->resv);
#endif
	}

	init_waitqueue_head(&resource->wait);

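	/* Take an extra reference for the file handle created below: one
	 * reference is dropped when the handle is released, the other at the
	 * end of this function (or in its error paths).
	 */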
	kref_get(&resource->refcount);

	/* Create file descriptor associated with lock request */
	fd = anon_inode_getfd("dma_buf_lock", &dma_buf_lock_handle_fops,
			      (void *)resource, 0);
	if (fd < 0)
	{
		mutex_lock(&dma_buf_lock_mutex);
		kref_put(&resource->refcount, dma_buf_lock_dounlock);
		kref_put(&resource->refcount, dma_buf_lock_dounlock);
		mutex_unlock(&dma_buf_lock_mutex);
		return fd;
	}

	resource->exclusive = request->exclusive;

	/* Start locking process */
	ret = dma_buf_lock_acquire_fence_reservation(resource, &ww_ctx);
	if (ret) {
#if DMA_BUF_LOCK_DEBUG
		printk(KERN_DEBUG "dma_buf_lock_dolock : Error %d locking reservations.\n", ret);
#endif
		put_unused_fd(fd);
		mutex_lock(&dma_buf_lock_mutex);
		kref_put(&resource->refcount, dma_buf_lock_dounlock);
		kref_put(&resource->refcount, dma_buf_lock_dounlock);
		mutex_unlock(&dma_buf_lock_mutex);
		return ret;
	}

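	/* Attach this resource's fence to every buffer's reservation object.
	 * Buffers whose bit is set in the exclusive bitmap get the fence
	 * installed as the exclusive fence (after waiting for all fences
	 * already attached); the rest get it added as a shared fence (waiting
	 * only on the current exclusive fence).
	 */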
	for (i = 0; i < request->count; i++) {
		struct reservation_object *resv = resource->dma_bufs[i]->resv;

		if (!test_bit(i, &resource->exclusive)) {
			ret = reservation_object_reserve_shared(resv);
			if (ret) {
#if DMA_BUF_LOCK_DEBUG
				printk(KERN_DEBUG "dma_buf_lock_dolock : Error %d reserving space for shared fence.\n", ret);
#endif
				break;
			}

			ret = dma_buf_lock_add_fence_reservation_callback(resource,
									  resv,
									  false);
			if (ret) {
#if DMA_BUF_LOCK_DEBUG
				printk(KERN_DEBUG "dma_buf_lock_dolock : Error %d adding reservation to callback.\n", ret);
#endif
				break;
			}

			reservation_object_add_shared_fence(resv, &resource->fence);
		} else {
			ret = dma_buf_lock_add_fence_reservation_callback(resource,
									  resv,
									  true);
			if (ret) {
#if DMA_BUF_LOCK_DEBUG
				printk(KERN_DEBUG "dma_buf_lock_dolock : Error %d adding reservation to callback.\n", ret);
#endif
				break;
			}

			reservation_object_add_excl_fence(resv, &resource->fence);
		}
	}

	dma_buf_lock_release_fence_reservation(resource, &ww_ctx);

	if (IS_ERR_VALUE((unsigned long)ret))
	{
		put_unused_fd(fd);

		mutex_lock(&dma_buf_lock_mutex);
		kref_put(&resource->refcount, dma_buf_lock_dounlock);
		kref_put(&resource->refcount, dma_buf_lock_dounlock);
		mutex_unlock(&dma_buf_lock_mutex);

		return ret;
	}

	/* Test if the callbacks were already triggered */
	if (!atomic_sub_return(DMA_BUF_LOCK_INIT_BIAS, &resource->fence_dep_count))
		atomic_set(&resource->locked, 1);

#if DMA_BUF_LOCK_DEBUG
	printk("dma_buf_lock_dolock : complete\n");
#endif
	mutex_lock(&dma_buf_lock_mutex);
	kref_put(&resource->refcount, dma_buf_lock_dounlock);
	mutex_unlock(&dma_buf_lock_mutex);

	return fd;
}

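/* Release function invoked when the last reference to a lock resource is
 * dropped: signals the resource's fence so other waiters on the reservation
 * objects can proceed, frees any pending dependency callbacks, and drops the
 * references held on the dma_bufs.
 */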
static void dma_buf_lock_dounlock(struct kref *ref)
{
	int i;
	dma_buf_lock_resource *resource = container_of(ref, dma_buf_lock_resource, refcount);

	atomic_set(&resource->locked, 0);

	/* Signal the resource's fence. */
	dma_fence_signal(&resource->fence);

	dma_buf_lock_fence_free_callbacks(resource);

	list_del(&resource->link);

	for (i = 0; i < resource->count; i++)
	{
		if (resource->dma_bufs[i])
			dma_buf_put(resource->dma_bufs[i]);
	}

	kfree(resource->dma_bufs);
	kfree(resource->list_of_dma_buf_fds);
	dma_fence_put(&resource->fence);
}

static int __init dma_buf_lock_init(void)
{
	int err;
#if DMA_BUF_LOCK_DEBUG
	printk("dma_buf_lock_init\n");
#endif
	err = alloc_chrdev_region(&dma_buf_lock_dev, 0, 1, dma_buf_lock_dev_name);

	if (0 == err)
	{
		cdev_init(&dma_buf_lock_cdev, &dma_buf_lock_fops);

		err = cdev_add(&dma_buf_lock_cdev, dma_buf_lock_dev, 1);

		if (0 == err)
		{
			dma_buf_lock_class = class_create(THIS_MODULE, dma_buf_lock_dev_name);
			if (IS_ERR(dma_buf_lock_class))
			{
				err = PTR_ERR(dma_buf_lock_class);
			}
			else
			{
				struct device *mdev;
				mdev = device_create(dma_buf_lock_class, NULL, dma_buf_lock_dev, NULL, dma_buf_lock_dev_name);
				if (!IS_ERR(mdev))
				{
					return 0;
				}

				err = PTR_ERR(mdev);
				class_destroy(dma_buf_lock_class);
			}
			cdev_del(&dma_buf_lock_cdev);
		}

		unregister_chrdev_region(dma_buf_lock_dev, 1);
	}
#if DMA_BUF_LOCK_DEBUG
	printk("dma_buf_lock_init failed\n");
#endif
	return err;
}

static void __exit dma_buf_lock_exit(void)
{
#if DMA_BUF_LOCK_DEBUG
	printk("dma_buf_lock_exit\n");
#endif

	/* Unlock all outstanding references */
	while (1)
	{
		mutex_lock(&dma_buf_lock_mutex);
		if (list_empty(&dma_buf_lock_resource_list))
		{
			mutex_unlock(&dma_buf_lock_mutex);
			break;
		}
		else
		{
			dma_buf_lock_resource *resource = list_entry(dma_buf_lock_resource_list.next,
								     dma_buf_lock_resource, link);
			kref_put(&resource->refcount, dma_buf_lock_dounlock);
			mutex_unlock(&dma_buf_lock_mutex);
		}
	}

	device_destroy(dma_buf_lock_class, dma_buf_lock_dev);

	class_destroy(dma_buf_lock_class);

	cdev_del(&dma_buf_lock_cdev);

	unregister_chrdev_region(dma_buf_lock_dev, 1);
}

#ifdef HAVE_UNLOCKED_IOCTL
static long dma_buf_lock_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
#else
static int dma_buf_lock_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
#endif
{
	dma_buf_lock_k_request request;
	int size = _IOC_SIZE(cmd);

	if (_IOC_TYPE(cmd) != DMA_BUF_LOCK_IOC_MAGIC)
	{
		return -ENOTTY;
	}
	if ((_IOC_NR(cmd) < DMA_BUF_LOCK_IOC_MINNR) || (_IOC_NR(cmd) > DMA_BUF_LOCK_IOC_MAXNR))
	{
		return -ENOTTY;
	}

	switch (cmd)
	{
	case DMA_BUF_LOCK_FUNC_LOCK_ASYNC:
		if (size != sizeof(dma_buf_lock_k_request))
		{
			return -ENOTTY;
		}
		if (copy_from_user(&request, (void __user *)arg, size))
		{
			return -EFAULT;
		}
#if DMA_BUF_LOCK_DEBUG
		printk("DMA_BUF_LOCK_FUNC_LOCK_ASYNC - %i\n", request.count);
#endif
		return dma_buf_lock_dolock(&request);
	}

	return -ENOTTY;
}

module_init(dma_buf_lock_init);
module_exit(dma_buf_lock_exit);

MODULE_LICENSE("GPL");