/*
 * Framework for buffer objects that can be shared across devices/subsystems.
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <sumit.semwal@ti.com>
 *
 * Many thanks to the linaro-mm-sig list, and especially
 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
 * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
 * refining of this idea.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/anon_inodes.h>
#include <linux/export.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static inline int is_dma_buf_file(struct file *);

struct dma_buf_list {
        struct list_head head;
        struct mutex lock;
};

static struct dma_buf_list db_list;

static int dma_buf_release(struct inode *inode, struct file *file)
{
        struct dma_buf *dmabuf;

        if (!is_dma_buf_file(file))
                return -EINVAL;

        dmabuf = file->private_data;

        BUG_ON(dmabuf->vmapping_counter);

        dmabuf->ops->release(dmabuf);

        mutex_lock(&db_list.lock);
        list_del(&dmabuf->list_node);
        mutex_unlock(&db_list.lock);

        kfree(dmabuf);
        return 0;
}

static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
{
        struct dma_buf *dmabuf;

        if (!is_dma_buf_file(file))
                return -EINVAL;

        dmabuf = file->private_data;

        /* check for overflowing the buffer's size */
        if (vma->vm_pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
            dmabuf->size >> PAGE_SHIFT)
                return -EINVAL;

        return dmabuf->ops->mmap(dmabuf, vma);
}

static const struct file_operations dma_buf_fops = {
        .release        = dma_buf_release,
        .mmap           = dma_buf_mmap_internal,
};

/**
 * is_dma_buf_file - Check if struct file* is associated with dma_buf
 */
static inline int is_dma_buf_file(struct file *file)
{
        return file->f_op == &dma_buf_fops;
}

/**
 * dma_buf_export_named - Creates a new dma_buf, and associates an anon file
 * with this buffer, so it can be exported.
 * Also connects the allocator-specific data and ops to the buffer.
 * Additionally, provide a name string for the exporter; useful in debugging.
 * @priv:     [in]    Attach private data of allocator to this buffer
 * @ops:      [in]    Attach allocator-defined dma buf ops to the new buffer.
 * @size:     [in]    Size of the buffer
 * @flags:    [in]    mode flags for the file.
 * @exp_name: [in]    name of the exporting module - useful for debugging.
 *
 * Returns, on success, a newly created dma_buf object, which wraps the
 * supplied private data and operations for dma_buf_ops. On missing ops or
 * failure to allocate the struct dma_buf, returns a negative error wrapped
 * in ERR_PTR().
 */
struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops,
                                size_t size, int flags, const char *exp_name)
{
        struct dma_buf *dmabuf;
        struct file *file;

        if (WARN_ON(!priv || !ops
                          || !ops->map_dma_buf
                          || !ops->unmap_dma_buf
                          || !ops->release
                          || !ops->kmap_atomic
                          || !ops->kmap
                          || !ops->mmap)) {
                return ERR_PTR(-EINVAL);
        }

        dmabuf = kzalloc(sizeof(struct dma_buf), GFP_KERNEL);
        if (dmabuf == NULL)
                return ERR_PTR(-ENOMEM);

        dmabuf->priv = priv;
        dmabuf->ops = ops;
        dmabuf->size = size;
        dmabuf->exp_name = exp_name;

        file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf, flags);

        dmabuf->file = file;

        mutex_init(&dmabuf->lock);
        INIT_LIST_HEAD(&dmabuf->attachments);

        mutex_lock(&db_list.lock);
        list_add(&dmabuf->list_node, &db_list.head);
        mutex_unlock(&db_list.lock);

        return dmabuf;
}
EXPORT_SYMBOL_GPL(dma_buf_export_named);

/**
 * dma_buf_fd - returns a file descriptor for the given dma_buf
 * @dmabuf:   [in]    pointer to dma_buf for which fd is required.
 * @flags:    [in]    flags to give to fd
 *
 * On success, returns an associated 'fd'. Else, returns error.
 */
int dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
        int fd;

        if (!dmabuf || !dmabuf->file)
                return -EINVAL;

        fd = get_unused_fd_flags(flags);
        if (fd < 0)
                return fd;

        fd_install(fd, dmabuf->file);

        return fd;
}
EXPORT_SYMBOL_GPL(dma_buf_fd);
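
/*
 * Illustrative sketch (not part of the original file): how an exporting
 * driver might wrap its backing storage in a dma_buf and hand it to
 * userspace as a file descriptor. The struct my_buffer type and
 * my_dmabuf_ops are hypothetical names used only for this example.
 */
#if 0
static int my_exporter_share(struct my_buffer *mybuf, int flags)
{
        struct dma_buf *dmabuf;
        int fd;

        /* wrap the exporter's private data and ops in a new dma_buf */
        dmabuf = dma_buf_export_named(mybuf, &my_dmabuf_ops, mybuf->size,
                                      flags, KBUILD_MODNAME);
        if (IS_ERR(dmabuf))
                return PTR_ERR(dmabuf);

        /* hand the buffer to userspace as an fd; drop our ref on failure */
        fd = dma_buf_fd(dmabuf, flags);
        if (fd < 0)
                dma_buf_put(dmabuf);

        return fd;
}
#endif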

/**
 * dma_buf_get - returns the dma_buf structure related to an fd
 * @fd:       [in]    fd associated with the dma_buf to be returned
 *
 * On success, returns the dma_buf structure associated with an fd; uses
 * the file's refcounting done by fget to increase the refcount. Returns
 * ERR_PTR otherwise.
 */
struct dma_buf *dma_buf_get(int fd)
{
        struct file *file;

        file = fget(fd);
        if (!file)
                return ERR_PTR(-EBADF);

        if (!is_dma_buf_file(file)) {
                fput(file);
                return ERR_PTR(-EINVAL);
        }

        return file->private_data;
}
EXPORT_SYMBOL_GPL(dma_buf_get);

/**
 * dma_buf_put - decreases refcount of the buffer
 * @dmabuf:   [in]    buffer to reduce refcount of
 *
 * Uses the file's refcounting done implicitly by fput().
 */
void dma_buf_put(struct dma_buf *dmabuf)
{
        if (WARN_ON(!dmabuf || !dmabuf->file))
                return;

        fput(dmabuf->file);
}
EXPORT_SYMBOL_GPL(dma_buf_put);
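
/*
 * Illustrative sketch (not part of the original file): an importer resolving
 * an fd received from userspace into a dma_buf reference and balancing that
 * reference when the buffer is not usable. struct my_context and its fields
 * are hypothetical names for this example only.
 */
#if 0
static int my_import_ioctl(struct my_context *ctx, int fd)
{
        struct dma_buf *dmabuf;

        /* resolve the fd; dma_buf_get() takes a file reference on success */
        dmabuf = dma_buf_get(fd);
        if (IS_ERR(dmabuf))
                return PTR_ERR(dmabuf);

        if (dmabuf->size < ctx->min_size) {
                /* not usable: drop the reference taken above */
                dma_buf_put(dmabuf);
                return -EINVAL;
        }

        ctx->dmabuf = dmabuf;   /* keep the reference until teardown */
        return 0;
}
#endif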

/**
 * dma_buf_attach - Add the device to dma_buf's attachments list; optionally,
 * calls attach() of dma_buf_ops to allow device-specific attach functionality
 * @dmabuf:   [in]    buffer to attach device to.
 * @dev:      [in]    device to be attached.
 *
 * Returns struct dma_buf_attachment * for this attachment; may return an
 * ERR_PTR-encoded negative error code.
 */
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
                                          struct device *dev)
{
        struct dma_buf_attachment *attach;
        int ret;

        if (WARN_ON(!dmabuf || !dev))
                return ERR_PTR(-EINVAL);

        attach = kzalloc(sizeof(struct dma_buf_attachment), GFP_KERNEL);
        if (attach == NULL)
                return ERR_PTR(-ENOMEM);

        attach->dev = dev;
        attach->dmabuf = dmabuf;

        mutex_lock(&dmabuf->lock);

        if (dmabuf->ops->attach) {
                ret = dmabuf->ops->attach(dmabuf, dev, attach);
                if (ret)
                        goto err_attach;
        }
        list_add(&attach->node, &dmabuf->attachments);

        mutex_unlock(&dmabuf->lock);
        return attach;

err_attach:
        kfree(attach);
        mutex_unlock(&dmabuf->lock);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dma_buf_attach);

/**
 * dma_buf_detach - Remove the given attachment from dmabuf's attachments list;
 * optionally calls detach() of dma_buf_ops for device-specific detach
 * @dmabuf:   [in]    buffer to detach from.
 * @attach:   [in]    attachment to be detached; is freed after this call.
 */
void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
{
        if (WARN_ON(!dmabuf || !attach))
                return;

        mutex_lock(&dmabuf->lock);
        list_del(&attach->node);
        if (dmabuf->ops->detach)
                dmabuf->ops->detach(dmabuf, attach);

        mutex_unlock(&dmabuf->lock);
        kfree(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_detach);

/**
 * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
 * dma_buf_ops.
 * @attach:     [in]    attachment whose scatterlist is to be returned
 * @direction:  [in]    direction of DMA transfer
 *
 * Returns sg_table containing the scatterlist to be returned; may return NULL
 * or an ERR_PTR-encoded error.
 */
struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
                                        enum dma_data_direction direction)
{
        struct sg_table *sg_table = ERR_PTR(-EINVAL);

        might_sleep();

        if (WARN_ON(!attach || !attach->dmabuf))
                return ERR_PTR(-EINVAL);

        sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);

        return sg_table;
}
EXPORT_SYMBOL_GPL(dma_buf_map_attachment);

/**
 * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; might
 * deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of
 * dma_buf_ops.
 * @attach:     [in]    attachment to unmap buffer from
 * @sg_table:   [in]    scatterlist info of the buffer to unmap
 * @direction:  [in]    direction of DMA transfer
 */
void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
                                struct sg_table *sg_table,
                                enum dma_data_direction direction)
{
        might_sleep();

        if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
                return;

        attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
}
EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
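
/*
 * Illustrative sketch (not part of the original file): the typical importer
 * sequence of attaching a device, mapping the buffer for DMA, and tearing it
 * down again. my_device_dma() is a hypothetical stand-in for the importer's
 * own DMA work on the returned sg_table.
 */
#if 0
static int my_use_buffer(struct dma_buf *dmabuf, struct device *dev)
{
        struct dma_buf_attachment *attach;
        struct sg_table *sgt;
        int ret;

        attach = dma_buf_attach(dmabuf, dev);
        if (IS_ERR(attach))
                return PTR_ERR(attach);

        sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR_OR_NULL(sgt)) {
                ret = sgt ? PTR_ERR(sgt) : -ENOMEM;
                goto out_detach;
        }

        ret = my_device_dma(dev, sgt);  /* hypothetical DMA using the sg_table */

        dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
out_detach:
        dma_buf_detach(dmabuf, attach);
        return ret;
}
#endif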

/**
 * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
 * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
 * preparations. Coherency is only guaranteed in the specified range for the
 * specified access direction.
 * @dmabuf:     [in]    buffer to prepare cpu access for.
 * @start:      [in]    start of range for cpu access.
 * @len:        [in]    length of range for cpu access.
 * @direction:  [in]    direction of access for the cpu.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
                             enum dma_data_direction direction)
{
        int ret = 0;

        if (WARN_ON(!dmabuf))
                return -EINVAL;

        if (dmabuf->ops->begin_cpu_access)
                ret = dmabuf->ops->begin_cpu_access(dmabuf, start, len, direction);

        return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);

/**
 * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
 * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
 * actions. Coherency is only guaranteed in the specified range for the
 * specified access direction.
 * @dmabuf:     [in]    buffer to complete cpu access for.
 * @start:      [in]    start of range for cpu access.
 * @len:        [in]    length of range for cpu access.
 * @direction:  [in]    direction of access for the cpu.
 *
 * This call must always succeed.
 */
void dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
                            enum dma_data_direction direction)
{
        WARN_ON(!dmabuf);

        if (dmabuf->ops->end_cpu_access)
                dmabuf->ops->end_cpu_access(dmabuf, start, len, direction);
}
EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);

/**
 * dma_buf_kmap_atomic - Map a page of the buffer object into kernel address
 * space. The same restrictions as for kmap_atomic and friends apply.
 * @dmabuf:     [in]    buffer to map page from.
 * @page_num:   [in]    page in PAGE_SIZE units to map.
 *
 * This call must always succeed, any necessary preparations that might fail
 * need to be done in begin_cpu_access.
 */
void *dma_buf_kmap_atomic(struct dma_buf *dmabuf, unsigned long page_num)
{
        WARN_ON(!dmabuf);

        return dmabuf->ops->kmap_atomic(dmabuf, page_num);
}
EXPORT_SYMBOL_GPL(dma_buf_kmap_atomic);

/**
 * dma_buf_kunmap_atomic - Unmap a page obtained by dma_buf_kmap_atomic.
 * @dmabuf:     [in]    buffer to unmap page from.
 * @page_num:   [in]    page in PAGE_SIZE units to unmap.
 * @vaddr:      [in]    kernel space pointer obtained from dma_buf_kmap_atomic.
 *
 * This call must always succeed.
 */
void dma_buf_kunmap_atomic(struct dma_buf *dmabuf, unsigned long page_num,
                           void *vaddr)
{
        WARN_ON(!dmabuf);

        if (dmabuf->ops->kunmap_atomic)
                dmabuf->ops->kunmap_atomic(dmabuf, page_num, vaddr);
}
EXPORT_SYMBOL_GPL(dma_buf_kunmap_atomic);

/**
 * dma_buf_kmap - Map a page of the buffer object into kernel address space. The
 * same restrictions as for kmap and friends apply.
 * @dmabuf:     [in]    buffer to map page from.
 * @page_num:   [in]    page in PAGE_SIZE units to map.
 *
 * This call must always succeed, any necessary preparations that might fail
 * need to be done in begin_cpu_access.
 */
void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num)
{
        WARN_ON(!dmabuf);

        return dmabuf->ops->kmap(dmabuf, page_num);
}
EXPORT_SYMBOL_GPL(dma_buf_kmap);

/**
 * dma_buf_kunmap - Unmap a page obtained by dma_buf_kmap.
 * @dmabuf:     [in]    buffer to unmap page from.
 * @page_num:   [in]    page in PAGE_SIZE units to unmap.
 * @vaddr:      [in]    kernel space pointer obtained from dma_buf_kmap.
 *
 * This call must always succeed.
 */
void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long page_num,
                    void *vaddr)
{
        WARN_ON(!dmabuf);

        if (dmabuf->ops->kunmap)
                dmabuf->ops->kunmap(dmabuf, page_num, vaddr);
}
EXPORT_SYMBOL_GPL(dma_buf_kunmap);
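
/*
 * Illustrative sketch (not part of the original file): bracketing kernel CPU
 * access with begin/end_cpu_access and using the kmap interface to touch one
 * page. The page index, range and fill value are arbitrary example
 * parameters; my_cpu_patch_first_page() is a hypothetical helper name.
 */
#if 0
static int my_cpu_patch_first_page(struct dma_buf *dmabuf, u8 value)
{
        void *vaddr;
        int ret;

        /* let the exporter prepare the range for CPU writes */
        ret = dma_buf_begin_cpu_access(dmabuf, 0, PAGE_SIZE, DMA_TO_DEVICE);
        if (ret)
                return ret;

        vaddr = dma_buf_kmap(dmabuf, 0);        /* map page 0 */
        memset(vaddr, value, PAGE_SIZE);
        dma_buf_kunmap(dmabuf, 0, vaddr);

        dma_buf_end_cpu_access(dmabuf, 0, PAGE_SIZE, DMA_TO_DEVICE);
        return 0;
}
#endif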

/**
 * dma_buf_mmap - Set up a userspace mmap with the given vma
 * @dmabuf:     [in]    buffer that should back the vma
 * @vma:        [in]    vma for the mmap
 * @pgoff:      [in]    offset in pages where this mmap should start within the
 *                      dma-buf buffer.
 *
 * This function adjusts the passed in vma so that it points at the file of the
 * dma_buf operation. It also adjusts the starting pgoff and does bounds
 * checking on the size of the vma. Then it calls the exporter's mmap function
 * to set up the mapping.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
                 unsigned long pgoff)
{
        struct file *oldfile;
        int ret;

        if (WARN_ON(!dmabuf || !vma))
                return -EINVAL;

        /* check for offset overflow */
        if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) < pgoff)
                return -EOVERFLOW;

        /* check for overflowing the buffer's size */
        if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
            dmabuf->size >> PAGE_SHIFT)
                return -EINVAL;

        /* readjust the vma */
        get_file(dmabuf->file);
        oldfile = vma->vm_file;
        vma->vm_file = dmabuf->file;
        vma->vm_pgoff = pgoff;

        ret = dmabuf->ops->mmap(dmabuf, vma);
        if (ret) {
                /* restore old parameters on failure */
                vma->vm_file = oldfile;
                fput(dmabuf->file);
        } else {
                if (oldfile)
                        fput(oldfile);
        }
        return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_mmap);
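
/*
 * Illustrative sketch (not part of the original file): an importing driver
 * that exposes a dma_buf through its own char-device mmap by forwarding the
 * vma to dma_buf_mmap(). struct my_obj and the private_data lookup are
 * hypothetical names for this example only.
 */
#if 0
static int my_driver_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct my_obj *obj = file->private_data;        /* hypothetical lookup */

        /* map the whole buffer starting at page 0 of the dma-buf */
        return dma_buf_mmap(obj->dmabuf, vma, 0);
}
#endif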

/**
 * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
 * address space. Same restrictions as for vmap and friends apply.
 * @dmabuf:     [in]    buffer to vmap
 *
 * This call may fail due to lack of virtual mapping address space.
 * These calls are optional in drivers. The intended use for them
 * is for mapping objects linearly into kernel space for frequently used
 * objects. Please attempt to use kmap/kunmap before thinking about these
 * interfaces.
 */
void *dma_buf_vmap(struct dma_buf *dmabuf)
{
        void *ptr;

        if (WARN_ON(!dmabuf))
                return NULL;

        if (!dmabuf->ops->vmap)
                return NULL;

        mutex_lock(&dmabuf->lock);
        if (dmabuf->vmapping_counter) {
                dmabuf->vmapping_counter++;
                BUG_ON(!dmabuf->vmap_ptr);
                ptr = dmabuf->vmap_ptr;
                goto out_unlock;
        }

        BUG_ON(dmabuf->vmap_ptr);

        ptr = dmabuf->ops->vmap(dmabuf);
        if (IS_ERR_OR_NULL(ptr))
                goto out_unlock;

        dmabuf->vmap_ptr = ptr;
        dmabuf->vmapping_counter = 1;

out_unlock:
        mutex_unlock(&dmabuf->lock);
        return ptr;
}
EXPORT_SYMBOL_GPL(dma_buf_vmap);

/**
 * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
 * @dmabuf:     [in]    buffer to vunmap
 * @vaddr:      [in]    vmap to vunmap
 */
void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
{
        if (WARN_ON(!dmabuf))
                return;

        BUG_ON(!dmabuf->vmap_ptr);
        BUG_ON(dmabuf->vmapping_counter == 0);
        BUG_ON(dmabuf->vmap_ptr != vaddr);

        mutex_lock(&dmabuf->lock);
        if (--dmabuf->vmapping_counter == 0) {
                if (dmabuf->ops->vunmap)
                        dmabuf->ops->vunmap(dmabuf, vaddr);
                dmabuf->vmap_ptr = NULL;
        }
        mutex_unlock(&dmabuf->lock);
}
EXPORT_SYMBOL_GPL(dma_buf_vunmap);
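
/*
 * Illustrative sketch (not part of the original file): using the optional
 * vmap interface for a linear kernel mapping of the whole buffer, with the
 * matching vunmap on the way out. my_parse() is a hypothetical consumer of
 * the mapping used only for this example.
 */
#if 0
static int my_read_whole_buffer(struct dma_buf *dmabuf)
{
        void *vaddr;
        int ret;

        vaddr = dma_buf_vmap(dmabuf);
        if (IS_ERR_OR_NULL(vaddr))
                return vaddr ? PTR_ERR(vaddr) : -ENOMEM;

        ret = my_parse(vaddr, dmabuf->size);    /* hypothetical use of the mapping */

        dma_buf_vunmap(dmabuf, vaddr);
        return ret;
}
#endif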

#ifdef CONFIG_DEBUG_FS
static int dma_buf_describe(struct seq_file *s)
{
        int ret;
        struct dma_buf *buf_obj;
        struct dma_buf_attachment *attach_obj;
        int count = 0, attach_count;
        size_t size = 0;

        ret = mutex_lock_interruptible(&db_list.lock);
        if (ret)
                return ret;

        seq_printf(s, "\nDma-buf Objects:\n");
        seq_printf(s, "\texp_name\tsize\tflags\tmode\tcount\n");

        list_for_each_entry(buf_obj, &db_list.head, list_node) {
                ret = mutex_lock_interruptible(&buf_obj->lock);
                if (ret) {
                        seq_printf(s,
                                   "\tERROR locking buffer object: skipping\n");
                        continue;
                }

                seq_printf(s, "\t%s\t%08zu\t%08x\t%08x\t%08ld\n",
                           buf_obj->exp_name, buf_obj->size,
                           buf_obj->file->f_flags, buf_obj->file->f_mode,
                           (long)(buf_obj->file->f_count.counter));

                seq_printf(s, "\t\tAttached Devices:\n");
                attach_count = 0;

                list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
                        seq_printf(s, "\t\t");
                        seq_printf(s, "%s\n", attach_obj->dev->init_name);
                        attach_count++;
                }

                seq_printf(s, "\n\t\tTotal %d devices attached\n",
                           attach_count);

                count++;
                size += buf_obj->size;
                mutex_unlock(&buf_obj->lock);
        }

        seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);

        mutex_unlock(&db_list.lock);
        return 0;
}

static int dma_buf_show(struct seq_file *s, void *unused)
{
        void (*func)(struct seq_file *) = s->private;

        func(s);
        return 0;
}

static int dma_buf_debug_open(struct inode *inode, struct file *file)
{
        return single_open(file, dma_buf_show, inode->i_private);
}

static const struct file_operations dma_buf_debug_fops = {
        .open           = dma_buf_debug_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static struct dentry *dma_buf_debugfs_dir;

static int dma_buf_init_debugfs(void)
{
        int err = 0;

        dma_buf_debugfs_dir = debugfs_create_dir("dma_buf", NULL);
        if (IS_ERR(dma_buf_debugfs_dir)) {
                err = PTR_ERR(dma_buf_debugfs_dir);
                dma_buf_debugfs_dir = NULL;
                return err;
        }

        err = dma_buf_debugfs_create_file("bufinfo", dma_buf_describe);
        if (err)
                pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");

        return err;
}

static void dma_buf_uninit_debugfs(void)
{
        if (dma_buf_debugfs_dir)
                debugfs_remove_recursive(dma_buf_debugfs_dir);
}

int dma_buf_debugfs_create_file(const char *name,
                                int (*write)(struct seq_file *))
{
        struct dentry *d;

        d = debugfs_create_file(name, S_IRUGO, dma_buf_debugfs_dir,
                                write, &dma_buf_debug_fops);

        return IS_ERR(d) ? PTR_ERR(d) : 0;
}
#else

static inline int dma_buf_init_debugfs(void)
{
        return 0;
}

static inline void dma_buf_uninit_debugfs(void)
{
}
#endif

static int __init dma_buf_init(void)
{
        mutex_init(&db_list.lock);
        INIT_LIST_HEAD(&db_list.head);
        dma_buf_init_debugfs();
        return 0;
}
subsys_initcall(dma_buf_init);

static void __exit dma_buf_deinit(void)
{
        dma_buf_uninit_debugfs();
}
__exitcall(dma_buf_deinit);