/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <drm/drmP.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"

#include <linux/dma-buf.h>
static struct sg_table *exynos_pages_to_sg(struct page **pages, int nr_pages,
		unsigned int page_size)
{
	struct sg_table *sgt = NULL;
	struct scatterlist *sgl;
	int i, ret;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		goto out;

	ret = sg_alloc_table(sgt, nr_pages, GFP_KERNEL);
	if (ret)
		goto err_free_sgt;

	if (page_size < PAGE_SIZE)
		page_size = PAGE_SIZE;

	/* fill one table entry per page of page_size bytes. */
	for_each_sg(sgt->sgl, sgl, nr_pages, i)
		sg_set_page(sgl, pages[i], page_size, 0);

	return sgt;

err_free_sgt:
	kfree(sgt);
	sgt = NULL;
out:
	return NULL;
}
static struct sg_table *
		exynos_gem_map_dma_buf(struct dma_buf_attachment *attach,
					enum dma_data_direction dir)
{
	struct exynos_drm_gem_obj *gem_obj = attach->dmabuf->priv;
	struct drm_device *dev = gem_obj->base.dev;
	struct exynos_drm_gem_buf *buf;
	struct sg_table *sgt = NULL;
	unsigned int npages;
	int nents;

	DRM_DEBUG_PRIME("%s\n", __FILE__);

	mutex_lock(&dev->struct_mutex);

	buf = gem_obj->buffer;

	/* there should always be pages allocated. */
	if (!buf->pages) {
		DRM_ERROR("pages is null.\n");
		goto err_unlock;
	}

	npages = buf->size / buf->page_size;

	sgt = exynos_pages_to_sg(buf->pages, npages, buf->page_size);
	if (!sgt) {
		DRM_DEBUG_PRIME("exynos_pages_to_sg returned NULL!\n");
		goto err_unlock;
	}

	/* map the table for the importing device. */
	nents = dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir);

	DRM_DEBUG_PRIME("npages = %d buffer size = 0x%lx page_size = 0x%lx\n",
			npages, buf->size, buf->page_size);

err_unlock:
	mutex_unlock(&dev->struct_mutex);
	return sgt;
}
static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
						struct sg_table *sgt,
						enum dma_data_direction dir)
{
	dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);

	sg_free_table(sgt);
	kfree(sgt);
	sgt = NULL;
}
static void exynos_dmabuf_release(struct dma_buf *dmabuf)
{
	struct exynos_drm_gem_obj *exynos_gem_obj = dmabuf->priv;

	DRM_DEBUG_PRIME("%s\n", __FILE__);

	/*
	 * exynos_dmabuf_release() being called means that the dma-buf file
	 * object's f_count has reached 0, so drop the reference on the GEM
	 * object that was taken at drm_prime_handle_to_fd() time (see the
	 * usage sketch after this function).
	 */
	if (exynos_gem_obj->base.export_dma_buf == dmabuf) {
		exynos_gem_obj->base.export_dma_buf = NULL;

		/*
		 * drop this gem object refcount to release allocated buffer
		 * and resources.
		 */
		drm_gem_object_unreference_unlocked(&exynos_gem_obj->base);
	}
}
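
/*
 * For reference, a hedged sketch of the export flow as seen from userspace;
 * this is not part of this driver. The ioctl and struct names below come
 * from the generic DRM PRIME interface (drm.h); error handling is omitted
 * and the GEM handle is assumed to come from an earlier allocation ioctl.
 *
 *	struct drm_prime_handle args = {
 *		.handle = gem_handle,
 *		.flags  = DRM_CLOEXEC,
 *	};
 *
 *	ioctl(drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
 *	// args.fd now refers to the dma-buf created by
 *	// exynos_dmabuf_prime_export(); when the last reference to that fd
 *	// is closed, exynos_dmabuf_release() above drops the GEM reference.
 */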
static void *exynos_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
						unsigned long page_num)
{
	/* TODO */

	return NULL;
}

static void exynos_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
						unsigned long page_num,
						void *addr)
{
	/* TODO */
}

static void *exynos_gem_dmabuf_kmap(struct dma_buf *dma_buf,
					unsigned long page_num)
{
	/* TODO */

	return NULL;
}

static void exynos_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
					unsigned long page_num, void *addr)
{
	/* TODO */
}

static int exynos_gem_dmabuf_mmap(struct dma_buf *dma_buf,
	struct vm_area_struct *vma)
{
	return -ENOTTY;
}
static struct dma_buf_ops exynos_dmabuf_ops = {
	.map_dma_buf		= exynos_gem_map_dma_buf,
	.unmap_dma_buf		= exynos_gem_unmap_dma_buf,
	.kmap			= exynos_gem_dmabuf_kmap,
	.kmap_atomic		= exynos_gem_dmabuf_kmap_atomic,
	.kunmap			= exynos_gem_dmabuf_kunmap,
	.kunmap_atomic		= exynos_gem_dmabuf_kunmap_atomic,
	.mmap			= exynos_gem_dmabuf_mmap,
	.release		= exynos_dmabuf_release,
};
struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
				struct drm_gem_object *obj, int flags)
{
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);

	return dma_buf_export(exynos_gem_obj, &exynos_dmabuf_ops,
				exynos_gem_obj->base.size, 0600);
}
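
/*
 * Note: the export/import helpers here are not called directly; they are
 * hooked into the driver's struct drm_driver (in exynos_drm_drv.c) so that
 * the DRM core's PRIME code can reach them. A rough sketch of that wiring
 * for this kernel generation (exact field set may differ between versions):
 *
 *	static struct drm_driver exynos_drm_driver = {
 *		.driver_features	= DRIVER_GEM | DRIVER_PRIME | ...,
 *		.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
 *		.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
 *		.gem_prime_export	= exynos_dmabuf_prime_export,
 *		.gem_prime_import	= exynos_dmabuf_prime_import,
 *		...
 *	};
 */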
struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
				struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct scatterlist *sgl;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buffer;
	struct page *page;
	int ret;

	DRM_DEBUG_PRIME("%s\n", __FILE__);

	/* is this one of own objects? */
	if (dma_buf->ops == &exynos_dmabuf_ops) {
		struct drm_gem_object *obj;

		exynos_gem_obj = dma_buf->priv;
		obj = &exynos_gem_obj->base;

		/* is it from our device? */
		if (obj->dev == drm_dev) {
			drm_gem_object_reference(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(dma_buf, drm_dev->dev);
	if (IS_ERR(attach))
		return ERR_PTR(-EINVAL);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR_OR_NULL(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_buf_detach;
	}

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
		ret = -ENOMEM;
		goto err_unmap_attach;
	}

	buffer->pages = kzalloc(sizeof(*page) * sgt->nents, GFP_KERNEL);
	if (!buffer->pages) {
		DRM_ERROR("failed to allocate pages.\n");
		ret = -ENOMEM;
		goto err_free_buffer;
	}

	exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
	if (!exynos_gem_obj) {
		ret = -ENOMEM;
		goto err_free_pages;
	}

	sgl = sgt->sgl;

	if (sgt->nents == 1) {
		buffer->dma_addr = sg_dma_address(sgt->sgl);
		buffer->size = sg_dma_len(sgt->sgl);

		/* always physically continuous memory if sgt->nents is 1. */
		exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
	} else {
		unsigned int i = 0;

		buffer->dma_addr = sg_dma_address(sgl);
		while (i < sgt->nents) {
			buffer->pages[i] = sg_page(sgl);
			buffer->size += sg_dma_len(sgl);
			sgl = sg_next(sgl);
			i++;
		}

		exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
	}

	exynos_gem_obj->buffer = buffer;

	exynos_gem_obj->base.import_attach = attach;

	DRM_DEBUG_PRIME("dma_addr = 0x%x, size = 0x%lx\n", buffer->dma_addr,
								buffer->size);

	return &exynos_gem_obj->base;

err_free_pages:
	kfree(buffer->pages);
	buffer->pages = NULL;
err_free_buffer:
	kfree(buffer);
	buffer = NULL;
err_unmap_attach:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
err_buf_detach:
	dma_buf_detach(dma_buf, attach);

	return ERR_PTR(ret);
}
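
/*
 * The import path above is reached from userspace through the generic PRIME
 * ioctl; a hedged sketch of the caller side (names from drm.h, error
 * handling omitted):
 *
 *	struct drm_prime_handle args = {
 *		.fd = dmabuf_fd,	// dma-buf fd exported by another device
 *	};
 *
 *	ioctl(drm_fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args);
 *	// args.handle is now a GEM handle backed by the buffer that
 *	// exynos_dmabuf_prime_import() wrapped (or simply re-referenced,
 *	// if the dma-buf was one of our own objects).
 */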
MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
MODULE_DESCRIPTION("Samsung SoC DRM DMABUF Module");
MODULE_LICENSE("GPL");