2 * Copyright 2012 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
22 * based on nouveau_prime.c
24 * Authors: Alex Deucher
29 #include <drm/radeon_drm.h>
31 #include <linux/dma-buf.h>
33 static struct sg_table
*radeon_gem_map_dma_buf(struct dma_buf_attachment
*attachment
,
34 enum dma_data_direction dir
)
36 struct radeon_bo
*bo
= attachment
->dmabuf
->priv
;
37 struct drm_device
*dev
= bo
->rdev
->ddev
;
38 int npages
= bo
->tbo
.num_pages
;
42 mutex_lock(&dev
->struct_mutex
);
43 sg
= drm_prime_pages_to_sg(bo
->tbo
.ttm
->pages
, npages
);
44 nents
= dma_map_sg(attachment
->dev
, sg
->sgl
, sg
->nents
, dir
);
45 mutex_unlock(&dev
->struct_mutex
);
49 static void radeon_gem_unmap_dma_buf(struct dma_buf_attachment
*attachment
,
50 struct sg_table
*sg
, enum dma_data_direction dir
)
52 dma_unmap_sg(attachment
->dev
, sg
->sgl
, sg
->nents
, dir
);
57 static void radeon_gem_dmabuf_release(struct dma_buf
*dma_buf
)
59 struct radeon_bo
*bo
= dma_buf
->priv
;
61 if (bo
->gem_base
.export_dma_buf
== dma_buf
) {
62 DRM_ERROR("unreference dmabuf %p\n", &bo
->gem_base
);
63 bo
->gem_base
.export_dma_buf
= NULL
;
64 drm_gem_object_unreference_unlocked(&bo
->gem_base
);
68 static void *radeon_gem_kmap_atomic(struct dma_buf
*dma_buf
, unsigned long page_num
)
/* No-op: kmap_atomic never hands out a mapping, so nothing to undo. */
static void radeon_gem_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}
77 static void *radeon_gem_kmap(struct dma_buf
*dma_buf
, unsigned long page_num
)
/* No-op: kmap never hands out a mapping, so nothing to undo. */
static void radeon_gem_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}
87 static int radeon_gem_prime_mmap(struct dma_buf
*dma_buf
, struct vm_area_struct
*vma
)
92 static void *radeon_gem_prime_vmap(struct dma_buf
*dma_buf
)
94 struct radeon_bo
*bo
= dma_buf
->priv
;
95 struct drm_device
*dev
= bo
->rdev
->ddev
;
98 mutex_lock(&dev
->struct_mutex
);
99 if (bo
->vmapping_count
) {
100 bo
->vmapping_count
++;
104 ret
= ttm_bo_kmap(&bo
->tbo
, 0, bo
->tbo
.num_pages
,
107 mutex_unlock(&dev
->struct_mutex
);
110 bo
->vmapping_count
= 1;
112 mutex_unlock(&dev
->struct_mutex
);
113 return bo
->dma_buf_vmap
.virtual;
116 static void radeon_gem_prime_vunmap(struct dma_buf
*dma_buf
, void *vaddr
)
118 struct radeon_bo
*bo
= dma_buf
->priv
;
119 struct drm_device
*dev
= bo
->rdev
->ddev
;
121 mutex_lock(&dev
->struct_mutex
);
122 bo
->vmapping_count
--;
123 if (bo
->vmapping_count
== 0) {
124 ttm_bo_kunmap(&bo
->dma_buf_vmap
);
126 mutex_unlock(&dev
->struct_mutex
);
128 const static struct dma_buf_ops radeon_dmabuf_ops
= {
129 .map_dma_buf
= radeon_gem_map_dma_buf
,
130 .unmap_dma_buf
= radeon_gem_unmap_dma_buf
,
131 .release
= radeon_gem_dmabuf_release
,
132 .kmap
= radeon_gem_kmap
,
133 .kmap_atomic
= radeon_gem_kmap_atomic
,
134 .kunmap
= radeon_gem_kunmap
,
135 .kunmap_atomic
= radeon_gem_kunmap_atomic
,
136 .mmap
= radeon_gem_prime_mmap
,
137 .vmap
= radeon_gem_prime_vmap
,
138 .vunmap
= radeon_gem_prime_vunmap
,
141 static int radeon_prime_create(struct drm_device
*dev
,
144 struct radeon_bo
**pbo
)
146 struct radeon_device
*rdev
= dev
->dev_private
;
147 struct radeon_bo
*bo
;
150 ret
= radeon_bo_create(rdev
, size
, PAGE_SIZE
, false,
151 RADEON_GEM_DOMAIN_GTT
, sg
, pbo
);
155 bo
->gem_base
.driver_private
= bo
;
157 mutex_lock(&rdev
->gem
.mutex
);
158 list_add_tail(&bo
->list
, &rdev
->gem
.objects
);
159 mutex_unlock(&rdev
->gem
.mutex
);
164 struct dma_buf
*radeon_gem_prime_export(struct drm_device
*dev
,
165 struct drm_gem_object
*obj
,
168 struct radeon_bo
*bo
= gem_to_radeon_bo(obj
);
171 ret
= radeon_bo_reserve(bo
, false);
172 if (unlikely(ret
!= 0))
175 /* pin buffer into GTT */
176 ret
= radeon_bo_pin(bo
, RADEON_GEM_DOMAIN_GTT
, NULL
);
178 radeon_bo_unreserve(bo
);
181 radeon_bo_unreserve(bo
);
182 return dma_buf_export(bo
, &radeon_dmabuf_ops
, obj
->size
, flags
);
185 struct drm_gem_object
*radeon_gem_prime_import(struct drm_device
*dev
,
186 struct dma_buf
*dma_buf
)
188 struct dma_buf_attachment
*attach
;
190 struct radeon_bo
*bo
;
193 if (dma_buf
->ops
== &radeon_dmabuf_ops
) {
195 if (bo
->gem_base
.dev
== dev
) {
196 drm_gem_object_reference(&bo
->gem_base
);
197 dma_buf_put(dma_buf
);
198 return &bo
->gem_base
;
203 attach
= dma_buf_attach(dma_buf
, dev
->dev
);
205 return ERR_CAST(attach
);
207 sg
= dma_buf_map_attachment(attach
, DMA_BIDIRECTIONAL
);
213 ret
= radeon_prime_create(dev
, dma_buf
->size
, sg
, &bo
);
217 bo
->gem_base
.import_attach
= attach
;
219 return &bo
->gem_base
;
222 dma_buf_unmap_attachment(attach
, sg
, DMA_BIDIRECTIONAL
);
224 dma_buf_detach(dma_buf
, attach
);