/*
 * Copyright 2011 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */

#include <linux/dma-buf.h>

#include <drm/drmP.h>

#include "nouveau_drm.h"
#include "nouveau_gem.h"

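/*
 * Export callback: wrap the buffer's TTM backing pages in a
 * scatter/gather table and DMA-map it for the attached device.
 */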
static struct sg_table *nouveau_gem_map_dma_buf(struct dma_buf_attachment *attachment,
						enum dma_data_direction dir)
{
	struct nouveau_bo *nvbo = attachment->dmabuf->priv;
	struct drm_device *dev = nvbo->gem->dev;
	int npages = nvbo->bo.num_pages;
	struct sg_table *sg;
	int nents;

	mutex_lock(&dev->struct_mutex);
	sg = drm_prime_pages_to_sg(nvbo->bo.ttm->pages, npages);
	nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
	mutex_unlock(&dev->struct_mutex);
	return sg;
}

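/*
 * Undo nouveau_gem_map_dma_buf(): unmap the pages from the importing
 * device and free the scatter/gather table.
 */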
static void nouveau_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
				      struct sg_table *sg, enum dma_data_direction dir)
{
	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
	sg_free_table(sg);
	kfree(sg);
}

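/*
 * Called when the last reference to the dma-buf goes away; drop the
 * GEM object reference held for the export.
 */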
static void nouveau_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct nouveau_bo *nvbo = dma_buf->priv;

	if (nvbo->gem->export_dma_buf == dma_buf) {
		nvbo->gem->export_dma_buf = NULL;
		drm_gem_object_unreference_unlocked(nvbo->gem);
	}
}

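/*
 * Per-page CPU access via kmap/kmap_atomic is not supported; these
 * stubs only satisfy the struct dma_buf_ops interface.
 */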
static void *nouveau_gem_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void nouveau_gem_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}

static void *nouveau_gem_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void nouveau_gem_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}

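/* Userspace mmap of the dma-buf is not supported; reject with -EINVAL. */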
static int nouveau_gem_prime_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}

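/*
 * Return a kernel virtual mapping of the whole buffer, creating it on
 * first use and reference counting subsequent calls.
 */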
static void *nouveau_gem_prime_vmap(struct dma_buf *dma_buf)
{
	struct nouveau_bo *nvbo = dma_buf->priv;
	struct drm_device *dev = nvbo->gem->dev;
	int ret;

	mutex_lock(&dev->struct_mutex);
	if (nvbo->vmapping_count) {
		nvbo->vmapping_count++;
		goto out_unlock;
	}

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.num_pages,
			  &nvbo->dma_buf_vmap);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ERR_PTR(ret);
	}
	nvbo->vmapping_count = 1;
out_unlock:
	mutex_unlock(&dev->struct_mutex);
	return nvbo->dma_buf_vmap.virtual;
}

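/* Drop a vmap reference; tear down the mapping when the count hits zero. */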
static void nouveau_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct nouveau_bo *nvbo = dma_buf->priv;
	struct drm_device *dev = nvbo->gem->dev;

	mutex_lock(&dev->struct_mutex);
	nvbo->vmapping_count--;
	if (nvbo->vmapping_count == 0) {
		ttm_bo_kunmap(&nvbo->dma_buf_vmap);
	}
	mutex_unlock(&dev->struct_mutex);
}

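/* dma-buf operations used for every buffer exported by nouveau. */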
static const struct dma_buf_ops nouveau_dmabuf_ops = {
	.map_dma_buf = nouveau_gem_map_dma_buf,
	.unmap_dma_buf = nouveau_gem_unmap_dma_buf,
	.release = nouveau_gem_dmabuf_release,
	.kmap = nouveau_gem_kmap,
	.kmap_atomic = nouveau_gem_kmap_atomic,
	.kunmap = nouveau_gem_kunmap,
	.kunmap_atomic = nouveau_gem_kunmap_atomic,
	.mmap = nouveau_gem_prime_mmap,
	.vmap = nouveau_gem_prime_vmap,
	.vunmap = nouveau_gem_prime_vunmap,
};

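/*
 * Wrap an imported scatter/gather table in a new GART-backed nouveau_bo
 * and allocate a GEM object for it.
 */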
static int
nouveau_prime_new(struct drm_device *dev,
		  size_t size,
		  struct sg_table *sg,
		  struct nouveau_bo **pnvbo)
{
	struct nouveau_bo *nvbo;
	u32 flags = 0;
	int ret;

	flags = TTM_PL_FLAG_TT;

	ret = nouveau_bo_new(dev, size, 0, flags, 0, 0,
			     sg, pnvbo);
	if (ret)
		return ret;
	nvbo = *pnvbo;

	/* we restrict allowed domains on nv50+ to only the types
	 * that were requested at creation time. not possible on
	 * earlier chips without busting the ABI.
	 */
	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART;
	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
	if (!nvbo->gem) {
		nouveau_bo_ref(NULL, pnvbo);
		return -ENOMEM;
	}

	nvbo->gem->driver_private = nvbo;
	return 0;
}

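/*
 * Export a GEM object as a dma-buf. The buffer is pinned into GART so
 * its backing pages cannot move while shared with another device.
 */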
struct dma_buf *nouveau_gem_prime_export(struct drm_device *dev,
					 struct drm_gem_object *obj, int flags)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(obj);
	int ret = 0;

	/* pin buffer into GTT */
	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_TT);
	if (ret)
		return ERR_PTR(-EINVAL);

	return dma_buf_export(nvbo, &nouveau_dmabuf_ops, obj->size, flags);
}

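/*
 * Import a dma-buf as a GEM object. A buffer nouveau itself exported
 * on the same device short-circuits to the existing GEM object;
 * foreign buffers are attached and mapped.
 */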
struct drm_gem_object *nouveau_gem_prime_import(struct drm_device *dev,
						struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sg;
	struct nouveau_bo *nvbo;
	int ret;

	if (dma_buf->ops == &nouveau_dmabuf_ops) {
		nvbo = dma_buf->priv;
		if (nvbo->gem) {
			if (nvbo->gem->dev == dev) {
				drm_gem_object_reference(nvbo->gem);
				return nvbo->gem;
			}
		}
	}
	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_PTR(PTR_ERR(attach));

	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto fail_detach;
	}

	ret = nouveau_prime_new(dev, dma_buf->size, sg, &nvbo);
	if (ret)
		goto fail_unmap;

	nvbo->gem->import_attach = attach;

	return nvbo->gem;

fail_unmap:
	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	return ERR_PTR(ret);
}