/* drivers/gpu/drm/nouveau/nouveau_prime.c */

/*
 * Copyright 2011 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */

#include "drmP.h"
#include "drm.h"

#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_dma.h"

#include <linux/dma-buf.h>

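/*
 * dma-buf attachment callbacks: an importing device reaches these through
 * dma_buf_map_attachment()/dma_buf_unmap_attachment() to obtain a
 * scatterlist of the buffer's backing pages, mapped for its own DMA use.
 * The pages come straight from the TTM object; struct_mutex serialises
 * against the rest of the driver touching the bo.
 */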
static struct sg_table *nouveau_gem_map_dma_buf(struct dma_buf_attachment *attachment,
						enum dma_data_direction dir)
{
	struct nouveau_bo *nvbo = attachment->dmabuf->priv;
	struct drm_device *dev = nvbo->gem->dev;
	int npages = nvbo->bo.num_pages;
	struct sg_table *sg;
	int nents;

	mutex_lock(&dev->struct_mutex);
	sg = drm_prime_pages_to_sg(nvbo->bo.ttm->pages, npages);
	nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
	mutex_unlock(&dev->struct_mutex);
	return sg;
}

static void nouveau_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
				      struct sg_table *sg, enum dma_data_direction dir)
{
	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
	sg_free_table(sg);
	kfree(sg);
}

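/*
 * Called by the dma-buf core when the last reference to the dma-buf goes
 * away: drop the GEM reference that was taken when the buffer was exported.
 */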
static void nouveau_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct nouveau_bo *nvbo = dma_buf->priv;

	if (nvbo->gem->export_dma_buf == dma_buf) {
		nvbo->gem->export_dma_buf = NULL;
		drm_gem_object_unreference_unlocked(nvbo->gem);
	}
}

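/*
 * Per-page CPU access through the dma-buf kmap interface is not supported;
 * the stubs below just satisfy the dma_buf_ops entries the core expects to
 * be filled in.
 */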
static void *nouveau_gem_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void nouveau_gem_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{
}

static void *nouveau_gem_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void nouveau_gem_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{
}

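/*
 * Mapping the exported buffer into userspace through the dma-buf fd is not
 * supported either; importers get at the memory via the scatterlist or
 * their own mappings.
 */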
static int nouveau_gem_prime_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}

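/*
 * Kernel virtual mapping of the whole buffer, refcounted under struct_mutex
 * so that nested dma_buf_vmap() calls share a single ttm_bo_kmap() mapping
 * which is only torn down when the count drops back to zero.
 */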
static void *nouveau_gem_prime_vmap(struct dma_buf *dma_buf)
{
	struct nouveau_bo *nvbo = dma_buf->priv;
	struct drm_device *dev = nvbo->gem->dev;
	int ret;

	mutex_lock(&dev->struct_mutex);
	if (nvbo->vmapping_count) {
		nvbo->vmapping_count++;
		goto out_unlock;
	}

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.num_pages,
			  &nvbo->dma_buf_vmap);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ERR_PTR(ret);
	}
	nvbo->vmapping_count = 1;
out_unlock:
	mutex_unlock(&dev->struct_mutex);
	return nvbo->dma_buf_vmap.virtual;
}

static void nouveau_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct nouveau_bo *nvbo = dma_buf->priv;
	struct drm_device *dev = nvbo->gem->dev;

	mutex_lock(&dev->struct_mutex);
	nvbo->vmapping_count--;
	if (nvbo->vmapping_count == 0)
		ttm_bo_kunmap(&nvbo->dma_buf_vmap);
	mutex_unlock(&dev->struct_mutex);
}

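/* Every buffer exported by this driver is wrapped in a dma-buf using these ops. */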
static const struct dma_buf_ops nouveau_dmabuf_ops = {
	.map_dma_buf = nouveau_gem_map_dma_buf,
	.unmap_dma_buf = nouveau_gem_unmap_dma_buf,
	.release = nouveau_gem_dmabuf_release,
	.kmap = nouveau_gem_kmap,
	.kmap_atomic = nouveau_gem_kmap_atomic,
	.kunmap = nouveau_gem_kunmap,
	.kunmap_atomic = nouveau_gem_kunmap_atomic,
	.mmap = nouveau_gem_prime_mmap,
	.vmap = nouveau_gem_prime_vmap,
	.vunmap = nouveau_gem_prime_vunmap,
};

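/*
 * Create a GART-placed nouveau_bo (and its GEM wrapper) around an imported
 * sg_table; used by the import path below for foreign dma-bufs.
 */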
static int
nouveau_prime_new(struct drm_device *dev,
		  size_t size,
		  struct sg_table *sg,
		  struct nouveau_bo **pnvbo)
{
	struct nouveau_bo *nvbo;
	u32 flags = TTM_PL_FLAG_TT;
	int ret;

	ret = nouveau_bo_new(dev, size, 0, flags, 0, 0,
			     sg, pnvbo);
	if (ret)
		return ret;
	nvbo = *pnvbo;

	/* we restrict allowed domains on nv50+ to only the types
	 * that were requested at creation time. not possible on
	 * earlier chips without busting the ABI.
	 */
	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART;
	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
	if (!nvbo->gem) {
		nouveau_bo_ref(NULL, pnvbo);
		return -ENOMEM;
	}

	nvbo->gem->driver_private = nvbo;
	return 0;
}

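/*
 * Export: pin the buffer into GART so its backing pages stay put for the
 * lifetime of the dma-buf, then hand it to the dma-buf core.
 */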
struct dma_buf *nouveau_gem_prime_export(struct drm_device *dev,
					 struct drm_gem_object *obj, int flags)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(obj);
	int ret;

	/* pin buffer into GTT */
	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_TT);
	if (ret)
		return ERR_PTR(ret);

	return dma_buf_export(nvbo, &nouveau_dmabuf_ops, obj->size, flags);
}

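/*
 * Import: if the dma-buf is one of ours from the same device, just take
 * another reference on the existing GEM object; otherwise attach to the
 * foreign buffer and wrap the mapped scatterlist in a new bo.
 */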
struct drm_gem_object *nouveau_gem_prime_import(struct drm_device *dev,
						struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sg;
	struct nouveau_bo *nvbo;
	int ret;

	if (dma_buf->ops == &nouveau_dmabuf_ops) {
		nvbo = dma_buf->priv;
		if (nvbo->gem && nvbo->gem->dev == dev) {
			drm_gem_object_reference(nvbo->gem);
			return nvbo->gem;
		}
	}
	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto fail_detach;
	}

	ret = nouveau_prime_new(dev, dma_buf->size, sg, &nvbo);
	if (ret)
		goto fail_unmap;

	nvbo->gem->import_attach = attach;

	return nvbo->gem;

fail_unmap:
	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	return ERR_PTR(ret);
}
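
/*
 * For reference, a rough sketch of how these entry points are expected to
 * be wired up in the driver structure (the exact nouveau_drv.c in this tree
 * may differ; the field names below are the generic DRM PRIME ones and have
 * not been verified against this source):
 *
 *	static struct drm_driver driver = {
 *		.driver_features = DRIVER_GEM | ... | DRIVER_PRIME,
 *		.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 *		.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 *		.gem_prime_export = nouveau_gem_prime_export,
 *		.gem_prime_import = nouveau_gem_prime_import,
 *		...
 *	};
 *
 * drm_gem_prime_handle_to_fd()/drm_gem_prime_fd_to_handle() are the generic
 * PRIME ioctl helpers that end up calling the export/import hooks above.
 */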