drivers/gpu/drm/radeon/radeon_gem.c
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon.h"

int radeon_gem_object_init(struct drm_gem_object *obj)
{
        BUG();

        return 0;
}

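/* GEM free callback: drop the driver reference on the backing radeon_bo,
 * tearing down any dma-buf import attachment first. */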
void radeon_gem_object_free(struct drm_gem_object *gobj)
{
        struct radeon_bo *robj = gem_to_radeon_bo(gobj);

        if (robj) {
                if (robj->gem_base.import_attach)
                        drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
                radeon_bo_unref(&robj);
        }
}

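/* Allocate a radeon_bo of at least page-aligned size in the requested
 * domain, expose it via *obj as a GEM object and track it on the
 * per-device gem.objects list. */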
int radeon_gem_object_create(struct radeon_device *rdev, int size,
                             int alignment, int initial_domain,
                             bool discardable, bool kernel,
                             struct drm_gem_object **obj)
{
        struct radeon_bo *robj;
        int r;

        *obj = NULL;
        /* At least align on page size */
        if (alignment < PAGE_SIZE) {
                alignment = PAGE_SIZE;
        }
        r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, NULL, &robj);
        if (r) {
                if (r != -ERESTARTSYS)
                        DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
                                  size, initial_domain, alignment, r);
                return r;
        }
        *obj = &robj->gem_base;

        mutex_lock(&rdev->gem.mutex);
        list_add_tail(&robj->list, &rdev->gem.objects);
        mutex_unlock(&rdev->gem.mutex);

        return 0;
}

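/* Handle a set_domain request: only the CPU domain is acted on for now,
 * by waiting for the buffer to go idle. */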
int radeon_gem_set_domain(struct drm_gem_object *gobj,
                          uint32_t rdomain, uint32_t wdomain)
{
        struct radeon_bo *robj;
        uint32_t domain;
        int r;

        /* FIXME: reimplement */
        robj = gem_to_radeon_bo(gobj);
        /* work out where to validate the buffer to */
        domain = wdomain;
        if (!domain) {
                domain = rdomain;
        }
        if (!domain) {
                /* Do nothing */
                printk(KERN_WARNING "Set domain without domain !\n");
                return 0;
        }
        if (domain == RADEON_GEM_DOMAIN_CPU) {
                /* Asking for cpu access; wait for object idle */
                r = radeon_bo_wait(robj, NULL, false);
                if (r) {
                        printk(KERN_ERR "Failed to wait for object !\n");
                        return r;
                }
        }
        return 0;
}

int radeon_gem_init(struct radeon_device *rdev)
{
        INIT_LIST_HEAD(&rdev->gem.objects);
        return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
        radeon_bo_force_delete(rdev);
}

/*
 * Called from drm_gem_handle_create, which appears in both the new and
 * open ioctl cases.
 */
int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
        return 0;
}

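/* Called on GEM handle close: for VM-capable ASICs (Cayman and newer),
 * drop any virtual address mappings this file's VM holds on the buffer. */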
void radeon_gem_object_close(struct drm_gem_object *obj,
                             struct drm_file *file_priv)
{
        struct radeon_bo *rbo = gem_to_radeon_bo(obj);
        struct radeon_device *rdev = rbo->rdev;
        struct radeon_fpriv *fpriv = file_priv->driver_priv;
        struct radeon_vm *vm = &fpriv->vm;
        struct radeon_bo_va *bo_va, *tmp;

        if (rdev->family < CHIP_CAYMAN) {
                return;
        }

        if (radeon_bo_reserve(rbo, false)) {
                return;
        }
        list_for_each_entry_safe(bo_va, tmp, &rbo->va, bo_list) {
                if (bo_va->vm == vm) {
                        /* remove from this vm address space */
                        mutex_lock(&vm->mutex);
                        list_del(&bo_va->vm_list);
                        mutex_unlock(&vm->mutex);
                        list_del(&bo_va->bo_list);
                        kfree(bo_va);
                }
        }
        radeon_bo_unreserve(rbo);
}

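/* If a wait returned -EDEADLK the GPU is considered locked up; try a GPU
 * reset and ask the caller to retry with -EAGAIN. */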
static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
        if (r == -EDEADLK) {
                radeon_mutex_lock(&rdev->cs_mutex);
                r = radeon_gpu_reset(rdev);
                if (!r)
                        r = -EAGAIN;
                radeon_mutex_unlock(&rdev->cs_mutex);
        }
        return r;
}

/*
 * GEM ioctls.
 */
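/* Report VRAM/GART sizes to userspace, minus space already claimed by the
 * stolen VGA memory, fbdev and the kernel rings. */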
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_info *args = data;
        struct ttm_mem_type_manager *man;
        unsigned i;

        man = &rdev->mman.bdev.man[TTM_PL_VRAM];

        args->vram_size = rdev->mc.real_vram_size;
        args->vram_visible = (u64)man->size << PAGE_SHIFT;
        if (rdev->stollen_vga_memory)
                args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
        args->vram_visible -= radeon_fbdev_total_size(rdev);
        args->gart_size = rdev->mc.gtt_size - 4096 - RADEON_IB_POOL_SIZE*64*1024;
        for (i = 0; i < RADEON_NUM_RINGS; ++i)
                args->gart_size -= rdev->ring[i].ring_size;
        return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *filp)
{
        /* TODO: implement */
        DRM_ERROR("unimplemented %s\n", __func__);
        return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *filp)
{
        /* TODO: implement */
        DRM_ERROR("unimplemented %s\n", __func__);
        return -ENOSYS;
}

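/* Allocate a buffer of page-rounded size in the requested domain and
 * return a GEM handle for it to userspace. */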
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *filp)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_create *args = data;
        struct drm_gem_object *gobj;
        uint32_t handle;
        int r;

        /* create a gem object to contain this object in */
        args->size = roundup(args->size, PAGE_SIZE);
        r = radeon_gem_object_create(rdev, args->size, args->alignment,
                                     args->initial_domain, false,
                                     false, &gobj);
        if (r) {
                r = radeon_gem_handle_lockup(rdev, r);
                return r;
        }
        r = drm_gem_handle_create(filp, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(gobj);
        if (r) {
                r = radeon_gem_handle_lockup(rdev, r);
                return r;
        }
        args->handle = handle;
        return 0;
}

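/* Look up the handle and forward the domain transition request to
 * radeon_gem_set_domain(). */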
int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *filp)
{
        /* transition the BO to a domain -
         * just validate the BO into a certain domain */
        struct drm_radeon_gem_set_domain *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
        int r;

        /* for now if someone requests domain CPU -
         * just make sure the buffer is finished with */

        /* just do a BO wait for now */
        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_radeon_bo(gobj);

        r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

        drm_gem_object_unreference_unlocked(gobj);
        r = radeon_gem_handle_lockup(robj->rdev, r);
        return r;
}

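/* Translate a GEM handle into the fake mmap offset userspace needs in
 * order to map the buffer. */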
int radeon_mode_dumb_mmap(struct drm_file *filp,
                          struct drm_device *dev,
                          uint32_t handle, uint64_t *offset_p)
{
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;

        gobj = drm_gem_object_lookup(dev, filp, handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_radeon_bo(gobj);
        *offset_p = radeon_bo_mmap_offset(robj);
        drm_gem_object_unreference_unlocked(gobj);
        return 0;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
{
        struct drm_radeon_gem_mmap *args = data;

        return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}

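/* Non-blocking busy check: query the buffer's current placement and report
 * it back to userspace as a GEM domain. */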
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_busy *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
        int r;
        uint32_t cur_placement = 0;

        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_radeon_bo(gobj);
        r = radeon_bo_wait(robj, &cur_placement, true);
        switch (cur_placement) {
        case TTM_PL_VRAM:
                args->domain = RADEON_GEM_DOMAIN_VRAM;
                break;
        case TTM_PL_TT:
                args->domain = RADEON_GEM_DOMAIN_GTT;
                break;
        case TTM_PL_SYSTEM:
                args->domain = RADEON_GEM_DOMAIN_CPU;
                /* fall through */
        default:
                break;
        }
        drm_gem_object_unreference_unlocked(gobj);
        r = radeon_gem_handle_lockup(rdev, r);
        return r;
}

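/* Block until the buffer is idle, giving the ASIC-specific hook a chance
 * to flush caches afterwards. */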
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *filp)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_wait_idle *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
        int r;

        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_radeon_bo(gobj);
        r = radeon_bo_wait(robj, NULL, false);
        /* call the hw-specific callback, if any */
        if (rdev->asic->ioctl_wait_idle)
                robj->rdev->asic->ioctl_wait_idle(rdev, robj);
        drm_gem_object_unreference_unlocked(gobj);
        r = radeon_gem_handle_lockup(rdev, r);
        return r;
}

int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *filp)
{
        struct drm_radeon_gem_set_tiling *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
        int r = 0;

        DRM_DEBUG("%d \n", args->handle);
        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL)
                return -ENOENT;
        robj = gem_to_radeon_bo(gobj);
        r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
        drm_gem_object_unreference_unlocked(gobj);
        return r;
}

int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *filp)
{
        struct drm_radeon_gem_get_tiling *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *rbo;
        int r = 0;

        DRM_DEBUG("\n");
        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL)
                return -ENOENT;
        rbo = gem_to_radeon_bo(gobj);
        r = radeon_bo_reserve(rbo, false);
        if (unlikely(r != 0))
                goto out;
        radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
        radeon_bo_unreserve(rbo);
out:
        drm_gem_object_unreference_unlocked(gobj);
        return r;
}

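/* Manage per-file virtual address mappings: validate the request, then map
 * or unmap the buffer in this file's VM. */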
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *filp)
{
        struct drm_radeon_gem_va *args = data;
        struct drm_gem_object *gobj;
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_fpriv *fpriv = filp->driver_priv;
        struct radeon_bo *rbo;
        struct radeon_bo_va *bo_va;
        u32 invalid_flags;
        int r = 0;

        if (!rdev->vm_manager.enabled) {
                args->operation = RADEON_VA_RESULT_ERROR;
                return -ENOTTY;
        }

        /* !! DON'T REMOVE !!
         * We don't support vm_id yet. To be sure we don't have broken
         * userspace, reject anyone trying to use a non-zero value; moving
         * forward we can then use this field without breaking existing
         * userspace.
         */
        if (args->vm_id) {
                args->operation = RADEON_VA_RESULT_ERROR;
                return -EINVAL;
        }

        if (args->offset < RADEON_VA_RESERVED_SIZE) {
                dev_err(&dev->pdev->dev,
                        "offset 0x%lX is in reserved area 0x%X\n",
                        (unsigned long)args->offset,
                        RADEON_VA_RESERVED_SIZE);
                args->operation = RADEON_VA_RESULT_ERROR;
                return -EINVAL;
        }

        /* don't remove, we need to force userspace to set the snooped flag,
         * otherwise we will end up with broken userspace and we won't be
         * able to enable this feature without adding a new interface
         */
        invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
        if ((args->flags & invalid_flags)) {
                dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
                        args->flags, invalid_flags);
                args->operation = RADEON_VA_RESULT_ERROR;
                return -EINVAL;
        }
        if (!(args->flags & RADEON_VM_PAGE_SNOOPED)) {
                dev_err(&dev->pdev->dev, "only supported snooped mapping for now\n");
                args->operation = RADEON_VA_RESULT_ERROR;
                return -EINVAL;
        }

        switch (args->operation) {
        case RADEON_VA_MAP:
        case RADEON_VA_UNMAP:
                break;
        default:
                dev_err(&dev->pdev->dev, "unsupported operation %d\n",
                        args->operation);
                args->operation = RADEON_VA_RESULT_ERROR;
                return -EINVAL;
        }

        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL) {
                args->operation = RADEON_VA_RESULT_ERROR;
                return -ENOENT;
        }
        rbo = gem_to_radeon_bo(gobj);
        r = radeon_bo_reserve(rbo, false);
        if (r) {
                args->operation = RADEON_VA_RESULT_ERROR;
                drm_gem_object_unreference_unlocked(gobj);
                return r;
        }
        switch (args->operation) {
        case RADEON_VA_MAP:
                bo_va = radeon_bo_va(rbo, &fpriv->vm);
                if (bo_va) {
                        args->operation = RADEON_VA_RESULT_VA_EXIST;
                        args->offset = bo_va->soffset;
                        goto out;
                }
                r = radeon_vm_bo_add(rdev, &fpriv->vm, rbo,
                                     args->offset, args->flags);
                break;
        case RADEON_VA_UNMAP:
                r = radeon_vm_bo_rmv(rdev, &fpriv->vm, rbo);
                break;
        default:
                break;
        }
        args->operation = RADEON_VA_RESULT_OK;
        if (r) {
                args->operation = RADEON_VA_RESULT_ERROR;
        }
out:
        radeon_bo_unreserve(rbo);
        drm_gem_object_unreference_unlocked(gobj);
        return r;
}

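/* Dumb buffer allocation for KMS: compute pitch and size from width,
 * height and bpp, allocate the buffer in VRAM and return a handle. */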
int radeon_mode_dumb_create(struct drm_file *file_priv,
                            struct drm_device *dev,
                            struct drm_mode_create_dumb *args)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_gem_object *gobj;
        uint32_t handle;
        int r;

        args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
        args->size = args->pitch * args->height;
        args->size = ALIGN(args->size, PAGE_SIZE);

        r = radeon_gem_object_create(rdev, args->size, 0,
                                     RADEON_GEM_DOMAIN_VRAM,
                                     false, ttm_bo_type_device,
                                     &gobj);
        if (r)
                return -ENOMEM;

        r = drm_gem_handle_create(file_priv, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(gobj);
        if (r) {
                return r;
        }
        args->handle = handle;
        return 0;
}

int radeon_mode_dumb_destroy(struct drm_file *file_priv,
                             struct drm_device *dev,
                             uint32_t handle)
{
        return drm_gem_handle_delete(file_priv, handle);
}