drm/radeon: introduce kernel modesetting for radeon hardware
drivers/gpu/drm/radeon/radeon_ring.c
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

int radeon_debugfs_ib_init(struct radeon_device *rdev);

/*
 * IB.
 */
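/*
 * Hand out an IB from the pool.  A fence is created up front and, on
 * success, attached to the returned IB.  When the allocation bitmap is
 * full, the oldest scheduled IB is waited on and recycled instead.
 */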
int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
{
	struct radeon_fence *fence;
	struct radeon_ib *nib;
	unsigned long i;
	int r = 0;

	*ib = NULL;
	r = radeon_fence_create(rdev, &fence);
	if (r) {
		DRM_ERROR("failed to create fence for new IB\n");
		return r;
	}
	mutex_lock(&rdev->ib_pool.mutex);
	i = find_first_zero_bit(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
	if (i < RADEON_IB_POOL_SIZE) {
		set_bit(i, rdev->ib_pool.alloc_bm);
		rdev->ib_pool.ibs[i].length_dw = 0;
		*ib = &rdev->ib_pool.ibs[i];
		goto out;
	}
	if (list_empty(&rdev->ib_pool.scheduled_ibs)) {
		/* nothing we can do here, the pool is exhausted */
		DRM_ERROR("all IBs allocated, none scheduled.\n");
		r = -EINVAL;
		goto out;
	}
	/* get the first IB on the scheduled list */
	nib = list_entry(rdev->ib_pool.scheduled_ibs.next,
			 struct radeon_ib, list);
	if (nib->fence == NULL) {
		/* this should never happen, we can't recycle it */
		DRM_ERROR("IB %lu scheduled without a fence.\n", nib->idx);
		r = -EINVAL;
		goto out;
	}
	r = radeon_fence_wait(nib->fence, false);
	if (r) {
		DRM_ERROR("radeon: IB(%lu:0x%016lX:%u)\n", nib->idx,
			  (unsigned long)nib->gpu_addr, nib->length_dw);
		DRM_ERROR("radeon: GPU lockup detected, failed to get an IB\n");
		goto out;
	}
	radeon_fence_unref(&nib->fence);
	nib->length_dw = 0;
	list_del(&nib->list);
	INIT_LIST_HEAD(&nib->list);
	*ib = nib;
out:
	mutex_unlock(&rdev->ib_pool.mutex);
	if (r) {
		radeon_fence_unref(&fence);
	} else {
		(*ib)->fence = fence;
	}
	return r;
}

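/*
 * Give an IB back to the pool.  An IB that is still on the scheduled
 * list with an unsignaled fence is left alone: the GPU may still be
 * fetching from it, and radeon_ib_get() will recycle it later.
 */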
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
{
	struct radeon_ib *tmp = *ib;

	*ib = NULL;
	if (tmp == NULL) {
		return;
	}
	mutex_lock(&rdev->ib_pool.mutex);
	if (!list_empty(&tmp->list) && !radeon_fence_signaled(tmp->fence)) {
		/* IB is scheduled & not signaled, don't do anything */
		mutex_unlock(&rdev->ib_pool.mutex);
		return;
	}
	list_del(&tmp->list);
	INIT_LIST_HEAD(&tmp->list);
	if (tmp->fence) {
		radeon_fence_unref(&tmp->fence);
	}
	tmp->length_dw = 0;
	clear_bit(tmp->idx, rdev->ib_pool.alloc_bm);
	mutex_unlock(&rdev->ib_pool.mutex);
}

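/* Pad the IB with NOP (type-2) packets up to the CP fetch alignment. */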
static void radeon_ib_align(struct radeon_device *rdev, struct radeon_ib *ib)
{
	while ((ib->length_dw & rdev->cp.align_mask)) {
		ib->ptr[ib->length_dw++] = PACKET2(0);
	}
}

static void radeon_ib_cpu_flush(struct radeon_device *rdev,
				struct radeon_ib *ib)
{
	unsigned long tmp;
	unsigned i;

	/*
	 * Read back one dword per CP fetch block to force the CPU caches
	 * out to memory; ugly, but it seems reliable.
	 */
	for (i = 0; i < ib->length_dw; i += (rdev->cp.align_mask + 1)) {
		tmp = readl(&ib->ptr[i]);
	}
}

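/*
 * Submit an IB to the CP: pad it to the fetch alignment, flush CPU
 * caches, write the IB's GPU address and size to the CP indirect
 * buffer registers, emit a fence on the ring, and queue the IB on the
 * scheduled list so it can be recycled once its fence signals.
 */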
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
{
	int r = 0;

	mutex_lock(&rdev->ib_pool.mutex);
	radeon_ib_align(rdev, ib);
	radeon_ib_cpu_flush(rdev, ib);
	if (!ib->length_dw || !rdev->cp.ready) {
		/* Nothing to schedule: the IB is empty or the CP isn't ready. */
		mutex_unlock(&rdev->ib_pool.mutex);
		DRM_ERROR("radeon: couldn't schedule IB(%lu).\n", ib->idx);
		return -EINVAL;
	}
	/* 64 dwords should be enough, for the fence too */
	r = radeon_ring_lock(rdev, 64);
	if (r) {
		DRM_ERROR("radeon: scheduling IB failed (%d).\n", r);
		mutex_unlock(&rdev->ib_pool.mutex);
		return r;
	}
	radeon_ring_write(rdev, PACKET0(RADEON_CP_IB_BASE, 1));
	radeon_ring_write(rdev, ib->gpu_addr);
	radeon_ring_write(rdev, ib->length_dw);
	radeon_fence_emit(rdev, ib->fence);
	radeon_ring_unlock_commit(rdev);
	list_add_tail(&ib->list, &rdev->ib_pool.scheduled_ibs);
	mutex_unlock(&rdev->ib_pool.mutex);
	return 0;
}

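/*
 * Create the IB pool: a single 1MB GTT buffer object, pinned and mapped
 * once, carved into RADEON_IB_POOL_SIZE chunks of 64KB handed out via
 * the allocation bitmap.
 */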
int radeon_ib_pool_init(struct radeon_device *rdev)
{
	void *ptr;
	uint64_t gpu_addr;
	int i;
	int r = 0;

	/* Allocate a 1M object buffer */
	INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs);
	r = radeon_object_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
				 true, RADEON_GEM_DOMAIN_GTT,
				 false, &rdev->ib_pool.robj);
	if (r) {
		DRM_ERROR("radeon: failed to create ib pool (%d).\n", r);
		return r;
	}
	r = radeon_object_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
	if (r) {
		DRM_ERROR("radeon: failed to pin ib pool (%d).\n", r);
		return r;
	}
	r = radeon_object_kmap(rdev->ib_pool.robj, &ptr);
	if (r) {
		DRM_ERROR("radeon: failed to map ib pool (%d).\n", r);
		return r;
	}
	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		unsigned offset;

		offset = i * 64 * 1024;
		rdev->ib_pool.ibs[i].gpu_addr = gpu_addr + offset;
		rdev->ib_pool.ibs[i].ptr = ptr + offset;
		rdev->ib_pool.ibs[i].idx = i;
		rdev->ib_pool.ibs[i].length_dw = 0;
		INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].list);
	}
	bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
	rdev->ib_pool.ready = true;
	DRM_INFO("radeon: ib pool ready.\n");
	if (radeon_debugfs_ib_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for IB !\n");
	}
	return r;
}

void radeon_ib_pool_fini(struct radeon_device *rdev)
{
	if (!rdev->ib_pool.ready) {
		return;
	}
	mutex_lock(&rdev->ib_pool.mutex);
	bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
	if (rdev->ib_pool.robj) {
		radeon_object_kunmap(rdev->ib_pool.robj);
		/* balance the pin taken in radeon_ib_pool_init() */
		radeon_object_unpin(rdev->ib_pool.robj);
		radeon_object_unref(&rdev->ib_pool.robj);
		rdev->ib_pool.robj = NULL;
	}
	mutex_unlock(&rdev->ib_pool.mutex);
}

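/*
 * Sanity check the IB path: write a magic value to a scratch register
 * from an IB, then poll the register until the value shows up or the
 * timeout expires.
 */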
int radeon_ib_test(struct radeon_device *rdev)
{
	struct radeon_ib *ib;
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ib_get(rdev, &ib);
	if (r) {
		radeon_scratch_free(rdev, scratch);
		return r;
	}
	ib->ptr[0] = PACKET0(scratch, 0);
	ib->ptr[1] = 0xDEADBEEF;
	ib->ptr[2] = PACKET2(0);
	ib->ptr[3] = PACKET2(0);
	ib->ptr[4] = PACKET2(0);
	ib->ptr[5] = PACKET2(0);
	ib->ptr[6] = PACKET2(0);
	ib->ptr[7] = PACKET2(0);
	ib->length_dw = 8;
	r = radeon_ib_schedule(rdev, ib);
	if (r) {
		radeon_scratch_free(rdev, scratch);
		radeon_ib_free(rdev, &ib);
		return r;
	}
	r = radeon_fence_wait(ib->fence, false);
	if (r) {
		radeon_scratch_free(rdev, scratch);
		radeon_ib_free(rdev, &ib);
		return r;
	}
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF) {
			break;
		}
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ib test succeeded in %u usecs\n", i);
	} else {
		DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
	radeon_scratch_free(rdev, scratch);
	radeon_ib_free(rdev, &ib);
	return r;
}


/*
 * Ring.
 */
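/*
 * Recompute the free space between wptr and rptr.  Because the ring
 * size in dwords is a power of two, masking with ptr_mask is a cheap
 * modulo; a result of zero means rptr == wptr, i.e. the ring is in
 * fact entirely free.
 */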
void radeon_ring_free_size(struct radeon_device *rdev)
{
	rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
	/* This works because ring_size is a power of 2 */
	rdev->cp.ring_free_dw = (rdev->cp.rptr + (rdev->cp.ring_size / 4));
	rdev->cp.ring_free_dw -= rdev->cp.wptr;
	rdev->cp.ring_free_dw &= rdev->cp.ptr_mask;
	if (!rdev->cp.ring_free_dw) {
		rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;
	}
}

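/*
 * Reserve ndw dwords on the ring and take the ring mutex.  If there is
 * not enough room, wait for the next fence to signal and retry.  The
 * mutex stays held until radeon_ring_unlock_commit() or
 * radeon_ring_unlock_undo() releases it.
 */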
int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw)
{
	int r;

	/* Align requested size with padding so unlock_commit can
	 * pad safely */
	ndw = (ndw + rdev->cp.align_mask) & ~rdev->cp.align_mask;
	mutex_lock(&rdev->cp.mutex);
	while (ndw > (rdev->cp.ring_free_dw - 1)) {
		radeon_ring_free_size(rdev);
		if (ndw < rdev->cp.ring_free_dw) {
			break;
		}
		r = radeon_fence_wait_next(rdev);
		if (r) {
			mutex_unlock(&rdev->cp.mutex);
			return r;
		}
	}
	rdev->cp.count_dw = ndw;
	rdev->cp.wptr_old = rdev->cp.wptr;
	return 0;
}

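/*
 * Pad the ring up to the CP fetch size, then publish the new write
 * pointer to the hardware.  The read-back of RADEON_CP_RB_WPTR acts as
 * a posting read, making sure the register write has reached the chip.
 */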
void radeon_ring_unlock_commit(struct radeon_device *rdev)
{
	unsigned count_dw_pad;
	unsigned i;

	/* We pad to match fetch size */
	count_dw_pad = (rdev->cp.align_mask + 1) -
		       (rdev->cp.wptr & rdev->cp.align_mask);
	for (i = 0; i < count_dw_pad; i++) {
		radeon_ring_write(rdev, PACKET2(0));
	}
	DRM_MEMORYBARRIER();
	WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
	(void)RREG32(RADEON_CP_RB_WPTR);
	mutex_unlock(&rdev->cp.mutex);
}

void radeon_ring_unlock_undo(struct radeon_device *rdev)
{
	rdev->cp.wptr = rdev->cp.wptr_old;
	mutex_unlock(&rdev->cp.mutex);
}

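/*
 * Sanity check the ring itself: write a magic value to a scratch
 * register directly from the ring and poll until it shows up.
 */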
int radeon_ring_test(struct radeon_device *rdev)
{
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ring_lock(rdev, 2);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		radeon_scratch_free(rdev, scratch);
		return r;
	}
	radeon_ring_write(rdev, PACKET0(scratch, 0));
	radeon_ring_write(rdev, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF) {
			break;
		}
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test succeeded in %u usecs\n", i);
	} else {
		DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
	radeon_scratch_free(rdev, scratch);
	return r;
}

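/*
 * Allocate, pin and map the ring buffer object on first use.  The
 * pointer mask relies on the ring size in dwords being a power of two.
 */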
int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
{
	int r;

	rdev->cp.ring_size = ring_size;
	/* Allocate ring buffer */
	if (rdev->cp.ring_obj == NULL) {
		r = radeon_object_create(rdev, NULL, rdev->cp.ring_size,
					 true,
					 RADEON_GEM_DOMAIN_GTT,
					 false,
					 &rdev->cp.ring_obj);
		if (r) {
			DRM_ERROR("radeon: failed to create ring buffer (%d).\n", r);
			return r;
		}
		r = radeon_object_pin(rdev->cp.ring_obj,
				      RADEON_GEM_DOMAIN_GTT,
				      &rdev->cp.gpu_addr);
		if (r) {
			DRM_ERROR("radeon: failed to pin ring buffer (%d).\n", r);
			return r;
		}
		r = radeon_object_kmap(rdev->cp.ring_obj,
				       (void **)&rdev->cp.ring);
		if (r) {
			DRM_ERROR("radeon: failed to map ring buffer (%d).\n", r);
			return r;
		}
	}
	rdev->cp.ptr_mask = (rdev->cp.ring_size / 4) - 1;
	rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;
	return 0;
}

void radeon_ring_fini(struct radeon_device *rdev)
{
	mutex_lock(&rdev->cp.mutex);
	if (rdev->cp.ring_obj) {
		radeon_object_kunmap(rdev->cp.ring_obj);
		radeon_object_unpin(rdev->cp.ring_obj);
		radeon_object_unref(&rdev->cp.ring_obj);
		rdev->cp.ring = NULL;
		rdev->cp.ring_obj = NULL;
	}
	mutex_unlock(&rdev->cp.mutex);
}


/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct radeon_ib *ib = node->info_ent->data;
	unsigned i;

	if (ib == NULL) {
		return 0;
	}
	seq_printf(m, "IB %04lu\n", ib->idx);
	seq_printf(m, "IB fence %p\n", ib->fence);
	seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
	for (i = 0; i < ib->length_dw; i++) {
		seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
	}
	return 0;
}

static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];
#endif

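/* Register one debugfs file per IB in the pool. */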
int radeon_debugfs_ib_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;

	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
		radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
		radeon_debugfs_ib_list[i].show = &radeon_debugfs_ib_info;
		radeon_debugfs_ib_list[i].driver_features = 0;
		radeon_debugfs_ib_list[i].data = &rdev->ib_pool.ibs[i];
	}
	return radeon_debugfs_add_files(rdev, radeon_debugfs_ib_list,
					RADEON_IB_POOL_SIZE);
#else
	return 0;
#endif
}