/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 *          Christian König
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

/*
 * IB.
 */
int radeon_debugfs_sa_init(struct radeon_device *rdev);

u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
{
	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
	u32 pg_idx, pg_offset;
	u32 idx_value = 0;
	int new_page;

	pg_idx = (idx * 4) / PAGE_SIZE;
	pg_offset = (idx * 4) % PAGE_SIZE;

	if (ibc->kpage_idx[0] == pg_idx)
		return ibc->kpage[0][pg_offset/4];
	if (ibc->kpage_idx[1] == pg_idx)
		return ibc->kpage[1][pg_offset/4];

	new_page = radeon_cs_update_pages(p, pg_idx);
	if (new_page < 0) {
		p->parser_error = new_page;
		return 0;
	}

	idx_value = ibc->kpage[new_page][pg_offset/4];
	return idx_value;
}

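/*
 * Worked example of the paging math above, assuming 4 KiB pages: dword
 * index idx = 1025 is byte offset 4100, so pg_idx = 1 and pg_offset = 4,
 * and the value is read from kpage[x][pg_offset/4] = kpage[x][1]. Only two
 * IB pages are kept mapped at a time; any other page is brought in through
 * radeon_cs_update_pages() first.
 */
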
int radeon_ib_get(struct radeon_device *rdev, int ring,
		  struct radeon_ib *ib, unsigned size)
{
	int r;

	r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256, true);
	if (r) {
		dev_err(rdev->dev, "failed to get a new IB (%d)\n", r);
		return r;
	}
	r = radeon_fence_create(rdev, &ib->fence, ring);
	if (r) {
		dev_err(rdev->dev, "failed to create fence for new IB (%d)\n", r);
		radeon_sa_bo_free(rdev, &ib->sa_bo, NULL);
		return r;
	}

	ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo);
	ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
	ib->vm_id = 0;
	ib->is_const_ib = false;
	ib->semaphore = NULL;

	return 0;
}

void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
{
	radeon_semaphore_free(rdev, ib->semaphore, ib->fence);
	radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence);
	radeon_fence_unref(&ib->fence);
}

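/*
 * A minimal sketch of the IB life cycle as the helpers above are meant to
 * be used; the ring index and size are illustrative, and real callers
 * (e.g. the CS ioctl path) add their own locking and error handling:
 *
 *	struct radeon_ib ib;
 *	int r;
 *
 *	r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, 64 * 1024);
 *	if (r)
 *		return r;
 *	... fill ib.ptr[] with packets and set ib.length_dw ...
 *	r = radeon_ib_schedule(rdev, &ib);
 *	radeon_ib_free(rdev, &ib);
 */
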
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
	int r = 0;

	if (!ib->length_dw || !ring->ready) {
		/* TODO: Nothing in the IB we should report. */
		dev_err(rdev->dev, "couldn't schedule ib\n");
		return -EINVAL;
	}

	/* 64 dwords should be enough for fence too */
	r = radeon_ring_lock(rdev, ring, 64);
	if (r) {
		dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
		return r;
	}
	radeon_ring_ib_execute(rdev, ib->fence->ring, ib);
	radeon_fence_emit(rdev, ib->fence);
	radeon_ring_unlock_commit(rdev, ring);
	return 0;
}

int radeon_ib_pool_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->ib_pool_ready) {
		return 0;
	}
	r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
				      RADEON_IB_POOL_SIZE*64*1024,
				      RADEON_GEM_DOMAIN_GTT);
	if (r) {
		return r;
	}
	rdev->ib_pool_ready = true;
	if (radeon_debugfs_sa_init(rdev)) {
		dev_err(rdev->dev, "failed to register debugfs file for SA\n");
	}
	return 0;
}

void radeon_ib_pool_fini(struct radeon_device *rdev)
{
	if (rdev->ib_pool_ready) {
		radeon_sa_bo_manager_fini(rdev, &rdev->ring_tmp_bo);
		rdev->ib_pool_ready = false;
	}
}

int radeon_ib_pool_start(struct radeon_device *rdev)
{
	return radeon_sa_bo_manager_start(rdev, &rdev->ring_tmp_bo);
}

int radeon_ib_pool_suspend(struct radeon_device *rdev)
{
	return radeon_sa_bo_manager_suspend(rdev, &rdev->ring_tmp_bo);
}

int radeon_ib_ring_tests(struct radeon_device *rdev)
{
	unsigned i;
	int r;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		struct radeon_ring *ring = &rdev->ring[i];

		if (!ring->ready)
			continue;

		r = radeon_ib_test(rdev, i, ring);
		if (r) {
			ring->ready = false;

			if (i == RADEON_RING_TYPE_GFX_INDEX) {
				/* oh, oh, that's really bad */
				DRM_ERROR("radeon: failed testing IB on GFX ring (%d).\n", r);
				rdev->accel_working = false;
				return r;

			} else {
				/* still not good, but we can live with it */
				DRM_ERROR("radeon: failed testing IB on ring %d (%d).\n", i, r);
			}
		}
	}
	return 0;
}

/*
 * Ring.
 */
int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring);

void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
{
#if DRM_DEBUG_CODE
	if (ring->count_dw <= 0) {
		DRM_ERROR("radeon: writing more dwords to ring than expected!\n");
	}
#endif
	ring->ring[ring->wptr++] = v;
	ring->wptr &= ring->ptr_mask;
	ring->count_dw--;
	ring->ring_free_dw--;
}

int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *ring)
{
	/* r1xx-r5xx only has CP ring */
	if (rdev->family < CHIP_R600)
		return RADEON_RING_TYPE_GFX_INDEX;

	if (rdev->family >= CHIP_CAYMAN) {
		if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX])
			return CAYMAN_RING_TYPE_CP1_INDEX;
		else if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX])
			return CAYMAN_RING_TYPE_CP2_INDEX;
	}
	return RADEON_RING_TYPE_GFX_INDEX;
}

void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 rptr;

	if (rdev->wb.enabled)
		rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
	else
		rptr = RREG32(ring->rptr_reg);
	ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
	/* This works because ring_size is a power of 2 */
	ring->ring_free_dw = (ring->rptr + (ring->ring_size / 4));
	ring->ring_free_dw -= ring->wptr;
	ring->ring_free_dw &= ring->ptr_mask;
	if (!ring->ring_free_dw) {
		ring->ring_free_dw = ring->ring_size / 4;
	}
}

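/*
 * Worked example of the mask trick above, with illustrative values: a
 * 16 KiB ring holds 4096 dwords, so ptr_mask = 0xfff; with rptr = 16 and
 * wptr = 4000, free space is (16 + 4096 - 4000) & 0xfff = 112 dwords. A
 * masked result of 0 is ambiguous between empty and full; since rptr ==
 * wptr means the CP has consumed everything, it is fixed up to a full ring.
 */
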
int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
	int r;

	/* Align requested size with padding so unlock_commit can
	 * pad safely */
	ndw = (ndw + ring->align_mask) & ~ring->align_mask;
	while (ndw > (ring->ring_free_dw - 1)) {
		radeon_ring_free_size(rdev, ring);
		if (ndw < ring->ring_free_dw) {
			break;
		}
		r = radeon_fence_wait_next_locked(rdev, radeon_ring_index(rdev, ring));
		if (r)
			return r;
	}
	ring->count_dw = ndw;
	ring->wptr_old = ring->wptr;
	return 0;
}

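/*
 * The align-up in radeon_ring_alloc() is the usual power-of-two idiom; for
 * example, with align_mask = 15 a request of ndw = 3 is rounded up to 16,
 * which guarantees radeon_ring_commit() can pad with nops without running
 * past the space that was reserved.
 */
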
int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
	int r;

	mutex_lock(&rdev->ring_lock);
	r = radeon_ring_alloc(rdev, ring, ndw);
	if (r) {
		mutex_unlock(&rdev->ring_lock);
		return r;
	}
	return 0;
}

void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
	unsigned count_dw_pad;
	unsigned i;

	/* We pad to match fetch size */
	count_dw_pad = (ring->align_mask + 1) -
		       (ring->wptr & ring->align_mask);
	for (i = 0; i < count_dw_pad; i++) {
		radeon_ring_write(ring, ring->nop);
	}
	DRM_MEMORYBARRIER();
	WREG32(ring->wptr_reg, (ring->wptr << ring->ptr_reg_shift) & ring->ptr_reg_mask);
	(void)RREG32(ring->wptr_reg);
}

void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
	radeon_ring_commit(rdev, ring);
	mutex_unlock(&rdev->ring_lock);
}

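/*
 * A minimal sketch of the lock/write/commit protocol (the packet words are
 * placeholders, not a real command):
 *
 *	r = radeon_ring_lock(rdev, ring, 2);
 *	if (r)
 *		return r;
 *	radeon_ring_write(ring, header);
 *	radeon_ring_write(ring, payload);
 *	radeon_ring_unlock_commit(rdev, ring);
 *
 * If something goes wrong after locking, radeon_ring_unlock_undo() below
 * instead rolls wptr back to wptr_old and drops the lock.
 */
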
void radeon_ring_undo(struct radeon_ring *ring)
{
	ring->wptr = ring->wptr_old;
}

void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring)
{
	radeon_ring_undo(ring);
	mutex_unlock(&rdev->ring_lock);
}

void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring)
{
	int r;

	radeon_ring_free_size(rdev, ring);
	if (ring->rptr == ring->wptr) {
		r = radeon_ring_alloc(rdev, ring, 1);
		if (!r) {
			radeon_ring_write(ring, ring->nop);
			radeon_ring_commit(rdev, ring);
		}
	}
}

void radeon_ring_lockup_update(struct radeon_ring *ring)
{
	ring->last_rptr = ring->rptr;
	ring->last_activity = jiffies;
}

/**
 * radeon_ring_test_lockup() - check if the ring is locked up
 * @rdev: radeon device structure
 * @ring: radeon_ring structure holding ring information
 *
 * The lockup tracking information doesn't need to be initialized explicitly:
 * either the CP rptr will have moved, or jiffies will have wrapped around,
 * and both cases force the tracking information to be (re)initialized here.
 *
 * A lockup is only reported when the CP rptr has not moved and more than
 * radeon_lockup_timeout msec have elapsed since the last recorded activity.
 * The caller therefore has to poll this function periodically for a lockup
 * to ever be reported; the fencing code should be cautious about that.
 *
 * The caller should also write to the ring to force the CP to do something,
 * so we don't get a false positive when the CP simply has nothing to do.
 */
bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	unsigned long cjiffies, elapsed;
	uint32_t rptr;

	cjiffies = jiffies;
	if (!time_after(cjiffies, ring->last_activity)) {
		/* likely a wrap around */
		radeon_ring_lockup_update(ring);
		return false;
	}
	rptr = RREG32(ring->rptr_reg);
	ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
	if (ring->rptr != ring->last_rptr) {
		/* CP is still working, no lockup */
		radeon_ring_lockup_update(ring);
		return false;
	}
	elapsed = jiffies_to_msecs(cjiffies - ring->last_activity);
	if (radeon_lockup_timeout && elapsed >= radeon_lockup_timeout) {
		dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
		return true;
	}
	/* give a chance to the GPU ... */
	return false;
}

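/*
 * A sketch of how an ASIC-specific gpu_is_lockup() callback is expected to
 * combine the helpers above (the function and busy-check names here are
 * illustrative; each ASIC has its own engine-busy test):
 *
 *	bool rXXX_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 *	{
 *		if (!rXXX_engine_busy(rdev)) {
 *			radeon_ring_lockup_update(ring);
 *			return false;
 *		}
 *		radeon_ring_force_activity(rdev, ring);
 *		return radeon_ring_test_lockup(rdev, ring);
 *	}
 */
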
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
		     unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
		     u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop)
{
	int r;

	ring->ring_size = ring_size;
	ring->rptr_offs = rptr_offs;
	ring->rptr_reg = rptr_reg;
	ring->wptr_reg = wptr_reg;
	ring->ptr_reg_shift = ptr_reg_shift;
	ring->ptr_reg_mask = ptr_reg_mask;
	ring->nop = nop;
	/* Allocate ring buffer */
	if (ring->ring_obj == NULL) {
		r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT,
				     NULL, &ring->ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring create failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(ring->ring_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(ring->ring_obj, RADEON_GEM_DOMAIN_GTT,
				  &ring->gpu_addr);
		if (r) {
			radeon_bo_unreserve(ring->ring_obj);
			dev_err(rdev->dev, "(%d) ring pin failed\n", r);
			return r;
		}
		r = radeon_bo_kmap(ring->ring_obj,
				   (void **)&ring->ring);
		radeon_bo_unreserve(ring->ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring map failed\n", r);
			return r;
		}
	}
	ring->ptr_mask = (ring->ring_size / 4) - 1;
	ring->ring_free_dw = ring->ring_size / 4;
	if (radeon_debugfs_ring_init(rdev, ring)) {
		DRM_ERROR("Failed to register debugfs file for rings!\n");
	}
	return 0;
}

void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *ring)
{
	int r;
	struct radeon_bo *ring_obj;

	mutex_lock(&rdev->ring_lock);
	ring_obj = ring->ring_obj;
	ring->ready = false;
	ring->ring = NULL;
	ring->ring_obj = NULL;
	mutex_unlock(&rdev->ring_lock);

	if (ring_obj) {
		r = radeon_bo_reserve(ring_obj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(ring_obj);
			radeon_bo_unpin(ring_obj);
			radeon_bo_unreserve(ring_obj);
		}
		radeon_bo_unref(&ring_obj);
	}
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int ridx = *(int*)node->info_ent->data;
	struct radeon_ring *ring = &rdev->ring[ridx];
	unsigned count, i, j;

	radeon_ring_free_size(rdev, ring);
	count = (ring->ring_size / 4) - ring->ring_free_dw;
	seq_printf(m, "wptr(0x%04x): 0x%08x\n", ring->wptr_reg, RREG32(ring->wptr_reg));
	seq_printf(m, "rptr(0x%04x): 0x%08x\n", ring->rptr_reg, RREG32(ring->rptr_reg));
	seq_printf(m, "driver's copy of the wptr: 0x%08x\n", ring->wptr);
	seq_printf(m, "driver's copy of the rptr: 0x%08x\n", ring->rptr);
	seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
	seq_printf(m, "%u dwords in ring\n", count);
	i = ring->rptr;
	for (j = 0; j <= count; j++) {
		seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
		i = (i + 1) & ring->ptr_mask;
	}
	return 0;
}

static int radeon_ring_type_gfx_index = RADEON_RING_TYPE_GFX_INDEX;
static int cayman_ring_type_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX;
static int cayman_ring_type_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX;

static struct drm_info_list radeon_debugfs_ring_info_list[] = {
	{"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_ring_type_gfx_index},
	{"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp1_index},
	{"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp2_index},
};

static int radeon_debugfs_sa_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	radeon_sa_bo_dump_debug_info(&rdev->ring_tmp_bo, m);

	return 0;
}

static struct drm_info_list radeon_debugfs_sa_list[] = {
	{"radeon_sa_info", &radeon_debugfs_sa_info, 0, NULL},
};

#endif

int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;
	for (i = 0; i < ARRAY_SIZE(radeon_debugfs_ring_info_list); ++i) {
		struct drm_info_list *info = &radeon_debugfs_ring_info_list[i];
		int ridx = *(int*)radeon_debugfs_ring_info_list[i].data;
		unsigned r;

		if (&rdev->ring[ridx] != ring)
			continue;

		r = radeon_debugfs_add_files(rdev, info, 1);
		if (r)
			return r;
	}
#endif
	return 0;
}

int radeon_debugfs_sa_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_sa_list, 1);
#else
	return 0;
#endif
}