/* drivers/gpu/drm/radeon/radeon_fence.c */
/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_trace.h"

/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization. When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed. Whether
 * we use a scratch register or a memory location depends on the asic
 * and on whether writeback is enabled.
 */
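
/*
 * Illustrative lifecycle sketch (an assumption for documentation, not
 * driver code): a typical caller emits a fence after queueing work on a
 * ring, waits for it, then drops its reference. The ring index and the
 * error handling are assumptions for the example.
 *
 *    struct radeon_fence *fence = NULL;
 *    int r;
 *
 *    r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
 *    if (r)
 *        return r;
 *    r = radeon_fence_wait(fence, true);
 *    radeon_fence_unref(&fence);
 */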

/**
 * radeon_fence_write - write a fence value
 *
 * @rdev: radeon_device pointer
 * @seq: sequence number to write
 * @ring: ring index the fence is associated with
 *
 * Writes a fence value to memory or a scratch register (all asics).
 */
static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
{
    struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
    if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
        *drv->cpu_addr = cpu_to_le32(seq);
    } else {
        WREG32(drv->scratch_reg, seq);
    }
}

/**
 * radeon_fence_read - read a fence value
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Reads a fence value from memory or a scratch register (all asics).
 * Returns the value of the fence read from memory or register.
 */
static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
{
    struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
    u32 seq = 0;

    if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
        seq = le32_to_cpu(*drv->cpu_addr);
    } else {
        seq = RREG32(drv->scratch_reg);
    }
    return seq;
}

/**
 * radeon_fence_emit - emit a fence on the requested ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 * @ring: ring index the fence is associated with
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int radeon_fence_emit(struct radeon_device *rdev,
                      struct radeon_fence **fence,
                      int ring)
{
    /* we are protected by the ring emission mutex */
    *fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
    if ((*fence) == NULL) {
        return -ENOMEM;
    }
    kref_init(&((*fence)->kref));
    (*fence)->rdev = rdev;
    (*fence)->seq = ++rdev->fence_drv[ring].sync_seq[ring];
    (*fence)->ring = ring;
    radeon_fence_ring_emit(rdev, ring, *fence);
    trace_radeon_fence_emit(rdev->ddev, (*fence)->seq);
    return 0;
}
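
/*
 * Usage sketch (illustrative): each call to radeon_fence_emit() bumps
 * sync_seq[ring], so fences on a ring are strictly ordered. A caller,
 * already holding the ring emission mutex as noted above, would do
 * something like:
 *
 *    struct radeon_fence *fence;
 *    int r = radeon_fence_emit(rdev, &fence, ring);
 *    if (r)
 *        return r;    (allocation failed, nothing was emitted)
 */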

/**
 * radeon_fence_process - process a fence
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Checks the current fence value and wakes the fence queue
 * if the sequence number has increased (all asics).
 */
void radeon_fence_process(struct radeon_device *rdev, int ring)
{
    uint64_t seq, last_seq, last_emitted;
    unsigned count_loop = 0;
    bool wake = false;

    /* Note there is a scenario here for an infinite loop, but it's
     * very unlikely to happen. For it to happen, the current polling
     * process needs to be interrupted by another process, and that
     * other process needs to update last_seq between the atomic read
     * and the xchg of the current process.
     *
     * Moreover, for this to loop forever there would need to be a
     * continuous stream of newly signaled fences, i.e. radeon_fence_read
     * needs to return a different value each time for both the currently
     * polling process and the other process that does the xchg between
     * the atomic read and xchg of the current process. And the value
     * the other process sets as last_seq must be higher than the seq
     * value we just read, which means the current process needs to be
     * interrupted after radeon_fence_read and before the atomic xchg.
     *
     * To be even more safe we count the number of times we loop and
     * bail out after 10 iterations, accepting the fact that we might
     * have temporarily set last_seq not to the true last signaled seq
     * but to an older one.
     */
    last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
    do {
        last_emitted = rdev->fence_drv[ring].sync_seq[ring];
        seq = radeon_fence_read(rdev, ring);
        seq |= last_seq & 0xffffffff00000000LL;
        if (seq < last_seq) {
            seq &= 0xffffffff;
            seq |= last_emitted & 0xffffffff00000000LL;
        }

        if (seq <= last_seq || seq > last_emitted) {
            break;
        }
        /* If we loop over, we don't want to return without
         * checking if a fence is signaled, as it means that the
         * seq we just read is different from the previous one.
         */
        wake = true;
        last_seq = seq;
        if ((count_loop++) > 10) {
            /* We looped over too many times; leave with the
             * fact that we might have set an older fence
             * seq than the current real last seq as signaled
             * by the hw.
             */
            break;
        }
    } while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);

    if (wake) {
        rdev->fence_drv[ring].last_activity = jiffies;
        wake_up_all(&rdev->fence_queue);
    }
}
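
/*
 * Worked example of the 32-bit wrap handling above (numbers are
 * illustrative): the hardware only stores the low 32 bits of the
 * sequence. If last_seq = 0x00000001fffffffe, last_emitted =
 * 0x0000000200000002 and radeon_fence_read() returns 0x00000001, then
 * ORing in the upper word of last_seq yields 0x0000000100000001, which
 * is below last_seq, so the code re-bases the value on last_emitted's
 * upper word and arrives at the correct 0x0000000200000001.
 */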

/**
 * radeon_fence_destroy - destroy a fence
 *
 * @kref: fence kref
 *
 * Frees the fence object (all asics).
 */
static void radeon_fence_destroy(struct kref *kref)
{
    struct radeon_fence *fence;

    fence = container_of(kref, struct radeon_fence, kref);
    kfree(fence);
}

/**
 * radeon_fence_seq_signaled - check if a fence sequence number has signaled
 *
 * @rdev: radeon device pointer
 * @seq: sequence number
 * @ring: ring index the fence is associated with
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if the fence has signaled (current fence value
 * is >= requested value) or false if it has not (current fence
 * value is < the requested value). Helper function for
 * radeon_fence_signaled().
 */
static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
                                      u64 seq, unsigned ring)
{
    if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
        return true;
    }
    /* poll new last sequence at least once */
    radeon_fence_process(rdev, ring);
    if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
        return true;
    }
    return false;
}

/**
 * radeon_fence_signaled - check if a fence has signaled
 *
 * @fence: radeon fence object
 *
 * Check if the requested fence has signaled (all asics).
 * Returns true if the fence has signaled or false if it has not.
 */
bool radeon_fence_signaled(struct radeon_fence *fence)
{
    if (!fence) {
        return true;
    }
    if (fence->seq == RADEON_FENCE_SIGNALED_SEQ) {
        return true;
    }
    if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
        fence->seq = RADEON_FENCE_SIGNALED_SEQ;
        return true;
    }
    return false;
}
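
/*
 * Polling sketch (illustrative): radeon_fence_signaled() never blocks,
 * so it can be used opportunistically, e.g. to recycle a buffer only
 * when it is already idle (bo_fence and reuse_buffer() are hypothetical
 * names for the example):
 *
 *    if (radeon_fence_signaled(bo_fence)) {
 *        radeon_fence_unref(&bo_fence);
 *        reuse_buffer(bo);
 *    }
 */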

/**
 * radeon_fence_wait_seq - wait for a specific sequence number
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number we want to wait for
 * @ring: ring index the fence is associated with
 * @intr: use interruptible sleep
 * @lock_ring: whether the ring should be locked or not
 *
 * Wait for the requested sequence number to be written (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence number. Helper function
 * for radeon_fence_wait(), et al.
 * Returns 0 if the sequence number has passed, error for all other cases.
 * -EDEADLK is returned when a GPU lockup has been detected and the ring is
 * marked as not ready so no further jobs get scheduled until a successful
 * reset.
 */
static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq,
                                 unsigned ring, bool intr, bool lock_ring)
{
    unsigned long timeout, last_activity;
    uint64_t seq;
    unsigned i;
    bool signaled;
    int r;

    while (target_seq > atomic64_read(&rdev->fence_drv[ring].last_seq)) {
        if (!rdev->ring[ring].ready) {
            return -EBUSY;
        }

        timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
        if (time_after(rdev->fence_drv[ring].last_activity, timeout)) {
            /* the normal case, timeout is somewhere before last_activity */
            timeout = rdev->fence_drv[ring].last_activity - timeout;
        } else {
            /* either jiffies wrapped around, or no fence was signaled in the last 500ms;
             * either way we just wait for the minimum amount and then check for a lockup
             */
            timeout = 1;
        }
        seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
        /* Save the current last activity value, used to check for GPU lockups */
        last_activity = rdev->fence_drv[ring].last_activity;

        trace_radeon_fence_wait_begin(rdev->ddev, seq);
        radeon_irq_kms_sw_irq_get(rdev, ring);
        if (intr) {
            r = wait_event_interruptible_timeout(rdev->fence_queue,
                (signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
                timeout);
        } else {
            r = wait_event_timeout(rdev->fence_queue,
                (signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
                timeout);
        }
        radeon_irq_kms_sw_irq_put(rdev, ring);
        if (unlikely(r < 0)) {
            return r;
        }
        trace_radeon_fence_wait_end(rdev->ddev, seq);

        if (unlikely(!signaled)) {
            /* we were interrupted for some reason and the fence
             * isn't signaled yet, resume waiting */
            if (r) {
                continue;
            }

            /* check if the sequence value has changed since last_activity */
            if (seq != atomic64_read(&rdev->fence_drv[ring].last_seq)) {
                continue;
            }

            if (lock_ring) {
                mutex_lock(&rdev->ring_lock);
            }

            /* test if somebody else has already decided that this is a lockup */
            if (last_activity != rdev->fence_drv[ring].last_activity) {
                if (lock_ring) {
                    mutex_unlock(&rdev->ring_lock);
                }
                continue;
            }

            if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
                /* good news, we believe it's a lockup */
                dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx last fence id 0x%016llx)\n",
                         target_seq, seq);

                /* change last activity so nobody else thinks there is a lockup */
                for (i = 0; i < RADEON_NUM_RINGS; ++i) {
                    rdev->fence_drv[i].last_activity = jiffies;
                }

                /* mark the ring as not ready any more */
                rdev->ring[ring].ready = false;
                if (lock_ring) {
                    mutex_unlock(&rdev->ring_lock);
                }
                return -EDEADLK;
            }

            if (lock_ring) {
                mutex_unlock(&rdev->ring_lock);
            }
        }
    }
    return 0;
}

/**
 * radeon_fence_wait - wait for a fence to signal
 *
 * @fence: radeon fence object
 * @intr: use interruptible sleep
 *
 * Wait for the requested fence to signal (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the fence.
 * Returns 0 if the fence has passed, error for all other cases.
 */
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
    int r;

    if (fence == NULL) {
        WARN(1, "Querying an invalid fence: %p!\n", fence);
        return -EINVAL;
    }

    r = radeon_fence_wait_seq(fence->rdev, fence->seq,
                              fence->ring, intr, true);
    if (r) {
        return r;
    }
    fence->seq = RADEON_FENCE_SIGNALED_SEQ;
    return 0;
}
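
/*
 * Usage sketch (illustrative): with intr == true the wait can be broken
 * by a signal, in which case wait_event_interruptible_timeout() makes
 * radeon_fence_wait() return -ERESTARTSYS, which callers typically
 * forward so the syscall gets restarted. -EDEADLK reports a detected
 * GPU lockup:
 *
 *    r = radeon_fence_wait(fence, true);
 *    if (r == -ERESTARTSYS)
 *        return r;    (interrupted, restart the ioctl)
 *    if (r == -EDEADLK)
 *        ... the ring was marked not ready, a GPU reset is needed ...
 */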

static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
{
    unsigned i;

    for (i = 0; i < RADEON_NUM_RINGS; ++i) {
        if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i)) {
            return true;
        }
    }
    return false;
}

/**
 * radeon_fence_wait_any_seq - wait for a sequence number on any ring
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number(s) we want to wait for
 * @intr: use interruptible sleep
 *
 * Wait for the requested sequence number(s) to be written by any ring
 * (all asics). Sequence number array is indexed by ring id.
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence number. Helper function
 * for radeon_fence_wait_any(), et al.
 * Returns 0 if the sequence number has passed, error for all other cases.
 */
static int radeon_fence_wait_any_seq(struct radeon_device *rdev,
                                     u64 *target_seq, bool intr)
{
    unsigned long timeout, last_activity, tmp;
    unsigned i, ring = RADEON_NUM_RINGS;
    bool signaled;
    int r;

    for (i = 0, last_activity = 0; i < RADEON_NUM_RINGS; ++i) {
        if (!target_seq[i]) {
            continue;
        }

        /* use the most recent one as indicator */
        if (time_after(rdev->fence_drv[i].last_activity, last_activity)) {
            last_activity = rdev->fence_drv[i].last_activity;
        }

        /* For lockup detection just pick the lowest ring we are
         * actively waiting for
         */
        if (i < ring) {
            ring = i;
        }
    }

    /* nothing to wait for? */
    if (ring == RADEON_NUM_RINGS) {
        return -ENOENT;
    }

    while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
        timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
        if (time_after(last_activity, timeout)) {
            /* the normal case, timeout is somewhere before last_activity */
            timeout = last_activity - timeout;
        } else {
            /* either jiffies wrapped around, or no fence was signaled in the last 500ms;
             * either way we just wait for the minimum amount and then check for a lockup
             */
            timeout = 1;
        }

        trace_radeon_fence_wait_begin(rdev->ddev, target_seq[ring]);
        for (i = 0; i < RADEON_NUM_RINGS; ++i) {
            if (target_seq[i]) {
                radeon_irq_kms_sw_irq_get(rdev, i);
            }
        }
        if (intr) {
            r = wait_event_interruptible_timeout(rdev->fence_queue,
                (signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
                timeout);
        } else {
            r = wait_event_timeout(rdev->fence_queue,
                (signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
                timeout);
        }
        for (i = 0; i < RADEON_NUM_RINGS; ++i) {
            if (target_seq[i]) {
                radeon_irq_kms_sw_irq_put(rdev, i);
            }
        }
        if (unlikely(r < 0)) {
            return r;
        }
        trace_radeon_fence_wait_end(rdev->ddev, target_seq[ring]);

        if (unlikely(!signaled)) {
            /* we were interrupted for some reason and the fence
             * isn't signaled yet, resume waiting */
            if (r) {
                continue;
            }

            mutex_lock(&rdev->ring_lock);
            for (i = 0, tmp = 0; i < RADEON_NUM_RINGS; ++i) {
                if (time_after(rdev->fence_drv[i].last_activity, tmp)) {
                    tmp = rdev->fence_drv[i].last_activity;
                }
            }
            /* test if somebody else has already decided that this is a lockup */
            if (last_activity != tmp) {
                last_activity = tmp;
                mutex_unlock(&rdev->ring_lock);
                continue;
            }

            if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
                /* good news, we believe it's a lockup */
                dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx)\n",
                         target_seq[ring]);

                /* change last activity so nobody else thinks there is a lockup */
                for (i = 0; i < RADEON_NUM_RINGS; ++i) {
                    rdev->fence_drv[i].last_activity = jiffies;
                }

                /* mark the ring as not ready any more */
                rdev->ring[ring].ready = false;
                mutex_unlock(&rdev->ring_lock);
                return -EDEADLK;
            }
            mutex_unlock(&rdev->ring_lock);
        }
    }
    return 0;
}

/**
 * radeon_fence_wait_any - wait for a fence to signal on any ring
 *
 * @rdev: radeon device pointer
 * @fences: radeon fence object(s)
 * @intr: use interruptible sleep
 *
 * Wait for any requested fence to signal (all asics). Fence
 * array is indexed by ring id. @intr selects whether to use
 * interruptible (true) or non-interruptible (false) sleep when
 * waiting for the fences. Used by the suballocator.
 * Returns 0 if any fence has passed, error for all other cases.
 */
int radeon_fence_wait_any(struct radeon_device *rdev,
                          struct radeon_fence **fences,
                          bool intr)
{
    uint64_t seq[RADEON_NUM_RINGS];
    unsigned i;
    int r;

    for (i = 0; i < RADEON_NUM_RINGS; ++i) {
        seq[i] = 0;

        if (!fences[i]) {
            continue;
        }

        if (fences[i]->seq == RADEON_FENCE_SIGNALED_SEQ) {
            /* something was already signaled */
            return 0;
        }

        seq[i] = fences[i]->seq;
    }

    r = radeon_fence_wait_any_seq(rdev, seq, intr);
    if (r) {
        return r;
    }
    return 0;
}
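
/*
 * Usage sketch (illustrative, after the fashion of the suballocator use
 * noted above): build a per-ring array, leaving NULL for rings we do not
 * care about, and wait for whichever fence signals first (gfx_fence is a
 * hypothetical fence; -ENOENT means every entry was NULL):
 *
 *    struct radeon_fence *fences[RADEON_NUM_RINGS] = { NULL };
 *    int r;
 *
 *    fences[RADEON_RING_TYPE_GFX_INDEX] = gfx_fence;
 *    r = radeon_fence_wait_any(rdev, fences, true);
 */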

/**
 * radeon_fence_wait_next_locked - wait for the next fence to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for the next fence on the requested ring to signal (all asics).
 * Returns 0 if the next fence has passed, error for all other cases.
 * Caller must hold ring lock.
 */
int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
{
    uint64_t seq;

    seq = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
    if (seq >= rdev->fence_drv[ring].sync_seq[ring]) {
        /* nothing to wait for, last_seq is
           already the last emitted fence */
        return -ENOENT;
    }
    return radeon_fence_wait_seq(rdev, seq, ring, false, false);
}

/**
 * radeon_fence_wait_empty_locked - wait for all fences to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Attempts a GPU reset if a lockup is detected while waiting.
 * Caller must hold ring lock.
 */
void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
{
    uint64_t seq = rdev->fence_drv[ring].sync_seq[ring];

    while (1) {
        int r;
        r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
        if (r == -EDEADLK) {
            mutex_unlock(&rdev->ring_lock);
            r = radeon_gpu_reset(rdev);
            mutex_lock(&rdev->ring_lock);
            if (!r)
                continue;
        }
        if (r) {
            dev_err(rdev->dev, "error waiting for ring to become"
                " idle (%d)\n", r);
        }
        return;
    }
}

/**
 * radeon_fence_ref - take a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Take a reference on a fence (all asics).
 * Returns the fence.
 */
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
    kref_get(&fence->kref);
    return fence;
}

/**
 * radeon_fence_unref - remove a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Remove a reference on a fence (all asics).
 */
void radeon_fence_unref(struct radeon_fence **fence)
{
    struct radeon_fence *tmp = *fence;

    *fence = NULL;
    if (tmp) {
        kref_put(&tmp->kref, radeon_fence_destroy);
    }
}

/**
 * radeon_fence_count_emitted - get the count of emitted fences
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring. Used by the
 * dynpm code to track ring activity.
 */
unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
{
    uint64_t emitted;

    /* We are not protected by the ring lock when reading the last sequence,
     * but it's ok to report a slightly wrong fence count here.
     */
    radeon_fence_process(rdev, ring);
    emitted = rdev->fence_drv[ring].sync_seq[ring]
        - atomic64_read(&rdev->fence_drv[ring].last_seq);
    /* to avoid a 32-bit wrap-around */
    if (emitted > 0x10000000) {
        emitted = 0x10000000;
    }
    return (unsigned)emitted;
}
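
/*
 * Consumer sketch (illustrative, modelled on the dynpm use mentioned
 * above; the policy itself is an assumption): a power-management
 * heuristic could treat the GPU as idle when no ring has emitted
 * fences that are still outstanding:
 *
 *    bool busy = false;
 *    int ring;
 *
 *    for (ring = 0; ring < RADEON_NUM_RINGS; ++ring) {
 *        if (radeon_fence_count_emitted(rdev, ring)) {
 *            busy = true;
 *            break;
 *        }
 *    }
 */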

/**
 * radeon_fence_need_sync - do we need a semaphore
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Check if the fence needs to be synced against another ring
 * (all asics). If so, we need to emit a semaphore.
 * Returns true if we need to sync with another ring, false if
 * not.
 */
bool radeon_fence_need_sync(struct radeon_fence *fence, int dst_ring)
{
    struct radeon_fence_driver *fdrv;

    if (!fence) {
        return false;
    }

    if (fence->ring == dst_ring) {
        return false;
    }

    /* we are protected by the ring mutex */
    fdrv = &fence->rdev->fence_drv[dst_ring];
    if (fence->seq <= fdrv->sync_seq[fence->ring]) {
        return false;
    }

    return true;
}

/**
 * radeon_fence_note_sync - record the sync point
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Note the sequence number at which point the fence will
 * be synced with the requested ring (all asics).
 */
void radeon_fence_note_sync(struct radeon_fence *fence, int dst_ring)
{
    struct radeon_fence_driver *dst, *src;
    unsigned i;

    if (!fence) {
        return;
    }

    if (fence->ring == dst_ring) {
        return;
    }

    /* we are protected by the ring mutex */
    src = &fence->rdev->fence_drv[fence->ring];
    dst = &fence->rdev->fence_drv[dst_ring];
    for (i = 0; i < RADEON_NUM_RINGS; ++i) {
        if (i == dst_ring) {
            continue;
        }
        dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
    }
}
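
/*
 * Sync pattern sketch (illustrative): before scheduling work on dst_ring
 * that depends on a fence from another ring, a caller first asks whether
 * a semaphore is required, emits one if so (the semaphore emission step
 * is elided here, it lives outside this file), and then records the sync
 * point so later fences can skip the semaphore:
 *
 *    if (radeon_fence_need_sync(fence, dst_ring)) {
 *        ... emit a semaphore wait on dst_ring ...
 *        radeon_fence_note_sync(fence, dst_ring);
 *    }
 */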

/**
 * radeon_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
{
    uint64_t index;
    int r;

    radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
    if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) {
        rdev->fence_drv[ring].scratch_reg = 0;
        index = R600_WB_EVENT_OFFSET + ring * 4;
    } else {
        r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
        if (r) {
            dev_err(rdev->dev, "fence failed to get scratch register\n");
            return r;
        }
        index = RADEON_WB_SCRATCH_OFFSET +
            rdev->fence_drv[ring].scratch_reg -
            rdev->scratch.reg_base;
    }
    rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
    rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
    radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring);
    rdev->fence_drv[ring].initialized = true;
    dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016llx and cpu addr 0x%p\n",
             ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
    return 0;
}

/**
 * radeon_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for radeon_fence_driver_init().
 */
static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
{
    int i;

    rdev->fence_drv[ring].scratch_reg = -1;
    rdev->fence_drv[ring].cpu_addr = NULL;
    rdev->fence_drv[ring].gpu_addr = 0;
    for (i = 0; i < RADEON_NUM_RINGS; ++i)
        rdev->fence_drv[ring].sync_seq[i] = 0;
    atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
    rdev->fence_drv[ring].last_activity = jiffies;
    rdev->fence_drv[ring].initialized = false;
}

/**
 * radeon_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * radeon_fence_driver_start_ring().
 * Returns 0 for success.
 */
int radeon_fence_driver_init(struct radeon_device *rdev)
{
    int ring;

    init_waitqueue_head(&rdev->fence_queue);
    for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
        radeon_fence_driver_init_ring(rdev, ring);
    }
    if (radeon_debugfs_fence_init(rdev)) {
        dev_err(rdev->dev, "fence debugfs file creation failed\n");
    }
    return 0;
}

/**
 * radeon_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
    int ring;

    mutex_lock(&rdev->ring_lock);
    for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
        if (!rdev->fence_drv[ring].initialized)
            continue;
        radeon_fence_wait_empty_locked(rdev, ring);
        wake_up_all(&rdev->fence_queue);
        radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
        rdev->fence_drv[ring].initialized = false;
    }
    mutex_unlock(&rdev->ring_lock);
}


/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
{
    struct drm_info_node *node = (struct drm_info_node *)m->private;
    struct drm_device *dev = node->minor->dev;
    struct radeon_device *rdev = dev->dev_private;
    int i, j;

    for (i = 0; i < RADEON_NUM_RINGS; ++i) {
        if (!rdev->fence_drv[i].initialized)
            continue;

        seq_printf(m, "--- ring %d ---\n", i);
        seq_printf(m, "Last signaled fence 0x%016llx\n",
                   (unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq));
        seq_printf(m, "Last emitted 0x%016llx\n",
                   rdev->fence_drv[i].sync_seq[i]);

        for (j = 0; j < RADEON_NUM_RINGS; ++j) {
            if (i != j && rdev->fence_drv[j].initialized)
                seq_printf(m, "Last sync to ring %d 0x%016llx\n",
                           j, rdev->fence_drv[i].sync_seq[j]);
        }
    }
    return 0;
}

static struct drm_info_list radeon_debugfs_fence_list[] = {
    {"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
};
#endif

int radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
    return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 1);
#else
    return 0;
#endif
}