/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include "drmP.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_trace.h"
/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization.  When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.  Whether
 * we use a scratch register or memory location depends on the asic
 * and whether writeback is enabled.
 */
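
/*
 * Typical life cycle, as a rough illustrative sketch (not code from this
 * file; error handling trimmed): a fence is emitted on a ring, waited on,
 * then dropped:
 *
 *	struct radeon_fence *fence;
 *
 *	r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
 *	if (!r)
 *		r = radeon_fence_wait(fence, false);
 *	radeon_fence_unref(&fence);
 */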
/**
 * radeon_fence_write - write a fence value
 *
 * @rdev: radeon_device pointer
 * @seq: sequence number to write
 * @ring: ring index the fence is associated with
 *
 * Writes a fence value to memory or a scratch register (all asics).
 */
static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
{
	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
		*drv->cpu_addr = cpu_to_le32(seq);
	} else {
		WREG32(drv->scratch_reg, seq);
	}
}
/**
 * radeon_fence_read - read a fence value
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Reads a fence value from memory or a scratch register (all asics).
 * Returns the value of the fence read from memory or register.
 */
static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
{
	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
	u32 seq = 0;

	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
		seq = le32_to_cpu(*drv->cpu_addr);
	} else {
		seq = RREG32(drv->scratch_reg);
	}
	return seq;
}
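
/*
 * Note that the value the hardware writes back is only 32 bits wide, while
 * the driver tracks fence sequence numbers as 64-bit values;
 * radeon_fence_process() below reconstructs the upper 32 bits from the last
 * known signaled and emitted sequence numbers.
 */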
/**
 * radeon_fence_emit - emit a fence on the requested ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 * @ring: ring index the fence is associated with
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int radeon_fence_emit(struct radeon_device *rdev,
		      struct radeon_fence **fence,
		      int ring)
{
	/* we are protected by the ring emission mutex */
	*fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
	if ((*fence) == NULL) {
		return -ENOMEM;
	}
	kref_init(&((*fence)->kref));
	(*fence)->rdev = rdev;
	(*fence)->seq = ++rdev->fence_drv[ring].sync_seq[ring];
	(*fence)->ring = ring;
	radeon_fence_ring_emit(rdev, ring, *fence);
	trace_radeon_fence_emit(rdev->ddev, (*fence)->seq);
	return 0;
}
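
/*
 * sync_seq[ring] thus always holds the sequence number of the last fence
 * emitted on that ring; radeon_fence_process() and
 * radeon_fence_count_emitted() rely on this invariant.
 */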
/**
 * radeon_fence_process - process a fence
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Checks the current fence value and wakes the fence queue
 * if the sequence number has increased (all asics).
 */
void radeon_fence_process(struct radeon_device *rdev, int ring)
{
	uint64_t seq, last_seq, last_emitted;
	unsigned count_loop = 0;
	bool wake = false;

	/* Note there is a scenario here for an infinite loop but it's
	 * very unlikely to happen. For it to happen, the current polling
	 * process needs to be interrupted by another process and another
	 * process needs to update the last_seq btw the atomic read and
	 * xchg of the current process.
	 *
	 * Moreover, for this to go into an infinite loop there need to be
	 * continuously new fences signaled, ie. radeon_fence_read needs
	 * to return a different value each time for both the currently
	 * polling process and the other process that xchg the last_seq
	 * btw atomic read and xchg of the current process. And the
	 * value the other process set as last seq must be higher than
	 * the seq value we just read. Which means that the current process
	 * needs to be interrupted after radeon_fence_read and before the
	 * atomic xchg.
	 *
	 * To be even more safe we count the number of times we loop and
	 * bail after 10 loops, just accepting the fact that we might
	 * have temporarily set last_seq not to the true last signaled
	 * seq but to an older one.
	 */
	last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
	do {
		last_emitted = rdev->fence_drv[ring].sync_seq[ring];
		seq = radeon_fence_read(rdev, ring);
		seq |= last_seq & 0xffffffff00000000LL;
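		/* Worked example (illustrative numbers): if last_seq is
		 * 0x100000005 and the hardware returns 0x00000003, the
		 * combined value 0x100000003 is below last_seq, so the
		 * upper 32 bits must come from last_emitted instead,
		 * i.e. the counter has wrapped into a new 32-bit epoch.
		 */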
		if (seq < last_seq) {
			seq &= 0xffffffff;
			seq |= last_emitted & 0xffffffff00000000LL;
		}

		if (seq <= last_seq || seq > last_emitted) {
			break;
		}
		/* If we loop over we don't want to return without
		 * checking if a fence is signaled, as it means that the
		 * seq we just read is different from the previous one.
		 */
		wake = true;
		last_seq = seq;
		if ((count_loop++) > 10) {
			/* We looped over too many times, leave with the
			 * fact that we might have set an older fence
			 * seq than the current real last seq as signaled
			 * by the hw.
			 */
			break;
		}
	} while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);

	if (wake) {
		rdev->fence_drv[ring].last_activity = jiffies;
		wake_up_all(&rdev->fence_queue);
	}
}
/**
 * radeon_fence_destroy - destroy a fence
 *
 * @kref: fence kref
 *
 * Frees the fence object (all asics).
 */
static void radeon_fence_destroy(struct kref *kref)
{
	struct radeon_fence *fence;

	fence = container_of(kref, struct radeon_fence, kref);
	kfree(fence);
}
/**
 * radeon_fence_seq_signaled - check if a fence sequence number has signaled
 *
 * @rdev: radeon device pointer
 * @seq: sequence number
 * @ring: ring index the fence is associated with
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if the fence has signaled (current fence value
 * is >= requested value) or false if it has not (current fence
 * value is < the requested value). Helper function for
 * radeon_fence_signaled().
 */
static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
				      u64 seq, unsigned ring)
{
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
		return true;
	}
	/* poll new last sequence at least once */
	radeon_fence_process(rdev, ring);
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
		return true;
	}
	return false;
}
/**
 * radeon_fence_signaled - check if a fence has signaled
 *
 * @fence: radeon fence object
 *
 * Check if the requested fence has signaled (all asics).
 * Returns true if the fence has signaled or false if it has not.
 */
bool radeon_fence_signaled(struct radeon_fence *fence)
{
	if (!fence) {
		return true;
	}
	if (fence->seq == RADEON_FENCE_SIGNALED_SEQ) {
		return true;
	}
	if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
		fence->seq = RADEON_FENCE_SIGNALED_SEQ;
		return true;
	}
	return false;
}
/**
 * radeon_fence_wait_seq - wait for a specific sequence number
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number we want to wait for
 * @ring: ring index the fence is associated with
 * @intr: use interruptible sleep
 * @lock_ring: whether the ring should be locked or not
 *
 * Wait for the requested sequence number to be written (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence number. Helper function
 * for radeon_fence_wait(), et al.
 * Returns 0 if the sequence number has passed, error for all other cases.
 * -EDEADLK is returned when a GPU lockup has been detected and the ring is
 * marked as not ready so no further jobs get scheduled until a successful
 * reset.
 */
static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq,
				 unsigned ring, bool intr, bool lock_ring)
{
	unsigned long timeout, last_activity;
	uint64_t seq;
	unsigned i;
	bool signaled;
	int r;

	while (target_seq > atomic64_read(&rdev->fence_drv[ring].last_seq)) {
		if (!rdev->ring[ring].ready) {
			return -EBUSY;
		}

		timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
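		/* Illustrative arithmetic (RADEON_FENCE_JIFFIES_TIMEOUT is
		 * defined as HZ/2 in radeon.h, i.e. the 500ms mentioned
		 * below): with jiffies == 1000 and HZ == 100, timeout is 950
		 * here; if last_activity is, say, 990, it lies after that
		 * deadline and the branch below sleeps for at most
		 * last_activity - timeout == 40 jiffies.
		 */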
		if (time_after(rdev->fence_drv[ring].last_activity, timeout)) {
			/* the normal case, timeout is somewhere before last_activity */
			timeout = rdev->fence_drv[ring].last_activity - timeout;
		} else {
			/* either jiffies wrapped around, or no fence was signaled in the last 500ms,
			 * anyway we will just wait for the minimum amount and then check for a lockup
			 */
			timeout = 1;
		}
		seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
		/* Save current last activity value, used to check for GPU lockups */
		last_activity = rdev->fence_drv[ring].last_activity;

		trace_radeon_fence_wait_begin(rdev->ddev, seq);
		radeon_irq_kms_sw_irq_get(rdev, ring);
		if (intr) {
			r = wait_event_interruptible_timeout(rdev->fence_queue,
				(signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
				timeout);
		} else {
			r = wait_event_timeout(rdev->fence_queue,
				(signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
				timeout);
		}
		radeon_irq_kms_sw_irq_put(rdev, ring);
		if (unlikely(r < 0)) {
			return r;
		}
		trace_radeon_fence_wait_end(rdev->ddev, seq);

		if (unlikely(!signaled)) {
			/* we were interrupted for some reason and the fence
			 * isn't signaled yet, resume waiting */
			if (r) {
				continue;
			}

			/* check if sequence value has changed since last_activity */
			if (seq != atomic64_read(&rdev->fence_drv[ring].last_seq)) {
				continue;
			}

			if (lock_ring) {
				mutex_lock(&rdev->ring_lock);
			}

			/* test if somebody else has already decided that this is a lockup */
			if (last_activity != rdev->fence_drv[ring].last_activity) {
				if (lock_ring) {
					mutex_unlock(&rdev->ring_lock);
				}
				continue;
			}

			if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
				/* good news, we believe it's a lockup */
				dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx last fence id 0x%016llx)\n",
					 target_seq, seq);

				/* change last activity so nobody else thinks there is a lockup */
				for (i = 0; i < RADEON_NUM_RINGS; ++i) {
					rdev->fence_drv[i].last_activity = jiffies;
				}

				/* mark the ring as not ready any more */
				rdev->ring[ring].ready = false;
				if (lock_ring) {
					mutex_unlock(&rdev->ring_lock);
				}
				return -EDEADLK;
			}

			if (lock_ring) {
				mutex_unlock(&rdev->ring_lock);
			}
		}
	}
	return 0;
}
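
/*
 * A -EDEADLK return propagates to callers so they can trigger a GPU reset;
 * see radeon_fence_wait_empty_locked() below, which drops the ring lock,
 * calls radeon_gpu_reset() and retries.
 */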
/**
 * radeon_fence_wait - wait for a fence to signal
 *
 * @fence: radeon fence object
 * @intr: use interruptible sleep
 *
 * Wait for the requested fence to signal (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the fence.
 * Returns 0 if the fence has passed, error for all other cases.
 */
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
	int r;

	if (fence == NULL) {
		WARN(1, "Querying an invalid fence : %p !\n", fence);
		return -EINVAL;
	}

	r = radeon_fence_wait_seq(fence->rdev, fence->seq,
				  fence->ring, intr, true);
	if (r) {
		return r;
	}
	fence->seq = RADEON_FENCE_SIGNALED_SEQ;
	return 0;
}
static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
{
	unsigned i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i)) {
			return true;
		}
	}
	return false;
}
/**
 * radeon_fence_wait_any_seq - wait for a sequence number on any ring
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number(s) we want to wait for
 * @intr: use interruptible sleep
 *
 * Wait for the requested sequence number(s) to be written by any ring
 * (all asics). Sequence number array is indexed by ring id.
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence number. Helper function
 * for radeon_fence_wait_any(), et al.
 * Returns 0 if the sequence number has passed, error for all other cases.
 */
static int radeon_fence_wait_any_seq(struct radeon_device *rdev,
				     u64 *target_seq, bool intr)
{
	unsigned long timeout, last_activity, tmp;
	unsigned i, ring = RADEON_NUM_RINGS;
	bool signaled;
	int r;

	for (i = 0, last_activity = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!target_seq[i]) {
			continue;
		}

		/* use the most recent one as indicator */
		if (time_after(rdev->fence_drv[i].last_activity, last_activity)) {
			last_activity = rdev->fence_drv[i].last_activity;
		}

		/* For lockup detection just pick the lowest ring we are
		 * actively waiting for
		 */
		if (i < ring) {
			ring = i;
		}
	}

	/* nothing to wait for ? */
	if (ring == RADEON_NUM_RINGS) {
		return -ENOENT;
	}

	while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
		timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
		if (time_after(last_activity, timeout)) {
			/* the normal case, timeout is somewhere before last_activity */
			timeout = last_activity - timeout;
		} else {
			/* either jiffies wrapped around, or no fence was signaled in the last 500ms,
			 * anyway we will just wait for the minimum amount and then check for a lockup
			 */
			timeout = 1;
		}

		trace_radeon_fence_wait_begin(rdev->ddev, target_seq[ring]);
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			if (target_seq[i]) {
				radeon_irq_kms_sw_irq_get(rdev, i);
			}
		}
		if (intr) {
			r = wait_event_interruptible_timeout(rdev->fence_queue,
				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
				timeout);
		} else {
			r = wait_event_timeout(rdev->fence_queue,
				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
				timeout);
		}
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			if (target_seq[i]) {
				radeon_irq_kms_sw_irq_put(rdev, i);
			}
		}
		if (unlikely(r < 0)) {
			return r;
		}
		trace_radeon_fence_wait_end(rdev->ddev, target_seq[ring]);

		if (unlikely(!signaled)) {
			/* we were interrupted for some reason and the fence
			 * isn't signaled yet, resume waiting */
			if (r) {
				continue;
			}

			mutex_lock(&rdev->ring_lock);
			for (i = 0, tmp = 0; i < RADEON_NUM_RINGS; ++i) {
				if (time_after(rdev->fence_drv[i].last_activity, tmp)) {
					tmp = rdev->fence_drv[i].last_activity;
				}
			}
			/* test if somebody else has already decided that this is a lockup */
			if (last_activity != tmp) {
				last_activity = tmp;
				mutex_unlock(&rdev->ring_lock);
				continue;
			}

			if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
				/* good news, we believe it's a lockup */
				dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx)\n",
					 target_seq[ring]);

				/* change last activity so nobody else thinks there is a lockup */
				for (i = 0; i < RADEON_NUM_RINGS; ++i) {
					rdev->fence_drv[i].last_activity = jiffies;
				}

				/* mark the ring as not ready any more */
				rdev->ring[ring].ready = false;
				mutex_unlock(&rdev->ring_lock);
				return -EDEADLK;
			}
			mutex_unlock(&rdev->ring_lock);
		}
	}
	return 0;
}
/**
 * radeon_fence_wait_any - wait for a fence to signal on any ring
 *
 * @rdev: radeon device pointer
 * @fences: radeon fence object(s)
 * @intr: use interruptible sleep
 *
 * Wait for any requested fence to signal (all asics). Fence
 * array is indexed by ring id. @intr selects whether to use
 * interruptible (true) or non-interruptible (false) sleep when
 * waiting for the fences. Used by the suballocator.
 * Returns 0 if any fence has passed, error for all other cases.
 */
int radeon_fence_wait_any(struct radeon_device *rdev,
			  struct radeon_fence **fences,
			  bool intr)
{
	uint64_t seq[RADEON_NUM_RINGS];
	unsigned i;
	int r;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		seq[i] = 0;

		if (!fences[i]) {
			continue;
		}

		if (fences[i]->seq == RADEON_FENCE_SIGNALED_SEQ) {
			/* something was already signaled */
			return 0;
		}

		seq[i] = fences[i]->seq;
	}

	r = radeon_fence_wait_any_seq(rdev, seq, intr);
	if (r) {
		return r;
	}
	return 0;
}
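
/*
 * Rough usage sketch (illustrative only; fence_a/fence_b are hypothetical
 * and error handling is trimmed): wait for whichever of two fences signals
 * first by placing each in the slot of its ring:
 *
 *	struct radeon_fence *fences[RADEON_NUM_RINGS] = { NULL };
 *
 *	fences[fence_a->ring] = fence_a;
 *	fences[fence_b->ring] = fence_b;
 *	r = radeon_fence_wait_any(rdev, fences, false);
 */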
/**
 * radeon_fence_wait_next_locked - wait for the next fence to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for the next fence on the requested ring to signal (all asics).
 * Returns 0 if the next fence has passed, error for all other cases.
 * Caller must hold ring lock.
 */
int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
{
	uint64_t seq;

	seq = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
	if (seq >= rdev->fence_drv[ring].sync_seq[ring]) {
		/* nothing to wait for, last_seq is
		   already the last emitted fence */
		return -ENOENT;
	}
	return radeon_fence_wait_seq(rdev, seq, ring, false, false);
}
/**
 * radeon_fence_wait_empty_locked - wait for all fences to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * A detected GPU lockup triggers a GPU reset and the wait is retried;
 * other errors are logged.
 * Caller must hold ring lock.
 */
void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
{
	uint64_t seq = rdev->fence_drv[ring].sync_seq[ring];

	while (1) {
		int r;
		r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
		if (r == -EDEADLK) {
			mutex_unlock(&rdev->ring_lock);
			r = radeon_gpu_reset(rdev);
			mutex_lock(&rdev->ring_lock);
			if (!r)
				continue;
		}
		if (r) {
			dev_err(rdev->dev, "error waiting for ring to become"
				" idle (%d)\n", r);
		}
		return;
	}
}
/**
 * radeon_fence_ref - take a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Take a reference on a fence (all asics).
 * Returns the fence.
 */
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
	kref_get(&fence->kref);
	return fence;
}
/**
 * radeon_fence_unref - remove a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Remove a reference on a fence (all asics).
 */
void radeon_fence_unref(struct radeon_fence **fence)
{
	struct radeon_fence *tmp = *fence;

	*fence = NULL;
	if (tmp) {
		kref_put(&tmp->kref, radeon_fence_destroy);
	}
}
/**
 * radeon_fence_count_emitted - get the count of emitted fences
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring. Used by the
 * dynpm code to track ring activity.
 */
unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
{
	uint64_t emitted;

	/* We are not protected by ring lock when reading the last sequence
	 * but it's ok to report slightly wrong fence count here.
	 */
	radeon_fence_process(rdev, ring);
	emitted = rdev->fence_drv[ring].sync_seq[ring]
		- atomic64_read(&rdev->fence_drv[ring].last_seq);
	/* to avoid a 32-bit wrap around */
	if (emitted > 0x10000000) {
		emitted = 0x10000000;
	}
	return (unsigned)emitted;
}
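
/*
 * For example, with sync_seq[ring] == 22 and last_seq == 17, five fences
 * have been emitted but not yet signaled; the 0x10000000 clamp only guards
 * the unsigned return value against absurdly large differences.
 */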
/**
 * radeon_fence_need_sync - do we need a semaphore
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Check if the fence needs to be synced against another ring
 * (all asics). If so, we need to emit a semaphore.
 * Returns true if we need to sync with another ring, false if
 * not.
 */
bool radeon_fence_need_sync(struct radeon_fence *fence, int dst_ring)
{
	struct radeon_fence_driver *fdrv;

	if (!fence) {
		return false;
	}

	if (fence->ring == dst_ring) {
		return false;
	}

	/* we are protected by the ring mutex */
	fdrv = &fence->rdev->fence_drv[dst_ring];
	if (fence->seq <= fdrv->sync_seq[fence->ring]) {
		return false;
	}

	return true;
}
/**
 * radeon_fence_note_sync - record the sync point
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Note the sequence number at which point the fence will
 * be synced with the requested ring (all asics).
 */
void radeon_fence_note_sync(struct radeon_fence *fence, int dst_ring)
{
	struct radeon_fence_driver *dst, *src;
	unsigned i;

	if (!fence) {
		return;
	}

	if (fence->ring == dst_ring) {
		return;
	}

	/* we are protected by the ring mutex */
	src = &fence->rdev->fence_drv[fence->ring];
	dst = &fence->rdev->fence_drv[dst_ring];
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (i == dst_ring) {
			continue;
		}
		dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
	}
}
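
/*
 * In effect, sync_seq[] forms a small matrix: entry [i] of a ring's fence
 * driver records the highest sequence number of ring i that this ring is
 * known to have synced past, so radeon_fence_need_sync() can skip the
 * semaphore for fences at or below that point.
 */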
/**
 * radeon_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
{
	uint64_t index;
	int r;

	radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
	if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) {
		rdev->fence_drv[ring].scratch_reg = 0;
		index = R600_WB_EVENT_OFFSET + ring * 4;
	} else {
		r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
		if (r) {
			dev_err(rdev->dev, "fence failed to get scratch register\n");
			return r;
		}
		index = RADEON_WB_SCRATCH_OFFSET +
			rdev->fence_drv[ring].scratch_reg -
			rdev->scratch.reg_base;
	}
	rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
	rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
	radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring);
	rdev->fence_drv[ring].initialized = true;
	dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016llx and cpu addr 0x%p\n",
		 ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
	return 0;
}
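
/*
 * index is a byte offset into the writeback page, while wb.wb is an array
 * of 32-bit words, hence the index/4 when computing cpu_addr; gpu_addr is
 * the matching GPU-visible address the ring writes the fence value to.
 */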
/**
 * radeon_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for radeon_fence_driver_init().
 */
static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
{
	int i;

	rdev->fence_drv[ring].scratch_reg = -1;
	rdev->fence_drv[ring].cpu_addr = NULL;
	rdev->fence_drv[ring].gpu_addr = 0;
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		rdev->fence_drv[ring].sync_seq[i] = 0;
	atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
	rdev->fence_drv[ring].last_activity = jiffies;
	rdev->fence_drv[ring].initialized = false;
}
/**
 * radeon_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * radeon_fence_driver_start_ring().
 * Returns 0 for success.
 */
int radeon_fence_driver_init(struct radeon_device *rdev)
{
	int ring;

	init_waitqueue_head(&rdev->fence_queue);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		radeon_fence_driver_init_ring(rdev, ring);
	}
	if (radeon_debugfs_fence_init(rdev)) {
		dev_err(rdev->dev, "fence debugfs file creation failed\n");
	}
	return 0;
}
/**
 * radeon_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
	int ring;

	mutex_lock(&rdev->ring_lock);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		if (!rdev->fence_drv[ring].initialized)
			continue;
		radeon_fence_wait_empty_locked(rdev, ring);
		wake_up_all(&rdev->fence_queue);
		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
		rdev->fence_drv[ring].initialized = false;
	}
	mutex_unlock(&rdev->ring_lock);
}
/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int i, j;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!rdev->fence_drv[i].initialized)
			continue;

		seq_printf(m, "--- ring %d ---\n", i);
		seq_printf(m, "Last signaled fence 0x%016llx\n",
			   (unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq));
		seq_printf(m, "Last emitted        0x%016llx\n",
			   rdev->fence_drv[i].sync_seq[i]);

		for (j = 0; j < RADEON_NUM_RINGS; ++j) {
			if (i != j && rdev->fence_drv[j].initialized)
				seq_printf(m, "Last sync to ring %d 0x%016llx\n",
					   j, rdev->fence_drv[i].sync_seq[j]);
		}
	}
	return 0;
}

static struct drm_info_list radeon_debugfs_fence_list[] = {
	{"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
};
#endif

int radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 1);
#else
	return 0;
#endif
}