drm/amdgpu: cleanup amdgpu_fence_wait_empty v2
author     Christian König <christian.koenig@amd.com>
           Sun, 13 Mar 2016 18:37:01 +0000 (19:37 +0100)
committer  Alex Deucher <alexander.deucher@amd.com>
           Wed, 16 Mar 2016 21:59:32 +0000 (17:59 -0400)
Just wait for the last fence instead of waiting for the sequence number manually.

v2: don't use amdgpu_sched_jobs for the mask

Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
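
For reference, the new implementation keeps the fence for each emitted sequence number in an RCU-protected ring buffer (fence_drv.fences[]) and looks up the most recently emitted one with seq & num_fences_mask; that indexing only works as a modulo when the slot count is a power of two, which is why the mask is no longer derived from amdgpu_sched_jobs. Below is a simplified, hedged sketch of that lookup-and-wait pattern. The example_* names are illustrative stand-ins and not part of the driver; only the fence and RCU helpers (fence_get_rcu, fence_wait, fence_put, rcu_dereference, ACCESS_ONCE) are the actual kernel APIs the patch uses.

    #include <linux/fence.h>
    #include <linux/rcupdate.h>

    /* Illustrative stand-in for the relevant amdgpu_fence_driver fields:
     * a power-of-two array of RCU-protected fence pointers plus the last
     * emitted sequence number. */
    struct example_fence_ring {
            struct fence    **fences;               /* num_fences_mask + 1 slots */
            unsigned        num_fences_mask;        /* slot count - 1, power of two */
            uint64_t        sync_seq;               /* last emitted sequence number */
    };

    /* Wait for the most recently emitted fence instead of polling the
     * hardware sequence number in a loop. */
    static int example_wait_last_fence(struct example_fence_ring *ring)
    {
            uint64_t seq = ACCESS_ONCE(ring->sync_seq);
            struct fence *fence, **ptr;
            int r;

            if (!seq)
                    return 0;       /* nothing was ever emitted */

            /* The slot can be replaced or dropped concurrently, so take a
             * reference under RCU before waiting on the fence. */
            ptr = &ring->fences[seq & ring->num_fences_mask];
            rcu_read_lock();
            fence = rcu_dereference(*ptr);
            if (!fence || !fence_get_rcu(fence)) {
                    rcu_read_unlock();
                    return 0;       /* fence already released, so it signaled */
            }
            rcu_read_unlock();

            r = fence_wait(fence, false);   /* non-interruptible wait */
            fence_put(fence);
            return r;
    }

If fence_get_rcu() fails, the fence is already being freed, which can only happen after it has signaled, so returning 0 without waiting is safe.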
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c

index 44eac91163eb5c2b4a7736aef634d7ab57f86aa0..d5bdd9633c8549b79ccff59fde3748f852a156aa 100644
@@ -227,57 +227,6 @@ static void amdgpu_fence_fallback(unsigned long arg)
        amdgpu_fence_process(ring);
 }
 
-/**
- * amdgpu_fence_seq_signaled - check if a fence sequence number has signaled
- *
- * @ring: ring the fence is associated with
- * @seq: sequence number
- *
- * Check if the last signaled fence sequnce number is >= the requested
- * sequence number (all asics).
- * Returns true if the fence has signaled (current fence value
- * is >= requested value) or false if it has not (current fence
- * value is < the requested value.  Helper function for
- * amdgpu_fence_signaled().
- */
-static bool amdgpu_fence_seq_signaled(struct amdgpu_ring *ring, u64 seq)
-{
-       if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
-               return true;
-
-       /* poll new last sequence at least once */
-       amdgpu_fence_process(ring);
-       if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
-               return true;
-
-       return false;
-}
-
-/*
- * amdgpu_ring_wait_seq - wait for seq of the specific ring to signal
- * @ring: ring to wait on for the seq number
- * @seq: seq number wait for
- *
- * return value:
- * 0: seq signaled, and gpu not hang
- * -EINVAL: some paramter is not valid
- */
-static int amdgpu_fence_ring_wait_seq(struct amdgpu_ring *ring, uint64_t seq)
-{
-       BUG_ON(!ring);
-       if (seq > ring->fence_drv.sync_seq)
-               return -EINVAL;
-
-       if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
-               return 0;
-
-       amdgpu_fence_schedule_fallback(ring);
-       wait_event(ring->fence_drv.fence_queue,
-                  amdgpu_fence_seq_signaled(ring, seq));
-
-       return 0;
-}
-
 /**
  * amdgpu_fence_wait_empty - wait for all fences to signal
  *
@@ -286,16 +235,28 @@ static int amdgpu_fence_ring_wait_seq(struct amdgpu_ring *ring, uint64_t seq)
  *
  * Wait for all fences on the requested ring to signal (all asics).
  * Returns 0 if the fences have passed, error for all other cases.
- * Caller must hold ring lock.
  */
 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
 {
-       uint64_t seq = ring->fence_drv.sync_seq;
+       uint64_t seq = ACCESS_ONCE(ring->fence_drv.sync_seq);
+       struct fence *fence, **ptr;
+       int r;
 
        if (!seq)
                return 0;
 
-       return amdgpu_fence_ring_wait_seq(ring, seq);
+       ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
+       rcu_read_lock();
+       fence = rcu_dereference(*ptr);
+       if (!fence || !fence_get_rcu(fence)) {
+               rcu_read_unlock();
+               return 0;
+       }
+       rcu_read_unlock();
+
+       r = fence_wait(fence, false);
+       fence_put(fence);
+       return r;
 }
 
 /**