}
} while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq);
- if (wake) {
- if (amdgpu_enable_scheduler) {
- uint64_t handled_seq =
- amd_sched_get_handled_seq(ring->scheduler);
- uint64_t latest_seq =
- atomic64_read(&ring->fence_drv.last_seq);
- if (handled_seq == latest_seq) {
- DRM_ERROR("ring %d, EOP without seq update (lastest_seq=%llu)\n",
- ring->idx, latest_seq);
- goto exit;
- }
- do {
- amd_sched_isr(ring->scheduler);
- } while (amd_sched_get_handled_seq(ring->scheduler) < latest_seq);
- }
-
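+ /*
+ * Only wake the fence waiters here; the GPU scheduler is notified
+ * separately through the per-job fence callback (amdgpu_fence_sched_cb)
+ * rather than being polled from this path.
+ */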
+ if (wake)
wake_up_all(&ring->fence_drv.fence_queue);
- }
-exit:
spin_unlock_irqrestore(&ring->fence_lock, irqflags);
}
return r;
}
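+/*
+ * Fence callback attached to a scheduled job's hardware fence: once the
+ * fence signals, run the scheduler ISR for the fence's ring.
+ */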
+static void amdgpu_fence_sched_cb(struct fence *f, struct fence_cb *cb)
+{
+ struct amdgpu_fence *fence =
+ container_of(cb, struct amdgpu_fence, cb);
+ amd_sched_isr(fence->ring->scheduler);
+}
+
static void amdgpu_sched_run_job(struct amd_gpu_scheduler *sched,
struct amd_context_entity *c_entity,
void *job)
{
int r = 0;
struct amdgpu_cs_parser *sched_job = (struct amdgpu_cs_parser *)job;
+ struct amdgpu_fence *fence;
mutex_lock(&sched_job->job_lock);
r = amdgpu_ib_schedule(sched_job->adev,
sched_job->num_ibs,
sched_job->ibs,
sched_job->filp);
if (r)
goto err;
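+ /*
+ * Attach amdgpu_fence_sched_cb to the fence of the job's last IB so
+ * the scheduler ISR runs when the job completes on the hardware.
+ */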
+ fence = sched_job->ibs[sched_job->num_ibs - 1].fence;
+ if (fence_add_callback(&fence->base,
+ &fence->cb, amdgpu_fence_sched_cb))
+ goto err;
+
if (sched_job->run_job) {
r = sched_job->run_job(sched_job);
if (r)