extern int amdgpu_vm_block_size;
extern int amdgpu_enable_scheduler;
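+/* timeout, in ms, used for scheduler waits (e.g. amd_sched_wait_emit()) */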
+#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
#define AMDGPU_FENCE_JIFFIES_TIMEOUT (HZ / 2)
/* AMDGPU_IB_POOL_SIZE must be a power of 2 */
/* user fence */
struct amdgpu_user_fence uf;
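+ /* ring this submission is pushed to; set by the CS ioctl */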
+ struct amdgpu_ring *ring;
struct mutex job_lock;
struct work_struct job_work;
int (*prepare_job)(struct amdgpu_cs_parser *sched_job);
goto out;
} else
parser->prepare_job = amdgpu_cs_parser_prepare_job;
-
+ parser->ring = ring;
parser->run_job = amdgpu_cs_parser_run_job;
parser->free_job = amdgpu_cs_parser_free_job;
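+ /* hand the prepared job to the GPU scheduler attached to this ring */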
amd_sched_push_job(ring->scheduler,
ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
if (ctx == NULL)
return -EINVAL;
- if (amdgpu_enable_scheduler) {
- r = amd_sched_wait_ts(&ctx->rings[ring->idx].c_entity,
- wait->in.handle, true, timeout);
- if (r)
- return r;
- r = 1;
- } else {
- fence = amdgpu_ctx_get_fence(ctx, ring, wait->in.handle);
- if (IS_ERR(fence))
- r = PTR_ERR(fence);
- else if (fence) {
- r = fence_wait_timeout(fence, true, timeout);
- fence_put(fence);
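+ /* the scheduler wait now happens inside amdgpu_ctx_get_fence() */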
+ fence = amdgpu_ctx_get_fence(ctx, ring, wait->in.handle);
+ if (IS_ERR(fence))
+ r = PTR_ERR(fence);
+ else if (fence) {
+ r = fence_wait_timeout(fence, true, timeout);
+ fence_put(fence);
+ } else
+ r = 1;
- } else
- r = 1;
- }
amdgpu_ctx_put(ctx);
if (r < 0)
return r;
struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
struct fence *fence;
uint64_t queued_seq;
+ int r;
+
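+ /* wait for the scheduler to emit the job before looking up its fence */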
+ if (amdgpu_enable_scheduler) {
+ r = amd_sched_wait_emit(&cring->c_entity,
+ seq,
+ true,
+ AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS);
+ if (r)
+ return NULL;
+ }
spin_lock(&ctx->ring_lock);
if (amdgpu_enable_scheduler)
sched_job->filp);
if (r)
goto err;
-
if (sched_job->run_job) {
r = sched_job->run_job(sched_job);
if (r)
goto err;
}
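+ /* publish the emitted sequence and wake waiters in amd_sched_wait_emit() */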
+ atomic64_set(&c_entity->last_emitted_v_seq,
+ sched_job->uf.sequence);
+ wake_up_all(&c_entity->wait_emit);
+
mutex_unlock(&sched_job->job_lock);
return;
err: