drm/amdgpu: make sure the fence is emitted before trying to get it.
authorChunming Zhou <david1.zhou@amd.com>
Tue, 21 Jul 2015 07:53:04 +0000 (15:53 +0800)
committerAlex Deucher <alexander.deucher@amd.com>
Mon, 17 Aug 2015 20:50:35 +0000 (16:50 -0400)
Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Jammy Zhou <Jammy.Zhou@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c

index becb26317467cc0d32f717e218d36b20f2b1c1c9..127867c2fc372cca34375d549d241df96aa7e889 100644 (file)
@@ -81,6 +81,7 @@ extern int amdgpu_vm_size;
 extern int amdgpu_vm_block_size;
 extern int amdgpu_enable_scheduler;
 
+#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS         3000
 #define AMDGPU_MAX_USEC_TIMEOUT                        100000  /* 100 ms */
 #define AMDGPU_FENCE_JIFFIES_TIMEOUT           (HZ / 2)
 /* AMDGPU_IB_POOL_SIZE must be a power of 2 */
@@ -1239,6 +1240,7 @@ struct amdgpu_cs_parser {
        /* user fence */
        struct amdgpu_user_fence uf;
 
+       struct amdgpu_ring *ring;
        struct mutex job_lock;
        struct work_struct job_work;
        int (*prepare_job)(struct amdgpu_cs_parser *sched_job);
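
Two small pieces of plumbing: a 3 second upper bound for waiting until a
fence has been emitted, and a ring pointer cached in the CS parser so that
code running after submission still knows which ring the job was pushed to.
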
index f9d4fe98566835cd6ba358a4a16a97c01f54ebb2..5f2403898b06c945cd8b4ddd71e898df56c05cb1 100644 (file)
@@ -915,7 +915,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                                goto out;
                } else
                        parser->prepare_job = amdgpu_cs_parser_prepare_job;
-
+               parser->ring = ring;
                parser->run_job = amdgpu_cs_parser_run_job;
                parser->free_job = amdgpu_cs_parser_free_job;
                amd_sched_push_job(ring->scheduler,
@@ -965,24 +965,16 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
        ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
        if (ctx == NULL)
                return -EINVAL;
-       if (amdgpu_enable_scheduler) {
-               r = amd_sched_wait_ts(&ctx->rings[ring->idx].c_entity,
-                                     wait->in.handle, true, timeout);
-               if (r)
-                       return r;
-               r = 1;
-       } else {
-               fence = amdgpu_ctx_get_fence(ctx, ring, wait->in.handle);
-               if (IS_ERR(fence))
-                       r = PTR_ERR(fence);
 
-               else if (fence) {
-                       r = fence_wait_timeout(fence, true, timeout);
-                       fence_put(fence);
+       fence = amdgpu_ctx_get_fence(ctx, ring, wait->in.handle);
+       if (IS_ERR(fence))
+               r = PTR_ERR(fence);
+       else if (fence) {
+               r = fence_wait_timeout(fence, true, timeout);
+               fence_put(fence);
+       } else
+               r = 1;
 
-               } else
-                       r = 1;
-       }
        amdgpu_ctx_put(ctx);
        if (r < 0)
                return r;
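
With the wait moved into amdgpu_ctx_get_fence(), the ioctl no longer needs
a scheduler special case: the scheduler path and the legacy path now fetch
the fence the same way and wait on it with fence_wait_timeout().
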
index b9be250cb206fbf84488092c954aabd80283bdab..41bc7fc0ebf6572f01ddc0b85ce736a58b12c543 100644 (file)
@@ -261,6 +261,16 @@ struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
        struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
        struct fence *fence;
        uint64_t queued_seq;
+       int r;
+
+       if (amdgpu_enable_scheduler) {
+               r = amd_sched_wait_emit(&cring->c_entity,
+                                       seq,
+                                       true,
+                                       AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS);
+               if (r)
+                       return NULL;
+       }
 
        spin_lock(&ctx->ring_lock);
        if (amdgpu_enable_scheduler)
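
This is the core of the fix: with the scheduler enabled, a lookup by
sequence number can arrive before the backend has run the job, so the fence
being asked for may not exist yet. Blocking on amd_sched_wait_emit() first,
bounded by AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS, guarantees the fence has been
emitted before it is fetched under ctx->ring_lock; a runnable model of the
handshake follows the scheduler hunk below.
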
index 1f7bf31da7fca0e54bc19c8b135c528399fbf10d..46ec915c93441afe29c44ec1980454991b164f74 100644 (file)
@@ -56,12 +56,15 @@ static void amdgpu_sched_run_job(struct amd_gpu_scheduler *sched,
                               sched_job->filp);
        if (r)
                goto err;
-
        if (sched_job->run_job) {
                r = sched_job->run_job(sched_job);
                if (r)
                        goto err;
        }
+       atomic64_set(&c_entity->last_emitted_v_seq,
+                    sched_job->uf.sequence);
+       wake_up_all(&c_entity->wait_emit);
+
        mutex_unlock(&sched_job->job_lock);
        return;
 err:
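
Taken together, the two halves form a simple emit/wait handshake:
amdgpu_sched_run_job() publishes the last emitted virtual sequence number
and wakes every waiter, while amdgpu_ctx_get_fence() sleeps until the
sequence it wants has been published. Below is a minimal, runnable
userspace model of that handshake; a pthread condition variable stands in
for the scheduler's wait_emit queue, and every name in it is illustrative
rather than amdgpu API:

    /* model of the emit/wait handshake; compile with -lpthread */
    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  wait_emit = PTHREAD_COND_INITIALIZER; /* ~ c_entity->wait_emit */
    static uint64_t last_emitted_v_seq;                          /* ~ c_entity->last_emitted_v_seq */

    /* producer side: models amdgpu_sched_run_job() publishing the sequence */
    static void run_job(uint64_t seq)
    {
            /* ... the hardware fence is emitted here ... */
            pthread_mutex_lock(&lock);
            last_emitted_v_seq = seq;           /* ~ atomic64_set() */
            pthread_cond_broadcast(&wait_emit); /* ~ wake_up_all() */
            pthread_mutex_unlock(&lock);
    }

    /* consumer side: models amd_sched_wait_emit() in amdgpu_ctx_get_fence() */
    static void wait_emit_seq(uint64_t seq)
    {
            pthread_mutex_lock(&lock);
            while (last_emitted_v_seq < seq)
                    pthread_cond_wait(&wait_emit, &lock); /* real call also takes a timeout */
            pthread_mutex_unlock(&lock);
    }

    static void *submitter(void *arg)
    {
            run_job((uint64_t)(uintptr_t)arg);
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            pthread_create(&t, NULL, submitter, (void *)(uintptr_t)1);
            wait_emit_seq(1); /* only after this is the fence lookup safe */
            pthread_join(t, NULL);
            printf("sequence 1 emitted, fence lookup is safe\n");
            return 0;
    }

Note the ordering on the producer side: the sequence is published before
the waiters are woken, exactly as the patch does with atomic64_set()
followed by wake_up_all(), so a waiter can never be woken and still
observe a stale sequence number.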