drm/amdgpu: remove process_job callback from the scheduler
author	Christian König <christian.koenig@amd.com>
	Mon, 31 Aug 2015 15:28:28 +0000 (17:28 +0200)
committer	Alex Deucher <alexander.deucher@amd.com>
	Wed, 23 Sep 2015 21:23:33 +0000 (17:23 -0400)
Just free the resources immediately after submitting the job.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
Reviewed-by: Jammy Zhou <Jammy.Zhou@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
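
For orientation before the diff: a minimal, hypothetical sketch of what a scheduler backend looks like once process_job is gone. It is not part of the patch; the my_* names and the my_job type are placeholders. The idea shown is the one from the commit message: run_job submits the work, frees the job's resources itself, and returns the hardware fence, so no separate process_job callback is needed.

/* Hypothetical backend sketch -- names prefixed my_ are placeholders. */
struct my_job {
	struct amd_sched_job base;	/* embedded scheduler job */
	struct fence *hw_fence;		/* fence signalled by the hardware */
};

static struct fence *my_run_job(struct amd_sched_job *job)
{
	struct my_job *s_job = container_of(job, struct my_job, base);
	struct fence *fence = NULL;

	if (!my_submit_to_ring(s_job))		/* placeholder: push the IBs to the ring */
		fence = fence_get(s_job->hw_fence);

	my_free_job_resources(s_job);		/* free right after submission */
	return fence;	/* scheduler installs its completion callback, or copes with NULL */
}

static struct amd_sched_backend_ops my_sched_ops = {
	.dependency = my_dependency,		/* placeholder dependency callback */
	.run_job    = my_run_job,
	/* no .process_job any more */
};
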

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index de98fbd2971eded37ecb896921255d38787ce7b5..5724a81fbf5eecc3523b954c05e750cd7f98beb3 100644
@@ -35,8 +35,8 @@ static struct fence *amdgpu_sched_dependency(struct amd_sched_job *job)
 
 static struct fence *amdgpu_sched_run_job(struct amd_sched_job *job)
 {
+       struct amdgpu_fence *fence = NULL;
        struct amdgpu_job *sched_job;
-       struct amdgpu_fence *fence;
        int r;
 
        if (!job) {
@@ -49,41 +49,26 @@ static struct fence *amdgpu_sched_run_job(struct amd_sched_job *job)
                               sched_job->num_ibs,
                               sched_job->ibs,
                               sched_job->base.owner);
-       if (r)
+       if (r) {
+               DRM_ERROR("Error scheduling IBs (%d)\n", r);
                goto err;
+       }
+
        fence = amdgpu_fence_ref(sched_job->ibs[sched_job->num_ibs - 1].fence);
 
+err:
        if (sched_job->free_job)
                sched_job->free_job(sched_job);
 
        mutex_unlock(&sched_job->job_lock);
-       return &fence->base;
-
-err:
-       DRM_ERROR("Run job error\n");
-       mutex_unlock(&sched_job->job_lock);
-       job->sched->ops->process_job(job);
-       return NULL;
-}
-
-static void amdgpu_sched_process_job(struct amd_sched_job *job)
-{
-       struct amdgpu_job *sched_job;
-
-       if (!job) {
-               DRM_ERROR("job is null\n");
-               return;
-       }
-       sched_job = (struct amdgpu_job *)job;
-       /* after processing job, free memory */
        fence_put(&sched_job->base.s_fence->base);
        kfree(sched_job);
+       return fence ? &fence->base : NULL;
 }
 
 struct amd_sched_backend_ops amdgpu_sched_ops = {
        .dependency = amdgpu_sched_dependency,
        .run_job = amdgpu_sched_run_job,
-       .process_job = amdgpu_sched_process_job
 };
 
 int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 92b000d4307e7586053130406cab968c95e36aaa..191fd513d9791da1e7fbbbac833b834a78bf5d35 100644
@@ -354,7 +354,6 @@ static int amd_sched_main(void *param)
                s_fence = job->s_fence;
                atomic_inc(&sched->hw_rq_count);
                fence = sched->ops->run_job(job);
-               sched->ops->process_job(job);
                if (fence) {
                        r = fence_add_callback(fence, &s_fence->cb,
                                               amd_sched_process_job);
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index 7a0552fb6ba410243df003c3fcb69f69deb36fb0..ac56d9211f7ca957dcf671c8c7bbcbee3f41a6e5 100644
@@ -93,7 +93,6 @@ static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
 struct amd_sched_backend_ops {
        struct fence *(*dependency)(struct amd_sched_job *job);
        struct fence *(*run_job)(struct amd_sched_job *job);
-       void (*process_job)(struct amd_sched_job *job);
 };
 
 /**