drm/amdgpu: clean up non-scheduler code path (v2)
author     Chunming Zhou <David1.Zhou@amd.com>
           Fri, 15 Jan 2016 03:25:00 +0000 (11:25 +0800)
committer  Alex Deucher <alexander.deucher@amd.com>
           Wed, 10 Feb 2016 19:16:50 +0000 (14:16 -0500)
Non-scheduler code is no longer supported, so remove that code path.

v2: agd: rebased on upstream

Signed-off-by: Chunming Zhou <David1.Zhou@amd.com>
Reviewed-by: Ken Wang <Qingqing.Wang@amd.com>
Reviewed-by: Monk Liu <monk.liu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

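For orientation before the per-file diffs: after this patch every submission, kernel or userspace, goes through the GPU scheduler. The sketch below is condensed from the amdgpu_sched.c hunk further down (not the driver's verbatim code); the deleted else-branches that called amdgpu_ib_schedule() directly are the "non-scheduler code path" the subject line refers to.

        /*
         * Condensed sketch of the now-mandatory submission flow; names are
         * taken from the hunks in this patch, error paths abbreviated.
         */
        struct amdgpu_job *job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
        if (!job)
                return -ENOMEM;

        job->base.sched    = &ring->sched;
        job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity;
        job->base.s_fence  = amd_sched_fence_create(job->base.s_entity, owner);
        if (!job->base.s_fence) {
                kfree(job);
                return -ENOMEM;
        }
        *f = fence_get(&job->base.s_fence->base);   /* caller keeps a reference */

        job->adev     = adev;
        job->ibs      = ibs;
        job->num_ibs  = num_ibs;
        job->owner    = owner;
        job->free_job = free_job;
        amd_sched_entity_push_job(&job->base);      /* scheduler now owns the job */
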
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 8c55c1d78ffe1822f0ad19e6a1373234b8906bd1..9d3dff2442179bea5fc8e0e124aba2d5cba5c593 100644
@@ -82,7 +82,6 @@ extern int amdgpu_vm_size;
 extern int amdgpu_vm_block_size;
 extern int amdgpu_vm_fault_stop;
 extern int amdgpu_vm_debug;
-extern int amdgpu_enable_scheduler;
 extern int amdgpu_sched_jobs;
 extern int amdgpu_sched_hw_submission;
 extern int amdgpu_enable_semaphores;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 0479ad5a66edde023da726044a4a229e1f27f309..ddeba55c3b7d0081ef370821892b8fdcf80eced4 100644
@@ -813,7 +813,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
        if (r)
                goto out;
 
-       if (amdgpu_enable_scheduler && parser.num_ibs) {
+       if (parser.num_ibs) {
                struct amdgpu_ring * ring = parser.ibs->ring;
                struct amd_sched_fence *fence;
                struct amdgpu_job *job;
@@ -858,15 +858,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 
                trace_amdgpu_cs_ioctl(job);
                amd_sched_entity_push_job(&job->base);
-
-       } else {
-               struct amdgpu_fence *fence;
-
-               r = amdgpu_ib_schedule(adev, parser.num_ibs, parser.ibs,
-                                      parser.filp);
-               fence = parser.ibs[parser.num_ibs - 1].fence;
-               parser.fence = fence_get(&fence->base);
-               cs->out.handle = parser.ibs[parser.num_ibs - 1].sequence;
        }
 
 out:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 17d1fb12128a26dafbb0c9cd8bbf3ee2f0e05feb..f1f4b453ece127dd8dacb0356454a55ebd61f52a 100644
@@ -45,29 +45,27 @@ int amdgpu_ctx_init(struct amdgpu_device *adev, enum amd_sched_priority pri,
                ctx->rings[i].fences = (void *)ctx->fences + sizeof(struct fence *) *
                        amdgpu_sched_jobs * i;
        }
-       if (amdgpu_enable_scheduler) {
-               /* create context entity for each ring */
-               for (i = 0; i < adev->num_rings; i++) {
-                       struct amd_sched_rq *rq;
-                       if (pri >= AMD_SCHED_MAX_PRIORITY) {
-                               kfree(ctx->fences);
-                               return -EINVAL;
-                       }
-                       rq = &adev->rings[i]->sched.sched_rq[pri];
-                       r = amd_sched_entity_init(&adev->rings[i]->sched,
-                                                 &ctx->rings[i].entity,
-                                                 rq, amdgpu_sched_jobs);
-                       if (r)
-                               break;
-               }
-
-               if (i < adev->num_rings) {
-                       for (j = 0; j < i; j++)
-                               amd_sched_entity_fini(&adev->rings[j]->sched,
-                                                     &ctx->rings[j].entity);
+       /* create context entity for each ring */
+       for (i = 0; i < adev->num_rings; i++) {
+               struct amd_sched_rq *rq;
+               if (pri >= AMD_SCHED_MAX_PRIORITY) {
                        kfree(ctx->fences);
-                       return r;
+                       return -EINVAL;
                }
+               rq = &adev->rings[i]->sched.sched_rq[pri];
+               r = amd_sched_entity_init(&adev->rings[i]->sched,
+                                         &ctx->rings[i].entity,
+                                         rq, amdgpu_sched_jobs);
+               if (r)
+                       break;
+       }
+
+       if (i < adev->num_rings) {
+               for (j = 0; j < i; j++)
+                       amd_sched_entity_fini(&adev->rings[j]->sched,
+                                             &ctx->rings[j].entity);
+               kfree(ctx->fences);
+               return r;
        }
        return 0;
 }
@@ -85,11 +83,9 @@ void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
                        fence_put(ctx->rings[i].fences[j]);
        kfree(ctx->fences);
 
-       if (amdgpu_enable_scheduler) {
-               for (i = 0; i < adev->num_rings; i++)
-                       amd_sched_entity_fini(&adev->rings[i]->sched,
-                                             &ctx->rings[i].entity);
-       }
+       for (i = 0; i < adev->num_rings; i++)
+               amd_sched_entity_fini(&adev->rings[i]->sched,
+                                     &ctx->rings[i].entity);
 }
 
 static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
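
Note the error-handling pattern the un-indented code above preserves: entity init can fail partway through the ring loop, and only the entities that were successfully initialized get torn down. A generic sketch of that init-with-rollback idiom (init_one()/fini_one() are hypothetical stand-ins; only the shape matches the hunk):

        /* Init-with-rollback sketch; init_one()/fini_one() are hypothetical. */
        for (i = 0; i < n; i++) {
                r = init_one(i);
                if (r)
                        break;          /* leave i pointing at the failed slot */
        }
        if (i < n) {                    /* partial failure: undo 0 .. i-1 */
                for (j = 0; j < i; j++)
                        fini_one(j);
                return r;
        }
        return 0;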
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 11573fd1f053144f7d8e65e26e27d65ad257f16e..8af888a2aa9f82cf6c106589556c2cb43e329268 100644
@@ -78,7 +78,6 @@ int amdgpu_vm_block_size = -1;
 int amdgpu_vm_fault_stop = 0;
 int amdgpu_vm_debug = 0;
 int amdgpu_exp_hw_support = 0;
-int amdgpu_enable_scheduler = 1;
 int amdgpu_sched_jobs = 32;
 int amdgpu_sched_hw_submission = 2;
 int amdgpu_powerplay = -1;
@@ -152,9 +151,6 @@ module_param_named(vm_debug, amdgpu_vm_debug, int, 0644);
 MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))");
 module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
 
-MODULE_PARM_DESC(enable_scheduler, "enable SW GPU scheduler (1 = enable (default), 0 = disable)");
-module_param_named(enable_scheduler, amdgpu_enable_scheduler, int, 0444);
-
 MODULE_PARM_DESC(sched_jobs, "the max number of jobs supported in the sw queue (default 32)");
 module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444);
 
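Because the enable_scheduler knob is removed outright rather than hard-wired to 1, passing it at module load should now just produce the kernel's standard "unknown parameter ... ignored" warning. The scheduler tunables that remain are the ones shown above; an illustrative invocation with their current defaults:

        modprobe amdgpu sched_jobs=32 sched_hw_submission=2
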
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 3671f9f220bd47617d0cd3761543cedcae581d5e..cac03e743b585fb80d99f0b10da18ec3cfda6325 100644
@@ -472,6 +472,7 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
 int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
 {
        int i, r;
+       long timeout;
 
        ring->fence_drv.cpu_addr = NULL;
        ring->fence_drv.gpu_addr = 0;
@@ -486,26 +487,24 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
 
        init_waitqueue_head(&ring->fence_drv.fence_queue);
 
-       if (amdgpu_enable_scheduler) {
-               long timeout = msecs_to_jiffies(amdgpu_lockup_timeout);
-               if (timeout == 0) {
-                       /*
-                        * FIXME:
-                        * Delayed workqueue cannot use it directly,
-                        * so the scheduler will not use delayed workqueue if
-                        * MAX_SCHEDULE_TIMEOUT is set.
-                        * Currently keep it simple and silly.
-                        */
-                       timeout = MAX_SCHEDULE_TIMEOUT;
-               }
-               r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
-                                  amdgpu_sched_hw_submission,
-                                  timeout, ring->name);
-               if (r) {
-                       DRM_ERROR("Failed to create scheduler on ring %s.\n",
-                                 ring->name);
-                       return r;
-               }
+       timeout = msecs_to_jiffies(amdgpu_lockup_timeout);
+       if (timeout == 0) {
+               /*
+                * FIXME:
+                * Delayed workqueue cannot use it directly,
+                * so the scheduler will not use delayed workqueue if
+                * MAX_SCHEDULE_TIMEOUT is set.
+                * Currently keep it simple and silly.
+                */
+               timeout = MAX_SCHEDULE_TIMEOUT;
+       }
+       r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
+                          amdgpu_sched_hw_submission,
+                          timeout, ring->name);
+       if (r) {
+               DRM_ERROR("Failed to create scheduler on ring %s.\n",
+                         ring->name);
+               return r;
        }
 
        return 0;
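
The FIXME above is about unit and sentinel conversion: amdgpu_lockup_timeout is a millisecond module parameter, and a value of 0 (lockup detection disabled) must not reach the scheduler as a 0-jiffy delay, so it is widened to MAX_SCHEDULE_TIMEOUT. Condensed from the hunk:

        long timeout = msecs_to_jiffies(amdgpu_lockup_timeout); /* ms -> jiffies */
        if (timeout == 0)       /* 0 means "no lockup timeout", not "expire now" */
                timeout = MAX_SCHEDULE_TIMEOUT;
        r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
                           amdgpu_sched_hw_submission, timeout, ring->name);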
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 3b58d70b73cd9679d4746f7d5a4b502cee2c5740..54cede30a69c7963ae5e312b80d42f33ecf51d2a 100644
@@ -199,10 +199,6 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
                return r;
        }
 
-       if (!amdgpu_enable_scheduler && ib->ctx)
-               ib->sequence = amdgpu_ctx_add_fence(ib->ctx, ring,
-                                                   &ib->fence->base);
-
        /* wrap the last IB with fence */
        if (ib->user) {
                uint64_t addr = amdgpu_bo_gpu_offset(ib->user->bo);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index 438c05254695586bb95a9e3caa2ea356aa4b941f..dd9fac302e55b8fa1b07ffe1b528aa6e411c707f 100644
@@ -76,33 +76,25 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
                                         void *owner,
                                         struct fence **f)
 {
-       int r = 0;
-       if (amdgpu_enable_scheduler) {
-               struct amdgpu_job *job =
-                       kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
-               if (!job)
-                       return -ENOMEM;
-               job->base.sched = &ring->sched;
-               job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity;
-               job->base.s_fence = amd_sched_fence_create(job->base.s_entity, owner);
-               if (!job->base.s_fence) {
-                       kfree(job);
-                       return -ENOMEM;
-               }
-               *f = fence_get(&job->base.s_fence->base);
-
-               job->adev = adev;
-               job->ibs = ibs;
-               job->num_ibs = num_ibs;
-               job->owner = owner;
-               job->free_job = free_job;
-               amd_sched_entity_push_job(&job->base);
-       } else {
-               r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner);
-               if (r)
-                       return r;
-               *f = fence_get(&ibs[num_ibs - 1].fence->base);
+       struct amdgpu_job *job =
+               kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
+       if (!job)
+               return -ENOMEM;
+       job->base.sched = &ring->sched;
+       job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity;
+       job->base.s_fence = amd_sched_fence_create(job->base.s_entity, owner);
+       if (!job->base.s_fence) {
+               kfree(job);
+               return -ENOMEM;
        }
+       *f = fence_get(&job->base.s_fence->base);
+
+       job->adev = adev;
+       job->ibs = ibs;
+       job->num_ibs = num_ibs;
+       job->owner = owner;
+       job->free_job = free_job;
+       amd_sched_entity_push_job(&job->base);
 
        return 0;
 }
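
One ordering detail in the helper above: the caller's reference is taken with fence_get() before amd_sched_entity_push_job(), because once the job is pushed the scheduler may run and release it at any time. A caller-side sketch (the helper's leading parameters are elided in the hunk, so submit_job() below is a hypothetical stand-in):

        struct fence *f = NULL;
        int r = submit_job(..., owner, &f);  /* hypothetical wrapper for the helper */
        if (r)
                return r;
        fence_wait(f, false);                /* block until the scheduled job completes */
        fence_put(f);                        /* drop the reference the helper took */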
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 6442a06d6fdc1ce9115d280cc916b4e55923f2fa..100bfd4a0707a5066547f8dad33ec9ed55527816 100644
@@ -1070,10 +1070,6 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
        if (r)
                goto error_free;
 
-       if (!amdgpu_enable_scheduler) {
-               amdgpu_ib_free(adev, ib);
-               kfree(ib);
-       }
        return 0;
 error_free:
        amdgpu_ib_free(adev, ib);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 53f987aeeacff5a4b48663850c7a413d64a9ba76..72193f1c8e99a40a21e9e7ad1ce12f646a9f49a1 100644
@@ -895,11 +895,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring,
                *fence = fence_get(f);
        amdgpu_bo_unref(&bo);
        fence_put(f);
-       if (amdgpu_enable_scheduler)
-               return 0;
 
-       amdgpu_ib_free(ring->adev, ib);
-       kfree(ib);
        return 0;
 err2:
        amdgpu_ib_free(ring->adev, ib);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index e882fbfacb121de520a63302c62c0830e362b4d8..16fbde9c5f56c916c14bee508d19e77528d9d826 100644
@@ -432,8 +432,7 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
        if (fence)
                *fence = fence_get(f);
        fence_put(f);
-       if (amdgpu_enable_scheduler)
-               return 0;
+       return 0;
 err:
        amdgpu_ib_free(adev, ib);
        kfree(ib);
@@ -499,8 +498,7 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
        if (fence)
                *fence = fence_get(f);
        fence_put(f);
-       if (amdgpu_enable_scheduler)
-               return 0;
+       return 0;
 err:
        amdgpu_ib_free(adev, ib);
        kfree(ib);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 7e6414cffbefffed53645f0003bbcd5f17c65c38..cc28bdc02078a1b39700616e2455307cf8608817 100644
@@ -401,8 +401,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
        if (!r)
                amdgpu_bo_fence(bo, fence, true);
        fence_put(fence);
-       if (amdgpu_enable_scheduler)
-               return 0;
+       return 0;
 
 error_free:
        amdgpu_ib_free(adev, ib);
@@ -536,7 +535,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
                fence_put(fence);
        }
 
-       if (!amdgpu_enable_scheduler || ib->length_dw == 0) {
+       if (ib->length_dw == 0) {
                amdgpu_ib_free(adev, ib);
                kfree(ib);
        }
@@ -819,10 +818,6 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
                *fence = fence_get(f);
        }
        fence_put(f);
-       if (!amdgpu_enable_scheduler) {
-               amdgpu_ib_free(adev, ib);
-               kfree(ib);
-       }
        return 0;
 
 error_free: