drm/amdgpu: rename fence->scheduler to sched v2
authorChristian König <christian.koenig@amd.com>
Mon, 7 Sep 2015 16:16:49 +0000 (18:16 +0200)
committerAlex Deucher <alexander.deucher@amd.com>
Wed, 23 Sep 2015 21:23:37 +0000 (17:23 -0400)
Just to be consistent with the other members.

v2: rename the ring member as well.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com> (v1)
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
drivers/gpu/drm/amd/scheduler/sched_fence.c

index dbe061bf88cc22fb5f4a8444220c8cd23992e36d..9108b7c7d4a3d34b652744cefa1c86f5f0cb29de 100644 (file)
@@ -891,7 +891,7 @@ struct amdgpu_ring {
        struct amdgpu_device            *adev;
        const struct amdgpu_ring_funcs  *funcs;
        struct amdgpu_fence_driver      fence_drv;
-       struct amd_gpu_scheduler        *scheduler;
+       struct amd_gpu_scheduler        *sched;
 
        spinlock_t              fence_lock;
        struct mutex            *ring_lock;
index 546968a83b08ffba7db794579a438d0507aa61f5..6f39b2d2106da156fb37ee617ba7b21d0c343282 100644 (file)
@@ -848,7 +848,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
                if (!job)
                        return -ENOMEM;
-               job->base.sched = ring->scheduler;
+               job->base.sched = ring->sched;
                job->base.s_entity = &parser->ctx->rings[ring->idx].entity;
                job->adev = parser->adev;
                job->ibs = parser->ibs;
index 20cbc4eb5a6f7f7bfc75b4c80f010eee061d8499..5494831e1a247010b3dbcd4de504b566fc0734f9 100644 (file)
@@ -43,10 +43,10 @@ int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
                for (i = 0; i < adev->num_rings; i++) {
                        struct amd_sched_rq *rq;
                        if (kernel)
-                               rq = &adev->rings[i]->scheduler->kernel_rq;
+                               rq = &adev->rings[i]->sched->kernel_rq;
                        else
-                               rq = &adev->rings[i]->scheduler->sched_rq;
-                       r = amd_sched_entity_init(adev->rings[i]->scheduler,
+                               rq = &adev->rings[i]->sched->sched_rq;
+                       r = amd_sched_entity_init(adev->rings[i]->sched,
                                                  &ctx->rings[i].entity,
                                                  rq, amdgpu_sched_jobs);
                        if (r)
@@ -55,7 +55,7 @@ int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
 
                if (i < adev->num_rings) {
                        for (j = 0; j < i; j++)
-                               amd_sched_entity_fini(adev->rings[j]->scheduler,
+                               amd_sched_entity_fini(adev->rings[j]->sched,
                                                      &ctx->rings[j].entity);
                        kfree(ctx);
                        return r;
@@ -75,7 +75,7 @@ void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
 
        if (amdgpu_enable_scheduler) {
                for (i = 0; i < adev->num_rings; i++)
-                       amd_sched_entity_fini(adev->rings[i]->scheduler,
+                       amd_sched_entity_fini(adev->rings[i]->sched,
                                              &ctx->rings[i].entity);
        }
 }
index 1be2bd6d07eac6593274038967b484d9525f04e3..8e8cd09d36c12b53f78819c097018c80f8b203d2 100644 (file)
@@ -626,11 +626,11 @@ void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
        ring->fence_drv.ring = ring;
 
        if (amdgpu_enable_scheduler) {
-               ring->scheduler = amd_sched_create(&amdgpu_sched_ops,
-                                                  ring->idx,
-                                                  amdgpu_sched_hw_submission,
-                                                  (void *)ring->adev);
-               if (!ring->scheduler)
+               ring->sched = amd_sched_create(&amdgpu_sched_ops,
+                                              ring->idx,
+                                              amdgpu_sched_hw_submission,
+                                              (void *)ring->adev);
+               if (!ring->sched)
                        DRM_ERROR("Failed to create scheduler on ring %d.\n",
                                  ring->idx);
        }
@@ -681,8 +681,8 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
                wake_up_all(&ring->fence_drv.fence_queue);
                amdgpu_irq_put(adev, ring->fence_drv.irq_src,
                               ring->fence_drv.irq_type);
-               if (ring->scheduler)
-                       amd_sched_destroy(ring->scheduler);
+               if (ring->sched)
+                       amd_sched_destroy(ring->sched);
                ring->fence_drv.initialized = false;
        }
        mutex_unlock(&adev->ring_lock);
index b70ce106ca90cf135a060f56840b9eb9299507d9..7cf5405afe4e447121bb1d45f994d3f0b3e76582 100644 (file)
@@ -146,7 +146,7 @@ static uint32_t amdgpu_sa_get_ring_from_fence(struct fence *f)
 
        s_fence = to_amd_sched_fence(f);
        if (s_fence)
-               return s_fence->scheduler->ring_id;
+               return s_fence->sched->ring_id;
        a_fence = to_amdgpu_fence(f);
        if (a_fence)
                return a_fence->ring->idx;
@@ -437,7 +437,7 @@ void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
                        if (s_fence)
                                seq_printf(m, " protected by 0x%016x on ring %d",
                                           s_fence->base.seqno,
-                                          s_fence->scheduler->ring_id);
+                                          s_fence->sched->ring_id);
 
                }
                seq_printf(m, "\n");
index 58408da122c5b6251b12db85b20688bfa2a8ec5c..d1984fc5dfc41b2689da3ad42e2027e799b3648a 100644 (file)
@@ -85,7 +85,7 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
                        kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
                if (!job)
                        return -ENOMEM;
-               job->base.sched = ring->scheduler;
+               job->base.sched = ring->sched;
                job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity;
                job->adev = adev;
                job->ibs = ibs;
index cfd2999acc594e81c5a1dbd806bfa681dd068bd3..b57ca10a85335fb42420b4ce010ef93b5c6d6e7b 100644 (file)
@@ -66,7 +66,7 @@ static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
        if (a_fence)
                return a_fence->ring->adev == adev;
        if (s_fence)
-               return (struct amdgpu_device *)s_fence->scheduler->priv == adev;
+               return (struct amdgpu_device *)s_fence->sched->priv == adev;
        return false;
 }
 
index a9d582634d8e8b7c9f1ca8da90757afa5d1f241e..ec4842e58fd7e1d74ce6017cb8e0dbbf9924fbe3 100644 (file)
@@ -326,7 +326,7 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
 {
        struct amd_sched_fence *s_fence =
                container_of(cb, struct amd_sched_fence, cb);
-       struct amd_gpu_scheduler *sched = s_fence->scheduler;
+       struct amd_gpu_scheduler *sched = s_fence->sched;
 
        atomic_dec(&sched->hw_rq_count);
        amd_sched_fence_signal(s_fence);
index c4fe24e2cb21db502621e9df8626cbc91aafc71e..89d977dd30ac1d303931dc275608b0f3e978bd0e 100644 (file)
@@ -65,7 +65,7 @@ struct amd_sched_rq {
 struct amd_sched_fence {
        struct fence                    base;
        struct fence_cb                 cb;
-       struct amd_gpu_scheduler        *scheduler;
+       struct amd_gpu_scheduler        *sched;
        spinlock_t                      lock;
        void                            *owner;
 };
index 733522f7a0ea452d591c795d499dcb962cdea8aa..d802638094f4bb3817bb6612f1e37bf2479cf142 100644 (file)
@@ -36,7 +36,7 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity
        if (fence == NULL)
                return NULL;
        fence->owner = owner;
-       fence->scheduler = s_entity->sched;
+       fence->sched = s_entity->sched;
        spin_lock_init(&fence->lock);
 
        seq = atomic_inc_return(&s_entity->fence_seq);
@@ -63,7 +63,7 @@ static const char *amd_sched_fence_get_driver_name(struct fence *fence)
 static const char *amd_sched_fence_get_timeline_name(struct fence *f)
 {
        struct amd_sched_fence *fence = to_amd_sched_fence(f);
-       return (const char *)fence->scheduler->name;
+       return (const char *)fence->sched->name;
 }
 
 static bool amd_sched_fence_enable_signaling(struct fence *f)