Rename the "scheduler" member to just "sched", to be consistent with the other members.
v2: rename the ring member as well.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com> (v1)
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
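The rename itself is mechanical: every back-pointer named "scheduler" becomes "sched". As a reading aid (not part of the patch), here is a minimal sketch of the two affected structures after applying it, with unrelated members elided:

	struct amdgpu_ring {
		struct amdgpu_device		*adev;
		const struct amdgpu_ring_funcs	*funcs;
		struct amdgpu_fence_driver	fence_drv;
		struct amd_gpu_scheduler	*sched;	/* was "scheduler" */
		/* ... */
	};

	struct amd_sched_fence {
		struct fence			base;
		struct fence_cb			cb;
		struct amd_gpu_scheduler	*sched;	/* was "scheduler" */
		spinlock_t			lock;
		void				*owner;
	};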
struct amdgpu_device *adev;
const struct amdgpu_ring_funcs *funcs;
struct amdgpu_fence_driver fence_drv;
- struct amd_gpu_scheduler *scheduler;
+ struct amd_gpu_scheduler *sched;
spinlock_t fence_lock;
struct mutex *ring_lock;
job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
if (!job)
return -ENOMEM;
- job->base.sched = ring->scheduler;
+ job->base.sched = ring->sched;
job->base.s_entity = &parser->ctx->rings[ring->idx].entity;
job->adev = parser->adev;
job->ibs = parser->ibs;
for (i = 0; i < adev->num_rings; i++) {
struct amd_sched_rq *rq;
if (kernel)
- rq = &adev->rings[i]->scheduler->kernel_rq;
+ rq = &adev->rings[i]->sched->kernel_rq;
else
- rq = &adev->rings[i]->scheduler->sched_rq;
- r = amd_sched_entity_init(adev->rings[i]->scheduler,
+ rq = &adev->rings[i]->sched->sched_rq;
+ r = amd_sched_entity_init(adev->rings[i]->sched,
&ctx->rings[i].entity,
rq, amdgpu_sched_jobs);
if (r)
break;
}

if (i < adev->num_rings) {
for (j = 0; j < i; j++)
- amd_sched_entity_fini(adev->rings[j]->scheduler,
+ amd_sched_entity_fini(adev->rings[j]->sched,
&ctx->rings[j].entity);
kfree(ctx);
return r;
}
if (amdgpu_enable_scheduler) {
for (i = 0; i < adev->num_rings; i++)
- amd_sched_entity_fini(adev->rings[i]->scheduler,
+ amd_sched_entity_fini(adev->rings[i]->sched,
&ctx->rings[i].entity);
}
}
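For reference, the context-entity code above follows the usual init/unwind pairing: each ring entity is bound to its ring's scheduler, and already-initialized entities are torn down again if a later ring fails. A condensed sketch of the init loop after the rename (error handling abridged to the unwind shown above):

	for (i = 0; i < adev->num_rings; i++) {
		struct amd_sched_rq *rq;

		/* kernel contexts feed the dedicated kernel run queue */
		if (kernel)
			rq = &adev->rings[i]->sched->kernel_rq;
		else
			rq = &adev->rings[i]->sched->sched_rq;
		r = amd_sched_entity_init(adev->rings[i]->sched,
					  &ctx->rings[i].entity,
					  rq, amdgpu_sched_jobs);
		if (r)
			break;	/* entities 0..i-1 get unwound by the caller */
	}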
ring->fence_drv.ring = ring;
if (amdgpu_enable_scheduler) {
- ring->scheduler = amd_sched_create(&amdgpu_sched_ops,
- ring->idx,
- amdgpu_sched_hw_submission,
- (void *)ring->adev);
- if (!ring->scheduler)
+ ring->sched = amd_sched_create(&amdgpu_sched_ops,
+ ring->idx,
+ amdgpu_sched_hw_submission,
+ (void *)ring->adev);
+ if (!ring->sched)
DRM_ERROR("Failed to create scheduler on ring %d.\n",
ring->idx);
}
wake_up_all(&ring->fence_drv.fence_queue);
amdgpu_irq_put(adev, ring->fence_drv.irq_src,
ring->fence_drv.irq_type);
- if (ring->scheduler)
- amd_sched_destroy(ring->scheduler);
+ if (ring->sched)
+ amd_sched_destroy(ring->sched);
ring->fence_drv.initialized = false;
}
mutex_unlock(&adev->ring_lock);
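Note the (void *)ring->adev passed as the scheduler's priv at creation time; that is what the same-device check further down reads back through s_fence->sched->priv. A hedged sketch of the create/destroy pairing after the rename, condensed from the two hunks above:

	/* init: one scheduler instance per ring */
	if (amdgpu_enable_scheduler) {
		ring->sched = amd_sched_create(&amdgpu_sched_ops,
					       ring->idx,
					       amdgpu_sched_hw_submission,
					       (void *)ring->adev);
		if (!ring->sched)
			DRM_ERROR("Failed to create scheduler on ring %d.\n",
				  ring->idx);
	}

	/* fini: matching teardown before the fence driver is marked dead */
	if (ring->sched)
		amd_sched_destroy(ring->sched);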
s_fence = to_amd_sched_fence(f);
if (s_fence)
- return s_fence->scheduler->ring_id;
+ return s_fence->sched->ring_id;
a_fence = to_amdgpu_fence(f);
if (a_fence)
return a_fence->ring->idx;
if (s_fence)
seq_printf(m, " protected by 0x%016x on ring %d",
s_fence->base.seqno,
- s_fence->scheduler->ring_id);
+ s_fence->sched->ring_id);
}
seq_printf(m, "\n");
struct amdgpu_job *job =
kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
if (!job)
return -ENOMEM;
- job->base.sched = ring->scheduler;
+ job->base.sched = ring->sched;
job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity;
job->adev = adev;
job->ibs = ibs;
if (a_fence)
return a_fence->ring->adev == adev;
if (s_fence)
- return (struct amdgpu_device *)s_fence->scheduler->priv == adev;
+ return (struct amdgpu_device *)s_fence->sched->priv == adev;
return false;
}
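Several lookups dispatch on the fence type: a scheduler fence reaches its ring indirectly through the renamed sched pointer, while a hardware fence carries the ring directly. Sketched once for the ring-id case above:

	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
	struct amdgpu_fence *a_fence;

	if (s_fence)
		return s_fence->sched->ring_id;	/* via the scheduler */

	a_fence = to_amdgpu_fence(f);
	if (a_fence)
		return a_fence->ring->idx;	/* hw fence knows its ring */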
{
struct amd_sched_fence *s_fence =
container_of(cb, struct amd_sched_fence, cb);
- struct amd_gpu_scheduler *sched = s_fence->scheduler;
+ struct amd_gpu_scheduler *sched = s_fence->sched;
atomic_dec(&sched->hw_rq_count);
amd_sched_fence_signal(s_fence);
struct amd_sched_fence {
struct fence base;
struct fence_cb cb;
- struct amd_gpu_scheduler *scheduler;
+ struct amd_gpu_scheduler *sched;
spinlock_t lock;
void *owner;
};
if (fence == NULL)
return NULL;
fence->owner = owner;
- fence->scheduler = s_entity->sched;
+ fence->sched = s_entity->sched;
spin_lock_init(&fence->lock);
seq = atomic_inc_return(&s_entity->fence_seq);
static const char *amd_sched_fence_get_timeline_name(struct fence *f)
{
struct amd_sched_fence *fence = to_amd_sched_fence(f);
- return (const char *)fence->scheduler->name;
+ return (const char *)fence->sched->name;
}
static bool amd_sched_fence_enable_signaling(struct fence *f)