s_fence = to_amd_sched_fence(f);
if (s_fence)
- return s_fence->entity->scheduler->ring_id;
+ return s_fence->scheduler->ring_id;
a_fence = to_amdgpu_fence(f);
if (a_fence)
return a_fence->ring->idx;
if (s_fence)
seq_printf(m, " protected by 0x%016x on ring %d",
s_fence->base.seqno,
- s_fence->entity->scheduler->ring_id);
+ s_fence->scheduler->ring_id);
}
seq_printf(m, "\n");
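
Taken together, the two branches above now resolve a fence to its ring without going through the submitting entity. A minimal sketch of the combined lookup; the helper name and the ~0 sentinel are illustrative, not part of the patch:

/* Illustration only: helper name and error value are assumptions. */
static uint32_t example_fence_to_ring_id(struct fence *f)
{
	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
	struct amdgpu_fence *a_fence;

	if (s_fence)
		return s_fence->scheduler->ring_id;

	a_fence = to_amdgpu_fence(f);
	if (a_fence)
		return a_fence->ring->idx;

	return ~0; /* neither a scheduler fence nor an amdgpu fence */
}
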
struct amd_sched_rq *rq,
uint32_t jobs)
{
- char name[20];
-
if (!(sched && entity && rq))
return -EINVAL;
entity->scheduler = sched;
init_waitqueue_head(&entity->wait_queue);
entity->fence_context = fence_context_alloc(1);
- snprintf(name, sizeof(name), "c_entity[%llu]", entity->fence_context);
- memcpy(entity->name, name, 20);
if (kfifo_alloc(&entity->job_queue,
jobs * sizeof(void *),
GFP_KERNEL))
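
With the per-entity name gone, initialization only sets up the scheduler back-pointer, fence context and job queue. A usage sketch; the surrounding function name amd_sched_entity_init() and the queue depth are assumptions based on the trailing parameters shown:

/* Usage sketch only; function name and depth are assumptions. */
struct amd_sched_entity entity;
int r;

r = amd_sched_entity_init(sched, &entity, &sched->sched_rq, 16);
if (r)
	return r;
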
unsigned ring, unsigned hw_submission)
{
struct amd_gpu_scheduler *sched;
- char name[20];
sched = kzalloc(sizeof(struct amd_gpu_scheduler), GFP_KERNEL);
if (!sched)
sched->ops = ops;
sched->ring_id = ring;
sched->hw_submission_limit = hw_submission;
- snprintf(name, sizeof(name), "gpu_sched[%d]", ring);
+ snprintf(sched->name, sizeof(sched->name), "amdgpu[%d]", ring);
amd_sched_rq_init(&sched->sched_rq);
amd_sched_rq_init(&sched->kernel_rq);
init_waitqueue_head(&sched->wait_queue);
atomic_set(&sched->hw_rq_count, 0);
/* Each scheduler will run on a separate kernel thread */
- sched->thread = kthread_run(amd_sched_main, sched, name);
+ sched->thread = kthread_run(amd_sched_main, sched, sched->name);
if (IS_ERR(sched->thread)) {
DRM_ERROR("Failed to create scheduler for id %d.\n", ring);
kfree(sched);
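
Because the name now lives in the scheduler struct instead of a stack buffer, it stays valid for the lifetime of the kthread and of any fence that refers back to it. A usage sketch; amd_sched_create() as the surrounding function and the ops argument are assumptions based on the trailing parameters shown:

/* Usage sketch only; creator name and ops table are assumptions. */
struct amd_gpu_scheduler *sched;

sched = amd_sched_create(&amdgpu_sched_ops, 0, 16);
if (sched)
	DRM_INFO("scheduler thread: %s\n", sched->name); /* "amdgpu[0]" */
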
struct amd_gpu_scheduler *scheduler;
wait_queue_head_t wait_queue;
uint64_t fence_context;
- char name[20];
};
/**
struct amd_sched_fence {
struct fence base;
- struct fence_cb cb;
- struct amd_sched_entity *entity;
+ struct amd_gpu_scheduler *scheduler;
spinlock_t lock;
};
uint32_t ring_id;
wait_queue_head_t wait_queue;
uint32_t hw_submission_limit;
+ char name[20];
};
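
The 20-byte buffer is exactly large enough for the worst case: "amdgpu[" is 7 characters, a %d rendering of the ring index is at most 11 ("-2147483648" if the unsigned value ever exceeded INT_MAX), plus "]" and the terminating NUL gives 20, and snprintf() truncates safely in any case.
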
struct amd_gpu_scheduler *
if (fence == NULL)
return NULL;
- fence->entity = s_entity;
+ fence->scheduler = s_entity->scheduler;
spin_lock_init(&fence->lock);
seq = atomic_inc_return(&s_entity->fence_seq);
static const char *amd_sched_fence_get_timeline_name(struct fence *f)
{
struct amd_sched_fence *fence = to_amd_sched_fence(f);
- return (const char *)fence->entity->name;
+ return (const char *)fence->scheduler->name;
}
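
For context, the callback above is reached through the driver's struct fence_ops table, so trace points and debugfs report the scheduler's name (e.g. "amdgpu[0]") as the fence timeline. A sketch of that wiring; only .get_timeline_name comes from this patch, the table name and remaining callbacks are assumptions for illustration:

/* Sketch only: everything except .get_timeline_name is assumed. */
static const struct fence_ops example_sched_fence_ops = {
	.get_driver_name = amd_sched_fence_get_driver_name,
	.get_timeline_name = amd_sched_fence_get_timeline_name,
	.enable_signaling = amd_sched_fence_enable_signaling,
	.wait = fence_default_wait,
};
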
static bool amd_sched_fence_enable_signaling(struct fence *f)