return NULL;
/* Kernel run queue has higher priority than normal run queue*/
- for (i = 0; i < AMD_SCHED_MAX_PRIORITY; i++) {
+ for (i = AMD_SCHED_PRIORITY_MAX - 1; i >= AMD_SCHED_PRIORITY_MIN; i--) {
entity = amd_sched_rq_select_entity(&sched->sched_rq[i]);
if (entity)
break;

sched->hw_submission_limit = hw_submission;
sched->name = name;
sched->timeout = timeout;
- for (i = 0; i < AMD_SCHED_MAX_PRIORITY; i++)
+ for (i = AMD_SCHED_PRIORITY_MIN; i < AMD_SCHED_PRIORITY_MAX; i++)
amd_sched_rq_init(&sched->sched_rq[i]);
init_waitqueue_head(&sched->wake_up_worker);
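
With the reworked enum a larger value means a higher priority, so the selection loop above now walks the run queues from AMD_SCHED_PRIORITY_MAX - 1 down to AMD_SCHED_PRIORITY_MIN and therefore tries the kernel queue before the normal one, while the init loop simply visits every queue in ascending order. A minimal standalone sketch of that ordering, using a hypothetical mock_rq/select_rq pair in place of the real amd_sched_rq machinery:

#include <stdio.h>
#include <stdbool.h>

/* Mirrors the reworked enum: a higher value is a higher priority. */
enum amd_sched_priority {
	AMD_SCHED_PRIORITY_MIN,
	AMD_SCHED_PRIORITY_NORMAL = AMD_SCHED_PRIORITY_MIN,
	AMD_SCHED_PRIORITY_KERNEL,
	AMD_SCHED_PRIORITY_MAX
};

/* Stand-in for struct amd_sched_rq: only records whether a runnable
 * entity is queued. */
struct mock_rq {
	const char *name;
	bool has_entity;
};

/* Same shape as the selection loop above: scan from the highest
 * priority downwards and stop at the first non-empty run queue.
 * Returns -1 when every queue is empty. */
static int select_rq(const struct mock_rq *rq)
{
	int i;

	for (i = AMD_SCHED_PRIORITY_MAX - 1; i >= AMD_SCHED_PRIORITY_MIN; i--)
		if (rq[i].has_entity)
			return i;
	return -1;
}

int main(void)
{
	struct mock_rq rq[AMD_SCHED_PRIORITY_MAX] = {
		[AMD_SCHED_PRIORITY_NORMAL] = { "normal", true },
		[AMD_SCHED_PRIORITY_KERNEL] = { "kernel", true },
	};
	int i;

	/* Both queues hold work: the kernel queue is picked first. */
	i = select_rq(rq);
	printf("selected: %s\n", i >= 0 ? rq[i].name : "none");

	/* Drain the kernel queue: normal work gets its turn. */
	rq[AMD_SCHED_PRIORITY_KERNEL].has_entity = false;
	i = select_rq(rq);
	printf("selected: %s\n", i >= 0 ? rq[i].name : "none");
	return 0;
}

Run as-is this prints "selected: kernel" and then, once the kernel queue drains, "selected: normal", which is exactly the behaviour the reversed loop direction buys.
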
enum amd_sched_priority {
- AMD_SCHED_PRIORITY_KERNEL = 0,
- AMD_SCHED_PRIORITY_NORMAL,
- AMD_SCHED_MAX_PRIORITY
+ AMD_SCHED_PRIORITY_MIN,
+ AMD_SCHED_PRIORITY_NORMAL = AMD_SCHED_PRIORITY_MIN,
+ AMD_SCHED_PRIORITY_KERNEL,
+ AMD_SCHED_PRIORITY_MAX
};

/**
* One scheduler is implemented for each hardware ring
*/
struct amd_gpu_scheduler {
const struct amd_sched_backend_ops *ops;
uint32_t hw_submission_limit;
long timeout;
const char *name;
- struct amd_sched_rq sched_rq[AMD_SCHED_MAX_PRIORITY];
+ struct amd_sched_rq sched_rq[AMD_SCHED_PRIORITY_MAX];
wait_queue_head_t wake_up_worker;
wait_queue_head_t job_scheduled;
atomic_t hw_rq_count;
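
Sizing sched_rq with AMD_SCHED_PRIORITY_MAX means every priority value doubles as an index into the array, and the MIN/MAX sentinels give natural bounds for validating a priority that originates outside the scheduler. A small sketch of that idea; to_sched_priority is a hypothetical helper written for illustration, not something introduced by this patch:

#include <stdio.h>

enum amd_sched_priority {
	AMD_SCHED_PRIORITY_MIN,
	AMD_SCHED_PRIORITY_NORMAL = AMD_SCHED_PRIORITY_MIN,
	AMD_SCHED_PRIORITY_KERNEL,
	AMD_SCHED_PRIORITY_MAX
};

/* Hypothetical helper: clamp an untrusted integer (say, a value passed
 * in from userspace) into the valid [MIN, MAX) range before it is used
 * as a sched_rq index; anything out of range falls back to NORMAL. */
static enum amd_sched_priority to_sched_priority(int prio)
{
	if (prio < AMD_SCHED_PRIORITY_MIN || prio >= AMD_SCHED_PRIORITY_MAX)
		return AMD_SCHED_PRIORITY_NORMAL;
	return (enum amd_sched_priority)prio;
}

int main(void)
{
	printf("%d %d %d\n",
	       to_sched_priority(-5),                        /* 0: NORMAL */
	       to_sched_priority(AMD_SCHED_PRIORITY_KERNEL), /* 1: KERNEL */
	       to_sched_priority(42));                       /* 0: NORMAL */
	return 0;
}
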