Merge amd_context_entity into amd_sched_entity, avoiding a couple of casts.

v2: rename c_entity to entity as well
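The casts in question: the run queue stores generic amd_sched_entity pointers, so every lookup had to container_of() back to the wrapping amd_context_entity (with a WARN_ON guarding that generic_entity stayed the first member). A minimal sketch of the old pattern for context; the to_context_entity() helper is illustrative, not taken from the tree:

/* Pre-patch layout: the context entity wrapped the generic one. */
struct amd_context_entity {
	struct amd_sched_entity generic_entity;	/* had to stay first */
	/* ... sequence counters, job_queue, wait queues ... */
};

static inline struct amd_context_entity *
to_context_entity(struct amd_sched_entity *sched_entity)
{
	/* The cast this patch removes: recover the container from the
	 * embedded generic entity. */
	return container_of(sched_entity, struct amd_context_entity,
			    generic_entity);
}

With the two structs merged, run-queue selection can return the entity directly.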
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
#define AMDGPU_CTX_MAX_CS_PENDING 16
struct amdgpu_ctx_ring {
- uint64_t sequence;
- struct fence *fences[AMDGPU_CTX_MAX_CS_PENDING];
- struct amd_context_entity c_entity;
+ uint64_t sequence;
+ struct fence *fences[AMDGPU_CTX_MAX_CS_PENDING];
+ struct amd_sched_entity entity;
};
struct amdgpu_ctx {
struct amdgpu_ring * ring =
amdgpu_cs_parser_get_ring(adev, parser);
parser->ibs[parser->num_ibs - 1].sequence = atomic64_inc_return(
- &parser->ctx->rings[ring->idx].c_entity.last_queued_v_seq);
+ &parser->ctx->rings[ring->idx].entity.last_queued_v_seq);
if (ring->is_pte_ring || (parser->bo_list && parser->bo_list->has_userptr)) {
r = amdgpu_cs_parser_prepare_job(parser);
if (r)
parser->run_job = amdgpu_cs_parser_run_job;
parser->free_job = amdgpu_cs_parser_free_job;
amd_sched_push_job(ring->scheduler,
- &parser->ctx->rings[ring->idx].c_entity,
+ &parser->ctx->rings[ring->idx].entity,
parser);
cs->out.handle = parser->ibs[parser->num_ibs - 1].sequence;
up_read(&adev->exclusive_lock);
rq = &adev->rings[i]->scheduler->kernel_rq;
else
rq = &adev->rings[i]->scheduler->sched_rq;
- r = amd_context_entity_init(adev->rings[i]->scheduler,
- &ctx->rings[i].c_entity,
- rq, amdgpu_sched_jobs);
+ r = amd_sched_entity_init(adev->rings[i]->scheduler,
+ &ctx->rings[i].entity,
+ rq, amdgpu_sched_jobs);
if (r)
break;
}
if (i < adev->num_rings) {
for (j = 0; j < i; j++)
- amd_context_entity_fini(adev->rings[j]->scheduler,
- &ctx->rings[j].c_entity);
+ amd_sched_entity_fini(adev->rings[j]->scheduler,
+ &ctx->rings[j].entity);
kfree(ctx);
return r;
}
if (amdgpu_enable_scheduler) {
for (i = 0; i < adev->num_rings; i++)
- amd_context_entity_fini(adev->rings[i]->scheduler,
- &ctx->rings[i].c_entity);
+ amd_sched_entity_fini(adev->rings[i]->scheduler,
+ &ctx->rings[i].entity);
}
}
int r;
if (amdgpu_enable_scheduler) {
- r = amd_sched_wait_emit(&cring->c_entity,
+ r = amd_sched_wait_emit(&cring->entity,
seq,
false,
-1);
spin_lock(&ctx->ring_lock);
if (amdgpu_enable_scheduler)
- queued_seq = amd_sched_next_queued_seq(&cring->c_entity);
+ queued_seq = amd_sched_next_queued_seq(&cring->entity);
else
queued_seq = cring->sequence;
#include "amdgpu.h"
static int amdgpu_sched_prepare_job(struct amd_gpu_scheduler *sched,
- struct amd_context_entity *c_entity,
+ struct amd_sched_entity *entity,
void *job)
{
int r = 0;
}
static void amdgpu_sched_run_job(struct amd_gpu_scheduler *sched,
- struct amd_context_entity *c_entity,
+ struct amd_sched_entity *entity,
struct amd_sched_job *job)
{
int r = 0;
goto err;
}
- amd_sched_emit(c_entity, sched_job->ibs[sched_job->num_ibs - 1].sequence);
+ amd_sched_emit(entity, sched_job->ibs[sched_job->num_ibs - 1].sequence);
mutex_unlock(&sched_job->job_lock);
return;
return -ENOMEM;
}
sched_job->free_job = free_job;
- v_seq = atomic64_inc_return(&adev->kernel_ctx.rings[ring->idx].c_entity.last_queued_v_seq);
+ v_seq = atomic64_inc_return(&adev->kernel_ctx.rings[ring->idx].entity.last_queued_v_seq);
ibs[num_ibs - 1].sequence = v_seq;
amd_sched_push_job(ring->scheduler,
- &adev->kernel_ctx.rings[ring->idx].c_entity,
+ &adev->kernel_ctx.rings[ring->idx].entity,
sched_job);
r = amd_sched_wait_emit(
- &adev->kernel_ctx.rings[ring->idx].c_entity,
+ &adev->kernel_ctx.rings[ring->idx].entity,
v_seq,
false,
-1);
sched_job->job_param.vm.bo = bo;
sched_job->run_job = amdgpu_vm_run_job;
sched_job->free_job = amdgpu_vm_free_job;
- v_seq = atomic64_inc_return(&adev->kernel_ctx.rings[ring->idx].c_entity.last_queued_v_seq);
+ v_seq = atomic64_inc_return(&adev->kernel_ctx.rings[ring->idx].entity.last_queued_v_seq);
ib->sequence = v_seq;
amd_sched_push_job(ring->scheduler,
- &adev->kernel_ctx.rings[ring->idx].c_entity,
+ &adev->kernel_ctx.rings[ring->idx].entity,
sched_job);
- r = amd_sched_wait_emit(&adev->kernel_ctx.rings[ring->idx].c_entity,
+ r = amd_sched_wait_emit(&adev->kernel_ctx.rings[ring->idx].entity,
v_seq,
false,
-1);
sched_job->job_param.vm.bo = pd;
sched_job->run_job = amdgpu_vm_run_job;
sched_job->free_job = amdgpu_vm_free_job;
- v_seq = atomic64_inc_return(&adev->kernel_ctx.rings[ring->idx].c_entity.last_queued_v_seq);
+ v_seq = atomic64_inc_return(&adev->kernel_ctx.rings[ring->idx].entity.last_queued_v_seq);
ib->sequence = v_seq;
amd_sched_push_job(ring->scheduler,
- &adev->kernel_ctx.rings[ring->idx].c_entity,
+ &adev->kernel_ctx.rings[ring->idx].entity,
sched_job);
- r = amd_sched_wait_emit(&adev->kernel_ctx.rings[ring->idx].c_entity,
+ r = amd_sched_wait_emit(&adev->kernel_ctx.rings[ring->idx].entity,
v_seq,
false,
-1);
sched_job->job_param.vm_mapping.fence = fence;
sched_job->run_job = amdgpu_vm_bo_update_mapping_run_job;
sched_job->free_job = amdgpu_vm_free_job;
- v_seq = atomic64_inc_return(&adev->kernel_ctx.rings[ring->idx].c_entity.last_queued_v_seq);
+ v_seq = atomic64_inc_return(&adev->kernel_ctx.rings[ring->idx].entity.last_queued_v_seq);
ib->sequence = v_seq;
amd_sched_push_job(ring->scheduler,
- &adev->kernel_ctx.rings[ring->idx].c_entity,
+ &adev->kernel_ctx.rings[ring->idx].entity,
sched_job);
- r = amd_sched_wait_emit(&adev->kernel_ctx.rings[ring->idx].c_entity,
+ r = amd_sched_wait_emit(&adev->kernel_ctx.rings[ring->idx].entity,
v_seq,
false,
-1);
return i ? p : NULL;
}
-static bool context_entity_is_waiting(struct amd_context_entity *entity)
+static bool context_entity_is_waiting(struct amd_sched_entity *entity)
{
/* TODO: sync obj for multi-ring synchronization */
return false;
static int gpu_entity_check_status(struct amd_sched_entity *entity)
{
- struct amd_context_entity *tmp;
-
if (entity == &entity->belongto_rq->head)
return -1;
- tmp = container_of(entity, typeof(*tmp), generic_entity);
- if (kfifo_is_empty(&tmp->job_queue) ||
- context_entity_is_waiting(tmp))
+ if (kfifo_is_empty(&entity->job_queue) ||
+ context_entity_is_waiting(entity))
return -1;
return 0;
* Select the next entity from the kernel run queue; if none is available,
* return NULL.
*/
-static struct amd_context_entity *
+static struct amd_sched_entity *
kernel_rq_select_context(struct amd_gpu_scheduler *sched)
{
struct amd_sched_entity *sched_entity;
- struct amd_context_entity *tmp = NULL;
struct amd_run_queue *rq = &sched->kernel_rq;
mutex_lock(&rq->lock);
sched_entity = rq_select_entity(rq);
- if (sched_entity)
- tmp = container_of(sched_entity,
- typeof(*tmp),
- generic_entity);
mutex_unlock(&rq->lock);
- return tmp;
+ return sched_entity;
}
/**
* Select next entity containing real IB submissions
*/
-static struct amd_context_entity *
+static struct amd_sched_entity *
select_context(struct amd_gpu_scheduler *sched)
{
- struct amd_context_entity *wake_entity = NULL;
- struct amd_context_entity *tmp;
+ struct amd_sched_entity *wake_entity = NULL;
+ struct amd_sched_entity *tmp;
struct amd_run_queue *rq;
if (!is_scheduler_ready(sched))
if (tmp != NULL)
goto exit;
- WARN_ON(offsetof(struct amd_context_entity, generic_entity) != 0);
-
rq = &sched->sched_rq;
mutex_lock(&rq->lock);
- tmp = container_of(rq_select_entity(rq),
- typeof(*tmp), generic_entity);
+ tmp = rq_select_entity(rq);
mutex_unlock(&rq->lock);
exit:
if (sched->current_entity && (sched->current_entity != tmp))
* Init a context entity used by the scheduler when submitting to the HW ring.
*
* @sched The pointer to the scheduler
- * @entity The pointer to a valid amd_context_entity
+ * @entity The pointer to a valid amd_sched_entity
* @rq The run queue this entity belongs to
* @kernel If this is an entity for the kernel
* @jobs The max number of jobs in the job queue
*
* return 0 on success, negative error code on failure
*/
-int amd_context_entity_init(struct amd_gpu_scheduler *sched,
- struct amd_context_entity *entity,
+int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
+ struct amd_sched_entity *entity,
struct amd_run_queue *rq,
uint32_t jobs)
{
if (!(sched && entity && rq))
return -EINVAL;
- memset(entity, 0, sizeof(struct amd_context_entity));
+ memset(entity, 0, sizeof(struct amd_sched_entity));
seq_ring = ((uint64_t)sched->ring_id) << 60;
spin_lock_init(&entity->lock);
- entity->generic_entity.belongto_rq = rq;
+ entity->belongto_rq = rq;
entity->scheduler = sched;
init_waitqueue_head(&entity->wait_queue);
init_waitqueue_head(&entity->wait_emit);
/* Add the entity to the run queue */
mutex_lock(&rq->lock);
- rq_add_entity(rq, &entity->generic_entity);
+ rq_add_entity(rq, entity);
mutex_unlock(&rq->lock);
return 0;
}
* return true if the entity is initialized, false otherwise
*/
static bool is_context_entity_initialized(struct amd_gpu_scheduler *sched,
- struct amd_context_entity *entity)
+ struct amd_sched_entity *entity)
{
return entity->scheduler == sched &&
- entity->generic_entity.belongto_rq != NULL;
+ entity->belongto_rq != NULL;
}
static bool is_context_entity_idle(struct amd_gpu_scheduler *sched,
- struct amd_context_entity *entity)
+ struct amd_sched_entity *entity)
{
/**
* Idle means no pending IBs, and the entity is not currently being used.
*
* return 0 on success, negative error code on failure
*/
-int amd_context_entity_fini(struct amd_gpu_scheduler *sched,
- struct amd_context_entity *entity)
+int amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
+ struct amd_sched_entity *entity)
{
int r = 0;
- struct amd_run_queue *rq = entity->generic_entity.belongto_rq;
+ struct amd_run_queue *rq = entity->belongto_rq;
if (!is_context_entity_initialized(sched, entity))
return 0;
}
mutex_lock(&rq->lock);
- rq_remove_entity(rq, &entity->generic_entity);
+ rq_remove_entity(rq, entity);
mutex_unlock(&rq->lock);
kfifo_free(&entity->job_queue);
return r;
* Submit a normal job to the job queue
*
* @sched The pointer to the scheduler
- * @c_entity The pointer to amd_context_entity
+ * @c_entity The pointer to amd_sched_entity
* @job The pointer to job required to submit
* return 0 on success, negative error code on failure:
*    -2 the queue is full for this client, the client should wait
*       until the scheduler consumes some queued commands;
*    -1 any other failure.
*/
int amd_sched_push_job(struct amd_gpu_scheduler *sched,
- struct amd_context_entity *c_entity,
+ struct amd_sched_entity *c_entity,
void *job)
{
while (kfifo_in_spinlocked(&c_entity->job_queue, &job, sizeof(void *),
*
* return 0 if signaled, <0 on failure
*/
-int amd_sched_wait_emit(struct amd_context_entity *c_entity,
+int amd_sched_wait_emit(struct amd_sched_entity *c_entity,
uint64_t seq,
bool intr,
long timeout)
int r;
void *job;
struct sched_param sparam = {.sched_priority = 1};
- struct amd_context_entity *c_entity = NULL;
+ struct amd_sched_entity *c_entity = NULL;
struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
sched_setscheduler(current, SCHED_FIFO, &sparam);
* @entity The context entity
* @seq The sequence number for the latest emitted job
*/
-void amd_sched_emit(struct amd_context_entity *c_entity, uint64_t seq)
+void amd_sched_emit(struct amd_sched_entity *c_entity, uint64_t seq)
{
atomic64_set(&c_entity->last_emitted_v_seq, seq);
wake_up_all(&c_entity->wait_emit);
*
* return the next queued sequence number
*/
-uint64_t amd_sched_next_queued_seq(struct amd_context_entity *c_entity)
+uint64_t amd_sched_next_queued_seq(struct amd_sched_entity *c_entity)
{
return atomic64_read(&c_entity->last_queued_v_seq) + 1;
}
struct amd_sched_entity {
struct list_head list;
struct amd_run_queue *belongto_rq;
+ spinlock_t lock;
+ /* the virtual_seq is unique per context per ring */
+ atomic64_t last_queued_v_seq;
+ atomic64_t last_emitted_v_seq;
+ /* the job_queue maintains the jobs submitted by clients */
+ struct kfifo job_queue;
+ spinlock_t queue_lock;
+ struct amd_gpu_scheduler *scheduler;
+ wait_queue_head_t wait_queue;
+ wait_queue_head_t wait_emit;
+ bool is_pending;
};
/**
int (*check_entity_status)(struct amd_sched_entity *entity);
};
-/**
- * Context based scheduler entity, there can be multiple entities for
- * each context, and one entity per ring
-*/
-struct amd_context_entity {
- struct amd_sched_entity generic_entity;
- spinlock_t lock;
- /* the virtual_seq is unique per context per ring */
- atomic64_t last_queued_v_seq;
- atomic64_t last_emitted_v_seq;
- /* the job_queue maintains the jobs submitted by clients */
- struct kfifo job_queue;
- spinlock_t queue_lock;
- struct amd_gpu_scheduler *scheduler;
- wait_queue_head_t wait_queue;
- wait_queue_head_t wait_emit;
- bool is_pending;
-};
-
struct amd_sched_job {
struct list_head list;
struct fence_cb cb;
*/
struct amd_sched_backend_ops {
int (*prepare_job)(struct amd_gpu_scheduler *sched,
- struct amd_context_entity *c_entity,
+ struct amd_sched_entity *c_entity,
void *job);
void (*run_job)(struct amd_gpu_scheduler *sched,
- struct amd_context_entity *c_entity,
+ struct amd_sched_entity *c_entity,
struct amd_sched_job *job);
void (*process_job)(struct amd_gpu_scheduler *sched, void *job);
};
uint32_t granularity; /* in ms unit */
uint32_t preemption;
wait_queue_head_t wait_queue;
- struct amd_context_entity *current_entity;
+ struct amd_sched_entity *current_entity;
struct mutex sched_lock;
spinlock_t queue_lock;
uint32_t hw_submission_limit;
int amd_sched_destroy(struct amd_gpu_scheduler *sched);
int amd_sched_push_job(struct amd_gpu_scheduler *sched,
- struct amd_context_entity *c_entity,
+ struct amd_sched_entity *c_entity,
void *job);
-int amd_sched_wait_emit(struct amd_context_entity *c_entity,
+int amd_sched_wait_emit(struct amd_sched_entity *c_entity,
uint64_t seq,
bool intr,
long timeout);
void amd_sched_process_job(struct amd_sched_job *sched_job);
uint64_t amd_sched_get_handled_seq(struct amd_gpu_scheduler *sched);
-int amd_context_entity_fini(struct amd_gpu_scheduler *sched,
- struct amd_context_entity *entity);
-
-int amd_context_entity_init(struct amd_gpu_scheduler *sched,
- struct amd_context_entity *entity,
- struct amd_run_queue *rq,
- uint32_t jobs);
+int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
+ struct amd_sched_entity *entity,
+ struct amd_run_queue *rq,
+ uint32_t jobs);
+int amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
+ struct amd_sched_entity *entity);
-void amd_sched_emit(struct amd_context_entity *c_entity, uint64_t seq);
+void amd_sched_emit(struct amd_sched_entity *c_entity, uint64_t seq);
-uint64_t amd_sched_next_queued_seq(struct amd_context_entity *c_entity);
+uint64_t amd_sched_next_queued_seq(struct amd_sched_entity *c_entity);
#endif
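For reference, a minimal usage sketch of the renamed entity API as it stands after this patch. The helper itself is not part of the patch; ring and job are placeholders, and the sched_rq choice and trimmed error handling are assumptions:

/* Hypothetical caller: run one job through a freshly initialized
 * entity, waiting for it to be emitted before tearing down. */
static int example_submit_one(struct amdgpu_ring *ring, void *job)
{
	struct amd_sched_entity entity;
	uint64_t v_seq;
	int r;

	r = amd_sched_entity_init(ring->scheduler, &entity,
				  &ring->scheduler->sched_rq,
				  amdgpu_sched_jobs);
	if (r)
		return r;

	/* The sequence counters now live directly in amd_sched_entity,
	 * no c_entity indirection. */
	v_seq = atomic64_inc_return(&entity.last_queued_v_seq);
	r = amd_sched_push_job(ring->scheduler, &entity, job);
	if (!r)
		r = amd_sched_wait_emit(&entity, v_seq, false, -1);

	amd_sched_entity_fini(ring->scheduler, &entity);
	return r;
}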