if (amdgpu_enable_scheduler && parser->num_ibs) {
struct amdgpu_ring * ring =
amdgpu_cs_parser_get_ring(adev, parser);
+ parser->ibs[parser->num_ibs - 1].sequence = atomic64_inc_return(
+ &parser->ctx->rings[ring->idx].c_entity.last_queued_v_seq);
if (ring->is_pte_ring || (parser->bo_list && parser->bo_list->has_userptr)) {
r = amdgpu_cs_parser_prepare_job(parser);
if (r)
parser->ring = ring;
parser->run_job = amdgpu_cs_parser_run_job;
parser->free_job = amdgpu_cs_parser_free_job;
- parser->ibs[parser->num_ibs - 1].sequence =
- amd_sched_push_job(ring->scheduler,
+ amd_sched_push_job(ring->scheduler,
&parser->ctx->rings[ring->idx].c_entity,
parser);
cs->out.handle = parser->ibs[parser->num_ibs - 1].sequence;
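For orientation, the caller-side flow this hunk introduces can be condensed into a small sketch. It is not part of the patch; the helper name is hypothetical and only the fields and calls visible in the hunk are used: the submitter now reserves the virtual sequence number from the context entity itself, tags the last IB with it, and only then pushes the job, since amd_sched_push_job no longer hands the number back.

	/* Sketch only -- not part of the patch. */
	static uint64_t example_reserve_seq_and_push(struct amd_gpu_scheduler *sched,
						     struct amd_context_entity *c_entity,
						     struct amdgpu_ib *last_ib,
						     void *job)
	{
		/* reserve the next virtual sequence number up front */
		uint64_t v_seq = atomic64_inc_return(&c_entity->last_queued_v_seq);

		/* tag the last IB so user space can wait on the returned handle */
		last_ib->sequence = v_seq;

		/* hand the job to the scheduler; it now returns a status code
		 * instead of the sequence number */
		amd_sched_push_job(sched, c_entity, job);

		return v_seq;
	}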
{
int r = 0;
if (amdgpu_enable_scheduler) {
+ uint64_t v_seq;
struct amdgpu_cs_parser *sched_job =
amdgpu_cs_parser_create(adev, owner, &adev->kernel_ctx,
ibs, 1);
return -ENOMEM;
}
sched_job->free_job = free_job;
- ibs[num_ibs - 1].sequence = amd_sched_push_job(ring->scheduler,
+ v_seq = atomic64_inc_return(&adev->kernel_ctx.rings[ring->idx].c_entity.last_queued_v_seq);
+ ibs[num_ibs - 1].sequence = v_seq;
+ amd_sched_push_job(ring->scheduler,
&adev->kernel_ctx.rings[ring->idx].c_entity,
sched_job);
r = amd_sched_wait_emit(
&adev->kernel_ctx.rings[ring->idx].c_entity,
- ibs[num_ibs - 1].sequence, false, -1);
+ v_seq,
+ false,
+ -1);
if (r)
WARN(true, "emit timeout\n");
} else
if (amdgpu_enable_scheduler) {
int r;
+ uint64_t v_seq;
sched_job = amdgpu_cs_parser_create(adev, AMDGPU_FENCE_OWNER_VM,
&adev->kernel_ctx, ib, 1);
if(!sched_job)
sched_job->job_param.vm.bo = bo;
sched_job->run_job = amdgpu_vm_run_job;
sched_job->free_job = amdgpu_vm_free_job;
- ib->sequence = amd_sched_push_job(ring->scheduler,
+ v_seq = atomic64_inc_return(&adev->kernel_ctx.rings[ring->idx].c_entity.last_queued_v_seq);
+ ib->sequence = v_seq;
+ amd_sched_push_job(ring->scheduler,
&adev->kernel_ctx.rings[ring->idx].c_entity,
sched_job);
r = amd_sched_wait_emit(&adev->kernel_ctx.rings[ring->idx].c_entity,
- ib->sequence, false, -1);
+ v_seq,
+ false,
+ -1);
if (r)
DRM_ERROR("emit timeout\n");
if (amdgpu_enable_scheduler) {
int r;
+ uint64_t v_seq;
sched_job = amdgpu_cs_parser_create(adev, AMDGPU_FENCE_OWNER_VM,
&adev->kernel_ctx,
ib, 1);
sched_job->job_param.vm.bo = pd;
sched_job->run_job = amdgpu_vm_run_job;
sched_job->free_job = amdgpu_vm_free_job;
- ib->sequence = amd_sched_push_job(ring->scheduler,
+ v_seq = atomic64_inc_return(&adev->kernel_ctx.rings[ring->idx].c_entity.last_queued_v_seq);
+ ib->sequence = v_seq;
+ amd_sched_push_job(ring->scheduler,
&adev->kernel_ctx.rings[ring->idx].c_entity,
sched_job);
r = amd_sched_wait_emit(&adev->kernel_ctx.rings[ring->idx].c_entity,
- ib->sequence, false, -1);
+ v_seq,
+ false,
+ -1);
if (r)
DRM_ERROR("emit timeout\n");
} else {
if (amdgpu_enable_scheduler) {
int r;
+ uint64_t v_seq;
sched_job = amdgpu_cs_parser_create(adev, AMDGPU_FENCE_OWNER_VM,
&adev->kernel_ctx, ib, 1);
if(!sched_job)
sched_job->job_param.vm_mapping.fence = fence;
sched_job->run_job = amdgpu_vm_bo_update_mapping_run_job;
sched_job->free_job = amdgpu_vm_free_job;
- ib->sequence = amd_sched_push_job(ring->scheduler,
+ v_seq = atomic64_inc_return(&adev->kernel_ctx.rings[ring->idx].c_entity.last_queued_v_seq);
+ ib->sequence = v_seq;
+ amd_sched_push_job(ring->scheduler,
&adev->kernel_ctx.rings[ring->idx].c_entity,
sched_job);
r = amd_sched_wait_emit(&adev->kernel_ctx.rings[ring->idx].c_entity,
- ib->sequence, false, -1);
+ v_seq,
+ false,
+ -1);
if (r)
DRM_ERROR("emit timeout\n");
} else {
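The amdgpu_vm.c hunks above all repeat the same synchronous kernel-submission pattern. A condensed sketch of that pattern follows; it is not part of the patch, the helper name is hypothetical, and it uses only the entities and calls already shown in the hunks (kernel_ctx entity, amd_sched_push_job, amd_sched_wait_emit with intr = false and an infinite timeout of -1):

	/* Sketch only -- not part of the patch. */
	static int example_vm_submit_and_wait(struct amdgpu_device *adev,
					      struct amdgpu_ring *ring,
					      struct amdgpu_cs_parser *sched_job,
					      struct amdgpu_ib *ib)
	{
		struct amd_context_entity *c_entity =
			&adev->kernel_ctx.rings[ring->idx].c_entity;
		uint64_t v_seq;
		int r;

		/* reserve the virtual sequence number on the kernel context */
		v_seq = atomic64_inc_return(&c_entity->last_queued_v_seq);
		ib->sequence = v_seq;

		/* queue the job, then block until the scheduler has emitted it */
		amd_sched_push_job(ring->scheduler, c_entity, sched_job);
		r = amd_sched_wait_emit(c_entity, v_seq, false, -1);
		if (r)
			DRM_ERROR("emit timeout\n");

		return r;
	}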
* @sched The pointer to the scheduler
* @c_entity The pointer to amd_context_entity
* @job The pointer to job required to submit
- * return the virtual sequence number
+ * return 0 if it succeeds.
+ * -2 indicates the queue is full for this client; the client should wait
+ * until the scheduler consumes some queued commands.
+ * -1 indicates any other failure.
*/
-uint64_t amd_sched_push_job(struct amd_gpu_scheduler *sched,
+int amd_sched_push_job(struct amd_gpu_scheduler *sched,
struct amd_context_entity *c_entity,
void *job)
{
}
wake_up_interruptible(&sched->wait_queue);
-
- return atomic64_inc_return(&c_entity->last_queued_v_seq);
+ return 0;
}
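The callers touched by this patch ignore the new status code. If a caller wanted to honour the contract documented above, a wrapper along the following lines would do; this is a sketch only, the helper name and the retry policy are hypothetical, and msleep() comes from <linux/delay.h>:

	/* Sketch only -- not part of the patch. */
	static int example_push_retry(struct amd_gpu_scheduler *sched,
				      struct amd_context_entity *c_entity,
				      void *job)
	{
		int tries = 100;
		int r;

		do {
			r = amd_sched_push_job(sched, c_entity, job);
			if (r != -2)
				break;
			/* -2: this client's queue is full; give the scheduler a
			 * chance to consume some queued commands, then retry. */
			msleep(1);
		} while (--tries);

		return r;
	}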
/**