struct amdgpu_bo *bo;
/* write-back address offset from the start of the bo */
uint32_t offset;
- uint64_t sequence;
};
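The hunk above drops the per-submission sequence number from the user-fence bookkeeping; the remaining hunks move it into each IB instead. A minimal before/after sketch, assuming the struct shown here is the driver's user-fence struct (amdgpu_user_fence) and with unrelated amdgpu_ib members omitted:

	/* before: the user fence carried the submission sequence */
	struct amdgpu_user_fence {
		struct amdgpu_bo	*bo;		/* write-back buffer */
		uint32_t		offset;		/* write-back offset into the bo */
		uint64_t		sequence;	/* removed by this change */
	};

	/* after: each IB tracks its own sequence (sketch, other members elided) */
	struct amdgpu_ib {
		/* ... */
		uint64_t		sequence;
	};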
int amdgpu_fence_driver_init(struct amdgpu_device *adev);
int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
- struct fence *fence);
+ struct fence *fence, uint64_t queued_seq);
struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
struct amdgpu_ring *ring, uint64_t seq);
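amdgpu_ctx_add_fence() gains an explicit queued_seq parameter, so the caller hands in the virtual sequence it reserved for this submission instead of the helper re-reading the entity's last_queued_v_seq, which a later submission on the same entity may already have bumped. A condensed caller sketch based on the hunks below (illustrative, not verbatim):

	/* scheduler enabled: pass the sequence already stamped into the IB */
	ib->sequence = amdgpu_ctx_add_fence(ib->ctx, ring, &ib->fence->base,
					    ib->sequence);

	/* scheduler disabled: pass 0; the context ring's own counter is used */
	ib->sequence = amdgpu_ctx_add_fence(ib->ctx, ring, &ib->fence->base, 0);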
ib->oa_size = amdgpu_bo_size(oa);
}
}
-
/* wrap the last IB with user fence */
if (parser->uf.bo) {
struct amdgpu_ib *ib = &parser->ibs[parser->num_ibs - 1];
if (amdgpu_enable_scheduler && parser->num_ibs) {
struct amdgpu_ring *ring =
amdgpu_cs_parser_get_ring(adev, parser);
- parser->uf.sequence = atomic64_inc_return(
+ parser->ibs[parser->num_ibs - 1].sequence = atomic64_inc_return(
&parser->ctx->rings[ring->idx].c_entity.last_queued_v_seq);
if (ring->is_pte_ring || (parser->bo_list && parser->bo_list->has_userptr)) {
r = amdgpu_cs_parser_prepare_job(parser);
amd_sched_push_job(ring->scheduler,
&parser->ctx->rings[ring->idx].c_entity,
parser);
- cs->out.handle = parser->uf.sequence;
+ cs->out.handle = parser->ibs[parser->num_ibs - 1].sequence;
up_read(&adev->exclusive_lock);
return 0;
}
}
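In the scheduler path of the CS ioctl, the freshly reserved virtual sequence is now stored in the last IB rather than in parser->uf, and the same value is returned to user space as the CS handle. A compact sketch of that flow (last_ib is a local alias introduced only for this sketch):

	struct amdgpu_ib *last_ib = &parser->ibs[parser->num_ibs - 1];

	/* reserve a virtual sequence number on the context's scheduler entity */
	last_ib->sequence = atomic64_inc_return(
		&parser->ctx->rings[ring->idx].c_entity.last_queued_v_seq);

	/* queue the job and report the sequence back as the submission handle */
	amd_sched_push_job(ring->scheduler,
			   &parser->ctx->rings[ring->idx].c_entity, parser);
	cs->out.handle = last_ib->sequence;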
uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
- struct fence *fence)
+ struct fence *fence, uint64_t queued_seq)
{
struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
uint64_t seq = 0;
struct fence *other = NULL;
if (amdgpu_enable_scheduler)
- seq = atomic64_read(&cring->c_entity.last_queued_v_seq);
+ seq = queued_seq;
else
seq = cring->sequence;
idx = seq % AMDGPU_CTX_MAX_CS_PENDING;
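With the new parameter, the scheduler path of amdgpu_ctx_add_fence() indexes the context's fence slots with the caller-supplied sequence instead of a value read back at call time. A condensed sketch of the slot selection (the fences[] array name is assumed from the surrounding, elided code):

	uint64_t seq = amdgpu_enable_scheduler ? queued_seq : cring->sequence;
	unsigned idx = seq % AMDGPU_CTX_MAX_CS_PENDING;

	other = cring->fences[idx];	/* fence previously stored in this slot */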
struct amdgpu_ring *ring;
struct amdgpu_ctx *ctx, *old_ctx;
struct amdgpu_vm *vm;
+ uint64_t sequence;
unsigned i;
int r = 0;
return r;
}
+ sequence = amdgpu_enable_scheduler ? ib->sequence : 0;
+
if (ib->ctx)
ib->sequence = amdgpu_ctx_add_fence(ib->ctx, ring,
- &ib->fence->base);
+ &ib->fence->base,
+ sequence);
/* wrap the last IB with fence */
if (ib->user) {
goto err;
}
atomic64_set(&c_entity->last_emitted_v_seq,
- sched_job->uf.sequence);
+ sched_job->ibs[sched_job->num_ibs - 1].sequence);
wake_up_all(&c_entity->wait_emit);
mutex_unlock(&sched_job->job_lock);
if (sched_job->ctx) {
c_entity = &sched_job->ctx->rings[ring->idx].c_entity;
atomic64_set(&c_entity->last_signaled_v_seq,
- sched_job->uf.sequence);
+ sched_job->ibs[sched_job->num_ibs - 1].sequence);
}
/* wake up users waiting for time stamp */
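The scheduler's emit and signal bookkeeping follows the same substitution: both counters are now advanced with the sequence carried by the job's last IB. A sketch of the shared expression as a helper (the helper name is hypothetical, and the job type is assumed to be the CS parser, as suggested by the ibs/num_ibs accesses above):

	/* hypothetical helper; the patch open-codes this at both call sites */
	static inline uint64_t amdgpu_sched_job_seq(struct amdgpu_cs_parser *job)
	{
		return job->ibs[job->num_ibs - 1].sequence;
	}

	/* emit path */
	atomic64_set(&c_entity->last_emitted_v_seq, amdgpu_sched_job_seq(sched_job));
	/* signal path */
	atomic64_set(&c_entity->last_signaled_v_seq, amdgpu_sched_job_seq(sched_job));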
sched_job->run_job = amdgpu_vm_run_job;
sched_job->free_job = amdgpu_vm_free_job;
v_seq = atomic64_inc_return(&adev->kernel_ctx->rings[ring->idx].c_entity.last_queued_v_seq);
- sched_job->uf.sequence = v_seq;
+ ib->sequence = v_seq;
amd_sched_push_job(ring->scheduler,
&adev->kernel_ctx->rings[ring->idx].c_entity,
sched_job);
sched_job->run_job = amdgpu_vm_run_job;
sched_job->free_job = amdgpu_vm_free_job;
v_seq = atomic64_inc_return(&adev->kernel_ctx->rings[ring->idx].c_entity.last_queued_v_seq);
- sched_job->uf.sequence = v_seq;
+ ib->sequence = v_seq;
amd_sched_push_job(ring->scheduler,
&adev->kernel_ctx->rings[ring->idx].c_entity,
sched_job);
sched_job->run_job = amdgpu_vm_bo_update_mapping_run_job;
sched_job->free_job = amdgpu_vm_free_job;
v_seq = atomic64_inc_return(&adev->kernel_ctx->rings[ring->idx].c_entity.last_queued_v_seq);
- sched_job->uf.sequence = v_seq;
+ ib->sequence = v_seq;
amd_sched_push_job(ring->scheduler,
&adev->kernel_ctx->rings[ring->idx].c_entity,
sched_job);
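The three VM submission paths above (buffer clear, page-directory update, and PTE update) all switch to the same pattern: reserve a virtual sequence on the kernel context's entity, stamp it into the job's IB instead of its user fence, and push the job. A sketch of that shared shape, assuming ib points at the single IB of sched_job:

	v_seq = atomic64_inc_return(
		&adev->kernel_ctx->rings[ring->idx].c_entity.last_queued_v_seq);
	ib->sequence = v_seq;	/* previously: sched_job->uf.sequence = v_seq */
	amd_sched_push_job(ring->scheduler,
			   &adev->kernel_ctx->rings[ring->idx].c_entity,
			   sched_job);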