We only have one context for all the IBs of a submission, so store it once in the job instead of duplicating it in every IB.
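
A minimal sketch of the idea, using made-up demo_* types standing in for the real amdgpu structures (the actual definitions live in the driver headers):

#include <stdbool.h>
#include <stdint.h>

struct demo_ib   { uint32_t length_dw; };   /* no per-IB ctx any more */
struct demo_job  { uint64_t ctx; uint32_t num_ibs; struct demo_ib *ibs; };
struct demo_ring { uint64_t current_ctx; };

/* One fence context per job is enough to decide whether the ring has to
 * switch contexts before running the job's IBs. */
static bool demo_needs_ctx_switch(const struct demo_ring *ring,
                                  const struct demo_job *job)
{
        return ring->current_ctx != job->ctx;
}

The patch below does this for the real structures: the ctx member moves from amdgpu_ib to amdgpu_job, and amdgpu_ib_schedule reads it from the job (or uses 0 for ring tests, which have no job).
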
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
struct amdgpu_user_fence *user;
unsigned vm_id;
uint64_t vm_pd_addr;
- uint64_t ctx;
uint32_t gds_base, gds_size;
uint32_t gws_base, gws_size;
uint32_t oa_base, oa_size;
struct fence *fence; /* the hw fence */
uint32_t num_ibs;
void *owner;
+ uint64_t ctx;
struct amdgpu_user_fence uf;
};
#define to_amdgpu_job(sched_job) \
ib->length_dw = chunk_ib->ib_bytes / 4;
ib->flags = chunk_ib->flags;
- ib->ctx = parser->ctx->rings[ring->idx].entity.fence_context;
j++;
}
static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
			    union drm_amdgpu_cs *cs)
{
struct amdgpu_ring *ring = p->job->ring;
+ struct amd_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
struct fence *fence;
struct amdgpu_job *job;
int r;
p->job = NULL;
r = amd_sched_job_init(&job->base, &ring->sched,
- &p->ctx->rings[ring->idx].entity,
- amdgpu_job_timeout_func,
- amdgpu_job_free_func,
- p->filp, &fence);
+ entity, amdgpu_job_timeout_func,
+ amdgpu_job_free_func,
+ p->filp, &fence);
if (r) {
amdgpu_job_free(job);
return r;
}
job->owner = p->filp;
+ job->ctx = entity->fence_context;
p->fence = fence_get(fence);
cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, fence);
job->ibs[job->num_ibs - 1].sequence = cs->out.handle;
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib *ib = &ibs[0];
- struct fence *hwf;
- struct amdgpu_vm *vm = NULL;
- unsigned i, patch_offset = ~0;
bool skip_preamble, need_ctx_switch;
+ unsigned patch_offset = ~0;
+ struct amdgpu_vm *vm;
+ struct fence *hwf;
+ uint64_t ctx;
+ unsigned i;
int r = 0;
if (num_ibs == 0)
return -EINVAL;
- if (job) /* for domain0 job like ring test, ibs->job is not assigned */
+ /* ring tests don't use a job */
+ if (job) {
vm = job->vm;
+ ctx = job->ctx;
+ } else {
+ vm = NULL;
+ ctx = 0;
+ }
if (!ring->ready) {
dev_err(adev->dev, "couldn't schedule ib\n");
/* always set cond_exec_polling to CONTINUE */
*ring->cond_exe_cpu_addr = 1;
- skip_preamble = ring->current_ctx == ib->ctx;
- need_ctx_switch = ring->current_ctx != ib->ctx;
+ skip_preamble = ring->current_ctx == ctx;
+ need_ctx_switch = ring->current_ctx != ctx;
for (i = 0; i < num_ibs; ++i) {
ib = &ibs[i];
if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
amdgpu_ring_patch_cond_exec(ring, patch_offset);
- ring->current_ctx = ibs->ctx;
+ ring->current_ctx = ctx;
amdgpu_ring_commit(ring);
return 0;
}
return -EINVAL;
r = amd_sched_job_init(&job->base, &ring->sched,
- entity,
- amdgpu_job_timeout_func,
- amdgpu_job_free_func,
- owner, &fence);
+ entity, amdgpu_job_timeout_func,
+ amdgpu_job_free_func, owner, &fence);
if (r)
return r;
job->owner = owner;
+ job->ctx = entity->fence_context;
*f = fence_get(fence);
amd_sched_entity_push_job(&job->base);