#define AMDGPU_FENCE_FLAG_64BIT (1 << 0)
#define AMDGPU_FENCE_FLAG_INT (1 << 1)
-struct amdgpu_user_fence {
-	/* write-back bo */
-	struct amdgpu_bo	*bo;
-	/* write-back address offset to bo start */
-	uint32_t		offset;
-};
-
int amdgpu_fence_driver_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);
	uint32_t		length_dw;
	uint64_t		gpu_addr;
	uint32_t		*ptr;
-	struct amdgpu_user_fence *user;
	uint32_t		flags;
-	/* resulting sequence number */
-	uint64_t		sequence;
};
enum amdgpu_ring_type {
struct amdgpu_cs_chunk {
	uint32_t		chunk_id;
	uint32_t		length_dw;
-	uint32_t		*kdata;
+	void			*kdata;
};
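The switch to void * matches how kdata is actually used: amdgpu_cs_parser_init() copies each chunk verbatim from userspace and then reinterprets it per chunk_id, so the buffer has no natural uint32_t typing. Roughly, as a simplified sketch of the existing copy path (not part of this patch; cdata stands for the __user pointer taken from the chunk descriptor):

/* Simplified sketch of the chunk copy in amdgpu_cs_parser_init();
 * per-chunk_id size checks are omitted. 'cdata' is the __user pointer
 * derived from the chunk descriptor. */
size = p->chunks[i].length_dw;
p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
if (p->chunks[i].kdata == NULL) {
	ret = -ENOMEM;
	goto free_partial_kdata;
}

if (copy_from_user(p->chunks[i].kdata, cdata, size * sizeof(uint32_t))) {
	ret = -EFAULT;
	goto free_partial_kdata;
}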
struct amdgpu_job {
	uint32_t		gds_base, gds_size;
	uint32_t		gws_base, gws_size;
	uint32_t		oa_base, oa_size;
-	struct amdgpu_user_fence uf;
+
+	/* user fence handling */
+	struct amdgpu_bo	*uf_bo;
+	uint32_t		uf_offset;
+	uint64_t		uf_sequence;
+
};
#define to_amdgpu_job(sched_job) \
container_of((sched_job), struct amdgpu_job, base)
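With the write-back BO, offset and sequence number attached to the job, the ring code can emit the user fence write directly from the job at submission time. A minimal sketch of that consumer side, assuming it sits where amdgpu_ib_schedule() emits the hardware fence (illustrative only, not part of this diff):

/* Illustrative sketch of the consumer side in amdgpu_ib_schedule(),
 * not part of this patch. */
if (job && job->uf_bo) {
	uint64_t addr = amdgpu_bo_gpu_offset(job->uf_bo) + job->uf_offset;

	/* write the 64-bit user sequence number into the write-back BO */
	amdgpu_ring_emit_fence(ring, addr, job->uf_sequence,
			       AMDGPU_FENCE_FLAG_64BIT);
}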
}
static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
-				      struct amdgpu_user_fence *uf,
-				      struct drm_amdgpu_cs_chunk_fence *fence_data)
+				      struct drm_amdgpu_cs_chunk_fence *data,
+				      uint32_t *offset)
{
	struct drm_gem_object *gobj;
-	uint32_t handle;
-	handle = fence_data->handle;
	gobj = drm_gem_object_lookup(p->adev->ddev, p->filp,
-				     fence_data->handle);
+				     data->handle);
	if (gobj == NULL)
		return -EINVAL;
-	uf->bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
-	uf->offset = fence_data->offset;
-
-	if (amdgpu_ttm_tt_get_usermm(uf->bo->tbo.ttm)) {
-		drm_gem_object_unreference_unlocked(gobj);
-		return -EINVAL;
-	}
-
-	p->uf_entry.robj = amdgpu_bo_ref(uf->bo);
+	p->uf_entry.robj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
	p->uf_entry.priority = 0;
	p->uf_entry.tv.bo = &p->uf_entry.robj->tbo;
	p->uf_entry.tv.shared = true;
	p->uf_entry.user_pages = NULL;
+	*offset = data->offset;
	drm_gem_object_unreference_unlocked(gobj);
+
+	if (amdgpu_ttm_tt_get_usermm(p->uf_entry.robj->tbo.ttm)) {
+		amdgpu_bo_unref(&p->uf_entry.robj);
+		return -EINVAL;
+	}
+
	return 0;
}
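For reference, the drm_amdgpu_cs_chunk_fence data handled here is supplied by userspace as an AMDGPU_CHUNK_ID_FENCE chunk. A rough sketch of how such a chunk could be populated with the uapi structures from amdgpu_drm.h (wb_bo_handle is a placeholder for the GEM handle of the write-back BO; the surrounding ioctl plumbing is omitted):

/* Illustrative userspace-side sketch, not part of this patch. */
struct drm_amdgpu_cs_chunk_fence fence_info = {
	.handle = wb_bo_handle,	/* GEM handle of the write-back BO */
	.offset = 0,		/* byte offset of the fence slot within it */
};

struct drm_amdgpu_cs_chunk chunk = {
	.chunk_id   = AMDGPU_CHUNK_ID_FENCE,
	.length_dw  = sizeof(fence_info) / 4,
	.chunk_data = (uintptr_t)&fence_info,
};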
	union drm_amdgpu_cs *cs = data;
	uint64_t *chunk_array_user;
	uint64_t *chunk_array;
-	struct amdgpu_user_fence uf = {};
	unsigned size, num_ibs = 0;
+	uint32_t uf_offset = 0;
	int i;
	int ret;
				goto free_partial_kdata;
			}
-			ret = amdgpu_cs_user_fence_chunk(p, &uf, (void *)p->chunks[i].kdata);
+			ret = amdgpu_cs_user_fence_chunk(p, p->chunks[i].kdata,
+							 &uf_offset);
			if (ret)
				goto free_partial_kdata;
	if (ret)
		goto free_all_kdata;
-	p->job->uf = uf;
+	if (p->uf_entry.robj) {
+		p->job->uf_bo = amdgpu_bo_ref(p->uf_entry.robj);
+		p->job->uf_offset = uf_offset;
+	}
	kfree(chunk_array);
	return 0;
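The extra amdgpu_bo_ref() taken here has to be balanced when the job is destroyed; the parser's own reference in p->uf_entry.robj is dropped separately in the parser cleanup path. A sketch of the job-side teardown, assuming a hypothetical helper called from the job free path:

/* Illustrative sketch; the helper name is hypothetical and stands in
 * for whatever the job free path does with uf_bo. */
static void amdgpu_job_put_user_fence(struct amdgpu_job *job)
{
	/* amdgpu_bo_unref() tolerates a NULL pointer */
	amdgpu_bo_unref(&job->uf_bo);
}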
	INIT_LIST_HEAD(&duplicates);
	amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);
-	if (p->job->uf.bo)
+	if (p->uf_entry.robj)
		list_add(&p->uf_entry.tv.head, &p->validated);
	if (need_mmap_lock)
		j++;
	}
-	/* wrap the last IB with user fence */
-	if (parser->job->uf.bo) {
-		struct amdgpu_ib *ib = &parser->job->ibs[parser->job->num_ibs - 1];
-
-		/* UVD & VCE fw doesn't support user fences */
-		if (parser->job->ring->type == AMDGPU_RING_TYPE_UVD ||
-		    parser->job->ring->type == AMDGPU_RING_TYPE_VCE)
-			return -EINVAL;
-
-		ib->user = &parser->job->uf;
-	}
+	/* UVD & VCE fw doesn't support user fences */
+	if (parser->job->uf_bo && (
+	    parser->job->ring->type == AMDGPU_RING_TYPE_UVD ||
+	    parser->job->ring->type == AMDGPU_RING_TYPE_VCE))
+		return -EINVAL;
	return 0;
}
	job->ctx = entity->fence_context;
	p->fence = fence_get(fence);
	cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, fence);
-	job->ibs[job->num_ibs - 1].sequence = cs->out.handle;
+	job->uf_sequence = cs->out.handle;
	trace_amdgpu_cs_ioctl(job);
	amd_sched_entity_push_job(&job->base);
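Since cs->out.handle now doubles as the value the GPU writes through the user fence (job->uf_sequence), userspace can check completion by comparing the 64-bit word in its mapped write-back BO against the handle returned by the CS ioctl. A minimal, hypothetical polling helper (wraparound handling omitted):

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical userspace helper, for illustration only: fence_cpu_addr is
 * the CPU mapping of the write-back BO plus the offset passed in the fence
 * chunk; seq is the value returned in cs->out.handle. */
static bool user_fence_signaled(const volatile uint64_t *fence_cpu_addr,
				uint64_t seq)
{
	return *fence_cpu_addr >= seq;
}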