This is required to support per-context fences.
v2: add amdgpu_ctx_get/put
v3: improve get/put
v4: squash hlock fix
Signed-off-by: Jammy Zhou <Jammy.Zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
uint32_t id, struct amdgpu_ctx_state *state);
void amdgpu_ctx_fini(struct amdgpu_fpriv *fpriv);
+struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
+int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
extern int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
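
Not part of the patch itself: a minimal sketch of the calling convention the two new functions establish. A successful amdgpu_ctx_get() hands the caller one reference, which must be returned with amdgpu_ctx_put() on every exit path (variable names here are placeholders):

	struct amdgpu_ctx *ctx;

	ctx = amdgpu_ctx_get(fpriv, id);	/* lookup takes a reference */
	if (ctx == NULL)
		return -EINVAL;			/* no such id for this file */

	/* ... use the context, e.g. its per-context fences ... */

	amdgpu_ctx_put(ctx);			/* drop the lookup reference */

The wait_cs ioctl change below follows exactly this pattern.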
uint64_t seq[AMDGPU_MAX_RINGS] = {0};
struct amdgpu_ring *ring = NULL;
unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
+ struct amdgpu_ctx *ctx;
long r;
+ ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
+ if (ctx == NULL)
+ return -EINVAL;
+
r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance,
wait->in.ring, &ring);
-	if (r)
+	if (r) {
+		amdgpu_ctx_put(ctx);
		return r;
+	}

	seq[ring->idx] = wait->in.handle;
r = amdgpu_fence_wait_seq_timeout(adev, seq, true, timeout);
+ amdgpu_ctx_put(ctx);
	if (r < 0)
		return r;

	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r == 0);

	return 0;
}
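
Note that amdgpu_ctx_put() is placed before the return-code check, so the reference taken by the lookup is dropped whether the wait succeeds, times out, or fails; no exit from the ioctl can leak the context.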
+
+struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
+{
+ struct amdgpu_ctx *ctx;
+ struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
+
+ mutex_lock(&mgr->lock);
+ ctx = idr_find(&mgr->ctx_handles, id);
+ if (ctx)
+ kref_get(&ctx->refcount);
+ mutex_unlock(&mgr->lock);
+ return ctx;
+}
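+
Holding mgr->lock across both idr_find() and kref_get() is what makes the lookup safe: the final kref_put() in amdgpu_ctx_put() below runs under the same lock, so a concurrent release cannot free the context between the find and the reference grab.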
+
+int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
+{
+ struct amdgpu_fpriv *fpriv;
+ struct amdgpu_ctx_mgr *mgr;
+
+ if (ctx == NULL)
+ return -EINVAL;
+
+ fpriv = ctx->fpriv;
+ mgr = &fpriv->ctx_mgr;
+ mutex_lock(&mgr->lock);
+ kref_put(&ctx->refcount, amdgpu_ctx_do_release);
+ mutex_unlock(&mgr->lock);
+
+ return 0;
+}
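
amdgpu_ctx_do_release() is not shown in this hunk. As a rough sketch only (the ctx->id field and the exact cleanup are assumptions, not the patch's code), a kref release callback fitting this scheme would look something like:

	/* Hypothetical sketch: kref_put() above is called with mgr->lock
	 * held, so this callback may touch the IDR without taking the
	 * lock itself.
	 */
	static void amdgpu_ctx_do_release(struct kref *ref)
	{
		struct amdgpu_ctx *ctx =
			container_of(ref, struct amdgpu_ctx, refcount);
		struct amdgpu_ctx_mgr *mgr = &ctx->fpriv->ctx_mgr;

		idr_remove(&mgr->ctx_handles, ctx->id);	/* ctx->id assumed */
		kfree(ctx);
	}

Dropping the last reference under mgr->lock means the release path never has to re-acquire it, at the cost of holding the mutex across the kfree().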
uint32_t ip_type;
uint32_t ip_instance;
uint32_t ring;
- uint32_t _pad;
+ uint32_t ctx_id;
};
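
For reference, an illustrative userspace sketch of the new field in use (drmIoctl() and DRM_IOCTL_AMDGPU_WAIT_CS are the existing libdrm/UAPI entry points; the variable names are placeholders):

	union drm_amdgpu_wait_cs args;

	memset(&args, 0, sizeof(args));
	args.in.handle = seq_no;		/* fence seq from command submission */
	args.in.timeout = timeout_ns;
	args.in.ip_type = AMDGPU_HW_IP_GFX;
	args.in.ring = 0;
	args.in.ctx_id = ctx_id;		/* new: the context the fence belongs to */

	r = drmIoctl(fd, DRM_IOCTL_AMDGPU_WAIT_CS, &args);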
struct drm_amdgpu_wait_cs_out {