struct amdgpu_bo *bo;
/* write-back address offset to bo start */
uint32_t offset;
+ /* resulting sequence number */
+ uint64_t sequence;
};
int amdgpu_fence_driver_init(struct amdgpu_device *adev);
 * context-related structures
*/
+#define AMDGPU_CTX_MAX_CS_PENDING 16
+
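+/* Per-ring context state: sequence is the next sequence number handed out
+ * to user space, fences[] buffers the last AMDGPU_CTX_MAX_CS_PENDING
+ * submissions so they can be looked up again by sequence number.
+ */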
+struct amdgpu_ctx_ring {
+ uint64_t sequence;
+ struct fence *fences[AMDGPU_CTX_MAX_CS_PENDING];
+};
+
struct amdgpu_ctx {
struct kref refcount;
unsigned reset_counter;
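+	/* protects rings[].sequence and rings[].fences */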
+ spinlock_t ring_lock;
+ struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS];
};
struct amdgpu_ctx_mgr {
struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
+uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
+ struct fence *fence);
+struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
+ struct amdgpu_ring *ring, uint64_t seq);
+
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
sizeof(struct drm_amdgpu_cs_chunk_dep);
for (j = 0; j < num_deps; ++j) {
- struct amdgpu_fence *fence;
struct amdgpu_ring *ring;
struct amdgpu_ctx *ctx;
+ struct fence *fence;
r = amdgpu_cs_get_ring(adev, deps[j].ip_type,
deps[j].ip_instance,
if (ctx == NULL)
return -EINVAL;
- r = amdgpu_fence_recreate(ring, p->filp,
- deps[j].handle,
- &fence);
- if (r) {
+ fence = amdgpu_ctx_get_fence(ctx, ring,
+ deps[j].handle);
+ if (IS_ERR(fence)) {
+ r = PTR_ERR(fence);
amdgpu_ctx_put(ctx);
return r;
- }
-
- r = amdgpu_sync_fence(adev, &ib->sync, &fence->base);
- amdgpu_fence_unref(&fence);
- amdgpu_ctx_put(ctx);
- if (r)
- return r;
+		} else if (fence) {
+			r = amdgpu_sync_fence(adev, &ib->sync, fence);
+			fence_put(fence);
+			amdgpu_ctx_put(ctx);
+			if (r)
+				return r;
+		} else {
+			/* already signaled and retired, just drop the
+			 * context reference taken for this dependency
+			 */
+			amdgpu_ctx_put(ctx);
+		}
}
}
r = amdgpu_cs_ib_fill(adev, &parser);
}
- if (!r)
+ if (!r) {
r = amdgpu_cs_dependencies(adev, &parser);
+ if (r)
+			DRM_ERROR("Failed to handle dependencies (%d)!\n", r);
+ }
if (r) {
amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
goto out;
}
- cs->out.handle = parser.ibs[parser.num_ibs - 1].fence->seq;
+ cs->out.handle = parser.uf.sequence;
out:
amdgpu_cs_parser_fini(&parser, r, true);
up_read(&adev->exclusive_lock);
union drm_amdgpu_wait_cs *wait = data;
struct amdgpu_device *adev = dev->dev_private;
unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
- struct amdgpu_fence *fence = NULL;
struct amdgpu_ring *ring = NULL;
struct amdgpu_ctx *ctx;
+ struct fence *fence;
long r;
+ r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance,
+ wait->in.ring, &ring);
+ if (r)
+ return r;
+
ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
if (ctx == NULL)
return -EINVAL;
- r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance,
- wait->in.ring, &ring);
- if (r) {
- amdgpu_ctx_put(ctx);
- return r;
- }
+ fence = amdgpu_ctx_get_fence(ctx, ring, wait->in.handle);
+ if (IS_ERR(fence))
+ r = PTR_ERR(fence);
- r = amdgpu_fence_recreate(ring, filp, wait->in.handle, &fence);
- if (r) {
- amdgpu_ctx_put(ctx);
- return r;
- }
+ else if (fence) {
+ r = fence_wait_timeout(fence, true, timeout);
+ fence_put(fence);
+ } else
+ r = 1;
- r = fence_wait_timeout(&fence->base, true, timeout);
- amdgpu_fence_unref(&fence);
amdgpu_ctx_put(ctx);
if (r < 0)
return r;
static void amdgpu_ctx_do_release(struct kref *ref)
{
struct amdgpu_ctx *ctx;
+ unsigned i, j;
ctx = container_of(ref, struct amdgpu_ctx, refcount);
+
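+	/* drop the references to all fences still tracked by the context */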
+ for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
+ for (j = 0; j < AMDGPU_CTX_MAX_CS_PENDING; ++j)
+ fence_put(ctx->rings[i].fences[j]);
kfree(ctx);
}
int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv,
uint32_t *id)
{
- int r;
struct amdgpu_ctx *ctx;
struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
+ int i, r;
ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
memset(ctx, 0, sizeof(*ctx));
kref_init(&ctx->refcount);
+ spin_lock_init(&ctx->ring_lock);
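+	/* sequence numbers start at 1, so 0 is never handed out as a handle */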
+ for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
+ ctx->rings[i].sequence = 1;
mutex_unlock(&mgr->lock);
return 0;
kref_put(&ctx->refcount, amdgpu_ctx_do_release);
return 0;
}
+
+uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
+ struct fence *fence)
+{
+	struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
+ uint64_t seq = cring->sequence;
+ unsigned idx = seq % AMDGPU_CTX_MAX_CS_PENDING;
+ struct fence *other = cring->fences[idx];
+
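+	/* the slot is reused once the buffer wraps around, so make sure the
+	 * previous submission occupying it has signaled before dropping it
+	 */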
+ if (other) {
+ signed long r;
+ r = fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
+ if (r < 0)
+ DRM_ERROR("Error (%ld) waiting for fence!\n", r);
+ }
+
+ fence_get(fence);
+
+ spin_lock(&ctx->ring_lock);
+ cring->fences[idx] = fence;
+ cring->sequence++;
+ spin_unlock(&ctx->ring_lock);
+
+ fence_put(other);
+
+ return seq;
+}
+
+struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
+ struct amdgpu_ring *ring, uint64_t seq)
+{
+	struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
+ struct fence *fence;
+
+ spin_lock(&ctx->ring_lock);
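+	/* the requested sequence number hasn't been emitted yet */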
+ if (seq >= cring->sequence) {
+ spin_unlock(&ctx->ring_lock);
+ return ERR_PTR(-EINVAL);
+ }
+
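+	/* fences that already fell out of the buffer were waited on when
+	 * their slot was reused, so report them as signaled
+	 */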
+ if (seq < cring->sequence - AMDGPU_CTX_MAX_CS_PENDING) {
+ spin_unlock(&ctx->ring_lock);
+ return NULL;
+ }
+
+ fence = fence_get(cring->fences[seq % AMDGPU_CTX_MAX_CS_PENDING]);
+ spin_unlock(&ctx->ring_lock);
+
+ return fence;
+}
/* wrap the last IB with fence */
if (ib->user) {
uint64_t addr = amdgpu_bo_gpu_offset(ib->user->bo);
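+		/* remember the sequence number that is reported back to
+		 * user space as the CS handle for this submission
+		 */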
+ ib->user->sequence = amdgpu_ctx_add_fence(ib->ctx, ring,
+ &ib->fence->base);
addr += ib->user->offset;
- amdgpu_ring_emit_fence(ring, addr, ib->fence->seq,
+ amdgpu_ring_emit_fence(ring, addr, ib->user->sequence,
AMDGPU_FENCE_FLAG_64BIT);
}