return ERR_PTR(r);
}
- radeon_semaphore_sync_resv(sem, resv, false);
+ radeon_semaphore_sync_resv(rdev, sem, resv, false);
radeon_semaphore_sync_rings(rdev, sem, ring->idx);
for (i = 0; i < num_loops; i++) {
return ERR_PTR(r);
}
- radeon_semaphore_sync_resv(sem, resv, false);
+ radeon_semaphore_sync_resv(rdev, sem, resv, false);
radeon_semaphore_sync_rings(rdev, sem, ring->idx);
for (i = 0; i < num_loops; i++) {
return ERR_PTR(r);
}
- radeon_semaphore_sync_resv(sem, resv, false);
+ radeon_semaphore_sync_resv(rdev, sem, resv, false);
radeon_semaphore_sync_rings(rdev, sem, ring->idx);
for (i = 0; i < num_loops; i++) {
return ERR_PTR(r);
}
- radeon_semaphore_sync_resv(sem, resv, false);
+ radeon_semaphore_sync_resv(rdev, sem, resv, false);
radeon_semaphore_sync_rings(rdev, sem, ring->idx);
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
return ERR_PTR(r);
}
- radeon_semaphore_sync_resv(sem, resv, false);
+ radeon_semaphore_sync_resv(rdev, sem, resv, false);
radeon_semaphore_sync_rings(rdev, sem, ring->idx);
for (i = 0; i < num_loops; i++) {
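The five hunks above make the same mechanical change in the per-ASIC copy functions (judging by the PACKET3_SET_CONFIG_REG write, the fourth one is the r600 CP DMA path; the two identical hunks at the end of this section cover the remaining ASICs' DMA paths): every call site now passes rdev, because radeon_semaphore_sync_resv() must be able to tell which fences in the reservation object belong to this device. Each call site sits in a copy function shaped roughly as follows; this is a sketch with elisions rather than literal kernel code, and asic_copy_dma is a stand-in name:

	struct radeon_fence *asic_copy_dma(struct radeon_device *rdev,
					   uint64_t src_offset, uint64_t dst_offset,
					   unsigned num_gpu_pages,
					   struct reservation_object *resv)
	{
		struct radeon_semaphore *sem = NULL;
		struct radeon_ring *ring = &rdev->ring[rdev->asic->copy.dma_ring_index];
		struct radeon_fence *fence;
		unsigned num_loops, i;
		int r;

		/* ... allocate sem, compute num_loops, size and lock the ring;
		 * each failure path does radeon_semaphore_free(rdev, &sem, NULL)
		 * and returns ERR_PTR(r) ... */

		/* record the fences tracked by the reservation object, then make
		 * this ring wait on the recorded rings before the copy starts */
		radeon_semaphore_sync_resv(rdev, sem, resv, false);
		radeon_semaphore_sync_rings(rdev, sem, ring->idx);

		for (i = 0; i < num_loops; i++) {
			/* ... emit one copy packet ... */
		}

		/* ... emit a fence, unlock the ring, free sem ... */
		return fence;
	}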
struct radeon_semaphore *semaphore);
void radeon_semaphore_sync_fence(struct radeon_semaphore *semaphore,
struct radeon_fence *fence);
-void radeon_semaphore_sync_resv(struct radeon_semaphore *semaphore,
- struct reservation_object *resv,
- bool shared);
+int radeon_semaphore_sync_resv(struct radeon_device *rdev,
+ struct radeon_semaphore *semaphore,
+ struct reservation_object *resv,
+ bool shared);
int radeon_semaphore_sync_rings(struct radeon_device *rdev,
struct radeon_semaphore *semaphore,
int waiting_ring);
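This prototype change in radeon.h is the heart of the patch: radeon_semaphore_sync_resv() gains the device pointer so it can recognize its own fences, and it now returns an int because the fallback path for foreign fences waits on the CPU and can fail (notably with -ERESTARTSYS from an interruptible wait). A caller with an error path would consume the result as sketched below; the cleanup shown is hypothetical, and note that the copy-function call sites above still ignore the return value in this patch:

	r = radeon_semaphore_sync_resv(rdev, sem, resv, false);
	if (r) {
		/* hypothetical cleanup, mirroring the existing ring-lock
		 * failure paths in the copy functions */
		radeon_semaphore_free(rdev, &sem, NULL);
		return ERR_PTR(r);
	}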
return 0;
}
-static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
+static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
{
- int i;
+ int i, r = 0;
for (i = 0; i < p->nrelocs; i++) {
struct reservation_object *resv;
if (!p->relocs[i].robj)
	continue;
resv = p->relocs[i].robj->tbo.resv;
- radeon_semaphore_sync_resv(p->ib.semaphore, resv,
- p->relocs[i].tv.shared);
+ r = radeon_semaphore_sync_resv(p->rdev, p->ib.semaphore, resv,
+ p->relocs[i].tv.shared);
+
+ if (r)
+ break;
}
+ return r;
}
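radeon_cs_sync_rings() now stops at the first buffer that fails to sync and hands the error up instead of returning void. The return value matters mostly for -ERESTARTSYS: fence_wait(f, true) is an interruptible wait, so a pending signal can abort it, after which the ioctl is restarted rather than failed. That is why the two CS hunks below log only real errors; the pattern, annotated:

	r = radeon_cs_sync_rings(parser);
	if (r) {
		/* -ERESTARTSYS means a signal interrupted the wait and the
		 * ioctl will be restarted, so don't spam the log for it */
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to sync rings: %i\n", r);
		return r;
	}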
/* XXX: note that this is called from the legacy UMS CS ioctl as well */
return r;
}
+ r = radeon_cs_sync_rings(parser);
+ if (r) {
+ if (r != -ERESTARTSYS)
+ DRM_ERROR("Failed to sync rings: %i\n", r);
+ return r;
+ }
+
if (parser->ring == R600_RING_TYPE_UVD_INDEX)
radeon_uvd_note_usage(rdev);
else if ((parser->ring == TN_RING_TYPE_VCE1_INDEX) ||
(parser->ring == TN_RING_TYPE_VCE2_INDEX))
radeon_vce_note_usage(rdev);
- radeon_cs_sync_rings(parser);
r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
if (r) {
DRM_ERROR("Failed to schedule IB !\n");
if (r) {
goto out;
}
- radeon_cs_sync_rings(parser);
+
+ r = radeon_cs_sync_rings(parser);
+ if (r) {
+ if (r != -ERESTARTSYS)
+ DRM_ERROR("Failed to sync rings: %i\n", r);
+ goto out;
+ }
radeon_semaphore_sync_fence(parser->ib.semaphore, vm->fence);
if ((rdev->family >= CHIP_TAHITI) &&
uint64_t seq[RADEON_NUM_RINGS] = {};
long r;
+ /*
+ * This function should not be called on !radeon fences.
+ * If this is the case, it would mean this function can
+ * also be called on radeon fences belonging to another card.
+ * exclusive_lock is not held in that case.
+ */
+ if (WARN_ON_ONCE(!to_radeon_fence(&fence->base)))
+ return fence_wait(&fence->base, intr);
+
seq[fence->ring] = fence->seq;
r = radeon_fence_wait_seq_timeout(fence->rdev, seq, intr, MAX_SCHEDULE_TIMEOUT);
if (r < 0) {
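The WARN_ON_ONCE guard relies on to_radeon_fence(), the helper introduced by the earlier fence-conversion work: it returns NULL for any struct fence whose ops are not radeon's. Its definition in radeon.h is approximately:

	static inline struct radeon_fence *to_radeon_fence(struct fence *f)
	{
		struct radeon_fence *__f = container_of(f, struct radeon_fence, base);

		if (__f->base.ops == &radeon_fence_ops)
			return __f;
		return NULL;
	}

A radeon fence from another card would pass this check even though that device's exclusive_lock is not held here; such fences must never reach this function, which is exactly what the radeon_semaphore_sync_resv() rework below guarantees by routing them through fence_wait() instead.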
*
* Sync to the fence using this semaphore object
*/
-void radeon_semaphore_sync_resv(struct radeon_semaphore *sema,
- struct reservation_object *resv,
- bool shared)
+int radeon_semaphore_sync_resv(struct radeon_device *rdev,
+ struct radeon_semaphore *sema,
+ struct reservation_object *resv,
+ bool shared)
{
struct reservation_object_list *flist;
struct fence *f;
+ struct radeon_fence *fence;
unsigned i;
+ int r = 0;
/* always sync to the exclusive fence */
f = reservation_object_get_excl(resv);
- radeon_semaphore_sync_fence(sema, (struct radeon_fence*)f);
+ fence = f ? to_radeon_fence(f) : NULL;
+ if (fence && fence->rdev == rdev)
+ radeon_semaphore_sync_fence(sema, fence);
+ else if (f)
+ r = fence_wait(f, true);
flist = reservation_object_get_list(resv);
- if (shared || !flist)
- return;
+ if (shared || !flist || r)
+ return r;
for (i = 0; i < flist->shared_count; ++i) {
f = rcu_dereference_protected(flist->shared[i],
reservation_object_held(resv));
- radeon_semaphore_sync_fence(sema, (struct radeon_fence*)f);
+ fence = to_radeon_fence(f);
+ if (fence && fence->rdev == rdev)
+ radeon_semaphore_sync_fence(sema, fence);
+ else
+ r = fence_wait(f, true);
+
+ if (r)
+ break;
}
+ return r;
}
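The rewritten function splits the fences in the reservation object into two classes. A fence from this device is only recorded, to be turned into a GPU-side semaphore wait later by radeon_semaphore_sync_rings(); anything else, whether a foreign driver's fence on a PRIME-shared buffer or a fence from another radeon card, is waited on synchronously on the CPU with the interruptible fence_wait(f, true). Hardware semaphores only order rings of a single GPU, and another card's exclusive_lock is not held here, hence the CPU fallback. For contrast, the GPU-side path merely keeps the latest fence per ring; at this point in the tree it looks roughly like:

	void radeon_semaphore_sync_fence(struct radeon_semaphore *semaphore,
					 struct radeon_fence *fence)
	{
		struct radeon_fence *other;

		if (!fence)
			return;

		other = semaphore->sync_to[fence->ring];
		semaphore->sync_to[fence->ring] = radeon_fence_later(fence, other);
	}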
/**
if (ib.length_dw != 0) {
radeon_asic_vm_pad_ib(rdev, &ib);
- radeon_semaphore_sync_resv(ib.semaphore, pd->tbo.resv, false);
+ radeon_semaphore_sync_resv(rdev, ib.semaphore, pd->tbo.resv, false);
radeon_semaphore_sync_fence(ib.semaphore, vm->last_id_use);
WARN_ON(ib.length_dw > ndw);
r = radeon_ib_schedule(rdev, &ib, NULL, false);
unsigned nptes;
uint64_t pte;
- radeon_semaphore_sync_resv(ib->semaphore, pt->tbo.resv, false);
+ radeon_semaphore_sync_resv(rdev, ib->semaphore, pt->tbo.resv, false);
if ((addr & ~mask) == (end & ~mask))
nptes = end - addr;
return ERR_PTR(r);
}
- radeon_semaphore_sync_resv(sem, resv, false);
+ radeon_semaphore_sync_resv(rdev, sem, resv, false);
radeon_semaphore_sync_rings(rdev, sem, ring->idx);
for (i = 0; i < num_loops; i++) {
return ERR_PTR(r);
}
- radeon_semaphore_sync_resv(sem, resv, false);
+ radeon_semaphore_sync_resv(rdev, sem, resv, false);
radeon_semaphore_sync_rings(rdev, sem, ring->idx);
for (i = 0; i < num_loops; i++) {