}
EXPORT_SYMBOL(ttm_bo_mem_space);
-int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
-{
- if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
- return -EBUSY;
-
- return wait_event_interruptible(bo->event_queue,
- atomic_read(&bo->cpu_writers) == 0);
-}
-EXPORT_SYMBOL(ttm_bo_wait_cpu);
-
int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
bool interruptible, bool no_wait_reserve,
void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
- if (atomic_dec_and_test(&bo->cpu_writers))
- wake_up_all(&bo->event_queue);
+ atomic_dec(&bo->cpu_writers);
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
ttm_eu_backoff_reservation_locked(list);
spin_unlock(&glob->lru_lock);
ttm_eu_list_ref_sub(list);
- ret = ttm_bo_wait_cpu(bo, false);
- if (ret)
- return ret;
- goto retry;
+ return -EBUSY;
}
}
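
With the wait-and-retry path gone, a contended cpu_writers count now surfaces as -EBUSY straight from the reservation call. Below is a minimal, illustrative sketch of how a driver's submission path might hand that back to user space; it assumes the single-argument ttm_eu_reserve_buffers(list) form used by the tree this patch targets, and my_drv_reserve_for_submit is a hypothetical helper, not part of the patch.

#include <linux/errno.h>
#include <linux/list.h>
#include <drm/ttm/ttm_execbuf_util.h>

/*
 * Hypothetical submission helper: a buffer that is currently
 * synchronized for CPU access makes the reservation fail with
 * -EBUSY, which is handed back to the caller (typically user
 * space, which may retry the ioctl) instead of sleeping here.
 */
static int my_drv_reserve_for_submit(struct list_head *val_list)
{
	int ret;

	ret = ttm_eu_reserve_buffers(val_list);
	if (ret == -EBUSY)
		return ret;	/* CPU access in progress, do not block */
	if (ret)
		return ret;	/* e.g. -ERESTARTSYS from the reserve */

	/* ... validate placements and submit the command stream ... */
	return 0;
}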
* @no_wait: Return immediately if buffer is busy.
*
* Synchronizes a buffer object for CPU RW access. This means
- * blocking command submission that affects the buffer and
- * waiting for buffer idle. This lock is recursive.
+ * that command submission affecting the buffer will return -EBUSY
+ * until ttm_bo_synccpu_write_release is called.
+ *
* Returns
* -EBUSY if the buffer is busy and no_wait is true.
* -ERESTARTSYS if interrupted by a signal.
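
To make the new semantics concrete, here is a hedged usage sketch of the grab/release pairing documented above. It assumes the ttm_bo_synccpu_write_grab(bo, no_wait) prototype from ttm_bo_api.h; my_drv_cpu_update is a hypothetical driver helper and not part of this patch.

#include <linux/errno.h>
#include <drm/ttm/ttm_bo_api.h>

/*
 * While the grab is held, command submission touching the buffer
 * fails with -EBUSY rather than sleeping; the release simply drops
 * the cpu_writers count again.
 */
static int my_drv_cpu_update(struct ttm_buffer_object *bo, bool no_wait)
{
	int ret;

	ret = ttm_bo_synccpu_write_grab(bo, no_wait);
	if (ret)
		return ret;	/* -EBUSY or -ERESTARTSYS, as listed above */

	/* ... map the buffer (e.g. with ttm_bo_kmap) and write to it ... */

	ttm_bo_synccpu_write_release(bo);
	return 0;
}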
extern void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem);
-/**
- * ttm_bo_wait_for_cpu
- *
- * @bo: Pointer to a struct ttm_buffer_object.
- * @no_wait: Don't sleep while waiting.
- *
- * Wait until a buffer object is no longer sync'ed for CPU access.
- * Returns:
- * -EBUSY: Buffer object was sync'ed for CPU access. (only if no_wait == 1).
- * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
- */
-
-extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait);
-
extern void ttm_bo_global_release(struct drm_global_reference *ref);
extern int ttm_bo_global_init(struct drm_global_reference *ref);