struct drm_file *filp);
int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
+int radeon_gem_wait_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
/* VRAM scratch page for HDP bug */
struct r700_vram_scratch {
p->relocs[i].lobj.wdomain = r->write_domain;
p->relocs[i].lobj.rdomain = r->read_domains;
p->relocs[i].lobj.tv.bo = &p->relocs[i].robj->tbo;
- p->relocs[i].lobj.tv.usage = TTM_USAGE_READWRITE;
+ if (r->read_domains)
+ p->relocs[i].lobj.tv.usage |= TTM_USAGE_READ;
+ if (r->write_domain)
+ p->relocs[i].lobj.tv.usage |= TTM_USAGE_WRITE;
p->relocs[i].handle = r->handle;
p->relocs[i].flags = r->flags;
radeon_bo_list_add_object(&p->relocs[i].lobj,
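
The TTM_USAGE_* values OR-ed together above come from the companion TTM
change that adds per-usage fence waits; they are not defined anywhere in
this patch. A minimal sketch of the definition this code assumes (the
exact enumerator values are an assumption inferred from how the flags
are combined here):

    enum ttm_buffer_usage {
            TTM_USAGE_READ      = 1,
            TTM_USAGE_WRITE     = 2,
            TTM_USAGE_READWRITE = TTM_USAGE_READ | TTM_USAGE_WRITE,
    };

Tracking read and write usage per reloc is what later allows a wait to
target only writers (or only readers) instead of forcing a full idle.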
* 2.9.0 - r600 tiling (s3tc,rgtc) working, SET_PREDICATION packet 3 on r600 + eg, backend query
* 2.10.0 - fusion 2D tiling
* 2.11.0 - backend map, initial compute support for the CS checker
+ * 2.12.0 - DRM_RADEON_GEM_WAIT ioctl
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 11
+#define KMS_DRIVER_MINOR 12
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
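
The new ioctl only exists from KMS driver version 2.12.0 on, so
userspace should gate its use on the version reported by the kernel.
A hypothetical check using libdrm's drmGetVersion()/drmFreeVersion()
(the helper itself is illustrative, not part of this patch):

    #include <stdbool.h>
    #include <xf86drm.h>

    static bool radeon_has_gem_wait(int fd)
    {
            drmVersionPtr ver = drmGetVersion(fd);
            bool ok = ver != NULL &&
                      (ver->version_major > 2 ||
                       (ver->version_major == 2 && ver->version_minor >= 12));

            if (ver)
                    drmFreeVersion(ver);
            return ok;
    }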
}
if (domain == RADEON_GEM_DOMAIN_CPU) {
/* Asking for cpu access, wait for object idle */
- r = radeon_bo_wait(robj, NULL, false);
+ r = radeon_bo_wait(robj, NULL, false, TTM_USAGE_READWRITE);
if (r) {
printk(KERN_ERR "Failed to wait for object !\n");
return r;
return -ENOENT;
}
robj = gem_to_radeon_bo(gobj);
- r = radeon_bo_wait(robj, &cur_placement, true);
+ r = radeon_bo_wait(robj, &cur_placement, true, TTM_USAGE_READWRITE);
switch (cur_placement) {
case TTM_PL_VRAM:
args->domain = RADEON_GEM_DOMAIN_VRAM;
return -ENOENT;
}
robj = gem_to_radeon_bo(gobj);
- r = radeon_bo_wait(robj, NULL, false);
+ r = radeon_bo_wait(robj, NULL, false, TTM_USAGE_READWRITE);
/* callback hw specific functions if any */
if (robj->rdev->asic->ioctl_wait_idle)
robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
return r;
}
+int radeon_gem_wait_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct drm_radeon_gem_wait *args = data;
+ struct drm_gem_object *gobj;
+ struct radeon_bo *robj;
+ bool no_wait = (args->flags & RADEON_GEM_NO_WAIT) != 0;
+ enum ttm_buffer_usage usage = 0;
+ int r;
+
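+ /* Map the ioctl flags onto a TTM usage mask; an empty mask falls
+ * back to waiting for both readers and writers. */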
+ if (args->flags & RADEON_GEM_USAGE_READ)
+ usage |= TTM_USAGE_READ;
+ if (args->flags & RADEON_GEM_USAGE_WRITE)
+ usage |= TTM_USAGE_WRITE;
+ if (!usage)
+ usage = TTM_USAGE_READWRITE;
+
+ gobj = drm_gem_object_lookup(dev, filp, args->handle);
+ if (gobj == NULL) {
+ return -ENOENT;
+ }
+ robj = gem_to_radeon_bo(gobj);
+ r = radeon_bo_wait(robj, NULL, no_wait, usage);
+ /* callback hw specific functions if any */
+ if (!no_wait && robj->rdev->asic->ioctl_wait_idle)
+ robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
+ drm_gem_object_unreference_unlocked(gobj);
+ return r;
+}
+
int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT, radeon_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED),
};
int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms);
}
static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
- bool no_wait)
+ bool no_wait, enum ttm_buffer_usage usage)
{
int r;
if (mem_type)
*mem_type = bo->tbo.mem.mem_type;
if (bo->tbo.sync_obj)
- r = ttm_bo_wait(&bo->tbo, true, true, no_wait, TTM_USAGE_READWRITE);
+ r = ttm_bo_wait(&bo->tbo, true, true, no_wait, usage);
spin_unlock(&bo->tbo.bdev->fence_lock);
ttm_bo_unreserve(&bo->tbo);
return r;
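
With the extra parameter, callers can wait on a subset of the pending
accesses rather than always waiting for full idle. Illustrative calls
(not taken from this patch; the exact conflict semantics live on the
TTM side of the series):

    /* behave like every pre-patch caller: block until fully idle */
    r = radeon_bo_wait(bo, NULL, false, TTM_USAGE_READWRITE);

    /* non-blocking poll restricted to the accesses named by 'usage' */
    r = radeon_bo_wait(bo, NULL, true, TTM_USAGE_READ);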
#define DRM_RADEON_GEM_SET_TILING 0x28
#define DRM_RADEON_GEM_GET_TILING 0x29
#define DRM_RADEON_GEM_BUSY 0x2a
+#define DRM_RADEON_GEM_WAIT 0x2b
#define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_INIT, drm_radeon_init_t)
#define DRM_IOCTL_RADEON_CP_START DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_START)
#define DRM_IOCTL_RADEON_GEM_SET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_SET_TILING, struct drm_radeon_gem_set_tiling)
#define DRM_IOCTL_RADEON_GEM_GET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_GET_TILING, struct drm_radeon_gem_get_tiling)
#define DRM_IOCTL_RADEON_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_BUSY, struct drm_radeon_gem_busy)
+#define DRM_IOCTL_RADEON_GEM_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_WAIT, struct drm_radeon_gem_wait)
typedef struct drm_radeon_init {
enum {
uint32_t domain;
};
+#define RADEON_GEM_NO_WAIT 0x1
+#define RADEON_GEM_USAGE_READ 0x2
+#define RADEON_GEM_USAGE_WRITE 0x4
+
+struct drm_radeon_gem_wait {
+ uint32_t handle;
+ uint32_t flags; /* bitmask of RADEON_GEM_* flags */
+};
+
struct drm_radeon_gem_pread {
/** Handle for the object being read. */
uint32_t handle;
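
For reference, a userspace invocation of the new ioctl could look like
the sketch below. drmCommandWriteRead() is the standard libdrm wrapper;
the helper name and the -EBUSY interpretation are illustrative
assumptions, not part of this patch:

    #include <stdint.h>
    #include <xf86drm.h>
    #include "radeon_drm.h"

    /* Poll whether a BO can be read right now: 0 means yes,
     * -EBUSY means the GPU still holds it (NO_WAIT semantics). */
    static int radeon_bo_busy_for_read(int fd, uint32_t handle)
    {
            struct drm_radeon_gem_wait args = {
                    .handle = handle,
                    .flags  = RADEON_GEM_NO_WAIT | RADEON_GEM_USAGE_READ,
            };

            return drmCommandWriteRead(fd, DRM_RADEON_GEM_WAIT,
                                       &args, sizeof(args));
    }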