.gem_prime_import = i915_gem_prime_import,
.dumb_create = i915_gem_dumb_create,
- .dumb_map_offset = i915_gem_mmap_gtt,
+ .dumb_map_offset = i915_gem_dumb_map_offset,
.dumb_destroy = drm_gem_dumb_destroy,
.ioctls = i915_ioctls,
.fops = &i915_driver_fops,
int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
-int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
- uint32_t handle, uint64_t *offset);
+int i915_gem_dumb_map_offset(struct drm_file *file_priv,
+ struct drm_device *dev, uint32_t handle,
+ uint64_t *offset);
/**
* Returns true if seq1 is later than seq2.
*/
i915_gem_create(struct drm_file *file,
struct drm_device *dev,
uint64_t size,
+ bool dumb,
uint32_t *handle_p)
{
struct drm_i915_gem_object *obj;
if (obj == NULL)
return -ENOMEM;
+ obj->base.dumb = dumb;
ret = drm_gem_handle_create(file, &obj->base, &handle);
/* drop reference from allocate - handle holds it now */
drm_gem_object_unreference_unlocked(&obj->base);
args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
args->size = args->pitch * args->height;
return i915_gem_create(file, dev,
- args->size, &args->handle);
+ args->size, true, &args->handle);
}
/**
struct drm_i915_gem_create *args = data;
return i915_gem_create(file, dev,
- args->size, &args->handle);
+ args->size, false, &args->handle);
}
static inline int
drm_gem_free_mmap_offset(&obj->base);
}
-int
+static int
i915_gem_mmap_gtt(struct drm_file *file,
struct drm_device *dev,
- uint32_t handle,
+ uint32_t handle, bool dumb,
uint64_t *offset)
{
struct drm_i915_private *dev_priv = dev->dev_private;
goto unlock;
}
+ /*
+ * We don't allow dumb mmaps on objects created using another
+ * interface.
+ */
+ WARN_ONCE(dumb && !(obj->base.dumb || obj->base.import_attach),
+ "Illegal dumb map of accelerated buffer.\n");
+
if (obj->base.size > dev_priv->gtt.mappable_end) {
ret = -E2BIG;
goto out;
return ret;
}
+int
+i915_gem_dumb_map_offset(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle,
+ uint64_t *offset)
+{
+ return i915_gem_mmap_gtt(file, dev, handle, true, offset);
+}
+
/**
* i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
* @dev: DRM device
{
struct drm_i915_gem_mmap_gtt *args = data;
- return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
+ return i915_gem_mmap_gtt(file, dev, args->handle, false, &args->offset);
}
static inline int
goto err;
}
+ WARN_ONCE(obj->base.dumb,
+ "GPU use of dumb buffer is illegal.\n");
+
drm_gem_object_reference(&obj->base);
list_add_tail(&obj->obj_exec_link, &objects);
}
if (ret)
return ret;
+ bo->gem.dumb = true;
ret = drm_gem_handle_create(file_priv, &bo->gem, &args->handle);
drm_gem_object_unreference_unlocked(&bo->gem);
return ret;
gem = drm_gem_object_lookup(dev, file_priv, handle);
if (gem) {
struct nouveau_bo *bo = nouveau_gem_object(gem);
+
+ /*
+ * We don't allow dumb mmaps on objects created using another
+ * interface.
+ */
+ WARN_ONCE(!(gem->dumb || gem->import_attach),
+ "Illegal dumb map of accelerated buffer.\n");
+
*poffset = drm_vma_node_offset_addr(&bo->bo.vma_node);
drm_gem_object_unreference_unlocked(gem);
return 0;
list_for_each_entry(nvbo, list, entry) {
struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
+ WARN_ONCE(nvbo->gem.dumb,
+ "GPU use of dumb buffer is illegal.\n");
+
ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains,
b->write_domains,
b->valid_domains);
return r;
}
-int radeon_mode_dumb_mmap(struct drm_file *filp,
- struct drm_device *dev,
- uint32_t handle, uint64_t *offset_p)
+static int radeon_mode_mmap(struct drm_file *filp,
+ struct drm_device *dev,
+ uint32_t handle, bool dumb,
+ uint64_t *offset_p)
{
struct drm_gem_object *gobj;
struct radeon_bo *robj;
if (gobj == NULL) {
return -ENOENT;
}
+
+ /*
+ * We don't allow dumb mmaps on objects created using another
+ * interface.
+ */
+ WARN_ONCE(dumb && !(gobj->dumb || gobj->import_attach),
+ "Illegal dumb map of GPU buffer.\n");
+
robj = gem_to_radeon_bo(gobj);
if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) {
drm_gem_object_unreference_unlocked(gobj);
return 0;
}
+int radeon_mode_dumb_mmap(struct drm_file *filp,
+ struct drm_device *dev,
+ uint32_t handle, uint64_t *offset_p)
+{
+ return radeon_mode_mmap(filp, dev, handle, true, offset_p);
+}
+
int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
struct drm_radeon_gem_mmap *args = data;
- return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
+ return radeon_mode_mmap(filp, dev, args->handle, false,
+ &args->addr_ptr);
}
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
return -ENOMEM;
+ gobj->dumb = true;
r = drm_gem_handle_create(file_priv, gobj, &handle);
/* drop reference from allocate - handle holds it now */
drm_gem_object_unreference_unlocked(gobj);
if (r) {
u32 current_domain =
radeon_mem_type_to_domain(bo->tbo.mem.mem_type);
+ WARN_ONCE(bo->gem_base.dumb,
+ "GPU use of dumb buffer is illegal.\n");
+
/* Check if this buffer will be moved and don't move it
* if we have moved too many buffers for this IB already.
*
* simply leave it as NULL.
*/
struct dma_buf_attachment *import_attach;
+
+ /**
+ * dumb - created as dumb buffer
+ * Whether the gem object was created using the dumb buffer interface;
+ * if so, it may not be used for GPU rendering.
+ */
+ bool dumb;
};
void drm_gem_object_release(struct drm_gem_object *obj);
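
For reference, a minimal userspace sketch of the dumb-buffer path these hooks serve: allocation goes through the driver's .dumb_create and mapping through .dumb_map_offset, so only handles created this way should reach the dumb map ioctl, which is exactly what the new WARN_ONCE checks enforce. This assumes libdrm headers and an already-open DRM fd; map_dumb_fb is a hypothetical helper name and error handling is trimmed.

#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>
#include <xf86drm.h>	/* pulls in drm.h / drm_mode.h from libdrm */

/* map_dumb_fb() - hypothetical helper: create a 32bpp dumb buffer and
 * mmap it through the offset returned by DRM_IOCTL_MODE_MAP_DUMB. */
static void *map_dumb_fb(int fd, uint32_t width, uint32_t height,
			 uint32_t *pitch)
{
	struct drm_mode_create_dumb create = {
		.width = width,
		.height = height,
		.bpp = 32,
	};
	struct drm_mode_map_dumb map = { 0 };
	void *ptr;

	/* Allocation goes through the driver's ->dumb_create hook, which
	 * now tags the object (e.g. obj->base.dumb) as a dumb buffer. */
	if (drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
		return NULL;

	/* ->dumb_map_offset (e.g. i915_gem_dumb_map_offset above) returns
	 * the fake mmap offset; passing a handle created through the
	 * accelerated GEM interfaces here is what the WARN_ONCEs flag. */
	map.handle = create.handle;
	if (drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
		return NULL;

	ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   fd, map.offset);
	if (ptr == MAP_FAILED)
		return NULL;

	*pitch = create.pitch;
	return ptr;
}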