/* Returns a vmap'd pointer to dest_obj, which the caller must unmap */
static u32 *copy_batch(struct drm_i915_gem_object *dest_obj,
- struct drm_i915_gem_object *src_obj)
+ struct drm_i915_gem_object *src_obj,
+ u32 batch_start_offset,
+ u32 batch_len)
{
int ret = 0;
int needs_clflush = 0;
- u32 *src_addr, *dest_addr = NULL;
+ u32 *src_base, *dest_base = NULL;
+ u32 *src_addr, *dest_addr;
+ u32 offset = batch_start_offset / sizeof(*dest_addr);
+ u32 end = batch_start_offset + batch_len;
+
+ if (end > dest_obj->base.size || end > src_obj->base.size)
+ return ERR_PTR(-E2BIG);
ret = i915_gem_obj_prepare_shmem_read(src_obj, &needs_clflush);
if (ret) {
return ERR_PTR(ret);
}
- src_addr = vmap_batch(src_obj);
- if (!src_addr) {
+ src_base = vmap_batch(src_obj);
+ if (!src_base) {
DRM_DEBUG_DRIVER("CMD: Failed to vmap batch\n");
ret = -ENOMEM;
goto unpin_src;
}
+ src_addr = src_base + offset;
+
if (needs_clflush)
- drm_clflush_virt_range((char *)src_addr, src_obj->base.size);
+ drm_clflush_virt_range((char *)src_addr, batch_len);
ret = i915_gem_object_set_to_cpu_domain(dest_obj, true);
if (ret) {
goto unmap_src;
}
- dest_addr = vmap_batch(dest_obj);
- if (!dest_addr) {
+ dest_base = vmap_batch(dest_obj);
+ if (!dest_base) {
DRM_DEBUG_DRIVER("CMD: Failed to vmap shadow batch\n");
ret = -ENOMEM;
goto unmap_src;
}
- memcpy(dest_addr, src_addr, src_obj->base.size);
- if (dest_obj->base.size > src_obj->base.size)
- memset((u8 *)dest_addr + src_obj->base.size, 0,
- dest_obj->base.size - src_obj->base.size);
+ dest_addr = dest_base + offset;
+
+ if (batch_start_offset != 0)
+ memset((u8 *)dest_base, 0, batch_start_offset);
+
+ memcpy(dest_addr, src_addr, batch_len);
+ memset((u8 *)dest_addr + batch_len, 0, dest_obj->base.size - end);
unmap_src:
- vunmap(src_addr);
+ vunmap(src_base);
unpin_src:
i915_gem_object_unpin_pages(src_obj);
- return ret ? ERR_PTR(ret) : dest_addr;
+ return ret ? ERR_PTR(ret) : dest_base;
}
/**
* @batch_obj: the batch buffer in question
* @shadow_batch_obj: copy of the batch buffer in question
* @batch_start_offset: byte offset in the batch at which execution starts
+ * @batch_len: length in bytes of the commands in batch_obj
* @is_master: is the submitting process the drm master?
*
* Parses the specified batch buffer looking for privilege violations as
struct drm_i915_gem_object *batch_obj,
struct drm_i915_gem_object *shadow_batch_obj,
u32 batch_start_offset,
+ u32 batch_len,
bool is_master)
{
int ret = 0;
struct drm_i915_cmd_descriptor default_desc = { 0 };
bool oacontrol_set = false; /* OACONTROL tracking. See check_cmd() */
- batch_base = copy_batch(shadow_batch_obj, batch_obj);
+ batch_base = copy_batch(shadow_batch_obj, batch_obj,
+ batch_start_offset, batch_len);
if (IS_ERR(batch_base)) {
DRM_DEBUG_DRIVER("CMD: Failed to copy batch\n");
return PTR_ERR(batch_base);
cmd = batch_base + (batch_start_offset / sizeof(*cmd));
/*
- * We use the source object's size because the shadow object is as
- * large or larger and copy_batch() will write MI_NOPs to the extra
- * space. Parsing should be faster in some cases this way.
+ * We bound parsing by the batch length rather than by the object size:
+ * the shadow object is at least as large, and copy_batch() fills the
+ * extra space with MI_NOOPs, so there is nothing past batch_len to
+ * parse. Parsing should be faster in some cases this way.
*/
- batch_end = cmd + (batch_obj->base.size / sizeof(*batch_end));
+ batch_end = cmd + (batch_len / sizeof(*batch_end));
while (cmd < batch_end) {
const struct drm_i915_cmd_descriptor *desc;
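A small standalone sketch (again not driver code; the helper name is hypothetical) of the byte-to-dword arithmetic used above: batch_start_offset and batch_len are byte counts, while cmd and batch_end step through the vmap'd shadow buffer in 32-bit dwords, so the loop only ever walks the copied commands, never the zero-filled tail.

#include <stddef.h>
#include <stdint.h>

/* Returns the number of dwords the parse loop will walk for a given window. */
static size_t parse_window_dwords(const uint32_t *batch_base,
                                  uint32_t batch_start_offset,
                                  uint32_t batch_len)
{
        const uint32_t *cmd = batch_base + (batch_start_offset / sizeof(*cmd));
        const uint32_t *batch_end = cmd + (batch_len / sizeof(*batch_end));

        /* The real loop decodes each command between cmd and batch_end. */
        return (size_t)(batch_end - cmd);
}

For example, with batch_len = 0x200 bytes the parser walks 0x80 dwords, regardless of how large the shadow object is.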