From: Brad Volkin
Date: Thu, 11 Dec 2014 20:13:12 +0000 (-0800)
Subject: drm/i915: Tidy up execbuffer command parsing code
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=7174537627836119e8b960afcc7139a138e02802;p=GitHub%2Fmoto-9609%2Fandroid_kernel_motorola_exynos9610.git

drm/i915: Tidy up execbuffer command parsing code

Move it to a separate function since the main do_execbuffer function
already has so much going on.

v2:
- Move pin/unpin calls inside i915_parse_cmds() (Chris W, v4 7/7
  feedback)

Issue: VIZ-4719
Signed-off-by: Brad Volkin
Reviewed-By: Jon Bloomfield
Signed-off-by: Daniel Vetter
---

diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index a698b47df331..0d757cd313b5 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -1050,10 +1050,17 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
         struct drm_i915_cmd_descriptor default_desc = { 0 };
         bool oacontrol_set = false; /* OACONTROL tracking. See check_cmd() */
 
+        ret = i915_gem_obj_ggtt_pin(shadow_batch_obj, 4096, 0);
+        if (ret) {
+                DRM_DEBUG_DRIVER("CMD: Failed to pin shadow batch\n");
+                return -1;
+        }
+
         batch_base = copy_batch(shadow_batch_obj, batch_obj,
                                 batch_start_offset, batch_len);
         if (IS_ERR(batch_base)) {
                 DRM_DEBUG_DRIVER("CMD: Failed to copy batch\n");
+                i915_gem_object_ggtt_unpin(shadow_batch_obj);
                 return PTR_ERR(batch_base);
         }
 
@@ -1124,6 +1131,7 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
         }
 
         vunmap(batch_base);
+        i915_gem_object_ggtt_unpin(shadow_batch_obj);
 
         return ret;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 86de3720527b..8330660af978 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1072,6 +1072,65 @@ i915_emit_box(struct intel_engine_cs *ring,
         return 0;
 }
 
+static struct drm_i915_gem_object*
+i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
+                          struct drm_i915_gem_exec_object2 *shadow_exec_entry,
+                          struct eb_vmas *eb,
+                          struct drm_i915_gem_object *batch_obj,
+                          u32 batch_start_offset,
+                          u32 batch_len,
+                          bool is_master,
+                          u32 *flags)
+{
+        struct drm_i915_private *dev_priv = to_i915(batch_obj->base.dev);
+        struct drm_i915_gem_object *shadow_batch_obj;
+        int ret;
+
+        shadow_batch_obj = i915_gem_batch_pool_get(&dev_priv->mm.batch_pool,
+                                                   batch_obj->base.size);
+        if (IS_ERR(shadow_batch_obj))
+                return shadow_batch_obj;
+
+        ret = i915_parse_cmds(ring,
+                              batch_obj,
+                              shadow_batch_obj,
+                              batch_start_offset,
+                              batch_len,
+                              is_master);
+        if (ret) {
+                if (ret == -EACCES)
+                        return batch_obj;
+        } else {
+                struct i915_vma *vma;
+
+                memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));
+
+                vma = i915_gem_obj_to_ggtt(shadow_batch_obj);
+                vma->exec_entry = shadow_exec_entry;
+                vma->exec_entry->flags = __EXEC_OBJECT_PURGEABLE;
+                drm_gem_object_reference(&shadow_batch_obj->base);
+                list_add_tail(&vma->exec_list, &eb->vmas);
+
+                shadow_batch_obj->base.pending_read_domains =
+                        batch_obj->base.pending_read_domains;
+
+                /*
+                 * Set the DISPATCH_SECURE bit to remove the NON_SECURE
+                 * bit from MI_BATCH_BUFFER_START commands issued in the
+                 * dispatch_execbuffer implementations. We specifically
+                 * don't want that set when the command parser is
+                 * enabled.
+                 *
+                 * FIXME: with aliasing ppgtt, buffers that should only
+                 * be in ggtt still end up in the aliasing ppgtt. remove
+                 * this check when that is fixed.
+                 */
+                if (USES_FULL_PPGTT(dev))
+                        *flags |= I915_DISPATCH_SECURE;
+        }
+
+        return ret ? ERR_PTR(ret) : shadow_batch_obj;
+}
+
 int
 i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
@@ -1289,7 +1348,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
         struct drm_i915_private *dev_priv = dev->dev_private;
         struct eb_vmas *eb;
         struct drm_i915_gem_object *batch_obj;
-        struct drm_i915_gem_object *shadow_batch_obj = NULL;
         struct drm_i915_gem_exec_object2 shadow_exec_entry;
         struct intel_engine_cs *ring;
         struct intel_context *ctx;
@@ -1409,62 +1467,18 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
         }
 
         if (i915_needs_cmd_parser(ring)) {
-                shadow_batch_obj =
-                        i915_gem_batch_pool_get(&dev_priv->mm.batch_pool,
-                                                batch_obj->base.size);
-                if (IS_ERR(shadow_batch_obj)) {
-                        ret = PTR_ERR(shadow_batch_obj);
-                        /* Don't try to clean up the obj in the error path */
-                        shadow_batch_obj = NULL;
+                batch_obj = i915_gem_execbuffer_parse(ring,
+                                                      &shadow_exec_entry,
+                                                      eb,
+                                                      batch_obj,
+                                                      args->batch_start_offset,
+                                                      args->batch_len,
+                                                      file->is_master,
+                                                      &flags);
+                if (IS_ERR(batch_obj)) {
+                        ret = PTR_ERR(batch_obj);
                         goto err;
                 }
-
-                ret = i915_gem_obj_ggtt_pin(shadow_batch_obj, 4096, 0);
-                if (ret)
-                        goto err;
-
-                ret = i915_parse_cmds(ring,
-                                      batch_obj,
-                                      shadow_batch_obj,
-                                      args->batch_start_offset,
-                                      args->batch_len,
-                                      file->is_master);
-                i915_gem_object_ggtt_unpin(shadow_batch_obj);
-
-                if (ret) {
-                        if (ret != -EACCES)
-                                goto err;
-                } else {
-                        struct i915_vma *vma;
-
-                        memset(&shadow_exec_entry, 0,
-                               sizeof(shadow_exec_entry));
-
-                        vma = i915_gem_obj_to_ggtt(shadow_batch_obj);
-                        vma->exec_entry = &shadow_exec_entry;
-                        vma->exec_entry->flags = __EXEC_OBJECT_PURGEABLE;
-                        drm_gem_object_reference(&shadow_batch_obj->base);
-                        list_add_tail(&vma->exec_list, &eb->vmas);
-
-                        shadow_batch_obj->base.pending_read_domains =
-                                batch_obj->base.pending_read_domains;
-
-                        batch_obj = shadow_batch_obj;
-
-                        /*
-                         * Set the DISPATCH_SECURE bit to remove the NON_SECURE
-                         * bit from MI_BATCH_BUFFER_START commands issued in the
-                         * dispatch_execbuffer implementations. We specifically
-                         * don't want that set when the command parser is
-                         * enabled.
-                         *
-                         * FIXME: with aliasing ppgtt, buffers that should only
-                         * be in ggtt still end up in the aliasing ppgtt. remove
-                         * this check when that is fixed.
-                         */
-                        if (USES_FULL_PPGTT(dev))
-                                flags |= I915_DISPATCH_SECURE;
-                }
         }
 
         batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
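
For quick reference, a minimal caller-side sketch of the contract the new
helper follows, using only names that appear in the hunks above; it simply
mirrors the i915_gem_do_execbuffer hunk and adds no code beyond the patch:

        /*
         * Illustrative only, restating the hunk above:
         *  - success: the shadow (parsed) batch object is returned and,
         *    under full PPGTT, I915_DISPATCH_SECURE is set in *flags
         *  - -EACCES from i915_parse_cmds(): the original batch_obj is
         *    returned so the unparsed batch is dispatched as before
         *  - any other error: ERR_PTR(ret) is returned
         */
        batch_obj = i915_gem_execbuffer_parse(ring, &shadow_exec_entry, eb,
                                              batch_obj,
                                              args->batch_start_offset,
                                              args->batch_len,
                                              file->is_master,
                                              &flags);
        if (IS_ERR(batch_obj)) {
                ret = PTR_ERR(batch_obj);
                goto err;
        }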