From 4c914c0c7c787b8f730128a8cdcca9c50b0784ab Mon Sep 17 00:00:00 2001
From: Brad Volkin
Date: Tue, 18 Feb 2014 10:15:45 -0800
Subject: [PATCH] drm/i915: Refactor shmem pread setup

The command parser is going to need the same synchronization and
setup logic, so factor it out for reuse.

v2: Add a check that the object is backed by shmem

Signed-off-by: Brad Volkin
Reviewed-by: Jani Nikula
Signed-off-by: Daniel Vetter
---
 drivers/gpu/drm/i915/i915_drv.h |  3 ++
 drivers/gpu/drm/i915/i915_gem.c | 51 ++++++++++++++++++++++++---------
 2 files changed, 40 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 75e5187ce89f..ef386bf9cecd 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2140,6 +2140,9 @@ void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
 void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
 void i915_gem_lastclose(struct drm_device *dev);
 
+int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
+				    int *needs_clflush);
+
 int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
 static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
 {
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 18ea6bccbbf0..177c20722656 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -327,6 +327,42 @@ __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
 	return 0;
 }
 
+/*
+ * Pins the specified object's pages and synchronizes the object with
+ * GPU accesses. Sets needs_clflush to non-zero if the caller should
+ * flush the object from the CPU cache.
+ */
+int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
+				    int *needs_clflush)
+{
+	int ret;
+
+	*needs_clflush = 0;
+
+	if (!obj->base.filp)
+		return -EINVAL;
+
+	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
+		/* If we're not in the cpu read domain, set ourself into the gtt
+		 * read domain and manually flush cachelines (if required). This
+		 * optimizes for the case when the gpu will dirty the data
+		 * anyway again before the next pread happens. */
+		*needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
+							obj->cache_level);
+		ret = i915_gem_object_wait_rendering(obj, true);
+		if (ret)
+			return ret;
+	}
+
+	ret = i915_gem_object_get_pages(obj);
+	if (ret)
+		return ret;
+
+	i915_gem_object_pin_pages(obj);
+
+	return ret;
+}
+
 /* Per-page copy function for the shmem pread fastpath.
  * Flushes invalid cachelines before reading the target if
  * needs_clflush is set. */
@@ -424,23 +460,10 @@ i915_gem_shmem_pread(struct drm_device *dev,
 
 	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
 
-	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
-		/* If we're not in the cpu read domain, set ourself into the gtt
-		 * read domain and manually flush cachelines (if required). This
-		 * optimizes for the case when the gpu will dirty the data
-		 * anyway again before the next pread happens. */
-		needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level);
-		ret = i915_gem_object_wait_rendering(obj, true);
-		if (ret)
-			return ret;
-	}
-
-	ret = i915_gem_object_get_pages(obj);
+	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
 	if (ret)
 		return ret;
 
-	i915_gem_object_pin_pages(obj);
-
 	offset = args->offset;
 
 	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
-- 
2.20.1
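
Not part of the patch: below is a minimal sketch of the call pattern the new helper expects
from a reader path, for illustration only. The example_read_prep() wrapper is hypothetical;
i915_gem_object_unpin_pages() is assumed here as the counterpart of the pin the helper takes
via i915_gem_object_pin_pages().

/*
 * Hypothetical caller sketch (not from the patch): prepare the object,
 * copy data out while honouring needs_clflush, then drop the page pin
 * taken by i915_gem_obj_prepare_shmem_read().
 */
static int example_read_prep(struct drm_i915_gem_object *obj)
{
	int needs_clflush = 0;
	int ret;

	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
	if (ret)
		return ret;	/* -EINVAL for non-shmem objects, or a sync/get_pages error */

	/* ... per-page copy loop goes here, flushing cachelines first
	 * whenever needs_clflush is non-zero ... */

	i915_gem_object_unpin_pages(obj);	/* balances the pin taken by the helper */
	return 0;
}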