drm/i915: kill ranged cpu read domain support
author Daniel Vetter <daniel.vetter@ffwll.ch>
Sun, 25 Mar 2012 17:47:32 +0000 (19:47 +0200)
committer Daniel Vetter <daniel.vetter@ffwll.ch>
Tue, 27 Mar 2012 11:25:32 +0000 (13:25 +0200)
No longer needed.

Tested-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
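
A note outside the patch itself: the removed i915_gem_object_set_cpu_read_domain_range() clflushed only the not-yet-valid pages of a byte range and recorded them in page_cpu_valid. With that ranged path gone, a caller wanting CPU-coherent access presumably relies on the remaining full-object move, i915_gem_object_set_to_cpu_domain(). A minimal sketch of such a caller follows; example_prepare_cpu_read is a hypothetical name, and the sketch assumes the usual i915 GEM declarations from i915_drv.h:

/*
 * Hypothetical helper, not part of this commit: prepare an object for a
 * CPU read by moving the whole object into the CPU read domain instead
 * of clflushing a sub-range page by page.
 */
static int example_prepare_cpu_read(struct drm_i915_gem_object *obj)
{
	/* write = false: take the CPU read domain only, leave the
	 * write domain untouched. */
	return i915_gem_object_set_to_cpu_domain(obj, false);
}

The full-object move is more coarse-grained than the removed ranged variant, but it drops the per-page validity bookkeeping (page_cpu_valid) entirely, which is what this patch deletes.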
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c

index 93e06a3225ccf1ada40a4a5674f8d7e993f25605..e11fcb09aea2d3c68267283bbfb65f660b38bb48 100644 (file)
@@ -927,13 +927,6 @@ struct drm_i915_gem_object {
        /** Record of address bit 17 of each page at last unbind. */
        unsigned long *bit_17;
 
-
-       /**
-        * If present, while GEM_DOMAIN_CPU is in the read domain this array
-        * flags which individual pages are valid.
-        */
-       uint8_t *page_cpu_valid;
-
        /** User space pin count and filp owning the pin */
        uint32_t user_pin_count;
        struct drm_file *pin_filp;
index 9cdeeef5d6d76586458a9f11d4dd54e15f6c03ff..34ef339158c1a609d5aa50f2fc9a47e9a6aa2578 100644 (file)
 static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
-static __must_check int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
-                                                                 uint64_t offset,
-                                                                 uint64_t size);
-static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj);
 static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
                                                    unsigned alignment,
                                                    bool map_and_fenceable);
@@ -2990,11 +2986,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
 
        i915_gem_object_flush_gtt_write_domain(obj);
 
-       /* If we have a partially-valid cache of the object in the CPU,
-        * finish invalidating it and free the per-page flags.
-        */
-       i915_gem_object_set_to_full_cpu_read_domain(obj);
-
        old_write_domain = obj->base.write_domain;
        old_read_domains = obj->base.read_domains;
 
@@ -3025,113 +3016,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
        return 0;
 }
 
-/**
- * Moves the object from a partially CPU read to a full one.
- *
- * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
- * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
- */
-static void
-i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj)
-{
-       if (!obj->page_cpu_valid)
-               return;
-
-       /* If we're partially in the CPU read domain, finish moving it in.
-        */
-       if (obj->base.read_domains & I915_GEM_DOMAIN_CPU) {
-               int i;
-
-               for (i = 0; i <= (obj->base.size - 1) / PAGE_SIZE; i++) {
-                       if (obj->page_cpu_valid[i])
-                               continue;
-                       drm_clflush_pages(obj->pages + i, 1);
-               }
-       }
-
-       /* Free the page_cpu_valid mappings which are now stale, whether
-        * or not we've got I915_GEM_DOMAIN_CPU.
-        */
-       kfree(obj->page_cpu_valid);
-       obj->page_cpu_valid = NULL;
-}
-
-/**
- * Set the CPU read domain on a range of the object.
- *
- * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
- * not entirely valid.  The page_cpu_valid member of the object flags which
- * pages have been flushed, and will be respected by
- * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
- * of the whole object.
- *
- * This function returns when the move is complete, including waiting on
- * flushes to occur.
- */
-static int
-i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
-                                         uint64_t offset, uint64_t size)
-{
-       uint32_t old_read_domains;
-       int i, ret;
-
-       if (offset == 0 && size == obj->base.size)
-               return i915_gem_object_set_to_cpu_domain(obj, 0);
-
-       ret = i915_gem_object_flush_gpu_write_domain(obj);
-       if (ret)
-               return ret;
-
-       ret = i915_gem_object_wait_rendering(obj);
-       if (ret)
-               return ret;
-
-       i915_gem_object_flush_gtt_write_domain(obj);
-
-       /* If we're already fully in the CPU read domain, we're done. */
-       if (obj->page_cpu_valid == NULL &&
-           (obj->base.read_domains & I915_GEM_DOMAIN_CPU) != 0)
-               return 0;
-
-       /* Otherwise, create/clear the per-page CPU read domain flag if we're
-        * newly adding I915_GEM_DOMAIN_CPU
-        */
-       if (obj->page_cpu_valid == NULL) {
-               obj->page_cpu_valid = kzalloc(obj->base.size / PAGE_SIZE,
-                                             GFP_KERNEL);
-               if (obj->page_cpu_valid == NULL)
-                       return -ENOMEM;
-       } else if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
-               memset(obj->page_cpu_valid, 0, obj->base.size / PAGE_SIZE);
-
-       /* Flush the cache on any pages that are still invalid from the CPU's
-        * perspective.
-        */
-       for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
-            i++) {
-               if (obj->page_cpu_valid[i])
-                       continue;
-
-               drm_clflush_pages(obj->pages + i, 1);
-
-               obj->page_cpu_valid[i] = 1;
-       }
-
-       /* It should now be out of any other write domains, and we can update
-        * the domain values for our changes.
-        */
-       BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
-
-       old_read_domains = obj->base.read_domains;
-       obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
-
-       trace_i915_gem_object_change_domain(obj,
-                                           old_read_domains,
-                                           obj->base.write_domain);
-
-       return 0;
-}
-
 /* Throttle our rendering by waiting until the ring has completed our requests
  * emitted over 20 msec ago.
  *
@@ -3556,7 +3440,6 @@ static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
        drm_gem_object_release(&obj->base);
        i915_gem_info_remove_obj(dev_priv, obj->base.size);
 
-       kfree(obj->page_cpu_valid);
        kfree(obj->bit_17);
        kfree(obj);
 }