drm/i915: Map the ringbuffer using WB on LLC machines
author    Chris Wilson <chris@chris-wilson.co.uk>
          Thu, 8 Oct 2015 12:39:54 +0000 (13:39 +0100)
committer Daniel Vetter <daniel.vetter@ffwll.ch>
          Mon, 19 Oct 2015 10:12:02 +0000 (12:12 +0200)
If we have LLC coherency, GPU access to the ringbuffer is coherent
with the CPU's caches, so we can write directly into it using
ordinary cached (WB) writes rather than forcing WC access through
the GTT aperture.

v2: An important consequence is that WB ringbuffers no longer need to
be pinned into the limited mappable aperture, which allows for many
more simultaneous contexts.
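
In outline, the mapping choice made below is (a condensed sketch;
pinning, domain flushing and error handling are elided, and the
names pages/n_pages/aperture_base are illustrative):

        if (HAS_LLC(dev) && !obj->stolen) {
                /* shared LLC: an ordinary cacheable kernel mapping of
                 * the object's shmem pages stays coherent with the GPU
                 */
                addr = vmap(pages, n_pages, 0, PAGE_KERNEL);
        } else {
                /* no coherency guarantee (or no struct pages for
                 * stolen memory): fall back to a WC view through
                 * the mappable aperture
                 */
                addr = ioremap_wc(aperture_base +
                                  i915_gem_obj_ggtt_offset(obj),
                                  obj->base.size);
        }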

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
drivers/gpu/drm/i915/intel_ringbuffer.c

index 0359736fe97918fc842ae96c38c5c4831e567b76..d6e12de82aaa9861c517d265ee6be4b35f09e08b 100644
@@ -2002,11 +2002,37 @@ static int init_phys_status_page(struct intel_engine_cs *ring)
 
 void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
 {
-       iounmap(ringbuf->virtual_start);
+       if (HAS_LLC(ringbuf->obj->base.dev) && !ringbuf->obj->stolen)
+               vunmap(ringbuf->virtual_start);
+       else
+               iounmap(ringbuf->virtual_start);
        ringbuf->virtual_start = NULL;
        i915_gem_object_ggtt_unpin(ringbuf->obj);
 }
 
+static u32 *vmap_obj(struct drm_i915_gem_object *obj)
+{
+       struct sg_page_iter sg_iter;
+       struct page **pages;
+       void *addr;
+       int i;
+
+       pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
+       if (pages == NULL)
+               return NULL;
+
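+       /* flatten the object's sg_table into a page array for vmap() */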
+       i = 0;
+       for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
+               pages[i++] = sg_page_iter_page(&sg_iter);
+
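+       /* PAGE_KERNEL gives a normal cacheable (WB) kernel mapping */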
+       addr = vmap(pages, i, 0, PAGE_KERNEL);
+       drm_free_large(pages);
+
+       return addr;
+}
+
 int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
                                     struct intel_ringbuffer *ringbuf)
 {
@@ -2014,21 +2040,43 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
        struct drm_i915_gem_object *obj = ringbuf->obj;
        int ret;
 
-       ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
-       if (ret)
-               return ret;
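+       /* stolen memory has no struct pages and so cannot be vmapped */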
+       if (HAS_LLC(dev_priv) && !obj->stolen) {
+               ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, 0);
+               if (ret)
+                       return ret;
 
-       ret = i915_gem_object_set_to_gtt_domain(obj, true);
-       if (ret) {
-               i915_gem_object_ggtt_unpin(obj);
-               return ret;
-       }
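+               /* the ring will be written through a cacheable CPU
+                * mapping, so flush to the CPU, not the GTT, domain */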
+               ret = i915_gem_object_set_to_cpu_domain(obj, true);
+               if (ret) {
+                       i915_gem_object_ggtt_unpin(obj);
+                       return ret;
+               }
+
+               ringbuf->virtual_start = vmap_obj(obj);
+               if (ringbuf->virtual_start == NULL) {
+                       i915_gem_object_ggtt_unpin(obj);
+                       return -ENOMEM;
+               }
+       } else {
+               ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
+               if (ret)
+                       return ret;
 
-       ringbuf->virtual_start = ioremap_wc(dev_priv->gtt.mappable_base +
-                       i915_gem_obj_ggtt_offset(obj), ringbuf->size);
-       if (ringbuf->virtual_start == NULL) {
-               i915_gem_object_ggtt_unpin(obj);
-               return -EINVAL;
+               ret = i915_gem_object_set_to_gtt_domain(obj, true);
+               if (ret) {
+                       i915_gem_object_ggtt_unpin(obj);
+                       return ret;
+               }
+
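+               /* no shared LLC (or stolen): use a WC view of the aperture */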
+               ringbuf->virtual_start = ioremap_wc(dev_priv->gtt.mappable_base +
+                                                   i915_gem_obj_ggtt_offset(obj), ringbuf->size);
+               if (ringbuf->virtual_start == NULL) {
+                       i915_gem_object_ggtt_unpin(obj);
+                       return -EINVAL;
+               }
        }
 
        return 0;