drm/i915: Only pwrite through the GTT if there is space in the aperture
author    Chris Wilson <chris@chris-wilson.co.uk>
          Sat, 11 Aug 2012 14:41:04 +0000 (15:41 +0100)
committer Daniel Vetter <daniel.vetter@ffwll.ch>
          Fri, 24 Aug 2012 00:03:33 +0000 (02:03 +0200)
Avoid stalling on the GPU by checking whether there is sufficient
inactive space in the aperture to bind the buffer before writing
through the GTT. If there is not enough space, we would have to stall
waiting for the GPU and incur the overhead of moving objects about;
instead, only incur the clflush overhead on the target object by
writing through shmem.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
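
Below is a minimal sketch of the resulting pwrite flow, using the function
names visible in the i915_gem.c hunks further down; the wrapper itself
(pwrite_flow_sketch) is purely illustrative and is not part of the patch.

static int pwrite_flow_sketch(struct drm_device *dev,
			      struct drm_i915_gem_object *obj,
			      struct drm_i915_gem_pwrite *args,
			      struct drm_file *file)
{
	int ret = -EFAULT;

	if (obj->cache_level == I915_CACHE_NONE &&
	    obj->tiling_mode == I915_TILING_NONE &&
	    obj->base.write_domain != I915_GEM_DOMAIN_CPU)
		/* The fast path now pins with nonblocking=true, so eviction
		 * only scans the inactive list and returns -ENOSPC rather
		 * than stalling on the GPU. */
		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);

	if (ret == -EFAULT || ret == -ENOSPC)
		/* No room without waiting (or non-page-backed user pages):
		 * take the clflush hit and write through shmem instead. */
		ret = i915_gem_shmem_pwrite(dev, obj, args, file);

	return ret;
}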
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_context.c
drivers/gpu/drm/i915/i915_gem_evict.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/intel_overlay.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_ringbuffer.c

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index cbd3cd0747b98792372cc4c00f9ed2382534ed37..06a88be067f194b9503fa1ac73ef48ed3c9760a9 100644
@@ -1307,7 +1307,8 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 void i915_gem_free_object(struct drm_gem_object *obj);
 int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
                                     uint32_t alignment,
-                                    bool map_and_fenceable);
+                                    bool map_and_fenceable,
+                                    bool nonblocking);
 void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
 int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
 void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
@@ -1454,7 +1455,8 @@ void i915_gem_init_global_gtt(struct drm_device *dev,
 int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
                                          unsigned alignment,
                                          unsigned cache_level,
-                                         bool mappable);
+                                         bool mappable,
+                                         bool nonblock);
 int i915_gem_evict_everything(struct drm_device *dev);
 
 /* i915_gem_stolen.c */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index e1ec587c42c8f614ae81ccad60d4bb8e5780a866..1ec5f7ef2cc2417b32cccdf55de09ec84649d608 100644
@@ -41,7 +41,8 @@ static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *o
 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
 static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
                                                    unsigned alignment,
-                                                   bool map_and_fenceable);
+                                                   bool map_and_fenceable,
+                                                   bool nonblocking);
 static int i915_gem_phys_pwrite(struct drm_device *dev,
                                struct drm_i915_gem_object *obj,
                                struct drm_i915_gem_pwrite *args,
@@ -609,7 +610,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
        char __user *user_data;
        int page_offset, page_length, ret;
 
-       ret = i915_gem_object_pin(obj, 0, true);
+       ret = i915_gem_object_pin(obj, 0, true, true);
        if (ret)
                goto out;
 
@@ -925,10 +926,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                goto out;
        }
 
-       if (obj->gtt_space &&
-           obj->cache_level == I915_CACHE_NONE &&
+       if (obj->cache_level == I915_CACHE_NONE &&
            obj->tiling_mode == I915_TILING_NONE &&
-           obj->map_and_fenceable &&
            obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
                ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
                /* Note that the gtt paths might fail with non-page-backed user
@@ -936,7 +935,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                 * textures). Fallback to the shmem path in that case. */
        }
 
-       if (ret == -EFAULT)
+       if (ret == -EFAULT || ret == -ENOSPC)
                ret = i915_gem_shmem_pwrite(dev, obj, args, file);
 
 out:
@@ -1115,7 +1114,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
                        goto unlock;
        }
        if (!obj->gtt_space) {
-               ret = i915_gem_object_bind_to_gtt(obj, 0, true);
+               ret = i915_gem_object_bind_to_gtt(obj, 0, true, false);
                if (ret)
                        goto unlock;
 
@@ -2772,7 +2771,8 @@ static void i915_gem_verify_gtt(struct drm_device *dev)
 static int
 i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
                            unsigned alignment,
-                           bool map_and_fenceable)
+                           bool map_and_fenceable,
+                           bool nonblocking)
 {
        struct drm_device *dev = obj->base.dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
@@ -2848,7 +2848,8 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
        if (obj->gtt_space == NULL) {
                ret = i915_gem_evict_something(dev, size, alignment,
                                               obj->cache_level,
-                                              map_and_fenceable);
+                                              map_and_fenceable,
+                                              nonblocking);
                if (ret)
                        return ret;
 
@@ -3188,7 +3189,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
         * (e.g. libkms for the bootup splash), we have to ensure that we
         * always use map_and_fenceable for all scanout buffers.
         */
-       ret = i915_gem_object_pin(obj, alignment, true);
+       ret = i915_gem_object_pin(obj, alignment, true, false);
        if (ret)
                return ret;
 
@@ -3325,7 +3326,8 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 int
 i915_gem_object_pin(struct drm_i915_gem_object *obj,
                    uint32_t alignment,
-                   bool map_and_fenceable)
+                   bool map_and_fenceable,
+                   bool nonblocking)
 {
        int ret;
 
@@ -3349,7 +3351,8 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 
        if (obj->gtt_space == NULL) {
                ret = i915_gem_object_bind_to_gtt(obj, alignment,
-                                                 map_and_fenceable);
+                                                 map_and_fenceable,
+                                                 nonblocking);
                if (ret)
                        return ret;
        }
@@ -3407,7 +3410,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
        obj->user_pin_count++;
        obj->pin_filp = file;
        if (obj->user_pin_count == 1) {
-               ret = i915_gem_object_pin(obj, args->alignment, true);
+               ret = i915_gem_object_pin(obj, args->alignment, true, false);
                if (ret)
                        goto out;
        }
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 5c2d354cebbd744daeb77a39fa06485053514159..4aa7ecf77ede168e9e829391bcb766b1ba2821da 100644
@@ -221,7 +221,7 @@ static int create_default_context(struct drm_i915_private *dev_priv)
         * default context.
         */
        dev_priv->ring[RCS].default_context = ctx;
-       ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false);
+       ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false, false);
        if (ret)
                goto err_destroy;
 
@@ -374,7 +374,7 @@ static int do_switch(struct i915_hw_context *to)
        if (from_obj == to->obj)
                return 0;
 
-       ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false);
+       ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false, false);
        if (ret)
                return ret;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 74635da7c4984bc2c22fd82cc0348431328e1df9..a2d8acde85508e2aa11ab05073cb0db8b7701339 100644
@@ -45,7 +45,7 @@ mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
 int
 i915_gem_evict_something(struct drm_device *dev, int min_size,
                         unsigned alignment, unsigned cache_level,
-                        bool mappable)
+                        bool mappable, bool nonblocking)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct list_head eviction_list, unwind_list;
@@ -92,12 +92,16 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
                        goto found;
        }
 
+       if (nonblocking)
+               goto none;
+
        /* Now merge in the soon-to-be-expired objects... */
        list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
                if (mark_free(obj, &unwind_list))
                        goto found;
        }
 
+none:
        /* Nothing found, clean up and bail out! */
        while (!list_empty(&unwind_list)) {
                obj = list_first_entry(&unwind_list,
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 834a636b44f078eb5c4d3016663f16adf7975c4c..f7346d87655129863d0af297063f25f7c281b4e3 100644
@@ -354,7 +354,7 @@ pin_and_fence_object(struct drm_i915_gem_object *obj,
                obj->tiling_mode != I915_TILING_NONE;
        need_mappable = need_fence || need_reloc_mappable(obj);
 
-       ret = i915_gem_object_pin(obj, entry->alignment, need_mappable);
+       ret = i915_gem_object_pin(obj, entry->alignment, need_mappable, false);
        if (ret)
                return ret;
 
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index c0f48580405a21977f365ee9c4251928fd64c7ce..afd0f30ab882cf2834cfd2474a43bae41f69b532 100644
@@ -1383,7 +1383,7 @@ void intel_setup_overlay(struct drm_device *dev)
                }
                overlay->flip_addr = reg_bo->phys_obj->handle->busaddr;
        } else {
-               ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true);
+               ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true, false);
                if (ret) {
                        DRM_ERROR("failed to pin overlay register bo\n");
                        goto out_free_bo;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index c0407aa5baac14d5bfa0115ed819287f77fca6ec..c0721ffb56a2ab131b736b250c8ccbd7b174ef1f 100644
@@ -2138,7 +2138,7 @@ intel_alloc_context_page(struct drm_device *dev)
                return NULL;
        }
 
-       ret = i915_gem_object_pin(ctx, 4096, true);
+       ret = i915_gem_object_pin(ctx, 4096, true, false);
        if (ret) {
                DRM_ERROR("failed to pin power context: %d\n", ret);
                goto err_unref;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index c828169c73ae8103c351f177000688cd4500b6f4..ac93643731aad1947d9ea1e2c7cc20fb19f69105 100644
@@ -391,7 +391,7 @@ init_pipe_control(struct intel_ring_buffer *ring)
 
        i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
 
-       ret = i915_gem_object_pin(obj, 4096, true);
+       ret = i915_gem_object_pin(obj, 4096, true, false);
        if (ret)
                goto err_unref;
 
@@ -979,7 +979,7 @@ static int init_status_page(struct intel_ring_buffer *ring)
 
        i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
 
-       ret = i915_gem_object_pin(obj, 4096, true);
+       ret = i915_gem_object_pin(obj, 4096, true, false);
        if (ret != 0) {
                goto err_unref;
        }
@@ -1036,7 +1036,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 
        ring->obj = obj;
 
-       ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
+       ret = i915_gem_object_pin(obj, PAGE_SIZE, true, false);
        if (ret)
                goto err_unref;