drm/i915: Fall back to single PAGE_SIZE segments for DMA remapping
author    Chris Wilson <chris@chris-wilson.co.uk>
          Mon, 19 Dec 2016 12:43:45 +0000 (12:43 +0000)
committer Jani Nikula <jani.nikula@intel.com>
          Tue, 20 Dec 2016 14:30:47 +0000 (16:30 +0200)
If at first we do not succeed in remapping our physical pages with a
coalesced scattergather list, try again with one scattergather entry
per page. This should help with swiotlb, which uses a limited bounce
buffer and only searches it for contiguous chunks aligned up to the
next boundary - i.e. a large coalesced segment may fail prematurely
because it cannot use the free space left between chunks already in
use, triggering an error such as:

 i915 0000:00:02.0: swiotlb buffer is full (sz: 1630208 bytes)
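
To make the failure mode concrete, here is a small standalone model of
that fragmentation (an illustrative sketch only, not kernel code: the
slot count, alignment and allocation pattern are invented; real swiotlb
bookkeeping differs). Plenty of slots are free, yet no contiguous,
boundary-aligned run is long enough for a coalesced segment, while the
same number of single-slot (PAGE_SIZE) requests still fit:

 #include <stdbool.h>
 #include <stdio.h>

 #define SLOTS    128   /* model: bounce-buffer slot count (invented) */
 #define ALIGN_TO 8     /* model: boundary alignment of the search (invented) */

 static bool used[SLOTS];

 /* Find a free run of 'n' slots starting on an ALIGN_TO boundary,
  * mimicking an aligned contiguous search. Returns start or -1. */
 static int find_contig(int n)
 {
 	for (int start = 0; start + n <= SLOTS; start += ALIGN_TO) {
 		int i;
 		for (i = 0; i < n && !used[start + i]; i++)
 			;
 		if (i == n)
 			return start;
 	}
 	return -1;
 }

 int main(void)
 {
 	int free_slots = 0, placed = 0;

 	/* Fragment the buffer: occupy one slot in every aligned block,
 	 * leaving most slots free but with no long free run. */
 	for (int i = 0; i < SLOTS; i += ALIGN_TO)
 		used[i] = true;
 	for (int i = 0; i < SLOTS; i++)
 		free_slots += !used[i];

 	printf("free slots: %d\n", free_slots);
 	printf("16-slot coalesced request: %s\n",
 	       find_contig(16) >= 0 ? "fits" : "buffer is full");

 	/* ...but sixteen single-slot requests are placed one by one. */
 	for (int i = 0; i < SLOTS && placed < 16; i++)
 		if (!used[i]) {
 			used[i] = true;
 			placed++;
 		}
 	printf("16 single-slot requests placed: %d\n", placed);
 	return 0;
 }

With one scattergather entry per page, each PAGE_SIZE chunk can land in
any free slot independently, which is exactly what the fallback in the
diff below exploits.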

Reported-by: Juergen Gross <jgross@suse.com>
Tested-by: Juergen Gross <jgross@suse.com>
Fixes: 871dfbd67d4e ("drm/i915: Allow compaction upto SWIOTLB max segment size")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Imre Deak <imre.deak@intel.com>
Cc: <drm-intel-fixes@lists.freedesktop.org>
Link: http://patchwork.freedesktop.org/patch/msgid/20161219124346.550-1-chris@chris-wilson.co.uk
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
(cherry picked from commit d766ef53006c2c38a7fe2bef0904105a793383f2)
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
drivers/gpu/drm/i915/i915_gem.c

index 3b6eb651d88a8a14706cfc5ea33d3c3374d71fb7..c6f80bc3b7bc696e2f535026cf34cb456fd21ca3 100644 (file)
@@ -2342,7 +2342,8 @@ static struct sg_table *
 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 {
        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-       int page_count, i;
+       const unsigned long page_count = obj->base.size / PAGE_SIZE;
+       unsigned long i;
        struct address_space *mapping;
        struct sg_table *st;
        struct scatterlist *sg;
@@ -2368,7 +2369,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
        if (st == NULL)
                return ERR_PTR(-ENOMEM);
 
-       page_count = obj->base.size / PAGE_SIZE;
+rebuild_st:
        if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
                kfree(st);
                return ERR_PTR(-ENOMEM);
@@ -2427,8 +2428,25 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
        i915_sg_trim(st);
 
        ret = i915_gem_gtt_prepare_pages(obj, st);
-       if (ret)
-               goto err_pages;
+       if (ret) {
+               /* DMA remapping failed? One possible cause is that
+                * it could not reserve enough large entries, asking
+                * for PAGE_SIZE chunks instead may be helpful.
+                */
+               if (max_segment > PAGE_SIZE) {
+                       for_each_sgt_page(page, sgt_iter, st)
+                               put_page(page);
+                       sg_free_table(st);
+
+                       max_segment = PAGE_SIZE;
+                       goto rebuild_st;
+               } else {
+                       dev_warn(&dev_priv->drm.pdev->dev,
+                                "Failed to DMA remap %lu pages\n",
+                                page_count);
+                       goto err_pages;
+               }
+       }
 
        if (i915_gem_object_needs_bit17_swizzle(obj))
                i915_gem_object_do_bit_17_swizzle(obj, st);