From 93927ca52a55c23e0a6a305e7e9082e8411ac9fa Mon Sep 17 00:00:00 2001
From: Daniel Vetter
Date: Thu, 10 Jan 2013 18:03:00 +0100
Subject: [PATCH] drm/i915: Revert shrinker changes from "Track unbound pages"

This partially reverts

commit 6c085a728cf000ac1865d66f8c9b52935558b328
Author: Chris Wilson
Date:   Mon Aug 20 11:40:46 2012 +0200

    drm/i915: Track unbound pages

Closer inspection of that patch revealed a bunch of unrelated changes
in the shrinker:

- The shrinker count is now in pages instead of objects.

- For counting the shrinkable objects, the old code only looked at the
  inactive list; the new code looks at all bound objects (including
  pinned ones), obviously in addition to the new unbound list.

- The shrinker count is no longer scaled with
  sysctl_vfs_cache_pressure. Note though that with the default tuning
  value of vfs_cache_pressure = 100 this doesn't affect the shrinker
  behaviour.

- When actually shrinking objects, the old code first dropped
  purgeable objects, then normal (inactive) objects. Only then did it,
  in a last-ditch effort, idle the gpu and evict everything. The new
  code omits the intermediate step of evicting normal inactive
  objects.

Save for the first change, which seems benign, and the shrinker count
scaling, which is a bit of a different story, the end result of all
these changes is that the shrinker is _much_ more likely to fall back
to the last-ditch resort of idling the gpu and evicting everything.
The old code could only do that if something else had evicted lots of
objects in the meantime (since without any other changes the
nr_to_scan will be smaller than the object count).

Reverting the vfs_cache_pressure behaviour itself is a bit bogus: only
dentry/inode object caches should scale their shrinker counts with
vfs_cache_pressure. Originally I had that change reverted, too, but
Chris Wilson insisted that it's too bogus and shouldn't again see the
light of day.

Hence revert all these other changes and restore the old shrinker
behaviour, with the minor adjustment that we now first scan the
unbound list, then the inactive list for each object category
(purgeable or normal).

A similar patch has been tested by a few people affected by the gen4/5
hangs which started to appear in 3.7, and which some of them bisected
to the "drm/i915: Track unbound pages" commit. Just disabling the
unbound logic alone didn't change things at all, though.

Note that this patch doesn't fix the referenced bugs, it only hides
the underlying bug(s) well enough to restore pre-3.7 behaviour. The
key to achieving that is to massively reduce the likelihood of going
into a full gpu stall and evicting everything.

v2: Reword commit message a bit, taking Chris Wilson's comment into
account.

v3: On Chris Wilson's insistence, do not reinstate the rather bogus
vfs_cache_pressure change.
Tested-by: Greg KH
Tested-by: Dave Kleikamp
References: https://bugs.freedesktop.org/show_bug.cgi?id=55984
References: https://bugs.freedesktop.org/show_bug.cgi?id=57122
References: https://bugs.freedesktop.org/show_bug.cgi?id=56916
References: https://bugs.freedesktop.org/show_bug.cgi?id=57136
Cc: Chris Wilson
Cc: stable@vger.kernel.org
Acked-by: Chris Wilson
Signed-off-by: Daniel Vetter
---
 drivers/gpu/drm/i915/i915_gem.c | 18 ++++++++++++++----
 1 file changed, 14 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 5791ecd908a5..8febea6daa08 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1717,7 +1717,8 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
 }
 
 static long
-i915_gem_purge(struct drm_i915_private *dev_priv, long target)
+__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
+		  bool purgeable_only)
 {
 	struct drm_i915_gem_object *obj, *next;
 	long count = 0;
@@ -1725,7 +1726,7 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
 	list_for_each_entry_safe(obj, next,
 				 &dev_priv->mm.unbound_list,
 				 gtt_list) {
-		if (i915_gem_object_is_purgeable(obj) &&
+		if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
 		    i915_gem_object_put_pages(obj) == 0) {
 			count += obj->base.size >> PAGE_SHIFT;
 			if (count >= target)
@@ -1736,7 +1737,7 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
 	list_for_each_entry_safe(obj, next,
 				 &dev_priv->mm.inactive_list,
 				 mm_list) {
-		if (i915_gem_object_is_purgeable(obj) &&
+		if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
 		    i915_gem_object_unbind(obj) == 0 &&
 		    i915_gem_object_put_pages(obj) == 0) {
 			count += obj->base.size >> PAGE_SHIFT;
@@ -1748,6 +1749,12 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
 	return count;
 }
 
+static long
+i915_gem_purge(struct drm_i915_private *dev_priv, long target)
+{
+	return __i915_gem_shrink(dev_priv, target, true);
+}
+
 static void
 i915_gem_shrink_all(struct drm_i915_private *dev_priv)
 {
@@ -4395,6 +4402,9 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
 
 	if (nr_to_scan) {
 		nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
+		if (nr_to_scan > 0)
+			nr_to_scan -= __i915_gem_shrink(dev_priv, nr_to_scan,
+							false);
 		if (nr_to_scan > 0)
 			i915_gem_shrink_all(dev_priv);
 	}
@@ -4403,7 +4413,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
 	list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list)
 		if (obj->pages_pin_count == 0)
 			cnt += obj->base.size >> PAGE_SHIFT;
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
+	list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list)
 		if (obj->pin_count == 0 && obj->pages_pin_count == 0)
 			cnt += obj->base.size >> PAGE_SHIFT;
 
-- 
2.20.1
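
For reference, the scan order that the i915_gem_inactive_shrink() hunk
above restores can be summarised in a small, self-contained C sketch.
This is not kernel code and not part of the patch; the stub functions
and the numbers are invented stand-ins for i915_gem_purge(),
__i915_gem_shrink(..., false) and i915_gem_shrink_all(), and only the
control flow mirrors the three-stage fallback: purgeable objects first,
then normal inactive objects, and a full gpu idle plus evict-everything
only as a last resort.

/*
 * Illustrative user-space sketch of the restored shrink order.  The
 * stubs below are invented stand-ins; only the control flow mirrors
 * the patched i915_gem_inactive_shrink().
 */
#include <stdio.h>

/* Stage 1 stub: drop purgeable objects only; pretend half the target is freed. */
static long purge_purgeable(long target)
{
	return target / 2;
}

/* Stage 2 stub: also drop normal inactive objects; pretend the rest is freed. */
static long shrink_inactive(long target)
{
	return target;
}

/* Stage 3 stub: last resort, idle the gpu and evict everything. */
static void shrink_all(void)
{
	printf("full gpu idle + evict everything\n");
}

static void shrinker_scan(long nr_to_scan)
{
	if (nr_to_scan) {
		/* 1) purgeable objects (unbound list first, then inactive list) */
		nr_to_scan -= purge_purgeable(nr_to_scan);
		/* 2) normal inactive objects, the step this patch reinstates */
		if (nr_to_scan > 0)
			nr_to_scan -= shrink_inactive(nr_to_scan);
		/* 3) only if that still wasn't enough: stall and evict */
		if (nr_to_scan > 0)
			shrink_all();
	}
}

int main(void)
{
	/* Stages 1 and 2 satisfy the request, so stage 3 never triggers. */
	shrinker_scan(128);
	return 0;
}

The point of the revert is to make stage 3 rare again, as it was before
3.7; without the reinstated stage 2 the shrinker fell through to the
full stall far too often.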