drm/i915: Preallocate the lazy request
author Chris Wilson <chris@chris-wilson.co.uk>
Wed, 4 Sep 2013 09:45:52 +0000 (10:45 +0100)
committer Daniel Vetter <daniel.vetter@ffwll.ch>
Thu, 5 Sep 2013 10:03:53 +0000 (12:03 +0200)
It is possible for us to be forced to perform an allocation for the lazy
request whilst running the shrinker. This allocation may fail, leaving
us unable to reclaim any memory, leading to premature OOM. A neat
solution to the problem is to preallocate the request at the same time
as acquiring the seqno for the ring transaction. This means that we can
report ENOMEM prior to touching the rings.
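
A simplified sketch of the resulting allocation handshake (an illustration
condensed from the hunks below, not the complete driver code; surrounding
error handling and state is elided):

/*
 * Producer side: reserve the request together with the seqno, so that
 * any -ENOMEM is reported before the ring is touched.
 */
static int
intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
{
        if (ring->outstanding_lazy_seqno)
                return 0;

        if (ring->preallocated_lazy_request == NULL) {
                struct drm_i915_gem_request *request;

                request = kmalloc(sizeof(*request), GFP_KERNEL);
                if (request == NULL)
                        return -ENOMEM;

                ring->preallocated_lazy_request = request;
        }

        return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
}

/*
 * Consumer side: __i915_add_request() takes the preallocated request
 * instead of allocating, and clears it once the request is queued.
 */
        request = ring->preallocated_lazy_request;
        if (WARN_ON(request == NULL))
                return -ENOMEM;
        ...
        ring->outstanding_lazy_seqno = 0;
        ring->preallocated_lazy_request = NULL;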

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 858e78886637f93e194d17b2e14fd48d3e3002ec..399e159016e2eb4a62f6848acafbc0332e3d042a 100644
@@ -2041,8 +2041,8 @@ int __i915_add_request(struct intel_ring_buffer *ring,
        if (ret)
                return ret;
 
-       request = kmalloc(sizeof(*request), GFP_KERNEL);
-       if (request == NULL)
+       request = ring->preallocated_lazy_request;
+       if (WARN_ON(request == NULL))
                return -ENOMEM;
 
        /* Record the position of the start of the request so that
@@ -2053,10 +2053,8 @@ int __i915_add_request(struct intel_ring_buffer *ring,
        request_ring_position = intel_ring_get_tail(ring);
 
        ret = ring->add_request(ring);
-       if (ret) {
-               kfree(request);
+       if (ret)
                return ret;
-       }
 
        request->seqno = intel_ring_get_seqno(ring);
        request->ring = ring;
@@ -2095,6 +2093,7 @@ int __i915_add_request(struct intel_ring_buffer *ring,
 
        trace_i915_gem_request_add(ring, request->seqno);
        ring->outstanding_lazy_seqno = 0;
+       ring->preallocated_lazy_request = NULL;
 
        if (!dev_priv->ums.mm_suspended) {
                i915_queue_hangcheck(ring->dev);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index a83ff1863a5ed4a08c546aff8d86bdc4887f6e3c..284afaf5d6ffc8357418b4202cc7ae5614f35e2c 100644
@@ -1498,6 +1498,16 @@ intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
        if (ring->outstanding_lazy_seqno)
                return 0;
 
+       if (ring->preallocated_lazy_request == NULL) {
+               struct drm_i915_gem_request *request;
+
+               request = kmalloc(sizeof(*request), GFP_KERNEL);
+               if (request == NULL)
+                       return -ENOMEM;
+
+               ring->preallocated_lazy_request = request;
+       }
+
        return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
 }
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index c6aa2b3c8c268d061b467d6d12569c22d753424d..ad2dd65c63f8ff970ba2541b735470444bd96959 100644
@@ -140,6 +140,7 @@ struct  intel_ring_buffer {
        /**
         * Do we have some not yet emitted requests outstanding?
         */
+       struct drm_i915_gem_request *preallocated_lazy_request;
        u32 outstanding_lazy_seqno;
        bool gpu_caches_dirty;
        bool fbc_dirty;