drm/i915: Expand bool interruptible to pass flags to i915_wait_request()
author: Chris Wilson <chris@chris-wilson.co.uk>
Fri, 9 Sep 2016 13:11:49 +0000 (14:11 +0100)
committer: Chris Wilson <chris@chris-wilson.co.uk>
Fri, 9 Sep 2016 13:23:03 +0000 (14:23 +0100)
We need finer control over wakeup behaviour during i915_wait_request(),
so expand the current bool interruptible to a bitmask.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20160909131201.16673-9-chris@chris-wilson.co.uk
12 files changed:
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_evict.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gem_request.c
drivers/gpu/drm/i915/i915_gem_request.h
drivers/gpu/drm/i915/i915_gem_shrinker.c
drivers/gpu/drm/i915/i915_gem_userptr.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h

index 24bf923a230ff5da4c2ee79d698904c87a868d60..0e4cbad7a88375a39e4711cba23c2fdead0407c4 100644 (file)
@@ -4794,7 +4794,7 @@ i915_drop_caches_set(void *data, u64 val)
                return ret;
 
        if (val & DROP_ACTIVE) {
-               ret = i915_gem_wait_for_idle(dev_priv, true);
+               ret = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE);
                if (ret)
                        goto unlock;
        }
index dced7e72b625d71dd723a5a0163e56340eacc750..20b7743f8ec5b2ba02d9067509a9677bd7c7f8a1 100644 (file)
@@ -3270,7 +3270,7 @@ int __must_check i915_gem_init_hw(struct drm_device *dev);
 void i915_gem_init_swizzling(struct drm_device *dev);
 void i915_gem_cleanup_engines(struct drm_device *dev);
 int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
-                                       bool interruptible);
+                                       unsigned int flags);
 int __must_check i915_gem_suspend(struct drm_device *dev);
 void i915_gem_resume(struct drm_device *dev);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
index 87a4f3543f0bb1e028254ef87570990b0108836c..4617250c300062dfb45b7cefa19284964f7c87ed 100644 (file)
@@ -386,7 +386,8 @@ __unsafe_wait_rendering(struct drm_i915_gem_object *obj,
                int ret;
 
                ret = i915_gem_active_wait_unlocked(&active[idx],
-                                                   true, NULL, rps);
+                                                   I915_WAIT_INTERRUPTIBLE,
+                                                   NULL, rps);
                if (ret)
                        return ret;
        }
@@ -2026,7 +2027,7 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
         * to claim that space for ourselves, we need to take the big
         * struct_mutex to free the requests+objects and allocate our slot.
         */
-       err = i915_gem_wait_for_idle(dev_priv, true);
+       err = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE);
        if (err)
                return err;
 
@@ -2779,7 +2780,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        active = __I915_BO_ACTIVE(obj);
        for_each_active(active, idx) {
                s64 *timeout = args->timeout_ns >= 0 ? &args->timeout_ns : NULL;
-               ret = i915_gem_active_wait_unlocked(&obj->last_read[idx], true,
+               ret = i915_gem_active_wait_unlocked(&obj->last_read[idx],
+                                                   I915_WAIT_INTERRUPTIBLE,
                                                    timeout, rps);
                if (ret)
                        break;
@@ -2982,7 +2984,7 @@ destroy:
 }
 
 int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
-                          bool interruptible)
+                          unsigned int flags)
 {
        struct intel_engine_cs *engine;
        int ret;
@@ -2991,7 +2993,7 @@ int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
                if (engine->last_context == NULL)
                        continue;
 
-               ret = intel_engine_idle(engine, interruptible);
+               ret = intel_engine_idle(engine, flags);
                if (ret)
                        return ret;
        }
@@ -3746,7 +3748,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
        if (target == NULL)
                return 0;
 
-       ret = i915_wait_request(target, true, NULL, NULL);
+       ret = i915_wait_request(target, I915_WAIT_INTERRUPTIBLE, NULL, NULL);
        i915_gem_request_put(target);
 
        return ret;
@@ -4302,7 +4304,7 @@ int i915_gem_suspend(struct drm_device *dev)
        if (ret)
                goto err;
 
-       ret = i915_gem_wait_for_idle(dev_priv, true);
+       ret = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE);
        if (ret)
                goto err;
 
index 815d5fbe07acaa935dc17eca3cbc6de660ed7b6f..103085246975b33c297bbc98b1d6535f687c4117 100644 (file)
@@ -170,7 +170,7 @@ search_again:
        if (ret)
                return ret;
 
-       ret = i915_gem_wait_for_idle(dev_priv, true);
+       ret = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE);
        if (ret)
                return ret;
 
@@ -275,7 +275,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
                                return ret;
                }
 
-               ret = i915_gem_wait_for_idle(dev_priv, true);
+               ret = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE);
                if (ret)
                        return ret;
 
index e16c38086abef13b03e7867cd7737a74b70bf7b7..9bcac52b826886cecba44b613d12c1733d5022b2 100644 (file)
@@ -2683,7 +2683,7 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
 
        if (unlikely(ggtt->do_idle_maps)) {
-               if (i915_gem_wait_for_idle(dev_priv, false)) {
+               if (i915_gem_wait_for_idle(dev_priv, 0)) {
                        DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
                        /* Wait a bit, in hopes it avoids the hang */
                        udelay(10);
index 24eb4b1b7540e0f52f9ee08389deb686c1de4ec3..f4c15f319d08d4dfeb41de62545cd0402a83442f 100644 (file)
@@ -260,7 +260,7 @@ static int i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
 
        /* Carefully retire all requests without writing to the rings */
        for_each_engine(engine, dev_priv) {
-               ret = intel_engine_idle(engine, true);
+               ret = intel_engine_idle(engine, I915_WAIT_INTERRUPTIBLE);
                if (ret)
                        return ret;
        }
@@ -598,7 +598,7 @@ bool __i915_spin_request(const struct drm_i915_gem_request *req,
 /**
  * i915_wait_request - wait until execution of request has finished
  * @req: duh!
- * @interruptible: do an interruptible wait (normally yes)
+ * @flags: how to wait
  * @timeout: in - how long to wait (NULL forever); out - how much time remaining
  * @rps: client to charge for RPS boosting
  *
@@ -613,11 +613,12 @@ bool __i915_spin_request(const struct drm_i915_gem_request *req,
  * errno with remaining time filled in timeout argument.
  */
 int i915_wait_request(struct drm_i915_gem_request *req,
-                     bool interruptible,
+                     unsigned int flags,
                      s64 *timeout,
                      struct intel_rps_client *rps)
 {
-       int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
+       const int state = flags & I915_WAIT_INTERRUPTIBLE ?
+               TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
        DEFINE_WAIT(reset);
        struct intel_wait wait;
        unsigned long timeout_remain;
index a231bd318ef0dd68f0681369a6ad00bd5ef18923..479896ef791e84d74fe776ba32e7f321319728b6 100644 (file)
@@ -218,10 +218,11 @@ struct intel_rps_client;
 #define IS_RPS_USER(p) (!IS_ERR_OR_NULL(p))
 
 int i915_wait_request(struct drm_i915_gem_request *req,
-                     bool interruptible,
+                     unsigned int flags,
                      s64 *timeout,
                      struct intel_rps_client *rps)
        __attribute__((nonnull(1)));
+#define I915_WAIT_INTERRUPTIBLE BIT(0)
 
 static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine);
 
@@ -575,13 +576,13 @@ i915_gem_active_wait(const struct i915_gem_active *active, struct mutex *mutex)
        if (!request)
                return 0;
 
-       return i915_wait_request(request, true, NULL, NULL);
+       return i915_wait_request(request, I915_WAIT_INTERRUPTIBLE, NULL, NULL);
 }
 
 /**
  * i915_gem_active_wait_unlocked - waits until the request is completed
  * @active - the active request on which to wait
- * @interruptible - whether the wait can be woken by a userspace signal
+ * @flags - how to wait
  * @timeout - how long to wait at most
  * @rps - userspace client to charge for a waitboost
  *
@@ -602,7 +603,7 @@ i915_gem_active_wait(const struct i915_gem_active *active, struct mutex *mutex)
  */
 static inline int
 i915_gem_active_wait_unlocked(const struct i915_gem_active *active,
-                             bool interruptible,
+                             unsigned int flags,
                              s64 *timeout,
                              struct intel_rps_client *rps)
 {
@@ -611,7 +612,7 @@ i915_gem_active_wait_unlocked(const struct i915_gem_active *active,
 
        request = i915_gem_active_get_unlocked(active);
        if (request) {
-               ret = i915_wait_request(request, interruptible, timeout, rps);
+               ret = i915_wait_request(request, flags, timeout, rps);
                i915_gem_request_put(request);
        }
 
@@ -638,7 +639,7 @@ i915_gem_active_retire(struct i915_gem_active *active,
        if (!request)
                return 0;
 
-       ret = i915_wait_request(request, true, NULL, NULL);
+       ret = i915_wait_request(request, I915_WAIT_INTERRUPTIBLE, NULL, NULL);
        if (ret)
                return ret;
 
index b80802b35353ee803f32a4fd9fd4eba0cefe39d5..35a05f4c51c11cd0dd637c8623871908424be0f2 100644 (file)
@@ -323,7 +323,7 @@ i915_gem_shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv,
        unsigned long timeout = jiffies + msecs_to_jiffies_timeout(timeout_ms);
 
        do {
-               if (i915_gem_wait_for_idle(dev_priv, false) == 0 &&
+               if (i915_gem_wait_for_idle(dev_priv, 0) == 0 &&
                    i915_gem_shrinker_lock(&dev_priv->drm, &slu->unlock))
                        break;
 
@@ -414,7 +414,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
                return NOTIFY_DONE;
 
        /* Force everything onto the inactive lists */
-       ret = i915_gem_wait_for_idle(dev_priv, false);
+       ret = i915_gem_wait_for_idle(dev_priv, 0);
        if (ret)
                goto out;
 
index be54825ef3e86e3108a173309022d1d7e3131317..e537930c64b53d5a18ebbf7fcb79be68f0114acc 100644 (file)
@@ -68,7 +68,7 @@ static void wait_rendering(struct drm_i915_gem_object *obj)
 
        for_each_active(active, idx)
                i915_gem_active_wait_unlocked(&obj->last_read[idx],
-                                             false, NULL, NULL);
+                                             0, NULL, NULL);
 }
 
 static void cancel_userptr(struct work_struct *work)
index 7e725054ba58922d7b172bda602feb97e8b79320..b92042f4dc50d1b7d4b20888f23919adf236bd28 100644 (file)
@@ -12022,8 +12022,7 @@ static void intel_mmio_flip_work_func(struct work_struct *w)
 
        if (work->flip_queued_req)
                WARN_ON(i915_wait_request(work->flip_queued_req,
-                                         false, NULL,
-                                         NO_WAITBOOST));
+                                         0, NULL, NO_WAITBOOST));
 
        /* For framebuffer backed by dmabuf, wait for fence */
        resv = i915_gem_object_get_dmabuf_resv(obj);
@@ -14071,7 +14070,8 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
                                continue;
 
                        ret = i915_wait_request(intel_plane_state->wait_req,
-                                               true, NULL, NULL);
+                                               I915_WAIT_INTERRUPTIBLE,
+                                               NULL, NULL);
                        if (ret) {
                                /* Any hang should be swallowed by the wait */
                                WARN_ON(ret == -EIO);
@@ -14289,7 +14289,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
                        continue;
 
                ret = i915_wait_request(intel_plane_state->wait_req,
-                                       true, NULL, NULL);
+                                       0, NULL, NULL);
                /* EIO should be eaten, and we can't get interrupted in the
                 * worker, and blocking commits have waited already. */
                WARN_ON(ret);
index fd8fcc6ec9701d01397b19eeb0b1a381eae213a8..a5bf188770682ee85f7491fc856df54f172d7a1c 100644 (file)
@@ -2223,7 +2223,8 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
        if (WARN_ON(&target->ring_link == &ring->request_list))
                return -ENOSPC;
 
-       ret = i915_wait_request(target, true, NULL, NO_WAITBOOST);
+       ret = i915_wait_request(target, I915_WAIT_INTERRUPTIBLE,
+                               NULL, NO_WAITBOOST);
        if (ret)
                return ret;
 
index 2181d0a41a961094c9b5252a511bd6cf40e33445..18848acf5e743ad8181f97fc7d18bf935c6978a8 100644 (file)
@@ -489,11 +489,11 @@ int intel_engine_create_scratch(struct intel_engine_cs *engine, int size);
 void intel_engine_cleanup_common(struct intel_engine_cs *engine);
 
 static inline int intel_engine_idle(struct intel_engine_cs *engine,
-                                   bool interruptible)
+                                   unsigned int flags)
 {
        /* Wait upon the last request to be completed */
        return i915_gem_active_wait_unlocked(&engine->last_request,
-                                            interruptible, NULL, NULL);
+                                            flags, NULL, NULL);
 }
 
 int intel_init_render_ring_buffer(struct intel_engine_cs *engine);