                jiffies + nsecs_to_jiffies_timeout((u64)*timeout) : 0;

        if (INTEL_INFO(dev_priv)->gen >= 6)
-               gen6_rps_boost(dev_priv, rps);
+               gen6_rps_boost(dev_priv, rps, req->emitted_jiffies);

        /* Record current time in case interrupted by signal, or wedged */
        trace_i915_gem_request_wait_begin(req);
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
void gen6_rps_idle(struct drm_i915_private *dev_priv);
void gen6_rps_boost(struct drm_i915_private *dev_priv,
-                   struct intel_rps_client *rps);
+                   struct intel_rps_client *rps,
+                   unsigned long submitted);
void intel_queue_rps_boost_for_request(struct drm_device *dev,
                                       struct drm_i915_gem_request *req);
void ilk_wm_get_hw_state(struct drm_device *dev);
}
void gen6_rps_boost(struct drm_i915_private *dev_priv,
-                   struct intel_rps_client *rps)
+                   struct intel_rps_client *rps,
+                   unsigned long submitted)
{
        u32 val;

+       /* Force a RPS boost (and don't count it against the client) if
+        * the GPU is severely congested; a request still waiting 20ms
+        * after it was submitted is taken as a sign of congestion.
+        */
+       if (rps && time_after(jiffies, submitted + msecs_to_jiffies(20)))
+               rps = NULL;
+
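+       /* With the RPS lock held, boost towards the softlimit maximum
+        * frequency.
+        */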
        mutex_lock(&dev_priv->rps.hw_lock);
        val = dev_priv->rps.max_freq_softlimit;
        if (dev_priv->rps.enabled &&
static void __intel_rps_boost_work(struct work_struct *work)
{
        struct request_boost *boost = container_of(work, struct request_boost, work);
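+       /* Local shorthand for the boosted request; the reference taken
+        * when this work was queued is dropped below.
+        */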
+       struct drm_i915_gem_request *req = boost->req;

-       if (!i915_gem_request_completed(boost->req, true))
-               gen6_rps_boost(to_i915(boost->req->ring->dev), NULL);
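+       /* The request may have completed while this worker was queued;
+        * only boost on behalf of a still-outstanding request, and as an
+        * anonymous (NULL) client so that no one is charged the boost.
+        */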
+       if (!i915_gem_request_completed(req, true))
+               gen6_rps_boost(to_i915(req->ring->dev), NULL,
+                              req->emitted_jiffies);

-       i915_gem_request_unreference__unlocked(boost->req);
+       i915_gem_request_unreference__unlocked(req);
        kfree(boost);
}
        if (req == NULL || INTEL_INFO(dev)->gen < 6)
                return;

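+       /* An already-completed request needs no boost; skip the
+        * allocation and the worker entirely.
+        */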
+       if (i915_gem_request_completed(req, true))
+               return;
+
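+       /* Best-effort only: if the atomic allocation fails, the boost
+        * is simply skipped.
+        */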
        boost = kmalloc(sizeof(*boost), GFP_ATOMIC);
        if (boost == NULL)
                return;