list_empty(&file_priv->rps_boost) ? "" : ", active");
rcu_read_unlock();
}
+ seq_printf(m, "Semaphore boosts: %d\n", dev_priv->rps.semaphores.rps_boosts);
seq_printf(m, "Kernel boosts: %d\n", dev_priv->rps.boosts);
mutex_unlock(&dev_priv->rps.hw_lock);
struct i915_mm_struct;
struct i915_mmu_object;
+/*
+ * Per-open-file driver state, hung off the DRM file's driver-private
+ * pointer.  NOTE(review): also embedded directly in dev_priv->rps as
+ * the "semaphores" pseudo-client elsewhere in this patch, so it no
+ * longer strictly corresponds to a userspace file — confirm intent.
+ */
+struct drm_i915_file_private {
+	struct drm_i915_private *dev_priv;	/* back-pointer to the device */
+	struct drm_file *file;			/* owning DRM file, if any */
+
+	struct {
+		spinlock_t lock;		/* protects request_list */
+		struct list_head request_list;
+	} mm;
+	struct idr context_idr;	/* handle -> GEM context mapping */
+
+	/* Link in the RPS boost-client list; non-empty means a boost is
+	 * currently active for this client (debugfs prints ", active"). */
+	struct list_head rps_boost;
+	struct intel_engine_cs *bsd_ring;	/* preferred BSD ring — presumably for
+						 * dual-BSD load balancing; verify */
+
+	unsigned rps_boosts;	/* count of RPS boosts requested on behalf of this client */
+};
+
enum intel_dpll_id {
DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
/* real shared dpll ids must be >= 0 */
struct list_head clients;
unsigned boosts;
+ struct drm_i915_file_private semaphores;
+
/* manual wa residency calculations */
struct intel_rps_ei up_ei, down_ei;
* a later patch when the call to i915_seqno_passed() is obsoleted...
*/
-struct drm_i915_file_private {
- struct drm_i915_private *dev_priv;
- struct drm_file *file;
-
- struct {
- spinlock_t lock;
- struct list_head request_list;
- } mm;
- struct idr context_idr;
-
- struct list_head rps_boost;
- struct intel_engine_cs *bsd_ring;
-
- unsigned rps_boosts;
-};
-
/*
* A command that requires special handling by the command parser.
*/
return ret;
if (!i915_semaphore_is_enabled(obj->base.dev)) {
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
ret = __i915_wait_request(req,
- atomic_read(&to_i915(obj->base.dev)->gpu_error.reset_counter),
- to_i915(obj->base.dev)->mm.interruptible, NULL, NULL);
+ atomic_read(&i915->gpu_error.reset_counter),
+ i915->mm.interruptible,
+ NULL,
+ &i915->rps.semaphores);
if (ret)
return ret;
INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
intel_gen6_powersave_work);
INIT_LIST_HEAD(&dev_priv->rps.clients);
+ INIT_LIST_HEAD(&dev_priv->rps.semaphores.rps_boost);
dev_priv->pm.suspended = false;
}