		goto unlock;
	}

-	intel_edp_psr_exit(dev, true);
+	intel_edp_psr_exit(dev);

	/* Try to flush the object off the GPU without holding the lock.
	 * We will repeat the flush holding the lock in the normal manner

	if (ret)
		return ret;

-	intel_edp_psr_exit(dev, true);
+	intel_edp_psr_exit(dev);

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {

	if (ret)
		return ret;

-	intel_edp_psr_exit(dev, true);
+	intel_edp_psr_exit(dev);

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {

	struct drm_device *dev = obj->base.dev;
	struct drm_crtc *crtc;

-	intel_edp_psr_exit(dev, true);
+	intel_edp_psr_exit(dev);

	if (!i915.powersave)
		return;

		return -ENOMEM;

	/* Exit PSR early in page flip */
-	intel_edp_psr_exit(dev, true);
+	intel_edp_psr_exit(dev);

	work->event = event;
	work->crtc = crtc;

		   & ~EDP_PSR_ENABLE);
}

-void intel_edp_psr_exit(struct drm_device *dev, bool schedule_back)
+void intel_edp_psr_exit(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->psr.active)
		intel_edp_psr_inactivate(dev);

-	if (schedule_back)
-		schedule_delayed_work(&dev_priv->psr.work,
-				      msecs_to_jiffies(100));
+	schedule_delayed_work(&dev_priv->psr.work,
+			      msecs_to_jiffies(100));
}

void intel_edp_psr_init(struct drm_device *dev)
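
For reference, this is the shape the helper takes once the hunk above is applied. It is reconstructed from the hunk's context and '+' lines; the comments are editorial, not part of the source:

void intel_edp_psr_exit(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Force PSR off immediately if it is currently active. */
	if (dev_priv->psr.active)
		intel_edp_psr_inactivate(dev);

	/*
	 * Unconditionally re-arm the delayed work that re-enables PSR
	 * after 100 ms of idleness. The schedule_back flag that used to
	 * guard this is gone, so every caller now gets the same
	 * exit-then-rearm behaviour.
	 */
	schedule_delayed_work(&dev_priv->psr.work,
			      msecs_to_jiffies(100));
}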
void intel_edp_psr_enable(struct intel_dp *intel_dp);
void intel_edp_psr_disable(struct intel_dp *intel_dp);
void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate);
-void intel_edp_psr_exit(struct drm_device *dev, bool schedule_back);
+void intel_edp_psr_exit(struct drm_device *dev);
void intel_edp_psr_init(struct drm_device *dev);

		mutex_unlock(&dev->struct_mutex);
	}

-	intel_edp_psr_exit(dev, true);
+	intel_edp_psr_exit(dev);

	return 0;
}
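
With the flag removed, exiting PSR is a single unconditional call wherever the driver touches the frontbuffer, as the call-site hunks above all show. A sketch of what any new call site would look like (the function name is invented for illustration, not taken from this patch):

/* Hypothetical caller: a path that dirties the frontbuffer simply
 * forces PSR off; the helper re-arms its own delayed re-enable. */
static void example_frontbuffer_touch(struct drm_device *dev)
{
	intel_edp_psr_exit(dev);
}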