I915_READ(VLV_IIR_RW));
seq_printf(m, "Display IMR:\t%08x\n",
I915_READ(VLV_IMR));
- for_each_pipe(dev_priv, pipe)
+ for_each_pipe(dev_priv, pipe) {
+ enum intel_display_power_domain power_domain;
+
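+ /* The pipe's power well may be down while the pipe is disabled;
+ * reading PIPESTAT would then be an unclaimed register access,
+ * so skip pipes we cannot keep powered.
+ */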
+ power_domain = POWER_DOMAIN_PIPE(pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv,
+ power_domain)) {
+ seq_printf(m, "Pipe %c power disabled\n",
+ pipe_name(pipe));
+ continue;
+ }
+
seq_printf(m, "Pipe %c stat:\t%08x\n",
pipe_name(pipe),
I915_READ(PIPESTAT(pipe)));
+ intel_display_power_put(dev_priv, power_domain);
+ }
+
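+ /* POWER_DOMAIN_INIT is part of every power well's domain mask,
+ * so this keeps the whole display powered while we read the
+ * hotplug and flip-status registers below.
+ */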
+ intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
seq_printf(m, "Port hotplug:\t%08x\n",
I915_READ(PORT_HOTPLUG_EN));
seq_printf(m, "DPFLIPSTAT:\t%08x\n",
I915_READ(VLV_DPFLIPSTAT));
seq_printf(m, "DPINVGTT:\t%08x\n",
I915_READ(DPINVGTT));
+ intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
for (i = 0; i < 4; i++) {
seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
static int ironlake_drpc_info(struct seq_file *m)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
u32 rgvmodectl, rstdbyctl;
u16 crstandvid;
- int ret;
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
intel_runtime_pm_get(dev_priv);
rgvmodectl = I915_READ(MEMMODECTL);
crstandvid = I915_READ16(CRSTANDVID);
intel_runtime_pm_put(dev_priv);
- mutex_unlock(&dev->struct_mutex);
seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
seq_printf(m, "Boost freq: %d\n",
bool sr_enabled = false;
intel_runtime_pm_get(dev_priv);
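+ /* The self-refresh status registers live in the display power
+ * wells; POWER_DOMAIN_INIT keeps the whole display powered up
+ * while we sample them.
+ */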
+ intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
if (HAS_PCH_SPLIT(dev_priv))
sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
+ intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
intel_runtime_pm_put(dev_priv);
seq_printf(m, "self-refresh: %s\n",
static int i915_swizzle_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- int ret;
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
intel_runtime_pm_get(dev_priv);
seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
seq_puts(m, "L-shaped memory detected\n");
intel_runtime_pm_put(dev_priv);
- mutex_unlock(&dev->struct_mutex);
return 0;
}
enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
else {
for_each_pipe(dev_priv, pipe) {
+ enum transcoder cpu_transcoder =
+ intel_pipe_to_cpu_transcoder(dev_priv, pipe);
+ enum intel_display_power_domain power_domain;
+
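+ /* VLV_PSRSTAT lives in the transcoder's power well; skip
+ * transcoders that are currently powered down.
+ */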
+ power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
+ if (!intel_display_power_get_if_enabled(dev_priv,
+ power_domain))
+ continue;
+
stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
VLV_EDP_PSR_CURR_STATE_MASK;
if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
(stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
enabled = true;
+
+ intel_display_power_put(dev_priv, power_domain);
}
}
struct intel_engine_cs *engine;
enum intel_engine_id id;
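+ /* Sampling the breadcrumb/seqno state may touch engine
+ * registers, so hold a wakeref across the loop.
+ */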
+ intel_runtime_pm_get(dev_priv);
+
for_each_engine(engine, dev_priv, id) {
struct intel_breadcrumbs *b = &engine->breadcrumbs;
struct drm_i915_gem_request *rq;
seq_puts(m, "\n");
}
+ intel_runtime_pm_put(dev_priv);
+
return 0;
}
if (i915_reset_in_progress(&dev_priv->gpu_error))
return -EAGAIN;
- intel_runtime_pm_get(dev_priv);
-
i915_handle_error(dev_priv, val,
"Manually setting wedged to %llu", val);
- intel_runtime_pm_put(dev_priv);
-
return 0;
}
i915_cache_sharing_get(void *data, u64 *val)
{
struct drm_i915_private *dev_priv = data;
- struct drm_device *dev = &dev_priv->drm;
u32 snpcr;
- int ret;
if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
return -ENODEV;
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
intel_runtime_pm_get(dev_priv);
snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
intel_runtime_pm_put(dev_priv);
- mutex_unlock(&dev->struct_mutex);
*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
DRM_DEBUG_KMS("Suspending device\n");
- /*
- * We could deadlock here in case another thread holding struct_mutex
- * calls RPM suspend concurrently, since the RPM suspend will wait
- * first for this RPM suspend to finish. In this case the concurrent
- * RPM resume will be followed by its RPM suspend counterpart. Still
- * for consistency return -EAGAIN, which will reschedule this suspend.
- */
- if (!mutex_trylock(&dev->struct_mutex)) {
- DRM_DEBUG_KMS("device lock contention, deferring suspend\n");
- /*
- * Bump the expiration timestamp, otherwise the suspend won't
- * be rescheduled.
- */
- pm_runtime_mark_last_busy(kdev);
-
- return -EAGAIN;
- }
-
disable_rpm_wakeref_asserts(dev_priv);
/*
* We are safe here against re-faults, since the fault handler takes
* an RPM reference.
*/
i915_gem_release_all_mmaps(dev_priv);
- mutex_unlock(&dev->struct_mutex);
intel_guc_suspend(dev);
uint64_t offset;
int ret;
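+ /* Reads through the GGTT aperture require the device to be awake. */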
+ intel_runtime_pm_get(to_i915(dev));
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
if (!IS_ERR(vma)) {
node.start = i915_ggtt_offset(vma);
i915_vma_unpin(vma);
}
out:
+ intel_runtime_pm_put(to_i915(dev));
return ret;
}
ret = i915_gem_shmem_pread(dev, obj, args, file);
/* pread for non shmem backed objects */
- if (ret == -EFAULT || ret == -ENODEV) {
- intel_runtime_pm_get(to_i915(dev));
+ if (ret == -EFAULT || ret == -ENODEV)
ret = i915_gem_gtt_pread(dev, obj, args->size,
args->offset, args->data_ptr);
- intel_runtime_pm_put(to_i915(dev));
- }
i915_gem_object_put(obj);
mutex_unlock(&dev->struct_mutex);
if (i915_gem_object_is_tiled(obj))
return -EFAULT;
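+ /* The GGTT write path touches the hardware directly; take the
+ * wakeref here now that the ioctl caller no longer wraps us.
+ */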
+ intel_runtime_pm_get(i915);
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
PIN_MAPPABLE | PIN_NONBLOCK);
if (!IS_ERR(vma)) {
i915_vma_unpin(vma);
}
out:
+ intel_runtime_pm_put(i915);
return ret;
}
* perspective, requiring manual detiling by the client.
*/
if (!i915_gem_object_has_struct_page(obj) ||
- cpu_write_needs_clflush(obj)) {
- ret = i915_gem_gtt_pwrite_fast(dev_priv, obj, args, file);
+ cpu_write_needs_clflush(obj))
/* Note that the gtt paths might fail with non-page-backed user
* pointers (e.g. gtt mappings when moving data between
- * textures). Fallback to the shmem path in that case. */
- }
+ * textures). Fallback to the shmem path in that case.
+ */
+ ret = i915_gem_gtt_pwrite_fast(dev_priv, obj, args, file);
if (ret == -EFAULT || ret == -ENOSPC) {
if (obj->phys_handle)
goto err_unpin;
/* Mark as being mmapped into userspace for later revocation */
+ assert_rpm_wakelock_held(dev_priv);
spin_lock(&dev_priv->mm.userfault_lock);
if (list_empty(&obj->userfault_link))
list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
/* Serialisation between user GTT access and our code depends upon
* revoking the CPU's PTE whilst the mutex is held. The next user
* pagefault then has to wait until we release the mutex.
+ *
+ * Note that RPM complicates somewhat by adding an additional
+ * requirement that operations to the GGTT be made holding the RPM
+ * wakeref.
*/
lockdep_assert_held(&i915->drm.struct_mutex);
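+ /* The PTE zap and the wmb() below touch the GGTT, so the device
+ * must be awake; the fault handler likewise asserts it holds a
+ * wakeref while instantiating GGTT mmaps.
+ */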
+ intel_runtime_pm_get(i915);
spin_lock(&i915->mm.userfault_lock);
if (!list_empty(&obj->userfault_link)) {
}
spin_unlock(&i915->mm.userfault_lock);
if (!zap)
- return;
+ goto out;
drm_vma_node_unmap(&obj->base.vma_node,
obj->base.dev->anon_inode->i_mapping);
* memory writes before touching registers / GSM.
*/
wmb();
+
+out:
+ intel_runtime_pm_put(i915);
}
void
int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *i915 = to_i915(dev);
struct drm_i915_gem_caching *args = data;
struct drm_i915_gem_object *obj;
enum i915_cache_level level;
* cacheline, whereas normally such cachelines would get
* invalidated.
*/
- if (!HAS_LLC(dev) && !HAS_SNOOP(dev))
+ if (!HAS_LLC(i915) && !HAS_SNOOP(i915))
return -ENODEV;
level = I915_CACHE_LLC;
break;
case I915_CACHING_DISPLAY:
- level = HAS_WT(dev_priv) ? I915_CACHE_WT : I915_CACHE_NONE;
+ level = HAS_WT(i915) ? I915_CACHE_WT : I915_CACHE_NONE;
break;
default:
return -EINVAL;
}
- intel_runtime_pm_get(dev_priv);
-
ret = i915_mutex_lock_interruptible(dev);
if (ret)
- goto rpm_put;
+ return ret;
obj = i915_gem_object_lookup(file, args->handle);
if (!obj) {
}
ret = i915_gem_object_set_cache_level(obj, level);
-
i915_gem_object_put(obj);
unlock:
mutex_unlock(&dev->struct_mutex);
-rpm_put:
- intel_runtime_pm_put(dev_priv);
-
return ret;
}
enum i915_cache_level cache_level,
u32 flags)
{
+ struct drm_i915_private *i915 = to_i915(vma->vm->dev);
struct drm_i915_gem_object *obj = vma->obj;
u32 pte_flags = 0;
int ret;
if (obj->gt_ro)
pte_flags |= PTE_READ_ONLY;
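+ /* Writing the global GTT entries requires the device to be awake. */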
+ intel_runtime_pm_get(i915);
vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start,
cache_level, pte_flags);
+ intel_runtime_pm_put(i915);
/*
* Without aliasing PPGTT there's no difference between
enum i915_cache_level cache_level,
u32 flags)
{
+ struct drm_i915_private *i915 = to_i915(vma->vm->dev);
u32 pte_flags;
int ret;
if (flags & I915_VMA_GLOBAL_BIND) {
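+ /* As above, only the global GTT write needs the device awake;
+ * the aliasing-ppgtt path below writes page tables in memory.
+ */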
+ intel_runtime_pm_get(i915);
vma->vm->insert_entries(vma->vm,
vma->pages, vma->node.start,
cache_level, pte_flags);
+ intel_runtime_pm_put(i915);
}
if (flags & I915_VMA_LOCAL_BIND) {
- struct i915_hw_ppgtt *appgtt =
- to_i915(vma->vm->dev)->mm.aliasing_ppgtt;
+ struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
appgtt->base.insert_entries(&appgtt->base,
vma->pages, vma->node.start,
cache_level, pte_flags);
static void ggtt_unbind_vma(struct i915_vma *vma)
{
- struct i915_hw_ppgtt *appgtt = to_i915(vma->vm->dev)->mm.aliasing_ppgtt;
+ struct drm_i915_private *i915 = to_i915(vma->vm->dev);
+ struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
const u64 size = min(vma->size, vma->node.size);
- if (vma->flags & I915_VMA_GLOBAL_BIND)
+ if (vma->flags & I915_VMA_GLOBAL_BIND) {
+ intel_runtime_pm_get(i915);
vma->vm->clear_range(vma->vm,
vma->node.start, size);
+ intel_runtime_pm_put(i915);
+ }
if (vma->flags & I915_VMA_LOCAL_BIND && appgtt)
appgtt->base.clear_range(&appgtt->base,
return -EINVAL;
}
- intel_runtime_pm_get(dev_priv);
-
mutex_lock(&dev->struct_mutex);
if (obj->pin_display || obj->framebuffer_references) {
err = -EBUSY;
i915_gem_object_put(obj);
mutex_unlock(&dev->struct_mutex);
- intel_runtime_pm_put(dev_priv);
-
return err;
}