{
struct drm_i915_file_private *file_priv = file->driver_priv;
- mutex_lock(&dev->struct_mutex);
+ if (!HAS_HW_CONTEXTS(dev)) {
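+ /* No hw contexts: there is no context idr to tear down, just free the
+  * placeholder default context. */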
+ kfree(file_priv->private_default_ctx);
+ return;
+ }
+
idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
+ i915_gem_context_unreference(file_priv->private_default_ctx);
idr_destroy(&file_priv->context_idr);
- mutex_unlock(&dev->struct_mutex);
}
-static struct i915_hw_context *
+struct i915_hw_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
{
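+ /* Without hw contexts, every lookup resolves to the per-file default context. */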
+ if (!HAS_HW_CONTEXTS(file_priv->dev_priv->dev))
+ return file_priv->private_default_ctx;
+
return (struct i915_hw_context *)idr_find(&file_priv->context_idr, id);
}
* that thanks to write = false in this call and us not setting any gpu
* write domains when putting a context object onto the active list
* (when switching away from it), this won't block.
- * XXX: We need a real interface to do this instead of trickery. */
+ *
+ * XXX: We need a real interface to do this instead of trickery.
+ */
ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
if (ret) {
- i915_gem_object_unpin(to->obj);
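+ /* Error path: undo the GGTT pin on the context object before bailing out. */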
+ i915_gem_object_ggtt_unpin(to->obj);
return ret;
}
}
}
- static void eb_destroy(struct eb_vmas *eb) {
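+ /*
+  * Undo the pin and fence taken when this vma was reserved for execbuf;
+  * pin accounting now lives on the vma rather than the object.
+  */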
+ static void
+ i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
+ {
+ struct drm_i915_gem_exec_object2 *entry;
+ struct drm_i915_gem_object *obj = vma->obj;
+
+ if (!drm_mm_node_allocated(&vma->node))
+ return;
+
+ entry = vma->exec_entry;
+
+ if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
+ i915_gem_object_unpin_fence(obj);
+
+ if (entry->flags & __EXEC_OBJECT_HAS_PIN)
- i915_gem_object_unpin(obj);
++ vma->pin_count--;
+
+ entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
+ }
+
+ static void eb_destroy(struct eb_vmas *eb)
+ {
while (!list_empty(&eb->vmas)) {
struct i915_vma *vma;
/* Make sure we're not on PC8 state before disabling PC8, otherwise
* we'll hang the machine! */
- dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
- gen6_gt_force_wake_get(dev_priv);
++ gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
if (val & LCPLL_POWER_DOWN_ALLOW) {
val &= ~LCPLL_POWER_DOWN_ALLOW;
DRM_ERROR("Switching back to LCPLL failed\n");
}
- dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
- gen6_gt_force_wake_put(dev_priv);
++ gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}
void hsw_enable_pc8_work(struct work_struct *__work)
return -1;
}
- val /= mult;
- val -= base / mult;
- val += 0xbd;
-
- if (val > 0xea)
- val = 0xea;
-
- return val;
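+ /* Round to the nearest opcode for the current memory clock instead of
+  * truncating and clamping. */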
+ return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6;
}
- void intel_pm_init(struct drm_device *dev)
+ void intel_pm_setup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;