dev_priv->dev = dev;
dev_priv->info = info;
- spin_lock_init(&dev_priv->rps.lock);
+ spin_lock_init(&dev_priv->irq_lock);
+ spin_lock_init(&dev_priv->gpu_error.lock);
+ spin_lock_init(&dev_priv->backlight.lock);
++ spin_lock_init(&dev_priv->gt_lock);
+ mutex_init(&dev_priv->dpio_lock);
+ mutex_init(&dev_priv->rps.hw_lock);
+ mutex_init(&dev_priv->modeset_restore_lock);
+
i915_dump_device_info(dev_priv);
+ INIT_LIST_HEAD(&dev_priv->vm_list);
+ INIT_LIST_HEAD(&dev_priv->gtt.base.global_link);
+ list_add(&dev_priv->gtt.base.global_link, &dev_priv->vm_list);
+
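The vm_list bootstrap above is what later hunks rely on when they switch from the single dev_priv->mm.inactive_list to per-address-space lists. As a minimal sketch (using only fields this patch itself introduces: vm_list, global_link, inactive_list, mm_list), the old global walk becomes a walk over every VM:

/* Sketch only: visit the inactive objects of every address space. */
struct i915_address_space *vm;
struct drm_i915_gem_object *obj;

list_for_each_entry(vm, &dev_priv->vm_list, global_link)
	list_for_each_entry(obj, &vm->inactive_list, mm_list)
		obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;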
if (i915_get_bridge_dev(dev)) {
ret = -EIO;
goto free_priv;
/* Move everything out of the GPU domains to ensure we do any
* necessary invalidation upon reuse.
*/
- list_for_each_entry(obj,
- &dev_priv->mm.inactive_list,
- mm_list)
- {
+ list_for_each_entry(obj, &vm->inactive_list, mm_list)
obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
- }
- /* The fence registers are invalidated so clear them out */
- i915_gem_reset_fences(dev);
+ i915_gem_restore_fences(dev);
}
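Switching from i915_gem_reset_fences() to i915_gem_restore_fences() means the fence registers are replayed from the driver's bookkeeping rather than cleared, so tiled objects keep valid fences across a reset. A sketch of what restore plausibly does at this point in the series, assuming the era's fence_regs/num_fence_regs fields and the i915_gem_write_fence() helper (not the verbatim implementation):

void i915_gem_restore_fences(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* Rewrite every fence from software state: the attached object
	 * if there is one, otherwise an explicit clear. */
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
		i915_gem_write_fence(dev, i, reg->obj);
	}
}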
fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
}
+ fence_reg += reg * 8;
+
+ /* To work around incoherency with non-atomic 64-bit register updates,
+ * we split the 64-bit update into two 32-bit writes. In order
+ * for a partial fence not to be evaluated between the writes, we
+ * precede the update with a write to turn off the fence register,
+ * and only enable the fence as the last step.
+ *
+ * For extra levels of paranoia, we make sure each step lands
+ * before applying the next step.
+ */
+ I915_WRITE(fence_reg, 0);
+ POSTING_READ(fence_reg);
+
if (obj) {
- u32 size = obj->gtt_space->size;
+ u32 size = i915_gem_obj_ggtt_size(obj);
+ uint64_t val;
- val = (uint64_t)((obj->gtt_offset + size - 4096) &
+ val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
0xfffff000) << 32;
- val |= obj->gtt_offset & 0xfffff000;
+ val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I965_FENCE_TILING_Y_SHIFT;
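The hunk is cut off before the writes that the comment above promises; the split update completes along these lines (a sketch consistent with that comment: each 32-bit half is posted, and the fence is only re-enabled by the final lower-half write):

/* Upper half first, while the fence is still disabled... */
I915_WRITE(fence_reg + 4, val >> 32);
POSTING_READ(fence_reg + 4);

/* ...then the lower half, which re-enables the fence as the last step. */
I915_WRITE(fence_reg + 0, val);
POSTING_READ(fence_reg);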
if (!drm_core_check_feature(dev, DRIVER_MODESET))
i915_gem_evict_everything(dev);
- i915_gem_reset_fences(dev);
-
- /* Hack! Don't let anybody do execbuf while we don't control the chip.
- * We need to replace this with a semaphore, or something.
- * And not confound mm.suspended!
- */
- dev_priv->mm.suspended = 1;
del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
i915_kernel_lost_context(dev);
list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
if (obj->pages_pin_count == 0)
cnt += obj->base.size >> PAGE_SHIFT;
- list_for_each_entry(obj, &vm->inactive_list, global_list)
- list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list)
++ list_for_each_entry(obj, &vm->inactive_list, mm_list)
if (obj->pin_count == 0 && obj->pages_pin_count == 0)
cnt += obj->base.size >> PAGE_SHIFT;
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (dev_priv->mm.stolen_base == 0)
+ if (!drm_mm_initialized(&dev_priv->mm.stolen))
return -ENODEV;
- if (size < dev_priv->cfb_size)
+ if (size < dev_priv->fbc.size)
return 0;
/* Release any current block */
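Past the early-out, a replacement block has to be carved out of the stolen range via its drm_mm allocator. A rough sketch of that step; the helper name and the drm_mm_insert_node(mm, node, size, alignment) signature are assumptions for this era, not taken from the patch:

/* Hypothetical helper: allocate a new compressed-FB node from stolen. */
static int i915_stolen_alloc_cfb(struct drm_i915_private *dev_priv, u32 size)
{
	struct drm_mm_node *node;
	int ret;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	/* 4096-byte alignment mirrors the page-aligned stolen layout. */
	ret = drm_mm_insert_node(&dev_priv->mm.stolen, node, size, 4096);
	if (ret) {
		kfree(node);
		return ret;
	}

	dev_priv->fbc.size = size;
	return 0;
}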
u32 size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_address_space *vm = &dev_priv->gtt.base;
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
+ struct i915_vma *vma;
+ int ret;
- if (dev_priv->mm.stolen_base == 0)
+ if (!drm_mm_initialized(&dev_priv->mm.stolen))
return NULL;
DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",