void (*release)(struct drm_i915_gem_object *);
};
+/*
+ * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
+ * considered to be the frontbuffer for the given plane interface-wise. This
+ * doesn't mean that the hw necessarily already scans it out, but that any
+ * rendering (by the cpu or gpu) will land in the frontbuffer eventually.
+ *
+ * We have one bit per pipe and per scanout plane type.
+ */
+#define INTEL_FRONTBUFFER_BITS_PER_PIPE 4
+#define INTEL_FRONTBUFFER_BITS \
+ (INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES)
+#define INTEL_FRONTBUFFER_PRIMARY(pipe) \
+ (1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
+#define INTEL_FRONTBUFFER_CURSOR(pipe) \
+ (1 << (1 + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
+#define INTEL_FRONTBUFFER_SPRITE(pipe) \
+ (1 << (2 + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
+#define INTEL_FRONTBUFFER_OVERLAY(pipe) \
+ (1 << (3 + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
+
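For reference, the macros above pack four bits per pipe: bit 0 of each nibble is the primary plane, bit 1 the cursor, bit 2 the sprite and bit 3 the overlay. Below is a small standalone sketch, not part of the patch, that checks the resulting bit positions; the userspace main(), the local enum pipe and the copied macro definitions are illustrative assumptions only.

/* Standalone illustration only: verify how the frontbuffer bits pack,
 * one nibble per pipe, using copies of the macros from the hunk above. */
#include <assert.h>

enum pipe { PIPE_A = 0, PIPE_B, PIPE_C };

#define INTEL_FRONTBUFFER_BITS_PER_PIPE 4
#define INTEL_FRONTBUFFER_PRIMARY(pipe) \
	(1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
#define INTEL_FRONTBUFFER_CURSOR(pipe) \
	(1 << (1 + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
#define INTEL_FRONTBUFFER_OVERLAY(pipe) \
	(1 << (3 + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))

int main(void)
{
	/* Pipe A owns bits 0-3, pipe B bits 4-7, pipe C bits 8-11. */
	assert(INTEL_FRONTBUFFER_PRIMARY(PIPE_A) == 0x001);
	assert(INTEL_FRONTBUFFER_CURSOR(PIPE_A)  == 0x002);
	assert(INTEL_FRONTBUFFER_PRIMARY(PIPE_B) == 0x010);
	assert(INTEL_FRONTBUFFER_OVERLAY(PIPE_B) == 0x080);
	assert(INTEL_FRONTBUFFER_PRIMARY(PIPE_C) == 0x100);
	return 0;
}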
struct drm_i915_gem_object {
struct drm_gem_object base;
unsigned int has_global_gtt_mapping:1;
unsigned int has_dma_mapping:1;
+ unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;
+
struct sg_table *pages;
int pages_pin_count;
};
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
+void i915_gem_track_fb(struct drm_i915_gem_object *old,
+ struct drm_i915_gem_object *new,
+ unsigned frontbuffer_bits);
+
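The body of i915_gem_track_fb() is not part of this excerpt. A minimal sketch of what the helper is expected to do, assuming it simply transfers the given bits from the old object to the new one under struct_mutex (the actual implementation may add further locking checks), would look roughly like this:

/* Sketch only, not the actual implementation: move the given frontbuffer
 * bits from @old to @new.  Either pointer may be NULL when a plane gains
 * or loses its frontbuffer. */
void i915_gem_track_fb(struct drm_i915_gem_object *old,
		       struct drm_i915_gem_object *new,
		       unsigned frontbuffer_bits)
{
	if (old) {
		/* The old object must currently hold these bits. */
		WARN_ON(!(old->frontbuffer_bits & frontbuffer_bits));
		old->frontbuffer_bits &= ~frontbuffer_bits;
	}

	if (new) {
		/* The new object must not already hold these bits. */
		WARN_ON(new->frontbuffer_bits & frontbuffer_bits);
		new->frontbuffer_bits |= frontbuffer_bits;
	}
}

Callers in the hunks below follow that pattern: pass the outgoing frontbuffer object (or NULL when a plane is disabled), the incoming one (or NULL), and the INTEL_FRONTBUFFER_* bits of the plane that changed, all under struct_mutex.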
/**
* Request queue structure.
*
goto out_unref_obj;
}
+ obj->frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(crtc->pipe);
mutex_unlock(&dev->struct_mutex);
DRM_DEBUG_KMS("plane fb obj %p\n", obj);
if (i915_gem_obj_ggtt_offset(fb->obj) == plane_config->base) {
drm_framebuffer_reference(c->primary->fb);
intel_crtc->base.primary->fb = c->primary->fb;
+ fb->obj->frontbuffer_bits |=
+ INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
break;
}
}
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum pipe pipe = intel_crtc->pipe;
struct drm_framebuffer *old_fb;
+ struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj;
+ struct drm_i915_gem_object *old_obj;
int ret;
if (intel_crtc_has_pending_flip(crtc)) {
return -EINVAL;
}
+ old_fb = crtc->primary->fb;
+ old_obj = old_fb ? to_intel_framebuffer(old_fb)->obj : NULL;
+
mutex_lock(&dev->struct_mutex);
- ret = intel_pin_and_fence_fb_obj(dev,
- to_intel_framebuffer(fb)->obj,
- NULL);
+ ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
+ if (ret == 0)
+ i915_gem_track_fb(old_obj, obj,
+ INTEL_FRONTBUFFER_PRIMARY(pipe));
mutex_unlock(&dev->struct_mutex);
if (ret != 0) {
DRM_ERROR("pin & fence failed\n");
dev_priv->display.update_primary_plane(crtc, fb, x, y);
- old_fb = crtc->primary->fb;
crtc->primary->fb = fb;
crtc->x = x;
crtc->y = y;
struct drm_device *dev = crtc->dev;
struct drm_connector *connector;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *old_obj;
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
/* crtc should still be enabled when we disable it. */
WARN_ON(!crtc->enabled);
dev_priv->display.off(crtc);
assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
- assert_cursor_disabled(dev_priv, to_intel_crtc(crtc)->pipe);
- assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
+ assert_cursor_disabled(dev_priv, pipe);
+ assert_pipe_disabled(dev->dev_private, pipe);
if (crtc->primary->fb) {
+ old_obj = to_intel_framebuffer(crtc->primary->fb)->obj;
mutex_lock(&dev->struct_mutex);
- intel_unpin_fb_obj(to_intel_framebuffer(crtc->primary->fb)->obj);
+ intel_unpin_fb_obj(old_obj);
+ i915_gem_track_fb(old_obj, NULL,
+ INTEL_FRONTBUFFER_PRIMARY(pipe));
mutex_unlock(&dev->struct_mutex);
crtc->primary->fb = NULL;
}
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum pipe pipe = intel_crtc->pipe;
unsigned old_width;
uint32_t addr;
int ret;
i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo);
}
+ i915_gem_track_fb(intel_crtc->cursor_bo, obj,
+ INTEL_FRONTBUFFER_CURSOR(pipe));
mutex_unlock(&dev->struct_mutex);
old_width = intel_crtc->cursor_width;
struct drm_framebuffer *old_fb = crtc->primary->fb;
struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum pipe pipe = intel_crtc->pipe;
struct intel_unpin_work *work;
struct intel_engine_cs *ring;
unsigned long flags;
intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
- work->flip_count = I915_READ(PIPE_FLIPCOUNT_GM45(intel_crtc->pipe)) + 1;
+ work->flip_count = I915_READ(PIPE_FLIPCOUNT_GM45(pipe)) + 1;
if (IS_VALLEYVIEW(dev)) {
ring = &dev_priv->ring[BCS];
if (ret)
goto cleanup_unpin;
+ i915_gem_track_fb(work->old_fb_obj, obj,
+ INTEL_FRONTBUFFER_PRIMARY(pipe));
+
intel_disable_fbc(dev);
intel_mark_fb_busy(obj, NULL);
mutex_unlock(&dev->struct_mutex);
intel_crtc_wait_for_pending_flips(crtc);
ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb);
if (ret == 0 && event)
- drm_send_vblank_event(dev, intel_crtc->pipe, event);
+ drm_send_vblank_event(dev, pipe, event);
}
return ret;
}
*/
for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
struct drm_framebuffer *old_fb;
+ struct drm_i915_gem_object *old_obj = NULL;
+ struct drm_i915_gem_object *obj =
+ to_intel_framebuffer(fb)->obj;
mutex_lock(&dev->struct_mutex);
ret = intel_pin_and_fence_fb_obj(dev,
- to_intel_framebuffer(fb)->obj,
+ obj,
NULL);
if (ret != 0) {
DRM_ERROR("pin & fence failed\n");
goto done;
}
old_fb = crtc->primary->fb;
- if (old_fb)
- intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
+ if (old_fb) {
+ old_obj = to_intel_framebuffer(old_fb)->obj;
+ intel_unpin_fb_obj(old_obj);
+ }
+ i915_gem_track_fb(old_obj, obj,
+ INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
mutex_unlock(&dev->struct_mutex);
crtc->primary->fb = fb;
intel_crtc_wait_for_pending_flips(plane->crtc);
intel_disable_primary_hw_plane(dev_priv, intel_plane->plane,
intel_plane->pipe);
-
disable_unpin:
+ i915_gem_track_fb(to_intel_framebuffer(plane->fb)->obj, NULL,
+ INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
intel_unpin_fb_obj(to_intel_framebuffer(plane->fb)->obj);
plane->fb = NULL;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_plane *intel_plane = to_intel_plane(plane);
+ struct drm_i915_gem_object *obj, *old_obj = NULL;
struct drm_rect dest = {
/* integer pixels */
.x1 = crtc_x,
if (ret)
return ret;
+ if (plane->fb)
+ old_obj = to_intel_framebuffer(plane->fb)->obj;
+ obj = to_intel_framebuffer(fb)->obj;
+
/*
* If the CRTC isn't enabled, we're just pinning the framebuffer,
* updating the fb pointer, and returning without touching the
* we may have an fb pinned; unpin it.
*/
if (plane->fb)
- intel_unpin_fb_obj(to_intel_framebuffer(plane->fb)->obj);
+ intel_unpin_fb_obj(old_obj);
+
+ i915_gem_track_fb(old_obj, obj,
+ INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
/* Pin and return without programming hardware */
- return intel_pin_and_fence_fb_obj(dev,
- to_intel_framebuffer(fb)->obj,
- NULL);
+ return intel_pin_and_fence_fb_obj(dev, obj, NULL);
}
intel_crtc_wait_for_pending_flips(crtc);
* fail.
*/
if (plane->fb != fb) {
- ret = intel_pin_and_fence_fb_obj(dev,
- to_intel_framebuffer(fb)->obj,
- NULL);
+ ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
if (ret)
return ret;
}
+ i915_gem_track_fb(old_obj, obj,
+ INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
+
if (intel_crtc->primary_enabled)
intel_disable_primary_hw_plane(dev_priv,
intel_plane->plane,
if (plane->fb != fb)
if (plane->fb)
- intel_unpin_fb_obj(to_intel_framebuffer(plane->fb)->obj);
+ intel_unpin_fb_obj(old_obj);
return 0;
}