seq_printf(m, "No flip due on pipe %c (plane %c)\n",
pipe, plane);
} else {
+ u32 pending;
u32 addr;
- if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
+ pending = atomic_read(&work->pending);
+ if (pending == INTEL_FLIP_INACTIVE) {
+ seq_printf(m, "Flip ioctl preparing on pipe %c (plane %c)\n",
+ pipe, plane);
+ } else if (pending >= INTEL_FLIP_COMPLETE) {
seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
pipe, plane);
} else {
seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
pipe, plane);
}
seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
work->flip_queued_vblank,
work->flip_ready_vblank,
drm_crtc_vblank_count(&crtc->base));
- if (work->enable_stall_check)
- seq_puts(m, "Stall check enabled, ");
- else
- seq_puts(m, "Stall check waiting for page flip ioctl, ");
seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
if (INTEL_INFO(dev)->gen >= 4)
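Worth noting: the report now snapshots work->pending once and decodes all three flip states from that single value, so the branches cannot disagree if an interrupt advances the state mid-print. A minimal userspace C11 analogue of the snapshot-once pattern (all names hypothetical):

#include <stdatomic.h>
#include <stdio.h>

enum { FLIP_INACTIVE, FLIP_PENDING, FLIP_COMPLETE };

static void report_flip_state(atomic_int *state)
{
	/* One load: every branch below judges the same value, even if
	 * another thread bumps the state while we are printing. */
	int pending = atomic_load(state);

	if (pending == FLIP_INACTIVE)
		puts("flip ioctl preparing");
	else if (pending >= FLIP_COMPLETE)
		puts("flip queued");
	else
		puts("flip pending (waiting for vsync)");
}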
struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
struct intel_unpin_work *work = intel_crtc->unpin_work;
- /* ensure that the unpin work is consistent wrt ->pending. */
- smp_rmb();
intel_crtc->unpin_work = NULL;
if (work->event)
spin_lock_irqsave(&dev->event_lock, flags);
work = intel_crtc->unpin_work;
- /* Ensure we don't miss a work->pending update ... */
- smp_rmb();
+ if (work && atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE) {
+ /* ensure that the unpin work is consistent wrt ->pending. */
+ smp_rmb();
- if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
- spin_unlock_irqrestore(&dev->event_lock, flags);
- return;
+ page_flip_completed(intel_crtc);
}
- page_flip_completed(intel_crtc);
-
spin_unlock_irqrestore(&dev->event_lock, flags);
}
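The completion path now does the cheap ->pending test first and pays for smp_rmb() only on the path that will actually dereference the work item. A rough userspace C11 analogue, where a relaxed load plus an acquire fence stands in for atomic_read() + smp_rmb() (names hypothetical):

#include <stdatomic.h>
#include <stdbool.h>

enum { FLIP_INACTIVE, FLIP_PENDING, FLIP_COMPLETE };

struct unpin_work {
	int frame;		/* plain field, written before publication */
	atomic_int pending;
};

static bool try_complete(struct unpin_work *work)
{
	if (atomic_load_explicit(&work->pending,
				 memory_order_relaxed) < FLIP_COMPLETE)
		return false;	/* not ours to finish, no barrier paid */

	/* smp_rmb() counterpart: reads of the plain fields below cannot
	 * be reordered before the ->pending check above. */
	atomic_thread_fence(memory_order_acquire);

	return work->frame != 0;	/* plain fields now safe to read */
}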
static inline void intel_mark_page_flip_active(struct intel_unpin_work *work)
{
/* Ensure that the work item is consistent when activating it ... */
- smp_wmb();
+ smp_mb__before_atomic();
atomic_set(&work->pending, INTEL_FLIP_PENDING);
- /* and that it is marked active as soon as the irq could fire. */
- smp_wmb();
}
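On the write side, the smp_wmb() pair shrinks to a single smp_mb__before_atomic() ahead of the atomic_set() that publishes the work item. The C11 counterpart of that publish, pairing with the reader sketch above (names hypothetical):

#include <stdatomic.h>

enum { FLIP_INACTIVE, FLIP_PENDING, FLIP_COMPLETE };

struct unpin_work {
	int frame;
	atomic_int pending;
};

static void mark_flip_active(struct unpin_work *work, int frame)
{
	work->frame = frame;	/* fill in the plain fields first */

	/* Release store: a reader that observes FLIP_PENDING is
	 * guaranteed to also observe the ->frame write above. */
	atomic_store_explicit(&work->pending, FLIP_PENDING,
			      memory_order_release);
}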
static int intel_gen2_queue_flip(struct drm_device *dev,
intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
intel_ring_emit(engine, 0); /* aux display base address, unused */
- intel_mark_page_flip_active(intel_crtc->unpin_work);
return 0;
}
intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
intel_ring_emit(engine, MI_NOOP);
- intel_mark_page_flip_active(intel_crtc->unpin_work);
return 0;
}
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
intel_ring_emit(engine, pf | pipesrc);
- intel_mark_page_flip_active(intel_crtc->unpin_work);
return 0;
}
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
intel_ring_emit(engine, pf | pipesrc);
- intel_mark_page_flip_active(intel_crtc->unpin_work);
return 0;
}
intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
intel_ring_emit(engine, (MI_NOOP));
- intel_mark_page_flip_active(intel_crtc->unpin_work);
return 0;
}
if (work == NULL)
return;
- intel_mark_page_flip_active(work);
-
intel_pipe_update_start(crtc);
if (INTEL_INFO(mmio_flip->i915)->gen >= 9)
skl_do_mmio_flip(crtc, mmio_flip->rotation, work);
else
ilk_do_mmio_flip(crtc, work);
intel_pipe_update_end(crtc);
+
+ intel_mark_page_flip_active(work);
}
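Hoisting intel_mark_page_flip_active() out of the queue_flip callbacks and, on the MMIO path, past intel_pipe_update_end() appears to chase the same goal: the work item is only flagged PENDING once everything the interrupt side will inspect (the emitted commands or register writes, the queued vblank count, the request) is in its final state, which together with the barriers above is what lets enable_stall_check go away. A toy C11 illustration of the ordering (names hypothetical):

#include <stdatomic.h>
#include <stdbool.h>

enum { FLIP_INACTIVE, FLIP_PENDING };

struct flip {
	bool emitted;		/* stand-in for the fields the IRQ side reads */
	atomic_int pending;
};

/* Wrong shape: publish first, do the work after.  An IRQ firing in
 * between sees FLIP_PENDING while ->emitted is still false. */
static void queue_flip_racy(struct flip *f)
{
	atomic_store(&f->pending, FLIP_PENDING);
	f->emitted = true;
}

/* Patched shape: do the work, then publish with release ordering. */
static void queue_flip_fixed(struct flip *f)
{
	f->emitted = true;
	atomic_store_explicit(&f->pending, FLIP_PENDING,
			      memory_order_release);
}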
static void intel_mmio_flip_work_func(struct work_struct *work)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work = intel_crtc->unpin_work;
u32 addr;
+ u32 pending;
- if (atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE)
- return true;
-
- if (atomic_read(&work->pending) < INTEL_FLIP_PENDING)
- return false;
+ pending = atomic_read(&work->pending);
+ /* ensure that the unpin work is consistent wrt ->pending. */
+ smp_rmb();
- if (!work->enable_stall_check)
- return false;
+ if (pending != INTEL_FLIP_PENDING)
+ return pending == INTEL_FLIP_COMPLETE;
if (work->flip_ready_vblank == 0) {
if (work->flip_queued_req &&
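With enable_stall_check gone, the stall check's three early-outs collapse into one comparison. Assuming the driver's usual ordering INTEL_FLIP_INACTIVE < INTEL_FLIP_PENDING < INTEL_FLIP_COMPLETE, the two forms are equivalent; spelled out as a sketch:

#include <stdbool.h>

enum { FLIP_INACTIVE, FLIP_PENDING, FLIP_COMPLETE };

/* Returns true when the stall check can answer immediately, with the
 * answer in *stalled; only FLIP_PENDING falls through to the vblank
 * heuristics. */
static bool flip_early_out(int pending, bool *stalled)
{
	if (pending != FLIP_PENDING) {
		/* FLIP_COMPLETE -> true  (old ">= COMPLETE" branch)
		 * FLIP_INACTIVE -> false (old "< PENDING" branch) */
		*stalled = (pending == FLIP_COMPLETE);
		return true;
	}
	return false;
}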
*/
if (!mmio_flip) {
ret = i915_gem_object_sync(obj, engine, &request);
+ if (!ret && !request) {
+ request = i915_gem_request_alloc(engine, NULL);
+ ret = PTR_ERR_OR_ZERO(request);
+ }
+
if (ret)
goto cleanup_pending;
}
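PTR_ERR_OR_ZERO() folds the usual IS_ERR()/PTR_ERR() dance into one expression: 0 for a valid pointer, the negative errno otherwise. For reference, a userspace re-implementation of the convention from include/linux/err.h:

#include <stdbool.h>

#define MAX_ERRNO	4095

/* Error codes travel as pointer values in the top 4095 bytes of the
 * address space, a range no valid kernel pointer occupies. */
static bool is_err(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static long ptr_err_or_zero(const void *ptr)
{
	return is_err(ptr) ? (long)ptr : 0;
}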
work->gtt_offset += intel_crtc->dspaddr_offset;
if (mmio_flip) {
- ret = intel_queue_mmio_flip(dev, crtc, obj);
- if (ret)
- goto cleanup_unpin;
+ work->flip_queued_vblank = drm_crtc_vblank_count(crtc);
i915_gem_request_assign(&work->flip_queued_req,
obj->last_write_req);
- } else {
- if (!request) {
- request = i915_gem_request_alloc(engine, NULL);
- if (IS_ERR(request)) {
- ret = PTR_ERR(request);
- goto cleanup_unpin;
- }
- }
+ ret = intel_queue_mmio_flip(dev, crtc, obj);
+ if (ret)
+ goto cleanup_unpin;
+ } else {
ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
page_flip_flags);
if (ret)
goto cleanup_unpin;
i915_gem_request_assign(&work->flip_queued_req, request);
- }
- if (request)
- i915_add_request_no_flush(request);
+ work->flip_queued_vblank = drm_crtc_vblank_count(crtc);
+ intel_mark_page_flip_active(work);
- work->flip_queued_vblank = drm_crtc_vblank_count(crtc);
- work->enable_stall_check = true;
+ i915_add_request_no_flush(request);
+ }
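In the reordered code both branches assign work->flip_queued_req before the flip can complete. i915_gem_request_assign() is the usual reference-swap helper; a minimal userspace sketch of that pattern (hypothetical names, plain non-atomic refcount):

#include <stdlib.h>

struct request {
	int refcount;	/* 1 at allocation */
};

static struct request *request_get(struct request *req)
{
	if (req)
		req->refcount++;
	return req;
}

static void request_put(struct request *req)
{
	if (req && --req->refcount == 0)
		free(req);
}

/* Take the new reference before dropping the old one, so assigning
 * a pointer over itself is safe. */
static void request_assign(struct request **dst, struct request *src)
{
	struct request *old = *dst;

	*dst = request_get(src);
	request_put(old);
}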
- i915_gem_track_fb(intel_fb_obj(work->old_fb), obj,
+ i915_gem_track_fb(intel_fb_obj(old_fb), obj,
to_intel_plane(primary)->frontbuffer_bit);
mutex_unlock(&dev->struct_mutex);
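The last hunk switches i915_gem_track_fb() from work->old_fb to the caller's local old_fb, presumably because once the request is added (or the MMIO flip queued) the flip can complete from the interrupt side and free work, so it must not be dereferenced here. A condensed sketch of that hazard (names hypothetical):

struct fb { int id; };

struct work_item {
	struct fb *old_fb;
};

static void publish(struct work_item *w)
{
	(void)w;	/* stand-in: ownership passes to the IRQ side,
			 * which may free w at any point from now on */
}

static struct fb *queue_and_track(struct work_item *w)
{
	struct fb *old_fb = w->old_fb;	/* cache while we still own w */

	publish(w);

	/* w may already be gone: use the local, not w->old_fb */
	return old_fb;
}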