From 78b1d470d57dd7a6e0efda63ebad97f0d44e817c Mon Sep 17 00:00:00 2001
From: Hai Li
Date: Mon, 27 Jul 2015 13:49:45 -0400
Subject: [PATCH] drm/msm: Enable clocks during enable/disable_vblank()
 callbacks

AHB clock should be enabled before accessing registers during
enable/disable_vblank(). Since these 2 callbacks are called in
atomic context while clk_prepare may cause thread sleep, a work
is scheduled to control vblanks.

v2: fixup spinlock initialization

Signed-off-by: Hai Li
[add comment about cancel_work_sync() before drm_irq_uninstall()]
Signed-off-by: Rob Clark
---
 drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c |  9 +++
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c |  9 +++
 drivers/gpu/drm/msm/msm_drv.c           | 78 ++++++++++++++++++++++++-
 drivers/gpu/drm/msm/msm_drv.h           |  8 +++
 4 files changed, 102 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
index 7369ee7f0c55..64d24fcbf01a 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
@@ -86,13 +86,22 @@ irqreturn_t mdp4_irq(struct msm_kms *kms)
 
 int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
 {
+	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+
+	mdp4_enable(mdp4_kms);
 	mdp_update_vblank_mask(to_mdp_kms(kms),
 			mdp4_crtc_vblank(crtc), true);
+	mdp4_disable(mdp4_kms);
+
 	return 0;
 }
 
 void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
 {
+	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+
+	mdp4_enable(mdp4_kms);
 	mdp_update_vblank_mask(to_mdp_kms(kms),
 			mdp4_crtc_vblank(crtc), false);
+	mdp4_disable(mdp4_kms);
 }
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
index 33bd4c6160dd..2a578f2d36dd 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
@@ -112,15 +112,24 @@ irqreturn_t mdp5_irq(struct msm_kms *kms)
 
 int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
 {
+	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+
+	mdp5_enable(mdp5_kms);
 	mdp_update_vblank_mask(to_mdp_kms(kms),
 			mdp5_crtc_vblank(crtc), true);
+	mdp5_disable(mdp5_kms);
+
 	return 0;
 }
 
 void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
 {
+	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+
+	mdp5_enable(mdp5_kms);
 	mdp_update_vblank_mask(to_mdp_kms(kms),
 			mdp5_crtc_vblank(crtc), false);
+	mdp5_disable(mdp5_kms);
 }
 
 /*
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 39ce1920f5f5..0339c5d82d37 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -116,6 +116,65 @@ u32 msm_readl(const void __iomem *addr)
 	return val;
 }
 
+struct vblank_event {
+	struct list_head node;
+	int crtc_id;
+	bool enable;
+};
+
+static void vblank_ctrl_worker(struct work_struct *work)
+{
+	struct msm_vblank_ctrl *vbl_ctrl = container_of(work,
+						struct msm_vblank_ctrl, work);
+	struct msm_drm_private *priv = container_of(vbl_ctrl,
+					struct msm_drm_private, vblank_ctrl);
+	struct msm_kms *kms = priv->kms;
+	struct vblank_event *vbl_ev, *tmp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&vbl_ctrl->lock, flags);
+	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
+		list_del(&vbl_ev->node);
+		spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
+
+		if (vbl_ev->enable)
+			kms->funcs->enable_vblank(kms,
+						priv->crtcs[vbl_ev->crtc_id]);
+		else
+			kms->funcs->disable_vblank(kms,
+						priv->crtcs[vbl_ev->crtc_id]);
+
+		kfree(vbl_ev);
+
+		spin_lock_irqsave(&vbl_ctrl->lock, flags);
+	}
+
+	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
+}
+
+static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
+					int crtc_id, bool enable)
+{
+	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
+	struct vblank_event *vbl_ev;
+	unsigned long flags;
+
+	vbl_ev = kzalloc(sizeof(*vbl_ev), GFP_ATOMIC);
+	if (!vbl_ev)
+		return -ENOMEM;
+
+	vbl_ev->crtc_id = crtc_id;
+	vbl_ev->enable = enable;
+
+	spin_lock_irqsave(&vbl_ctrl->lock, flags);
+	list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
+	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
+
+	queue_work(priv->wq, &vbl_ctrl->work);
+
+	return 0;
+}
+
 /*
  * DRM operations:
  */
@@ -125,6 +184,18 @@ static int msm_unload(struct drm_device *dev)
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_kms *kms = priv->kms;
 	struct msm_gpu *gpu = priv->gpu;
+	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
+	struct vblank_event *vbl_ev, *tmp;
+
+	/* We must cancel and cleanup any pending vblank enable/disable
+	 * work before drm_irq_uninstall() to avoid work re-enabling an
+	 * irq after uninstall has disabled it.
+	 */
+	cancel_work_sync(&vbl_ctrl->work);
+	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
+		list_del(&vbl_ev->node);
+		kfree(vbl_ev);
+	}
 
 	drm_kms_helper_poll_fini(dev);
 	drm_mode_config_cleanup(dev);
@@ -282,6 +353,9 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
 
 	INIT_LIST_HEAD(&priv->inactive_list);
 	INIT_LIST_HEAD(&priv->fence_cbs);
+	INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
+	INIT_WORK(&priv->vblank_ctrl.work, vblank_ctrl_worker);
+	spin_lock_init(&priv->vblank_ctrl.lock);
 
 	drm_mode_config_init(dev);
 
@@ -464,7 +538,7 @@ static int msm_enable_vblank(struct drm_device *dev, int crtc_id)
 	if (!kms)
 		return -ENXIO;
 	DBG("dev=%p, crtc=%d", dev, crtc_id);
-	return kms->funcs->enable_vblank(kms, priv->crtcs[crtc_id]);
+	return vblank_ctrl_queue_work(priv, crtc_id, true);
 }
 
 static void msm_disable_vblank(struct drm_device *dev, int crtc_id)
@@ -474,7 +548,7 @@ static void msm_disable_vblank(struct drm_device *dev, int crtc_id)
 	if (!kms)
 		return;
 	DBG("dev=%p, crtc=%d", dev, crtc_id);
-	kms->funcs->disable_vblank(kms, priv->crtcs[crtc_id]);
+	vblank_ctrl_queue_work(priv, crtc_id, false);
 }
 
 /*
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index c89c9352d31e..12c97cf27e13 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -71,6 +71,12 @@ enum msm_mdp_plane_property {
 	PLANE_PROP_MAX_NUM
 };
 
+struct msm_vblank_ctrl {
+	struct work_struct work;
+	struct list_head event_list;
+	spinlock_t lock;
+};
+
 struct msm_drm_private {
 
 	struct msm_kms *kms;
@@ -147,6 +153,8 @@ struct msm_drm_private {
 		 */
 		struct drm_mm mm;
 	} vram;
+
+	struct msm_vblank_ctrl vblank_ctrl;
 };
 
 struct msm_format {
-- 
2.20.1