From: Archit Taneja
Date: Thu, 23 Mar 2017 10:27:57 +0000 (+0530)
Subject: drm/msm/mdp5: Start using mdp5_hw_mixer
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=adfc0e63abe552bf1b9d18b3073bf2a421f7ea60;p=GitHub%2Fmoto-9609%2Fandroid_kernel_motorola_exynos9610.git

drm/msm/mdp5: Start using mdp5_hw_mixer

Use the mdp5_hw_mixer struct in the mdp5_crtc and mdp5_ctl instead of
using the LM index. Like before, the Layer Mixers are assigned
statically to the CRTCs. The hwmixer(s) will later be dynamically
assigned to CRTCs.

For now, ignore the hwmixers that can only do WB.

Signed-off-by: Archit Taneja
Signed-off-by: Rob Clark
---
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
index df1c8adec3f3..2e6ceafbd3e2 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
@@ -51,7 +51,8 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
 	struct device *dev = encoder->dev->dev;
 	u32 total_lines_x100, vclks_line, cfg;
 	long vsync_clk_speed;
-	int pp_id = GET_PING_PONG_ID(mdp5_crtc_get_lm(encoder->crtc));
+	struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc);
+	int pp_id = GET_PING_PONG_ID(mixer->lm);
 
 	if (IS_ERR_OR_NULL(mdp5_kms->vsync_clk)) {
 		dev_err(dev, "vsync_clk is not initialized\n");
@@ -94,7 +95,8 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
 static int pingpong_tearcheck_enable(struct drm_encoder *encoder)
 {
 	struct mdp5_kms *mdp5_kms = get_kms(encoder);
-	int pp_id = GET_PING_PONG_ID(mdp5_crtc_get_lm(encoder->crtc));
+	struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc);
+	int pp_id = GET_PING_PONG_ID(mixer->lm);
 	int ret;
 
 	ret = clk_set_rate(mdp5_kms->vsync_clk,
@@ -119,7 +121,8 @@ static int pingpong_tearcheck_enable(struct drm_encoder *encoder)
 static void pingpong_tearcheck_disable(struct drm_encoder *encoder)
 {
 	struct mdp5_kms *mdp5_kms = get_kms(encoder);
-	int pp_id = GET_PING_PONG_ID(mdp5_crtc_get_lm(encoder->crtc));
+	struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc);
+	int pp_id = GET_PING_PONG_ID(mixer->lm);
 
 	mdp5_write(mdp5_kms, REG_MDP5_PP_TEAR_CHECK_EN(pp_id), 0);
 	clk_disable_unprepare(mdp5_kms->vsync_clk);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 87a19e0c0e67..ffdb31c4419f 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -32,10 +32,8 @@ struct mdp5_crtc {
 	int id;
 	bool enabled;
 
-	/* layer mixer used for this CRTC (+ its lock): */
-#define GET_LM_ID(crtc_id) ((crtc_id == 3) ? 5 : crtc_id)
-	int lm;
-	spinlock_t lm_lock;	/* protect REG_MDP5_LM_* registers */
+	struct mdp5_hw_mixer *mixer;
+	spinlock_t lm_lock;	/* protect REG_MDP5_LM_* registers */
 
 	/* CTL used for this CRTC: */
 	struct mdp5_ctl *ctl;
@@ -111,6 +109,7 @@ static u32 crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
 static u32 crtc_flush_all(struct drm_crtc *crtc)
 {
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+	struct mdp5_hw_mixer *mixer;
 	struct drm_plane *plane;
 	uint32_t flush_mask = 0;
 
@@ -122,7 +121,8 @@ static u32 crtc_flush_all(struct drm_crtc *crtc)
 		flush_mask |= mdp5_plane_get_flush(plane);
 	}
 
-	flush_mask |= mdp_ctl_flush_mask_lm(mdp5_crtc->lm);
+	mixer = mdp5_crtc->mixer;
+	flush_mask |= mdp_ctl_flush_mask_lm(mixer->lm);
 
 	return crtc_flush(crtc, flush_mask);
 }
@@ -201,7 +201,8 @@ static void blend_setup(struct drm_crtc *crtc)
 	const struct mdp5_cfg_hw *hw_cfg;
 	struct mdp5_plane_state *pstate, *pstates[STAGE_MAX + 1] = {NULL};
 	const struct mdp_format *format;
-	uint32_t lm = mdp5_crtc->lm;
+	struct mdp5_hw_mixer *mixer = mdp5_crtc->mixer;
+	uint32_t lm = mixer->lm;
 	uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0;
 	unsigned long flags;
 	enum mdp5_pipe stage[STAGE_MAX + 1] = { SSPP_NONE };
@@ -302,6 +303,8 @@ static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
 {
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
 	struct mdp5_kms *mdp5_kms = get_kms(crtc);
+	struct mdp5_hw_mixer *mixer = mdp5_crtc->mixer;
+	uint32_t lm = mixer->lm;
 	unsigned long flags;
 	struct drm_display_mode *mode;
 
@@ -320,7 +323,7 @@ static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
 			mode->type, mode->flags);
 
 	spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
-	mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(mdp5_crtc->lm),
+	mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(lm),
 			MDP5_LM_OUT_SIZE_WIDTH(mode->hdisplay) |
 			MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));
 	spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
@@ -555,7 +558,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
 	if (ret)
 		return -EINVAL;
 
-	lm = mdp5_crtc->lm;
+	lm = mdp5_crtc->mixer->lm;
 	stride = width * drm_format_plane_cpp(DRM_FORMAT_ARGB8888, 0);
 
 	spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
@@ -607,6 +610,7 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
 {
 	struct mdp5_kms *mdp5_kms = get_kms(crtc);
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+	uint32_t lm = mdp5_crtc->mixer->lm;
 	uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
 	uint32_t roi_w;
 	uint32_t roi_h;
@@ -622,10 +626,10 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
 	get_roi(crtc, &roi_w, &roi_h);
 
 	spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
-	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(mdp5_crtc->lm),
+	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
 			MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
 			MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
-	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(mdp5_crtc->lm),
+	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(lm),
 			MDP5_LM_CURSOR_START_XY_Y_START(y) |
 			MDP5_LM_CURSOR_START_XY_X_START(x));
 	spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
@@ -709,7 +713,8 @@ static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc)
 	ret = wait_for_completion_timeout(&mdp5_crtc->pp_completion,
 						msecs_to_jiffies(50));
 	if (ret == 0)
-		dev_warn(dev->dev, "pp done time out, lm=%d\n", mdp5_crtc->lm);
+		dev_warn(dev->dev, "pp done time out, lm=%d\n",
+			 mdp5_crtc->mixer->lm);
 }
 
 static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc)
@@ -749,7 +754,8 @@ void mdp5_crtc_set_pipeline(struct drm_crtc *crtc,
 {
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
 	struct mdp5_kms *mdp5_kms = get_kms(crtc);
-	int lm = mdp5_crtc_get_lm(crtc);
+	struct mdp5_hw_mixer *mixer = mdp5_crtc->mixer;
+	uint32_t lm = mixer->lm;
 
 	/* now that we know what irq's we want: */
 	mdp5_crtc->err.irqmask = intf2err(intf->num);
@@ -769,7 +775,7 @@ void mdp5_crtc_set_pipeline(struct drm_crtc *crtc,
 	mdp_irq_update(&mdp5_kms->base);
 
 	mdp5_crtc->ctl = ctl;
-	mdp5_ctl_set_pipeline(ctl, intf, lm);
+	mdp5_ctl_set_pipeline(ctl, intf, mixer);
 }
 
 struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc)
@@ -779,10 +785,11 @@ struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc)
 	return mdp5_crtc->ctl;
 }
 
-int mdp5_crtc_get_lm(struct drm_crtc *crtc)
+struct mdp5_hw_mixer *mdp5_crtc_get_mixer(struct drm_crtc *crtc)
 {
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
-	return WARN_ON(!crtc) ? -EINVAL : mdp5_crtc->lm;
+	return WARN_ON(!crtc) || WARN_ON(!mdp5_crtc->mixer) ?
+		ERR_PTR(-EINVAL) : mdp5_crtc->mixer;
 }
 
 void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc)
@@ -802,6 +809,7 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
 {
 	struct drm_crtc *crtc = NULL;
 	struct mdp5_crtc *mdp5_crtc;
+	struct mdp5_kms *mdp5_kms;
 
 	mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL);
 	if (!mdp5_crtc)
@@ -810,7 +818,6 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
 	crtc = &mdp5_crtc->base;
 
 	mdp5_crtc->id = id;
-	mdp5_crtc->lm = GET_LM_ID(id);
 
 	spin_lock_init(&mdp5_crtc->lm_lock);
 	spin_lock_init(&mdp5_crtc->cursor.lock);
@@ -832,5 +839,8 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
 	drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
 	plane->crtc = crtc;
 
+	mdp5_kms = get_kms(crtc);
+	mdp5_crtc->mixer = mdp5_kms->hwmixers[id];
+
 	return crtc;
 }
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
index 8b93f7e13200..fa4f27a1a551 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
@@ -43,7 +43,7 @@ struct mdp5_ctl {
 	struct mdp5_ctl_manager *ctlm;
 
 	u32 id;
-	int lm;
+	struct mdp5_hw_mixer *mixer;
 
 	/* CTL status bitmask */
 	u32 status;
@@ -174,8 +174,8 @@ static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_interface *intf)
 	spin_unlock_irqrestore(&ctl->hw_lock, flags);
 }
 
-int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl,
-		struct mdp5_interface *intf, int lm)
+int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_interface *intf,
+			  struct mdp5_hw_mixer *mixer)
 {
 	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
 	struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);
@@ -187,11 +187,11 @@ int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl,
 		return -EINVAL;
 	}
 
-	ctl->lm = lm;
+	ctl->mixer = mixer;
 
 	memcpy(&ctl->pipeline.intf, intf, sizeof(*intf));
 
-	ctl->pipeline.start_mask = mdp_ctl_flush_mask_lm(ctl->lm) |
+	ctl->pipeline.start_mask = mdp_ctl_flush_mask_lm(mixer->lm) |
 				   mdp_ctl_flush_mask_encoder(intf);
 
 	/* Virtual interfaces need not set a display intf (e.g.: Writeback) */
@@ -241,7 +241,7 @@ static void refill_start_mask(struct mdp5_ctl *ctl)
 	struct op_mode *pipeline = &ctl->pipeline;
 	struct mdp5_interface *intf = &ctl->pipeline.intf;
 
-	pipeline->start_mask = mdp_ctl_flush_mask_lm(ctl->lm);
+	pipeline->start_mask = mdp_ctl_flush_mask_lm(ctl->mixer->lm);
 
 	/*
 	 * Writeback encoder needs to program & flush
@@ -285,24 +285,24 @@ int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable)
 	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
 	unsigned long flags;
 	u32 blend_cfg;
-	int lm = ctl->lm;
+	struct mdp5_hw_mixer *mixer = ctl->mixer;
 
-	if (unlikely(WARN_ON(lm < 0))) {
-		dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d",
-			ctl->id, lm);
+	if (unlikely(WARN_ON(!mixer))) {
+		dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM",
+			ctl->id);
 		return -EINVAL;
 	}
 
 	spin_lock_irqsave(&ctl->hw_lock, flags);
 
-	blend_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm));
+	blend_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm));
 
 	if (enable)
 		blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;
 	else
 		blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;
 
-	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);
+	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm), blend_cfg);
 	ctl->cursor_on = enable;
 
 	spin_unlock_irqrestore(&ctl->hw_lock, flags);
@@ -358,6 +358,7 @@ static u32 mdp_ctl_blend_ext_mask(enum mdp5_pipe pipe,
 int mdp5_ctl_blend(struct mdp5_ctl *ctl, enum mdp5_pipe *stage, u32 stage_cnt,
 		   u32 ctl_blend_op_flags)
 {
+	struct mdp5_hw_mixer *mixer = ctl->mixer;
 	unsigned long flags;
 	u32 blend_cfg = 0, blend_ext_cfg = 0;
 	int i, start_stage;
@@ -378,13 +379,14 @@ int mdp5_ctl_blend(struct mdp5_ctl *ctl, enum mdp5_pipe *stage, u32 stage_cnt,
 	if (ctl->cursor_on)
 		blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;
 
-	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, ctl->lm), blend_cfg);
-	ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, ctl->lm), blend_ext_cfg);
+	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm), blend_cfg);
+	ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, mixer->lm),
+		  blend_ext_cfg);
 	spin_unlock_irqrestore(&ctl->hw_lock, flags);
 
-	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(ctl->lm);
+	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(mixer->lm);
 
-	DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x", ctl->lm,
+	DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x", mixer->lm,
 		blend_cfg, blend_ext_cfg);
 
 	return 0;
@@ -452,7 +454,7 @@ static u32 fix_sw_flush(struct mdp5_ctl *ctl, u32 flush_mask)
 
 	/* for some targets, cursor bit is the same as LM bit */
 	if (BIT_NEEDS_SW_FIX(MDP5_CTL_FLUSH_CURSOR_0))
-		sw_mask |= mdp_ctl_flush_mask_lm(ctl->lm);
+		sw_mask |= mdp_ctl_flush_mask_lm(ctl->mixer->lm);
 
 	return sw_mask;
 }
@@ -620,7 +622,7 @@ struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
 found:
 	ctl = &ctl_mgr->ctls[c];
 	ctl->pipeline.intf.num = intf_num;
-	ctl->lm = -1;
+	ctl->mixer = NULL;
 	ctl->status |= CTL_STAT_BUSY;
 	ctl->pending_ctl_trigger = 0;
 	DBG("CTL %d allocated", ctl->id);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
index fda00d33e4db..882c9d2be365 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
@@ -38,7 +38,7 @@ int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl);
 
 struct mdp5_interface;
 int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_interface *intf,
-				int lm);
+			  struct mdp5_hw_mixer *lm);
 int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, bool enabled);
 
 int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
index 80fa482ae8ed..68d048f040f0 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
@@ -215,7 +215,7 @@ static void mdp5_vid_encoder_disable(struct drm_encoder *encoder)
 	struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
 	struct mdp5_kms *mdp5_kms = get_kms(encoder);
 	struct mdp5_ctl *ctl = mdp5_encoder->ctl;
-	int lm = mdp5_crtc_get_lm(encoder->crtc);
+	struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc);
 	struct mdp5_interface *intf = &mdp5_encoder->intf;
 	int intfn = mdp5_encoder->intf.num;
 	unsigned long flags;
@@ -238,7 +238,7 @@ static void mdp5_vid_encoder_disable(struct drm_encoder *encoder)
 	 * the settings changes for the new modeset (like new
 	 * scanout buffer) don't latch properly..
 	 */
-	mdp_irq_wait(&mdp5_kms->base, intf2vblank(lm, intf));
+	mdp_irq_wait(&mdp5_kms->base, intf2vblank(mixer->lm, intf));
 
 	bs_set(mdp5_encoder, 0);
 
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 23141b70d1c7..e8174d4847b0 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -428,7 +428,7 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
 	 * the MDP5 interfaces) than the number of layer mixers present in HW,
 	 * but let's be safe here anyway
 	 */
-	num_crtcs = min(priv->num_encoders, mdp5_cfg->lm.count);
+	num_crtcs = min(priv->num_encoders, mdp5_kms->num_hwmixers);
 
 	/*
 	 * Construct planes equaling the number of hw pipes, and CRTCs for the
@@ -851,6 +851,10 @@ static int hwmixer_init(struct mdp5_kms *mdp5_kms)
 			return ret;
 		}
 
+		/* Don't create LMs connected to WB for now */
+		if (!mixer)
+			continue;
+
 		mixer->idx = mdp5_kms->num_hwmixers;
 		mdp5_kms->hwmixers[mdp5_kms->num_hwmixers++] = mixer;
 	}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index 9abb2274379f..13d9ccf37811 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -23,9 +23,9 @@
 #include "mdp/mdp_kms.h"
 #include "mdp5_cfg.h"	/* must be included before mdp5.xml.h */
 #include "mdp5.xml.h"
-#include "mdp5_ctl.h"
 #include "mdp5_pipe.h"
 #include "mdp5_mixer.h"
+#include "mdp5_ctl.h"
 #include "mdp5_smp.h"
 
 struct mdp5_state;
@@ -253,7 +253,7 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev,
 
 struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc);
 uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc);
-int mdp5_crtc_get_lm(struct drm_crtc *crtc);
+struct mdp5_hw_mixer *mdp5_crtc_get_mixer(struct drm_crtc *crtc);
 void mdp5_crtc_set_pipeline(struct drm_crtc *crtc,
 		struct mdp5_interface *intf, struct mdp5_ctl *ctl);
 void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.c
index dd38f0bc2494..032dc0a9638f 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.c
@@ -29,6 +29,10 @@ struct mdp5_hw_mixer *mdp5_mixer_init(const struct mdp5_lm_instance *lm)
 {
 	struct mdp5_hw_mixer *mixer;
 
+	/* ignore WB bound mixers for now */
+	if (lm->caps & MDP_LM_CAP_WB)
+		return NULL;
+
 	mixer = kzalloc(sizeof(*mixer), GFP_KERNEL);
 	if (!mixer)
 		return ERR_PTR(-ENOMEM);
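
For readers following only this patch, a minimal sketch of the objects it wires together, inferred from the usage above. The real definitions live in mdp5_mixer.h and mdp5_kms.h and carry additional fields (capabilities, flush masks, etc.); the array bound and the crtc_mixer() helper below are purely illustrative, not the driver's actual code:

	/* Sketch only -- shape implied by this patch, not the in-tree definitions. */
	struct mdp5_hw_mixer {
		int idx;	/* position in mdp5_kms->hwmixers[], assigned in hwmixer_init() */
		int lm;		/* hardware Layer Mixer instance, used for LM/PP register offsets */
	};

	struct mdp5_kms {
		/* ... */
		unsigned int num_hwmixers;		/* counts only display-capable (non-WB) mixers */
		struct mdp5_hw_mixer *hwmixers[8];	/* bound is illustrative */
		/* ... */
	};

	/*
	 * Static CRTC -> mixer assignment after this patch: CRTC "id" simply takes
	 * hwmixers[id], replacing the old GET_LM_ID(crtc_id) mapping, and every
	 * consumer derives the LM number from the mixer object (mixer->lm).
	 * Hypothetical helper for illustration only.
	 */
	static struct mdp5_hw_mixer *crtc_mixer(struct mdp5_kms *mdp5_kms, int id)
	{
		return mdp5_kms->hwmixers[id];
	}

Because mdp5_mixer_init() now returns NULL for WB-only LMs and hwmixer_init() skips them, hwmixers[] ends up densely packed with display-capable mixers, which is why modeset_init() can bound num_crtcs by mdp5_kms->num_hwmixers instead of mdp5_cfg->lm.count.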