struct device *dev = encoder->dev->dev;
u32 total_lines_x100, vclks_line, cfg;
long vsync_clk_speed;
- int pp_id = GET_PING_PONG_ID(mdp5_crtc_get_lm(encoder->crtc));
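+ /* derive the PING_PONG id from the layer mixer assigned to this CRTC */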
+ struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc);
+ int pp_id = GET_PING_PONG_ID(mixer->lm);
if (IS_ERR_OR_NULL(mdp5_kms->vsync_clk)) {
dev_err(dev, "vsync_clk is not initialized\n");
static int pingpong_tearcheck_enable(struct drm_encoder *encoder)
{
struct mdp5_kms *mdp5_kms = get_kms(encoder);
- int pp_id = GET_PING_PONG_ID(mdp5_crtc_get_lm(encoder->crtc));
+ struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc);
+ int pp_id = GET_PING_PONG_ID(mixer->lm);
int ret;
ret = clk_set_rate(mdp5_kms->vsync_clk,
static void pingpong_tearcheck_disable(struct drm_encoder *encoder)
{
struct mdp5_kms *mdp5_kms = get_kms(encoder);
- int pp_id = GET_PING_PONG_ID(mdp5_crtc_get_lm(encoder->crtc));
+ struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc);
+ int pp_id = GET_PING_PONG_ID(mixer->lm);
mdp5_write(mdp5_kms, REG_MDP5_PP_TEAR_CHECK_EN(pp_id), 0);
clk_disable_unprepare(mdp5_kms->vsync_clk);
int id;
bool enabled;
- /* layer mixer used for this CRTC (+ its lock): */
-#define GET_LM_ID(crtc_id) ((crtc_id == 3) ? 5 : crtc_id)
- int lm;
- spinlock_t lm_lock; /* protect REG_MDP5_LM_* registers */
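+ /* layer mixer used for this CRTC (+ its lock): */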
+ struct mdp5_hw_mixer *mixer;
+ spinlock_t lm_lock; /* protect REG_MDP5_LM_* registers */
/* CTL used for this CRTC: */
struct mdp5_ctl *ctl;
static u32 crtc_flush_all(struct drm_crtc *crtc)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_hw_mixer *mixer;
struct drm_plane *plane;
uint32_t flush_mask = 0;
flush_mask |= mdp5_plane_get_flush(plane);
}
- flush_mask |= mdp_ctl_flush_mask_lm(mdp5_crtc->lm);
+ mixer = mdp5_crtc->mixer;
+ flush_mask |= mdp_ctl_flush_mask_lm(mixer->lm);
return crtc_flush(crtc, flush_mask);
}
const struct mdp5_cfg_hw *hw_cfg;
struct mdp5_plane_state *pstate, *pstates[STAGE_MAX + 1] = {NULL};
const struct mdp_format *format;
- uint32_t lm = mdp5_crtc->lm;
+ struct mdp5_hw_mixer *mixer = mdp5_crtc->mixer;
+ uint32_t lm = mixer->lm;
uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0;
unsigned long flags;
enum mdp5_pipe stage[STAGE_MAX + 1] = { SSPP_NONE };
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_kms *mdp5_kms = get_kms(crtc);
+ struct mdp5_hw_mixer *mixer = mdp5_crtc->mixer;
+ uint32_t lm = mixer->lm;
unsigned long flags;
struct drm_display_mode *mode;
mode->type, mode->flags);
spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
- mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(mdp5_crtc->lm),
+ mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(lm),
MDP5_LM_OUT_SIZE_WIDTH(mode->hdisplay) |
MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));
spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
if (ret)
return -EINVAL;
- lm = mdp5_crtc->lm;
+ lm = mdp5_crtc->mixer->lm;
stride = width * drm_format_plane_cpp(DRM_FORMAT_ARGB8888, 0);
spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
{
struct mdp5_kms *mdp5_kms = get_kms(crtc);
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ uint32_t lm = mdp5_crtc->mixer->lm;
uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
uint32_t roi_w;
uint32_t roi_h;
get_roi(crtc, &roi_w, &roi_h);
spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
- mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(mdp5_crtc->lm),
+ mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
- mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(mdp5_crtc->lm),
+ mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(lm),
MDP5_LM_CURSOR_START_XY_Y_START(y) |
MDP5_LM_CURSOR_START_XY_X_START(x));
spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
ret = wait_for_completion_timeout(&mdp5_crtc->pp_completion,
msecs_to_jiffies(50));
if (ret == 0)
- dev_warn(dev->dev, "pp done time out, lm=%d\n", mdp5_crtc->lm);
+ dev_warn(dev->dev, "pp done time out, lm=%d\n",
+ mdp5_crtc->mixer->lm);
}
static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_kms *mdp5_kms = get_kms(crtc);
- int lm = mdp5_crtc_get_lm(crtc);
+ struct mdp5_hw_mixer *mixer = mdp5_crtc->mixer;
+ uint32_t lm = mixer->lm;
/* now that we know what irq's we want: */
mdp5_crtc->err.irqmask = intf2err(intf->num);
mdp_irq_update(&mdp5_kms->base);
mdp5_crtc->ctl = ctl;
- mdp5_ctl_set_pipeline(ctl, intf, lm);
+ mdp5_ctl_set_pipeline(ctl, intf, mixer);
}
struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc)
return mdp5_crtc->ctl;
}
-int mdp5_crtc_get_lm(struct drm_crtc *crtc)
+struct mdp5_hw_mixer *mdp5_crtc_get_mixer(struct drm_crtc *crtc)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
- return WARN_ON(!crtc) ? -EINVAL : mdp5_crtc->lm;
+ return WARN_ON(!crtc) || WARN_ON(!mdp5_crtc->mixer) ?
+ ERR_PTR(-EINVAL) : mdp5_crtc->mixer;
}
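/*
 * Not part of the patch: a minimal, hypothetical caller sketch showing how
 * the ERR_PTR return of mdp5_crtc_get_mixer() could be checked. The helper
 * name example_get_lm_index() is illustrative only; it assumes the usual
 * linux/err.h helpers and the mdp5 headers used above.
 */
static int example_get_lm_index(struct drm_crtc *crtc)
{
	struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(crtc);

	if (IS_ERR(mixer))
		return PTR_ERR(mixer);

	/* mixer->lm is the LM instance backing this CRTC */
	return mixer->lm;
}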
void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc)
{
struct drm_crtc *crtc = NULL;
struct mdp5_crtc *mdp5_crtc;
+ struct mdp5_kms *mdp5_kms;
mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL);
if (!mdp5_crtc)
crtc = &mdp5_crtc->base;
mdp5_crtc->id = id;
- mdp5_crtc->lm = GET_LM_ID(id);
spin_lock_init(&mdp5_crtc->lm_lock);
spin_lock_init(&mdp5_crtc->cursor.lock);
drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
plane->crtc = crtc;
+ mdp5_kms = get_kms(crtc);
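+ /* for now, statically assign the hw mixer with the same index as the CRTC */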
+ mdp5_crtc->mixer = mdp5_kms->hwmixers[id];
+
return crtc;
}
struct mdp5_ctl_manager *ctlm;
u32 id;
- int lm;
+ struct mdp5_hw_mixer *mixer;
/* CTL status bitmask */
u32 status;
spin_unlock_irqrestore(&ctl->hw_lock, flags);
}
-int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl,
- struct mdp5_interface *intf, int lm)
+int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_interface *intf,
+ struct mdp5_hw_mixer *mixer)
{
struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);
return -EINVAL;
}
- ctl->lm = lm;
+ ctl->mixer = mixer;
memcpy(&ctl->pipeline.intf, intf, sizeof(*intf));
- ctl->pipeline.start_mask = mdp_ctl_flush_mask_lm(ctl->lm) |
+ ctl->pipeline.start_mask = mdp_ctl_flush_mask_lm(mixer->lm) |
mdp_ctl_flush_mask_encoder(intf);
/* Virtual interfaces need not set a display intf (e.g.: Writeback) */
struct op_mode *pipeline = &ctl->pipeline;
struct mdp5_interface *intf = &ctl->pipeline.intf;
- pipeline->start_mask = mdp_ctl_flush_mask_lm(ctl->lm);
+ pipeline->start_mask = mdp_ctl_flush_mask_lm(ctl->mixer->lm);
/*
* Writeback encoder needs to program & flush
struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
unsigned long flags;
u32 blend_cfg;
- int lm = ctl->lm;
+ struct mdp5_hw_mixer *mixer = ctl->mixer;
- if (unlikely(WARN_ON(lm < 0))) {
- dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d",
- ctl->id, lm);
+ if (unlikely(WARN_ON(!mixer))) {
+ dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM\n", ctl->id);
return -EINVAL;
}
spin_lock_irqsave(&ctl->hw_lock, flags);
- blend_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm));
+ blend_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm));
if (enable)
blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;
else
blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;
- ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);
+ ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm), blend_cfg);
ctl->cursor_on = enable;
spin_unlock_irqrestore(&ctl->hw_lock, flags);
int mdp5_ctl_blend(struct mdp5_ctl *ctl, enum mdp5_pipe *stage, u32 stage_cnt,
u32 ctl_blend_op_flags)
{
+ struct mdp5_hw_mixer *mixer = ctl->mixer;
unsigned long flags;
u32 blend_cfg = 0, blend_ext_cfg = 0;
int i, start_stage;
if (ctl->cursor_on)
blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;
- ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, ctl->lm), blend_cfg);
- ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, ctl->lm), blend_ext_cfg);
+ ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm), blend_cfg);
+ ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, mixer->lm),
+ blend_ext_cfg);
spin_unlock_irqrestore(&ctl->hw_lock, flags);
- ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(ctl->lm);
+ ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(mixer->lm);
- DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x", ctl->lm,
+ DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x", mixer->lm,
blend_cfg, blend_ext_cfg);
return 0;
/* for some targets, cursor bit is the same as LM bit */
if (BIT_NEEDS_SW_FIX(MDP5_CTL_FLUSH_CURSOR_0))
- sw_mask |= mdp_ctl_flush_mask_lm(ctl->lm);
+ sw_mask |= mdp_ctl_flush_mask_lm(ctl->mixer->lm);
return sw_mask;
}
found:
ctl = &ctl_mgr->ctls[c];
ctl->pipeline.intf.num = intf_num;
- ctl->lm = -1;
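+ /* no mixer bound yet; mdp5_ctl_set_pipeline() assigns one later */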
+ ctl->mixer = NULL;
ctl->status |= CTL_STAT_BUSY;
ctl->pending_ctl_trigger = 0;
DBG("CTL %d allocated", ctl->id);
struct mdp5_interface;
int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_interface *intf,
- int lm);
+ struct mdp5_hw_mixer *mixer);
int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, bool enabled);
int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable);
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_kms *mdp5_kms = get_kms(encoder);
struct mdp5_ctl *ctl = mdp5_encoder->ctl;
- int lm = mdp5_crtc_get_lm(encoder->crtc);
+ struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc);
struct mdp5_interface *intf = &mdp5_encoder->intf;
int intfn = mdp5_encoder->intf.num;
unsigned long flags;
* the settings changes for the new modeset (like new
* scanout buffer) don't latch properly..
*/
- mdp_irq_wait(&mdp5_kms->base, intf2vblank(lm, intf));
+ mdp_irq_wait(&mdp5_kms->base, intf2vblank(mixer->lm, intf));
bs_set(mdp5_encoder, 0);
* the MDP5 interfaces) than the number of layer mixers present in HW,
* but let's be safe here anyway
*/
- num_crtcs = min(priv->num_encoders, mdp5_cfg->lm.count);
+ num_crtcs = min(priv->num_encoders, mdp5_kms->num_hwmixers);
/*
* Construct planes equaling the number of hw pipes, and CRTCs for the
return ret;
}
+ /* Don't create LMs connected to WB for now */
+ if (!mixer)
+ continue;
+
mixer->idx = mdp5_kms->num_hwmixers;
mdp5_kms->hwmixers[mdp5_kms->num_hwmixers++] = mixer;
}
#include "mdp/mdp_kms.h"
#include "mdp5_cfg.h" /* must be included before mdp5.xml.h */
#include "mdp5.xml.h"
-#include "mdp5_ctl.h"
#include "mdp5_pipe.h"
#include "mdp5_mixer.h"
+#include "mdp5_ctl.h"
#include "mdp5_smp.h"
struct mdp5_state;
struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc);
uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc);
-int mdp5_crtc_get_lm(struct drm_crtc *crtc);
+struct mdp5_hw_mixer *mdp5_crtc_get_mixer(struct drm_crtc *crtc);
void mdp5_crtc_set_pipeline(struct drm_crtc *crtc,
struct mdp5_interface *intf, struct mdp5_ctl *ctl);
void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc);
{
struct mdp5_hw_mixer *mixer;
+ /* ignore WB bound mixers for now */
+ if (lm->caps & MDP_LM_CAP_WB)
+ return NULL;
+
mixer = kzalloc(sizeof(*mixer), GFP_KERNEL);
if (!mixer)
return ERR_PTR(-ENOMEM);