static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev);
-const struct mdp5_cfg_hw *mdp5_cfg_get_hw_config(void *cfg_hnd)
+const struct mdp5_cfg_hw *mdp5_cfg_get_hw_config(struct mdp5_cfg_handler *cfg_handler)
{
- struct mdp5_cfg_handler *cfg_handler = cfg_hnd;
-
return cfg_handler->config.hw;
}
-struct mdp5_cfg *mdp5_cfg_get_config(void *cfg_hnd)
+struct mdp5_cfg *mdp5_cfg_get_config(struct mdp5_cfg_handler *cfg_handler)
{
- struct mdp5_cfg_handler *cfg_handler = cfg_hnd;
-
return &cfg_handler->config;
}
-int mdp5_cfg_get_hw_rev(void *cfg_hnd)
+int mdp5_cfg_get_hw_rev(struct mdp5_cfg_handler *cfg_handler)
{
- struct mdp5_cfg_handler *cfg_handler = cfg_hnd;
-
return cfg_handler->revision;
}
-void mdp5_cfg_destroy(void *cfg_hnd)
+void mdp5_cfg_destroy(struct mdp5_cfg_handler *cfg_handler)
{
- struct mdp5_cfg_handler *cfg_handler = cfg_hnd;
-
kfree(cfg_handler);
}
-void *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
+struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
uint32_t major, uint32_t minor)
{
struct drm_device *dev = mdp5_kms->dev;
};
struct mdp5_kms;
+struct mdp5_cfg_handler;
-const struct mdp5_cfg_hw *mdp5_cfg_get_hw_config(void *cfg_hnd);
-struct mdp5_cfg *mdp5_cfg_get_config(void *cfg_hnd);
-int mdp5_cfg_get_hw_rev(void *cfg_hnd);
+const struct mdp5_cfg_hw *mdp5_cfg_get_hw_config(struct mdp5_cfg_handler *cfg_hnd);
+struct mdp5_cfg *mdp5_cfg_get_config(struct mdp5_cfg_handler *cfg_hnd);
+int mdp5_cfg_get_hw_rev(struct mdp5_cfg_handler *cfg_hnd);
-void *mdp5_cfg_init(struct mdp5_kms *mdp5_kms, uint32_t major, uint32_t minor);
-void mdp5_cfg_destroy(void *cfg_hnd);
+struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
+ uint32_t major, uint32_t minor);
+void mdp5_cfg_destroy(struct mdp5_cfg_handler *cfg_hnd);
#endif /* __MDP5_CFG_H__ */
spinlock_t lm_lock; /* protect REG_MDP5_LM_* registers */
/* CTL used for this CRTC: */
- void *ctl;
+ struct mdp5_ctl *ctl;
/* if there is a pending flip, these will be non-null: */
struct drm_pending_vblank_event *event;
unsigned long flags;
#define blender(stage) ((stage) - STAGE_BASE)
- hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg_priv);
+ hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
/* request a free CTL, if none is already allocated for this CRTC */
if (!mdp5_crtc->ctl) {
- mdp5_crtc->ctl = mdp5_ctl_request(mdp5_kms->ctl_priv, crtc);
+ mdp5_crtc->ctl = mdp5_ctlm_request(mdp5_kms->ctlm, crtc);
if (!mdp5_crtc->ctl)
return -EBUSY;
}
enum mdp_mixer_stage_id stage = STAGE_BASE;
int max_nb_planes;
- hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg_priv);
+ hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
max_nb_planes = hw_cfg->lm.nb_stages;
if (count_planes(crtc) >= max_nb_planes) {
u32 flush_mask;
bool cursor_on;
- void *crtc;
+
+ struct drm_crtc *crtc;
};
struct mdp5_ctl_manager {
}
-int mdp5_ctl_set_intf(void *c, enum mdp5_intf intf)
+int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, enum mdp5_intf intf)
{
- struct mdp5_ctl *ctl = c;
unsigned long flags;
static const enum mdp5_intfnum intfnum[] = {
INTF0, INTF1, INTF2, INTF3,
return 0;
}
-int mdp5_ctl_set_cursor(void *c, bool enable)
+int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, bool enable)
{
struct mdp5_ctl_manager *ctl_mgr = &mdp5_ctl_mgr;
- struct mdp5_ctl *ctl = c;
unsigned long flags;
u32 blend_cfg;
int lm;
}
-int mdp5_ctl_blend(void *c, u32 lm, u32 blend_cfg)
+int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg)
{
- struct mdp5_ctl *ctl = c;
unsigned long flags;
if (ctl->cursor_on)
return 0;
}
-int mdp5_ctl_commit(void *c, u32 flush_mask)
+int mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask)
{
struct mdp5_ctl_manager *ctl_mgr = &mdp5_ctl_mgr;
- struct mdp5_ctl *ctl = c;
unsigned long flags;
if (flush_mask & MDP5_CTL_FLUSH_CURSOR_DUMMY) {
return 0;
}
-u32 mdp5_ctl_get_flush(void *c)
+u32 mdp5_ctl_get_flush(struct mdp5_ctl *ctl)
{
- struct mdp5_ctl *ctl = c;
-
return ctl->flush_mask;
}
-void mdp5_ctl_release(void *c)
+void mdp5_ctl_release(struct mdp5_ctl *ctl)
{
struct mdp5_ctl_manager *ctl_mgr = &mdp5_ctl_mgr;
- struct mdp5_ctl *ctl = c;
unsigned long flags;
if (unlikely(WARN_ON(ctl->id >= MAX_CTL) || !ctl->busy)) {
*
* @return first free CTL
*/
-void *mdp5_ctl_request(void *ctlm, void *crtc)
+struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
+ struct drm_crtc *crtc)
{
- struct mdp5_ctl_manager *ctl_mgr = ctlm;
struct mdp5_ctl *ctl = NULL;
unsigned long flags;
int c;
return ctl;
}
-void mdp5_ctlm_hw_reset(void *ctlm)
+void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctl_mgr)
{
- struct mdp5_ctl_manager *ctl_mgr = ctlm;
unsigned long flags;
int c;
}
}
-void mdp5_ctlm_destroy(void *ctlm)
+void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctl_mgr)
{
- struct mdp5_ctl_manager *ctl_mgr = ctlm;
-
kfree(ctl_mgr);
}
-void *mdp5_ctlm_init(struct drm_device *dev, void __iomem *mmio_base,
- const struct mdp5_cfg_hw *hw_cfg)
+struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
+ void __iomem *mmio_base, const struct mdp5_cfg_hw *hw_cfg)
{
struct mdp5_ctl_manager *ctl_mgr = &mdp5_ctl_mgr;
const struct mdp5_sub_block *ctl_cfg = &hw_cfg->ctl;
* mdp5_ctlm_init() returns a ctlm (CTL Manager) handler,
* which is then used to call the other mdp5_ctlm_*(ctlm, ...) functions.
*/
-void *mdp5_ctlm_init(struct drm_device *dev, void __iomem *mmio_base,
- const struct mdp5_cfg_hw *hw_cfg);
-void mdp5_ctlm_hw_reset(void *ctlm);
-void mdp5_ctlm_destroy(void *ctlm);
+struct mdp5_ctl_manager;
+struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
+ void __iomem *mmio_base, const struct mdp5_cfg_hw *hw_cfg);
+void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctlm);
+void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctlm);
/*
* CTL prototypes:
- * mdp5_ctl_request(ctlm, ...) returns a ctl (CTL resource) handler,
+ * mdp5_ctlm_request(ctlm, ...) returns a ctl (CTL resource) handler,
* which is then used to call the other mdp5_ctl_*(ctl, ...) functions.
*/
-void *mdp5_ctl_request(void *ctlm, void *crtc);
+struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctlm, struct drm_crtc *crtc);
-int mdp5_ctl_set_intf(void *ctl, enum mdp5_intf intf);
+int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, enum mdp5_intf intf);
-int mdp5_ctl_set_cursor(void *ctl, bool enable);
+int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, bool enable);
/* @blend_cfg: see LM blender config definition below */
-int mdp5_ctl_blend(void *ctl, u32 lm, u32 blend_cfg);
+int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg);
/* @flush_mask: see CTL flush masks definitions below */
-int mdp5_ctl_commit(void *ctl, u32 flush_mask);
-u32 mdp5_ctl_get_flush(void *ctl);
+int mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask);
+u32 mdp5_ctl_get_flush(struct mdp5_ctl *ctl);
-void mdp5_ctl_release(void *ctl);
+void mdp5_ctl_release(struct mdp5_ctl *ctl);
/*
* blend_cfg (LM blender config):
mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0);
spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
- mdp5_ctlm_hw_reset(mdp5_kms->ctl_priv);
+ mdp5_ctlm_hw_reset(mdp5_kms->ctlm);
pm_runtime_put_sync(dev->dev);
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
struct msm_mmu *mmu = mdp5_kms->mmu;
- void *smp = mdp5_kms->smp_priv;
- void *cfg = mdp5_kms->cfg_priv;
- void *ctl = mdp5_kms->ctl_priv;
mdp5_irq_domain_fini(mdp5_kms);
mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports));
mmu->funcs->destroy(mmu);
}
- if (ctl)
- mdp5_ctlm_destroy(ctl);
- if (smp)
- mdp5_smp_destroy(smp);
- if (cfg)
- mdp5_cfg_destroy(cfg);
+
+ if (mdp5_kms->ctlm)
+ mdp5_ctlm_destroy(mdp5_kms->ctlm);
+ if (mdp5_kms->smp)
+ mdp5_smp_destroy(mdp5_kms->smp);
+ if (mdp5_kms->cfg)
+ mdp5_cfg_destroy(mdp5_kms->cfg);
kfree(mdp5_kms);
}
const struct mdp5_cfg_hw *hw_cfg;
int i, ret;
- hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg_priv);
+ hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
/* register our interrupt-controller for hdmi/eDP/dsi/etc
* to use for irqs routed through mdp:
struct msm_kms *kms = NULL;
struct msm_mmu *mmu;
uint32_t major, minor;
- void *priv;
int i, ret;
mdp5_kms = kzalloc(sizeof(*mdp5_kms), GFP_KERNEL);
clk_set_rate(mdp5_kms->src_clk, 200000000);
read_hw_revision(mdp5_kms, &major, &minor);
- priv = mdp5_cfg_init(mdp5_kms, major, minor);
- if (IS_ERR(priv)) {
- ret = PTR_ERR(priv);
+
+ mdp5_kms->cfg = mdp5_cfg_init(mdp5_kms, major, minor);
+ if (IS_ERR(mdp5_kms->cfg)) {
+ ret = PTR_ERR(mdp5_kms->cfg);
+ mdp5_kms->cfg = NULL;
goto fail;
}
- mdp5_kms->cfg_priv = priv;
- config = mdp5_cfg_get_config(mdp5_kms->cfg_priv);
+
+ config = mdp5_cfg_get_config(mdp5_kms->cfg);
/* TODO: compute core clock rate at runtime */
clk_set_rate(mdp5_kms->src_clk, config->hw->max_clk);
- priv = mdp5_smp_init(mdp5_kms->dev, &config->hw->smp);
- if (IS_ERR(priv)) {
- ret = PTR_ERR(priv);
+ mdp5_kms->smp = mdp5_smp_init(mdp5_kms->dev, &config->hw->smp);
+ if (IS_ERR(mdp5_kms->smp)) {
+ ret = PTR_ERR(mdp5_kms->smp);
+ mdp5_kms->smp = NULL;
goto fail;
}
- mdp5_kms->smp_priv = priv;
- priv = mdp5_ctlm_init(dev, mdp5_kms->mmio, config->hw);
- if (IS_ERR(priv)) {
- ret = PTR_ERR(priv);
+ mdp5_kms->ctlm = mdp5_ctlm_init(dev, mdp5_kms->mmio, config->hw);
+ if (IS_ERR(mdp5_kms->ctlm)) {
+ ret = PTR_ERR(mdp5_kms->ctlm);
+ mdp5_kms->ctlm = NULL;
goto fail;
}
- mdp5_kms->ctl_priv = priv;
/* make sure things are off before attaching iommu (bootloader could
* have left things on, in which case we'll start getting faults if
struct drm_device *dev;
- void *cfg_priv;
+ struct mdp5_cfg_handler *cfg;
/* mapper-id used to request GEM buffer mapped for scanout: */
int id;
struct msm_mmu *mmu;
- void *smp_priv;
- void *ctl_priv;
+ struct mdp5_smp *smp;
+ struct mdp5_ctl_manager *ctlm;
/* io/register spaces: */
void __iomem *mmio, *vbif;
if (mdp5_kms) {
/* Release the memory we requested earlier from the SMP: */
- mdp5_smp_release(mdp5_kms->smp_priv, pipe);
+ mdp5_smp_release(mdp5_kms->smp, pipe);
}
/* TODO detaching now will cause us not to get the last
crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h);
/* Request some memory from the SMP: */
- ret = mdp5_smp_request(mdp5_kms->smp_priv,
+ ret = mdp5_smp_request(mdp5_kms->smp,
mdp5_plane->pipe, fb->pixel_format, src_w);
if (ret)
return ret;
* would move into atomic->check_plane_state(), while updating the
* hw would remain here:
*/
- mdp5_smp_configure(mdp5_kms->smp_priv, pipe);
+ mdp5_smp_configure(mdp5_kms->smp, pipe);
if (src_w != crtc_w) {
config |= MDP5_PIPE_SCALE_CONFIG_SCALEX_EN;
struct mdp5_kms *mdp5_kms = get_kms(plane);
enum mdp5_pipe pipe = to_mdp5_plane(plane)->pipe;
- mdp5_smp_commit(mdp5_kms->smp_priv, pipe);
+ mdp5_smp_commit(mdp5_kms->smp, pipe);
}
enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane)
int reserved;
unsigned long flags;
- hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg_priv);
+ hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
reserved = hw_cfg->smp.reserved[cid];
spin_lock_irqsave(&smp->state_lock, flags);
* decimated width. Ie. SMP buffering sits downstream of decimation (which
* presumably happens during the dma from scanout buffer).
*/
-int mdp5_smp_request(void *handler, enum mdp5_pipe pipe, u32 fmt, u32 width)
+int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 width)
{
- struct mdp5_smp *smp = handler;
struct mdp5_kms *mdp5_kms = get_kms(smp);
struct drm_device *dev = mdp5_kms->dev;
- int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg_priv);
+ int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg);
int i, hsub, nplanes, nlines, nblks, ret;
nplanes = drm_format_num_planes(fmt);
}
/* Release SMP blocks for all clients of the pipe */
-void mdp5_smp_release(void *handler, enum mdp5_pipe pipe)
+void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe)
{
- struct mdp5_smp *smp = handler;
int i, nblks;
for (i = 0, nblks = 0; i < pipe2nclients(pipe); i++)
}
/* step #2: configure hw for union(pending, inuse): */
-void mdp5_smp_configure(void *handler, enum mdp5_pipe pipe)
+void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe)
{
- struct mdp5_smp *smp = handler;
int cnt = smp->blk_cnt;
mdp5_smp_state_t assigned;
int i;
}
/* step #3: after vblank, copy pending -> inuse: */
-void mdp5_smp_commit(void *handler, enum mdp5_pipe pipe)
+void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
{
- struct mdp5_smp *smp = handler;
int cnt = smp->blk_cnt;
mdp5_smp_state_t released;
int i;
}
}
-void mdp5_smp_destroy(void *handler)
+void mdp5_smp_destroy(struct mdp5_smp *smp)
{
- struct mdp5_smp *smp = handler;
-
kfree(smp);
}
-void *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_block *cfg)
+struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_block *cfg)
{
struct mdp5_smp *smp = NULL;
int ret;
};
struct mdp5_kms;
+struct mdp5_smp;
/*
* SMP module prototypes:
- * which is then used to call the other mdp5_smp_*(handler, ...) functions.
+ * which is then used to call the other mdp5_smp_*(smp, ...) functions.
*/
-void *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_block *cfg);
-void mdp5_smp_destroy(void *handler);
+struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_block *cfg);
+void mdp5_smp_destroy(struct mdp5_smp *smp);
-int mdp5_smp_request(void *handler, enum mdp5_pipe pipe, u32 fmt, u32 width);
-void mdp5_smp_configure(void *handler, enum mdp5_pipe pipe);
-void mdp5_smp_commit(void *handler, enum mdp5_pipe pipe);
-void mdp5_smp_release(void *handler, enum mdp5_pipe pipe);
+int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 width);
+void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe);
+void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe);
+void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe);
#endif /* __MDP5_SMP_H__ */