struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
struct mdp5_kms *mdp5_kms = get_kms(plane);
enum mdp5_pipe pipe = mdp5_plane->pipe;
- int i;
DBG("%s: disable", mdp5_plane->name);
- /* update our SMP request to zero (release all our blks): */
- for (i = 0; i < pipe2nclients(pipe); i++)
- mdp5_smp_request(mdp5_kms, pipe2client(pipe, i), 0);
+ if (mdp5_kms) {
+ /* Release the memory we requested earlier from the SMP: */
+ mdp5_smp_release(mdp5_kms->smp_priv, pipe);
+ }
/* TODO detaching now will cause us not to get the last
* vblank and mdp5_smp_commit().. so other planes will
plane->fb = fb;
}
-/* NOTE: looks like if horizontal decimation is used (if we supported that)
- * then the width used to calculate SMP block requirements is the post-
- * decimated width. Ie. SMP buffering sits downstream of decimation (which
- * presumably happens during the dma from scanout buffer).
- */
-static int request_smp_blocks(struct drm_plane *plane, uint32_t format,
- uint32_t nplanes, uint32_t width)
-{
- struct drm_device *dev = plane->dev;
- struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
- struct mdp5_kms *mdp5_kms = get_kms(plane);
- enum mdp5_pipe pipe = mdp5_plane->pipe;
- int i, hsub, nlines, nblks, ret;
-
- hsub = drm_format_horz_chroma_subsampling(format);
-
- /* different if BWC (compressed framebuffer?) enabled: */
- nlines = 2;
-
- for (i = 0, nblks = 0; i < nplanes; i++) {
- int n, fetch_stride, cpp;
-
- cpp = drm_format_plane_cpp(format, i);
- fetch_stride = width * cpp / (i ? hsub : 1);
-
- n = DIV_ROUND_UP(fetch_stride * nlines, SMP_BLK_SIZE);
-
- /* for hw rev v1.00 */
- if (mdp5_kms->rev == 0)
- n = roundup_pow_of_two(n);
-
- DBG("%s[%d]: request %d SMP blocks", mdp5_plane->name, i, n);
- ret = mdp5_smp_request(mdp5_kms, pipe2client(pipe, i), n);
- if (ret) {
- dev_err(dev->dev, "Could not allocate %d SMP blocks: %d\n",
- n, ret);
- return ret;
- }
-
- nblks += n;
- }
-
- /* in success case, return total # of blocks allocated: */
- return nblks;
-}
-
-static void set_fifo_thresholds(struct drm_plane *plane, int nblks)
-{
- struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
- struct mdp5_kms *mdp5_kms = get_kms(plane);
- enum mdp5_pipe pipe = mdp5_plane->pipe;
- uint32_t val;
-
- /* 1/4 of SMP pool that is being fetched */
- val = (nblks * SMP_ENTRIES_PER_BLK) / 4;
-
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe), val * 1);
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe), val * 2);
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe), val * 3);
-
-}
-
int mdp5_plane_mode_set(struct drm_plane *plane,
struct drm_crtc *crtc, struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
uint32_t nplanes, config = 0;
uint32_t phasex_step = 0, phasey_step = 0;
uint32_t hdecm = 0, vdecm = 0;
- int i, nblks;
+ int ret;
nplanes = drm_format_num_planes(fb->pixel_format);
fb->base.id, src_x, src_y, src_w, src_h,
crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h);
- /*
- * Calculate and request required # of smp blocks:
- */
- nblks = request_smp_blocks(plane, fb->pixel_format, nplanes, src_w);
- if (nblks < 0)
- return nblks;
+ /* Request some memory from the SMP: */
+ ret = mdp5_smp_request(mdp5_kms->smp_priv,
+ mdp5_plane->pipe, fb->pixel_format, src_w);
+ if (ret)
+ return ret;
/*
* Currently we update the hw for allocations/requests immediately,
* but once atomic modeset/pageflip is in place, the allocation
* would move into atomic->check_plane_state(), while updating the
* hw would remain here:
*/
- for (i = 0; i < pipe2nclients(pipe); i++)
- mdp5_smp_configure(mdp5_kms, pipe2client(pipe, i));
+ mdp5_smp_configure(mdp5_kms->smp_priv, pipe);
if (src_w != crtc_w) {
config |= MDP5_PIPE_SCALE_CONFIG_SCALEX_EN;
MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER(SCALE_FILTER_NEAREST) |
MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER(SCALE_FILTER_NEAREST));
- set_fifo_thresholds(plane, nblks);
-
/* TODO detach from old crtc (if we had more than one) */
mdp5_crtc_attach(crtc, plane);
{
struct mdp5_kms *mdp5_kms = get_kms(plane);
enum mdp5_pipe pipe = to_mdp5_plane(plane)->pipe;
- int i;
- for (i = 0; i < pipe2nclients(pipe); i++)
- mdp5_smp_commit(mdp5_kms, pipe2client(pipe, i));
+ mdp5_smp_commit(mdp5_kms->smp_priv, pipe);
}
enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane)
/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
* Based on the size of the attached scanout buffer, a certain # of
* blocks must be allocated to that client out of the shared pool.
*
- * For each block, it can be either free, or pending/in-use by a
- * client. The updates happen in three steps:
+ * In some hw, some blocks are statically allocated for certain pipes
+ * and CANNOT be re-allocated (eg: MMB0 and MMB1 both tied to RGB0).
+ *
+ * For each block that can be dynamically allocated, it can be either
+ * free, or pending/in-use by a client. The updates happen in three steps:
*
* 1) mdp5_smp_request():
* When plane scanout is setup, calculate required number of
* inuse and pending state of all clients..
*/
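For reference, the per-frame call sequence as seen from the plane code in the hunks above looks roughly like this (a sketch only; error handling and locking trimmed):

    /* step #1: on mode_set, reserve MMBs for the pipe: */
    ret = mdp5_smp_request(mdp5_kms->smp_priv, pipe, fb->pixel_format, src_w);
    if (ret)
            return ret;
    /* step #2: before FLUSH, program the ALLOC registers: */
    mdp5_smp_configure(mdp5_kms->smp_priv, pipe);
    /* step #3: after the next vblank, promote pending -> inuse: */
    mdp5_smp_commit(mdp5_kms->smp_priv, pipe);
    /* and on plane disable, give everything back: */
    mdp5_smp_release(mdp5_kms->smp_priv, pipe);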
-static DEFINE_SPINLOCK(smp_lock);
+struct mdp5_smp {
+ struct drm_device *dev;
+
+ int blk_cnt;
+ int blk_size;
+
+ spinlock_t state_lock;
+ mdp5_smp_state_t state; /* to track smp allocation amongst pipes: */
+
+ struct mdp5_client_smp_state client_state[CID_MAX];
+};
+
+static inline
+struct mdp5_kms *get_kms(struct mdp5_smp *smp)
+{
+ struct msm_drm_private *priv = smp->dev->dev_private;
+
+ return to_mdp5_kms(to_mdp_kms(priv->kms));
+}
+
+static inline enum mdp5_client_id pipe2client(enum mdp5_pipe pipe, int plane)
+{
+ WARN_ON(plane >= pipe2nclients(pipe));
+ switch (pipe) {
+ case SSPP_VIG0: return CID_VIG0_Y + plane;
+ case SSPP_VIG1: return CID_VIG1_Y + plane;
+ case SSPP_VIG2: return CID_VIG2_Y + plane;
+ case SSPP_RGB0: return CID_RGB0;
+ case SSPP_RGB1: return CID_RGB1;
+ case SSPP_RGB2: return CID_RGB2;
+ case SSPP_DMA0: return CID_DMA0_Y + plane;
+ case SSPP_DMA1: return CID_DMA1_Y + plane;
+ case SSPP_VIG3: return CID_VIG3_Y + plane;
+ case SSPP_RGB3: return CID_RGB3;
+ default: return CID_UNUSED;
+ }
+}
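For example (illustrative values only), a YUV-capable VIG pipe fans out to one SMP client per fetch plane, while an RGB pipe only ever has its single client:

    pipe2client(SSPP_VIG0, 0);   /* CID_VIG0_Y                              */
    pipe2client(SSPP_VIG0, 1);   /* CID_VIG0_Y + 1, the first chroma client */
    pipe2client(SSPP_RGB0, 0);   /* CID_RGB0                                */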
/* step #1: update # of blocks pending for the client: */
-int mdp5_smp_request(struct mdp5_kms *mdp5_kms,
+static int smp_request_block(struct mdp5_smp *smp,
enum mdp5_client_id cid, int nblks)
{
- struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid];
- int i, ret, avail, cur_nblks, cnt = mdp5_kms->smp_blk_cnt;
+ struct mdp5_kms *mdp5_kms = get_kms(smp);
+ struct mdp5_client_smp_state *ps = &smp->client_state[cid];
+ int i, ret, avail, cur_nblks, cnt = smp->blk_cnt;
+ int reserved = mdp5_kms->hw_cfg->smp.reserved[cid];
unsigned long flags;
- spin_lock_irqsave(&smp_lock, flags);
+ spin_lock_irqsave(&smp->state_lock, flags);
+
+ nblks -= reserved;
+ if (reserved)
+ DBG("%d MMBs allocated (%d reserved)", nblks, reserved);
- avail = cnt - bitmap_weight(mdp5_kms->smp_state, cnt);
+ avail = cnt - bitmap_weight(smp->state, cnt);
if (nblks > avail) {
+ dev_err(mdp5_kms->dev->dev, "out of blks (req=%d > avail=%d)\n",
+ nblks, avail);
ret = -ENOSPC;
goto fail;
}
if (nblks > cur_nblks) {
/* grow the existing pending reservation: */
for (i = cur_nblks; i < nblks; i++) {
- int blk = find_first_zero_bit(mdp5_kms->smp_state, cnt);
+ int blk = find_first_zero_bit(smp->state, cnt);
set_bit(blk, ps->pending);
- set_bit(blk, mdp5_kms->smp_state);
+ set_bit(blk, smp->state);
}
} else {
/* shrink the existing pending reservation: */
}
+ ret = 0;
fail:
- spin_unlock_irqrestore(&smp_lock, flags);
+ spin_unlock_irqrestore(&smp->state_lock, flags);
+ return ret;
+}
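To make the reserved-MMB handling concrete (numbers are illustrative): if the hw config reserves 2 MMBs for a client and the caller asks for 6, only 6 - 2 = 4 blocks are taken from the shared pool here; the 2 statically tied MMBs never show up as free in smp->state because mdp5_smp_init() below seeds the state bitmap from cfg->reserved_state.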
+
+static void set_fifo_thresholds(struct mdp5_smp *smp,
+ enum mdp5_pipe pipe, int nblks)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(smp);
+ u32 smp_entries_per_blk = smp->blk_size / (128 / BITS_PER_BYTE);
+ u32 val;
+
+ /* 1/4 of SMP pool that is being fetched */
+ val = (nblks * smp_entries_per_blk) / 4;
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe), val * 1);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe), val * 2);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe), val * 3);
+}
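Worked example with illustrative numbers: with 4096-byte MMBs and 128-bit (16-byte) requests, smp_entries_per_blk = 4096 / 16 = 256; a pipe that was granted 4 MMBs gets val = (4 * 256) / 4 = 256, so the three REQPRIO watermarks are programmed to 256, 512 and 768 entries respectively.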
+
+/*
+ * NOTE: looks like if horizontal decimation is used (if we supported that)
+ * then the width used to calculate SMP block requirements is the post-
+ * decimated width. Ie. SMP buffering sits downstream of decimation (which
+ * presumably happens during the dma from scanout buffer).
+ */
+int mdp5_smp_request(void *handler, enum mdp5_pipe pipe, u32 fmt, u32 width)
+{
+ struct mdp5_smp *smp = handler;
+ struct mdp5_kms *mdp5_kms = get_kms(smp);
+ struct drm_device *dev = mdp5_kms->dev;
+ int i, hsub, nplanes, nlines, nblks, ret;
+
+ nplanes = drm_format_num_planes(fmt);
+ hsub = drm_format_horz_chroma_subsampling(fmt);
+
+ /* different if BWC (compressed framebuffer?) enabled: */
+ nlines = 2;
+
+ for (i = 0, nblks = 0; i < nplanes; i++) {
+ int n, fetch_stride, cpp;
+
+ cpp = drm_format_plane_cpp(fmt, i);
+ fetch_stride = width * cpp / (i ? hsub : 1);
+
+ n = DIV_ROUND_UP(fetch_stride * nlines, smp->blk_size);
+
+ /* for hw rev v1.00 */
+ if (mdp5_kms->rev == 0)
+ n = roundup_pow_of_two(n);
+
+ DBG("%s[%d]: request %d SMP blocks", pipe2name(pipe), i, n);
+ ret = smp_request_block(smp, pipe2client(pipe, i), n);
+ if (ret) {
+ dev_err(dev->dev, "Cannot allocate %d SMP blocks: %d\n",
+ n, ret);
+ return ret;
+ }
+
+ nblks += n;
+ }
+
+ set_fifo_thresholds(smp, pipe, nblks);
+
return 0;
}
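As a sanity check of the sizing math (illustrative numbers): a single-plane XRGB8888 scanout 1920 pixels wide with 4096-byte MMBs gives cpp = 4, fetch_stride = 1920 * 4 = 7680 bytes, so n = DIV_ROUND_UP(7680 * 2, 4096) = 4 MMBs for the pipe; on hw rev v1.00, roundup_pow_of_two(4) leaves it at 4.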
-static void update_smp_state(struct mdp5_kms *mdp5_kms,
+/* Release SMP blocks for all clients of the pipe */
+void mdp5_smp_release(void *handler, enum mdp5_pipe pipe)
+{
+ struct mdp5_smp *smp = handler;
+ int i;
+
+ for (i = 0; i < pipe2nclients(pipe); i++)
+ smp_request_block(smp, pipe2client(pipe, i), 0);
+ set_fifo_thresholds(smp, pipe, 0);
+}
+
+static void update_smp_state(struct mdp5_smp *smp,
enum mdp5_client_id cid, mdp5_smp_state_t *assigned)
{
- int cnt = mdp5_kms->smp_blk_cnt;
- uint32_t blk, val;
+ struct mdp5_kms *mdp5_kms = get_kms(smp);
+ int cnt = smp->blk_cnt;
+ u32 blk, val;
for_each_set_bit(blk, *assigned, cnt) {
int idx = blk / 3;
}
/* step #2: configure hw for union(pending, inuse): */
-void mdp5_smp_configure(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid)
+void mdp5_smp_configure(void *handler, enum mdp5_pipe pipe)
{
- struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid];
- int cnt = mdp5_kms->smp_blk_cnt;
+ struct mdp5_smp *smp = handler;
+ int cnt = smp->blk_cnt;
mdp5_smp_state_t assigned;
+ int i;
+
+ for (i = 0; i < pipe2nclients(pipe); i++) {
+ enum mdp5_client_id cid = pipe2client(pipe, i);
+ struct mdp5_client_smp_state *ps = &smp->client_state[cid];
- bitmap_or(assigned, ps->inuse, ps->pending, cnt);
- update_smp_state(mdp5_kms, cid, &assigned);
+ bitmap_or(assigned, ps->inuse, ps->pending, cnt);
+ update_smp_state(smp, cid, &assigned);
+ }
}
/* step #3: after vblank, copy pending -> inuse: */
-void mdp5_smp_commit(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid)
+void mdp5_smp_commit(void *handler, enum mdp5_pipe pipe)
{
- struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid];
- int cnt = mdp5_kms->smp_blk_cnt;
+ struct mdp5_smp *smp = handler;
+ int cnt = smp->blk_cnt;
mdp5_smp_state_t released;
+ int i;
+
+ for (i = 0; i < pipe2nclients(pipe); i++) {
+ enum mdp5_client_id cid = pipe2client(pipe, i);
+ struct mdp5_client_smp_state *ps = &smp->client_state[cid];
+
+ /*
+ * Figure out if there are any blocks we were previously
+ * using, which can be released and made available to other
+ * clients:
+ */
+ if (bitmap_andnot(released, ps->inuse, ps->pending, cnt)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&smp->state_lock, flags);
+ /* clear released blocks: */
+ bitmap_andnot(smp->state, smp->state, released, cnt);
+ spin_unlock_irqrestore(&smp->state_lock, flags);
+
+ update_smp_state(smp, CID_UNUSED, &released);
+ }
- /*
- * Figure out if there are any blocks we where previously
- * using, which can be released and made available to other
- * clients:
- */
- if (bitmap_andnot(released, ps->inuse, ps->pending, cnt)) {
- unsigned long flags;
-
- spin_lock_irqsave(&smp_lock, flags);
- /* clear released blocks: */
- bitmap_andnot(mdp5_kms->smp_state, mdp5_kms->smp_state,
- released, cnt);
- spin_unlock_irqrestore(&smp_lock, flags);
-
- update_smp_state(mdp5_kms, CID_UNUSED, &released);
+ bitmap_copy(ps->inuse, ps->pending, cnt);
}
+}
+
+void mdp5_smp_destroy(void *handler)
+{
+ struct mdp5_smp *smp = handler;
+
+ kfree(smp);
+}
+
+void *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_block *cfg)
+{
+ struct mdp5_smp *smp = NULL;
+ int ret;
+
+ smp = kzalloc(sizeof(*smp), GFP_KERNEL);
+ if (unlikely(!smp)) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ smp->dev = dev;
+ smp->blk_cnt = cfg->mmb_count;
+ smp->blk_size = cfg->mmb_size;
+
+ /* statically tied MMBs cannot be re-allocated: */
+ bitmap_copy(smp->state, cfg->reserved_state, smp->blk_cnt);
+ spin_lock_init(&smp->state_lock);
+
+ return smp;
+fail:
+ if (smp)
+ mdp5_smp_destroy(smp);
- bitmap_copy(ps->inuse, ps->pending, cnt);
+ return ERR_PTR(ret);
}
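A sketch of how the kms setup code is expected to wire this up (hw_cfg->smp is assumed to be the struct mdp5_smp_block for the SoC, matching its use in smp_request_block() above; the error label is illustrative):

    /* in mdp5_kms_init(), roughly: */
    mdp5_kms->smp_priv = mdp5_smp_init(dev, &mdp5_kms->hw_cfg->smp);
    if (IS_ERR(mdp5_kms->smp_priv)) {
            ret = PTR_ERR(mdp5_kms->smp_priv);
            mdp5_kms->smp_priv = NULL;
            goto fail;
    }
    /* ... */
    /* and in the kms destroy path: */
    if (mdp5_kms->smp_priv)
            mdp5_smp_destroy(mdp5_kms->smp_priv);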