struct s5p_mfc_raw_info *raw;
unsigned int index = vb->index;
struct s5p_mfc_buf *buf = vb_to_mfc_buf(vb);
- struct dma_buf *dmabuf[MFC_MAX_PLANES];
+ struct dma_buf *bufcon_dmabuf[MFC_MAX_PLANES];
int i, mem_get_count = 0;
size_t buf_size;
}
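+ /* Resolve each plane fd to its dma-buf; in batch mode the fd can be a dma-buf container that bundles several frames. */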
for (i = 0; i < ctx->src_fmt->mem_planes; i++) {
- dmabuf[i] = dma_buf_get(vb->planes[i].m.fd);
- if (IS_ERR(dmabuf[i])) {
+ bufcon_dmabuf[i] = dma_buf_get(vb->planes[i].m.fd);
+ if (IS_ERR(bufcon_dmabuf[i])) {
mfc_err_ctx("failed to get bufcon dmabuf\n");
goto err_mem_put;
}
mem_get_count++;
- buf->num_bufs_in_vb = s5p_mfc_bufcon_get_buf_count(dmabuf[i]);
- mfc_debug(3, "bufcon count:%d\n", buf->num_bufs_in_vb);
- if (buf->num_bufs_in_vb == 0) {
+ buf->num_bufs_in_batch = s5p_mfc_bufcon_get_buf_count(bufcon_dmabuf[i]);
+ mfc_debug(3, "bufcon count:%d\n", buf->num_bufs_in_batch);
+ if (buf->num_bufs_in_batch == 0) {
mfc_err_ctx("bufcon count couldn't be zero\n");
goto err_mem_put;
}
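+ /* A negative count is assumed to mean the fd is not a buffer container; clamp it to 0 so the single-buffer path below runs. */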
- if (buf->num_bufs_in_vb < 0)
- buf->num_bufs_in_vb = 0;
+ if (buf->num_bufs_in_batch < 0)
+ buf->num_bufs_in_batch = 0;
- if (!ctx->batch_mode && buf->num_bufs_in_vb > 0) {
+ if (!ctx->batch_mode && buf->num_bufs_in_batch > 0) {
ctx->batch_mode = 1;
- mfc_debug(3, "buffer batch mode enabled\n");
+ mfc_debug(3, "buffer batch mode enable\n");
}
- if (buf->num_bufs_in_vb > 0) {
- int count = 0;
+ if (buf->num_bufs_in_batch > 0) {
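+ /* Map the container's buffers first: s5p_mfc_bufcon_get_daddr() also sets num_bufs_in_vb to the number of valid buffers, which the framerate estimate below depends on. */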
+ if (s5p_mfc_bufcon_get_daddr(ctx, buf, bufcon_dmabuf[i], i)) {
+ mfc_err_ctx("failed to get daddr[%d] in buffer container\n", i);
+ goto err_mem_put;
+ }
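+ /* One batch buffer carries num_bufs_in_vb frames, so the effective framerate is that many times the default capture rate. */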
ctx->framerate = buf->num_bufs_in_vb * ENC_DEFAULT_CAM_CAPTURE_FPS;
mfc_debug(3, "framerate: %ld\n", ctx->framerate);
- count = s5p_mfc_bufcon_get_daddr(ctx, buf, dmabuf[i], i);
- if (count != buf->num_bufs_in_vb) {
- mfc_err_ctx("invalid buffer count %d != num_bufs_in_vb %d\n",
- count, buf->num_bufs_in_vb);
- goto err_mem_put;
- }
-
- dma_buf_put(dmabuf[i]);
+ dma_buf_put(bufcon_dmabuf[i]);
} else {
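+ /* Plain dma-buf (no container): use the plane's base device address directly. */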
dma_addr_t start_raw;
- dma_buf_put(dmabuf[i]);
+ dma_buf_put(bufcon_dmabuf[i]);
start_raw = s5p_mfc_mem_get_daddr_vb(vb, 0);
if (start_raw == 0) {
mfc_err_ctx("Plane mem not allocated\n");
return -ENOMEM;
}

err_mem_put:
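+ /* Error unwind: drop every dma-buf reference acquired in this call. */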
for (i = 0; i < mem_get_count; i++)
- dma_buf_put(dmabuf[i]);
+ dma_buf_put(bufcon_dmabuf[i]);
return -ENOMEM;
}
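+ /* Collect the device address of each valid buffer of one plane in a dma-buf container. */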
int s5p_mfc_bufcon_get_daddr(struct s5p_mfc_ctx *ctx, struct s5p_mfc_buf *mfc_buf,
struct dma_buf *bufcon_dmabuf, int plane)
{
struct s5p_mfc_dev *dev = ctx->dev;
struct s5p_mfc_raw_info *raw = &ctx->raw_buf;
- int i;
+ int i, j = 0;
+ u32 mask;
+
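+ /* The container's mask flags which slots actually hold valid buffers; unmasked slots are skipped. */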
+ if (dmabuf_container_get_mask(bufcon_dmabuf, &mask)) {
+ mfc_err_ctx("it is not a buffer container\n");
+ return -1;
+ }
+
+ mfc_debug(3, "bufcon mask info %#x\n", mask);
- for (i = 0; i < mfc_buf->num_bufs_in_vb; i++) {
- mfc_buf->dmabufs[i][plane] = dmabuf_container_get_buffer(bufcon_dmabuf, i);
+ for (i = 0; i < mfc_buf->num_bufs_in_batch; i++) {
+ if ((mask & (1 << i)) == 0) {
+ mfc_debug(3, "unmasked buf[%d]\n", i);
+ continue;
+ }
+
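+ /* i walks every container slot, while j packs the valid buffers contiguously. */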
+ mfc_buf->dmabufs[j][plane] = dmabuf_container_get_buffer(bufcon_dmabuf, i);
- if (IS_ERR(mfc_buf->dmabufs[i][plane])) {
+ if (IS_ERR(mfc_buf->dmabufs[j][plane])) {
mfc_err_ctx("Failed to get dma_buf (err %ld)",
- PTR_ERR(mfc_buf->dmabufs[i][plane]));
+ PTR_ERR(mfc_buf->dmabufs[j][plane]));
goto err_get_daddr;
}
- mfc_buf->attachments[i][plane] = dma_buf_attach(mfc_buf->dmabufs[i][plane], dev->device);
+ mfc_buf->attachments[j][plane] = dma_buf_attach(mfc_buf->dmabufs[j][plane], dev->device);
- if (IS_ERR(mfc_buf->attachments[i][plane])) {
+ if (IS_ERR(mfc_buf->attachments[j][plane])) {
mfc_err_ctx("Failed to get dma_buf_attach (err %ld)",
- PTR_ERR(mfc_buf->attachments[i][plane]));
+ PTR_ERR(mfc_buf->attachments[j][plane]));
goto err_get_daddr;
}
- mfc_buf->addr[i][plane] = ion_iovmm_map(mfc_buf->attachments[i][plane], 0,
+ mfc_buf->addr[j][plane] = ion_iovmm_map(mfc_buf->attachments[j][plane], 0,
raw->plane_size[plane], DMA_BIDIRECTIONAL, 0);
- if (IS_ERR_VALUE(mfc_buf->addr[i][plane])) {
+ if (IS_ERR_VALUE(mfc_buf->addr[j][plane])) {
mfc_err_ctx("Failed to allocate iova (err %pa)",
- &mfc_buf->addr[i][plane]);
+ &mfc_buf->addr[j][plane]);
goto err_get_daddr;
}
mfc_debug(4, "get batch buf addr[%d][%d]: 0x%08llx, size: %d\n",
- i, plane, mfc_buf->addr[i][plane], raw->plane_size[plane]);
+ j, plane, mfc_buf->addr[j][plane], raw->plane_size[plane]);
+ j++;
}
- return i;
+ mfc_buf->num_bufs_in_vb = j;
+ mfc_debug(3, "batch buffer has %d buffers\n", mfc_buf->num_bufs_in_vb);
+
+ return 0;
err_get_daddr:
s5p_mfc_bufcon_put_daddr(ctx, mfc_buf, plane);