amvdec_ports-objs += decoder/vdec_h264_if.o
amvdec_ports-objs += decoder/vdec_hevc_if.o
amvdec_ports-objs += decoder/vdec_vp9_if.o
-amvdec_ports-objs += decoder/h264_parse.o
-amvdec_ports-objs += decoder/h264_stream.o
+amvdec_ports-objs += decoder/vdec_mpeg12_if.o
+amvdec_ports-objs += decoder/vdec_mpeg4_if.o
+amvdec_ports-objs += decoder/vdec_mjpeg_if.o
+amvdec_ports-objs += decoder/aml_h264_parser.o
+amvdec_ports-objs += decoder/aml_hevc_parser.o
+amvdec_ports-objs += decoder/aml_vp9_parser.o
+amvdec_ports-objs += decoder/aml_mpeg12_parser.o
+amvdec_ports-objs += decoder/aml_mpeg4_parser.o
+amvdec_ports-objs += decoder/aml_mjpeg_parser.o
+amvdec_ports-objs += utils/golomb.o
+amvdec_ports-objs += utils/common.o
#define PTS_OUTSIDE (1)
#define SYNC_OUTSIDE (2)
#define USE_V4L_PORTS (0x80)
+#define SCATTER_MEM (0x100)
-#define DATA_DEBUG
+//#define DATA_DEBUG
static int def_4k_vstreambuf_sizeM =
(DEFAULT_VIDEO_BUFFER_SIZE_4K >> 20);
static void set_default_params(struct aml_vdec_adapt *vdec)
{
- unsigned long sync_mode = (PTS_OUTSIDE | SYNC_OUTSIDE | USE_V4L_PORTS);
+ ulong sync_mode = (PTS_OUTSIDE | SYNC_OUTSIDE | USE_V4L_PORTS);
+
+ sync_mode |= vdec->ctx->scatter_mem_enable ? SCATTER_MEM : 0;
vdec->dec_prop.param = (void *)sync_mode;
vdec->dec_prop.format = vdec->format;
vdec->dec_prop.width = 1920;
{
vdec->sys_info = &ada_ctx->dec_prop;
vdec->port = &ada_ctx->port;
- vdec->format = ada_ctx->dec_prop.format;
+ vdec->format = ada_ctx->video_type;
vdec->sys_info_store = ada_ctx->dec_prop;
vdec->vf_receiver_name = ada_ctx->recv_name;
}
int vdec_vframe_write(struct aml_vdec_adapt *ada_ctx,
- const char *buf, unsigned int count, unsigned long int timestamp)
+ const char *buf, unsigned int count, u64 timestamp)
{
int ret = -1;
- int try_cnt = 100;
struct vdec_s *vdec = ada_ctx->vdec;
/* set timestamp */
vdec_set_timestamp(vdec, timestamp);
- do {
- ret = vdec_write_vframe(vdec, buf, count);
- if (ret == -EAGAIN) {
- /*vdec_input_level(&vdec->input);*/
- msleep(30);
- }
- } while (ret == -EAGAIN && try_cnt--);
+ ret = vdec_write_vframe(vdec, buf, count);
if (slow_input) {
pr_info("slow_input: frame codec write size %d\n", ret);
/* dump to file */
dump_write(buf, count);
#endif
- aml_v4l2_debug(2, "[%d] write frames, vbuf: %p, size: %u, ret: %d, crc: %x",
+ aml_v4l2_debug(4, "[%d] write frames, vbuf: %p, size: %u, ret: %d, crc: %x",
ada_ctx->ctx->id, buf, count, ret, crc32_le(0, buf, count));
return ret;
struct vdec_s *vdec;
struct stream_port_s port;
struct dec_sysinfo dec_prop;
+ int video_type;
char *recv_name;
int vfm_path;
};
const char *buf, unsigned int count);
int vdec_vframe_write(struct aml_vdec_adapt *ada_ctx,
- const char *buf, unsigned int count, unsigned long int timestamp);
+ const char *buf, unsigned int count, u64 timestamp);
int is_need_to_buf(struct aml_vdec_adapt *ada_ctx);
#include <linux/atomic.h>
#include <linux/crc32.h>
#include "aml_vcodec_adapt.h"
+#include <linux/spinlock.h>
#include "aml_vcodec_vfm.h"
#define OUT_FMT_IDX 0 //default h264
-#define CAP_FMT_IDX 3 //capture nv21
+#define CAP_FMT_IDX 8 //capture nv21
#define AML_VDEC_MIN_W 64U
#define AML_VDEC_MIN_H 64U
.type = AML_FMT_DEC,
.num_planes = 1,
},
+ {
+ .fourcc = V4L2_PIX_FMT_MPEG1,
+ .type = AML_FMT_DEC,
+ .num_planes = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_MPEG2,
+ .type = AML_FMT_DEC,
+ .num_planes = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_MPEG4,
+ .type = AML_FMT_DEC,
+ .num_planes = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_MJPEG,
+ .type = AML_FMT_DEC,
+ .num_planes = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_NV21,
+ .type = AML_FMT_FRAME,
+ .num_planes = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_NV21M,
+ .type = AML_FMT_FRAME,
+ .num_planes = 2,
+ },
{
.fourcc = V4L2_PIX_FMT_NV12,
.type = AML_FMT_FRAME,
+ .num_planes = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_NV12M,
+ .type = AML_FMT_FRAME,
.num_planes = 2,
},
};
.stepwise = { AML_VDEC_MIN_W, AML_VDEC_MAX_W, 8,
AML_VDEC_MIN_H, AML_VDEC_MAX_H, 8 },
},
+ {
+ .fourcc = V4L2_PIX_FMT_MPEG1,
+ .stepwise = { AML_VDEC_MIN_W, AML_VDEC_MAX_W, 8,
+ AML_VDEC_MIN_H, AML_VDEC_MAX_H, 8 },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_MPEG2,
+ .stepwise = { AML_VDEC_MIN_W, AML_VDEC_MAX_W, 8,
+ AML_VDEC_MIN_H, AML_VDEC_MAX_H, 8 },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_MPEG4,
+ .stepwise = { AML_VDEC_MIN_W, AML_VDEC_MAX_W, 8,
+ AML_VDEC_MIN_H, AML_VDEC_MAX_H, 8 },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_MJPEG,
+ .stepwise = { AML_VDEC_MIN_W, AML_VDEC_MAX_W, 8,
+ AML_VDEC_MIN_H, AML_VDEC_MAX_H, 8 },
+ },
};
#define NUM_SUPPORTED_FRAMESIZE ARRAY_SIZE(aml_vdec_framesizes)
#define NUM_FORMATS ARRAY_SIZE(aml_video_formats)
+extern bool multiplanar;
+
+/* Take ctx->slock with IRQs disabled; returns the saved IRQ flags
+ * which the caller must pass back to aml_vcodec_ctx_unlock(). */
+static ulong aml_vcodec_ctx_lock(struct aml_vcodec_ctx *ctx)
+{
+ ulong flags;
+
+ spin_lock_irqsave(&ctx->slock, flags);
+
+ return flags;
+}
+
+/* Release ctx->slock, restoring the IRQ flags saved by aml_vcodec_ctx_lock(). */
+static void aml_vcodec_ctx_unlock(struct aml_vcodec_ctx *ctx, ulong flags)
+{
+ spin_unlock_irqrestore(&ctx->slock, flags);
+}
+
static struct aml_video_fmt *aml_vdec_find_format(struct v4l2_format *f)
{
struct aml_video_fmt *fmt;
unsigned int k;
+ aml_v4l2_debug(4, "%s, type: %u, planes: %u, fmt: %u\n",
+ __func__, f->type, f->fmt.pix_mp.num_planes,
+ f->fmt.pix_mp.pixelformat);
+
for (k = 0; k < NUM_FORMATS; k++) {
fmt = &aml_video_formats[k];
if (fmt->fourcc == f->fmt.pix_mp.pixelformat)
ctx->picinfo = ctx->last_decoded_picinfo;
}
-int get_fb_from_queue(struct aml_vcodec_ctx *ctx, struct vdec_fb **out_fb)
+int get_fb_from_queue(struct aml_vcodec_ctx *ctx, struct vdec_v4l2_buffer **out_fb)
{
+ ulong flags;
struct vb2_buffer *dst_buf = NULL;
- struct vdec_fb *pfb;
+ struct vdec_v4l2_buffer *pfb;
struct aml_video_dec_buf *dst_buf_info, *info;
struct vb2_v4l2_buffer *dst_vb2_v4l2;
- int try_cnt = 100;
-
- aml_v4l2_debug(4, "[%d] %s() [%d]", ctx->id, __func__, __LINE__);
- do {
- dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
- if (dst_buf)
- break;
- aml_v4l2_debug(3, "[%d] waitting enough dst buffers.", ctx->id);
- msleep(30);
- } while (try_cnt--);
+ flags = aml_vcodec_ctx_lock(ctx);
- if (!dst_buf)
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+ if (!dst_buf) {
+ aml_vcodec_ctx_unlock(ctx, flags);
return -1;
+ }
+
+ aml_v4l2_debug(2, "[%d] %s() vbuf idx: %d, state: %d, ready: %d",
+ ctx->id, __func__, dst_buf->index, dst_buf->state,
+ v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx));
dst_vb2_v4l2 = container_of(dst_buf, struct vb2_v4l2_buffer, vb2_buf);
dst_buf_info = container_of(dst_vb2_v4l2, struct aml_video_dec_buf, vb);
- pfb = &dst_buf_info->frame_buffer;
- //pfb->base_y.va = vb2_plane_vaddr(dst_buf, 0);
- pfb->base_y.dma_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
- pfb->base_y.va = phys_to_virt(dma_to_phys(v4l_get_dev_from_codec_mm(), pfb->base_y.dma_addr));
- pfb->base_y.size = ctx->picinfo.y_len_sz;
-
- //pfb->base_c.va = vb2_plane_vaddr(dst_buf, 1);
- pfb->base_c.dma_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 1);
- pfb->base_c.va = phys_to_virt(dma_to_phys(v4l_get_dev_from_codec_mm(), pfb->base_c.dma_addr));
- pfb->base_c.size = ctx->picinfo.c_len_sz;
- pfb->status = FB_ST_NORMAL;
-
- aml_v4l2_debug(4, "[%d] id=%d Framebuf pfb=%p VA=%p Y_DMA=%pad C_DMA=%pad y_size=%zx, c_size: %zx",
- ctx->id, dst_buf->index, pfb, pfb->base_y.va, &pfb->base_y.dma_addr,
- &pfb->base_c.dma_addr, pfb->base_y.size, pfb->base_c.size);
+ if (ctx->scatter_mem_enable) {
+ /* scatter memory: no contiguous plane addresses to fill in. */
+ pfb = &dst_buf_info->frame_buffer;
+ pfb->mem_type = VDEC_SCATTER_MEMORY_TYPE;
+ pfb->status = FB_ST_NORMAL;
+ } else if (dst_buf->num_planes == 1) {
+ pfb = &dst_buf_info->frame_buffer;
+ pfb->m.mem[0].dma_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+ pfb->m.mem[0].addr = dma_to_phys(v4l_get_dev_from_codec_mm(), pfb->m.mem[0].dma_addr);
+ pfb->m.mem[0].size = ctx->picinfo.y_len_sz + ctx->picinfo.c_len_sz;
+ pfb->m.mem[0].offset = ctx->picinfo.y_len_sz;
+ pfb->num_planes = dst_buf->num_planes;
+ pfb->status = FB_ST_NORMAL;
+
+ aml_v4l2_debug(4, "[%d] idx: %u, 1 plane, y:(0x%lx, %d)",
+ ctx->id, dst_buf->index,
+ pfb->m.mem[0].addr, pfb->m.mem[0].size);
+ } else if (dst_buf->num_planes == 2) {
+ pfb = &dst_buf_info->frame_buffer;
+ pfb->m.mem[0].dma_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+ pfb->m.mem[0].addr = dma_to_phys(v4l_get_dev_from_codec_mm(), pfb->m.mem[0].dma_addr);
+ pfb->m.mem[0].size = ctx->picinfo.y_len_sz;
+ pfb->m.mem[0].offset = 0;
+
+ pfb->m.mem[1].dma_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 1);
+ pfb->m.mem[1].addr = dma_to_phys(v4l_get_dev_from_codec_mm(), pfb->m.mem[1].dma_addr);
+ pfb->m.mem[1].size = ctx->picinfo.c_len_sz;
+ pfb->m.mem[1].offset = ctx->picinfo.c_len_sz >> 1;
+ pfb->num_planes = dst_buf->num_planes;
+ pfb->status = FB_ST_NORMAL;
+
+ aml_v4l2_debug(4, "[%d] idx: %u, 2 planes, y:(0x%lx, %d), c:(0x%lx, %d)",
+ ctx->id, dst_buf->index,
+ pfb->m.mem[0].addr, pfb->m.mem[0].size,
+ pfb->m.mem[1].addr, pfb->m.mem[1].size);
+ } else {
+ pfb = &dst_buf_info->frame_buffer;
+ pfb->m.mem[0].dma_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+ pfb->m.mem[0].addr = dma_to_phys(v4l_get_dev_from_codec_mm(), pfb->m.mem[0].dma_addr);
+ pfb->m.mem[0].size = ctx->picinfo.y_len_sz;
+ pfb->m.mem[0].offset = 0;
+
+ /* each plane must translate its OWN dma_addr; the previous code
+ * read mem[2]/mem[3].dma_addr here (copy-paste error) — mem[2] is
+ * not yet assigned at this point and mem[3] is never assigned. */
+ pfb->m.mem[1].dma_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 1);
+ pfb->m.mem[1].addr = dma_to_phys(v4l_get_dev_from_codec_mm(), pfb->m.mem[1].dma_addr);
+ pfb->m.mem[1].size = ctx->picinfo.c_len_sz >> 1;
+ pfb->m.mem[1].offset = 0;
+
+ pfb->m.mem[2].dma_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 2);
+ pfb->m.mem[2].addr = dma_to_phys(v4l_get_dev_from_codec_mm(), pfb->m.mem[2].dma_addr);
+ pfb->m.mem[2].size = ctx->picinfo.c_len_sz >> 1;
+ pfb->m.mem[2].offset = 0;
+ pfb->num_planes = dst_buf->num_planes;
+ pfb->status = FB_ST_NORMAL;
+
+ aml_v4l2_debug(4, "[%d] idx: %u, 3 planes, y:(0x%lx, %d), u:(0x%lx, %d), v:(0x%lx, %d)",
+ ctx->id, dst_buf->index,
+ pfb->m.mem[0].addr, pfb->m.mem[0].size,
+ pfb->m.mem[1].addr, pfb->m.mem[1].size,
+ pfb->m.mem[2].addr, pfb->m.mem[2].size);
+ }
dst_buf_info->used = true;
-
- v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+ ctx->buf_used_count++;
*out_fb = pfb;
info = container_of(pfb, struct aml_video_dec_buf, frame_buffer);
+ v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+
+ aml_vcodec_ctx_unlock(ctx, flags);
+
return 0;
}
EXPORT_SYMBOL(get_fb_from_queue);
-int put_fb_to_queue(struct aml_vcodec_ctx *ctx, struct vdec_fb *in_fb)
+int put_fb_to_queue(struct aml_vcodec_ctx *ctx, struct vdec_v4l2_buffer *in_fb)
{
struct aml_video_dec_buf *dstbuf;
}
EXPORT_SYMBOL(put_fb_to_queue);
-void trans_vframe_to_user(struct aml_vcodec_ctx *ctx, struct vdec_fb *fb)
+void trans_vframe_to_user(struct aml_vcodec_ctx *ctx, struct vdec_v4l2_buffer *fb)
{
struct aml_video_dec_buf *dstbuf = NULL;
struct vframe_s *vf = (struct vframe_s *)fb->vf_handle;
- aml_v4l2_debug(2, "[%d] FROM (%s %s) vf: %p, ts: %llx, idx: %d",
+ aml_v4l2_debug(3, "[%d] FROM (%s %s) vf: %p, ts: %llx, idx: %d",
ctx->id, vf_get_provider(ctx->ada_ctx->recv_name)->name,
ctx->ada_ctx->vfm_path != FRAME_BASE_PATH_V4L_VIDEO ? "OSD" : "VIDEO",
vf, vf->timestamp, vf->index);
+ aml_v4l2_debug(4, "[%d] FROM Y:(%lx, %u) C/U:(%lx, %u) V:(%lx, %u)",
+ ctx->id, fb->m.mem[0].addr, fb->m.mem[0].size,
+ fb->m.mem[1].addr, fb->m.mem[1].size,
+ fb->m.mem[2].addr, fb->m.mem[2].size);
+
dstbuf = container_of(fb, struct aml_video_dec_buf, frame_buffer);
- vb2_set_plane_payload(&dstbuf->vb.vb2_buf, 0, fb->base_y.bytes_used);
- vb2_set_plane_payload(&dstbuf->vb.vb2_buf, 1, fb->base_c.bytes_used);
+ if (dstbuf->frame_buffer.num_planes == 1) {
+ vb2_set_plane_payload(&dstbuf->vb.vb2_buf, 0, fb->m.mem[0].bytes_used);
+ } else if (dstbuf->frame_buffer.num_planes == 2) {
+ vb2_set_plane_payload(&dstbuf->vb.vb2_buf, 0, fb->m.mem[0].bytes_used);
+ vb2_set_plane_payload(&dstbuf->vb.vb2_buf, 1, fb->m.mem[1].bytes_used);
+ }
dstbuf->vb.vb2_buf.timestamp = vf->timestamp;
dstbuf->ready_to_display = true;
if (vf->flag & VFRAME_FLAG_EMPTY_FRAME_V4L) {
dstbuf->lastframe = true;
dstbuf->vb.flags = V4L2_BUF_FLAG_LAST;
- vb2_set_plane_payload(&dstbuf->vb.vb2_buf, 0, 0);
- vb2_set_plane_payload(&dstbuf->vb.vb2_buf, 1, 0);
+ if (dstbuf->frame_buffer.num_planes == 1) {
+ vb2_set_plane_payload(&dstbuf->vb.vb2_buf, 0, 0);
+ } else if (dstbuf->frame_buffer.num_planes == 2) {
+ vb2_set_plane_payload(&dstbuf->vb.vb2_buf, 0, 0);
+ vb2_set_plane_payload(&dstbuf->vb.vb2_buf, 1, 0);
+ }
ctx->has_receive_eos = true;
- pr_info("[%d] recevie a empty frame.\n", ctx->id);
+ pr_info("[%d] receive an empty frame. idx: %d, state: %d\n",
+ ctx->id, dstbuf->vb.vb2_buf.index,
+ dstbuf->vb.vb2_buf.state);
}
- v4l2_m2m_buf_done(&dstbuf->vb, VB2_BUF_STATE_DONE);
+ aml_v4l2_debug(4, "[%d] receive vbuf idx: %d, state: %d",
+ ctx->id, dstbuf->vb.vb2_buf.index,
+ dstbuf->vb.vb2_buf.state);
+
+ if (dstbuf->vb.vb2_buf.state == VB2_BUF_STATE_ACTIVE) {
+ /* binding vframe handle. */
+ if (fb->mem_type == VDEC_SCATTER_MEMORY_TYPE)
+ dstbuf->vb.private = fb->m.vf_fd;
+
+ v4l2_m2m_buf_done(&dstbuf->vb, VB2_BUF_STATE_DONE);
+ }
mutex_lock(&ctx->state_lock);
if (ctx->state == AML_STATE_FLUSHING &&
ctx->decoded_frame_cnt++;
}
-static int get_display_buffer(struct aml_vcodec_ctx *ctx, struct vdec_fb **out)
+static int get_display_buffer(struct aml_vcodec_ctx *ctx, struct vdec_v4l2_buffer **out)
{
int ret = -1;
/* is there enough dst bufs for decoding? */
buf_ready_num = v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx);
- if (buf_ready_num < ctx->dpb_size) {
- aml_v4l2_debug(4, "[%d] Not enough dst bufs, num: %d.\n",
+ if ((buf_ready_num + ctx->buf_used_count) < ctx->dpb_size) {
+ aml_v4l2_debug(4, "[%d] Not enough dst bufs, num: %d.",
ctx->id, buf_ready_num);
v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
v4l2_m2m_try_schedule(ctx->m2m_ctx);
mutex_lock(&ctx->state_lock);
if (ctx->state == AML_STATE_PROBE) {
ctx->state = AML_STATE_READY;
+ ctx->v4l_codec_ready = true;
+ wake_up_interruptible(&ctx->wq);
aml_v4l2_debug(1, "[%d] %s() vcodec state (AML_STATE_READY)",
ctx->id, __func__);
}
mutex_lock(&ctx->state_lock);
if (ctx->state == AML_STATE_READY) {
- ctx->state = AML_STATE_ACTIVE;
- aml_v4l2_debug(1, "[%d] %s() vcodec state (AML_STATE_ACTIVE)",
- ctx->id, __func__);
+ if (ctx->m2m_ctx->out_q_ctx.q.streaming &&
+ ctx->m2m_ctx->cap_q_ctx.q.streaming) {
+ ctx->state = AML_STATE_ACTIVE;
+ aml_v4l2_debug(1, "[%d] %s() vcodec state (AML_STATE_ACTIVE)",
+ ctx->id, __func__);
+ }
}
mutex_unlock(&ctx->state_lock);
aml_v4l2_debug(4, "[%d] entry [%d] [%s]", ctx->id, __LINE__, __func__);
- aml_vdec_lock(ctx);
-
if (ctx->state < AML_STATE_INIT ||
ctx->state > AML_STATE_FLUSHED) {
v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
goto out;
}
- buf.va = vb2_plane_vaddr(src_buf, 0);
+ buf.vaddr = vb2_plane_vaddr(src_buf, 0);
buf.dma_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
- //buf.va = phys_to_virt(dma_to_phys(v4l_get_dev_from_codec_mm(), buf.dma_addr));
- buf.size = (size_t)src_buf->planes[0].bytesused;
- if (!buf.va) {
+ buf.size = src_buf->planes[0].bytesused;
+ if (!buf.vaddr) {
v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
aml_v4l2_err("[%d] id=%d src_addr is NULL!!",
ctx->id, src_buf->index);
goto out;
}
- aml_v4l2_debug(4, "[%d] Bitstream VA=%p DMA=%pad Size=%zx vb=%p",
- ctx->id, buf.va, &buf.dma_addr, buf.size, src_buf);
src_buf_info->used = true;
v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
out:
- aml_vdec_unlock(ctx);
+ return;
}
-static void aml_reset_worker(struct work_struct *work)
+void wait_vcodec_ending(struct aml_vcodec_ctx *ctx)
{
- struct aml_vcodec_ctx *ctx =
- container_of(work, struct aml_vcodec_ctx, reset_work);
- //struct aml_video_dec_buf *buf = NULL;
- int try_cnt = 10;
+ struct aml_vcodec_dev *dev = ctx->dev;
- aml_vdec_lock(ctx);
+ /* pause inject output data to vdec. */
+ v4l2_m2m_job_pause(dev->m2m_dev_dec, ctx->m2m_ctx);
- if (ctx->state == AML_STATE_ABORT) {
- pr_err("[%d] %s() the decoder will be exited.\n",
- ctx->id, __func__);
- goto out;
- }
-
- while (!v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx)) {
- pr_err("[%d] %s() src data is not ready.\n",
- ctx->id, __func__);
+ /* flush worker. */
+ flush_workqueue(dev->decode_workqueue);
- if (try_cnt--) {
- msleep(30);
- continue;
- }
+ /* wait reset worker ending. */
+ if (ctx->state == AML_STATE_RESET) {
+ wait_for_completion_timeout(&ctx->comp,
+ msecs_to_jiffies(200));
+ }
+}
- ctx->state = AML_STATE_ABORT;
- aml_v4l2_debug(1, "[%d] %s() vcodec state (AML_STATE_ABORT)",
+static void aml_vdec_reset(struct aml_vcodec_ctx *ctx)
+{
+ if (ctx->state == AML_STATE_ABORT) {
+ pr_err("[%d] %s() the decoder will be exited.\n",
ctx->id, __func__);
goto out;
}
-#if 0
- /*
- *fast enque capture buffers, but need to realloc
- *bufs if the resolution change event was hanped.
- */
- if (!ctx->q_data[AML_Q_DATA_SRC].resolution_changed) {
- list_for_each_entry(buf, &ctx->capture_list, node) {
- buf->que_in_m2m = true;
- buf->queued_in_vb2 = true;
- buf->queued_in_v4l2 = true;
- buf->ready_to_display = false;
- buf->frame_buffer.status = FB_ST_NORMAL;
- v4l2_m2m_buf_queue(ctx->m2m_ctx, &buf->vb);
- pr_err("que buf idx: %d\n", buf->vb.vb2_buf.index);
- }
- }
-#endif
if (aml_codec_reset(ctx->ada_ctx)) {
ctx->state = AML_STATE_ABORT;
aml_v4l2_debug(1, "[%d] %s() vcodec state (AML_STATE_ABORT)",
goto out;
}
- mutex_lock(&ctx->state_lock);
if (ctx->state == AML_STATE_RESET) {
- ctx->state = AML_STATE_READY;
- aml_v4l2_debug(1, "[%d] %s() vcodec state (AML_STATE_READY)",
+ ctx->state = AML_STATE_PROBE;
+ aml_v4l2_debug(1, "[%d] %s() vcodec state (AML_STATE_PROBE)",
ctx->id, __func__);
/* vdec has ready to decode subsequence data of new resolution. */
ctx->q_data[AML_Q_DATA_SRC].resolution_changed = false;
v4l2_m2m_job_resume(ctx->dev->m2m_dev_dec, ctx->m2m_ctx);
}
- mutex_unlock(&ctx->state_lock);
out:
- aml_vdec_unlock(ctx);
+ complete(&ctx->comp);
+ return;
}
void try_to_capture(struct aml_vcodec_ctx *ctx)
{
int ret = 0;
- struct vdec_fb *fb = NULL;
+ struct vdec_v4l2_buffer *fb = NULL;
ret = get_display_buffer(ctx, &fb);
if (ret) {
static int vdec_thread(void *data)
{
+ struct sched_param param =
+ {.sched_priority = MAX_RT_PRIO / 2};
struct aml_vdec_thread *thread =
(struct aml_vdec_thread *) data;
struct aml_vcodec_ctx *ctx =
(struct aml_vcodec_ctx *) thread->priv;
+ sched_setscheduler(current, SCHED_FIFO, ¶m);
+
for (;;) {
aml_v4l2_debug(3, "[%d] %s() state: %d", ctx->id,
__func__, ctx->state);
default:
return -EINVAL;
}
+
return 0;
}
aml_v4l2_debug(3, "[%d] %s() [%d], cmd: %u",
ctx->id, __func__, __LINE__, cmd->cmd);
dst_vq = v4l2_m2m_get_vq(ctx->m2m_ctx,
- V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ multiplanar ? V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :
+ V4L2_BUF_TYPE_VIDEO_CAPTURE);
switch (cmd->cmd) {
case V4L2_DEC_CMD_STOP:
v4l2_m2m_buf_queue(ctx->m2m_ctx, &ctx->empty_flush_buf->vb);
v4l2_m2m_try_schedule(ctx->m2m_ctx);//pay attention
+ /* remark cmd use to distinguish stop or seek. */
+ ctx->receive_cmd_stop = true;
+
break;
case V4L2_DEC_CMD_START:
{
struct v4l2_fh *fh = file->private_data;
struct aml_vcodec_ctx *ctx = fh_to_ctx(fh);
- struct aml_vcodec_dev *dev = ctx->dev;
struct vb2_queue *q;
int ret = 0;
q = v4l2_m2m_get_vq(fh->m2m_ctx, i);
- mutex_lock(&ctx->state_lock);
+ if (!V4L2_TYPE_IS_OUTPUT(q->type)) {
+ mutex_lock(&ctx->state_lock);
- if (ctx->state == AML_STATE_FLUSHED) {
- if (!V4L2_TYPE_IS_OUTPUT(q->type)) {
- ctx->state = AML_STATE_RESET;
- aml_v4l2_debug(1, "[%d] %s() vcodec state (AML_STATE_RESET)",
- ctx->id, __func__);
- queue_work(dev->reset_workqueue, &ctx->reset_work);
+ /*
+ * If the received cmd is not V4L2_DEC_CMD_STOP, the request is a
+ * seek rather than a stop; thus ignore the AML_STATE_FLUSHING and
+ * AML_STATE_FLUSHED states.
+ */
+
+ if (ctx->state == AML_STATE_FLUSHED ||
+ ctx->state == AML_STATE_FLUSHING || //????
+ (ctx->state == AML_STATE_ACTIVE &&
+ !ctx->receive_cmd_stop)) {
+ if (!V4L2_TYPE_IS_OUTPUT(q->type)) {
+ /* A short source can be rushed into the flushing state and
+ then switch to the abort state, which makes the seek fail. */
+ if (ctx->state == AML_STATE_FLUSHING)
+ ctx->receive_cmd_stop = false;
+
+ ctx->state = AML_STATE_RESET;
+ ctx->v4l_codec_ready = false;
+
+ aml_v4l2_debug(1, "[%d] %s() vcodec state (AML_STATE_RESET)",
+ ctx->id, __func__);
+ aml_vdec_reset(ctx);
+ }
}
+ mutex_unlock(&ctx->state_lock);
}
- mutex_unlock(&ctx->state_lock);
-
return ret;
}
return v4l2_m2m_ioctl_reqbufs(file, priv, rb);
}
-void aml_vdec_unlock(struct aml_vcodec_ctx *ctx)
-{
- mutex_unlock(&ctx->dev->dec_mutex);
-}
-
-void aml_vdec_lock(struct aml_vcodec_ctx *ctx)
-{
- mutex_lock(&ctx->dev->dec_mutex);
-}
-
void aml_vcodec_dec_release(struct aml_vcodec_ctx *ctx)
{
- aml_vdec_lock(ctx);
-
ctx->state = AML_STATE_ABORT;
+ ctx->v4l_codec_ready = false;
aml_v4l2_debug(1, "[%d] %s() vcodec state (AML_STATE_ABORT)",
ctx->id, __func__);
vdec_if_deinit(ctx);
-
- aml_vdec_unlock(ctx);
}
void aml_vcodec_dec_set_default_params(struct aml_vcodec_ctx *ctx)
ctx->fh.m2m_ctx = ctx->m2m_ctx;
ctx->fh.ctrl_handler = &ctx->ctrl_hdl;
INIT_WORK(&ctx->decode_work, aml_vdec_worker);
- INIT_WORK(&ctx->reset_work, aml_reset_worker);
ctx->colorspace = V4L2_COLORSPACE_REC709;
ctx->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
ctx->quantization = V4L2_QUANTIZATION_DEFAULT;
ctx->xfer_func = V4L2_XFER_FUNC_DEFAULT;
- ctx->dev->dec_capability = VCODEC_CAPABILITY_4K_DISABLED;//disable 4k
+ ctx->dev->dec_capability = 0;//VCODEC_CAPABILITY_4K_DISABLED;//disable 4k
q_data = &ctx->q_data[AML_Q_DATA_SRC];
memset(q_data, 0, sizeof(struct aml_q_data));
if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
pix_fmt_mp->num_planes = 1;
pix_fmt_mp->plane_fmt[0].bytesperline = 0;
- } else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ } else if (!V4L2_TYPE_IS_OUTPUT(f->type)) {
int tmp_w, tmp_h;
pix_fmt_mp->height = clamp(pix_fmt_mp->height,
aml_v4l2_err("[%d] out_q_ctx buffers already requested", ctx->id);
}
- if ((f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) &&
+ if ((!V4L2_TYPE_IS_OUTPUT(f->type)) &&
vb2_is_busy(&ctx->m2m_ctx->cap_q_ctx.q)) {
aml_v4l2_err("[%d] cap_q_ctx buffers already requested", ctx->id);
}
f->fmt.pix.pixelformat =
aml_video_formats[OUT_FMT_IDX].fourcc;
fmt = aml_vdec_find_format(f);
- } else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ } else if (!V4L2_TYPE_IS_OUTPUT(f->type)) {
f->fmt.pix.pixelformat =
aml_video_formats[CAP_FMT_IDX].fourcc;
fmt = aml_vdec_find_format(f);
pix_mp->quantization = ctx->quantization;
pix_mp->xfer_func = ctx->xfer_func;
- if ((f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) &&
+ if ((!V4L2_TYPE_IS_OUTPUT(f->type)) &&
(ctx->state >= AML_STATE_PROBE)) {
/* Until STREAMOFF is called on the CAPTURE queue
* (acknowledging the event), the driver operates as if
aml_v4l2_debug(4, "[%d] type=%d state=%d Format information could not be read, not ready yet!",
ctx->id, f->type, ctx->state);
+ return -EINVAL;
}
return 0;
* check if this buffer is ready to be used after decode
*/
if (!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
- aml_v4l2_debug(3, "[%d] %s() [%d], y_va: %p, vf_h: %lx, state: %d", ctx->id,
- __func__, __LINE__, buf->frame_buffer.base_y.va,
+ aml_v4l2_debug(3, "[%d] %s() [%d], y_addr: %lx, vf_h: %lx, state: %d", ctx->id,
+ __func__, __LINE__, buf->frame_buffer.m.mem[0].addr,
buf->frame_buffer.vf_handle, buf->frame_buffer.status);
if (!buf->que_in_m2m && buf->frame_buffer.status == FB_ST_NORMAL) {
- aml_v4l2_debug(3, "[%d] enque capture buf idx %d, %p\n",
+ aml_v4l2_debug(2, "[%d] enque capture buf idx %d, %p",
ctx->id, vb->index, vb);
v4l2_m2m_buf_queue(ctx->m2m_ctx, vb2_v4l2);
buf->que_in_m2m = true;
buf->queued_in_vb2 = true;
buf->queued_in_v4l2 = true;
buf->ready_to_display = false;
-
- /*save capture bufs to be used for resetting config.*/
- list_add(&buf->node, &ctx->capture_list);
} else if (buf->frame_buffer.status == FB_ST_DISPLAY) {
buf->queued_in_vb2 = false;
buf->queued_in_v4l2 = true;
return;
}
- src_mem.va = vb2_plane_vaddr(vb, 0);
+ src_mem.vaddr = vb2_plane_vaddr(vb, 0);
src_mem.dma_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
- src_mem.size = (size_t)vb->planes[0].bytesused;
- if (vdec_if_probe(ctx, &src_mem, NULL))
+ src_mem.size = vb->planes[0].bytesused;
+ if (vdec_if_probe(ctx, &src_mem, NULL)) {
+ v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ v4l2_m2m_buf_done(to_vb2_v4l2_buffer(vb), VB2_BUF_STATE_DONE);
return;
+ }
if (vdec_if_get_param(ctx, GET_PARAM_PIC_INFO, &ctx->picinfo)) {
pr_err("[%d] GET_PARAM_PICTURE_INFO err\n", ctx->id);
return;
}
+ if (!dpb)
+ return;
+
ctx->dpb_size = dpb;
ctx->last_decoded_picinfo = ctx->picinfo;
aml_vdec_queue_res_chg_event(ctx);
vb2_v4l2 = container_of(vb, struct vb2_v4l2_buffer, vb2_buf);
buf = container_of(vb2_v4l2, struct aml_video_dec_buf, vb);
- //mutex_lock(&ctx->lock);
- if (vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ if (!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
buf->queued_in_v4l2 = false;
buf->queued_in_vb2 = false;
}
buf_error = buf->error;
- //mutex_unlock(&ctx->lock);
if (buf_error) {
aml_v4l2_err("[%d] Unrecoverable error on buffer.", ctx->id);
aml_v4l2_debug(4, "[%d] (%d) id=%d",
ctx->id, vb->vb2_queue->type, vb->index);
- if (vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ if (!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
buf->used = false;
buf->ready_to_display = false;
buf->queued_in_v4l2 = false;
aml_v4l2_debug(3, "[%d] IN alloc, addr: %x, size: %u, idx: %u",
ctx->id, phy_addr, size, vb->index);
} else {
- size = vb->planes[0].length;
- phy_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
snprintf(owner, PATH_MAX, "%s-%d", "v4l-output", ctx->id);
strncpy(buf->mem_onwer, owner, sizeof(buf->mem_onwer));
buf->mem_onwer[sizeof(buf->mem_onwer) - 1] = '\0';
- buf->mem[0] = v4l_reqbufs_from_codec_mm(buf->mem_onwer,
- phy_addr, size, vb->index);
- aml_v4l2_debug(3, "[%d] OUT Y alloc, addr: %x, size: %u, idx: %u",
- ctx->id, phy_addr, size, vb->index);
-
- size = vb->planes[1].length;
- phy_addr = vb2_dma_contig_plane_dma_addr(vb, 1);
- buf->mem[1] = v4l_reqbufs_from_codec_mm(buf->mem_onwer,
+ if ((vb->memory == VB2_MEMORY_MMAP) && (vb->num_planes == 1)) {
+ size = vb->planes[0].length;
+ phy_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+ buf->mem[0] = v4l_reqbufs_from_codec_mm(buf->mem_onwer,
phy_addr, size, vb->index);
- aml_v4l2_debug(3, "[%d] OUT C alloc, addr: %x, size: %u, idx: %u",
- ctx->id, phy_addr, size, vb->index);
+ aml_v4l2_debug(3, "[%d] OUT Y alloc, addr: %x, size: %u, idx: %u",
+ ctx->id, phy_addr, size, vb->index);
+ } else if ((vb->memory == VB2_MEMORY_MMAP) && (vb->num_planes == 2)) {
+ size = vb->planes[0].length;
+ phy_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+ buf->mem[0] = v4l_reqbufs_from_codec_mm(buf->mem_onwer,
+ phy_addr, size, vb->index);
+ aml_v4l2_debug(3, "[%d] OUT Y alloc, addr: %x, size: %u, idx: %u",
+ ctx->id, phy_addr, size, vb->index);
+
+ size = vb->planes[1].length;
+ phy_addr = vb2_dma_contig_plane_dma_addr(vb, 1);
+ buf->mem[1] = v4l_reqbufs_from_codec_mm(buf->mem_onwer,
+ phy_addr, size, vb->index);
+ aml_v4l2_debug(3, "[%d] OUT C alloc, addr: %x, size: %u, idx: %u",
+ ctx->id, phy_addr, size, vb->index);
+ }
}
__putname(owner);
continue;
}
- v4l_freebufs_back_to_codec_mm(buf->mem_onwer, buf->mem[0]);
- v4l_freebufs_back_to_codec_mm(buf->mem_onwer, buf->mem[1]);
+ if (q->memory == VB2_MEMORY_MMAP) {
+ v4l_freebufs_back_to_codec_mm(buf->mem_onwer, buf->mem[0]);
+ v4l_freebufs_back_to_codec_mm(buf->mem_onwer, buf->mem[1]);
- aml_v4l2_debug(3, "[%d] OUT Y clean, addr: %lx, size: %u, idx: %u",
- ctx->id, buf->mem[0]->phy_addr, buf->mem[0]->buffer_size, i);
- aml_v4l2_debug(3, "[%d] OUT C clean, addr: %lx, size: %u, idx: %u",
- ctx->id, buf->mem[1]->phy_addr, buf->mem[1]->buffer_size, i);
- buf->mem[0] = NULL;
- buf->mem[1] = NULL;
+ aml_v4l2_debug(3, "[%d] OUT Y clean, addr: %lx, size: %u, idx: %u",
+ ctx->id, buf->mem[0]->phy_addr, buf->mem[0]->buffer_size, i);
+ aml_v4l2_debug(3, "[%d] OUT C clean, addr: %lx, size: %u, idx: %u",
+ ctx->id, buf->mem[1]->phy_addr, buf->mem[1]->buffer_size, i);
+ buf->mem[0] = NULL;
+ buf->mem[1] = NULL;
+ }
}
}
codec_mm_bufs_cnt_clean(q);
if (V4L2_TYPE_IS_OUTPUT(q->type)) {
- while (v4l2_m2m_src_buf_remove(ctx->m2m_ctx));
- return;
- }
+ while ((vb2_v4l2 = v4l2_m2m_src_buf_remove(ctx->m2m_ctx)))
+ v4l2_m2m_buf_done(vb2_v4l2, VB2_BUF_STATE_ERROR);
+ } else {
+ while ((vb2_v4l2 = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx)))
+ v4l2_m2m_buf_done(vb2_v4l2, VB2_BUF_STATE_ERROR);
- while (v4l2_m2m_dst_buf_remove(ctx->m2m_ctx));
+ for (i = 0; i < q->num_buffers; ++i) {
+ vb2_v4l2 = to_vb2_v4l2_buffer(q->bufs[i]);
+ buf = container_of(vb2_v4l2, struct aml_video_dec_buf, vb);
+ buf->frame_buffer.status = FB_ST_NORMAL;
+ buf->que_in_m2m = false;
- for (i = 0; i < q->num_buffers; ++i) {
- if (q->bufs[i]->state == VB2_BUF_STATE_ACTIVE) {
- q->bufs[i]->state = VB2_BUF_STATE_DEQUEUED;
- atomic_dec(&q->owned_by_drv_count);
+ /*pr_info("idx: %d, state: %d\n",
+ q->bufs[i]->index, q->bufs[i]->state);*/
}
- vb2_v4l2 = to_vb2_v4l2_buffer(q->bufs[i]);
- buf = container_of(vb2_v4l2, struct aml_video_dec_buf, vb);
- buf->frame_buffer.status = FB_ST_NORMAL;
- }
- while (!list_empty(&ctx->capture_list)) {
- buf = list_entry(ctx->capture_list.next,
- struct aml_video_dec_buf, node);
- list_del(&buf->node);
+ ctx->buf_used_count = 0;
}
}
} else {
pr_err("Seqinfo not ready.\n");
ctrl->val = 0;
+ ret = -EINVAL;
}
break;
default:
};
const struct v4l2_ioctl_ops aml_vdec_ioctl_ops = {
- .vidioc_streamon = vidioc_decoder_streamon,
- .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
- .vidioc_reqbufs = vidioc_decoder_reqbufs,
- .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
- .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,//??
- //.vidioc_g_ctrl = vidioc_vdec_g_ctrl,
+ .vidioc_streamon = vidioc_decoder_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+ .vidioc_reqbufs = vidioc_decoder_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,//??
+ //.vidioc_g_ctrl = vidioc_vdec_g_ctrl,
- .vidioc_qbuf = vidioc_vdec_qbuf,
- .vidioc_dqbuf = vidioc_vdec_dqbuf,
+ .vidioc_qbuf = vidioc_vdec_qbuf,
+ .vidioc_dqbuf = vidioc_vdec_dqbuf,
.vidioc_try_fmt_vid_cap_mplane = vidioc_try_fmt_vid_cap_mplane,
+ .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap_mplane,
.vidioc_try_fmt_vid_out_mplane = vidioc_try_fmt_vid_out_mplane,
+ .vidioc_try_fmt_vid_out = vidioc_try_fmt_vid_out_mplane,
.vidioc_s_fmt_vid_cap_mplane = vidioc_vdec_s_fmt,
+ .vidioc_s_fmt_vid_cap = vidioc_vdec_s_fmt,
.vidioc_s_fmt_vid_out_mplane = vidioc_vdec_s_fmt,
+ .vidioc_s_fmt_vid_out = vidioc_vdec_s_fmt,
.vidioc_g_fmt_vid_cap_mplane = vidioc_vdec_g_fmt,
+ .vidioc_g_fmt_vid_cap = vidioc_vdec_g_fmt,
.vidioc_g_fmt_vid_out_mplane = vidioc_vdec_g_fmt,
+ .vidioc_g_fmt_vid_out = vidioc_vdec_g_fmt,
.vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
.vidioc_enum_fmt_vid_cap_mplane = vidioc_vdec_enum_fmt_vid_cap_mplane,
+ .vidioc_enum_fmt_vid_cap = vidioc_vdec_enum_fmt_vid_cap_mplane,
.vidioc_enum_fmt_vid_out_mplane = vidioc_vdec_enum_fmt_vid_out_mplane,
+ .vidioc_enum_fmt_vid_out = vidioc_vdec_enum_fmt_vid_out_mplane,
.vidioc_enum_framesizes = vidioc_enum_framesizes,
.vidioc_querycap = vidioc_vdec_querycap,
src_vq->mem_ops = &vb2_dma_contig_memops;
src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
src_vq->lock = &ctx->dev->dev_mutex;
-
ret = vb2_queue_init(src_vq);
if (ret) {
aml_v4l2_err("[%d] Failed to initialize videobuf2 queue(output)", ctx->id);
return ret;
}
- dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+
+ dst_vq->type = multiplanar ? V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :
+ V4L2_BUF_TYPE_VIDEO_CAPTURE;
dst_vq->io_modes = VB2_DMABUF | VB2_MMAP | VB2_USERPTR;
dst_vq->drv_priv = ctx;
dst_vq->buf_struct_size = sizeof(struct aml_video_dec_buf);
dst_vq->mem_ops = &vb2_dma_contig_memops;
dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
dst_vq->lock = &ctx->dev->dev_mutex;
-
ret = vb2_queue_init(dst_vq);
if (ret) {
vb2_queue_release(src_vq);
#define AML_VDEC_IRQ_STATUS_DEC_SUCCESS 0x10000
#define V4L2_BUF_FLAG_LAST 0x00100000
+#define VDEC_GATHER_MEMORY_TYPE 0
+#define VDEC_SCATTER_MEMORY_TYPE 1
+
#define AML_V4L2_SET_DECMODE (V4L2_CID_USER_AMLOGIC_BASE + 0)
/**
- * struct vdec_fb - decoder frame buffer
+ * struct vdec_v4l2_buffer - decoder frame buffer
- * @base_y : Y plane memory info
- * @base_c : C plane memory info
+ * @mem_type : gather or scatter memory.
+ * @num_planes : used number of the plane
+ * @mem[4] : memory info for each used plane;
+ * mem[0]: Y, mem[1]: C/U, mem[2]: V
+ * @vf_fd : the file handle of video frame
+ * @vf_handle : video frame handle
* @status : frame buffer status (vdec_fb_status)
*/
-struct vdec_fb {
- unsigned long vf_handle;
- struct aml_vcodec_mem base_y;
- struct aml_vcodec_mem base_c;
- unsigned int status;
+
+struct vdec_v4l2_buffer {
+	int mem_type;	/* VDEC_GATHER_MEMORY_TYPE or VDEC_SCATTER_MEMORY_TYPE */
+	int num_planes;	/* number of planes actually used in m.mem[] */
+	union {
+		struct aml_vcodec_mem mem[4];	/* mem[0]: Y, mem[1]: C/U, mem[2]: V */
+		u32 vf_fd;			/* file handle of the video frame */
+	} m;
+	ulong vf_handle;	/* opaque vframe handle (cast to struct vframe_s *) */
+	u32 status;		/* frame buffer status (vdec_fb_status) */
+};
+
/**
* struct aml_video_dec_buf - Private data related to each VB2 buffer.
* @b: VB2 buffer
struct vb2_v4l2_buffer vb;
struct list_head list;
- struct vdec_fb frame_buffer;
+ struct vdec_v4l2_buffer frame_buffer;
struct codec_mm_s *mem[2];
char mem_onwer[32];
- struct list_head node;
bool used;
bool ready_to_display;
bool que_in_m2m;
bool error;
};
+/* Picture geometry: visible (display) vs coded dimensions, plus DPB size. */
+struct aml_vdec_pic_infos {
+	u32 visible_width;	/* displayed width */
+	u32 visible_height;	/* displayed height */
+	u32 coded_width;	/* width as coded in the bitstream */
+	u32 coded_height;	/* height as coded in the bitstream */
+	int dpb_size;		/* decoded picture buffer size, in frames */
+};
+
extern const struct v4l2_ioctl_ops aml_vdec_ioctl_ops;
extern const struct v4l2_m2m_ops aml_vdec_m2m_ops;
int aml_thread_start(struct aml_vcodec_ctx *ctx, aml_thread_func func,
enum aml_thread_type type, const char *thread_name);
void aml_thread_stop(struct aml_vcodec_ctx *ctx);
+void wait_vcodec_ending(struct aml_vcodec_ctx *ctx);
#endif /* _AML_VCODEC_DEC_H_ */
#include "aml_vcodec_drv.h"
#include "aml_vcodec_dec.h"
#include "aml_vcodec_dec_pm.h"
-//#include "aml_vcodec_intr.h"
#include "aml_vcodec_util.h"
#include "aml_vcodec_vfm.h"
-#define VDEC_HW_ACTIVE 0x10
-#define VDEC_IRQ_CFG 0x11
-#define VDEC_IRQ_CLR 0x10
+#define VDEC_HW_ACTIVE 0x10
+#define VDEC_IRQ_CFG 0x11
+#define VDEC_IRQ_CLR 0x10
#define VDEC_IRQ_CFG_REG 0xa4
+bool scatter_mem_enable;
+bool param_sets_from_ucode;
+
static int fops_vcodec_open(struct file *file)
{
struct aml_vcodec_dev *dev = video_drvdata(file);
init_waitqueue_head(&ctx->queue);
mutex_init(&ctx->state_lock);
mutex_init(&ctx->lock);
+ spin_lock_init(&ctx->slock);
init_waitqueue_head(&ctx->wq);
+ init_completion(&ctx->comp);
+
+ ctx->scatter_mem_enable = scatter_mem_enable ? 1 : 0;
+ ctx->param_sets_from_ucode = param_sets_from_ucode ? 1 : 0;
ctx->type = AML_INST_DECODER;
ret = aml_vcodec_dec_ctrls_setup(ctx);
* Second, the decoder will be flushed and all the buffers will be
* returned in stop_streaming.
*/
- aml_vcodec_dec_release(ctx);
+ aml_thread_stop(ctx);
+ wait_vcodec_ending(ctx);
v4l2_m2m_ctx_release(ctx->m2m_ctx);
+ aml_vcodec_dec_release(ctx);
v4l2_fh_del(&ctx->fh);
v4l2_fh_exit(&ctx->fh);
v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
- aml_thread_stop(ctx);
-
list_del_init(&ctx->list);
kfree(ctx->empty_flush_buf);
kfree(ctx);
goto err_event_workq;
}
- dev->reset_workqueue =
- alloc_ordered_workqueue("aml-vcodec-reset",
- WQ_MEM_RECLAIM | WQ_FREEZABLE);
- if (!dev->reset_workqueue) {
- aml_v4l2_err("Failed to create decode workqueue");
- ret = -EINVAL;
- destroy_workqueue(dev->decode_workqueue);
- goto err_event_workq;
- }
-
//dev_set_name(&vdev->dev, "%s%d", name_base, vdev->num);
ret = video_register_device(vfd_dec, VFL_TYPE_GRABBER, 26);
return 0;
err_dec_reg:
- destroy_workqueue(dev->reset_workqueue);
destroy_workqueue(dev->decode_workqueue);
err_event_workq:
v4l2_m2m_release(dev->m2m_dev_dec);
{
struct aml_vcodec_dev *dev = platform_get_drvdata(pdev);
- flush_workqueue(dev->reset_workqueue);
- destroy_workqueue(dev->reset_workqueue);
-
flush_workqueue(dev->decode_workqueue);
destroy_workqueue(dev->decode_workqueue);
bool aml_set_vfm_enable;
EXPORT_SYMBOL(aml_set_vfm_enable);
+module_param(aml_set_vfm_enable, bool, 0644);
int aml_set_vfm_path;
EXPORT_SYMBOL(aml_set_vfm_path);
+module_param(aml_set_vfm_path, int, 0644);
bool aml_set_vdec_type_enable;
EXPORT_SYMBOL(aml_set_vdec_type_enable);
+module_param(aml_set_vdec_type_enable, bool, 0644);
int aml_set_vdec_type;
EXPORT_SYMBOL(aml_set_vdec_type);
-
-module_param(aml_set_vdec_type_enable, bool, 0644);
module_param(aml_set_vdec_type, int, 0644);
-module_param(aml_set_vfm_enable, bool, 0644);
-module_param(aml_set_vfm_path, int, 0644);
+
+int vp9_need_prefix;
+EXPORT_SYMBOL(vp9_need_prefix);
+module_param(vp9_need_prefix, int, 0644);
+
+bool multiplanar;
+EXPORT_SYMBOL(multiplanar);
+module_param(multiplanar, bool, 0644);
+
+EXPORT_SYMBOL(scatter_mem_enable);
+module_param(scatter_mem_enable, bool, 0644);
+
+EXPORT_SYMBOL(param_sets_from_ucode);
+module_param(param_sets_from_ucode, bool, 0644);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("AML video codec V4L2 decoder driver");
+
struct v4l2_ctrl_handler ctrl_hdl;
struct work_struct decode_work;
struct work_struct encode_work;
- struct work_struct reset_work;
struct vdec_pic_info last_decoded_picinfo;
struct aml_video_dec_buf *empty_flush_buf;
int decoded_frame_cnt;
struct mutex lock;
- wait_queue_head_t wq;
+ struct completion comp;
bool has_receive_eos;
struct list_head capture_list;
struct list_head vdec_thread_list;
bool is_drm_mode;
bool is_stream_mode;
+ int buf_used_count;
+ bool receive_cmd_stop;
+ bool scatter_mem_enable;
+ bool param_sets_from_ucode;
+ bool v4l_codec_ready;
+ wait_queue_head_t wq;
+ spinlock_t slock;
};
/**
struct workqueue_struct *decode_workqueue;
struct workqueue_struct *encode_workqueue;
- struct workqueue_struct *reset_workqueue;
int int_cond;
int int_type;
struct mutex dev_mutex;
struct aml_vcodec_ctx *ctx = (struct aml_vcodec_ctx *)data;
struct device *dev = &ctx->dev->plat_dev->dev;
- //mem->va = dma_alloc_coherent(dev, size, &mem->dma_addr, GFP_KERNEL);
- mem->va = codec_mm_dma_alloc_coherent(dev_name(dev), size,
+ //mem->vaddr = dma_alloc_coherent(dev, size, &mem->dma_addr, GFP_KERNEL);
+ mem->vaddr = codec_mm_dma_alloc_coherent(dev_name(dev), size,
&mem->dma_addr, GFP_KERNEL, 0);
- if (!mem->va) {
+ if (!mem->vaddr) {
aml_v4l2_err("%s dma_alloc size=%ld failed!", dev_name(dev),
size);
return -ENOMEM;
}
- memset(mem->va, 0, size);
+ memset(mem->vaddr, 0, size);
- aml_v4l2_debug(4, "[%d] - va = %p", ctx->id, mem->va);
+ aml_v4l2_debug(4, "[%d] - va = %p", ctx->id, mem->vaddr);
aml_v4l2_debug(4, "[%d] - dma = 0x%lx", ctx->id,
(unsigned long)mem->dma_addr);
aml_v4l2_debug(4, "[%d] size = 0x%lx", ctx->id, size);
struct aml_vcodec_ctx *ctx = (struct aml_vcodec_ctx *)data;
struct device *dev = &ctx->dev->plat_dev->dev;
- if (!mem->va) {
+ if (!mem->vaddr) {
aml_v4l2_err("%s dma_free size=%ld failed!", dev_name(dev),
size);
return;
}
- aml_v4l2_debug(4, "[%d] - va = %p", ctx->id, mem->va);
+ aml_v4l2_debug(4, "[%d] - va = %p", ctx->id, mem->vaddr);
aml_v4l2_debug(4, "[%d] - dma = 0x%lx", ctx->id,
(unsigned long)mem->dma_addr);
aml_v4l2_debug(4, "[%d] size = 0x%lx", ctx->id, size);
- dma_free_coherent(dev, size, mem->va, mem->dma_addr);
- mem->va = NULL;
+ dma_free_coherent(dev, size, mem->vaddr, mem->dma_addr);
+ mem->vaddr = NULL;
mem->dma_addr = 0;
mem->size = 0;
}
(((u8)(a) << 24) | ((u8)(b) << 16) | ((u8)(c) << 8) | (u8)(d))
struct aml_vcodec_mem {
- size_t size;
- void *va;
+ ulong addr;
+ u32 size;
+ void *vaddr;
+ u32 bytes_used;
+ u32 offset;
dma_addr_t dma_addr;
- unsigned int bytes_used;
};
struct aml_vcodec_ctx;
return 0;
}
+/* Return a decoded video frame to its vframe provider once the V4L2
+ * consumer is done with it; the frame is actually released only when
+ * its use count drops to zero.
+ */
-void video_vf_put(char *receiver, struct vdec_fb *fb, int id)
+void video_vf_put(char *receiver, struct vdec_v4l2_buffer *fb, int id)
 {
 	struct vframe_provider_s *vfp = vf_get_provider(receiver);
 	struct vframe_s *vf = (struct vframe_s *)fb->vf_handle;
-	aml_v4l2_debug(2, "[%d] TO (%s) vf: %p, idx: %d",
+	aml_v4l2_debug(3, "[%d] TO (%s) vf: %p, idx: %d",
 		id, vfp->name, vf, vf->index);
+	aml_v4l2_debug(4, "[%d] TO Y:(%lx, %u) C/U:(%lx, %u) V:(%lx, %u)",
+		id, fb->m.mem[0].addr, fb->m.mem[0].size,
+		fb->m.mem[1].addr, fb->m.mem[1].size,
+		fb->m.mem[2].addr, fb->m.mem[2].size);
+
+	/* NOTE(review): vfp->name is dereferenced in the debug print above
+	 * before the NULL check below — confirm vf_get_provider() cannot
+	 * return NULL on this path.
+	 */
 	if (vfp && vf && atomic_dec_and_test(&vf->use_cnt))
 		vf_put(vf, receiver);
 }
struct vframe_s *get_video_frame(struct vcodec_vfm_s *vfm);
-int get_fb_from_queue(struct aml_vcodec_ctx *ctx, struct vdec_fb **out_fb);
-int put_fb_to_queue(struct aml_vcodec_ctx *ctx, struct vdec_fb *in_fb);
+int get_fb_from_queue(struct aml_vcodec_ctx *ctx, struct vdec_v4l2_buffer **out_fb);
+int put_fb_to_queue(struct aml_vcodec_ctx *ctx, struct vdec_v4l2_buffer *in_fb);
-void video_vf_put(char *receiver, struct vdec_fb *fb, int id);
+void video_vf_put(char *receiver, struct vdec_v4l2_buffer *fb, int id);
#endif /* __AML_VCODEC_VFM_H_ */
--- /dev/null
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+
+#include "aml_h264_parser.h"
+#include "../utils/get_bits.h"
+#include "../utils/put_bits.h"
+#include "../utils/golomb.h"
+#include "../utils/common.h"
+#include "utils.h"
+
+#define MAX_DELAYED_PIC_COUNT (16)
+#define MAX_LOG2_MAX_FRAME_NUM (12 + 4)
+#define MIN_LOG2_MAX_FRAME_NUM (4)
+#define MAX_SPS_COUNT (32)
+#define EXTENDED_SAR (255)
+
+/* Sample aspect ratios indexed by aspect_ratio_idc (H.264 Table E-1);
+ * index 0 means "unspecified".
+ */
+static const struct rational h264_pixel_aspect[17] = {
+	{   0,  1 },
+	{   1,  1 },
+	{  12, 11 },
+	{  10, 11 },
+	{  16, 11 },
+	{  40, 33 },
+	{  24, 11 },
+	{  20, 11 },
+	{  32, 11 },
+	{  80, 33 },
+	{  18, 11 },
+	{  15, 11 },
+	{  64, 33 },
+	{ 160, 99 },
+	{   4,  3 },
+	{   3,  2 },
+	{   2,  1 },
+};
+
+/* maximum number of MBs in the DPB for a given level */
+/* each entry: { level_idc, MaxDpbMbs } (H.264 Table A-1) */
+static const int level_max_dpb_mbs[][2] = {
+	{ 10, 396       },
+	{ 11, 900       },
+	{ 12, 2376      },
+	{ 13, 2376      },
+	{ 20, 2376      },
+	{ 21, 4752      },
+	{ 22, 8100      },
+	{ 30, 8100      },
+	{ 31, 18000     },
+	{ 32, 20480     },
+	{ 40, 32768     },
+	{ 41, 32768     },
+	{ 42, 34816     },
+	{ 50, 110400    },
+	{ 51, 184320    },
+	{ 52, 184320    },
+};
+
+/* JVT default 4x4 scaling lists (H.264 Table 7-3): [0] intra, [1] inter */
+static const u8 default_scaling4[2][16] = {
+	{  6, 13, 20, 28, 13, 20, 28, 32,
+	  20, 28, 32, 37, 28, 32, 37, 42},
+	{ 10, 14, 20, 24, 14, 20, 24, 27,
+	  20, 24, 27, 30, 24, 27, 30, 34 }
+};
+
+/* JVT default 8x8 scaling lists (H.264 Table 7-4): [0] intra, [1] inter */
+static const u8 default_scaling8[2][64] = {
+	{  6, 10, 13, 16, 18, 23, 25, 27,
+	  10, 11, 16, 18, 23, 25, 27, 29,
+	  13, 16, 18, 23, 25, 27, 29, 31,
+	  16, 18, 23, 25, 27, 29, 31, 33,
+	  18, 23, 25, 27, 29, 31, 33, 36,
+	  23, 25, 27, 29, 31, 33, 36, 38,
+	  25, 27, 29, 31, 33, 36, 38, 40,
+	  27, 29, 31, 33, 36, 38, 40, 42 },
+	{  9, 13, 15, 17, 19, 21, 22, 24,
+	  13, 13, 17, 19, 21, 22, 24, 25,
+	  15, 17, 19, 21, 22, 24, 25, 27,
+	  17, 19, 21, 22, 24, 25, 27, 28,
+	  19, 21, 22, 24, 25, 27, 28, 30,
+	  21, 22, 24, 25, 27, 28, 30, 32,
+	  22, 24, 25, 27, 28, 30, 32, 33,
+	  24, 25, 27, 28, 30, 32, 33, 35 }
+};
+
+extern const u8 ff_zigzag_scan[16 + 1];
+extern const u8 ff_zigzag_direct[64];
+
+/*
+ * Parse one scaling list from the bitstream into @factors.
+ * If the list is not coded, copy @fallback_list; if the very first
+ * delta makes the value zero, the JVT preset @jvt_list is used.
+ * Returns 0 on success, -1 on an out-of-range delta scale.
+ */
+static int decode_scaling_list(struct get_bits_context *gb,
+	u8 *factors, int size,
+	const u8 *jvt_list,
+	const u8 *fallback_list)
+{
+	const u8 *scan = (size == 16) ? ff_zigzag_scan : ff_zigzag_direct;
+	int idx, prev = 8, cur = 8;
+
+	if (!get_bits1(gb)) {
+		/* matrix not written: inherit the predicted one */
+		memcpy(factors, fallback_list, size * sizeof(u8));
+		return 0;
+	}
+
+	for (idx = 0; idx < size; idx++) {
+		if (cur) {
+			int delta = get_se_golomb(gb);
+
+			if (delta < -128 || delta > 127) {
+				pr_err("delta scale %d is invalid\n", delta);
+				return -1;
+			}
+			cur = (prev + delta) & 0xff;
+		}
+		if (!idx && !cur) {
+			/* first value is zero: switch to the JVT preset */
+			memcpy(factors, jvt_list, size * sizeof(u8));
+			break;
+		}
+		prev = factors[scan[idx]] = cur ? cur : prev;
+	}
+	return 0;
+}
+
+/* returns non zero if the provided SPS scaling matrix has been filled */
+static int decode_scaling_matrices(struct get_bits_context *gb,
+	const struct h264_SPS_t *sps,
+	const struct h264_PPS_t *pps, int is_sps,
+	u8(*scaling_matrix4)[16],
+	u8(*scaling_matrix8)[64])
+{
+	int ret = 0;
+	/* when parsing a PPS, fall back to the SPS matrices if the SPS
+	 * carried any; otherwise fall back to the JVT defaults
+	 */
+	int fallback_sps = !is_sps && sps->scaling_matrix_present;
+	const u8 *fallback[4] = {
+		fallback_sps ? sps->scaling_matrix4[0] : default_scaling4[0],
+		fallback_sps ? sps->scaling_matrix4[3] : default_scaling4[1],
+		fallback_sps ? sps->scaling_matrix8[0] : default_scaling8[0],
+		fallback_sps ? sps->scaling_matrix8[3] : default_scaling8[1]
+	};
+
+	/* seq/pic_scaling_matrix_present_flag; list order below is
+	 * mandated by the spec and must not be changed
+	 */
+	if (get_bits1(gb)) {
+		ret |= decode_scaling_list(gb, scaling_matrix4[0], 16, default_scaling4[0], fallback[0]); // Intra, Y
+		ret |= decode_scaling_list(gb, scaling_matrix4[1], 16, default_scaling4[0], scaling_matrix4[0]); // Intra, Cr
+		ret |= decode_scaling_list(gb, scaling_matrix4[2], 16, default_scaling4[0], scaling_matrix4[1]); // Intra, Cb
+		ret |= decode_scaling_list(gb, scaling_matrix4[3], 16, default_scaling4[1], fallback[1]); // Inter, Y
+		ret |= decode_scaling_list(gb, scaling_matrix4[4], 16, default_scaling4[1], scaling_matrix4[3]); // Inter, Cr
+		ret |= decode_scaling_list(gb, scaling_matrix4[5], 16, default_scaling4[1], scaling_matrix4[4]); // Inter, Cb
+		if (is_sps || pps->transform_8x8_mode) {
+			ret |= decode_scaling_list(gb, scaling_matrix8[0], 64, default_scaling8[0], fallback[2]); // Intra, Y
+			ret |= decode_scaling_list(gb, scaling_matrix8[3], 64, default_scaling8[1], fallback[3]); // Inter, Y
+			/* 4:4:4 streams carry separate chroma 8x8 lists */
+			if (sps->chroma_format_idc == 3) {
+				ret |= decode_scaling_list(gb, scaling_matrix8[1], 64, default_scaling8[0], scaling_matrix8[0]); // Intra, Cr
+				ret |= decode_scaling_list(gb, scaling_matrix8[4], 64, default_scaling8[1], scaling_matrix8[3]); // Inter, Cr
+				ret |= decode_scaling_list(gb, scaling_matrix8[2], 64, default_scaling8[0], scaling_matrix8[1]); // Intra, Cb
+				ret |= decode_scaling_list(gb, scaling_matrix8[5], 64, default_scaling8[1], scaling_matrix8[4]); // Inter, Cb
+			}
+		}
+		/* no list errored: report "filled" (1) only for an SPS */
+		if (!ret)
+			ret = is_sps;
+	}
+
+	return ret;
+}
+
+/*
+ * Parse HRD (hypothetical reference decoder) parameters into @sps.
+ * The per-CPB rate/size values are read and discarded; only the
+ * delay-length fields and the CPB count are kept.
+ * Returns 0 on success, -1 if the CPB count is out of range.
+ */
+static int decode_hrd_parameters(struct get_bits_context *gb,
+	struct h264_SPS_t *sps)
+{
+	int i;
+	int nb_cpb = get_ue_golomb_31(gb) + 1;
+
+	if (nb_cpb > 32U) {
+		pr_err("cpb_count %d invalid\n", nb_cpb);
+		return -1;
+	}
+
+	get_bits(gb, 4); /* bit_rate_scale */
+	get_bits(gb, 4); /* cpb_size_scale */
+
+	for (i = 0; i < nb_cpb; i++) {
+		get_ue_golomb_long(gb); /* bit_rate_value_minus1 */
+		get_ue_golomb_long(gb); /* cpb_size_value_minus1 */
+		get_bits1(gb); /* cbr_flag */
+	}
+
+	sps->initial_cpb_removal_delay_length = get_bits(gb, 5) + 1;
+	sps->cpb_removal_delay_length = get_bits(gb, 5) + 1;
+	sps->dpb_output_delay_length = get_bits(gb, 5) + 1;
+	sps->time_offset_length = get_bits(gb, 5);
+	sps->cpb_cnt = nb_cpb;
+
+	return 0;
+}
+
+/*
+ * Parse the VUI (video usability information) appendix of an SPS.
+ * Fills the aspect ratio, color description, timing, HRD and
+ * bitstream-restriction fields of @sps.
+ * Returns 0 on success, -1 on a syntax error.
+ */
+static int decode_vui_parameters(struct get_bits_context *gb, struct h264_SPS_t *sps)
+{
+	int aspect_ratio_info_present_flag;
+	u32 aspect_ratio_idc;
+
+	aspect_ratio_info_present_flag = get_bits1(gb);
+
+	if (aspect_ratio_info_present_flag) {
+		aspect_ratio_idc = get_bits(gb, 8);
+		if (aspect_ratio_idc == EXTENDED_SAR) {
+			/* explicit sample aspect ratio follows */
+			sps->sar.num = get_bits(gb, 16);
+			sps->sar.den = get_bits(gb, 16);
+		} else if (aspect_ratio_idc < ARRAY_SIZE(h264_pixel_aspect)) {
+			sps->sar = h264_pixel_aspect[aspect_ratio_idc];
+		} else {
+			return -1;
+		}
+	} else {
+		sps->sar.num =
+			sps->sar.den = 0;
+	}
+
+	if (get_bits1(gb)) /* overscan_info_present_flag */
+		get_bits1(gb); /* overscan_appropriate_flag */
+
+	sps->video_signal_type_present_flag = get_bits1(gb);
+	if (sps->video_signal_type_present_flag) {
+		get_bits(gb, 3); /* video_format */
+		sps->full_range = get_bits1(gb); /* video_full_range_flag */
+
+		sps->colour_description_present_flag = get_bits1(gb);
+		if (sps->colour_description_present_flag) {
+			sps->color_primaries = get_bits(gb, 8); /* colour_primaries */
+			sps->color_trc = get_bits(gb, 8); /* transfer_characteristics */
+			sps->colorspace = get_bits(gb, 8); /* matrix_coefficients */
+
+			// Set invalid values to "unspecified"
+			if (!av_color_primaries_name(sps->color_primaries))
+				sps->color_primaries = AVCOL_PRI_UNSPECIFIED;
+			if (!av_color_transfer_name(sps->color_trc))
+				sps->color_trc = AVCOL_TRC_UNSPECIFIED;
+			if (!av_color_space_name(sps->colorspace))
+				sps->colorspace = AVCOL_SPC_UNSPECIFIED;
+		}
+	}
+
+	/* chroma_location_info_present_flag */
+	if (get_bits1(gb)) {
+		/* chroma_sample_location_type_top_field */
+		//avctx->chroma_sample_location = get_ue_golomb(gb) + 1;
+		get_ue_golomb(gb); /* chroma_sample_location_type_bottom_field */
+	}
+
+	/* more data appears to follow but fewer than 10 bits remain:
+	 * the VUI is truncated; stop here and keep what was parsed
+	 */
+	if (show_bits1(gb) && get_bits_left(gb) < 10) {
+		pr_info("Truncated VUI\n");
+		return 0;
+	}
+
+	sps->timing_info_present_flag = get_bits1(gb);
+	if (sps->timing_info_present_flag) {
+		unsigned num_units_in_tick = get_bits_long(gb, 32);
+		unsigned time_scale = get_bits_long(gb, 32);
+		if (!num_units_in_tick || !time_scale) {
+			pr_info("time_scale/num_units_in_tick invalid or unsupported (%u/%u)\n",
+				time_scale, num_units_in_tick);
+			sps->timing_info_present_flag = 0;
+		} else {
+			sps->num_units_in_tick = num_units_in_tick;
+			sps->time_scale = time_scale;
+		}
+		sps->fixed_frame_rate_flag = get_bits1(gb);
+	}
+
+	sps->nal_hrd_parameters_present_flag = get_bits1(gb);
+	if (sps->nal_hrd_parameters_present_flag)
+		if (decode_hrd_parameters(gb, sps) < 0)
+			return -1;
+	sps->vcl_hrd_parameters_present_flag = get_bits1(gb);
+	if (sps->vcl_hrd_parameters_present_flag)
+		if (decode_hrd_parameters(gb, sps) < 0)
+			return -1;
+	if (sps->nal_hrd_parameters_present_flag ||
+		sps->vcl_hrd_parameters_present_flag)
+		get_bits1(gb); /* low_delay_hrd_flag */
+	sps->pic_struct_present_flag = get_bits1(gb);
+	if (!get_bits_left(gb))
+		return 0;
+	sps->bitstream_restriction_flag = get_bits1(gb);
+	if (sps->bitstream_restriction_flag) {
+		get_bits1(gb); /* motion_vectors_over_pic_boundaries_flag */
+		get_ue_golomb(gb); /* max_bytes_per_pic_denom */
+		get_ue_golomb(gb); /* max_bits_per_mb_denom */
+		get_ue_golomb(gb); /* log2_max_mv_length_horizontal */
+		get_ue_golomb(gb); /* log2_max_mv_length_vertical */
+		sps->num_reorder_frames = get_ue_golomb(gb);
+		sps->max_dec_frame_buffering = get_ue_golomb(gb); /*max_dec_frame_buffering*/
+
+		/* overread: the restriction fields were bogus, discard them */
+		if (get_bits_left(gb) < 0) {
+			sps->num_reorder_frames = 0;
+			sps->bitstream_restriction_flag = 0;
+		}
+
+		if (sps->num_reorder_frames > 16U
+			/* max_dec_frame_buffering || max_dec_frame_buffering > 16 */) {
+			pr_info("Clipping illegal num_reorder_frames %d\n",
+				sps->num_reorder_frames);
+			sps->num_reorder_frames = 16;
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Parse a sequence parameter set (SPS) RBSP into @sps.
+ * The caller must have positioned @gb just past the NAL header.
+ * Field order strictly follows H.264 section 7.3.2.1.
+ * Returns 0 on success, -1 on any syntax or range violation.
+ */
+static int aml_h264_parser_sps(struct get_bits_context *gb, struct h264_SPS_t *sps)
+{
+	int ret;
+	u32 sps_id;
+	int profile_idc, level_idc, constraint_set_flags = 0;
+	int i, log2_max_frame_num_minus4;
+
+	profile_idc = get_bits(gb, 8);
+	constraint_set_flags |= get_bits1(gb) << 0;   // constraint_set0_flag
+	constraint_set_flags |= get_bits1(gb) << 1;   // constraint_set1_flag
+	constraint_set_flags |= get_bits1(gb) << 2;   // constraint_set2_flag
+	constraint_set_flags |= get_bits1(gb) << 3;   // constraint_set3_flag
+	constraint_set_flags |= get_bits1(gb) << 4;   // constraint_set4_flag
+	constraint_set_flags |= get_bits1(gb) << 5;   // constraint_set5_flag
+	skip_bits(gb, 2);                             // reserved_zero_2bits
+	level_idc = get_bits(gb, 8);
+	sps_id = get_ue_golomb_31(gb);
+
+	if (sps_id >= MAX_SPS_COUNT) {
+		pr_info( "sps_id %u out of range\n", sps_id);
+		goto fail;
+	}
+
+	sps->sps_id                 = sps_id;
+	sps->time_offset_length     = 24;
+	sps->profile_idc            = profile_idc;
+	sps->constraint_set_flags   = constraint_set_flags;
+	sps->level_idc              = level_idc;
+	sps->full_range             = -1;
+
+	/* flat (16) scaling matrices until the bitstream overrides them */
+	memset(sps->scaling_matrix4, 16, sizeof(sps->scaling_matrix4));
+	memset(sps->scaling_matrix8, 16, sizeof(sps->scaling_matrix8));
+	sps->scaling_matrix_present = 0;
+	sps->colorspace = 2; //AVCOL_SPC_UNSPECIFIED
+
+	/* only these profiles carry chroma format / bit depth / scaling info */
+	if (sps->profile_idc == 100 ||  // High profile
+	    sps->profile_idc == 110 ||  // High10 profile
+	    sps->profile_idc == 122 ||  // High422 profile
+	    sps->profile_idc == 244 ||  // High444 Predictive profile
+	    sps->profile_idc ==  44 ||  // Cavlc444 profile
+	    sps->profile_idc ==  83 ||  // Scalable Constrained High profile (SVC)
+	    sps->profile_idc ==  86 ||  // Scalable High Intra profile (SVC)
+	    sps->profile_idc == 118 ||  // Stereo High profile (MVC)
+	    sps->profile_idc == 128 ||  // Multiview High profile (MVC)
+	    sps->profile_idc == 138 ||  // Multiview Depth High profile (MVCD)
+	    sps->profile_idc == 144) {  // old High444 profile
+		sps->chroma_format_idc = get_ue_golomb_31(gb);
+
+		if (sps->chroma_format_idc > 3U) {
+			pr_err("chroma_format_idc %u\n", sps->chroma_format_idc);
+			goto fail;
+		} else if (sps->chroma_format_idc == 3) {
+			sps->residual_color_transform_flag = get_bits1(gb);
+			if (sps->residual_color_transform_flag) {
+				pr_info( "separate color planes are not supported\n");
+				goto fail;
+			}
+		}
+
+		sps->bit_depth_luma   = get_ue_golomb(gb) + 8;
+		sps->bit_depth_chroma = get_ue_golomb(gb) + 8;
+		if (sps->bit_depth_chroma != sps->bit_depth_luma) {
+			pr_err("Different chroma and luma bit depth\n");
+			goto fail;
+		}
+
+		if (sps->bit_depth_luma   < 8 || sps->bit_depth_luma   > 14 ||
+		    sps->bit_depth_chroma < 8 || sps->bit_depth_chroma > 14) {
+			pr_info("illegal bit depth value (%d, %d)\n",
+				sps->bit_depth_luma, sps->bit_depth_chroma);
+			goto fail;
+		}
+
+		sps->transform_bypass = get_bits1(gb);
+		ret = decode_scaling_matrices(gb, sps, NULL, 1,
+			sps->scaling_matrix4, sps->scaling_matrix8);
+		if (ret < 0)
+			goto fail;
+		sps->scaling_matrix_present |= ret;
+	} else {
+		/* Baseline/Main etc.: fixed 4:2:0, 8-bit */
+		sps->chroma_format_idc = 1;
+		sps->bit_depth_luma    = 8;
+		sps->bit_depth_chroma  = 8;
+	}
+
+	log2_max_frame_num_minus4 = get_ue_golomb(gb);
+	if (log2_max_frame_num_minus4 < MIN_LOG2_MAX_FRAME_NUM - 4 ||
+	    log2_max_frame_num_minus4 > MAX_LOG2_MAX_FRAME_NUM - 4) {
+		pr_info(
+			"log2_max_frame_num_minus4 out of range (0-12): %d\n",
+			log2_max_frame_num_minus4);
+		goto fail;
+	}
+	sps->log2_max_frame_num = log2_max_frame_num_minus4 + 4;
+
+	/* picture order count derivation (H.264 section 8.2.1) */
+	sps->poc_type = get_ue_golomb_31(gb);
+	if (sps->poc_type == 0) { // FIXME #define
+		u32 t = get_ue_golomb(gb);
+		if (t > 12) {
+			pr_info( "log2_max_poc_lsb (%d) is out of range\n", t);
+			goto fail;
+		}
+		sps->log2_max_poc_lsb = t + 4;
+	} else if (sps->poc_type == 1) { // FIXME #define
+		sps->delta_pic_order_always_zero_flag = get_bits1(gb);
+		sps->offset_for_non_ref_pic           = get_se_golomb_long(gb);
+		sps->offset_for_top_to_bottom_field   = get_se_golomb_long(gb);
+
+		sps->poc_cycle_length = get_ue_golomb(gb);
+		if ((u32)sps->poc_cycle_length >= ARRAY_SIZE(sps->offset_for_ref_frame)) {
+			pr_info("poc_cycle_length overflow %d\n", sps->poc_cycle_length);
+			goto fail;
+		}
+
+		for (i = 0; i < sps->poc_cycle_length; i++)
+			sps->offset_for_ref_frame[i] = get_se_golomb_long(gb);
+	} else if (sps->poc_type != 2) {
+		pr_info( "illegal POC type %d\n", sps->poc_type);
+		goto fail;
+	}
+
+	sps->ref_frame_count = get_ue_golomb_31(gb);
+	if (sps->ref_frame_count > MAX_DELAYED_PIC_COUNT) {
+		pr_info("too many reference frames %d\n", sps->ref_frame_count);
+		goto fail;
+	}
+	sps->gaps_in_frame_num_allowed_flag = get_bits1(gb);
+	sps->mb_width  = get_ue_golomb(gb) + 1;
+	sps->mb_height = get_ue_golomb(gb) + 1;
+
+	sps->frame_mbs_only_flag = get_bits1(gb);
+
+	if (sps->mb_height >= INT_MAX / 2U) {
+		pr_info("height overflow\n");
+		goto fail;
+	}
+	/* interlaced streams code the height in MB pairs */
+	sps->mb_height *= 2 - sps->frame_mbs_only_flag;
+
+	if (!sps->frame_mbs_only_flag)
+		sps->mb_aff = get_bits1(gb);
+	else
+		sps->mb_aff = 0;
+
+	if ((u32)sps->mb_width  >= INT_MAX / 16 ||
+	    (u32)sps->mb_height >= INT_MAX / 16) {
+		pr_info( "mb_width/height overflow\n");
+		goto fail;
+	}
+
+	sps->direct_8x8_inference_flag = get_bits1(gb);
+
+	sps->crop = get_bits1(gb);
+	if (sps->crop) {
+		u32 crop_left   = get_ue_golomb(gb);
+		u32 crop_right  = get_ue_golomb(gb);
+		u32 crop_top    = get_ue_golomb(gb);
+		u32 crop_bottom = get_ue_golomb(gb);
+		int width  = 16 * sps->mb_width;
+		int height = 16 * sps->mb_height;
+		/* crop units depend on chroma subsampling and field coding */
+		int vsub   = (sps->chroma_format_idc == 1) ? 1 : 0;
+		int hsub   = (sps->chroma_format_idc == 1 || sps->chroma_format_idc == 2) ? 1 : 0;
+		int step_x = 1 << hsub;
+		int step_y = (2 - sps->frame_mbs_only_flag) << vsub;
+
+		if (crop_left   > (u32)INT_MAX / 4 / step_x ||
+		    crop_right  > (u32)INT_MAX / 4 / step_x ||
+		    crop_top    > (u32)INT_MAX / 4 / step_y ||
+		    crop_bottom > (u32)INT_MAX / 4 / step_y ||
+		    (crop_left + crop_right ) * step_x >= width ||
+		    (crop_top  + crop_bottom) * step_y >= height) {
+			pr_info( "crop values invalid %u %u %u %u / %d %d\n", crop_left, crop_right, crop_top, crop_bottom, width, height);
+			goto fail;
+		}
+
+		sps->crop_left   = crop_left   * step_x;
+		sps->crop_right  = crop_right  * step_x;
+		sps->crop_top    = crop_top    * step_y;
+		sps->crop_bottom = crop_bottom * step_y;
+	} else {
+		sps->crop_left   =
+		sps->crop_right  =
+		sps->crop_top    =
+		sps->crop_bottom =
+		sps->crop        = 0;
+	}
+
+	sps->vui_parameters_present_flag = get_bits1(gb);
+	if (sps->vui_parameters_present_flag) {
+		int ret = decode_vui_parameters(gb, sps);
+		if (ret < 0)
+			goto fail;
+	}
+
+	if (get_bits_left(gb) < 0) {
+		pr_info("Overread %s by %d bits\n", sps->vui_parameters_present_flag ? "VUI" : "SPS", -get_bits_left(gb));
+		goto out;
+	}
+
+	/* if the maximum delay is not stored in the SPS, derive it based on the level */
+	if (!sps->bitstream_restriction_flag && sps->ref_frame_count) {
+		sps->num_reorder_frames = MAX_DELAYED_PIC_COUNT - 1;
+		for (i = 0; i < ARRAY_SIZE(level_max_dpb_mbs); i++) {
+			if (level_max_dpb_mbs[i][0] == sps->level_idc) {
+				sps->num_reorder_frames =
+					MIN(level_max_dpb_mbs[i][1] / (sps->mb_width * sps->mb_height),
+						sps->num_reorder_frames);
+				break;
+			}
+		}
+	}
+
+	if (!sps->sar.den)
+		sps->sar.den = 1;
+out:
+	/* summary log of the parsed SPS */
+	if (1) {
+		static const char csp[4][5] = { "Gray", "420", "422", "444" };
+		pr_info("sps:%u profile:%d/%d poc:%d ref:%d %dx%d %s %s crop:%u/%u/%u/%u %s %s %d/%d b%d reo:%d\n",
+			sps_id, sps->profile_idc, sps->level_idc,
+			sps->poc_type,
+			sps->ref_frame_count,
+			sps->mb_width, sps->mb_height,
+			sps->frame_mbs_only_flag ? "FRM" : (sps->mb_aff ? "MB-AFF" : "PIC-AFF"),
+			sps->direct_8x8_inference_flag ? "8B8" : "",
+			sps->crop_left, sps->crop_right,
+			sps->crop_top, sps->crop_bottom,
+			sps->vui_parameters_present_flag ? "VUI" : "",
+			csp[sps->chroma_format_idc],
+			sps->timing_info_present_flag ? sps->num_units_in_tick : 0,
+			sps->timing_info_present_flag ? sps->time_scale : 0,
+			sps->bit_depth_luma,
+			sps->bitstream_restriction_flag ? sps->num_reorder_frames : -1);
+	}
+
+	return 0;
+
+fail:
+	return -1;
+}
+
+/* printable names for all 32 possible 5-bit NAL unit types */
+static const char *h264_nal_type_name[32] = {
+	"Unspecified 0", //H264_NAL_UNSPECIFIED
+	"Coded slice of a non-IDR picture", // H264_NAL_SLICE
+	"Coded slice data partition A", // H264_NAL_DPA
+	"Coded slice data partition B", // H264_NAL_DPB
+	"Coded slice data partition C", // H264_NAL_DPC
+	"IDR", // H264_NAL_IDR_SLICE
+	"SEI", // H264_NAL_SEI
+	"SPS", // H264_NAL_SPS
+	"PPS", // H264_NAL_PPS
+	"AUD", // H264_NAL_AUD
+	"End of sequence", // H264_NAL_END_SEQUENCE
+	"End of stream", // H264_NAL_END_STREAM
+	"Filler data", // H264_NAL_FILLER_DATA
+	"SPS extension", // H264_NAL_SPS_EXT
+	"Prefix", // H264_NAL_PREFIX
+	"Subset SPS", // H264_NAL_SUB_SPS
+	"Depth parameter set", // H264_NAL_DPS
+	"Reserved 17", // H264_NAL_RESERVED17
+	"Reserved 18", // H264_NAL_RESERVED18
+	"Auxiliary coded picture without partitioning", // H264_NAL_AUXILIARY_SLICE
+	"Slice extension", // H264_NAL_EXTEN_SLICE
+	"Slice extension for a depth view or a 3D-AVC texture view", // H264_NAL_DEPTH_EXTEN_SLICE
+	"Reserved 22", // H264_NAL_RESERVED22
+	"Reserved 23", // H264_NAL_RESERVED23
+	"Unspecified 24", // H264_NAL_UNSPECIFIED24
+	"Unspecified 25", // H264_NAL_UNSPECIFIED25
+	"Unspecified 26", // H264_NAL_UNSPECIFIED26
+	"Unspecified 27", // H264_NAL_UNSPECIFIED27
+	"Unspecified 28", // H264_NAL_UNSPECIFIED28
+	"Unspecified 29", // H264_NAL_UNSPECIFIED29
+	"Unspecified 30", // H264_NAL_UNSPECIFIED30
+	"Unspecified 31", // H264_NAL_UNSPECIFIED31
+};
+
+/*
+ * Map a NAL unit type to its printable name.
+ * The table has exactly 32 entries (a NAL type is a 5-bit field);
+ * mask the index so an out-of-range value cannot read past the table.
+ */
+static const char *h264_nal_unit_name(int nal_type)
+{
+	return h264_nal_type_name[nal_type & 0x1f];
+}
+
+/*
+ * Parse one NAL unit found in @data and, for SPS NALs, fill ps->sps
+ * (setting ps->sps_parsed).  The payload is first unescaped into a
+ * temporary RBSP buffer.
+ *
+ * Returns 0 on success (unsupported NAL types are skipped with a log
+ * message), -ENOMEM if the RBSP buffer cannot be allocated, -EINVAL on
+ * a corrupt NAL header, and -1 on other parse failures.
+ */
+static int decode_extradata_ps(u8 *data, int size, struct h264_param_sets *ps)
+{
+	int ret = 0;
+	struct get_bits_context gb;
+	u32 src_len, rbsp_size = 0;
+	u8 *rbsp_buf = NULL;
+	int ref_idc, nalu_pos;
+	u32 nal_type;
+	u8 *p = data;
+	u32 len = size;
+
+	nalu_pos = find_start_code(p, len);
+	if (nalu_pos < 0)
+		return -1;
+
+	src_len = calc_nal_len(p + nalu_pos, size - nalu_pos);
+	rbsp_buf = nal_unit_extract_rbsp(p + nalu_pos, src_len, &rbsp_size);
+	if (rbsp_buf == NULL)
+		return -ENOMEM;
+
+	ret = init_get_bits8(&gb, rbsp_buf, rbsp_size);
+	if (ret < 0)
+		goto out;
+
+	if (get_bits1(&gb) != 0) {
+		/* forbidden_zero_bit must be 0; report corrupt data
+		 * instead of silently returning success
+		 */
+		ret = -EINVAL;
+		pr_err("invalid h264 data,return!\n");
+		goto out;
+	}
+
+	ref_idc = get_bits(&gb, 2);
+	nal_type = get_bits(&gb, 5);
+
+	pr_info("nal_unit_type: %d(%s), nal_ref_idc: %d\n",
+		nal_type, h264_nal_unit_name(nal_type), ref_idc);
+
+	switch (nal_type) {
+	case H264_NAL_SPS:
+		ret = aml_h264_parser_sps(&gb, &ps->sps);
+		if (ret < 0)
+			goto out;
+		ps->sps_parsed = true;
+		break;
+	/*case H264_NAL_PPS:
+		ret = ff_h264_decode_picture_parameter_set(&gb, &ps->pps, rbsp_size);
+		if (ret < 0)
+			goto fail;
+		ps->pps_parsed = true;
+		break;*/
+	default:
+		pr_err("Unsupport parser nal type (%s).\n",
+			h264_nal_unit_name(nal_type));
+		break;
+	}
+
+out:
+	vfree(rbsp_buf);
+
+	return ret;
+}
+
+/*
+ * Scan an extradata blob for start codes and parse each NAL unit found
+ * via decode_extradata_ps() (currently only SPS is handled there).
+ *
+ * NOTE(review): the scan index starts at 4 while @p starts at @buf,
+ * presumably to account for a leading 4-byte start code — confirm
+ * against the callers.
+ *
+ * Returns 0 on success, or the first error reported by
+ * decode_extradata_ps().
+ */
+int h264_decode_extradata_ps(u8 *buf, int size, struct h264_param_sets *ps)
+{
+	int ret = 0, i = 0, j = 0;
+	u8 *p = buf;
+	int len = size;
+
+	for (i = 4; i < size; i++) {
+		j = find_start_code(p, len);
+		if (j > 0) {
+			/* re-base the remaining length at the current position */
+			len = size - (p - buf);
+			ret = decode_extradata_ps(p, len, ps);
+			if (ret) {
+				pr_err("parse extra data failed. err: %d\n", ret);
+				return ret;
+			}
+			p += j;
+		}
+		p++;
+	}
+
+	return ret;
+}
+
+
*
*/
-
#ifndef AML_H264_PARSER_H
#define AML_H264_PARSER_H
-#define QP_MAX_NUM (51 + 6*6) // The maximum supported qp
+#include "../utils/pixfmt.h"
+
+#define QP_MAX_NUM (51 + 6 * 6) // The maximum supported qp
/* NAL unit types */
enum {
- H264_NAL_SLICE = 1,
- H264_NAL_DPA = 2,
- H264_NAL_DPB = 3,
- H264_NAL_DPC = 4,
- H264_NAL_IDR_SLICE = 5,
- H264_NAL_SEI = 6,
- H264_NAL_SPS = 7,
- H264_NAL_PPS = 8,
- H264_NAL_AUD = 9,
- H264_NAL_END_SEQUENCE = 10,
- H264_NAL_END_STREAM = 11,
- H264_NAL_FILLER_DATA = 12,
- H264_NAL_SPS_EXT = 13,
- H264_NAL_AUXILIARY_SLICE = 19,
+ H264_NAL_SLICE = 1,
+ H264_NAL_DPA = 2,
+ H264_NAL_DPB = 3,
+ H264_NAL_DPC = 4,
+ H264_NAL_IDR_SLICE = 5,
+ H264_NAL_SEI = 6,
+ H264_NAL_SPS = 7,
+ H264_NAL_PPS = 8,
+ H264_NAL_AUD = 9,
+ H264_NAL_END_SEQUENCE = 10,
+ H264_NAL_END_STREAM = 11,
+ H264_NAL_FILLER_DATA = 12,
+ H264_NAL_SPS_EXT = 13,
+ H264_NAL_AUXILIARY_SLICE = 19,
};
-
enum {
- // 7.4.2.1.1: seq_parameter_set_id is in [0, 31].
- H264_MAX_SPS_COUNT = 32,
- // 7.4.2.2: pic_parameter_set_id is in [0, 255].
- H264_MAX_PPS_COUNT = 256,
-
- // A.3: MaxDpbFrames is bounded above by 16.
- H264_MAX_DPB_FRAMES = 16,
- // 7.4.2.1.1: max_num_ref_frames is in [0, MaxDpbFrames], and
- // each reference frame can have two fields.
- H264_MAX_REFS = 2 * H264_MAX_DPB_FRAMES,
-
- // 7.4.3.1: modification_of_pic_nums_idc is not equal to 3 at most
- // num_ref_idx_lN_active_minus1 + 1 times (that is, once for each
- // possible reference), then equal to 3 once.
- H264_MAX_RPLM_COUNT = H264_MAX_REFS + 1,
-
- // 7.4.3.3: in the worst case, we begin with a full short-term
- // reference picture list. Each picture in turn is moved to the
- // long-term list (type 3) and then discarded from there (type 2).
- // Then, we set the length of the long-term list (type 4), mark
- // the current picture as long-term (type 6) and terminate the
- // process (type 0).
- H264_MAX_MMCO_COUNT = H264_MAX_REFS * 2 + 3,
-
- // A.2.1, A.2.3: profiles supporting FMO constrain
- // num_slice_groups_minus1 to be in [0, 7].
- H264_MAX_SLICE_GROUPS = 8,
-
- // E.2.2: cpb_cnt_minus1 is in [0, 31].
- H264_MAX_CPB_CNT = 32,
-
- // A.3: in table A-1 the highest level allows a MaxFS of 139264.
- H264_MAX_MB_PIC_SIZE = 139264,
- // A.3.1, A.3.2: PicWidthInMbs and PicHeightInMbs are constrained
- // to be not greater than sqrt(MaxFS * 8). Hence height/width are
- // bounded above by sqrt(139264 * 8) = 1055.5 macroblocks.
- H264_MAX_MB_WIDTH = 1055,
- H264_MAX_MB_HEIGHT = 1055,
- H264_MAX_WIDTH = H264_MAX_MB_WIDTH * 16,
- H264_MAX_HEIGHT = H264_MAX_MB_HEIGHT * 16,
+ // Hard syntax bounds from Rec. ITU-T H.264 used to size parser arrays.
+ // 7.4.2.1.1: seq_parameter_set_id is in [0, 31].
+ H264_MAX_SPS_COUNT = 32,
+ // 7.4.2.2: pic_parameter_set_id is in [0, 255].
+ H264_MAX_PPS_COUNT = 256,
+
+ // A.3: MaxDpbFrames is bounded above by 16.
+ H264_MAX_DPB_FRAMES = 16,
+ // 7.4.2.1.1: max_num_ref_frames is in [0, MaxDpbFrames], and
+ // each reference frame can have two fields.
+ H264_MAX_REFS = 2 * H264_MAX_DPB_FRAMES,
+
+ // 7.4.3.1: modification_of_pic_nums_idc is not equal to 3 at most
+ // num_ref_idx_lN_active_minus1 + 1 times (that is, once for each
+ // possible reference), then equal to 3 once.
+ H264_MAX_RPLM_COUNT = H264_MAX_REFS + 1,
+
+ // 7.4.3.3: in the worst case, we begin with a full short-term
+ // reference picture list. Each picture in turn is moved to the
+ // long-term list (type 3) and then discarded from there (type 2).
+ // Then, we set the length of the long-term list (type 4), mark
+ // the current picture as long-term (type 6) and terminate the
+ // process (type 0).
+ H264_MAX_MMCO_COUNT = H264_MAX_REFS * 2 + 3,
+
+ // A.2.1, A.2.3: profiles supporting FMO constrain
+ // num_slice_groups_minus1 to be in [0, 7].
+ H264_MAX_SLICE_GROUPS = 8,
+
+ // E.2.2: cpb_cnt_minus1 is in [0, 31].
+ H264_MAX_CPB_CNT = 32,
+
+ // A.3: in table A-1 the highest level allows a MaxFS of 139264.
+ H264_MAX_MB_PIC_SIZE = 139264,
+ // A.3.1, A.3.2: PicWidthInMbs and PicHeightInMbs are constrained
+ // to be not greater than sqrt(MaxFS * 8). Hence height/width are
+ // bounded above by sqrt(139264 * 8) = 1055.5 macroblocks.
+ H264_MAX_MB_WIDTH = 1055,
+ H264_MAX_MB_HEIGHT = 1055,
+ H264_MAX_WIDTH = H264_MAX_MB_WIDTH * 16,
+ H264_MAX_HEIGHT = H264_MAX_MB_HEIGHT * 16,
+};
+
+/**
+ * Rational number (pair of numerator and denominator).
+ * Used for the VUI sample aspect ratio (h264_SPS_t.sar).
+ */
+struct rational{
+ int num; ///< Numerator
+ int den; ///< Denominator
+};
+
+/**
+ * Sequence parameter set
+ *
+ * Filled by aml_h264_parser_sps(); field names mirror the H.264
+ * SPS/VUI syntax elements they are decoded from.
+ */
+struct h264_SPS_t {
+ u32 sps_id;
+ int profile_idc;
+ int level_idc;
+ int chroma_format_idc;
+ int transform_bypass; ///< qpprime_y_zero_transform_bypass_flag
+ int log2_max_frame_num; ///< log2_max_frame_num_minus4 + 4
+ int poc_type; ///< pic_order_cnt_type
+ int log2_max_poc_lsb; ///< log2_max_pic_order_cnt_lsb_minus4
+ int delta_pic_order_always_zero_flag;
+ int offset_for_non_ref_pic;
+ int offset_for_top_to_bottom_field;
+ int poc_cycle_length; ///< num_ref_frames_in_pic_order_cnt_cycle
+ int ref_frame_count; ///< num_ref_frames
+ int gaps_in_frame_num_allowed_flag;
+ int mb_width; ///< pic_width_in_mbs_minus1 + 1
+ ///< (pic_height_in_map_units_minus1 + 1) * (2 - frame_mbs_only_flag)
+ int mb_height;
+ int frame_mbs_only_flag;
+ int mb_aff; ///< mb_adaptive_frame_field_flag
+ int direct_8x8_inference_flag;
+ int crop; ///< frame_cropping_flag
+
+ /* those 4 are already in luma samples */
+ u32 crop_left; ///< frame_cropping_rect_left_offset
+ u32 crop_right; ///< frame_cropping_rect_right_offset
+ u32 crop_top; ///< frame_cropping_rect_top_offset
+ u32 crop_bottom; ///< frame_cropping_rect_bottom_offset
+ int vui_parameters_present_flag;
+ struct rational sar;
+ int video_signal_type_present_flag;
+ int full_range;
+ int colour_description_present_flag;
+ enum AVColorPrimaries color_primaries;
+ enum AVColorTransferCharacteristic color_trc;
+ enum AVColorSpace colorspace;
+ int timing_info_present_flag;
+ u32 num_units_in_tick;
+ u32 time_scale;
+ int fixed_frame_rate_flag;
+ int32_t offset_for_ref_frame[256];
+ int bitstream_restriction_flag;
+ int num_reorder_frames;
+ int max_dec_frame_buffering;
+ int scaling_matrix_present;
+ u8 scaling_matrix4[6][16];
+ u8 scaling_matrix8[6][64];
+ int nal_hrd_parameters_present_flag;
+ int vcl_hrd_parameters_present_flag;
+ int pic_struct_present_flag;
+ int time_offset_length;
+ int cpb_cnt; ///< See H.264 E.1.2
+ int initial_cpb_removal_delay_length; ///< initial_cpb_removal_delay_length_minus1 + 1
+ int cpb_removal_delay_length; ///< cpb_removal_delay_length_minus1 + 1
+ int dpb_output_delay_length; ///< dpb_output_delay_length_minus1 + 1
+ int bit_depth_luma; ///< bit_depth_luma_minus8 + 8
+ int bit_depth_chroma; ///< bit_depth_chroma_minus8 + 8
+ int residual_color_transform_flag; ///< residual_colour_transform_flag
+ int constraint_set_flags; ///< constraint_set[0-3]_flag
+} ;
+
+/**
+ * Picture parameter set
+ *
+ * NOTE(review): PPS parsing is currently commented out in
+ * decode_extradata_ps(), so these fields are reserved for future use.
+ */
+struct h264_PPS_t {
+ u32 sps_id;
+ int cabac; ///< entropy_coding_mode_flag
+ int pic_order_present; ///< pic_order_present_flag
+ int slice_group_count; ///< num_slice_groups_minus1 + 1
+ int mb_slice_group_map_type;
+ u32 ref_count[2]; ///< num_ref_idx_l0/1_active_minus1 + 1
+ int weighted_pred; ///< weighted_pred_flag
+ int weighted_bipred_idc;
+ int init_qp; ///< pic_init_qp_minus26 + 26
+ int init_qs; ///< pic_init_qs_minus26 + 26
+ int chroma_qp_index_offset[2];
+ int deblocking_filter_parameters_present; ///< deblocking_filter_parameters_present_flag
+ int constrained_intra_pred; ///< constrained_intra_pred_flag
+ int redundant_pic_cnt_present; ///< redundant_pic_cnt_present_flag
+ int transform_8x8_mode; ///< transform_8x8_mode_flag
+ u8 scaling_matrix4[6][16];
+ u8 scaling_matrix8[6][64];
+ u8 chroma_qp_table[2][87+1]; ///< pre-scaled (with chroma_qp_index_offset) version of qp_table
+ int chroma_qp_diff;
+ u8 data[4096];
+ int data_size;
+
+ u32 dequant4_buffer[6][87 + 1][16];
+ u32 dequant8_buffer[6][87 + 1][64];
+ u32(*dequant4_coeff[6])[16];
+ u32(*dequant8_coeff[6])[64];
+} ;
+
+/* Aggregate parser output: flags record which parameter sets were seen. */
+struct h264_param_sets {
+ bool sps_parsed;
+ bool pps_parsed;
+ struct h264_SPS_t sps;
+ struct h264_PPS_t pps;
};
+/* Parse Annex-B extradata in @data and fill @ps; returns 0 on success. */
+int h264_decode_extradata_ps(u8 *data, int size, struct h264_param_sets *ps);
#endif /* AML_H264_PARSER_H */
--- /dev/null
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+
+#include "aml_hevc_parser.h"
+#include "../utils/get_bits.h"
+#include "../utils/put_bits.h"
+#include "../utils/golomb.h"
+#include "../utils/common.h"
+#include "utils.h"
+
+/*
+ * Up-right diagonal scan orders (x/y coordinate per scan index),
+ * used by scaling_list_data() to place decoded coefficients.
+ */
+const u8 ff_hevc_diag_scan4x4_x[16] = {
+ 0, 0, 1, 0,
+ 1, 2, 0, 1,
+ 2, 3, 1, 2,
+ 3, 2, 3, 3,
+};
+
+const u8 ff_hevc_diag_scan4x4_y[16] = {
+ 0, 1, 0, 2,
+ 1, 0, 3, 2,
+ 1, 0, 3, 2,
+ 1, 3, 2, 3,
+};
+
+const u8 ff_hevc_diag_scan8x8_x[64] = {
+ 0, 0, 1, 0,
+ 1, 2, 0, 1,
+ 2, 3, 0, 1,
+ 2, 3, 4, 0,
+ 1, 2, 3, 4,
+ 5, 0, 1, 2,
+ 3, 4, 5, 6,
+ 0, 1, 2, 3,
+ 4, 5, 6, 7,
+ 1, 2, 3, 4,
+ 5, 6, 7, 2,
+ 3, 4, 5, 6,
+ 7, 3, 4, 5,
+ 6, 7, 4, 5,
+ 6, 7, 5, 6,
+ 7, 6, 7, 7,
+};
+
+const u8 ff_hevc_diag_scan8x8_y[64] = {
+ 0, 1, 0, 2,
+ 1, 0, 3, 2,
+ 1, 0, 4, 3,
+ 2, 1, 0, 5,
+ 4, 3, 2, 1,
+ 0, 6, 5, 4,
+ 3, 2, 1, 0,
+ 7, 6, 5, 4,
+ 3, 2, 1, 0,
+ 7, 6, 5, 4,
+ 3, 2, 1, 7,
+ 6, 5, 4, 3,
+ 2, 7, 6, 5,
+ 4, 3, 7, 6,
+ 5, 4, 7, 6,
+ 5, 7, 6, 7,
+};
+
+/*
+ * Default 8x8 scaling matrices (intra/inter), applied by
+ * set_default_scaling_list_data() for the 8x8/16x16/32x32 sizes.
+ */
+static const u8 default_scaling_list_intra[] = {
+ 16, 16, 16, 16, 17, 18, 21, 24,
+ 16, 16, 16, 16, 17, 19, 22, 25,
+ 16, 16, 17, 18, 20, 22, 25, 29,
+ 16, 16, 18, 21, 24, 27, 31, 36,
+ 17, 17, 20, 24, 30, 35, 41, 47,
+ 18, 19, 22, 27, 35, 44, 54, 65,
+ 21, 22, 25, 31, 41, 54, 70, 88,
+ 24, 25, 29, 36, 47, 65, 88, 115
+};
+
+static const u8 default_scaling_list_inter[] = {
+ 16, 16, 16, 16, 17, 18, 20, 24,
+ 16, 16, 16, 17, 18, 20, 24, 25,
+ 16, 16, 17, 18, 20, 24, 25, 28,
+ 16, 17, 18, 20, 24, 25, 28, 33,
+ 17, 18, 20, 24, 25, 28, 33, 41,
+ 18, 20, 24, 25, 28, 33, 41, 54,
+ 20, 24, 25, 28, 33, 41, 54, 71,
+ 24, 25, 28, 33, 41, 54, 71, 91
+};
+
+/*
+ * Sample aspect ratios indexed by VUI aspect_ratio_idc; index 255
+ * (EXTENDED_SAR) is read explicitly from the bitstream in decode_vui().
+ */
+static const struct AVRational vui_sar[] = {
+ { 0, 1 },
+ { 1, 1 },
+ { 12, 11 },
+ { 10, 11 },
+ { 16, 11 },
+ { 40, 33 },
+ { 24, 11 },
+ { 20, 11 },
+ { 32, 11 },
+ { 80, 33 },
+ { 18, 11 },
+ { 15, 11 },
+ { 64, 33 },
+ { 160, 99 },
+ { 4, 3 },
+ { 3, 2 },
+ { 2, 1 },
+};
+
+/* Chroma subsampling factors indexed by chroma_format_idc. */
+static const u8 hevc_sub_width_c[] = {
+ 1, 2, 2, 1
+};
+
+static const u8 hevc_sub_height_c[] = {
+ 1, 2, 1, 1
+};
+
+/*
+ * decode_profile_tier_level() - parse one profile_tier_level() entry
+ * into @ptl: 2+1+5 profile bits, 32 compatibility flags, 4 source
+ * flags and 44 reserved bits (88 bits total, checked up front).
+ *
+ * Returns 0 on success, -1 when too few bits remain.
+ */
+static int decode_profile_tier_level(struct get_bits_context *gb, struct PTLCommon *ptl)
+{
+ int i;
+
+ if (get_bits_left(gb) < 2+1+5 + 32 + 4 + 16 + 16 + 12)
+ return -1;
+
+ ptl->profile_space = get_bits(gb, 2);
+ ptl->tier_flag = get_bits1(gb);
+ ptl->profile_idc = get_bits(gb, 5);
+ if (ptl->profile_idc == FF_PROFILE_HEVC_MAIN)
+ pr_info("Main profile bitstream\n");
+ else if (ptl->profile_idc == FF_PROFILE_HEVC_MAIN_10)
+ pr_info("Main 10 profile bitstream\n");
+ else if (ptl->profile_idc == FF_PROFILE_HEVC_MAIN_STILL_PICTURE)
+ pr_info("Main Still Picture profile bitstream\n");
+ else if (ptl->profile_idc == FF_PROFILE_HEVC_REXT)
+ pr_info("Range Extension profile bitstream\n");
+ else
+ pr_info("Unknown HEVC profile: %d\n", ptl->profile_idc);
+
+ for (i = 0; i < 32; i++) {
+ ptl->profile_compatibility_flag[i] = get_bits1(gb);
+
+ /* profile_idc 0 means "derive from compatibility flags" */
+ if (ptl->profile_idc == 0 && i > 0 && ptl->profile_compatibility_flag[i])
+ ptl->profile_idc = i;
+ }
+ ptl->progressive_source_flag = get_bits1(gb);
+ ptl->interlaced_source_flag = get_bits1(gb);
+ ptl->non_packed_constraint_flag = get_bits1(gb);
+ ptl->frame_only_constraint_flag = get_bits1(gb);
+
+ skip_bits(gb, 16); // XXX_reserved_zero_44bits[0..15]
+ skip_bits(gb, 16); // XXX_reserved_zero_44bits[16..31]
+ skip_bits(gb, 12); // XXX_reserved_zero_44bits[32..43]
+
+ return 0;
+}
+
+/*
+ * parse_ptl() - parse profile_tier_level() including sub-layer entries.
+ *
+ * @gb: bitstream reader positioned at the PTL syntax.
+ * @ptl: output; general PTL plus up to 7 sub-layer PTLs.
+ * @max_num_sub_layers: sps/vps_max_sub_layers (minus1 + 1).
+ *
+ * Returns 0 on success, -1 when the stream is truncated or a sub-layer
+ * PTL fails to parse.
+ */
+static int parse_ptl(struct get_bits_context *gb, struct PTL *ptl, int max_num_sub_layers)
+{
+ int i;
+
+ if (decode_profile_tier_level(gb, &ptl->general_ptl) < 0 ||
+ get_bits_left(gb) < 8 + (8 * 2 * (max_num_sub_layers - 1 > 0))) {
+ pr_err("PTL information too short\n");
+ return -1;
+ }
+
+ ptl->general_ptl.level_idc = get_bits(gb, 8);
+
+ for (i = 0; i < max_num_sub_layers - 1; i++) {
+ ptl->sub_layer_profile_present_flag[i] = get_bits1(gb);
+ ptl->sub_layer_level_present_flag[i] = get_bits1(gb);
+ }
+
+ /* alignment padding up to 8 sub-layer flag pairs */
+ if (max_num_sub_layers - 1 > 0)
+ for (i = max_num_sub_layers - 1; i < 8; i++)
+ skip_bits(gb, 2); // reserved_zero_2bits[i]
+
+ for (i = 0; i < max_num_sub_layers - 1; i++) {
+ if (ptl->sub_layer_profile_present_flag[i] &&
+ decode_profile_tier_level(gb, &ptl->sub_layer_ptl[i]) < 0) {
+ pr_err("PTL information for sublayer %i too short\n", i);
+ return -1;
+ }
+ if (ptl->sub_layer_level_present_flag[i]) {
+ if (get_bits_left(gb) < 8) {
+ pr_err("Not enough data for sublayer %i level_idc\n", i);
+ return -1;
+ } else
+ ptl->sub_layer_ptl[i].level_idc = get_bits(gb, 8);
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * decode_sublayer_hrd() - skip one sub_layer_hrd_parameters() entry;
+ * every value is read and discarded (only stream position matters).
+ */
+static void decode_sublayer_hrd(struct get_bits_context *gb,
+ u32 nb_cpb, int subpic_params_present)
+{
+ u32 i; /* unsigned, matching nb_cpb, to avoid a sign-compare */
+
+ for (i = 0; i < nb_cpb; i++) {
+ get_ue_golomb_long(gb); // bit_rate_value_minus1
+ get_ue_golomb_long(gb); // cpb_size_value_minus1
+
+ if (subpic_params_present) {
+ get_ue_golomb_long(gb); // cpb_size_du_value_minus1
+ get_ue_golomb_long(gb); // bit_rate_du_value_minus1
+ }
+ skip_bits1(gb); // cbr_flag
+ }
+}
+
+/*
+ * decode_hrd() - parse hrd_parameters(); all fields are skipped or
+ * discarded except basic validation of the per-sublayer CPB count.
+ *
+ * Returns 0 on success, -1 if nb_cpb is out of the [1, 32] range.
+ */
+static int decode_hrd(struct get_bits_context *gb,
+ int common_inf_present, int max_sublayers)
+{
+ int nal_params_present = 0, vcl_params_present = 0;
+ int subpic_params_present = 0;
+ int i;
+
+ if (common_inf_present) {
+ nal_params_present = get_bits1(gb);
+ vcl_params_present = get_bits1(gb);
+
+ if (nal_params_present || vcl_params_present) {
+ subpic_params_present = get_bits1(gb);
+
+ if (subpic_params_present) {
+ skip_bits(gb, 8); // tick_divisor_minus2
+ skip_bits(gb, 5); // du_cpb_removal_delay_increment_length_minus1
+ skip_bits(gb, 1); // sub_pic_cpb_params_in_pic_timing_sei_flag
+ skip_bits(gb, 5); // dpb_output_delay_du_length_minus1
+ }
+
+ skip_bits(gb, 4); // bit_rate_scale
+ skip_bits(gb, 4); // cpb_size_scale
+
+ if (subpic_params_present)
+ skip_bits(gb, 4); // cpb_size_du_scale
+
+ skip_bits(gb, 5); // initial_cpb_removal_delay_length_minus1
+ skip_bits(gb, 5); // au_cpb_removal_delay_length_minus1
+ skip_bits(gb, 5); // dpb_output_delay_length_minus1
+ }
+ }
+
+ for (i = 0; i < max_sublayers; i++) {
+ int low_delay = 0;
+ u32 nb_cpb = 1;
+ int fixed_rate = get_bits1(gb);
+
+ /* fixed_pic_rate_within_cvs_flag only read when general flag is 0 */
+ if (!fixed_rate)
+ fixed_rate = get_bits1(gb);
+
+ if (fixed_rate)
+ get_ue_golomb_long(gb); // elemental_duration_in_tc_minus1
+ else
+ low_delay = get_bits1(gb);
+
+ if (!low_delay) {
+ nb_cpb = get_ue_golomb_long(gb) + 1;
+ if (nb_cpb < 1 || nb_cpb > 32) {
+ pr_err("nb_cpb %d invalid\n", nb_cpb);
+ return -1;
+ }
+ }
+
+ if (nal_params_present)
+ decode_sublayer_hrd(gb, nb_cpb, subpic_params_present);
+ if (vcl_params_present)
+ decode_sublayer_hrd(gb, nb_cpb, subpic_params_present);
+ }
+ return 0;
+}
+
+/*
+ * ff_hevc_parse_vps() - parse a video_parameter_set_rbsp() into @vps.
+ * The NAL header and emulation-prevention removal are assumed to have
+ * been handled by the caller (gb points at the VPS payload).
+ *
+ * Returns 0 on success, -1 on any syntax or range violation.
+ */
+int ff_hevc_parse_vps(struct get_bits_context *gb, struct h265_VPS_t *vps)
+{
+ int i,j;
+ int vps_id = 0;
+
+ pr_info("Decoding VPS\n");
+
+ vps_id = get_bits(gb, 4);
+ if (vps_id >= HEVC_MAX_VPS_COUNT) {
+ pr_err("VPS id out of range: %d\n", vps_id);
+ goto err;
+ }
+
+ if (get_bits(gb, 2) != 3) { // vps_reserved_three_2bits
+ pr_err("vps_reserved_three_2bits is not three\n");
+ goto err;
+ }
+
+ vps->vps_max_layers = get_bits(gb, 6) + 1;
+ vps->vps_max_sub_layers = get_bits(gb, 3) + 1;
+ vps->vps_temporal_id_nesting_flag = get_bits1(gb);
+
+ if (get_bits(gb, 16) != 0xffff) { // vps_reserved_ffff_16bits
+ pr_err("vps_reserved_ffff_16bits is not 0xffff\n");
+ goto err;
+ }
+
+ if (vps->vps_max_sub_layers > HEVC_MAX_SUB_LAYERS) {
+ pr_err("vps_max_sub_layers out of range: %d\n",
+ vps->vps_max_sub_layers);
+ goto err;
+ }
+
+ if (parse_ptl(gb, &vps->ptl, vps->vps_max_sub_layers) < 0)
+ goto err;
+
+ vps->vps_sub_layer_ordering_info_present_flag = get_bits1(gb);
+
+ /* when the flag is 0 only the highest sub-layer entry is coded */
+ i = vps->vps_sub_layer_ordering_info_present_flag ? 0 : vps->vps_max_sub_layers - 1;
+ for (; i < vps->vps_max_sub_layers; i++) {
+ vps->vps_max_dec_pic_buffering[i] = get_ue_golomb_long(gb) + 1;
+ vps->vps_num_reorder_pics[i] = get_ue_golomb_long(gb);
+ vps->vps_max_latency_increase[i] = get_ue_golomb_long(gb) - 1;
+
+ if (vps->vps_max_dec_pic_buffering[i] > HEVC_MAX_DPB_SIZE || !vps->vps_max_dec_pic_buffering[i]) {
+ pr_err("vps_max_dec_pic_buffering_minus1 out of range: %d\n",
+ vps->vps_max_dec_pic_buffering[i] - 1);
+ goto err;
+ }
+ if (vps->vps_num_reorder_pics[i] > vps->vps_max_dec_pic_buffering[i] - 1) {
+ pr_err("vps_max_num_reorder_pics out of range: %d\n",
+ vps->vps_num_reorder_pics[i]);
+ goto err;
+ }
+ }
+
+ vps->vps_max_layer_id = get_bits(gb, 6);
+ vps->vps_num_layer_sets = get_ue_golomb_long(gb) + 1;
+ /* also rejects layer-set flag counts larger than the remaining bits */
+ if (vps->vps_num_layer_sets < 1 || vps->vps_num_layer_sets > 1024 ||
+ (vps->vps_num_layer_sets - 1LL) * (vps->vps_max_layer_id + 1LL) > get_bits_left(gb)) {
+ pr_err("too many layer_id_included_flags\n");
+ goto err;
+ }
+
+ for (i = 1; i < vps->vps_num_layer_sets; i++)
+ for (j = 0; j <= vps->vps_max_layer_id; j++)
+ skip_bits(gb, 1); // layer_id_included_flag[i][j]
+
+ vps->vps_timing_info_present_flag = get_bits1(gb);
+ if (vps->vps_timing_info_present_flag) {
+ vps->vps_num_units_in_tick = get_bits_long(gb, 32);
+ vps->vps_time_scale = get_bits_long(gb, 32);
+ vps->vps_poc_proportional_to_timing_flag = get_bits1(gb);
+ if (vps->vps_poc_proportional_to_timing_flag)
+ vps->vps_num_ticks_poc_diff_one = get_ue_golomb_long(gb) + 1;
+ vps->vps_num_hrd_parameters = get_ue_golomb_long(gb);
+ if (vps->vps_num_hrd_parameters > (u32)vps->vps_num_layer_sets) {
+ pr_err("vps_num_hrd_parameters %d is invalid\n", vps->vps_num_hrd_parameters);
+ goto err;
+ }
+ for (i = 0; i < vps->vps_num_hrd_parameters; i++) {
+ int common_inf_present = 1;
+
+ get_ue_golomb_long(gb); // hrd_layer_set_idx
+ if (i)
+ common_inf_present = get_bits1(gb);
+ decode_hrd(gb, common_inf_present, vps->vps_max_sub_layers);
+ }
+ }
+ get_bits1(gb); /* vps_extension_flag */
+
+ if (get_bits_left(gb) < 0) {
+ pr_err("Overread VPS by %d bits\n", -get_bits_left(gb));
+ goto err;
+ }
+
+ return 0;
+err:
+ return -1;
+}
+
+/*
+ * map_pixel_format() - derive sps->pix_fmt from bit_depth and
+ * chroma_format_idc, and set pixel_shift for >8-bit formats.
+ *
+ * Returns 0 on success, -1 for unsupported bit depths (only 8, 9, 10
+ * and 12 are handled).
+ */
+static int map_pixel_format(struct h265_SPS_t *sps)
+{
+ /*const AVPixFmtDescriptor *desc;*/
+ switch (sps->bit_depth) {
+ case 8:
+ if (sps->chroma_format_idc == 0) sps->pix_fmt = AV_PIX_FMT_GRAY8;
+ if (sps->chroma_format_idc == 1) sps->pix_fmt = AV_PIX_FMT_YUV420P;
+ if (sps->chroma_format_idc == 2) sps->pix_fmt = AV_PIX_FMT_YUV422P;
+ if (sps->chroma_format_idc == 3) sps->pix_fmt = AV_PIX_FMT_YUV444P;
+ break;
+ case 9:
+ if (sps->chroma_format_idc == 0) sps->pix_fmt = AV_PIX_FMT_GRAY9;
+ if (sps->chroma_format_idc == 1) sps->pix_fmt = AV_PIX_FMT_YUV420P9;
+ if (sps->chroma_format_idc == 2) sps->pix_fmt = AV_PIX_FMT_YUV422P9;
+ if (sps->chroma_format_idc == 3) sps->pix_fmt = AV_PIX_FMT_YUV444P9;
+ break;
+ case 10:
+ if (sps->chroma_format_idc == 0) sps->pix_fmt = AV_PIX_FMT_GRAY10;
+ if (sps->chroma_format_idc == 1) sps->pix_fmt = AV_PIX_FMT_YUV420P10;
+ if (sps->chroma_format_idc == 2) sps->pix_fmt = AV_PIX_FMT_YUV422P10;
+ if (sps->chroma_format_idc == 3) sps->pix_fmt = AV_PIX_FMT_YUV444P10;
+ break;
+ case 12:
+ if (sps->chroma_format_idc == 0) sps->pix_fmt = AV_PIX_FMT_GRAY12;
+ if (sps->chroma_format_idc == 1) sps->pix_fmt = AV_PIX_FMT_YUV420P12;
+ if (sps->chroma_format_idc == 2) sps->pix_fmt = AV_PIX_FMT_YUV422P12;
+ if (sps->chroma_format_idc == 3) sps->pix_fmt = AV_PIX_FMT_YUV444P12;
+ break;
+ default:
+ pr_info("The following bit-depths are currently specified: 8, 9, 10 and 12 bits, "
+ "chroma_format_idc is %d, depth is %d\n",
+ sps->chroma_format_idc, sps->bit_depth);
+ return -1;
+ }
+
+ /*desc = av_pix_fmt_desc_get(sps->pix_fmt);
+ if (!desc)
+ return AVERROR(EINVAL);
+
+ sps->hshift[0] = sps->vshift[0] = 0;
+ sps->hshift[2] = sps->hshift[1] = desc->log2_chroma_w;
+ sps->vshift[2] = sps->vshift[1] = desc->log2_chroma_h;*/
+
+ sps->pixel_shift = sps->bit_depth > 8;
+
+ return 0;
+}
+
+/*
+ * set_default_scaling_list_data() - load the spec default scaling
+ * lists into @sl: 4x4 lists are flat 16s, while 8x8/16x16/32x32 share
+ * the default 8x8 matrices (matrix ids 0-2 intra, 3-5 inter).
+ */
+static void set_default_scaling_list_data(struct ScalingList *sl)
+{
+ int size_id, matrix_id;
+
+ for (matrix_id = 0; matrix_id < 6; matrix_id++) {
+ // 4x4 default is 16
+ memset(sl->sl[0][matrix_id], 16, 16);
+ sl->sl_dc[0][matrix_id] = 16; // default for 16x16
+ sl->sl_dc[1][matrix_id] = 16; // default for 32x32
+ }
+
+ /* replaces 18 copy-pasted memcpy calls with an equivalent loop */
+ for (size_id = 1; size_id < 4; size_id++)
+ for (matrix_id = 0; matrix_id < 6; matrix_id++)
+ memcpy(sl->sl[size_id][matrix_id],
+ matrix_id < 3 ? default_scaling_list_intra :
+ default_scaling_list_inter, 64);
+}
+
+/*
+ * scaling_list_data() - parse scaling_list_data() syntax into @sl.
+ * Lists are either copied from a previously decoded list (prediction
+ * mode) or decoded explicitly as delta-coded coefficients placed in
+ * up-right diagonal scan order. For 4:4:4 streams the 32x32 chroma
+ * lists are duplicated from the 16x16 ones afterwards.
+ *
+ * Returns 0 on success, -1 on an invalid prediction delta.
+ */
+static int scaling_list_data(struct get_bits_context *gb,
+ struct ScalingList *sl, struct h265_SPS_t *sps)
+{
+ u8 scaling_list_pred_mode_flag;
+ int scaling_list_dc_coef[2][6];
+ int size_id, matrix_id, pos;
+ int i;
+
+ for (size_id = 0; size_id < 4; size_id++)
+ for (matrix_id = 0; matrix_id < 6; matrix_id += ((size_id == 3) ? 3 : 1)) {
+ scaling_list_pred_mode_flag = get_bits1(gb);
+ if (!scaling_list_pred_mode_flag) {
+ u32 delta = get_ue_golomb_long(gb);
+ /* Only need to handle non-zero delta. Zero means default,
+ * which should already be in the arrays. */
+ if (delta) {
+ // Copy from previous array.
+ delta *= (size_id == 3) ? 3 : 1;
+ if (matrix_id < delta) {
+ /* %u: delta is unsigned (u32) */
+ pr_err("Invalid delta in scaling list data: %u.\n", delta);
+ return -1;
+ }
+
+ memcpy(sl->sl[size_id][matrix_id],
+ sl->sl[size_id][matrix_id - delta],
+ size_id > 0 ? 64 : 16);
+ if (size_id > 1)
+ sl->sl_dc[size_id - 2][matrix_id] = sl->sl_dc[size_id - 2][matrix_id - delta];
+ }
+ } else {
+ int next_coef, coef_num;
+ int scaling_list_delta_coef;
+
+ next_coef = 8;
+ coef_num = FFMIN(64, 1 << (4 + (size_id << 1)));
+ if (size_id > 1) {
+ scaling_list_dc_coef[size_id - 2][matrix_id] = get_se_golomb(gb) + 8;
+ next_coef = scaling_list_dc_coef[size_id - 2][matrix_id];
+ sl->sl_dc[size_id - 2][matrix_id] = next_coef;
+ }
+ for (i = 0; i < coef_num; i++) {
+ if (size_id == 0)
+ pos = 4 * ff_hevc_diag_scan4x4_y[i] +
+ ff_hevc_diag_scan4x4_x[i];
+ else
+ pos = 8 * ff_hevc_diag_scan8x8_y[i] +
+ ff_hevc_diag_scan8x8_x[i];
+
+ scaling_list_delta_coef = get_se_golomb(gb);
+ next_coef = (next_coef + 256U + scaling_list_delta_coef) % 256;
+ sl->sl[size_id][matrix_id][pos] = next_coef;
+ }
+ }
+ }
+
+ if (sps->chroma_format_idc == 3) {
+ for (i = 0; i < 64; i++) {
+ sl->sl[3][1][i] = sl->sl[2][1][i];
+ sl->sl[3][2][i] = sl->sl[2][2][i];
+ sl->sl[3][4][i] = sl->sl[2][4][i];
+ sl->sl[3][5][i] = sl->sl[2][5][i];
+ }
+ sl->sl_dc[1][1] = sl->sl_dc[0][1];
+ sl->sl_dc[1][2] = sl->sl_dc[0][2];
+ sl->sl_dc[1][4] = sl->sl_dc[0][4];
+ sl->sl_dc[1][5] = sl->sl_dc[0][5];
+ }
+
+ return 0;
+}
+
+/*
+ * ff_hevc_decode_short_term_rps() - parse one st_ref_pic_set() entry.
+ *
+ * Two encodings exist: prediction from a previously decoded RPS
+ * (inter_ref_pic_set_prediction_flag) or an explicit list of negative
+ * and positive POC deltas. @is_slice_header selects where the
+ * reference RPS index comes from.
+ *
+ * Returns 0 on success, -1 on any range violation.
+ */
+int ff_hevc_decode_short_term_rps(struct get_bits_context *gb,
+ struct ShortTermRPS *rps, const struct h265_SPS_t *sps, int is_slice_header)
+{
+ u8 rps_predict = 0;
+ int delta_poc;
+ int k0 = 0;
+ int k1 = 0;
+ int k = 0;
+ int i;
+
+ if (rps != sps->st_rps && sps->nb_st_rps)
+ rps_predict = get_bits1(gb);
+
+ if (rps_predict) {
+ const struct ShortTermRPS *rps_ridx;
+ int delta_rps;
+ u32 abs_delta_rps;
+ u8 use_delta_flag = 0;
+ u8 delta_rps_sign;
+
+ if (is_slice_header) {
+ u32 delta_idx = get_ue_golomb_long(gb) + 1;
+ if (delta_idx > sps->nb_st_rps) {
+ pr_err("Invalid value of delta_idx in slice header RPS: %d > %d.\n",
+ delta_idx, sps->nb_st_rps);
+ return -1;
+ }
+ rps_ridx = &sps->st_rps[sps->nb_st_rps - delta_idx];
+ rps->rps_idx_num_delta_pocs = rps_ridx->num_delta_pocs;
+ } else
+ rps_ridx = &sps->st_rps[rps - sps->st_rps - 1];
+
+ delta_rps_sign = get_bits1(gb);
+ abs_delta_rps = get_ue_golomb_long(gb) + 1;
+ if (abs_delta_rps < 1 || abs_delta_rps > 32768) {
+ pr_err("Invalid value of abs_delta_rps: %d\n",
+ abs_delta_rps);
+ return -1;
+ }
+ delta_rps = (1 - (delta_rps_sign << 1)) * abs_delta_rps;
+ /*
+ * NOTE(review): rps->used[k]/delta_poc[k] are written inside this
+ * loop before the bound check below runs; confirm that the
+ * referenced RPS's num_delta_pocs is always within the array
+ * bounds so k cannot overflow rps->used first.
+ */
+ for (i = 0; i <= rps_ridx->num_delta_pocs; i++) {
+ int used = rps->used[k] = get_bits1(gb);
+
+ if (!used)
+ use_delta_flag = get_bits1(gb);
+
+ if (used || use_delta_flag) {
+ if (i < rps_ridx->num_delta_pocs)
+ delta_poc = delta_rps + rps_ridx->delta_poc[i];
+ else
+ delta_poc = delta_rps;
+ rps->delta_poc[k] = delta_poc;
+ if (delta_poc < 0)
+ k0++;
+ else
+ k1++;
+ k++;
+ }
+ }
+
+ if (k >= ARRAY_SIZE(rps->used)) {
+ pr_err( "Invalid num_delta_pocs: %d\n", k);
+ return -1;
+ }
+
+ rps->num_delta_pocs = k;
+ rps->num_negative_pics = k0;
+ // sort in increasing order (smallest first)
+ if (rps->num_delta_pocs != 0) {
+ int used, tmp;
+ for (i = 1; i < rps->num_delta_pocs; i++) {
+ delta_poc = rps->delta_poc[i];
+ used = rps->used[i];
+ for (k = i - 1; k >= 0; k--) {
+ tmp = rps->delta_poc[k];
+ if (delta_poc < tmp) {
+ rps->delta_poc[k + 1] = tmp;
+ rps->used[k + 1] = rps->used[k];
+ rps->delta_poc[k] = delta_poc;
+ rps->used[k] = used;
+ }
+ }
+ }
+ }
+ if ((rps->num_negative_pics >> 1) != 0) {
+ int used;
+ k = rps->num_negative_pics - 1;
+ // flip the negative values to largest first
+ for (i = 0; i < rps->num_negative_pics >> 1; i++) {
+ delta_poc = rps->delta_poc[i];
+ used = rps->used[i];
+ rps->delta_poc[i] = rps->delta_poc[k];
+ rps->used[i] = rps->used[k];
+ rps->delta_poc[k] = delta_poc;
+ rps->used[k] = used;
+ k--;
+ }
+ }
+ } else {
+ u32 prev, nb_positive_pics;
+ rps->num_negative_pics = get_ue_golomb_long(gb);
+ nb_positive_pics = get_ue_golomb_long(gb);
+
+ if (rps->num_negative_pics >= HEVC_MAX_REFS ||
+ nb_positive_pics >= HEVC_MAX_REFS) {
+ pr_err("Too many refs in a short term RPS.\n");
+ return -1;
+ }
+
+ rps->num_delta_pocs = rps->num_negative_pics + nb_positive_pics;
+ if (rps->num_delta_pocs) {
+ prev = 0;
+ for (i = 0; i < rps->num_negative_pics; i++) {
+ delta_poc = get_ue_golomb_long(gb) + 1;
+ if (delta_poc < 1 || delta_poc > 32768) {
+ pr_err("Invalid value of delta_poc: %d\n",
+ delta_poc);
+ return -1;
+ }
+ prev -= delta_poc;
+ rps->delta_poc[i] = prev;
+ rps->used[i] = get_bits1(gb);
+ }
+ prev = 0;
+ for (i = 0; i < nb_positive_pics; i++) {
+ delta_poc = get_ue_golomb_long(gb) + 1;
+ if (delta_poc < 1 || delta_poc > 32768) {
+ pr_err("Invalid value of delta_poc: %d\n",
+ delta_poc);
+ return -1;
+ }
+ prev += delta_poc;
+ rps->delta_poc[rps->num_negative_pics + i] = prev;
+ rps->used[rps->num_negative_pics + i] = get_bits1(gb);
+ }
+ }
+ }
+ return 0;
+}
+
+/*
+ * decode_vui() - parse vui_parameters() into sps->vui.
+ *
+ * Some streams use an alternate VUI layout where the timing info sits
+ * at the def_disp_win position; on a suspicious short read the parser
+ * state is restored from a backup and parsing retries from the
+ * timing_info label with alt set.
+ */
+static void decode_vui(struct get_bits_context *gb, struct h265_SPS_t *sps)
+{
+ struct VUI backup_vui, *vui = &sps->vui;
+ struct get_bits_context backup;
+ int sar_present, alt = 0;
+
+ pr_info("Decoding VUI\n");
+
+ sar_present = get_bits1(gb);
+ if (sar_present) {
+ u8 sar_idx = get_bits(gb, 8);
+ if (sar_idx < ARRAY_SIZE(vui_sar))
+ vui->sar = vui_sar[sar_idx];
+ else if (sar_idx == 255) {
+ /* EXTENDED_SAR: explicit num/den follow */
+ vui->sar.num = get_bits(gb, 16);
+ vui->sar.den = get_bits(gb, 16);
+ } else
+ pr_info("Unknown SAR index: %u.\n", sar_idx);
+ }
+
+ vui->overscan_info_present_flag = get_bits1(gb);
+ if (vui->overscan_info_present_flag)
+ vui->overscan_appropriate_flag = get_bits1(gb);
+
+ vui->video_signal_type_present_flag = get_bits1(gb);
+ if (vui->video_signal_type_present_flag) {
+ vui->video_format = get_bits(gb, 3);
+ vui->video_full_range_flag = get_bits1(gb);
+ vui->colour_description_present_flag = get_bits1(gb);
+ if (vui->video_full_range_flag && sps->pix_fmt == AV_PIX_FMT_YUV420P)
+ sps->pix_fmt = AV_PIX_FMT_YUVJ420P;
+ if (vui->colour_description_present_flag) {
+ vui->colour_primaries = get_bits(gb, 8);
+ vui->transfer_characteristic = get_bits(gb, 8);
+ vui->matrix_coeffs = get_bits(gb, 8);
+
+ // Set invalid values to "unspecified"
+ if (!av_color_primaries_name(vui->colour_primaries))
+ vui->colour_primaries = AVCOL_PRI_UNSPECIFIED;
+ if (!av_color_transfer_name(vui->transfer_characteristic))
+ vui->transfer_characteristic = AVCOL_TRC_UNSPECIFIED;
+ if (!av_color_space_name(vui->matrix_coeffs))
+ vui->matrix_coeffs = AVCOL_SPC_UNSPECIFIED;
+ if (vui->matrix_coeffs == AVCOL_SPC_RGB) {
+ switch (sps->pix_fmt) {
+ case AV_PIX_FMT_YUV444P:
+ sps->pix_fmt = AV_PIX_FMT_GBRP;
+ break;
+ case AV_PIX_FMT_YUV444P10:
+ sps->pix_fmt = AV_PIX_FMT_GBRP10;
+ break;
+ case AV_PIX_FMT_YUV444P12:
+ sps->pix_fmt = AV_PIX_FMT_GBRP12;
+ break;
+ }
+ }
+ }
+ }
+
+ vui->chroma_loc_info_present_flag = get_bits1(gb);
+ if (vui->chroma_loc_info_present_flag) {
+ vui->chroma_sample_loc_type_top_field = get_ue_golomb_long(gb);
+ vui->chroma_sample_loc_type_bottom_field = get_ue_golomb_long(gb);
+ }
+
+ vui->neutra_chroma_indication_flag = get_bits1(gb);
+ vui->field_seq_flag = get_bits1(gb);
+ vui->frame_field_info_present_flag = get_bits1(gb);
+
+ // Backup context in case an alternate header is detected
+ memcpy(&backup, gb, sizeof(backup));
+ memcpy(&backup_vui, vui, sizeof(backup_vui));
+ if (get_bits_left(gb) >= 68 && show_bits_long(gb, 21) == 0x100000) {
+ vui->default_display_window_flag = 0;
+ pr_info("Invalid default display window\n");
+ } else
+ vui->default_display_window_flag = get_bits1(gb);
+
+ if (vui->default_display_window_flag) {
+ int vert_mult = hevc_sub_height_c[sps->chroma_format_idc];
+ int horiz_mult = hevc_sub_width_c[sps->chroma_format_idc];
+ vui->def_disp_win.left_offset = get_ue_golomb_long(gb) * horiz_mult;
+ vui->def_disp_win.right_offset = get_ue_golomb_long(gb) * horiz_mult;
+ vui->def_disp_win.top_offset = get_ue_golomb_long(gb) * vert_mult;
+ vui->def_disp_win.bottom_offset = get_ue_golomb_long(gb) * vert_mult;
+ }
+
+timing_info:
+ vui->vui_timing_info_present_flag = get_bits1(gb);
+
+ if (vui->vui_timing_info_present_flag) {
+ if (get_bits_left(gb) < 66 && !alt) {
+ // The alternate syntax seem to have timing info located
+ // at where def_disp_win is normally located
+ pr_info("Strange VUI timing information, retrying...\n");
+ memcpy(vui, &backup_vui, sizeof(backup_vui));
+ memcpy(gb, &backup, sizeof(backup));
+ alt = 1;
+ goto timing_info;
+ }
+ vui->vui_num_units_in_tick = get_bits_long(gb, 32);
+ vui->vui_time_scale = get_bits_long(gb, 32);
+ if (alt) {
+ pr_info("Retry got %u/%u fps\n",
+ vui->vui_time_scale, vui->vui_num_units_in_tick);
+ }
+ vui->vui_poc_proportional_to_timing_flag = get_bits1(gb);
+ if (vui->vui_poc_proportional_to_timing_flag)
+ vui->vui_num_ticks_poc_diff_one_minus1 = get_ue_golomb_long(gb);
+ vui->vui_hrd_parameters_present_flag = get_bits1(gb);
+ if (vui->vui_hrd_parameters_present_flag)
+ decode_hrd(gb, 1, sps->max_sub_layers);
+ }
+
+ vui->bitstream_restriction_flag = get_bits1(gb);
+ if (vui->bitstream_restriction_flag) {
+ if (get_bits_left(gb) < 8 && !alt) {
+ pr_info("Strange VUI bitstream restriction information, retrying"
+ " from timing information...\n");
+ memcpy(vui, &backup_vui, sizeof(backup_vui));
+ memcpy(gb, &backup, sizeof(backup));
+ alt = 1;
+ goto timing_info;
+ }
+ vui->tiles_fixed_structure_flag = get_bits1(gb);
+ vui->motion_vectors_over_pic_boundaries_flag = get_bits1(gb);
+ vui->restricted_ref_pic_lists_flag = get_bits1(gb);
+ vui->min_spatial_segmentation_idc = get_ue_golomb_long(gb);
+ vui->max_bytes_per_pic_denom = get_ue_golomb_long(gb);
+ vui->max_bits_per_min_cu_denom = get_ue_golomb_long(gb);
+ vui->log2_max_mv_length_horizontal = get_ue_golomb_long(gb);
+ vui->log2_max_mv_length_vertical = get_ue_golomb_long(gb);
+ }
+
+ if (get_bits_left(gb) < 1 && !alt) {
+ // XXX: Alternate syntax when sps_range_extension_flag != 0?
+ pr_info("Overread in VUI, retrying from timing information...\n");
+ memcpy(vui, &backup_vui, sizeof(backup_vui));
+ memcpy(gb, &backup, sizeof(backup));
+ alt = 1;
+ goto timing_info;
+ }
+}
+
+/*
+ * Parse an HEVC sequence parameter set (SPS) RBSP from the bit reader
+ * into *sps, following the syntax order of ITU-T H.265 7.3.2.2.
+ * Returns 0 on success, negative on any syntax or range violation.
+ */
+int ff_hevc_parse_sps(struct get_bits_context *gb, struct h265_SPS_t *sps)
+{
+ int i, ret = 0;
+ int log2_diff_max_min_transform_block_size;
+ int bit_depth_chroma, start, vui_present, sublayer_ordering_info;
+ struct HEVCWindow *ow;
+
+ sps->vps_id = get_bits(gb, 4);
+ if (sps->vps_id >= HEVC_MAX_VPS_COUNT) {
+ pr_err("VPS id out of range: %d\n", sps->vps_id);
+ return -1;
+ }
+
+ sps->max_sub_layers = get_bits(gb, 3) + 1;
+ if (sps->max_sub_layers > HEVC_MAX_SUB_LAYERS) {
+ pr_err("sps_max_sub_layers out of range: %d\n",
+ sps->max_sub_layers);
+ return -1;
+ }
+
+ sps->temporal_id_nesting_flag = get_bits(gb, 1);
+
+ /* profile_tier_level() must be consumed before sps_seq_parameter_set_id. */
+ if ((ret = parse_ptl(gb, &sps->ptl, sps->max_sub_layers)) < 0)
+ return ret;
+
+ sps->sps_id = get_ue_golomb_long(gb);
+ if (sps->sps_id >= HEVC_MAX_SPS_COUNT) {
+ pr_err("SPS id out of range: %d\n", sps->sps_id);
+ return -1;
+ }
+
+ sps->chroma_format_idc = get_ue_golomb_long(gb);
+ if (sps->chroma_format_idc > 3U) {
+ pr_err("chroma_format_idc %d is invalid\n", sps->chroma_format_idc);
+ return -1;
+ }
+
+ if (sps->chroma_format_idc == 3)
+ sps->separate_colour_plane_flag = get_bits1(gb);
+
+ /* separate colour planes are coded as monochrome (idc 0). */
+ if (sps->separate_colour_plane_flag)
+ sps->chroma_format_idc = 0;
+
+ sps->width = get_ue_golomb_long(gb);
+ sps->height = get_ue_golomb_long(gb);
+ if (sps->width > 8192 || sps->height > 8192) {
+ pr_err("width or height oversize.\n");
+ return -1;
+ }
+
+ if (get_bits1(gb)) { // pic_conformance_flag
+ /* offsets are coded in chroma sample units; scale to luma. */
+ int vert_mult = hevc_sub_height_c[sps->chroma_format_idc];
+ int horiz_mult = hevc_sub_width_c[sps->chroma_format_idc];
+ sps->pic_conf_win.left_offset = get_ue_golomb_long(gb) * horiz_mult;
+ sps->pic_conf_win.right_offset = get_ue_golomb_long(gb) * horiz_mult;
+ sps->pic_conf_win.top_offset = get_ue_golomb_long(gb) * vert_mult;
+ sps->pic_conf_win.bottom_offset = get_ue_golomb_long(gb) * vert_mult;
+ sps->output_window = sps->pic_conf_win;
+ }
+
+ sps->bit_depth = get_ue_golomb_long(gb) + 8;
+ bit_depth_chroma = get_ue_golomb_long(gb) + 8;
+ if (sps->chroma_format_idc && bit_depth_chroma != sps->bit_depth) {
+ pr_err("Luma bit depth (%d) is different from chroma bit depth (%d), this is unsupported.\n",
+ sps->bit_depth, bit_depth_chroma);
+ return -1;
+ }
+ sps->bit_depth_chroma = bit_depth_chroma;
+
+ ret = map_pixel_format(sps);
+ if (ret < 0)
+ return ret;
+
+ sps->log2_max_poc_lsb = get_ue_golomb_long(gb) + 4;
+ if (sps->log2_max_poc_lsb > 16) {
+ pr_err("log2_max_pic_order_cnt_lsb_minus4 out range: %d\n",
+ sps->log2_max_poc_lsb - 4);
+ return -1;
+ }
+
+ /* sps_sub_layer_ordering_info: either per-layer or a single set
+  * applied to the highest layer and copied below. */
+ sublayer_ordering_info = get_bits1(gb);
+ start = sublayer_ordering_info ? 0 : sps->max_sub_layers - 1;
+ for (i = start; i < sps->max_sub_layers; i++) {
+ sps->temporal_layer[i].max_dec_pic_buffering = get_ue_golomb_long(gb) + 1;
+ sps->temporal_layer[i].num_reorder_pics = get_ue_golomb_long(gb);
+ sps->temporal_layer[i].max_latency_increase = get_ue_golomb_long(gb) - 1;
+ if (sps->temporal_layer[i].max_dec_pic_buffering > (u32)HEVC_MAX_DPB_SIZE) {
+ pr_err("sps_max_dec_pic_buffering_minus1 out of range: %d\n",
+ sps->temporal_layer[i].max_dec_pic_buffering - 1U);
+ return -1;
+ }
+ if (sps->temporal_layer[i].num_reorder_pics > sps->temporal_layer[i].max_dec_pic_buffering - 1) {
+ pr_info("sps_max_num_reorder_pics out of range: %d\n",
+ sps->temporal_layer[i].num_reorder_pics);
+ if (sps->temporal_layer[i].num_reorder_pics > HEVC_MAX_DPB_SIZE - 1) {
+ return -1;
+ }
+ /* tolerate the violation by growing the DPB requirement. */
+ sps->temporal_layer[i].max_dec_pic_buffering = sps->temporal_layer[i].num_reorder_pics + 1;
+ }
+ }
+
+ if (!sublayer_ordering_info) {
+ for (i = 0; i < start; i++) {
+ sps->temporal_layer[i].max_dec_pic_buffering = sps->temporal_layer[start].max_dec_pic_buffering;
+ sps->temporal_layer[i].num_reorder_pics = sps->temporal_layer[start].num_reorder_pics;
+ sps->temporal_layer[i].max_latency_increase = sps->temporal_layer[start].max_latency_increase;
+ }
+ }
+
+ sps->log2_min_cb_size = get_ue_golomb_long(gb) + 3;
+ sps->log2_diff_max_min_coding_block_size = get_ue_golomb_long(gb);
+ sps->log2_min_tb_size = get_ue_golomb_long(gb) + 2;
+ log2_diff_max_min_transform_block_size = get_ue_golomb_long(gb);
+ sps->log2_max_trafo_size = log2_diff_max_min_transform_block_size + sps->log2_min_tb_size;
+
+ if (sps->log2_min_cb_size < 3 || sps->log2_min_cb_size > 30) {
+ pr_err("Invalid value %d for log2_min_cb_size", sps->log2_min_cb_size);
+ return -1;
+ }
+
+ if (sps->log2_diff_max_min_coding_block_size > 30) {
+ pr_err("Invalid value %d for log2_diff_max_min_coding_block_size", sps->log2_diff_max_min_coding_block_size);
+ return -1;
+ }
+
+ if (sps->log2_min_tb_size >= sps->log2_min_cb_size || sps->log2_min_tb_size < 2) {
+ pr_err("Invalid value for log2_min_tb_size");
+ return -1;
+ }
+
+ if (log2_diff_max_min_transform_block_size < 0 || log2_diff_max_min_transform_block_size > 30) {
+ pr_err("Invalid value %d for log2_diff_max_min_transform_block_size", log2_diff_max_min_transform_block_size);
+ return -1;
+ }
+
+ sps->max_transform_hierarchy_depth_inter = get_ue_golomb_long(gb);
+ sps->max_transform_hierarchy_depth_intra = get_ue_golomb_long(gb);
+
+ sps->scaling_list_enable_flag = get_bits1(gb);
+ if (sps->scaling_list_enable_flag) {
+ set_default_scaling_list_data(&sps->scaling_list);
+
+ /* sps_scaling_list_data_present_flag */
+ if (get_bits1(gb)) {
+ ret = scaling_list_data(gb, &sps->scaling_list, sps);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ sps->amp_enabled_flag = get_bits1(gb);
+ sps->sao_enabled = get_bits1(gb);
+
+ sps->pcm_enabled_flag = get_bits1(gb);
+ if (sps->pcm_enabled_flag) {
+ sps->pcm.bit_depth = get_bits(gb, 4) + 1;
+ sps->pcm.bit_depth_chroma = get_bits(gb, 4) + 1;
+ sps->pcm.log2_min_pcm_cb_size = get_ue_golomb_long(gb) + 3;
+ sps->pcm.log2_max_pcm_cb_size = sps->pcm.log2_min_pcm_cb_size +
+ get_ue_golomb_long(gb);
+ if (FFMAX(sps->pcm.bit_depth, sps->pcm.bit_depth_chroma) > sps->bit_depth) {
+ pr_err("PCM bit depth (%d, %d) is greater than normal bit depth (%d)\n",
+ sps->pcm.bit_depth, sps->pcm.bit_depth_chroma, sps->bit_depth);
+ return -1;
+ }
+
+ sps->pcm.loop_filter_disable_flag = get_bits1(gb);
+ }
+
+ sps->nb_st_rps = get_ue_golomb_long(gb);
+ if (sps->nb_st_rps > HEVC_MAX_SHORT_TERM_REF_PIC_SETS) {
+ pr_err("Too many short term RPS: %d.\n", sps->nb_st_rps);
+ return -1;
+ }
+ for (i = 0; i < sps->nb_st_rps; i++) {
+ if ((ret = ff_hevc_decode_short_term_rps(gb, &sps->st_rps[i], sps, 0)) < 0)
+ return ret;
+ }
+
+ sps->long_term_ref_pics_present_flag = get_bits1(gb);
+ if (sps->long_term_ref_pics_present_flag) {
+ sps->num_long_term_ref_pics_sps = get_ue_golomb_long(gb);
+ if (sps->num_long_term_ref_pics_sps > HEVC_MAX_LONG_TERM_REF_PICS) {
+ pr_err("Too many long term ref pics: %d.\n",
+ sps->num_long_term_ref_pics_sps);
+ return -1;
+ }
+ for (i = 0; i < sps->num_long_term_ref_pics_sps; i++) {
+ sps->lt_ref_pic_poc_lsb_sps[i] = get_bits(gb, sps->log2_max_poc_lsb);
+ sps->used_by_curr_pic_lt_sps_flag[i] = get_bits1(gb);
+ }
+ }
+
+ sps->sps_temporal_mvp_enabled_flag = get_bits1(gb);
+ sps->sps_strong_intra_smoothing_enable_flag = get_bits1(gb);
+ sps->vui.sar = (struct AVRational){0, 1};
+ vui_present = get_bits1(gb);
+ if (vui_present)
+ decode_vui(gb, sps);
+
+ if (get_bits1(gb)) { // sps_extension_flag
+ sps->sps_range_extension_flag = get_bits1(gb);
+ skip_bits(gb, 7); //sps_extension_7bits = get_bits(gb, 7);
+ if (sps->sps_range_extension_flag) {
+ sps->transform_skip_rotation_enabled_flag = get_bits1(gb);
+ sps->transform_skip_context_enabled_flag = get_bits1(gb);
+ sps->implicit_rdpcm_enabled_flag = get_bits1(gb);
+ sps->explicit_rdpcm_enabled_flag = get_bits1(gb);
+ sps->extended_precision_processing_flag = get_bits1(gb);
+ if (sps->extended_precision_processing_flag)
+ pr_info("extended_precision_processing_flag not yet implemented\n");
+
+ sps->intra_smoothing_disabled_flag = get_bits1(gb);
+ sps->high_precision_offsets_enabled_flag = get_bits1(gb);
+ if (sps->high_precision_offsets_enabled_flag)
+ pr_info("high_precision_offsets_enabled_flag not yet implemented\n");
+
+ sps->persistent_rice_adaptation_enabled_flag = get_bits1(gb);
+ sps->cabac_bypass_alignment_enabled_flag = get_bits1(gb);
+ if (sps->cabac_bypass_alignment_enabled_flag)
+ pr_info("cabac_bypass_alignment_enabled_flag not yet implemented\n");
+ }
+ }
+
+ /* Reject cropping windows that overflow or swallow the whole frame. */
+ ow = &sps->output_window;
+ if (ow->left_offset >= INT_MAX - ow->right_offset ||
+ ow->top_offset >= INT_MAX - ow->bottom_offset ||
+ ow->left_offset + ow->right_offset >= sps->width ||
+ ow->top_offset + ow->bottom_offset >= sps->height) {
+ pr_err("Invalid cropping offsets: %u/%u/%u/%u\n",
+ ow->left_offset, ow->right_offset, ow->top_offset, ow->bottom_offset);
+ return -1;
+ }
+
+ // Inferred parameters
+ sps->log2_ctb_size = sps->log2_min_cb_size +
+ sps->log2_diff_max_min_coding_block_size;
+ sps->log2_min_pu_size = sps->log2_min_cb_size - 1;
+
+ if (sps->log2_ctb_size > HEVC_MAX_LOG2_CTB_SIZE) {
+ pr_err("CTB size out of range: 2^%d\n", sps->log2_ctb_size);
+ return -1;
+ }
+ if (sps->log2_ctb_size < 4) {
+ pr_err("log2_ctb_size %d differs from the bounds of any known profile\n", sps->log2_ctb_size);
+ pr_err("log2_ctb_size %d", sps->log2_ctb_size);
+ return -1;
+ }
+
+ sps->ctb_width = (sps->width + (1 << sps->log2_ctb_size) - 1) >> sps->log2_ctb_size;
+ sps->ctb_height = (sps->height + (1 << sps->log2_ctb_size) - 1) >> sps->log2_ctb_size;
+ sps->ctb_size = sps->ctb_width * sps->ctb_height;
+
+ sps->min_cb_width = sps->width >> sps->log2_min_cb_size;
+ sps->min_cb_height = sps->height >> sps->log2_min_cb_size;
+ sps->min_tb_width = sps->width >> sps->log2_min_tb_size;
+ sps->min_tb_height = sps->height >> sps->log2_min_tb_size;
+ sps->min_pu_width = sps->width >> sps->log2_min_pu_size;
+ sps->min_pu_height = sps->height >> sps->log2_min_pu_size;
+ sps->tb_mask = (1 << (sps->log2_ctb_size - sps->log2_min_tb_size)) - 1;
+ sps->qp_bd_offset = 6 * (sps->bit_depth - 8);
+
+ /* frame dimensions must be multiples of the minimum CB size. */
+ if (av_mod_uintp2(sps->width, sps->log2_min_cb_size) ||
+ av_mod_uintp2(sps->height, sps->log2_min_cb_size)) {
+ pr_err("Invalid coded frame dimensions.\n");
+ return -1;
+ }
+
+ if (sps->max_transform_hierarchy_depth_inter > sps->log2_ctb_size - sps->log2_min_tb_size) {
+ pr_err("max_transform_hierarchy_depth_inter out of range: %d\n",
+ sps->max_transform_hierarchy_depth_inter);
+ return -1;
+ }
+ if (sps->max_transform_hierarchy_depth_intra > sps->log2_ctb_size - sps->log2_min_tb_size) {
+ pr_err("max_transform_hierarchy_depth_intra out of range: %d\n",
+ sps->max_transform_hierarchy_depth_intra);
+ return -1;
+ }
+ if (sps->log2_max_trafo_size > FFMIN(sps->log2_ctb_size, 5)) {
+ pr_err("max transform block size out of range: %d\n",
+ sps->log2_max_trafo_size);
+ return -1;
+ }
+
+ /* negative bits-left means we consumed past the RBSP end. */
+ if (get_bits_left(gb) < 0) {
+ pr_err("Overread SPS by %d bits\n", -get_bits_left(gb));
+ return -1;
+ }
+
+ pr_info("Parsed SPS: id %d; ref: %d, coded wxh: %dx%d, cropped wxh: %dx%d; pix_fmt: %d.\n",
+ sps->sps_id, sps->temporal_layer[0].num_reorder_pics, sps->width, sps->height,
+ sps->width - (sps->output_window.left_offset + sps->output_window.right_offset),
+ sps->height - (sps->output_window.top_offset + sps->output_window.bottom_offset),
+ sps->pix_fmt);
+
+ return 0;
+}
+
+/*
+ * Printable names for all 64 possible HEVC nal_unit_type values
+ * (the type is a 6-bit field), indexed directly by nal_unit_type.
+ */
+const char *hevc_nal_type_name[64] = {
+ "TRAIL_N", // HEVC_NAL_TRAIL_N
+ "TRAIL_R", // HEVC_NAL_TRAIL_R
+ "TSA_N", // HEVC_NAL_TSA_N
+ "TSA_R", // HEVC_NAL_TSA_R
+ "STSA_N", // HEVC_NAL_STSA_N
+ "STSA_R", // HEVC_NAL_STSA_R
+ "RADL_N", // HEVC_NAL_RADL_N
+ "RADL_R", // HEVC_NAL_RADL_R
+ "RASL_N", // HEVC_NAL_RASL_N
+ "RASL_R", // HEVC_NAL_RASL_R
+ "RSV_VCL_N10", // HEVC_NAL_VCL_N10
+ "RSV_VCL_R11", // HEVC_NAL_VCL_R11
+ "RSV_VCL_N12", // HEVC_NAL_VCL_N12
+ "RSV_VLC_R13", // HEVC_NAL_VCL_R13
+ "RSV_VCL_N14", // HEVC_NAL_VCL_N14
+ "RSV_VCL_R15", // HEVC_NAL_VCL_R15
+ "BLA_W_LP", // HEVC_NAL_BLA_W_LP
+ "BLA_W_RADL", // HEVC_NAL_BLA_W_RADL
+ "BLA_N_LP", // HEVC_NAL_BLA_N_LP
+ "IDR_W_RADL", // HEVC_NAL_IDR_W_RADL
+ "IDR_N_LP", // HEVC_NAL_IDR_N_LP
+ "CRA_NUT", // HEVC_NAL_CRA_NUT
+ "IRAP_IRAP_VCL22", // HEVC_NAL_IRAP_VCL22
+ "IRAP_IRAP_VCL23", // HEVC_NAL_IRAP_VCL23
+ "RSV_VCL24", // HEVC_NAL_RSV_VCL24
+ "RSV_VCL25", // HEVC_NAL_RSV_VCL25
+ "RSV_VCL26", // HEVC_NAL_RSV_VCL26
+ "RSV_VCL27", // HEVC_NAL_RSV_VCL27
+ "RSV_VCL28", // HEVC_NAL_RSV_VCL28
+ "RSV_VCL29", // HEVC_NAL_RSV_VCL29
+ "RSV_VCL30", // HEVC_NAL_RSV_VCL30
+ "RSV_VCL31", // HEVC_NAL_RSV_VCL31
+ "VPS", // HEVC_NAL_VPS
+ "SPS", // HEVC_NAL_SPS
+ "PPS", // HEVC_NAL_PPS
+ "AUD", // HEVC_NAL_AUD
+ "EOS_NUT", // HEVC_NAL_EOS_NUT
+ "EOB_NUT", // HEVC_NAL_EOB_NUT
+ "FD_NUT", // HEVC_NAL_FD_NUT
+ "SEI_PREFIX", // HEVC_NAL_SEI_PREFIX
+ "SEI_SUFFIX", // HEVC_NAL_SEI_SUFFIX
+ "RSV_NVCL41", // HEVC_NAL_RSV_NVCL41
+ "RSV_NVCL42", // HEVC_NAL_RSV_NVCL42
+ "RSV_NVCL43", // HEVC_NAL_RSV_NVCL43
+ "RSV_NVCL44", // HEVC_NAL_RSV_NVCL44
+ "RSV_NVCL45", // HEVC_NAL_RSV_NVCL45
+ "RSV_NVCL46", // HEVC_NAL_RSV_NVCL46
+ "RSV_NVCL47", // HEVC_NAL_RSV_NVCL47
+ "UNSPEC48", // HEVC_NAL_UNSPEC48
+ "UNSPEC49", // HEVC_NAL_UNSPEC49
+ "UNSPEC50", // HEVC_NAL_UNSPEC50
+ "UNSPEC51", // HEVC_NAL_UNSPEC51
+ "UNSPEC52", // HEVC_NAL_UNSPEC52
+ "UNSPEC53", // HEVC_NAL_UNSPEC53
+ "UNSPEC54", // HEVC_NAL_UNSPEC54
+ "UNSPEC55", // HEVC_NAL_UNSPEC55
+ "UNSPEC56", // HEVC_NAL_UNSPEC56
+ "UNSPEC57", // HEVC_NAL_UNSPEC57
+ "UNSPEC58", // HEVC_NAL_UNSPEC58
+ "UNSPEC59", // HEVC_NAL_UNSPEC59
+ "UNSPEC60", // HEVC_NAL_UNSPEC60
+ "UNSPEC61", // HEVC_NAL_UNSPEC61
+ "UNSPEC62", // HEVC_NAL_UNSPEC62
+ "UNSPEC63", // HEVC_NAL_UNSPEC63
+};
+
+/*
+ * Map an HEVC nal_unit_type to its printable name (logging only).
+ * The type normally comes from a 6-bit field (0..63); guard the table
+ * index anyway so a bogus caller cannot read out of bounds.
+ */
+static const char *hevc_nal_unit_name(int nal_type)
+{
+ if (nal_type < 0 || nal_type > 63)
+ return "UNKNOWN";
+
+ return hevc_nal_type_name[nal_type];
+}
+
+/**
+* Parse one NAL unit from codec extradata and decode basic
+* parameter-set information (VPS/SPS).
+*
+* @param data buffer holding the NAL unit, preceded by a start code.
+* @param size size of the buffer in bytes.
+* @param ps   decoded parameter-set state; the *_parsed flags are set
+*             when the corresponding set is parsed successfully.
+*/
+/*
+ * Parse a single NAL unit of extradata into *ps.
+ *
+ * Returns 0 on success or a negative error code.  Previously the
+ * function always returned 0, silently discarding failures from the
+ * NAL header checks and the VPS/SPS parsers, which made the caller's
+ * error handling dead code.
+ */
+static int decode_extradata_ps(u8 *data, int size, struct h265_param_sets *ps)
+{
+ int ret = 0;
+ struct get_bits_context gb;
+ u32 src_len, rbsp_size = 0;
+ u8 *rbsp_buf = NULL;
+ int nalu_pos, nuh_layer_id, temporal_id;
+ u32 nal_type;
+ u8 *p = data;
+ u32 len = size;
+
+ nalu_pos = find_start_code(p, len);
+ if (nalu_pos < 0)
+ return -1;
+
+ src_len = calc_nal_len(p + nalu_pos, size - nalu_pos);
+ rbsp_buf = nal_unit_extract_rbsp(p + nalu_pos, src_len, &rbsp_size);
+ if (rbsp_buf == NULL)
+ return -ENOMEM;
+
+ ret = init_get_bits8(&gb, rbsp_buf, rbsp_size);
+ if (ret < 0)
+ goto out;
+
+ /* forbidden_zero_bit must be 0. */
+ if (get_bits1(&gb) != 0) {
+ pr_err("invalid data, return!\n");
+ ret = -1;
+ goto out;
+ }
+
+ nal_type = get_bits(&gb, 6);
+ nuh_layer_id = get_bits(&gb, 6);
+ temporal_id = get_bits(&gb, 3) - 1;
+ if (temporal_id < 0) {
+ /* nuh_temporal_id_plus1 == 0 is forbidden by the spec. */
+ ret = -1;
+ goto out;
+ }
+
+ /*pr_info("nal_unit_type: %d(%s), nuh_layer_id: %d, temporal_id: %d\n",
+ nal_type, hevc_nal_unit_name(nal_type),
+ nuh_layer_id, temporal_id);*/
+
+ switch (nal_type) {
+ case HEVC_NAL_VPS:
+ ret = ff_hevc_parse_vps(&gb, &ps->vps);
+ if (ret < 0)
+ goto out;
+ ps->vps_parsed = true;
+ break;
+ case HEVC_NAL_SPS:
+ ret = ff_hevc_parse_sps(&gb, &ps->sps);
+ if (ret < 0)
+ goto out;
+ ps->sps_parsed = true;
+ break;
+ /*case HEVC_NAL_PPS:
+ ret = ff_hevc_decode_nal_pps(&gb, NULL, ps);
+ if (ret < 0)
+ goto out;
+ ps->pps_parsed = true;
+ break;*/
+ default:
+ pr_err("Unsupport parser nal type (%s).\n",
+ hevc_nal_unit_name(nal_type));
+ break;
+ }
+
+out:
+ vfree(rbsp_buf);
+
+ return ret;
+}
+
+/*
+ * Scan codec extradata for start codes and hand each detected NAL unit
+ * to decode_extradata_ps() until the buffer is exhausted.
+ * Returns 0 on success or the first reported parser error.
+ */
+int h265_decode_extradata_ps(u8 *buf, int size, struct h265_param_sets *ps)
+{
+ int ret = 0, i = 0, j = 0;
+ u8 *p = buf;
+ int len = size;
+
+ for (i = 4; i < size; i++) {
+ /* NOTE(review): find_start_code() is invoked with the previous
+  * iteration's len while the remaining length is recomputed only
+  * after a hit; presumably harmless since len never grows, but
+  * confirm against find_start_code()'s contract. */
+ j = find_start_code(p, len);
+ if (j > 0) {
+ len = size - (p - buf);
+ ret = decode_extradata_ps(p, len, ps);
+ if (ret) {
+ pr_err("parse extra data failed. err: %d\n", ret);
+ return ret;
+ }
+ p += j;
+ }
+ p++;
+ }
+
+ return ret;
+}
+
#ifndef AML_HEVC_PARSER_H
#define AML_HEVC_PARSER_H
-#define MAX_DPB_SIZE 16 // A.4.1
-#define MAX_REFS 16
+#include "../utils/common.h"
-#define MAX_NB_THREADS 16
-#define SHIFT_CTB_WPP 2
+#define MAX_DPB_SIZE 16 // A.4.1
+#define MAX_REFS 16
+
+#define MAX_NB_THREADS 16
+#define SHIFT_CTB_WPP 2
/**
* 7.4.2.1
*/
-#define MAX_SUB_LAYERS 7
-#define MAX_VPS_COUNT 16
-#define MAX_SPS_COUNT 32
-#define MAX_PPS_COUNT 256
-#define MAX_SHORT_TERM_RPS_COUNT 64
-#define MAX_CU_SIZE 128
+#define MAX_SUB_LAYERS 7
+#define MAX_VPS_COUNT 16
+#define MAX_SPS_COUNT 32
+#define MAX_PPS_COUNT 256
+#define MAX_SHORT_TERM_RPS_COUNT 64
+#define MAX_CU_SIZE 128
//TODO: check if this is really the maximum
-#define MAX_TRANSFORM_DEPTH 5
+#define MAX_TRANSFORM_DEPTH 5
+
+#define MAX_TB_SIZE 32
+#define MAX_PB_SIZE 64
+#define MAX_LOG2_CTB_SIZE 6
+#define MAX_QP 51
+#define DEFAULT_INTRA_TC_OFFSET 2
-#define MAX_TB_SIZE 32
-#define MAX_PB_SIZE 64
-#define MAX_LOG2_CTB_SIZE 6
-#define MAX_QP 51
-#define DEFAULT_INTRA_TC_OFFSET 2
+#define HEVC_CONTEXTS 183
-#define HEVC_CONTEXTS 183
+#define MRG_MAX_NUM_CANDS 5
-#define MRG_MAX_NUM_CANDS 5
+#define L0 0
+#define L1 1
-#define L0 0
-#define L1 1
+#define EPEL_EXTRA_BEFORE 1
+#define EPEL_EXTRA_AFTER 2
+#define EPEL_EXTRA 3
-#define EPEL_EXTRA_BEFORE 1
-#define EPEL_EXTRA_AFTER 2
-#define EPEL_EXTRA 3
+#define FF_PROFILE_HEVC_MAIN 1
+#define FF_PROFILE_HEVC_MAIN_10 2
+#define FF_PROFILE_HEVC_MAIN_STILL_PICTURE 3
+#define FF_PROFILE_HEVC_REXT 4
/**
* Value of the luma sample at position (x, y) in the 2D array tab.
u32 bottom_offset;
};
-struct AVRational{
- int num; ///< numerator
- int den; ///< denominator
-};
-
struct VUI {
struct AVRational sar;
};
struct PTLCommon {
- u8 profile_space;
- u8 tier_flag;
- u8 profile_idc;
- u8 profile_compatibility_flag[32];
- u8 level_idc;
- u8 progressive_source_flag;
- u8 interlaced_source_flag;
- u8 non_packed_constraint_flag;
- u8 frame_only_constraint_flag;
+ u8 profile_space;
+ u8 tier_flag;
+ u8 profile_idc;
+ u8 profile_compatibility_flag[32];
+ u8 level_idc;
+ u8 progressive_source_flag;
+ u8 interlaced_source_flag;
+ u8 non_packed_constraint_flag;
+ u8 frame_only_constraint_flag;
};
struct PTL {
- int general_profile_space;
- u8 general_tier_flag;
- int general_profile_idc;
- int general_profile_compatibility_flag[32];
- int general_level_idc;
-
- u8 sub_layer_profile_present_flag[MAX_SUB_LAYERS];
- u8 sub_layer_level_present_flag[MAX_SUB_LAYERS];
-
- int sub_layer_profile_space[MAX_SUB_LAYERS];
- u8 sub_layer_tier_flag[MAX_SUB_LAYERS];
- int sub_layer_profile_idc[MAX_SUB_LAYERS];
- u8 sub_layer_profile_compatibility_flags[MAX_SUB_LAYERS][32];
- int sub_layer_level_idc[MAX_SUB_LAYERS];
+ struct PTLCommon general_ptl;
+ struct PTLCommon sub_layer_ptl[HEVC_MAX_SUB_LAYERS];
+
+ u8 sub_layer_profile_present_flag[HEVC_MAX_SUB_LAYERS];
+ u8 sub_layer_level_present_flag[HEVC_MAX_SUB_LAYERS];
};
-struct HEVCVPS {
+struct h265_VPS_t {
u8 vps_temporal_id_nesting_flag;
int vps_max_layers;
int vps_max_sub_layers; ///< vps_max_temporal_layers_minus1 + 1
u8 vps_poc_proportional_to_timing_flag;
int vps_num_ticks_poc_diff_one; ///< vps_num_ticks_poc_diff_one_minus1 + 1
int vps_num_hrd_parameters;
-
- u8 data[4096];
- int data_size;
};
struct ScalingList {
u8 sl_dc[2][6];
};
-struct HEVCSPS {
+struct h265_SPS_t {
u8 vps_id;
+ u8 sps_id;
int chroma_format_idc;
u8 separate_colour_plane_flag;
int max_sub_layers;
struct {
- int max_dec_pic_buffering;
- int num_reorder_pics;
- int max_latency_increase;
+ int max_dec_pic_buffering;
+ int num_reorder_pics;
+ int max_latency_increase;
} temporal_layer[HEVC_MAX_SUB_LAYERS];
u8 temporal_id_nesting_flag;
u8 num_long_term_ref_pics_sps;
struct {
- u8 bit_depth;
- u8 bit_depth_chroma;
- u32 log2_min_pcm_cb_size;
- u32 log2_max_pcm_cb_size;
- u8 loop_filter_disable_flag;
+ u8 bit_depth;
+ u8 bit_depth_chroma;
+ u32 log2_min_pcm_cb_size;
+ u32 log2_max_pcm_cb_size;
+ u8 loop_filter_disable_flag;
} pcm;
u8 sps_temporal_mvp_enabled_flag;
u8 sps_strong_intra_smoothing_enable_flag;
int max_transform_hierarchy_depth_inter;
int max_transform_hierarchy_depth_intra;
+ int sps_range_extension_flag;
int transform_skip_rotation_enabled_flag;
int transform_skip_context_enabled_flag;
int implicit_rdpcm_enabled_flag;
int explicit_rdpcm_enabled_flag;
+ int extended_precision_processing_flag;
int intra_smoothing_disabled_flag;
int high_precision_offsets_enabled_flag;
int persistent_rice_adaptation_enabled_flag;
+ int cabac_bypass_alignment_enabled_flag;
///< coded frame dimension in various units
int width;
int data_size;
};
-struct HEVCPPS {
+struct h265_PPS_t {
u32 sps_id; ///< seq_parameter_set_id
u8 sign_data_hiding_flag;
int *tile_pos_rs; ///< TilePosRS
int *min_tb_addr_zs; ///< MinTbAddrZS
int *min_tb_addr_zs_tab;///< MinTbAddrZS
-
- u8 data[4096];
- int data_size;
};
-struct HEVCParamSets {
+struct h265_param_sets {
bool vps_parsed;
bool sps_parsed;
bool pps_parsed;
/* currently active parameter sets */
- struct HEVCVPS vps;
- struct HEVCSPS sps;
- struct HEVCPPS pps;
+ struct h265_VPS_t vps;
+ struct h265_SPS_t sps;
+ struct h265_PPS_t pps;
};
+int h265_decode_extradata_ps(u8 *data, int size, struct h265_param_sets *ps);
+
#endif /* AML_HEVC_PARSER_H */
--- /dev/null
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+
+#include "aml_mjpeg_parser.h"
+#include "../utils/get_bits.h"
+#include "../utils/put_bits.h"
+#include "../utils/golomb.h"
+#include "../utils/common.h"
+#include "utils.h"
+
+/* return the 8 bit start code value and update the search
+state. Return -1 if no start code found */
+static int find_marker(const u8 **pbuf_ptr, const u8 *buf_end)
+{
+ const u8 *buf_ptr;
+ u32 v, v2;
+ int val;
+ int skipped = 0;
+
+ buf_ptr = *pbuf_ptr;
+ while (buf_end - buf_ptr > 1) {
+ v = *buf_ptr++;
+ v2 = *buf_ptr;
+ /* a JPEG marker is 0xFF followed by a code in 0xC0..0xFE. */
+ if ((v == 0xff) && (v2 >= 0xc0) && (v2 <= 0xfe) && buf_ptr < buf_end) {
+ val = *buf_ptr++;
+ goto found;
+ }
+ skipped++;
+ }
+ /* no marker: report -1 and leave the cursor at the end. */
+ buf_ptr = buf_end;
+ val = -1;
+found:
+ pr_info("find_marker skipped %d bytes\n", skipped);
+ *pbuf_ptr = buf_ptr;
+
+ return val;
+}
+
+/*
+ * Locate the next JPEG marker and, for SOS payloads, unescape the
+ * entropy-coded data into s->buffer (0xFF 0x00 stuffing removal for
+ * baseline JPEG, bit-level unescaping for JPEG-LS).  On return,
+ * *unescaped_buf_ptr/*unescaped_buf_size describe the data to parse.
+ * Returns the marker code, or negative if none was found.
+ */
+int ff_mjpeg_find_marker(struct MJpegDecodeContext *s,
+ const u8 **buf_ptr, const u8 *buf_end,
+ const u8 **unescaped_buf_ptr,
+ int *unescaped_buf_size)
+{
+ int start_code;
+
+ start_code = find_marker(buf_ptr, buf_end);
+
+ /* unescape buffer of SOS, use special treatment for JPEG-LS */
+ if (start_code == SOS && !s->ls) {
+ const u8 *src = *buf_ptr;
+ const u8 *ptr = src;
+ u8 *dst = s->buffer;
+
+ /* copy [src, ptr-skip) into dst and advance src to ptr. */
+ #define copy_data_segment(skip) do { \
+ int length = (ptr - src) - (skip); \
+ if (length > 0) { \
+ memcpy(dst, src, length); \
+ dst += length; \
+ src = ptr; \
+ } \
+ } while (0)
+
+
+ while (ptr < buf_end) {
+ u8 x = *(ptr++);
+
+ if (x == 0xff) {
+ int skip = 0;
+ while (ptr < buf_end && x == 0xff) {
+ x = *(ptr++);
+ skip++;
+ }
+
+ /* 0xFF, 0xFF, ... */
+ if (skip > 1) {
+ copy_data_segment(skip);
+
+ /* decrement src as it is equal to ptr after the
+ * copy_data_segment macro and we might want to
+ * copy the current value of x later on */
+ src--;
+ }
+
+ /* RST0..RST7 (0xd0..0xd7) stay in the stream;
+  * any other non-zero code ends the scan data. */
+ if (x < 0xd0 || x > 0xd7) {
+ copy_data_segment(1);
+ if (x)
+ break;
+ }
+ }
+ if (src < ptr)
+ copy_data_segment(0);
+ }
+ #undef copy_data_segment
+
+ *unescaped_buf_ptr = s->buffer;
+ *unescaped_buf_size = dst - s->buffer;
+ memset(s->buffer + *unescaped_buf_size, 0,
+ AV_INPUT_BUFFER_PADDING_SIZE);
+
+ pr_info("escaping removed %d bytes\n",
+ (int)((buf_end - *buf_ptr) - (dst - s->buffer)));
+ } else if (start_code == SOS && s->ls) {
+ const u8 *src = *buf_ptr;
+ u8 *dst = s->buffer;
+ int bit_count = 0;
+ int t = 0, b = 0;
+ struct put_bits_context pb;
+
+ /* find marker */
+ while (src + t < buf_end) {
+ u8 x = src[t++];
+ if (x == 0xff) {
+ while ((src + t < buf_end) && x == 0xff)
+ x = src[t++];
+ if (x & 0x80) {
+ t -= FFMIN(2, t);
+ break;
+ }
+ }
+ }
+ bit_count = t * 8;
+ init_put_bits(&pb, dst, t);
+
+ /* unescape bitstream */
+ while (b < t) {
+ u8 x = src[b++];
+ put_bits(&pb, 8, x);
+ if (x == 0xFF && b < t) {
+ x = src[b++];
+ if (x & 0x80) {
+ pr_err("Invalid escape sequence\n");
+ x &= 0x7f;
+ }
+ /* JPEG-LS: only 7 data bits follow an 0xFF byte. */
+ put_bits(&pb, 7, x);
+ bit_count--;
+ }
+ }
+ flush_put_bits(&pb);
+
+ *unescaped_buf_ptr = dst;
+ *unescaped_buf_size = (bit_count + 7) >> 3;
+ memset(s->buffer + *unescaped_buf_size, 0,
+ AV_INPUT_BUFFER_PADDING_SIZE);
+ } else {
+ /* non-SOS segments are parsed in place, no unescaping needed. */
+ *unescaped_buf_ptr = *buf_ptr;
+ *unescaped_buf_size = buf_end - *buf_ptr;
+ }
+
+ return start_code;
+}
+
+
+/*
+ * Parse a start-of-frame (SOFn) segment from s->gb: bit depth, frame
+ * dimensions and the per-component sampling factors / quant indices.
+ * Returns 0 on success, -1 on any invalid field.
+ */
+int ff_mjpeg_decode_sof(struct MJpegDecodeContext *s)
+{
+ int len, nb_components, i, width, height, bits, size_change;
+ int h_count[MAX_COMPONENTS] = { 0 };
+ int v_count[MAX_COMPONENTS] = { 0 };
+
+ s->cur_scan = 0;
+ memset(s->upscale_h, 0, sizeof(s->upscale_h));
+ memset(s->upscale_v, 0, sizeof(s->upscale_v));
+
+ /* XXX: verify len field validity */
+ len = get_bits(&s->gb, 16);
+ bits = get_bits(&s->gb, 8);
+
+ if (bits > 16 || bits < 1) {
+ pr_err("bits %d is invalid\n", bits);
+ return -1;
+ }
+
+ height = get_bits(&s->gb, 16);
+ width = get_bits(&s->gb, 16);
+
+ pr_info("sof0: picture: %dx%d\n", width, height);
+
+ nb_components = get_bits(&s->gb, 8);
+ if (nb_components <= 0 ||
+ nb_components > MAX_COMPONENTS)
+ return -1;
+
+ s->nb_components = nb_components;
+ s->h_max = 1;
+ s->v_max = 1;
+ for (i = 0; i < nb_components; i++) {
+ /* component id */
+ s->component_id[i] = get_bits(&s->gb, 8) - 1;
+ h_count[i] = get_bits(&s->gb, 4);
+ v_count[i] = get_bits(&s->gb, 4);
+ /* compute hmax and vmax (only used in interleaved case) */
+ if (h_count[i] > s->h_max)
+ s->h_max = h_count[i];
+ if (v_count[i] > s->v_max)
+ s->v_max = v_count[i];
+ s->quant_index[i] = get_bits(&s->gb, 8);
+ if (s->quant_index[i] >= 4) {
+ pr_err("quant_index is invalid\n");
+ return -1;
+ }
+ if (!h_count[i] || !v_count[i]) {
+ pr_err("Invalid sampling factor in component %d %d:%d\n",
+ i, h_count[i], v_count[i]);
+ return -1;
+ }
+
+ pr_info("component %d %d:%d id: %d quant:%d\n",
+ i, h_count[i], v_count[i],
+ s->component_id[i], s->quant_index[i]);
+ }
+ /* detect Adobe CMYK component naming ('C','M','Y','K'). */
+ if (nb_components == 4
+ && s->component_id[0] == 'C' - 1
+ && s->component_id[1] == 'M' - 1
+ && s->component_id[2] == 'Y' - 1
+ && s->component_id[3] == 'K' - 1)
+ s->adobe_transform = 0;
+
+ /* if different size, realloc/alloc picture */
+ if (width != s->width || height != s->height || bits != s->bits ||
+ memcmp(s->h_count, h_count, sizeof(h_count)) ||
+ memcmp(s->v_count, v_count, sizeof(v_count))) {
+ size_change = 1;
+
+ s->width = width;
+ s->height = height;
+ s->bits = bits;
+ memcpy(s->h_count, h_count, sizeof(h_count));
+ memcpy(s->v_count, v_count, sizeof(v_count));
+ s->interlaced = 0;
+ s->got_picture = 0;
+ } else {
+ size_change = 0;
+ }
+
+ /* NOTE(review): size_change and len are computed but never consumed
+  * here — presumably remnants of the ffmpeg original; confirm. */
+ return 0;
+}
+
+/*
+ * Walk the JPEG markers in buf and decode header information (SOI/SOFn)
+ * into *s, stopping at EOI.  Only the marker types needed for parameter
+ * extraction are handled; entropy-coded data is not decoded.
+ * Returns 0 when an EOI terminates the image, negative otherwise.
+ * NOTE(review): a stream with a valid SOF but no EOI still returns -1
+ * ("No JPEG data found") — confirm this matches the caller's intent.
+ */
+static int ff_mjpeg_decode_frame(u8 *buf, int buf_size, struct MJpegDecodeContext *s)
+{
+ const u8 *buf_end, *buf_ptr;
+ const u8 *unescaped_buf_ptr;
+ int unescaped_buf_size;
+ int start_code;
+ int ret = 0;
+
+ buf_ptr = buf;
+ buf_end = buf + buf_size;
+ while (buf_ptr < buf_end) {
+ /* find start next marker */
+ start_code = ff_mjpeg_find_marker(s, &buf_ptr, buf_end,
+ &unescaped_buf_ptr,
+ &unescaped_buf_size);
+ /* EOF */
+ if (start_code < 0) {
+ break;
+ } else if (unescaped_buf_size > INT_MAX / 8) {
+ pr_err("MJPEG packet 0x%x too big (%d/%d), corrupt data?\n",
+ start_code, unescaped_buf_size, buf_size);
+ return -1;
+ }
+ pr_info("marker=%x avail_size_in_buf=%d\n",
+ start_code, (int)(buf_end - buf_ptr));
+
+ ret = init_get_bits8(&s->gb, unescaped_buf_ptr, unescaped_buf_size);
+ if (ret < 0) {
+ pr_err("invalid buffer\n");
+ goto fail;
+ }
+
+ s->start_code = start_code;
+ pr_info("startcode: %X\n", start_code);
+
+ /* first switch: markers we act on; everything else is skipped. */
+ switch (start_code) {
+ case SOF0:
+ case SOF1:
+ case SOF2:
+ case SOF3:
+ case SOF48:
+ case SOI:
+ case SOS:
+ case EOI:
+ break;
+ default:
+ goto skip;
+ }
+
+ switch (start_code) {
+ case SOI:
+ s->restart_interval = 0;
+ s->restart_count = 0;
+ s->raw_image_buffer = buf_ptr;
+ s->raw_image_buffer_size = buf_end - buf_ptr;
+ /* nothing to do on SOI */
+ break;
+ case SOF0:
+ case SOF1:
+ if (start_code == SOF0)
+ s->profile = FF_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT;
+ else
+ s->profile = FF_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT;
+ s->lossless = 0;
+ s->ls = 0;
+ s->progressive = 0;
+ if ((ret = ff_mjpeg_decode_sof(s)) < 0)
+ goto fail;
+ break;
+ case SOF2:
+ s->profile = FF_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT;
+ s->lossless = 0;
+ s->ls = 0;
+ s->progressive = 1;
+ if ((ret = ff_mjpeg_decode_sof(s)) < 0)
+ goto fail;
+ break;
+ case SOF3:
+ s->profile = FF_PROFILE_MJPEG_HUFFMAN_LOSSLESS;
+ s->properties |= FF_CODEC_PROPERTY_LOSSLESS;
+ s->lossless = 1;
+ s->ls = 0;
+ s->progressive = 0;
+ if ((ret = ff_mjpeg_decode_sof(s)) < 0)
+ goto fail;
+ break;
+ case SOF48:
+ s->profile = FF_PROFILE_MJPEG_JPEG_LS;
+ s->properties |= FF_CODEC_PROPERTY_LOSSLESS;
+ s->lossless = 1;
+ s->ls = 1;
+ s->progressive = 0;
+ if ((ret = ff_mjpeg_decode_sof(s)) < 0)
+ goto fail;
+ break;
+ case EOI:
+ goto the_end;
+ case DHT:
+ case LSE:
+ case SOS:
+ case DRI:
+ case SOF5:
+ case SOF6:
+ case SOF7:
+ case SOF9:
+ case SOF10:
+ case SOF11:
+ case SOF13:
+ case SOF14:
+ case SOF15:
+ case JPG:
+ pr_err("mjpeg: unsupported coding type (%x)\n", start_code);
+ break;
+ }
+skip:
+ /* eof process start code */
+ buf_ptr += (get_bits_count(&s->gb) + 7) / 8;
+ pr_info("marker parser used %d bytes (%d bits)\n",
+ (get_bits_count(&s->gb) + 7) / 8, get_bits_count(&s->gb));
+ }
+
+ pr_err("No JPEG data found in image\n");
+ return -1;
+fail:
+ s->got_picture = 0;
+ return ret;
+the_end:
+ pr_info("decode frame unused %d bytes\n", (int)(buf_end - buf_ptr));
+
+ return 0;
+}
+
+/*
+ * Parse MJPEG extradata headers into *ps.  A padded scratch buffer is
+ * allocated for marker unescaping and always released before return.
+ * Returns 0 on success (head_parsed set when width/height were found),
+ * -ENOMEM on allocation failure, or the parser's error code.
+ */
+int mjpeg_decode_extradata_ps(u8 *buf, int size, struct mjpeg_param_sets *ps)
+{
+ int ret;
+
+ ps->head_parsed = false;
+
+ ps->dec_ps.buf_size = size;
+ ps->dec_ps.buffer = vzalloc(size + AV_INPUT_BUFFER_PADDING_SIZE);
+ if (!ps->dec_ps.buffer)
+ return -ENOMEM;
+
+ ret = ff_mjpeg_decode_frame(buf, size, &ps->dec_ps);
+ if (ret)
+ pr_err("parse extra data failed. err: %d\n", ret);
+ else if (ps->dec_ps.width && ps->dec_ps.height)
+ ps->head_parsed = true;
+
+ /* single cleanup path for both success and failure. */
+ vfree(ps->dec_ps.buffer);
+
+ return ret;
+}
+
--- /dev/null
+#ifndef AML_MPEG12_PARSER_H
+#define AML_MPEG12_PARSER_H
+
+#include "../utils/pixfmt.h"
+#include "../utils/common.h"
+#include "../utils/get_bits.h"
+#include "../utils/put_bits.h"
+
+#define FF_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT 0xc0
+#define FF_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT 0xc1
+#define FF_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT 0xc2
+#define FF_PROFILE_MJPEG_HUFFMAN_LOSSLESS 0xc3
+#define FF_PROFILE_MJPEG_JPEG_LS 0xf7
+
+#define FF_CODEC_PROPERTY_LOSSLESS 0x00000001
+#define FF_CODEC_PROPERTY_CLOSED_CAPTIONS 0x00000002
+
+#define MAX_COMPONENTS 4
+
+/* JPEG marker codes */
+enum JpegMarker {
+ /* start of frame */
+ SOF0 = 0xc0, /* baseline */
+ SOF1 = 0xc1, /* extended sequential, huffman */
+ SOF2 = 0xc2, /* progressive, huffman */
+ SOF3 = 0xc3, /* lossless, huffman */
+
+ SOF5 = 0xc5, /* differential sequential, huffman */
+ SOF6 = 0xc6, /* differential progressive, huffman */
+ SOF7 = 0xc7, /* differential lossless, huffman */
+ JPG = 0xc8, /* reserved for JPEG extension */
+ SOF9 = 0xc9, /* extended sequential, arithmetic */
+ SOF10 = 0xca, /* progressive, arithmetic */
+ SOF11 = 0xcb, /* lossless, arithmetic */
+
+ SOF13 = 0xcd, /* differential sequential, arithmetic */
+ SOF14 = 0xce, /* differential progressive, arithmetic */
+ SOF15 = 0xcf, /* differential lossless, arithmetic */
+
+ DHT = 0xc4, /* define huffman tables */
+
+ DAC = 0xcc, /* define arithmetic-coding conditioning */
+
+ /* restart with modulo 8 count "m" */
+ RST0 = 0xd0,
+ RST1 = 0xd1,
+ RST2 = 0xd2,
+ RST3 = 0xd3,
+ RST4 = 0xd4,
+ RST5 = 0xd5,
+ RST6 = 0xd6,
+ RST7 = 0xd7,
+
+ SOI = 0xd8, /* start of image */
+ EOI = 0xd9, /* end of image */
+ SOS = 0xda, /* start of scan */
+ DQT = 0xdb, /* define quantization tables */
+ DNL = 0xdc, /* define number of lines */
+ DRI = 0xdd, /* define restart interval */
+ DHP = 0xde, /* define hierarchical progression */
+ EXP = 0xdf, /* expand reference components */
+
+ APP0 = 0xe0,
+ APP1 = 0xe1,
+ APP2 = 0xe2,
+ APP3 = 0xe3,
+ APP4 = 0xe4,
+ APP5 = 0xe5,
+ APP6 = 0xe6,
+ APP7 = 0xe7,
+ APP8 = 0xe8,
+ APP9 = 0xe9,
+ APP10 = 0xea,
+ APP11 = 0xeb,
+ APP12 = 0xec,
+ APP13 = 0xed,
+ APP14 = 0xee,
+ APP15 = 0xef,
+
+ JPG0 = 0xf0,
+ JPG1 = 0xf1,
+ JPG2 = 0xf2,
+ JPG3 = 0xf3,
+ JPG4 = 0xf4,
+ JPG5 = 0xf5,
+ JPG6 = 0xf6,
+ SOF48 = 0xf7, ///< JPEG-LS
+ LSE = 0xf8, ///< JPEG-LS extension parameters
+ JPG9 = 0xf9,
+ JPG10 = 0xfa,
+ JPG11 = 0xfb,
+ JPG12 = 0xfc,
+ JPG13 = 0xfd,
+
+ COM = 0xfe, /* comment */
+
+ TEM = 0x01, /* temporary private use for arithmetic coding */
+
+ /* 0x02 -> 0xbf reserved */
+};
+
+struct VLC {
+ int bits;
+ short (*table)[2]; ///< code, bits
+ int table_size, table_allocated;
+};
+
+/* Parser-side state for one MJPEG stream: bit reader, quant/VLC tables,
+ * frame geometry and per-component sampling info filled in while walking
+ * the JPEG marker segments. */
+struct MJpegDecodeContext {
+ struct get_bits_context gb; /* bit reader over the current segment */
+ int buf_size;
+
+ int start_code; /* current start code */
+ int buffer_size;
+ u8 *buffer;
+
+ u16 quant_matrixes[4][64]; /* up to 4 DQT tables, 64 coeffs each */
+ struct VLC vlcs[3][4];
+ int qscale[4]; ///< quantizer scale calculated from quant_matrixes
+
+ int first_picture; /* true if decoding first picture */
+ int interlaced; /* true if interlaced */
+ int bottom_field; /* true if bottom field */
+ int lossless;
+ int ls;
+ int progressive;
+ u8 upscale_h[4];
+ u8 upscale_v[4];
+ int bits; /* bits per component */
+ int adobe_transform;
+
+ int width, height;
+ int mb_width, mb_height;
+ int nb_components;
+ int block_stride[MAX_COMPONENTS];
+ int component_id[MAX_COMPONENTS];
+ int h_count[MAX_COMPONENTS]; /* horizontal and vertical count for each component */
+ int v_count[MAX_COMPONENTS];
+ int h_scount[MAX_COMPONENTS];
+ int v_scount[MAX_COMPONENTS];
+ int h_max, v_max; /* maximum h and v counts */
+ int quant_index[4]; /* quant table index for each component */
+ int got_picture; ///< we found a SOF and picture is valid, too.
+ int restart_interval;
+ int restart_count;
+ int cur_scan; /* current scan, used by JPEG-LS */
+
+ // Raw stream data for hwaccel use.
+ const u8 *raw_image_buffer;
+ int raw_image_buffer_size;
+
+ int profile;
+ u32 properties;
+};
+
+/* Wrapper handed to callers: head_parsed flips true once the stream
+ * headers have been successfully extracted into dec_ps. */
+struct mjpeg_param_sets {
+ bool head_parsed;
+ /* currently active parameter sets */
+ struct MJpegDecodeContext dec_ps;
+};
+
+int mjpeg_decode_extradata_ps(u8 *buf, int size, struct mjpeg_param_sets *ps);
+
+#endif
--- /dev/null
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+
+#include "aml_mpeg12_parser.h"
+#include "../utils/get_bits.h"
+#include "../utils/put_bits.h"
+#include "../utils/golomb.h"
+#include "../utils/common.h"
+#include "utils.h"
+
+/* MPEG-1/2 frame_rate_code (4 bits, see sequence header parsing below)
+ * mapped to a {num, den} rational frame rate. Index 0 and 15 are not
+ * valid codes and map to {0, 0}. */
+const struct AVRational ff_mpeg12_frame_rate_tab[16] = {
+ { 0, 0},
+ {24000, 1001},
+ { 24, 1},
+ { 25, 1},
+ {30000, 1001},
+ { 30, 1},
+ { 50, 1},
+ {60000, 1001},
+ { 60, 1},
+ // Xing's 15fps: (9)
+ { 15, 1},
+ // libmpeg3's "Unofficial economy rates": (10-13)
+ { 5, 1},
+ { 10, 1},
+ { 12, 1},
+ { 15, 1},
+ { 0, 0},
+};
+
+/*
+ * Find the next MPEG start code (00 00 01 xx) in [p, end).
+ * *state is a rolling window of the last bytes seen, carried across calls
+ * so a start code straddling two buffers is still found. On success the
+ * return value points just past the code and *state holds its 32 bits;
+ * if no code is found, 'end' is returned.
+ */
+const u8 *avpriv_find_start_code(const u8 *p, const u8 *end, u32 *state)
+{
+ int i;
+
+ if (p >= end)
+ return end;
+
+ /* finish a start code that may have begun in the previous buffer */
+ for (i = 0; i < 3; i++) {
+ u32 tmp = *state << 8;
+ *state = tmp + *(p++);
+ if (tmp == 0x100 || p == end)
+ return p;
+ }
+
+ /* fast scan: a start code needs two zero bytes, so depending on the
+  * bytes just behind p we can step 1, 2 or 3 bytes at a time */
+ while (p < end) {
+ if (p[-1] > 1 ) p += 3;
+ else if (p[-2] ) p += 2;
+ else if (p[-3]|(p[-1]-1)) p++;
+ else {
+ p++;
+ break;
+ }
+ }
+
+ /* re-read the 4 bytes of the code we stopped on (clamped to 'end') */
+ p = FFMIN(p, end) - 4;
+ *state = AV_RB32(p);
+
+ return p + 4;
+}
+
+/*
+ * Walk the start codes of an MPEG-1/2 elementary-stream chunk and fill
+ * ps->dec_ps with picture type, dimensions, frame rate, interlacing and
+ * pixel format. Parsing stops at the first slice start code so the scan
+ * stays cheap. Fields not present in the chunk are left untouched.
+ */
+static void mpegvideo_extract_headers(const u8 *buf, int buf_size,
+ struct mpeg12_param_sets *ps)
+{
+ struct MpvParseContext *pc = &ps->dec_ps;
+ const u8 *buf_end = buf + buf_size;
+ u32 start_code;
+ int frame_rate_index, ext_type, bytes_left;
+ int frame_rate_ext_n, frame_rate_ext_d;
+ int top_field_first, repeat_first_field, progressive_frame;
+ int horiz_size_ext, vert_size_ext, bit_rate_ext;
+ int bit_rate = 0;
+ int vbv_delay = 0;
+ int chroma_format;
+ enum AVPixelFormat pix_fmt = AV_PIX_FMT_NONE;
+ //FIXME replace the crap with get_bits()
+ pc->repeat_pict = 0;
+
+ while (buf < buf_end) {
+ start_code= -1;
+ buf= avpriv_find_start_code(buf, buf_end, &start_code);
+ bytes_left = buf_end - buf;
+ switch (start_code) {
+ case PICTURE_START_CODE:
+ /* picture header: 10-bit temporal ref, 3-bit pict type, vbv delay */
+ if (bytes_left >= 2) {
+ pc->pict_type = (buf[1] >> 3) & 7;
+ if (bytes_left >= 4)
+ vbv_delay = ((buf[1] & 0x07) << 13) | (buf[2] << 5) | (buf[3] >> 3);
+ }
+ break;
+ case SEQ_START_CODE:
+ /* sequence header: 12-bit width/height, frame rate code, bit rate */
+ if (bytes_left >= 7) {
+ pc->width = (buf[0] << 4) | (buf[1] >> 4);
+ pc->height = ((buf[1] & 0x0f) << 8) | buf[2];
+
+ pix_fmt = AV_PIX_FMT_YUV420P;
+ frame_rate_index = buf[3] & 0xf;
+ pc->frame_rate = ff_mpeg12_frame_rate_tab[frame_rate_index];
+ bit_rate = (buf[4]<<10) | (buf[5]<<2) | (buf[6]>>6);
+ pc->ticks_per_frame = 1;
+ }
+ break;
+ case EXT_START_CODE:
+ /* MPEG-2 extensions: distinguish by the 4-bit extension id */
+ if (bytes_left >= 1) {
+ ext_type = (buf[0] >> 4);
+ switch (ext_type) {
+ case 0x1: /* sequence extension */
+ if (bytes_left >= 6) {
+ horiz_size_ext = ((buf[1] & 1) << 1) | (buf[2] >> 7);
+ vert_size_ext = (buf[2] >> 5) & 3;
+ bit_rate_ext = ((buf[2] & 0x1F)<<7) | (buf[3]>>1);
+ frame_rate_ext_n = (buf[5] >> 5) & 3;
+ frame_rate_ext_d = (buf[5] & 0x1f);
+ pc->progressive_sequence = buf[1] & (1 << 3);
+ pc->has_b_frames= !(buf[5] >> 7);
+
+ chroma_format = (buf[1] >> 1) & 3;
+ switch (chroma_format) {
+ case 1: pix_fmt = AV_PIX_FMT_YUV420P; break;
+ case 2: pix_fmt = AV_PIX_FMT_YUV422P; break;
+ case 3: pix_fmt = AV_PIX_FMT_YUV444P; break;
+ }
+
+ /* extension bits widen the 12-bit sequence-header fields */
+ pc->width = (pc->width & 0xFFF) | (horiz_size_ext << 12);
+ pc->height = (pc->height& 0xFFF) | ( vert_size_ext << 12);
+ bit_rate = (bit_rate&0x3FFFF) | (bit_rate_ext << 18);
+ //if(did_set_size)
+ //set_dim_ret = ff_set_dimensions(avctx, pc->width, pc->height);
+ pc->framerate.num = pc->frame_rate.num * (frame_rate_ext_n + 1);
+ pc->framerate.den = pc->frame_rate.den * (frame_rate_ext_d + 1);
+ pc->ticks_per_frame = 2;
+ }
+ break;
+ case 0x8: /* picture coding extension */
+ if (bytes_left >= 5) {
+ top_field_first = buf[3] & (1 << 7);
+ repeat_first_field = buf[3] & (1 << 1);
+ progressive_frame = buf[4] & (1 << 7);
+
+ /* check if we must repeat the frame */
+ pc->repeat_pict = 1;
+ if (repeat_first_field) {
+ if (pc->progressive_sequence) {
+ if (top_field_first)
+ pc->repeat_pict = 5;
+ else
+ pc->repeat_pict = 3;
+ } else if (progressive_frame) {
+ pc->repeat_pict = 2;
+ }
+ }
+
+ if (!pc->progressive_sequence && !progressive_frame) {
+ if (top_field_first)
+ pc->field_order = AV_FIELD_TT;
+ else
+ pc->field_order = AV_FIELD_BB;
+ } else
+ pc->field_order = AV_FIELD_PROGRESSIVE;
+ }
+ break;
+ }
+ }
+ break;
+ case -1:
+ goto the_end;
+ default:
+ /* we stop parsing when we encounter a slice. It ensures
+ that this function takes a negligible amount of time */
+ if (start_code >= SLICE_MIN_START_CODE &&
+ start_code <= SLICE_MAX_START_CODE)
+ goto the_end;
+ break;
+ }
+ }
+the_end:
+
+ /* publish format and 16-aligned coded size once a header was seen */
+ if (pix_fmt != AV_PIX_FMT_NONE) {
+ pc->format = pix_fmt;
+ pc->coded_width = ALIGN(pc->width, 16);
+ pc->coded_height = ALIGN(pc->height, 16);
+ }
+}
+
+/*
+ * Parse MPEG-1/2 extradata into ps->dec_ps and flag head_parsed once
+ * valid (non-zero) picture dimensions have been extracted.
+ * Always returns 0.
+ */
+int mpeg12_decode_extradata_ps(u8 *buf, int size, struct mpeg12_param_sets *ps)
+{
+ mpegvideo_extract_headers(buf, size, ps);
+
+ ps->head_parsed = (ps->dec_ps.width && ps->dec_ps.height);
+
+ return 0;
+}
+
--- /dev/null
+#ifndef AML_MPEG12_PARSER_H
+#define AML_MPEG12_PARSER_H
+
+#include "../utils/pixfmt.h"
+#include "../utils/common.h"
+
+/* Start codes. */
+#define SEQ_END_CODE 0x000001b7
+#define SEQ_START_CODE 0x000001b3
+#define GOP_START_CODE 0x000001b8
+#define PICTURE_START_CODE 0x00000100
+#define SLICE_MIN_START_CODE 0x00000101
+#define SLICE_MAX_START_CODE 0x000001af
+#define EXT_START_CODE 0x000001b5
+#define USER_START_CODE 0x000001b2
+#define SLICE_START_CODE 0x000001b7
+
+/* Interlaced field ordering of a coded picture. */
+enum AVFieldOrder {
+ AV_FIELD_UNKNOWN,
+ AV_FIELD_PROGRESSIVE,
+ AV_FIELD_TT, ///< Top coded first, top displayed first
+ AV_FIELD_BB, ///< Bottom coded first, bottom displayed first
+ AV_FIELD_TB, ///< Top coded first, bottom displayed first
+ AV_FIELD_BT, ///< Bottom coded first, top displayed first
+};
+
+/* Output of the MPEG-1/2 header scan: geometry, frame rate and picture
+ * properties extracted by mpegvideo_extract_headers(). */
+struct MpvParseContext {
+ struct AVRational frame_rate; /* base rate from the sequence header table */
+ int progressive_sequence;
+ int width, height;
+
+ int repeat_pict; /* XXX: Put it back in AVCodecContext. */
+ int pict_type; /* XXX: Put it back in AVCodecContext. */
+ enum AVFieldOrder field_order;
+ int format; /* enum AVPixelFormat value */
+ /**
+ * Dimensions of the coded video.
+ */
+ int coded_width;
+ int coded_height;
+ /**
+ * For some codecs, the time base is closer to the field rate than the frame rate.
+ * Most notably, H.264 and MPEG-2 specify time_base as half of frame duration
+ * if no telecine is used ...
+ *
+ * Set to time_base ticks per frame. Default 1, e.g., H.264/MPEG-2 set it to 2.
+ */
+ int ticks_per_frame;
+ /**
+ * Size of the frame reordering buffer in the decoder.
+ * For MPEG-2 it is 1 IPB or 0 low delay IP.
+ * - encoding: Set by libavcodec.
+ * - decoding: Set by libavcodec.
+ */
+ int has_b_frames;
+ /**
+ * - decoding: For codecs that store a framerate value in the compressed
+ * bitstream, the decoder may export it here. { 0, 1} when
+ * unknown.
+ * - encoding: May be used to signal the framerate of CFR content to an
+ * encoder.
+ */
+ struct AVRational framerate;
+};
+
+/* Wrapper handed to callers: head_parsed flips true once valid stream
+ * dimensions have been parsed into dec_ps. */
+struct mpeg12_param_sets {
+ bool head_parsed;
+ /* currently active parameter sets */
+ struct MpvParseContext dec_ps;
+};
+
+int mpeg12_decode_extradata_ps(u8 *buf, int size, struct mpeg12_param_sets *ps);
+
+#endif
--- /dev/null
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+
+#include "aml_mpeg4_parser.h"
+#include "../utils/get_bits.h"
+#include "../utils/put_bits.h"
+#include "../utils/golomb.h"
+#include "../utils/common.h"
+#include "utils.h"
+
+/* DC prediction thresholds indexed by intra_dc_vlc_thr (3 bits). */
+const u8 ff_mpeg4_dc_threshold[8]={
+ 99, 13, 15, 17, 19, 21, 23, 0
+};
+
+/* these matrixes will be permuted for the idct */
+/* these matrixes will be permuted for the idct */
+/* Default MPEG-4 intra quantiser matrix (8x8, row-major). */
+const int16_t ff_mpeg4_default_intra_matrix[64] = {
+ 8, 17, 18, 19, 21, 23, 25, 27,
+ 17, 18, 19, 21, 23, 25, 27, 28,
+ 20, 21, 22, 23, 24, 26, 28, 30,
+ 21, 22, 23, 24, 26, 28, 30, 32,
+ 22, 23, 24, 26, 28, 30, 32, 35,
+ 23, 24, 26, 28, 30, 32, 35, 38,
+ 25, 26, 28, 30, 32, 35, 38, 41,
+ 27, 28, 30, 32, 35, 38, 41, 45,
+};
+
+/* Default MPEG-4 non-intra (inter) quantiser matrix (8x8, row-major). */
+const int16_t ff_mpeg4_default_non_intra_matrix[64] = {
+ 16, 17, 18, 19, 20, 21, 22, 23,
+ 17, 18, 19, 20, 21, 22, 23, 24,
+ 18, 19, 20, 21, 22, 23, 24, 25,
+ 19, 20, 21, 22, 23, 24, 26, 27,
+ 20, 21, 22, 23, 25, 26, 27, 28,
+ 21, 22, 23, 24, 26, 27, 28, 30,
+ 22, 23, 24, 26, 27, 28, 30, 31,
+ 23, 24, 25, 27, 28, 30, 31, 33,
+};
+
+/* Pixel aspect ratios indexed by the 4-bit aspect_ratio_info field;
+ * unassigned codes map to {0, 1} (unknown). */
+const struct AVRational ff_h263_pixel_aspect[16] = {
+ { 0, 1 },
+ { 1, 1 },
+ { 12, 11 },
+ { 10, 11 },
+ { 16, 11 },
+ { 40, 33 },
+ { 0, 1 },
+ { 0, 1 },
+ { 0, 1 },
+ { 0, 1 },
+ { 0, 1 },
+ { 0, 1 },
+ { 0, 1 },
+ { 0, 1 },
+ { 0, 1 },
+ { 0, 1 },
+};
+
+/* As per spec, studio start code search isn't the same as the old type of start code */
+static void next_start_code_studio(struct get_bits_context *gb)
+{
+ align_get_bits(gb);
+
+ while (get_bits_left(gb) >= 24 && show_bits_long(gb, 24) != 0x1) {
+ get_bits(gb, 8);
+ }
+}
+
+/*
+ * Consume the studio quant_matrix_extension: four optional matrices
+ * (intra, non_intra, chroma_intra, chroma_non_intra), each flagged by a
+ * single load bit and, when present, encoded as 64 8-bit values. The
+ * values themselves are discarded (no IDCT permutation is applied here);
+ * only the bit positions matter to the parser.
+ * Returns 0 on success, -1 if a flagged matrix is truncated.
+ */
+static int read_quant_matrix_ext(struct MpegEncContext *s, struct get_bits_context *gb)
+{
+ int m, i;
+
+ for (m = 0; m < 4; m++) {
+ if (!get_bits1(gb))
+ continue;
+
+ if (get_bits_left(gb) < 64 * 8)
+ return -1;
+
+ for (i = 0; i < 64; i++)
+ get_bits(gb, 8);
+ }
+
+ next_start_code_studio(gb);
+ return 0;
+}
+
+/*
+ * Handle an extension_and_user_data() section. Only a quant-matrix
+ * extension following a VOL (id == 2) or VOP (id == 4) header is
+ * consumed; any other start code is left unread for the caller.
+ */
+static void extension_and_user_data(struct MpegEncContext *s, struct get_bits_context *gb, int id)
+{
+ u32 startcode = show_bits_long(gb, 32);
+
+ if (startcode == EXT_STARTCODE && (id == 2 || id == 4)) {
+ skip_bits_long(gb, 32);
+ if (get_bits(gb, 4) == QUANT_MATRIX_EXT_ID)
+ read_quant_matrix_ext(s, gb);
+ }
+}
+
+
+/*
+ * Parse a Studio-profile Video Object Layer header: shape, chroma
+ * format, bit depth (only 10-bit supported), dimensions, aspect ratio
+ * and the VBV/bit-rate fields (skipped). Returns 0 on success, -1 on
+ * an illegal chroma format or unsupported bit depth.
+ */
+static int decode_studio_vol_header(struct mpeg4_dec_param *ctx, struct get_bits_context *gb)
+{
+ struct MpegEncContext *s = &ctx->m;
+ int width, height;
+ int bits_per_raw_sample;
+
+ // random_accessible_vol and video_object_type_indication have already
+ // been read by the caller decode_vol_header()
+ skip_bits(gb, 4); /* video_object_layer_verid */
+ ctx->shape = get_bits(gb, 2); /* video_object_layer_shape */
+ skip_bits(gb, 4); /* video_object_layer_shape_extension */
+ skip_bits1(gb); /* progressive_sequence */
+ if (ctx->shape != BIN_ONLY_SHAPE) {
+ ctx->rgb = get_bits1(gb); /* rgb_components */
+ s->chroma_format = get_bits(gb, 2); /* chroma_format */
+ if (!s->chroma_format) {
+ pr_err("illegal chroma format\n");
+ return -1;
+ }
+
+ bits_per_raw_sample = get_bits(gb, 4); /* bit_depth */
+ if (bits_per_raw_sample == 10) {
+ if (ctx->rgb) {
+ ctx->pix_fmt = AV_PIX_FMT_GBRP10;
+ } else {
+ ctx->pix_fmt = s->chroma_format == CHROMA_422 ? AV_PIX_FMT_YUV422P10 : AV_PIX_FMT_YUV444P10;
+ }
+ }
+ else {
+ /* only 10-bit studio streams are handled here */
+ pr_err("MPEG-4 Studio profile bit-depth %u", bits_per_raw_sample);
+ return -1;
+ }
+ ctx->bits_per_raw_sample = bits_per_raw_sample;
+ }
+ if (ctx->shape == RECT_SHAPE) {
+ check_marker(gb, "before video_object_layer_width");
+ width = get_bits(gb, 14); /* video_object_layer_width */
+ check_marker(gb, "before video_object_layer_height");
+ height = get_bits(gb, 14); /* video_object_layer_height */
+ check_marker(gb, "after video_object_layer_height");
+
+ /* Do the same check as non-studio profile */
+ if (width && height) {
+ if (s->width && s->height &&
+ (s->width != width || s->height != height))
+ s->context_reinit = 1;
+ s->width = width;
+ s->height = height;
+ }
+ }
+ s->aspect_ratio_info = get_bits(gb, 4);
+ if (s->aspect_ratio_info == FF_ASPECT_EXTENDED) {
+ ctx->sample_aspect_ratio.num = get_bits(gb, 8); // par_width
+ ctx->sample_aspect_ratio.den = get_bits(gb, 8); // par_height
+ } else {
+ ctx->sample_aspect_ratio = ff_h263_pixel_aspect[s->aspect_ratio_info];
+ }
+ skip_bits(gb, 4); /* frame_rate_code */
+ skip_bits(gb, 15); /* first_half_bit_rate */
+ check_marker(gb, "after first_half_bit_rate");
+ skip_bits(gb, 15); /* latter_half_bit_rate */
+ check_marker(gb, "after latter_half_bit_rate");
+ skip_bits(gb, 15); /* first_half_vbv_buffer_size */
+ check_marker(gb, "after first_half_vbv_buffer_size");
+ skip_bits(gb, 3); /* latter_half_vbv_buffer_size */
+ skip_bits(gb, 11); /* first_half_vbv_buffer_size */
+ check_marker(gb, "after first_half_vbv_buffer_size");
+ skip_bits(gb, 15); /* latter_half_vbv_occupancy */
+ check_marker(gb, "after latter_half_vbv_occupancy");
+ s->low_delay = get_bits1(gb);
+ s->mpeg_quant = get_bits1(gb); /* mpeg2_stream */
+
+ next_start_code_studio(gb);
+ extension_and_user_data(s, gb, 2);
+
+ return 0;
+}
+
+/*
+ * Parse a (non-studio) MPEG-4 Video Object Layer header: vo_type,
+ * aspect ratio, VBV parameters, shape, time increment resolution,
+ * dimensions, sprite usage, quant matrices, complexity estimation and
+ * scalability flags. Studio-profile streams are diverted to
+ * decode_studio_vol_header(). Returns 0 on success, negative on an
+ * unparseable or inconsistent header.
+ */
+static int decode_vol_header(struct mpeg4_dec_param *ctx, struct get_bits_context *gb)
+{
+ struct MpegEncContext *s = &ctx->m;
+ int width, height, vo_ver_id;
+
+ /* vol header */
+ skip_bits(gb, 1); /* random access */
+ s->vo_type = get_bits(gb, 8);
+
+ /* If we are in studio profile (per vo_type), check if its all consistent
+ * and if so continue pass control to decode_studio_vol_header().
+ * elIf something is inconsistent, error out
+ * else continue with (non studio) vol header decpoding.
+ */
+ if (s->vo_type == CORE_STUDIO_VO_TYPE ||
+ s->vo_type == SIMPLE_STUDIO_VO_TYPE) {
+ if (ctx->profile != FF_PROFILE_UNKNOWN && ctx->profile != FF_PROFILE_MPEG4_SIMPLE_STUDIO)
+ return -1;
+ s->studio_profile = 1;
+ ctx->profile = FF_PROFILE_MPEG4_SIMPLE_STUDIO;
+ return decode_studio_vol_header(ctx, gb);
+ } else if (s->studio_profile) {
+ return -1;
+ }
+
+ if (get_bits1(gb) != 0) { /* is_ol_id */
+ vo_ver_id = get_bits(gb, 4); /* vo_ver_id */
+ skip_bits(gb, 3); /* vo_priority */
+ } else {
+ vo_ver_id = 1;
+ }
+ s->aspect_ratio_info = get_bits(gb, 4);
+ if (s->aspect_ratio_info == FF_ASPECT_EXTENDED) {
+ ctx->sample_aspect_ratio.num = get_bits(gb, 8); // par_width
+ ctx->sample_aspect_ratio.den = get_bits(gb, 8); // par_height
+ } else {
+ ctx->sample_aspect_ratio = ff_h263_pixel_aspect[s->aspect_ratio_info];
+ }
+
+ if ((ctx->vol_control_parameters = get_bits1(gb))) { /* vol control parameter */
+ int chroma_format = get_bits(gb, 2);
+ if (chroma_format != CHROMA_420)
+ pr_err("illegal chroma format\n");
+
+ s->low_delay = get_bits1(gb);
+ if (get_bits1(gb)) { /* vbv parameters */
+ get_bits(gb, 15); /* first_half_bitrate */
+ check_marker(gb, "after first_half_bitrate");
+ get_bits(gb, 15); /* latter_half_bitrate */
+ check_marker(gb, "after latter_half_bitrate");
+ get_bits(gb, 15); /* first_half_vbv_buffer_size */
+ check_marker(gb, "after first_half_vbv_buffer_size");
+ get_bits(gb, 3); /* latter_half_vbv_buffer_size */
+ get_bits(gb, 11); /* first_half_vbv_occupancy */
+ check_marker(gb, "after first_half_vbv_occupancy");
+ get_bits(gb, 15); /* latter_half_vbv_occupancy */
+ check_marker(gb, "after latter_half_vbv_occupancy");
+ }
+ } else {
+ /* is setting low delay flag only once the smartest thing to do?
+ * low delay detection will not be overridden. */
+ if (s->picture_number == 0) {
+ switch (s->vo_type) {
+ case SIMPLE_VO_TYPE:
+ case ADV_SIMPLE_VO_TYPE:
+ s->low_delay = 1;
+ break;
+ default:
+ s->low_delay = 0;
+ }
+ }
+ }
+
+ ctx->shape = get_bits(gb, 2); /* vol shape */
+ if (ctx->shape != RECT_SHAPE)
+ pr_err("only rectangular vol supported\n");
+ if (ctx->shape == GRAY_SHAPE && vo_ver_id != 1) {
+ pr_err("Gray shape not supported\n");
+ skip_bits(gb, 4); /* video_object_layer_shape_extension */
+ }
+
+ check_marker(gb, "before time_increment_resolution");
+
+ /* vop_time_increment_resolution: ticks per second; its bit width
+ * (time_increment_bits) is derived as log2 of the max value */
+ ctx->framerate.num = get_bits(gb, 16);
+ if (!ctx->framerate.num) {
+ pr_err("framerate==0\n");
+ return -1;
+ }
+
+ ctx->time_increment_bits = av_log2(ctx->framerate.num - 1) + 1;
+ if (ctx->time_increment_bits < 1)
+ ctx->time_increment_bits = 1;
+
+ check_marker(gb, "before fixed_vop_rate");
+
+ if (get_bits1(gb) != 0) /* fixed_vop_rate */
+ ctx->framerate.den = get_bits(gb, ctx->time_increment_bits);
+ else
+ ctx->framerate.den = 1;
+
+ //ctx->time_base = av_inv_q(av_mul_q(ctx->framerate, (AVRational){ctx->ticks_per_frame, 1}));
+
+ ctx->t_frame = 0;
+
+ if (ctx->shape != BIN_ONLY_SHAPE) {
+ if (ctx->shape == RECT_SHAPE) {
+ check_marker(gb, "before width");
+ width = get_bits(gb, 13);
+ check_marker(gb, "before height");
+ height = get_bits(gb, 13);
+ check_marker(gb, "after height");
+ if (width && height && /* they should be non zero but who knows */
+ !(s->width && s->codec_tag == AV_RL32("MP4S"))) {
+ if (s->width && s->height &&
+ (s->width != width || s->height != height))
+ s->context_reinit = 1;
+ s->width = width;
+ s->height = height;
+ }
+ }
+
+ s->progressive_sequence =
+ s->progressive_frame = get_bits1(gb) ^ 1;
+ s->interlaced_dct = 0;
+ if (!get_bits1(gb)) /* OBMC Disable */
+ pr_info("MPEG-4 OBMC not supported (very likely buggy encoder)\n");
+ if (vo_ver_id == 1)
+ ctx->vol_sprite_usage = get_bits1(gb); /* vol_sprite_usage */
+ else
+ ctx->vol_sprite_usage = get_bits(gb, 2); /* vol_sprite_usage */
+
+ if (ctx->vol_sprite_usage == STATIC_SPRITE)
+ pr_err("Static Sprites not supported\n");
+ if (ctx->vol_sprite_usage == STATIC_SPRITE ||
+ ctx->vol_sprite_usage == GMC_SPRITE) {
+ if (ctx->vol_sprite_usage == STATIC_SPRITE) {
+ skip_bits(gb, 13); // sprite_width
+ check_marker(gb, "after sprite_width");
+ skip_bits(gb, 13); // sprite_height
+ check_marker(gb, "after sprite_height");
+ skip_bits(gb, 13); // sprite_left
+ check_marker(gb, "after sprite_left");
+ skip_bits(gb, 13); // sprite_top
+ check_marker(gb, "after sprite_top");
+ }
+ ctx->num_sprite_warping_points = get_bits(gb, 6);
+ if (ctx->num_sprite_warping_points > 3) {
+ pr_err("%d sprite_warping_points\n",
+ ctx->num_sprite_warping_points);
+ ctx->num_sprite_warping_points = 0;
+ return -1;
+ }
+ s->sprite_warping_accuracy = get_bits(gb, 2);
+ ctx->sprite_brightness_change = get_bits1(gb);
+ if (ctx->vol_sprite_usage == STATIC_SPRITE)
+ skip_bits1(gb); // low_latency_sprite
+ }
+ // FIXME sadct disable bit if verid!=1 && shape not rect
+
+ if (get_bits1(gb) == 1) { /* not_8_bit */
+ s->quant_precision = get_bits(gb, 4); /* quant_precision */
+ if (get_bits(gb, 4) != 8) /* bits_per_pixel */
+ pr_err("N-bit not supported\n");
+ if (s->quant_precision != 5)
+ pr_err("quant precision %d\n", s->quant_precision);
+ if (s->quant_precision<3 || s->quant_precision>9) {
+ s->quant_precision = 5;
+ }
+ } else {
+ s->quant_precision = 5;
+ }
+
+ // FIXME a bunch of grayscale shape things
+
+ /* custom quant matrices are parsed for bit-position correctness
+ * only; the values themselves are currently discarded */
+ if ((s->mpeg_quant = get_bits1(gb))) { /* vol_quant_type */
+ int i, v;
+
+ //mpeg4_load_default_matrices(s);
+
+ /* load custom intra matrix */
+ if (get_bits1(gb)) {
+ int last = 0;
+ for (i = 0; i < 64; i++) {
+ //int j;
+ if (get_bits_left(gb) < 8) {
+ pr_err("insufficient data for custom matrix\n");
+ return -1;
+ }
+ v = get_bits(gb, 8);
+ if (v == 0)
+ break;
+
+ last = v;
+ //j = s->idsp.idct_permutation[ff_zigzag_direct[i]];
+ //s->intra_matrix[j] = last;
+ //s->chroma_intra_matrix[j] = last;
+ }
+
+ /* replicate last value */
+ //for (; i < 64; i++) {
+ //int j = s->idsp.idct_permutation[ff_zigzag_direct[i]];
+ //s->intra_matrix[j] = last;
+ //s->chroma_intra_matrix[j] = last;
+ //}
+ }
+
+ /* load custom non intra matrix */
+ if (get_bits1(gb)) {
+ int last = 0;
+ for (i = 0; i < 64; i++) {
+ //int j;
+ if (get_bits_left(gb) < 8) {
+ pr_err("insufficient data for custom matrix\n");
+ return -1;
+ }
+ v = get_bits(gb, 8);
+ if (v == 0)
+ break;
+
+ last = v;
+ //j = s->idsp.idct_permutation[ff_zigzag_direct[i]];
+ //s->inter_matrix[j] = v;
+ //s->chroma_inter_matrix[j] = v;
+ }
+
+ /* replicate last value */
+ //for (; i < 64; i++) {
+ //int j = s->idsp.idct_permutation[ff_zigzag_direct[i]];
+ //s->inter_matrix[j] = last;
+ //s->chroma_inter_matrix[j] = last;
+ //}
+ }
+
+ // FIXME a bunch of grayscale shape things
+ }
+
+ if (vo_ver_id != 1)
+ s->quarter_sample = get_bits1(gb);
+ else
+ s->quarter_sample = 0;
+
+ if (get_bits_left(gb) < 4) {
+ pr_err("VOL Header truncated\n");
+ return -1;
+ }
+
+ /* complexity estimation header: all fields are discarded, but their
+ * total bit cost per picture type is accumulated so later VOP
+ * parsing can skip them */
+ if (!get_bits1(gb)) {
+ int pos = get_bits_count(gb);
+ int estimation_method = get_bits(gb, 2);
+ if (estimation_method < 2) {
+ if (!get_bits1(gb)) {
+ ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* opaque */
+ ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* transparent */
+ ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* intra_cae */
+ ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* inter_cae */
+ ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* no_update */
+ ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* upsampling */
+ }
+ if (!get_bits1(gb)) {
+ ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* intra_blocks */
+ ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* inter_blocks */
+ ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* inter4v_blocks */
+ ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* not coded blocks */
+ }
+ if (!check_marker(gb, "in complexity estimation part 1")) {
+ skip_bits_long(gb, pos - get_bits_count(gb));
+ goto no_cplx_est;
+ }
+ if (!get_bits1(gb)) {
+ ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* dct_coeffs */
+ ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* dct_lines */
+ ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* vlc_syms */
+ ctx->cplx_estimation_trash_i += 4 * get_bits1(gb); /* vlc_bits */
+ }
+ if (!get_bits1(gb)) {
+ ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* apm */
+ ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* npm */
+ ctx->cplx_estimation_trash_b += 8 * get_bits1(gb); /* interpolate_mc_q */
+ ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* forwback_mc_q */
+ ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* halfpel2 */
+ ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* halfpel4 */
+ }
+ if (!check_marker(gb, "in complexity estimation part 2")) {
+ skip_bits_long(gb, pos - get_bits_count(gb));
+ goto no_cplx_est;
+ }
+ if (estimation_method == 1) {
+ ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* sadct */
+ ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* qpel */
+ }
+ } else
+ pr_err("Invalid Complexity estimation method %d\n",
+ estimation_method);
+ } else {
+
+no_cplx_est:
+ ctx->cplx_estimation_trash_i =
+ ctx->cplx_estimation_trash_p =
+ ctx->cplx_estimation_trash_b = 0;
+ }
+
+ ctx->resync_marker = !get_bits1(gb); /* resync_marker_disabled */
+
+ s->data_partitioning = get_bits1(gb);
+ if (s->data_partitioning)
+ ctx->rvlc = get_bits1(gb);
+
+ if (vo_ver_id != 1) {
+ ctx->new_pred = get_bits1(gb);
+ if (ctx->new_pred) {
+ pr_err("new pred not supported\n");
+ skip_bits(gb, 2); /* requested upstream message type */
+ skip_bits1(gb); /* newpred segment type */
+ }
+ if (get_bits1(gb)) // reduced_res_vop
+ pr_err("reduced resolution VOP not supported\n");
+ } else {
+ ctx->new_pred = 0;
+ }
+
+ ctx->scalability = get_bits1(gb);
+
+ if (ctx->scalability) {
+ struct get_bits_context bak = *gb;
+ int h_sampling_factor_n;
+ int h_sampling_factor_m;
+ int v_sampling_factor_n;
+ int v_sampling_factor_m;
+
+ skip_bits1(gb); // hierarchy_type
+ skip_bits(gb, 4); /* ref_layer_id */
+ skip_bits1(gb); /* ref_layer_sampling_dir */
+ h_sampling_factor_n = get_bits(gb, 5);
+ h_sampling_factor_m = get_bits(gb, 5);
+ v_sampling_factor_n = get_bits(gb, 5);
+ v_sampling_factor_m = get_bits(gb, 5);
+ ctx->enhancement_type = get_bits1(gb);
+
+ if (h_sampling_factor_n == 0 || h_sampling_factor_m == 0 ||
+ v_sampling_factor_n == 0 || v_sampling_factor_m == 0) {
+ /* illegal scalability header (VERY broken encoder),
+ * trying to workaround */
+ ctx->scalability = 0;
+ *gb = bak;
+ } else
+ pr_err("scalability not supported\n");
+
+ // bin shape stuff FIXME
+ }
+ }
+
+ if (1) {
+ pr_info("tb %d/%d, tincrbits:%d, qp_prec:%d, ps:%d, low_delay:%d %s%s%s%s\n",
+ ctx->framerate.den, ctx->framerate.num,
+ ctx->time_increment_bits,
+ s->quant_precision,
+ s->progressive_sequence,
+ s->low_delay,
+ ctx->scalability ? "scalability " :"" , s->quarter_sample ? "qpel " : "",
+ s->data_partitioning ? "partition " : "", ctx->rvlc ? "rvlc " : ""
+ );
+ }
+
+ return 0;
+}
+
+
+/**
+ * Decode the user data stuff in the header.
+ * Also initializes divx/xvid/lavc_version/build.
+ */
+/**
+ * Decode the user data stuff in the header.
+ * Also initializes divx/xvid/lavc_version/build.
+ */
+static int decode_user_data(struct mpeg4_dec_param *ctx, struct get_bits_context *gb)
+{
+ struct MpegEncContext *s = &ctx->m;
+ char buf[256];
+ int i;
+ int e;
+ int ver = 0, build = 0, ver2 = 0, ver3 = 0;
+ char last;
+
+ /* copy the user-data string until a start-code prefix or 255 bytes */
+ for (i = 0; i < 255 && get_bits_count(gb) < gb->size_in_bits; i++) {
+ if (show_bits(gb, 23) == 0)
+ break;
+ buf[i] = get_bits(gb, 8);
+ }
+ buf[i] = 0;
+
+ /* divx detection */
+ e = sscanf(buf, "DivX%dBuild%d%c", &ver, &build, &last);
+ if (e < 2)
+ e = sscanf(buf, "DivX%db%d%c", &ver, &build, &last);
+ if (e >= 2) {
+ ctx->divx_version = ver;
+ ctx->divx_build = build;
+ s->divx_packed = e == 3 && last == 'p';
+ }
+
+ /* libavcodec detection */
+ /* NOTE(review): this format string uses the '%*[^b]' scanset
+ * conversion, which the kernel's vsscanf() does not implement —
+ * verify FFmpeg-build detection actually works on this platform. */
+ e = sscanf(buf, "FFmpe%*[^b]b%d", &build) + 3;
+ if (e != 4)
+ e = sscanf(buf, "FFmpeg v%d.%d.%d / libavcodec build: %d", &ver, &ver2, &ver3, &build);
+ if (e != 4) {
+ e = sscanf(buf, "Lavc%d.%d.%d", &ver, &ver2, &ver3) + 1;
+ if (e > 1) {
+ if (ver > 0xFFU || ver2 > 0xFFU || ver3 > 0xFFU) {
+ pr_info("Unknown Lavc version string encountered, %d.%d.%d; "
+ "clamping sub-version values to 8-bits.\n",
+ ver, ver2, ver3);
+ }
+ build = ((ver & 0xFF) << 16) + ((ver2 & 0xFF) << 8) + (ver3 & 0xFF);
+ }
+ }
+ if (e != 4) {
+ if (strcmp(buf, "ffmpeg") == 0)
+ ctx->lavc_build = 4600;
+ }
+ if (e == 4)
+ ctx->lavc_build = build;
+
+ /* Xvid detection */
+ e = sscanf(buf, "XviD%d", &build);
+ if (e == 1)
+ ctx->xvid_build = build;
+
+ return 0;
+}
+
+
+/*
+ * Parse a GOP header: 23 bits total — hours(5)/minutes(6)/marker/
+ * seconds(6) time code followed by two flag bits. Stores the time code
+ * in seconds into s->time_base. Returns 0 on success, -1 when the
+ * upcoming 23 bits are all zero (invalid header).
+ */
+static int mpeg4_decode_gop_header(struct MpegEncContext *s, struct get_bits_context *gb)
+{
+ int h, m, sec;
+
+ if (!show_bits(gb, 23)) {
+ pr_err("GOP header invalid\n");
+ return -1;
+ }
+
+ h = get_bits(gb, 5);
+ m = get_bits(gb, 6);
+ check_marker(gb, "in gop_header");
+ sec = get_bits(gb, 6);
+
+ s->time_base = h * 3600 + m * 60 + sec;
+
+ skip_bits(gb, 2); /* two trailing flag bits */
+
+ return 0;
+}
+
+
+/*
+ * Read the 8-bit profile_and_level_indication: high nibble is the
+ * profile, low nibble the level. Always returns 0.
+ */
+static int mpeg4_decode_profile_level(struct MpegEncContext *s, struct get_bits_context *gb, int *profile, int *level)
+{
+ int p = get_bits(gb, 4);
+ int l = get_bits(gb, 4);
+
+ // for Simple profile, level 0 is signalled with the value 8
+ *profile = p;
+ *level = (p == 0 && l == 8) ? 0 : l;
+
+ return 0;
+}
+
+
+/*
+ * Parse a studio-profile visual object header; only VOT_VIDEO_ID is
+ * accepted. Returns 0 on success, -1 on any other visual object type.
+ */
+static int decode_studiovisualobject(struct mpeg4_dec_param *ctx, struct get_bits_context *gb)
+{
+ struct MpegEncContext *s = &ctx->m;
+ int visual_object_type;
+
+ skip_bits(gb, 4); /* visual_object_verid */
+ visual_object_type = get_bits(gb, 4);
+ if (visual_object_type != VOT_VIDEO_ID) {
+ pr_err("VO type %u", visual_object_type);
+ return -1;
+ }
+
+ next_start_code_studio(gb);
+ extension_and_user_data(s, gb, 1);
+
+ return 0;
+}
+
+
+/*
+ * Parse a (non-studio) visual object header. For video / still-texture
+ * objects the optional video_signal_type carries colour range and,
+ * when color_description is set, primaries / transfer / matrix
+ * coefficients, which are stored into s->ctx. Always returns 0.
+ */
+static int mpeg4_decode_visual_object(struct MpegEncContext *s, struct get_bits_context *gb)
+{
+ int visual_object_type;
+ int is_visual_object_identifier = get_bits1(gb);
+
+ if (is_visual_object_identifier) {
+ skip_bits(gb, 4+3); /* verid + priority */
+ }
+ visual_object_type = get_bits(gb, 4);
+
+ if (visual_object_type == VOT_VIDEO_ID ||
+ visual_object_type == VOT_STILL_TEXTURE_ID) {
+ int video_signal_type = get_bits1(gb);
+ if (video_signal_type) {
+ int video_range, color_description;
+ skip_bits(gb, 3); // video_format
+ video_range = get_bits1(gb);
+ color_description = get_bits1(gb);
+
+ s->ctx->color_range = video_range ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
+
+ if (color_description) {
+ s->ctx->color_primaries = get_bits(gb, 8);
+ s->ctx->color_trc = get_bits(gb, 8);
+ s->ctx->colorspace = get_bits(gb, 8);
+ }
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Skip the 64-bit SMPTE time code of a studio VOP header: four 16-bit
+ * chunks, each followed by a marker bit, then 4 reserved bits.
+ */
+static void decode_smpte_tc(struct mpeg4_dec_param *ctx, struct get_bits_context *gb)
+{
+ static const char * const marker_tag[4] = {
+ "after Time_code[63..48]",
+ "after Time_code[47..32]",
+ "after Time_code[31..16]",
+ "after Time_code[15..0]",
+ };
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ skip_bits(gb, 16);
+ check_marker(gb, marker_tag[i]);
+ }
+
+ skip_bits(gb, 4); /* reserved_bits */
+}
+
+/* Reset all three DC predictors to the mid-range value implied by the
+ * configured sample bit depth and DC/DCT precision. */
+static void reset_studio_dc_predictors(struct MpegEncContext *s)
+{
+ const int mid = 1 << (s->ctx->bits_per_raw_sample + s->dct_precision + s->intra_dc_precision - 1);
+
+ s->last_dc[0] = mid;
+ s->last_dc[1] = mid;
+ s->last_dc[2] = mid;
+}
+
+/**
+ * Decode the next studio vop header.
+ * @return <0 if something went wrong
+ */
+static int decode_studio_vop_header(struct mpeg4_dec_param *ctx, struct get_bits_context *gb)
+{
+ struct MpegEncContext *s = &ctx->m;
+
+ if (get_bits_left(gb) <= 32)
+ return 0;
+
+ //s->decode_mb = mpeg4_decode_studio_mb;
+
+ decode_smpte_tc(ctx, gb);
+
+ skip_bits(gb, 10); /* temporal_reference */
+ skip_bits(gb, 2); /* vop_structure */
+ s->pict_type = get_bits(gb, 2) + AV_PICTURE_TYPE_I; /* vop_coding_type */
+ if (get_bits1(gb)) { /* vop_coded */
+ skip_bits1(gb); /* top_field_first */
+ skip_bits1(gb); /* repeat_first_field */
+ s->progressive_frame = get_bits1(gb) ^ 1; /* progressive_frame */
+ }
+
+ if (s->pict_type == AV_PICTURE_TYPE_I) {
+ if (get_bits1(gb))
+ reset_studio_dc_predictors(s);
+ }
+
+ if (ctx->shape != BIN_ONLY_SHAPE) {
+ s->alternate_scan = get_bits1(gb);
+ s->frame_pred_frame_dct = get_bits1(gb);
+ s->dct_precision = get_bits(gb, 2);
+ s->intra_dc_precision = get_bits(gb, 2);
+ s->q_scale_type = get_bits1(gb);
+ }
+
+ //if (s->alternate_scan) { }
+
+ //mpeg4_load_default_matrices(s);
+
+ next_start_code_studio(gb);
+ extension_and_user_data(s, gb, 4);
+
+ return 0;
+}
+
+/*
+ * Consume a newpred header: a len-bit field, then optionally a second
+ * len-bit field, then a marker bit. Always returns 0.
+ */
+static int decode_new_pred(struct mpeg4_dec_param *ctx, struct get_bits_context *gb)
+{
+ const int len = FFMIN(ctx->time_increment_bits + 3, 15);
+
+ skip_bits(gb, len);
+ if (get_bits1(gb))
+ skip_bits(gb, len);
+ check_marker(gb, "after new_pred");
+
+ return 0;
+}
+
+/*
+ * Parse a (non-studio) MPEG-4 VOP header: picture type, modulo/VOP time
+ * increments, rounding control, quantizer and f/b codes.  Returns 0 on
+ * success, FRAME_SKIPPED for non-coded or out-of-order VOPs, and -1 on
+ * a damaged header.
+ */
+static int decode_vop_header(struct mpeg4_dec_param *ctx, struct get_bits_context *gb)
+{
+ struct MpegEncContext *s = &ctx->m;
+ int time_incr, time_increment;
+ int64_t pts;
+
+ s->mcsel = 0;
+ s->pict_type = get_bits(gb, 2) + AV_PICTURE_TYPE_I; /* pict type: I = 0 , P = 1 */
+ if (s->pict_type == AV_PICTURE_TYPE_B && s->low_delay &&
+ ctx->vol_control_parameters == 0) {
+ /* B-frames contradict low_delay; trust the bitstream content */
+ pr_err("low_delay flag set incorrectly, clearing it\n");
+ s->low_delay = 0;
+ }
+
+ s->partitioned_frame = s->data_partitioning && s->pict_type != AV_PICTURE_TYPE_B;
+ /*if (s->partitioned_frame)
+ s->decode_mb = mpeg4_decode_partitioned_mb;
+ else
+ s->decode_mb = mpeg4_decode_mb;*/
+
+ /* modulo_time_base: one '1' bit per elapsed second */
+ time_incr = 0;
+ while (get_bits1(gb) != 0)
+ time_incr++;
+
+ check_marker(gb, "before time_increment");
+
+ /* If the VOL header was missing/corrupt, recover time_increment_bits by
+ * scanning for a plausible marker/fcode pattern after the increment. */
+ if (ctx->time_increment_bits == 0 ||
+ !(show_bits(gb, ctx->time_increment_bits + 1) & 1)) {
+ pr_info("time_increment_bits %d is invalid in relation to the current bitstream, this is likely caused by a missing VOL header\n", ctx->time_increment_bits);
+
+ for (ctx->time_increment_bits = 1;
+ ctx->time_increment_bits < 16;
+ ctx->time_increment_bits++) {
+ if (s->pict_type == AV_PICTURE_TYPE_P ||
+ (s->pict_type == AV_PICTURE_TYPE_S &&
+ ctx->vol_sprite_usage == GMC_SPRITE)) {
+ if ((show_bits(gb, ctx->time_increment_bits + 6) & 0x37) == 0x30)
+ break;
+ } else if ((show_bits(gb, ctx->time_increment_bits + 5) & 0x1F) == 0x18)
+ break;
+ }
+
+ pr_info("time_increment_bits set to %d bits, based on bitstream analysis\n", ctx->time_increment_bits);
+ if (ctx->framerate.num && 4*ctx->framerate.num < 1<<ctx->time_increment_bits) {
+ ctx->framerate.num = 1<<ctx->time_increment_bits;
+ //ctx->time_base = av_inv_q(av_mul_q(ctx->framerate, (AVRational){ctx->ticks_per_frame, 1}));
+ }
+ }
+
+ if (IS_3IV1)
+ time_increment = get_bits1(gb); // FIXME investigate further
+ else
+ time_increment = get_bits(gb, ctx->time_increment_bits);
+
+ if (s->pict_type != AV_PICTURE_TYPE_B) {
+ /* reference frame: advance the time base and remember P/S/I spacing */
+ s->last_time_base = s->time_base;
+ s->time_base += time_incr;
+ s->time = s->time_base * (int64_t)ctx->framerate.num + time_increment;
+ //if (s->workaround_bugs & FF_BUG_UMP4) { }
+ s->pp_time = s->time - s->last_non_b_time;
+ s->last_non_b_time = s->time;
+ } else {
+ /* B-frame: time is relative to the last reference frame's base */
+ s->time = (s->last_time_base + time_incr) * (int64_t)ctx->framerate.num + time_increment;
+ s->pb_time = s->pp_time - (s->last_non_b_time - s->time);
+ if (s->pp_time <= s->pb_time ||
+ s->pp_time <= s->pp_time - s->pb_time ||
+ s->pp_time <= 0) {
+ /* messed up order, maybe after seeking? skipping current B-frame */
+ return FRAME_SKIPPED;
+ }
+ //ff_mpeg4_init_direct_mv(s);
+
+ if (ctx->t_frame == 0)
+ ctx->t_frame = s->pb_time;
+ if (ctx->t_frame == 0)
+ ctx->t_frame = 1; // 1/0 protection
+ s->pp_field_time = (ROUNDED_DIV(s->last_non_b_time, ctx->t_frame) -
+ ROUNDED_DIV(s->last_non_b_time - s->pp_time, ctx->t_frame)) * 2;
+ s->pb_field_time = (ROUNDED_DIV(s->time, ctx->t_frame) -
+ ROUNDED_DIV(s->last_non_b_time - s->pp_time, ctx->t_frame)) * 2;
+ if (s->pp_field_time <= s->pb_field_time || s->pb_field_time <= 1) {
+ s->pb_field_time = 2;
+ s->pp_field_time = 4;
+ if (!s->progressive_sequence)
+ return FRAME_SKIPPED;
+ }
+ }
+
+ if (ctx->framerate.den)
+ pts = ROUNDED_DIV(s->time, ctx->framerate.den);
+ else
+ pts = AV_NOPTS_VALUE;
+ pr_info("MPEG4 PTS: %lld\n", pts);
+
+ check_marker(gb, "before vop_coded");
+
+ /* vop coded */
+ if (get_bits1(gb) != 1) {
+ if (1)
+ pr_err("vop not coded\n");
+ return FRAME_SKIPPED;
+ }
+ if (ctx->new_pred)
+ decode_new_pred(ctx, gb);
+
+ if (ctx->shape != BIN_ONLY_SHAPE &&
+ (s->pict_type == AV_PICTURE_TYPE_P ||
+ (s->pict_type == AV_PICTURE_TYPE_S &&
+ ctx->vol_sprite_usage == GMC_SPRITE))) {
+ /* rounding type for motion estimation */
+ s->no_rounding = get_bits1(gb);
+ } else {
+ s->no_rounding = 0;
+ }
+ // FIXME reduced res stuff
+
+ if (ctx->shape != RECT_SHAPE) {
+ if (ctx->vol_sprite_usage != 1 || s->pict_type != AV_PICTURE_TYPE_I) {
+ skip_bits(gb, 13); /* width */
+ check_marker(gb, "after width");
+ skip_bits(gb, 13); /* height */
+ check_marker(gb, "after height");
+ skip_bits(gb, 13); /* hor_spat_ref */
+ check_marker(gb, "after hor_spat_ref");
+ skip_bits(gb, 13); /* ver_spat_ref */
+ }
+ skip_bits1(gb); /* change_CR_disable */
+
+ if (get_bits1(gb) != 0)
+ skip_bits(gb, 8); /* constant_alpha_value */
+ }
+
+ // FIXME complexity estimation stuff
+
+ if (ctx->shape != BIN_ONLY_SHAPE) {
+ /* discard complexity-estimation bits sized from the VOL header */
+ skip_bits_long(gb, ctx->cplx_estimation_trash_i);
+ if (s->pict_type != AV_PICTURE_TYPE_I)
+ skip_bits_long(gb, ctx->cplx_estimation_trash_p);
+ if (s->pict_type == AV_PICTURE_TYPE_B)
+ skip_bits_long(gb, ctx->cplx_estimation_trash_b);
+
+ if (get_bits_left(gb) < 3) {
+ pr_err("Header truncated\n");
+ return -1;
+ }
+ ctx->intra_dc_threshold = ff_mpeg4_dc_threshold[get_bits(gb, 3)];
+ if (!s->progressive_sequence) {
+ s->top_field_first = get_bits1(gb);
+ s->alternate_scan = get_bits1(gb);
+ } else
+ s->alternate_scan = 0;
+ }
+
+ /*if (s->alternate_scan) { } */
+
+ if (s->pict_type == AV_PICTURE_TYPE_S) {
+ if((ctx->vol_sprite_usage == STATIC_SPRITE ||
+ ctx->vol_sprite_usage == GMC_SPRITE)) {
+ //if (mpeg4_decode_sprite_trajectory(ctx, gb) < 0)
+ //return -1;
+ if (ctx->sprite_brightness_change)
+ pr_err("sprite_brightness_change not supported\n");
+ if (ctx->vol_sprite_usage == STATIC_SPRITE)
+ pr_err("static sprite not supported\n");
+ } else {
+ memset(s->sprite_offset, 0, sizeof(s->sprite_offset));
+ memset(s->sprite_delta, 0, sizeof(s->sprite_delta));
+ }
+ }
+
+ if (ctx->shape != BIN_ONLY_SHAPE) {
+ /* qscale==0 / f_code==0 / b_code==0 are not valid MPEG-4 values;
+ * bail out rather than decode garbage */
+ s->chroma_qscale = s->qscale = get_bits(gb, s->quant_precision);
+ if (s->qscale == 0) {
+ pr_err("Error, header damaged or not MPEG-4 header (qscale=0)\n");
+ return -1; // makes no sense to continue, as there is nothing left from the image then
+ }
+
+ if (s->pict_type != AV_PICTURE_TYPE_I) {
+ s->f_code = get_bits(gb, 3); /* fcode_for */
+ if (s->f_code == 0) {
+ pr_err("Error, header damaged or not MPEG-4 header (f_code=0)\n");
+ s->f_code = 1;
+ return -1; // makes no sense to continue, as there is nothing left from the image then
+ }
+ } else
+ s->f_code = 1;
+
+ if (s->pict_type == AV_PICTURE_TYPE_B) {
+ s->b_code = get_bits(gb, 3);
+ if (s->b_code == 0) {
+ pr_err("Error, header damaged or not MPEG4 header (b_code=0)\n");
+ s->b_code=1;
+ return -1; // makes no sense to continue, as the MV decoding will break very quickly
+ }
+ } else
+ s->b_code = 1;
+
+ if (1) {
+ pr_info("qp:%d fc:%d,%d %s size:%d pro:%d alt:%d top:%d %spel part:%d resync:%d w:%d a:%d rnd:%d vot:%d%s dc:%d ce:%d/%d/%d time:%ld tincr:%d\n",
+ s->qscale, s->f_code, s->b_code,
+ s->pict_type == AV_PICTURE_TYPE_I ? "I" : (s->pict_type == AV_PICTURE_TYPE_P ? "P" : (s->pict_type == AV_PICTURE_TYPE_B ? "B" : "S")),
+ gb->size_in_bits,s->progressive_sequence, s->alternate_scan,
+ s->top_field_first, s->quarter_sample ? "q" : "h",
+ s->data_partitioning, ctx->resync_marker,
+ ctx->num_sprite_warping_points, s->sprite_warping_accuracy,
+ 1 - s->no_rounding, s->vo_type,
+ ctx->vol_control_parameters ? " VOLC" : " ", ctx->intra_dc_threshold,
+ ctx->cplx_estimation_trash_i, ctx->cplx_estimation_trash_p,
+ ctx->cplx_estimation_trash_b,
+ s->time,
+ time_increment
+ );
+ }
+
+ if (!ctx->scalability) {
+ if (ctx->shape != RECT_SHAPE && s->pict_type != AV_PICTURE_TYPE_I)
+ skip_bits1(gb); // vop shape coding type
+ } else {
+ if (ctx->enhancement_type) {
+ int load_backward_shape = get_bits1(gb);
+ if (load_backward_shape)
+ pr_err("load backward shape isn't supported\n");
+ }
+ skip_bits(gb, 2); // ref_select_code
+ }
+ }
+ /* detect buggy encoders which don't set the low_delay flag
+ * (divx4/xvid/opendivx). Note we cannot detect divx5 without B-frames
+ * easily (although it's buggy too) */
+ if (s->vo_type == 0 && ctx->vol_control_parameters == 0 &&
+ ctx->divx_version == -1 && s->picture_number == 0) {
+ pr_info("looks like this file was encoded with (divx4/(old)xvid/opendivx) -> forcing low_delay flag\n");
+ s->low_delay = 1;
+ }
+
+ s->picture_number++; // better than pic number==0 always ;)
+
+ // FIXME add short header support
+ //s->y_dc_scale_table = ff_mpeg4_y_dc_scale_table;
+ //s->c_dc_scale_table = ff_mpeg4_c_dc_scale_table;
+
+ return 0;
+}
+
+/**
+ * Decode MPEG-4 headers.
+ *
+ * Scans the bitstream for start codes, dispatching VOS/VOL/GOP/visual-object
+ * headers to their parsers until a VOP start code is found, then decodes the
+ * VOP header (studio or regular variant).
+ *
+ * @return <0 if no VOP found (or a damaged one)
+ * FRAME_SKIPPED if a not coded VOP is found
+ * 0 if a VOP is found
+ */
+int ff_mpeg4_decode_picture_header(struct mpeg4_dec_param *ctx, struct get_bits_context *gb)
+{
+ struct MpegEncContext *s = &ctx->m;
+
+ unsigned startcode, v;
+ int ret;
+ int vol = 0;
+
+ s->ctx = ctx;
+
+ /* search next start code */
+ align_get_bits(gb);
+
+ // If we have not switched to studio profile than we also did not switch bps
+ // that means something else (like a previous instance) outside set bps which
+ // would be inconsistant with the currect state, thus reset it
+ //
+ // Fix: the bit depth must live in ctx->bits_per_raw_sample. The previous
+ // function-local variable was always 0, so the studio-profile path below
+ // could never pass the "Missing VOL header" check even after a valid VOL.
+ // NOTE(review): assumes decode_vol_header() stores the studio bit depth in
+ // ctx->bits_per_raw_sample -- confirm against the VOL parsing code.
+ if (!s->studio_profile && ctx->bits_per_raw_sample != 8)
+ ctx->bits_per_raw_sample = 0;
+
+ if (show_bits(gb, 24) == 0x575630) { /* bytes "WV0" */
+ skip_bits(gb, 24);
+ if (get_bits(gb, 8) == 0xF0)
+ goto end;
+ }
+
+ startcode = 0xff;
+ for (;;) {
+ if (get_bits_count(gb) >= gb->size_in_bits) {
+ if (gb->size_in_bits == 8) {
+ pr_info("frame skip %d\n", gb->size_in_bits);
+ return FRAME_SKIPPED; // divx bug
+ } else
+ return -1; // end of stream
+ }
+
+ /* use the bits after the test */
+ v = get_bits(gb, 8);
+ startcode = ((startcode << 8) | v) & 0xffffffff;
+
+ if ((startcode & 0xFFFFFF00) != 0x100)
+ continue; // no startcode
+
+ if (1) { //debug
+ pr_info("startcode: %3X \n", startcode);
+ if (startcode <= 0x11F)
+ pr_info("Video Object Start\n");
+ else if (startcode <= 0x12F)
+ pr_info("Video Object Layer Start\n");
+ else if (startcode <= 0x13F)
+ pr_info("Reserved\n");
+ else if (startcode <= 0x15F)
+ pr_info("FGS bp start\n");
+ else if (startcode <= 0x1AF)
+ pr_info("Reserved\n");
+ else if (startcode == 0x1B0)
+ pr_info("Visual Object Seq Start\n");
+ else if (startcode == 0x1B1)
+ pr_info("Visual Object Seq End\n");
+ else if (startcode == 0x1B2)
+ pr_info("User Data\n");
+ else if (startcode == 0x1B3)
+ pr_info("Group of VOP start\n");
+ else if (startcode == 0x1B4)
+ pr_info("Video Session Error\n");
+ else if (startcode == 0x1B5)
+ pr_info("Visual Object Start\n");
+ else if (startcode == 0x1B6)
+ pr_info("Video Object Plane start\n");
+ else if (startcode == 0x1B7)
+ pr_info("slice start\n");
+ else if (startcode == 0x1B8)
+ pr_info("extension start\n");
+ else if (startcode == 0x1B9)
+ pr_info("fgs start\n");
+ else if (startcode == 0x1BA)
+ pr_info("FBA Object start\n");
+ else if (startcode == 0x1BB)
+ pr_info("FBA Object Plane start\n");
+ else if (startcode == 0x1BC)
+ pr_info("Mesh Object start\n");
+ else if (startcode == 0x1BD)
+ pr_info("Mesh Object Plane start\n");
+ else if (startcode == 0x1BE)
+ pr_info("Still Texture Object start\n");
+ else if (startcode == 0x1BF)
+ pr_info("Texture Spatial Layer start\n");
+ else if (startcode == 0x1C0)
+ pr_info("Texture SNR Layer start\n");
+ else if (startcode == 0x1C1)
+ pr_info("Texture Tile start\n");
+ else if (startcode == 0x1C2)
+ pr_info("Texture Shape Layer start\n");
+ else if (startcode == 0x1C3)
+ pr_info("stuffing start\n");
+ else if (startcode <= 0x1C5)
+ pr_info("reserved\n");
+ else if (startcode <= 0x1FF)
+ pr_info("System start\n");
+ }
+
+ if (startcode >= 0x120 && startcode <= 0x12F) {
+ if (vol) {
+ pr_err("Ignoring multiple VOL headers\n");
+ continue;
+ }
+ vol++;
+ if ((ret = decode_vol_header(ctx, gb)) < 0)
+ return ret;
+ } else if (startcode == USER_DATA_STARTCODE) {
+ decode_user_data(ctx, gb);
+ } else if (startcode == GOP_STARTCODE) {
+ mpeg4_decode_gop_header(s, gb);
+ } else if (startcode == VOS_STARTCODE) {
+ int profile, level;
+ mpeg4_decode_profile_level(s, gb, &profile, &level);
+ if (profile == FF_PROFILE_MPEG4_SIMPLE_STUDIO &&
+ (level > 0 && level < 9)) {
+ s->studio_profile = 1;
+ next_start_code_studio(gb);
+ extension_and_user_data(s, gb, 0);
+ } else if (s->studio_profile) {
+ pr_err("Mixes studio and non studio profile\n");
+ return -1;
+ }
+ ctx->profile = profile;
+ ctx->level = level;
+ } else if (startcode == VISUAL_OBJ_STARTCODE) {
+ if (s->studio_profile) {
+ if ((ret = decode_studiovisualobject(ctx, gb)) < 0)
+ return ret;
+ } else
+ mpeg4_decode_visual_object(s, gb);
+ } else if (startcode == VOP_STARTCODE) {
+ break;
+ }
+
+ align_get_bits(gb);
+ startcode = 0xff;
+ }
+
+end:
+ if (s->studio_profile) {
+ if (!ctx->bits_per_raw_sample) {
+ pr_err("Missing VOL header\n");
+ return -1;
+ }
+ return decode_studio_vop_header(ctx, gb);
+ } else
+ return decode_vop_header(ctx, gb);
+}
+
+/*
+ * Parse MPEG-4 extradata (VOS/VOL headers) into @ps.
+ *
+ * NOTE(review): the check is deliberately `ret < -1`: extradata usually
+ * contains only configuration headers and no VOP, so the picture-header
+ * parser hitting end-of-stream (-1) is expected and not an error --
+ * confirm this matches ff_mpeg4_decode_picture_header()'s return codes.
+ * head_parsed is only set once a valid width/height pair was seen.
+ */
+int mpeg4_decode_extradata_ps(u8 *buf, int size, struct mpeg4_param_sets *ps)
+{
+ int ret = 0;
+ struct get_bits_context gb;
+
+ ps->head_parsed = false;
+
+ init_get_bits8(&gb, buf, size);
+
+ ret = ff_mpeg4_decode_picture_header(&ps->dec_ps, &gb);
+ if (ret < -1) {
+ pr_err("Failed to parse extradata\n");
+ return ret;
+ }
+
+ if (ps->dec_ps.m.width && ps->dec_ps.m.height)
+ ps->head_parsed = true;
+
+ return 0;
+}
+
--- /dev/null
+#ifndef AVCODEC_MPEG4VIDEO_H
+#define AVCODEC_MPEG4VIDEO_H
+
+#include "../utils/pixfmt.h"
+#include "../utils/common.h"
+
+//mpeg4 profile
+#define FF_PROFILE_MPEG4_SIMPLE 0
+#define FF_PROFILE_MPEG4_SIMPLE_SCALABLE 1
+#define FF_PROFILE_MPEG4_CORE 2
+#define FF_PROFILE_MPEG4_MAIN 3
+#define FF_PROFILE_MPEG4_N_BIT 4
+#define FF_PROFILE_MPEG4_SCALABLE_TEXTURE 5
+#define FF_PROFILE_MPEG4_SIMPLE_FACE_ANIMATION 6
+#define FF_PROFILE_MPEG4_BASIC_ANIMATED_TEXTURE 7
+#define FF_PROFILE_MPEG4_HYBRID 8
+#define FF_PROFILE_MPEG4_ADVANCED_REAL_TIME 9
+#define FF_PROFILE_MPEG4_CORE_SCALABLE 10
+#define FF_PROFILE_MPEG4_ADVANCED_CODING 11
+#define FF_PROFILE_MPEG4_ADVANCED_CORE 12
+#define FF_PROFILE_MPEG4_ADVANCED_SCALABLE_TEXTURE 13
+#define FF_PROFILE_MPEG4_SIMPLE_STUDIO 14
+#define FF_PROFILE_MPEG4_ADVANCED_SIMPLE 15
+
+// shapes
+#define RECT_SHAPE 0
+#define BIN_SHAPE 1
+#define BIN_ONLY_SHAPE 2
+#define GRAY_SHAPE 3
+
+#define SIMPLE_VO_TYPE 1
+#define CORE_VO_TYPE 3
+#define MAIN_VO_TYPE 4
+#define NBIT_VO_TYPE 5
+#define ARTS_VO_TYPE 10
+#define ACE_VO_TYPE 12
+#define SIMPLE_STUDIO_VO_TYPE 14
+#define CORE_STUDIO_VO_TYPE 15
+#define ADV_SIMPLE_VO_TYPE 17
+
+#define VOT_VIDEO_ID 1
+#define VOT_STILL_TEXTURE_ID 2
+
+#define FF_PROFILE_UNKNOWN -99
+#define FF_PROFILE_RESERVED -100
+
+// aspect_ratio_info
+#define EXTENDED_PAR 15
+
+//vol_sprite_usage / sprite_enable
+#define STATIC_SPRITE 1
+#define GMC_SPRITE 2
+
+#define MOTION_MARKER 0x1F001
+#define DC_MARKER 0x6B001
+
+#define VOS_STARTCODE 0x1B0
+#define USER_DATA_STARTCODE 0x1B2
+#define GOP_STARTCODE 0x1B3
+#define VISUAL_OBJ_STARTCODE 0x1B5
+#define VOP_STARTCODE 0x1B6
+#define SLICE_STARTCODE 0x1B7
+#define EXT_STARTCODE 0x1B8
+
+#define QUANT_MATRIX_EXT_ID 0x3
+
+/* smaller packets likely don't contain a real frame */
+#define MAX_NVOP_SIZE 19
+
+#define IS_3IV1 0
+
+#define CHROMA_420 1
+#define CHROMA_422 2
+#define CHROMA_444 3
+
+#define FF_ASPECT_EXTENDED 15
+
+#define AV_NOPTS_VALUE (LONG_MIN)
+
+/**
+ * Return value for header parsers if frame is not coded.
+ * */
+#define FRAME_SKIPPED 100
+
+/* Frame/VOP coding types; values are offsets added to parsed 2-bit
+ * vop_coding_type fields (see decode_vop_header). */
+enum AVPictureType {
+ AV_PICTURE_TYPE_NONE = 0, ///< Undefined
+ AV_PICTURE_TYPE_I, ///< Intra
+ AV_PICTURE_TYPE_P, ///< Predicted
+ AV_PICTURE_TYPE_B, ///< Bi-dir predicted
+ AV_PICTURE_TYPE_S, ///< S(GMC)-VOP MPEG-4
+ AV_PICTURE_TYPE_SI, ///< Switching Intra
+ AV_PICTURE_TYPE_SP, ///< Switching Predicted
+ AV_PICTURE_TYPE_BI, ///< BI type
+};
+
+/* Variable-length-code lookup table: each entry is a (code, bits) pair. */
+struct VLC {
+ int bits;
+ short (*table)[2]; ///< code, bits
+ int table_size, table_allocated;
+};
+
+/**
+ * MpegEncContext.
+ * Trimmed-down decoder state shared by the MPEG-4 header parsers; only the
+ * fields needed for header parsing are kept (no full MC/DCT state).
+ */
+struct MpegEncContext {
+ struct mpeg4_dec_param *ctx;
+
+ /* the following parameters must be initialized before encoding */
+ int width, height;///< picture size. must be a multiple of 16
+ int codec_tag; ///< internal codec_tag upper case converted from avctx codec_tag
+ int picture_number; //FIXME remove, unclear definition
+
+ /** matrix transmitted in the bitstream */
+ u16 intra_matrix[64];
+ u16 chroma_intra_matrix[64];
+ u16 inter_matrix[64];
+ u16 chroma_inter_matrix[64];
+
+ /* MPEG-4 specific */
+ int studio_profile;
+ int time_base; ///< time in seconds of last I,P,S Frame
+ int quant_precision;
+ int quarter_sample; ///< 1->qpel, 0->half pel ME/MC
+ int aspect_ratio_info; //FIXME remove
+ int sprite_warping_accuracy;
+ int data_partitioning; ///< data partitioning flag from header
+ int low_delay; ///< no reordering needed / has no B-frames
+ int vo_type;
+ int mpeg_quant;
+
+ /* divx specific, used to workaround (many) bugs in divx5 */
+ int divx_packed;
+
+ /* MPEG-2-specific - I wished not to have to support this mess. */
+ int progressive_sequence;
+
+ int progressive_frame;
+ int interlaced_dct;
+
+ int h_edge_pos, v_edge_pos;///< horizontal / vertical position of the right/bottom edge (pixel replication)
+ const u8 *y_dc_scale_table; ///< qscale -> y_dc_scale table
+ const u8 *c_dc_scale_table; ///< qscale -> c_dc_scale table
+ int qscale; ///< QP
+ int chroma_qscale; ///< chroma QP
+ int pict_type; ///< AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
+ int f_code; ///< forward MV resolution
+ int b_code; ///< backward MV resolution for B-frames (MPEG-4)
+ int no_rounding; /**< apply no rounding to motion compensation (MPEG-4, msmpeg4, ...)
+ for B-frames rounding mode is always 0 */
+ int last_time_base;
+ long time; ///< time of current frame
+ long last_non_b_time;
+ u16 pp_time; ///< time distance between the last 2 p,s,i frames
+ u16 pb_time; ///< time distance between the last b and p,s,i frame
+ u16 pp_field_time;
+ u16 pb_field_time; ///< like above, just for interlaced
+ int real_sprite_warping_points;
+ int sprite_offset[2][2]; ///< sprite offset[isChroma][isMVY]
+ int sprite_delta[2][2]; ///< sprite_delta [isY][isMVY]
+ int mcsel;
+ int partitioned_frame; ///< is current frame partitioned
+ int top_field_first;
+ int alternate_scan;
+ int last_dc[3]; ///< last DC values for MPEG-1
+ int dct_precision;
+ int intra_dc_precision;
+ int frame_pred_frame_dct;
+ int q_scale_type;
+ int context_reinit;
+ int chroma_format;
+};
+
+/* Per-stream MPEG-4 parsing context: embeds the MpegEncContext plus the
+ * VOL/VOP-level flags and encoder-bug-workaround state. */
+struct mpeg4_dec_param {
+ struct MpegEncContext m;
+
+ /// number of bits to represent the fractional part of time
+ int time_increment_bits;
+ int shape;
+ int vol_sprite_usage;
+ int sprite_brightness_change;
+ int num_sprite_warping_points;
+ /// sprite trajectory points
+ u16 sprite_traj[4][2];
+ /// sprite shift [isChroma]
+ int sprite_shift[2];
+
+ // reversible vlc
+ int rvlc;
+ /// could this stream contain resync markers
+ int resync_marker;
+ /// time distance of first I -> B, used for interlaced B-frames
+ int t_frame;
+
+ int new_pred;
+ int enhancement_type;
+ int scalability;
+ int use_intra_dc_vlc;
+
+ /// QP above which the ac VLC should be used for intra dc
+ int intra_dc_threshold;
+
+ /* bug workarounds */
+ int divx_version;
+ int divx_build;
+ int xvid_build;
+ int lavc_build;
+
+ /// flag for having shown the warning about invalid Divx B-frames
+ int showed_packed_warning;
+ /** does the stream contain the low_delay flag,
+ * used to work around buggy encoders. */
+ int vol_control_parameters;
+ int cplx_estimation_trash_i;
+ int cplx_estimation_trash_p;
+ int cplx_estimation_trash_b;
+
+ struct VLC studio_intra_tab[12];
+ struct VLC studio_luma_dc;
+ struct VLC studio_chroma_dc;
+
+ int rgb;
+
+ /* codec-level properties mirrored from the container/stream */
+ struct AVRational time_base;
+ int ticks_per_frame;
+ enum AVPixelFormat pix_fmt;
+ struct AVRational sample_aspect_ratio;
+ enum AVColorPrimaries color_primaries;
+ enum AVColorTransferCharacteristic color_trc;
+ enum AVColorSpace colorspace;
+ enum AVColorRange color_range;
+ enum AVChromaLocation chroma_sample_location;
+ int err_recognition;
+ int idct_algo;
+ int bits_per_raw_sample;
+ int profile;
+ int level;
+ struct AVRational framerate;
+ int flags;
+};
+
+/* Output of mpeg4_decode_extradata_ps(): parsed headers plus a flag telling
+ * whether a usable width/height was recovered. */
+struct mpeg4_param_sets {
+ bool head_parsed;
+ /* currently active parameter sets */
+ struct mpeg4_dec_param dec_ps;
+};
+
+int mpeg4_decode_extradata_ps(u8 *buf, int size, struct mpeg4_param_sets *ps);
+
+#endif
+
--- /dev/null
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+
+#include "aml_vp9_parser.h"
+#include "../utils/get_bits.h"
+#include "../utils/put_bits.h"
+#include "../utils/golomb.h"
+#include "../utils/common.h"
+#include "utils.h"
+
+#define VP9_SYNCCODE 0x498342
+
+/*
+ * Parse the VP9 color config: bit depth (profiles 2/3 only), color space,
+ * color range and chroma subsampling, deriving s->pix_fmt.  Returns 0 on
+ * success or -1 on reserved/unsupported combinations (e.g. RGB outside
+ * profiles 1/3, or 4:2:0 in profiles 1/3).
+ */
+static int read_colorspace_details(struct VP9Context *s, int profile)
+{
+ static const enum AVColorSpace colorspaces[8] = {
+ AVCOL_SPC_UNSPECIFIED, AVCOL_SPC_BT470BG, AVCOL_SPC_BT709, AVCOL_SPC_SMPTE170M,
+ AVCOL_SPC_SMPTE240M, AVCOL_SPC_BT2020_NCL, AVCOL_SPC_RESERVED, AVCOL_SPC_RGB,
+ };
+
+ enum AVColorSpace colorspace;
+ int color_range;
+ int bits = profile <= 1 ? 0 : 1 + get_bits1(&s->gb); // 0:8, 1:10, 2:12
+
+ s->bpp_index = bits;
+ s->s.h.bpp = 8 + bits * 2;
+ s->bytesperpixel = (7 + s->s.h.bpp) >> 3;
+ colorspace = colorspaces[get_bits(&s->gb, 3)];
+ if (colorspace == AVCOL_SPC_RGB) { // RGB = profile 1
+ if (profile & 1) {
+ if (get_bits1(&s->gb)) {
+ pr_err("Reserved bit set in RGB\n");
+ return -1;
+ }
+ } else {
+ pr_err("RGB not supported in profile %d\n", profile);
+ return -1;
+ }
+ } else {
+ /* [bit-depth][ss_v][ss_h] -> pixel format lookup */
+ static const enum AVPixelFormat pix_fmt_for_ss[3][2 /* v */][2 /* h */] = {
+ { { AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P },
+ { AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV420P } },
+ { { AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUV422P10 },
+ { AV_PIX_FMT_YUV440P10, AV_PIX_FMT_YUV420P10 } },
+ { { AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV422P12 },
+ { AV_PIX_FMT_YUV440P12, AV_PIX_FMT_YUV420P12 } }};
+ color_range = get_bits1(&s->gb) ? 2 : 1;
+ if (profile & 1) {
+ s->ss_h = get_bits1(&s->gb);
+ s->ss_v = get_bits1(&s->gb);
+ s->pix_fmt = pix_fmt_for_ss[bits][s->ss_v][s->ss_h];
+ if (s->pix_fmt == AV_PIX_FMT_YUV420P) {
+ pr_err("YUV 4:2:0 not supported in profile %d\n", profile);
+ return -1;
+ } else if (get_bits1(&s->gb)) {
+ pr_err("Profile %d color details reserved bit set\n", profile);
+ return -1;
+ }
+ } else {
+ /* profiles 0/2 are always 4:2:0 */
+ s->ss_h = s->ss_v = 1;
+ s->pix_fmt = pix_fmt_for_ss[bits][1][1];
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Parse the VP9 uncompressed frame header far enough to recover profile,
+ * frame type, bit depth and frame/render dimensions.
+ *
+ * Returns 0 on success (*ref is set instead when the frame is a
+ * show-existing-frame), and -1 on bitstream errors or for inter frames,
+ * whose remaining header this parser deliberately does not decode.
+ */
+int decode_frame_header(const u8 *data, int size, struct VP9Context *s, int *ref)
+{
+ int ret, last_invisible, profile;
+
+ /* general header */
+ if ((ret = init_get_bits8(&s->gb, data, size)) < 0) {
+ pr_err("Failed to initialize bitstream reader\n");
+ return ret;
+ }
+
+ if (get_bits(&s->gb, 2) != 0x2) { // frame marker
+ pr_err("Invalid frame marker\n");
+ return -1;
+ }
+
+ /* profile is 2 low bits plus an extension bit when both are set */
+ profile = get_bits1(&s->gb);
+ profile |= get_bits1(&s->gb) << 1;
+ if (profile == 3)
+ profile += get_bits1(&s->gb);
+
+ if (profile > 3) {
+ pr_err("Profile %d is not yet supported\n", profile);
+ return -1;
+ }
+
+ s->s.h.profile = profile;
+ if (get_bits1(&s->gb)) {
+ /* show_existing_frame: no header follows, just a reference index */
+ *ref = get_bits(&s->gb, 3);
+ return 0;
+ }
+
+ s->last_keyframe = s->s.h.keyframe;
+ s->s.h.keyframe = !get_bits1(&s->gb);
+
+ last_invisible = s->s.h.invisible;
+ s->s.h.invisible = !get_bits1(&s->gb);
+ s->s.h.errorres = get_bits1(&s->gb);
+ s->s.h.use_last_frame_mvs = !s->s.h.errorres && !last_invisible;
+
+ if (s->s.h.keyframe) {
+ if (get_bits_long(&s->gb, 24) != VP9_SYNCCODE) { // synccode
+ pr_err("Invalid sync code\n");
+ return -1;
+ }
+ if ((ret = read_colorspace_details(s,profile)) < 0)
+ return ret;
+ // for profile 1, here follows the subsampling bits
+ s->s.h.refreshrefmask = 0xff;
+ s->width = get_bits(&s->gb, 16) + 1;
+ s->height = get_bits(&s->gb, 16) + 1;
+ if (get_bits1(&s->gb)) { // has scaling
+ s->render_width = get_bits(&s->gb, 16) + 1;
+ s->render_height = get_bits(&s->gb, 16) + 1;
+ } else {
+ s->render_width = s->width;
+ s->render_height = s->height;
+ }
+ /*pr_info("keyframe res: (%d x %d), render size: (%d x %d)\n",
+ s->width, s->height, s->render_width, s->render_height);*/
+ } else {
+ s->s.h.intraonly = s->s.h.invisible ? get_bits1(&s->gb) : 0;
+ s->s.h.resetctx = s->s.h.errorres ? 0 : get_bits(&s->gb, 2);
+ if (s->s.h.intraonly) {
+ if (get_bits_long(&s->gb, 24) != VP9_SYNCCODE) { // synccode
+ pr_err("Invalid sync code\n");
+ return -1;
+ }
+ if (profile >= 1) {
+ if ((ret = read_colorspace_details(s, profile)) < 0)
+ return ret;
+ } else {
+ /* profile-0 intra-only frames are fixed 8-bit 4:2:0 */
+ s->ss_h = s->ss_v = 1;
+ s->s.h.bpp = 8;
+ s->bpp_index = 0;
+ s->bytesperpixel = 1;
+ s->pix_fmt = AV_PIX_FMT_YUV420P;
+ }
+ s->s.h.refreshrefmask = get_bits(&s->gb, 8);
+ s->width = get_bits(&s->gb, 16) + 1;
+ s->height = get_bits(&s->gb, 16) + 1;
+ if (get_bits1(&s->gb)) { // has scaling
+ s->render_width = get_bits(&s->gb, 16) + 1;
+ s->render_height = get_bits(&s->gb, 16) + 1;
+ } else {
+ s->render_width = s->width;
+ s->render_height = s->height;
+ }
+ pr_info("intra res: (%d x %d), render size: (%d x %d)\n",
+ s->width, s->height, s->render_width, s->render_height);
+ } else {
+ /* inter frame: read the reference indices/sign biases, then stop;
+ * the rest of the inter header is not needed by this parser */
+ s->s.h.refreshrefmask = get_bits(&s->gb, 8);
+ s->s.h.refidx[0] = get_bits(&s->gb, 3);
+ s->s.h.signbias[0] = get_bits1(&s->gb) && !s->s.h.errorres;
+ s->s.h.refidx[1] = get_bits(&s->gb, 3);
+ s->s.h.signbias[1] = get_bits1(&s->gb) && !s->s.h.errorres;
+ s->s.h.refidx[2] = get_bits(&s->gb, 3);
+ s->s.h.signbias[2] = get_bits1(&s->gb) && !s->s.h.errorres;
+
+ /*refresh_frame_flags;
+ for (i = 0; i < REFS_PER_FRAME; ++i) {
+ frame_refs[i];
+ ref_frame_sign_biases[i];
+ }
+ frame_size_from_refs();
+ high_precision_mv;
+ interp_filter();*/
+
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Split a VP9 packet into its superframe sub-frames.
+ *
+ * Detects the optional 16-byte "AMLV" prefix, then parses the superframe
+ * index trailer (marker byte 0b110xxxxx at the end of the buffer) to fill
+ * s->nb_frames / s->sizes[].  Non-superframe packets are reported as a
+ * single frame.  Returns 0 on success, negative on error.
+ */
+int vp9_superframe_split_filter(struct vp9_superframe_split *s)
+{
+ int i, j, ret, marker;
+ bool is_superframe = false;
+ int *prefix = (int *)s->data;
+
+ /* need at least one byte: the trailing marker is read below, and an
+ * empty buffer would index data[-1] */
+ if (!s->data || !s->data_size)
+ return -1;
+
+ #define AML_PREFIX ('V' << 24 | 'L' << 16 | 'M' << 8 | 'A')
+ /* prefix[3] reads bytes 12..15 -- only probe when they exist,
+ * otherwise this is an out-of-bounds read on short packets */
+ if (s->data_size >= 16 && prefix[3] == AML_PREFIX) {
+ s->prefix_size = 16;
+ /*pr_info("the frame data has beed added header\n");*/
+ }
+
+ marker = s->data[s->data_size - 1];
+ if ((marker & 0xe0) == 0xc0) {
+ int length_size = 1 + ((marker >> 3) & 0x3);
+ int nb_frames = 1 + (marker & 0x7);
+ int idx_size = 2 + nb_frames * length_size;
+
+ /* index is only valid when the marker byte repeats at its start */
+ if (s->data_size >= idx_size &&
+ s->data[s->data_size - idx_size] == marker) {
+ s64 total_size = 0;
+ int idx = s->data_size + 1 - idx_size;
+
+ for (i = 0; i < nb_frames; i++) {
+ int frame_size = 0;
+ for (j = 0; j < length_size; j++)
+ frame_size |= s->data[idx++] << (j * 8);
+
+ total_size += frame_size;
+ if (frame_size < 0 ||
+ total_size > s->data_size - idx_size) {
+ pr_err( "Invalid frame size in a sframe: %d\n",
+ frame_size);
+ ret = -EINVAL;
+ goto fail;
+ }
+ s->sizes[i] = frame_size;
+ }
+
+ s->nb_frames = nb_frames;
+ s->size = total_size;
+ s->next_frame = 0;
+ s->next_frame_offset = 0;
+ is_superframe = true;
+ }
+ }else {
+ s->nb_frames = 1;
+ s->sizes[0] = s->data_size;
+ s->size = s->data_size;
+ }
+
+ /*pr_info("sframe: %d, frames: %d, IN: %x, OUT: %x\n",
+ is_superframe, s->nb_frames,
+ s->data_size, s->size);*/
+
+ /* parse uncompressed header. */
+ if (is_superframe) {
+ /* bitstream profile. */
+ /* frame type. (intra or inter) */
+ /* colorspace descriptor */
+ /* ... */
+
+ pr_info("the frame is a superframe.\n");
+ }
+
+ /*pr_err("in: %x, %d, out: %x, sizes %d,%d,%d,%d,%d,%d,%d,%d\n",
+ s->data_size,
+ s->nb_frames,
+ s->size,
+ s->sizes[0],
+ s->sizes[1],
+ s->sizes[2],
+ s->sizes[3],
+ s->sizes[4],
+ s->sizes[5],
+ s->sizes[6],
+ s->sizes[7]);*/
+
+ return 0;
+fail:
+ return ret;
+}
+
+/*
+ * Parse VP9 extradata: split a (possibly super-)frame and run the
+ * uncompressed-header parser on each sub-frame until one parses cleanly.
+ * ps->head_parsed is set when a full header (not a show-existing-frame
+ * reference) was decoded.  Returns 0 on success, negative on error.
+ */
+int vp9_decode_extradata_ps(u8 *data, int size, struct vp9_param_sets *ps)
+{
+ int i, ref = -1, ret = 0;
+ struct vp9_superframe_split s = {0};
+
+ /*parse superframe.*/
+ s.data = data;
+ s.data_size = size;
+ ret = vp9_superframe_split_filter(&s);
+ if (ret) {
+ pr_err("parse frames failed.\n");
+ return ret;
+ }
+
+ for (i = 0; i < s.nb_frames; i++) {
+ u32 len = s.sizes[i] - s.prefix_size;
+ u8 *buf = s.data + s.next_frame_offset + s.prefix_size;
+
+ ret = decode_frame_header(buf, len, &ps->ctx, &ref);
+ if (!ret) {
+ ps->head_parsed = ref < 0 ? true : false;
+ return 0;
+ }
+
+ /* advance past this whole sub-frame (payload + AML prefix).
+ * Fix: the offset must accumulate; assigning it meant the 3rd
+ * and later sub-frames were read from the wrong position. */
+ s.next_frame_offset += len + s.prefix_size;
+ }
+
+ return ret;
+}
+
*
*/
-
#ifndef AML_VP9_PARSER_H
#define AML_VP9_PARSER_H
-enum BlockPartition {
- PARTITION_NONE, // [ ] <-.
- PARTITION_H, // [-] |
- PARTITION_V, // [|] |
- PARTITION_SPLIT, // [+] --'
-};
+#include "../utils/pixfmt.h"
+#include "../utils/get_bits.h"
-enum InterPredMode {
- NEARESTMV = 10,
- NEARMV = 11,
- ZEROMV = 12,
- NEWMV = 13,
-};
+#define MAX_SEGMENT 8
-enum CompPredMode {
- PRED_SINGLEREF,
- PRED_COMPREF,
- PRED_SWITCHABLE,
-};
+/* Fields of the VP9 uncompressed frame header, filled in (partially) by
+ * decode_frame_header(). */
+struct VP9BitstreamHeader {
+ // bitstream header
+ u8 profile;
+ u8 bpp;
+ u8 keyframe;
+ u8 invisible;
+ u8 errorres;
+ u8 intraonly;
+ u8 resetctx;
+ u8 refreshrefmask;
+ u8 highprecisionmvs;
+ u8 allowcompinter;
+ u8 refreshctx;
+ u8 parallelmode;
+ u8 framectxid;
+ u8 use_last_frame_mvs;
+ u8 refidx[3];
+ u8 signbias[3];
+ u8 fixcompref;
+ u8 varcompref[2];
+ struct {
+ u8 level;
+ char sharpness;
+ } filter;
+ struct {
+ u8 enabled;
+ u8 updated;
+ char mode[2];
+ char ref[4];
+ } lf_delta;
+ u8 yac_qi;
+ char ydc_qdelta, uvdc_qdelta, uvac_qdelta;
+ u8 lossless;
+ struct {
+ u8 enabled;
+ u8 temporal;
+ u8 absolute_vals;
+ u8 update_map;
+ u8 prob[7];
+ u8 pred_prob[3];
+ struct {
+ u8 q_enabled;
+ u8 lf_enabled;
+ u8 ref_enabled;
+ u8 skip_enabled;
+ u8 ref_val;
+ int16_t q_val;
+ char lf_val;
+ int16_t qmul[2][2];
+ u8 lflvl[4][2];
+ } feat[MAX_SEGMENT];
+ } segmentation;
+ struct {
+ u32 log2_tile_cols, log2_tile_rows;
+ u32 tile_cols, tile_rows;
+ } tiling;
-enum BlockLevel {
- BL_64X64,
- BL_32X32,
- BL_16X16,
- BL_8X8,
+ int uncompressed_header_size;
+ int compressed_header_size;
};
-enum BlockSize {
- BS_64x64,
- BS_64x32,
- BS_32x64,
- BS_32x32,
- BS_32x16,
- BS_16x32,
- BS_16x16,
- BS_16x8,
- BS_8x16,
- BS_8x8,
- BS_8x4,
- BS_4x8,
- BS_4x4,
- N_BS_SIZES,
-};
+/* Header state shareable between decode passes; frame buffers are unused
+ * by this parser and remain commented out. */
+struct VP9SharedContext {
+ struct VP9BitstreamHeader h;
-enum FilterMode {
- MODE_NONE,
- MODE_INTERLEAVE,
- MODE_DEINTERLEAVE
+ //struct ThreadFrame refs[8];
+#define CUR_FRAME 0
+#define REF_FRAME_MVPAIR 1
+#define REF_FRAME_SEGMAP 2
+ //struct VP9Frame frames[3];
};
-enum TxfmMode {
- TX_4X4,
- TX_8X8,
- TX_16X16,
- TX_32X32,
- N_TXFM_SIZES,
- TX_SWITCHABLE = N_TXFM_SIZES,
- N_TXFM_MODES
-};
+/* Per-stream VP9 parsing context: bit reader, parsed header, bit depth,
+ * frame/render dimensions and (mostly unused here) reconstruction caches. */
+struct VP9Context {
+ struct VP9SharedContext s;
+ struct get_bits_context gb;
+ int pass, active_tile_cols;
-enum TxfmType {
- DCT_DCT,
- DCT_ADST,
- ADST_DCT,
- ADST_ADST,
- N_TXFM_TYPES
-};
+ u8 ss_h, ss_v;
+ u8 last_bpp, bpp_index, bytesperpixel;
+ u8 last_keyframe;
+ // sb_cols/rows, rows/cols and last_fmt are used for allocating all internal
+ // arrays, and are thus per-thread. w/h and gf_fmt are synced between threads
+ // and are therefore per-stream. pix_fmt represents the value in the header
+ // of the currently processed frame.
+ int width;
+ int height;
-enum IntraPredMode {
- VERT_PRED,
- HOR_PRED,
- DC_PRED,
- DIAG_DOWN_LEFT_PRED,
- DIAG_DOWN_RIGHT_PRED,
- VERT_RIGHT_PRED,
- HOR_DOWN_PRED,
- VERT_LEFT_PRED,
- HOR_UP_PRED,
- TM_VP8_PRED,
- LEFT_DC_PRED,
- TOP_DC_PRED,
- DC_128_PRED,
- DC_127_PRED,
- DC_129_PRED,
- N_INTRA_PRED_MODES
+ int render_width;
+ int render_height;
+
+ enum AVPixelFormat pix_fmt, last_fmt, gf_fmt;
+ u32 sb_cols, sb_rows, rows, cols;
+
+ struct {
+ u8 lim_lut[64];
+ u8 mblim_lut[64];
+ } filter_lut;
+ struct {
+ u8 coef[4][2][2][6][6][3];
+ } prob_ctx[4];
+ struct {
+ u8 coef[4][2][2][6][6][11];
+ } prob;
+
+ // contextual (above) cache
+ u8 *above_partition_ctx;
+ u8 *above_mode_ctx;
+ // FIXME maybe merge some of the below in a flags field?
+ u8 *above_y_nnz_ctx;
+ u8 *above_uv_nnz_ctx[2];
+ u8 *above_skip_ctx; // 1bit
+ u8 *above_txfm_ctx; // 2bit
+ u8 *above_segpred_ctx; // 1bit
+ u8 *above_intra_ctx; // 1bit
+ u8 *above_comp_ctx; // 1bit
+ u8 *above_ref_ctx; // 2bit
+ u8 *above_filter_ctx;
+
+ // whole-frame cache
+ u8 *intra_pred_data[3];
+
+ // block reconstruction intermediates
+ int block_alloc_using_2pass;
+ uint16_t mvscale[3][2];
+ u8 mvstep[3][2];
};
-struct VP9BitstreamHeader {
- // bitstream header
- u8 profile;
- u8 bpp;
- u8 keyframe;
- u8 invisible;
- u8 errorres;
- u8 intraonly;
- u8 resetctx;
- u8 refreshrefmask;
- u8 highprecisionmvs;
- enum FilterMode filtermode;
- u8 allowcompinter;
- u8 refreshctx;
- u8 parallelmode;
- u8 framectxid;
- u8 use_last_frame_mvs;
- u8 refidx[3];
- u8 signbias[3];
- u8 fixcompref;
- u8 varcompref[2];
- struct {
- u8 level;
- int8_t sharpness;
- } filter;
- struct {
- u8 enabled;
- u8 updated;
- char mode[2];
- char ref[4];
- } lf_delta;
- u8 yac_qi;
- char ydc_qdelta, uvdc_qdelta, uvac_qdelta;
- u8 lossless;
-#define MAX_SEGMENT 8
- struct {
- u8 enabled;
- u8 temporal;
- u8 absolute_vals;
- u8 update_map;
- u8 prob[7];
- u8 pred_prob[3];
- struct {
- u8 q_enabled;
- u8 lf_enabled;
- u8 ref_enabled;
- u8 skip_enabled;
- u8 ref_val;
- s16 q_val;
- char lf_val;
- s16 qmul[2][2];
- u8 lflvl[4][2];
- } feat[MAX_SEGMENT];
- } segmentation;
- enum TxfmMode txfmmode;
- enum CompPredMode comppredmode;
- struct {
- u32 log2_tile_cols, log2_tile_rows;
- u32 tile_cols, tile_rows;
- } tiling;
-
- int uncompressed_header_size;
- int compressed_header_size;
+/* In/out descriptor for vp9_superframe_split_filter(): caller fills
+ * data/data_size, the filter fills the frame count/sizes/offsets. */
+struct vp9_superframe_split {
+ /*in data*/
+ u8 *data;
+ u32 data_size;
+
+ /*out data*/
+ int nb_frames;
+ int size;
+ int next_frame;
+ u32 next_frame_offset;
+ int prefix_size;
+ int sizes[8];
};
-struct vp9_head_info_t {
- bool parsed;
- struct VP9BitstreamHeader info;
+/* Output of vp9_decode_extradata_ps(): parser context plus a flag telling
+ * whether a full frame header was decoded. */
+struct vp9_param_sets {
+ bool head_parsed;
+ struct VP9Context ctx;
};
+int vp9_superframe_split_filter(struct vp9_superframe_split *s);
+int vp9_decode_extradata_ps(u8 *data, int size, struct vp9_param_sets *ps);
+
#endif //AML_VP9_PARSER_H
+++ /dev/null
-/*
- * drivers/amlogic/media_modules/amvdec_ports/decoder/h264_parse.c
- *
- * Copyright (C) 2017 Amlogic, Inc. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- */
-
-#include <linux/kernel.h>
-#include "h264_parse.h"
-#include "h264_stream.h"
-
-static unsigned char h264_exp_golomb_bits[256] = {
- 8, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 3,
- 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0,
-};
-
-static unsigned char ZZ_SCAN[16] = {
- 0, 1, 4, 8, 5, 2, 3, 6, 9, 12, 13, 10, 7, 11, 14, 15
-};
-
-static unsigned char ZZ_SCAN8[64] = {
- 0, 1, 8, 16, 9, 2, 3, 10, 17, 24, 32, 25, 18, 11, 4, 5,
- 12, 19, 26, 33, 40, 48, 41, 34, 27, 20, 13, 6, 7, 14, 21, 28,
- 35, 42, 49, 56, 57, 50, 43, 36, 29, 22, 15, 23, 30, 37, 44, 51,
- 58, 59, 52, 45, 38, 31, 39, 46, 53, 60, 61, 54, 47, 55, 62, 63
-};
-
-unsigned int h264_u(struct h264_stream_t *s, unsigned int n)
-{
- //if (n % 8 == 0) {
- // return h264_stream_read_bytes(s, n / 8);
- //}
- return h264_stream_read_bits(s, n);
-}
-
-unsigned int h264_ue(struct h264_stream_t *s)
-{
- unsigned int bits, read;
- unsigned char coded;
-
- bits = 0;
- while (true) {
- if (h264_stream_bytes_remaining(s) < 1) {
- read = h264_stream_peek_bits(s, s->bit_pos) <<
- (8 - s->bit_pos);
- break;
- }
-
- read = h264_stream_peek_bits(s, 8);
- if (bits > 16)
- break;
-
- if (read)
- break;
-
- h264_stream_read_bits(s, 8);
- bits += 8;
- }
-
- coded = h264_exp_golomb_bits[read];
- h264_stream_read_bits(s, coded);
- bits += coded;
- return h264_stream_read_bits(s, bits + 1) - 1;
-}
-
-int h264_se(struct h264_stream_t *s)
-{
- unsigned int ret;
-
- ret = h264_ue(s);
- if (!(ret & 0x1)) {
- ret >>= 1;
- return (int)(-ret);
- }
-
- return (ret + 1) >> 1;
-}
-
-void h264_f(struct h264_stream_t *s, unsigned int n, unsigned int pattern)
-{
- unsigned int val = h264_u(s, n);
-
- if (val != pattern) {
- pr_err("fixed-pattern doesn't match. expected: %x actual: %x\n",
- pattern, (unsigned int)val);
- return;
- }
-}
-
-void h264_rbsp_trailing_bits(struct h264_stream_t *s)
-{
- h264_f(s, 1, 1);
- h264_f(s, s->bit_pos, 0);
-}
-
-// syntax for scaling list matrix values
-void Scaling_List(int *scalingList, int sizeOfScalingList,
- bool *UseDefaultScalingMatrix, struct h264_stream_t *s)
-{
- int j, scanj;
- int delta_scale, lastScale, nextScale;
-
- lastScale = 8;
- nextScale = 8;
-
- for (j = 0; j < sizeOfScalingList; j++) {
- scanj = (sizeOfScalingList == 16) ? ZZ_SCAN[j] : ZZ_SCAN8[j];
-
- if (nextScale != 0) {
- delta_scale = h264_ue(s);
- nextScale = (lastScale + delta_scale + 256) % 256;
- *UseDefaultScalingMatrix =
- (bool) (scanj == 0 && nextScale == 0);
- }
-
- scalingList[scanj] = (nextScale == 0) ? lastScale : nextScale;
- lastScale = scalingList[scanj];
- }
-}
-
-void h264_sps_parse(struct h264_stream_t *s, struct h264_SPS_t *sps)
-{
- unsigned int i, n_ScalingList;
-
- sps->profile_idc = h264_u(s, 8);
-
- if ((sps->profile_idc != BASELINE) &&
- (sps->profile_idc != MAIN) &&
- (sps->profile_idc != EXTENDED) &&
- (sps->profile_idc != FREXT_HP) &&
- (sps->profile_idc != FREXT_Hi10P) &&
- (sps->profile_idc != FREXT_Hi422) &&
- (sps->profile_idc != FREXT_Hi444) &&
- (sps->profile_idc != FREXT_CAVLC444)
- && (sps->profile_idc != MVC_HIGH)
- && (sps->profile_idc != STEREO_HIGH)) {
- pr_err("Invalid Profile IDC (%d) encountered.\n",
- sps->profile_idc);
- return;
- }
-
- sps->constrained_set0_flag = h264_u(s, 1);
- sps->constrained_set1_flag = h264_u(s, 1);
- sps->constrained_set2_flag = h264_u(s, 1);
- h264_u(s, 5); // reserved_zero_5bits
- sps->level_idc = h264_u(s, 8);
- sps->seq_parameter_set_id = h264_ue(s);
-
- // Fidelity Range Extensions stuff
- sps->chroma_format_idc = 1;
- sps->bit_depth_luma_minus8 = 0;
- sps->bit_depth_chroma_minus8 = 0;
- sps->lossless_qpprime_flag = 0;
- sps->separate_colour_plane_flag = 0;
-
- if ((sps->profile_idc == FREXT_HP) ||
- (sps->profile_idc == FREXT_Hi10P) ||
- (sps->profile_idc == FREXT_Hi422) ||
- (sps->profile_idc == FREXT_Hi444) ||
- (sps->profile_idc == FREXT_CAVLC444)) {
- sps->chroma_format_idc = h264_ue(s);
-
- if (sps->chroma_format_idc == YUV444)
- sps->separate_colour_plane_flag = h264_u(s, 1);
-
- sps->bit_depth_luma_minus8 = h264_ue(s);
- sps->bit_depth_chroma_minus8 = h264_ue(s);
- //checking;
- if ((sps->bit_depth_luma_minus8 + 8 > sizeof(unsigned short) * 8) ||
- (sps->bit_depth_chroma_minus8 + 8 > sizeof(unsigned short) * 8)) {
- pr_err("Source picture has higher bit depth than imgpel data type.\n");
- pr_err("Please recompile with larger data type for imgpel.\n");
- }
-
- sps->lossless_qpprime_flag = h264_u(s, 1);
- sps->seq_scaling_matrix_present_flag = h264_u(s, 1);
- if (sps->seq_scaling_matrix_present_flag) {
- n_ScalingList = (sps->chroma_format_idc != YUV444) ? 8 : 12;
- for (i = 0; i < n_ScalingList; i++) {
- sps->seq_scaling_list_present_flag[i] = h264_u(s, 1);
- if (sps->seq_scaling_list_present_flag[i]) {
- if (i < 6)
- Scaling_List(sps->ScalingList4x4[i], 16,
- &sps->UseDefaultScalingMatrix4x4Flag[i], s);
- else
- Scaling_List(sps->ScalingList8x8[i - 6],
- 64, &sps->UseDefaultScalingMatrix8x8Flag[i - 6], s);
- }
- }
- }
- }
-
- sps->log2_max_frame_num_minus4 = h264_ue(s);
- sps->pic_order_cnt_type = h264_ue(s);
- if (sps->pic_order_cnt_type == 0) {
- sps->log2_max_pic_order_cnt_lsb_minus4 = h264_ue(s);
- } else if (sps->pic_order_cnt_type == 1) {
- sps->delta_pic_order_always_zero_flag = h264_se(s);
- sps->offset_for_non_ref_pic = h264_se(s);
- sps->offset_for_top_to_bottom_field = h264_se(s);
- sps->num_ref_frames_in_poc_cycle = h264_se(s);
- for (i = 0; i < sps->num_ref_frames_in_poc_cycle; ++i)
- sps->offset_for_ref_frame[i] = h264_se(s);
- }
-
- sps->num_ref_frames = h264_ue(s);
- sps->gaps_in_frame_num_value_allowed_flag = h264_u(s, 1);
- sps->pic_width_in_mbs_minus1 = h264_ue(s);
- sps->pic_height_in_map_units_minus1 = h264_ue(s);
- sps->frame_mbs_only_flag = h264_u(s, 1);
- if (!sps->frame_mbs_only_flag)
- sps->mb_adaptive_frame_field_flag = h264_u(s, 1);
-
- sps->direct_8x8_inference_flag = h264_u(s, 1);
- sps->frame_cropping_flag = h264_u(s, 1);
- if (sps->frame_cropping_flag) {
- sps->frame_crop_left_offset = h264_ue(s);
- sps->frame_crop_right_offset = h264_ue(s);
- sps->frame_crop_top_offset = h264_ue(s);
- sps->frame_crop_bottom_offset = h264_ue(s);
- }
-
- sps->vui_parameters_present_flag = h264_u(s, 1);
- //if (sps->vui_parameters_present_flag) {
- // sps->vui_parameters = h264_vui_parameters(s);
- //}
- h264_rbsp_trailing_bits(s);
-}
-
-void h264_pps_parse(struct h264_stream_t *s, struct h264_PPS_t *pps)
-{
- pps->pic_parameter_set_id = h264_ue(s);
- pps->seq_parameter_set_id = h264_ue(s);
- pps->entropy_coding_mode_flag = h264_u(s, 1);
- pps->pic_order_present_flag = h264_u(s, 1);
- pps->num_slice_groups_minus1 = h264_ue(s);
- if (pps->num_slice_groups_minus1 > 0) {
- pps->slice_group_map_type = h264_ue(s);
- if (pps->slice_group_map_type == 0) {
- pps->run_length_minus1 = h264_ue(s);
- } else if (pps->slice_group_map_type == 2) {
- pps->top_left = h264_ue(s);
- pps->bottom_right = h264_ue(s);
- } else if (pps->slice_group_map_type == 3 ||
- pps->slice_group_map_type == 4 ||
- pps->slice_group_map_type == 5) {
- pps->slice_group_change_direction_flag = h264_u(s, 1);
- pps->slice_group_change_rate_minus1 = h264_ue(s);
- } else if (pps->slice_group_map_type == 6) {
- pps->pic_size_in_map_units_minus1 = h264_ue(s);
- pps->slice_group_id = h264_ue(s);
- }
- }
- pps->num_ref_idx_l0_active_minus1 = h264_ue(s);
- pps->num_ref_idx_l1_active_minus1 = h264_ue(s);
- pps->weighted_pred_flag = h264_u(s, 1);
- pps->weighted_bipred_idc = h264_u(s, 2);
- pps->pic_init_qp_minus26 = h264_se(s);
- pps->pic_init_qs_minus26 = h264_se(s);
- pps->chroma_qp_index_offset = h264_se(s);
- pps->deblocking_filter_control_present_flag = h264_u(s, 1);
- pps->constrained_intra_pred_flag = h264_u(s, 1);
- pps->redundant_pic_cnt_present_flag = h264_u(s, 1);
- h264_rbsp_trailing_bits(s);
-}
-
-void h264_sps_info(struct h264_SPS_t *sps)
-{
- int i;
-
- pr_info("sequence_parameter_set {\n");
- pr_info(" profile_idc: %d\n", sps->profile_idc);
- pr_info(" constraint_set0_flag: %d\n", sps->constrained_set0_flag);
- pr_info(" constraint_set1_flag: %d\n", sps->constrained_set1_flag);
- pr_info(" constraint_set2_flag: %d\n", sps->constrained_set2_flag);
- pr_info(" level_idc: %d\n", sps->level_idc);
- pr_info(" seq_parameter_set_id: %d\n", sps->seq_parameter_set_id);
-
- pr_info(" log2_max_frame_num_minus4: %d\n",
- sps->log2_max_frame_num_minus4);
- pr_info(" pic_order_cnt_type: %d\n", sps->pic_order_cnt_type);
- if (sps->pic_order_cnt_type == 0) {
- pr_info(" log2_max_pic_order_cnt_lsb_minus4: %d\n",
- sps->log2_max_pic_order_cnt_lsb_minus4);
- } else if (sps->pic_order_cnt_type == 1) {
- pr_info(" delta_pic_order_always_zero_flag: %d\n",
- sps->delta_pic_order_always_zero_flag);
- pr_info(" offset_for_non_ref_pic: %d\n",
- sps->offset_for_non_ref_pic);
- pr_info(" offset_for_top_to_bottom_field: %d\n",
- sps->offset_for_top_to_bottom_field);
- pr_info(" num_ref_frames_in_pic_order_cnt_cycle: %d\n",
- sps->num_ref_frames_in_poc_cycle);
- for (i = 0; i < sps->num_ref_frames_in_poc_cycle; ++i) {
- pr_info(" offset_for_ref_frame[%d]: %d\n", i,
- sps->offset_for_ref_frame[i]);
- }
- }
- pr_info(" num_ref_frames: %d\n", sps->num_ref_frames);
- pr_info(" gaps_in_frame_num_value_allowed_flag: %d\n",
- sps->gaps_in_frame_num_value_allowed_flag);
- pr_info(" pic_width_in_mbs_minus1: %d\n",
- sps->pic_width_in_mbs_minus1);
- pr_info(" pic_height_in_map_units_minus1: %d\n",
- sps->pic_height_in_map_units_minus1);
- pr_info(" frame_mbs_only_flag: %d\n",
- sps->frame_mbs_only_flag);
- pr_info(" mb_adaptive_frame_field_flag: %d\n",
- sps->mb_adaptive_frame_field_flag);
- pr_info(" direct_8x8_inference_flag: %d\n",
- sps->direct_8x8_inference_flag);
- pr_info(" frame_cropping_flag: %d\n",
- sps->frame_cropping_flag);
- if (sps->frame_cropping_flag) {
- pr_info(" frame_crop_left_offset: %d\n",
- sps->frame_crop_left_offset);
- pr_info(" frame_crop_right_offset: %d\n",
- sps->frame_crop_right_offset);
- pr_info(" frame_crop_top_offset: %d\n",
- sps->frame_crop_top_offset);
- pr_info(" frame_crop_bottom_offset: %d\n",
- sps->frame_crop_bottom_offset);
- }
- pr_info(" vui_parameters_present_flag: %d\n",
- sps->vui_parameters_present_flag);
- //if (sps->vui_parameters_present_flag) {
- // h264_print_vui_parameters(sps->vui_parameters);
- //}
-
- pr_info(" }\n");
-}
-
-void h264_pps_info(struct h264_PPS_t *pps)
-{
- pr_info("pic_parameter_set {\n");
- pr_info(" pic_parameter_set_id: %d\n",
- pps->pic_parameter_set_id);
- pr_info(" seq_parameter_set_id: %d\n",
- pps->seq_parameter_set_id);
- pr_info(" entropy_coding_mode_flag: %d\n",
- pps->entropy_coding_mode_flag);
- pr_info(" pic_order_present_flag: %d\n",
- pps->pic_order_present_flag);
- pr_info(" num_slice_groups_minus1: %d\n",
- pps->num_slice_groups_minus1);
- // FIXME: Code for slice groups is missing here.
- pr_info(" num_ref_idx_l0_active_minus1: %d\n",
- pps->num_ref_idx_l0_active_minus1);
- pr_info(" num_ref_idx_l1_active_minus1: %d\n",
- pps->num_ref_idx_l1_active_minus1);
- pr_info(" weighted_pred_flag: %d\n", pps->weighted_pred_flag);
- pr_info(" weighted_bipred_idc: %d\n", pps->weighted_bipred_idc);
- pr_info(" pic_init_qp_minus26: %d\n", pps->pic_init_qp_minus26);
- pr_info(" pic_init_qs_minus26: %d\n", pps->pic_init_qs_minus26);
- pr_info(" chroma_qp_index_offset: %d\n",
- pps->chroma_qp_index_offset);
- pr_info(" deblocking_filter_control_present_flag: %d\n",
- pps->deblocking_filter_control_present_flag);
- pr_info(" constrained_intra_pred_flag: %d\n",
- pps->constrained_intra_pred_flag);
- pr_info(" redundant_pic_cnt_present_flag: %d\n",
- pps->redundant_pic_cnt_present_flag);
- pr_info(" }\n");
-}
-
+++ /dev/null
-/*
- * drivers/amlogic/media_modules/amvdec_ports/decoder/h264_parse.h
- *
- * Copyright (C) 2017 Amlogic, Inc. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- */
-
-#ifndef _H264_PARSE_H
-#define _H264_PARSE_H
-
-#include "h264_stream.h"
-
-enum color_model {
- CM_UNKNOWN = -1,
- CM_YUV = 0,
- CM_RGB = 1,
- CM_XYZ = 2
-};
-
-enum color_format {
- CF_UNKNOWN = -1, //!< Unknown color format
- YUV400 = 0, //!< Monochrome
- YUV420 = 1, //!< 4:2:0
- YUV422 = 2, //!< 4:2:2
- YUV444 = 3 //!< 4:4:4
-};
-
-enum pixel_format {
- PF_UNKNOWN = -1, //!< Unknown color ordering
- UYVY = 0, //!< UYVY
- YUY2 = 1, //!< YUY2
- YUYV = 1, //!< YUYV
- YVYU = 2, //!< YVYU
- BGR = 3, //!< BGR
- V210 = 4 //!< Video Clarity 422 format (10 bits)
-};
-
-//AVC Profile IDC definitions
-enum profile_idc{
- NO_PROFILE = 0, //!< disable profile checking for experimental coding (enables FRExt, but disables MV)
- FREXT_CAVLC444 = 44, //!< YUV 4:4:4/14 "CAVLC 4:4:4"
- BASELINE = 66, //!< YUV 4:2:0/8 "Baseline"
- MAIN = 77, //!< YUV 4:2:0/8 "Main"
- EXTENDED = 88, //!< YUV 4:2:0/8 "Extended"
- FREXT_HP = 100, //!< YUV 4:2:0/8 "High"
- FREXT_Hi10P = 110, //!< YUV 4:2:0/10 "High 10"
- FREXT_Hi422 = 122, //!< YUV 4:2:2/10 "High 4:2:2"
- FREXT_Hi444 = 244, //!< YUV 4:4:4/14 "High 4:4:4"
- MVC_HIGH = 118, //!< YUV 4:2:0/8 "Multiview High"
- STEREO_HIGH = 128 //!< YUV 4:2:0/8 "Stereo High"
-};
-
-/* sequence parameter set */
-struct h264_SPS_t {
- bool vailid;
- unsigned int profile_idc;
- bool constrained_set0_flag;
- bool constrained_set1_flag;
- bool constrained_set2_flag;
- bool constrained_set3_flag;
- unsigned int level_idc;
- unsigned int seq_parameter_set_id;
- unsigned int chroma_format_idc;
- bool seq_scaling_matrix_present_flag;
- int seq_scaling_list_present_flag[12];
- int ScalingList4x4[6][16];
- int ScalingList8x8[6][64];
- bool UseDefaultScalingMatrix4x4Flag[6];
- bool UseDefaultScalingMatrix8x8Flag[6];
- unsigned int bit_depth_luma_minus8;
- unsigned int bit_depth_chroma_minus8;
- unsigned int log2_max_frame_num_minus4;
- unsigned int pic_order_cnt_type;
- unsigned int log2_max_pic_order_cnt_lsb_minus4;
- bool delta_pic_order_always_zero_flag;
- int offset_for_non_ref_pic;
- int offset_for_top_to_bottom_field;
- unsigned int num_ref_frames_in_poc_cycle;
- int offset_for_ref_frame[255];
- int num_ref_frames;
- bool gaps_in_frame_num_value_allowed_flag;
- unsigned int pic_width_in_mbs_minus1;
- unsigned int pic_height_in_map_units_minus1;
- bool frame_mbs_only_flag;
- bool mb_adaptive_frame_field_flag;
- bool direct_8x8_inference_flag;
- bool frame_cropping_flag;
- unsigned int frame_crop_left_offset;
- unsigned int frame_crop_right_offset;
- unsigned int frame_crop_top_offset;
- unsigned int frame_crop_bottom_offset;
- bool vui_parameters_present_flag;
- //h264_vui_parameters_t *vui_parameters;
- unsigned separate_colour_plane_flag;
- int lossless_qpprime_flag;
-};
-
-/* pic parameter set */
-struct h264_PPS_t {
- int pic_parameter_set_id;
- int seq_parameter_set_id;
- int entropy_coding_mode_flag;
- int pic_order_present_flag;
- int num_slice_groups_minus1;
- int slice_group_map_type;
- int run_length_minus1;
- int top_left;
- int bottom_right;
- int slice_group_change_direction_flag;
- int slice_group_change_rate_minus1;
- int pic_size_in_map_units_minus1;
- int slice_group_id;
- int num_ref_idx_l0_active_minus1;
- int num_ref_idx_l1_active_minus1;
- int weighted_pred_flag;
- int weighted_bipred_idc;
- int pic_init_qp_minus26;
- int pic_init_qs_minus26;
- int chroma_qp_index_offset;
- int deblocking_filter_control_present_flag;
- int constrained_intra_pred_flag;
- int redundant_pic_cnt_present_flag;
-};
-
-void h264_sps_parse(struct h264_stream_t *s, struct h264_SPS_t *sps);
-void h264_pps_parse(struct h264_stream_t *s, struct h264_PPS_t *pps);
-
-void h264_sps_info(struct h264_SPS_t *sps);
-void h264_pps_info(struct h264_PPS_t *pps);
-
-#endif //_H264_PARSE_H
+++ /dev/null
-/*
- * drivers/amlogic/media_modules/amvdec_ports/decoder/h264_stream.c
- *
- * Copyright (C) 2017 Amlogic, Inc. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- */
-
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include "h264_stream.h"
-
-void h264_stream_set(struct h264_stream_t *s, unsigned char *data, int size)
-{
- s->data = data;
- s->size = size;
- s->bit_pos = 7;
- s->byte_pos = 0;
-}
-
-unsigned int h264_stream_read_bits(struct h264_stream_t *s, unsigned int n)
-{
- unsigned int ret = 0;
- unsigned char b = 0;
- int i;
-
- if (n == 0)
- return 0;
-
- for (i = 0; i < n; ++i) {
- if (h264_stream_bits_remaining(s) == 0)
- ret <<= n - i - 1;
-
- b = s->data[s->byte_pos];
- if (n - i <= 32)
- ret = ret << 1 | BITAT(b, s->bit_pos);
-
- if (s->bit_pos == 0) {
- s->bit_pos = 7;
- s->byte_pos++;
- } else
- s->bit_pos--;
- }
-
- return ret;
-}
-
-unsigned int h264_stream_peek_bits(struct h264_stream_t *s, unsigned int n)
-{
- int prev_bit_pos = s->bit_pos;
- int prev_byte_pos = s->byte_pos;
- unsigned int ret = h264_stream_read_bits(s, n);
-
- s->bit_pos = prev_bit_pos;
- s->byte_pos = prev_byte_pos;
-
- return ret;
-}
-
-unsigned int h264_stream_read_bytes(struct h264_stream_t *s, unsigned int n)
-{
- unsigned int ret = 0;
- int i;
-
- if (n == 0)
- return 0;
-
- for (i = 0; i < n; ++i) {
- if (h264_stream_bytes_remaining(s) == 0) {
- ret <<= (n - i - 1) * 8;
- break;
- }
-
- if (n - i <= 4)
- ret = ret << 8 | s->data[s->byte_pos];
-
- s->byte_pos++;
- }
-
- return ret;
-}
-
-unsigned int h264_stream_peek_bytes(struct h264_stream_t *s, unsigned int n)
-{
- int prev_byte_pos = s->byte_pos;
- unsigned int ret = h264_stream_read_bytes(s, n);
-
- s->byte_pos = prev_byte_pos;
-
- return ret;
-}
-
-int h264_stream_bits_remaining(struct h264_stream_t *s)
-{
- return (s->size - s->byte_pos) * 8 + s->bit_pos;
-}
-
-int h264_stream_bytes_remaining(struct h264_stream_t *s)
-{
- return s->size - s->byte_pos;
-}
-
+++ /dev/null
-/*
- * drivers/amlogic/media_modules/amvdec_ports/decoder/h264_stream.h
- *
- * Copyright (C) 2017 Amlogic, Inc. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- */
-
-#ifndef _H264_STREAM_H
-#define _H264_STREAM_H
-
-#include "utils.h"
-
-struct h264_stream_t {
- unsigned char *data;
- unsigned int size;
- int bit_pos;
- int byte_pos;
-};
-
-void h264_stream_set(struct h264_stream_t *s, unsigned char *data, int size);
-unsigned int h264_stream_read_bits(struct h264_stream_t *s, unsigned int n);
-unsigned int h264_stream_peek_bits(struct h264_stream_t *s, unsigned int n);
-unsigned int h264_stream_read_bytes(struct h264_stream_t *s, unsigned int n);
-unsigned int h264_stream_peek_bytes(struct h264_stream_t *s, unsigned int n);
-int h264_stream_bits_remaining(struct h264_stream_t *s);
-int h264_stream_bytes_remaining(struct h264_stream_t *s);
-
-#endif //_H264_STREAM_H
-
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/kernel.h>
+#include <uapi/linux/swab.h>
+
#include "../vdec_drv_if.h"
#include "../aml_vcodec_util.h"
#include "../aml_vcodec_dec.h"
-//#include "../aml_vcodec_intr.h"
#include "../aml_vcodec_adapt.h"
#include "../vdec_drv_base.h"
#include "../aml_vcodec_vfm.h"
-#include "h264_stream.h"
-#include "h264_parse.h"
-#include <uapi/linux/swab.h>
+#include "aml_h264_parser.h"
+#include "../utils/common.h"
/* h264 NALU type */
#define NAL_NON_IDR_SLICE 0x01
#define NAL_H264_SEI 0x06
#define NAL_H264_SPS 0x07
#define NAL_H264_PPS 0x08
+#define NAL_H264_AUD 0x09
-#define NAL_TYPE(value) ((value) & 0x1F)
+#define AVC_NAL_TYPE(value) ((value) & 0x1F)
#define BUF_PREDICTION_SZ (64 * 1024)//(32 * 1024)
#define H264_MAX_FB_NUM 17
#define HDR_PARSING_BUF_SZ 1024
-#define HEADER_BUFFER_SIZE (32 * 1024)
+#define HEADER_BUFFER_SIZE (128 * 1024)
/**
* struct h264_fb - h264 decode frame buffer information
* by VPU.
* AP-W/R : AP is writer/reader on this item
* VPU-W/R: VPU is write/reader on this item
- * @hdr_buf : Header parsing buffer (AP-W, VPU-R)
- * @pred_buf_dma : HW working predication buffer dma address (AP-W, VPU-R)
- * @mv_buf_dma : HW working motion vector buffer dma address (AP-W, VPU-R)
- * @list_free : free frame buffer ring list (AP-W/R, VPU-W)
- * @list_disp : display frame buffer ring list (AP-R, VPU-W)
* @dec : decode information (AP-R, VPU-W)
* @pic : picture information (AP-R, VPU-W)
* @crop : crop information (AP-R, VPU-W)
int pps_size;
int sei_size;
int head_offset;
- uint64_t pred_buf_dma;
- uint64_t mv_buf_dma[H264_MAX_FB_NUM];
- struct h264_ring_fb_list list_free;
- struct h264_ring_fb_list list_disp;
struct vdec_h264_dec_info dec;
struct vdec_pic_info pic;
struct vdec_pic_info cur_pic;
struct aml_vcodec_ctx *ctx;
struct aml_vcodec_mem pred_buf;
struct aml_vcodec_mem mv_buf[H264_MAX_FB_NUM];
- //struct vdec_vpu_inst vpu;
struct aml_vdec_adapt vdec;
struct vdec_h264_vsi *vsi;
struct vcodec_vfm_s vfm;
+ struct completion comp;
};
#if 0
aml_vcodec_debug(inst, "sz=%d", *dpb_sz);
}
-static int find_start_code(unsigned char *data, unsigned int data_sz)
-{
- if (data_sz > 3 && data[0] == 0 && data[1] == 0 && data[2] == 1)
- return 3;
-
- if (data_sz > 4 && data[0] == 0 && data[1] == 0 && data[2] == 0 &&
- data[3] == 1)
- return 4;
-
- return -1;
-}
-
static void skip_aud_data(u8 **data, u32 *size)
{
int i;
inst->ctx = ctx;
- inst->vdec.format = VFORMAT_H264;
+ inst->vdec.video_type = VFORMAT_H264;
inst->vdec.dev = ctx->dev->vpu_plat_dev;
inst->vdec.filp = ctx->dev->filp;
inst->vdec.ctx = ctx;
inst->vsi->pic.c_bs_sz = 0;
inst->vsi->pic.c_len_sz = (1920 * 1088 / 2);
+ init_completion(&inst->comp);
+
aml_vcodec_debug(inst, "H264 Instance >> %p", inst);
ctx->ada_ctx = &inst->vdec;
return ret;
}
-static int refer_buffer_num(int level_idc, int poc_cnt,
+#if 0
+static int refer_buffer_num(int level_idc, int max_poc_cnt,
int mb_width, int mb_height)
{
- int max_ref_num = 27;
- int size, size_margin = 6;
+ int size;
int pic_size = mb_width * mb_height * 384;
switch (level_idc) {
size /= pic_size;
size = size + 1; /* need more buffers */
- if (poc_cnt > size)
- size = poc_cnt;
-
- size = size + size_margin;
- if (size > max_ref_num)
- size = max_ref_num;
+ if (size > max_poc_cnt)
+ size = max_poc_cnt;
return size;
}
+#endif
static void fill_vdec_params(struct vdec_h264_inst *inst, struct h264_SPS_t *sps)
{
struct vdec_pic_info *pic = &inst->vsi->pic;
struct vdec_h264_dec_info *dec = &inst->vsi->dec;
struct v4l2_rect *rect = &inst->vsi->crop;
- unsigned int mb_w, mb_h, width, height;
- unsigned int crop_unit_x = 0, crop_unit_y = 0;
- unsigned int poc_cnt = 0;
-
- mb_w = sps->pic_width_in_mbs_minus1 + 1;
- mb_h = sps->pic_height_in_map_units_minus1 + 1;
-
- width = mb_w << 4; // 16
- height = (2 - sps->frame_mbs_only_flag) * (mb_h << 4);
-
- if (sps->frame_cropping_flag) {
- if (0 == sps->chroma_format_idc) {// monochrome
- crop_unit_x = 1;
- crop_unit_y = 2 - sps->frame_mbs_only_flag;
- } else if (1 == sps->chroma_format_idc) {// 4:2:0
- crop_unit_x = 2;
- crop_unit_y = 2 * (2 - sps->frame_mbs_only_flag);
- } else if (2 == sps->chroma_format_idc) {// 4:2:2
- crop_unit_x = 2;
- crop_unit_y = 2 - sps->frame_mbs_only_flag;
- } else {// 3 == sps.chroma_format_idc // 4:4:4
- crop_unit_x = 1;
- crop_unit_y = 2 - sps->frame_mbs_only_flag;
- }
- }
+ u32 mb_w, mb_h, width, height;
- width -= crop_unit_x * (sps->frame_crop_left_offset +
- sps->frame_crop_right_offset);
- height -= crop_unit_y * (sps->frame_crop_top_offset +
- sps->frame_crop_bottom_offset);
+ mb_w = sps->mb_width;
+ mb_h = sps->mb_height;
+
+ width = mb_w << 4;
+ height = mb_h << 4;
+
+ width -= (sps->crop_left + sps->crop_right);
+ height -= (sps->crop_top + sps->crop_bottom);
/* fill visible area size that be used for EGL. */
pic->visible_width = width;
pic->c_len_sz = pic->y_len_sz >> 1;
/* calc DPB size */
- poc_cnt = sps->pic_order_cnt_type;
- if (!poc_cnt)
- poc_cnt = (sps->log2_max_pic_order_cnt_lsb_minus4 + 4) << 1;
-
- dec->dpb_sz = refer_buffer_num(sps->level_idc, poc_cnt, mb_w, mb_h);
+ dec->dpb_sz = sps->ref_frame_count;
aml_vcodec_debug(inst, "[%d] The stream infos, coded:(%d x %d), visible:(%d x %d), DPB: %d\n",
inst->ctx->id, pic->coded_width, pic->coded_height,
pic->visible_width, pic->visible_height, dec->dpb_sz);
}
-static void search_from_st(u8 *buf, u32 size, int *pos, bool *is_combine)
+static bool check_frame_combine(u8 *buf, u32 size, int *pos)
{
+ bool combine = false;
int i = 0, j = 0, cnt = 0;
u8 *p = buf;
j = find_start_code(p, 7);
if (j > 0) {
if (++cnt > 1) {
- *is_combine = true;
+ combine = true;
break;
}
}
//pr_info("nal pos: %d, is_combine: %d\n",*pos, *is_combine);
+ return combine;
+}
+
+static int stream_parse_by_ucode(struct vdec_h264_inst *inst, u8 *buf, u32 size)
+{
+ int ret = 0;
+ struct aml_vdec_adapt *vdec = &inst->vdec;
+
+ ret = vdec_vframe_write(vdec, buf, size, 0);
+ if (ret < 0) {
+ pr_err("write frame data failed. err: %d\n", ret);
+ return ret;
+ }
+
+ /* wait ucode parse ending. */
+ wait_for_completion_timeout(&inst->comp,
+ msecs_to_jiffies(1000));
+
+ return inst->vsi->dec.dpb_sz ? 0 : -1;
}
static int stream_parse(struct vdec_h264_inst *inst, u8 *buf, u32 size)
{
- struct h264_stream_t s;
- struct h264_SPS_t *sps;
- unsigned int nal_type;
+ int ret = 0;
+ struct h264_param_sets *ps;
+ u32 nal_type;
int nal_idx = 0;
- int real_data_pos, real_data_size;
bool is_combine = false;
- search_from_st(buf, size, &nal_idx, &is_combine);
+ is_combine = check_frame_combine(buf, size, &nal_idx);
if (nal_idx < 0)
return -1;
- nal_type = NAL_TYPE(buf[nal_idx]);
+ nal_type = AVC_NAL_TYPE(buf[nal_idx]);
if (nal_type != NAL_H264_SPS)
return -1;
inst->vsi->is_combine = is_combine;
inst->vsi->nalu_pos = nal_idx;
- /* start code plus nal type. */
- real_data_pos = nal_idx + 1;
- real_data_size = size - real_data_pos;
-
- sps = kzalloc(sizeof(struct h264_SPS_t), GFP_KERNEL);
- if (sps == NULL)
+ ps = kzalloc(sizeof(struct h264_param_sets), GFP_KERNEL);
+ if (ps == NULL)
return -ENOMEM;
- h264_stream_set(&s, &buf[real_data_pos], real_data_size);
- h264_sps_parse(&s, sps);
- //h264_sps_info(sps);
+ ret = h264_decode_extradata_ps(buf, size, ps);
+ if (ret) {
+ pr_err("parse extra data failed. err: %d\n", ret);
+ goto out;
+ }
- fill_vdec_params(inst, sps);
+ if (ps->sps_parsed)
+ fill_vdec_params(inst, &ps->sps);
- kfree(sps);
+ ret = ps->sps_parsed ? 0 : -1;
+out:
+ kfree(ps);
- return 0;
+ return ret;
}
static int vdec_h264_probe(unsigned long h_vdec,
struct vdec_h264_inst *inst =
(struct vdec_h264_inst *)h_vdec;
struct stream_info *st;
- u8 *buf = (u8 *)bs->va;
+ u8 *buf = (u8 *)bs->vaddr;
u32 size = bs->size;
int ret = 0;
}
skip_aud_data(&buf, &size);
- ret = stream_parse(inst, buf, size);
+
+ if (inst->ctx->param_sets_from_ucode)
+ ret = stream_parse_by_ucode(inst, buf, size);
+ else
+ ret = stream_parse(inst, buf, size);
inst->vsi->cur_pic = inst->vsi->pic;
static void vdec_h264_deinit(unsigned long h_vdec)
{
+ ulong flags;
struct vdec_h264_inst *inst = (struct vdec_h264_inst *)h_vdec;
-
- if (!inst)
- return;
+ struct aml_vcodec_ctx *ctx = inst->ctx;
aml_vcodec_debug_enter(inst);
//dump_deinit();
+ spin_lock_irqsave(&ctx->slock, flags);
if (inst->vsi && inst->vsi->header_buf)
kfree(inst->vsi->header_buf);
kfree(inst->vsi);
kfree(inst);
+
+ ctx->drv_handle = 0;
+ spin_unlock_irqrestore(&ctx->slock, flags);
}
-static int vdec_h264_get_fb(struct vdec_h264_inst *inst, struct vdec_fb **out)
+static int vdec_h264_get_fb(struct vdec_h264_inst *inst, struct vdec_v4l2_buffer **out)
{
return get_fb_from_queue(inst->ctx, out);
}
-static void vdec_h264_get_vf(struct vdec_h264_inst *inst, struct vdec_fb **out)
+static void vdec_h264_get_vf(struct vdec_h264_inst *inst, struct vdec_v4l2_buffer **out)
{
struct vframe_s *vf = NULL;
- struct vdec_fb *fb = NULL;
-
- aml_vcodec_debug(inst, "%s() [%d], vfm: %p",
- __func__, __LINE__, &inst->vfm);
+ struct vdec_v4l2_buffer *fb = NULL;
vf = peek_video_frame(&inst->vfm);
if (!vf) {
atomic_set(&vf->use_cnt, 1);
- aml_vcodec_debug(inst, "%s() [%d], vf: %p, v4l_mem_handle: %lx, idx: %d\n",
- __func__, __LINE__, vf, vf->v4l_mem_handle, vf->index);
-
- fb = (struct vdec_fb *)vf->v4l_mem_handle;
+ fb = (struct vdec_v4l2_buffer *)vf->v4l_mem_handle;
fb->vf_handle = (unsigned long)vf;
fb->status = FB_ST_DISPLAY;
*out = fb;
//pr_info("%s, %d\n", __func__, fb->base_y.bytes_used);
- //dump_write(fb->base_y.va, fb->base_y.bytes_used);
- //dump_write(fb->base_c.va, fb->base_c.bytes_used);
+ //dump_write(fb->base_y.vaddr, fb->base_y.bytes_used);
+ //dump_write(fb->base_c.vaddr, fb->base_c.bytes_used);
/* convert yuv format. */
- //swap_uv(fb->base_c.va, fb->base_c.size);
-
- aml_vcodec_debug(inst, "%s() [%d], va: %p, phy: %x, size: %zu",
- __func__, __LINE__, fb->base_y.va,
- (unsigned int)virt_to_phys(fb->base_y.va), fb->base_y.size);
- aml_vcodec_debug(inst, "%s() [%d], va: %p, phy: %x, size: %zu",
- __func__, __LINE__, fb->base_c.va,
- (unsigned int)virt_to_phys(fb->base_c.va), fb->base_c.size);
+ //swap_uv(fb->base_c.vaddr, fb->base_c.size);
}
static int vdec_write_nalu(struct vdec_h264_inst *inst,
if (nalu_pos < 0)
goto err;
- nal_type = NAL_TYPE(buf[nalu_pos]);
- aml_vcodec_debug(inst, "NALU type: %d, size: %u", nal_type, size);
+ nal_type = AVC_NAL_TYPE(buf[nalu_pos]);
+ //aml_vcodec_debug(inst, "NALU type: %d, size: %u", nal_type, size);
if (nal_type == NAL_H264_SPS && !is_combine) {
if (inst->vsi->head_offset + size > HEADER_BUFFER_SIZE) {
ret = size;
} else if (inst->vsi->head_offset == 0) {
ret = vdec_vframe_write(vdec, buf, size, ts);
-
- aml_vcodec_debug(inst, "buf: %p, buf size: %u, write to: %d",
- buf, size, ret);
} else {
char *write_buf = vmalloc(inst->vsi->head_offset + size);
if (!write_buf) {
ret = vdec_vframe_write(vdec, write_buf,
inst->vsi->head_offset + size, ts);
- aml_vcodec_debug(inst, "buf: %p, buf size: %u, write to: %d",
- write_buf, inst->vsi->head_offset + size, ret);
-
memset(inst->vsi->header_buf, 0, HEADER_BUFFER_SIZE);
inst->vsi->head_offset = 0;
inst->vsi->sps_size = 0;
return ret;
}
+static bool monitor_res_change(struct vdec_h264_inst *inst, u8 *buf, u32 size)
+{
+ int ret = 0, i = 0, j = 0;
+ u8 *p = buf;
+ int len = size;
+ u32 type;
+
+ for (i = 4; i < size; i++) {
+ j = find_start_code(p, len);
+ if (j > 0) {
+ len = size - (p - buf);
+ type = AVC_NAL_TYPE(p[j]);
+ if (type != NAL_H264_AUD &&
+ (type > NAL_H264_PPS || type < NAL_H264_SEI))
+ break;
+
+ if (type == NAL_H264_SPS) {
+ ret = stream_parse(inst, p, len);
+ if (!ret && (inst->vsi->cur_pic.coded_width !=
+ inst->vsi->pic.coded_width ||
+ inst->vsi->cur_pic.coded_height !=
+ inst->vsi->pic.coded_height)) {
+ inst->vsi->cur_pic = inst->vsi->pic;
+ return true;
+ }
+ }
+ p += j;
+ }
+ p++;
+ }
+
+ return false;
+}
+
static int vdec_h264_decode(unsigned long h_vdec, struct aml_vcodec_mem *bs,
- unsigned long int timestamp, bool *res_chg)
+ u64 timestamp, bool *res_chg)
{
struct vdec_h264_inst *inst = (struct vdec_h264_inst *)h_vdec;
struct aml_vdec_adapt *vdec = &inst->vdec;
struct stream_info *st;
- int nalu_pos;
- u32 nal_type;
u8 *buf;
u32 size;
int ret = -1;
if (bs == NULL)
return -1;
- buf = (u8 *)bs->va;
+ buf = (u8 *)bs->vaddr;
size = bs->size;
st = (struct stream_info *)buf;
else if (inst->ctx->is_stream_mode)
ret = vdec_vbuf_write(vdec, buf, size);
else {
- /*checked whether the resolution chagnes.*/
- u8 *p = buf;
- u32 l = size;
-
- skip_aud_data(&p, &l);
-
- nalu_pos = find_start_code(p, l);
- if (nalu_pos < 0)
- return -1;
-
- nal_type = NAL_TYPE(p[nalu_pos]);
- if (nal_type == NAL_H264_SPS) {
- stream_parse(inst, p, l);
- if (inst->vsi->cur_pic.coded_width !=
- inst->vsi->pic.coded_width ||
- inst->vsi->cur_pic.coded_height !=
- inst->vsi->pic.coded_height) {
- inst->vsi->cur_pic = inst->vsi->pic;
- *res_chg = true;
- return 0;
- }
- }
+ /*check whether the resolution changes.*/
+ if ((*res_chg = monitor_res_change(inst, buf, size)))
+ return 0;
+
ret = vdec_write_nalu(inst, buf, size, timestamp);
}
return ret;
}
+static void set_param_write_sync(struct vdec_h264_inst *inst)
+{
+ complete(&inst->comp);
+}
+
+static void set_param_pic_info(struct vdec_h264_inst *inst,
+ struct aml_vdec_pic_infos *info)
+{
+ struct vdec_pic_info *pic = &inst->vsi->pic;
+ struct vdec_h264_dec_info *dec = &inst->vsi->dec;
+ struct v4l2_rect *rect = &inst->vsi->crop;
+
+ /* fill visible area size that be used for EGL. */
+ pic->visible_width = info->visible_width;
+ pic->visible_height = info->visible_height;
+
+ /* calc visible areas. */
+ rect->left = 0;
+ rect->top = 0;
+ rect->width = pic->visible_width;
+ rect->height = pic->visible_height;
+
+ /* config canvas size that be used for decoder. */
+ pic->coded_width = info->coded_width;
+ pic->coded_height = info->coded_height;
+ pic->y_len_sz = pic->coded_width * pic->coded_height;
+ pic->c_len_sz = pic->y_len_sz >> 1;
+
+ dec->dpb_sz = info->dpb_size;
+
+ /*wake up*/
+ complete(&inst->comp);
+
+ pr_info("Parse from ucode, crop(%d x %d), coded(%d x %d) dpb: %d\n",
+ info->visible_width, info->visible_height,
+ info->coded_width, info->coded_height,
+ info->dpb_size);
+}
+
+static int vdec_h264_set_param(unsigned long h_vdec,
+ enum vdec_set_param_type type, void *in)
+{
+ int ret = 0;
+ struct vdec_h264_inst *inst = (struct vdec_h264_inst *)h_vdec;
+
+ if (!inst) {
+ pr_err("the h264 inst of dec is invalid.\n");
+ return -1;
+ }
+
+ switch (type) {
+ case SET_PARAM_WRITE_FRAME_SYNC:
+ set_param_write_sync(inst);
+ break;
+
+ case SET_PARAM_PIC_INFO:
+ set_param_pic_info(inst, in);
+ break;
+
+ default:
+ aml_vcodec_err(inst, "invalid set parameter type=%d", type);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
static struct vdec_common_if vdec_h264_if = {
- vdec_h264_init,
- vdec_h264_probe,
- vdec_h264_decode,
- vdec_h264_get_param,
- vdec_h264_deinit,
+ .init = vdec_h264_init,
+ .probe = vdec_h264_probe,
+ .decode = vdec_h264_decode,
+ .get_param = vdec_h264_get_param,
+ .set_param = vdec_h264_set_param,
+ .deinit = vdec_h264_deinit,
};
struct vdec_common_if *get_h264_dec_comm_if(void);
#include "../aml_vcodec_vfm.h"
#include "aml_hevc_parser.h"
-#define NAL_TYPE(value) ((value) & 0x1F)
+#define HEVC_NAL_TYPE(value) ((value >> 1) & 0x3F)
#define HEADER_BUFFER_SIZE (32 * 1024)
/**
int head_offset;
struct vdec_hevc_dec_info dec;
struct vdec_pic_info pic;
+ struct vdec_pic_info cur_pic;
struct v4l2_rect crop;
bool is_combine;
int nalu_pos;
- struct HEVCParamSets ps;
+ struct h265_param_sets ps;
};
/**
struct aml_vdec_adapt vdec;
struct vdec_hevc_vsi *vsi;
struct vcodec_vfm_s vfm;
+ struct completion comp;
};
-#if 0
-#define DUMP_FILE_NAME "/data/dump/dump.tmp"
-static struct file *filp;
-static loff_t file_pos;
-
-void dump_write(const char __user *buf, size_t count)
-{
- mm_segment_t old_fs;
-
- if (!filp)
- return;
-
- old_fs = get_fs();
- set_fs(KERNEL_DS);
-
- if (count != vfs_write(filp, buf, count, &file_pos))
- pr_err("Failed to write file\n");
-
- set_fs(old_fs);
-}
-
-void dump_init(void)
-{
- filp = filp_open(DUMP_FILE_NAME, O_CREAT | O_RDWR, 0644);
- if (IS_ERR(filp)) {
- pr_err("open dump file failed\n");
- filp = NULL;
- }
-}
-
-void dump_deinit(void)
-{
- if (filp) {
- filp_close(filp, current->files);
- filp = NULL;
- file_pos = 0;
- }
-}
-
-void swap_uv(void *uv, int size)
-{
- int i;
- __u16 *p = uv;
-
- size /= 2;
-
- for (i = 0; i < size; i++, p++)
- *p = __swab16(*p);
-}
-#endif
-
static void get_pic_info(struct vdec_hevc_inst *inst,
struct vdec_pic_info *pic)
{
static void get_dpb_size(struct vdec_hevc_inst *inst, unsigned int *dpb_sz)
{
- *dpb_sz = 20;//inst->vsi->dec.dpb_sz;
+ *dpb_sz = inst->vsi->dec.dpb_sz;
aml_vcodec_debug(inst, "sz=%d", *dpb_sz);
}
-static int find_start_code(unsigned char *data, unsigned int data_sz)
-{
- if (data_sz > 3 && data[0] == 0 && data[1] == 0 && data[2] == 1)
- return 3;
-
- if (data_sz > 4 && data[0] == 0 && data[1] == 0 && data[2] == 0 &&
- data[3] == 1)
- return 4;
-
- return -1;
-}
-
static int vdec_hevc_init(struct aml_vcodec_ctx *ctx, unsigned long *h_vdec)
{
struct vdec_hevc_inst *inst = NULL;
inst->ctx = ctx;
- inst->vdec.format = VFORMAT_HEVC;
+ inst->vdec.video_type = VFORMAT_HEVC;
inst->vdec.dev = ctx->dev->vpu_plat_dev;
inst->vdec.filp = ctx->dev->filp;
inst->vdec.ctx = ctx;
inst->vsi->pic.c_bs_sz = 0;
inst->vsi->pic.c_len_sz = (1920 * 1088 / 2);
+ init_completion(&inst->comp);
+
aml_vcodec_debug(inst, "hevc Instance >> %p", inst);
ctx->ada_ctx = &inst->vdec;
return ret;
}
-#if 0
-static int refer_buffer_num(int level_idc, int poc_cnt,
- int mb_width, int mb_height)
+
+static int refer_buffer_num(struct h265_SPS_t *sps)
{
- return 20;
+ int used_buf_num = 0;
+ int sps_pic_buf_diff = 0;
+
+ if ((!sps->temporal_layer[0].num_reorder_pics) &&
+ (sps->temporal_layer[0].max_dec_pic_buffering)) {
+ /* the range of sps_num_reorder_pics_0 is in
+ [0, sps_max_dec_pic_buffering_minus1_0] */
+ used_buf_num = sps->temporal_layer[0].max_dec_pic_buffering;
+ } else
+ used_buf_num = sps->temporal_layer[0].num_reorder_pics;
+
+ sps_pic_buf_diff = sps->temporal_layer[0].max_dec_pic_buffering -
+ sps->temporal_layer[0].num_reorder_pics + 1;
+
+ if (sps_pic_buf_diff >= 4)
+ used_buf_num += 1;
+
+ /*need one more for multi instance, as
+ apply_ref_pic_set() has no chance to run
+ to clear referenced flag in some case */
+ used_buf_num++;
+
+ /* for eos add more buffer to flush.*/
+ used_buf_num++;
+
+ return used_buf_num;
}
-#endif
-//static void fill_vdec_params(struct vdec_hevc_inst *inst, struct hevc_SPS_t *sps)
-static void fill_vdec_params(struct vdec_hevc_inst *inst)
+static void fill_vdec_params(struct vdec_hevc_inst *inst, struct h265_SPS_t *sps)
{
struct vdec_pic_info *pic = &inst->vsi->pic;
struct vdec_hevc_dec_info *dec = &inst->vsi->dec;
struct v4l2_rect *rect = &inst->vsi->crop;
- unsigned int mb_w = 0, mb_h = 0, width, height;
- //unsigned int crop_unit_x = 0, crop_unit_y = 0;
- //unsigned int poc_cnt = 0;
-
- /* calc width & height. */
- width = 1920;
- height = 1080;
/* fill visible area size that be used for EGL. */
- pic->visible_width = width;
- pic->visible_height = height;
+ pic->visible_width = sps->width - (sps->output_window.left_offset +
+ sps->output_window.right_offset);
+ pic->visible_height = sps->height - (sps->output_window.top_offset +
+ sps->output_window.bottom_offset);
/* calc visible ares. */
rect->left = 0;
rect->height = pic->visible_height;
/* config canvas size that be used for decoder. */
- pic->coded_width = ALIGN(mb_w, 4) << 4;
- pic->coded_height = ALIGN(mb_h, 4) << 4;
-
- pic->coded_width = 1920;
- pic->coded_height = 1088;//temp
+ pic->coded_width = ALIGN(sps->width, 32);
+ pic->coded_height = ALIGN(sps->height, 32);
pic->y_len_sz = pic->coded_width * pic->coded_height;
pic->c_len_sz = pic->y_len_sz >> 1;
/* calc DPB size */
- dec->dpb_sz = 20;//refer_buffer_num(sps->level_idc, poc_cnt, mb_w, mb_h);
+ dec->dpb_sz = refer_buffer_num(sps);
pr_info("[%d] The stream infos, coded:(%d x %d), visible:(%d x %d), DPB: %d\n",
inst->ctx->id, pic->coded_width, pic->coded_height,
pic->visible_width, pic->visible_height, dec->dpb_sz);
}
-static int hevc_parse_nal_header(u32 val)
+static int stream_parse_by_ucode(struct vdec_hevc_inst *inst, u8 *buf, u32 size)
{
- if (val & 0x80) {
- pr_err("the nal data is invalid.\n");
- return -1;
- }
+ int ret = 0;
+ struct aml_vdec_adapt *vdec = &inst->vdec;
- return (val & 0x7f) >> 1;
-}
+ ret = vdec_vframe_write(vdec, buf, size, 0);
+ if (ret < 0) {
+ pr_err("write frame data failed. err: %d\n", ret);
+ return ret;
+ }
-static void hevc_parse(struct HEVCParamSets *ps, u8 *buf, u32 size)
-{
- //int ret = -1;
- int i = 0, j = 0, cnt = 0;
- int pos, nal_size, nal_type;
- u8 *p = buf;
+ /* wait ucode parse ending. */
+ wait_for_completion_timeout(&inst->comp,
+ msecs_to_jiffies(1000));
- for (i = 4; i < size; i++) {
- j = find_start_code(p, i);
- if (j > 0) {
- pos = p - buf + j;
- nal_size = size - pos;
- nal_type = hevc_parse_nal_header(buf[pos]);
-
- switch (nal_type) {
- case HEVC_NAL_VPS:
- //ret = hevc_parse_nal_vps(&ps->vps, p, nal_size);
- //if (!ret)
- //ps->vps_parsed = true;
- pr_err("------%s,%d nal type: %u\n",__func__, __LINE__,nal_type);
- break;
- case HEVC_NAL_SPS:
- //ret = hevc_parse_nal_sps(&ps->sps, p, nal_size);
- //if (!ret)
- ps->sps_parsed = true;
- pr_err("------%s,%d nal type: %u\n",__func__, __LINE__,nal_type);
- break;
- case HEVC_NAL_PPS:
- //ret = hevc_parse_nal_pps(&ps->pps, p, nal_size);
- //if (!ret)
- //ps->sps_parsed = true;
- pr_err("------%s,%d nal type: %u\n",__func__, __LINE__,nal_type);
- break;
- case HEVC_NAL_SEI_PREFIX:
- case HEVC_NAL_SEI_SUFFIX:
- //ret = hevc_parse_nal_sei(&ps->sei, p, nal_size);
- //if (!ret)
- //ps->sei_parsed = true;
- pr_err("------%s,%d nal type: %u\n",__func__, __LINE__,nal_type);
- break;
- default:
- pr_info("ignoring NAL type %d in extradata\n", nal_type);
- break;
- }
-
- cnt++;
- p += j;
- i = pos;
- }
- p++;
- }
+ return inst->vsi->dec.dpb_sz ? 0 : -1;
}
static int stream_parse(struct vdec_hevc_inst *inst, u8 *buf, u32 size)
{
- //struct hevc_stream_t s;
- //struct hevc_SPS_t *sps;
- //unsigned int nal_type;
- int nal_idx = 0;
- int real_data_pos, real_data_size;
- bool is_combine = false;
-
- hevc_parse(&inst->vsi->ps, buf, size);
-
- if (!inst->vsi->ps.sps_parsed)
- return -1;
-
- /* if the st compose from csd + slice that is the combine data. */
- inst->vsi->is_combine = is_combine;
- inst->vsi->nalu_pos = nal_idx;
-
- /* start code plus nal type. */
- real_data_pos = nal_idx + 1;
- real_data_size = size - real_data_pos;
+ int ret = 0;
+ struct h265_param_sets *ps = NULL;
- //sps = kzalloc(sizeof(struct hevc_SPS_t), GFP_KERNEL);
- //if (sps == NULL)
- //return -ENOMEM;
+ ps = kzalloc(sizeof(struct h265_param_sets), GFP_KERNEL);
+ if (ps == NULL)
+ return -ENOMEM;
- /* the extra data would be parsed. */
- //hevc_stream_set(&s, &buf[real_data_pos], real_data_size);
- //hevc_sps_parse(&s, sps);
- //hevc_sps_info(sps);
+ ret = h265_decode_extradata_ps(buf, size, ps);
+ if (ret) {
+ pr_err("parse extra data failed. err: %d\n", ret);
+ goto out;
+ }
- //fill_vdec_params(inst, sps);
- fill_vdec_params(inst);
+ if (ps->sps_parsed)
+ fill_vdec_params(inst, &ps->sps);
- //kfree(sps);
+ ret = ps->sps_parsed ? 0 : -1;
+out:
+ kfree(ps);
- return 0;
+ return ret;
}
static int vdec_hevc_probe(unsigned long h_vdec,
struct vdec_hevc_inst *inst =
(struct vdec_hevc_inst *)h_vdec;
struct stream_info *st;
- u8 *buf = (u8 *)bs->va;
+ u8 *buf = (u8 *)bs->vaddr;
u32 size = bs->size;
int ret = 0;
if (st->magic == NORe || st->magic == NORn)
ret = stream_parse(inst, st->data, st->length);
- else
- ret = stream_parse(inst, buf, size);
+ else {
+ if (inst->ctx->param_sets_from_ucode)
+ ret = stream_parse_by_ucode(inst, buf, size);
+ else
+ ret = stream_parse(inst, buf, size);
+ }
+
+ inst->vsi->cur_pic = inst->vsi->pic;
return ret;
}
static void vdec_hevc_deinit(unsigned long h_vdec)
{
+ ulong flags;
struct vdec_hevc_inst *inst = (struct vdec_hevc_inst *)h_vdec;
-
- if (!inst)
- return;
+ struct aml_vcodec_ctx *ctx = inst->ctx;
aml_vcodec_debug_enter(inst);
//dump_deinit();
+ spin_lock_irqsave(&ctx->slock, flags);
if (inst->vsi && inst->vsi->header_buf)
kfree(inst->vsi->header_buf);
kfree(inst->vsi);
kfree(inst);
+
+ ctx->drv_handle = 0;
+ spin_unlock_irqrestore(&ctx->slock, flags);
}
-static int vdec_hevc_get_fb(struct vdec_hevc_inst *inst, struct vdec_fb **out)
+static int vdec_hevc_get_fb(struct vdec_hevc_inst *inst, struct vdec_v4l2_buffer **out)
{
return get_fb_from_queue(inst->ctx, out);
}
-static void vdec_hevc_get_vf(struct vdec_hevc_inst *inst, struct vdec_fb **out)
+static void vdec_hevc_get_vf(struct vdec_hevc_inst *inst, struct vdec_v4l2_buffer **out)
{
struct vframe_s *vf = NULL;
- struct vdec_fb *fb = NULL;
-
- aml_vcodec_debug(inst, "%s() [%d], vfm: %p",
- __func__, __LINE__, &inst->vfm);
+ struct vdec_v4l2_buffer *fb = NULL;
vf = peek_video_frame(&inst->vfm);
if (!vf) {
atomic_set(&vf->use_cnt, 1);
- aml_vcodec_debug(inst, "%s() [%d], vf: %p, v4l_mem_handle: %lx, idx: %d\n",
- __func__, __LINE__, vf, vf->v4l_mem_handle, vf->index);
-
- fb = (struct vdec_fb *)vf->v4l_mem_handle;
+ fb = (struct vdec_v4l2_buffer *)vf->v4l_mem_handle;
fb->vf_handle = (unsigned long)vf;
fb->status = FB_ST_DISPLAY;
*out = fb;
//pr_info("%s, %d\n", __func__, fb->base_y.bytes_used);
- //dump_write(fb->base_y.va, fb->base_y.bytes_used);
- //dump_write(fb->base_c.va, fb->base_c.bytes_used);
+ //dump_write(fb->base_y.vaddr, fb->base_y.bytes_used);
+ //dump_write(fb->base_c.vaddr, fb->base_c.bytes_used);
/* convert yuv format. */
- //swap_uv(fb->base_c.va, fb->base_c.size);
-
- aml_vcodec_debug(inst, "%s() [%d], va: %p, phy: %x, size: %zu",
- __func__, __LINE__, fb->base_y.va,
- (unsigned int)virt_to_phys(fb->base_y.va), fb->base_y.size);
- aml_vcodec_debug(inst, "%s() [%d], va: %p, phy: %x, size: %zu",
- __func__, __LINE__, fb->base_c.va,
- (unsigned int)virt_to_phys(fb->base_c.va), fb->base_c.size);
+ //swap_uv(fb->base_c.vaddr, fb->base_c.size);
}
static int vdec_write_nalu(struct vdec_hevc_inst *inst,
u8 *buf, u32 size, u64 ts)
{
- int ret = 0, err = 0;
+ int ret = 0;
struct aml_vdec_adapt *vdec = &inst->vdec;
- bool is_combine = true;//inst->vsi->is_combine;
- int nalu_pos;
- u32 nal_type;
-
- nalu_pos = find_start_code(buf, size);
- if (nalu_pos < 0)
- goto err;
-
- nal_type = hevc_parse_nal_header(buf[nalu_pos]);
- aml_vcodec_debug(inst, "NALU type: %d, size: %u\n", nal_type, size);
-
- if (nal_type == HEVC_NAL_SPS && !is_combine) {
- if (inst->vsi->head_offset + size > HEADER_BUFFER_SIZE) {
- ret = -EILSEQ;
- goto err;
- }
- inst->vsi->sps_size = size;
- memcpy(inst->vsi->header_buf + inst->vsi->head_offset, buf, size);
- inst->vsi->head_offset += inst->vsi->sps_size;
- } else if (nal_type == HEVC_NAL_PPS && !is_combine) {
- //buf_sz -= nal_start_idx;
- if (inst->vsi->head_offset + size > HEADER_BUFFER_SIZE) {
- ret = -EILSEQ;
- goto err;
- }
- inst->vsi->pps_size = size;
- memcpy(inst->vsi->header_buf + inst->vsi->head_offset, buf, size);
- inst->vsi->head_offset += inst->vsi->pps_size;
- } else if (nal_type == HEVC_NAL_SEI_PREFIX && !is_combine) {
- if (inst->vsi->head_offset + size > HEADER_BUFFER_SIZE) {
- ret = -EILSEQ;
- goto err;
- }
- inst->vsi->sei_size = size;
- memcpy(inst->vsi->header_buf + inst->vsi->head_offset, buf, size);
- inst->vsi->head_offset += inst->vsi->sei_size;
- } else {
- char *write_buf = vmalloc(inst->vsi->head_offset + size);
- memcpy(write_buf, inst->vsi->header_buf, inst->vsi->head_offset);
- memcpy(write_buf + inst->vsi->head_offset, buf, size);
+ ret = vdec_vframe_write(vdec, buf, size, ts);
- ret = vdec_vframe_write(vdec, write_buf,
- inst->vsi->head_offset + size, ts);
+ return ret;
+}
- aml_vcodec_debug(inst, "buf: %p, buf size: %u, write to: %d",
- write_buf, inst->vsi->head_offset + size, ret);
+static bool monitor_res_change(struct vdec_hevc_inst *inst, u8 *buf, u32 size)
+{
+ int ret = 0, i = 0, j = 0;
+ u8 *p = buf;
+ int len = size;
+ u32 type;
- memset(inst->vsi->header_buf, 0, HEADER_BUFFER_SIZE);
- inst->vsi->head_offset = 0;
- inst->vsi->sps_size = 0;
- inst->vsi->pps_size = 0;
- inst->vsi->sei_size = 0;
+ for (i = 4; i < size; i++) {
+ j = find_start_code(p, len);
+ if (j > 0) {
+ len = size - (p - buf);
+ type = HEVC_NAL_TYPE(p[j]);
+ if (type != HEVC_NAL_AUD &&
+ (type > HEVC_NAL_PPS || type < HEVC_NAL_VPS))
+ break;
- vfree(write_buf);
+ if (type == HEVC_NAL_SPS) {
+ ret = stream_parse(inst, p, len);
+ if (!ret && (inst->vsi->cur_pic.coded_width !=
+ inst->vsi->pic.coded_width ||
+ inst->vsi->cur_pic.coded_height !=
+ inst->vsi->pic.coded_height)) {
+ inst->vsi->cur_pic = inst->vsi->pic;
+ return true;
+ }
+ }
+ p += j;
+ }
+ p++;
}
- return 0;
-err:
- aml_vcodec_err(inst, "%s err(%d)", __func__, err);
-
- return err;
+ return false;
}
static int vdec_hevc_decode(unsigned long h_vdec, struct aml_vcodec_mem *bs,
- unsigned long int timestamp, bool *res_chg)
+ u64 timestamp, bool *res_chg)
{
struct vdec_hevc_inst *inst = (struct vdec_hevc_inst *)h_vdec;
struct aml_vdec_adapt *vdec = &inst->vdec;
struct stream_info *st;
u8 *buf;
u32 size;
- int ret = 0;
+ int ret = -1;
/* bs NULL means flush decoder */
if (bs == NULL)
- return 0;
+ return -1;
- buf = (u8 *)bs->va;
+ buf = (u8 *)bs->vaddr;
size = bs->size;
st = (struct stream_info *)buf;
ret = vdec_write_nalu(inst, st->data, st->length, timestamp);
else if (inst->ctx->is_stream_mode)
ret = vdec_vbuf_write(vdec, buf, size);
- else
+ else {
+ /*check whether the resolution changes.*/
+ if ((*res_chg = monitor_res_change(inst, buf, size)))
+ return 0;
+
ret = vdec_write_nalu(inst, buf, size, timestamp);
+ }
return ret;
}
return ret;
}
+static void set_param_write_sync(struct vdec_hevc_inst *inst)
+{
+ complete(&inst->comp);
+}
+
+static void set_param_pic_info(struct vdec_hevc_inst *inst,
+ struct aml_vdec_pic_infos *info)
+{
+ struct vdec_pic_info *pic = &inst->vsi->pic;
+ struct vdec_hevc_dec_info *dec = &inst->vsi->dec;
+ struct v4l2_rect *rect = &inst->vsi->crop;
+
+ /* fill visible area size that be used for EGL. */
+ pic->visible_width = info->visible_width;
+ pic->visible_height = info->visible_height;
+
+ /* calc visible areas. */
+ rect->left = 0;
+ rect->top = 0;
+ rect->width = pic->visible_width;
+ rect->height = pic->visible_height;
+
+ /* config canvas size that be used for decoder. */
+ pic->coded_width = ALIGN(info->coded_width, 64);
+ pic->coded_height = ALIGN(info->coded_height, 64);
+ pic->y_len_sz = pic->coded_width * pic->coded_height;
+ pic->c_len_sz = pic->y_len_sz >> 1;
+
+ dec->dpb_sz = info->dpb_size;
+
+ /*wake up*/
+ complete(&inst->comp);
+
+ pr_info("Parse from ucode, crop(%d x %d), coded(%d x %d) dpb: %d\n",
+ pic->visible_width, pic->visible_height,
+ pic->coded_width, pic->coded_height,
+ dec->dpb_sz);
+}
+
+static int vdec_hevc_set_param(unsigned long h_vdec,
+ enum vdec_set_param_type type, void *in)
+{
+ int ret = 0;
+ struct vdec_hevc_inst *inst = (struct vdec_hevc_inst *)h_vdec;
+
+ if (!inst) {
+ pr_err("the hevc inst of dec is invalid.\n");
+ return -1;
+ }
+
+ switch (type) {
+ case SET_PARAM_WRITE_FRAME_SYNC:
+ set_param_write_sync(inst);
+ break;
+
+ case SET_PARAM_PIC_INFO:
+ set_param_pic_info(inst, in);
+ break;
+
+ default:
+ aml_vcodec_err(inst, "invalid set parameter type=%d", type);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
static struct vdec_common_if vdec_hevc_if = {
- vdec_hevc_init,
- vdec_hevc_probe,
- vdec_hevc_decode,
- vdec_hevc_get_param,
- vdec_hevc_deinit,
+ .init = vdec_hevc_init,
+ .probe = vdec_hevc_probe,
+ .decode = vdec_hevc_decode,
+ .get_param = vdec_hevc_get_param,
+ .set_param = vdec_hevc_set_param,
+ .deinit = vdec_hevc_deinit,
};
struct vdec_common_if *get_hevc_dec_comm_if(void);
--- /dev/null
+/*
+* Copyright (C) 2017 Amlogic, Inc. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*
+* You should have received a copy of the GNU General Public License along
+* with this program; if not, write to the Free Software Foundation, Inc.,
+* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+*
+* Description:
+*/
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <uapi/linux/swab.h>
+#include "../vdec_drv_if.h"
+#include "../aml_vcodec_util.h"
+#include "../aml_vcodec_dec.h"
+#include "../aml_vcodec_adapt.h"
+#include "../vdec_drv_base.h"
+#include "../aml_vcodec_vfm.h"
+#include "aml_mjpeg_parser.h"
+
+#define NAL_TYPE(value) ((value) & 0x1F)
+#define HEADER_BUFFER_SIZE (32 * 1024)
+
+/**
+ * struct mjpeg_fb - mjpeg decode frame buffer information
+ * @vdec_fb_va : virtual address of struct vdec_fb
+ * @y_fb_dma : dma address of Y frame buffer (luma)
+ * @c_fb_dma : dma address of C frame buffer (chroma)
+ * @poc : picture order count of frame buffer
+ * @reserved : for 8 bytes alignment
+ */
+struct mjpeg_fb {
+ uint64_t vdec_fb_va;
+ uint64_t y_fb_dma;
+ uint64_t c_fb_dma;
+ int32_t poc;
+ uint32_t reserved;
+};
+
+/**
+ * struct vdec_mjpeg_dec_info - decode information
+ * @dpb_sz : decoding picture buffer size
+ * @resolution_changed : resolution change happens
+ * @reserved : for 8 bytes alignment
+ * @bs_dma : Input bit-stream buffer dma address
+ * @y_fb_dma : Y frame buffer dma address
+ * @c_fb_dma : C frame buffer dma address
+ * @vdec_fb_va : VDEC frame buffer struct virtual address
+ */
+struct vdec_mjpeg_dec_info {
+ uint32_t dpb_sz;
+ uint32_t resolution_changed;
+ uint32_t reserved;
+ uint64_t bs_dma;
+ uint64_t y_fb_dma;
+ uint64_t c_fb_dma;
+ uint64_t vdec_fb_va;
+};
+
+/**
+ * struct vdec_mjpeg_vsi - shared memory for decode information exchange
+ * between VPU and Host.
+ * The memory is allocated by VPU then mapping to Host
+ * in vpu_dec_init() and freed in vpu_dec_deinit()
+ * by VPU.
+ * AP-W/R : AP is writer/reader on this item
+ * VPU-W/R: VPU is write/reader on this item
+ * @hdr_buf : Header parsing buffer (AP-W, VPU-R)
+ * @list_free : free frame buffer ring list (AP-W/R, VPU-W)
+ * @list_disp : display frame buffer ring list (AP-R, VPU-W)
+ * @dec : decode information (AP-R, VPU-W)
+ * @pic : picture information (AP-R, VPU-W)
+ * @crop : crop information (AP-R, VPU-W)
+ */
+struct vdec_mjpeg_vsi {
+ char *header_buf;
+ int sps_size;
+ int pps_size;
+ int sei_size;
+ int head_offset;
+ struct vdec_mjpeg_dec_info dec;
+ struct vdec_pic_info pic;
+ struct v4l2_rect crop;
+ bool is_combine;
+ int nalu_pos;
+ //struct mjpeg_param_sets ps;
+};
+
+/**
+ * struct vdec_mjpeg_inst - mjpeg decoder instance
+ * @num_nalu : how many nalus be decoded
+ * @ctx : point to aml_vcodec_ctx
+ * @vsi : VPU shared information
+ */
+struct vdec_mjpeg_inst {
+ unsigned int num_nalu;
+ struct aml_vcodec_ctx *ctx;
+ struct aml_vdec_adapt vdec;
+ struct vdec_mjpeg_vsi *vsi;
+ struct vcodec_vfm_s vfm;
+};
+
+static void get_pic_info(struct vdec_mjpeg_inst *inst,
+ struct vdec_pic_info *pic)
+{
+ *pic = inst->vsi->pic;
+
+ aml_vcodec_debug(inst, "pic(%d, %d), buf(%d, %d)",
+ pic->visible_width, pic->visible_height,
+ pic->coded_width, pic->coded_height);
+ aml_vcodec_debug(inst, "Y(%d, %d), C(%d, %d)", pic->y_bs_sz,
+ pic->y_len_sz, pic->c_bs_sz, pic->c_len_sz);
+}
+
+static void get_crop_info(struct vdec_mjpeg_inst *inst, struct v4l2_rect *cr)
+{
+ cr->left = inst->vsi->crop.left;
+ cr->top = inst->vsi->crop.top;
+ cr->width = inst->vsi->crop.width;
+ cr->height = inst->vsi->crop.height;
+
+ aml_vcodec_debug(inst, "l=%d, t=%d, w=%d, h=%d",
+ cr->left, cr->top, cr->width, cr->height);
+}
+
+static void get_dpb_size(struct vdec_mjpeg_inst *inst, unsigned int *dpb_sz)
+{
+ *dpb_sz = 20;//inst->vsi->dec.dpb_sz;
+ aml_vcodec_debug(inst, "sz=%d", *dpb_sz);
+}
+
+static int vdec_mjpeg_init(struct aml_vcodec_ctx *ctx, unsigned long *h_vdec)
+{
+ struct vdec_mjpeg_inst *inst = NULL;
+ int ret = -1;
+
+ inst = kzalloc(sizeof(*inst), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+
+ inst->ctx = ctx;
+
+ inst->vdec.video_type = VFORMAT_MJPEG;
+ inst->vdec.dev = ctx->dev->vpu_plat_dev;
+ inst->vdec.filp = ctx->dev->filp;
+ inst->vdec.ctx = ctx;
+
+ /* set play mode.*/
+ if (ctx->is_drm_mode)
+ inst->vdec.port.flag |= PORT_FLAG_DRM;
+
+ /* to enable mjpeg hw.*/
+ inst->vdec.port.type = PORT_TYPE_VIDEO;
+
+ /* init vfm */
+ inst->vfm.ctx = ctx;
+ inst->vfm.ada_ctx = &inst->vdec;
+ vcodec_vfm_init(&inst->vfm);
+
+ ret = video_decoder_init(&inst->vdec);
+ if (ret) {
+ aml_vcodec_err(inst, "vdec_mjpeg init err=%d", ret);
+ goto error_free_inst;
+ }
+
+ /* probe info from the stream */
+ inst->vsi = kzalloc(sizeof(struct vdec_mjpeg_vsi), GFP_KERNEL);
+ if (!inst->vsi) {
+ ret = -ENOMEM;
+ goto error_free_inst;
+ }
+
+ /* alloc the header buffer used to cache sps or pps etc.*/
+ inst->vsi->header_buf = kzalloc(HEADER_BUFFER_SIZE, GFP_KERNEL);
+ if (!inst->vsi->header_buf) {
+ ret = -ENOMEM;
+ goto error_free_vsi;
+ }
+
+ inst->vsi->pic.visible_width = 1920;
+ inst->vsi->pic.visible_height = 1080;
+ inst->vsi->pic.coded_width = 1920;
+ inst->vsi->pic.coded_height = 1088;
+ inst->vsi->pic.y_bs_sz = 0;
+ inst->vsi->pic.y_len_sz = (1920 * 1088);
+ inst->vsi->pic.c_bs_sz = 0;
+ inst->vsi->pic.c_len_sz = (1920 * 1088 / 2);
+
+ aml_vcodec_debug(inst, "mjpeg Instance >> %p", inst);
+
+ ctx->ada_ctx = &inst->vdec;
+ *h_vdec = (unsigned long)inst;
+
+ //dump_init();
+
+ return 0;
+
+error_free_vsi:
+ kfree(inst->vsi);
+error_free_inst:
+ kfree(inst);
+ *h_vdec = 0;
+
+ return ret;
+}
+
+#if 0
+static int refer_buffer_num(int level_idc, int poc_cnt,
+ int mb_width, int mb_height)
+{
+ return 20;
+}
+#endif
+
+static void fill_vdec_params(struct vdec_mjpeg_inst *inst,
+ struct MJpegDecodeContext *ps)
+{
+ struct vdec_pic_info *pic = &inst->vsi->pic;
+ struct vdec_mjpeg_dec_info *dec = &inst->vsi->dec;
+ struct v4l2_rect *rect = &inst->vsi->crop;
+
+ /* fill visible area size that be used for EGL. */
+ pic->visible_width = ps->width;
+ pic->visible_height = ps->height;
+
+ /* calc visible ares. */
+ rect->left = 0;
+ rect->top = 0;
+ rect->width = pic->visible_width;
+ rect->height = pic->visible_height;
+
+ /* config canvas size that be used for decoder. */
+ pic->coded_width = ALIGN(ps->width, 64);
+ pic->coded_height = ALIGN(ps->height, 64);
+
+ pic->y_len_sz = pic->coded_width * pic->coded_height;
+ pic->c_len_sz = pic->y_len_sz >> 1;
+
+ /* calc DPB size */
+ dec->dpb_sz = 9;//refer_buffer_num(sps->level_idc, poc_cnt, mb_w, mb_h);
+
+ pr_info("[%d] The stream infos, coded:(%d x %d), visible:(%d x %d), DPB: %d\n",
+ inst->ctx->id, pic->coded_width, pic->coded_height,
+ pic->visible_width, pic->visible_height, dec->dpb_sz);
+}
+
+static int stream_parse(struct vdec_mjpeg_inst *inst, u8 *buf, u32 size)
+{
+ int ret = 0;
+ struct mjpeg_param_sets *ps = NULL;
+
+ ps = kzalloc(sizeof(struct mjpeg_param_sets), GFP_KERNEL);
+ if (ps == NULL)
+ return -ENOMEM;
+
+ ret = mjpeg_decode_extradata_ps(buf, size, ps);
+ if (ret) {
+ pr_err("parse extra data failed. err: %d\n", ret);
+ goto out;
+ }
+
+ if (ps->head_parsed)
+ fill_vdec_params(inst, &ps->dec_ps);
+
+ ret = ps->head_parsed ? 0 : -1;
+out:
+ kfree(ps);
+
+ return ret;
+}
+
+static int vdec_mjpeg_probe(unsigned long h_vdec,
+ struct aml_vcodec_mem *bs, void *out)
+{
+ struct vdec_mjpeg_inst *inst =
+ (struct vdec_mjpeg_inst *)h_vdec;
+ struct stream_info *st;
+ u8 *buf = (u8 *)bs->vaddr;
+ u32 size = bs->size;
+ int ret = 0;
+
+ st = (struct stream_info *)buf;
+ if (inst->ctx->is_drm_mode && (st->magic == DRMe || st->magic == DRMn))
+ return 0;
+
+ if (st->magic == NORe || st->magic == NORn)
+ ret = stream_parse(inst, st->data, st->length);
+ else
+ ret = stream_parse(inst, buf, size);
+
+ return ret;
+}
+
+static void vdec_mjpeg_deinit(unsigned long h_vdec)
+{
+ struct vdec_mjpeg_inst *inst = (struct vdec_mjpeg_inst *)h_vdec;
+
+ if (!inst)
+ return;
+
+ aml_vcodec_debug_enter(inst);
+
+ video_decoder_release(&inst->vdec);
+
+ vcodec_vfm_release(&inst->vfm);
+
+ //dump_deinit();
+
+ if (inst->vsi && inst->vsi->header_buf)
+ kfree(inst->vsi->header_buf);
+
+ if (inst->vsi)
+ kfree(inst->vsi);
+
+ kfree(inst);
+}
+
+static int vdec_mjpeg_get_fb(struct vdec_mjpeg_inst *inst, struct vdec_v4l2_buffer **out)
+{
+ return get_fb_from_queue(inst->ctx, out);
+}
+
+static void vdec_mjpeg_get_vf(struct vdec_mjpeg_inst *inst, struct vdec_v4l2_buffer **out)
+{
+ struct vframe_s *vf = NULL;
+ struct vdec_v4l2_buffer *fb = NULL;
+
+ vf = peek_video_frame(&inst->vfm);
+ if (!vf) {
+ aml_vcodec_debug(inst, "there is no vframe.");
+ *out = NULL;
+ return;
+ }
+
+ vf = get_video_frame(&inst->vfm);
+ if (!vf) {
+ aml_vcodec_debug(inst, "the vframe is invalid.");
+ *out = NULL;
+ return;
+ }
+
+ atomic_set(&vf->use_cnt, 1);
+
+ fb = (struct vdec_v4l2_buffer *)vf->v4l_mem_handle;
+ fb->vf_handle = (unsigned long)vf;
+ fb->status = FB_ST_DISPLAY;
+
+ *out = fb;
+
+ //pr_info("%s, %d\n", __func__, fb->base_y.bytes_used);
+ //dump_write(fb->base_y.vaddr, fb->base_y.bytes_used);
+ //dump_write(fb->base_c.vaddr, fb->base_c.bytes_used);
+
+ /* convert yuv format. */
+ //swap_uv(fb->base_c.vaddr, fb->base_c.size);
+}
+
+static int vdec_write_nalu(struct vdec_mjpeg_inst *inst,
+ u8 *buf, u32 size, u64 ts)
+{
+ int ret = 0;
+ struct aml_vdec_adapt *vdec = &inst->vdec;
+
+ ret = vdec_vframe_write(vdec, buf, size, ts);
+
+ return ret;
+}
+
+static int vdec_mjpeg_decode(unsigned long h_vdec, struct aml_vcodec_mem *bs,
+ u64 timestamp, bool *res_chg)
+{
+ struct vdec_mjpeg_inst *inst = (struct vdec_mjpeg_inst *)h_vdec;
+ struct aml_vdec_adapt *vdec = &inst->vdec;
+ struct stream_info *st;
+ u8 *buf;
+ u32 size;
+ int ret = 0;
+
+ /* bs NULL means flush decoder */
+ if (bs == NULL)
+ return 0;
+
+ buf = (u8 *)bs->vaddr;
+ size = bs->size;
+ st = (struct stream_info *)buf;
+
+ if (inst->ctx->is_drm_mode && (st->magic == DRMe || st->magic == DRMn))
+ ret = vdec_vbuf_write(vdec, st->m.buf, sizeof(st->m.drm));
+ else if (st->magic == NORe)
+ ret = vdec_vbuf_write(vdec, st->data, st->length);
+ else if (st->magic == NORn)
+ ret = vdec_write_nalu(inst, st->data, st->length, timestamp);
+ else if (inst->ctx->is_stream_mode)
+ ret = vdec_vbuf_write(vdec, buf, size);
+ else
+ ret = vdec_write_nalu(inst, buf, size, timestamp);
+
+ return ret;
+}
+
+/* Dispatch a get-parameter query to the matching helper. */
+static int vdec_mjpeg_get_param(unsigned long h_vdec,
+ enum vdec_get_param_type type, void *out)
+{
+ struct vdec_mjpeg_inst *inst = (struct vdec_mjpeg_inst *)h_vdec;
+ int ret = 0;
+
+ if (!inst) {
+ pr_err("the mjpeg inst of dec is invalid.\n");
+ return -1;
+ }
+
+ if (type == GET_PARAM_DISP_FRAME_BUFFER) {
+ vdec_mjpeg_get_vf(inst, out);
+ } else if (type == GET_PARAM_FREE_FRAME_BUFFER) {
+ ret = vdec_mjpeg_get_fb(inst, out);
+ } else if (type == GET_PARAM_PIC_INFO) {
+ get_pic_info(inst, out);
+ } else if (type == GET_PARAM_DPB_SIZE) {
+ get_dpb_size(inst, out);
+ } else if (type == GET_PARAM_CROP_INFO) {
+ get_crop_info(inst, out);
+ } else {
+ aml_vcodec_err(inst, "invalid get parameter type=%d", type);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static void set_param_pic_info(struct vdec_mjpeg_inst *inst,
+ struct aml_vdec_pic_infos *info)
+{
+ pr_info("---%s, %d\n", __func__, __LINE__);
+}
+
+/* Dispatch a set-parameter request; only picture info is handled today. */
+static int vdec_mjpeg_set_param(unsigned long h_vdec,
+ enum vdec_set_param_type type, void *in)
+{
+ struct vdec_mjpeg_inst *inst = (struct vdec_mjpeg_inst *)h_vdec;
+ int ret = 0;
+
+ if (!inst) {
+ pr_err("the mjpeg inst of dec is invalid.\n");
+ return -1;
+ }
+
+ if (type == SET_PARAM_PIC_INFO) {
+ set_param_pic_info(inst, in);
+ } else {
+ aml_vcodec_err(inst, "invalid set parameter type=%d", type);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+/* MJPEG decoder ops table consumed by the generic vdec_drv_if layer. */
+static struct vdec_common_if vdec_mjpeg_if = {
+ .init = vdec_mjpeg_init,
+ .probe = vdec_mjpeg_probe,
+ .decode = vdec_mjpeg_decode,
+ .get_param = vdec_mjpeg_get_param,
+ .set_param = vdec_mjpeg_set_param,
+ .deinit = vdec_mjpeg_deinit,
+};
+
+/* prototype before the definition keeps -Wmissing-prototypes quiet. */
+struct vdec_common_if *get_mjpeg_dec_comm_if(void);
+
+/* Expose the MJPEG ops table to the codec framework. */
+struct vdec_common_if *get_mjpeg_dec_comm_if(void)
+{
+ return &vdec_mjpeg_if;
+}
--- /dev/null
+/*
+* Copyright (C) 2017 Amlogic, Inc. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*
+* You should have received a copy of the GNU General Public License along
+* with this program; if not, write to the Free Software Foundation, Inc.,
+* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+*
+* Description:
+*/
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <uapi/linux/swab.h>
+#include "../vdec_drv_if.h"
+#include "../aml_vcodec_util.h"
+#include "../aml_vcodec_dec.h"
+#include "../aml_vcodec_adapt.h"
+#include "../vdec_drv_base.h"
+#include "../aml_vcodec_vfm.h"
+#include "aml_mpeg12_parser.h"
+
+#define NAL_TYPE(value) ((value) & 0x1F)
+#define HEADER_BUFFER_SIZE (32 * 1024)
+
+/**
+ * struct mpeg12_fb - mpeg12 decode frame buffer information
+ * @vdec_fb_va : virtual address of struct vdec_fb
+ * @y_fb_dma : dma address of Y frame buffer (luma)
+ * @c_fb_dma : dma address of C frame buffer (chroma)
+ * @poc : picture order count of frame buffer
+ * @reserved : for 8 bytes alignment
+ */
+struct mpeg12_fb {
+ uint64_t vdec_fb_va;
+ uint64_t y_fb_dma;
+ uint64_t c_fb_dma;
+ int32_t poc;
+ uint32_t reserved;
+};
+
+/**
+ * struct vdec_mpeg12_dec_info - decode information
+ * @dpb_sz : decoding picture buffer size
+ * @resolution_changed : resoltion change happen
+ * @reserved : for 8 bytes alignment
+ * @bs_dma : Input bit-stream buffer dma address
+ * @y_fb_dma : Y frame buffer dma address
+ * @c_fb_dma : C frame buffer dma address
+ * @vdec_fb_va : VDEC frame buffer struct virtual address
+ */
+struct vdec_mpeg12_dec_info {
+ uint32_t dpb_sz;
+ uint32_t resolution_changed;
+ uint32_t reserved;
+ uint64_t bs_dma;
+ uint64_t y_fb_dma;
+ uint64_t c_fb_dma;
+ uint64_t vdec_fb_va;
+};
+
+/**
+ * struct vdec_mpeg12_vsi - decode state shared with the decoder core
+ *
+ * @header_buf : buffer caching stream header data before decode
+ * @sps_size : cached sequence-header bytes (field names inherited
+ * from the h264 variant of this file — TODO confirm)
+ * @pps_size : cached picture-header bytes (see note above)
+ * @sei_size : cached user/extension-data bytes (see note above)
+ * @head_offset : current write offset into @header_buf
+ * @dec : decode information
+ * @pic : picture information
+ * @crop : crop information
+ * @is_combine : true when header and slice arrive in one chunk
+ * @nalu_pos : offset of the first payload byte in the stream
+ */
+struct vdec_mpeg12_vsi {
+ char *header_buf;
+ int sps_size;
+ int pps_size;
+ int sei_size;
+ int head_offset;
+ struct vdec_mpeg12_dec_info dec;
+ struct vdec_pic_info pic;
+ struct v4l2_rect crop;
+ bool is_combine;
+ int nalu_pos;
+ //struct mpeg12_param_sets ps;
+};
+
+/**
+ * struct vdec_mpeg12_inst - mpeg12 decoder instance
+ * @num_nalu : how many nalus be decoded
+ * @ctx : point to aml_vcodec_ctx
+ * @vsi : VPU shared information
+ */
+struct vdec_mpeg12_inst {
+ unsigned int num_nalu;
+ struct aml_vcodec_ctx *ctx;
+ struct aml_vdec_adapt vdec;
+ struct vdec_mpeg12_vsi *vsi;
+ struct vcodec_vfm_s vfm;
+};
+
+static void get_pic_info(struct vdec_mpeg12_inst *inst,
+ struct vdec_pic_info *pic)
+{
+ *pic = inst->vsi->pic;
+
+ aml_vcodec_debug(inst, "pic(%d, %d), buf(%d, %d)",
+ pic->visible_width, pic->visible_height,
+ pic->coded_width, pic->coded_height);
+ aml_vcodec_debug(inst, "Y(%d, %d), C(%d, %d)", pic->y_bs_sz,
+ pic->y_len_sz, pic->c_bs_sz, pic->c_len_sz);
+}
+
+/* Report the active (crop) rectangle computed at parse time. */
+static void get_crop_info(struct vdec_mpeg12_inst *inst, struct v4l2_rect *cr)
+{
+ *cr = inst->vsi->crop;
+
+ aml_vcodec_debug(inst, "l=%d, t=%d, w=%d, h=%d",
+ cr->left, cr->top, cr->width, cr->height);
+}
+
+static void get_dpb_size(struct vdec_mpeg12_inst *inst, unsigned int *dpb_sz)
+{
+ *dpb_sz = inst->vsi->dec.dpb_sz;
+ aml_vcodec_debug(inst, "sz=%d", *dpb_sz);
+}
+
+/*
+ * vdec_mpeg12_init() - allocate and initialize an mpeg1/2 decoder instance.
+ * @ctx: owning v4l2 codec context.
+ * @h_vdec: out parameter; receives the instance handle, 0 on failure.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int vdec_mpeg12_init(struct aml_vcodec_ctx *ctx, unsigned long *h_vdec)
+{
+ struct vdec_mpeg12_inst *inst = NULL;
+ int ret = -1;
+
+ inst = kzalloc(sizeof(*inst), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+
+ inst->ctx = ctx;
+
+ inst->vdec.video_type = VFORMAT_MPEG12;
+ inst->vdec.dev = ctx->dev->vpu_plat_dev;
+ inst->vdec.filp = ctx->dev->filp;
+ inst->vdec.ctx = ctx;
+
+ /* set play mode.*/
+ if (ctx->is_drm_mode)
+ inst->vdec.port.flag |= PORT_FLAG_DRM;
+
+ /* to enable mpeg12 hw.*/
+ inst->vdec.port.type = PORT_TYPE_VIDEO;
+
+ /* init vfm */
+ inst->vfm.ctx = ctx;
+ inst->vfm.ada_ctx = &inst->vdec;
+ vcodec_vfm_init(&inst->vfm);
+
+ ret = video_decoder_init(&inst->vdec);
+ if (ret) {
+ aml_vcodec_err(inst, "vdec_mpeg12 init err=%d", ret);
+ /* NOTE(review): vcodec_vfm_init() already ran; confirm whether
+ * vcodec_vfm_release() is also needed on the error paths.
+ */
+ goto error_free_inst;
+ }
+
+ /* probe info from the stream */
+ inst->vsi = kzalloc(sizeof(struct vdec_mpeg12_vsi), GFP_KERNEL);
+ if (!inst->vsi) {
+ ret = -ENOMEM;
+ goto error_free_inst;
+ }
+
+ /* alloc the header buffer to be used cache sps or spp etc.*/
+ inst->vsi->header_buf = kzalloc(HEADER_BUFFER_SIZE, GFP_KERNEL);
+ if (!inst->vsi->header_buf) {
+ /* fix: previously re-tested !inst->vsi, so a failed
+ * header_buf allocation went undetected.
+ */
+ ret = -ENOMEM;
+ goto error_free_vsi;
+ }
+
+ /* safe 1080p defaults until probe() parses the real stream info. */
+ inst->vsi->pic.visible_width = 1920;
+ inst->vsi->pic.visible_height = 1080;
+ inst->vsi->pic.coded_width = 1920;
+ inst->vsi->pic.coded_height = 1088;
+ inst->vsi->pic.y_bs_sz = 0;
+ inst->vsi->pic.y_len_sz = (1920 * 1088);
+ inst->vsi->pic.c_bs_sz = 0;
+ inst->vsi->pic.c_len_sz = (1920 * 1088 / 2);
+
+ aml_vcodec_debug(inst, "mpeg12 Instance >> %p", inst);
+
+ ctx->ada_ctx = &inst->vdec;
+ *h_vdec = (unsigned long)inst;
+
+ //dump_init();
+
+ return 0;
+
+error_free_vsi:
+ kfree(inst->vsi);
+error_free_inst:
+ kfree(inst);
+ *h_vdec = 0;
+
+ return ret;
+}
+
+/*
+ * fill_vdec_params() - publish parsed stream geometry to the shared vsi.
+ * @inst: mpeg12 decoder instance.
+ * @dec_ps: parser output with visible and coded dimensions.
+ */
+static void fill_vdec_params(struct vdec_mpeg12_inst *inst,
+ struct MpvParseContext *dec_ps)
+{
+ struct vdec_pic_info *pic = &inst->vsi->pic;
+ struct vdec_mpeg12_dec_info *dec = &inst->vsi->dec;
+ struct v4l2_rect *rect = &inst->vsi->crop;
+
+ /* fill visible area size that be used for EGL. */
+ pic->visible_width = dec_ps->width;
+ pic->visible_height = dec_ps->height;
+
+ /* calc visible area. */
+ rect->left = 0;
+ rect->top = 0;
+ rect->width = pic->visible_width;
+ rect->height = pic->visible_height;
+
+ /* config canvas size that be used for decoder. */
+ pic->coded_width = ALIGN(dec_ps->coded_width, 64);
+ pic->coded_height = ALIGN(dec_ps->coded_height, 64); /* fixed stray ';;' */
+
+ pic->y_len_sz = pic->coded_width * pic->coded_height;
+ pic->c_len_sz = pic->y_len_sz >> 1;
+
+ /* calc DPB size; fixed count until a real heuristic is wired in. */
+ dec->dpb_sz = 9;//refer_buffer_num(sps->level_idc, poc_cnt, mb_w, mb_h);
+
+ pr_info("[%d] The stream infos, coded:(%d x %d), visible:(%d x %d), DPB: %d\n",
+ inst->ctx->id, pic->coded_width, pic->coded_height,
+ pic->visible_width, pic->visible_height, dec->dpb_sz);
+}
+
+/* Parse stream headers and, on success, publish picture params to vsi. */
+static int stream_parse(struct vdec_mpeg12_inst *inst, u8 *buf, u32 size)
+{
+ struct mpeg12_param_sets *ps;
+ int ret;
+
+ ps = kzalloc(sizeof(*ps), GFP_KERNEL);
+ if (!ps)
+ return -ENOMEM;
+
+ ret = mpeg12_decode_extradata_ps(buf, size, ps);
+ if (ret) {
+ pr_err("parse extra data failed. err: %d\n", ret);
+ } else if (ps->head_parsed) {
+ fill_vdec_params(inst, &ps->dec_ps);
+ ret = 0;
+ } else {
+ ret = -1;
+ }
+
+ kfree(ps);
+
+ return ret;
+}
+
+static int vdec_mpeg12_probe(unsigned long h_vdec,
+ struct aml_vcodec_mem *bs, void *out)
+{
+ struct vdec_mpeg12_inst *inst =
+ (struct vdec_mpeg12_inst *)h_vdec;
+ struct stream_info *st;
+ u8 *buf = (u8 *)bs->vaddr;
+ u32 size = bs->size;
+ int ret = 0;
+
+ st = (struct stream_info *)buf;
+ if (inst->ctx->is_drm_mode && (st->magic == DRMe || st->magic == DRMn))
+ return 0;
+
+ if (st->magic == NORe || st->magic == NORn)
+ ret = stream_parse(inst, st->data, st->length);
+ else
+ ret = stream_parse(inst, buf, size);
+
+ return ret;
+}
+
+/*
+ * vdec_mpeg12_deinit() - tear down an mpeg1/2 decoder instance and free
+ * all memory owned by it.
+ * @h_vdec: instance handle returned by vdec_mpeg12_init(); may be 0.
+ */
+static void vdec_mpeg12_deinit(unsigned long h_vdec)
+{
+ struct vdec_mpeg12_inst *inst = (struct vdec_mpeg12_inst *)h_vdec;
+
+ if (!inst)
+ return;
+
+ aml_vcodec_debug_enter(inst);
+
+ video_decoder_release(&inst->vdec);
+
+ vcodec_vfm_release(&inst->vfm);
+
+ //dump_deinit();
+
+ /* kfree(NULL) is a no-op, so the old "if (ptr) kfree(ptr)" guards
+ * were redundant; only the header_buf dereference needs a check.
+ */
+ if (inst->vsi)
+ kfree(inst->vsi->header_buf);
+ kfree(inst->vsi);
+
+ kfree(inst);
+}
+
+static int vdec_mpeg12_get_fb(struct vdec_mpeg12_inst *inst, struct vdec_v4l2_buffer **out)
+{
+ return get_fb_from_queue(inst->ctx, out);
+}
+
+/*
+ * vdec_mpeg12_get_vf() - fetch the next decoded frame from the vframe
+ * manager and hand back the v4l2 buffer it was decoded into.
+ * @inst: mpeg12 decoder instance.
+ * @out: set to the display-ready buffer, or NULL when no frame is ready.
+ */
+static void vdec_mpeg12_get_vf(struct vdec_mpeg12_inst *inst, struct vdec_v4l2_buffer **out)
+{
+ struct vframe_s *vf = NULL;
+ struct vdec_v4l2_buffer *fb = NULL;
+
+ /* peek first so an empty queue is reported without dequeuing. */
+ vf = peek_video_frame(&inst->vfm);
+ if (!vf) {
+ aml_vcodec_debug(inst, "there is no vframe.");
+ *out = NULL;
+ return;
+ }
+
+ /* actually dequeue the frame that was just peeked. */
+ vf = get_video_frame(&inst->vfm);
+ if (!vf) {
+ aml_vcodec_debug(inst, "the vframe is avalid.");
+ *out = NULL;
+ return;
+ }
+
+ /* mark the vframe as held by one consumer. */
+ atomic_set(&vf->use_cnt, 1);
+
+ /* v4l_mem_handle stores the vdec_v4l2_buffer backing this vframe. */
+ fb = (struct vdec_v4l2_buffer *)vf->v4l_mem_handle;
+ fb->vf_handle = (unsigned long)vf;
+ fb->status = FB_ST_DISPLAY;
+
+ *out = fb;
+
+ //pr_info("%s, %d\n", __func__, fb->base_y.bytes_used);
+ //dump_write(fb->base_y.va, fb->base_y.bytes_used);
+ //dump_write(fb->base_c.va, fb->base_c.bytes_used);
+
+ /* convert yuv format. */
+ //swap_uv(fb->base_c.va, fb->base_c.size);
+}
+
+static int vdec_write_nalu(struct vdec_mpeg12_inst *inst,
+ u8 *buf, u32 size, u64 ts)
+{
+ int ret = 0;
+ struct aml_vdec_adapt *vdec = &inst->vdec;
+
+ ret = vdec_vframe_write(vdec, buf, size, ts);
+
+ return ret;
+}
+
+/*
+ * vdec_mpeg12_decode() - route one bitstream chunk to the decoder.
+ * @h_vdec: opaque handle holding the vdec_mpeg12_inst.
+ * @bs: input chunk; NULL requests a decoder flush.
+ * @timestamp: presentation timestamp for frame-mode writes.
+ * @res_chg: resolution-change flag (not written by this function).
+ *
+ * The write path is picked from the stream_info magic prepended by the
+ * userspace wrapper: DRM payloads and "NORe" chunks go through the
+ * stream-buffer path, "NORn" chunks through the frame path with a pts.
+ */
+static int vdec_mpeg12_decode(unsigned long h_vdec, struct aml_vcodec_mem *bs,
+ u64 timestamp, bool *res_chg)
+{
+ struct vdec_mpeg12_inst *inst = (struct vdec_mpeg12_inst *)h_vdec;
+ struct aml_vdec_adapt *vdec = &inst->vdec;
+ struct stream_info *st;
+ u8 *buf;
+ u32 size;
+ int ret = 0;
+
+ /* bs NULL means flush decoder */
+ if (bs == NULL)
+ return 0;
+
+ buf = (u8 *)bs->vaddr;
+ size = bs->size;
+ st = (struct stream_info *)buf;
+
+ /* NOTE(review): st is read before size is validated against
+ * sizeof(*st) — confirm callers guarantee a minimum chunk size.
+ */
+ if (inst->ctx->is_drm_mode && (st->magic == DRMe || st->magic == DRMn))
+ ret = vdec_vbuf_write(vdec, st->m.buf, sizeof(st->m.drm));
+ else if (st->magic == NORe)
+ ret = vdec_vbuf_write(vdec, st->data, st->length);
+ else if (st->magic == NORn)
+ ret = vdec_write_nalu(inst, st->data, st->length, timestamp);
+ else if (inst->ctx->is_stream_mode)
+ ret = vdec_vbuf_write(vdec, buf, size);
+ else
+ ret = vdec_write_nalu(inst, buf, size, timestamp);
+
+ return ret;
+}
+
+static int vdec_mpeg12_get_param(unsigned long h_vdec,
+ enum vdec_get_param_type type, void *out)
+{
+ int ret = 0;
+ struct vdec_mpeg12_inst *inst = (struct vdec_mpeg12_inst *)h_vdec;
+
+ if (!inst) {
+ pr_err("the mpeg12 inst of dec is invalid.\n");
+ return -1;
+ }
+
+ switch (type) {
+ case GET_PARAM_DISP_FRAME_BUFFER:
+ vdec_mpeg12_get_vf(inst, out);
+ break;
+
+ case GET_PARAM_FREE_FRAME_BUFFER:
+ ret = vdec_mpeg12_get_fb(inst, out);
+ break;
+
+ case GET_PARAM_PIC_INFO:
+ get_pic_info(inst, out);
+ break;
+
+ case GET_PARAM_DPB_SIZE:
+ get_dpb_size(inst, out);
+ break;
+
+ case GET_PARAM_CROP_INFO:
+ get_crop_info(inst, out);
+ break;
+
+ default:
+ aml_vcodec_err(inst, "invalid get parameter type=%d", type);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static void set_param_pic_info(struct vdec_mpeg12_inst *inst,
+ struct aml_vdec_pic_infos *info)
+{
+ pr_info("---%s, %d\n", __func__, __LINE__);
+}
+
+static int vdec_mpeg12_set_param(unsigned long h_vdec,
+ enum vdec_set_param_type type, void *in)
+{
+ int ret = 0;
+ struct vdec_mpeg12_inst *inst = (struct vdec_mpeg12_inst *)h_vdec;
+
+ if (!inst) {
+ pr_err("the mpeg12 inst of dec is invalid.\n");
+ return -1;
+ }
+
+ switch (type) {
+ case SET_PARAM_PIC_INFO:
+ set_param_pic_info(inst, in);
+ break;
+
+ default:
+ aml_vcodec_err(inst, "invalid set parameter type=%d", type);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static struct vdec_common_if vdec_mpeg12_if = {
+ .init = vdec_mpeg12_init,
+ .probe = vdec_mpeg12_probe,
+ .decode = vdec_mpeg12_decode,
+ .get_param = vdec_mpeg12_get_param,
+ .set_param = vdec_mpeg12_set_param,
+ .deinit = vdec_mpeg12_deinit,
+};
+
+struct vdec_common_if *get_mpeg12_dec_comm_if(void);
+
+struct vdec_common_if *get_mpeg12_dec_comm_if(void)
+{
+ return &vdec_mpeg12_if;
+}
--- /dev/null
+/*
+* Copyright (C) 2017 Amlogic, Inc. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*
+* You should have received a copy of the GNU General Public License along
+* with this program; if not, write to the Free Software Foundation, Inc.,
+* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+*
+* Description:
+*/
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <uapi/linux/swab.h>
+#include "../vdec_drv_if.h"
+#include "../aml_vcodec_util.h"
+#include "../aml_vcodec_dec.h"
+#include "../aml_vcodec_adapt.h"
+#include "../vdec_drv_base.h"
+#include "../aml_vcodec_vfm.h"
+#include "aml_mpeg4_parser.h"
+
+#define NAL_TYPE(value) ((value) & 0x1F)
+#define HEADER_BUFFER_SIZE (32 * 1024)
+
+/**
+ * struct mpeg4_fb - mpeg4 decode frame buffer information
+ * @vdec_fb_va : virtual address of struct vdec_fb
+ * @y_fb_dma : dma address of Y frame buffer (luma)
+ * @c_fb_dma : dma address of C frame buffer (chroma)
+ * @poc : picture order count of frame buffer
+ * @reserved : for 8 bytes alignment
+ */
+struct mpeg4_fb {
+ uint64_t vdec_fb_va;
+ uint64_t y_fb_dma;
+ uint64_t c_fb_dma;
+ int32_t poc;
+ uint32_t reserved;
+};
+
+/**
+ * struct vdec_mpeg4_dec_info - decode information
+ * @dpb_sz : decoding picture buffer size
+ * @resolution_changed : resoltion change happen
+ * @reserved : for 8 bytes alignment
+ * @bs_dma : Input bit-stream buffer dma address
+ * @y_fb_dma : Y frame buffer dma address
+ * @c_fb_dma : C frame buffer dma address
+ * @vdec_fb_va : VDEC frame buffer struct virtual address
+ */
+struct vdec_mpeg4_dec_info {
+ uint32_t dpb_sz;
+ uint32_t resolution_changed;
+ uint32_t reserved;
+ uint64_t bs_dma;
+ uint64_t y_fb_dma;
+ uint64_t c_fb_dma;
+ uint64_t vdec_fb_va;
+};
+
+/**
+ * struct vdec_mpeg4_vsi - decode state shared with the decoder core
+ *
+ * @header_buf : buffer caching stream header data before decode
+ * @sps_size : cached sequence-header bytes (field names inherited
+ * from the h264 variant of this file — TODO confirm)
+ * @pps_size : cached picture-header bytes (see note above)
+ * @sei_size : cached user/extension-data bytes (see note above)
+ * @head_offset : current write offset into @header_buf
+ * @dec : decode information
+ * @pic : picture information
+ * @crop : crop information
+ * @is_combine : true when header and slice arrive in one chunk
+ * @nalu_pos : offset of the first payload byte in the stream
+ */
+struct vdec_mpeg4_vsi {
+ char *header_buf;
+ int sps_size;
+ int pps_size;
+ int sei_size;
+ int head_offset;
+ struct vdec_mpeg4_dec_info dec;
+ struct vdec_pic_info pic;
+ struct v4l2_rect crop;
+ bool is_combine;
+ int nalu_pos;
+ //struct mpeg4ParamSets ps;
+};
+
+/**
+ * struct vdec_mpeg4_inst - mpeg4 decoder instance
+ * @num_nalu : how many nalus be decoded
+ * @ctx : point to aml_vcodec_ctx
+ * @vsi : VPU shared information
+ */
+struct vdec_mpeg4_inst {
+ unsigned int num_nalu;
+ struct aml_vcodec_ctx *ctx;
+ struct aml_vdec_adapt vdec;
+ struct vdec_mpeg4_vsi *vsi;
+ struct vcodec_vfm_s vfm;
+};
+
+static void get_pic_info(struct vdec_mpeg4_inst *inst,
+ struct vdec_pic_info *pic)
+{
+ *pic = inst->vsi->pic;
+
+ aml_vcodec_debug(inst, "pic(%d, %d), buf(%d, %d)",
+ pic->visible_width, pic->visible_height,
+ pic->coded_width, pic->coded_height);
+ aml_vcodec_debug(inst, "Y(%d, %d), C(%d, %d)", pic->y_bs_sz,
+ pic->y_len_sz, pic->c_bs_sz, pic->c_len_sz);
+}
+
+static void get_crop_info(struct vdec_mpeg4_inst *inst, struct v4l2_rect *cr)
+{
+ cr->left = inst->vsi->crop.left;
+ cr->top = inst->vsi->crop.top;
+ cr->width = inst->vsi->crop.width;
+ cr->height = inst->vsi->crop.height;
+
+ aml_vcodec_debug(inst, "l=%d, t=%d, w=%d, h=%d",
+ cr->left, cr->top, cr->width, cr->height);
+}
+
+/* Report the DPB (reference buffer) count to the v4l2 layer. */
+static void get_dpb_size(struct vdec_mpeg4_inst *inst, unsigned int *dpb_sz)
+{
+ /* NOTE(review): hard-coded to 9 — the parsed inst->vsi->dec.dpb_sz
+ * is deliberately bypassed here; confirm before relying on it.
+ */
+ *dpb_sz = 9;//inst->vsi->dec.dpb_sz;
+ aml_vcodec_debug(inst, "sz=%d", *dpb_sz);
+}
+
+/*
+ * vdec_mpeg4_init() - allocate and initialize an mpeg4 decoder instance.
+ * @ctx: owning v4l2 codec context.
+ * @h_vdec: out parameter; receives the instance handle, 0 on failure.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int vdec_mpeg4_init(struct aml_vcodec_ctx *ctx, unsigned long *h_vdec)
+{
+ struct vdec_mpeg4_inst *inst = NULL;
+ int ret = -1;
+
+ inst = kzalloc(sizeof(*inst), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+
+ inst->ctx = ctx;
+
+ inst->vdec.video_type = VFORMAT_MPEG4;
+ inst->vdec.format = VIDEO_DEC_FORMAT_MPEG4_5;
+ inst->vdec.dev = ctx->dev->vpu_plat_dev;
+ inst->vdec.filp = ctx->dev->filp;
+ inst->vdec.ctx = ctx;
+
+ /* set play mode.*/
+ if (ctx->is_drm_mode)
+ inst->vdec.port.flag |= PORT_FLAG_DRM;
+
+ /* to enable mpeg4 hw.*/
+ inst->vdec.port.type = PORT_TYPE_VIDEO;
+
+ /* init vfm */
+ inst->vfm.ctx = ctx;
+ inst->vfm.ada_ctx = &inst->vdec;
+ vcodec_vfm_init(&inst->vfm);
+
+ ret = video_decoder_init(&inst->vdec);
+ if (ret) {
+ aml_vcodec_err(inst, "vdec_mpeg4 init err=%d", ret);
+ /* NOTE(review): vcodec_vfm_init() already ran; confirm whether
+ * vcodec_vfm_release() is also needed on the error paths.
+ */
+ goto error_free_inst;
+ }
+
+ /* probe info from the stream */
+ inst->vsi = kzalloc(sizeof(struct vdec_mpeg4_vsi), GFP_KERNEL);
+ if (!inst->vsi) {
+ ret = -ENOMEM;
+ goto error_free_inst;
+ }
+
+ /* alloc the header buffer to be used cache sps or spp etc.*/
+ inst->vsi->header_buf = kzalloc(HEADER_BUFFER_SIZE, GFP_KERNEL);
+ if (!inst->vsi->header_buf) {
+ /* fix: previously re-tested !inst->vsi, so a failed
+ * header_buf allocation went undetected.
+ */
+ ret = -ENOMEM;
+ goto error_free_vsi;
+ }
+
+ /* safe 1080p defaults until probe() parses the real stream info. */
+ inst->vsi->pic.visible_width = 1920;
+ inst->vsi->pic.visible_height = 1080;
+ inst->vsi->pic.coded_width = 1920;
+ inst->vsi->pic.coded_height = 1088;
+ inst->vsi->pic.y_bs_sz = 0;
+ inst->vsi->pic.y_len_sz = (1920 * 1088);
+ inst->vsi->pic.c_bs_sz = 0;
+ inst->vsi->pic.c_len_sz = (1920 * 1088 / 2);
+
+ aml_vcodec_debug(inst, "mpeg4 Instance >> %p", inst);
+
+ ctx->ada_ctx = &inst->vdec;
+ *h_vdec = (unsigned long)inst;
+
+ //dump_init();
+
+ return 0;
+
+error_free_vsi:
+ kfree(inst->vsi);
+error_free_inst:
+ kfree(inst);
+ *h_vdec = 0;
+
+ return ret;
+}
+
+#if 0
+static int refer_buffer_num(int level_idc, int poc_cnt,
+ int mb_width, int mb_height)
+{
+ return 20;
+}
+#endif
+
+static void fill_vdec_params(struct vdec_mpeg4_inst *inst,
+ struct mpeg4_dec_param *dec_ps)
+{
+ struct vdec_pic_info *pic = &inst->vsi->pic;
+ struct vdec_mpeg4_dec_info *dec = &inst->vsi->dec;
+ struct v4l2_rect *rect = &inst->vsi->crop;
+
+ /* fill visible area size that be used for EGL. */
+ pic->visible_width = dec_ps->m.width;
+ pic->visible_height = dec_ps->m.height;
+
+ /* calc visible ares. */
+ rect->left = 0;
+ rect->top = 0;
+ rect->width = pic->visible_width;
+ rect->height = pic->visible_height;
+
+ /* config canvas size that be used for decoder. */
+ pic->coded_width = ALIGN(dec_ps->m.width, 64);
+ pic->coded_height = ALIGN(dec_ps->m.height, 64);
+
+ pic->y_len_sz = pic->coded_width * pic->coded_height;
+ pic->c_len_sz = pic->y_len_sz >> 1;
+
+ /* calc DPB size */
+ dec->dpb_sz = 9;//refer_buffer_num(sps->level_idc, poc_cnt, mb_w, mb_h);
+
+ pr_info("[%d] The stream infos, coded:(%d x %d), visible:(%d x %d), DPB: %d\n",
+ inst->ctx->id, pic->coded_width, pic->coded_height,
+ pic->visible_width, pic->visible_height, dec->dpb_sz);
+}
+
+static int stream_parse(struct vdec_mpeg4_inst *inst, u8 *buf, u32 size)
+{
+ int ret = 0;
+ struct mpeg4_param_sets *ps = NULL;
+
+ ps = kzalloc(sizeof(struct mpeg4_param_sets), GFP_KERNEL);
+ if (ps == NULL)
+ return -ENOMEM;
+
+ ret = mpeg4_decode_extradata_ps(buf, size, ps);
+ if (ret) {
+ pr_err("parse extra data failed. err: %d\n", ret);
+ goto out;
+ }
+
+ if (ps->head_parsed)
+ fill_vdec_params(inst, &ps->dec_ps);
+
+ ret = ps->head_parsed ? 0 : -1;
+out:
+ kfree(ps);
+
+ return ret;
+}
+
+static int vdec_mpeg4_probe(unsigned long h_vdec,
+ struct aml_vcodec_mem *bs, void *out)
+{
+ struct vdec_mpeg4_inst *inst =
+ (struct vdec_mpeg4_inst *)h_vdec;
+ struct stream_info *st;
+ u8 *buf = (u8 *)bs->vaddr;
+ u32 size = bs->size;
+ int ret = 0;
+
+ st = (struct stream_info *)buf;
+ if (inst->ctx->is_drm_mode && (st->magic == DRMe || st->magic == DRMn))
+ return 0;
+
+ if (st->magic == NORe || st->magic == NORn)
+ ret = stream_parse(inst, st->data, st->length);
+ else
+ ret = stream_parse(inst, buf, size);
+
+ return ret;
+}
+
+/*
+ * vdec_mpeg4_deinit() - tear down an mpeg4 decoder instance and free
+ * all memory owned by it.
+ * @h_vdec: instance handle returned by vdec_mpeg4_init(); may be 0.
+ */
+static void vdec_mpeg4_deinit(unsigned long h_vdec)
+{
+ struct vdec_mpeg4_inst *inst = (struct vdec_mpeg4_inst *)h_vdec;
+
+ if (!inst)
+ return;
+
+ aml_vcodec_debug_enter(inst);
+
+ video_decoder_release(&inst->vdec);
+
+ vcodec_vfm_release(&inst->vfm);
+
+ //dump_deinit();
+
+ /* kfree(NULL) is a no-op, so the old "if (ptr) kfree(ptr)" guards
+ * were redundant; only the header_buf dereference needs a check.
+ */
+ if (inst->vsi)
+ kfree(inst->vsi->header_buf);
+ kfree(inst->vsi);
+
+ kfree(inst);
+}
+
+static int vdec_mpeg4_get_fb(struct vdec_mpeg4_inst *inst, struct vdec_v4l2_buffer **out)
+{
+ return get_fb_from_queue(inst->ctx, out);
+}
+
+static void vdec_mpeg4_get_vf(struct vdec_mpeg4_inst *inst, struct vdec_v4l2_buffer **out)
+{
+ struct vframe_s *vf = NULL;
+ struct vdec_v4l2_buffer *fb = NULL;
+
+ vf = peek_video_frame(&inst->vfm);
+ if (!vf) {
+ aml_vcodec_debug(inst, "there is no vframe.");
+ *out = NULL;
+ return;
+ }
+
+ vf = get_video_frame(&inst->vfm);
+ if (!vf) {
+ aml_vcodec_debug(inst, "the vframe is avalid.");
+ *out = NULL;
+ return;
+ }
+
+ atomic_set(&vf->use_cnt, 1);
+
+ fb = (struct vdec_v4l2_buffer *)vf->v4l_mem_handle;
+ fb->vf_handle = (unsigned long)vf;
+ fb->status = FB_ST_DISPLAY;
+
+ *out = fb;
+
+ //pr_info("%s, %d\n", __func__, fb->base_y.bytes_used);
+ //dump_write(fb->base_y.va, fb->base_y.bytes_used);
+ //dump_write(fb->base_c.va, fb->base_c.bytes_used);
+
+ /* convert yuv format. */
+ //swap_uv(fb->base_c.va, fb->base_c.size);
+}
+
+static int vdec_write_nalu(struct vdec_mpeg4_inst *inst,
+ u8 *buf, u32 size, u64 ts)
+{
+ int ret = 0;
+ struct aml_vdec_adapt *vdec = &inst->vdec;
+
+ ret = vdec_vframe_write(vdec, buf, size, ts);
+
+ return ret;
+}
+
+static int vdec_mpeg4_decode(unsigned long h_vdec, struct aml_vcodec_mem *bs,
+ u64 timestamp, bool *res_chg)
+{
+ struct vdec_mpeg4_inst *inst = (struct vdec_mpeg4_inst *)h_vdec;
+ struct aml_vdec_adapt *vdec = &inst->vdec;
+ struct stream_info *st;
+ u8 *buf;
+ u32 size;
+ int ret = 0;
+
+ /* bs NULL means flush decoder */
+ if (bs == NULL)
+ return 0;
+
+ buf = (u8 *)bs->vaddr;
+ size = bs->size;
+ st = (struct stream_info *)buf;
+
+ if (inst->ctx->is_drm_mode && (st->magic == DRMe || st->magic == DRMn))
+ ret = vdec_vbuf_write(vdec, st->m.buf, sizeof(st->m.drm));
+ else if (st->magic == NORe)
+ ret = vdec_vbuf_write(vdec, st->data, st->length);
+ else if (st->magic == NORn)
+ ret = vdec_write_nalu(inst, st->data, st->length, timestamp);
+ else if (inst->ctx->is_stream_mode)
+ ret = vdec_vbuf_write(vdec, buf, size);
+ else
+ ret = vdec_write_nalu(inst, buf, size, timestamp);
+
+ return ret;
+}
+
+static int vdec_mpeg4_get_param(unsigned long h_vdec,
+ enum vdec_get_param_type type, void *out)
+{
+ int ret = 0;
+ struct vdec_mpeg4_inst *inst = (struct vdec_mpeg4_inst *)h_vdec;
+
+ if (!inst) {
+ pr_err("the mpeg4 inst of dec is invalid.\n");
+ return -1;
+ }
+
+ switch (type) {
+ case GET_PARAM_DISP_FRAME_BUFFER:
+ vdec_mpeg4_get_vf(inst, out);
+ break;
+
+ case GET_PARAM_FREE_FRAME_BUFFER:
+ ret = vdec_mpeg4_get_fb(inst, out);
+ break;
+
+ case GET_PARAM_PIC_INFO:
+ get_pic_info(inst, out);
+ break;
+
+ case GET_PARAM_DPB_SIZE:
+ get_dpb_size(inst, out);
+ break;
+
+ case GET_PARAM_CROP_INFO:
+ get_crop_info(inst, out);
+ break;
+
+ default:
+ aml_vcodec_err(inst, "invalid get parameter type=%d", type);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static void set_param_pic_info(struct vdec_mpeg4_inst *inst,
+ struct aml_vdec_pic_infos *info)
+{
+ pr_info("---%s, %d\n", __func__, __LINE__);
+}
+
+static int vdec_mpeg4_set_param(unsigned long h_vdec,
+ enum vdec_set_param_type type, void *in)
+{
+ int ret = 0;
+ struct vdec_mpeg4_inst *inst = (struct vdec_mpeg4_inst *)h_vdec;
+
+ if (!inst) {
+ pr_err("the mpeg4 inst of dec is invalid.\n");
+ return -1;
+ }
+
+ switch (type) {
+ case SET_PARAM_PIC_INFO:
+ set_param_pic_info(inst, in);
+ break;
+
+ default:
+ aml_vcodec_err(inst, "invalid set parameter type=%d", type);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static struct vdec_common_if vdec_mpeg4_if = {
+ .init = vdec_mpeg4_init,
+ .probe = vdec_mpeg4_probe,
+ .decode = vdec_mpeg4_decode,
+ .get_param = vdec_mpeg4_get_param,
+ .set_param = vdec_mpeg4_set_param,
+ .deinit = vdec_mpeg4_deinit,
+};
+
+struct vdec_common_if *get_mpeg4_dec_comm_if(void);
+
+struct vdec_common_if *get_mpeg4_dec_comm_if(void)
+{
+ return &vdec_mpeg4_if;
+}
#define NAL_TYPE(value) ((value) & 0x1F)
#define HEADER_BUFFER_SIZE (32 * 1024)
+#define SYNC_CODE (0x498342)
+extern int vp9_need_prefix;
bool need_trigger;
int dump_cnt = 0;
int head_offset;
struct vdec_vp9_dec_info dec;
struct vdec_pic_info pic;
+ struct vdec_pic_info cur_pic;
struct v4l2_rect crop;
bool is_combine;
int nalu_pos;
- struct vp9_head_info_t head;
+ struct vp9_param_sets ps;
};
/**
struct aml_vdec_adapt vdec;
struct vdec_vp9_vsi *vsi;
struct vcodec_vfm_s vfm;
+ struct completion comp;
};
-struct vp9_superframe_split {
- /*in data*/
- u8 *data;
- u32 data_size;
-
- /*out data*/
- int nb_frames;
- int size;
- int next_frame;
- u32 next_frame_offset;
- int sizes[8];
-};
-
-#if 1
-#define DUMP_FILE_NAME "/data/dump/dump.tmp"
-static struct file *filp;
-static loff_t file_pos;
-
-void dump_write(const char __user *buf, size_t count)
-{
- mm_segment_t old_fs;
-
- if (!filp)
- return;
-
- old_fs = get_fs();
- set_fs(KERNEL_DS);
-
- if (count != vfs_write(filp, buf, count, &file_pos))
- pr_err("Failed to write file\n");
-
- set_fs(old_fs);
-}
-
-void dump_init(void)
-{
- filp = filp_open(DUMP_FILE_NAME, O_CREAT | O_RDWR, 0644);
- if (IS_ERR(filp)) {
- pr_err("open dump file failed\n");
- filp = NULL;
- }
-}
-
-void dump_deinit(void)
-{
- if (filp) {
- filp_close(filp, current->files);
- filp = NULL;
- file_pos = 0;
- }
-}
-
-void swap_uv(void *uv, int size)
-{
- int i;
- __u16 *p = uv;
-
- size /= 2;
-
- for (i = 0; i < size; i++, p++)
- *p = __swab16(*p);
-}
-#endif
+static int vdec_write_nalu(struct vdec_vp9_inst *inst,
+ u8 *buf, u32 size, u64 ts);
static void get_pic_info(struct vdec_vp9_inst *inst,
struct vdec_pic_info *pic)
static void get_dpb_size(struct vdec_vp9_inst *inst, unsigned int *dpb_sz)
{
- *dpb_sz = 20;//inst->vsi->dec.dpb_sz;
+ *dpb_sz = inst->vsi->dec.dpb_sz;
aml_vcodec_debug(inst, "sz=%d", *dpb_sz);
}
inst->ctx = ctx;
- inst->vdec.format = VFORMAT_VP9;
+ inst->vdec.video_type = VFORMAT_VP9;
inst->vdec.dev = ctx->dev->vpu_plat_dev;
inst->vdec.filp = ctx->dev->filp;
inst->vdec.ctx = ctx;
inst->vsi->pic.c_bs_sz = 0;
inst->vsi->pic.c_len_sz = (1920 * 1088 / 2);
+ init_completion(&inst->comp);
+
aml_vcodec_debug(inst, "vp9 Instance >> %p", inst);
ctx->ada_ctx = &inst->vdec;
goto error_free_inst;
}
- dump_init();
+ //dump_init();
return 0;
}
#endif
-static void fill_vdec_params(struct vdec_vp9_inst *inst)
+static void fill_vdec_params(struct vdec_vp9_inst *inst,
+ struct VP9Context *vp9_ctx)
{
struct vdec_pic_info *pic = &inst->vsi->pic;
struct vdec_vp9_dec_info *dec = &inst->vsi->dec;
struct v4l2_rect *rect = &inst->vsi->crop;
- unsigned int mb_w = 0, mb_h = 0, width, height;
- //unsigned int crop_unit_x = 0, crop_unit_y = 0;
- //unsigned int poc_cnt = 0;
-
- /* calc width & height. */
- width = 1920;
- height = 1080;
/* fill visible area size that be used for EGL. */
- pic->visible_width = width;
- pic->visible_height = height;
+ pic->visible_width = vp9_ctx->render_width;
+ pic->visible_height = vp9_ctx->render_height;
/* calc visible ares. */
rect->left = 0;
rect->height = pic->visible_height;
/* config canvas size that be used for decoder. */
- pic->coded_width = ALIGN(mb_w, 4) << 4;
- pic->coded_height = ALIGN(mb_h, 4) << 4;
-
- pic->coded_width = 1920;
- pic->coded_height = 1088;//temp
+ pic->coded_width = ALIGN(vp9_ctx->width, 32);
+ pic->coded_height = ALIGN(vp9_ctx->height, 32);
pic->y_len_sz = pic->coded_width * pic->coded_height;
pic->c_len_sz = pic->y_len_sz >> 1;
/* calc DPB size */
- dec->dpb_sz = 20;//refer_buffer_num(sps->level_idc, poc_cnt, mb_w, mb_h);
+ dec->dpb_sz = 5;//refer_buffer_num(sps->level_idc, poc_cnt, mb_w, mb_h);
- pr_info("[%d] The stream infos, coded:(%d x %d), visible:(%d x %d), DPB: %d\n",
+ aml_vcodec_debug(inst, "[%d] The stream infos, coded:(%d x %d), visible:(%d x %d), DPB: %d\n",
inst->ctx->id, pic->coded_width, pic->coded_height,
pic->visible_width, pic->visible_height, dec->dpb_sz);
}
-#if 0
-static int vp9_parse_nal_header(u32 val)
+static int stream_parse_by_ucode(struct vdec_vp9_inst *inst, u8 *buf, u32 size)
{
- if (val & 0x80) {
- pr_err("the nal data is invalid.\n");
- return -1;
- }
-
- return (val & 0x7f) >> 1;
-}
-#endif
+ int ret = 0;
-static void vp9_parse(struct vp9_head_info_t *head, u8 *buf, u32 size)
-{
- //int ret = -1;
- //u8 *p = buf;
+ ret = vdec_write_nalu(inst, buf, size, 0);
+ if (ret < 0) {
+ pr_err("write frame data failed. err: %d\n", ret);
+ return ret;
+ }
- head->parsed = true;
+ /* wait ucode parse ending. */
+ wait_for_completion_timeout(&inst->comp,
+ msecs_to_jiffies(1000));
- return;
+ return inst->vsi->dec.dpb_sz ? 0 : -1;
}
+/*
+ * Parse the stream header on the host CPU using the aml vp9 parser.
+ * On success the picture/DPB parameters are copied into the instance
+ * via fill_vdec_params(). Returns 0 when the header was parsed,
+ * -ENOMEM on allocation failure, negative otherwise.
+ */
static int stream_parse(struct vdec_vp9_inst *inst, u8 *buf, u32 size)
{
- //struct vp9_stream_t s;
- //struct vp9_SPS_t *sps;
- //unsigned int nal_type;
- int nal_idx = 0;
- int real_data_pos, real_data_size;
- bool is_combine = false;
-
- vp9_parse(&inst->vsi->head, buf, size);
-
- if (!inst->vsi->head.parsed)
- return -1;
-
- /* if the st compose from csd + slice that is the combine data. */
- inst->vsi->is_combine = is_combine;
- inst->vsi->nalu_pos = nal_idx;
-
- /* start code plus nal type. */
- real_data_pos = nal_idx + 1;
- real_data_size = size - real_data_pos;
+ int ret = 0;
+ struct vp9_param_sets *ps = NULL;
- //sps = kzalloc(sizeof(struct vp9_SPS_t), GFP_KERNEL);
- //if (sps == NULL)
- //return -ENOMEM;
+ ps = kzalloc(sizeof(struct vp9_param_sets), GFP_KERNEL);
+ if (ps == NULL)
+ return -ENOMEM;
- /* the extra data would be parsed. */
- //vp9_stream_set(&s, &buf[real_data_pos], real_data_size);
- //vp9_sps_parse(&s, sps);
- //vp9_sps_info(sps);
+ ret = vp9_decode_extradata_ps(buf, size, ps);
+ if (ret) {
+ pr_err("parse extra data failed. err: %d\n", ret);
+ goto out;
+ }
- //fill_vdec_params(inst, sps);
- fill_vdec_params(inst);
+ if (ps->head_parsed)
+ fill_vdec_params(inst, &ps->ctx);
- //kfree(sps);
+ ret = ps->head_parsed ? 0 : -1;
+/* single cleanup path frees the param-set scratch buffer */
+out:
+ kfree(ps);
- return 0;
+ return ret;
}
static int vdec_vp9_probe(unsigned long h_vdec,
struct vdec_vp9_inst *inst =
(struct vdec_vp9_inst *)h_vdec;
struct stream_info *st;
- u8 *buf = (u8 *)bs->va;
+ u8 *buf = (u8 *)bs->vaddr;
u32 size = bs->size;
int ret = 0;
if (st->magic == NORe || st->magic == NORn)
ret = stream_parse(inst, st->data, st->length);
- else
- ret = stream_parse(inst, buf, size);
+ else {
+ if (inst->ctx->param_sets_from_ucode)
+ ret = stream_parse_by_ucode(inst, buf, size);
+ else
+ ret = stream_parse(inst, buf, size);
+ }
+
+ inst->vsi->cur_pic = inst->vsi->pic;
return ret;
}
static void vdec_vp9_deinit(unsigned long h_vdec)
{
+ ulong flags;
struct vdec_vp9_inst *inst = (struct vdec_vp9_inst *)h_vdec;
-
- if (!inst)
- return;
+/* NOTE(review): the old NULL check on inst was dropped and inst->ctx is
+ * now dereferenced unconditionally — confirm callers never pass NULL. */
+ struct aml_vcodec_ctx *ctx = inst->ctx;
aml_vcodec_debug_enter(inst);
vcodec_vfm_release(&inst->vfm);
- dump_deinit();
+ //dump_deinit();
+/* Free instance state and clear drv_handle atomically w.r.t. other
+ * driver paths; kfree() does not sleep, so it is safe under the lock. */
+ spin_lock_irqsave(&ctx->slock, flags);
if (inst->vsi && inst->vsi->header_buf)
kfree(inst->vsi->header_buf);
kfree(inst->vsi);
kfree(inst);
+
+ ctx->drv_handle = 0;
+ spin_unlock_irqrestore(&ctx->slock, flags);
+
need_trigger = false;
dump_cnt = 0;
}
-static int vdec_vp9_get_fb(struct vdec_vp9_inst *inst, struct vdec_fb **out)
+/* Thin wrapper: fetch the next free capture buffer for this context. */
+static int vdec_vp9_get_fb(struct vdec_vp9_inst *inst, struct vdec_v4l2_buffer **out)
{
return get_fb_from_queue(inst->ctx, out);
}
-static void vdec_vp9_get_vf(struct vdec_vp9_inst *inst, struct vdec_fb **out)
+static void vdec_vp9_get_vf(struct vdec_vp9_inst *inst, struct vdec_v4l2_buffer **out)
{
struct vframe_s *vf = NULL;
- struct vdec_fb *fb = NULL;
-
- aml_vcodec_debug(inst, "%s() [%d], vfm: %p",
- __func__, __LINE__, &inst->vfm);
+ struct vdec_v4l2_buffer *fb = NULL;
vf = peek_video_frame(&inst->vfm);
if (!vf) {
atomic_set(&vf->use_cnt, 1);
- aml_vcodec_debug(inst, "%s() [%d], vf: %p, v4l_mem_handle: %lx, idx: %d\n",
- __func__, __LINE__, vf, vf->v4l_mem_handle, vf->index);
-
- fb = (struct vdec_fb *)vf->v4l_mem_handle;
+ fb = (struct vdec_v4l2_buffer *)vf->v4l_mem_handle;
fb->vf_handle = (unsigned long)vf;
fb->status = FB_ST_DISPLAY;
*out = fb;
//pr_info("%s, %d\n", __func__, fb->base_y.bytes_used);
- //dump_write(fb->base_y.va, fb->base_y.bytes_used);
- //dump_write(fb->base_c.va, fb->base_c.bytes_used);
+ //dump_write(fb->base_y.vaddr, fb->base_y.bytes_used);
+ //dump_write(fb->base_c.vaddr, fb->base_c.bytes_used);
/* convert yuv format. */
- //swap_uv(fb->base_c.va, fb->base_c.size);
-
- aml_vcodec_debug(inst, "%s() [%d], va: %p, phy: %x, size: %zu",
- __func__, __LINE__, fb->base_y.va,
- (unsigned int)virt_to_phys(fb->base_y.va), fb->base_y.size);
- aml_vcodec_debug(inst, "%s() [%d], va: %p, phy: %x, size: %zu",
- __func__, __LINE__, fb->base_c.va,
- (unsigned int)virt_to_phys(fb->base_c.va), fb->base_c.size);
-}
-
-static int vp9_superframe_split_filter(struct vp9_superframe_split *s)
-{
- int i, j, ret, marker;
- bool is_superframe = false;
-
- if (!s->data)
- return -1;
-
- marker = s->data[s->data_size - 1];
- if ((marker & 0xe0) == 0xc0) {
- int length_size = 1 + ((marker >> 3) & 0x3);
- int nb_frames = 1 + (marker & 0x7);
- int idx_size = 2 + nb_frames * length_size;
-
- if (s->data_size >= idx_size &&
- s->data[s->data_size - idx_size] == marker) {
- s64 total_size = 0;
- int idx = s->data_size + 1 - idx_size;
-
- for (i = 0; i < nb_frames; i++) {
- int frame_size = 0;
- for (j = 0; j < length_size; j++)
- frame_size |= s->data[idx++] << (j * 8);
-
- total_size += frame_size;
- if (frame_size < 0 ||
- total_size > s->data_size - idx_size) {
- pr_err( "Invalid frame size in a sframe: %d\n",
- frame_size);
- ret = -EINVAL;
- goto fail;
- }
- s->sizes[i] = frame_size;
- }
-
- s->nb_frames = nb_frames;
- s->size = total_size;
- s->next_frame = 0;
- s->next_frame_offset = 0;
- is_superframe = true;
- }
- }else {
- s->nb_frames = 1;
- s->sizes[0] = s->data_size;
- s->size = s->data_size;
- }
-
- /*pr_info("sframe: %d, frames: %d, IN: %x, OUT: %x\n",
- is_superframe, s->nb_frames,
- s->data_size, s->size);*/
-
- /* parse uncompressed header. */
- if (is_superframe) {
- /* bitstream profile. */
- /* frame type. (intra or inter) */
- /* colorspace descriptor */
- /* ... */
-
- pr_info("the frame is a superframe.\n");
- }
-
- /*pr_err("in: %x, %d, out: %x, sizes %d,%d,%d,%d,%d,%d,%d,%d\n",
- s->data_size,
- s->nb_frames,
- s->size,
- s->sizes[0],
- s->sizes[1],
- s->sizes[2],
- s->sizes[3],
- s->sizes[4],
- s->sizes[5],
- s->sizes[6],
- s->sizes[7]);*/
-
- return 0;
-fail:
- return ret;
+ //swap_uv(fb->base_c.vaddr, fb->base_c.size);
}
static void add_prefix_data(struct vp9_superframe_split *s,
prefix = p + PREFIX_SIZE * (i - 1);
/*add amlogic frame headers.*/
- frame_size += 4;
+ frame_size += 16;
prefix[0] = (frame_size >> 24) & 0xff;
prefix[1] = (frame_size >> 16) & 0xff;
prefix[2] = (frame_size >> 8 ) & 0xff;
prefix[13] = 'M';
prefix[14] = 'L';
prefix[15] = 'V';
- frame_size -= 4;
+ frame_size -= 16;
}
*out = p;
struct vp9_superframe_split s;
u8 *data = NULL;
u32 length = 0;
+ bool need_prefix = vp9_need_prefix;
memset(&s, 0, sizeof(s));
need_trigger = true;
}
- /*parse superframe.*/
- s.data = buf;
- s.data_size = size;
- ret = vp9_superframe_split_filter(&s);
- if (ret) {
- pr_err("parse frames failed.\n");
- return ret;
- }
-
- /*add headers.*/
- add_prefix_data(&s, &data, &length);
+ if (need_prefix) {
+ /*parse superframe.*/
+ s.data = buf;
+ s.data_size = size;
+ ret = vp9_superframe_split_filter(&s);
+ if (ret) {
+ pr_err("parse frames failed.\n");
+ return ret;
+ }
- ret = vdec_vframe_write(vdec, data, length, ts);
+ /*add headers.*/
+ add_prefix_data(&s, &data, &length);
+ ret = vdec_vframe_write(vdec, data, length, ts);
+ vfree(data);
+ } else {
+ ret = vdec_vframe_write(vdec, buf, size, ts);
+ }
- aml_vcodec_debug(inst, "buf: %p, buf size: %u, write to: %d",
- data, length, ret);
+ return ret;
+}
- vfree(data);
+/*
+ * Reparse the header of an incoming frame and report whether the coded
+ * resolution differs from the currently configured one. On a change the
+ * new picture info is latched into cur_pic and true is returned.
+ * NOTE(review): reads p[1..3] or p[17..19] without validating size —
+ * confirm callers never pass a buffer shorter than 20 bytes.
+ */
+static bool monitor_res_change(struct vdec_vp9_inst *inst, u8 *buf, u32 size)
+{
+ int ret = -1;
+ u8 *p = buf;
+ int len = size;
+/* sync-code offset depends on vp9_need_prefix — presumably it accounts
+ * for the 16-byte AML frame header; verify against the write path. */
+ u32 synccode = vp9_need_prefix ?
+ ((p[1] << 16) | (p[2] << 8) | p[3]) :
+ ((p[17] << 16) | (p[18] << 8) | p[19]);
+
+ if (synccode == SYNC_CODE) {
+ ret = stream_parse(inst, p, len);
+ if (!ret && (inst->vsi->cur_pic.coded_width !=
+ inst->vsi->pic.coded_width ||
+ inst->vsi->cur_pic.coded_height !=
+ inst->vsi->pic.coded_height)) {
+ inst->vsi->cur_pic = inst->vsi->pic;
+ return true;
+ }
+ }
- return 0;
+ return false;
}
static int vdec_vp9_decode(unsigned long h_vdec, struct aml_vcodec_mem *bs,
- unsigned long int timestamp, bool *res_chg)
+ u64 timestamp, bool *res_chg)
{
struct vdec_vp9_inst *inst = (struct vdec_vp9_inst *)h_vdec;
struct aml_vdec_adapt *vdec = &inst->vdec;
struct stream_info *st;
u8 *buf;
u32 size;
- int ret = 0;
+ int ret = -1;
/* bs NULL means flush decoder */
if (bs == NULL)
- return 0;
+ return -1;
- buf = (u8 *)bs->va;
+ buf = (u8 *)bs->vaddr;
size = bs->size;
st = (struct stream_info *)buf;
ret = vdec_write_nalu(inst, st->data, st->length, timestamp);
else if (inst->ctx->is_stream_mode)
ret = vdec_vbuf_write(vdec, buf, size);
- else
+ else {
+ /*checked whether the resolution changes.*/
+ if ((*res_chg = monitor_res_change(inst, buf, size)))
+ return 0;
+
ret = vdec_write_nalu(inst, buf, size, timestamp);
+ }
return ret;
}
return ret;
}
+/* Wake the writer blocked in stream_parse_by_ucode(). */
+static void set_param_write_sync(struct vdec_vp9_inst *inst)
+{
+ complete(&inst->comp);
+}
+
+/*
+ * Callback invoked when the ucode has parsed the stream header: copy
+ * the reported geometry into the shared vsi and wake the waiter in
+ * stream_parse_by_ucode().
+ */
+static void set_param_pic_info(struct vdec_vp9_inst *inst,
+ struct aml_vdec_pic_infos *info)
+{
+ struct vdec_pic_info *pic = &inst->vsi->pic;
+ struct vdec_vp9_dec_info *dec = &inst->vsi->dec;
+ struct v4l2_rect *rect = &inst->vsi->crop;
+
+ /* fill visible area size that be used for EGL. */
+ pic->visible_width = info->visible_width;
+ pic->visible_height = info->visible_height;
+
+ /* calc visible ares. */
+ rect->left = 0;
+ rect->top = 0;
+ rect->width = pic->visible_width;
+ rect->height = pic->visible_height;
+
+ /* config canvas size that be used for decoder. */
+ pic->coded_width = info->coded_width;
+ pic->coded_height = info->coded_height;
+
+ pic->y_len_sz = pic->coded_width * pic->coded_height;
+ pic->c_len_sz = pic->y_len_sz >> 1;
+
+ /* calc DPB size */
+ /* NOTE(review): hardcoded to 5 while info->dpb_size is only logged
+  * below — confirm the reported DPB size should not be used here. */
+ dec->dpb_sz = 5;
+
+ /*wake up*/
+ complete(&inst->comp);
+
+ pr_info("Parse from ucode, crop(%d x %d), coded(%d x %d) dpb: %d\n",
+ info->visible_width, info->visible_height,
+ info->coded_width, info->coded_height,
+ info->dpb_size);
+}
+
+/*
+ * Dispatch a set-parameter request from the common decoder layer.
+ * Returns 0 on success, -1 for a NULL instance, -EINVAL for an
+ * unknown parameter type.
+ */
+static int vdec_vp9_set_param(unsigned long h_vdec,
+ enum vdec_set_param_type type, void *in)
+{
+ int ret = 0;
+ struct vdec_vp9_inst *inst = (struct vdec_vp9_inst *)h_vdec;
+
+ if (!inst) {
+ pr_err("the vp9 inst of dec is invalid.\n");
+ return -1;
+ }
+
+ switch (type) {
+ case SET_PARAM_WRITE_FRAME_SYNC:
+ set_param_write_sync(inst);
+ break;
+
+ case SET_PARAM_PIC_INFO:
+ /* in is an aml_vdec_pic_infos* supplied by the caller */
+ set_param_pic_info(inst, in);
+ break;
+
+ default:
+ aml_vcodec_err(inst, "invalid set parameter type=%d", type);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
static struct vdec_common_if vdec_vp9_if = {
- vdec_vp9_init,
- vdec_vp9_probe,
- vdec_vp9_decode,
- vdec_vp9_get_param,
- vdec_vp9_deinit,
+ .init = vdec_vp9_init,
+ .probe = vdec_vp9_probe,
+ .decode = vdec_vp9_decode,
+ .get_param = vdec_vp9_get_param,
+ .set_param = vdec_vp9_set_param,
+ .deinit = vdec_vp9_deinit,
};
struct vdec_common_if *get_vp9_dec_comm_if(void);
case AV_CODEC_ID_VP9:
forced_codec_name = "vp9_v4l2m2m";
break;
+ case AV_CODEC_ID_MPEG1VIDEO:
+ forced_codec_name = "mpeg1_v4l2m2m";
+ break;
+ case AV_CODEC_ID_MPEG2VIDEO:
+ forced_codec_name = "mpeg2_v4l2m2m";
+ break;
+ case AV_CODEC_ID_VC1:
+ forced_codec_name = "vc1_v4l2m2m";
+ break;
+ case AV_CODEC_ID_H263:
+ forced_codec_name = "h263_v4l2m2m";
+ break;
+ case AV_CODEC_ID_MPEG4:
+ forced_codec_name = "mpeg4_v4l2m2m";
+ break;
+ case AV_CODEC_ID_MJPEG:
+ forced_codec_name = "mjpeg_v4l2m2m";
+ break;
}
codec = avcodec_find_decoder_by_name(forced_codec_name);
--- /dev/null
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+
+#include "common.h"
+#include "pixfmt.h"
+
+/* 8x8 zigzag scan order (FFmpeg's ff_zigzag_direct). */
+const u8 ff_zigzag_direct[64] = {
+ 0, 1, 8, 16, 9, 2, 3, 10,
+ 17, 24, 32, 25, 18, 11, 4, 5,
+ 12, 19, 26, 33, 40, 48, 41, 34,
+ 27, 20, 13, 6, 7, 14, 21, 28,
+ 35, 42, 49, 56, 57, 50, 43, 36,
+ 29, 22, 15, 23, 30, 37, 44, 51,
+ 58, 59, 52, 45, 38, 31, 39, 46,
+ 53, 60, 61, 54, 47, 55, 62, 63
+};
+
+/* 4x4 zigzag scan; sized 16+1 so the extra zero-initialized slot
+ * tolerates a one-past-the-end read (matches FFmpeg). */
+const u8 ff_zigzag_scan[16 + 1] = {
+ 0 + 0 * 4, 1 + 0 * 4, 0 + 1 * 4, 0 + 2 * 4,
+ 1 + 1 * 4, 2 + 0 * 4, 3 + 0 * 4, 2 + 1 * 4,
+ 1 + 2 * 4, 0 + 3 * 4, 1 + 3 * 4, 2 + 2 * 4,
+ 3 + 1 * 4, 3 + 2 * 4, 2 + 3 * 4, 3 + 3 * 4,
+};
+
+/* Human-readable names indexed by enum AVColorSpace; entries without an
+ * initializer are NULL. */
+const char * const color_space_names[] = {
+ [AVCOL_SPC_RGB] = "gbr",
+ [AVCOL_SPC_BT709] = "bt709",
+ [AVCOL_SPC_UNSPECIFIED] = "unknown",
+ [AVCOL_SPC_RESERVED] = "reserved",
+ [AVCOL_SPC_FCC] = "fcc",
+ [AVCOL_SPC_BT470BG] = "bt470bg",
+ [AVCOL_SPC_SMPTE170M] = "smpte170m",
+ [AVCOL_SPC_SMPTE240M] = "smpte240m",
+ [AVCOL_SPC_YCGCO] = "ycgco",
+ [AVCOL_SPC_BT2020_NCL] = "bt2020nc",
+ [AVCOL_SPC_BT2020_CL] = "bt2020c",
+ [AVCOL_SPC_SMPTE2085] = "smpte2085",
+ [AVCOL_SPC_CHROMA_DERIVED_NCL] = "chroma-derived-nc",
+ [AVCOL_SPC_CHROMA_DERIVED_CL] = "chroma-derived-c",
+ [AVCOL_SPC_ICTCP] = "ictcp",
+};
+
+/* Return the name for a color space, or NULL if out of range. */
+const char *av_color_space_name(enum AVColorSpace space)
+{
+ return (unsigned) space < AVCOL_SPC_NB ?
+ color_space_names[space] : NULL;
+}
+
+/* Human-readable names indexed by enum AVColorPrimaries. */
+const char * const color_primaries_names[AVCOL_PRI_NB] = {
+ [AVCOL_PRI_RESERVED0] = "reserved",
+ [AVCOL_PRI_BT709] = "bt709",
+ [AVCOL_PRI_UNSPECIFIED] = "unknown",
+ [AVCOL_PRI_RESERVED] = "reserved",
+ [AVCOL_PRI_BT470M] = "bt470m",
+ [AVCOL_PRI_BT470BG] = "bt470bg",
+ [AVCOL_PRI_SMPTE170M] = "smpte170m",
+ [AVCOL_PRI_SMPTE240M] = "smpte240m",
+ [AVCOL_PRI_FILM] = "film",
+ [AVCOL_PRI_BT2020] = "bt2020",
+ [AVCOL_PRI_SMPTE428] = "smpte428",
+ [AVCOL_PRI_SMPTE431] = "smpte431",
+ [AVCOL_PRI_SMPTE432] = "smpte432",
+ [AVCOL_PRI_JEDEC_P22] = "jedec-p22",
+};
+
+/* Return the name for a set of color primaries, or NULL if out of range. */
+const char *av_color_primaries_name(enum AVColorPrimaries primaries)
+{
+ return (unsigned) primaries < AVCOL_PRI_NB ?
+ color_primaries_names[primaries] : NULL;
+}
+
+/* Human-readable names indexed by enum AVColorTransferCharacteristic. */
+const char * const color_transfer_names[] = {
+ [AVCOL_TRC_RESERVED0] = "reserved",
+ [AVCOL_TRC_BT709] = "bt709",
+ [AVCOL_TRC_UNSPECIFIED] = "unknown",
+ [AVCOL_TRC_RESERVED] = "reserved",
+ [AVCOL_TRC_GAMMA22] = "bt470m",
+ [AVCOL_TRC_GAMMA28] = "bt470bg",
+ [AVCOL_TRC_SMPTE170M] = "smpte170m",
+ [AVCOL_TRC_SMPTE240M] = "smpte240m",
+ [AVCOL_TRC_LINEAR] = "linear",
+ [AVCOL_TRC_LOG] = "log100",
+ [AVCOL_TRC_LOG_SQRT] = "log316",
+ [AVCOL_TRC_IEC61966_2_4] = "iec61966-2-4",
+ [AVCOL_TRC_BT1361_ECG] = "bt1361e",
+ [AVCOL_TRC_IEC61966_2_1] = "iec61966-2-1",
+ [AVCOL_TRC_BT2020_10] = "bt2020-10",
+ [AVCOL_TRC_BT2020_12] = "bt2020-12",
+ [AVCOL_TRC_SMPTE2084] = "smpte2084",
+ [AVCOL_TRC_SMPTE428] = "smpte428",
+ [AVCOL_TRC_ARIB_STD_B67] = "arib-std-b67",
+};
+
+/* Return the name for a transfer characteristic, or NULL if out of range. */
+const char *av_color_transfer_name(enum AVColorTransferCharacteristic transfer)
+{
+ return (unsigned) transfer < AVCOL_TRC_NB ?
+ color_transfer_names[transfer] : NULL;
+}
+
+//math
+const u8 ff_log2_tab[256]={
+ 0,0,1,1,2,2,2,2,3,3,3,3,3,3,3,3,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,
+ 5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
+ 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
+ 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
+ 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
+ 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
+ 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
+ 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7
+};
+
+int av_log2(u32 v)
+{
+ int n = 0;
+
+ if (v & 0xffff0000) {
+ v >>= 16;
+ n += 16;
+ }
+ if (v & 0xff00) {
+ v >>= 8;
+ n += 8;
+ }
+ n += ff_log2_tab[v];
+
+ return n;
+}
+
+//bitstream
+/*
+ * Return the length (3 or 4) of an Annex-B start code at the beginning
+ * of data, or -1 if none. The strict '>' comparisons require at least
+ * one payload byte after the start code — presumably intentional so an
+ * empty NAL is rejected; confirm against callers.
+ */
+int find_start_code(u8 *data, int data_sz)
+{
+ if (data_sz > 3 && data[0] == 0 && data[1] == 0 && data[2] == 1)
+ return 3;
+
+ if (data_sz > 4 && data[0] == 0 && data[1] == 0 && data[2] == 0 && data[3] == 1)
+ return 4;
+
+ return -1;
+}
+
+/*
+ * Return the byte length of the NAL unit at data[0], i.e. the offset of
+ * the next 3- or 4-byte start code, or len if none is found. The first
+ * 'if (data[i])' is a fast skip of non-zero bytes; the data[i] == 0
+ * re-checks afterwards are therefore always true (kept for clarity).
+ */
+int calc_nal_len(u8 *data, int len)
+{
+ int i;
+
+ for (i = 0; i < len - 4; i++) {
+ if (data[i])
+ continue;
+
+ if ((data[i] == 0 && data[i + 1] == 0 && data[i + 2] == 1) ||
+ (data[i] == 0 && data[i + 1] == 0 &&
+ data[i + 2]==0 && data[i + 3] == 1))
+ return i;
+ }
+ return len; /* no start code found; treat the remainder as one NAL */
+}
+
+/*
+ * Copy a NAL unit while removing emulation_prevention_three_byte
+ * (00 00 03 -> 00 00), yielding the RBSP. The first two bytes (NAL
+ * header) are copied verbatim. Returns a vmalloc'ed, zero-padded
+ * buffer the caller must vfree(), or NULL on allocation failure;
+ * *dst_len receives the RBSP length.
+ */
+u8 *nal_unit_extract_rbsp(const u8 *src, u32 src_len, u32 *dst_len)
+{
+ u8 *dst;
+ u32 i, len;
+
+ dst = vmalloc(src_len + AV_INPUT_BUFFER_PADDING_SIZE);
+ if (!dst)
+ return NULL;
+
+ /* NAL unit header (2 bytes) */
+ i = len = 0;
+ while (i < 2 && i < src_len)
+ dst[len++] = src[i++];
+
+ while (i + 2 < src_len)
+ if (!src[i] && !src[i + 1] && src[i + 2] == 3) {
+ dst[len++] = src[i++];
+ dst[len++] = src[i++];
+ i++; // remove emulation_prevention_three_byte
+ } else
+ dst[len++] = src[i++];
+
+ /* copy the final one or two bytes that the scan above cannot reach */
+ while (i < src_len)
+ dst[len++] = src[i++];
+
+ memset(dst + len, 0, AV_INPUT_BUFFER_PADDING_SIZE);
+
+ *dst_len = len;
+
+ return dst;
+}
+
+//debug
+static void _pr_hex(const char *fmt, ...)
+{
+ u8 buf[512];
+ int len = 0;
+
+ va_list args;
+ va_start(args, fmt);
+ vsnprintf(buf + len, 512 - len, fmt, args);
+ printk("%s", buf);
+ va_end(args);
+}
+
+void print_hex_debug(u8 *data, u32 len, int max)
+{
+ int i, l;
+
+ l = len > max ? max : len;
+
+ for (i = 0; i < l; i++) {
+ if ((i & 0xf) == 0)
+ _pr_hex("%06x:", i);
+ _pr_hex("%02x ", data[i]);
+ if ((((i + 1) & 0xf) == 0) || ((i + 1) == l))
+ _pr_hex("\n");
+ }
+
+ _pr_hex("print hex ending. len %d\n\n", l);
+}
+
--- /dev/null
+#ifndef UTILS_COMMON_H
+#define UTILS_COMMON_H
+
+#include "pixfmt.h"
+
+#define AV_INPUT_BUFFER_PADDING_SIZE 64
+/* NOTE(review): the get_bits reader uses a 32-bit cache, and upstream
+ * FFmpeg sets MIN_CACHE_BITS to 25 in that configuration; 64 here lets
+ * get_bits_long() route reads of up to 64 bits into get_bits(), which
+ * only supports 1-25 bits — confirm this is intended. */
+#define MIN_CACHE_BITS 64
+
+/* Classic min/max macros: arguments are evaluated more than once, so do
+ * not pass expressions with side effects. */
+#define FFMAX(a,b) ((a) > (b) ? (a) : (b))
+#define FFMAX3(a,b,c) FFMAX(FFMAX(a,b),c)
+#define FFMIN(a,b) ((a) > (b) ? (b) : (a))
+#define FFMIN3(a,b,c) FFMIN(FFMIN(a,b),c)
+
+/* Unaligned little-endian 32-bit store. */
+#define AV_WL32(p, val) \
+ do { \
+ u32 d = (val); \
+ ((u8*)(p))[0] = (d); \
+ ((u8*)(p))[1] = (d) >> 8; \
+ ((u8*)(p))[2] = (d) >> 16; \
+ ((u8*)(p))[3] = (d) >> 24; \
+ } while(0)
+
+/* Unaligned big-endian 32-bit store. */
+#define AV_WB32(p, val) \
+ do { u32 d = (val); \
+ ((u8*)(p))[3] = (d); \
+ ((u8*)(p))[2] = (d) >> 8; \
+ ((u8*)(p))[1] = (d) >> 16; \
+ ((u8*)(p))[0] = (d) >> 24; \
+ } while(0)
+
+/* Unaligned big-endian 32-bit load. */
+#define AV_RB32(x) \
+ (((u32)((const u8*)(x))[0] << 24) | \
+ (((const u8*)(x))[1] << 16) | \
+ (((const u8*)(x))[2] << 8) | \
+ ((const u8*)(x))[3])
+
+/* Unaligned little-endian 32-bit load. */
+#define AV_RL32(x) \
+ (((u32)((const u8*)(x))[3] << 24) | \
+ (((const u8*)(x))[2] << 16) | \
+ (((const u8*)(x))[1] << 8) | \
+ ((const u8*)(x))[0])
+
+/* Extract the top 's' bits of a 32-bit value (signed / unsigned). */
+#define NEG_SSR32(a, s) (((int)(a)) >> ((s < 32) ? (32 - (s)) : 0))
+#define NEG_USR32(a, s) (((u32)(a)) >> ((s < 32) ? (32 - (s)) : 0))
+
+//rounded division & shift
+#define RSHIFT(a,b) ((a) > 0 ? ((a) + ((1<<(b))>>1))>>(b) : ((a) + ((1<<(b))>>1)-1)>>(b))
+/* assume b>0 */
+#define ROUNDED_DIV(a,b) (((a)>0 ? (a) + ((b)>>1) : (a) - ((b)>>1))/(b))
+
+/* Rational number (numerator / denominator), as in FFmpeg. */
+struct AVRational{
+ int num; ///< numerator
+ int den; ///< denominator
+};
+
+//fmt
+const char *av_color_space_name(enum AVColorSpace space);
+const char *av_color_primaries_name(enum AVColorPrimaries primaries);
+const char *av_color_transfer_name(enum AVColorTransferCharacteristic transfer);
+
+//math
+int av_log2(u32 v);
+
+//bitstream
+int find_start_code(u8 *data, int data_sz);
+int calc_nal_len(u8 *data, int len);
+u8 *nal_unit_extract_rbsp(const u8 *src, u32 src_len, u32 *dst_len);
+
+//debug
+void print_hex_debug(u8 *data, u32 len, int max);
+
+#endif
\ No newline at end of file
--- /dev/null
+#ifndef AVCODEC_GET_BITS_H
+#define AVCODEC_GET_BITS_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include "common.h"
+
+/*
+ * Safe bitstream reading:
+ * optionally, the get_bits API can check to ensure that we
+ * don't read past input buffer boundaries. This is protected
+ * with CONFIG_SAFE_BITSTREAM_READER at the global level, and
+ * then below that with UNCHECKED_BITSTREAM_READER at the per-
+ * decoder level. This means that decoders that check internally
+ * can "#define UNCHECKED_BITSTREAM_READER 1" to disable
+ * overread checks.
+ * Boundary checking causes a minor performance penalty so for
+ * applications that won't want/need this, it can be disabled
+ * globally using "#define CONFIG_SAFE_BITSTREAM_READER 0".
+ */
+
+/* Bitstream reader state; see the API comment block below. */
+struct get_bits_context {
+ const u8 *buffer; /* start of the bitstream */
+ const u8 *buffer_end; /* one past the last readable byte */
+ int index; /* current read position, in bits */
+ int size_in_bits; /* payload size, in bits */
+ int size_in_bits_plus8; /* size_in_bits + 8, cached in init_get_bits_xe() */
+};
+
+/* Bitstream reader API docs:
+ * name
+ * arbitrary name which is used as prefix for the internal variables
+ *
+ * gb
+ * struct get_bits_context
+ *
+ * OPEN_READER(name, gb)
+ * load gb into local variables
+ *
+ * CLOSE_READER(name, gb)
+ * store local vars in gb
+ *
+ * UPDATE_CACHE(name, gb)
+ * Refill the internal cache from the bitstream.
+ * After this call at least MIN_CACHE_BITS will be available.
+ *
+ * GET_CACHE(name, gb)
+ * Will output the contents of the internal cache,
+ * next bit is MSB of 32 or 64 bits (FIXME 64 bits).
+ *
+ * SHOW_UBITS(name, gb, num)
+ * Will return the next num bits.
+ *
+ * SHOW_SBITS(name, gb, num)
+ * Will return the next num bits and do sign extension.
+ *
+ * SKIP_BITS(name, gb, num)
+ * Will skip over the next num bits.
+ * Note, this is equivalent to SKIP_CACHE; SKIP_COUNTER.
+ *
+ * SKIP_CACHE(name, gb, num)
+ * Will remove the next num bits from the cache (note SKIP_COUNTER
+ * MUST be called before UPDATE_CACHE / CLOSE_READER).
+ *
+ * SKIP_COUNTER(name, gb, num)
+ * Will increment the internal bit counter (see SKIP_CACHE & SKIP_BITS).
+ *
+ * LAST_SKIP_BITS(name, gb, num)
+ * Like SKIP_BITS, to be used if next call is UPDATE_CACHE or CLOSE_READER.
+ *
+ * BITS_LEFT(name, gb)
+ * Return the number of bits left
+ *
+ * For examples see get_bits, show_bits, skip_bits, get_vlc.
+ */
+
+#define OPEN_READER_NOSIZE(name, gb) \
+ u32 name ## _index = (gb)->index; \
+ u32 name ## _cache
+
+#define OPEN_READER(name, gb) OPEN_READER_NOSIZE(name, gb)
+#define BITS_AVAILABLE(name, gb) 1
+
+#define CLOSE_READER(name, gb) (gb)->index = name ## _index
+
+#define UPDATE_CACHE_LE(name, gb) name ##_cache = \
+ AV_RL32((gb)->buffer + (name ## _index >> 3)) >> (name ## _index & 7)
+
+#define UPDATE_CACHE_BE(name, gb) name ## _cache = \
+ AV_RB32((gb)->buffer + (name ## _index >> 3)) << (name ## _index & 7)
+
+#define SKIP_COUNTER(name, gb, num) name ## _index += (num)
+
+#define BITS_LEFT(name, gb) ((int)((gb)->size_in_bits - name ## _index))
+
+#define SKIP_BITS(name, gb, num) \
+ do { \
+ SKIP_CACHE(name, gb, num); \
+ SKIP_COUNTER(name, gb, num); \
+ } while (0)
+
+#define GET_CACHE(name, gb) ((u32) name ## _cache)
+
+#define LAST_SKIP_BITS(name, gb, num) SKIP_COUNTER(name, gb, num)
+
+#define SHOW_UBITS_LE(name, gb, num) zero_extend(name ## _cache, num)
+#define SHOW_SBITS_LE(name, gb, num) sign_extend(name ## _cache, num)
+
+#define SHOW_UBITS_BE(name, gb, num) NEG_USR32(name ## _cache, num)
+#define SHOW_SBITS_BE(name, gb, num) NEG_SSR32(name ## _cache, num)
+
+#ifdef BITSTREAM_READER_LE
+#define UPDATE_CACHE(name, gb) UPDATE_CACHE_LE(name, gb)
+#define SKIP_CACHE(name, gb, num) name ## _cache >>= (num)
+
+#define SHOW_UBITS(name, gb, num) SHOW_UBITS_LE(name, gb, num)
+#define SHOW_SBITS(name, gb, num) SHOW_SBITS_LE(name, gb, num)
+#else
+#define UPDATE_CACHE(name, gb) UPDATE_CACHE_BE(name, gb)
+#define SKIP_CACHE(name, gb, num) name ## _cache <<= (num)
+
+#define SHOW_UBITS(name, gb, num) SHOW_UBITS_BE(name, gb, num)
+#define SHOW_SBITS(name, gb, num) SHOW_SBITS_BE(name, gb, num)
+#endif
+
+/* Sign-extend the low 'bits' bits of val. The union avoids a signed
+ * left-shift of negative values (UB); the 'const' on the return type
+ * is meaningless and only kept to match the imported source. */
+static inline const int sign_extend(int val, u32 bits)
+{
+ u32 shift = 8 * sizeof(int) - bits;
+
+ union { u32 u; int s; } v = { (u32) val << shift };
+ return v.s >> shift;
+}
+
+/* Zero-extend: keep only the low 'bits' bits of val. */
+static inline u32 zero_extend(u32 val, u32 bits)
+{
+ return (val << ((8 * sizeof(int)) - bits)) >> ((8 * sizeof(int)) - bits);
+}
+
+/* Number of bits consumed so far. */
+static inline int get_bits_count(const struct get_bits_context *s)
+{
+ return s->index;
+}
+
+/**
+ * Skips the specified number of bits.
+ * @param n the number of bits to skip,
+ * For the UNCHECKED_BITSTREAM_READER this must not cause the distance
+ * from the start to overflow int. Staying within the bitstream + padding
+ * is sufficient, too.
+ */
+static inline void skip_bits_long(struct get_bits_context *s, int n)
+{
+ s->index += n;
+}
+
+/**
+ * Read MPEG-1 dc-style VLC (sign bit + mantissa with no MSB).
+ * if MSB not set it is negative
+ * @param n length in bits
+ */
+static inline int get_xbits(struct get_bits_context *s, int n)
+{
+ register int sign;
+ register int cache;
+
+ OPEN_READER(re, s);
+ UPDATE_CACHE(re, s);
+ cache = GET_CACHE(re, s);
+ sign = ~cache >> 31;
+ LAST_SKIP_BITS(re, s, n);
+ CLOSE_READER(re, s);
+
+ return (NEG_USR32(sign ^ cache, n) ^ sign) - sign;
+}
+
+
+static inline int get_xbits_le(struct get_bits_context *s, int n)
+{
+ register int sign;
+ register int cache;
+
+ OPEN_READER(re, s);
+ UPDATE_CACHE_LE(re, s);
+ cache = GET_CACHE(re, s);
+ sign = sign_extend(~cache, n) >> 31;
+ LAST_SKIP_BITS(re, s, n);
+ CLOSE_READER(re, s);
+
+ return (zero_extend(sign ^ cache, n) ^ sign) - sign;
+}
+
+static inline int get_sbits(struct get_bits_context *s, int n)
+{
+ register int tmp;
+
+ OPEN_READER(re, s);
+ UPDATE_CACHE(re, s);
+ tmp = SHOW_SBITS(re, s, n);
+ LAST_SKIP_BITS(re, s, n);
+ CLOSE_READER(re, s);
+
+ return tmp;
+}
+
+/**
+ * Read 1-25 bits.
+ */
+static inline u32 get_bits(struct get_bits_context *s, int n)
+{
+ register u32 tmp;
+
+ OPEN_READER(re, s);
+ UPDATE_CACHE(re, s);
+ tmp = SHOW_UBITS(re, s, n);
+ LAST_SKIP_BITS(re, s, n);
+ CLOSE_READER(re, s);
+
+ return tmp;
+}
+
+/**
+ * Read 0-25 bits.
+ */
+static inline int get_bitsz(struct get_bits_context *s, int n)
+{
+ return n ? get_bits(s, n) : 0;
+}
+
+static inline u32 get_bits_le(struct get_bits_context *s, int n)
+{
+ register int tmp;
+
+ OPEN_READER(re, s);
+ UPDATE_CACHE_LE(re, s);
+ tmp = SHOW_UBITS_LE(re, s, n);
+ LAST_SKIP_BITS(re, s, n);
+ CLOSE_READER(re, s);
+
+ return tmp;
+}
+
+/**
+ * Show 1-25 bits.
+ */
+static inline u32 show_bits(struct get_bits_context *s, int n)
+{
+ register u32 tmp;
+
+ OPEN_READER_NOSIZE(re, s);
+ UPDATE_CACHE(re, s);
+ tmp = SHOW_UBITS(re, s, n);
+
+ return tmp;
+}
+
+static inline void skip_bits(struct get_bits_context *s, int n)
+{
+ u32 re_index = s->index;
+ LAST_SKIP_BITS(re, s, n);
+ CLOSE_READER(re, s);
+}
+
+static inline u32 get_bits1(struct get_bits_context *s)
+{
+ u32 index = s->index;
+ u8 result = s->buffer[index >> 3];
+
+#ifdef BITSTREAM_READER_LE
+ result >>= index & 7;
+ result &= 1;
+#else
+ result <<= index & 7;
+ result >>= 8 - 1;
+#endif
+
+ index++;
+ s->index = index;
+
+ return result;
+}
+
+static inline u32 show_bits1(struct get_bits_context *s)
+{
+ return show_bits(s, 1);
+}
+
+static inline void skip_bits1(struct get_bits_context *s)
+{
+ skip_bits(s, 1);
+}
+
+/**
+ * Read 0-32 bits.
+ * NOTE(review): MIN_CACHE_BITS is 64 (common.h) while the cache is
+ * 32 bits wide, so this routes n up to 64 into get_bits(), which is
+ * documented for 1-25 bits only — confirm against FFmpeg, where the
+ * threshold for the split path is the real cache capacity.
+ */
+static inline u32 get_bits_long(struct get_bits_context *s, int n)
+{
+ if (!n) {
+ return 0;
+ } else if (n <= MIN_CACHE_BITS) {
+ return get_bits(s, n);
+ } else {
+#ifdef BITSTREAM_READER_LE
+ u32 ret = get_bits(s, 16);
+ return ret | (get_bits(s, n - 16) << 16);
+#else
+ u32 ret = get_bits(s, 16) << (n - 16);
+ return ret | get_bits(s, n - 16);
+#endif
+ }
+}
+
+/**
+ * Read 0-64 bits.
+ */
+static inline u64 get_bits64(struct get_bits_context *s, int n)
+{
+ if (n <= 32) {
+ return get_bits_long(s, n);
+ } else {
+#ifdef BITSTREAM_READER_LE
+ u64 ret = get_bits_long(s, 32);
+ return ret | (u64) get_bits_long(s, n - 32) << 32;
+#else
+ u64 ret = (u64) get_bits_long(s, n - 32) << 32;
+ return ret | get_bits_long(s, 32);
+#endif
+ }
+}
+
+/**
+ * Read 0-32 bits as a signed integer.
+ */
+static inline int get_sbits_long(struct get_bits_context *s, int n)
+{
+ if (!n)
+ return 0;
+
+ return sign_extend(get_bits_long(s, n), n);
+}
+
+/**
+ * Show 0-32 bits without consuming them (wide reads peek via a copy of
+ * the reader state).
+ */
+static inline u32 show_bits_long(struct get_bits_context *s, int n)
+{
+ if (n <= MIN_CACHE_BITS) {
+ return show_bits(s, n);
+ } else {
+ struct get_bits_context gb = *s;
+
+ return get_bits_long(&gb, n);
+ }
+}
+
+/* Consume one marker bit; log (but do not fail) if it is not set. */
+static inline int check_marker(struct get_bits_context *s, const char *msg)
+{
+ int bit = get_bits1(s);
+
+ if (!bit)
+ pr_err("Marker bit missing at %d of %d %s\n",
+ get_bits_count(s) - 1, s->size_in_bits, msg);
+ return bit;
+}
+
+/* Core initializer shared by the LE/BE wrappers below. On a bad size or
+ * NULL buffer the reader is set up empty and -1 is returned; the is_le
+ * flag is currently unused (endianness is fixed at compile time via
+ * BITSTREAM_READER_LE). */
+static inline int init_get_bits_xe(struct get_bits_context *s,
+ const u8 *buffer, int bit_size, int is_le)
+{
+ int buffer_size;
+ int ret = 0;
+
+ if (bit_size >= INT_MAX - FFMAX(7, AV_INPUT_BUFFER_PADDING_SIZE * 8) ||
+ bit_size < 0 || !buffer) {
+ bit_size = 0;
+ buffer = NULL;
+ ret = -1;
+ }
+
+ buffer_size = (bit_size + 7) >> 3;
+
+ s->buffer = buffer;
+ s->size_in_bits = bit_size;
+ s->size_in_bits_plus8 = bit_size + 8;
+ s->buffer_end = buffer + buffer_size;
+ s->index = 0;
+
+ return ret;
+}
+
+/**
+ * Initialize struct get_bits_context.
+ * @param buffer bitstream buffer, must be AV_INPUT_BUFFER_PADDING_SIZE bytes
+ * larger than the actual read bits because some optimized bitstream
+ * readers read 32 or 64 bit at once and could read over the end
+ * @param bit_size the size of the buffer in bits
+ * @return 0 on success, -1 if the buffer_size would overflow.
+ */
+static inline int init_get_bits(struct get_bits_context *s,
+ const u8 *buffer, int bit_size)
+{
+#ifdef BITSTREAM_READER_LE
+ return init_get_bits_xe(s, buffer, bit_size, 1);
+#else
+ return init_get_bits_xe(s, buffer, bit_size, 0);
+#endif
+}
+
+/**
+ * Initialize struct get_bits_context.
+ * @param buffer bitstream buffer, must be AV_INPUT_BUFFER_PADDING_SIZE bytes
+ * larger than the actual read bits because some optimized bitstream
+ * readers read 32 or 64 bit at once and could read over the end
+ * @param byte_size the size of the buffer in bytes
+ * @return 0 on success, -1 if the buffer_size would overflow.
+ */
+static inline int init_get_bits8(struct get_bits_context *s,
+ const u8 *buffer, int byte_size)
+{
+ if (byte_size > INT_MAX / 8 || byte_size < 0)
+ byte_size = -1;
+ return init_get_bits(s, buffer, byte_size * 8);
+}
+
+static inline int init_get_bits8_le(struct get_bits_context *s,
+ const u8 *buffer, int byte_size)
+{
+ if (byte_size > INT_MAX / 8 || byte_size < 0)
+ byte_size = -1;
+ return init_get_bits_xe(s, buffer, byte_size * 8, 1);
+}
+
+static inline const u8 *align_get_bits(struct get_bits_context *s)
+{
+ int n = -get_bits_count(s) & 7;
+
+ if (n)
+ skip_bits(s, n);
+ return s->buffer + (s->index >> 3);
+}
+
+/**
+ * If the vlc code is invalid and max_depth=1, then no bits will be removed.
+ * If the vlc code is invalid and max_depth>1, then the number of bits removed
+ * is undefined.
+ */
+#define GET_VLC(code, name, gb, table, bits, max_depth) \
+ do { \
+ int n, nb_bits; \
+ u32 index; \
+ \
+ index = SHOW_UBITS(name, gb, bits); \
+ code = table[index][0]; \
+ n = table[index][1]; \
+ \
+ if (max_depth > 1 && n < 0) { \
+ LAST_SKIP_BITS(name, gb, bits); \
+ UPDATE_CACHE(name, gb); \
+ \
+ nb_bits = -n; \
+ \
+ index = SHOW_UBITS(name, gb, nb_bits) + code; \
+ code = table[index][0]; \
+ n = table[index][1]; \
+ if (max_depth > 2 && n < 0) { \
+ LAST_SKIP_BITS(name, gb, nb_bits); \
+ UPDATE_CACHE(name, gb); \
+ \
+ nb_bits = -n; \
+ \
+ index = SHOW_UBITS(name, gb, nb_bits) + code; \
+ code = table[index][0]; \
+ n = table[index][1]; \
+ } \
+ } \
+ SKIP_BITS(name, gb, n); \
+ } while (0)
+
+#define GET_RL_VLC(level, run, name, gb, table, bits, \
+ max_depth, need_update) \
+ do { \
+ int n, nb_bits; \
+ u32 index; \
+ \
+ index = SHOW_UBITS(name, gb, bits); \
+ level = table[index].level; \
+ n = table[index].len; \
+ \
+ if (max_depth > 1 && n < 0) { \
+ SKIP_BITS(name, gb, bits); \
+ if (need_update) { \
+ UPDATE_CACHE(name, gb); \
+ } \
+ \
+ nb_bits = -n; \
+ \
+ index = SHOW_UBITS(name, gb, nb_bits) + level; \
+ level = table[index].level; \
+ n = table[index].len; \
+ if (max_depth > 2 && n < 0) { \
+ LAST_SKIP_BITS(name, gb, nb_bits); \
+ if (need_update) { \
+ UPDATE_CACHE(name, gb); \
+ } \
+ nb_bits = -n; \
+ \
+ index = SHOW_UBITS(name, gb, nb_bits) + level; \
+ level = table[index].level; \
+ n = table[index].len; \
+ } \
+ } \
+ run = table[index].run; \
+ SKIP_BITS(name, gb, n); \
+ } while (0)
+
+/* Return the LUT element for the given bitstream configuration. */
+static inline int set_idx(struct get_bits_context *s,
+ int code, int *n, int *nb_bits, int (*table)[2])
+{
+ u32 idx;
+
+ *nb_bits = -*n;
+ idx = show_bits(s, *nb_bits) + code;
+ *n = table[idx][1];
+
+ return table[idx][0];
+}
+
+/**
+ * Parse a vlc code.
+ * @param table LUT of {code, len} pairs as built by init_vlc()
+ * @param bits is the number of bits which will be read at once, must be
+ * identical to nb_bits in init_vlc()
+ * @param max_depth is the number of times bits bits must be read to completely
+ * read the longest vlc code
+ * = (max_vlc_length + bits - 1) / bits
+ * @returns the code parsed or -1 if no vlc matches
+ */
+static inline int get_vlc2(struct get_bits_context *s,
+ int (*table)[2], int bits, int max_depth)
+{
+ int code;
+
+ /* open a local reader, decode via the shared GET_VLC macro, then
+ * commit the consumed bit count back to the context */
+ OPEN_READER(re, s);
+ UPDATE_CACHE(re, s);
+
+ GET_VLC(code, re, s, table, bits, max_depth);
+
+ CLOSE_READER(re, s);
+
+ return code;
+}
+
+/* Decode the unary-ish code "0"->0, "10"->1, "11"->2 (1 or 2 bits). */
+static inline int decode012(struct get_bits_context *gb)
+{
+ int n;
+
+ n = get_bits1(gb);
+ if (n == 0)
+ return 0;
+ else
+ return get_bits1(gb) + 1;
+}
+
+/* Decode the complementary code "1"->0, "00"->2, "01"->1 (1 or 2 bits). */
+static inline int decode210(struct get_bits_context *gb)
+{
+ if (get_bits1(gb))
+ return 0;
+ else
+ return 2 - get_bits1(gb);
+}
+
+/* Number of bits left to read; may go negative after over-reads. */
+static inline int get_bits_left(struct get_bits_context *gb)
+{
+ return gb->size_in_bits - get_bits_count(gb);
+}
+
+/*
+ * Skip a sequence of (1 stop-bit + 8 data bits) groups until a 0
+ * stop-bit is read. Returns 0 on success, -1 if the bitstream is
+ * exhausted before the terminating 0 bit.
+ */
+static inline int skip_1stop_8data_bits(struct get_bits_context *gb)
+{
+ if (get_bits_left(gb) <= 0)
+ return -1;
+
+ while (get_bits1(gb)) {
+ skip_bits(gb, 8);
+ if (get_bits_left(gb) <= 0)
+ return -1;
+ }
+
+ return 0;
+}
+
+#endif /* AVCODEC_GET_BITS_H */
+
--- /dev/null
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+/* Code length (in bits) for each 9-bit prefix of a short ue(v) code. */
+const u8 ff_golomb_vlc_len[512]={
+ 19,17,15,15,13,13,13,13,11,11,11,11,11,11,11,11,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
+ 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
+ 5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
+ 5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
+ 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
+ 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
+ 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
+ 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
+ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1
+};
+
+/* Decoded ue(v) value for each 9-bit prefix (32 = invalid/too long). */
+const u8 ff_ue_golomb_vlc_code[512]={
+ 32,32,32,32,32,32,32,32,31,32,32,32,32,32,32,32,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,
+ 7, 7, 7, 7, 8, 8, 8, 8, 9, 9, 9, 9,10,10,10,10,11,11,11,11,12,12,12,12,13,13,13,13,14,14,14,14,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+/*
+ * Decoded se(v) value for each 9-bit prefix. Must be s8 (not plain
+ * char): the table holds negative codes and the kernel builds arm/arm64
+ * with unsigned plain char, which would turn e.g. -8 into 248.
+ */
+const s8 ff_se_golomb_vlc_code[512]={
+ 17, 17, 17, 17, 17, 17, 17, 17, 16, 17, 17, 17, 17, 17, 17, 17, 8, -8, 9, -9, 10,-10, 11,-11, 12,-12, 13,-13, 14,-14, 15,-15,
+ 4, 4, 4, 4, -4, -4, -4, -4, 5, 5, 5, 5, -5, -5, -5, -5, 6, 6, 6, 6, -6, -6, -6, -6, 7, 7, 7, 7, -7, -7, -7, -7,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+};
+
+/* Encoded ue(v) length in bits for values 0..255 (writer side). */
+const u8 ff_ue_golomb_len[256]={
+ 1, 3, 3, 5, 5, 5, 5, 7, 7, 7, 7, 7, 7, 7, 7, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,11,
+ 11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,13,
+ 13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,
+ 13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,15,
+ 15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,
+ 15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,
+ 15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,
+ 15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,17,
+};
+
+/* Interleaved (Dirac-style) Exp-Golomb: code length per 8-bit prefix. */
+const u8 ff_interleaved_golomb_vlc_len[256]={
+ 9,9,7,7,9,9,7,7,5,5,5,5,5,5,5,5,
+ 9,9,7,7,9,9,7,7,5,5,5,5,5,5,5,5,
+ 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
+ 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
+ 9,9,7,7,9,9,7,7,5,5,5,5,5,5,5,5,
+ 9,9,7,7,9,9,7,7,5,5,5,5,5,5,5,5,
+ 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
+ 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
+ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+};
+
+/* Interleaved Exp-Golomb: decoded ue(v) value per 8-bit prefix. */
+const u8 ff_interleaved_ue_golomb_vlc_code[256]={
+ 15,16,7, 7, 17,18,8, 8, 3, 3, 3, 3, 3, 3, 3, 3,
+ 19,20,9, 9, 21,22,10,10,4, 4, 4, 4, 4, 4, 4, 4,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 23,24,11,11,25,26,12,12,5, 5, 5, 5, 5, 5, 5, 5,
+ 27,28,13,13,29,30,14,14,6, 6, 6, 6, 6, 6, 6, 6,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+};
+
+/* Interleaved se(v) values; s8 for the same char-signedness reason. */
+const s8 ff_interleaved_se_golomb_vlc_code[256]={
+ 8, -8, 4, 4, 9, -9, -4, -4, 2, 2, 2, 2, 2, 2, 2, 2,
+ 10,-10, 5, 5, 11,-11, -5, -5, -2, -2, -2, -2, -2, -2, -2, -2,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 12,-12, 6, 6, 13,-13, -6, -6, 3, 3, 3, 3, 3, 3, 3, 3,
+ 14,-14, 7, 7, 15,-15, -7, -7, -3, -3, -3, -3, -3, -3, -3, -3,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+};
+
+/* Dirac-specific value mapping for the interleaved escape path. */
+const u8 ff_interleaved_dirac_golomb_vlc_code[256]={
+ 0, 1, 0, 0, 2, 3, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0,
+ 4, 5, 2, 2, 6, 7, 3, 3, 1, 1, 1, 1, 1, 1, 1, 1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 8, 9, 4, 4, 10,11,5, 5, 2, 2, 2, 2, 2, 2, 2, 2,
+ 12,13,6, 6, 14,15,7, 7, 3, 3, 3, 3, 3, 3, 3, 3,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+};
+
+
--- /dev/null
+#ifndef AVCODEC_GOLOMB_H
+#define AVCODEC_GOLOMB_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include "get_bits.h"
+#include "put_bits.h"
+#include "common.h"
+
+/* Sentinel returned by readers when no valid VLC matches. */
+#define INVALID_VLC 0x80000000
+
+extern const u8 ff_golomb_vlc_len[512];
+extern const u8 ff_ue_golomb_vlc_code[512];
+/* s8, not plain char: these tables hold negative codes and the kernel
+ * builds arm/arm64 with unsigned plain char, which would corrupt the
+ * signed se(v) values. */
+extern const s8 ff_se_golomb_vlc_code[512];
+extern const u8 ff_ue_golomb_len[256];
+
+extern const u8 ff_interleaved_golomb_vlc_len[256];
+extern const u8 ff_interleaved_ue_golomb_vlc_code[256];
+extern const s8 ff_interleaved_se_golomb_vlc_code[256];
+extern const u8 ff_interleaved_dirac_golomb_vlc_code[256];
+
+/**
+ * Read an u32 Exp-Golomb code in the range 0 to 8190.
+ *
+ * @returns the read value or a negative error code.
+ */
+static inline int get_ue_golomb(struct get_bits_context *gb)
+{
+ u32 buf;
+
+ OPEN_READER(re, gb);
+ UPDATE_CACHE(re, gb);
+ buf = GET_CACHE(re, gb);
+
+ if (buf >= (1 << 27)) {
+ /* short code (at most 4 leading zeros): 9-bit LUT fast path */
+ buf >>= 32 - 9;
+ LAST_SKIP_BITS(re, gb, ff_golomb_vlc_len[buf]);
+ CLOSE_READER(re, gb);
+
+ return ff_ue_golomb_vlc_code[buf];
+ } else {
+ /* long code: total length is 2*leading_zeros+1 bits; log < 7
+ * means it does not fit the 32-bit cache -> reject */
+ int log = 2 * av_log2(buf) - 31;
+ LAST_SKIP_BITS(re, gb, 32 - log);
+ CLOSE_READER(re, gb);
+ if (log < 7) {
+ pr_err("Invalid UE golomb code\n");
+ return -1;
+ }
+ buf >>= log;
+ buf--;
+
+ return buf;
+ }
+}
+
+/**
+ * Read an u32 Exp-Golomb code in the range 0 to UINT_MAX-1.
+ * No validity checking: a malformed all-zero prefix yields an
+ * arbitrary value rather than an error.
+ */
+static inline u32 get_ue_golomb_long(struct get_bits_context *gb)
+{
+ u32 buf, log;
+
+ buf = show_bits_long(gb, 32);
+ log = 31 - av_log2(buf);
+ skip_bits_long(gb, log);
+
+ return get_bits_long(gb, log + 1) - 1;
+}
+
+/**
+ * read u32 exp golomb code, constraint to a max of 31.
+ * the return value is undefined if the stored value exceeds 31.
+ * (Unconditional 9-bit LUT lookup; no long-code path.)
+ */
+static inline int get_ue_golomb_31(struct get_bits_context *gb)
+{
+ u32 buf;
+
+ OPEN_READER(re, gb);
+ UPDATE_CACHE(re, gb);
+ buf = GET_CACHE(re, gb);
+
+ buf >>= 32 - 9;
+ LAST_SKIP_BITS(re, gb, ff_golomb_vlc_len[buf]);
+ CLOSE_READER(re, gb);
+
+ return ff_ue_golomb_vlc_code[buf];
+}
+
+/*
+ * Read an interleaved (Dirac-style) unsigned Exp-Golomb code.
+ * Short codes resolve through the 8-bit LUT; longer codes loop,
+ * accumulating 4 value bits per 8-bit chunk until a terminator.
+ */
+static inline u32 get_interleaved_ue_golomb(struct get_bits_context *gb)
+{
+ u32 buf;
+
+ OPEN_READER(re, gb);
+ UPDATE_CACHE(re, gb);
+ buf = GET_CACHE(re, gb);
+
+ if (buf & 0xAA800000) {
+ /* a stop bit appears within the first 8 cached bits */
+ buf >>= 32 - 8;
+ LAST_SKIP_BITS(re, gb, ff_interleaved_golomb_vlc_len[buf]);
+ CLOSE_READER(re, gb);
+
+ return ff_interleaved_ue_golomb_vlc_code[buf];
+ } else {
+ u32 ret = 1;
+
+ do {
+ buf >>= 32 - 8;
+ LAST_SKIP_BITS(re, gb,
+ FFMIN(ff_interleaved_golomb_vlc_len[buf], 8));
+
+ if (ff_interleaved_golomb_vlc_len[buf] != 9) {
+ /* terminator found in this chunk */
+ ret <<= (ff_interleaved_golomb_vlc_len[buf] - 1) >> 1;
+ ret |= ff_interleaved_dirac_golomb_vlc_code[buf];
+ break;
+ }
+ ret = (ret << 4) | ff_interleaved_dirac_golomb_vlc_code[buf];
+ UPDATE_CACHE(re, gb);
+ buf = GET_CACHE(re, gb);
+ } while (ret<0x8000000U && BITS_AVAILABLE(re, gb));
+
+ CLOSE_READER(re, gb);
+ return ret - 1;
+ }
+}
+
+/**
+ * read u32 truncated exp golomb code.
+ * @param range number of possible values; range==1 consumes no bits,
+ * range==2 is a single inverted bit, otherwise a full ue(v) code.
+ */
+static inline int get_te0_golomb(struct get_bits_context *gb, int range)
+{
+ if (range == 1)
+ return 0;
+ else if (range == 2)
+ return get_bits1(gb) ^ 1;
+ else
+ return get_ue_golomb(gb);
+}
+
+/**
+ * read u32 truncated exp golomb code.
+ * Same as get_te0_golomb() but without the range==1 shortcut.
+ */
+static inline int get_te_golomb(struct get_bits_context *gb, int range)
+{
+ if (range == 2)
+ return get_bits1(gb) ^ 1;
+ else
+ return get_ue_golomb(gb);
+}
+
+/**
+ * read signed exp golomb code.
+ * Maps ue(v) k to se(v): odd k -> (k+1)/2, even k -> -k/2.
+ */
+static inline int get_se_golomb(struct get_bits_context *gb)
+{
+ u32 buf;
+
+ OPEN_READER(re, gb);
+ UPDATE_CACHE(re, gb);
+ buf = GET_CACHE(re, gb);
+
+ if (buf >= (1 << 27)) {
+ /* short code: signed-value LUT fast path */
+ buf >>= 32 - 9;
+ LAST_SKIP_BITS(re, gb, ff_golomb_vlc_len[buf]);
+ CLOSE_READER(re, gb);
+
+ return ff_se_golomb_vlc_code[buf];
+ } else {
+ /* long code: read prefix and suffix, then zig-zag decode */
+ int log = av_log2(buf), sign;
+ LAST_SKIP_BITS(re, gb, 31 - log);
+ UPDATE_CACHE(re, gb);
+ buf = GET_CACHE(re, gb);
+
+ buf >>= log;
+
+ LAST_SKIP_BITS(re, gb, 32 - log);
+ CLOSE_READER(re, gb);
+
+ sign = -(buf & 1);
+ buf = ((buf >> 1) ^ sign) - sign;
+
+ return buf;
+ }
+}
+
+/* Signed Exp-Golomb via the unchecked long ue(v) reader (zig-zag map). */
+static inline int get_se_golomb_long(struct get_bits_context *gb)
+{
+ u32 buf = get_ue_golomb_long(gb);
+ int sign = (buf & 1) - 1;
+
+ return ((buf >> 1) ^ sign) + 1;
+}
+
+/*
+ * Read an interleaved signed Exp-Golomb code.
+ * Returns INVALID_VLC when no terminating stop bit is found.
+ * The long-code branch de-interleaves value/stop bits arithmetically;
+ * kept byte-for-byte from the reference implementation.
+ */
+static inline int get_interleaved_se_golomb(struct get_bits_context *gb)
+{
+ u32 buf;
+
+ OPEN_READER(re, gb);
+ UPDATE_CACHE(re, gb);
+ buf = GET_CACHE(re, gb);
+
+ if (buf & 0xAA800000) {
+ buf >>= 32 - 8;
+ LAST_SKIP_BITS(re, gb, ff_interleaved_golomb_vlc_len[buf]);
+ CLOSE_READER(re, gb);
+
+ return ff_interleaved_se_golomb_vlc_code[buf];
+ } else {
+ int log;
+ LAST_SKIP_BITS(re, gb, 8);
+ UPDATE_CACHE(re, gb);
+ buf |= 1 | (GET_CACHE(re, gb) >> 8);
+
+ if ((buf & 0xAAAAAAAA) == 0)
+ return INVALID_VLC;
+
+ for (log = 31; (buf & 0x80000000) == 0; log--)
+ buf = (buf << 2) - ((buf << log) >> (log - 1)) + (buf >> 30);
+
+ LAST_SKIP_BITS(re, gb, 63 - 2 * log - 8);
+ CLOSE_READER(re, gb);
+ return (signed) (((((buf << log) >> log) - 1) ^ -(buf & 0x1)) + 1) >> 1;
+ }
+}
+
+/* Dirac signed code: unsigned magnitude followed by a sign bit
+ * (sign bit only present when the magnitude is non-zero). */
+static inline int dirac_get_se_golomb(struct get_bits_context *gb)
+{
+ u32 ret = get_interleaved_ue_golomb(gb);
+
+ if (ret) {
+ int sign = -get_bits1(gb);
+ ret = (ret ^ sign) - sign;
+ }
+
+ return ret;
+}
+
+/**
+ * read u32 golomb rice code (ffv1).
+ * @param k Rice parameter (number of raw suffix bits)
+ * @param limit maximum unary prefix length before the escape path
+ * @param esc_len raw bit count used by the escape encoding
+ */
+static inline int get_ur_golomb(struct get_bits_context *gb,
+ int k, int limit, int esc_len)
+{
+ u32 buf;
+ int log;
+
+ OPEN_READER(re, gb);
+ UPDATE_CACHE(re, gb);
+ buf = GET_CACHE(re, gb);
+
+ log = av_log2(buf);
+
+ if (log > 31 - limit) {
+ /* normal case: unary prefix + k suffix bits, all in cache */
+ buf >>= log - k;
+ buf += (30U - log) << k;
+ LAST_SKIP_BITS(re, gb, 32 + k - log);
+ CLOSE_READER(re, gb);
+
+ return buf;
+ } else {
+ /* escape: skip 'limit' bits, read esc_len raw value bits */
+ LAST_SKIP_BITS(re, gb, limit);
+ UPDATE_CACHE(re, gb);
+
+ buf = SHOW_UBITS(re, gb, esc_len);
+
+ LAST_SKIP_BITS(re, gb, esc_len);
+ CLOSE_READER(re, gb);
+
+ return buf + limit - 1;
+ }
+}
+
+/**
+ * read u32 golomb rice code (jpegls).
+ * Returns -1 if the bitstream ends mid-code or no stop bit is found
+ * within 'limit' prefix bits (and the escape also fails).
+ */
+static inline int get_ur_golomb_jpegls(struct get_bits_context *gb,
+ int k, int limit, int esc_len)
+{
+ u32 buf;
+ int log;
+
+ OPEN_READER(re, gb);
+ UPDATE_CACHE(re, gb);
+ buf = GET_CACHE(re, gb);
+
+ log = av_log2(buf);
+
+ if (log - k >= 32 - MIN_CACHE_BITS + (MIN_CACHE_BITS == 32) &&
+ 32 - log < limit) {
+ /* whole code fits in the cache: prefix + k suffix bits */
+ buf >>= log - k;
+ buf += (30U - log) << k;
+ LAST_SKIP_BITS(re, gb, 32 + k - log);
+ CLOSE_READER(re, gb);
+
+ return buf;
+ } else {
+ int i;
+ /* count the unary prefix cache-chunk by cache-chunk */
+ for (i = 0; i + MIN_CACHE_BITS <= limit && SHOW_UBITS(re, gb, MIN_CACHE_BITS) == 0; i += MIN_CACHE_BITS) {
+ if (gb->size_in_bits <= re_index) {
+ CLOSE_READER(re, gb);
+ return -1;
+ }
+ LAST_SKIP_BITS(re, gb, MIN_CACHE_BITS);
+ UPDATE_CACHE(re, gb);
+ }
+ for (; i < limit && SHOW_UBITS(re, gb, 1) == 0; i++) {
+ SKIP_BITS(re, gb, 1);
+ }
+ LAST_SKIP_BITS(re, gb, 1);
+ UPDATE_CACHE(re, gb);
+
+ if (i < limit - 1) {
+ /* regular code: read the k suffix bits (split read when
+ * k exceeds what one cache refill can show) */
+ if (k) {
+ if (k > MIN_CACHE_BITS - 1) {
+ buf = SHOW_UBITS(re, gb, 16) << (k-16);
+ LAST_SKIP_BITS(re, gb, 16);
+ UPDATE_CACHE(re, gb);
+ buf |= SHOW_UBITS(re, gb, k-16);
+ LAST_SKIP_BITS(re, gb, k-16);
+ } else {
+ buf = SHOW_UBITS(re, gb, k);
+ LAST_SKIP_BITS(re, gb, k);
+ }
+ } else {
+ buf = 0;
+ }
+ buf += ((u32)i << k);
+ } else if (i == limit - 1) {
+ /* escape encoding: esc_len raw bits, offset by one */
+ buf = SHOW_UBITS(re, gb, esc_len);
+ LAST_SKIP_BITS(re, gb, esc_len);
+
+ buf ++;
+ } else {
+ buf = -1;
+ }
+ CLOSE_READER(re, gb);
+ return buf;
+ }
+}
+
+/**
+ * read signed golomb rice code (ffv1).
+ * Unsigned Rice value zig-zag mapped to signed.
+ */
+static inline int get_sr_golomb(struct get_bits_context *gb,
+ int k, int limit, int esc_len)
+{
+ u32 v = get_ur_golomb(gb, k, limit, esc_len);
+
+ return (v >> 1) ^ -(v & 1);
+}
+
+/**
+ * read signed golomb rice code (flac).
+ * Same zig-zag mapping, but over the jpegls-style unsigned reader.
+ */
+static inline int get_sr_golomb_flac(struct get_bits_context *gb,
+ int k, int limit, int esc_len)
+{
+ u32 v = get_ur_golomb_jpegls(gb, k, limit, esc_len);
+
+ return (v >> 1) ^ -(v & 1);
+}
+
+/**
+ * read u32 golomb rice code (shorten).
+ * jpegls reader with no prefix limit and no escape bits.
+ */
+static inline u32 get_ur_golomb_shorten(struct get_bits_context *gb, int k)
+{
+ return get_ur_golomb_jpegls(gb, k, INT_MAX, 0);
+}
+
+/**
+ * read signed golomb rice code (shorten).
+ * Uses k+1 so the extra low bit carries the sign (zig-zag).
+ */
+static inline int get_sr_golomb_shorten(struct get_bits_context *gb, int k)
+{
+ int uvar = get_ur_golomb_jpegls(gb, k + 1, INT_MAX, 0);
+
+ return (uvar >> 1) ^ -(uvar & 1);
+}
+
+/**
+ * write u32 exp golomb code. 2^16 - 2 at most
+ * Small values use the precomputed length LUT; larger ones are
+ * emitted as a 2*log2+1 bit code directly.
+ */
+static inline void set_ue_golomb(struct put_bits_context *pb, int i)
+{
+ if (i < 256)
+ put_bits(pb, ff_ue_golomb_len[i], i + 1);
+ else {
+ int e = av_log2(i + 1);
+ put_bits(pb, 2 * e + 1, i + 1);
+ }
+}
+
+/**
+ * write u32 exp golomb code. 2^32-2 at most.
+ * Uses the 64-bit writer for codes longer than 32 bits.
+ */
+static inline void set_ue_golomb_long(struct put_bits_context *pb, u32 i)
+{
+ if (i < 256)
+ put_bits(pb, ff_ue_golomb_len[i], i + 1);
+ else {
+ int e = av_log2(i + 1);
+ put_bits64(pb, 2 * e + 1, i + 1);
+ }
+}
+
+/**
+ * write truncated u32 exp golomb code.
+ * range==2 collapses to a single inverted bit (mirrors get_te_golomb).
+ */
+static inline void set_te_golomb(struct put_bits_context *pb, int i, int range)
+{
+ if (range == 2)
+ put_bits(pb, 1, i ^ 1);
+ else
+ set_ue_golomb(pb, i);
+}
+
+/**
+ * write signed exp golomb code. 16 bits at most.
+ * Maps signed i to the unsigned ue(v) index: i>0 -> 2i-1, i<=0 -> -2i.
+ */
+static inline void set_se_golomb(struct put_bits_context *pb, int i)
+{
+ i = 2 * i - 1;
+
+ if (i < 0)
+ i ^= -1; //FIXME check if gcc does the right thing
+ set_ue_golomb(pb, i);
+}
+
+/**
+ * write u32 golomb rice code (ffv1).
+ * Quotient below 'limit': unary prefix + k suffix bits; otherwise the
+ * escape form: 'limit' zeros then the value in esc_len raw bits.
+ */
+static inline void set_ur_golomb(struct put_bits_context *pb, int i, int k, int limit,
+ int esc_len)
+{
+ int e;
+
+ e = i >> k;
+ if (e < limit)
+ put_bits(pb, e + k + 1, (1 << k) + av_mod_uintp2(i, k));
+ else
+ put_bits(pb, limit + esc_len, i - limit + 1);
+}
+
+/**
+ * write u32 golomb rice code (jpegls).
+ * The 31-bit chunking works around the writer's per-call width limit.
+ */
+static inline void set_ur_golomb_jpegls(struct put_bits_context *pb,
+ int i, int k, int limit, int esc_len)
+{
+ int e;
+
+ e = (i >> k) + 1;
+ if (e < limit) {
+ while (e > 31) {
+ put_bits(pb, 31, 0);
+ e -= 31;
+ }
+ put_bits(pb, e, 1);
+ if (k)
+ put_sbits(pb, k, i);
+ } else {
+ while (limit > 31) {
+ put_bits(pb, 31, 0);
+ limit -= 31;
+ }
+ put_bits(pb, limit, 1);
+ put_bits(pb, esc_len, i - 1);
+ }
+}
+
+/**
+ * write signed golomb rice code (ffv1).
+ * Zig-zag maps signed i to unsigned v before the Rice writer.
+ */
+static inline void set_sr_golomb(struct put_bits_context *pb,
+ int i, int k, int limit, int esc_len)
+{
+ int v;
+
+ v = -2 * i - 1;
+ v ^= (v >> 31);
+
+ set_ur_golomb(pb, v, k, limit, esc_len);
+}
+
+/**
+ * write signed golomb rice code (flac).
+ * Same zig-zag mapping over the jpegls-style writer.
+ */
+static inline void set_sr_golomb_flac(struct put_bits_context *pb,
+ int i, int k, int limit, int esc_len)
+{
+ int v;
+
+ v = -2 * i - 1;
+ v ^= (v >> 31);
+
+ set_ur_golomb_jpegls(pb, v, k, limit, esc_len);
+}
+
+#endif /* AVCODEC_GOLOMB_H */
--- /dev/null
+#ifndef AVUTIL_PIXFMT_H
+#define AVUTIL_PIXFMT_H
+
+/**
+ * Pixel format.
+ *
+ * @note
+ * AV_PIX_FMT_RGB32 is handled in an endian-specific manner. An RGBA
+ * color is put together as:
+ * (A << 24) | (R << 16) | (G << 8) | B
+ * This is stored as BGRA on little-endian CPU architectures and ARGB on
+ * big-endian CPUs.
+ *
+ * @note
+ * If the resolution is not a multiple of the chroma subsampling factor
+ * then the chroma plane resolution must be rounded up.
+ *
+ * @par
+ * When the pixel format is palettized RGB32 (AV_PIX_FMT_PAL8), the palettized
+ * image data is stored in AVFrame.data[0]. The palette is transported in
+ * AVFrame.data[1], is 1024 bytes long (256 4-byte entries) and is
+ * formatted the same as in AV_PIX_FMT_RGB32 described above (i.e., it is
+ * also endian-specific). Note also that the individual RGB32 palette
+ * components stored in AVFrame.data[1] should be in the range 0..255.
+ * This is important as many custom PAL8 video codecs that were designed
+ * to run on the IBM VGA graphics adapter use 6-bit palette components.
+ *
+ * @par
+ * For all the 8 bits per pixel formats, an RGB32 palette is in data[1] like
+ * for pal8. This palette is filled in automatically by the function
+ * allocating the picture.
+ */
+enum AVPixelFormat {
+ AV_PIX_FMT_NONE = -1,
+ AV_PIX_FMT_YUV420P, ///< planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
+ AV_PIX_FMT_YUYV422, ///< packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
+ AV_PIX_FMT_RGB24, ///< packed RGB 8:8:8, 24bpp, RGBRGB...
+ AV_PIX_FMT_BGR24, ///< packed RGB 8:8:8, 24bpp, BGRBGR...
+ AV_PIX_FMT_YUV422P, ///< planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
+ AV_PIX_FMT_YUV444P, ///< planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
+ AV_PIX_FMT_YUV410P, ///< planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
+ AV_PIX_FMT_YUV411P, ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
+ AV_PIX_FMT_GRAY8, ///< Y , 8bpp
+ AV_PIX_FMT_MONOWHITE, ///< Y , 1bpp, 0 is white, 1 is black, in each byte pixels are ordered from the msb to the lsb
+ AV_PIX_FMT_MONOBLACK, ///< Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb
+ AV_PIX_FMT_PAL8, ///< 8 bits with AV_PIX_FMT_RGB32 palette
+ AV_PIX_FMT_YUVJ420P, ///< planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting color_range
+ AV_PIX_FMT_YUVJ422P, ///< planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting color_range
+ AV_PIX_FMT_YUVJ444P, ///< planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting color_range
+ AV_PIX_FMT_UYVY422, ///< packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
+ AV_PIX_FMT_UYYVYY411, ///< packed YUV 4:1:1, 12bpp, Cb Y0 Y1 Cr Y2 Y3
+ AV_PIX_FMT_BGR8, ///< packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb)
+ AV_PIX_FMT_BGR4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1B 2G 1R(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits
+ AV_PIX_FMT_BGR4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1B 2G 1R(lsb)
+ AV_PIX_FMT_RGB8, ///< packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb)
+ AV_PIX_FMT_RGB4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1R 2G 1B(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits
+ AV_PIX_FMT_RGB4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1R 2G 1B(lsb)
+ AV_PIX_FMT_NV12, ///< planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V)
+ AV_PIX_FMT_NV21, ///< as above, but U and V bytes are swapped
+
+ AV_PIX_FMT_ARGB, ///< packed ARGB 8:8:8:8, 32bpp, ARGBARGB...
+ AV_PIX_FMT_RGBA, ///< packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
+ AV_PIX_FMT_ABGR, ///< packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
+ AV_PIX_FMT_BGRA, ///< packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
+
+ AV_PIX_FMT_GRAY16BE, ///< Y , 16bpp, big-endian
+ AV_PIX_FMT_GRAY16LE, ///< Y , 16bpp, little-endian
+ AV_PIX_FMT_YUV440P, ///< planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
+ AV_PIX_FMT_YUVJ440P, ///< planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range
+ AV_PIX_FMT_YUVA420P, ///< planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
+ AV_PIX_FMT_RGB48BE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big-endian
+ AV_PIX_FMT_RGB48LE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as little-endian
+
+ AV_PIX_FMT_RGB565BE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian
+ AV_PIX_FMT_RGB565LE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian
+ AV_PIX_FMT_RGB555BE, ///< packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), big-endian , X=unused/undefined
+ AV_PIX_FMT_RGB555LE, ///< packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), little-endian, X=unused/undefined
+
+ AV_PIX_FMT_BGR565BE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), big-endian
+ AV_PIX_FMT_BGR565LE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), little-endian
+ AV_PIX_FMT_BGR555BE, ///< packed BGR 5:5:5, 16bpp, (msb)1X 5B 5G 5R(lsb), big-endian , X=unused/undefined
+ AV_PIX_FMT_BGR555LE, ///< packed BGR 5:5:5, 16bpp, (msb)1X 5B 5G 5R(lsb), little-endian, X=unused/undefined
+
+#ifdef FF_API_VAAPI
+ /** @name Deprecated pixel formats */
+ /**@{*/
+ AV_PIX_FMT_VAAPI_MOCO, ///< HW acceleration through VA API at motion compensation entry-point, Picture.data[3] contains a vaapi_render_state struct which contains macroblocks as well as various fields extracted from headers
+ AV_PIX_FMT_VAAPI_IDCT, ///< HW acceleration through VA API at IDCT entry-point, Picture.data[3] contains a vaapi_render_state struct which contains fields extracted from headers
+ AV_PIX_FMT_VAAPI_VLD, ///< HW decoding through VA API, Picture.data[3] contains a VASurfaceID
+ /**@}*/
+ AV_PIX_FMT_VAAPI = AV_PIX_FMT_VAAPI_VLD,
+#else
+ /**
+ * Hardware acceleration through VA-API, data[3] contains a
+ * VASurfaceID.
+ */
+ AV_PIX_FMT_VAAPI,
+#endif
+
+ AV_PIX_FMT_YUV420P16LE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
+ AV_PIX_FMT_YUV420P16BE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
+ AV_PIX_FMT_YUV422P16LE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+ AV_PIX_FMT_YUV422P16BE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+ AV_PIX_FMT_YUV444P16LE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
+ AV_PIX_FMT_YUV444P16BE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
+ AV_PIX_FMT_DXVA2_VLD, ///< HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer
+
+ AV_PIX_FMT_RGB444LE, ///< packed RGB 4:4:4, 16bpp, (msb)4X 4R 4G 4B(lsb), little-endian, X=unused/undefined
+ AV_PIX_FMT_RGB444BE, ///< packed RGB 4:4:4, 16bpp, (msb)4X 4R 4G 4B(lsb), big-endian, X=unused/undefined
+ AV_PIX_FMT_BGR444LE, ///< packed BGR 4:4:4, 16bpp, (msb)4X 4B 4G 4R(lsb), little-endian, X=unused/undefined
+ AV_PIX_FMT_BGR444BE, ///< packed BGR 4:4:4, 16bpp, (msb)4X 4B 4G 4R(lsb), big-endian, X=unused/undefined
+ AV_PIX_FMT_YA8, ///< 8 bits gray, 8 bits alpha
+
+ AV_PIX_FMT_Y400A = AV_PIX_FMT_YA8, ///< alias for AV_PIX_FMT_YA8
+ AV_PIX_FMT_GRAY8A= AV_PIX_FMT_YA8, ///< alias for AV_PIX_FMT_YA8
+
+ AV_PIX_FMT_BGR48BE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as big-endian
+ AV_PIX_FMT_BGR48LE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as little-endian
+
+ /**
+ * The following 12 formats have the disadvantage of needing 1 format for each bit depth.
+ * Notice that each 9/10 bits sample is stored in 16 bits with extra padding.
+ * If you want to support multiple bit depths, then using AV_PIX_FMT_YUV420P16* with the bpp stored separately is better.
+ */
+ AV_PIX_FMT_YUV420P9BE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
+ AV_PIX_FMT_YUV420P9LE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
+ AV_PIX_FMT_YUV420P10BE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
+ AV_PIX_FMT_YUV420P10LE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
+ AV_PIX_FMT_YUV422P10BE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+ AV_PIX_FMT_YUV422P10LE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+ AV_PIX_FMT_YUV444P9BE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
+ AV_PIX_FMT_YUV444P9LE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
+ AV_PIX_FMT_YUV444P10BE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
+ AV_PIX_FMT_YUV444P10LE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
+ AV_PIX_FMT_YUV422P9BE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+ AV_PIX_FMT_YUV422P9LE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+ AV_PIX_FMT_GBRP, ///< planar GBR 4:4:4 24bpp
+ AV_PIX_FMT_GBR24P = AV_PIX_FMT_GBRP, // alias for #AV_PIX_FMT_GBRP
+ AV_PIX_FMT_GBRP9BE, ///< planar GBR 4:4:4 27bpp, big-endian
+ AV_PIX_FMT_GBRP9LE, ///< planar GBR 4:4:4 27bpp, little-endian
+ AV_PIX_FMT_GBRP10BE, ///< planar GBR 4:4:4 30bpp, big-endian
+ AV_PIX_FMT_GBRP10LE, ///< planar GBR 4:4:4 30bpp, little-endian
+ AV_PIX_FMT_GBRP16BE, ///< planar GBR 4:4:4 48bpp, big-endian
+ AV_PIX_FMT_GBRP16LE, ///< planar GBR 4:4:4 48bpp, little-endian
+ AV_PIX_FMT_YUVA422P, ///< planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
+ AV_PIX_FMT_YUVA444P, ///< planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
+ AV_PIX_FMT_YUVA420P9BE, ///< planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), big-endian
+ AV_PIX_FMT_YUVA420P9LE, ///< planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), little-endian
+ AV_PIX_FMT_YUVA422P9BE, ///< planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), big-endian
+ AV_PIX_FMT_YUVA422P9LE, ///< planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), little-endian
+ AV_PIX_FMT_YUVA444P9BE, ///< planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), big-endian
+ AV_PIX_FMT_YUVA444P9LE, ///< planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), little-endian
+ AV_PIX_FMT_YUVA420P10BE, ///< planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian)
+ AV_PIX_FMT_YUVA420P10LE, ///< planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian)
+ AV_PIX_FMT_YUVA422P10BE, ///< planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian)
+ AV_PIX_FMT_YUVA422P10LE, ///< planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian)
+ AV_PIX_FMT_YUVA444P10BE, ///< planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, big-endian)
+ AV_PIX_FMT_YUVA444P10LE, ///< planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian)
+ AV_PIX_FMT_YUVA420P16BE, ///< planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian)
+ AV_PIX_FMT_YUVA420P16LE, ///< planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian)
+ AV_PIX_FMT_YUVA422P16BE, ///< planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian)
+ AV_PIX_FMT_YUVA422P16LE, ///< planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian)
+ AV_PIX_FMT_YUVA444P16BE, ///< planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, big-endian)
+ AV_PIX_FMT_YUVA444P16LE, ///< planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian)
+
+ AV_PIX_FMT_VDPAU, ///< HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface
+
+ AV_PIX_FMT_XYZ12LE, ///< packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb), the 2-byte value for each X/Y/Z is stored as little-endian, the 4 lower bits are set to 0
+ AV_PIX_FMT_XYZ12BE, ///< packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb), the 2-byte value for each X/Y/Z is stored as big-endian, the 4 lower bits are set to 0
+ AV_PIX_FMT_NV16, ///< interleaved chroma YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
+ AV_PIX_FMT_NV20LE, ///< interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+ AV_PIX_FMT_NV20BE, ///< interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+
+ AV_PIX_FMT_RGBA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
+ AV_PIX_FMT_RGBA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
+ AV_PIX_FMT_BGRA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
+ AV_PIX_FMT_BGRA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
+
+ AV_PIX_FMT_YVYU422, ///< packed YUV 4:2:2, 16bpp, Y0 Cr Y1 Cb
+
+ AV_PIX_FMT_YA16BE, ///< 16 bits gray, 16 bits alpha (big-endian)
+ AV_PIX_FMT_YA16LE, ///< 16 bits gray, 16 bits alpha (little-endian)
+
+ AV_PIX_FMT_GBRAP, ///< planar GBRA 4:4:4:4 32bpp
+ AV_PIX_FMT_GBRAP16BE, ///< planar GBRA 4:4:4:4 64bpp, big-endian
+ AV_PIX_FMT_GBRAP16LE, ///< planar GBRA 4:4:4:4 64bpp, little-endian
+ /**
+ * HW acceleration through QSV, data[3] contains a pointer to the
+ * mfxFrameSurface1 structure.
+ */
+ AV_PIX_FMT_QSV,
+ /**
+ * HW acceleration though MMAL, data[3] contains a pointer to the
+ * MMAL_BUFFER_HEADER_T structure.
+ */
+ AV_PIX_FMT_MMAL,
+
+ AV_PIX_FMT_D3D11VA_VLD, ///< HW decoding through Direct3D11 via old API, Picture.data[3] contains a ID3D11VideoDecoderOutputView pointer
+
+ /**
+ * HW acceleration through CUDA. data[i] contain CUdeviceptr pointers
+ * exactly as for system memory frames.
+ */
+ AV_PIX_FMT_CUDA,
+
+ AV_PIX_FMT_0RGB, ///< packed RGB 8:8:8, 32bpp, XRGBXRGB... X=unused/undefined
+ AV_PIX_FMT_RGB0, ///< packed RGB 8:8:8, 32bpp, RGBXRGBX... X=unused/undefined
+ AV_PIX_FMT_0BGR, ///< packed BGR 8:8:8, 32bpp, XBGRXBGR... X=unused/undefined
+ AV_PIX_FMT_BGR0, ///< packed BGR 8:8:8, 32bpp, BGRXBGRX... X=unused/undefined
+
+ AV_PIX_FMT_YUV420P12BE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
+ AV_PIX_FMT_YUV420P12LE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
+ AV_PIX_FMT_YUV420P14BE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
+ AV_PIX_FMT_YUV420P14LE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
+ AV_PIX_FMT_YUV422P12BE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+ AV_PIX_FMT_YUV422P12LE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+ AV_PIX_FMT_YUV422P14BE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+ AV_PIX_FMT_YUV422P14LE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+ AV_PIX_FMT_YUV444P12BE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
+ AV_PIX_FMT_YUV444P12LE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
+ AV_PIX_FMT_YUV444P14BE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
+ AV_PIX_FMT_YUV444P14LE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
+ AV_PIX_FMT_GBRP12BE, ///< planar GBR 4:4:4 36bpp, big-endian
+ AV_PIX_FMT_GBRP12LE, ///< planar GBR 4:4:4 36bpp, little-endian
+ AV_PIX_FMT_GBRP14BE, ///< planar GBR 4:4:4 42bpp, big-endian
+ AV_PIX_FMT_GBRP14LE, ///< planar GBR 4:4:4 42bpp, little-endian
+ AV_PIX_FMT_YUVJ411P, ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV411P and setting color_range
+
+ AV_PIX_FMT_BAYER_BGGR8, ///< bayer, BGBG..(odd line), GRGR..(even line), 8-bit samples */
+ AV_PIX_FMT_BAYER_RGGB8, ///< bayer, RGRG..(odd line), GBGB..(even line), 8-bit samples */
+ AV_PIX_FMT_BAYER_GBRG8, ///< bayer, GBGB..(odd line), RGRG..(even line), 8-bit samples */
+ AV_PIX_FMT_BAYER_GRBG8, ///< bayer, GRGR..(odd line), BGBG..(even line), 8-bit samples */
+ AV_PIX_FMT_BAYER_BGGR16LE, ///< bayer, BGBG..(odd line), GRGR..(even line), 16-bit samples, little-endian */
+ AV_PIX_FMT_BAYER_BGGR16BE, ///< bayer, BGBG..(odd line), GRGR..(even line), 16-bit samples, big-endian */
+ AV_PIX_FMT_BAYER_RGGB16LE, ///< bayer, RGRG..(odd line), GBGB..(even line), 16-bit samples, little-endian */
+ AV_PIX_FMT_BAYER_RGGB16BE, ///< bayer, RGRG..(odd line), GBGB..(even line), 16-bit samples, big-endian */
+ AV_PIX_FMT_BAYER_GBRG16LE, ///< bayer, GBGB..(odd line), RGRG..(even line), 16-bit samples, little-endian */
+ AV_PIX_FMT_BAYER_GBRG16BE, ///< bayer, GBGB..(odd line), RGRG..(even line), 16-bit samples, big-endian */
+ AV_PIX_FMT_BAYER_GRBG16LE, ///< bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, little-endian */
+ AV_PIX_FMT_BAYER_GRBG16BE, ///< bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, big-endian */
+
+ AV_PIX_FMT_XVMC,///< XVideo Motion Acceleration via common packet passing
+
+ AV_PIX_FMT_YUV440P10LE, ///< planar YUV 4:4:0,20bpp, (1 Cr & Cb sample per 1x2 Y samples), little-endian
+ AV_PIX_FMT_YUV440P10BE, ///< planar YUV 4:4:0,20bpp, (1 Cr & Cb sample per 1x2 Y samples), big-endian
+ AV_PIX_FMT_YUV440P12LE, ///< planar YUV 4:4:0,24bpp, (1 Cr & Cb sample per 1x2 Y samples), little-endian
+ AV_PIX_FMT_YUV440P12BE, ///< planar YUV 4:4:0,24bpp, (1 Cr & Cb sample per 1x2 Y samples), big-endian
+ AV_PIX_FMT_AYUV64LE, ///< packed AYUV 4:4:4,64bpp (1 Cr & Cb sample per 1x1 Y & A samples), little-endian
+ AV_PIX_FMT_AYUV64BE, ///< packed AYUV 4:4:4,64bpp (1 Cr & Cb sample per 1x1 Y & A samples), big-endian
+
+ AV_PIX_FMT_VIDEOTOOLBOX, ///< hardware decoding through Videotoolbox
+
+ AV_PIX_FMT_P010LE, ///< like NV12, with 10bpp per component, data in the high bits, zeros in the low bits, little-endian
+ AV_PIX_FMT_P010BE, ///< like NV12, with 10bpp per component, data in the high bits, zeros in the low bits, big-endian
+
+ AV_PIX_FMT_GBRAP12BE, ///< planar GBR 4:4:4:4 48bpp, big-endian
+ AV_PIX_FMT_GBRAP12LE, ///< planar GBR 4:4:4:4 48bpp, little-endian
+
+ AV_PIX_FMT_GBRAP10BE, ///< planar GBR 4:4:4:4 40bpp, big-endian
+ AV_PIX_FMT_GBRAP10LE, ///< planar GBR 4:4:4:4 40bpp, little-endian
+
+ AV_PIX_FMT_MEDIACODEC, ///< hardware decoding through MediaCodec
+
+ AV_PIX_FMT_GRAY12BE, ///< Y , 12bpp, big-endian
+ AV_PIX_FMT_GRAY12LE, ///< Y , 12bpp, little-endian
+ AV_PIX_FMT_GRAY10BE, ///< Y , 10bpp, big-endian
+ AV_PIX_FMT_GRAY10LE, ///< Y , 10bpp, little-endian
+
+ AV_PIX_FMT_P016LE, ///< like NV12, with 16bpp per component, little-endian
+ AV_PIX_FMT_P016BE, ///< like NV12, with 16bpp per component, big-endian
+
+ /**
+ * Hardware surfaces for Direct3D11.
+ *
+ * This is preferred over the legacy AV_PIX_FMT_D3D11VA_VLD. The new D3D11
+ * hwaccel API and filtering support AV_PIX_FMT_D3D11 only.
+ *
+ * data[0] contains a ID3D11Texture2D pointer, and data[1] contains the
+ * texture array index of the frame as intptr_t if the ID3D11Texture2D is
+ * an array texture (or always 0 if it's a normal texture).
+ */
+ AV_PIX_FMT_D3D11,
+
+ AV_PIX_FMT_GRAY9BE, ///< Y , 9bpp, big-endian
+ AV_PIX_FMT_GRAY9LE, ///< Y , 9bpp, little-endian
+
+ AV_PIX_FMT_GBRPF32BE, ///< IEEE-754 single precision planar GBR 4:4:4, 96bpp, big-endian
+ AV_PIX_FMT_GBRPF32LE, ///< IEEE-754 single precision planar GBR 4:4:4, 96bpp, little-endian
+ AV_PIX_FMT_GBRAPF32BE, ///< IEEE-754 single precision planar GBRA 4:4:4:4, 128bpp, big-endian
+ AV_PIX_FMT_GBRAPF32LE, ///< IEEE-754 single precision planar GBRA 4:4:4:4, 128bpp, little-endian
+
+ /**
+ * DRM-managed buffers exposed through PRIME buffer sharing.
+ *
+ * data[0] points to an AVDRMFrameDescriptor.
+ */
+ AV_PIX_FMT_DRM_PRIME,
+ /**
+ * Hardware surfaces for OpenCL.
+ *
+ * data[i] contain 2D image objects (typed in C as cl_mem, used
+ * in OpenCL as image2d_t) for each plane of the surface.
+ */
+ AV_PIX_FMT_OPENCL,
+
+ AV_PIX_FMT_GRAY14BE, ///< Y , 14bpp, big-endian
+ AV_PIX_FMT_GRAY14LE, ///< Y , 14bpp, little-endian
+
+ AV_PIX_FMT_GRAYF32BE, ///< IEEE-754 single precision Y, 32bpp, big-endian
+ AV_PIX_FMT_GRAYF32LE, ///< IEEE-754 single precision Y, 32bpp, little-endian
+
+ AV_PIX_FMT_YUVA422P12BE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), 12b alpha, big-endian
+ AV_PIX_FMT_YUVA422P12LE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), 12b alpha, little-endian
+ AV_PIX_FMT_YUVA444P12BE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), 12b alpha, big-endian
+ AV_PIX_FMT_YUVA444P12LE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), 12b alpha, little-endian
+
+ AV_PIX_FMT_NV24, ///< planar YUV 4:4:4, 24bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V)
+ AV_PIX_FMT_NV42, ///< as above, but U and V bytes are swapped
+
+ AV_PIX_FMT_NB ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions
+};
+
+/*
+ * AV_PIX_FMT_NE(be, le): select the native-endian variant of a pixel format.
+ * NOTE(review): upstream FFmpeg defines AV_HAVE_BIGENDIAN as 0 or 1 in
+ * avconfig.h and tests it with "#if"; if this port kept that convention,
+ * "#ifdef" here would always take the big-endian branch. Confirm how
+ * AV_HAVE_BIGENDIAN is (or is not) defined in this tree.
+ */
+#ifdef AV_HAVE_BIGENDIAN
+#define AV_PIX_FMT_NE(be, le) AV_PIX_FMT_##be
+#else
+#define AV_PIX_FMT_NE(be, le) AV_PIX_FMT_##le
+#endif
+
+#define AV_PIX_FMT_YUV420P10 AV_PIX_FMT_NE(YUV420P10BE, YUV420P10LE)
+#define AV_PIX_FMT_YUV422P10 AV_PIX_FMT_NE(YUV422P10BE, YUV422P10LE)
+#define AV_PIX_FMT_YUV440P10 AV_PIX_FMT_NE(YUV440P10BE, YUV440P10LE)
+#define AV_PIX_FMT_YUV444P10 AV_PIX_FMT_NE(YUV444P10BE, YUV444P10LE)
+#define AV_PIX_FMT_YUV420P12 AV_PIX_FMT_NE(YUV420P12BE, YUV420P12LE)
+#define AV_PIX_FMT_YUV422P12 AV_PIX_FMT_NE(YUV422P12BE, YUV422P12LE)
+#define AV_PIX_FMT_YUV440P12 AV_PIX_FMT_NE(YUV440P12BE, YUV440P12LE)
+#define AV_PIX_FMT_YUV444P12 AV_PIX_FMT_NE(YUV444P12BE, YUV444P12LE)
+
+
+#define AV_PIX_FMT_GBRP9 AV_PIX_FMT_NE(GBRP9BE , GBRP9LE)
+#define AV_PIX_FMT_GBRP10 AV_PIX_FMT_NE(GBRP10BE, GBRP10LE)
+#define AV_PIX_FMT_GBRP12 AV_PIX_FMT_NE(GBRP12BE, GBRP12LE)
+#define AV_PIX_FMT_GBRP14 AV_PIX_FMT_NE(GBRP14BE, GBRP14LE)
+#define AV_PIX_FMT_GBRP16 AV_PIX_FMT_NE(GBRP16BE, GBRP16LE)
+#define AV_PIX_FMT_GBRAP10 AV_PIX_FMT_NE(GBRAP10BE, GBRAP10LE)
+#define AV_PIX_FMT_GBRAP12 AV_PIX_FMT_NE(GBRAP12BE, GBRAP12LE)
+#define AV_PIX_FMT_GBRAP16 AV_PIX_FMT_NE(GBRAP16BE, GBRAP16LE)
+
+#define AV_PIX_FMT_GRAY9 AV_PIX_FMT_NE(GRAY9BE, GRAY9LE)
+#define AV_PIX_FMT_GRAY10 AV_PIX_FMT_NE(GRAY10BE, GRAY10LE)
+#define AV_PIX_FMT_GRAY12 AV_PIX_FMT_NE(GRAY12BE, GRAY12LE)
+
+#define AV_PIX_FMT_YUV420P9 AV_PIX_FMT_NE(YUV420P9BE , YUV420P9LE)
+#define AV_PIX_FMT_YUV422P9 AV_PIX_FMT_NE(YUV422P9BE , YUV422P9LE)
+#define AV_PIX_FMT_YUV444P9 AV_PIX_FMT_NE(YUV444P9BE , YUV444P9LE)
+
+/**
+ * Chromaticity coordinates of the source primaries.
+ * These values match the ones defined by ISO/IEC 23001-8_2013 § 7.1.
+ * The numeric values are defined by the spec and appear in coded
+ * bitstreams; do not renumber or reorder entries.
+ */
+enum AVColorPrimaries {
+ AVCOL_PRI_RESERVED0 = 0,
+ AVCOL_PRI_BT709 = 1, ///< also ITU-R BT1361 / IEC 61966-2-4 / SMPTE RP177 Annex B
+ AVCOL_PRI_UNSPECIFIED = 2,
+ AVCOL_PRI_RESERVED = 3,
+ AVCOL_PRI_BT470M = 4, ///< also FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
+
+ AVCOL_PRI_BT470BG = 5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM
+ AVCOL_PRI_SMPTE170M = 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC
+ AVCOL_PRI_SMPTE240M = 7, ///< functionally identical to above
+ AVCOL_PRI_FILM = 8, ///< colour filters using Illuminant C
+ AVCOL_PRI_BT2020 = 9, ///< ITU-R BT2020
+ AVCOL_PRI_SMPTE428 = 10, ///< SMPTE ST 428-1 (CIE 1931 XYZ)
+ AVCOL_PRI_SMPTEST428_1 = AVCOL_PRI_SMPTE428,
+ AVCOL_PRI_SMPTE431 = 11, ///< SMPTE ST 431-2 (2011) / DCI P3
+ AVCOL_PRI_SMPTE432 = 12, ///< SMPTE ST 432-1 (2010) / P3 D65 / Display P3
+ AVCOL_PRI_JEDEC_P22 = 22, ///< JEDEC P22 phosphors
+ AVCOL_PRI_NB ///< Not part of ABI
+};
+
+/**
+ * Color Transfer Characteristic.
+ * These values match the ones defined by ISO/IEC 23001-8_2013 § 7.2.
+ * Spec-defined code points carried in bitstreams; do not renumber.
+ */
+enum AVColorTransferCharacteristic {
+ AVCOL_TRC_RESERVED0 = 0,
+ AVCOL_TRC_BT709 = 1, ///< also ITU-R BT1361
+ AVCOL_TRC_UNSPECIFIED = 2,
+ AVCOL_TRC_RESERVED = 3,
+ AVCOL_TRC_GAMMA22 = 4, ///< also ITU-R BT470M / ITU-R BT1700 625 PAL & SECAM
+ AVCOL_TRC_GAMMA28 = 5, ///< also ITU-R BT470BG
+ AVCOL_TRC_SMPTE170M = 6, ///< also ITU-R BT601-6 525 or 625 / ITU-R BT1358 525 or 625 / ITU-R BT1700 NTSC
+ AVCOL_TRC_SMPTE240M = 7,
+ AVCOL_TRC_LINEAR = 8, ///< "Linear transfer characteristics"
+ AVCOL_TRC_LOG = 9, ///< "Logarithmic transfer characteristic (100:1 range)"
+ AVCOL_TRC_LOG_SQRT = 10, ///< "Logarithmic transfer characteristic (100 * Sqrt(10) : 1 range)"
+ AVCOL_TRC_IEC61966_2_4 = 11, ///< IEC 61966-2-4
+ AVCOL_TRC_BT1361_ECG = 12, ///< ITU-R BT1361 Extended Colour Gamut
+ AVCOL_TRC_IEC61966_2_1 = 13, ///< IEC 61966-2-1 (sRGB or sYCC)
+ AVCOL_TRC_BT2020_10 = 14, ///< ITU-R BT2020 for 10-bit system
+ AVCOL_TRC_BT2020_12 = 15, ///< ITU-R BT2020 for 12-bit system
+ AVCOL_TRC_SMPTE2084 = 16, ///< SMPTE ST 2084 for 10-, 12-, 14- and 16-bit systems
+ AVCOL_TRC_SMPTEST2084 = AVCOL_TRC_SMPTE2084,
+ AVCOL_TRC_SMPTE428 = 17, ///< SMPTE ST 428-1
+ AVCOL_TRC_SMPTEST428_1 = AVCOL_TRC_SMPTE428,
+ AVCOL_TRC_ARIB_STD_B67 = 18, ///< ARIB STD-B67, known as "Hybrid log-gamma"
+ AVCOL_TRC_NB ///< Not part of ABI
+};
+
+/**
+ * YUV colorspace type.
+ * These values match the ones defined by ISO/IEC 23001-8_2013 § 7.3.
+ * Spec-defined code points carried in bitstreams; do not renumber.
+ */
+enum AVColorSpace {
+ AVCOL_SPC_RGB = 0, ///< order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB)
+ AVCOL_SPC_BT709 = 1, ///< also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B
+ AVCOL_SPC_UNSPECIFIED = 2,
+ AVCOL_SPC_RESERVED = 3,
+ AVCOL_SPC_FCC = 4, ///< FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
+ AVCOL_SPC_BT470BG = 5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
+ AVCOL_SPC_SMPTE170M = 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC
+ AVCOL_SPC_SMPTE240M = 7, ///< functionally identical to above
+ AVCOL_SPC_YCGCO = 8, ///< Used by Dirac / VC-2 and H.264 FRext, see ITU-T SG16
+ AVCOL_SPC_YCOCG = AVCOL_SPC_YCGCO,
+ AVCOL_SPC_BT2020_NCL = 9, ///< ITU-R BT2020 non-constant luminance system
+ AVCOL_SPC_BT2020_CL = 10, ///< ITU-R BT2020 constant luminance system
+ AVCOL_SPC_SMPTE2085 = 11, ///< SMPTE 2085, Y'D'zD'x
+ AVCOL_SPC_CHROMA_DERIVED_NCL = 12, ///< Chromaticity-derived non-constant luminance system
+ AVCOL_SPC_CHROMA_DERIVED_CL = 13, ///< Chromaticity-derived constant luminance system
+ AVCOL_SPC_ICTCP = 14, ///< ITU-R BT.2100-0, ICtCp
+ AVCOL_SPC_NB ///< Not part of ABI
+};
+
+/**
+ * MPEG vs JPEG YUV range.
+ * MPEG ("limited"/"video") range keeps headroom/footroom; JPEG ("full")
+ * range uses the entire code-value space.
+ */
+enum AVColorRange {
+ AVCOL_RANGE_UNSPECIFIED = 0,
+ AVCOL_RANGE_MPEG = 1, ///< the normal 219*2^(n-8) "MPEG" YUV ranges
+ AVCOL_RANGE_JPEG = 2, ///< the normal 2^n-1 "JPEG" YUV ranges
+ AVCOL_RANGE_NB ///< Not part of ABI
+};
+
+/**
+ * Location of chroma samples.
+ *
+ * Illustration showing the location of the first (top left) chroma sample of the
+ * image, the left shows only luma, the right
+ * shows the location of the chroma sample, the 2 could be imagined to overlay
+ * each other but are drawn separately due to limitations of ASCII
+ *
+ * 1st 2nd 1st 2nd horizontal luma sample positions
+ * v v v v
+ * ______ ______
+ *1st luma line > |X X ... |3 4 X ... X are luma samples,
+ * | |1 2 1-6 are possible chroma positions
+ *2nd luma line > |X X ... |5 6 X ... 0 is undefined/unknown position
+ *
+ * AVCHROMA_LOC_UNSPECIFIED (0) is the safe default when the position is
+ * unknown.
+ */
+enum AVChromaLocation {
+ AVCHROMA_LOC_UNSPECIFIED = 0,
+ AVCHROMA_LOC_LEFT = 1, ///< MPEG-2/4 4:2:0, H.264 default for 4:2:0
+ AVCHROMA_LOC_CENTER = 2, ///< MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0
+ AVCHROMA_LOC_TOPLEFT = 3, ///< ITU-R 601, SMPTE 274M 296M S314M(DV 4:1:1), mpeg2 4:2:2
+ AVCHROMA_LOC_TOP = 4,
+ AVCHROMA_LOC_BOTTOMLEFT = 5,
+ AVCHROMA_LOC_BOTTOM = 6,
+ AVCHROMA_LOC_NB ///< Not part of ABI
+};
+
+#endif /* AVUTIL_PIXFMT_H */
+
--- /dev/null
+#ifndef AVCODEC_PUT_BITS_H
+#define AVCODEC_PUT_BITS_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include "common.h"
+
+/*
+ * Bit-stream writer state.  Bits accumulate in a 32-bit word (bit_buf)
+ * and are flushed to the output buffer four bytes at a time.
+ */
+struct put_bits_context {
+ u32 bit_buf; /* pending bits not yet written to buf */
+ int bit_left; /* free bits remaining in bit_buf (32 == empty) */
+ u8 *buf; /* start of the output buffer */
+ u8 *buf_ptr; /* next write position within buf */
+ u8 *buf_end; /* one past the last writable byte */
+ int size_in_bits; /* total capacity of buf, in bits */
+};
+
+/**
+ * Initialize the struct put_bits_context s.
+ *
+ * @param buffer the buffer where to put bits
+ * @param buffer_size the size in bytes of buffer
+ *
+ * A negative buffer_size is treated as an empty (NULL) buffer.
+ */
+static inline void init_put_bits(struct put_bits_context *s,
+ u8 *buffer, int buffer_size)
+{
+ if (buffer_size < 0) {
+ buffer_size = 0;
+ buffer = NULL;
+ }
+
+ s->size_in_bits = 8 * buffer_size;
+ s->buf = buffer;
+ s->buf_end = s->buf + buffer_size;
+ s->buf_ptr = s->buf;
+ /* 32 free bits == empty accumulator */
+ s->bit_left = 32;
+ s->bit_buf = 0;
+}
+
+/**
+ * Rebase the bit writer onto a reallocated buffer.
+ *
+ * @param buffer the buffer where to put bits
+ * @param buffer_size the size in bytes of buffer,
+ * must be larger than the previous size
+ *
+ * Pending bits in bit_buf/bit_left are preserved; only the buffer
+ * pointers are re-based onto the new allocation.
+ */
+static inline void rebase_put_bits(struct put_bits_context *s,
+ u8 *buffer, int buffer_size)
+{
+ s->buf_end = buffer + buffer_size;
+ /* keep the same byte offset into the new buffer */
+ s->buf_ptr = buffer + (s->buf_ptr - s->buf);
+ s->buf = buffer;
+ s->size_in_bits = 8 * buffer_size;
+}
+
+/**
+ * @return the total number of bits written to the bitstream
+ * (bytes already flushed to buf plus bits still pending in bit_buf).
+ */
+static inline int put_bits_count(struct put_bits_context *s)
+{
+ return (s->buf_ptr - s->buf) * 8 + 32 - s->bit_left;
+}
+
+/**
+ * @return the number of bits available in the bitstream
+ * (bits still pending in bit_buf count as already used).
+ */
+static inline int put_bits_left(struct put_bits_context* s)
+{
+ return (s->buf_end - s->buf_ptr) * 8 - 32 + s->bit_left;
+}
+
+/**
+ * Pad the end of the output stream with zeros.
+ *
+ * Writes the pending bits of bit_buf out one byte at a time and resets
+ * the writer to an empty state.
+ * NOTE(review): no buf_end bounds check is done here -- the caller must
+ * guarantee up to 4 bytes of space remain in the buffer.
+ */
+static inline void flush_put_bits(struct put_bits_context *s)
+{
+#ifndef BITSTREAM_WRITER_LE
+ /* big-endian: move the pending bits up to the top of the word */
+ if (s->bit_left < 32)
+ s->bit_buf <<= s->bit_left;
+#endif
+ while (s->bit_left < 32) {
+#ifdef BITSTREAM_WRITER_LE
+ *s->buf_ptr++ = s->bit_buf;
+ s->bit_buf >>= 8;
+#else
+ *s->buf_ptr++ = s->bit_buf >> 24;
+ s->bit_buf <<= 8;
+#endif
+ s->bit_left += 8;
+ }
+ s->bit_left = 32;
+ s->bit_buf = 0;
+}
+
+/*
+ * Byte-wise little-endian flush: write out any pending bits of bit_buf
+ * LSB-first and reset the writer.  Like flush_put_bits(), no buf_end
+ * bounds check is performed.
+ */
+static inline void flush_put_bits_le(struct put_bits_context *s)
+{
+ while (s->bit_left < 32) {
+ *s->buf_ptr++ = s->bit_buf;
+ s->bit_buf >>= 8;
+ s->bit_left += 8;
+ }
+ s->bit_left = 32;
+ s->bit_buf = 0;
+}
+
+#ifdef BITSTREAM_WRITER_LE
+#define avpriv_align_put_bits align_put_bits_unsupported_here
+#define avpriv_put_string ff_put_string_unsupported_here
+#define avpriv_copy_bits avpriv_copy_bits_unsupported_here
+#else
+/**
+ * Pad the bitstream with zeros up to the next byte boundary.
+ */
+void avpriv_align_put_bits(struct put_bits_context *s);
+
+/**
+ * Put the string string in the bitstream.
+ *
+ * @param terminate_string 0-terminates the written string if value is 1
+ */
+void avpriv_put_string(struct put_bits_context *pb,
+ const char *string, int terminate_string);
+
+/**
+ * Copy the content of src to the bitstream.
+ *
+ * @param length the number of bits of src to copy
+ */
+void avpriv_copy_bits(struct put_bits_context *pb, const u8 *src, int length);
+#endif
+
+/**
+ * Write up to 31 bits into a bitstream.
+ * Use put_bits32 to write 32 bits.
+ *
+ * value must fit in n bits: no masking is performed here (see
+ * put_sbits(), which masks via av_mod_uintp2() before calling).
+ * If fewer than 4 bytes remain in the output buffer, the flushed
+ * 32-bit word is discarded and an error is logged.
+ */
+static inline void put_bits(struct put_bits_context *s, int n, u32 value)
+{
+ u32 bit_buf;
+ int bit_left;
+
+ bit_buf = s->bit_buf;
+ bit_left = s->bit_left;
+
+ /* XXX: optimize */
+#ifdef BITSTREAM_WRITER_LE
+ bit_buf |= value << (32 - bit_left);
+ if (n >= bit_left) {
+ /* accumulator full: flush one 32-bit word */
+ if (3 < s->buf_end - s->buf_ptr) {
+ AV_WL32(s->buf_ptr, bit_buf);
+ s->buf_ptr += 4;
+ } else {
+ pr_err("Internal error, put_bits buffer too small\n");
+ }
+ bit_buf = value >> bit_left;
+ bit_left += 32;
+ }
+ bit_left -= n;
+#else
+ if (n < bit_left) {
+ bit_buf = (bit_buf << n) | value;
+ bit_left -= n;
+ } else {
+ /* accumulator full: top up, flush, keep the remainder */
+ bit_buf <<= bit_left;
+ bit_buf |= value >> (n - bit_left);
+ if (3 < s->buf_end - s->buf_ptr) {
+ AV_WB32(s->buf_ptr, bit_buf);
+ s->buf_ptr += 4;
+ } else {
+ pr_err("Internal error, put_bits buffer too small\n");
+ }
+ bit_left += 32 - n;
+ bit_buf = value;
+ }
+#endif
+ s->bit_buf = bit_buf;
+ s->bit_left = bit_left;
+}
+
+/*
+ * Little-endian counterpart of put_bits(): write up to 31 bits,
+ * LSB-first.  value must fit in n bits (no masking is performed).
+ */
+static inline void put_bits_le(struct put_bits_context *s, int n, u32 value)
+{
+ u32 bit_buf;
+ int bit_left;
+
+ bit_buf = s->bit_buf;
+ bit_left = s->bit_left;
+
+ bit_buf |= value << (32 - bit_left);
+ if (n >= bit_left) {
+ /* accumulator full: flush one 32-bit word */
+ if (3 < s->buf_end - s->buf_ptr) {
+ AV_WL32(s->buf_ptr, bit_buf);
+ s->buf_ptr += 4;
+ } else {
+ pr_err("Internal error, put_bits buffer too small\n");
+ }
+ bit_buf = value >> bit_left;
+ bit_left += 32;
+ }
+ bit_left -= n;
+
+ s->bit_buf = bit_buf;
+ s->bit_left = bit_left;
+}
+
+/**
+ * Mask @a down to its @p least-significant bits.
+ *
+ * @param a value to mask
+ * @param p number of bits to keep, must be in [0, 31]
+ * @return a & (2^p - 1)
+ */
+static inline u32 av_mod_uintp2(u32 a, u32 p)
+{
+ /* Use an unsigned literal: "1 << 31" on a signed int is undefined
+ * behavior (put_sbits() can pass p up to 31), while 1U << 31 is
+ * well defined. */
+ return a & ((1U << p) - 1);
+}
+
+/*
+ * Write a signed value in n bits: the two's-complement representation
+ * is truncated to n bits via av_mod_uintp2() before writing.
+ */
+static inline void put_sbits(struct put_bits_context *pb, int n, int32_t value)
+{
+ put_bits(pb, n, av_mod_uintp2(value, n));
+}
+
+/**
+ * Write exactly 32 bits into a bitstream.
+ *
+ * Kept separate from put_bits(), whose shift arithmetic cannot handle
+ * n == 32.  If fewer than 4 bytes remain in the output buffer, the
+ * word is discarded and an error is logged.
+ *
+ * Marked "static inline" like every other helper in this header; a
+ * plain "static" definition in a header emits a defined-but-unused
+ * warning in every translation unit that includes it without calling
+ * it.
+ */
+static inline void put_bits32(struct put_bits_context *s, u32 value)
+{
+ u32 bit_buf;
+ int bit_left;
+
+ bit_buf = s->bit_buf;
+ bit_left = s->bit_left;
+
+#ifdef BITSTREAM_WRITER_LE
+ bit_buf |= value << (32 - bit_left);
+ if (3 < s->buf_end - s->buf_ptr) {
+ AV_WL32(s->buf_ptr, bit_buf);
+ s->buf_ptr += 4;
+ } else {
+ pr_err("Internal error, put_bits buffer too small\n");
+ }
+ /* 64-bit intermediate so a shift by 32 (bit_left == 32) is defined */
+ bit_buf = (uint64_t)value >> bit_left;
+#else
+ bit_buf = (uint64_t)bit_buf << bit_left;
+ bit_buf |= value >> (32 - bit_left);
+ if (3 < s->buf_end - s->buf_ptr) {
+ AV_WB32(s->buf_ptr, bit_buf);
+ s->buf_ptr += 4;
+ } else {
+ pr_err("Internal error, put_bits buffer too small\n");
+ }
+ bit_buf = value;
+#endif
+
+ /* exactly one word was flushed, so bit_left is unchanged */
+ s->bit_buf = bit_buf;
+ s->bit_left = bit_left;
+}
+
+/**
+ * Write up to 64 bits into a bitstream.
+ *
+ * The value is split into 32-bit halves; the halves are emitted
+ * low-first or high-first depending on BITSTREAM_WRITER_LE so the
+ * on-stream bit order matches the single-call writers.
+ */
+static inline void put_bits64(struct put_bits_context *s, int n, uint64_t value)
+{
+ if (n < 32)
+ put_bits(s, n, value);
+ else if (n == 32)
+ put_bits32(s, value);
+ else if (n < 64) {
+ u32 lo = value & 0xffffffff;
+ u32 hi = value >> 32;
+#ifdef BITSTREAM_WRITER_LE
+ put_bits32(s, lo);
+ put_bits(s, n - 32, hi);
+#else
+ put_bits(s, n - 32, hi);
+ put_bits32(s, lo);
+#endif
+ } else {
+ u32 lo = value & 0xffffffff;
+ u32 hi = value >> 32;
+#ifdef BITSTREAM_WRITER_LE
+ put_bits32(s, lo);
+ put_bits32(s, hi);
+#else
+ put_bits32(s, hi);
+ put_bits32(s, lo);
+#endif
+ }
+}
+
+/**
+ * Return the pointer to the byte where the bitstream writer will put
+ * the next bit.
+ *
+ * Note: any bits still pending in bit_buf are not reflected here;
+ * flush first if a byte-accurate position is needed.
+ */
+static inline u8 *put_bits_ptr(struct put_bits_context *s)
+{
+ return s->buf_ptr;
+}
+
+/**
+ * Skip the given number of bytes.
+ * struct put_bits_context must be flushed & aligned to a byte boundary before calling this.
+ * No bounds checking is performed against buf_end.
+ */
+static inline void skip_put_bytes(struct put_bits_context *s, int n)
+{
+ s->buf_ptr += n;
+}
+
+/**
+ * Skip the given number of bits.
+ * Must only be used if the actual values in the bitstream do not matter.
+ * If n is 0 the behavior is undefined.
+ *
+ * NOTE(review): when bit_left goes negative, "bit_left >> 5" relies on
+ * arithmetic (sign-propagating) right shift of a negative int to advance
+ * buf_ptr by whole words -- implementation-defined in C, though true for
+ * the compilers used to build the kernel.
+ */
+static inline void skip_put_bits(struct put_bits_context *s, int n)
+{
+ s->bit_left -= n;
+ s->buf_ptr -= 4 * (s->bit_left >> 5);
+ s->bit_left &= 31;
+}
+
+/**
+ * Change the end of the buffer.
+ *
+ * @param size the new size in bytes of the buffer where to put bits
+ *
+ * NOTE(review): shrinking below the current write position (buf_ptr)
+ * is not checked here.
+ */
+static inline void set_put_bits_buffer_size(struct put_bits_context *s, int size)
+{
+ s->buf_end = s->buf + size;
+ s->size_in_bits = 8*size;
+}
+
+#endif /* AVCODEC_PUT_BITS_H */
+
* @res_chg : [out] resolution change happen
*/
int (*decode)(unsigned long h_vdec, struct aml_vcodec_mem *bs,
- unsigned long int pts, bool *res_chg);
+ u64 pts, bool *res_chg);
/**
* (*get_param)() - get driver's parameter
int (*get_param)(unsigned long h_vdec,
enum vdec_get_param_type type, void *out);
+ /**
+ * (*set_param)() - set driver's parameter
+ * @h_vdec : [in] driver handle
+ * @type : [in] input parameter type
+ * @in : [in] buffer to store query result
+ */
+ int (*set_param)(unsigned long h_vdec,
+ enum vdec_set_param_type type, void *in);
+
/**
* (*deinit)() - deinitialize driver.
* @h_vdec : [in] driver handle to be deinit
const struct vdec_common_if *get_h264_dec_comm_if(void);
const struct vdec_common_if *get_hevc_dec_comm_if(void);
const struct vdec_common_if *get_vp9_dec_comm_if(void);
+const struct vdec_common_if *get_mpeg12_dec_comm_if(void);
+const struct vdec_common_if *get_mpeg4_dec_comm_if(void);
+const struct vdec_common_if *get_mjpeg_dec_comm_if(void);
int vdec_if_init(struct aml_vcodec_ctx *ctx, unsigned int fourcc)
{
case V4L2_PIX_FMT_VP9:
ctx->dec_if = get_vp9_dec_comm_if();
break;
+ case V4L2_PIX_FMT_MPEG:
+ case V4L2_PIX_FMT_MPEG1:
+ case V4L2_PIX_FMT_MPEG2:
+ ctx->dec_if = get_mpeg12_dec_comm_if();
+ break;
+ case V4L2_PIX_FMT_MPEG4:
+ ctx->dec_if = get_mpeg4_dec_comm_if();
+ break;
+ case V4L2_PIX_FMT_MJPEG:
+ ctx->dec_if = get_mjpeg_dec_comm_if();
+ break;
default:
return -EINVAL;
}
- aml_vdec_lock(ctx);
- //aml_vcodec_dec_clock_on(&ctx->dev->pm);//debug_tmp
ret = ctx->dec_if->init(ctx, &ctx->drv_handle);
- //aml_vcodec_dec_clock_off(&ctx->dev->pm);
- aml_vdec_unlock(ctx);
return ret;
}
{
int ret = 0;
- aml_vdec_lock(ctx);
ret = ctx->dec_if->probe(ctx->drv_handle, bs, out);
- aml_vdec_unlock(ctx);
return ret;
}
int vdec_if_decode(struct aml_vcodec_ctx *ctx, struct aml_vcodec_mem *bs,
- unsigned long int timestamp, bool *res_chg)
+ u64 timestamp, bool *res_chg)
{
int ret = 0;
}
}
- /*if (fb) {
- if (((fb->base_y.dma_addr & 511) != 0) ||
- ((fb->base_c.dma_addr & 511) != 0)) {
- aml_v4l2_err("frame buffer dma_addr should 512 byte align");
- return -EINVAL;
- }
- }*/
-
if (ctx->drv_handle == 0)
return -EIO;
- //aml_vdec_lock(ctx);
-
aml_vcodec_set_curr_ctx(ctx->dev, ctx);
- //aml_vcodec_dec_clock_on(&ctx->dev->pm);//debug_tmp
- //enable_irq(ctx->dev->dec_irq);
ret = ctx->dec_if->decode(ctx->drv_handle, bs, timestamp, res_chg);
- //disable_irq(ctx->dev->dec_irq);
- //aml_vcodec_dec_clock_off(&ctx->dev->pm);
aml_vcodec_set_curr_ctx(ctx->dev, NULL);
- //aml_vdec_unlock(ctx);
-
return ret;
}
if (ctx->drv_handle == 0)
return -EIO;
- //aml_vdec_lock(ctx);
ret = ctx->dec_if->get_param(ctx->drv_handle, type, out);
- //aml_vdec_unlock(ctx);
return ret;
}
if (ctx->drv_handle == 0)
return;
- //aml_vdec_lock(ctx);
- //aml_vcodec_dec_clock_on(&ctx->dev->pm);
ctx->dec_if->deinit(ctx->drv_handle);
- //aml_vcodec_dec_clock_off(&ctx->dev->pm);
- //aml_vdec_unlock(ctx);
-
ctx->drv_handle = 0;
}
GET_PARAM_DPB_SIZE
};
+/*
+ * SET_PARAM_PIC_INFO : set picture info, data parsed from ucode.
+ */
+enum vdec_set_param_type {
+ SET_PARAM_WRITE_FRAME_SYNC,
+ SET_PARAM_PIC_INFO,
+};
+
/**
* struct vdec_fb_node - decoder frame buffer node
* @list : list to hold this node
* Return: 0 on success. -EIO on unrecoverable error.
*/
int vdec_if_decode(struct aml_vcodec_ctx *ctx, struct aml_vcodec_mem *bs,
- unsigned long int timestamp, bool *res_chg);
+ u64 timestamp, bool *res_chg);
/**
* vdec_if_get_param() - get driver's parameter
#include <linux/amlogic/tee.h>
#include <linux/uaccess.h>
#include "../utils/config_parser.h"
-#include "../../../amvdec_ports/vdec_drv_base.h"
-#include "../../../amvdec_ports/aml_vcodec_adapt.h"
#include "../../../common/chips/decoder_cpu_ver_info.h"
+#include "../utils/vdec_v4l2_buffer_ops.h"
#include <linux/crc32.h>
-
-
#undef pr_info
#define pr_info printk
#define VDEC_DW
u32 canvas_mode;
bool is_used_v4l;
void *v4l2_ctx;
+ bool v4l_params_parsed;
wait_queue_head_t wait_q;
u32 reg_g_status;
struct mutex chunks_mutex;
return 0;
}
-static int v4l_get_fb(struct aml_vcodec_ctx *ctx, struct vdec_fb **out)
-{
- int ret = 0;
-
- ret = ctx->dec_if->get_param(ctx->drv_handle,
- GET_PARAM_FREE_FRAME_BUFFER, out);
-
- return ret;
-}
-
static int alloc_one_buf_spec_from_queue(struct vdec_h264_hw_s *hw, int idx)
{
int ret = 0;
struct buffer_spec_s *bs = &hw->buffer_spec[idx];
struct canvas_config_s *y_canvas_cfg = NULL;
struct canvas_config_s *c_canvas_cfg = NULL;
- struct vdec_fb *fb = NULL;
- unsigned int y_addr, c_addr;
+ struct vdec_v4l2_buffer *fb = NULL;
+ unsigned int y_addr = 0, c_addr = 0;
if (IS_ERR_OR_NULL(hw->v4l2_ctx)) {
pr_err("the v4l context has err.\n");
ctx->id, __func__,
(hw->mb_total << 8) + (hw->mb_total << 7));
- ret = v4l_get_fb(hw->v4l2_ctx, &fb);
+ ret = vdec_v4l_get_buffer(hw->v4l2_ctx, &fb);
if (ret) {
- dpb_print(DECODE_ID(hw), PRINT_FLAG_ERROR,
+ dpb_print(DECODE_ID(hw), PRINT_FLAG_V4L_DETAIL,
"[%d] get fb fail.\n", ctx->id);
return ret;
}
"[%d] %s(), cma alloc addr: 0x%x\n",
ctx->id, __func__, bs->cma_alloc_addr);
- y_addr = virt_to_phys(fb->base_y.va);
- c_addr = virt_to_phys(fb->base_c.va);
+ if (fb->num_planes == 1) {
+ y_addr = fb->m.mem[0].addr;
+ c_addr = fb->m.mem[0].addr + fb->m.mem[0].offset;
+ fb->m.mem[0].bytes_used = fb->m.mem[0].size;
+ } else if (fb->num_planes == 2) {
+ y_addr = fb->m.mem[0].addr;
+ c_addr = fb->m.mem[1].addr;
+ fb->m.mem[0].bytes_used = fb->m.mem[0].size;
+ fb->m.mem[1].bytes_used = fb->m.mem[1].size;
+ }
dpb_print(DECODE_ID(hw), PRINT_FLAG_V4L_DETAIL,
- "[%d] %s(), y_addr: %x, va: %p\n",
- ctx->id, __func__, y_addr, fb->base_y.va);
+ "[%d] %s(), y_addr: %x, size: %u\n",
+ ctx->id, __func__, y_addr, fb->m.mem[0].size);
dpb_print(DECODE_ID(hw), PRINT_FLAG_V4L_DETAIL,
- "[%d] %s(), c_addr: %x, va: %p\n",
- ctx->id, __func__, c_addr, fb->base_c.va);
+ "[%d] %s(), c_addr: %x, size: %u\n",
+ ctx->id, __func__, c_addr, fb->m.mem[1].size);
bs->y_addr = y_addr;
bs->u_addr = c_addr;
y_canvas_cfg->width = hw->mb_width << 4;
y_canvas_cfg->height = hw->mb_height << 4;
y_canvas_cfg->block_mode = CANVAS_BLKMODE_LINEAR;
- fb->base_y.bytes_used = y_canvas_cfg->width * y_canvas_cfg->height;
+ //fb->m.mem[0].bytes_used = y_canvas_cfg->width * y_canvas_cfg->height;
dpb_print(DECODE_ID(hw), PRINT_FLAG_V4L_DETAIL,
"[%d] %s(), y_w: %d, y_h: %d\n", ctx->id, __func__,
y_canvas_cfg->width,y_canvas_cfg->height);
c_canvas_cfg->width = hw->mb_width << 4;
c_canvas_cfg->height = hw->mb_height << 3;
c_canvas_cfg->block_mode = CANVAS_BLKMODE_LINEAR;
- fb->base_c.bytes_used = c_canvas_cfg->width * c_canvas_cfg->height;
+ //fb->m.mem[1].bytes_used = c_canvas_cfg->width * c_canvas_cfg->height;
dpb_print(DECODE_ID(hw), PRINT_FLAG_V4L_DETAIL,
"[%d] %s(), c_w: %d, c_h: %d\n", ctx->id, __func__,
c_canvas_cfg->width, c_canvas_cfg->height);
int index = -1;
struct vdec_h264_hw_s *hw = (struct vdec_h264_hw_s *)vdec->private;
int buf_total = BUFSPEC_POOL_SIZE;
+
+ if (hw->is_used_v4l)
+ buf_total = hw->dpb.mDPB.size;
+
spin_lock_irqsave(&hw->bufspec_lock, flags);
/*hw->start_search_pos = 0;*/
for (i = hw->start_search_pos; i < buf_total; i++) {
if (hw->mmu_enable)
addr = hw->buffer_spec[i].alloc_header_addr;
- else
+ else {
addr = hw->buffer_spec[i].cma_alloc_addr;
- if (hw->buffer_spec[i].used == 0 && addr) {
+ if (hw->is_used_v4l && !addr) {
+ if (!alloc_one_buf_spec_from_queue(hw, i)) {
+ config_decode_canvas(hw, i);
+ addr = hw->buffer_spec[i].cma_alloc_addr;
+ }
+ }
+ }
+ if (hw->buffer_spec[i].used == 0 && addr) {
hw->buffer_spec[i].used = 1;
hw->start_search_pos = i+1;
index = i;
for (i = 0; i < hw->start_search_pos; i++) {
if (hw->mmu_enable)
addr = hw->buffer_spec[i].alloc_header_addr;
- else
+ else {
addr = hw->buffer_spec[i].cma_alloc_addr;
+ if (hw->is_used_v4l && !addr) {
+ if (!alloc_one_buf_spec_from_queue(hw, i)) {
+ config_decode_canvas(hw, i);
+ addr = hw->buffer_spec[i].cma_alloc_addr;
+ }
+ }
+ }
if (hw->buffer_spec[i].used == 0 && addr) {
hw->buffer_spec[i].used = 1;
hw->start_search_pos = i+1;
dealloc_buf_specs(hw, 0);
if (max_alloc_buf_count == 0 ||
allocated_count < max_alloc_buf_count) {
- if (hw->is_used_v4l) {
- if (alloc_one_buf_spec_from_queue(hw, index) >= 0)
- ret = 1;
- } else if (alloc_one_buf_spec(hw, index) >= 0)
+ if (hw->is_used_v4l)
+ ret = 1;
+ else if (alloc_one_buf_spec(hw, index) >= 0)
ret = 1;
}
mutex_unlock(&vmh264_mutex);
if (hw->is_used_v4l) {
vf->v4l_mem_handle
= hw->buffer_spec[buffer_index].cma_alloc_addr;
+ if (hw->mmu_enable) {
+ if (vdec_v4l_binding_fd_and_vf(vf->v4l_mem_handle, vf) < 0) {
+ dpb_print(DECODE_ID(hw),
+ PRINT_FLAG_V4L_DETAIL,
+ "v4l: binding vf fail.\n");
+ return -1;
+ }
+ }
dpb_print(DECODE_ID(hw), PRINT_FLAG_V4L_DETAIL,
"[%d] %s(), v4l mem handle: 0x%x\n",
((struct aml_vcodec_ctx *)(hw->v4l2_ctx))->id,
struct vdec_h264_hw_s *hw = (struct vdec_h264_hw_s *)vdec->private;
struct aml_vcodec_ctx *ctx = (struct aml_vcodec_ctx *)(hw->v4l2_ctx);
struct vframe_s *vf = NULL;
- struct vdec_fb *fb = NULL;
+ struct vdec_v4l2_buffer *fb = NULL;
if (hw->is_used_v4l && hw->eos) {
if (kfifo_get(&hw->newframe_q, &vf) == 0 || vf == NULL) {
return -1;
}
- if (v4l_get_fb(hw->v4l2_ctx, &fb)) {
+ if (vdec_v4l_get_buffer(hw->v4l2_ctx, &fb)) {
pr_err("[%d] get fb fail.\n", ctx->id);
return -1;
}
+ vf->type |= VIDTYPE_V4L_EOS;
vf->timestamp = ULONG_MAX;
vf->v4l_mem_handle = (unsigned long)fb;
vf->flag = VFRAME_FLAG_EMPTY_FRAME_V4L;
size /= pic_size;
size = size + 1; /* need one more buffer */
- if (max_reference_frame_num > size)
+ if (max_reference_frame_num < size)
size = max_reference_frame_num;
return size;
if (hw->is_used_v4l) {
if (i != -1) {
- ret = alloc_one_buf_spec_from_queue(hw, i);
- if (!ret)
- config_decode_canvas(hw, i);
+ pr_info("alloc the buf of v4l when it is about to decoding.\n");
}
} else {
if ((i != -1) && alloc_one_buf_spec(hw, i) >= 0)
i = get_buf_spec_by_canvas_pos(hw, j);
if (hw->is_used_v4l) {
- if (alloc_one_buf_spec_from_queue
- (hw, i) < 0) {
- if (j++ != buf_cnt)
- ret = -1;
- break;
- }
+ pr_info("alloc the buf of v4l when it is about to decoding.\n");
+ break;
} else if (alloc_one_buf_spec(hw, i) < 0)
break;
unsigned short *p = (unsigned short *)hw->lmem_addr;
reset_process_time(hw);
+ if (hw->is_used_v4l) {
+ struct aml_vcodec_ctx *ctx =
+ (struct aml_vcodec_ctx *)(hw->v4l2_ctx);
+
+ if (ctx->param_sets_from_ucode && !ctx->v4l_codec_ready) {
+ //amvdec_stop();
+ hw->dec_result = DEC_RESULT_DONE;
+ vdec_schedule_work(&hw->work);
+ return IRQ_HANDLED;
+ }
+ }
+
hw->reg_iqidct_control = READ_VREG(IQIDCT_CONTROL);
hw->reg_vcop_ctrl_reg = READ_VREG(VCOP_CTRL_REG);
hw->reg_rv_ai_mb_count = READ_VREG(RV_AI_MB_COUNT);
#endif
/* cbcr_merge_swap_en */
- if (hw->is_used_v4l && v4l2_ctx->ada_ctx->vfm_path
- != FRAME_BASE_PATH_V4L_VIDEO)
+ if (hw->is_used_v4l
+ && (v4l2_ctx->q_data[AML_Q_DATA_DST].fmt->fourcc == V4L2_PIX_FMT_NV21
+ || v4l2_ctx->q_data[AML_Q_DATA_DST].fmt->fourcc == V4L2_PIX_FMT_NV21M))
SET_VREG_MASK(MDEC_PIC_DC_CTRL, 1 << 16);
SET_VREG_MASK(MDEC_PIC_DC_CTRL, 0xbf << 24);
hw->first_i_policy = first_i_policy;
- hw->is_used_v4l = (((unsigned long)
- hw->vh264_amstream_dec_info.param & 0x80) >> 7);
-
if (hw->is_used_v4l)
mem_map_mode = CANVAS_BLKMODE_LINEAR;
u32 param2 = READ_VREG(AV_SCRATCH_2);
u32 param3 = READ_VREG(AV_SCRATCH_6);
u32 param4 = READ_VREG(AV_SCRATCH_B);
+
if (vh264_set_params(hw, param1,
param2, param3, param4) < 0)
dpb_print(DECODE_ID(hw), 0, "set parameters error\n");
+
WRITE_VREG(AV_SCRATCH_0, (hw->max_reference_size<<24) |
(hw->dpb.mDPB.size<<16) |
(hw->dpb.mDPB.size<<8));
+
+ if (hw->is_used_v4l) {
+ struct aml_vcodec_ctx *ctx =
+ (struct aml_vcodec_ctx *)(hw->v4l2_ctx);
+
+ if (ctx->param_sets_from_ucode && !hw->v4l_params_parsed) {
+ struct aml_vdec_pic_infos info;
+
+ info.visible_width = hw->frame_width;
+ info.visible_height = hw->frame_height;
+ info.coded_width = ALIGN(hw->frame_width, 64);
+ info.coded_height = ALIGN(hw->frame_height, 64);
+ info.dpb_size = hw->dpb.mDPB.size;
+ hw->v4l_params_parsed = true;
+ vdec_v4l_set_pic_infos(ctx, &info);
+ }
+ }
+
start_process_time(hw);
return;
} else
vdec_core_finish_run(vdec, CORE_MASK_VDEC_1 | CORE_MASK_HEVC);
wake_up_interruptible(&hw->wait_q);
+
+ if (hw->is_used_v4l) {
+ struct aml_vcodec_ctx *ctx =
+ (struct aml_vcodec_ctx *)(hw->v4l2_ctx);
+
+ if (ctx->param_sets_from_ucode &&
+ !hw->v4l_params_parsed)
+ vdec_v4l_write_frame_sync(ctx);
+ }
+
if (hw->vdec_cb)
hw->vdec_cb(hw_to_vdec(hw), hw->vdec_cb_arg);
}
get_used_buf_count(hw) >=
run_ready_max_buf_num)
ret = 0;
+
+ if (hw->is_used_v4l) {
+ struct aml_vcodec_ctx *ctx =
+ (struct aml_vcodec_ctx *)(hw->v4l2_ctx);
+
+ if (ctx->param_sets_from_ucode &&
+ !ctx->v4l_codec_ready &&
+ hw->v4l_params_parsed)
+ ret = 0; /*the params has parsed.*/
+ }
}
#endif
if (ret)
hw->dec_result = DEC_RESULT_NONE;
reset_process_time(hw);
h264_reset_bufmgr(vdec);
+
dpb_print(DECODE_ID(hw), 0, "%s\n", __func__);
}
hw->has_i_frame = 0;
hw->config_bufmgr_done = 0;
+ if (hw->is_used_v4l) {
+ mutex_lock(&vmh264_mutex);
+ dealloc_buf_specs(hw, 1);
+ mutex_unlock(&vmh264_mutex);
+ }
+
if (dpb_is_debug(DECODE_ID(hw),
PRINT_FLAG_DUMP_BUFSPEC))
dump_bufspec(hw, "after h264_reconfig");
vh264_local_init(hw);
/*hw->decode_pic_count = 0;
hw->seq_info2 = 0;*/
- if (!hw->is_used_v4l) {
- if (vh264_set_params(hw,
- hw->cfg_param1,
- hw->cfg_param2,
- hw->cfg_param3,
- hw->cfg_param4) < 0)
- hw->stat |= DECODER_FATAL_ERROR_SIZE_OVERFLOW;
- } else {
- /* V4L2 decoder reset keeps decoder in same status as
- * after probe when there is no SPS/PPS header processed
- * and no buffers allocated.
- */
- hw->seq_info = 0;
- hw->seq_info2 = 0;
+
+ if (hw->is_used_v4l) {
+ mutex_lock(&vmh264_mutex);
+ /* detach relationship with v4l buffs.*/
+ dealloc_buf_specs(hw, 1);
+ mutex_unlock(&vmh264_mutex);
}
+
+ if (vh264_set_params(hw,
+ hw->cfg_param1,
+ hw->cfg_param2,
+ hw->cfg_param3,
+ hw->cfg_param4) < 0)
+ hw->stat |= DECODER_FATAL_ERROR_SIZE_OVERFLOW;
+
/*drop 3 frames after reset bufmgr if bit0 is set 1 */
if (first_i_policy & 0x01)
hw->first_i_policy = (3 << 8) | first_i_policy;
+
hw->init_flag = 1;
hw->reset_bufmgr_count++;
#endif
-
}
int ammvdec_h264_mmu_init(struct vdec_h264_hw_s *hw)
hw->first_head_check_flag = 0;
hw->new_iframe_flag = 0;
hw->ref_err_flush_dpb_flag = 0;
+
+ if (pdata->sys_info)
+ hw->vh264_amstream_dec_info = *pdata->sys_info;
+
+ hw->is_used_v4l = (((unsigned long)
+ hw->vh264_amstream_dec_info.param & 0x80) >> 7);
+ if (hw->is_used_v4l) {
+ hw->mmu_enable = (((unsigned long)
+ hw->vh264_amstream_dec_info.param & 0x100) >> 8);
+ dpb_print(DECODE_ID(hw), PRINT_FLAG_V4L_DETAIL,
+ "%s v4: enable mmu %d.\n",
+ __func__, hw->mmu_enable);
+ }
+
if (force_enable_mmu && pdata->sys_info &&
(get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_TXLX) &&
(get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_GXLX) &&
}
/**/
- if (pdata->sys_info)
- hw->vh264_amstream_dec_info = *pdata->sys_info;
#if 0
if (NULL == hw->sei_data_buffer) {
hw->sei_data_buffer =
#include "../utils/config_parser.h"
#include "../utils/firmware.h"
#include "../../../common/chips/decoder_cpu_ver_info.h"
-#include "../../../amvdec_ports/vdec_drv_base.h"
+#include "../utils/vdec_v4l2_buffer_ops.h"
#define CONSTRAIN_MAX_BUF_NUM
unsigned long start_adr;
unsigned int size;
int used_flag;
+ unsigned int y_size;
ulong v4l_ref_buf_addr;
} /*BUF_t */;
u32 pts;
u64 pts64;
+ u64 timestamp;
u32 aspect_ratio_idc;
u32 sar_width;
#define DEC_RESULT_GET_DATA_RETRY 8
#define DEC_RESULT_EOS 9
#define DEC_RESULT_FORCE_EXIT 10
+#define DEC_RESULT_FREE_CANVAS 11
static void vh265_work(struct work_struct *work);
static void vh265_timeout_work(struct work_struct *work);
struct vframe_qos_s vframe_qos;
bool is_used_v4l;
void *v4l2_ctx;
+ bool v4l_params_parsed;
} /*hevc_stru_t */;
-static int v4l_get_fb(struct aml_vcodec_ctx *ctx, struct vdec_fb **out)
-{
- int ret = 0;
-
- ret = ctx->dec_if->get_param(ctx->drv_handle,
- GET_PARAM_FREE_FRAME_BUFFER, out);
-
- return ret;
-}
-
#ifdef AGAIN_HAS_THRESHOLD
u32 again_threshold;
#endif
static void hevc_init_stru(struct hevc_state_s *hevc,
struct BuffInfo_s *buf_spec_i)
{
- int i;
+ //int i;
INIT_LIST_HEAD(&hevc->log_list);
hevc->work_space_buf = buf_spec_i;
hevc->prefix_aux_size = 0;
else
hevc->ignore_bufmgr_error = 0x0;
- for (i = 0; i < MAX_REF_PIC_NUM; i++)
- hevc->m_PIC[i] = NULL;
+ hevc->dec_result = DEC_RESULT_FREE_CANVAS;
+ vdec_schedule_work(&hevc->work);
+ /*for (i = 0; i < MAX_REF_PIC_NUM; i++)
+ hevc->m_PIC[i] = NULL;*/
+
hevc->pic_num = 0;
hevc->lcu_x_num_pre = 0;
hevc->lcu_y_num_pre = 0;
int i;
int ret = -1;
int buf_size = cal_current_buf_size(hevc, NULL);
- struct vdec_fb *fb = NULL;
+
+ if (hevc->is_used_v4l)
+ return 0;
if (hevc->fatal_error & DECODER_FATAL_ERROR_NO_MEM)
return ret;
/*get_cma_alloc_ref();*/ /*DEBUG_TMP*/
/*alloc compress header first*/
- if (hevc->is_used_v4l) {
- ret = v4l_get_fb(hevc->v4l2_ctx, &fb);
- if (ret) {
+ ret = decoder_bmmu_box_alloc_buf_phy
+ (hevc->bmmu_box,
+ VF_BUFFER_IDX(i), buf_size,
+ DRIVER_NAME,
+ &hevc->m_BUF[i].start_adr);
+ if (ret < 0) {
+ hevc->m_BUF[i].start_adr = 0;
+ if (i <= 8) {
+ hevc->fatal_error |=
+ DECODER_FATAL_ERROR_NO_MEM;
hevc_print(hevc, PRINT_FLAG_ERROR,
- "[%d] get fb fail.\n",
- ((struct aml_vcodec_ctx *)
- (hevc->v4l2_ctx))->id);
- return ret;
- }
-
- hevc->m_BUF[i].v4l_ref_buf_addr = (ulong)fb;
- hevc->m_BUF[i].start_adr =
- virt_to_phys(fb->base_y.va);
- hevc_print(hevc, PRINT_FLAG_V4L_DETAIL,
- "[%d] %s(), v4l ref buf addr: 0x%x\n",
- ((struct aml_vcodec_ctx *)
- (hevc->v4l2_ctx))->id, __func__, fb);
- } else {
- ret = decoder_bmmu_box_alloc_buf_phy
- (hevc->bmmu_box,
- VF_BUFFER_IDX(i), buf_size,
- DRIVER_NAME,
- &hevc->m_BUF[i].start_adr);
- if (ret < 0) {
- hevc->m_BUF[i].start_adr = 0;
- if (i <= 8) {
- hevc->fatal_error |=
- DECODER_FATAL_ERROR_NO_MEM;
- hevc_print(hevc, PRINT_FLAG_ERROR,
- "%s[%d], size: %d, no mem fatal err\n",
- __func__, i, buf_size);
- }
+ "%s[%d], size: %d, no mem fatal err\n",
+ __func__, i, buf_size);
}
}
hevc->m_BUF[i].size = buf_size;
hevc->m_BUF[i].used_flag = 0;
ret = 0;
+
if (get_dbg_flag(hevc) & H265_DEBUG_BUFMGR) {
hevc_print(hevc, 0,
"Buffer %d: start_adr %p size %x\n",
sps_pic_buf_diff = hevc->param.p.sps_max_dec_pic_buffering_minus1_0
- hevc->sps_num_reorder_pics_0;
-
#ifdef MULTI_INSTANCE_SUPPORT
/*
need one more for multi instance, as
struct buf_stru_s buf_stru;
int buf_size = cal_current_buf_size(hevc, &buf_stru);
int dw_mode = get_double_write_mode(hevc);
+ struct vdec_v4l2_buffer *fb = NULL;
+
+ if (hevc->is_used_v4l)
+ buf_size = 0;
for (i = 0; i < BUF_POOL_SIZE; i++) {
+ if (hevc->is_used_v4l && !hevc->m_BUF[i].start_adr) {
+ ret = vdec_v4l_get_buffer(hevc->v4l2_ctx, &fb);
+ if (ret) {
+ hevc_print(hevc, PRINT_FLAG_V4L_DETAIL,
+ "[%d] get fb fail.\n",
+ ((struct aml_vcodec_ctx *)
+ (hevc->v4l2_ctx))->id);
+ return ret;
+ }
+
+ hevc->m_BUF[i].used_flag = 0;
+ hevc->m_BUF[i].v4l_ref_buf_addr = (ulong)fb;
+ if (fb->num_planes == 1) {
+ hevc->m_BUF[i].start_adr = fb->m.mem[0].addr;
+ hevc->m_BUF[i].size = fb->m.mem[0].size;
+ hevc->m_BUF[i].y_size = fb->m.mem[0].offset;
+ fb->m.mem[0].bytes_used = fb->m.mem[0].size;
+ } else if (fb->num_planes == 2) {
+ hevc->m_BUF[i].start_adr = fb->m.mem[0].addr;
+ hevc->m_BUF[i].size = fb->m.mem[0].size + fb->m.mem[1].size;
+ hevc->m_BUF[i].y_size = fb->m.mem[0].size;
+ fb->m.mem[0].bytes_used = fb->m.mem[0].size;
+ fb->m.mem[1].bytes_used = fb->m.mem[1].size;
+ }
+
+ pic->BUF_index = i;
+
+ hevc_print(hevc, PRINT_FLAG_V4L_DETAIL,
+ "[%d] %s(), v4l ref buf addr: 0x%x\n",
+ ((struct aml_vcodec_ctx *)
+ (hevc->v4l2_ctx))->id, __func__, fb);
+ }
+
if (hevc->m_BUF[i].start_adr != 0 &&
hevc->m_BUF[i].used_flag == 0 &&
buf_size <= hevc->m_BUF[i].size) {
break;
}
}
+
if (i >= BUF_POOL_SIZE)
return -1;
} else
y_adr = hevc->m_BUF[i].start_adr;
- y_adr = ((y_adr + 0xffff) >> 16) << 16; /*64k alignment*/
+ if (!hevc->is_used_v4l)
+ y_adr = ((y_adr + 0xffff) >> 16) << 16; /*64k alignment*/
+
pic->POC = INVALID_POC;
/*ensure get_pic_by_POC()
not get the buffer not decoded*/
pic->mc_canvas_y = pic->index;
pic->mc_canvas_u_v = pic->index;
if (dw_mode & 0x10) {
- pic->mc_y_adr = y_adr;
- pic->mc_u_v_adr = y_adr +
- ((buf_stru.mc_buffer_size_u_v_h << 16) << 1);
+ if (hevc->is_used_v4l) {
+ pic->mc_y_adr = y_adr;
+ pic->mc_u_v_adr = y_adr + hevc->m_BUF[i].y_size;
+ } else {
+ pic->mc_y_adr = y_adr;
+ pic->mc_u_v_adr = y_adr +
+ ((buf_stru.mc_buffer_size_u_v_h << 16) << 1);
+ }
pic->mc_canvas_y = (pic->index << 1);
pic->mc_canvas_u_v = (pic->index << 1) + 1;
pic->y_canvas_index = -1;
pic->uv_canvas_index = -1;
}
- if (config_pic(hevc, pic) < 0) {
- if (get_dbg_flag(hevc))
- hevc_print(hevc, 0,
- "Config_pic %d fail\n", pic->index);
- pic->index = -1;
- i++;
- break;
- }
+
pic->width = hevc->pic_w;
pic->height = hevc->pic_h;
pic->double_write_mode = dw_mode;
- if (pic->double_write_mode)
- set_canvas(hevc, pic);
+
+ if (!hevc->is_used_v4l) {
+ if (config_pic(hevc, pic) < 0) {
+ if (get_dbg_flag(hevc))
+ hevc_print(hevc, 0,
+ "Config_pic %d fail\n", pic->index);
+ pic->index = -1;
+ i++;
+ break;
+ }
+
+ if (pic->double_write_mode)
+ set_canvas(hevc, pic);
+ }
}
for (; i < MAX_REF_PIC_NUM; i++) {
hevc->lcu_total * hevc->lcu_size * hevc->lcu_size / 2;
int mc_buffer_size_u_v_h = (mc_buffer_size_u_v + 0xffff) >> 16;
struct PIC_s *cur_pic = hevc->cur_pic;
+ struct aml_vcodec_ctx * v4l2_ctx = hevc->v4l2_ctx;
data32 = READ_VREG(HEVC_SAO_CTRL0);
data32 &= (~0xf);
"[DBLK DEBUG] HEVC1 CFGB : 0x%x\n", data);
}
+ /* swap uv */
+ if (hevc->is_used_v4l) {
+ if ((v4l2_ctx->q_data[AML_Q_DATA_DST].fmt->fourcc == V4L2_PIX_FMT_NV21) ||
+ (v4l2_ctx->q_data[AML_Q_DATA_DST].fmt->fourcc == V4L2_PIX_FMT_NV21M))
+ data32 &= ~(1 << 8); /* NV21 */
+ else
+ data32 |= (1 << 8); /* NV12 */
+ }
+
+ /*
+ * [31:24] ar_fifo1_axi_thred
+ * [23:16] ar_fifo0_axi_thred
+ * [15:14] axi_linealign, 0-16bytes, 1-32bytes, 2-64bytes
+ * [13:12] axi_aformat, 0-Linear, 1-32x32, 2-64x32
+ * [11:08] axi_lendian_C
+ * [07:04] axi_lendian_Y
+ * [3] reserved
+ * [2] clk_forceon
+ * [1] dw_disable:disable double write output
+ * [0] cm_disable:disable compress output
+ */
WRITE_VREG(HEVC_SAO_CTRL1, data32);
if (get_double_write_mode(hevc) & 0x10) {
/* [23:22] dw_v1_ctrl
data32 &= (~0xF);
data32 |= 0xf; /* valid only when double write only */
/*data32 |= 0x8;*/ /* Big-Endian per 64-bit */
+
+ /* swap uv */
+ if (hevc->is_used_v4l) {
+ if ((v4l2_ctx->q_data[AML_Q_DATA_DST].fmt->fourcc == V4L2_PIX_FMT_NV21) ||
+ (v4l2_ctx->q_data[AML_Q_DATA_DST].fmt->fourcc == V4L2_PIX_FMT_NV21M))
+ data32 |= (1 << 12); /* NV21 */
+ else
+ data32 &= ~(1 << 12); /* NV12 */
+ }
+
+ /*
+ * [3:0] little_endian
+ * [5:4] address_format 00:linear 01:32x32 10:64x32
+ * [7:6] reserved
+ * [9:8] Linear_LineAlignment 00:16byte 01:32byte 10:64byte
+ * [11:10] reserved
+ * [12] CbCr_byte_swap
+ * [31:13] reserved
+ */
WRITE_VREG(HEVCD_IPP_AXIIF_CONFIG, data32);
#endif
data32 = 0;
if (hevc->is_used_v4l) {
vf->v4l_mem_handle
= hevc->m_BUF[pic->BUF_index].v4l_ref_buf_addr;
+ if (hevc->mmu_enable) {
+ if (vdec_v4l_binding_fd_and_vf(vf->v4l_mem_handle, vf) < 0) {
+ hevc_print(hevc, PRINT_FLAG_V4L_DETAIL,
+ "v4l: binding vf fail.\n");
+ return -1;
+ }
+ }
hevc_print(hevc, PRINT_FLAG_V4L_DETAIL,
"[%d] %s(), v4l mem handle: 0x%lx\n",
((struct aml_vcodec_ctx *)(hevc->v4l2_ctx))->id,
if (vdec_frame_based(hw_to_vdec(hevc))) {
vf->pts = pic->pts;
vf->pts_us64 = pic->pts64;
+ vf->timestamp = pic->timestamp;
}
/* if (pts_lookup_offset(PTS_TYPE_VIDEO,
stream_offset, &vf->pts, 0) != 0) { */
struct hevc_state_s *hw = (struct hevc_state_s *)vdec->private;
struct aml_vcodec_ctx *ctx = (struct aml_vcodec_ctx *)(hw->v4l2_ctx);
struct vframe_s *vf = NULL;
- struct vdec_fb *fb = NULL;
+ struct vdec_v4l2_buffer *fb = NULL;
if (hw->is_used_v4l && hw->eos) {
if (kfifo_get(&hw->newframe_q, &vf) == 0 || vf == NULL) {
return -1;
}
- if (v4l_get_fb(hw->v4l2_ctx, &fb)) {
+ if (vdec_v4l_get_buffer(hw->v4l2_ctx, &fb)) {
pr_err("[%d] get fb fail.\n", ctx->id);
return -1;
}
+ vf->type |= VIDTYPE_V4L_EOS;
vf->timestamp = ULONG_MAX;
vf->v4l_mem_handle = (unsigned long)fb;
vf->flag = VFRAME_FLAG_EMPTY_FRAME_V4L;
&& (hevc->chunk)) {
hevc->cur_pic->pts = hevc->chunk->pts;
hevc->cur_pic->pts64 = hevc->chunk->pts64;
+ hevc->cur_pic->timestamp = hevc->chunk->timestamp;
}
mutex_unlock(&hevc->chunks_mutex);
hevc->param.p.vui_time_scale_hi,
hevc->param.p.vui_time_scale_lo);
}
+
+ if (hevc->is_used_v4l) {
+ struct aml_vcodec_ctx *ctx =
+ (struct aml_vcodec_ctx *)(hevc->v4l2_ctx);
+
+ if (ctx->param_sets_from_ucode && !hevc->v4l_params_parsed) {
+ struct aml_vdec_pic_infos info;
+
+ hevc->frame_width = hevc->param.p.pic_width_in_luma_samples;
+ hevc->frame_height = hevc->param.p.pic_height_in_luma_samples;
+ info.visible_width = hevc->frame_width;
+ info.visible_height = hevc->frame_height;
+ info.coded_width = ALIGN(hevc->frame_width, 32);
+ info.coded_height = ALIGN(hevc->frame_height, 32);
+ info.dpb_size = get_work_pic_num(hevc);
+ hevc->v4l_params_parsed = true;
+ /*notice the v4l2 codec.*/
+ vdec_v4l_set_pic_infos(ctx, &info);
+ }
+ }
+
if (
#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION
vdec->master == NULL &&
init_timer(&hevc->timer);
hevc->stat |= STAT_TIMER_INIT;
+
+ if (hevc->m_ins_flag) {
+#ifdef USE_UNINIT_SEMA
+ sema_init(&hevc->h265_uninit_done_sema, 0);
+#endif
+ INIT_WORK(&hevc->work, vh265_work);
+ INIT_WORK(&hevc->timeout_work, vh265_timeout_work);
+ }
+
if (vh265_local_init(hevc) < 0)
return -EBUSY;
hevc->timer.function = vh265_check_timer_func;
hevc->timer.expires = jiffies + PUT_INTERVAL;
-#ifdef USE_UNINIT_SEMA
- sema_init(&hevc->h265_uninit_done_sema, 0);
-#endif
-
- /*add_timer(&hevc->timer);
- *hevc->stat |= STAT_TIMER_ARM;
- */
-
- INIT_WORK(&hevc->work, vh265_work);
- INIT_WORK(&hevc->timeout_work, vh265_timeout_work);
-
hevc->fw = fw;
return 0;
if (use_cma) {
hevc->uninit_list = 1;
reset_process_time(hevc);
+ hevc->dec_result = DEC_RESULT_FREE_CANVAS;
vdec_schedule_work(&hevc->work);
+ flush_work(&hevc->work);
#ifdef USE_UNINIT_SEMA
if (hevc->init_flag) {
down(&hevc->h265_uninit_done_sema);
static void vh265_work_implement(struct hevc_state_s *hevc,
struct vdec_s *vdec,int from)
{
- if (hevc->uninit_list) {
+ if (hevc->dec_result == DEC_RESULT_FREE_CANVAS) {
/*USE_BUF_BLOCK*/
uninit_pic_list(hevc);
hevc_print(hevc, 0, "uninit list\n");
else
vdec_core_finish_run(vdec, CORE_MASK_VDEC_1 | CORE_MASK_HEVC);
+ if (hevc->is_used_v4l) {
+ struct aml_vcodec_ctx *ctx =
+ (struct aml_vcodec_ctx *)(hevc->v4l2_ctx);
+
+ if (ctx->param_sets_from_ucode &&
+ !hevc->v4l_params_parsed)
+ vdec_v4l_write_frame_sync(ctx);
+ }
+
if (hevc->vdec_cb)
hevc->vdec_cb(hw_to_vdec(hevc), hevc->vdec_cb_arg);
}
}
#endif
+ if (hevc->is_used_v4l) {
+ struct aml_vcodec_ctx *ctx =
+ (struct aml_vcodec_ctx *)(hevc->v4l2_ctx);
+
+ if (ctx->param_sets_from_ucode &&
+ !ctx->v4l_codec_ready &&
+ hevc->v4l_params_parsed)
+ ret = 0; /*the params has parsed.*/
+ }
+
+
if (ret)
not_run_ready[hevc->index] = 0;
else
struct hevc_state_s *hevc =
(struct hevc_state_s *)vdec->private;
+ int i;
- hevc_print(hevc,
- PRINT_FLAG_VDEC_DETAIL, "%s\r\n", __func__);
+ cancel_work_sync(&hevc->work);
+ cancel_work_sync(&hevc->notify_work);
+ if (hevc->stat & STAT_VDEC_RUN) {
+ amhevc_stop();
+ hevc->stat &= ~STAT_VDEC_RUN;
+ }
+
+ if (hevc->stat & STAT_TIMER_ARM) {
+ del_timer_sync(&hevc->timer);
+ hevc->stat &= ~STAT_TIMER_ARM;
+ }
+ hevc->dec_result = DEC_RESULT_NONE;
+ reset_process_time(hevc);
+ hevc->init_flag = 0;
+ hevc->pic_list_init_flag = 0;
+ dealloc_mv_bufs(hevc);
+ hevc_local_uninit(hevc);
+ if (vh265_local_init(hevc) < 0)
+ pr_debug(" %s local init fail\n", __func__);
+ for (i = 0; i < BUF_POOL_SIZE; i++) {
+ hevc->m_BUF[i].start_adr = 0;
+ }
+ hevc_print(hevc, PRINT_FLAG_VDEC_DETAIL, "%s\r\n", __func__);
}
static irqreturn_t vh265_irq_cb(struct vdec_s *vdec, int irq)
hevc->is_used_v4l = (((unsigned long)
hevc->vh265_amstream_dec_info.param & 0x80) >> 7);
if (hevc->is_used_v4l) {
- hevc->double_write_mode = 0x10;
- hevc->mmu_enable = 0;
+ hevc->mmu_enable = (((unsigned long) // scatter mem
+ hevc->vh265_amstream_dec_info.param & 0x100) >> 8);
+ if (!hevc->mmu_enable)
+ hevc->double_write_mode = 0x10;
+
+ hevc_print(hevc, PRINT_FLAG_V4L_DETAIL,
+ "%s v4: enable mmu %d.\n",
+ __func__, hevc->mmu_enable);
}
if (init_mmu_buffers(hevc) < 0) {
#include <linux/amlogic/media/codec_mm/codec_mm.h>
#include <linux/amlogic/media/codec_mm/configs.h>
#include "../utils/firmware.h"
-
-
+#include "../utils/vdec_v4l2_buffer_ops.h"
#define MEM_NAME "codec_mmjpeg"
#define PRINT_FLAG_FORCE_DONE 0x0100
#define PRINT_FRAMEBASE_DATA 0x0400
#define PRINT_FLAG_TIMEOUT_STATUS 0x1000
+#define PRINT_FLAG_V4L_DETAIL 0x8000
int mmjpeg_debug_print(int index, int debug_flag, const char *fmt, ...)
{
#define DEC_RESULT_NONE 0
#define DEC_RESULT_DONE 1
#define DEC_RESULT_AGAIN 2
-#define DEC_RESULT_FORCE_EXIT 3
-#define DEC_RESULT_EOS 4
+#define DEC_RESULT_ERROR 3
+#define DEC_RESULT_FORCE_EXIT 4
+#define DEC_RESULT_EOS 5
#define DEC_DECODE_TIMEOUT 0x21
unsigned long cma_alloc_addr;
int cma_alloc_count;
unsigned int buf_adr;
+ ulong v4l_ref_buf_addr;
};
#define spec2canvas(x) \
u32 put_num;
u32 run_count;
u32 not_run_ready;
+ u32 buffer_not_ready;
u32 input_empty;
u32 peek_num;
u32 get_num;
+ bool is_used_v4l;
+ void *v4l2_ctx;
+ bool v4l_params_parsed;
};
static void reset_process_time(struct vdec_mjpeg_hw_s *hw);
return IRQ_HANDLED;
}
+ if (hw->is_used_v4l) {
+ struct aml_vcodec_ctx *ctx =
+ (struct aml_vcodec_ctx *)(hw->v4l2_ctx);
+
+ if (ctx->param_sets_from_ucode && !hw->v4l_params_parsed) {
+ struct aml_vdec_pic_infos info;
+
+ info.visible_width = hw->frame_width;
+ info.visible_height = hw->frame_height;
+ info.coded_width = ALIGN(hw->frame_width, 64);
+ info.coded_height = ALIGN(hw->frame_height, 64);
+ info.dpb_size = MAX_BMMU_BUFFER_NUM - 1;
+ hw->v4l_params_parsed = true;
+ vdec_v4l_set_pic_infos(ctx, &info);
+ }
+
+ if (!ctx->v4l_codec_ready)
+ return IRQ_HANDLED;
+ }
+
+ if (hw->is_used_v4l) {
+ vf->v4l_mem_handle
+ = hw->buffer_spec[index].v4l_ref_buf_addr;
+ if (vdec_v4l_binding_fd_and_vf(vf->v4l_mem_handle, vf) < 0) {
+ mmjpeg_debug_print(DECODE_ID(hw), PRINT_FLAG_V4L_DETAIL,
+ "v4l: binding vf fail.\n");
+ return -1;
+ }
+ mmjpeg_debug_print(DECODE_ID(hw), PRINT_FLAG_V4L_DETAIL,
+ "[%d] %s(), v4l mem handle: 0x%lx\n",
+ ((struct aml_vcodec_ctx *)(hw->v4l2_ctx))->id,
+ __func__, vf->v4l_mem_handle);
+ }
+
vf->index = index;
set_frame_info(hw, vf);
}
/****************************************/
-static void vmjpeg_canvas_init(struct vdec_s *vdec)
+static void vmjpeg_canvas_init(struct vdec_mjpeg_hw_s *hw)
{
int i, ret;
u32 canvas_width, canvas_height;
u32 decbuf_size, decbuf_y_size, decbuf_uv_size;
unsigned long buf_start, addr;
- struct vdec_mjpeg_hw_s *hw =
- (struct vdec_mjpeg_hw_s *)vdec->private;
+ struct vdec_s *vdec = hw_to_vdec(hw);
canvas_width = 1920;
canvas_height = 1088;
for (i = 0; i < DECODE_BUFFER_NUM_MAX; i++) {
int canvas;
- ret = decoder_bmmu_box_alloc_buf_phy(hw->mm_blk_handle, i,
- decbuf_size, DRIVER_NAME, &buf_start);
- if (ret < 0) {
- pr_err("CMA alloc failed! size 0x%d idx %d\n",
- decbuf_size, i);
- return;
+ if (hw->is_used_v4l) {
+ continue;
+ } else {
+ ret = decoder_bmmu_box_alloc_buf_phy(hw->mm_blk_handle, i,
+ decbuf_size, DRIVER_NAME, &buf_start);
+ if (ret < 0) {
+ pr_err("CMA alloc failed! size 0x%d idx %d\n",
+ decbuf_size, i);
+ return;
+ }
}
hw->buffer_spec[i].buf_adr = buf_start;
mod_timer(&hw->check_timer, jiffies + CHECK_INTERVAL);
}
-static void vmjpeg_hw_ctx_restore(struct vdec_s *vdec, int index)
+/*
+ * Bind a v4l2 capture buffer to decode-buffer slot @i and program its
+ * Y/U/V canvases.  Returns 0 on success (or if the slot is already
+ * bound), non-zero when no capture buffer could be obtained.
+ */
+static int vmjpeg_v4l_alloc_buff_config_canvas(struct vdec_mjpeg_hw_s *hw, int i)
+{
+ int ret;
+ u32 canvas;
+ ulong decbuf_start = 0, addr;
+ int decbuf_y_size = 0, decbuf_uv_size = 0;
+ u32 canvas_width = 0, canvas_height = 0;
+ struct vdec_s *vdec = hw_to_vdec(hw);
+ struct vdec_v4l2_buffer *fb = NULL;
+
+ /* slot already has a v4l2 buffer bound; nothing to do */
+ if (hw->buffer_spec[i].v4l_ref_buf_addr)
+ return 0;
+
+ ret = vdec_v4l_get_buffer(hw->v4l2_ctx, &fb);
+ if (ret) {
+ mmjpeg_debug_print(DECODE_ID(hw), 0,
+ "[%d] get fb fail.\n",
+ ((struct aml_vcodec_ctx *)
+ (hw->v4l2_ctx))->id);
+ return ret;
+ }
+
+ hw->buffer_spec[i].v4l_ref_buf_addr = (ulong)fb;
+ if (fb->num_planes == 1) {
+ /* single plane: Y at offset 0, chroma after mem[0].offset */
+ decbuf_start = fb->m.mem[0].addr;
+ decbuf_y_size = fb->m.mem[0].offset;
+ decbuf_uv_size = fb->m.mem[0].size - fb->m.mem[0].offset;
+ canvas_width = ALIGN(hw->frame_width, 16);
+ canvas_height = ALIGN(hw->frame_height, 16);
+ fb->m.mem[0].bytes_used = fb->m.mem[0].size;
+ } else if (fb->num_planes == 2) {
+ decbuf_start = fb->m.mem[0].addr;
+ decbuf_y_size = fb->m.mem[0].size;
+ decbuf_uv_size = fb->m.mem[1].size << 1;
+ canvas_width = ALIGN(hw->frame_width, 16);
+ canvas_height = ALIGN(hw->frame_height, 16);
+ fb->m.mem[0].bytes_used = decbuf_y_size;
+ fb->m.mem[1].bytes_used = decbuf_uv_size >> 1;
+ }
+
+ /*
+ * NOTE(review): v_addr = u_addr + decbuf_uv_size below, while the V
+ * canvas is sized (w/2 x h/2); confirm decbuf_uv_size is meant to be
+ * the per-plane chroma size for each num_planes case.
+ */
+ hw->buffer_spec[i].buf_adr = decbuf_start;
+ addr = hw->buffer_spec[i].buf_adr;
+ hw->buffer_spec[i].y_addr = addr;
+ addr += decbuf_y_size;
+ hw->buffer_spec[i].u_addr = addr;
+ addr += decbuf_uv_size;
+ hw->buffer_spec[i].v_addr = addr;
+
+ /* print the stored ulong handle; "%x" with a pointer arg is a
+  * format-specifier mismatch on 64-bit kernels */
+ mmjpeg_debug_print(DECODE_ID(hw), 0, "[%d] %s(), v4l ref buf addr: 0x%lx\n",
+ ((struct aml_vcodec_ctx *)(hw->v4l2_ctx))->id, __func__,
+ hw->buffer_spec[i].v4l_ref_buf_addr);
+
+ /* reserve one canvas index per plane */
+ if (vdec->parallel_dec == 1) {
+ if (hw->buffer_spec[i].y_canvas_index == -1)
+ hw->buffer_spec[i].y_canvas_index =
+ vdec->get_canvas_ex(CORE_MASK_VDEC_1, vdec->id);
+ if (hw->buffer_spec[i].u_canvas_index == -1)
+ hw->buffer_spec[i].u_canvas_index =
+ vdec->get_canvas_ex(CORE_MASK_VDEC_1, vdec->id);
+ if (hw->buffer_spec[i].v_canvas_index == -1)
+ hw->buffer_spec[i].v_canvas_index =
+ vdec->get_canvas_ex(CORE_MASK_VDEC_1, vdec->id);
+ } else {
+ canvas = vdec->get_canvas(i, 3);
+ hw->buffer_spec[i].y_canvas_index = canvas_y(canvas);
+ hw->buffer_spec[i].u_canvas_index = canvas_u(canvas);
+ hw->buffer_spec[i].v_canvas_index = canvas_v(canvas);
+ }
+
+ canvas_config(hw->buffer_spec[i].y_canvas_index,
+ hw->buffer_spec[i].y_addr,
+ canvas_width,
+ canvas_height,
+ CANVAS_ADDR_NOWRAP,
+ CANVAS_BLKMODE_LINEAR);
+ hw->buffer_spec[i].canvas_config[0].phy_addr =
+ hw->buffer_spec[i].y_addr;
+ hw->buffer_spec[i].canvas_config[0].width =
+ canvas_width;
+ hw->buffer_spec[i].canvas_config[0].height =
+ canvas_height;
+ hw->buffer_spec[i].canvas_config[0].block_mode =
+ CANVAS_BLKMODE_LINEAR;
+
+ canvas_config(hw->buffer_spec[i].u_canvas_index,
+ hw->buffer_spec[i].u_addr,
+ canvas_width / 2,
+ canvas_height / 2,
+ CANVAS_ADDR_NOWRAP,
+ CANVAS_BLKMODE_LINEAR);
+ hw->buffer_spec[i].canvas_config[1].phy_addr =
+ hw->buffer_spec[i].u_addr;
+ hw->buffer_spec[i].canvas_config[1].width =
+ canvas_width / 2;
+ hw->buffer_spec[i].canvas_config[1].height =
+ canvas_height / 2;
+ hw->buffer_spec[i].canvas_config[1].block_mode =
+ CANVAS_BLKMODE_LINEAR;
+
+ canvas_config(hw->buffer_spec[i].v_canvas_index,
+ hw->buffer_spec[i].v_addr,
+ canvas_width / 2,
+ canvas_height / 2,
+ CANVAS_ADDR_NOWRAP,
+ CANVAS_BLKMODE_LINEAR);
+ hw->buffer_spec[i].canvas_config[2].phy_addr =
+ hw->buffer_spec[i].v_addr;
+ hw->buffer_spec[i].canvas_config[2].width =
+ canvas_width / 2;
+ hw->buffer_spec[i].canvas_config[2].height =
+ canvas_height / 2;
+ hw->buffer_spec[i].canvas_config[2].block_mode =
+ CANVAS_BLKMODE_LINEAR;
+
+ return 0;
+}
+
+/* True when at least one decode buffer slot is unreferenced. */
+static bool is_enough_free_buffer(struct vdec_mjpeg_hw_s *hw)
+{
+ int slot;
+
+ for (slot = 0; slot < DECODE_BUFFER_NUM_MAX; slot++) {
+ if (!hw->vfbuf_use[slot])
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * Return the index of the first free decode buffer slot, binding a v4l2
+ * capture buffer to it first when running in v4l mode; -1 if none is
+ * free or the v4l2 binding fails.
+ */
+static int find_free_buffer(struct vdec_mjpeg_hw_s *hw)
+{
+ int slot;
+
+ for (slot = 0; slot < DECODE_BUFFER_NUM_MAX; slot++) {
+ if (hw->vfbuf_use[slot])
+ continue;
+
+ if (hw->is_used_v4l &&
+ vmjpeg_v4l_alloc_buff_config_canvas(hw, slot))
+ return -1;
+
+ return slot;
+ }
+
+ return -1;
+}
+
+static int vmjpeg_hw_ctx_restore(struct vdec_mjpeg_hw_s *hw)
{
- struct vdec_mjpeg_hw_s *hw =
- (struct vdec_mjpeg_hw_s *)vdec->private;
struct buffer_spec_s *buff_spec;
- u32 i;
+ /* must be signed: find_free_buffer() returns -1 on failure, and a
+  * u32 here would make the "index < 0" check always false */
+ int index;
+ u32 i;
+
+ index = find_free_buffer(hw);
+ if (index < 0)
+ return -1;
WRITE_VREG(DOS_SW_RESET0, (1 << 7) | (1 << 6));
WRITE_VREG(DOS_SW_RESET0, 0);
if (!hw->init_flag) {
- vmjpeg_canvas_init(vdec);
+ vmjpeg_canvas_init(hw);
} else {
- for (i = 0; i < DECODE_BUFFER_NUM_MAX; i++) {
- buff_spec = &hw->buffer_spec[i];
- canvas_config_config(buff_spec->y_canvas_index,
- &buff_spec->canvas_config[0]);
- canvas_config_config(buff_spec->u_canvas_index,
- &buff_spec->canvas_config[1]);
- canvas_config_config(buff_spec->v_canvas_index,
- &buff_spec->canvas_config[2]);
+ if (!hw->is_used_v4l) {
+ for (i = 0; i < DECODE_BUFFER_NUM_MAX; i++) {
+ buff_spec = &hw->buffer_spec[i];
+ canvas_config_config(buff_spec->y_canvas_index,
+ &buff_spec->canvas_config[0]);
+ canvas_config_config(buff_spec->u_canvas_index,
+ &buff_spec->canvas_config[1]);
+ canvas_config_config(buff_spec->v_canvas_index,
+ &buff_spec->canvas_config[2]);
+ }
}
}
#if 1/*MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON6*/
CLEAR_VREG_MASK(MDEC_PIC_DC_CTRL, 1 << 17);
#endif
+ return 0;
}
static s32 vmjpeg_init(struct vdec_s *vdec)
if (level < pre_decode_buf_level)
return 0;
}
- hw->not_run_ready = 0;
+ if (!is_enough_free_buffer(hw)) {
+ hw->buffer_not_ready++;
+ return 0;
+ }
+
+ hw->not_run_ready = 0;
+ hw->buffer_not_ready = 0;
if (vdec->parallel_dec == 1)
return CORE_MASK_VDEC_1;
else
return;
}*/
- vmjpeg_hw_ctx_restore(vdec, i);
+ if (vmjpeg_hw_ctx_restore(hw) < 0) {
+ hw->dec_result = DEC_RESULT_ERROR;
+ mmjpeg_debug_print(DECODE_ID(hw), 0,
+ "amvdec_mmjpeg: error HW context restore\n");
+ vdec_schedule_work(&hw->work);
+ return;
+ }
#if 0
vdec_enable_input(vdec);
mod_timer(&hw->check_timer, jiffies + CHECK_INTERVAL);
hw->init_flag = 1;
mmjpeg_debug_print(DECODE_ID(hw), PRINT_FLAG_RUN_FLOW,
- "%s (0x%x 0x%x 0x%x) vldcrl 0x%x bitcnt 0x%x powerctl 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
- __func__,
- READ_VREG(VLD_MEM_VIFIFO_LEVEL),
- READ_VREG(VLD_MEM_VIFIFO_WP),
- READ_VREG(VLD_MEM_VIFIFO_RP),
- READ_VREG(VLD_DECODE_CONTROL),
- READ_VREG(VIFF_BIT_CNT),
- READ_VREG(POWER_CTL_VLD),
- READ_VREG(VLD_MEM_VIFIFO_START_PTR),
- READ_VREG(VLD_MEM_VIFIFO_CURR_PTR),
- READ_VREG(VLD_MEM_VIFIFO_CONTROL),
- READ_VREG(VLD_MEM_VIFIFO_BUF_CNTL),
- READ_VREG(VLD_MEM_VIFIFO_END_PTR));
+ "%s (0x%x 0x%x 0x%x) vldcrl 0x%x bitcnt 0x%x powerctl 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
+ __func__,
+ READ_VREG(VLD_MEM_VIFIFO_LEVEL),
+ READ_VREG(VLD_MEM_VIFIFO_WP),
+ READ_VREG(VLD_MEM_VIFIFO_RP),
+ READ_VREG(VLD_DECODE_CONTROL),
+ READ_VREG(VIFF_BIT_CNT),
+ READ_VREG(POWER_CTL_VLD),
+ READ_VREG(VLD_MEM_VIFIFO_START_PTR),
+ READ_VREG(VLD_MEM_VIFIFO_CURR_PTR),
+ READ_VREG(VLD_MEM_VIFIFO_CONTROL),
+ READ_VREG(VLD_MEM_VIFIFO_BUF_CNTL),
+ READ_VREG(VLD_MEM_VIFIFO_END_PTR));
}
static void wait_vmjpeg_search_done(struct vdec_mjpeg_hw_s *hw)
{
} while (1);
}
+/*
+ * Queue an empty EOS frame toward the v4l2 receiver so the capture side
+ * can drain.  Returns 0 on success (or when EOS is not pending), -1 if
+ * no vframe slot or capture buffer is available.
+ */
+static int notify_v4l_eos(struct vdec_s *vdec)
+{
+ struct vdec_mjpeg_hw_s *hw = (struct vdec_mjpeg_hw_s *)vdec->private;
+ struct aml_vcodec_ctx *ctx = (struct aml_vcodec_ctx *)(hw->v4l2_ctx);
+ struct vframe_s *vf = NULL;
+ struct vdec_v4l2_buffer *fb = NULL;
+
+ if (hw->is_used_v4l && hw->eos) {
+ if (kfifo_get(&hw->newframe_q, &vf) == 0 || vf == NULL) {
+ mmjpeg_debug_print(DECODE_ID(hw), PRINT_FLAG_ERROR,
+ "%s fatal error, no available buffer slot.\n",
+ __func__);
+ return -1;
+ }
+
+ if (vdec_v4l_get_buffer(hw->v4l2_ctx, &fb)) {
+ pr_err("[%d] get fb fail.\n", ctx->id);
+ return -1;
+ }
+
+ /* NOTE(review): the h264/h265 counterparts also set
+  * vf->type |= VIDTYPE_V4L_EOS here; confirm whether mjpeg
+  * needs it too. */
+ vf->timestamp = ULONG_MAX;
+ vf->v4l_mem_handle = (unsigned long)fb;
+ vf->flag = VFRAME_FLAG_EMPTY_FRAME_V4L;
+
+ kfifo_put(&hw->display_q, (const struct vframe_s *)vf);
+ vf_notify_receiver(vdec->vf_provider_name,
+ VFRAME_EVENT_PROVIDER_VFRAME_READY, NULL);
+
+ pr_info("[%d] mjpeg EOS notify.\n", ctx->id);
+ }
+
+ return 0;
+}
+
static void vmjpeg_work(struct work_struct *work)
{
struct vdec_mjpeg_hw_s *hw = container_of(work,
hw->stat &= ~STAT_VDEC_RUN;
}
hw->eos = 1;
+ if (hw->is_used_v4l)
+ notify_v4l_eos(vdec);
+
vdec_vframe_dirty(hw_to_vdec(hw), hw->chunk);
hw->chunk = NULL;
vdec_clean_input(hw_to_vdec(hw));
return -ENOMEM;
}
+ /* the ctx from v4l2 driver. */
+ hw->v4l2_ctx = pdata->private;
+
pdata->private = hw;
pdata->dec_status = vmjpeg_dec_status;
hw->platform_dev = pdev;
- if (pdata->sys_info)
+ if (pdata->sys_info) {
hw->vmjpeg_amstream_dec_info = *pdata->sys_info;
+ hw->is_used_v4l = (((unsigned long)
+ hw->vmjpeg_amstream_dec_info.param & 0x80) >> 7);
+ }
vdec_source_changed(VFORMAT_MJPEG,
1920, 1080, 60);
#include <linux/amlogic/media/codec_mm/codec_mm.h>
#include <linux/amlogic/media/codec_mm/configs.h>
#include "../utils/firmware.h"
+#include "../utils/vdec_v4l2_buffer_ops.h"
#define MEM_NAME "codec_mmpeg12"
u32 pts;
u64 pts64;
bool pts_valid;
+ ulong v4l_ref_buf_addr;
};
struct vdec_mpeg12_hw_s {
u32 reference[MAX_UD_RECORDS];
#endif
int tvp_flag;
+ bool is_used_v4l;
+ void *v4l2_ctx;
+ bool v4l_params_parsed;
};
static void vmpeg12_local_init(struct vdec_mpeg12_hw_s *hw);
static int vmpeg12_hw_ctx_restore(struct vdec_mpeg12_hw_s *hw);
#define PRINT_FLAG_ERROR 0x0
#define PRINT_FLAG_RUN_FLOW 0X0001
#define PRINT_FLAG_TIMEINFO 0x0002
-#define PRINT_FLAG_UCODE_DETAIL 0x0004
+#define PRINT_FLAG_UCODE_DETAIL 0x0004
#define PRINT_FLAG_VLD_DETAIL 0x0008
#define PRINT_FLAG_DEC_DETAIL 0x0010
#define PRINT_FLAG_BUFFER_DETAIL 0x0020
#define PRINT_FLAG_VDEC_STATUS 0x0800
#define PRINT_FLAG_PARA_DATA 0x1000
#define PRINT_FLAG_USERDATA_DETAIL 0x2000
-#define PRINT_FLAG_TIMEOUT_STATUS 0x4000
+#define PRINT_FLAG_TIMEOUT_STATUS 0x4000
+#define PRINT_FLAG_V4L_DETAIL 0x8000
96000 / 24, 96000 / 24, 96000 / 24
};
+/*
+ * Bind decode buffer slot @i to a capture buffer obtained from the v4l2
+ * queue and program the Y/UV canvases to point at it.
+ * Returns 0 on success (or if the slot is already bound), negative errno
+ * when no v4l2 buffer is available yet.
+ */
+static int vmpeg12_v4l_alloc_buff_config_canvas(struct vdec_mpeg12_hw_s *hw, int i)
+{
+	int ret;
+	u32 canvas;
+	ulong decbuf_start = 0;
+	int decbuf_y_size = 0;
+	u32 canvas_width = 0, canvas_height = 0;
+	struct vdec_s *vdec = hw_to_vdec(hw);
+	struct vdec_v4l2_buffer *fb = NULL;
+
+	/* Slot already backed by a v4l2 buffer, nothing to do. */
+	if (hw->pics[i].v4l_ref_buf_addr)
+		return 0;
+
+	ret = vdec_v4l_get_buffer(hw->v4l2_ctx, &fb);
+	if (ret) {
+		debug_print(DECODE_ID(hw), 0,
+			"[%d] get fb fail.\n",
+			((struct aml_vcodec_ctx *)
+			(hw->v4l2_ctx))->id);
+		return ret;
+	}
+
+	hw->pics[i].v4l_ref_buf_addr = (ulong)fb;
+	if (fb->num_planes == 1) {
+		/* single plane: chroma follows luma inside one allocation,
+		 * offset gives the start of the UV data. */
+		decbuf_start = fb->m.mem[0].addr;
+		decbuf_y_size = fb->m.mem[0].offset;
+		canvas_width = ALIGN(hw->frame_width, 64);
+		canvas_height = ALIGN(hw->frame_height, 32);
+		fb->m.mem[0].bytes_used = fb->m.mem[0].size;
+	} else if (fb->num_planes == 2) {
+		decbuf_start = fb->m.mem[0].addr;
+		decbuf_y_size = fb->m.mem[0].size;
+		canvas_width = ALIGN(hw->frame_width, 64);
+		canvas_height = ALIGN(hw->frame_height, 32);
+		fb->m.mem[0].bytes_used = decbuf_y_size;
+		fb->m.mem[1].bytes_used = decbuf_y_size << 1;
+	}
+
+	/* %px: fb is a pointer, %x would mismatch/truncate on 64-bit */
+	debug_print(DECODE_ID(hw), 0, "[%d] %s(), v4l ref buf addr: 0x%px\n",
+		((struct aml_vcodec_ctx *)(hw->v4l2_ctx))->id, __func__, fb);
-static u32 find_buffer(struct vdec_mpeg12_hw_s *hw)
+	if (vdec->parallel_dec == 1) {
+		u32 tmp;
+		/* lazily allocate canvas indexes (0xff means unassigned) */
+		if (canvas_y(hw->canvas_spec[i]) == 0xff) {
+			tmp = vdec->get_canvas_ex(CORE_MASK_VDEC_1, vdec->id);
+			hw->canvas_spec[i] &= ~0xff;
+			hw->canvas_spec[i] |= tmp;
+		}
+		if (canvas_u(hw->canvas_spec[i]) == 0xff) {
+			tmp = vdec->get_canvas_ex(CORE_MASK_VDEC_1, vdec->id);
+			hw->canvas_spec[i] &= ~(0xffff << 8);
+			hw->canvas_spec[i] |= tmp << 8;
+			hw->canvas_spec[i] |= tmp << 16;
+		}
+		canvas = hw->canvas_spec[i];
+	} else {
+		canvas = vdec->get_canvas(i, 2);
+		hw->canvas_spec[i] = canvas;
+	}
+
+	hw->canvas_config[i][0].phy_addr = decbuf_start;
+	hw->canvas_config[i][0].width = canvas_width;
+	hw->canvas_config[i][0].height = canvas_height;
+	hw->canvas_config[i][0].block_mode = hw->canvas_mode;
+	hw->canvas_config[i][0].endian =
+		(hw->canvas_mode == CANVAS_BLKMODE_LINEAR) ? 7 : 0;
+	canvas_config_config(canvas_y(canvas), &hw->canvas_config[i][0]);
+
+	/* UV plane: half height of luma (NV12-style layout) */
+	hw->canvas_config[i][1].phy_addr = decbuf_start + decbuf_y_size;
+	hw->canvas_config[i][1].width = canvas_width;
+	hw->canvas_config[i][1].height = canvas_height / 2;
+	hw->canvas_config[i][1].block_mode = hw->canvas_mode;
+	hw->canvas_config[i][1].endian =
+		(hw->canvas_mode == CANVAS_BLKMODE_LINEAR) ? 7 : 0;
+	canvas_config_config(canvas_u(canvas), &hw->canvas_config[i][1]);
+
+	return 0;
+}
+
+
+/* Return true if at least one decode buffer slot is currently unused. */
+static bool is_enough_free_buffer(struct vdec_mpeg12_hw_s *hw)
 {
-	u32 i;
+	int i;
 	for (i = 0; i < DECODE_BUFFER_NUM_MAX; i++) {
 		if (hw->vfbuf_use[i] == 0)
-			return i;
+			break;
 	}
-	return DECODE_BUFFER_NUM_MAX;
+	/* i == MAX means the loop found no free slot */
+	return i == DECODE_BUFFER_NUM_MAX ? false : true;
+}
+
+/*
+ * Find a free decode buffer slot; in v4l2 mode also bind it to a v4l2
+ * capture buffer and configure its canvases.
+ * Returns the slot index, or -1 when no slot (or no v4l2 buffer) is free.
+ */
+static int find_free_buffer(struct vdec_mpeg12_hw_s *hw)
+{
+	int i;
+
+	for (i = 0; i < DECODE_BUFFER_NUM_MAX; i++) {
+		if (hw->vfbuf_use[i] == 0)
+			break;
+	}
+
+	if (i == DECODE_BUFFER_NUM_MAX)
+		return -1;
+
+	if (hw->is_used_v4l)
+		if (vmpeg12_v4l_alloc_buff_config_canvas(hw, i))
+			return -1;
+	return i;
 }
static u32 spec_to_index(struct vdec_mpeg12_hw_s *hw, u32 spec)
vdec_schedule_work(&hw->work);
return -1;
}
+
+ if (hw->is_used_v4l) {
+ vf->v4l_mem_handle
+ = hw->pics[index].v4l_ref_buf_addr;
+ if (vdec_v4l_binding_fd_and_vf(vf->v4l_mem_handle, vf) < 0) {
+ debug_print(DECODE_ID(hw), PRINT_FLAG_V4L_DETAIL,
+ "v4l: binding vf fail.\n");
+ return -1;
+ }
+ debug_print(DECODE_ID(hw), PRINT_FLAG_V4L_DETAIL,
+ "[%d] %s(), v4l mem handle: 0x%lx\n",
+ ((struct aml_vcodec_ctx *)(hw->v4l2_ctx))->id,
+ __func__, vf->v4l_mem_handle);
+ }
+
hw->vfbuf_use[index]++;
vf->index = index;
set_frame_info(hw, vf);
hw->frame_height = tmp;
}
+ if (hw->is_used_v4l) {
+ struct aml_vcodec_ctx *ctx =
+ (struct aml_vcodec_ctx *)(hw->v4l2_ctx);
+
+ if (ctx->param_sets_from_ucode && !hw->v4l_params_parsed) {
+ struct aml_vdec_pic_infos info;
+
+ info.visible_width = hw->frame_width;
+ info.visible_height = hw->frame_height;
+ info.coded_width = ALIGN(hw->frame_width, 64);
+ info.coded_height = ALIGN(hw->frame_height, 64);
+ info.dpb_size = MAX_BMMU_BUFFER_NUM - 1;
+ hw->v4l_params_parsed = true;
+ vdec_v4l_set_pic_infos(ctx, &info);
+ }
+
+ if (!ctx->v4l_codec_ready)
+ return IRQ_HANDLED;
+ }
+
new_pic->buffer_info = info;
new_pic->offset = offset;
new_pic->index = index;
prepare_display_buf(hw, &hw->pics[index]);
}
+/*
+ * Push an empty "EOS" vframe into the display queue so the v4l2 layer can
+ * signal end-of-stream to userspace.  Only acts in v4l2 mode after the
+ * decoder has reached EOS; returns 0 on success, -1 on failure.
+ */
+static int notify_v4l_eos(struct vdec_s *vdec)
+{
+	struct vdec_mpeg12_hw_s *hw = (struct vdec_mpeg12_hw_s *)vdec->private;
+	struct aml_vcodec_ctx *ctx = (struct aml_vcodec_ctx *)(hw->v4l2_ctx);
+	struct vframe_s *vf = NULL;
+	struct vdec_v4l2_buffer *fb = NULL;
+
+	if (hw->is_used_v4l && hw->eos) {
+		if (kfifo_get(&hw->newframe_q, &vf) == 0 || vf == NULL) {
+			debug_print(DECODE_ID(hw), PRINT_FLAG_ERROR,
+				"%s fatal error, no available buffer slot.\n",
+				__func__);
+			return -1;
+		}
+
+		if (vdec_v4l_get_buffer(hw->v4l2_ctx, &fb)) {
+			pr_err("[%d] get fb fail.\n", ctx->id);
+			return -1;
+		}
+
+		/* ULONG_MAX timestamp + empty-frame flag mark this vframe
+		 * as the EOS sentinel, not real picture data. */
+		vf->timestamp = ULONG_MAX;
+		vf->v4l_mem_handle = (unsigned long)fb;
+		vf->flag = VFRAME_FLAG_EMPTY_FRAME_V4L;
+
+		kfifo_put(&hw->display_q, (const struct vframe_s *)vf);
+		vf_notify_receiver(vdec->vf_provider_name,
+			VFRAME_EVENT_PROVIDER_VFRAME_READY, NULL);
+
+		pr_info("[%d] mpeg12 EOS notify.\n", ctx->id);
+	}
+
+	return 0;
+}
+
static void vmpeg12_work_implement(struct vdec_mpeg12_hw_s *hw,
struct vdec_s *vdec, int from)
{
hw->chunk = NULL;
vdec_clean_input(vdec);
flush_output(hw);
+ if (hw->is_used_v4l)
+ notify_v4l_eos(vdec);
+
debug_print(DECODE_ID(hw), 0,
"%s: end of stream, num %d(%d)\n",
__func__, hw->disp_num, hw->dec_num);
}
for (i = 0; i < MAX_BMMU_BUFFER_NUM; i++) {
-
unsigned canvas;
+
if (i == (MAX_BMMU_BUFFER_NUM - 1)) /* SWAP&CCBUF&MATIRX&MV */
decbuf_size = WORKSPACE_SIZE;
- ret = decoder_bmmu_box_alloc_buf_phy(hw->mm_blk_handle, i,
- decbuf_size, DRIVER_NAME, &decbuf_start);
- if (ret < 0) {
- pr_err("mmu alloc failed! size 0x%d idx %d\n",
- decbuf_size, i);
- return;
+
+ if (hw->is_used_v4l && !(i == (MAX_BMMU_BUFFER_NUM - 1))) {
+ continue;
+ } else {
+ ret = decoder_bmmu_box_alloc_buf_phy(hw->mm_blk_handle, i,
+ decbuf_size, DRIVER_NAME, &decbuf_start);
+ if (ret < 0) {
+ pr_err("mmu alloc failed! size 0x%d idx %d\n",
+ decbuf_size, i);
+ return;
+ }
}
if (i == (MAX_BMMU_BUFFER_NUM - 1)) {
static int vmpeg12_hw_ctx_restore(struct vdec_mpeg12_hw_s *hw)
{
u32 index, i;
- index = find_buffer(hw);
+ index = find_free_buffer(hw);
if (index >= DECODE_BUFFER_NUM_MAX)
return -1;
if (!hw->init_flag)
vmpeg12_canvas_init(hw);
else {
- WRITE_VREG(MREG_CO_MV_START, hw->buf_start);
- for (i = 0; i < DECODE_BUFFER_NUM_MAX; i++) {
- canvas_config_config(canvas_y(hw->canvas_spec[i]),
- &hw->canvas_config[i][0]);
- canvas_config_config(canvas_u(hw->canvas_spec[i]),
- &hw->canvas_config[i][1]);
+ if (!hw->is_used_v4l) {
+ WRITE_VREG(MREG_CO_MV_START, hw->buf_start);
+ for (i = 0; i < DECODE_BUFFER_NUM_MAX; i++) {
+ canvas_config_config(canvas_y(hw->canvas_spec[i]),
+ &hw->canvas_config[i][0]);
+ canvas_config_config(canvas_u(hw->canvas_spec[i]),
+ &hw->canvas_config[i][1]);
+ }
}
}
static unsigned long run_ready(struct vdec_s *vdec, unsigned long mask)
{
- int index;
-
struct vdec_mpeg12_hw_s *hw =
(struct vdec_mpeg12_hw_s *)vdec->private;
if (hw->eos)
}
#endif
- index = find_buffer(hw);
- if (index >= DECODE_BUFFER_NUM_MAX) {
+ if (!is_enough_free_buffer(hw)) {
hw->buffer_not_ready++;
return 0;
}
return -ENOMEM;
}
+ /* the ctx from v4l2 driver. */
+ hw->v4l2_ctx = pdata->private;
+
pdata->private = hw;
pdata->dec_status = vmmpeg12_dec_status;
pdata->run_ready = run_ready;
reset_user_data_buf(hw);
#endif
+ hw->is_used_v4l = (((unsigned long)
+ hw->vmpeg12_amstream_dec_info.param & 0x80) >> 7);
+
/*INIT_WORK(&userdata_push_work, userdata_push_do_work);*/
return 0;
}
#include "../utils/decoder_bmmu_box.h"
#include <linux/amlogic/media/codec_mm/configs.h>
#include "../utils/firmware.h"
-
-
+#include "../utils/vdec_v4l2_buffer_ops.h"
#define DRIVER_NAME "ammvdec_mpeg4"
#define MODULE_NAME "ammvdec_mpeg4"
#define PRINT_FRAMEBASE_DATA 0x0400
#define PRINT_FLAG_VDEC_STATUS 0x0800
#define PRINT_FLAG_TIMEOUT_STATUS 0x1000
+#define PRINT_FLAG_V4L_DETAIL 0x8000
int mmpeg4_debug_print(int index, int debug_flag, const char *fmt, ...)
{
bool pts_valid;
u32 duration;
u32 repeat_cnt;
+ ulong v4l_ref_buf_addr;
};
struct vdec_mpeg4_hw_s {
struct firmware_s *fw;
u32 blkmode;
wait_queue_head_t wait_q;
+ bool is_used_v4l;
+ void *v4l2_ctx;
+ bool v4l_params_parsed;
};
static void vmpeg4_local_init(struct vdec_mpeg4_hw_s *hw);
static int vmpeg4_hw_ctx_restore(struct vdec_mpeg4_hw_s *hw);
};
static void reset_process_time(struct vdec_mpeg4_hw_s *hw);
-static int find_buffer(struct vdec_mpeg4_hw_s *hw)
+
+/*
+ * Bind decode buffer slot @i to a capture buffer obtained from the v4l2
+ * queue and program the Y/UV canvases to point at it.
+ * Returns 0 on success (or if the slot is already bound), negative errno
+ * when no v4l2 buffer is available yet.
+ */
+static int vmpeg4_v4l_alloc_buff_config_canvas(struct vdec_mpeg4_hw_s *hw, int i)
+{
+	int ret;
+	u32 canvas;
+	ulong decbuf_start = 0;
+	int decbuf_y_size = 0;
+	u32 canvas_width = 0, canvas_height = 0;
+	struct vdec_s *vdec = hw_to_vdec(hw);
+	struct vdec_v4l2_buffer *fb = NULL;
+
+	/* Slot already backed by a v4l2 buffer, nothing to do. */
+	if (hw->pic[i].v4l_ref_buf_addr)
+		return 0;
+
+	ret = vdec_v4l_get_buffer(hw->v4l2_ctx, &fb);
+	if (ret) {
+		mmpeg4_debug_print(DECODE_ID(hw), 0,
+			"[%d] get fb fail.\n",
+			((struct aml_vcodec_ctx *)
+			(hw->v4l2_ctx))->id);
+		return ret;
+	}
+
+	hw->pic[i].v4l_ref_buf_addr = (ulong)fb;
+	if (fb->num_planes == 1) {
+		/* single plane: chroma follows luma inside one allocation,
+		 * offset gives the start of the UV data. */
+		decbuf_start = fb->m.mem[0].addr;
+		decbuf_y_size = fb->m.mem[0].offset;
+		canvas_width = ALIGN(hw->frame_width, 64);
+		canvas_height = ALIGN(hw->frame_height, 32);
+		fb->m.mem[0].bytes_used = fb->m.mem[0].size;
+	} else if (fb->num_planes == 2) {
+		decbuf_start = fb->m.mem[0].addr;
+		decbuf_y_size = fb->m.mem[0].size;
+		canvas_width = ALIGN(hw->frame_width, 64);
+		canvas_height = ALIGN(hw->frame_height, 32);
+		fb->m.mem[0].bytes_used = decbuf_y_size;
+		fb->m.mem[1].bytes_used = decbuf_y_size << 1;
+	}
+
+	/* %px: fb is a pointer, %x would mismatch/truncate on 64-bit */
+	mmpeg4_debug_print(DECODE_ID(hw), 0, "[%d] %s(), v4l ref buf addr: 0x%px\n",
+		((struct aml_vcodec_ctx *)(hw->v4l2_ctx))->id, __func__, fb);
+
+	if (vdec->parallel_dec == 1) {
+		u32 tmp;
+		/* lazily allocate canvas indexes (0xff means unassigned) */
+		if (canvas_y(hw->canvas_spec[i]) == 0xff) {
+			tmp = vdec->get_canvas_ex(CORE_MASK_VDEC_1, vdec->id);
+			hw->canvas_spec[i] &= ~0xff;
+			hw->canvas_spec[i] |= tmp;
+		}
+		if (canvas_u(hw->canvas_spec[i]) == 0xff) {
+			tmp = vdec->get_canvas_ex(CORE_MASK_VDEC_1, vdec->id);
+			hw->canvas_spec[i] &= ~(0xffff << 8);
+			hw->canvas_spec[i] |= tmp << 8;
+			hw->canvas_spec[i] |= tmp << 16;
+		}
+		canvas = hw->canvas_spec[i];
+	} else {
+		canvas = vdec->get_canvas(i, 2);
+		hw->canvas_spec[i] = canvas;
+	}
+
+	hw->canvas_config[i][0].phy_addr = decbuf_start;
+	hw->canvas_config[i][0].width = canvas_width;
+	hw->canvas_config[i][0].height = canvas_height;
+	hw->canvas_config[i][0].block_mode = hw->blkmode;
+	if (hw->blkmode == CANVAS_BLKMODE_LINEAR)
+		hw->canvas_config[i][0].endian = 7;
+	else
+		hw->canvas_config[i][0].endian = 0;
+	canvas_config_config(canvas_y(canvas),
+		&hw->canvas_config[i][0]);
+
+	/* UV plane: half height of luma (NV12-style layout) */
+	hw->canvas_config[i][1].phy_addr =
+		decbuf_start + decbuf_y_size;
+	hw->canvas_config[i][1].width = canvas_width;
+	hw->canvas_config[i][1].height = (canvas_height >> 1);
+	hw->canvas_config[i][1].block_mode = hw->blkmode;
+	if (hw->blkmode == CANVAS_BLKMODE_LINEAR)
+		hw->canvas_config[i][1].endian = 7;
+	else
+		hw->canvas_config[i][1].endian = 0;
+	canvas_config_config(canvas_u(canvas),
+		&hw->canvas_config[i][1]);
+
+	return 0;
+}
+
+/* Return true if at least one decode buffer slot is currently unused. */
+static bool is_enough_free_buffer(struct vdec_mpeg4_hw_s *hw)
 {
 	int i;
 	for (i = 0; i < DECODE_BUFFER_NUM_MAX; i++) {
 		if (hw->vfbuf_use[i] == 0)
-			return i;
+			break;
+	}
+
+	/* i == MAX means the loop found no free slot */
+	return i == DECODE_BUFFER_NUM_MAX ? false : true;
+}
+
+/*
+ * Find a free decode buffer slot; in v4l2 mode also bind it to a v4l2
+ * capture buffer and configure its canvases.
+ * Returns the slot index, or -1 when no slot (or no v4l2 buffer) is free.
+ */
+static int find_free_buffer(struct vdec_mpeg4_hw_s *hw)
+{
+	int i;
+
+	for (i = 0; i < DECODE_BUFFER_NUM_MAX; i++) {
+		if (hw->vfbuf_use[i] == 0)
+			break;
 	}
-	return DECODE_BUFFER_NUM_MAX;
+	if (i == DECODE_BUFFER_NUM_MAX)
+		return -1;
+
+	if (hw->is_used_v4l)
+		if (vmpeg4_v4l_alloc_buff_config_canvas(hw, i))
+			return -1;
+
+	return i;
 }
static int spec_to_index(struct vdec_mpeg4_hw_s *hw, u32 spec)
return index;
}
-
static int prepare_display_buf(struct vdec_mpeg4_hw_s * hw,
struct pic_info_t *pic)
{
return -1;
}
+ if (hw->is_used_v4l) {
+ vf->v4l_mem_handle
+ = hw->pic[index].v4l_ref_buf_addr;
+ mmpeg4_debug_print(DECODE_ID(hw), PRINT_FLAG_V4L_DETAIL,
+ "[%d] %s(), v4l mem handle: 0x%lx\n",
+ ((struct aml_vcodec_ctx *)(hw->v4l2_ctx))->id,
+ __func__, vf->v4l_mem_handle);
+ }
+
vf->index = pic->index;
vf->width = hw->frame_width;
vf->height = hw->frame_height;
return -1;
}
+ if (hw->is_used_v4l) {
+ vf->v4l_mem_handle
+ = hw->pic[index].v4l_ref_buf_addr;
+ if (vdec_v4l_binding_fd_and_vf(vf->v4l_mem_handle, vf) < 0) {
+ mmpeg4_debug_print(DECODE_ID(hw), PRINT_FLAG_V4L_DETAIL,
+ "v4l: binding vf fail.\n");
+ return -1;
+ }
+ mmpeg4_debug_print(DECODE_ID(hw), PRINT_FLAG_V4L_DETAIL,
+ "[%d] %s(), v4l mem handle: 0x%lx\n",
+ ((struct aml_vcodec_ctx *)(hw->v4l2_ctx))->id,
+ __func__, vf->v4l_mem_handle);
+ }
+
vf->index = index;
vf->width = hw->frame_width;
vf->height = hw->frame_height;
if (dec_h != 0)
hw->frame_height = dec_h;
+ if (hw->is_used_v4l) {
+ struct aml_vcodec_ctx *ctx =
+ (struct aml_vcodec_ctx *)(hw->v4l2_ctx);
+
+ if (ctx->param_sets_from_ucode && !hw->v4l_params_parsed) {
+ struct aml_vdec_pic_infos info;
+
+ info.visible_width = hw->frame_width;
+ info.visible_height = hw->frame_height;
+ info.coded_width = ALIGN(hw->frame_width, 64);
+ info.coded_height = ALIGN(hw->frame_height, 64);
+ info.dpb_size = MAX_BMMU_BUFFER_NUM - 1;
+ hw->v4l_params_parsed = true;
+ vdec_v4l_set_pic_infos(ctx, &info);
+ }
+
+ if (!ctx->v4l_codec_ready)
+ return IRQ_HANDLED;
+ }
+
if (hw->vmpeg4_amstream_dec_info.rate == 0) {
if (vop_time_inc < hw->last_vop_time_inc) {
duration = vop_time_inc +
return IRQ_HANDLED;
}
disp_pic = &hw->pic[index];
-
if ((hw->first_i_frame_ready == 0) &&
(I_PICTURE == disp_pic->pic_type))
hw->first_i_frame_ready = 1;
}
}
+/*
+ * Push an empty "EOS" vframe into the display queue so the v4l2 layer can
+ * signal end-of-stream to userspace.  Only acts in v4l2 mode after the
+ * decoder has reached EOS; returns 0 on success, -1 on failure.
+ */
+static int notify_v4l_eos(struct vdec_s *vdec)
+{
+	struct vdec_mpeg4_hw_s *hw = (struct vdec_mpeg4_hw_s *)vdec->private;
+	struct aml_vcodec_ctx *ctx = (struct aml_vcodec_ctx *)(hw->v4l2_ctx);
+	struct vframe_s *vf = NULL;
+	struct vdec_v4l2_buffer *fb = NULL;
+
+	if (hw->is_used_v4l && hw->eos) {
+		if (kfifo_get(&hw->newframe_q, &vf) == 0 || vf == NULL) {
+			mmpeg4_debug_print(DECODE_ID(hw), PRINT_FLAG_ERROR,
+				"%s fatal error, no available buffer slot.\n",
+				__func__);
+			return -1;
+		}
+
+		if (vdec_v4l_get_buffer(hw->v4l2_ctx, &fb)) {
+			pr_err("[%d] get fb fail.\n", ctx->id);
+			return -1;
+		}
+
+		/* ULONG_MAX timestamp + empty-frame flag mark this vframe
+		 * as the EOS sentinel, not real picture data. */
+		vf->timestamp = ULONG_MAX;
+		vf->v4l_mem_handle = (unsigned long)fb;
+		vf->flag = VFRAME_FLAG_EMPTY_FRAME_V4L;
+
+		kfifo_put(&hw->display_q, (const struct vframe_s *)vf);
+		vf_notify_receiver(vdec->vf_provider_name,
+			VFRAME_EVENT_PROVIDER_VFRAME_READY, NULL);
+
+		pr_info("[%d] mpeg4 EOS notify.\n", ctx->id);
+	}
+
+	return 0;
+}
+
static void vmpeg4_work(struct work_struct *work)
{
struct vdec_mpeg4_hw_s *hw =
hw->chunk = NULL;
vdec_clean_input(vdec);
flush_output(hw);
+
+ if (hw->is_used_v4l)
+ notify_v4l_eos(vdec);
+
mmpeg4_debug_print(DECODE_ID(hw), 0,
"%s: eos flushed, frame_num %d\n",
__func__, hw->frame_num);
canvas_height = 576;
decbuf_y_size = 0x80000;
decbuf_size = 0x100000;
+ } else {
+ int w = 1920;
+ int h = 1088;
+ int align_w, align_h;
+ int max, min;
+
+ align_w = ALIGN(w, 64);
+ align_h = ALIGN(h, 64);
+ if (align_w > align_h) {
+ max = align_w;
+ min = align_h;
} else {
- int w = 1920;
- int h = 1088;
- int align_w, align_h;
- int max, min;
-
- align_w = ALIGN(w, 64);
- align_h = ALIGN(h, 64);
- if (align_w > align_h) {
- max = align_w;
- min = align_h;
+ max = align_h;
+ min = align_w;
+ }
+ /* HD & SD */
+ if ((max > 1920 || min > 1088) &&
+ ALIGN(align_w * align_h * 3/2, SZ_64K) * 9 <=
+ buf_size) {
+ canvas_width = align_w;
+ canvas_height = align_h;
+ decbuf_y_size =
+ ALIGN(align_w * align_h, SZ_64K);
+ decbuf_size =
+ ALIGN(align_w * align_h * 3/2, SZ_64K);
+ } else { /*1080p*/
+ if (h > w) {
+ canvas_width = 1088;
+ canvas_height = 1920;
} else {
- max = align_h;
- min = align_w;
- }
- /* HD & SD */
- if ((max > 1920 || min > 1088) &&
- ALIGN(align_w * align_h * 3/2, SZ_64K) * 9 <=
- buf_size) {
- canvas_width = align_w;
- canvas_height = align_h;
- decbuf_y_size =
- ALIGN(align_w * align_h, SZ_64K);
- decbuf_size =
- ALIGN(align_w * align_h * 3/2, SZ_64K);
- } else { /*1080p*/
- if (h > w) {
- canvas_width = 1088;
- canvas_height = 1920;
- } else {
- canvas_width = 1920;
- canvas_height = 1088;
- }
- decbuf_y_size = 0x200000;
- decbuf_size = 0x300000;
+ canvas_width = 1920;
+ canvas_height = 1088;
}
+ decbuf_y_size = 0x200000;
+ decbuf_size = 0x300000;
}
+ }
for (i = 0; i < MAX_BMMU_BUFFER_NUM; i++) {
unsigned canvas;
+
if (i == (MAX_BMMU_BUFFER_NUM - 1))
decbuf_size = WORKSPACE_SIZE;
- ret = decoder_bmmu_box_alloc_buf_phy(hw->mm_blk_handle, i,
- decbuf_size, DRIVER_NAME, &decbuf_start);
- if (ret < 0) {
- pr_err("mmu alloc failed! size 0x%d idx %d\n",
- decbuf_size, i);
- return ret;
+
+ if (hw->is_used_v4l && !(i == (MAX_BMMU_BUFFER_NUM - 1))) {
+ continue;
+ } else {
+ ret = decoder_bmmu_box_alloc_buf_phy(hw->mm_blk_handle, i,
+ decbuf_size, DRIVER_NAME, &decbuf_start);
+ if (ret < 0) {
+ pr_err("mmu alloc failed! size %d idx %d\n",
+ decbuf_size, i);
+ return ret;
+ }
}
if (i == (MAX_BMMU_BUFFER_NUM - 1)) {
return 0;
}
+
static void vmpeg4_dump_state(struct vdec_s *vdec)
{
struct vdec_mpeg4_hw_s *hw =
int index, i;
void *workspace_buf = NULL;
- index = find_buffer(hw);
- if (index >= DECODE_BUFFER_NUM_MAX)
+ index = find_free_buffer(hw);
+ if (index < 0)
return -1;
if (!hw->init_flag) {
if (vmpeg4_canvas_init(hw) < 0)
return -1;
} else {
- for (i = 0; i < DECODE_BUFFER_NUM_MAX; i++) {
- canvas_config_config(canvas_y(hw->canvas_spec[i]),
- &hw->canvas_config[i][0]);
- canvas_config_config(canvas_u(hw->canvas_spec[i]),
- &hw->canvas_config[i][1]);
+ if (!hw->is_used_v4l) {
+ for (i = 0; i < DECODE_BUFFER_NUM_MAX; i++) {
+ canvas_config_config(canvas_y(hw->canvas_spec[i]),
+ &hw->canvas_config[i][0]);
+ canvas_config_config(canvas_u(hw->canvas_spec[i]),
+ &hw->canvas_config[i][1]);
+ }
}
}
/* prepare REF0 & REF1
static unsigned long run_ready(struct vdec_s *vdec, unsigned long mask)
{
- int index;
struct vdec_mpeg4_hw_s *hw = (struct vdec_mpeg4_hw_s *)vdec->private;
if (hw->eos)
}
}
- index = find_buffer(hw);
- if (index >= DECODE_BUFFER_NUM_MAX) {
+ if (!is_enough_free_buffer(hw)) {
hw->buffer_not_ready++;
return 0;
}
+
hw->not_run_ready = 0;
hw->buffer_not_ready = 0;
if (vdec->parallel_dec == 1)
}
memset(hw, 0, sizeof(struct vdec_mpeg4_hw_s));
+ /* the ctx from v4l2 driver. */
+ hw->v4l2_ctx = pdata->private;
+
pdata->private = hw;
pdata->dec_status = dec_status;
/* pdata->set_trickmode = set_trickmode; */
hw->platform_dev = pdev;
hw->blkmode = pdata->canvas_mode;
-
if (pdata->sys_info) {
hw->vmpeg4_amstream_dec_info = *pdata->sys_info;
if ((hw->vmpeg4_amstream_dec_info.height != 0) &&
hw->vmpeg4_amstream_dec_info.width,
hw->vmpeg4_amstream_dec_info.height,
hw->vmpeg4_amstream_dec_info.rate);
+ hw->is_used_v4l = (((unsigned long)
+ hw->vmpeg4_amstream_dec_info.param & 0x80) >> 7);
}
if (vmmpeg4_init(hw) < 0) {
pdata->dec_status = NULL;
return -ENODEV;
}
+
if (pdata->parallel_dec == 1)
vdec_core_request(pdata, CORE_MASK_VDEC_1);
else {
mmpeg4_debug_print(DECODE_ID(hw), 0,
"%s end.\n", __func__);
-
return 0;
}
decoder_common-objs += config_parser.o secprot.o vdec_profile.o
decoder_common-objs += amstream_profile.o
decoder_common-objs += frame_check.o amlogic_fbc_hook.o
+decoder_common-objs += vdec_v4l2_buffer_ops.o
ulong phy_addr,
int size);
+/*
+ * Copy @n bytes from @from to @to, where @from may be either a userspace
+ * or a kernel-space buffer.  When access_ok() rejects @from it is assumed
+ * to be a kernel pointer and a plain memcpy is used instead.
+ * NOTE(review): the memcpy fallback bypasses all access checking — relies
+ * on callers only ever passing trusted kernel buffers here; confirm.
+ * Returns 0 on success, non-zero byte count on a failed copy_from_user.
+ */
+static int aml_copy_from_user(void *to, const void *from, ulong n)
+{
+	int ret =0;
+
+	if (likely(access_ok(VERIFY_READ, from, n)))
+		ret = copy_from_user(to, from, n);
+	else
+		memcpy(to, from, n);
+
+	return ret;
+}
+
static int copy_from_user_to_phyaddr(void *virts, const char __user *buf,
u32 size, ulong phys, u32 pading, bool is_mapped)
{
u8 *p = virts;
if (is_mapped) {
- if (copy_from_user(p, buf, size))
+ if (aml_copy_from_user(p, buf, size))
return -EFAULT;
if (pading)
if (!p)
return -1;
- if (copy_from_user(p, buf + i * span, span)) {
+ if (aml_copy_from_user(p, buf + i * span, span)) {
codec_mm_unmap_phyaddr(p);
return -EFAULT;
}
if (!p)
return -1;
- if (copy_from_user(p, buf + span, remain)) {
+ if (aml_copy_from_user(p, buf + span, remain)) {
codec_mm_unmap_phyaddr(p);
return -EFAULT;
}
struct vframe_chunk_s *chunk;
struct vdec_s *vdec = input->vdec;
struct vframe_block_list_s *block;
-
int need_pading_size = MIN_FRAME_PADDING_SIZE;
if (vdec_secure(vdec)) {
--- /dev/null
+#include <linux/file.h>
+#include <linux/anon_inodes.h>
+#include "vdec_v4l2_buffer_ops.h"
+
+const struct file_operations v4l2_file_fops = {};
+
+/* True if @file was created by vdec_v4l_alloc_fd (identified by its fops). */
+static int is_v4l2_buf_file(struct file *file)
+{
+	return file->f_op == &v4l2_file_fops;
+}
+
+/*
+ * Allocate an anonymous-inode backed fd used as a handle for passing a
+ * vframe to userspace.  Returns the new fd, or a negative errno.
+ */
+static int vdec_v4l_alloc_fd(void)
+{
+	int fd;
+	struct file *file = NULL;
+
+	fd = get_unused_fd_flags(O_CLOEXEC);
+	if (fd < 0) {
+		pr_err("v4l: get unused fd fail\n");
+		return fd;
+	}
+
+	file = anon_inode_getfile("v4l2_meta_file", &v4l2_file_fops, NULL, 0);
+	if (IS_ERR(file)) {
+		put_unused_fd(fd);
+		pr_err("v4l: get file fail\n");
+		return PTR_ERR(file);
+	}
+
+	/* fd_install transfers the file reference to the fd table */
+	fd_install(fd, file);
+
+	pr_info("v4l: fd %d, file %p, data %p\n",
+		fd, file, file->private_data);
+
+	return fd;
+}
+
+/*
+ * Attach @data (a vframe pointer) to the file behind @fd.
+ * The fd must have been created by vdec_v4l_alloc_fd; -EBADF otherwise.
+ * fget() takes an extra file reference which must be balanced with
+ * fput() on every exit path — the fd table already holds its own
+ * reference from fd_install(), so keeping this one would leak the file.
+ */
+static int vdec_v4l_fd_install_data(int fd, void *data)
+{
+	struct file *file;
+
+	file = fget(fd);
+
+	if (!file) {
+		pr_info("v4l: fget fd %d fail!, comm %s, pid %d\n",
+			fd, current->comm, current->pid);
+		return -EBADF;
+	}
+
+	if (!is_v4l2_buf_file(file)) {
+		fput(file);
+		pr_info("v4l: v4l2 check fd fail!\n");
+		return -EBADF;
+	}
+
+	file->private_data = data;
+	fput(file);
+
+	return 0;
+}
+
+/*
+ * Bind a vframe @vf to the v4l2 buffer behind @v4l_handle by allocating an
+ * anon fd and stashing @vf in its private data.  Idempotent: returns 0
+ * immediately if the buffer already carries a valid fd.
+ * NOTE(review): on install failure, put_unused_fd() is called on an fd
+ * that fd_install() already published to the fd table — that looks like
+ * the wrong cleanup primitive (the fd would need closing); confirm.
+ */
+int vdec_v4l_binding_fd_and_vf(ulong v4l_handle, void *vf)
+{
+	int ret = 0, fd = -1;
+	struct vdec_v4l2_buffer *v4l_buf =
+		(struct vdec_v4l2_buffer *) v4l_handle;
+
+	if (v4l_buf->m.vf_fd > 0)
+		return 0;
+
+	fd = vdec_v4l_alloc_fd();
+	if (fd < 0) {
+		pr_err("v4l: alloc fd fail %d.\n", fd);
+		return fd;
+	}
+
+	ret = vdec_v4l_fd_install_data(fd, vf);
+	if (ret < 0) {
+		put_unused_fd(fd);
+		pr_err("v4l: fd install data fail %d.\n", ret);
+		return ret;
+	}
+
+	v4l_buf->m.vf_fd = fd;
+
+	return 0;
+}
+EXPORT_SYMBOL(vdec_v4l_binding_fd_and_vf);
+EXPORT_SYMBOL(vdec_v4l_binding_fd_and_vf);
+
+/*
+ * Fetch a free capture buffer from the v4l2 decoder instance.
+ * Returns 0 with *@out set on success, -EIO if the instance is gone,
+ * otherwise the error from the dec_if get_param hook.
+ */
+int vdec_v4l_get_buffer(struct aml_vcodec_ctx *ctx,
+	struct vdec_v4l2_buffer **out)
+{
+	int ret = 0;
+
+	if (ctx->drv_handle == 0)
+		return -EIO;
+
+	ret = ctx->dec_if->get_param(ctx->drv_handle,
+		GET_PARAM_FREE_FRAME_BUFFER, out);
+
+	return ret;
+}
+EXPORT_SYMBOL(vdec_v4l_get_buffer);
+EXPORT_SYMBOL(vdec_v4l_get_buffer);
+
+/*
+ * Report parsed picture information (resolution, dpb size) to the v4l2
+ * decoder instance so it can (re)configure its capture queue.
+ * Returns 0 on success, -EIO if the instance is gone.
+ */
+int vdec_v4l_set_pic_infos(struct aml_vcodec_ctx *ctx,
+	struct aml_vdec_pic_infos *info)
+{
+	int ret = 0;
+
+	if (ctx->drv_handle == 0)
+		return -EIO;
+
+	ret = ctx->dec_if->set_param(ctx->drv_handle,
+		SET_PARAM_PIC_INFO, info);
+
+	return ret;
+}
+EXPORT_SYMBOL(vdec_v4l_set_pic_infos);
+
+/*
+ * Signal the v4l2 decoder instance that a written frame has been consumed
+ * (used to synchronize the OUTPUT-queue writer before params are parsed).
+ * Returns 0 on success, -EIO if the instance is gone.
+ */
+int vdec_v4l_write_frame_sync(struct aml_vcodec_ctx *ctx)
+{
+	int ret = 0;
+
+	if (ctx->drv_handle == 0)
+		return -EIO;
+
+	ret = ctx->dec_if->set_param(ctx->drv_handle,
+		SET_PARAM_WRITE_FRAME_SYNC, NULL);
+
+	return ret;
+}
+EXPORT_SYMBOL(vdec_v4l_write_frame_sync);
+
--- /dev/null
+#ifndef _AML_VDEC_V4L2_BUFFER_H_
+#define _AML_VDEC_V4L2_BUFFER_H_
+
+#include "../../../amvdec_ports/vdec_drv_base.h"
+#include "../../../amvdec_ports/aml_vcodec_adapt.h"
+
+/* Fetch a free capture buffer from the v4l2 instance; 0 on success. */
+int vdec_v4l_get_buffer(
+	struct aml_vcodec_ctx *ctx,
+	struct vdec_v4l2_buffer **out);
+
+/* Report parsed picture info (resolution/dpb) to the v4l2 instance. */
+int vdec_v4l_set_pic_infos(
+	struct aml_vcodec_ctx *ctx,
+	struct aml_vdec_pic_infos *info);
+
+/* Notify the v4l2 instance that the written frame was consumed. */
+int vdec_v4l_write_frame_sync(
+	struct aml_vcodec_ctx *ctx);
+
+/* Bind a vframe to the v4l2 buffer's anon fd handle; idempotent. */
+int vdec_v4l_binding_fd_and_vf(
+	ulong v4l_handle,
+	void *vf);
+
+#endif
#include "../utils/config_parser.h"
#include "../utils/firmware.h"
#include "../../../common/chips/decoder_cpu_ver_info.h"
-#include "../../../amvdec_ports/vdec_drv_base.h"
-
-
+#include "../utils/vdec_v4l2_buffer_ops.h"
#define MIX_STREAM_SUPPORT
#ifdef MULTI_INSTANCE_SUPPORT
#define MAX_DECODE_INSTANCE_NUM 9
#define MULTI_DRIVER_NAME "ammvdec_vp9"
-
static unsigned int max_decode_instance_num
= MAX_DECODE_INSTANCE_NUM;
static unsigned int decode_frame_count[MAX_DECODE_INSTANCE_NUM];
int stream_offset;
u32 pts;
u64 pts64;
+ u64 timestamp;
uint8_t error_mark;
/**/
int slice_idx;
struct buff_s rpm;
struct buff_s lmem;
} BuffInfo_t;
-
#ifdef MULTI_INSTANCE_SUPPORT
#define DEC_RESULT_NONE 0
#define DEC_RESULT_DONE 1
#define DEC_RESULT_GET_DATA_RETRY 8
#define DEC_RESULT_EOS 9
#define DEC_RESULT_FORCE_EXIT 10
+#define DEC_V4L2_CONTINUE_DECODING 18
#define DEC_S1_RESULT_NONE 0
#define DEC_S1_RESULT_DONE 1
bool pic_list_init_done2;
bool is_used_v4l;
void *v4l2_ctx;
+ bool v4l_params_parsed;
int frameinfo_enable;
struct vframe_qos_s vframe_qos;
+ u32 mem_map_mode;
};
static int vp9_print(struct VP9Decoder_s *pbi,
return false;
}
-static int v4l_get_fb(struct aml_vcodec_ctx *ctx, struct vdec_fb **out)
-{
- int ret = 0;
-
- ret = ctx->dec_if->get_param(ctx->drv_handle,
- GET_PARAM_FREE_FRAME_BUFFER, out);
-
- return ret;
-}
+static int config_pic(struct VP9Decoder_s *pbi,
+ struct PIC_BUFFER_CONFIG_s *pic_config);
static void resize_context_buffers(struct VP9Decoder_s *pbi,
struct VP9_Common_s *cm, int width, int height)
pbi->vp9_first_pts_ready = 0;
pbi->duration_from_pts_done = 0;
}
- cm->width = width;
- cm->height = height;
pr_info("%s (%d,%d)=>(%d,%d)\r\n", __func__, cm->width,
cm->height, width, height);
+
+ if (pbi->is_used_v4l) {
+ struct PIC_BUFFER_CONFIG_s *pic = &cm->cur_frame->buf;
+
+ /* resolution change happend need to reconfig buffs if true. */
+ if (pic->y_crop_width != width || pic->y_crop_height != height) {
+ int i;
+ for (i = 0; i < pbi->used_buf_num; i++) {
+ pic = &cm->buffer_pool->frame_bufs[i].buf;
+ pic->y_crop_width = width;
+ pic->y_crop_height = height;
+ if (!config_pic(pbi, pic))
+ set_canvas(pbi, pic);
+ else
+ vp9_print(pbi, 0,
+ "v4l: reconfig buff fail.\n");
+ }
+ }
+ }
+
+ cm->width = width;
+ cm->height = height;
}
/*
*if (cm->cur_frame->mvs == NULL ||
#else
/* porting */
ybf = get_frame_new_buffer(cm);
+ if (!ybf)
+ return -1;
+
ybf->y_crop_width = width;
ybf->y_crop_height = height;
ybf->bit_depth = params->p.bit_depth;
#else
/* porting */
ybf = get_frame_new_buffer(cm);
+ if (!ybf)
+ return -1;
+
ybf->y_crop_width = width;
ybf->y_crop_height = height;
ybf->bit_depth = params->p.bit_depth;
static void trigger_schedule(struct VP9Decoder_s *pbi)
{
+ if (pbi->is_used_v4l) {
+ struct aml_vcodec_ctx *ctx =
+ (struct aml_vcodec_ctx *)(pbi->v4l2_ctx);
+
+ if (ctx->param_sets_from_ucode &&
+ !pbi->v4l_params_parsed)
+ vdec_v4l_write_frame_sync(ctx);
+ }
+
if (pbi->vdec_cb)
pbi->vdec_cb(hw_to_vdec(pbi), pbi->vdec_cb_arg);
}
#endif
+static void init_pic_list_hw(struct VP9Decoder_s *pbi);
static int get_free_fb(struct VP9Decoder_s *pbi)
{
struct VP9_Common_s *const cm = &pbi->common;
struct RefCntBuffer_s *const frame_bufs = cm->buffer_pool->frame_bufs;
+ struct PIC_BUFFER_CONFIG_s *pic = NULL;
int i;
unsigned long flags;
}
}
for (i = 0; i < pbi->used_buf_num; ++i) {
+ pic = &frame_bufs[i].buf;
if ((frame_bufs[i].ref_count == 0) &&
- (frame_bufs[i].buf.vf_ref == 0) &&
- (frame_bufs[i].buf.index != -1)
- )
+ (pic->vf_ref == 0) && (pic->index != -1)) {
+ if (pbi->is_used_v4l && !pic->cma_alloc_addr) {
+ pic->y_crop_width = pbi->frame_width;
+ pic->y_crop_height = pbi->frame_height;
+ if (!config_pic(pbi, pic)) {
+ set_canvas(pbi, pic);
+ init_pic_list_hw(pbi);
+ } else
+ i = pbi->used_buf_num;
+ }
+
break;
+ }
}
if (i != pbi->used_buf_num) {
frame_bufs[i].ref_count = 1;
struct VP9_Common_s *const cm = &pbi->common;
struct BufferPool_s *pool = cm->buffer_pool;
struct RefCntBuffer_s *frame_bufs = cm->buffer_pool->frame_bufs;
+ struct PIC_BUFFER_CONFIG_s *pic = NULL;
int i;
int ret;
}
}
- get_frame_new_buffer(cm)->bit_depth = cm->bit_depth;
- get_frame_new_buffer(cm)->color_space = cm->color_space;
- get_frame_new_buffer(cm)->slice_type = cm->frame_type;
+ pic = get_frame_new_buffer(cm);
+ if (!pic)
+ return -1;
+
+ pic->bit_depth = cm->bit_depth;
+ pic->color_space = cm->color_space;
+ pic->slice_type = cm->frame_type;
if (pbi->need_resync) {
pr_err
pbi->hold_ref_buf = 0;
cm->frame_to_show = get_frame_new_buffer(cm);
- /*if (!pbi->frame_parallel_decode || !cm->show_frame) {*/
- lock_buffer_pool(pool, flags);
- --frame_bufs[cm->new_fb_idx].ref_count;
- /*pr_info("[MMU DEBUG 8] dec ref_count[%d] : %d\r\n", cm->new_fb_idx,
- * frame_bufs[cm->new_fb_idx].ref_count);
- */
- unlock_buffer_pool(pool, flags);
- /*}*/
+ if (cm->frame_to_show) {
+ /*if (!pbi->frame_parallel_decode || !cm->show_frame) {*/
+ lock_buffer_pool(pool, flags);
+ --frame_bufs[cm->new_fb_idx].ref_count;
+ /*pr_info("[MMU DEBUG 8] dec ref_count[%d] : %d\r\n", cm->new_fb_idx,
+ * frame_bufs[cm->new_fb_idx].ref_count);
+ */
+ unlock_buffer_pool(pool, flags);
+ /*}*/
+ }
/*Invalidate these references until the next frame starts.*/
for (ref_index = 0; ref_index < 3; ref_index++)
if (!cm->show_frame)
return ret;
+ /* may not be get buff in v4l2 */
+ if (!cm->frame_to_show)
+ return ret;
+
pbi->ready_for_new_data = 1;
*sd = *cm->frame_to_show;
return 0;
}
-
int vp9_bufmgr_postproc(struct VP9Decoder_s *pbi)
{
struct VP9_Common_s *cm = &pbi->common;
{
int ret = -1;
int i;
- int pic_width = pbi->init_pic_w;
- int pic_height = pbi->init_pic_h;
+ int pic_width = pbi->is_used_v4l ? pbi->frame_width : pbi->init_pic_w;
+ int pic_height = pbi->is_used_v4l ? pbi->frame_height : pbi->init_pic_h;
int lcu_size = 64; /*fixed 64*/
int pic_width_64 = (pic_width + 63) & (~0x3f);
int pic_height_32 = (pic_height + 31) & (~0x1f);
int mc_buffer_size_u_v = 0;
int mc_buffer_size_u_v_h = 0;
int dw_mode = get_double_write_mode_init(pbi);
- struct vdec_fb *fb = NULL;
+ int pic_width_align = 0;
+ int pic_height_align = 0;
+ struct vdec_v4l2_buffer *fb = NULL;
pbi->lcu_total = lcu_total;
/*64k alignment*/
buf_size = ((mc_buffer_size_u_v_h << 16) * 3);
buf_size = ((buf_size + 0xffff) >> 16) << 16;
+
+ if (pbi->is_used_v4l) {
+ pic_width_align = ALIGN(pic_width_dw, 32);
+ pic_height_align = ALIGN(pic_height_dw, 32);
+ }
}
if (mc_buffer_size & 0xffff) /*64k alignment*/
if ((!pbi->mmu_enable) && ((dw_mode & 0x10) == 0))
buf_size += (mc_buffer_size_h << 16);
-
if (pbi->mmu_enable) {
pic_config->header_adr = decoder_bmmu_box_get_phy_addr(
pbi->bmmu_box, HEADER_BUFFER_IDX(pic_config->index));
#endif
if (buf_size > 0) {
if (pbi->is_used_v4l) {
- ret = v4l_get_fb(pbi->v4l2_ctx, &fb);
+ ret = vdec_v4l_get_buffer(pbi->v4l2_ctx, &fb);
if (ret) {
- vp9_print(pbi, PRINT_FLAG_ERROR,
+ vp9_print(pbi, PRINT_FLAG_V4L_DETAIL,
"[%d] get fb fail.\n",
((struct aml_vcodec_ctx *)
(pbi->v4l2_ctx))->id);
}
pbi->m_BUF[i].v4l_ref_buf_addr = (ulong)fb;
- pic_config->cma_alloc_addr =
- virt_to_phys(fb->base_y.va);
+ pic_config->cma_alloc_addr = fb->m.mem[0].addr;
vp9_print(pbi, PRINT_FLAG_V4L_DETAIL,
"[%d] %s(), v4l ref buf addr: 0x%x\n",
((struct aml_vcodec_ctx *)
pic_config->mc_canvas_y = pic_config->index;
pic_config->mc_canvas_u_v = pic_config->index;
if (dw_mode & 0x10) {
- pic_config->dw_y_adr = y_adr;
- pic_config->dw_u_v_adr = y_adr +
- ((mc_buffer_size_u_v_h << 16) << 1);
+ if (pbi->is_used_v4l) {
+ if (fb->num_planes == 1) {
+ fb->m.mem[0].bytes_used =
+ pic_width_align * pic_height_align;
+ pic_config->dw_y_adr = y_adr;
+ pic_config->dw_u_v_adr = y_adr +
+ fb->m.mem[0].bytes_used;
+ } else if (fb->num_planes == 2) {
+ fb->m.mem[0].bytes_used =
+ pic_width_align * pic_height_align;
+ fb->m.mem[1].bytes_used =
+ fb->m.mem[1].size;
+ pic_config->dw_y_adr = y_adr;
+ pic_config->dw_u_v_adr = y_adr +
+ fb->m.mem[0].bytes_used;
+ }
+ } else {
+ pic_config->dw_y_adr = y_adr;
+ pic_config->dw_u_v_adr = y_adr +
+ ((mc_buffer_size_u_v_h << 16) << 1);
+ }
pic_config->mc_canvas_y =
(pic_config->index << 1);
return (MAX_FRAME_4K_NUM * 4);
}
-
static void init_pic_list(struct VP9Decoder_s *pbi)
{
int i;
pic_config->y_canvas_index = -1;
pic_config->uv_canvas_index = -1;
}
- if (config_pic(pbi, pic_config) < 0) {
- if (debug)
- pr_info("Config_pic %d fail\n",
- pic_config->index);
- pic_config->index = -1;
- break;
- }
pic_config->y_crop_width = pbi->init_pic_w;
pic_config->y_crop_height = pbi->init_pic_h;
pic_config->double_write_mode = get_double_write_mode(pbi);
- if (pic_config->double_write_mode) {
- set_canvas(pbi, pic_config);
+ if (!pbi->is_used_v4l) {
+ if (config_pic(pbi, pic_config) < 0) {
+ if (debug)
+ pr_info("Config_pic %d fail\n",
+ pic_config->index);
+ pic_config->index = -1;
+ break;
+ }
+
+ if (pic_config->double_write_mode) {
+ set_canvas(pbi, pic_config);
+ }
}
}
for (; i < pbi->used_buf_num; i++) {
}
pr_info("%s ok, used_buf_num = %d\n",
__func__, pbi->used_buf_num);
-
}
static void init_pic_list_hw(struct VP9Decoder_s *pbi)
pic_config->lcu_total * lcu_size*lcu_size/2;
int mc_buffer_size_u_v_h =
(mc_buffer_size_u_v + 0xffff) >> 16;/*64k alignment*/
-
+ struct aml_vcodec_ctx * v4l2_ctx = pbi->v4l2_ctx;
if (get_double_write_mode(pbi)) {
WRITE_VREG(HEVC_SAO_Y_START_ADDR, pic_config->dw_y_adr);
data32 = READ_VREG(HEVC_SAO_CTRL1);
data32 &= (~0x3000);
/*[13:12] axi_aformat, 0-Linear, 1-32x32, 2-64x32*/
- data32 |= (mem_map_mode << 12);
+ data32 |= (pbi->mem_map_mode << 12);
data32 &= (~0x3);
data32 |= 0x1; /* [1]:dw_disable [0]:cm_disable*/
WRITE_VREG(HEVC_SAO_CTRL1, data32);
data32 = READ_VREG(HEVCD_IPP_AXIIF_CONFIG);
data32 &= (~0x30);
/*[5:4] address_format 00:linear 01:32x32 10:64x32*/
- data32 |= (mem_map_mode << 4);
+ data32 |= (pbi->mem_map_mode << 4);
WRITE_VREG(HEVCD_IPP_AXIIF_CONFIG, data32);
#else
/*m8baby test1902*/
data32 = READ_VREG(HEVC_SAO_CTRL1);
data32 &= (~0x3000);
/*[13:12] axi_aformat, 0-Linear, 1-32x32, 2-64x32*/
- data32 |= (mem_map_mode << 12);
+ data32 |= (pbi->mem_map_mode << 12);
data32 &= (~0xff0);
/*data32 |= 0x670;*/ /*Big-Endian per 64-bit*/
data32 |= 0x880; /*.Big-Endian per 64-bit */
data32 = READ_VREG(HEVCD_IPP_AXIIF_CONFIG);
data32 &= (~0x30);
/*[5:4] address_format 00:linear 01:32x32 10:64x32*/
- data32 |= (mem_map_mode << 4);
+ data32 |= (pbi->mem_map_mode << 4);
data32 &= (~0xF);
data32 |= 0x8; /*Big-Endian per 64-bit*/
WRITE_VREG(HEVCD_IPP_AXIIF_CONFIG, data32);
#else
data32 = READ_VREG(HEVC_SAO_CTRL1);
data32 &= (~0x3000);
- data32 |= (mem_map_mode <<
+ data32 |= (pbi->mem_map_mode <<
12);
/* [13:12] axi_aformat, 0-Linear,
data |= ((0x1 << 8) |(0x1 << 9));
WRITE_VREG(HEVC_DBLK_CFGB, data);
}
+
+ /* swap uv */
+ if (pbi->is_used_v4l) {
+ if ((v4l2_ctx->q_data[AML_Q_DATA_DST].fmt->fourcc == V4L2_PIX_FMT_NV21) ||
+ (v4l2_ctx->q_data[AML_Q_DATA_DST].fmt->fourcc == V4L2_PIX_FMT_NV21M))
+ data32 &= ~(1 << 8); /* NV21 */
+ else
+ data32 |= (1 << 8); /* NV12 */
+ }
+
+ /*
+ * [31:24] ar_fifo1_axi_thred
+ * [23:16] ar_fifo0_axi_thred
+ * [15:14] axi_linealign, 0-16bytes, 1-32bytes, 2-64bytes
+ * [13:12] axi_aformat, 0-Linear, 1-32x32, 2-64x32
+ * [11:08] axi_lendian_C
+ * [07:04] axi_lendian_Y
+ * [3] reserved
+ * [2] clk_forceon
+ * [1] dw_disable:disable double write output
+ * [0] cm_disable:disable compress output
+ */
WRITE_VREG(HEVC_SAO_CTRL1, data32);
if (get_double_write_mode(pbi) & 0x10) {
data32 = READ_VREG(HEVCD_IPP_AXIIF_CONFIG);
data32 &= (~0x30);
/* [5:4] -- address_format 00:linear 01:32x32 10:64x32 */
- data32 |= (mem_map_mode <<
+ data32 |= (pbi->mem_map_mode <<
4);
data32 &= (~0xF);
data32 |= 0xf; /* valid only when double write only */
/*data32 |= 0x8;*/ /* Big-Endian per 64-bit */
+
+ /* swap uv */
+ if (pbi->is_used_v4l) {
+ if ((v4l2_ctx->q_data[AML_Q_DATA_DST].fmt->fourcc == V4L2_PIX_FMT_NV21) ||
+ (v4l2_ctx->q_data[AML_Q_DATA_DST].fmt->fourcc == V4L2_PIX_FMT_NV21M))
+ data32 |= (1 << 12); /* NV21 */
+ else
+ data32 &= ~(1 << 12); /* NV12 */
+ }
+
+ /*
+ * [3:0] little_endian
+ * [5:4] address_format 00:linear 01:32x32 10:64x32
+ * [7:6] reserved
+ * [9:8] Linear_LineAlignment 00:16byte 01:32byte 10:64byte
+ * [11:10] reserved
+ * [12] CbCr_byte_swap
+ * [31:13] reserved
+ */
WRITE_VREG(HEVCD_IPP_AXIIF_CONFIG, data32);
#endif
}
pbi->vvp9_amstream_dec_info.height :
pbi->work_space_buf->max_height));
+ pbi->mem_map_mode = mem_map_mode ? mem_map_mode : 0;
+
/* video is not support unaligned with 64 in tl1
** vdec canvas mode will be linear when dump yuv is set
*/
(pbi->vvp9_amstream_dec_info.width % 64) != 0)) {
if (hw_to_vdec(pbi)->canvas_mode !=
CANVAS_BLKMODE_LINEAR)
- mem_map_mode = 2;
+ pbi->mem_map_mode = 2;
else {
- mem_map_mode = 0;
+ pbi->mem_map_mode = 0;
pr_info("vdec blkmod linear, force mem_map_mode 0\n");
}
}
struct vdec_s *vdec = hw_to_vdec(pbi);
int canvas_w = ALIGN(pic_config->y_crop_width, 64)/4;
int canvas_h = ALIGN(pic_config->y_crop_height, 32)/4;
- int blkmode = mem_map_mode;
+ int blkmode = pbi->mem_map_mode;
/*CANVAS_BLKMODE_64X32*/
if (pic_config->double_write_mode) {
canvas_w = pic_config->y_crop_width /
get_double_write_ratio(pbi,
pic_config->double_write_mode);
- if (mem_map_mode == 0)
+ if (pbi->mem_map_mode == 0)
canvas_w = ALIGN(canvas_w, 32);
else
canvas_w = ALIGN(canvas_w, 64);
if (kfifo_get(&pbi->display_q, &vf)) {
struct vframe_s *next_vf;
uint8_t index = vf->index & 0xff;
- if (index < pbi->used_buf_num) {
+ if (index < pbi->used_buf_num ||
+ (vf->type & VIDTYPE_V4L_EOS)) {
pbi->vf_get_count++;
if (debug & VP9_DEBUG_BUFMGR)
pr_info("%s type 0x%x w/h %d/%d, pts %d, %lld\n",
if (pbi->is_used_v4l) {
vf->v4l_mem_handle
= pbi->m_BUF[pic_config->BUF_index].v4l_ref_buf_addr;
+ if (pbi->mmu_enable) {
+ if (vdec_v4l_binding_fd_and_vf(vf->v4l_mem_handle, vf) < 0) {
+ vp9_print(pbi, PRINT_FLAG_V4L_DETAIL,
+ "v4l: binding vf fail.\n");
+ return -1;
+ }
+ }
vp9_print(pbi, PRINT_FLAG_V4L_DETAIL,
"[%d] %s(), v4l mem handle: 0x%lx\n",
((struct aml_vcodec_ctx *)(pbi->v4l2_ctx))->id,
if (vdec_frame_based(hw_to_vdec(pbi))) {
vf->pts = pic_config->pts;
vf->pts_us64 = pic_config->pts64;
+ vf->timestamp = pic_config->timestamp;
if (vf->pts != 0 || vf->pts_us64 != 0) {
pts_valid = 1;
pts_us64_valid = 1;
struct VP9Decoder_s *hw = (struct VP9Decoder_s *)vdec->private;
struct aml_vcodec_ctx *ctx = (struct aml_vcodec_ctx *)(hw->v4l2_ctx);
struct vframe_s *vf = NULL;
- struct vdec_fb *fb = NULL;
+ struct vdec_v4l2_buffer *fb = NULL;
if (hw->is_used_v4l && hw->eos) {
if (kfifo_get(&hw->newframe_q, &vf) == 0 || vf == NULL) {
return -1;
}
- if (v4l_get_fb(hw->v4l2_ctx, &fb)) {
+ if (vdec_v4l_get_buffer(hw->v4l2_ctx, &fb)) {
pr_err("[%d] get fb fail.\n", ctx->id);
return -1;
}
+ vf->type |= VIDTYPE_V4L_EOS;
vf->timestamp = ULONG_MAX;
vf->v4l_mem_handle = (unsigned long)fb;
vf->flag = VFRAME_FLAG_EMPTY_FRAME_V4L;
if (pbi->chunk) {
cur_pic_config->pts = pbi->chunk->pts;
cur_pic_config->pts64 = pbi->chunk->pts64;
+ cur_pic_config->timestamp = pbi->chunk->timestamp;
}
#endif
}
}
}
- continue_decoding(pbi);
- pbi->postproc_done = 0;
- pbi->process_busy = 0;
+ if (pbi->is_used_v4l) {
+ struct aml_vcodec_ctx *ctx =
+ (struct aml_vcodec_ctx *)(pbi->v4l2_ctx);
+
+ pbi->frame_width = vp9_param.p.width;
+ pbi->frame_height = vp9_param.p.height;
+ if (ctx->param_sets_from_ucode && !pbi->v4l_params_parsed) {
+ struct aml_vdec_pic_infos info;
+
+ info.visible_width = pbi->frame_width;
+ info.visible_height = pbi->frame_height;
+ info.coded_width = ALIGN(pbi->frame_width, 32);
+ info.coded_height = ALIGN(pbi->frame_height, 32);
+ info.dpb_size = pbi->used_buf_num;
+ pbi->v4l_params_parsed = true;
+ vdec_v4l_set_pic_infos(ctx, &info);
+ }
+ }
+
+ if (pbi->is_used_v4l) {
+ pbi->dec_result = DEC_V4L2_CONTINUE_DECODING;
+ vdec_schedule_work(&pbi->work);
+ } else {
+ continue_decoding(pbi);
+ pbi->postproc_done = 0;
+ pbi->process_busy = 0;
+ }
#ifdef MULTI_INSTANCE_SUPPORT
if (pbi->m_ins_flag)
pr_info("pts missed %ld, pts hit %ld, duration %d\n",
pbi->pts_missed, pbi->pts_hit, pbi->frame_dur);
#endif
+ mem_map_mode = 0;
+
vfree(pbi);
mutex_unlock(&vvp9_mutex);
return;
}
+ if (pbi->dec_result == DEC_V4L2_CONTINUE_DECODING) {
+ struct aml_vcodec_ctx *ctx =
+ (struct aml_vcodec_ctx *)(pbi->v4l2_ctx);
+
+ if (ctx->param_sets_from_ucode) {
+ reset_process_time(pbi);
+ if (wait_event_interruptible_timeout(ctx->wq,
+ ctx->v4l_codec_ready,
+ msecs_to_jiffies(500)) < 0)
+ return;
+ }
+
+ continue_decoding(pbi);
+ pbi->postproc_done = 0;
+ pbi->process_busy = 0;
+
+ return;
+ }
+
if (((pbi->dec_result == DEC_RESULT_GET_DATA) ||
(pbi->dec_result == DEC_RESULT_GET_DATA_RETRY))
&& (hw_to_vdec(pbi)->next_status !=
size, (pbi->need_cache_size >> PAGE_SHIFT),
(int)(get_jiffies_64() - pbi->sc_start_time) * 1000/HZ);
}
+
#ifdef SUPPORT_FB_DECODING
if (pbi->used_stage_buf_num > 0) {
if (mask & CORE_MASK_HEVC_FRONT) {
ret = CORE_MASK_HEVC;
else
ret = CORE_MASK_VDEC_1 | CORE_MASK_HEVC;
- }
+ }
+
+ if (pbi->is_used_v4l) {
+ struct aml_vcodec_ctx *ctx =
+ (struct aml_vcodec_ctx *)(pbi->v4l2_ctx);
+
+ if (ctx->param_sets_from_ucode &&
+ !ctx->v4l_codec_ready &&
+ pbi->v4l_params_parsed)
+ ret = 0; /*the params has parsed.*/
+ }
+
if (ret)
not_run_ready[pbi->index] = 0;
else
#endif
}
+/*
+ * init_frame_bufs() - clear per-buffer reference state so the decoder
+ * can be restarted from a clean slate (used by reset(), below in this
+ * patch).
+ *
+ * For each buffer actually in use (used_buf_num) the ref_count, the
+ * vframe reference count and the decode index are zeroed.  In
+ * parallel-decode mode every y/uv canvas is additionally handed back
+ * to the vdec core; note this canvas loop walks the full FRAME_BUFFERS
+ * range, not just used_buf_num — presumably intentional so stale
+ * canvases from a larger previous configuration are also released
+ * (TODO confirm with author).
+ */
+static void init_frame_bufs(struct VP9Decoder_s *pbi)
+{
+ struct vdec_s *vdec = hw_to_vdec(pbi);
+ struct VP9_Common_s *const cm = &pbi->common;
+ struct RefCntBuffer_s *const frame_bufs = cm->buffer_pool->frame_bufs;
+ int i;
+
+ for (i = 0; i < pbi->used_buf_num; ++i) {
+ frame_bufs[i].ref_count = 0;
+ frame_bufs[i].buf.vf_ref = 0;
+ frame_bufs[i].buf.decode_idx = 0;
+ }
+
+ if (vdec->parallel_dec == 1) {
+ /* Return all canvases claimed by this instance to the core. */
+ for (i = 0; i < FRAME_BUFFERS; i++) {
+ vdec->free_canvas_ex
+ (pbi->common.buffer_pool->frame_bufs[i].buf.y_canvas_index,
+ vdec->id);
+ vdec->free_canvas_ex
+ (pbi->common.buffer_pool->frame_bufs[i].buf.uv_canvas_index,
+ vdec->id);
+ }
+ }
+}
+
+
static void reset(struct vdec_s *vdec)
{
struct VP9Decoder_s *pbi =
(struct VP9Decoder_s *)vdec->private;
- vp9_print(pbi,
- PRINT_FLAG_VDEC_DETAIL, "%s\r\n", __func__);
+ /*
+ * Full instance reset: quiesce anything in flight, tear down the
+ * per-instance state, then rebuild it so decoding can restart.
+ */
+ /* Wait for the decode worker to finish before touching state. */
+ cancel_work_sync(&pbi->work);
+ if (pbi->stat & STAT_VDEC_RUN) {
+ amhevc_stop();
+ pbi->stat &= ~STAT_VDEC_RUN;
+ }
+
+ if (pbi->stat & STAT_TIMER_ARM) {
+ del_timer_sync(&pbi->timer);
+ pbi->stat &= ~STAT_TIMER_ARM;
+ }
+ pbi->dec_result = DEC_RESULT_NONE;
+ reset_process_time(pbi);
+ /* Drop mv buffers and local state, then re-init from scratch. */
+ dealloc_mv_bufs(pbi);
+ vp9_local_uninit(pbi);
+ if (vvp9_local_init(pbi) < 0)
+ vp9_print(pbi, 0, "%s local_init failed \r\n", __func__);
+ /* Clear per-buffer refcounts / canvases (helper added above). */
+ init_frame_bufs(pbi);
+
+ pbi->eos = 0;
+ vp9_print(pbi, PRINT_FLAG_VDEC_DETAIL, "%s\r\n", __func__);
}
static irqreturn_t vp9_irq_cb(struct vdec_s *vdec, int irq)
pbi->is_used_v4l = (((unsigned long)
pbi->vvp9_amstream_dec_info.param & 0x80) >> 7);
if (pbi->is_used_v4l) {
- pbi->double_write_mode = 0x10;
- pbi->mmu_enable = 0;
- pbi->max_pic_w = 1920;
- pbi->max_pic_h = 1080;
+ pbi->mmu_enable = (((unsigned long)
+ pbi->vvp9_amstream_dec_info.param & 0x100) >> 8);
+ if (!pbi->mmu_enable)
+ pbi->double_write_mode = 0x10;
+
+ pbi->mmu_enable = 1;
+ pbi->max_pic_w = 4096;
+ pbi->max_pic_h = 2304;
}
vp9_print(pbi, 0,
pr_info("pts missed %ld, pts hit %ld, duration %d\n",
pbi->pts_missed, pbi->pts_hit, pbi->frame_dur);
#endif
+ mem_map_mode = 0;
+
/* devm_kfree(&pdev->dev, (void *)pbi); */
vfree((void *)pbi);
return 0;