--- /dev/null
+/*
+* Copyright (C) 2017 Amlogic, Inc. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*
+* You should have received a copy of the GNU General Public License along
+* with this program; if not, write to the Free Software Foundation, Inc.,
+* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+*
+* Description:
+*/
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <uapi/linux/swab.h>
+#include "../vdec_drv_if.h"
+#include "../aml_vcodec_util.h"
+#include "../aml_vcodec_dec.h"
+#include "../aml_vcodec_drv.h"
+#include "../aml_vcodec_adapt.h"
+#include "../vdec_drv_base.h"
+#include "../aml_vcodec_vfm.h"
+#include "../utils/common.h"
+
+#define KERNEL_ATRACE_TAG KERNEL_ATRACE_TAG_V4L2
+#include <trace/events/meson_atrace.h>
+
+#define PREFIX_SIZE (16)
+
+#define HEADER_BUFFER_SIZE (32 * 1024)
+#define SYNC_CODE (0x498342)
+
+extern int av1_need_prefix;
+
+/**
+ * struct av1_fb - av1 decode frame buffer information
+ * @vdec_fb_va : virtual address of struct vdec_fb
+ * @y_fb_dma : dma address of Y frame buffer (luma)
+ * @c_fb_dma : dma address of C frame buffer (chroma)
+ * @poc : picture order count of frame buffer
+ * @reserved : for 8 bytes alignment
+ */
+struct av1_fb {
+ uint64_t vdec_fb_va;
+ uint64_t y_fb_dma;
+ uint64_t c_fb_dma;
+ int32_t poc;
+ uint32_t reserved;
+};
+
+/**
+ * struct vdec_av1_dec_info - decode information
+ * @dpb_sz : decoding picture buffer size
+ * @resolution_changed : a resolution change happened
+ * @reserved : for 8 bytes alignment
+ * @bs_dma : Input bit-stream buffer dma address
+ * @y_fb_dma : Y frame buffer dma address
+ * @c_fb_dma : C frame buffer dma address
+ * @vdec_fb_va : VDEC frame buffer struct virtual address
+ */
+struct vdec_av1_dec_info {
+ uint32_t dpb_sz;
+ uint32_t resolution_changed;
+ uint32_t reserved;
+ uint64_t bs_dma;
+ uint64_t y_fb_dma;
+ uint64_t c_fb_dma;
+ uint64_t vdec_fb_va;
+};
+
+/**
+ * struct vdec_av1_vsi - shared memory for decode information exchange
+ * between VPU and Host.
+ * The memory is allocated by the VPU and then mapped to
+ * the Host in vpu_dec_init() and freed in vpu_dec_deinit()
+ * by the VPU.
+ * AP-W/R : AP is writer/reader on this item
+ * VPU-W/R: VPU is writer/reader on this item
+ * @header_buf : Header parsing buffer (AP-W, VPU-R)
+ * @list_free : free frame buffer ring list (AP-W/R, VPU-W)
+ * @list_disp : display frame buffer ring list (AP-R, VPU-W)
+ * @dec : decode information (AP-R, VPU-W)
+ * @pic : picture information (AP-R, VPU-W)
+ * @crop : crop information (AP-R, VPU-W)
+ */
+struct vdec_av1_vsi {
+ char *header_buf;
+ int sps_size;
+ int pps_size;
+ int sei_size;
+ int head_offset;
+ struct vdec_av1_dec_info dec;
+ struct vdec_pic_info pic;
+ struct vdec_pic_info cur_pic;
+ struct v4l2_rect crop;
+ bool is_combine;
+ int nalu_pos;
+};
+
+/**
+ * struct vdec_av1_inst - av1 decoder instance
+ * @num_nalu : how many nalus have been decoded
+ * @ctx : point to aml_vcodec_ctx
+ * @vsi : VPU shared information
+ */
+struct vdec_av1_inst {
+ unsigned int num_nalu;
+ struct aml_vcodec_ctx *ctx;
+ struct aml_vdec_adapt vdec;
+ struct vdec_av1_vsi *vsi;
+ struct vcodec_vfm_s vfm;
+ struct aml_dec_params parms;
+ struct completion comp;
+};
+
+/*!\brief OBU types. */
+enum OBU_TYPE {
+ OBU_SEQUENCE_HEADER = 1,
+ OBU_TEMPORAL_DELIMITER = 2,
+ OBU_FRAME_HEADER = 3,
+ OBU_TILE_GROUP = 4,
+ OBU_METADATA = 5,
+ OBU_FRAME = 6,
+ OBU_REDUNDANT_FRAME_HEADER = 7,
+ OBU_TILE_LIST = 8,
+ OBU_PADDING = 15,
+};
+
+/*!\brief OBU metadata types. */
+enum OBU_METADATA_TYPE {
+ OBU_METADATA_TYPE_RESERVED_0 = 0,
+ OBU_METADATA_TYPE_HDR_CLL = 1,
+ OBU_METADATA_TYPE_HDR_MDCV = 2,
+ OBU_METADATA_TYPE_SCALABILITY = 3,
+ OBU_METADATA_TYPE_ITUT_T35 = 4,
+ OBU_METADATA_TYPE_TIMECODE = 5,
+};
+
+struct ObuHeader {
+ size_t size; // Size (1 or 2 bytes) of the OBU header (including the
+ // optional OBU extension header) in the bitstream.
+ enum OBU_TYPE type;
+ int has_size_field;
+ int has_extension;
+ // The following fields come from the OBU extension header and therefore are
+ // only used if has_extension is true.
+ int temporal_layer_id;
+ int spatial_layer_id;
+};
+
+static const size_t kMaximumLeb128Size = 8;
+static const u8 kLeb128ByteMask = 0x7f; // Binary: 01111111
+
+// Disallow values larger than 32-bits to ensure consistent behavior on 32 and
+// 64 bit targets: value is typically used to determine buffer allocation size
+// when decoded.
+static const u64 kMaximumLeb128Value = UINT_MAX;
+
+char obu_type_name[16][32] = {
+ "UNKNOWN",
+ "OBU_SEQUENCE_HEADER",
+ "OBU_TEMPORAL_DELIMITER",
+ "OBU_FRAME_HEADER",
+ "OBU_TILE_GROUP",
+ "OBU_METADATA",
+ "OBU_FRAME",
+ "OBU_REDUNDANT_FRAME_HEADER",
+ "OBU_TILE_LIST",
+ "UNKNOWN",
+ "UNKNOWN",
+ "UNKNOWN",
+ "UNKNOWN",
+ "UNKNOWN",
+ "UNKNOWN",
+ "OBU_PADDING"
+};
+
+char meta_type_name[6][32] = {
+ "OBU_METADATA_TYPE_RESERVED_0",
+ "OBU_METADATA_TYPE_HDR_CLL",
+ "OBU_METADATA_TYPE_HDR_MDCV",
+ "OBU_METADATA_TYPE_SCALABILITY",
+ "OBU_METADATA_TYPE_ITUT_T35",
+ "OBU_METADATA_TYPE_TIMECODE"
+};
+
+struct read_bit_buffer {
+ const u8 *bit_buffer;
+ const u8 *bit_buffer_end;
+ u32 bit_offset;
+};
+
+struct DataBuffer {
+ const u8 *data;
+ size_t size;
+};
+
+static int vdec_write_nalu(struct vdec_av1_inst *inst,
+ u8 *buf, u32 size, u64 ts);
+
+static void get_pic_info(struct vdec_av1_inst *inst,
+ struct vdec_pic_info *pic)
+{
+ *pic = inst->vsi->pic;
+
+ v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_EXINFO,
+ "pic(%d, %d), buf(%d, %d)\n",
+ pic->visible_width, pic->visible_height,
+ pic->coded_width, pic->coded_height);
+ v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_EXINFO,
+ "Y(%d, %d), C(%d, %d)\n",
+ pic->y_bs_sz, pic->y_len_sz,
+ pic->c_bs_sz, pic->c_len_sz);
+}
+
+static void get_crop_info(struct vdec_av1_inst *inst, struct v4l2_rect *cr)
+{
+ cr->left = inst->vsi->crop.left;
+ cr->top = inst->vsi->crop.top;
+ cr->width = inst->vsi->crop.width;
+ cr->height = inst->vsi->crop.height;
+
+ v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_EXINFO,
+ "l=%d, t=%d, w=%d, h=%d\n",
+ cr->left, cr->top, cr->width, cr->height);
+}
+
+static void get_dpb_size(struct vdec_av1_inst *inst, unsigned int *dpb_sz)
+{
+ *dpb_sz = inst->vsi->dec.dpb_sz;
+ v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_EXINFO, "sz=%d\n", *dpb_sz);
+}
+
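+/*
+ * Decoder configuration is passed as a flat "key:value;" string; e.g. the
+ * defaults below expand to
+ * "parm_v4l_codec_enable:1;parm_v4l_buffer_margin:11;av1_double_write_mode:3;..."
+ * The assembled string is handed to the decoder core via inst->vdec.config.
+ */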
+static u32 vdec_config_default_parms(u8 *parm)
+{
+ u8 *pbuf = parm;
+
+ pbuf += sprintf(pbuf, "parm_v4l_codec_enable:1;");
+ pbuf += sprintf(pbuf, "parm_v4l_buffer_margin:11;");
+ pbuf += sprintf(pbuf, "av1_double_write_mode:3;");
+ pbuf += sprintf(pbuf, "av1_buf_width:1920;");
+ pbuf += sprintf(pbuf, "av1_buf_height:1088;");
+ pbuf += sprintf(pbuf, "av1_max_pic_w:8192;");
+ pbuf += sprintf(pbuf, "av1_max_pic_h:4608;");
+ pbuf += sprintf(pbuf, "save_buffer_mode:0;");
+ pbuf += sprintf(pbuf, "no_head:0;");
+ pbuf += sprintf(pbuf, "parm_v4l_canvas_mem_mode:0;");
+ pbuf += sprintf(pbuf, "parm_v4l_canvas_mem_endian:0;");
+
+ return pbuf - parm;
+}
+
+static void vdec_parser_parms(struct vdec_av1_inst *inst)
+{
+ struct aml_vcodec_ctx *ctx = inst->ctx;
+
+ if (ctx->config.parm.dec.parms_status &
+ V4L2_CONFIG_PARM_DECODE_CFGINFO) {
+ u8 *pbuf = ctx->config.buf;
+
+ pbuf += sprintf(pbuf, "parm_v4l_codec_enable:1;");
+ pbuf += sprintf(pbuf, "parm_v4l_buffer_margin:%d;",
+ ctx->config.parm.dec.cfg.ref_buf_margin);
+ pbuf += sprintf(pbuf, "av1_double_write_mode:%d;",
+ ctx->config.parm.dec.cfg.double_write_mode);
+ pbuf += sprintf(pbuf, "av1_buf_width:%d;",
+ ctx->config.parm.dec.cfg.init_width);
+ pbuf += sprintf(pbuf, "av1_buf_height:%d;",
+ ctx->config.parm.dec.cfg.init_height);
+ pbuf += sprintf(pbuf, "save_buffer_mode:0;");
+ pbuf += sprintf(pbuf, "no_head:0;");
+ pbuf += sprintf(pbuf, "parm_v4l_canvas_mem_mode:%d;",
+ ctx->config.parm.dec.cfg.canvas_mem_mode);
+ pbuf += sprintf(pbuf, "parm_v4l_canvas_mem_endian:%d;",
+ ctx->config.parm.dec.cfg.canvas_mem_endian);
+ ctx->config.length = pbuf - ctx->config.buf;
+ } else {
+ ctx->config.parm.dec.cfg.double_write_mode = 16;
+ ctx->config.parm.dec.cfg.ref_buf_margin = 7;
+ ctx->config.length = vdec_config_default_parms(ctx->config.buf);
+ }
+
+ if ((ctx->config.parm.dec.parms_status &
+ V4L2_CONFIG_PARM_DECODE_HDRINFO) &&
+ inst->parms.hdr.color_parms.present_flag) {
+ u8 *pbuf = ctx->config.buf + ctx->config.length;
+
+ pbuf += sprintf(pbuf, "mG.x:%d;",
+ ctx->config.parm.dec.hdr.color_parms.primaries[0][0]);
+ pbuf += sprintf(pbuf, "mG.y:%d;",
+ ctx->config.parm.dec.hdr.color_parms.primaries[0][1]);
+ pbuf += sprintf(pbuf, "mB.x:%d;",
+ ctx->config.parm.dec.hdr.color_parms.primaries[1][0]);
+ pbuf += sprintf(pbuf, "mB.y:%d;",
+ ctx->config.parm.dec.hdr.color_parms.primaries[1][1]);
+ pbuf += sprintf(pbuf, "mR.x:%d;",
+ ctx->config.parm.dec.hdr.color_parms.primaries[2][0]);
+ pbuf += sprintf(pbuf, "mR.y:%d;",
+ ctx->config.parm.dec.hdr.color_parms.primaries[2][1]);
+ pbuf += sprintf(pbuf, "mW.x:%d;",
+ ctx->config.parm.dec.hdr.color_parms.white_point[0]);
+ pbuf += sprintf(pbuf, "mW.y:%d;",
+ ctx->config.parm.dec.hdr.color_parms.white_point[1]);
+ pbuf += sprintf(pbuf, "mMaxDL:%d;",
+ ctx->config.parm.dec.hdr.color_parms.luminance[0] / 1000);
+ pbuf += sprintf(pbuf, "mMinDL:%d;",
+ ctx->config.parm.dec.hdr.color_parms.luminance[1]);
+ pbuf += sprintf(pbuf, "mMaxCLL:%d;",
+ ctx->config.parm.dec.hdr.color_parms.content_light_level.max_content);
+ pbuf += sprintf(pbuf, "mMaxFALL:%d;",
+ ctx->config.parm.dec.hdr.color_parms.content_light_level.max_pic_average);
+ ctx->config.length = pbuf - ctx->config.buf;
+ inst->parms.hdr = ctx->config.parm.dec.hdr;
+ inst->parms.parms_status |= V4L2_CONFIG_PARM_DECODE_HDRINFO;
+ }
+
+ inst->vdec.config = ctx->config;
+ inst->parms.cfg = ctx->config.parm.dec.cfg;
+ inst->parms.parms_status |= V4L2_CONFIG_PARM_DECODE_CFGINFO;
+}
+
+static int vdec_av1_init(struct aml_vcodec_ctx *ctx, unsigned long *h_vdec)
+{
+ struct vdec_av1_inst *inst = NULL;
+ int ret = -1;
+
+ inst = kzalloc(sizeof(*inst), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+
+ inst->vdec.video_type = VFORMAT_AV1;
+ inst->vdec.filp = ctx->dev->filp;
+ inst->vdec.ctx = ctx;
+ inst->ctx = ctx;
+
+ vdec_parser_parms(inst);
+
+ /* set play mode.*/
+ if (ctx->is_drm_mode)
+ inst->vdec.port.flag |= PORT_FLAG_DRM;
+
+ /* to enable av1 hw.*/
+ inst->vdec.port.type = PORT_TYPE_HEVC;
+
+ /* init vfm */
+ inst->vfm.ctx = ctx;
+ inst->vfm.ada_ctx = &inst->vdec;
+ ret = vcodec_vfm_init(&inst->vfm);
+ if (ret) {
+ v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR,
+ "init vfm failed.\n");
+ goto err;
+ }
+
+ /* probe info from the stream */
+ inst->vsi = kzalloc(sizeof(struct vdec_av1_vsi), GFP_KERNEL);
+ if (!inst->vsi) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ /* alloc the header buffer used to cache stream header data (e.g. sequence header). */
+ inst->vsi->header_buf = kzalloc(HEADER_BUFFER_SIZE, GFP_KERNEL);
+ if (!inst->vsi->header_buf) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ init_completion(&inst->comp);
+
+ v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_PRINFO,
+ "av1 Instance >> %lx\n", (ulong) inst);
+
+ ctx->ada_ctx = &inst->vdec;
+ *h_vdec = (unsigned long)inst;
+
+ /* init decoder. */
+ ret = video_decoder_init(&inst->vdec);
+ if (ret) {
+ v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR,
+ "vdec_av1 init err=%d\n", ret);
+ goto err;
+ }
+
+ //dump_init();
+
+ return 0;
+err:
+ if (inst)
+ vcodec_vfm_release(&inst->vfm);
+ if (inst && inst->vsi && inst->vsi->header_buf)
+ kfree(inst->vsi->header_buf);
+ if (inst && inst->vsi)
+ kfree(inst->vsi);
+ if (inst)
+ kfree(inst);
+ *h_vdec = 0;
+
+ return ret;
+}
+
+static int parse_stream_ucode(struct vdec_av1_inst *inst,
+ u8 *buf, u32 size, u64 timestamp)
+{
+ int ret = 0;
+
+ ret = vdec_write_nalu(inst, buf, size, timestamp);
+ if (ret < 0) {
+ v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR,
+ "write data failed. size: %d, err: %d\n", size, ret);
+ return ret;
+ }
+
+ /* wait for the ucode to finish parsing. */
+ wait_for_completion_timeout(&inst->comp,
+ msecs_to_jiffies(1000));
+
+ return inst->vsi->dec.dpb_sz ? 0 : -1;
+}
+
+static int parse_stream_ucode_dma(struct vdec_av1_inst *inst,
+ ulong buf, u32 size, u64 timestamp, u32 handle)
+{
+ int ret = 0;
+ struct aml_vdec_adapt *vdec = &inst->vdec;
+
+ ret = vdec_vframe_write_with_dma(vdec, buf, size, timestamp, handle);
+ if (ret < 0) {
+ v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR,
+ "write frame data failed. err: %d\n", ret);
+ return ret;
+ }
+
+ /* wait for the ucode to finish parsing. */
+ wait_for_completion_timeout(&inst->comp,
+ msecs_to_jiffies(1000));
+
+ return inst->vsi->dec.dpb_sz ? 0 : -1;
+}
+
+static int parse_stream_cpu(struct vdec_av1_inst *inst, u8 *buf, u32 size)
+{
+ v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR,
+ "parsing the stream on the CPU is not supported.\n");
+
+ return -1;
+}
+
+static int vdec_av1_probe(unsigned long h_vdec,
+ struct aml_vcodec_mem *bs, void *out)
+{
+ struct vdec_av1_inst *inst =
+ (struct vdec_av1_inst *)h_vdec;
+ u8 *buf = (u8 *)bs->vaddr;
+ u32 size = bs->size;
+ int ret = 0;
+
+ if (inst->ctx->is_drm_mode) {
+ if (bs->model == VB2_MEMORY_MMAP) {
+ struct aml_video_stream *s =
+ (struct aml_video_stream *) buf;
+
+ if ((s->magic != AML_VIDEO_MAGIC) &&
+ (s->type != V4L_STREAM_TYPE_MATEDATA))
+ return -1;
+
+ if (inst->ctx->param_sets_from_ucode) {
+ ret = parse_stream_ucode(inst, s->data,
+ s->len, bs->timestamp);
+ } else {
+ ret = parse_stream_cpu(inst, s->data, s->len);
+ }
+ } else if (bs->model == VB2_MEMORY_DMABUF ||
+ bs->model == VB2_MEMORY_USERPTR) {
+ ret = parse_stream_ucode_dma(inst, bs->addr, size,
+ bs->timestamp, BUFF_IDX(bs, bs->index));
+ }
+ } else {
+ if (inst->ctx->param_sets_from_ucode) {
+ ret = parse_stream_ucode(inst, buf, size, bs->timestamp);
+ } else {
+ ret = parse_stream_cpu(inst, buf, size);
+ }
+ }
+
+ inst->vsi->cur_pic = inst->vsi->pic;
+
+ return ret;
+}
+
+static void vdec_av1_deinit(unsigned long h_vdec)
+{
+ ulong flags;
+ struct vdec_av1_inst *inst = (struct vdec_av1_inst *)h_vdec;
+ struct aml_vcodec_ctx *ctx = inst->ctx;
+
+ video_decoder_release(&inst->vdec);
+
+ vcodec_vfm_release(&inst->vfm);
+
+ //dump_deinit();
+
+ spin_lock_irqsave(&ctx->slock, flags);
+ if (inst->vsi && inst->vsi->header_buf)
+ kfree(inst->vsi->header_buf);
+
+ if (inst->vsi)
+ kfree(inst->vsi);
+
+ kfree(inst);
+
+ ctx->drv_handle = 0;
+ spin_unlock_irqrestore(&ctx->slock, flags);
+}
+
+static int vdec_av1_get_fb(struct vdec_av1_inst *inst, struct vdec_v4l2_buffer **out)
+{
+ return get_fb_from_queue(inst->ctx, out);
+}
+
+static void vdec_av1_get_vf(struct vdec_av1_inst *inst, struct vdec_v4l2_buffer **out)
+{
+ struct vframe_s *vf = NULL;
+ struct vdec_v4l2_buffer *fb = NULL;
+
+ vf = peek_video_frame(&inst->vfm);
+ if (!vf) {
+ v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR,
+ "there is no vframe.\n");
+ *out = NULL;
+ return;
+ }
+
+ vf = get_video_frame(&inst->vfm);
+ if (!vf) {
+ v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR,
+ "the vframe is invalid.\n");
+ *out = NULL;
+ return;
+ }
+
+ atomic_set(&vf->use_cnt, 1);
+
+ fb = (struct vdec_v4l2_buffer *)vf->v4l_mem_handle;
+ fb->vf_handle = (unsigned long)vf;
+ fb->status = FB_ST_DISPLAY;
+
+ *out = fb;
+}
+
+// Returns 1 when OBU type is valid, and 0 otherwise.
+static int valid_obu_type(int obu_type)
+{
+ int valid_type = 0;
+
+ switch (obu_type) {
+ case OBU_SEQUENCE_HEADER:
+ case OBU_TEMPORAL_DELIMITER:
+ case OBU_FRAME_HEADER:
+ case OBU_TILE_GROUP:
+ case OBU_METADATA:
+ case OBU_FRAME:
+ case OBU_REDUNDANT_FRAME_HEADER:
+ case OBU_TILE_LIST:
+ case OBU_PADDING:
+ valid_type = 1;
+ break;
+ default:
+ break;
+ }
+
+ return valid_type;
+}
+
+size_t uleb_size_in_bytes(u64 value)
+{
+ size_t size = 0;
+
+ do {
+ ++size;
+ } while ((value >>= 7) != 0);
+
+ return size;
+}
+
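+/*
+ * Unsigned LEB128 helpers: a value is split into 7-bit groups, least
+ * significant group first, and bit 7 of each byte is the continuation flag.
+ * Example: 300 (0x12c) encodes as 0xac 0x02 (0x2c | 0x80, then 300 >> 7 = 2).
+ */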
+int uleb_decode(const u8 *buffer, size_t available,
+ u64 *value, size_t *length)
+{
+ int i;
+
+ if (buffer && value) {
+ *value = 0;
+
+ for (i = 0; i < kMaximumLeb128Size && i < available; ++i) {
+ const u8 decoded_byte = *(buffer + i) & kLeb128ByteMask;
+
+ *value |= ((u64)decoded_byte) << (i * 7);
+ if ((*(buffer + i) >> 7) == 0) {
+ if (length) {
+ *length = i + 1;
+ }
+
+ // Fail on values larger than 32-bits to ensure consistent behavior on
+ // 32 and 64 bit targets: value is typically used to determine buffer
+ // allocation size.
+ if (*value > UINT_MAX)
+ return -1;
+
+ return 0;
+ }
+ }
+ }
+
+ // If we get here, either the buffer/value pointers were invalid,
+ // or we ran over the available space
+ return -1;
+}
+
+int uleb_encode(u64 value, size_t available,
+ u8 *coded_value, size_t *coded_size)
+{
+ int i;
+ const size_t leb_size = uleb_size_in_bytes(value);
+
+ if (value > kMaximumLeb128Value || leb_size > kMaximumLeb128Size ||
+ leb_size > available || !coded_value || !coded_size) {
+ return -1;
+ }
+
+ for (i = 0; i < leb_size; ++i) {
+ u8 byte = value & 0x7f;
+
+ value >>= 7;
+ if (value != 0) byte |= 0x80; // Signal that more bytes follow.
+
+ *(coded_value + i) = byte;
+ }
+
+ *coded_size = leb_size;
+
+ return 0;
+}
+
+static int rb_read_bit(struct read_bit_buffer *rb)
+{
+ const u32 off = rb->bit_offset;
+ const u32 p = off >> 3;
+ const int q = 7 - (int)(off & 0x7);
+
+ if (rb->bit_buffer + p < rb->bit_buffer_end) {
+ const int bit = (rb->bit_buffer[p] >> q) & 1;
+
+ rb->bit_offset = off + 1;
+ return bit;
+ } else {
+ return 0;
+ }
+}
+
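+/* Read 'bits' bits MSB-first; e.g. reading 4 bits over 1,0,1,0 returns 0xa. */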
+static int rb_read_literal(struct read_bit_buffer *rb, int bits)
+{
+ int value = 0, bit;
+
+ for (bit = bits - 1; bit >= 0; bit--)
+ value |= rb_read_bit(rb) << bit;
+
+ return value;
+}
+
+static int read_obu_size(const u8 *data,
+ size_t bytes_available,
+ size_t *const obu_size,
+ size_t *const length_field_size)
+{
+ u64 u_obu_size = 0;
+
+ if (uleb_decode(data, bytes_available, &u_obu_size, length_field_size) != 0) {
+ return -1;
+ }
+
+ if (u_obu_size > UINT_MAX)
+ return -1;
+
+ *obu_size = (size_t) u_obu_size;
+
+ return 0;
+}
+
+// Parses OBU header and stores values in 'header'.
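+// Bit layout (AV1 spec 5.3.2/5.3.3):
+//   obu_forbidden_bit (1), obu_type (4), obu_extension_flag (1),
+//   obu_has_size_field (1), obu_reserved_1bit (1);
+// when obu_extension_flag is set, one more byte follows:
+//   temporal_id (3), spatial_id (2), extension_header_reserved_3bits (3).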
+static int read_obu_header(struct read_bit_buffer *rb,
+ int is_annexb, struct ObuHeader *header)
+{
+ int bit_buffer_byte_length;
+
+ if (!rb || !header)
+ return -1;
+
+ bit_buffer_byte_length = rb->bit_buffer_end - rb->bit_buffer;
+ if (bit_buffer_byte_length < 1)
+ return -1;
+
+ header->size = 1;
+
+ if (rb_read_bit(rb) != 0) {
+ // Forbidden bit. Must not be set.
+ return -1;
+ }
+
+ header->type = (enum OBU_TYPE) rb_read_literal(rb, 4);
+ if (!valid_obu_type(header->type))
+ return -1;
+
+ header->has_extension = rb_read_bit(rb);
+ header->has_size_field = rb_read_bit(rb);
+
+ if (!header->has_size_field && !is_annexb) {
+ // section 5 obu streams must have obu_size field set.
+ return -1;
+ }
+
+ if (rb_read_bit(rb) != 0) {
+ // obu_reserved_1bit must be set to 0.
+ return -1;
+ }
+
+ if (header->has_extension) {
+ if (bit_buffer_byte_length == 1)
+ return -1;
+
+ header->size += 1;
+ header->temporal_layer_id = rb_read_literal(rb, 3);
+ header->spatial_layer_id = rb_read_literal(rb, 2);
+ if (rb_read_literal(rb, 3) != 0) {
+ // extension_header_reserved_3bits must be set to 0.
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+int read_obu_header_and_size(const u8 *data,
+ size_t bytes_available,
+ int is_annexb,
+ struct ObuHeader *obu_header,
+ size_t *const payload_size,
+ size_t *const bytes_read)
+{
+ size_t length_field_size_obu = 0;
+ size_t length_field_size_payload = 0;
+ size_t obu_size = 0;
+ int status = 0;
+ struct read_bit_buffer rb;
+
+ if (is_annexb) {
+ // Size field comes before the OBU header, and includes the OBU header
+ status = read_obu_size(data, bytes_available, &obu_size, &length_field_size_obu);
+ if (status != 0)
+ return status;
+ }
+
+ rb.bit_buffer = data + length_field_size_obu;
+ rb.bit_buffer_end = data + bytes_available;
+ rb.bit_offset = 0;
+
+ status = read_obu_header(&rb, is_annexb, obu_header);
+ if (status != 0)
+ return status;
+
+ if (!obu_header->has_size_field) {
+ // Derive the payload size from the data we've already read
+ if (obu_size < obu_header->size)
+ return -1;
+
+ *payload_size = obu_size - obu_header->size;
+ } else {
+ // Size field comes after the OBU header, and is just the payload size
+ status = read_obu_size(data + length_field_size_obu + obu_header->size,
+ bytes_available - length_field_size_obu - obu_header->size,
+ payload_size, &length_field_size_payload);
+ if (status != 0)
+ return status;
+ }
+
+ *bytes_read = length_field_size_obu + obu_header->size + length_field_size_payload;
+
+ return 0;
+}
+
+int parser_frame(int is_annexb, u8 *data, const u8 *data_end,
+ u8 *dst_data, u32 *frame_len, u8 *meta_buf, u32 *meta_len)
+{
+ int frame_decoding_finished = 0;
+ u32 obu_size = 0;
+ int seen_frame_header = 0;
+ int next_start_tile = 0;
+ struct DataBuffer obu_size_hdr;
+ u8 header[20] = {0};
+ u8 *p = NULL;
+ u32 rpu_size = 0;
+ struct ObuHeader obu_header;
+
+ memset(&obu_header, 0, sizeof(obu_header));
+
+ // decode frame as a series of OBUs
+ while (!frame_decoding_finished) {
+ // struct read_bit_buffer rb;
+ size_t payload_size = 0;
+ size_t header_size = 0;
+ size_t bytes_read = 0;
+ const size_t bytes_available = data_end - data;
+ enum OBU_METADATA_TYPE meta_type;
+ int status;
+ u64 type;
+ u32 i;
+
+ if (bytes_available == 0 && !seen_frame_header) {
+ break;
+ }
+
+ status = read_obu_header_and_size(data, bytes_available, is_annexb,
+ &obu_header, &payload_size, &bytes_read);
+ if (status != 0) {
+ return -1;
+ }
+
+ // Record obu size header information.
+ obu_size_hdr.data = data + obu_header.size;
+ obu_size_hdr.size = bytes_read - obu_header.size;
+
+ // Note: read_obu_header_and_size() takes care of checking that this
+ // doesn't cause 'data' to advance past 'data_end'.
+
+ if ((size_t)(data_end - data - bytes_read) < payload_size) {
+ return -1;
+ }
+
+ v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "obu %s len %zu+%zu\n",
+ obu_type_name[obu_header.type],
+ bytes_read, payload_size);
+
+ if (!is_annexb) {
+ obu_size = bytes_read + payload_size + 4;
+ header_size = 20;
+ } else {
+ obu_size = bytes_read + payload_size;
+ header_size = 16;
+ }
+
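+ /*
+ * Prepend an AML-specific prefix to each OBU (layout as built below,
+ * presumably consumed by the decoder firmware):
+ * bytes 0-3 : (obu_size + 4), big endian
+ * bytes 4-7 : bitwise complement of bytes 0-3
+ * bytes 8-15 : 00 00 00 01 'A' 'M' 'L' 'V' marker
+ * bytes 16-19: obu_size as a fixed 4-byte LEB128-style value,
+ * copied only when !is_annexb (header_size == 20)
+ */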
+ header[0] = ((obu_size + 4) >> 24) & 0xff;
+ header[1] = ((obu_size + 4) >> 16) & 0xff;
+ header[2] = ((obu_size + 4) >> 8) & 0xff;
+ header[3] = ((obu_size + 4) >> 0) & 0xff;
+ header[4] = header[0] ^ 0xff;
+ header[5] = header[1] ^ 0xff;
+ header[6] = header[2] ^ 0xff;
+ header[7] = header[3] ^ 0xff;
+ header[8] = 0;
+ header[9] = 0;
+ header[10] = 0;
+ header[11] = 1;
+ header[12] = 'A';
+ header[13] = 'M';
+ header[14] = 'L';
+ header[15] = 'V';
+
+ // write obu_size here as a fixed 4-byte LEB128-style value (annexb form)
+ header[16] = (obu_size & 0xff) | 0x80;
+ header[17] = ((obu_size >> 7) & 0xff) | 0x80;
+ header[18] = ((obu_size >> 14) & 0xff) | 0x80;
+ header[19] = ((obu_size >> 21) & 0xff) | 0x00;
+
+ memcpy(dst_data, header, header_size);
+ dst_data += header_size;
+ memcpy(dst_data, data, bytes_read + payload_size);
+ dst_data += (bytes_read + payload_size);
+
+ data += bytes_read;
+ *frame_len += (header_size + bytes_read + payload_size);
+
+ switch (obu_header.type) {
+ case OBU_TEMPORAL_DELIMITER:
+ seen_frame_header = 0;
+ next_start_tile = 0;
+ break;
+ case OBU_SEQUENCE_HEADER:
+ // The sequence header should not change in the middle of a frame.
+ if (seen_frame_header) {
+ return -1;
+ }
+ break;
+ case OBU_FRAME_HEADER:
+ if (data_end == data + payload_size) {
+ frame_decoding_finished = 1;
+ } else {
+ seen_frame_header = 1;
+ }
+ break;
+ case OBU_REDUNDANT_FRAME_HEADER:
+ case OBU_FRAME:
+ if (obu_header.type == OBU_REDUNDANT_FRAME_HEADER) {
+ if (!seen_frame_header) {
+ return -1;
+ }
+ } else {
+ // OBU_FRAME.
+ if (seen_frame_header) {
+ return -1;
+ }
+ }
+ if (obu_header.type == OBU_FRAME) {
+ if (data_end == data + payload_size) {
+ frame_decoding_finished = 1;
+ seen_frame_header = 0;
+ }
+ }
+ break;
+ case OBU_TILE_GROUP:
+ if (!seen_frame_header) {
+ return -1;
+ }
+ if (data + payload_size == data_end)
+ frame_decoding_finished = 1;
+ if (frame_decoding_finished)
+ seen_frame_header = 0;
+ break;
+ case OBU_METADATA:
+ uleb_decode(data, 8, &type, &bytes_read);
+ if (type < 6)
+ meta_type = type;
+ else
+ meta_type = 0;
+ p = data + bytes_read;
+ v4l_dbg(0, V4L_DEBUG_CODEC_PARSER,
+ "meta type %s %zu+%zu\n",
+ meta_type_name[meta_type],
+ bytes_read,
+ payload_size - bytes_read);
+
+ if (meta_type == OBU_METADATA_TYPE_ITUT_T35) {
+#if 0 /* for dumping original obu payload */
+ for (i = 0; i < payload_size - bytes_read; i++) {
+ pr_info("%02x ", p[i]);
+ if (i % 16 == 15)
+ pr_info("\n");
+ }
+ if (i % 16 != 0)
+ pr_info("\n");
+#endif
+ if ((p[0] == 0xb5) /* country code */
+ && ((p[1] == 0x00) && (p[2] == 0x3b)) /* terminal_provider_code */
+ && ((p[3] == 0x00) && (p[4] == 0x00) && (p[5] == 0x08) && (p[6] == 0x00))) { /* terminal_provider_oriented_code */
+ v4l_dbg(0, V4L_DEBUG_CODEC_PARSER,
+ "dolbyvision rpu\n");
+ meta_buf[0] = meta_buf[1] = meta_buf[2] = 0;
+ meta_buf[3] = 0x01;
+ meta_buf[4] = 0x19;
+
+ if (p[11] & 0x10) {
+ rpu_size = 0x100;
+ rpu_size |= (p[11] & 0x0f) << 4;
+ rpu_size |= (p[12] >> 4) & 0x0f;
+ if (p[12] & 0x08) {
+ v4l_dbg(0, V4L_DEBUG_CODEC_PARSER,
+ "meta rpu in obu exceed 512 bytes\n");
+ break;
+ }
+ for (i = 0; i < rpu_size; i++) {
+ meta_buf[5 + i] = (p[12 + i] & 0x07) << 5;
+ meta_buf[5 + i] |= (p[13 + i] >> 3) & 0x1f;
+ }
+ rpu_size += 5;
+ } else {
+ rpu_size = (p[10] & 0x1f) << 3;
+ rpu_size |= (p[11] >> 5) & 0x07;
+ for (i = 0; i < rpu_size; i++) {
+ meta_buf[5 + i] = (p[11 + i] & 0x0f) << 4;
+ meta_buf[5 + i] |= (p[12 + i] >> 4) & 0x0f;
+ }
+ rpu_size += 5;
+ }
+ *meta_len = rpu_size;
+ }
+ } else if (meta_type == OBU_METADATA_TYPE_HDR_CLL) {
+ v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "hdr10 cll:\n");
+ v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "max_cll = %x\n", (p[0] << 8) | p[1]);
+ v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "max_fall = %x\n", (p[2] << 8) | p[3]);
+ } else if (meta_type == OBU_METADATA_TYPE_HDR_MDCV) {
+ v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, "hdr10 primaries[r,g,b] = \n");
+ for (i = 0; i < 3; i++) {
+ v4l_dbg(0, V4L_DEBUG_CODEC_PARSER, " %x, %x\n",
+ (p[i * 4] << 8) | p[i * 4 + 1],
+ (p[i * 4 + 2] << 8) | p[i * 4 + 3]);
+ }
+ v4l_dbg(0, V4L_DEBUG_CODEC_PARSER,
+ "white point = %x, %x\n", (p[12] << 8) | p[13], (p[14] << 8) | p[15]);
+ v4l_dbg(0, V4L_DEBUG_CODEC_PARSER,
+ "maxl = %x\n", (p[16] << 24) | (p[17] << 16) | (p[18] << 8) | p[19]);
+ v4l_dbg(0, V4L_DEBUG_CODEC_PARSER,
+ "minl = %x\n", (p[20] << 24) | (p[21] << 16) | (p[22] << 8) | p[23]);
+ }
+ break;
+ case OBU_TILE_LIST:
+ break;
+ case OBU_PADDING:
+ break;
+ default:
+ // Skip unrecognized OBUs
+ break;
+ }
+
+ data += payload_size;
+ }
+
+ return 0;
+}
+
+static int vdec_write_nalu(struct vdec_av1_inst *inst,
+ u8 *buf, u32 size, u64 ts)
+{
+ int ret = 0;
+ struct aml_vdec_adapt *vdec = &inst->vdec;
+ u8 *data = NULL;
+ u32 length = 0;
+ bool need_prefix = av1_need_prefix;
+
+ if (need_prefix) {
+ u8 meta_buffer[1024] = {0};
+ u32 meta_size = 0;
+ u8 *src = buf;
+
+ data = vzalloc(size + 0x1000);
+ if (!data)
+ return -ENOMEM;
+
+ parser_frame(0, src, src + size, data, &length, meta_buffer, &meta_size);
+
+ if (length)
+ ret = vdec_vframe_write(vdec, data, length, ts);
+ else
+ ret = -1;
+
+ vfree(data);
+ } else {
+ ret = vdec_vframe_write(vdec, buf, size, ts);
+ }
+
+ return ret;
+}
+
+static bool monitor_res_change(struct vdec_av1_inst *inst, u8 *buf, u32 size)
+{
+ int ret = -1;
+ u8 *p = buf;
+ int len = size;
+ u32 synccode = av1_need_prefix ?
+ ((p[1] << 16) | (p[2] << 8) | p[3]) :
+ ((p[17] << 16) | (p[18] << 8) | p[19]);
+
+ if (synccode == SYNC_CODE) {
+ ret = parse_stream_cpu(inst, p, len);
+ if (!ret && (inst->vsi->cur_pic.coded_width !=
+ inst->vsi->pic.coded_width ||
+ inst->vsi->cur_pic.coded_height !=
+ inst->vsi->pic.coded_height)) {
+ inst->vsi->cur_pic = inst->vsi->pic;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static int vdec_av1_decode(unsigned long h_vdec,
+ struct aml_vcodec_mem *bs, bool *res_chg)
+{
+ struct vdec_av1_inst *inst = (struct vdec_av1_inst *)h_vdec;
+ struct aml_vdec_adapt *vdec = &inst->vdec;
+ u8 *buf;
+ u32 size;
+ int ret = -1;
+
+ if (bs == NULL)
+ return -1;
+
+ buf = (u8 *) bs->vaddr;
+ size = bs->size;
+
+ if (vdec_input_full(vdec)) {
+ ATRACE_COUNTER("vdec_input_full", 0);
+ return -EAGAIN;
+ }
+
+ if (inst->ctx->is_drm_mode) {
+ if (bs->model == VB2_MEMORY_MMAP) {
+ struct aml_video_stream *s =
+ (struct aml_video_stream *) buf;
+
+ if (s->magic != AML_VIDEO_MAGIC)
+ return -1;
+
+ if (!inst->ctx->param_sets_from_ucode &&
+ (s->type == V4L_STREAM_TYPE_MATEDATA)) {
+ if ((*res_chg = monitor_res_change(inst,
+ s->data, s->len)))
+ return 0;
+ }
+
+ ret = vdec_vframe_write(vdec,
+ s->data,
+ s->len,
+ bs->timestamp);
+ } else if (bs->model == VB2_MEMORY_DMABUF ||
+ bs->model == VB2_MEMORY_USERPTR) {
+ ret = vdec_vframe_write_with_dma(vdec,
+ bs->addr, size, bs->timestamp,
+ BUFF_IDX(bs, bs->index));
+ }
+ } else {
+ /* check whether the resolution has changed. */
+ if ((!inst->ctx->param_sets_from_ucode) &&
+ (*res_chg = monitor_res_change(inst, buf, size)))
+ return 0;
+
+ ret = vdec_write_nalu(inst, buf, size, bs->timestamp);
+ }
+ ATRACE_COUNTER("v4l2_decode_write", ret);
+
+ return ret;
+}
+
+static void get_param_config_info(struct vdec_av1_inst *inst,
+ struct aml_dec_params *parms)
+{
+ if (inst->parms.parms_status & V4L2_CONFIG_PARM_DECODE_CFGINFO)
+ parms->cfg = inst->parms.cfg;
+ if (inst->parms.parms_status & V4L2_CONFIG_PARM_DECODE_PSINFO)
+ parms->ps = inst->parms.ps;
+ if (inst->parms.parms_status & V4L2_CONFIG_PARM_DECODE_HDRINFO)
+ parms->hdr = inst->parms.hdr;
+ if (inst->parms.parms_status & V4L2_CONFIG_PARM_DECODE_CNTINFO)
+ parms->cnt = inst->parms.cnt;
+
+ parms->parms_status |= inst->parms.parms_status;
+
+ v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_PRINFO,
+ "parms status: %u\n", parms->parms_status);
+}
+
+static int vdec_av1_get_param(unsigned long h_vdec,
+ enum vdec_get_param_type type, void *out)
+{
+ int ret = 0;
+ struct vdec_av1_inst *inst = (struct vdec_av1_inst *)h_vdec;
+
+ if (!inst) {
+ v4l_dbg(0, V4L_DEBUG_CODEC_ERROR,
+ "the av1 inst of dec is invalid.\n");
+ return -1;
+ }
+
+ switch (type) {
+ case GET_PARAM_DISP_FRAME_BUFFER:
+ vdec_av1_get_vf(inst, out);
+ break;
+
+ case GET_PARAM_FREE_FRAME_BUFFER:
+ ret = vdec_av1_get_fb(inst, out);
+ break;
+
+ case GET_PARAM_PIC_INFO:
+ get_pic_info(inst, out);
+ break;
+
+ case GET_PARAM_DPB_SIZE:
+ get_dpb_size(inst, out);
+ break;
+
+ case GET_PARAM_CROP_INFO:
+ get_crop_info(inst, out);
+ break;
+
+ case GET_PARAM_CONFIG_INFO:
+ get_param_config_info(inst, out);
+ break;
+
+ default:
+ v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR,
+ "invalid get parameter type=%d\n", type);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static void set_param_write_sync(struct vdec_av1_inst *inst)
+{
+ complete(&inst->comp);
+}
+
+static void set_param_ps_info(struct vdec_av1_inst *inst,
+ struct aml_vdec_ps_infos *ps)
+{
+ struct vdec_pic_info *pic = &inst->vsi->pic;
+ struct vdec_av1_dec_info *dec = &inst->vsi->dec;
+ struct v4l2_rect *rect = &inst->vsi->crop;
+
+ /* fill the visible area size used by EGL. */
+ pic->visible_width = ps->visible_width;
+ pic->visible_height = ps->visible_height;
+
+ /* calc the visible area. */
+ rect->left = 0;
+ rect->top = 0;
+ rect->width = pic->visible_width;
+ rect->height = pic->visible_height;
+
+ /* config the canvas size used by the decoder. */
+ pic->coded_width = ps->coded_width;
+ pic->coded_height = ps->coded_height;
+
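+ /* NV12/NV21 layout: the luma plane is w * h bytes, the chroma plane half of that. */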
+ pic->y_len_sz = pic->coded_width * pic->coded_height;
+ pic->c_len_sz = pic->y_len_sz >> 1;
+
+ /* calc DPB size */
+ dec->dpb_sz = ps->dpb_size;
+
+ inst->parms.ps = *ps;
+ inst->parms.parms_status |=
+ V4L2_CONFIG_PARM_DECODE_PSINFO;
+
+ /*wake up*/
+ complete(&inst->comp);
+
+ v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_PRINFO,
+ "Parse from ucode, crop(%d x %d), coded(%d x %d) dpb: %d\n",
+ ps->visible_width, ps->visible_height,
+ ps->coded_width, ps->coded_height,
+ ps->dpb_size);
+}
+
+static void set_param_hdr_info(struct vdec_av1_inst *inst,
+ struct aml_vdec_hdr_infos *hdr)
+{
+ if ((inst->parms.parms_status &
+ V4L2_CONFIG_PARM_DECODE_HDRINFO)) {
+ inst->parms.hdr = *hdr;
+ inst->parms.parms_status |=
+ V4L2_CONFIG_PARM_DECODE_HDRINFO;
+ aml_vdec_dispatch_event(inst->ctx,
+ V4L2_EVENT_SRC_CH_HDRINFO);
+ v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_PRINFO,
+ "av1 set HDR infos\n");
+ }
+}
+
+static void set_param_post_event(struct vdec_av1_inst *inst, u32 *event)
+{
+ aml_vdec_dispatch_event(inst->ctx, *event);
+ v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_PRINFO,
+ "av1 post event: %d\n", *event);
+}
+
+static int vdec_av1_set_param(unsigned long h_vdec,
+ enum vdec_set_param_type type, void *in)
+{
+ int ret = 0;
+ struct vdec_av1_inst *inst = (struct vdec_av1_inst *)h_vdec;
+
+ if (!inst) {
+ v4l_dbg(0, V4L_DEBUG_CODEC_ERROR,
+ "the av1 inst of dec is invalid.\n");
+ return -1;
+ }
+
+ switch (type) {
+ case SET_PARAM_WRITE_FRAME_SYNC:
+ set_param_write_sync(inst);
+ break;
+
+ case SET_PARAM_PS_INFO:
+ set_param_ps_info(inst, in);
+ break;
+
+ case SET_PARAM_HDR_INFO:
+ set_param_hdr_info(inst, in);
+ break;
+
+ case SET_PARAM_POST_EVENT:
+ set_param_post_event(inst, in);
+ break;
+ default:
+ v4l_dbg(inst->ctx, V4L_DEBUG_CODEC_ERROR,
+ "invalid set parameter type=%d\n", type);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static struct vdec_common_if vdec_av1_if = {
+ .init = vdec_av1_init,
+ .probe = vdec_av1_probe,
+ .decode = vdec_av1_decode,
+ .get_param = vdec_av1_get_param,
+ .set_param = vdec_av1_set_param,
+ .deinit = vdec_av1_deinit,
+};
+
+struct vdec_common_if *get_av1_dec_comm_if(void);
+
+struct vdec_common_if *get_av1_dec_comm_if(void)
+{
+ return &vdec_av1_if;
+}
+
#include <linux/amlogic/media/codec_mm/codec_mm.h>
#include "../utils/decoder_mmu_box.h"
#include "../utils/decoder_bmmu_box.h"
+#include <linux/crc32.h>
#define MEM_NAME "codec_av1"
/* #include <mach/am_regs.h> */
#ifdef SUPPORT_V4L2
#include "../utils/vdec_v4l2_buffer_ops.h"
+#include <media/v4l2-mem2mem.h>
#endif
+#include "../../../amvdec_ports/utils/common.h"
#define AML
#include "aom_av1_define.h"
static int vav1_local_init(struct AV1HW_s *hw);
static void vav1_put_timer_func(unsigned long arg);
static void dump_data(struct AV1HW_s *hw, int size);
-static unsigned char get_data_check_sum
+static unsigned int get_data_check_sum
(struct AV1HW_s *hw, int size);
static void dump_pic_list(struct AV1HW_s *hw);
static int vav1_mmu_map_alloc(struct AV1HW_s *hw);
unsigned int free_start_adr;
ulong v4l_ref_buf_addr;
+ ulong header_addr;
+ u32 header_size;
+ u32 luma_size;
+ ulong chroma_addr;
+ u32 chroma_size;
} /*BUF_t */;
struct MVBUF_s {
static u32 udebug_pause_decode_idx;
-
#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION
static u32 dv_toggle_prov_name;
#endif
u8 first_pts_index;
u32 frame_mode_pts_save[FRAME_BUFFERS];
u64 frame_mode_pts64_save[FRAME_BUFFERS];
+ u64 frame_mode_timestamp_save[FRAME_BUFFERS];
+ u64 timestamp_duration;
int last_pts;
u64 last_pts_us64;
+ u64 last_timestamp;
u64 shift_byte_count;
u32 shift_byte_count_lo;
bool pic_list_init_done2;
bool is_used_v4l;
void *v4l2_ctx;
+ bool v4l_params_parsed;
int frameinfo_enable;
struct vframe_qos_s vframe_qos;
/*
* malloc may not work in real chip, please allocate memory for the following structures
*/
- struct loop_filter_info_n_s *lfi;
- struct loopfilter *lf;
- struct segmentation_lf *seg_4lf;
-#endif
-
+ struct loop_filter_info_n_s *lfi;
+ struct loopfilter *lf;
+ struct segmentation_lf *seg_4lf;
+#endif
+ u32 mem_map_mode;
+ u32 dynamic_buf_num_margin;
+ struct vframe_s vframe_dummy;
+ unsigned int res_ch_flag;
+ int buffer_wrap[FRAME_BUFFERS];
};
static void av1_dump_state(struct vdec_s *vdec);
return false;
}
-static int v4l_get_fb(struct aml_vcodec_ctx *ctx, struct vdec_fb **out)
-{
- int ret = 0;
-
- ret = ctx->dec_if->get_param(ctx->drv_handle,
- GET_PARAM_FREE_FRAME_BUFFER, out);
-
- return ret;
-}
-
+static int v4l_alloc_and_config_pic(struct AV1HW_s *hw,
+ struct PIC_BUFFER_CONFIG_s *pic);
static inline bool close_to(int a, int b, int m)
{
static void trigger_schedule(struct AV1HW_s *hw)
{
+ if (hw->is_used_v4l) {
+ struct aml_vcodec_ctx *ctx =
+ (struct aml_vcodec_ctx *)(hw->v4l2_ctx);
+
+ if (ctx->param_sets_from_ucode &&
+ !hw->v4l_params_parsed)
+ vdec_v4l_write_frame_sync(ctx);
+ }
+
if (hw->vdec_cb)
hw->vdec_cb(hw_to_vdec(hw), hw->vdec_cb_arg);
}
return dw;
}
+static int v4l_parser_get_double_write_mode(struct AV1HW_s *hw)
+{
+ u32 valid_dw_mode = get_valid_double_write_mode(hw);
+ u32 dw;
+ int w, h;
+
+ /* mask for supporting double write value bigger than 0x100 */
+ if (valid_dw_mode & 0xffffff00) {
+ w = hw->frame_width;
+ h = hw->frame_height;
+
+ dw = 0x1; /*1:1*/
+ switch (valid_dw_mode) {
+ case 0x100:
+ if (w > 1920 && h > 1088)
+ dw = 0x4; /*1:2*/
+ break;
+ case 0x200:
+ if (w > 1920 && h > 1088)
+ dw = 0x2; /*1:4*/
+ break;
+ case 0x300:
+ if (w > 1280 && h > 720)
+ dw = 0x4; /*1:2*/
+ break;
+ default:
+ break;
+ }
+ return dw;
+ }
+
+ return valid_dw_mode;
+}
+
static int get_double_write_mode(struct AV1HW_s *hw)
{
u32 valid_dw_mode = get_valid_double_write_mode(hw);
u32 dw;
int w, h;
- struct AV1_Common_s *cm = &hw->pbi->common;
+ struct AV1_Common_s *cm = &hw->common;
struct PIC_BUFFER_CONFIG_s *cur_pic_config;
if (!cm->cur_frame)
return ratio;
}
+/* return page number */
+static int av1_mmu_page_num(struct AV1HW_s *hw,
+ int w, int h, int save_mode)
+{
+ int picture_size;
+ int cur_mmu_4k_number, max_frame_num;
+
+ picture_size = compute_losless_comp_body_size(w, h, save_mode);
+ cur_mmu_4k_number = ((picture_size + (PAGE_SIZE - 1)) >> PAGE_SHIFT);
+
+ if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1)
+ max_frame_num = MAX_FRAME_8K_NUM;
+ else
+ max_frame_num = MAX_FRAME_4K_NUM;
+
+ if (cur_mmu_4k_number > max_frame_num) {
+ pr_err("over max !! cur_mmu_4k_number 0x%x width %d height %d\n",
+ cur_mmu_4k_number, w, h);
+ return -1;
+ }
+
+ return cur_mmu_4k_number;
+}
+
//#define MAX_4K_NUM 0x1200
int av1_alloc_mmu(
struct AV1HW_s *hw,
{
int ret = 0;
int bit_depth_10 = (bit_depth == AOM_BITS_10);
- int picture_size;
- int cur_mmu_4k_number, max_frame_num;
+ int cur_mmu_4k_number;
+
if (!hw->mmu_box) {
pr_err("error no mmu box!\n");
return -1;
}
+
if (hw->double_write_mode & 0x10)
return 0;
+
if (bit_depth >= AOM_BITS_12) {
hw->fatal_error = DECODER_FATAL_ERROR_SIZE_OVERFLOW;
pr_err("fatal_error, un support bit depth 12!\n\n");
return -1;
}
- picture_size = compute_losless_comp_body_size(pic_width, pic_height,
- bit_depth_10);
- cur_mmu_4k_number = ((picture_size + (1 << 12) - 1) >> 12);
-
- if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1)
- max_frame_num = MAX_FRAME_8K_NUM;
- else
- max_frame_num = MAX_FRAME_4K_NUM;
- if (cur_mmu_4k_number > max_frame_num) {
- pr_err("over max !! cur_mmu_4k_number 0x%x width %d height %d\n",
- cur_mmu_4k_number, pic_width, pic_height);
+ cur_mmu_4k_number = av1_mmu_page_num(hw,
+ pic_width,
+ pic_height,
+ bit_depth_10);
+ if (cur_mmu_4k_number < 0)
return -1;
- }
+
ret = decoder_mmu_box_alloc_idx(
hw->mmu_box,
- cur_buf_idx,
+ hw->buffer_wrap[cur_buf_idx],
cur_mmu_4k_number,
mmu_index_adr);
+
return ret;
}
}
ret = decoder_mmu_box_alloc_idx(
hw->mmu_box_dw,
- cur_buf_idx,
+ hw->buffer_wrap[cur_buf_idx],
cur_mmu_4k_number,
mmu_index_adr);
return ret;
}
static void put_un_used_mv_bufs(struct AV1HW_s *hw)
{
- struct AV1_Common_s *const cm = &hw->pbi->common;
+ struct AV1_Common_s *const cm = &hw->common;
struct RefCntBuffer_s *const frame_bufs = cm->buffer_pool->frame_bufs;
int i;
for (i = 0; i < hw->used_buf_num; ++i) {
}
#endif
+static void init_pic_list_hw(struct AV1HW_s *pbi);
+
+static int get_free_fb_idx(AV1_COMMON *cm)
+{
+ int i;
+ RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
+
+ for (i = 0; i < FRAME_BUFFERS; ++i) {
+ if (frame_bufs[i].ref_count == 0
+ && frame_bufs[i].buf.vf_ref == 0)
+ break;
+ }
+
+ return i;
+}
+
+static int v4l_get_free_fb(struct AV1HW_s *hw)
+{
+ struct AV1_Common_s *const cm = &hw->common;
+ struct RefCntBuffer_s *const frame_bufs = cm->buffer_pool->frame_bufs;
+ struct aml_vcodec_ctx * v4l = hw->v4l2_ctx;
+ struct v4l_buff_pool *pool = &v4l->cap_pool;
+ struct PIC_BUFFER_CONFIG_s *pic = NULL;
+ struct PIC_BUFFER_CONFIG_s *free_pic = NULL;
+ ulong flags;
+ int idx, i;
+
+ lock_buffer_pool(cm->buffer_pool, flags);
+
+ for (i = 0; i < pool->in; ++i) {
+ u32 state = (pool->seq[i] >> 16);
+ u32 index = (pool->seq[i] & 0xffff);
+
+ switch (state) {
+ case V4L_CAP_BUFF_IN_DEC:
+ pic = &frame_bufs[i].buf;
+ if ((frame_bufs[i].ref_count == 0) &&
+ (pic->vf_ref == 0) &&
+ (pic->index != -1) &&
+ pic->cma_alloc_addr) {
+ free_pic = pic;
+ }
+ break;
+ case V4L_CAP_BUFF_IN_M2M:
+ idx = get_free_fb_idx(cm);
+ pic = &frame_bufs[idx].buf;
+ pic->y_crop_width = hw->frame_width;
+ pic->y_crop_height = hw->frame_height;
+ hw->buffer_wrap[idx] = index;
+ if (!v4l_alloc_and_config_pic(hw, pic)) {
+ set_canvas(hw, pic);
+ init_pic_list_hw(hw);
+ free_pic = pic;
+ }
+ break;
+ default:
+ pr_err("v4l buffer state err %d.\n", state);
+ break;
+ }
+
+ if (free_pic) {
+ if (frame_bufs[i].buf.use_external_reference_buffers) {
+ // If this frame buffer's y_buffer, u_buffer, and v_buffer point to the
+ // external reference buffers. Restore the buffer pointers to point to the
+ // internally allocated memory.
+ PIC_BUFFER_CONFIG *ybf = &frame_bufs[i].buf;
+
+ ybf->y_buffer = ybf->store_buf_adr[0];
+ ybf->u_buffer = ybf->store_buf_adr[1];
+ ybf->v_buffer = ybf->store_buf_adr[2];
+ ybf->use_external_reference_buffers = 0;
+ }
+
+ frame_bufs[i].ref_count = 1;
+ break;
+ }
+ }
+
+ unlock_buffer_pool(cm->buffer_pool, flags);
+
+ return free_pic ? free_pic->index : INVALID_IDX;
+}
+
+static int get_free_fb(AV1_COMMON *cm) {
+ RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
+ unsigned long flags;
+ int i;
+
+ lock_buffer_pool(cm->buffer_pool, flags);
+ for (i = 0; i < FRAME_BUFFERS; ++i) {
+ if (frame_bufs[i].ref_count == 0
+#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
+ && frame_bufs[i].buf.vf_ref == 0
+#endif
+ )
+ break;
+ }
+
+ if (i != FRAME_BUFFERS) {
+ if (frame_bufs[i].buf.use_external_reference_buffers) {
+ // If this frame buffer's y_buffer, u_buffer, and v_buffer point to the
+ // external reference buffers. Restore the buffer pointers to point to the
+ // internally allocated memory.
+ PIC_BUFFER_CONFIG *ybf = &frame_bufs[i].buf;
+ ybf->y_buffer = ybf->store_buf_adr[0];
+ ybf->u_buffer = ybf->store_buf_adr[1];
+ ybf->v_buffer = ybf->store_buf_adr[2];
+ ybf->use_external_reference_buffers = 0;
+ }
+
+ frame_bufs[i].ref_count = 1;
+ } else {
+ // We should never run out of free buffers. If this assertion fails, there
+ // is a reference leak.
+ //assert(0 && "Ran out of free frame buffers. Likely a reference leak.");
+ // Reset i to be INVALID_IDX to indicate no free buffer found.
+ i = INVALID_IDX;
+ }
+
+ unlock_buffer_pool(cm->buffer_pool, flags);
+ return i;
+}
+
+int get_free_frame_buffer(struct AV1_Common_s *cm)
+{
+ struct AV1HW_s *hw = container_of(cm, struct AV1HW_s, common);
+
+ return hw->is_used_v4l ? v4l_get_free_fb(hw) : get_free_fb(cm);
+}
static int get_free_buf_count(struct AV1HW_s *hw)
{
- struct AV1_Common_s *const cm = &hw->pbi->common;
+ struct AV1_Common_s *const cm = &hw->common;
struct RefCntBuffer_s *const frame_bufs = cm->buffer_pool->frame_bufs;
int i;
int free_buf_count = 0;
int aom_bufmgr_init(struct AV1HW_s *hw, struct BuffInfo_s *buf_spec_i,
struct buff_s *mc_buf_i) {
- struct AV1_Common_s *cm = &hw->pbi->common;
+ struct AV1_Common_s *cm = &hw->common;
if (debug)
pr_info("%s %d %p\n", __func__, __LINE__, hw->pbi);
hw->frame_count = 0;
hw->last_pts = 0;
hw->last_pts_us64 = 0;
+ hw->last_timestamp = 0;
hw->shift_byte_count = 0;
hw->shift_byte_count_lo = 0;
hw->shift_byte_count_hi = 0;
}
}
+static bool v4l_is_there_vframe_bound(struct AV1HW_s *hw)
+{
+ int i;
+ struct AV1_Common_s *const cm = &hw->common;
+ struct RefCntBuffer_s *frame_bufs = cm->buffer_pool->frame_bufs;
+
+ for (i = 0; i < hw->used_buf_num; ++i) {
+ if (frame_bufs[i].buf.vframe_bound)
+ return true;
+ }
+ return false;
+}
+
+static void v4l_mmu_buffer_release(struct AV1HW_s *hw)
+{
+ struct AV1_Common_s *const cm = &hw->common;
+ struct RefCntBuffer_s *frame_bufs = cm->buffer_pool->frame_bufs;
+ int i;
+
+ /* release workspace */
+ if (hw->bmmu_box)
+ decoder_bmmu_box_free_idx(hw->bmmu_box,
+ WORK_SPACE_BUF_ID);
+ /*
+ * Only when a vframe comes back to the driver can we be sure that the
+ * vframe and its fd are still related. If playback exits, the capture
+ * buffers must be released by the upper app when the fd is closed; the
+ * remaining buffers are released by the driver.
+ */
+ for (i = 0; i < hw->used_buf_num; ++i) {
+ if (!frame_bufs[i].buf.vframe_bound) {
+ if (hw->bmmu_box)
+ decoder_bmmu_box_free_idx(hw->bmmu_box,
+ HEADER_BUFFER_IDX(hw->buffer_wrap[i]));
+ if (hw->mmu_box)
+ decoder_mmu_box_free_idx(hw->mmu_box, hw->buffer_wrap[i]);
+
+ av1_print(hw, PRINT_FLAG_V4L_DETAIL,
+ "%s free buffer[%d], bmmu_box: %p, mmu_box: %p\n",
+ __func__, i, hw->bmmu_box, hw->mmu_box);
+ }
+ }
+}
static void uninit_mmu_buffers(struct AV1HW_s *hw)
{
#ifndef MV_USE_FIXED_BUF
dealloc_mv_bufs(hw);
#endif
+ if (hw->is_used_v4l &&
+ v4l_is_there_vframe_bound(hw)) {
+ if (get_double_write_mode(hw) != 0x10) {
+ v4l_mmu_buffer_release(hw);
+ return;
+ }
+ }
+
if (hw->mmu_box)
decoder_mmu_box_free(hw->mmu_box);
hw->mmu_box = NULL;
hw->bmmu_box = NULL;
}
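+/*
+ * Number of LCUs covering a w x h frame: width is rounded up to 64, height
+ * to 32, then each is divided by lcu_size rounding up; e.g. 1920x1088 with
+ * a 128-pixel LCU gives 15 x 9 = 135 LCUs.
+ */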
+static int calc_luc_quantity(int lcu_size, u32 w, u32 h)
+{
+ int pic_width_64 = (w + 63) & (~0x3f);
+ int pic_height_32 = (h + 31) & (~0x1f);
+ int pic_width_lcu = (pic_width_64 % lcu_size) ?
+ pic_width_64 / lcu_size + 1 : pic_width_64 / lcu_size;
+ int pic_height_lcu = (pic_height_32 % lcu_size) ?
+ pic_height_32 / lcu_size + 1 : pic_height_32 / lcu_size;
+
+ return pic_width_lcu * pic_height_lcu;
+}
+
+static int v4l_alloc_and_config_pic(struct AV1HW_s *hw,
+ struct PIC_BUFFER_CONFIG_s *pic)
+{
+ int ret = -1;
+ int i = pic->index;
+ int dw_mode = get_double_write_mode_init(hw);
+ int lcu_total = calc_luc_quantity(hw->current_lcu_size,
+ hw->frame_width, hw->frame_height);
+#ifdef MV_USE_FIXED_BUF
+ u32 mpred_mv_end = hw->work_space_buf->mpred_mv.buf_start +
+ hw->work_space_buf->mpred_mv.buf_size;
+#ifdef USE_DYNAMIC_MV_BUFFER
+ int32_t MV_MEM_UNIT = (lcu_size == 128) ? (19*4*16) : (19*16);
+ int32_t mv_buffer_size = (lcu_total*MV_MEM_UNIT);
+#else
+ int32_t mv_buffer_size = MAX_ONE_MV_BUFFER_SIZE;
+#endif
+#endif
+ struct vdec_v4l2_buffer *fb = NULL;
+
+ if (i < 0)
+ return ret;
+
+ ret = vdec_v4l_get_buffer(hw->v4l2_ctx, &fb);
+ if (ret < 0) {
+ av1_print(hw, 0, "[%d] AV1 get buffer fail.\n",
+ ((struct aml_vcodec_ctx *) (hw->v4l2_ctx))->id);
+ return ret;
+ }
+
+ if (hw->mmu_enable) {
+ hw->m_BUF[i].header_addr = decoder_bmmu_box_get_phy_addr(
+ hw->bmmu_box, HEADER_BUFFER_IDX(hw->buffer_wrap[i]));
+ if (debug & AV1_DEBUG_BUFMGR_MORE) {
+ pr_info("MMU header_adr %d: %ld\n",
+ i, hw->m_BUF[i].header_addr);
+ }
+ }
+
+#ifdef MV_USE_FIXED_BUF
+ if ((hw->work_space_buf->mpred_mv.buf_start +
+ ((i + 1) * mv_buffer_size))
+ <= mpred_mv_end) {
+#endif
+ hw->m_BUF[i].v4l_ref_buf_addr = (ulong)fb;
+ pic->cma_alloc_addr = fb->m.mem[0].addr;
+ if (fb->num_planes == 1) {
+ hw->m_BUF[i].start_adr = fb->m.mem[0].addr;
+ hw->m_BUF[i].luma_size = fb->m.mem[0].offset;
+ hw->m_BUF[i].size = fb->m.mem[0].size;
+ fb->m.mem[0].bytes_used = fb->m.mem[0].size;
+ pic->dw_y_adr = hw->m_BUF[i].start_adr;
+ pic->dw_u_v_adr = pic->dw_y_adr + hw->m_BUF[i].luma_size;
+ } else if (fb->num_planes == 2) {
+ hw->m_BUF[i].start_adr = fb->m.mem[0].addr;
+ hw->m_BUF[i].size = fb->m.mem[0].size;
+ hw->m_BUF[i].chroma_addr = fb->m.mem[1].addr;
+ hw->m_BUF[i].chroma_size = fb->m.mem[1].size;
+ fb->m.mem[0].bytes_used = fb->m.mem[0].size;
+ fb->m.mem[1].bytes_used = fb->m.mem[1].size;
+ pic->dw_y_adr = hw->m_BUF[i].start_adr;
+ pic->dw_u_v_adr = hw->m_BUF[i].chroma_addr;
+ }
+
+ /* config frame buffer */
+ if (hw->mmu_enable)
+ pic->header_adr = hw->m_BUF[i].header_addr;
+
+ pic->BUF_index = i;
+ pic->lcu_total = lcu_total;
+ pic->mc_canvas_y = pic->index;
+ pic->mc_canvas_u_v = pic->index;
+
+ if (dw_mode & 0x10) {
+ pic->mc_canvas_y = (pic->index << 1);
+ pic->mc_canvas_u_v = (pic->index << 1) + 1;
+ }
+
+#ifdef MV_USE_FIXED_BUF
+ pic->mpred_mv_wr_start_addr =
+ hw->work_space_buf->mpred_mv.buf_start +
+ (pic->index * mv_buffer_size);
+#endif
+
+#ifdef DUMP_FILMGRAIN
+ if (pic->index == fg_dump_index) {
+ pic->fgs_table_adr = hw->fg_phy_addr;
+ pr_info("set buffer %d film grain table 0x%x\n",
+ pic->index, pic->fgs_table_adr);
+ } else
+#endif
+ pic->fgs_table_adr =
+ hw->work_space_buf->fgs_table.buf_start +
+ (pic->index * FGS_TABLE_SIZE);
+
+ if (debug) {
+
+ pr_info("%s index %d BUF_index %d ",
+ __func__, pic->index,
+ pic->BUF_index);
+ pr_info("comp_body_size %x comp_buf_size %x ",
+ pic->comp_body_size,
+ pic->buf_size);
+ pr_info("mpred_mv_wr_start_adr %d\n",
+ pic->mpred_mv_wr_start_addr);
+ pr_info("dw_y_adr %d, pic_config->dw_u_v_adr =%d\n",
+ pic->dw_y_adr,
+ pic->dw_u_v_adr);
+ }
+#ifdef MV_USE_FIXED_BUF
+ }
+#endif
+ return ret;
+}
static int config_pic(struct AV1HW_s *hw,
struct PIC_BUFFER_CONFIG_s *pic_config)
int mc_buffer_size_u_v = 0;
int mc_buffer_size_u_v_h = 0;
int dw_mode = get_double_write_mode_init(hw);
- struct vdec_v4l2_buffer *fb = NULL;
hw->lcu_total = lcu_total;
/*!USE_SPEC_BUF_FOR_MMU_HEAD*/
if (hw->mmu_enable) {
pic_config->header_adr = decoder_bmmu_box_get_phy_addr(
- hw->bmmu_box, HEADER_BUFFER_IDX(pic_config->index));
+ hw->bmmu_box, HEADER_BUFFER_IDX(hw->buffer_wrap[pic_config->index]));
#ifdef AOM_AV1_MMU_DW
if (hw->dw_mmu_enable) {
pic_config->header_dw_adr = decoder_bmmu_box_get_phy_addr(
- hw->bmmu_box, DW_HEADER_BUFFER_IDX(pic_config->index));
+ hw->bmmu_box, DW_HEADER_BUFFER_IDX(hw->buffer_wrap[pic_config->index]));
}
if (debug & AV1_DEBUG_BUFMGR_MORE) {
pr_info("MMU dw header_adr (%d, %d) %d: %d\n",
hw->dw_mmu_enable,
- DW_HEADER_BUFFER_IDX(pic_config->index),
- pic_config->index,
+ DW_HEADER_BUFFER_IDX(hw->buffer_wrap[pic_config->index]),
+ hw->buffer_wrap[pic_config->index],
pic_config->header_dw_adr);
}
#endif
if (debug & AV1_DEBUG_BUFMGR_MORE) {
pr_info("MMU header_adr %d: %d\n",
- pic_config->index, pic_config->header_adr);
+ hw->buffer_wrap[pic_config->index], pic_config->header_adr);
}
}
#endif
) {
#endif
if (buf_size > 0) {
- if (hw->is_used_v4l) {
-#ifdef SUPPORT_V4L2
- ret = vdec_v4l_get_buffer(hw->v4l2_ctx, &fb);
-#endif
- if (ret) {
- av1_print(hw, PRINT_FLAG_ERROR,
- "[%d] get fb fail.\n",
- ((struct aml_vcodec_ctx *)
- (hw->v4l2_ctx))->id);
- return ret;
- }
-
- hw->m_BUF[i].v4l_ref_buf_addr = (ulong)fb;
-#ifdef SUPPORT_V4L2
- pic_config->cma_alloc_addr = fb->m.mem[0].addr;
-#endif
- av1_print(hw, PRINT_FLAG_V4L_DETAIL,
- "[%d] %s(), v4l ref buf addr: 0x%x\n",
- ((struct aml_vcodec_ctx *)
- (hw->v4l2_ctx))->id, __func__, fb);
- } else {
- ret = decoder_bmmu_box_alloc_buf_phy(hw->bmmu_box,
- VF_BUFFER_IDX(i),
- buf_size, DRIVER_NAME,
- &pic_config->cma_alloc_addr);
- if (ret < 0) {
- pr_info(
- "decoder_bmmu_box_alloc_buf_phy idx %d size %d fail\n",
- VF_BUFFER_IDX(i),
- buf_size
- );
- return ret;
- }
+ ret = decoder_bmmu_box_alloc_buf_phy(hw->bmmu_box,
+ VF_BUFFER_IDX(hw->buffer_wrap[i]),
+ buf_size, DRIVER_NAME,
+ &pic_config->cma_alloc_addr);
+ if (ret < 0) {
+ pr_info(
+ "decoder_bmmu_box_alloc_buf_phy idx %d size %d fail\n",
+ VF_BUFFER_IDX(hw->buffer_wrap[i]),
+ buf_size
+ );
+ return ret;
}
if (pic_config->cma_alloc_addr)
else {
pr_info(
"decoder_bmmu_box_alloc_buf_phy idx %d size %d return null\n",
- VF_BUFFER_IDX(i),
+ VF_BUFFER_IDX(hw->buffer_wrap[i]),
buf_size
);
return -1;
static void init_pic_list(struct AV1HW_s *hw)
{
int i;
- struct AV1_Common_s *cm = &hw->pbi->common;
+ struct AV1_Common_s *cm = &hw->common;
struct PIC_BUFFER_CONFIG_s *pic_config;
struct vdec_s *vdec = hw_to_vdec(hw);
pic_config->y_canvas_index = -1;
pic_config->uv_canvas_index = -1;
}
- if (config_pic(hw, pic_config) < 0) {
- if (debug)
- av1_print(hw, 0, "Config_pic %d fail\n",
- pic_config->index);
- pic_config->index = -1;
- break;
- }
pic_config->y_crop_width = hw->init_pic_w;
pic_config->y_crop_height = hw->init_pic_h;
pic_config->double_write_mode = get_double_write_mode(hw);
+ hw->buffer_wrap[i] = i;
+
+ if (!hw->is_used_v4l) {
+ if (config_pic(hw, pic_config) < 0) {
+ if (debug)
+ av1_print(hw, 0, "Config_pic %d fail\n",
+ pic_config->index);
+ pic_config->index = -1;
+ break;
+ }
- if (pic_config->double_write_mode &&
- (pic_config->double_write_mode & 0x20) == 0) {
- set_canvas(hw, pic_config);
+ if (pic_config->double_write_mode &&
+ (pic_config->double_write_mode & 0x20) == 0) {
+ set_canvas(hw, pic_config);
+ }
}
}
for (; i < hw->used_buf_num; i++) {
pic_config->index = -1;
pic_config->BUF_index = -1;
pic_config->mv_buf_index = -1;
+ hw->buffer_wrap[i] = i;
if (vdec->parallel_dec == 1) {
pic_config->y_canvas_index = -1;
pic_config->uv_canvas_index = -1;
static void init_pic_list_hw(struct AV1HW_s *hw)
{
int i;
- struct AV1_Common_s *cm = &hw->pbi->common;
+ struct AV1_Common_s *cm = &hw->common;
struct PIC_BUFFER_CONFIG_s *pic_config;
/*WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, 0x0);*/
WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR,
static void dump_pic_list(struct AV1HW_s *hw)
{
- struct AV1_Common_s *const cm = &hw->pbi->common;
+ struct AV1_Common_s *const cm = &hw->common;
struct PIC_BUFFER_CONFIG_s *pic_config;
int i;
for (i = 0; i < FRAME_BUFFERS; i++) {
void av1_release_bufs(struct AV1HW_s *hw)
{
- /*struct AV1HW_s *hw = (struct AV1HW_s *)(pbi->private_data);*/
- AV1_COMMON *cm = &hw->pbi->common;
+ AV1_COMMON *cm = &hw->common;
RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
int i;
- if (!hw->mmu_enable)
- return;
for (i = 0; i < FRAME_BUFFERS; ++i) {
if (frame_bufs[i].buf.vf_ref == 0 &&
frame_bufs[i].ref_count == 0 &&
frame_bufs[i].buf.index >= 0) {
- //release_buffer_4k(&av1_mmumgr_m, i);
- decoder_mmu_box_free_idx(hw->mmu_box, i);
-#ifdef AOM_AV1_MMU_DW
- //release_buffer_4k(&av1_mmumgr_dw, i);
- if (hw->dw_mmu_enable)
- decoder_mmu_box_free_idx(hw->mmu_box_dw, i);
-#endif
- av1_print(hw, AOM_DEBUG_HW_MORE, "%s, index %d\n",
- __func__, i);
-
if (frame_bufs[i].buf.aux_data_buf)
release_aux_data(hw, &frame_bufs[i].buf);
}
static int config_pic_size(struct AV1HW_s *hw, unsigned short bit_depth)
{
- uint32_t data32;
- struct AV1_Common_s *cm = &hw->pbi->common;
+ uint32_t data32;
+ struct AV1_Common_s *cm = &hw->common;
struct PIC_BUFFER_CONFIG_s *cur_pic_config = &cm->cur_frame->buf;
int losless_comp_header_size, losless_comp_body_size;
#ifdef AOM_AV1_MMU_DW
static int config_mc_buffer(struct AV1HW_s *hw, unsigned short bit_depth, unsigned char inter_flag)
{
int32_t i;
- AV1_COMMON *cm = &hw->pbi->common;
+ AV1_COMMON *cm = &hw->common;
PIC_BUFFER_CONFIG* cur_pic_config = &cm->cur_frame->buf;
uint8_t scale_enable = 0;
static void config_mpred_hw(struct AV1HW_s *hw, unsigned char inter_flag)
{
- AV1_COMMON *cm = &hw->pbi->common;
+ AV1_COMMON *cm = &hw->common;
PIC_BUFFER_CONFIG *cur_pic_config = &cm->cur_frame->buf;
//PIC_BUFFER_CONFIG *last_frame_pic_config = NULL;
int i, j, pos, reg_i;
cm->current_frame.frame_type, cm->cur_frame->frame_type,
cm->allow_ref_frame_mvs);
- if (av1_frame_is_inter(&hw->pbi->common)) {
+ if (av1_frame_is_inter(&hw->common)) {
if (cm->allow_ref_frame_mvs) {
data32 |= (1 << 11); /*read enable*/
}
!!!!!!!!!!!!!!!!!!!!!!!!!TODO .... !!!!!!!!!!!
mem_map_mode, endian, get_double_write_mode
*/
- AV1_COMMON *cm = &hw->pbi->common;
+ AV1_COMMON *cm = &hw->common;
PIC_BUFFER_CONFIG* pic_config = &cm->cur_frame->buf;
uint32_t data32;
int32_t lcu_size =
pic_config->lcu_total*lcu_size*lcu_size/2;
int32_t mc_buffer_size_u_v_h =
(mc_buffer_size_u_v + 0xffff)>>16; //64k alignment
+ struct aml_vcodec_ctx * v4l2_ctx = hw->v4l2_ctx;
+
av1_print(hw, AOM_DEBUG_HW_MORE,
"[test.c] #### config_sao_hw ####, lcu_size %d\n", lcu_size);
av1_print(hw, AOM_DEBUG_HW_MORE,
WRITE_VREG(HEVC_DBLK_CFGB, data);
}
+ /* swap uv */
+ if (hw->is_used_v4l) {
+ if ((v4l2_ctx->q_data[AML_Q_DATA_DST].fmt->fourcc == V4L2_PIX_FMT_NV21) ||
+ (v4l2_ctx->q_data[AML_Q_DATA_DST].fmt->fourcc == V4L2_PIX_FMT_NV21M))
+ data32 &= ~(1 << 8); /* NV21 */
+ else
+ data32 |= (1 << 8); /* NV12 */
+ }
+
+ /*
+ * [31:24] ar_fifo1_axi_thred
+ * [23:16] ar_fifo0_axi_thred
+ * [15:14] axi_linealign, 0-16bytes, 1-32bytes, 2-64bytes
+ * [13:12] axi_aformat, 0-Linear, 1-32x32, 2-64x32
+ * [11:08] axi_lendian_C
+ * [07:04] axi_lendian_Y
+ * [3] reserved
+ * [2] clk_forceon
+ * [1] dw_disable:disable double write output
+ * [0] cm_disable:disable compress output
+ */
WRITE_VREG(HEVC_SAO_CTRL1, data32);
if (get_double_write_mode(hw) & 0x10) {
data32 &= (~0xF);
data32 |= 0xf; /* valid only when double write only */
/*data32 |= 0x8;*/ /* Big-Endian per 64-bit */
+
+ /* swap uv */
+ if (hw->is_used_v4l) {
+ if ((v4l2_ctx->q_data[AML_Q_DATA_DST].fmt->fourcc == V4L2_PIX_FMT_NV21) ||
+ (v4l2_ctx->q_data[AML_Q_DATA_DST].fmt->fourcc == V4L2_PIX_FMT_NV21M))
+ data32 |= (1 << 12); /* NV21 */
+ else
+ data32 &= ~(1 << 12); /* NV12 */
+ }
+
+ /*
+ * [3:0] little_endian
+ * [5:4] address_format 00:linear 01:32x32 10:64x32
+ * [7:6] reserved
+ * [9:8] Linear_LineAlignment 00:16byte 01:32byte 10:64byte
+ * [11:10] reserved
+ * [12] CbCr_byte_swap
+ * [31:13] reserved
+ */
WRITE_VREG(HEVCD_IPP_AXIIF_CONFIG, data32);
#endif
static void config_dblk_hw(struct AV1HW_s *hw)
{
AV1Decoder *pbi = hw->pbi;
- AV1_COMMON *cm = &hw->pbi->common;
+ AV1_COMMON *cm = &hw->common;
loop_filter_info_n *lfi = hw->lfi;
struct loopfilter *lf = hw->lf;
struct segmentation_lf *seg_4lf = hw->seg_4lf;
av1_print(hw, AOM_DEBUG_HW_MORE,
"[test.c] av1_upscale_frame_init (run before every frame decoding start)\n");
av1_upscale_frame_init(pbi,
- &pbi->common, &hw->aom_param);
+ pbi->common, &hw->aom_param);
#endif // #ifdef AOM_AV1_UPSCALE_INIT
//BuffInfo_t* buf_spec = pbi->work_space_buf;
return -1;
}
#endif
+
hw->mv_buf_margin = mv_buf_margin;
if (IS_4K_SIZE(hw->init_pic_w, hw->init_pic_h)) {
hw->used_buf_num = MAX_BUF_NUM_LESS + dynamic_buf_num_margin;
else
hw->used_buf_num = max_buf_num + dynamic_buf_num_margin;
+ if (hw->is_used_v4l)
+ hw->used_buf_num = 9 + hw->dynamic_buf_num_margin;
+
if (hw->used_buf_num > MAX_BUF_NUM)
hw->used_buf_num = MAX_BUF_NUM;
if (hw->used_buf_num > FRAME_BUFFERS)
canvas_config_ex(pic_config->y_canvas_index,
pic_config->dw_y_adr, canvas_w, canvas_h,
- CANVAS_ADDR_NOWRAP, blkmode, 0x7);
+ CANVAS_ADDR_NOWRAP, blkmode, hw->is_used_v4l ? 0 : 7);
canvas_config_ex(pic_config->uv_canvas_index,
pic_config->dw_u_v_adr, canvas_w, canvas_h,
- CANVAS_ADDR_NOWRAP, blkmode, 0x7);
+ CANVAS_ADDR_NOWRAP, blkmode, hw->is_used_v4l ? 0 : 7);
#ifdef MULTI_INSTANCE_SUPPORT
pic_config->canvas_config[0].phy_addr =
canvas_h;
pic_config->canvas_config[0].block_mode =
blkmode;
- pic_config->canvas_config[0].endian = 7;
+ pic_config->canvas_config[0].endian = hw->is_used_v4l ? 0 : 7;
pic_config->canvas_config[1].phy_addr =
pic_config->dw_u_v_adr;
canvas_h;
pic_config->canvas_config[1].block_mode =
blkmode;
- pic_config->canvas_config[1].endian = 7;
+ pic_config->canvas_config[1].endian = hw->is_used_v4l ? 0 : 7;
#endif
}
}
ar = min_t(u32, hw->frame_ar, DISP_RATIO_ASPECT_RATIO_MAX);
vf->ratio_control = (ar << DISP_RATIO_ASPECT_RATIO_BIT);
+ if (hw->is_used_v4l && hw->vf_dp.present_flag) {
+ struct aml_vdec_hdr_infos hdr;
+ struct aml_vcodec_ctx *ctx =
+ (struct aml_vcodec_ctx *)(hw->v4l2_ctx);
+
+ memset(&hdr, 0, sizeof(hdr));
+ hdr.signal_type = vf->signal_type;
+ hdr.color_parms = hw->vf_dp;
+ vdec_v4l_set_hdr_infos(ctx, &hdr);
+ }
}
static int vav1_vf_states(struct vframe_states *states, void *op_arg)
if (kfifo_get(&hw->display_q, &vf)) {
struct vframe_s *next_vf;
uint8_t index = vf->index & 0xff;
- if (index < hw->used_buf_num) {
+ if (index < hw->used_buf_num ||
+ (vf->type & VIDTYPE_V4L_EOS)) {
hw->vf_get_count++;
if (debug & AOM_DEBUG_VFRAME) {
- struct BufferPool_s *pool = hw->pbi->common.buffer_pool;
+ struct BufferPool_s *pool = hw->common.buffer_pool;
struct PIC_BUFFER_CONFIG_s *pic =
&pool->frame_bufs[index].buf;
unsigned long flags;
- lock_buffer_pool(hw->pbi->common.buffer_pool, flags);
- av1_print(hw, AOM_DEBUG_VFRAME, "%s index 0x%x type 0x%x w/h %d/%d, aux size %d, pts %d, %lld\n",
+ lock_buffer_pool(hw->common.buffer_pool, flags);
+ av1_print(hw, AOM_DEBUG_VFRAME, "%s index 0x%x type 0x%x w/h %d/%d, aux size %d, pts %d, %lld, ts: %llu\n",
__func__, vf->index, vf->type,
vf->width, vf->height,
pic->aux_data_size,
vf->pts,
- vf->pts_us64);
- unlock_buffer_pool(hw->pbi->common.buffer_pool, flags);
+ vf->pts_us64,
+ vf->timestamp);
+ unlock_buffer_pool(hw->common.buffer_pool, flags);
}
if (kfifo_peek(&hw->display_q, &next_vf)) {
if (index == fg_dump_index) {
unsigned long flags;
int ii;
- lock_buffer_pool(hw->pbi->common.buffer_pool, flags);
+ lock_buffer_pool(hw->common.buffer_pool, flags);
pr_info("FGS_TABLE for buffer %d:\n", index);
for (ii = 0; ii < FGS_TABLE_SIZE; ii++) {
pr_info("%02x ", hw->fg_ptr[ii]);
if (((ii+ 1) & 0xf) == 0)
pr_info("\n");
}
- unlock_buffer_pool(hw->pbi->common.buffer_pool, flags);
+ unlock_buffer_pool(hw->common.buffer_pool, flags);
}
#endif
kfifo_put(&hw->newframe_q, (const struct vframe_s *)vf);
hw->vf_put_count++;
if (debug & AOM_DEBUG_VFRAME) {
- lock_buffer_pool(hw->pbi->common.buffer_pool, flags);
- av1_print(hw, AOM_DEBUG_VFRAME, "%s index 0x%x type 0x%x w/h %d/%d, pts %d, %lld\n",
+ lock_buffer_pool(hw->common.buffer_pool, flags);
+ av1_print(hw, AOM_DEBUG_VFRAME, "%s index 0x%x type 0x%x w/h %d/%d, pts %d, %lld, ts: %llu\n",
__func__, vf->index, vf->type,
vf->width, vf->height,
vf->pts,
- vf->pts_us64);
- unlock_buffer_pool(hw->pbi->common.buffer_pool, flags);
+ vf->pts_us64,
+ vf->timestamp);
+ unlock_buffer_pool(hw->common.buffer_pool, flags);
}
if (index < hw->used_buf_num) {
- struct AV1_Common_s *cm = &hw->pbi->common;
+ struct AV1_Common_s *cm = &hw->common;
struct BufferPool_s *pool = cm->buffer_pool;
- lock_buffer_pool(hw->pbi->common.buffer_pool, flags);
+ lock_buffer_pool(hw->common.buffer_pool, flags);
if ((debug & AV1_DEBUG_IGNORE_VF_REF) == 0) {
if (pool->frame_bufs[index].buf.vf_ref > 0)
pool->frame_bufs[index].buf.vf_ref--;
}
+
+ if (hw->is_used_v4l)
+ pool->frame_bufs[index].buf.vframe_bound = true;
+
if (hw->wait_buf)
WRITE_VREG(HEVC_ASSIST_MBOX0_IRQ_REG,
0x1);
hw->last_put_idx = index;
hw->new_frame_displayed++;
- unlock_buffer_pool(hw->pbi->common.buffer_pool, flags);
+ unlock_buffer_pool(hw->common.buffer_pool, flags);
}
}
{
unsigned long flags;
struct AV1HW_s *hw = (struct AV1HW_s *)op_arg;
- struct AV1_Common_s *cm = &hw->pbi->common;
+ struct AV1_Common_s *cm = &hw->common;
struct BufferPool_s *pool = cm->buffer_pool;
if (type & VFRAME_EVENT_RECEIVER_RESET) {
(struct provider_aux_req_s *)data;
unsigned char index;
- lock_buffer_pool(hw->pbi->common.buffer_pool, flags);
+ lock_buffer_pool(hw->common.buffer_pool, flags);
index = req->vf->index & 0xff;
req->aux_buf = NULL;
req->aux_size = 0;
req->dv_enhance_exist = 0;
#endif
}
- unlock_buffer_pool(hw->pbi->common.buffer_pool, flags);
+ unlock_buffer_pool(hw->common.buffer_pool, flags);
if (debug & AOM_DEBUG_AUX_DATA)
av1_print(hw, 0,
void av1_inc_vf_ref(struct AV1HW_s *hw, int index)
{
- struct AV1_Common_s *cm = &hw->pbi->common;
+ struct AV1_Common_s *cm = &hw->common;
if ((debug & AV1_DEBUG_IGNORE_VF_REF) == 0) {
cm->buffer_pool->frame_bufs[index].buf.vf_ref++;
static void update_vf_memhandle(struct AV1HW_s *hw,
struct vframe_s *vf, struct PIC_BUFFER_CONFIG_s *pic)
{
+ /* keeper not needed for v4l solution */
+ if (hw->is_used_v4l)
+ return;
+
if (pic->index < 0) {
vf->mem_handle = NULL;
vf->mem_head_handle = NULL;
(debug & AOM_DEBUG_DW_DISP_MAIN) == 0) {
vf->mem_handle =
decoder_mmu_box_get_mem_handle(
- hw->mmu_box_dw, pic->index);
+ hw->mmu_box_dw, hw->buffer_wrap[pic->index]);
vf->mem_head_handle =
decoder_bmmu_box_get_mem_handle(
hw->bmmu_box,
- DW_HEADER_BUFFER_IDX(pic->BUF_index));
+ DW_HEADER_BUFFER_IDX(hw->buffer_wrap[pic->BUF_index]));
vf->mem_dw_handle = NULL;
} else
#endif
{
vf->mem_handle =
decoder_mmu_box_get_mem_handle(
- hw->mmu_box, pic->index);
+ hw->mmu_box, hw->buffer_wrap[pic->index]);
vf->mem_head_handle =
decoder_bmmu_box_get_mem_handle(
hw->bmmu_box,
- HEADER_BUFFER_IDX(pic->BUF_index));
+ HEADER_BUFFER_IDX(hw->buffer_wrap[pic->BUF_index]));
if (hw->double_write_mode == 3)
vf->mem_dw_handle =
decoder_bmmu_box_get_mem_handle(
hw->bmmu_box,
- VF_BUFFER_IDX(pic->BUF_index));
+ VF_BUFFER_IDX(hw->buffer_wrap[pic->BUF_index]));
else
vf->mem_dw_handle = NULL;
}
} else {
vf->mem_handle =
decoder_bmmu_box_get_mem_handle(
- hw->bmmu_box, VF_BUFFER_IDX(pic->BUF_index));
+ hw->bmmu_box, VF_BUFFER_IDX(hw->buffer_wrap[pic->BUF_index]));
vf->mem_head_handle = NULL;
vf->mem_dw_handle = NULL;
/*vf->mem_head_handle =
{
struct vframe_s *vf = NULL;
int stream_offset = pic_config->stream_offset;
+ struct aml_vcodec_ctx *v4l2_ctx = hw->v4l2_ctx;
+ ulong nv_order = VIDTYPE_VIU_NV21;
u32 pts_valid = 0, pts_us64_valid = 0;
u32 frame_size;
- int i, reclac_flag;
+ int i, reclac_flag = 0;
av1_print(hw, AOM_DEBUG_VFRAME, "%s index = %d\r\n", __func__, pic_config->index);
if (kfifo_get(&hw->newframe_q, &vf) == 0) {
return -1;
}
+ /* swap uv */
+ if (hw->is_used_v4l) {
+ if ((v4l2_ctx->cap_pix_fmt == V4L2_PIX_FMT_NV12) ||
+ (v4l2_ctx->cap_pix_fmt == V4L2_PIX_FMT_NV12M))
+ nv_order = VIDTYPE_VIU_NV12;
+ }
+
if (pic_config->double_write_mode &&
(pic_config->double_write_mode & 0x20) == 0)
set_canvas(hw, pic_config);
display_frame_count[hw->index]++;
if (vf) {
if (!force_pts_unstable && (hw->av1_first_pts_ready)) {
- if ((pic_config->pts == 0) || (pic_config->pts <= hw->last_pts)) {
- for (i = (FRAME_BUFFERS - 1); i > 0; i--) {
- if ((hw->last_pts == hw->frame_mode_pts_save[i]) ||
- (hw->last_pts_us64 == hw->frame_mode_pts64_save[i])) {
- pic_config->pts = hw->frame_mode_pts_save[i - 1];
- pic_config->pts64 = hw->frame_mode_pts64_save[i - 1];
- break;
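+ /* v4l path: recover a missing or non-increasing timestamp from the per-chunk save list, mirroring the pts handling below */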
+ if (hw->is_used_v4l) {
+ if ((pic_config->timestamp == 0) || (pic_config->timestamp <= hw->last_timestamp)) {
+ for (i = (FRAME_BUFFERS - 1); i > 0; i--) {
+ if (hw->last_timestamp == hw->frame_mode_timestamp_save[i]) {
+ pic_config->timestamp = hw->frame_mode_timestamp_save[i - 1];
+ break;
+ }
+ }
+
+ if ((i == 0) || (pic_config->timestamp <= hw->last_timestamp)) {
+ av1_print(hw, AV1_DEBUG_OUT_PTS,
+ "no found timestamp %d, set 0. %d, %d\n",
+ i, pic_config->timestamp, hw->last_timestamp);
+ pic_config->timestamp = 0;
}
}
- if ((i == 0) || (pic_config->pts <= hw->last_pts)) {
- av1_print(hw, AV1_DEBUG_OUT_PTS,
- "no found pts %d, set 0. %d, %d\n",
- i, pic_config->pts, hw->last_pts);
- pic_config->pts = 0;
- pic_config->pts64 = 0;
+ } else {
+ if ((pic_config->pts == 0) || (pic_config->pts <= hw->last_pts)) {
+ for (i = (FRAME_BUFFERS - 1); i > 0; i--) {
+ if ((hw->last_pts == hw->frame_mode_pts_save[i]) ||
+ (hw->last_pts_us64 == hw->frame_mode_pts64_save[i])) {
+ pic_config->pts = hw->frame_mode_pts_save[i - 1];
+ pic_config->pts64 = hw->frame_mode_pts64_save[i - 1];
+ break;
+ }
+ }
+
+ if ((i == 0) || (pic_config->pts <= hw->last_pts)) {
+ av1_print(hw, AV1_DEBUG_OUT_PTS,
+ "no found pts %d, set 0. %d, %d\n",
+ i, pic_config->pts, hw->last_pts);
+ pic_config->pts = 0;
+ pic_config->pts64 = 0;
+ }
}
}
}
+
if (hw->is_used_v4l) {
vf->v4l_mem_handle
= hw->m_BUF[pic_config->BUF_index].v4l_ref_buf_addr;
- av1_print(hw, PRINT_FLAG_V4L_DETAIL,
- "[%d] %s(), v4l mem handle: 0x%lx\n",
- ((struct aml_vcodec_ctx *)(hw->v4l2_ctx))->id,
- __func__, vf->v4l_mem_handle);
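+ /* attach the mmu/bmmu box handles and this buffer's indices to the vframe for the v4l path */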
+ if (hw->mmu_enable) {
+ vf->mm_box.bmmu_box = hw->bmmu_box;
+ vf->mm_box.bmmu_idx = HEADER_BUFFER_IDX(hw->buffer_wrap[pic_config->BUF_index]);
+ vf->mm_box.mmu_box = hw->mmu_box;
+ vf->mm_box.mmu_idx = hw->buffer_wrap[pic_config->BUF_index];
+ }
}
#ifdef MULTI_INSTANCE_SUPPORT
if (vdec_frame_based(hw_to_vdec(hw))) {
vf->pts = pic_config->pts;
vf->pts_us64 = pic_config->pts64;
+ vf->timestamp = pic_config->timestamp;
if (vf->pts != 0 || vf->pts_us64 != 0) {
pts_valid = 1;
pts_us64_valid = 1;
}
if (hw->av1_first_pts_ready) {
- if (hw->frame_dur && ((vf->pts == 0) || (vf->pts_us64 == 0))) {
- vf->pts = hw->last_pts + DUR2PTS(hw->frame_dur);
- vf->pts_us64 = hw->last_pts_us64 +
- (DUR2PTS(hw->frame_dur) * 100 / 9);
- reclac_flag = 1;
- }
+ if (hw->is_used_v4l) {
+ if (hw->frame_dur && (vf->timestamp == 0)) {
+ vf->timestamp = hw->last_timestamp +
+ hw->timestamp_duration;
+ }
+
+ if (!close_to(vf->timestamp, (hw->last_timestamp +
+ hw->timestamp_duration), 100)) {
+ vf->timestamp = hw->last_timestamp +
+ hw->timestamp_duration;
+ }
+ } else {
+ if (hw->frame_dur && ((vf->pts == 0) || (vf->pts_us64 == 0))) {
+ vf->pts = hw->last_pts + DUR2PTS(hw->frame_dur);
+ vf->pts_us64 = hw->last_pts_us64 +
+ (DUR2PTS(hw->frame_dur) * 100 / 9);
+ reclac_flag = 1;
+ }
- if (!close_to(vf->pts, (hw->last_pts + DUR2PTS(hw->frame_dur)), 100)) {
- vf->pts = hw->last_pts + DUR2PTS(hw->frame_dur);
- vf->pts_us64 = hw->last_pts_us64 +
- (DUR2PTS(hw->frame_dur) * 100 / 9);
- reclac_flag = 2;
+ if (!close_to(vf->pts, (hw->last_pts + DUR2PTS(hw->frame_dur)), 100)) {
+ vf->pts = hw->last_pts + DUR2PTS(hw->frame_dur);
+ vf->pts_us64 = hw->last_pts_us64 +
+ (DUR2PTS(hw->frame_dur) * 100 / 9);
+ reclac_flag = 2;
+ }
}
/* try find the closed pts in saved pts pool */
}
} else {
av1_print(hw, AV1_DEBUG_OUT_PTS,
- "first pts %d change to save[%d] %d\n",
+ "first pts %d change to save[%d] %d, ts: %llu\n",
vf->pts, hw->first_pts_index - 1,
- hw->frame_mode_pts_save[hw->first_pts_index - 1]);
+ hw->frame_mode_pts_save[hw->first_pts_index - 1],
+ hw->frame_mode_timestamp_save[hw->first_pts_index - 1]);
vf->pts = hw->frame_mode_pts_save[hw->first_pts_index - 1];
vf->pts_us64 = hw->frame_mode_pts64_save[hw->first_pts_index - 1];
+ vf->timestamp = hw->frame_mode_timestamp_save[hw->first_pts_index - 1];
}
hw->last_pts = vf->pts;
hw->last_pts_us64 = vf->pts_us64;
+ hw->last_timestamp = vf->timestamp;
hw->av1_first_pts_ready = true;
av1_print(hw, AV1_DEBUG_OUT_PTS,
- "av1 output slice type %d, dur %d, pts %d, pts64 %lld\n",
- pic_config->slice_type, hw->frame_dur, vf->pts, vf->pts_us64);
+ "av1 output slice type %d, dur %d, pts %d, pts64 %lld, ts: %llu\n",
+ pic_config->slice_type, hw->frame_dur, vf->pts, vf->pts_us64, vf->timestamp);
fill_frame_info(hw, pic_config, frame_size, vf->pts);
{
struct AV1HW_s *hw = (struct AV1HW_s *)vdec->private;
struct aml_vcodec_ctx *ctx = (struct aml_vcodec_ctx *)(hw->v4l2_ctx);
- struct vframe_s *vf = NULL;
- struct vdec_fb *fb = NULL;
+ struct vframe_s *vf = &hw->vframe_dummy;
+ struct vdec_v4l2_buffer *fb = NULL;
+ int index = INVALID_IDX;
+ ulong expires;
if (hw->is_used_v4l && hw->eos) {
- if (kfifo_get(&hw->newframe_q, &vf) == 0 || vf == NULL) {
- av1_print(hw, 0,
- "%s fatal error, no available buffer slot.\n",
- __func__);
- return -1;
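+ /* on EOS, wait up to 2s for a free capture buffer; if none shows up, request one from the v4l2 layer before queuing the empty EOS frame */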
+ expires = jiffies + msecs_to_jiffies(2000);
+ while (INVALID_IDX == (index = v4l_get_free_fb(hw))) {
+ if (time_after(jiffies, expires) ||
+ v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx))
+ break;
}
- if (v4l_get_fb(hw->v4l2_ctx, &fb)) {
- pr_err("[%d] get fb fail.\n", ctx->id);
- return -1;
+ if (index == INVALID_IDX) {
+ if (vdec_v4l_get_buffer(hw->v4l2_ctx, &fb) < 0) {
+ pr_err("[%d] EOS get free buff fail.\n", ctx->id);
+ return -1;
+ }
}
- vf->timestamp = ULONG_MAX;
- vf->v4l_mem_handle = (unsigned long)fb;
- vf->flag = VFRAME_FLAG_EMPTY_FRAME_V4L;
+ vf->type |= VIDTYPE_V4L_EOS;
+ vf->timestamp = ULONG_MAX;
+ vf->flag = VFRAME_FLAG_EMPTY_FRAME_V4L;
+ vf->v4l_mem_handle = (index == INVALID_IDX) ? (ulong)fb :
+ hw->m_BUF[index].v4l_ref_buf_addr;
kfifo_put(&hw->display_q, (const struct vframe_s *)vf);
vf_notify_receiver(vdec->vf_provider_name,
VFRAME_EVENT_PROVIDER_VFRAME_READY, NULL);
- av1_print(hw, AOM_DEBUG_HW_MORE, "[%d] AV1 EOS notify.\n", ctx->id);
+ av1_print(hw, PRINT_FLAG_V4L_DETAIL,
+ "[%d] AV1 EOS notify.\n", ctx->id);
}
return 0;
pr_info("leave %s\r\n", __func__);
}
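+/* Variants kept under CHANGE_REMOVED: free only the MMU pages the hardware reports as used (HEVC_SAO_MMU_STATUS) for the current frame. */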
+#ifdef CHANGE_REMOVED
+static int recycle_mmu_buf_tail(struct AV1HW_s *hw,
+ bool check_dma)
+{
+ struct AV1_Common_s *const cm = &hw->common;
+
+ hw->used_4k_num =
+ READ_VREG(HEVC_SAO_MMU_STATUS) >> 16;
+
+ av1_print(hw, 0, "pic index %d page_start %d\n",
+ cm->cur_fb_idx_mmu, hw->used_4k_num);
+
+ if (check_dma)
+ hevc_mmu_dma_check(hw_to_vdec(hw));
+
+ decoder_mmu_box_free_idx_tail(
+ hw->mmu_box,
+ hw->buffer_wrap[cm->cur_fb_idx_mmu],
+ hw->used_4k_num);
+
+ cm->cur_fb_idx_mmu = INVALID_IDX;
+ hw->used_4k_num = -1;
+
+ return 0;
+}
+#endif
+
+#ifdef CHANGE_REMOVED
+static void av1_recycle_mmu_buf_tail(struct AV1HW_s *hw)
+{
+ struct AV1_Common_s *const cm = &hw->common;
+ if (hw->double_write_mode & 0x10)
+ return;
+
+ if (cm->cur_fb_idx_mmu != INVALID_IDX) {
+ recycle_mmu_buf_tail(hw,
+ ((hw->used_4k_num == -1) &&
+ hw->m_ins_flag) ? 1 : 0);
+ }
+}
+#endif
+
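+/* Release the MMU pages of the frame currently being decoded; not needed for the v4l path or in double-write-only mode. */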
+static void av1_recycle_mmu_buf(struct AV1HW_s *hw)
+{
+ struct AV1_Common_s *const cm = &hw->common;
+
+ if (hw->is_used_v4l)
+ return;
+
+ if (hw->double_write_mode & 0x10)
+ return;
+ if (cm->cur_fb_idx_mmu != INVALID_IDX) {
+ decoder_mmu_box_free_idx(hw->mmu_box,
+ hw->buffer_wrap[cm->cur_fb_idx_mmu]);
+
+ cm->cur_fb_idx_mmu = INVALID_IDX;
+ hw->used_4k_num = -1;
+ }
+}
static void dec_again_process(struct AV1HW_s *hw)
{
-#ifdef CHANGE_REMOVED
amhevc_stop();
hw->dec_result = DEC_RESULT_AGAIN;
if (hw->process_state ==
}
reset_process_time(hw);
vdec_schedule_work(&hw->work);
-#endif
}
static void read_film_grain_reg(struct AV1HW_s *hw)
{
- AV1_COMMON *cm = &hw->pbi->common;
+ AV1_COMMON *cm = &hw->common;
int i;
if (cm->cur_frame == NULL) {
av1_print(hw, AOM_DEBUG_HW_MORE, "%s, cur_frame not exist!!!\n", __func__);
static void config_film_grain_reg(struct AV1HW_s *hw, int film_grain_params_ref_idx)
{
- AV1_COMMON *cm = &hw->pbi->common;
+ AV1_COMMON *cm = &hw->common;
int i;
unsigned char found = 0;
RefCntBuffer *buf;
void config_next_ref_info_hw(struct AV1HW_s *hw)
{
int j;
- AV1_COMMON *const cm = &hw->pbi->common;
+ AV1_COMMON *const cm = &hw->common;
av1_set_next_ref_frame_map(hw->pbi);
if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SC2)
WRITE_VREG(HEVC_PARSER_MEM_WR_ADDR, 0x11a0);
else
WRITE_VREG(HEVC_PARSER_MEM_WR_ADDR, 0x1000);
- for (j = 0; j < 12; j++) {
- unsigned int info =
- av1_get_next_used_ref_info(cm, j);
- WRITE_VREG(HEVC_PARSER_MEM_RW_DATA, info);
- av1_print(hw, AOM_DEBUG_HW_MORE,
- "config next ref info %d 0x%x\n", j, info);
+
+ for (j = 0; j < 12; j++) {
+ unsigned int info =
+ av1_get_next_used_ref_info(cm, j);
+
+ WRITE_VREG(HEVC_PARSER_MEM_RW_DATA, info);
+ av1_print(hw, AOM_DEBUG_HW_MORE,
+ "config next ref info %d 0x%x\n", j, info);
}
}
uint32_t refcanvas_array[2];
uint32_t orderhint_bits;
unsigned char is_inter;
- AV1_COMMON *cm = &hw->pbi->common;
+ AV1_COMMON *cm = &hw->common;
PIC_BUFFER_CONFIG *curr_pic_config;
int32_t curr_orderhint;
int cindex0 = LAST_FRAME;
WRITE_VREG(HEVCD_MCRCC_CTL1, 0x2); // reset mcrcc
- is_inter = av1_frame_is_inter(&hw->pbi->common); //((pbi->common.frame_type != KEY_FRAME) && (!pbi->common.intra_only)) ? 1 : 0;
+ is_inter = av1_frame_is_inter(&hw->common); //((pbi->common.frame_type != KEY_FRAME) && (!pbi->common.intra_only)) ? 1 : 0;
if ( !is_inter ) { // I-PIC
//WRITE_VREG(HEVCD_MCRCC_CTL1, 0x1); // remove reset -- disables clock
WRITE_VREG(HEVCD_MCRCC_CTL2, 0xffffffff); // Replace with current-frame canvas
#if 1
//def CHANGE_DONE
AV1Decoder *pbi = hw->pbi;
- AV1_COMMON *const cm = &pbi->common;
+ AV1_COMMON *const cm = pbi->common;
+ struct aml_vcodec_ctx *ctx = (struct aml_vcodec_ctx *)(hw->v4l2_ctx);
int i;
av1_print(hw, AOM_DEBUG_HW_MORE,
hw->has_keyframe = 1;
on_no_keyframe_skiped = 0;
+ if (hw->is_used_v4l && ctx->param_sets_from_ucode)
+ hw->res_ch_flag = 0;
+
//pre_decode_idx = pbi->decode_idx;
if (pbi->bufmgr_proc_count == 0 ||
hw->one_compressed_data_done) {
__func__, hw->chunk->pts, hw->chunk->pts64);
cur_pic_config->pts = hw->chunk->pts;
cur_pic_config->pts64 = hw->chunk->pts64;
+ cur_pic_config->timestamp = hw->chunk->timestamp;
hw->chunk->pts = 0;
- hw->chunk->pts64 = 0;
+ hw->chunk->pts64 = 0;
+ hw->chunk->timestamp = 0;
}
#ifdef DUAL_DECODE
#else
} else {
ret = 0;
}
- if (av1_frame_is_inter(&hw->pbi->common)) {
+ if (av1_frame_is_inter(&hw->common)) {
//if ((pbi->common.frame_type != KEY_FRAME) && (!pbi->common.intra_only)) {
#ifdef DUAL_DECODE
#else
if ((frame_bufs[i].ref_count == 0) &&
(frame_bufs[i].buf.vf_ref == 0) &&
(frame_bufs[i].buf.index != -1)) {
- decoder_mmu_box_free_idx(hw->mmu_box, i);
+ if (pbi->is_used_v4l) {
+ struct internal_comp_buf *ibuf =
+ index_to_icomp_buf(pbi, i);
+
+ decoder_mmu_box_free_idx(ibuf->mmu_box, i);
+ } else {
+ decoder_mmu_box_free_idx(pbi->mmu_box, i);
+ }
}
hw->last_put_idx = -1;
}
params->p.enable_superres = (params->p.seq_flags >> 15) & 0x1;
if (debug & AV1_DEBUG_BUFMGR_MORE) {
- lock_buffer_pool(hw->pbi->common.buffer_pool, flags);
+ lock_buffer_pool(hw->common.buffer_pool, flags);
pr_info("aom_param: (%d)\n", hw->pbi->decode_idx);
//pbi->slice_idx++;
for ( i = 0; i < (RPM_END-RPM_BEGIN); i++) {
if (((i + 1) & 0xf) == 0)
pr_info("\n");
}
- unlock_buffer_pool(hw->pbi->common.buffer_pool, flags);
+ unlock_buffer_pool(hw->common.buffer_pool, flags);
}
return head_type;
}
return av1_bufmgr_postproc(hw->pbi, hw->frame_decoded);
}
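+/* Fill the picture spec reported to the v4l2 layer: visible/coded size scaled by the double-write ratio, plus the DPB depth. */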
+static int vav1_get_ps_info(struct AV1HW_s *hw, struct aml_vdec_ps_infos *ps)
+{
+ int dw_mode = v4l_parser_get_double_write_mode(hw);
+
+ ps->visible_width = hw->frame_width / get_double_write_ratio(hw, dw_mode);
+ ps->visible_height = hw->frame_height / get_double_write_ratio(hw, dw_mode);
+ ps->coded_width = ALIGN(hw->frame_width, 32) / get_double_write_ratio(hw, dw_mode);
+ ps->coded_height = ALIGN(hw->frame_height, 32) / get_double_write_ratio(hw, dw_mode);
+ ps->dpb_size = hw->used_buf_num;
+
+ return 0;
+}
+
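+/* Detect a mid-stream resolution change in v4l mode: report the new picture spec, raise a resolution-change event and flush the decoder through an EOS frame. */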
+static int v4l_res_change(struct AV1HW_s *hw)
+{
+ struct aml_vcodec_ctx *ctx =
+ (struct aml_vcodec_ctx *)(hw->v4l2_ctx);
+ struct AV1_Common_s *const cm = &hw->common;
+ int ret = 0;
+
+ if (ctx->param_sets_from_ucode &&
+ hw->res_ch_flag == 0) {
+ struct aml_vdec_ps_infos ps;
+
+ if ((cm->width != 0 &&
+ cm->height != 0) &&
+ (hw->frame_width != cm->width ||
+ hw->frame_height != cm->height)) {
+
+ av1_print(hw, 0,
+ "%s (%d,%d)=>(%d,%d)\r\n", __func__, cm->width,
+ cm->height, hw->frame_width, hw->frame_height);
+
+ vav1_get_ps_info(hw, &ps);
+ vdec_v4l_set_ps_infos(ctx, &ps);
+ vdec_v4l_res_ch_event(ctx);
+ hw->v4l_params_parsed = false;
+ hw->res_ch_flag = 1;
+ hw->eos = 1;
+ //del_timer_sync(&pbi->timer);
+ notify_v4l_eos(hw_to_vdec(hw));
+ ret = 1;
+ }
+ }
+
+ return ret;
+}
+
static irqreturn_t vav1_isr_thread_fn(int irq, void *data)
{
struct AV1HW_s *hw = (struct AV1HW_s *)data;
unsigned int dec_status = hw->dec_status;
int obu_type;
- int ret;
+ int ret = 0;
/*if (hw->wait_buf)
* pr_info("set wait_buf to 0\r\n");
hw->process_busy = 0;
return IRQ_HANDLED;
} else if (dec_status == AOM_AV1_DEC_PIC_END) {
- struct AV1_Common_s *const cm = &hw->pbi->common;
+ struct AV1_Common_s *const cm = &hw->common;
#if 1
u32 fg_reg0, fg_reg1, num_y_points, num_cb_points, num_cr_points;
WRITE_VREG(HEVC_FGS_IDX, 0);
av1_print(hw, AOM_DEBUG_HW_MORE, "mmu free tail, index %d used_num 0x%x\n",
cm->cur_frame->buf.index, used_4k_num);
decoder_mmu_box_free_idx_tail(hw->mmu_box,
- cm->cur_frame->buf.index, used_4k_num);
+ hw->buffer_wrap[cm->cur_frame->buf.index], used_4k_num);
#ifdef AOM_AV1_MMU_DW
if (hw->dw_mmu_enable) {
used_4k_num =
(READ_VREG(HEVC_SAO_MMU_STATUS2) >> 16);
decoder_mmu_box_free_idx_tail(hw->mmu_box_dw,
- cm->cur_frame->buf.index, used_4k_num);
+ hw->buffer_wrap[cm->cur_frame->buf.index], used_4k_num);
av1_print(hw, AOM_DEBUG_HW_MORE, "dw mmu free tail, index %d used_num 0x%x\n",
cm->cur_frame->buf.index, used_4k_num);
}
if (hw->m_ins_flag)
reset_process_time(hw);
-
if (hw->process_state != PROC_STATE_SENDAGAIN
) {
if (hw->one_compressed_data_done) {
}
}
- ret = av1_continue_decoding(hw, obu_type);
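+ /* v4l mode: once sequence parameters are known, report them (or a resolution change) to the v4l2 layer and retry this input via dec_again_process() */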
+ if (hw->is_used_v4l) {
+ struct aml_vcodec_ctx *ctx =
+ (struct aml_vcodec_ctx *)(hw->v4l2_ctx);
+
+ hw->frame_width = hw->common.seq_params.max_frame_width;
+ hw->frame_height = hw->common.seq_params.max_frame_height;
+
+ if (!v4l_res_change(hw)) {
+ if (ctx->param_sets_from_ucode && !hw->v4l_params_parsed) {
+ struct aml_vdec_ps_infos ps;
+
+ pr_info("set ucode parse\n");
+ vav1_get_ps_info(hw, &ps);
+ /*notice the v4l2 codec.*/
+ vdec_v4l_set_ps_infos(ctx, &ps);
+ hw->v4l_params_parsed = true;
+ hw->postproc_done = 0;
+ hw->process_busy = 0;
+ dec_again_process(hw);
+ return IRQ_HANDLED;
+ }
+ } else {
+ hw->postproc_done = 0;
+ hw->process_busy = 0;
+ dec_again_process(hw);
+ return IRQ_HANDLED;
+ }
+ }
+
+ av1_continue_decoding(hw, obu_type);
hw->postproc_done = 0;
hw->process_busy = 0;
int i;
unsigned int dec_status;
struct AV1HW_s *hw = (struct AV1HW_s *)data;
- //struct AV1_Common_s *const cm = &hw->pbi->common;
+ //struct AV1_Common_s *const cm = &hw->common;
uint debug_tag;
WRITE_VREG(HEVC_ASSIST_MBOX0_CLR_REG, 1);
if (hw->m_ins_flag) {
if (hw_to_vdec(hw)->next_status
== VDEC_STATUS_DISCONNECTED) {
- hw->dec_result = DEC_RESULT_FORCE_EXIT;
- vdec_schedule_work(&hw->work);
- pr_debug(
- "vdec requested to be disconnected\n");
- return;
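+ /* in v4l mode, only force-exit on disconnect once capture streaming has stopped */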
+ struct aml_vcodec_ctx *ctx =
+ (struct aml_vcodec_ctx *)(hw->v4l2_ctx);
+ if (!hw->is_used_v4l || ctx->is_stream_off) {
+ hw->dec_result = DEC_RESULT_FORCE_EXIT;
+ vdec_schedule_work(&hw->work);
+ pr_debug("vdec requested to be disconnected\n");
+ return;
+ }
}
}
if (hw->init_flag == 0) {
INIT_KFIFO(hw->display_q);
INIT_KFIFO(hw->newframe_q);
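+ /* default 1:1 mapping from picture index to mmu/bmmu box index */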
+ for (i = 0; i < FRAME_BUFFERS; i++) {
+ hw->buffer_wrap[i] = i;
+ }
for (i = 0; i < VF_POOL_SIZE; i++) {
const struct vframe_s *vf = &hw->vfpool[i];
pr_info("set pts unstable\n");
}
}
+
return ret;
}
}
hw->need_cache_size = buf_size * SZ_1M;
hw->sc_start_time = get_jiffies_64();
- if (hw->mmu_enable && ((hw->double_write_mode & 0x10) == 0)) {
+ if (hw->mmu_enable) {
int count = FRAME_BUFFERS;
hw->mmu_box = decoder_mmu_box_alloc_box(DRIVER_NAME,
hw->index /* * 2*/, count,
return -ENOMEM;
}
- pbi = av1_decoder_create(&hw->av1_buffer_pool); //&aom_decoder;
+ pbi = av1_decoder_create(&hw->av1_buffer_pool, &hw->common); //&aom_decoder;
hw->pbi = pbi;
if (hw->pbi == NULL) {
pr_info("\nammvdec_av1 device data allocation failed\n");
vfree(hw);
return -ENOMEM;
}
- //hw->pbi->common.buffer_pool = &hw->av1_buffer_pool; //????
+ //hw->common.buffer_pool = &hw->av1_buffer_pool; //????
hw->pbi->private_data = hw;
hw->init_flag = 0;
if (vdec->parallel_dec == 1) {
for (i = 0; i < FRAME_BUFFERS; i++) {
- vdec->free_canvas_ex(hw->pbi->common.buffer_pool->
+ vdec->free_canvas_ex(hw->common.buffer_pool->
frame_bufs[i].buf.y_canvas_index, vdec->id);
- vdec->free_canvas_ex(hw->pbi->common.buffer_pool->
+ vdec->free_canvas_ex(hw->common.buffer_pool->
frame_bufs[i].buf.uv_canvas_index, vdec->id);
}
}
static struct codec_profile_t amvdec_av1_profile_mult;
-static unsigned char get_data_check_sum
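+/* debug helper: CRC32 checksum of the current input chunk */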
+static unsigned int get_data_check_sum
(struct AV1HW_s *hw, int size)
{
- int jj;
int sum = 0;
u8 *data = NULL;
data = ((u8 *)hw->chunk->block->start_virt) +
hw->chunk->offset;
- for (jj = 0; jj < size; jj++)
- sum += data[jj];
+ sum = crc32_le(0, data, size);
if (!hw->chunk->block->is_mapped)
codec_mm_unmap_phyaddr(data);
if (!hw->pic_list_init_done2 || hw->eos)
return ret;
+
if (!hw->first_sc_checked && hw->mmu_enable) {
int size = decoder_mmu_box_sc_check(hw->mmu_box, tvp);
+
hw->first_sc_checked = 1;
av1_print(hw, 0, "av1 cached=%d need_size=%d speed= %d ms\n",
size, (hw->need_cache_size >> PAGE_SHIFT),
ret = CORE_MASK_HEVC;
else
ret = CORE_MASK_VDEC_1 | CORE_MASK_HEVC;
+ }
+
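+ /* v4l mode: hold off scheduling until enough capture buffers are queued, or while a resolution change is still being renegotiated */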
+ if (hw->is_used_v4l) {
+ struct aml_vcodec_ctx *ctx =
+ (struct aml_vcodec_ctx *)(hw->v4l2_ctx);
+
+ if (ctx->param_sets_from_ucode) {
+ if (hw->v4l_params_parsed) {
+ if ((ctx->cap_pool.in < hw->used_buf_num) &&
+ v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) <
+ run_ready_min_buf_num)
+ ret = 0;
+ } else {
+ if ((hw->res_ch_flag == 1) &&
+ ((ctx->state <= AML_STATE_INIT) ||
+ (ctx->state >= AML_STATE_FLUSHING)))
+ ret = 0;
+ }
+ } else if (ctx->cap_pool.in < ctx->dpb_size) {
+ if (v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) <
+ run_ready_min_buf_num)
+ ret = 0;
}
+ }
+
if (ret)
not_run_ready[hw->index] = 0;
else
static void av1_frame_mode_pts_save(struct AV1HW_s *hw)
{
- unsigned int i, valid_pts_diff_cnt, pts_diff_sum;
- unsigned int in_pts_diff, last_valid_pts_diff, calc_dur;
+ u64 i, valid_pts_diff_cnt, pts_diff_sum;
+ u64 in_pts_diff, last_valid_pts_diff, calc_dur;
if ((hw->chunk == NULL) ||
(hw->frame_count && (hw->chunk->pts == 0)) ||
(hw->frame_mode_pts_save[0] == hw->chunk->pts))
return;
av1_print(hw, AV1_DEBUG_OUT_PTS,
- "run_front: pts %d, pts64 %lld\n", hw->chunk->pts, hw->chunk->pts64);
+ "run_front: pts %d, pts64 %lld, ts: %llu\n",
+ hw->chunk->pts, hw->chunk->pts64, hw->chunk->timestamp);
for (i = (FRAME_BUFFERS - 1); i > 0; i--) {
hw->frame_mode_pts_save[i] = hw->frame_mode_pts_save[i - 1];
hw->frame_mode_pts64_save[i] = hw->frame_mode_pts64_save[i - 1];
+ hw->frame_mode_timestamp_save[i] = hw->frame_mode_timestamp_save[i - 1];
}
hw->frame_mode_pts_save[0] = hw->chunk->pts;
hw->frame_mode_pts64_save[0] = hw->chunk->pts64;
+ hw->frame_mode_timestamp_save[0] = hw->chunk->timestamp;
if (hw->first_pts_index < ARRAY_SIZE(hw->frame_mode_pts_save))
hw->first_pts_index++;
/* frame duration check, vdec_secure return for nts problem */
return;
valid_pts_diff_cnt = 0;
pts_diff_sum = 0;
+
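+ /* estimate the input frame interval from differences between consecutive saved pts (or timestamps in v4l mode) */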
for (i = 0; i < FRAME_BUFFERS - 1; i++) {
- if ((hw->frame_mode_pts_save[i] > hw->frame_mode_pts_save[i + 1]) &&
- (hw->frame_mode_pts_save[i + 1] != 0))
- in_pts_diff = hw->frame_mode_pts_save[i]
- - hw->frame_mode_pts_save[i + 1];
- else
- in_pts_diff = 0;
+ if (hw->is_used_v4l) {
+ if ((hw->frame_mode_timestamp_save[i] > hw->frame_mode_timestamp_save[i + 1]) &&
+ (hw->frame_mode_timestamp_save[i + 1] != 0))
+ in_pts_diff = hw->frame_mode_timestamp_save[i]
+ - hw->frame_mode_timestamp_save[i + 1];
+ else
+ in_pts_diff = 0;
+ } else {
+ if ((hw->frame_mode_pts_save[i] > hw->frame_mode_pts_save[i + 1]) &&
+ (hw->frame_mode_pts_save[i + 1] != 0))
+ in_pts_diff = hw->frame_mode_pts_save[i]
+ - hw->frame_mode_pts_save[i + 1];
+ else
+ in_pts_diff = 0;
+ }
if (in_pts_diff < 100 ||
(valid_pts_diff_cnt && (!close_to(in_pts_diff, last_valid_pts_diff, 100))))
pts_diff_sum += in_pts_diff;
}
+
if (!valid_pts_diff_cnt) {
av1_print(hw, 0, "checked no avaliable pts\n");
return;
}
+
calc_dur = PTS2DUR(pts_diff_sum / valid_pts_diff_cnt);
if ((!close_to(calc_dur, hw->frame_dur, 10)) &&
(calc_dur < 9601) && (calc_dur > 800)) {
if (hw->frame_count > FRAME_BUFFERS)
hw->get_frame_dur = true;
}
+
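+ /* keep the averaged timestamp gap so missing output timestamps can be filled in on the v4l path */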
+ if (hw->is_used_v4l) {
+ hw->timestamp_duration =
+ pts_diff_sum / valid_pts_diff_cnt;
+ }
}
static void run_front(struct vdec_s *vdec)
av1_frame_mode_pts_save(hw);
if (debug & PRINT_FLAG_VDEC_STATUS) {
- /*int ii;*/
- av1_print(hw, 0,
- "%s (%d): size 0x%x (0x%x 0x%x) sum 0x%x (%x %x %x %x %x) bytes 0x%x\n",
- __func__,
- hw->frame_count, size,
- hw->chunk ? hw->chunk->size : 0,
- hw->chunk ? hw->chunk->offset : 0,
- hw->chunk ? ((vdec_frame_based(vdec) &&
- (debug & PRINT_FLAG_VDEC_STATUS)) ?
- get_data_check_sum(hw, size) : 0) : 0,
- READ_VREG(HEVC_STREAM_START_ADDR),
- READ_VREG(HEVC_STREAM_END_ADDR),
- READ_VREG(HEVC_STREAM_LEVEL),
- READ_VREG(HEVC_STREAM_WR_PTR),
- READ_VREG(HEVC_STREAM_RD_PTR),
- hw->start_shift_bytes);
-#if 0
if (vdec_frame_based(vdec) && hw->chunk) {
u8 *data = NULL;
if (!hw->chunk->block->is_mapped)
data = codec_mm_vmap(hw->chunk->block->start +
- hw->chunk->offset, 8);
+ hw->chunk->offset, size);
else
data = ((u8 *)hw->chunk->block->start_virt) +
hw->chunk->offset;
- av1_print_cont(hw, 0, "data adr %p:",
- data);
- for (ii = 0; ii < 8; ii++)
- av1_print_cont(hw, 0, "%02x ",
- data[ii]);
+ //print_hex_debug(data, size, size > 64 ? 64 : size);
+ av1_print(hw, 0,
+ "%s: size 0x%x sum 0x%x %02x %02x %02x %02x %02x %02x .. %02x %02x %02x %02x\n",
+ __func__, size, get_data_check_sum(hw, size),
+ data[0], data[1], data[2], data[3],
+ data[4], data[5], data[size - 4],
+ data[size - 3], data[size - 2],
+ data[size - 1]);
+ av1_print(hw, 0,
+ "%s frm cnt (%d): chunk (0x%x 0x%x) (%x %x %x %x %x) bytes 0x%x\n",
+ __func__, hw->frame_count, hw->chunk->size, hw->chunk->offset,
+ READ_VREG(HEVC_STREAM_START_ADDR),
+ READ_VREG(HEVC_STREAM_END_ADDR),
+ READ_VREG(HEVC_STREAM_LEVEL),
+ READ_VREG(HEVC_STREAM_WR_PTR),
+ READ_VREG(HEVC_STREAM_RD_PTR),
+ hw->start_shift_bytes);
if (!hw->chunk->block->is_mapped)
codec_mm_unmap_phyaddr(data);
+ } else {
+ av1_print(hw, 0,
+ "%s (%d): size 0x%x (0x%x 0x%x) (%x %x %x %x %x) bytes 0x%x\n",
+ __func__,
+ hw->frame_count, size,
+ hw->chunk ? hw->chunk->size : 0,
+ hw->chunk ? hw->chunk->offset : 0,
+ READ_VREG(HEVC_STREAM_START_ADDR),
+ READ_VREG(HEVC_STREAM_END_ADDR),
+ READ_VREG(HEVC_STREAM_LEVEL),
+ READ_VREG(HEVC_STREAM_WR_PTR),
+ READ_VREG(HEVC_STREAM_RD_PTR),
+ hw->start_shift_bytes);
}
- av1_print_cont(hw, 0, "\r\n");
-#endif
}
if (vdec->mc_loaded) {
/*firmware have load before,
run_front(vdec);
}
-static void reset(struct vdec_s *vdec)
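+/* Return frame buffers, vf refs and mv buffers to their initial state and clear per-stream decode flags; used by the reset() path below. */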
+static void av1_decode_ctx_reset(struct AV1HW_s *hw)
{
+ struct AV1_Common_s *const cm = &hw->common;
+ struct RefCntBuffer_s *const frame_bufs = cm->buffer_pool->frame_bufs;
+ int i;
+
+ for (i = 0; i < FRAME_BUFFERS; ++i) {
+ frame_bufs[i].ref_count = 0;
+ frame_bufs[i].buf.vf_ref = 0;
+ frame_bufs[i].buf.decode_idx = 0;
+ frame_bufs[i].buf.cma_alloc_addr = 0;
+ frame_bufs[i].buf.index = i;
+ frame_bufs[i].buf.BUF_index = -1;
+ frame_bufs[i].buf.mv_buf_index = -1;
+ }
+ for (i = 0; i < MV_BUFFER_NUM; i++) {
+ if (hw->m_mv_BUF[i].start_adr) {
+ hw->m_mv_BUF[i].used_flag = 0;
+ }
+ }
+
+ hw->one_compressed_data_done = 0;
+ hw->config_next_ref_info_flag = 0;
+ hw->init_flag = 0;
+ hw->first_sc_checked = 0;
+ hw->fatal_error = 0;
+ hw->show_frame_num = 0;
+ hw->postproc_done = 0;
+ hw->process_busy = 0;
+ hw->process_state = 0;
+ hw->frame_decoded = 0;
+ hw->eos = 0;
+}
+
+static void reset(struct vdec_s *vdec)
+{
struct AV1HW_s *hw =
(struct AV1HW_s *)vdec->private;
- av1_print(hw,
- PRINT_FLAG_VDEC_DETAIL, "%s\r\n", __func__);
+ cancel_work_sync(&hw->work);
+ cancel_work_sync(&hw->set_clk_work);
+
+ if (hw->stat & STAT_VDEC_RUN) {
+ amhevc_stop();
+ hw->stat &= ~STAT_VDEC_RUN;
+ }
+
+ if (hw->stat & STAT_TIMER_ARM) {
+ del_timer_sync(&hw->timer);
+ hw->stat &= ~STAT_TIMER_ARM;
+ }
+
+ reset_process_time(hw);
+
+ av1_bufmgr_ctx_reset(hw->pbi, &hw->av1_buffer_pool, &hw->common);
+ hw->pbi->private_data = hw;
+
+ av1_local_uninit(hw);
+ if (vav1_local_init(hw) < 0)
+ av1_print(hw, 0, "%s local_init failed \r\n", __func__);
+ av1_decode_ctx_reset(hw);
+
+ av1_print(hw, PRINT_FLAG_VDEC_DETAIL, "%s\r\n", __func__);
}
static irqreturn_t av1_irq_cb(struct vdec_s *vdec, int irq)
{
struct AV1HW_s *hw =
(struct AV1HW_s *)vdec->private;
- struct AV1_Common_s *const cm = &hw->pbi->common;
+ struct AV1_Common_s *const cm = &hw->common;
int i;
av1_print(hw, 0, "====== %s\n", __func__);
return -ENOMEM;
}
- hw->pbi = av1_decoder_create(&hw->av1_buffer_pool); //&aom_decoder;
+ hw->pbi = av1_decoder_create(&hw->av1_buffer_pool, &hw->common); //&aom_decoder;
if (hw->pbi == NULL) {
av1_print(hw, 0, "\nammvdec_av1 device data allocation failed\n");
release_dblk_struct(hw);
vfree(hw);
return -ENOMEM;
}
+
hw->pbi->private_data = hw;
/* the ctx from v4l2 driver. */
hw->v4l2_ctx = pdata->private;
hw->max_pic_h = av1_buf_height;
av1_print(hw, 0, "use buf resolution\n");
}
+
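+ /* per-instance options handed down from the v4l2 wrapper via the config string */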
+ if (get_config_int(pdata->config,
+ "parm_v4l_codec_enable",
+ &config_val) == 0)
+ hw->is_used_v4l = config_val;
+
+ if (get_config_int(pdata->config,
+ "parm_v4l_buffer_margin",
+ &config_val) == 0)
+ hw->dynamic_buf_num_margin = config_val;
+
+ if (get_config_int(pdata->config,
+ "parm_v4l_canvas_mem_mode",
+ &config_val) == 0)
+ hw->mem_map_mode = config_val;
#endif
if (get_config_int(pdata->config, "HDRStaticInfo",
&vf_dp.present_flag) == 0
hw->max_pic_h = 4608;
hw->double_write_mode = double_write_mode;
}
+
+ if (!hw->is_used_v4l) {
+ hw->mem_map_mode = mem_map_mode;
+ }
+
if (is_oversize(hw->max_pic_w, hw->max_pic_h)) {
pr_err("over size: %dx%d, probe failed\n",
hw->max_pic_w, hw->max_pic_h);
return -1;
}
- hw->mmu_enable = 1;
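+ /* the frame-buffer MMU (compressed output) is only enabled on GXL or newer and is skipped in double-write-only (0x10) mode */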
+ if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_GXL ||
+ hw->double_write_mode == 0x10)
+ hw->mmu_enable = 0;
+ else
+ hw->mmu_enable = 1;
+
video_signal_type = hw->video_signal_type;
- hw->is_used_v4l = (((unsigned long)
- hw->vav1_amstream_dec_info.param & 0x80) >> 7);
- if (hw->is_used_v4l) {
- hw->double_write_mode = 0x10;
- hw->mmu_enable = 0;
- hw->max_pic_w = 1920;
- hw->max_pic_h = 1080;
+ if (pdata->sys_info) {
+ hw->vav1_amstream_dec_info = *pdata->sys_info;
+ if ((unsigned long) hw->vav1_amstream_dec_info.param
+ & 0x08) {
+ hw->low_latency_flag = 1;
+ } else
+ hw->low_latency_flag = 0;
+ } else {
+ hw->vav1_amstream_dec_info.width = 0;
+ hw->vav1_amstream_dec_info.height = 0;
+ hw->vav1_amstream_dec_info.rate = 30;
}
+
#ifdef AOM_AV1_MMU_DW
hw->dw_mmu_enable =
get_double_write_mode_init(hw) & 0x20 ? 1 : 0;
if (vdec->parallel_dec == 1) {
for (i = 0; i < FRAME_BUFFERS; i++) {
vdec->free_canvas_ex
- (hw->pbi->common.buffer_pool->frame_bufs[i].buf.y_canvas_index,
+ (hw->common.buffer_pool->frame_bufs[i].buf.y_canvas_index,
vdec->id);
vdec->free_canvas_ex
- (hw->pbi->common.buffer_pool->frame_bufs[i].buf.uv_canvas_index,
+ (hw->common.buffer_pool->frame_bufs[i].buf.uv_canvas_index,
vdec->id);
}
}
-
#ifdef DEBUG_PTS
pr_info("pts missed %ld, pts hit %ld, duration %d\n",
hw->pts_missed, hw->pts_hit, hw->frame_dur);