static bool mc_mem_alloc;
+/* Read-only accessor: report whether motion-compensation (mc) memory
+ * has been allocated for DI buffers (see file-scope mc_mem_alloc).
+ */
+bool dim_get_mcmem_alloc(void)
+{
+ return mc_mem_alloc;
+}
static unsigned int di_pre_rdma_enable;
/**************************************
}
/*--------------------------*/
-u8 *dim_vmap(ulong addr, u32 size, bool *bflg)
-{
- u8 *vaddr = NULL;
- ulong phys = addr;
- u32 offset = phys & ~PAGE_MASK;
- u32 npages = PAGE_ALIGN(size) / PAGE_SIZE;
- struct page **pages = NULL;
- pgprot_t pgprot;
- int i;
-
- if (!PageHighMem(phys_to_page(phys)))
- return phys_to_virt(phys);
-
- if (offset)
- npages++;
-
- pages = vmalloc(sizeof(struct page *) * npages);
- if (!pages)
- return NULL;
-
- for (i = 0; i < npages; i++) {
- pages[i] = phys_to_page(phys);
- phys += PAGE_SIZE;
- }
-
- /*nocache*/
- pgprot = pgprot_writecombine(PAGE_KERNEL);
-
- vaddr = vmap(pages, npages, VM_MAP, pgprot);
- if (!vaddr) {
- PR_ERR("the phy(%lx) vmaped fail, size: %d\n",
- addr - offset, npages << PAGE_SHIFT);
- vfree(pages);
- return NULL;
- }
-
- vfree(pages);
-#if 0
- if (debug_mode & 0x20) {
- dim_print("[HIGH-MEM-MAP] %s, pa(%lx) to va(%p), size: %d\n",
- __func__, addr, vaddr + offset,
- npages << PAGE_SHIFT);
- }
-#endif
- *bflg = true;
-
- return vaddr + offset;
-}
-
-void dim_unmap_phyaddr(u8 *vaddr)
-{
- void *addr = (void *)(PAGE_MASK & (ulong)vaddr);
- vunmap(addr);
-}
/*--------------------------*/
ssize_t
struct di_buf_s *pbuf_post;
struct di_buf_s *pbuf_local;
struct di_post_stru_s *ppost;
- struct di_mm_s *mm = dim_mm_get();/*mm-0705*/
+ struct di_mm_s *mm;
/*************************/
buf_orig = kstrdup(buf, GFP_KERNEL);
return 0;
}
di_pr_info("c_post:ch[%d],index[%d]\n", channel, indx);
+ mm = dim_mm_get(channel);
ppre = get_pre_stru(channel);
ppost = get_post_stru(channel);
}
#endif
-
-#ifdef CONFIG_CMA
-/**********************************************************
- * ./include/linux/amlogic/media/codec_mm/codec_mm.h:
- * unsigned long codec_mm_alloc_for_dma(const char *owner,
- * int page_cnt,
- * int align2n,
- * int memflags);
- * int codec_mm_free_for_dma(const char *owner,
- * unsigned long phy_addr);
- * void *codec_mm_phys_to_virt(unsigned long phy_addr);
- ***********************************************************/
-
-#define TVP_MEM_PAGES 0xffff
-/**********************************************************
- * alloc mm from codec mm
- * o: out:
- * return:
- * true: seccuss
- * false: failed
- ***********************************************************/
-static bool mm_codec_alloc(const char *owner, size_t count,
- int cma_mode,
- struct dim_mm_s *o)
-{
- int flags = 0;
- bool istvp = false;
-
- if (codec_mm_video_tvp_enabled()) {
- istvp = true;
- flags |= CODEC_MM_FLAGS_TVP;
- } else {
- flags |= CODEC_MM_FLAGS_RESERVED | CODEC_MM_FLAGS_CPU;
- }
-
- if (cma_mode == 4 && !istvp)
- flags = CODEC_MM_FLAGS_CMA_FIRST |
- CODEC_MM_FLAGS_CPU;
-
- o->addr = codec_mm_alloc_for_dma(owner,
- count,
- 0,
- flags);
-
- if (o->addr == 0) {
- /*failed*/
- PR_ERR("%s: failed\n", __func__);
- return false;
- }
-
- if (istvp)
- o->ppage = (struct page *)TVP_MEM_PAGES;
- else
- o->ppage = codec_mm_phys_to_virt(o->addr);
-
- /*PR_INF("%s:page:0x%p,add:0x%lx\n", __func__, o->ppage, o->addr);*/
- return true;
-}
-
-/**********************************************************
- * ./include/linux/dma-contiguous.h:
- * struct page *dma_alloc_from_contiguous(struct device *dev,
- * size_t count,
- * unsigned int order);
- * bool dma_release_from_contiguous(struct device *dev,
- * struct page *pages,
- * int count);
- *
- ***********************************************************/
-
-/**********************************************************
- * alloc mm by cma
- * o: out:
- * return:
- * true: seccuss
- * false: failed
- ***********************************************************/
-static bool mm_cma_alloc(struct device *dev, size_t count,
- struct dim_mm_s *o)
-{
- o->ppage = dma_alloc_from_contiguous(dev, count, 0);
- if (o->ppage) {
- o->addr = page_to_phys(o->ppage);
- return true;
- }
- PR_ERR("%s: failed\n", __func__);
- return false;
-}
-
-bool dim_mm_alloc(int cma_mode, size_t count, struct dim_mm_s *o)
-{
- struct di_dev_s *de_devp = get_dim_de_devp();
- bool ret;
-
- if (cma_mode == 3 || cma_mode == 4)
- ret = mm_codec_alloc(DEVICE_NAME,
- count,
- cma_mode,
- o);
- else
- ret = mm_cma_alloc(&de_devp->pdev->dev, count, o);
-
- return ret;
-}
-
-bool dim_mm_release(int cma_mode,
- struct page *pages,
- int count,
- unsigned long addr)
-{
- struct di_dev_s *de_devp = get_dim_de_devp();
- bool ret = true;
-
- if (cma_mode == 3 || cma_mode == 4)
- codec_mm_free_for_dma(DEVICE_NAME, addr);
- else
- ret = dma_release_from_contiguous(&de_devp->pdev->dev,
- pages,
- count);
- return ret;
-}
-
-/***********************************************************/
-unsigned int dim_cma_alloc_total(struct di_dev_s *de_devp)
-{
- /*struct di_dev_s *de_devp = get_dim_de_devp();*/
- /*****************************************************/
- struct dim_mm_s omm;
- bool ret;
-
- ret = dim_mm_alloc(de_devp->flag_cma,
- de_devp->mem_size >> PAGE_SHIFT, &omm);
-
- if (!ret) /*failed*/
- return 0;
-
- de_devp->total_pages = omm.ppage;
- de_devp->mem_start = omm.addr;
-
- if (de_devp->flag_cma != 0 && de_devp->nrds_enable) {
- dim_nr_ds_buf_init(de_devp->flag_cma, 0,
- &de_devp->pdev->dev);
- }
-
- return 1;
-}
-
-static unsigned int di_cma_alloc(struct di_dev_s *devp, unsigned int channel)
-{
- unsigned int start_time, end_time, delta_time;
- struct di_buf_s *buf_p = NULL;
- int itmp, alloc_cnt = 0;
- struct di_pre_stru_s *ppre = get_pre_stru(channel);
- struct di_dev_s *de_devp = get_dim_de_devp();
-
- unsigned int tmpa[MAX_FIFO_SIZE]; /*new que*/
- unsigned int psize; /*new que*/
- struct di_mm_s *mm = dim_mm_get();/*mm-0705*/
-
- bool aret;
- struct dim_mm_s omm;
-
- start_time = jiffies_to_msecs(jiffies);
- queue_for_each_entry(buf_p, channel, QUEUE_LOCAL_FREE, list) {
- if (buf_p->pages) {
- PR_ERR("1:%s:buf[%d] page:0x%p alloced skip\n",
- __func__, buf_p->index, buf_p->pages);
- continue;
- }
-
- aret = dim_mm_alloc(devp->flag_cma,
- devp->buffer_size >> PAGE_SHIFT,
- &omm);
-
- if (!aret) {
- buf_p->pages = NULL;
- PR_ERR("2:%s: alloc failed %d fail.\n",
- __func__, buf_p->index);
- return 0;
- }
-
- buf_p->pages = omm.ppage;
- buf_p->nr_adr = omm.addr;
- alloc_cnt++;
- mm->sts.num_local++;
- if (dimp_get(eDI_MP_cma_print))
- PR_INF("CMA allocate buf[%d]page:0x%p\n",
- buf_p->index, buf_p->pages);
-
- if (dimp_get(eDI_MP_cma_print))
- pr_info(" addr 0x%lx ok.\n", buf_p->nr_adr);
- if (ppre->buf_alloc_mode == 0) {
- buf_p->mtn_adr = buf_p->nr_adr +
- ppre->nr_size;
- buf_p->cnt_adr = buf_p->nr_adr +
- ppre->nr_size +
- ppre->mtn_size;
- if (mc_mem_alloc) {
- buf_p->mcvec_adr = buf_p->nr_adr +
- ppre->nr_size +
- ppre->mtn_size +
- ppre->count_size;
- buf_p->mcinfo_adr =
- buf_p->nr_adr +
- ppre->nr_size +
- ppre->mtn_size +
- ppre->count_size +
- ppre->mv_size;
- }
- }
- }
- PR_INF("%s:num_local[%d]:[%d]\n", __func__, mm->sts.num_local,
- alloc_cnt);
- alloc_cnt = 0;
- if (dimp_get(eDI_MP_post_wr_en) && dimp_get(eDI_MP_post_wr_support)) {
- di_que_list(channel, QUE_POST_FREE, &tmpa[0], &psize);
-
- for (itmp = 0; itmp < psize; itmp++) {
- buf_p = pw_qindex_2_buf(channel, tmpa[itmp]);
-
- if (buf_p->pages) {
- PR_ERR("3:%s:buf[%d] page:0x%p skip\n",
- __func__,
- buf_p->index, buf_p->pages);
- continue;
- }
-
- aret = dim_mm_alloc(devp->flag_cma,
- devp->post_buffer_size >> PAGE_SHIFT,
- &omm);
-
- if (!aret) {
- buf_p->pages = NULL;
- PR_ERR("4:%s: alloc failed %d fail.\n",
- __func__, buf_p->index);
- return 0;
- }
-
- buf_p->pages = omm.ppage;
- buf_p->nr_adr = omm.addr;
- mm->sts.num_post++;
- alloc_cnt++;
- if (dimp_get(eDI_MP_cma_print))
- PR_INF("%s:pbuf[%d]page:0x%p\n",
- __func__,
- buf_p->index, buf_p->pages);
- if (dimp_get(eDI_MP_cma_print))
- pr_info(" addr 0x%lx ok.\n", buf_p->nr_adr);
- }
- PR_INF("%s:num_pst[%d]:%d\n", __func__, mm->sts.num_post,
- alloc_cnt);
- }
- if (de_devp->flag_cma != 0 && de_devp->nrds_enable) {
- dim_nr_ds_buf_init(de_devp->flag_cma, 0,
- &de_devp->pdev->dev);
- }
-
- end_time = jiffies_to_msecs(jiffies);
- delta_time = end_time - start_time;
- pr_info("%s:alloc use %u ms(%u~%u)\n",
- __func__, delta_time, start_time, end_time);
- return 1;
-}
-
-static void di_cma_release(struct di_dev_s *devp, unsigned int channel)
-{
- unsigned int i, ii, rels_cnt = 0, start_time, end_time, delta_time;
- struct di_buf_s *buf_p;
- struct di_buf_s *pbuf_local = get_buf_local(channel);
- struct di_buf_s *pbuf_post = get_buf_post(channel);
-/* struct di_post_stru_s *ppost = get_post_stru(channel);*/
- struct di_dev_s *de_devp = get_dim_de_devp();
- bool ret;
- struct di_mm_s *mm = dim_mm_get();
-
- start_time = jiffies_to_msecs(jiffies);
- for (i = 0; (i < mm->cfg.num_local); i++) {
- buf_p = &pbuf_local[i];
- ii = USED_LOCAL_BUF_MAX;
-
- if ((ii >= USED_LOCAL_BUF_MAX) &&
- buf_p->pages) {
- ret = dim_mm_release(devp->flag_cma, buf_p->pages,
- devp->buffer_size >> PAGE_SHIFT,
- buf_p->nr_adr);
- if (ret) {
- buf_p->pages = NULL;
- mm->sts.num_local--;
- rels_cnt++;
- if (dimp_get(eDI_MP_cma_print))
- pr_info(
- "DI release buf[%d] ok.\n", i);
- } else {
- PR_ERR("%s:release buf[%d] fail.\n",
- __func__, i);
- }
- } else {
- if (!IS_ERR_OR_NULL(buf_p->pages) &&
- dimp_get(eDI_MP_cma_print)) {
- pr_info("DI buf[%d] page:0x%p no release.\n",
- buf_p->index, buf_p->pages);
- }
- }
- }
- if (dimp_get(eDI_MP_post_wr_en) && dimp_get(eDI_MP_post_wr_support)) {
- /*mm-0705 for (i = 0; i < ppost->di_post_num; i++) {*/
- for (i = 0; i < mm->cfg.num_post; i++) {
- buf_p = &pbuf_post[i];
-
- if (di_que_is_in_que(channel, QUE_POST_KEEP, buf_p))
- continue;
-
- if (!buf_p->pages) {
- PR_INF("2:%s:post buf[%d] is null\n",
- __func__, i);
- continue;
- }
-
- ret = dim_mm_release(devp->flag_cma,
- buf_p->pages,
- devp->post_buffer_size >> PAGE_SHIFT,
- buf_p->nr_adr);
- if (ret) {
- buf_p->pages = NULL;
- mm->sts.num_post--;
- rels_cnt++;
- if (dimp_get(eDI_MP_cma_print))
- pr_info(
- "DI release post buf[%d] ok.\n", i);
- } else {
- PR_ERR("%s:release post buf[%d] fail\n",
- __func__, i);
- }
- }
- }
- if (de_devp->nrds_enable) {
- dim_nr_ds_buf_uninit(de_devp->flag_cma,
- &de_devp->pdev->dev);
- }
- if (mm->sts.num_local < 0 || mm->sts.num_post < 0)
- PR_ERR("%s:mm:nub_local=%d,nub_post=%d\n",
- __func__,
- mm->sts.num_local,
- mm->sts.num_post);
- end_time = jiffies_to_msecs(jiffies);
- delta_time = end_time - start_time;
- pr_info("%s:release %u buffer use %u ms(%u~%u)\n",
- __func__, rels_cnt, delta_time, start_time, end_time);
-}
-#endif
-
-bool dim_cma_top_alloc(unsigned int ch)
-{
- struct di_dev_s *de_devp = get_dim_de_devp();
- bool ret = false;
-
-#ifdef CONFIG_CMA
-
- if (di_cma_alloc(de_devp, ch))
- ret = true;
-#endif
- return ret;
-}
-
-bool dim_cma_top_release(unsigned int ch)
-{
- struct di_dev_s *de_devp = get_dim_de_devp();
-
-#ifdef CONFIG_CMA
- di_cma_release(de_devp, ch);
-#endif
- return true;
-}
-
+//----begin
#ifdef DIM_HIS /*no use*/
/*******************************************
*
struct di_buf_s *keep_buf = ppost->keep_buf;
struct di_dev_s *de_devp = get_dim_de_devp();
/* struct di_buf_s *keep_buf_post = ppost->keep_buf_post;*/
- struct di_mm_s *mm = dim_mm_get(); /*mm-0705*/
+ struct di_mm_s *mm = dim_mm_get(channel); /*mm-0705*/
unsigned int mem_st_local;
if (prog_flag) {
ppre->prog_proc_type = 1;
- ppre->buf_alloc_mode = 1;
+ mm->cfg.buf_alloc_mode = 1;
di_buf_size = nr_width * canvas_height * 2;
di_buf_size = roundup(di_buf_size, PAGE_SIZE);
} else {
/*pr_info("canvas_height=%d\n", canvas_height);*/
ppre->prog_proc_type = 0;
- ppre->buf_alloc_mode = 0;
+ mm->cfg.buf_alloc_mode = 0;
/*nr_size(bits) = w * active_h * 8 * 2(yuv422)
* mtn(bits) = w * active_h * 4
* cont(bits) = w * active_h * 4 mv(bits) = w * active_h / 5*16
}
di_buf_size = roundup(di_buf_size, PAGE_SIZE);
}
- de_devp->buffer_size = di_buf_size;
- ppre->nr_size = nr_size;
- ppre->count_size = count_size;
- ppre->mtn_size = mtn_size;
- ppre->mv_size = mv_size;
- ppre->mcinfo_size = mc_size;
+ /*de_devp->buffer_size = di_buf_size;*/
+ mm->cfg.size_local = di_buf_size;
+
+ mm->cfg.nr_size = nr_size;
+ mm->cfg.count_size = count_size;
+ mm->cfg.mtn_size = mtn_size;
+ mm->cfg.mv_size = mv_size;
+ mm->cfg.mcinfo_size = mc_size;
+
dimp_set(eDI_MP_same_field_top_count, 0);
same_field_bot_count = 0;
dbg_init("size:\n");
- dbg_init("\t%-15s:0x%x\n", "nr_size", ppre->nr_size);
- dbg_init("\t%-15s:0x%x\n", "count", ppre->count_size);
- dbg_init("\t%-15s:0x%x\n", "mtn", ppre->mtn_size);
- dbg_init("\t%-15s:0x%x\n", "mv", ppre->mv_size);
- dbg_init("\t%-15s:0x%x\n", "mcinfo", ppre->mcinfo_size);
+ dbg_init("\t%-15s:0x%x\n", "nr_size", mm->cfg.nr_size);
+ dbg_init("\t%-15s:0x%x\n", "count", mm->cfg.count_size);
+ dbg_init("\t%-15s:0x%x\n", "mtn", mm->cfg.mtn_size);
+ dbg_init("\t%-15s:0x%x\n", "mv", mm->cfg.mv_size);
+ dbg_init("\t%-15s:0x%x\n", "mcinfo", mm->cfg.mcinfo_size);
/**********************************************/
/* que init */
di_buf_size * i + nr_size +
mtn_size + count_size
+ mv_size;
+ if (cfgeq(mem_flg, eDI_MEM_M_rev) ||
+ cfgeq(mem_flg, eDI_MEM_M_cma_all))
+ dim_mcinfo_v_alloc(di_buf,
+ mm->cfg.mcinfo_size);
}
di_buf->canvas_config_flag = 2;
}
}
}
- if (de_devp->flag_cma == 1 ||
- de_devp->flag_cma == 4 ||
- de_devp->flag_cma == 3) { /*trig cma alloc*/
+ if (cfgeq(mem_flg, eDI_MEM_M_cma) ||
+ cfgeq(mem_flg, eDI_MEM_M_codec_a) ||
+ cfgeq(mem_flg, eDI_MEM_M_codec_b)) { /*trig cma alloc*/
dip_wq_cma_run(channel, true);
}
/*mm-0705 ppost->di_post_num = MAX_POST_BUF_NUM;*/
di_post_buf_size = 0;
}
- de_devp->post_buffer_size = di_post_buf_size;
+ /*de_devp->post_buffer_size = di_post_buf_size;*/
+ mm->cfg.size_post = di_post_buf_size;
/**********************************************/
/* input buf init */
continue;
}
}
-
+ tmp_page = di_buf->pages;
memset(di_buf, 0, sizeof(struct di_buf_s));
+ di_buf->pages = tmp_page;
di_buf->type = VFRAME_TYPE_POST;
di_buf->index = i;
di_buf->vframe = &pvframe_post[i];
PR_ERR("%s:%d:post buf is null\n", __func__, i);
}
}
- if (de_devp->flag_cma == 0 && de_devp->nrds_enable) {
+ if (cfgeq(mem_flg, eDI_MEM_M_rev) && de_devp->nrds_enable) {
nrds_mem = di_post_mem + mm->cfg.num_post * di_post_buf_size;
/*mm-0705 ppost->di_post_num * di_post_buf_size;*/
- dim_nr_ds_buf_init(de_devp->flag_cma, nrds_mem,
+ dim_nr_ds_buf_init(cfgg(mem_flg), nrds_mem,
&de_devp->pdev->dev);
}
return 0;
PR_WARN("%s:ch[%d]:not post\n", __func__, di_buf->channel);
return;
}
- dbg_keep("release keep ch[%d],index[%d]\n",
- di_buf->channel,
- di_buf->index);
- task_send_cmd(LCMD2(ECMD_RL_KEEP, di_buf->channel, di_buf->index));
+
+ if (!di_que_is_in_que(di_buf->channel, QUE_POST_KEEP, di_buf)) {
+ PR_ERR("%s:not keep buf %d\n", __func__, di_buf->index);
+ } else {
+ dbg_keep("release keep ch[%d],index[%d]\n",
+ di_buf->channel,
+ di_buf->index);
+ task_send_cmd(LCMD2(ECMD_RL_KEEP, di_buf->channel,
+ di_buf->index));
+ }
}
EXPORT_SYMBOL(dim_post_keep_cmd_release2);
ppost->de_post_process_done = 0;
ppost->post_wr_cnt = 0;
}
- if (de_devp->flag_cma == 0 && de_devp->nrds_enable) {
- dim_nr_ds_buf_uninit(de_devp->flag_cma,
+ if (cfgeq(mem_flg, eDI_MEM_M_rev) && de_devp->nrds_enable) {
+ dim_nr_ds_buf_uninit(cfgg(mem_flg),
&de_devp->pdev->dev);
}
}
struct vframe_s **pvframe_in = get_vframe_in(channel);
struct di_pre_stru_s *ppre = get_pre_stru(channel);
struct di_post_stru_s *ppost = get_post_stru(channel);
- struct di_dev_s *de_devp = get_dim_de_devp();
+ /*struct di_dev_s *de_devp = get_dim_de_devp();*/
unsigned int tmpa[MAX_FIFO_SIZE]; /*new que*/
unsigned int psize; /*new que*/
- struct di_mm_s *mm = dim_mm_get(); /*mm-0705*/
+ struct di_mm_s *mm = dim_mm_get(channel); /*mm-0705*/
dump_state_flag = 1;
pr_info("version %s, init_flag %d, is_bypass %d\n",
recovery_log_queue_idx, recovery_log_di_buf);
pr_info("buffer_size=%d, mem_flag=%s, cma_flag=%d\n",
/*atomic_read(&de_devp->mem_flag)*/
- de_devp->buffer_size, di_cma_dbg_get_st_name(channel),
- de_devp->flag_cma);
+ mm->cfg.size_local, di_cma_dbg_get_st_name(channel),
+ cfgg(mem_flg));
keep_buf = ppost->keep_buf;
pr_info("used_post_buf_index %d(0x%p),",
IS_ERR_OR_NULL(keep_buf) ?
}
pr_info("post_doing_list:\n");
- queue_for_each_entry(p, channel, QUEUE_POST_DOING, list) {
+ //queue_for_each_entry(p, channel, QUEUE_POST_DOING, list) {
+ di_que_list(channel, QUE_POST_DOING, &tmpa[0], &psize);
+ for (itmp = 0; itmp < psize; itmp++) {
+ p = pw_qindex_2_buf(channel, tmpa[itmp]);
dim_print_di_buf(p, 2);
}
pr_info("pre_ready_list:\n");
is_meson_txhd_cpu())
dimh_mc_pre_mv_irq();
dimh_calc_lmv_base_mcinfo((ppre->cur_height >> 1),
- ppre->di_wr_buf->mcinfo_adr,
- ppre->mcinfo_size);
+ ppre->di_wr_buf->mcinfo_adr_v,
+ /*ppre->mcinfo_size*/0);
}
get_ops_nr()->nr_process_in_irq();
if ((data32 & 0x200) && de_devp->nrds_enable)
is_meson_tl1_cpu() ||
is_meson_tm2_cpu() ||
is_meson_sm1_cpu()) {
- if (di_cfg_top_get(eDI_CFG_ref_2) &&
+ if (di_cfg_top_get(EDI_CFG_ref_2) &&
mc_pre_flag &&
dimp_get(eDI_MP_post_wr_en)) { /*OTT-3210*/
dbg_once("mc_old=%d\n", mc_pre_flag);
struct di_post_stru_s *ppost = get_post_stru(channel);
struct di_dev_s *de_devp = get_dim_de_devp();
- if (!ppost->cur_post_buf)
+ if (!ppost->cur_post_buf) {
+ PR_ERR("%s:no cur\n", __func__);
return;
+ }
dbg_post_cnt(channel, "pd1");
/*dbg*/
dim_ddbg_mod_save(eDI_DBG_MOD_POST_DB, channel, ppost->frame_cnt);
+ di_buf = ppost->cur_post_buf;
di_lock_irqfiq_save(irq_flag2);
queue_out(channel, ppost->cur_post_buf);/*? which que?post free*/
- di_buf = ppost->cur_post_buf;
+
if (de_devp->pps_enable && dimp_get(eDI_MP_pps_position) == 0) {
di_buf->vframe->width = dimp_get(eDI_MP_pps_dstw);
if (dimp_get(eDI_MP_post_wr_en) &&
dimp_get(eDI_MP_post_wr_support))
- queue_in(channel, di_buf, QUEUE_POST_DOING);
+ //queue_in(channel, di_buf, QUEUE_POST_DOING);
+ di_que_in(channel, QUE_POST_DOING, di_buf);
else
di_que_in(channel, QUE_POST_READY, di_buf);
if (di_que_is_empty(channel, QUE_POST_FREE))
return 0;
/*add : for now post buf only 3.*/
- if (list_count(channel, QUEUE_POST_DOING) > 2)
+ //if (list_count(channel, QUEUE_POST_DOING) > 2)
+ if (di_que_list_count(channel, QUE_POST_DOING) > 2)
return 0;
#else
/*for post write mode ,need reserved a post free buf;*/
#endif
pr_info("%s:\n", __func__);
set_init_flag(channel, false); /*init_flag = 0;*/
+ dim_sumx_clear(channel);
/*mirror_disable = get_blackout_policy();*/
mirror_disable = 0;
di_lock_irqfiq_save(irq_flag2);
recovery_flag = 0;
ppre->cur_prog_flag = 0;
- if (de_devp->flag_cma == 1 ||
- de_devp->flag_cma == 3 ||
- de_devp->flag_cma == 4)
+ if (cfgeq(mem_flg, eDI_MEM_M_cma) ||
+ cfgeq(mem_flg, eDI_MEM_M_codec_a) ||
+ cfgeq(mem_flg, eDI_MEM_M_codec_b))
dip_wq_cma_run(channel, false);
sum_g_clear(channel);
dbg_reg("%s:end\n", __func__);
}
+#ifdef HIS_CODE
void dim_unreg_process_irq(unsigned int channel)
{
ulong irq_flag2 = 0;
#endif
}
+#endif
void diext_clk_b_sw(bool on)
{
dim_print("%s: vframe come => di_init_buf\n", __func__);
- if (de_devp->flag_cma == 0 && !de_devp->mem_flg)
+ if (cfgeq(mem_flg, eDI_MEM_M_rev) && !de_devp->mem_flg)
dim_rev_mem_check();
/*(is_progressive(vframe) && (prog_proc_config & 0x10)) {*/
__func__, vf);
return;
}
-#if 0
- if (ppost->keep_buf == di_buf) {
- pr_info("[DI]recycle buffer %d, get cnt %d.\n",
- di_buf->index, disp_frame_count);
- recycle_keep_buffer(channel);
- }
-#else
-
-#endif
-
- if (di_buf->type == VFRAME_TYPE_POST
- ) {
-#if 0
- dim_print("%s:put:%d\n", __func__, di_buf->index);
+ if (di_buf->type == VFRAME_TYPE_POST) {
di_lock_irqfiq_save(irq_flag2);
if (is_in_queue(channel, di_buf, QUEUE_DISPLAY)) {
- if (!atomic_dec_and_test(&di_buf->di_cnt))
- dim_print("%s,di_cnt > 0\n", __func__);
- dim_print("%s:put:%d\n", __func__, di_buf->index);
- recycle_vframe_type_post(di_buf, channel);
+ di_buf->queue_index = -1;
+ di_que_in(channel, QUE_POST_BACK, di_buf);
+ di_unlock_irqfiq_restore(irq_flag2);
} else {
- dim_print("%s: %s[%d] not in display list\n", __func__,
- vframe_type_name[di_buf->type],
- di_buf->index);
- }
di_unlock_irqfiq_restore(irq_flag2);
-#ifdef DI_BUFFER_DEBUG
- recycle_vframe_type_post_print(di_buf, __func__, __LINE__);
-#endif
-#else
- #ifdef DIM_HIS /*no use*/
- pw_queue_in(channel, QUE_POST_BACK, di_buf->index);
- #else
- di_buf->queue_index = -1;
- di_que_in(channel, QUE_POST_BACK, di_buf);
- #endif
-#endif
+ PR_ERR("%s:not in display %d\n",
+ __func__,
+ di_buf->index);
+ }
} else {
di_lock_irqfiq_save(irq_flag2);
queue_in(channel, di_buf, QUEUE_RECYCLE);
int di_vf_l_states(struct vframe_states *states, unsigned int channel)
{
- struct di_mm_s *mm = dim_mm_get();
+ struct di_mm_s *mm = dim_mm_get(channel);
+ struct dim_sum_s *psumx = get_sumx(channel);
/*pr_info("%s: ch[%d]\n", __func__, channel);*/
if (!states)
return -1;
states->vf_pool_size = mm->sts.num_local;
- states->buf_free_num = list_count(channel, QUEUE_LOCAL_FREE);
+ states->buf_free_num = psumx->b_pre_free;
- states->buf_avail_num = di_que_list_count(channel, QUE_POST_READY);
- states->buf_recycle_num = list_count(channel, QUEUE_RECYCLE);
+ states->buf_avail_num = psumx->b_pst_ready;
+ states->buf_recycle_num = psumx->b_recyc;
if (dimp_get(eDI_MP_di_dbg_mask) & 0x1) {
- di_pr_info("di-pre-ready-num:%d\n",
- /*new que list_count(channel, QUEUE_PRE_READY));*/
- di_que_list_count(channel, QUE_PRE_READY));
- di_pr_info("di-display-num:%d\n",
- list_count(channel, QUEUE_DISPLAY));
+ di_pr_info("di-pre-ready-num:%d\n", psumx->b_pre_ready);
+ di_pr_info("di-display-num:%d\n", psumx->b_display);
}
return 0;
}
unsigned long cnt_adr;
int cnt_canvas_idx;
unsigned long mcinfo_adr;
+ unsigned short *mcinfo_adr_v;/**/
+ bool mcinfo_alloc_flg; /**/
int mcinfo_canvas_idx;
unsigned long mcvec_adr;
int mcvec_canvas_idx;
/*new use this for put back control*/
#define QUEUE_POST_PUT_BACK (9)
-#define QUEUE_POST_KEEP (10)/*below use pw_queue_in*/
-#define QUEUE_POST_KEEP_BACK (11)
+#define QUEUE_POST_DOING2 (10)
+#define QUEUE_POST_KEEP (11)/*below use pw_queue_in*/
+#define QUEUE_POST_KEEP_BACK (12)
#define QUEUE_NUM 5 /* 9 */
#define QUEUE_NEW_THD_MIN (QUEUE_IN_FREE - 1)
unsigned int post_irq;
unsigned int flags;
unsigned long jiffy;
- unsigned long mem_start;
- unsigned int mem_size;
bool mem_flg; /*ary add for make sure mem is ok*/
- unsigned int buffer_size;
- unsigned int post_buffer_size;
unsigned int buf_num_avail;
int rdma_handle;
/* is support nr10bit */
unsigned int pps_enable;
unsigned int h_sc_down_en;/*sm1, tm2 ...*/
/*struct mutex cma_mutex;*/
- unsigned int flag_cma;
- struct page *total_pages;
struct dentry *dbg_root; /*dbg_fs*/
/***************************/
/*struct di_data_l_s data_l;*/
/* ary: loacal play p mode is 0
* local play i mode is 0
*/
-
- unsigned char buf_alloc_mode;
/* alloc di buf as p or i;0: alloc buf as i;
* 1: alloc buf as p;
*/
bool bypass_pre;
bool invert_flag;
bool vdin_source;
- int nr_size;
- int count_size;
- int mcinfo_size;
- int mv_size;
- int mtn_size;
-
int cma_release_req;
/* for performance debug */
unsigned long irq_time[2];
unsigned int size;
};
-struct dim_mm_s {
- struct page *ppage;
- unsigned long addr;
-};
-bool dim_mm_alloc(int cma_mode, size_t count, struct dim_mm_s *o);
-bool dim_mm_release(int cma_mode,
- struct page *pages,
- int count,
- unsigned long addr);
unsigned char dim_is_bypass(vframe_t *vf_in, unsigned int channel);
bool dim_bypass_first_frame(unsigned int ch);
void di_reg_setting(unsigned int channel, struct vframe_s *vframe);
void di_reg_variable(unsigned int channel, struct vframe_s *vframe);
-void dim_unreg_process_irq(unsigned int channel);
void di_unreg_variable(unsigned int channel);
void di_unreg_setting(void);
struct di_buf_s *di_bufp);
const char *dim_get_vfm_type_name(unsigned int nub);
-
-bool dim_cma_top_alloc(unsigned int ch);
-bool dim_cma_top_release(unsigned int ch);
+bool dim_get_mcmem_alloc(void);
int dim_get_reg_unreg_cnt(void);
void dim_reg_timeout_inc(void);
struct di_buf_s *p = NULL, *keep_buf;/* ptmp; */
struct di_pre_stru_s *di_pre_stru_p;
struct di_post_stru_s *di_post_stru_p;
- struct di_dev_s *de_devp = get_dim_de_devp();
const char *version_s = dim_get_version_s();
int dump_state_flag = dim_get_dump_state_flag();
unsigned char recovery_flag = dim_vcry_get_flg();
struct di_hpre_s *pre = get_hw_pre();
struct di_hpst_s *post = get_hw_pst();
char *splt = "---------------------------";
- struct di_mm_s *mm = dim_mm_get(); /*mm-0705*/
+ struct di_mm_s *mm = dim_mm_get(channel); /*mm-0705*/
di_pre_stru_p = get_pre_stru(channel);
di_post_stru_p = get_post_stru(channel);
seq_printf(seq, "recovery_log_q_idx=%d, recovery_log_di_buf=0x%p\n",
recovery_log_queue_idx, recovery_log_di_buf);
seq_printf(seq, "buffer_size=%d, mem_flag=%s, cma_flag=%d\n",
- de_devp->buffer_size,
+ mm->cfg.size_local,
di_cma_dbg_get_st_name(channel),
- de_devp->flag_cma);
+ cfgg(mem_flg));
keep_buf = di_post_stru_p->keep_buf;
seq_printf(seq, "used_post_buf_index %d(0x%p),",
IS_ERR_OR_NULL(keep_buf) ?
/********************************/
/* local_free_list */
/********************************/
- seq_printf(seq, "local_free_list (max %d):\n", mm->cfg.num_local);
+ itmp = list_count(channel, QUEUE_LOCAL_FREE);
+ seq_printf(seq, "local_free_list (max %d):(curr %d)\n",
+ mm->cfg.num_local, itmp);
queue_for_each_entry(p, channel, QUEUE_LOCAL_FREE, list) {
seq_printf(seq, "index %2d, 0x%p, type %d\n",
p->index, p, p->type);
/* post_doing_list */
/********************************/
seq_puts(seq, "post_doing_list:\n");
- queue_for_each_entry(p, channel, QUEUE_POST_DOING, list) {
+ //queue_for_each_entry(p, channel, QUEUE_POST_DOING, list) {
+ di_que_list(channel, QUE_POST_DOING, &tmpa[0], &psize);
+ for (itmp = 0; itmp < psize; itmp++) {
+ p = pw_qindex_2_buf(channel, tmpa[itmp]);
print_di_buf_seq(p, 2, seq);
}
seq_printf(seq, "%s\n", splt);
static struct mcinfo_lmv_s lines_mv[540];
-void dimh_calc_lmv_base_mcinfo(unsigned int vf_height, unsigned long mcinfo_adr,
+void dimh_calc_lmv_base_mcinfo(unsigned int vf_height,
+ unsigned short *mcinfo_adr_v,
unsigned int mcinfo_size)
{
unsigned short i, top_str, bot_str, top_end, bot_end, j = 0;
unsigned short flg_m1 = 0, flg_i = 0, nLmvLckSt = 0;
unsigned short lmv_lckstext[3] = {0, 0, 0}, nLmvLckEd;
unsigned short lmv_lckedext[3] = {0, 0, 0}, nLmvLckNum;
- bool bflg_vmap = false;
- u8 *tmp;
/*mcinfo_vadr = (unsigned short *)phys_to_virt(mcinfo_adr);*/
if (!dimp_get(eDI_MP_lmv_lock_win_en))
return;
- tmp = dim_vmap(mcinfo_adr, mcinfo_size, &bflg_vmap);
- if (!tmp) {
- dim_print("err:dim_vmap failed\n");
- return;
- }
- mcinfo_vadr = (unsigned short *)tmp;
+ mcinfo_vadr = mcinfo_adr_v;
for (i = 0; i < (vf_height >> 1); i++) {
lmvs_init(&lines_mv[i], *(mcinfo_vadr + i));
pr_info("\n");
}
}
- if (bflg_vmap)
- dim_unmap_phyaddr(tmp);
/*pr_mcinfo_cnt ? pr_mcinfo_cnt-- : (pr_mcinfo_cnt = 0);*/
dimp_get(eDI_MP_pr_mcinfo_cnt) ?
if (mcvecrd_mif->blend_en) {
dim_VSYNC_WR_MPEG_REG_BITS(MCDI_MC_CRTL,
dimp_get(eDI_MP_mcen_mode), 0, 2);
- if (!di_cfg_top_get(eDI_CFG_ref_2)) {
+ if (!di_cfg_top_get(EDI_CFG_ref_2)) {
/*(!dimp_get(eDI_MP_post_wr_en)) {*/
dim_VSYNC_WR_MPEG_REG_BITS(MCDI_MC_CRTL, 1, 11, 1);
dim_VSYNC_WR_MPEG_REG_BITS(MCDI_MC_CRTL, 3, 18, 2);
void dimh_combing_pd22_window_config(unsigned int width, unsigned int height);
void dimh_calc_lmv_init(void);
void dimh_calc_lmv_base_mcinfo(unsigned int vf_height,
- unsigned long mcinfo_adr,
+ unsigned short *mcinfo_adr_v,
unsigned int mcinfo_size);
void dimh_init_field_mode(unsigned short height);
void dim_film_mode_win_config(unsigned int width, unsigned int height);
void dump_vd2_afbc(void);
-u8 *dim_vmap(ulong addr, u32 size, bool *bflg);
-void dim_unmap_phyaddr(u8 *vaddr);
int dim_print(const char *fmt, ...);
#define DI_MC_SW_OTHER (1 << 0)
| VIDTYPE_COMPRESS \
| VIDTYPE_MVC)
+/* DI buffer memory-allocation method, selected via cfg item "mem_flg".
+ * Values mirror the old per-device flag_cma codes (0/1/3/4).
+ */
+enum eDI_MEM_M {
+ eDI_MEM_M_rev = 0, /* reserved memory region */
+ eDI_MEM_M_cma = 1, /* per-buffer CMA allocation */
+ eDI_MEM_M_cma_all = 2, /* CMA, whole-pool allocation */
+ eDI_MEM_M_codec_a = 3, /* codec_mm allocator, mode a */
+ eDI_MEM_M_codec_b = 4, /* codec_mm allocator, mode b (CMA-first) */
+ eDI_MEM_M_max /**/
+};
/* ************************************** */
/* *************** cfg top ************** */
/* ************************************** */
/* also see: di_cfg_top_ctr*/
enum eDI_CFG_TOP_IDX {
/* cfg for top */
- eDI_CFG_BEGIN,
- eDI_CFG_first_bypass,
- eDI_CFG_ref_2,
+ EDI_CFG_BEGIN,
+ EDI_CFG_mem_flg,
+ EDI_CFG_first_bypass,
+ EDI_CFG_ref_2,
EDI_CFG_KEEP_CLEAR_AUTO,
- eDI_CFG_END,
+ EDI_CFG_END,
};
-#define K_DI_CFG_NUB (eDI_CFG_END - eDI_CFG_BEGIN + 1)
+/* Shorthand accessors for top-level cfg items: the item name is pasted
+ * onto the EDI_CFG_ prefix, e.g. cfgg(mem_flg) == di_cfg_top_get(EDI_CFG_mem_flg).
+ * cfgeq/cfgnq: compare item value; cfgg/cfgs: get/set item value.
+ */
+#define cfgeq(a, b) ((di_cfg_top_get(EDI_CFG_##a) == (b)) ? true : false)
+#define cfgnq(a, b) ((di_cfg_top_get(EDI_CFG_##a) != (b)) ? true : false)
+#define cfgg(a) di_cfg_top_get(EDI_CFG_##a)
+#define cfgs(a, b) di_cfg_top_set(EDI_CFG_##a, b)
+#define K_DI_CFG_NUB (EDI_CFG_END - EDI_CFG_BEGIN + 1)
+/* cfg item source flags (struct di_cfg_ctr_s.flg): */
+#define K_DI_CFG_T_FLG_NOTHING (0x00)
+#define K_DI_CFG_T_FLG_DTS (0x01)
+#define K_DI_CFG_T_FLG_NONE (0x80)
+/* Packed per-item storage for a top-level cfg entry.
+ * Holds several 4-bit value slots (presumably: df = default, dts = from
+ * device tree, dbg = debug override, c = current -- TODO confirm against
+ * di_cfg_top_ctr) plus presence/enable flag bits. Accessed either as a
+ * raw u32 (d32) or via the bitfields (b).
+ */
+union di_cfg_tdata_u {
+ unsigned int d32;
+ struct {
+ unsigned int val_df:4,/**/
+ val_dts:4,
+ val_dbg:4,
+ val_c:4,
+ dts_en:1,
+ dts_have:1,
+ dbg_have:1,
+ rev_en:1,
+ en_update:4,
+ reserved:8;
+ } b;
+};
struct di_cfg_ctr_s {
- char *name;
+ char *dts_name;
enum eDI_CFG_TOP_IDX id;
- bool default_val;
+ unsigned char default_val;
+ unsigned char flg;
};
/* ************************************** */
QUE_POST_FREE, /*7*/
QUE_POST_READY, /*8*/
QUE_POST_BACK, /*new*/
+ QUE_POST_DOING,
QUE_POST_KEEP, /*below use pw_queue_in*/
QUE_POST_KEEP_BACK,
/*----------------*/
/**/
unsigned int num_local;
unsigned int num_post;
+ unsigned int size_local;
+ unsigned int size_post;
+ int nr_size;
+ int count_size;
+ int mcinfo_size;
+ int mv_size;
+ int mtn_size;
+ unsigned char buf_alloc_mode;
};
-struct di_mm_st_s {
+struct dim_mm_t_s {
/* use for reserved and alloc all*/
unsigned long mem_start;
unsigned int mem_size;
struct page *total_pages;
+};
- unsigned int flag_cma;
-
- unsigned int size_local;
- unsigned int size_post;
+struct di_mm_st_s {
+ unsigned long mem_start;
+ unsigned int mem_size;
int num_local;
int num_post; /*ppost*/
};
struct di_mm_st_s sts;
};
+/* Per-channel buffer-count summary, kept up to date elsewhere so that
+ * status queries (di_vf_l_states) need not walk the queues.
+ */
+struct dim_sum_s {
+ /*buf*/
+ unsigned int b_pre_free; /* local (pre) free buffers */
+ unsigned int b_pst_ready; /* post-ready buffers */
+ unsigned int b_recyc; /* buffers on recycle queue */
+ unsigned int b_pre_ready; /* pre-ready buffers */
+ unsigned int b_pst_free; /* post free buffers -- NOTE(review): not read in visible code; confirm */
+ unsigned int b_display; /* buffers out on display */
+};
+
+
struct di_ch_s {
/*struct di_cfgx_s dbg_cfg;*/
bool cfgx_en[K_DI_CFGX_NUB];
unsigned int sum[eDI_SUM_NUB + 1];
unsigned int sum_get;
unsigned int sum_put;
+ struct dim_sum_s sumx;
};
bool flg_hw_int; /*only once*/
- struct di_mm_s mm;
+ struct dim_mm_t_s mmt;
+ struct di_mm_s mm[DI_CHANNEL_NUB];
};
/*************************
};
struct di_data_l_s {
- bool cfg_en[K_DI_CFG_NUB]; /*cfg_top*/
+ /*bool cfg_en[K_DI_CFG_NUB];*/ /*cfg_top*/
+ union di_cfg_tdata_u cfg_en[K_DI_CFG_NUB];
+ unsigned int cfg_sel;
+ unsigned int cfg_dbg_mode; /*val or item*/
int mp_uit[K_DI_MP_UIT_NUB]; /*eDI_MP_UI_T*/
struct di_ch_s ch_data[DI_CHANNEL_NUB];
int plane[DI_CHANNEL_NUB]; /*use for debugfs*/
*
*************************************/
-#define DBG_M_C_ALL 0x2000 /*all debug close*/
-#define DBG_M_O_ALL 0x1000 /*all debug open*/
-
-#define DBG_M_DT 0x01 /*do table work*/
-#define DBG_M_REG 0x02 /*reg/unreg*/
-#define DBG_M_POST_REF 0x04
-#define DBG_M_TSK 0x08
-#define DBG_M_INIT 0x10
-#define DBG_M_EVENT 0x20
-#define DBG_M_FIRSTFRAME 0x40
-#define DBG_M_DBG 0x80
-
-#define DBG_M_POLLING 0x100
-#define DBG_M_ONCE 0x200
-#define DBG_M_KEEP 0x400
+#define DBG_M_C_ALL DI_BIT30 /*all debug close*/
+#define DBG_M_O_ALL DI_BIT31 /*all debug open*/
+
+#define DBG_M_DT DI_BIT0 /*do table work*/
+#define DBG_M_REG DI_BIT1 /*reg/unreg*/
+#define DBG_M_POST_REF DI_BIT2
+#define DBG_M_TSK DI_BIT3
+#define DBG_M_INIT DI_BIT4
+#define DBG_M_EVENT DI_BIT5
+#define DBG_M_FIRSTFRAME DI_BIT6
+#define DBG_M_DBG DI_BIT7
+
+#define DBG_M_POLLING DI_BIT8
+#define DBG_M_ONCE DI_BIT9
+#define DBG_M_KEEP DI_BIT10 /*keep buf*/
+#define DBG_M_MEM DI_BIT11 /*mem alloc release*/
#define DBG_M_WQ DI_BIT14 /*work que*/
extern unsigned int di_dbg;
#define dbg_first_frame(fmt, args ...) dbg_m(DBG_M_FIRSTFRAME, fmt, ##args)
#define dbg_dbg(fmt, args ...) dbg_m(DBG_M_DBG, fmt, ##args)
#define dbg_once(fmt, args ...) dbg_m(DBG_M_ONCE, fmt, ##args)
+#define dbg_mem(fmt, args ...) dbg_m(DBG_M_MEM, fmt, ##args)
#define dbg_keep(fmt, args ...) dbg_m(DBG_M_KEEP, fmt, ##args)
#define dbg_wq(fmt, args ...) dbg_m(DBG_M_WQ, fmt, ##args)
return &get_datal()->mng;
}
-static inline unsigned long di_get_mem_start(unsigned int ch)
-{
- return get_datal()->mng.mem_start[ch];
-}
-
-static inline void di_set_mem_info(unsigned int ch,
- unsigned long mstart, unsigned int size)
-{
- get_datal()->mng.mem_start[ch] = mstart;
- get_datal()->mng.mem_size[ch] = size;
-}
-
-static inline unsigned int *di_get_mem_size(unsigned int ch)
-{
- return &get_datal()->mng.mem_size[ch];
-}
static inline struct di_hpre_s *get_hw_pre(void)
{
get_datal()->ch_data[ch].sum_put = 0;
}
+static inline struct dim_sum_s *get_sumx(unsigned int ch)
+{
+ return &get_datal()->ch_data[ch].sumx;
+}
/*bypass_state*/
static inline bool di_bypass_state_get(unsigned int ch)
{
/******************************************
* mm
*****************************************/
-static inline struct di_mm_s *dim_mm_get(void)
+static inline struct di_mm_s *dim_mm_get(unsigned int ch)
+{
+ return &get_datal()->mng.mm[ch];
+}
+
+static inline struct dim_mm_t_s *dim_mmt_get(void)
+{
+ return &get_datal()->mng.mmt;
+}
+
+static inline unsigned long di_get_mem_start(unsigned int ch)
+{
+ return get_datal()->mng.mm[ch].sts.mem_start;
+}
+
+static inline void di_set_mem_info(unsigned int ch,
+ unsigned long mstart,
+ unsigned int size)
+{
+ get_datal()->mng.mm[ch].sts.mem_start = mstart;
+ get_datal()->mng.mm[ch].sts.mem_size = size;
+}
+
+static inline unsigned int di_get_mem_size(unsigned int ch)
{
- return &get_datal()->mng.mm;
+ return get_datal()->mng.mm[ch].sts.mem_size;
}
-/**/
void di_tout_int(struct di_time_out_s *tout, unsigned int thd);
bool di_tout_contr(enum eDI_TOUT_CONTR cmd, struct di_time_out_s *tout);
/* post_doing_list */
/********************************/
seq_puts(seq, "vfm for: post_doing_list:\n");
- queue_for_each_entry(p, ch, QUEUE_POST_DOING, list) {
+ //queue_for_each_entry(p, ch, QUEUE_POST_DOING, list) {
+ di_que_list(ch, QUE_POST_DOING, &tmpa[0], &psize);
+ for (itmp = 0; itmp < psize; itmp++) {
+ p = pw_qindex_2_buf(ch, tmpa[itmp]);
pvfm = p->vframe;
seq_file_vframe(seq, v, pvfm);
seq_printf(seq, "%s\n", splt2);
return count;
}
+
+/* debugfs "cfghelp" read: print usage text for the cfg_* debug nodes. */
+static int cfgt_help_show(struct seq_file *s, void *what)
+{
+ seq_puts(s, "cat list\n");
+ seq_printf(s, "\t%-10s:%s\n", "cfg_ai", "all cfg infor");
+ seq_printf(s, "\t%-10s:%s\n", "cfg_av", "all cfg val");
+ seq_printf(s, "\t%-10s:%s\n", "cfg_one", "sel val or infor");
+ seq_printf(s, "\t%-10s:%s\n", "cfg_sel", "sel infor");
+ seq_puts(s, "echo list\n");
+ seq_printf(s, "\t%-10s:%s\n", "val > cfgw_one",
+ "change cfg that have sel");
+ seq_printf(s, "\t%-10s:%s\n", "index val > cfgw_index",
+ "change cfg by index");
+ seq_printf(s, "\t%-10s:%s\n", "mode sel(0/1) index > cfgw_sel",
+ "change sel");
+ return 0;
+}
+
+/*
+ * debugfs "cfgr_ai" read: dump the full record of every top cfg item.
+ * NOTE(review): "itme" in the name is a typo for "item"; renaming would
+ * also require changing the DEFINE_SEQ_SHOW_ONLY() and file-table users.
+ */
+static int cfgt_itme_all_show(struct seq_file *s, void *what)
+{
+ di_cfgt_show_item_all(s);
+ return 0;
+}
+
+/* debugfs "cfgr_av" read: dump the current value of every top cfg item. */
+static int cfgt_val_all_show(struct seq_file *s, void *what)
+{
+ di_cfgt_show_val_all(s);
+ return 0;
+}
+
+/*
+ * debugfs "cfg_one" read: show the currently selected cfg item.
+ * cfg_dbg_mode chooses the detailed item dump vs the value-only dump.
+ */
+static int cfgt_one_show(struct seq_file *s, void *what)
+{
+ if (get_datal()->cfg_dbg_mode)
+ di_cfgt_show_item_sel(s);
+ else
+ di_cfgt_show_val_sel(s);
+ return 0;
+}
+
+/* debugfs "cfg_sel" read: report the current selection (mode + index). */
+static int cfgt_sel_show(struct seq_file *s, void *what)
+{
+ unsigned int i;
+
+ i = get_datal()->cfg_sel;
+ seq_printf(s, "mode[%d]:index[%d]\n",
+ get_datal()->cfg_dbg_mode, i);
+ if (!di_cfg_top_check(i))
+ return 0;
+ seq_printf(s, "%s\n", di_cfg_top_get_name(i));
+ return 0;
+}
+
+/*
+ * debugfs "cfg_sel" write: "mode index" selects which cfg item the
+ * cfg_one node operates on. Input is clamped to sizeof(buf)-1 and
+ * NUL-terminated before parsing; exactly two integers are required.
+ */
+static ssize_t cfgt_sel_store(struct file *file, const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ unsigned int sel, index;
+ char buf[80];
+ int ret;
+
+ count = min_t(size_t, count, (sizeof(buf) - 1));
+ if (copy_from_user(buf, userbuf, count))
+ return -EFAULT;
+ buf[count] = 0;
+ ret = sscanf(buf, "%i %i", &sel, &index);
+ switch (ret) {
+ case 2:
+ di_cfgt_set_sel(sel, index);
+ break;
+ default:
+ pr_info("err:please enter: cfg_item, index\n");
+ return -EINVAL;
+ }
+ return count;
+}
+
+/*
+ * debugfs "cfgw_id" write: "index val" sets cfg item <index> to <val>.
+ * Invalid indexes are rejected by di_cfg_top_check() and silently
+ * dropped (break), while malformed input returns -EINVAL.
+ */
+static ssize_t cfgtw_id_store(struct file *file, const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ unsigned int index, val;
+ char buf[80];
+ int ret;
+
+ count = min_t(size_t, count, (sizeof(buf) - 1));
+ if (copy_from_user(buf, userbuf, count))
+ return -EFAULT;
+ buf[count] = 0;
+ ret = sscanf(buf, "%i %i", &index, &val);
+ switch (ret) {
+ case 2:
+ if (!di_cfg_top_check(index))
+ break;
+ pr_info("%s:%d->%d\n",
+ di_cfg_top_get_name(index),
+ di_cfg_top_get(index),
+ val);
+ di_cfg_top_set(index, val);
+ break;
+ default:
+ pr_info("err:please enter: cfg_item, index\n");
+ return -EINVAL;
+ }
+ return count;
+}
+
+/*
+ * debugfs "cfg_one" write: set the currently selected cfg item
+ * (chosen earlier via cfg_sel) to the single value written.
+ */
+static ssize_t cfgt_one_store(struct file *file, const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ unsigned int index, val;
+ char buf[80];
+ int ret;
+
+ count = min_t(size_t, count, (sizeof(buf) - 1));
+ if (copy_from_user(buf, userbuf, count))
+ return -EFAULT;
+ buf[count] = 0;
+ ret = kstrtouint(buf, 0, &val);
+ if (ret) {
+ pr_info("war:please enter val\n");
+ return 0;
+ }
+ index = get_datal()->cfg_sel;
+ if (!di_cfg_top_check(index))
+ return count;
+ pr_info("%s:%d->%d\n",
+ di_cfg_top_get_name(index),
+ di_cfg_top_get(index),
+ val);
+ di_cfg_top_set(index, val);
+ return count;
+}
+
/***************************************************************
* parameter show and store for top : DI
**************************************************************/
DEFINE_SEQ_SHOW_ONLY(mpr_nr);
DEFINE_SEQ_SHOW_ONLY(mpr_pd);
DEFINE_SEQ_SHOW_ONLY(mpr_mtn);
+DEFINE_SEQ_SHOW_ONLY(cfgt_help);
+DEFINE_SEQ_SHOW_ONLY(cfgt_itme_all);
+DEFINE_SEQ_SHOW_ONLY(cfgt_val_all);
+DEFINE_STORE_ONLY(cfgtw_id);
+DEFINE_SEQ_SHOW_STORE(cfgt_one);
+DEFINE_SEQ_SHOW_STORE(cfgt_sel);
DEFINE_SEQ_SHOW_ONLY(seq_file_curr_vframe);
{"mw_mtn", S_IFREG | 0644, &mpw_mtn_fops},
{"buf_cnt", S_IFREG | 0644, &buf_cnt_fops},
{"keep_clear", S_IFREG | 0644, &keep_buf_clear_fops},
+ {"cfghelp", S_IFREG | 0644, &cfgt_help_fops},
+ {"cfgr_ai", S_IFREG | 0644, &cfgt_itme_all_fops},
+ {"cfgr_av", S_IFREG | 0644, &cfgt_val_all_fops},
+ {"cfgw_id", S_IFREG | 0644, &cfgtw_id_fops},
+ {"cfg_one", S_IFREG | 0644, &cfgt_one_fops},
+ {"cfg_sel", S_IFREG | 0644, &cfgt_sel_fops},
};
static const struct di_dbgfs_files_t di_debugfs_files[] = {
ch = pst->curr_ch;
ppost = get_post_stru(ch);
- if (queue_empty(ch, QUEUE_POST_DOING)) {
+ //if (queue_empty(ch, QUEUE_POST_DOING)) {
+ if (di_que_is_empty(ch, QUE_POST_DOING)) {
ppost->post_peek_underflow++;
pst->state--;
return reflesh;
ch = pst->curr_ch;
ppost = get_post_stru(ch);
- di_buf = get_di_buf_head(ch, QUEUE_POST_DOING);
+ //di_buf = get_di_buf_head(ch, QUEUE_POST_DOING);
+ di_buf = di_que_peek(ch, QUE_POST_DOING);
if (dim_check_di_buf(di_buf, 20, ch)) {
PR_ERR("%s:err1\n", __func__);
return reflesh;
#include "di_data_l.h"
#include "di_data.h"
#include "di_dbg.h"
+#include "di_sys.h"
#include "di_vframe.h"
#include "di_que.h"
#include "di_task.h"
const struct di_cfg_ctr_s di_cfg_top_ctr[K_DI_CFG_NUB] = {
/*same order with enum eDI_DBG_CFG*/
/* cfg for top */
- [eDI_CFG_BEGIN] = {"cfg top begin ", eDI_CFG_BEGIN, 0},
-
- [eDI_CFG_first_bypass] = {"first_bypass",
- eDI_CFG_first_bypass, 0},
- [eDI_CFG_ref_2] = {"ref_2",
- eDI_CFG_ref_2, 0},
+ [EDI_CFG_BEGIN] = {"cfg top begin ", EDI_CFG_BEGIN, 0,
+ K_DI_CFG_T_FLG_NONE},
+ [EDI_CFG_mem_flg] = {"flag_cma", EDI_CFG_mem_flg,
+ eDI_MEM_M_cma,
+ K_DI_CFG_T_FLG_DTS},
+ [EDI_CFG_first_bypass] = {"first_bypass",
+ EDI_CFG_first_bypass,
+ 0,
+ K_DI_CFG_T_FLG_DTS},
+ [EDI_CFG_ref_2] = {"ref_2",
+ EDI_CFG_ref_2, 0, K_DI_CFG_T_FLG_NOTHING},
[EDI_CFG_KEEP_CLEAR_AUTO] = {"keep_buf clear auto",
- EDI_CFG_KEEP_CLEAR_AUTO, 0},
- [eDI_CFG_END] = {"cfg top end ", eDI_CFG_END, 0},
+ EDI_CFG_KEEP_CLEAR_AUTO,
+ 0,
+ K_DI_CFG_T_FLG_NOTHING},
+ [EDI_CFG_END] = {"cfg top end ", EDI_CFG_END, 0,
+ K_DI_CFG_T_FLG_NONE},
};
char *di_cfg_top_get_name(enum eDI_CFG_TOP_IDX idx)
{
- return di_cfg_top_ctr[idx].name;
+ return di_cfg_top_ctr[idx].dts_name;
}
void di_cfg_top_get_info(unsigned int idx, char **name)
PR_ERR("%s:err:idx not map [%d->%d]\n", __func__,
idx, di_cfg_top_ctr[idx].id);
- *name = di_cfg_top_ctr[idx].name;
+ *name = di_cfg_top_ctr[idx].dts_name;
}
bool di_cfg_top_check(unsigned int idx)
__func__, idx, tsize);
return false;
}
+ if (di_cfg_top_ctr[idx].flg & K_DI_CFG_T_FLG_NONE)
+ return false;
if (idx != di_cfg_top_ctr[idx].id) {
- PR_ERR("%s:err:not map:%d->%d\n",
- __func__, idx, di_cfg_top_ctr[idx].id);
+ PR_ERR("%s:%s:err:not map:%d->%d\n",
+ __func__,
+ di_cfg_top_ctr[idx].dts_name,
+ idx,
+ di_cfg_top_ctr[idx].id);
return false;
}
- pr_info("\t%-15s=%d\n", di_cfg_top_ctr[idx].name,
- di_cfg_top_ctr[idx].default_val);
return true;
}
void di_cfg_top_init_val(void)
{
int i;
+ union di_cfg_tdata_u *pd;
+ const struct di_cfg_ctr_s *pt;
- pr_info("%s:\n", __func__);
- for (i = eDI_CFG_BEGIN; i < eDI_CFG_END; i++) {
+ PR_INF("%s:\n", __func__);
+ for (i = EDI_CFG_BEGIN; i < EDI_CFG_END; i++) {
if (!di_cfg_top_check(i))
continue;
- di_cfg_top_set(i, di_cfg_top_ctr[i].default_val);
+ pd = &get_datal()->cfg_en[i];
+ pt = &di_cfg_top_ctr[i];
+ /*di_cfg_top_set(i, di_cfg_top_ctr[i].default_val);*/
+ pd->d32 = 0;/*clear*/
+ pd->b.val_df = pt->default_val;
+ pd->b.val_c = pd->b.val_df;
}
- pr_info("%s:finish\n", __func__);
+ PR_INF("%s:finish\n", __func__);
+}
+
+/*
+ * Overlay cfg defaults with values read from the device tree.
+ * Only items flagged K_DI_CFG_T_FLG_DTS are looked up (by dts_name);
+ * a found property sets dts_have/val_dts and becomes the current
+ * value (val_c). Items missing from DT keep their defaults.
+ */
+void di_cfg_top_dts(void)
+{
+ struct platform_device *pdev = get_dim_de_devp()->pdev;
+ int i;
+ union di_cfg_tdata_u *pd;
+ const struct di_cfg_ctr_s *pt;
+ int ret;
+ unsigned int uval;
+
+ if (!pdev) {
+ PR_ERR("%s:no pdev\n", __func__);
+ return;
+ }
+ PR_INF("%s\n", __func__);
+ for (i = EDI_CFG_BEGIN; i < EDI_CFG_END; i++) {
+ if (!di_cfg_top_check(i))
+ continue;
+ if (!(di_cfg_top_ctr[i].flg & K_DI_CFG_T_FLG_DTS))
+ continue;
+ pd = &get_datal()->cfg_en[i];
+ pt = &di_cfg_top_ctr[i];
+ pd->b.dts_en = 1;
+ ret = of_property_read_u32(pdev->dev.of_node,
+ pt->dts_name,
+ &uval);
+ if (ret)
+ continue;
+ PR_INF("\t%s:%d\n", pt->dts_name, uval);
+ pd->b.dts_have = 1;
+ pd->b.val_dts = uval;
+ pd->b.val_c = pd->b.val_dts;
+ }
+ PR_INF("%s end\n", __func__);
+}
+
+/*
+ * Print the full record of one cfg item to a seq_file: the static
+ * table entry (default value, DTS flag) and every field of the
+ * runtime union (raw d32, default/dts/debug/current values, flags).
+ */
+static void di_cfgt_show_item_one(struct seq_file *s, unsigned int index)
+{
+ union di_cfg_tdata_u *pd;
+ const struct di_cfg_ctr_s *pt;
+
+ if (!di_cfg_top_check(index))
+ return;
+ pd = &get_datal()->cfg_en[index];
+ pt = &di_cfg_top_ctr[index];
+ seq_printf(s, "id:%2d:%-10s\n", index, pt->dts_name);
+ seq_printf(s, "\t%-5s:0x%2x[%d]\n",
+ "tdf", pt->default_val, pt->default_val);
+ seq_printf(s, "\t%-5s:%d\n",
+ "tdts", pt->flg & K_DI_CFG_T_FLG_DTS);
+ seq_printf(s, "\t%-5s:0x%-4x\n", "d32", pd->d32);
+ seq_printf(s, "\t%-5s:0x%2x[%d]\n",
+ "vdf", pd->b.val_df, pd->b.val_df);
+ seq_printf(s, "\t%-5s:0x%2x[%d]\n",
+ "vdts", pd->b.val_dts, pd->b.val_dts);
+ seq_printf(s, "\t%-5s:0x%2x[%d]\n",
+ "vdbg", pd->b.val_dbg, pd->b.val_dbg);
+ seq_printf(s, "\t%-5s:0x%2x[%d]\n", "vc", pd->b.val_c, pd->b.val_c);
+ seq_printf(s, "\t%-5s:%d\n", "endts", pd->b.dts_en);
+ seq_printf(s, "\t%-5s:%d\n", "hdts", pd->b.dts_have);
+ seq_printf(s, "\t%-5s:%d\n", "hdbg", pd->b.dbg_have);
+}
+
+void di_cfgt_show_item_sel(struct seq_file *s)
+{
+ int i = get_datal()->cfg_sel;
+
+ di_cfgt_show_item_one(s, i);
}
-bool di_cfg_top_get(enum eDI_CFG_TOP_IDX id)
+void di_cfgt_set_sel(unsigned int dbg_mode, unsigned int id)
{
- return get_datal()->cfg_en[id];
+ if (!di_cfg_top_check(id)) {
+ PR_ERR("%s:%d is overflow\n", __func__, id);
+ return;
+ }
+ get_datal()->cfg_sel = id;
+ get_datal()->cfg_dbg_mode = dbg_mode;
}
-void di_cfg_top_set(enum eDI_CFG_TOP_IDX id, bool en)
+void di_cfgt_show_item_all(struct seq_file *s)
{
- get_datal()->cfg_en[id] = en;
+ int i;
+
+ for (i = EDI_CFG_BEGIN; i < EDI_CFG_END; i++)
+ di_cfgt_show_item_one(s, i);
+}
+
+/* Print the short form of one cfg item: raw d32 and current value. */
+static void di_cfgt_show_val_one(struct seq_file *s, unsigned int index)
+{
+ union di_cfg_tdata_u *pd;
+ const struct di_cfg_ctr_s *pt;
+
+ if (!di_cfg_top_check(index))
+ return;
+ pd = &get_datal()->cfg_en[index];
+ pt = &di_cfg_top_ctr[index];
+ seq_printf(s, "id:%2d:%-10s\n", index, pt->dts_name);
+ seq_printf(s, "\t%-5s:0x%-4x\n", "d32", pd->d32);
+ seq_printf(s, "\t%-5s:0x%2x[%d]\n", "vc", pd->b.val_c, pd->b.val_c);
+}
+
+/* Show the value-only dump for the item selected via cfg_sel. */
+void di_cfgt_show_val_sel(struct seq_file *s)
+{
+ unsigned int i = get_datal()->cfg_sel;
+
+ di_cfgt_show_val_one(s, i);
+}
+
+/* Show the value-only dump for every top cfg item. */
+void di_cfgt_show_val_all(struct seq_file *s)
+{
+ int i;
+
+ for (i = EDI_CFG_BEGIN; i < EDI_CFG_END; i++)
+ di_cfgt_show_val_one(s, i);
+}
+
+/*
+ * Return the current effective value (val_c) of a cfg item.
+ * NOTE(review): id is not range-checked here; callers are expected
+ * to have validated it (e.g. via di_cfg_top_check()).
+ */
+unsigned int di_cfg_top_get(enum eDI_CFG_TOP_IDX id)
+{
+ union di_cfg_tdata_u *pd;
+
+ pd = &get_datal()->cfg_en[id];
+ return pd->b.val_c;
+}
+
+/*
+ * Debug override of a cfg item: record the value in the val_dbg slot,
+ * mark dbg_have, and make it the current value (val_c).
+ */
+void di_cfg_top_set(enum eDI_CFG_TOP_IDX id, unsigned int val)
+{
+ union di_cfg_tdata_u *pd;
+
+ pd = &get_datal()->cfg_en[id];
+ pd->b.val_dbg = val;
+ pd->b.dbg_have = 1;
+ pd->b.val_c = val;
+}
/**************************************
return di_sum_get_l(ch, id);
}
+/* Zero the per-channel buffer-count summary (struct dim_sum_s). */
+void dim_sumx_clear(unsigned int ch)
+{
+ struct dim_sum_s *psumx = get_sumx(ch);
+
+ memset(psumx, 0, sizeof(*psumx));
+}
+
+/*
+ * Snapshot the occupancy of the channel's buffer queues (pre/post
+ * free, ready, recycle, display) into the per-channel summary.
+ */
+void dim_sumx_set(unsigned int ch)
+{
+ struct dim_sum_s *psumx = get_sumx(ch);
+
+ psumx->b_pre_free = list_count(ch, QUEUE_LOCAL_FREE);
+ psumx->b_pre_ready = di_que_list_count(ch, QUE_PRE_READY);
+ psumx->b_pst_free = di_que_list_count(ch, QUE_POST_FREE);
+ psumx->b_pst_ready = di_que_list_count(ch, QUE_POST_READY);
+ psumx->b_recyc = list_count(ch, QUEUE_RECYCLE);
+ psumx->b_display = list_count(ch, QUEUE_DISPLAY);
+}
+
/****************************/
/*call by event*/
/****************************/
break;
case eDI_TOP_STATE_REG_STEP2:/*now no change to do*/
if (dip_cma_get_st(ch) == EDI_CMA_ST_READY) {
- if (di_cfg_top_get(eDI_CFG_first_bypass)) {
+ if (di_cfg_top_get(EDI_CFG_first_bypass)) {
if (get_sum_g(ch) == 0)
dim_bypass_first_frame(ch);
else
switch (chst) {
case eDI_TOP_STATE_REG_STEP2:
if (dip_cma_get_st(ch) == EDI_CMA_ST_READY) {
- if (di_cfg_top_get(eDI_CFG_first_bypass)) {
+ if (di_cfg_top_get(EDI_CFG_first_bypass)) {
if (get_sum_g(ch) == 0)
dim_bypass_first_frame(ch);
else
break;
case EDI_TOP_STATE_READY:
dim_post_keep_back_recycle(ch);
+ dim_sumx_set(ch);
break;
default:
break;
return p;
}
+/*
+ * Default per-channel mm config copied into each channel at init.
+ * NOTE(review): di_h is 1088 here, while the removed inline init used
+ * 1080 — presumably intentional (macroblock-aligned height); confirm.
+ */
+static const struct di_mm_cfg_s c_mm_cfg_normal = {
+ .di_h = 1088,
+ .di_w = 1920,
+ .num_local = MAX_LOCAL_BUF_NUM,
+ .num_post = MAX_POST_BUF_NUM,
+};
+
/**********************************/
/* TIME OUT CHEKC api*/
/**********************************/
{
unsigned int ch;
struct di_post_stru_s *ppost;
- struct di_mm_s *mm = dim_mm_get();
+ struct di_mm_s *mm;
+ struct dim_mm_t_s *mmt = dim_mmt_get();
+ struct di_ch_s *pch;
bool ret = false;
for (ch = 0; ch < DI_CHANNEL_NUB; ch++) {
+ pch = get_chdata(ch);
+ pch->ch_id = ch;
ppost = get_post_stru(ch);
memset(ppost, 0, sizeof(struct di_post_stru_s));
ppost->next_canvas_id = 1;
pw_queue_clear(ch, QUE_POST_KEEP);
pw_queue_clear(ch, QUE_POST_KEEP_BACK);
}
+ mm = dim_mm_get(ch);
+ memcpy(&mm->cfg, &c_mm_cfg_normal, sizeof(struct di_mm_cfg_s));
}
+ mmt->mem_start = 0;
+ mmt->mem_size = 0;
+ mmt->total_pages = NULL;
set_current_channel(0);
- /*mm cfg*/
- mm->cfg.di_h = 1080;
- mm->cfg.di_w = 1920;
- mm->cfg.num_local = MAX_LOCAL_BUF_NUM;
- mm->cfg.num_post = MAX_POST_BUF_NUM;
- /*mm sts*/
- mm->sts.mem_start = 0;
- mm->sts.mem_size = 0;
- mm->sts.total_pages = NULL;
- mm->sts.flag_cma = 0;
-
return ret;
}
* cfg ctr top
* bool
**************************************/
+bool di_cfg_top_check(unsigned int idx);
char *di_cfg_top_get_name(enum eDI_CFG_TOP_IDX idx);
void di_cfg_top_get_info(unsigned int idx, char **name);
void di_cfg_top_init_val(void);
-bool di_cfg_top_get(enum eDI_CFG_TOP_IDX id);
-void di_cfg_top_set(enum eDI_CFG_TOP_IDX id, bool en);
+void di_cfg_top_dts(void);
+unsigned int di_cfg_top_get(enum eDI_CFG_TOP_IDX id);
+void di_cfg_top_set(enum eDI_CFG_TOP_IDX id, unsigned int en);
+void di_cfgt_show_item_sel(struct seq_file *s);
+void di_cfgt_show_item_all(struct seq_file *s);
+void di_cfgt_show_val_sel(struct seq_file *s);
+void di_cfgt_show_val_all(struct seq_file *s);
+void di_cfgt_set_sel(unsigned int dbg_mode, unsigned int id);
/**************************************
*
void di_pause_step_done(unsigned int ch);
void di_pause(unsigned int ch, bool on);
+void dim_sumx_clear(unsigned int ch);
+void dim_sumx_set(unsigned int ch);
#endif /*__DI_PRC_H__*/
"QUE_POST_FREE", /*2*/
"QUE_POST_READY", /*3*/
"QUE_POST_BACK", /*4*/
+ "QUE_POST_DOING",
"QUE_POST_KEEP",
"QUE_POST_KEEP_BACK",
"QUE_DBG",
return false;
#endif
if (kfifo_out(&pch->fifo[qtype], &index, sizeof(unsigned int))
- != sizeof(unsigned int))
+ != sizeof(unsigned int)) {
+ PR_ERR("%s:ch[%d],qtye[%d],buf[%d]\n",
+ __func__, ch, qtype, *buf_index);
return false;
+ }
*buf_index = index;
unsigned int q_index;
struct di_buf_s *pdi_buf = NULL;
- if (!pw_queue_peek(ch, qtype, &q_index))
+ if (!pw_queue_peek(ch, qtype, &q_index)) {
+ PR_ERR("%s:no buf\n", __func__);
return pdi_buf;
+ }
pdi_buf = pw_qindex_2_buf(ch, q_index);
if (!pdi_buf) {
- PR_ERR("di:err:%s:buf is null[%d]\n", __func__, q_index);
+ PR_ERR("%s:buf is null[%d]\n", __func__, q_index);
return NULL;
}
unsigned int q_index;
unsigned int q_index2;
- if (!pw_queue_peek(ch, qtype, &q_index))
+ if (!pw_queue_peek(ch, qtype, &q_index)) {
+ PR_ERR("%s:no buf ch[%d], qtype[%d], buf[%d,%d]\n", __func__,
+ ch, qtype, di_buf->type, di_buf->index);
return false;
-
+ }
q_index2 = pw_buf_2_qindex(ch, di_buf);
if (q_index2 != q_index) {
- PR_ERR("di:%s:not map[%d,%d]\n", __func__, q_index2, q_index);
+ PR_ERR("di:%s:%d not map[0x%x,0x%x]\n", __func__,
+ qtype, q_index2, q_index);
return false;
}
return false;
}
if (di_buf->queue_index != -1) {
- PR_ERR("di:%s:buf in some que,ch[%d],qt[%d],qi[%d],bi[%d]\n",
+ PR_ERR("%s:buf in some que,ch[%d],qt[%d],qi[%d],bi[%d]\n",
__func__,
ch, qtype, di_buf->queue_index, di_buf->index);
dump_stack();
q_index = pw_buf_2_qindex(ch, di_buf);
if (!pw_queue_in(ch, qtype, q_index)) {
- PR_ERR("di:%s:err:can't que in,ch[%d],qtype[%d],q_index[%d]\n",
+ PR_ERR("%s:can't que in,ch[%d],qtype[%d],q_index[%d]\n",
__func__,
ch, qtype, q_index);
return false;
pw_queue_clear(ch, qtype);
- if (asize == 0)
+ if (asize == 0) {
+ PR_ERR("%s:size 0\n", __func__);
return ret;
+ }
for (i = 0; i < asize; i++) {
if (arr[i] == q_index) {
}
}
} else {
- PR_ERR("%s: Error, queue_index %d is not right\n",
+ PR_ERR("%s: queue_index %d is not right\n",
__func__, di_buf->queue_index);
if (dim_vcry_get_flg() == 0) {
#include "di_prc.h"
#include "di_sys.h"
#include "di_api.h"
+#include "di_que.h"
#include "register.h"
#include "nr_downscale.h"
return get_dim_de_devp()->nrds_enable;
}
+/*
+ * Map a physically-contiguous range into kernel virtual space.
+ * Lowmem pages return the direct-mapped address (no vmap, *bflg left
+ * untouched); highmem pages are vmap()ed write-combined (uncached)
+ * and *bflg is set true so the caller knows to dim_unmap_phyaddr().
+ * Returns NULL on allocation/map failure.
+ */
+u8 *dim_vmap(ulong addr, u32 size, bool *bflg)
+{
+ u8 *vaddr = NULL;
+ ulong phys = addr;
+ u32 offset = phys & ~PAGE_MASK;
+ u32 npages = PAGE_ALIGN(size) / PAGE_SIZE;
+ struct page **pages = NULL;
+ pgprot_t pgprot;
+ int i;
+
+ if (!PageHighMem(phys_to_page(phys)))
+ return phys_to_virt(phys);
+ if (offset)
+ npages++;
+ pages = vmalloc(sizeof(struct page *) * npages);
+ if (!pages)
+ return NULL;
+ for (i = 0; i < npages; i++) {
+ pages[i] = phys_to_page(phys);
+ phys += PAGE_SIZE;
+ }
+ /* write-combined: no cache, suitable for hw-shared buffers */
+ pgprot = pgprot_writecombine(PAGE_KERNEL);
+ vaddr = vmap(pages, npages, VM_MAP, pgprot);
+ if (!vaddr) {
+ PR_ERR("the phy(%lx) vmaped fail, size: %d\n",
+ addr - offset, npages << PAGE_SHIFT);
+ vfree(pages);
+ return NULL;
+ }
+ /* page array is only needed during vmap() itself */
+ vfree(pages);
+ *bflg = true;
+ return vaddr + offset;
+}
+
+/*
+ * Undo dim_vmap(): round the returned pointer down to its page base
+ * (dim_vmap may have added a sub-page offset) and vunmap it.
+ */
+void dim_unmap_phyaddr(u8 *vaddr)
+{
+ void *addr = (void *)(PAGE_MASK & (ulong)vaddr);
+
+ vunmap(addr);
+}
+
+/*
+ * Create a CPU mapping (mcinfo_adr_v) for a buffer's mcinfo area.
+ * Skipped unless lmv_lock_win_en is set; idempotent via
+ * mcinfo_alloc_flg, which dim_vmap() sets only on the highmem path.
+ */
+void dim_mcinfo_v_alloc(struct di_buf_s *pbuf, unsigned int bsize)
+{
+ if (!dimp_get(eDI_MP_lmv_lock_win_en) ||
+ pbuf->mcinfo_alloc_flg)
+ return;
+ pbuf->mcinfo_adr_v = (unsigned short *)dim_vmap(pbuf->mcinfo_adr,
+ bsize,
+ &pbuf->mcinfo_alloc_flg);
+ if (!pbuf->mcinfo_adr_v)
+ PR_ERR("%s:%d\n", __func__, pbuf->index);
+ else
+ PR_INF("mcinfo v [%d], ok\n", pbuf->index);
+}
+
+/* Release the mcinfo CPU mapping if dim_mcinfo_v_alloc() created one. */
+void dim_mcinfo_v_release(struct di_buf_s *pbuf)
+{
+ if (pbuf->mcinfo_alloc_flg) {
+ dim_unmap_phyaddr((u8 *)pbuf->mcinfo_adr_v);
+ pbuf->mcinfo_alloc_flg = false;
+ PR_INF("%s [%d], ok\n", __func__, pbuf->index);
+ }
+}
+
/********************************************
* mem
*******************************************/
+#ifdef CONFIG_CMA
+#define TVP_MEM_PAGES 0xffff
+static bool mm_codec_alloc(const char *owner, size_t count,
+ int cma_mode,
+ struct dim_mm_s *o)
+{
+ int flags = 0;
+ bool istvp = false;
+
+ if (codec_mm_video_tvp_enabled()) {
+ istvp = true;
+ flags |= CODEC_MM_FLAGS_TVP;
+ } else {
+ flags |= CODEC_MM_FLAGS_RESERVED | CODEC_MM_FLAGS_CPU;
+ }
+ if (cma_mode == 4 && !istvp)
+ flags = CODEC_MM_FLAGS_CMA_FIRST |
+ CODEC_MM_FLAGS_CPU;
+ o->addr = codec_mm_alloc_for_dma(owner,
+ count,
+ 0,
+ flags);
+ if (o->addr == 0) {
+ PR_ERR("%s: failed\n", __func__);
+ return false;
+ }
+ if (istvp)
+ o->ppage = (struct page *)TVP_MEM_PAGES;
+ else
+ o->ppage = codec_mm_phys_to_virt(o->addr);
+ return true;
+}
+
+/*
+ * Allocate `count` pages from the device's CMA region.
+ * Fills o->ppage and the derived physical address on success.
+ */
+static bool mm_cma_alloc(struct device *dev, size_t count,
+ struct dim_mm_s *o)
+{
+ o->ppage = dma_alloc_from_contiguous(dev, count, 0);
+ if (o->ppage) {
+ o->addr = page_to_phys(o->ppage);
+ return true;
+ }
+ PR_ERR("%s: failed\n", __func__);
+ return false;
+}
+
+/*
+ * Allocate `count` pages for DI buffers, dispatching on cma_mode:
+ * modes 3/4 use the codec_mm allocator, everything else plain CMA.
+ */
+bool dim_mm_alloc(int cma_mode, size_t count, struct dim_mm_s *o)
+{
+ struct di_dev_s *de_devp = get_dim_de_devp();
+ bool ret;
+
+ if (cma_mode == 3 || cma_mode == 4)
+ ret = mm_codec_alloc(DEVICE_NAME,
+ count,
+ cma_mode,
+ o);
+ else
+ ret = mm_cma_alloc(&de_devp->pdev->dev, count, o);
+ return ret;
+}
+
+/*
+ * Free memory obtained via dim_mm_alloc(), mirroring its dispatch.
+ * NOTE(review): the codec_mm path ignores codec_mm_free_for_dma()'s
+ * result and always reports true — confirm that is acceptable.
+ */
+bool dim_mm_release(int cma_mode,
+ struct page *pages,
+ int count,
+ unsigned long addr)
+{
+ struct di_dev_s *de_devp = get_dim_de_devp();
+ bool ret = true;
+
+ if (cma_mode == 3 || cma_mode == 4)
+ codec_mm_free_for_dma(DEVICE_NAME, addr);
+ else
+ ret = dma_release_from_contiguous(&de_devp->pdev->dev,
+ pages,
+ count);
+ return ret;
+}
+
+/*
+ * Allocate the whole DI memory pool in one block (mmt->mem_size bytes)
+ * and record start/pages in the total-mm state. Also (re)inits the
+ * nr-downscale buffer when not in reserved-memory mode.
+ * Returns 1 on success, 0 on failure.
+ */
+unsigned int dim_cma_alloc_total(struct di_dev_s *de_devp)
+{
+ struct dim_mm_t_s *mmt = dim_mmt_get();
+ struct dim_mm_s omm;
+ bool ret;
+
+ ret = dim_mm_alloc(cfgg(mem_flg),
+ mmt->mem_size >> PAGE_SHIFT,
+ &omm);
+ if (!ret) /*failed*/
+ return 0;
+ mmt->mem_start = omm.addr;
+ mmt->total_pages = omm.ppage;
+ if (cfgnq(mem_flg, eDI_MEM_M_rev) && de_devp->nrds_enable)
+ dim_nr_ds_buf_init(cfgg(mem_flg), 0, &de_devp->pdev->dev);
+ return 1;
+}
+
+/*
+ * Release the single large pool obtained by dim_cma_alloc_total()
+ * and reset the total-mm bookkeeping. Returns true only when the
+ * underlying release succeeded.
+ */
+static bool dim_cma_release_total(void)
+{
+ struct dim_mm_t_s *mmt = dim_mmt_get();
+ bool ret = false;
+ bool lret = false;
+
+ if (!mmt) {
+ PR_ERR("%s:mmt is null\n", __func__);
+ return lret;
+ }
+ ret = dim_mm_release(cfgg(mem_flg), mmt->total_pages,
+ mmt->mem_size >> PAGE_SHIFT,
+ mmt->mem_start);
+ if (ret) {
+ mmt->total_pages = NULL;
+ mmt->mem_start = 0;
+ mmt->mem_size = 0;
+ lret = true;
+ } else {
+ PR_ERR("%s:fail.\n", __func__);
+ }
+ return lret;
+}
+
+/*
+ * Allocate one CMA block per buffer on the channel's LOCAL_FREE queue
+ * (pre-processing buffers). In buf_alloc_mode 0 the single block is
+ * carved into nr/mtn/count (+mc vectors/info when mc mem is enabled)
+ * sub-areas at fixed offsets. Returns 1 on success, 0 on the first
+ * allocation failure (earlier allocations are NOT rolled back here;
+ * presumably the caller releases via di_cma_release() — confirm).
+ */
+static unsigned int di_cma_alloc(struct di_dev_s *devp, unsigned int channel)
+{
+ unsigned int start_time, end_time, delta_time;
+ struct di_buf_s *buf_p = NULL;
+ int itmp, alloc_cnt = 0;
+ struct di_dev_s *de_devp = get_dim_de_devp();
+ struct di_mm_s *mm = dim_mm_get(channel);
+ bool aret;
+ struct dim_mm_s omm;
+
+ start_time = jiffies_to_msecs(jiffies);
+ queue_for_each_entry(buf_p, channel, QUEUE_LOCAL_FREE, list) {
+ if (buf_p->pages) {
+ PR_ERR("1:%s:buf[%d] page:0x%p alloced skip\n",
+ __func__, buf_p->index, buf_p->pages);
+ continue;
+ }
+ aret = dim_mm_alloc(cfgg(mem_flg),
+ mm->cfg.size_local >> PAGE_SHIFT,
+ &omm);
+ if (!aret) {
+ buf_p->pages = NULL;
+ PR_ERR("2:%s: alloc failed %d fail.\n",
+ __func__,
+ buf_p->index);
+ return 0;
+ }
+ buf_p->pages = omm.ppage;
+ buf_p->nr_adr = omm.addr;
+ alloc_cnt++;
+ mm->sts.num_local++;
+ dbg_mem("CMA allocate buf[%d]page:0x%p\n",
+ buf_p->index, buf_p->pages);
+ dbg_mem(" addr 0x%lx ok.\n", buf_p->nr_adr);
+ if (mm->cfg.buf_alloc_mode == 0) {
+ /* carve sub-areas out of the single allocation */
+ buf_p->mtn_adr = buf_p->nr_adr +
+ mm->cfg.nr_size;
+ buf_p->cnt_adr = buf_p->nr_adr +
+ mm->cfg.nr_size +
+ mm->cfg.mtn_size;
+ if (dim_get_mcmem_alloc()) {
+ buf_p->mcvec_adr = buf_p->nr_adr +
+ mm->cfg.nr_size +
+ mm->cfg.mtn_size +
+ mm->cfg.count_size;
+ buf_p->mcinfo_adr =
+ buf_p->nr_adr +
+ mm->cfg.nr_size +
+ mm->cfg.mtn_size +
+ mm->cfg.count_size +
+ mm->cfg.mv_size;
+ dim_mcinfo_v_alloc(buf_p, mm->cfg.mcinfo_size);
+ }
+ }
+ }
+ PR_INF("%s:ch[%d] num_local[%d]:[%d]\n", __func__,
+ channel, mm->sts.num_local, alloc_cnt);
+ if (cfgnq(mem_flg, eDI_MEM_M_rev) && de_devp->nrds_enable)
+ dim_nr_ds_buf_init(cfgg(mem_flg), 0, &de_devp->pdev->dev);
+ end_time = jiffies_to_msecs(jiffies);
+ delta_time = end_time - start_time;
+ PR_INF("%s:ch[%d] use %u ms(%u~%u)\n",
+ __func__,
+ channel,
+ delta_time, start_time, end_time);
+ return 1;
+}
+
+/*
+ * Allocate CMA memory for every buffer on the POST_FREE queue.
+ * Only runs when post write-back is enabled and supported; buffers
+ * that already have pages are skipped. Returns 1 on success, 0 on
+ * the first allocation failure.
+ */
+static unsigned int dpst_cma_alloc(struct di_dev_s *devp, unsigned int channel)
+{
+ struct di_buf_s *buf_p = NULL;
+ int itmp, alloc_cnt = 0;
+ unsigned int tmpa[MAX_FIFO_SIZE];
+ unsigned int psize;
+ struct di_mm_s *mm = dim_mm_get(channel);
+ bool aret;
+ struct dim_mm_s omm;
+ u64 time1, time2;
+
+ time1 = cur_to_usecs();
+ if (dimp_get(eDI_MP_post_wr_en) && dimp_get(eDI_MP_post_wr_support)) {
+ di_que_list(channel, QUE_POST_FREE, &tmpa[0], &psize);
+ for (itmp = 0; itmp < psize; itmp++) {
+ buf_p = pw_qindex_2_buf(channel, tmpa[itmp]);
+ if (buf_p->pages) {
+ dbg_mem("3:%s:buf[%d] page:0x%p skip\n",
+ __func__,
+ buf_p->index, buf_p->pages);
+ continue;
+ }
+ aret = dim_mm_alloc(cfgg(mem_flg),
+ mm->cfg.size_post >> PAGE_SHIFT,
+ &omm);
+ if (!aret) {
+ buf_p->pages = NULL;
+ PR_ERR("4:%s: buf[%d] fail.\n", __func__,
+ buf_p->index);
+ return 0;
+ }
+ buf_p->pages = omm.ppage;
+ buf_p->nr_adr = omm.addr;
+ mm->sts.num_post++;
+ alloc_cnt++;
+ dbg_mem("%s:pbuf[%d]page:0x%p\n",
+ __func__,
+ buf_p->index, buf_p->pages);
+ dbg_mem(" addr 0x%lx ok.\n", buf_p->nr_adr);
+ }
+ PR_INF("%s:num_pst[%d]:[%d]\n", __func__, mm->sts.num_post,
+ alloc_cnt);
+ }
+ time2 = cur_to_usecs();
+ PR_INF("%s:ch[%d] use %u us\n",
+ __func__,
+ channel,
+ (unsigned int)(time2 - time1));
+ return 1;
+}
+
+/*
+ * Release the CMA memory of every local (pre) buffer on the channel,
+ * unmapping any mcinfo vmap first.
+ * NOTE(review): `ii` is set to USED_LOCAL_BUF_MAX unconditionally, so
+ * the `ii >= USED_LOCAL_BUF_MAX` test is always true — dead remnant
+ * of an older keep-buffer check; candidate for removal.
+ */
+static void di_cma_release(struct di_dev_s *devp, unsigned int channel)
+{
+ unsigned int i, ii, rels_cnt = 0, start_time, end_time, delta_time;
+ struct di_buf_s *buf_p;
+ struct di_buf_s *pbuf_local = get_buf_local(channel);
+ struct di_dev_s *de_devp = get_dim_de_devp();
+ bool ret;
+ struct di_mm_s *mm = dim_mm_get(channel);
+
+ start_time = jiffies_to_msecs(jiffies);
+ for (i = 0; (i < mm->cfg.num_local); i++) {
+ buf_p = &pbuf_local[i];
+ ii = USED_LOCAL_BUF_MAX;
+ if ((ii >= USED_LOCAL_BUF_MAX) &&
+ (buf_p->pages)) {
+ dim_mcinfo_v_release(buf_p);
+ ret = dim_mm_release(cfgg(mem_flg),
+ buf_p->pages,
+ mm->cfg.size_local >> PAGE_SHIFT,
+ buf_p->nr_adr);
+ if (ret) {
+ buf_p->pages = NULL;
+ mm->sts.num_local--;
+ rels_cnt++;
+ dbg_mem("release buf[%d] ok.\n", i);
+ } else {
+ PR_ERR("%s:release buf[%d] fail.\n",
+ __func__, i);
+ }
+ } else {
+ if (!IS_ERR_OR_NULL(buf_p->pages)) {
+ dbg_mem("buf[%d] page:0x%p no release.\n",
+ buf_p->index, buf_p->pages);
+ }
+ }
+ }
+ if (de_devp->nrds_enable)
+ dim_nr_ds_buf_uninit(cfgg(mem_flg), &de_devp->pdev->dev);
+ if (mm->sts.num_local < 0 || mm->sts.num_post < 0)
+ PR_ERR("%s:mm:nub_local=%d,nub_post=%d\n",
+ __func__,
+ mm->sts.num_local,
+ mm->sts.num_post);
+ end_time = jiffies_to_msecs(jiffies);
+ delta_time = end_time - start_time;
+ PR_INF("%s:ch[%d] release %u buffer use %u ms(%u~%u)\n",
+ __func__,
+ channel,
+ rels_cnt, delta_time, start_time, end_time);
+}
+
+/*
+ * Release the CMA memory of the channel's post buffers, skipping any
+ * buffer still held on the POST_KEEP queue (kept across unreg so its
+ * frame can still be displayed).
+ */
+static void dpst_cma_release(struct di_dev_s *devp, unsigned int ch)
+{
+ unsigned int i, rels_cnt = 0;
+ struct di_buf_s *buf_p;
+ struct di_buf_s *pbuf_post = get_buf_post(ch);
+ bool ret;
+ struct di_mm_s *mm = dim_mm_get(ch);
+ u64 time1, time2;
+
+ time1 = cur_to_usecs();
+ if (dimp_get(eDI_MP_post_wr_en) && dimp_get(eDI_MP_post_wr_support)) {
+ for (i = 0; i < mm->cfg.num_post; i++) {
+ buf_p = &pbuf_post[i];
+ if (di_que_is_in_que(ch, QUE_POST_KEEP, buf_p))
+ continue;
+ if (!buf_p->pages) {
+ PR_INF("2:%s:post buf[%d] is null\n",
+ __func__, i);
+ continue;
+ }
+ ret = dim_mm_release(cfgg(mem_flg),
+ buf_p->pages,
+ mm->cfg.size_post >> PAGE_SHIFT,
+ buf_p->nr_adr);
+ if (ret) {
+ buf_p->pages = NULL;
+ mm->sts.num_post--;
+ rels_cnt++;
+ dbg_mem("post buf[%d] ok.\n", i);
+ } else {
+ PR_ERR("%s:post buf[%d]\n", __func__, i);
+ }
+ }
+ }
+ if (mm->sts.num_post < 0)
+ PR_ERR("%s:mm:nub_post=%d\n",
+ __func__,
+ mm->sts.num_post);
+ time2 = cur_to_usecs();
+ PR_INF("%s:ch[%d] %u buffer use %u us\n",
+ __func__,
+ ch,
+ rels_cnt, (unsigned int)(time2 - time1));
+}
+#endif
+/*
+ * Per-channel allocation entry point: pre (local) buffers then post
+ * buffers. Compiles to a no-op returning false without CONFIG_CMA.
+ */
+bool dim_cma_top_alloc(unsigned int ch)
+{
+ struct di_dev_s *de_devp = get_dim_de_devp();
+ bool ret = false;
+#ifdef CONFIG_CMA
+ if (di_cma_alloc(de_devp, ch) &&
+ dpst_cma_alloc(de_devp, ch))
+ ret = true;
+#endif
+ return ret;
+}
+
+/* Per-channel release entry point: pre then post buffers. */
+bool dim_cma_top_release(unsigned int ch)
+{
+ struct di_dev_s *de_devp = get_dim_de_devp();
+#ifdef CONFIG_CMA
+ di_cma_release(de_devp, ch);
+ dpst_cma_release(de_devp, ch);
+#endif
+ return true;
+}
+
+/* CONFIG_CMA-safe wrapper around dim_mm_alloc() for external callers. */
+bool dim_mm_alloc_api(int cma_mode, size_t count, struct dim_mm_s *o)
+{
+ bool ret = false;
+#ifdef CONFIG_CMA
+ ret = dim_mm_alloc(cma_mode, count, o);
+#endif
+ return ret;
+}
+
+/* CONFIG_CMA-safe wrapper around dim_mm_release() for external callers. */
+bool dim_mm_release_api(int cma_mode,
+ struct page *pages,
+ int count,
+ unsigned long addr)
+{
+ bool ret = false;
+#ifdef CONFIG_CMA
+ ret = dim_mm_release(cma_mode, pages, count, addr);
+#endif
+ return ret;
+}
+
+/*
+ * Validate and adopt the reserved-memory region (eDI_MEM_M_rev mode):
+ * read start/size from dil_get_rev_mem(), split it evenly across all
+ * channels, and mark the device's mem_flg. Idempotent — returns early
+ * if reserved memory is already configured. Returns false when no
+ * device/mmt or the reserved start address is 0.
+ */
+bool dim_rev_mem_check(void)
+{
+ struct di_dev_s *di_devp = get_dim_de_devp();
+ struct dim_mm_t_s *mmt = dim_mmt_get();
+ unsigned int ch;
+ unsigned int o_size;
+ unsigned long rmstart;
+ unsigned int rmsize;
+ unsigned int flg_map;
+
+ if (!di_devp) {
+ PR_ERR("%s:no dev\n", __func__);
+ return false;
+ }
+ if (!mmt) {
+ PR_ERR("%s:mmt\n", __func__);
+ return false;
+ }
+ if (cfgeq(mem_flg, eDI_MEM_M_rev) && di_devp->mem_flg)
+ return true;
+ PR_INF("%s\n", __func__);
+ dil_get_rev_mem(&rmstart, &rmsize);
+ dil_get_flg(&flg_map);
+ if (!rmstart) {
+ PR_ERR("%s:reserved mem start add is 0\n", __func__);
+ return false;
+ }
+ mmt->mem_start = rmstart;
+ mmt->mem_size = rmsize;
+ if (!flg_map)
+ di_devp->flags |= DI_MAP_FLAG;
+ /* split the reserved region evenly between channels */
+ o_size = rmsize / DI_CHANNEL_NUB;
+ for (ch = 0; ch < DI_CHANNEL_NUB; ch++) {
+ di_set_mem_info(ch,
+ mmt->mem_start + (o_size * ch), o_size);
+ PR_INF("rmem:ch[%d]:start:0x%lx, size:%uB\n",
+ ch,
+ (mmt->mem_start + (o_size * ch)),
+ o_size);
+ }
+ PR_INF("rmem:0x%lx, size %uMB.\n",
+ mmt->mem_start, (mmt->mem_size >> 20));
+ di_devp->mem_flg = true;
+ return true;
+}
+
+/* Driver-teardown hook: free the single large pool, if CMA is built. */
+static void dim_mem_remove(void)
+{
+#ifdef CONFIG_CMA
+ dim_cma_release_total();
+#endif
+}
+
+/*
+ * Probe-time memory setup, dispatched on the mem_flg cfg:
+ *  - rev:      adopt the DT reserved region and mark all channels ready;
+ *  - cma:      record the device CMA size (per-buffer alloc happens later);
+ *  - cma_all:  additionally grab the whole pool up front;
+ *  - codec_a/b: use codec_mm; sizes <= 8MB are bumped to 40MB and the
+ *    mode is forced to codec_b (NOTE(review): "need check??" left by
+ *    the author — threshold rationale unconfirmed).
+ * An out-of-range mem_flg is reset to the cma default.
+ */
+static void dim_mem_prob(void)
+{
+ unsigned int mem_flg = cfgg(mem_flg);
+ struct di_dev_s *di_devp = get_dim_de_devp();
+ struct dim_mm_t_s *mmt = dim_mmt_get();
+
+ if (mem_flg >= eDI_MEM_M_max) {
+ cfgs(mem_flg, eDI_MEM_M_cma);
+ PR_ERR("%s:mem_flg overflow[%d], set to def\n",
+ __func__, mem_flg);
+ mem_flg = cfgg(mem_flg);
+ }
+ switch (mem_flg) {
+ case eDI_MEM_M_rev:
+ dim_rev_mem_check();
+ dip_cma_st_set_ready_all();
+ break;
+#ifdef CONFIG_CMA
+ case eDI_MEM_M_cma:
+ di_devp->flags |= DI_MAP_FLAG;
+ mmt->mem_size
+ = dma_get_cma_size_int_byte(&di_devp->pdev->dev);
+ PR_INF("mem size from dts:0x%x\n", mmt->mem_size);
+ break;
+ case eDI_MEM_M_cma_all:
+ di_devp->flags |= DI_MAP_FLAG;
+ mmt->mem_size
+ = dma_get_cma_size_int_byte(&di_devp->pdev->dev);
+ PR_INF("mem size from dts:0x%x\n", mmt->mem_size);
+ if (dim_cma_alloc_total(di_devp))
+ dip_cma_st_set_ready_all();
+ break;
+ case eDI_MEM_M_codec_a:
+ case eDI_MEM_M_codec_b:
+ di_devp->flags |= DI_MAP_FLAG;
+ if (mmt->mem_size <= 0x800000) {/*need check??*/
+ mmt->mem_size = 0x2800000;
+ if (mem_flg != eDI_MEM_M_codec_a)
+ cfgs(mem_flg, eDI_MEM_M_codec_b);
+ }
+ break;
+#endif
+ case eDI_MEM_M_max:
+ default:
+ break;
+ }
+}
/********************************************/
static ssize_t
struct device_attribute *attr, char *buff)
{
ssize_t len = 0;
- struct di_dev_s *de_devp = get_dim_de_devp();
+ /*struct di_dev_s *de_devp = get_dim_de_devp();*/
+ struct dim_mm_t_s *mmt = dim_mmt_get();
+ if (!mmt)
+ return 0;
len = sprintf(buff, "segment DI:%lx - %lx (size:0x%x)\n",
- de_devp->mem_start,
- de_devp->mem_start + de_devp->mem_size - 1,
- de_devp->mem_size);
+ mmt->mem_start,
+ mmt->mem_start + mmt->mem_size - 1,
+ mmt->mem_size);
return len;
}
#endif
};
-static int dim_rev_mem(struct di_dev_s *di_devp)
-{
- unsigned int ch;
- unsigned int o_size;
- unsigned long rmstart;
- unsigned int rmsize;
- unsigned int flg_map;
-
- if (di_devp && !di_devp->flag_cma) {
- dil_get_rev_mem(&rmstart, &rmsize);
- dil_get_flg(&flg_map);
- if (!rmstart) {
- PR_ERR("%s:reserved mem start add is 0\n", __func__);
- return -1;
- }
- di_devp->mem_start = rmstart;
- di_devp->mem_size = rmsize;
-
- if (!flg_map)
- di_devp->flags |= DI_MAP_FLAG;
-
- o_size = rmsize / DI_CHANNEL_NUB;
-
- for (ch = 0; ch < DI_CHANNEL_NUB; ch++) {
- di_set_mem_info(ch,
- di_devp->mem_start + (o_size * ch),
- o_size);
- PR_INF("rmem:ch[%d]:start:0x%lx, size:%uB\n",
- ch,
- (di_devp->mem_start + (o_size * ch)),
- o_size);
- }
- PR_INF("rmem:0x%lx, size %uMB.\n",
- di_devp->mem_start, (di_devp->mem_size >> 20));
-
- di_devp->mem_flg = true;
- return 0;
- }
- PR_INF("%s:no dev or no rev mem\n", __func__);
- return -1;
-}
-
-bool dim_rev_mem_check(void)/*tmp*/
-{
- di_dev_t *di_devp = get_dim_de_devp();
-
- if (di_devp && !di_devp->flag_cma && di_devp->mem_flg)
- return true;
-
- if (!di_devp) {
- PR_ERR("%s:no dev\n", __func__);
- return false;
- }
- PR_INF("%s\n", __func__);
- dim_rev_mem(di_devp);
-
- return true;
-}
#define ARY_MATCH (1)
#ifdef ARY_MATCH
}
dev_set_drvdata(di_devp->dev, di_devp);
platform_set_drvdata(pdev, di_devp);
+ di_devp->pdev = pdev;
#ifdef ARY_MATCH
/************************/
if (ret != 0)
PR_INF("no reserved mem.\n");
- ret = of_property_read_u32(pdev->dev.of_node,
- "flag_cma", &di_devp->flag_cma);
- if (ret)
- PR_ERR("DI-%s: get flag_cma error.\n", __func__);
- else
- PR_INF("flag_cma=%d\n", di_devp->flag_cma);
+ di_cfg_top_dts();
- dim_rev_mem(di_devp);
+ /* move to dim_mem_prob dim_rev_mem(di_devp);*/
ret = of_property_read_u32(pdev->dev.of_node,
"nrds-enable", &di_devp->nrds_enable);
/*di pre h scaling down :sm1 tm2*/
/*pre_hsc_down_en;*/
- di_devp->h_sc_down_en = di_mp_uit_get(eDI_MP_pre_hsc_down_en);
+ di_devp->h_sc_down_en = dimp_get(eDI_MP_pre_hsc_down_en);
- if (di_devp->flag_cma >= 1) {
-#ifdef CONFIG_CMA
- di_devp->pdev = pdev;
- di_devp->flags |= DI_MAP_FLAG;
- #if 0
- di_devp->mem_size = dma_get_cma_size_int_byte(&pdev->dev);
- #else
- if (di_devp->flag_cma == 1 ||
- di_devp->flag_cma == 2) {
- di_devp->mem_size
- = dma_get_cma_size_int_byte(&pdev->dev);
- PR_INF("mem size from dts:0x%x\n", di_devp->mem_size);
- }
+ di_devp->pps_enable = dimp_get(eDI_MP_pps_en);
+// PR_INF("pps2:[%d]\n", di_devp->h_sc_down_en);
- if (di_devp->mem_size <= 0x800000) {/*need check??*/
- di_devp->mem_size = 0x2800000;
/*(flag_cma ? 3) reserved in*/
/*codec mm : cma in codec mm*/
- if (di_devp->flag_cma != 3) {
- /*no di cma, try use*/
- /*cma from codec mm*/
- di_devp->flag_cma = 4;
- }
- }
- #endif
- pr_info("DI: CMA size 0x%x.\n", di_devp->mem_size);
- if (di_devp->flag_cma == 2) {
- if (dim_cma_alloc_total(di_devp))
- dip_cma_st_set_ready_all();
- }
-#endif
- } else {
- dip_cma_st_set_ready_all();
- }
+ dim_mem_prob();
+
/* mutex_init(&di_devp->cma_mutex); */
INIT_LIST_HEAD(&di_devp->pq_table_list);
atomic_set(&di_devp->pq_flag, 0);
di_devp->pre_irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
- pr_info("pre_irq:%d\n",
- di_devp->pre_irq);
+ PR_INF("pre_irq:%d\n", di_devp->pre_irq);
di_devp->post_irq = irq_of_parse_and_map(pdev->dev.of_node, 1);
- pr_info("post_irq:%d\n",
- di_devp->post_irq);
+ PR_INF("post_irq:%d\n", di_devp->post_irq);
di_pr_info("%s allocate rdma channel %d.\n", __func__,
di_devp->rdma_handle);
dim_get_vpu_clkb(&pdev->dev, di_devp);
#ifdef CLK_TREE_SUPPORT
clk_prepare_enable(di_devp->vpu_clkb);
- pr_info("DI:enable vpu clkb.\n");
+ PR_INF("enable vpu clkb.\n");
#else
aml_write_hiubus(HHI_VPU_CLKB_CNTL, 0x1000100);
#endif
}
di_devp->flags &= (~DI_SUSPEND_FLAG);
- ret = of_property_read_u32(pdev->dev.of_node,
- "buffer-size", &di_devp->buffer_size);
- if (ret)
- PR_ERR("DI-%s: get buffer size error.\n", __func__);
/* set flag to indicate that post_wr is supportted */
ret = of_property_read_u32(pdev->dev.of_node,
get_ops_nr()->nr_drv_uninit(di_devp->dev);
cdev_del(&di_devp->cdev);
- if (di_devp->flag_cma == 2) {
- if (dma_release_from_contiguous(&pdev->dev,
- di_devp->total_pages,
- di_devp->mem_size >> PAGE_SHIFT)) {
- di_devp->total_pages = NULL;
- di_devp->mem_start = 0;
- pr_dbg("DI CMA total release ok.\n");
- } else {
- pr_dbg("DI CMA total release fail.\n");
- }
- if (di_pdev->nrds_enable) {
- dim_nr_ds_buf_uninit(di_pdev->flag_cma,
- &pdev->dev);
- }
- }
+ dim_mem_remove();
+
device_destroy(di_devp->pclss, di_devp->devno);
/* free drvdata */
+/*
+ * Suspend-path teardown.  Per-channel unreg/irq handling was removed
+ * here (the old get_current_channel()-based code below); only the CMA
+ * worker shutdown remains.
+ */
static void di_clear_for_suspend(struct di_dev_s *di_devp)
{
- unsigned int channel = get_current_channel(); /*tmp*/
-
pr_info("%s\n", __func__);
- di_vframe_unreg(channel);/*have flag*/
-
- if (dip_chst_get(channel) != EDI_TOP_STATE_IDLE)
- dim_unreg_process_irq(channel);
-
dip_cma_close();
pr_info("%s end\n", __func__);
}
#define DEVICE_NAME "di_multi"
#define CLASS_NAME "deinterlace"
+/*
+ * Map a physical address range into kernel virtual space.  Low memory is
+ * returned directly via phys_to_virt(); high memory is vmap()ed with a
+ * write-combine pgprot, in which case *bflg is set true and the caller
+ * must balance with dim_unmap_phyaddr().
+ */
+u8 *dim_vmap(ulong addr, u32 size, bool *bflg);
+void dim_unmap_phyaddr(u8 *vaddr);
+/* mcinfo buffer alloc/release for one di_buf — see di_sys.c for details */
+void dim_mcinfo_v_alloc(struct di_buf_s *pbuf, unsigned int bsize);
+void dim_mcinfo_v_release(struct di_buf_s *pbuf);
+/* Result of a dim_mm_alloc_api() call: the backing page and its address. */
+struct dim_mm_s {
+ struct page *ppage;
+ unsigned long addr;
+};
+
+/* allocate/release 'count' pages via the mode selected by cma_mode */
+bool dim_mm_alloc_api(int cma_mode, size_t count, struct dim_mm_s *o);
+bool dim_mm_release_api(int cma_mode,
+ struct page *pages,
+ int count,
+ unsigned long addr);
+/* per-channel CMA alloc/release (channel-split memory model) */
+bool dim_cma_top_alloc(unsigned int ch);
+bool dim_cma_top_release(unsigned int ch);
bool dim_rev_mem_check(void);
#endif /*__DI_SYS_H__*/
#include "deinterlace.h"
#include "di_data_l.h"
+#include "di_sys.h"
#include "di_api.h"
static struct nr_ds_s nrds_dev;
else
PR_ERR("DI: alloc nr ds mem error.\n");
#else
- ret = dim_mm_alloc(cma_flag, NR_DS_PAGE_NUM, &omm);
+ ret = dim_mm_alloc_api(cma_flag, NR_DS_PAGE_NUM, &omm);
if (ret) {
nrds_dev.nrds_pages = omm.ppage;
nrds_dev.nrds_addr = omm.addr;
nrds_dev.nrds_addr = 0;
} else {
if (nrds_dev.nrds_pages) {
- #if 0
- dma_release_from_contiguous(dev,
- nrds_dev.nrds_pages,
- NR_DS_PAGE_NUM);
- #else
- dim_mm_release(cma_flag,
- nrds_dev.nrds_pages,
- NR_DS_PAGE_NUM,
- nrds_dev.nrds_addr);
- #endif
+ dim_mm_release_api(cma_flag,
+ nrds_dev.nrds_pages,
+ NR_DS_PAGE_NUM,
+ nrds_dev.nrds_addr);
nrds_dev.nrds_addr = 0;
nrds_dev.nrds_pages = NULL;
} else
- pr_info("DI: no release nr ds mem.\n");
+ PR_INF("no release nr ds mem.\n");
}
for (i = 0; i < NR_DS_BUF_NUM; i++)
nrds_dev.buf[i] = 0;
PR_ERR("%s alloc nrds canvas error.\n", __func__);
return;
}
- pr_info("%s alloc nrds canvas %u.\n",
- __func__, nrds_dev.canvas_idx);
+ PR_INF("%s alloc nrds canvas %u.\n", __func__, nrds_dev.canvas_idx);
}
/*
{
*addr = nrds_dev.nrds_addr;
*size = NR_DS_BUF_SIZE;
- pr_info("%s addr 0x%lx, size 0x%lx.\n",
- __func__, *addr, *size);
+ PR_INF("%s addr 0x%lx, size 0x%lx.\n", __func__, *addr, *size);
}
/*