static int vh264_vf_states(struct vframe_states *states, void *);
static int vh264_event_cb(int type, void *data, void *private_data);
static void vh264_work(struct work_struct *work);
+static void vh264_timeout_work(struct work_struct *work);
static void vh264_notify_work(struct work_struct *work);
#ifdef MH264_USERDATA_ENABLE
static void user_data_ready_notify_work(struct work_struct *work);
bool new_iframe_flag;
bool ref_err_flush_dpb_flag;
unsigned int first_i_policy;
+ u32 tfn_cnt;
+ u64 tfn_ns;
};
static u32 again_threshold;
+static void timeout_process(struct vdec_h264_hw_s *hw);
static void dump_bufspec(struct vdec_h264_hw_s *hw,
const char *caller);
static void h264_reconfig(struct vdec_h264_hw_s *hw);
unsigned int dec_dpb_status = p_H264_Dpb->dec_dpb_status;
u32 debug_tag;
int ret;
+	/*
+	 * Abnormal-interrupt guard: if 10 ISR-thread invocations land
+	 * within 10ms the hardware is looping; force timeout handling.
+	 */
+	if (++hw->tfn_cnt == 1) {
+		hw->tfn_ns = local_clock();
+	} else if (hw->tfn_cnt >= 10) {
+		u64 tenth_ns = local_clock();
+		/*
+		 * Here borrow the variable debug_tag for use.
+		 * NOTE(review): u64 delta truncated to u32 -- assumes the
+		 * 10-call window never spans ~4.29s of local_clock();
+		 * only small deltas are meaningful here.
+		 */
+		debug_tag = tenth_ns - hw->tfn_ns;
+		hw->tfn_cnt = 1;
+		/*
+		 * Bug fix: restart the measurement window timestamp.
+		 * (Was "hw->tfn_cnt = tenth_ns", which clobbered the
+		 * counter with a nanosecond value and left tfn_ns stale,
+		 * so the 10ms check could never work as intended.)
+		 */
+		hw->tfn_ns = tenth_ns;
+		if (debug_tag <= 10000000) {
+			pr_err("Within 10ms 10 vh264_isr_thread_fn. Abnormal!\n");
+			timeout_process(hw);
+			return IRQ_HANDLED;
+		}
+	}
if (dec_dpb_status == H264_CONFIG_REQUEST) {
#if 1
struct vdec_s *vdec = hw_to_vdec(hw);
struct h264_dpb_stru *p_H264_Dpb = &hw->dpb;
+	/*
+	 * If the regular vh264_work arrives at this very timeout point,
+	 * let it handle the scenario instead.
+	 */
+ if (work_pending(&hw->work))
+ return;
+
hw->timeout_num++;
amvdec_stop();
vdec->mc_loaded = 0;
release_cur_decoding_buf(hw);
hw->dec_result = DEC_RESULT_DONE;
hw->data_flag |= ERROR_FLAG;
- vdec_schedule_work(&hw->work);
+
+ if (work_pending(&hw->work))
+ return;
+ vdec_schedule_work(&hw->timeout_work);
}
static void dump_bufspec(struct vdec_h264_hw_s *hw,
if (hw->decode_timeout_count == 0)
{
reset_process_time(hw);
- vdec_schedule_work(&hw->timeout_work);
+ timeout_process(hw);
}
} else
start_process_time(hw);
if (hw->decode_timeout_count == 0)
{
reset_process_time(hw);
- vdec_schedule_work(&hw->timeout_work);
+ timeout_process(hw);
}
}
}
return;
}
-static void timeout_process_work(struct work_struct *work)
-{
- struct vdec_h264_hw_s *hw = container_of(work,
- struct vdec_h264_hw_s, timeout_work);
-
- timeout_process(hw);
-}
-
static s32 vh264_init(struct vdec_h264_hw_s *hw)
{
int size = -1;
vh264_local_init(hw);
INIT_WORK(&hw->work, vh264_work);
INIT_WORK(&hw->notify_work, vh264_notify_work);
- INIT_WORK(&hw->timeout_work, timeout_process_work);
+ INIT_WORK(&hw->timeout_work, vh264_timeout_work);
#ifdef MH264_USERDATA_ENABLE
INIT_WORK(&hw->user_data_ready_work, user_data_ready_notify_work);
#endif
#endif
-static void vh264_work(struct work_struct *work)
+static void vh264_work_implement(struct vdec_h264_hw_s *hw,
+ struct vdec_s *vdec, int from)
{
- struct vdec_h264_hw_s *hw = container_of(work,
- struct vdec_h264_hw_s, work);
- struct vdec_s *vdec = hw_to_vdec(hw);
/* finished decoding one frame or error,
* notify vdec core to switch context
*/
vdec_set_next_sched(vdec, vdec);
#endif
+ if (from == 1) {
+ /* This is a timeout work */
+ if (work_pending(&hw->work)) {
+ /*
+ * The vh264_work arrives at the last second,
+ * give it a chance to handle the scenario.
+ */
+ return;
+ }
+ }
+
/* mark itself has all HW resource released and input released */
if (vdec->parallel_dec == 1) {
if (hw->mmu_enable == 0)
hw->vdec_cb(hw_to_vdec(hw), hw->vdec_cb_arg);
}
+
+/*
+ * Normal bottom-half entry: recover the driver context from the
+ * embedded work item and run the shared handler with from == 0
+ * (i.e. this is the regular, non-timeout work path).
+ */
+static void vh264_work(struct work_struct *work)
+{
+	struct vdec_h264_hw_s *hw = container_of(work,
+		struct vdec_h264_hw_s, work);
+	struct vdec_s *vdec = hw_to_vdec(hw);
+
+	vh264_work_implement(hw, vdec, 0);
+}
+
+
+/*
+ * Timeout bottom-half entry, scheduled by timeout_process() instead of
+ * the normal work.  If the regular vh264_work is already pending, defer
+ * to it and do nothing here, since it releases the same resources.
+ * NOTE(review): work_pending() is re-checked inside
+ * vh264_work_implement() under from == 1; this check is an early
+ * bail-out, not a race-free guarantee.
+ */
+static void vh264_timeout_work(struct work_struct *work)
+{
+	struct vdec_h264_hw_s *hw = container_of(work,
+		struct vdec_h264_hw_s, timeout_work);
+	struct vdec_s *vdec = hw_to_vdec(hw);
+
+	if (work_pending(&hw->work))
+		return;
+
+	vh264_work_implement(hw, vdec, 1);
+}
+
static unsigned long run_ready(struct vdec_s *vdec, unsigned long mask)
{
bool ret = 0;
#define DEC_RESULT_FORCE_EXIT 10
static void vh265_work(struct work_struct *work);
+static void vh265_timeout_work(struct work_struct *work);
static void vh265_notify_work(struct work_struct *work);
#endif
struct vframe_chunk_s *chunk;
int dec_result;
struct work_struct work;
+ struct work_struct timeout_work;
struct work_struct notify_work;
struct work_struct set_clk_work;
/* timeout handle */
*/
INIT_WORK(&hevc->work, vh265_work);
+ INIT_WORK(&hevc->timeout_work, vh265_timeout_work);
hevc->fw = fw;
static void timeout_process(struct hevc_state_s *hevc)
{
+	/*
+	 * If the regular vh265_work arrives at this very timeout point,
+	 * let it handle the scenario instead.
+	 */
+ if (work_pending(&hevc->work))
+ return;
+
hevc->timeout_num++;
amhevc_stop();
read_decode_info(hevc);
hevc->decoding_pic = NULL;
hevc->dec_result = DEC_RESULT_DONE;
reset_process_time(hevc);
- vdec_schedule_work(&hevc->work);
+
+ if (work_pending(&hevc->work))
+ return;
+ vdec_schedule_work(&hevc->timeout_work);
}
#ifdef CONSTRAIN_MAX_BUF_NUM
cancel_work_sync(&hevc->notify_work);
cancel_work_sync(&hevc->set_clk_work);
cancel_work_sync(&hevc->work);
+ cancel_work_sync(&hevc->timeout_work);
uninit_mmu_buffers(hevc);
vfree(hevc->fw);
return;
}
-static void vh265_work(struct work_struct *work)
+static void vh265_work_implement(struct hevc_state_s *hevc,
+ struct vdec_s *vdec,int from)
{
- struct hevc_state_s *hevc = container_of(work,
- struct hevc_state_s, work);
- struct vdec_s *vdec = hw_to_vdec(hevc);
-
if (hevc->uninit_list) {
/*USE_BUF_BLOCK*/
uninit_pic_list(hevc);
vdec_set_next_sched(vdec, vdec);
#endif
+ if (from == 1) {
+ /* This is a timeout work */
+ if (work_pending(&hevc->work)) {
+ /*
+ * The vh265_work arrives at the last second,
+ * give it a chance to handle the scenario.
+ */
+ return;
+			//cancel_work_sync(&hevc->work);//reserved for future consideration
+ }
+ }
+
/* mark itself has all HW resource released and input released */
if (vdec->parallel_dec == 1)
vdec_core_finish_run(vdec, CORE_MASK_HEVC);
hevc->vdec_cb(hw_to_vdec(hevc), hevc->vdec_cb_arg);
}
+/*
+ * Normal bottom-half entry for the h265 decoder: resolve the hevc
+ * context from the embedded work item and run the shared handler with
+ * from == 0 (regular, non-timeout work path).
+ */
+static void vh265_work(struct work_struct *work)
+{
+	struct hevc_state_s *hevc = container_of(work,
+		struct hevc_state_s, work);
+	struct vdec_s *vdec = hw_to_vdec(hevc);
+
+	vh265_work_implement(hevc, vdec, 0);
+}
+
+/*
+ * Timeout bottom-half entry for the h265 decoder, scheduled by
+ * timeout_process().  If the regular vh265_work is already pending,
+ * defer to it (it performs the same cleanup).
+ * NOTE(review): work_pending() is re-checked inside
+ * vh265_work_implement() under from == 1; this is an early bail-out,
+ * not a race-free guarantee.
+ */
+static void vh265_timeout_work(struct work_struct *work)
+{
+	struct hevc_state_s *hevc = container_of(work,
+		struct hevc_state_s, timeout_work);
+	struct vdec_s *vdec = hw_to_vdec(hevc);
+
+	if (work_pending(&hevc->work))
+		return;
+	vh265_work_implement(hevc, vdec, 1);
+}
+
+
static int vh265_hw_ctx_restore(struct hevc_state_s *hevc)
{
/* new to do ... */
s32 refs[2];
int dec_result;
struct work_struct work;
+ struct work_struct timeout_work;
struct work_struct notify_work;
void (*vdec_cb)(struct vdec_s *, void *);
void *vdec_cb_arg;
}
if (hw->cur_ud_idx >= MAX_UD_RECORDS) {
- debug_print(DECODE_ID(hw), 0,
+ debug_print(DECODE_ID(hw), PRINT_FLAG_DEC_DETAIL,
"UD Records over: %d, skip it\n", MAX_UD_RECORDS);
WRITE_VREG(AV_SCRATCH_J, 0);
+ hw->cur_ud_idx = 0;
return;
}
prepare_display_buf(hw, &hw->pics[index]);
}
-static void vmpeg12_work(struct work_struct *work)
+static void vmpeg12_work_implement(struct vdec_mpeg12_hw_s *hw,
+ struct vdec_s *vdec, int from)
{
- struct vdec_mpeg12_hw_s *hw =
- container_of(work, struct vdec_mpeg12_hw_s, work);
- struct vdec_s *vdec = hw_to_vdec(hw);
-
if (hw->dec_result != DEC_RESULT_DONE)
debug_print(DECODE_ID(hw), PRINT_FLAG_RUN_FLOW,
"%s, result=%d, status=%d\n", __func__,
amvdec_stop();
hw->stat &= ~STAT_VDEC_RUN;
}
+
+	if (from == 1) {
+		/* This is the timeout work path. */
+		if (work_pending(&hw->work)) {
+			/* Typo fix in log text: "befor" -> "before". */
+			pr_err("timeout work return before finishing.");
+			/*
+			 * The regular vmpeg12_work arrived at the last
+			 * second; give it the chance to handle the
+			 * scenario.
+			 */
+			return;
+		}
+	}
+
/*disable mbox interrupt */
WRITE_VREG(ASSIST_MBOX1_MASK, 0);
wait_vmmpeg12_search_done(hw);
hw->vdec_cb(vdec, hw->vdec_cb_arg);
}
+/*
+ * Normal bottom-half entry for the mpeg12 decoder: resolve the hw
+ * context from the embedded work item and run the shared handler with
+ * from == 0 (regular, non-timeout work path).
+ */
+static void vmpeg12_work(struct work_struct *work)
+{
+	struct vdec_mpeg12_hw_s *hw =
+		container_of(work, struct vdec_mpeg12_hw_s, work);
+	struct vdec_s *vdec = hw_to_vdec(hw);
+
+	vmpeg12_work_implement(hw, vdec, 0);
+}
+/*
+ * Timeout bottom-half entry for the mpeg12 decoder, scheduled by
+ * timeout_process().  If the regular vmpeg12_work is already pending,
+ * defer to it (it performs the same cleanup).
+ * NOTE(review): work_pending() is re-checked inside
+ * vmpeg12_work_implement() under from == 1; this is an early bail-out,
+ * not a race-free guarantee.
+ */
+static void vmpeg12_timeout_work(struct work_struct *work)
+{
+	struct vdec_mpeg12_hw_s *hw =
+		container_of(work, struct vdec_mpeg12_hw_s, timeout_work);
+	struct vdec_s *vdec = hw_to_vdec(hw);
+
+	if (work_pending(&hw->work)) {
+		/* Typo fix in log text: "befor" -> "before". */
+		pr_err("timeout work return before executing.");
+		return;
+	}
+
+	vmpeg12_work_implement(hw, vdec, 1);
+}
+
static struct vframe_s *vmpeg_vf_peek(void *op_arg)
{
struct vframe_s *vf;
{
struct vdec_s *vdec = hw_to_vdec(hw);
+	/* The normal work is already queued; let it perform the cleanup. */
+	if (work_pending(&hw->work)) {
+		/* Typo fix in log text: "befor do" -> "before doing". */
+		pr_err("timeout_process return before doing anything.");
+		return;
+	}
reset_process_time(hw);
amvdec_stop();
debug_print(DECODE_ID(hw), PRINT_FLAG_ERROR,
__func__, vdec->status, READ_VREG(VLD_MEM_VIFIFO_LEVEL));
hw->dec_result = DEC_RESULT_DONE;
hw->first_i_frame_ready = 0;
- vdec_schedule_work(&hw->work);
+
+	/*
+	 * If the regular vmpeg12_work arrives at this very timeout point,
+	 * let it handle the scenario instead.
+	 */
+	if (work_pending(&hw->work)) {
+		/* Typo fix in log text: "befor schedule" -> "before scheduling". */
+		pr_err("timeout_process return before scheduling.");
+		return;
+	}
+	vdec_schedule_work(&hw->timeout_work);
}
static void check_timer_func(unsigned long arg)
INIT_WORK(&hw->userdata_push_work, userdata_push_do_work);
INIT_WORK(&hw->work, vmpeg12_work);
+ INIT_WORK(&hw->timeout_work, vmpeg12_timeout_work);
INIT_WORK(&hw->notify_work, vmpeg12_notify_work);
if (NULL == hw->user_data_buffer) {
cancel_work_sync(&hw->userdata_push_work);
cancel_work_sync(&hw->notify_work);
cancel_work_sync(&hw->work);
+ cancel_work_sync(&hw->timeout_work);
if (hw->mm_blk_handle) {
decoder_bmmu_box_free(hw->mm_blk_handle);
vdec = NULL;
}
- if (vdec)
+ if (vdec) {
atomic_set(&vdec->inirq_flag, 1);
+ vdec->isr_ns = local_clock();
+ }
if (c->dev_isr) {
ret = c->dev_isr(irq, c->dev_id);
goto isr_done;
vdec = NULL;
}
- if (vdec)
+ if (vdec) {
+ u32 isr2tfn = 0;
atomic_set(&vdec->inirq_thread_flag, 1);
+ vdec->tfn_ns = local_clock();
+ isr2tfn = vdec->tfn_ns - vdec->isr_ns;
+ if (isr2tfn > 10000000)
+ pr_err("!!!!!!! %s vdec_isr to %s took %uns !!!\n",
+ vdec->vf_provider_name, __func__, isr2tfn);
+ }
if (c->dev_threaded_isr) {
ret = c->dev_threaded_isr(irq, c->dev_id);
goto thread_isr_done;
atomic_t inirq_thread_flag;
atomic_t inirq_flag;
int parallel_dec;
+ volatile u64 isr_ns;
+ volatile u64 tfn_ns;
};
/* common decoder vframe provider name to use default vfm path */