smgt->alloc_100ms_up_cnt
);
{
- int average_timeus = smgt->alloc_cnt == 0 ?
- 0 : (int)(smgt->alloc_total_us/smgt->alloc_cnt);
+ int average_timeus = 0;
+ u64 divider = smgt->alloc_total_us;
+
+ /* guard against a zero divisor before calling do_div() */
+ if (smgt->alloc_cnt) {
+ do_div(divider, smgt->alloc_cnt);
+ average_timeus = (int)divider;
+ }
BUFPRINT("\talloc time average us:%d\n",
average_timeus);
}
return pos;
}
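+/*
+ * sched_clock() returns nanoseconds; convert to milliseconds with do_div()
+ * so the 64-bit division also builds on 32-bit platforms.
+ */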
+static u64 cur_to_msecs(void)
+{
+ u64 cur = sched_clock();
+
+ do_div(cur, NSEC_PER_MSEC);
+ return cur;
+}
+
/* static int log_seq = 0; */
int di_print(const char *fmt, ...)
{
if (di_printk_flag & 1) {
if (di_log_flag & DI_LOG_PRECISE_TIMESTAMP)
- pr_dbg("%llums:", sched_clock()/NSEC_PER_MSEC);
+ pr_dbg("%llums:", cur_to_msecs());
va_start(args, fmt);
vprintk(fmt, args);
va_end(args);
if (mpeg2vdin_flag)
RDMA_WR_BITS(DI_PRE_CTRL, 1, 13, 1);
#endif
- di_pre_stru.irq_time[0] = sched_clock()/NSEC_PER_MSEC;
- di_pre_stru.irq_time[1] = sched_clock()/NSEC_PER_MSEC;
+ di_pre_stru.irq_time[0] = cur_to_msecs();
+ di_pre_stru.irq_time[1] = cur_to_msecs();
#ifdef CONFIG_AMLOGIC_MEDIA_RDMA
if (di_pre_rdma_enable & 0x2)
rdma_config(de_devp->rdma_handle, RDMA_TRIGGER_MANUAL);
if (flag) {
di_pre_stru.irq_time[0] =
- (sched_clock()/NSEC_PER_MSEC - di_pre_stru.irq_time[0]);
+ (cur_to_msecs() - di_pre_stru.irq_time[0]);
trace_di_pre("PRE-IRQ-0",
di_pre_stru.field_count_for_cont,
di_pre_stru.irq_time[0]);
di_post_stru.de_post_process_done = 1;
di_post_stru.post_de_busy = 0;
di_post_stru.irq_time =
- (sched_clock()/NSEC_PER_MSEC - di_post_stru.irq_time);
+ (cur_to_msecs() - di_post_stru.irq_time);
trace_di_post("POST-IRQ-1",
di_post_stru.post_wr_cnt,
di_post_stru.irq_time);
di_buf, 0, vf_p->width-1,
0, vf_p->height-1, vf_p);
di_post_stru.post_de_busy = 1;
- di_post_stru.irq_time = sched_clock()/NSEC_PER_MSEC;
+ di_post_stru.irq_time = cur_to_msecs();
} else {
di_post_stru.de_post_process_done = 1;
}
if (pre_stru_p->pre_de_busy && init_flag) {
pre_stru_p->pre_de_busy_timer_count++;
if (pre_stru_p->pre_de_busy_timer_count >= nr_done_check_cnt &&
- ((sched_clock()/NSEC_PER_MSEC - di_pre_stru.irq_time[1]) >
+ ((cur_to_msecs() - di_pre_stru.irq_time[1]) >
(10*nr_done_check_cnt))) {
if (di_dbg_mask & 4) {
dump_mif_size_state(&di_pre_stru,
pr_info("DI*****wait %d timeout 0x%x(%d ms)*****\n",
pre_stru_p->field_count_for_cont,
Rd(DI_INTR_CTRL),
- (unsigned int)(sched_clock()/
- NSEC_PER_MSEC -
+ (unsigned int)(cur_to_msecs() -
di_pre_stru.irq_time[1]));
}
}
int64_t (*out)[3], int32_t norm)
{
int i, j;
+ int64_t tmp;
for (i = 0; i < 3; i++)
- for (j = 0; j < 3; j++)
- out[i][j] = (a[i][j] * b[i][j] + (norm >> 1)) / norm;
+ for (j = 0; j < 3; j++) {
+ tmp = a[i][j] * b[i][j] + (norm >> 1);
+ /* div_s64() returns the quotient; it does not modify tmp */
+ out[i][j] = div_s64(tmp, norm);
+ }
}
static void mtx_mul(int64_t (*a)[3], int64_t *b, int64_t *out, int32_t norm)
{
int j, k;
+ int64_t tmp;
for (j = 0; j < 3; j++) {
out[j] = 0;
for (k = 0; k < 3; k++)
out[j] += a[k][j] * b[k];
- out[j] = (out[j] + (norm >> 1)) / norm;
+ tmp = out[j] + (norm >> 1);
+ out[j] = div_s64(tmp, norm);
}
}
int64_t (*out)[3], int32_t norm)
{
int i, j, k;
+ int64_t tmp;
for (i = 0; i < 3; i++)
for (j = 0; j < 3; j++) {
out[i][j] = 0;
for (k = 0; k < 3; k++)
out[i][j] += a[k][j] * b[i][k];
- out[i][j] = (out[i][j] + (norm >> 1)) / norm;
+ tmp = out[i][j] + (norm >> 1);
+ out[i][j] = div_s64(tmp, norm);
}
}
{
int i, j;
int64_t determinant = 0;
+ int64_t tmp;
for (i = 0; i < 3; i++)
determinant +=
out[j][i] -= (in[(i + 1) % 3][(j + 2) % 3]
* in[(i + 2) % 3][(j + 1) % 3]);
out[j][i] = (out[j][i] * norm) << (obl - 1);
- out[j][i] =
- (out[j][i] + (determinant >> 1)) / determinant;
+ tmp = out[j][i] + (determinant >> 1);
+ out[j][i] = div_s64(tmp, determinant);
}
}
}
int64_t C[3];
int64_t D[3][3];
int64_t E[3][3];
+ int64_t tmp;
for (i = 0; i < 4; i++)
z[i] = norm - prmy[i][0] - prmy[i][1];
A[i][j] = prmy[i][j];
A[i][2] = z[i];
}
- B[0] = (norm * prmy[3][0] * 2 / prmy[3][1] + 1) >> 1;
+ tmp = norm * prmy[3][0] * 2;
+ tmp = div_s64(tmp, prmy[3][1]);
+ B[0] = (tmp + 1) >> 1;
B[1] = norm;
- B[2] = (norm * z[3] * 2 / prmy[3][1] + 1) >> 1;
+ tmp = norm * z[3] * 2;
+ tmp = div_s64(tmp, prmy[3][1]);
+ B[2] = (tmp + 1) >> 1;
inverse_3x3(A, D, norm, obl);
mtx_mul(D, B, C, norm);
for (i = 0; i < 3; i++)
void vpp_get_vframe_hist_info(struct vframe_s *vf)
{
unsigned int hist_height, hist_width;
+ u64 divid;
hist_height = READ_VPP_REG_BITS(VPP_IN_H_V_SIZE, 0, 13);
hist_width = READ_VPP_REG_BITS(VPP_IN_H_V_SIZE, 16, 13);
VI_HIST_ON_BIN_63_BIT, VI_HIST_ON_BIN_63_WID);
if (debug_game_mode_1 &&
(vpp_luma_max != vf->prop.hist.vpp_luma_max)) {
- vf->ready_clock_hist[1] = sched_clock();
+ divid = vf->ready_clock_hist[1] = sched_clock();
+ do_div(divid, 1000);
pr_info("vpp output done %lld us. luma_max(0x%x-->0x%x)\n",
- vf->ready_clock_hist[1]/1000,
- vpp_luma_max, vf->prop.hist.vpp_luma_max);
+ divid, vpp_luma_max, vf->prop.hist.vpp_luma_max);
vpp_luma_max = vf->prop.hist.vpp_luma_max;
}
}
struct vivi_dev *dev = video_drvdata(file);
int ret = 0;
u64 pts_us64 = 0;
+ u64 pts_tmp;
struct vframe_s *next_vf;
if (vfq_level(&dev->q_ready) > AMLVIDEO_POOL_SIZE - 1)
dev->first_frame = 1;
pts_us64 = 0;
} else {
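+ /* duration is in 90 kHz PTS ticks; multiply by 100/9 to get microseconds */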
+ pts_tmp = DUR2PTS(dev->vf->duration) * 100;
+ do_div(pts_tmp, 9);
pts_us64 = dev->last_pts_us64
- + (DUR2PTS(dev->vf->duration))*100/9;
- dev->vf->pts = pts_us64*9/100;
+ + pts_tmp;
+ pts_tmp = pts_us64*9;
+ do_div(pts_tmp, 100);
+ dev->vf->pts = pts_tmp;
/*AMLVIDEO_WARN("pts= %d, dev->vf->duration= %d\n",*/
/*dev->vf->pts, (DUR2PTS(dev->vf->duration)));*/
}
return req.dv_enhance_exist;
}
u32 property_changed_true;
+
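+/* divide a 64-bit value by a 32-bit divisor via do_div() (no __aeabi_uldivmod) */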
+static u64 func_div(u64 number, u32 divid)
+{
+ u64 tmp = number;
+
+ do_div(tmp, divid);
+ return tmp;
+}
+
static void vsync_toggle_frame(struct vframe_s *vf)
{
u32 first_picture = 0;
unsigned long flags = 0;
bool vf_with_el = false;
bool force_toggle = false;
+ long long *clk_array;
if (vf == NULL)
return;
#endif
if (debug_flag & DEBUG_FLAG_LATENCY) {
vf->ready_clock[3] = sched_clock();
- pr_info("video toggle latency %lld ms video get latency %lld ms vdin put latency %lld ms. first %lld ms.\n",
- vf->ready_clock[3]/1000,
- vf->ready_clock[2]/1000,
- vf->ready_clock[1]/1000,
- vf->ready_clock[0]/1000);
+ pr_info("video toggle latency %lld ms,"
+ "video get latency %lld ms,"
+ "vdin put latency %lld ms,"
+ "first %lld ms.\n",
+ func_div(vf->ready_clock[3], 1000),
+ func_div(vf->ready_clock[2], 1000),
+ func_div(vf->ready_clock[1], 1000),
+ func_div(vf->ready_clock[0], 1000));
cur_dispbuf->ready_clock[4] = sched_clock();
- pr_info("video put latency %lld ms video toggle latency %lld ms video get latency %lld ms vdin put latency %lld ms. first %lld ms.\n",
- cur_dispbuf->ready_clock[4]/1000,
- cur_dispbuf->ready_clock[3]/1000,
- cur_dispbuf->ready_clock[2]/1000,
- cur_dispbuf->ready_clock[1]/1000,
- cur_dispbuf->ready_clock[0]/1000);
+ clk_array = cur_dispbuf->ready_clock;
+ pr_info("video put latency %lld ms,"
+ "video toggle latency %lld ms,"
+ "video get latency %lld ms,"
+ "vdin put latency %lld ms,"
+ "first %lld ms.\n",
+ func_div(*(clk_array + 4), 1000),
+ func_div(*(clk_array + 3), 1000),
+ func_div(*(clk_array + 2), 1000),
+ func_div(*(clk_array + 1), 1000),
+ func_div(*clk_array, 1000));
}
}
if (debug_flag & DEBUG_FLAG_LATENCY) {
vf->ready_clock[2] = sched_clock();
pr_info("video get latency %lld ms vdin put latency %lld ms. first %lld ms.\n",
- vf->ready_clock[2]/1000,
- vf->ready_clock[1]/1000,
- vf->ready_clock[0]/1000);
+ func_div(vf->ready_clock[2], 1000),
+ func_div(vf->ready_clock[1], 1000),
+ func_div(vf->ready_clock[0], 1000));
}
if (video_vf_dirty_put(vf))
break;
struct vdin_dev_s *devp)
{
unsigned int offset = devp->addr_offset;
+ u64 divid;
struct vframe_bbar_s bbar = {0};
#ifdef CONFIG_AML_LOCAL_DIMMING
/*int i;*/
vf->prop.meas.vs_cycle = devp->cycle;
if ((vdin_ctl_dbg & (1 << 8)) &&
(vdin_luma_max != vf->prop.hist.luma_max)) {
- vf->ready_clock_hist[0] = sched_clock();
+ divid = vf->ready_clock_hist[0] = sched_clock();
+ do_div(divid, 1000);
pr_info("vdin write done %lld us. lum_max(0x%x-->0x%x)\n",
- vf->ready_clock_hist[0]/1000,
- vdin_luma_max, vf->prop.hist.luma_max);
+ divid, vdin_luma_max, vf->prop.hist.luma_max);
vdin_luma_max = vf->prop.hist.luma_max;
}
#if 0
devp->parm.histgram[i] = vf->prop.hist.gamma[i];
}
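+/* 32-bit safe helper: divide a 64-bit value with do_div() and return the quotient */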
+static u64 func_div(u64 cur, u32 divid)
+{
+ do_div(cur, divid);
+ return cur;
+}
+
/*
*VDIN_FLAG_RDMA_ENABLE=1
* provider_vf_put(devp->last_wr_vfe, devp->vfp);
unsigned int offset = 0, vf_drop_cnt = 0;
enum tvin_trans_fmt trans_fmt;
struct tvin_sig_property_s *prop, *pre_prop;
+ long long *clk_array;
/* debug interrupt interval time
*
if (time_en) {
devp->last_wr_vfe->vf.ready_clock[1] =
sched_clock();
- pr_info("vdin put latency %lld us. first %lld us.\n",
- devp->last_wr_vfe->vf.ready_clock[1]/1000,
- devp->last_wr_vfe->vf.ready_clock[0]/1000);
+ clk_array = devp->last_wr_vfe->vf.ready_clock;
+ pr_info("vdin put latency %lld us, first %lld us.\n",
+ func_div(clk_array[1], 1000),
+ func_div(clk_array[0], 1000));
}
} else {
devp->vdin_irq_flag = 15;
provider_vf_put(curr_wr_vfe, devp->vfp);
if (vdin_dbg_en) {
curr_wr_vfe->vf.ready_clock[1] = sched_clock();
- pr_info("vdin put latency %lld us. first %lld us.\n",
- curr_wr_vfe->vf.ready_clock[1]/1000,
- curr_wr_vfe->vf.ready_clock[0]/1000);
+ clk_array = curr_wr_vfe->vf.ready_clock;
+ pr_info("vdin put latency %lld us, first %lld us.\n",
+ func_div(clk_array[1], 1000),
+ func_div(clk_array[0], 1000));
}
} else {
devp->vdin_irq_flag = 15;
struct _ext_info *p_ext_info = NULL;
struct nand_setup *p_nand_setup = NULL;
int each_boot_pages, boot_num;
- loff_t ofs;
+ uint64_t ofs;
+ uint32_t remainder;
u8 type = aml_chip->new_nand_info.type;
read_page++;
READ_BAD_BLOCK:
ofs = (read_page << chip->page_shift);
- if (!(ofs % mtd->erasesize)) {
+ /* div_u64_rem() replaces the 64-bit modulo, which 32-bit targets lack */
+ div_u64_rem(ofs, mtd->erasesize, &remainder);
+ if (!remainder) {
if (chip->block_bad(mtd, ofs)) {
read_page +=
1 << (chip->phys_erase_shift-chip->page_shift);
uint32_t priv_slc_page;
int en_slc = 0, each_boot_pages, boot_num;
u8 type = aml_chip->new_nand_info.type;
- loff_t ofs;
+ uint64_t ofs;
+ uint32_t remainder;
new_nand_info = &aml_chip->new_nand_info;
slc_program_info = &new_nand_info->slc_program_info;
WRITE_BAD_BLOCK:
ofs = (write_page << chip->page_shift);
- if (!(ofs % mtd->erasesize)) {
+ div_u64_rem(ofs, mtd->erasesize, &remainder);
+ if (!remainder) {
if (chip->block_bad(mtd, ofs)) {
write_page +=
1 << (chip->phys_erase_shift-chip->page_shift);
struct oobinfo_t *oobinfo;
struct free_node_t *free_node, *tmp_node = NULL;
unsigned char oob_buf[sizeof(struct oobinfo_t)];
- loff_t offset;
+ uint64_t offset;
unsigned char *data_buf, good_addr[256] = {0};
int start_blk, max_scan_blk, i, k, scan_status = 0, env_status = 0;
int phys_erase_shift, pages_per_blk, page_num;
int error = 0, ret = 0;
+ uint32_t remainder;
data_buf = aml_chip->rsv_data_buf;
oobinfo = (struct oobinfo_t *)oob_buf;
pr_info("blk check good but read failed: %llx, %d\n",
(uint64_t)offset, error);
offset += nandrsv_info->size;
- if ((scan_status++ > 6) || (!(offset % mtd->erasesize))) {
+ div_u64_rem(offset, mtd->erasesize, &remainder);
+ if ((scan_status++ > 6) || (!remainder)) {
pr_info("ECC error, scan ONE block exit\n");
scan_status = 0;
continue;
static u32 timere_read(void)
{
u32 time = 0;
- unsigned long te = 0, temp = 0;
+ unsigned long long te = 0, temp = 0;
/*timeE high+low, first read low, second read high*/
te = readl(timere_low_vaddr);
temp = readl(timere_high_vaddr);
te += (temp << 32);
- time = (u32)(te / 1000000);
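+ /* 64-bit division must go through do_div() on 32-bit kernels */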
+ do_div(te, 1000000);
+ time = (u32)te;
pr_debug("----------time_e: %us\n", time);
return time;
}