{
int err;
- err = ubi_read(c->ubi, lnum, buf, offs, len);
+ err = ubi_leb_read(c->ubi, lnum, buf, offs, len, 0);
/*
* In case of %-EBADMSG print the error message only if the
* @even_ebadmsg is true.
return err;
}
+#if defined(FEATURE_UBIFS_PERF_INDEX)
+/* Perf counters implemented elsewhere (MTK): fold one LEB write/read
+ * duration (ns) and byte count into the running statistics. */
+extern void ubifs_perf_lwcount(unsigned long long usage, unsigned int len);
+extern void ubifs_perf_lrcount(unsigned long long usage, unsigned int len);
+/**
+ * ubifs_leb_write_log - ubifs_leb_write wrapper that records write latency.
+ * @c: UBIFS file-system description object
+ * @lnum: logical eraseblock number to write to
+ * @buf: data to write
+ * @offs: offset within the LEB to write at
+ * @len: number of bytes to write
+ *
+ * Times the underlying ubifs_leb_write() with sched_clock() and feeds the
+ * elapsed nanoseconds plus @len into ubifs_perf_lwcount().  Returns the
+ * error code from ubifs_leb_write() unchanged.
+ *
+ * NOTE(review): the sample is recorded even when the write fails, so the
+ * counters include failed attempts — presumably intentional, but confirm.
+ */
+int ubifs_leb_write_log(struct ubifs_info *c, int lnum, const void *buf, int offs,
+		  int len)
+{
+	int err;
+	unsigned long long time1 = sched_clock();
+
+	err = ubifs_leb_write(c, lnum, buf, offs, len);
+	ubifs_perf_lwcount(sched_clock() - time1, len);
+
+	return err;
+}
+#endif
+
+
int ubifs_leb_write(struct ubifs_info *c, int lnum, const void *buf, int offs,
int len)
{
dirt = sync_len - wbuf->used;
if (dirt)
ubifs_pad(c, wbuf->buf + wbuf->used, dirt);
+#if defined(FEATURE_UBIFS_PERF_INDEX)
+ if(wbuf->jhead == DATAHD)
+ err = ubifs_leb_write_log(c, wbuf->lnum, wbuf->buf, wbuf->offs, sync_len);
+ else
+#endif
err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, wbuf->offs, sync_len);
if (err)
return err;
+ wbuf->w_count += sync_len; //MTK
spin_lock(&wbuf->lock);
wbuf->offs += sync_len;
if (aligned_len == wbuf->avail) {
dbg_io("flush jhead %s wbuf to LEB %d:%d",
dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs);
+#if defined(FEATURE_UBIFS_PERF_INDEX)
+ if(wbuf->jhead == DATAHD)
+ err = ubifs_leb_write_log(c, wbuf->lnum, wbuf->buf,
+ wbuf->offs, wbuf->size);
+ else
+#endif
err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf,
wbuf->offs, wbuf->size);
if (err)
goto out;
+ wbuf->w_count += wbuf->size; //MTK
spin_lock(&wbuf->lock);
wbuf->offs += wbuf->size;
dbg_io("flush jhead %s wbuf to LEB %d:%d",
dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs);
memcpy(wbuf->buf + wbuf->used, buf, wbuf->avail);
+#if defined(FEATURE_UBIFS_PERF_INDEX)
+ if(wbuf->jhead == DATAHD)
+ err = ubifs_leb_write_log(c, wbuf->lnum, wbuf->buf, wbuf->offs,
+ wbuf->size);
+ else
+#endif
err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, wbuf->offs,
wbuf->size);
if (err)
goto out;
+ wbuf->w_count += wbuf->size; //MTK
wbuf->offs += wbuf->size;
len -= wbuf->avail;
*/
dbg_io("write %d bytes to LEB %d:%d",
wbuf->size, wbuf->lnum, wbuf->offs);
+#if defined(FEATURE_UBIFS_PERF_INDEX)
+		/*
+		 * Data-head writes go through the latency-logging wrapper.
+		 * This hunk writes the caller's buffer @buf (see the
+		 * non-logging call below), NOT @wbuf->buf — passing
+		 * wbuf->buf here would flush stale write-buffer contents
+		 * instead of the user's data.
+		 */
+		if (wbuf->jhead == DATAHD)
+			err = ubifs_leb_write_log(c, wbuf->lnum, buf, wbuf->offs,
+					      wbuf->size);
+		else
+#endif
 		err = ubifs_leb_write(c, wbuf->lnum, buf, wbuf->offs,
 				      wbuf->size);
if (err)
goto out;
+ wbuf->w_count += wbuf->size; //MTK
wbuf->offs += wbuf->size;
len -= wbuf->size;
n <<= c->max_write_shift;
dbg_io("write %d bytes to LEB %d:%d", n, wbuf->lnum,
wbuf->offs);
+#if defined(FEATURE_UBIFS_PERF_INDEX)
+ if(wbuf->jhead == DATAHD)
+ err = ubifs_leb_write_log(c, wbuf->lnum, buf + written,
+ wbuf->offs, n);
+ else
+#endif
err = ubifs_leb_write(c, wbuf->lnum, buf + written,
wbuf->offs, n);
if (err)
goto out;
+ wbuf->w_count += n; //MTK
wbuf->offs += n;
aligned_len -= n;
len -= n;
const struct ubifs_info *c = wbuf->c;
int err, rlen, overlap;
struct ubifs_ch *ch = buf;
+#if defined(FEATURE_UBIFS_PERF_INDEX)
+ unsigned long long time1 = sched_clock();
+ int log_len = 0;
+#endif
dbg_io("LEB %d:%d, %s, length %d, jhead %s", lnum, offs,
dbg_ntype(type), len, dbg_jhead(wbuf->jhead));
goto out;
}
+#if defined(FEATURE_UBIFS_PERF_INDEX)
+ if(log_len > 0) {
+ ubifs_perf_lrcount(sched_clock() - time1, log_len);
+ }
+#endif
return 0;
out:
{
int err, l;
struct ubifs_ch *ch = buf;
+#if defined(FEATURE_UBIFS_PERF_INDEX)
+ unsigned long long time1 = sched_clock();
+#endif
dbg_io("LEB %d:%d, %s, length %d", lnum, offs, dbg_ntype(type), len);
ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
goto out;
}
+#if defined(FEATURE_UBIFS_PERF_INDEX)
+ if(type == UBIFS_DATA_NODE) {
+ ubifs_perf_lrcount(sched_clock() - time1, len);
+ }
+#endif
return 0;
out:
wbuf->delta = WBUF_TIMEOUT_HARDLIMIT - WBUF_TIMEOUT_SOFTLIMIT;
wbuf->delta *= 1000000000ULL;
ubifs_assert(wbuf->delta <= ULONG_MAX);
+
+ wbuf->w_count = 0; //MTK
return 0;
}