#include <linux/kernel.h>
#include <linux/slab.h>
#include "ubi.h"
+#ifdef CONFIG_MTK_COMBO_NAND_SUPPORT
+#include <linux/mtd/combo_nand.h>
+#endif
/* Maximum length of the 'mtd=' parameter */
#define MTD_PARAM_LEN_MAX 64
static struct mtd_dev_param __initdata mtd_dev_param[UBI_MAX_DEVICES];
#ifdef CONFIG_MTD_UBI_FASTMAP
/* UBI module parameter to enable fastmap automatically on non-fastmap images */
-static bool fm_autoconvert;
+#ifdef CONFIG_MTK_NAND_UBIFS_FASTMAP_SUPPORT
+/* MTK images default to fastmap auto-conversion */
+static bool fm_autoconvert = true;
+#else
+/* static storage is zero-initialized; no explicit "= 0" (checkpatch) */
+static bool fm_autoconvert;
+#endif
#endif
/* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
struct class *ubi_class;
static ssize_t dev_attribute_show(struct device *dev,
struct device_attribute *attr, char *buf);
+//MTK
+static ssize_t dev_attribute_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count);
/* UBI device attributes (correspond to files in '/<sysfs>/class/ubi/ubiX') */
static struct device_attribute dev_eraseblock_size =
__ATTR(volumes_count, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_max_ec =
__ATTR(max_ec, S_IRUGO, dev_attribute_show, NULL);
+//MTK start
+/*
+ * MTK wear-leveling statistics exposed under /sys/class/ubi/ubiX/.
+ * All are read-only except wl_th, which is writable by root to tune
+ * the wear-leveling threshold at runtime.
+ */
+static struct device_attribute dev_lbb =
+	__ATTR(lbb, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_move_retry =
+	__ATTR(move_retry, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_ec_count =
+	__ATTR(ec_count, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_mean_ec =
+	__ATTR(mean_ec, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_ec_sum =
+	__ATTR(ec_sum, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_min_ec =
+	__ATTR(min_ec, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_wl_count =
+	__ATTR(wl_count, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_wl_size =
+	__ATTR(wl_size, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_scrub_count =
+	__ATTR(scrub_count, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_scrub_size =
+	__ATTR(scrub_size, S_IRUGO, dev_attribute_show, NULL);
+/*
+ * Was 00755: execute bits are invalid on sysfs files and trip
+ * VERIFY_OCTAL_PERMISSIONS(); use 0644 (read-all, write-owner).
+ */
+static struct device_attribute dev_wl_th =
+	__ATTR(wl_th, S_IRUGO | S_IWUSR, dev_attribute_show, dev_attribute_store);
+/* No store method, so the attribute must be read-only (was 00755). */
+static struct device_attribute dev_torture =
+	__ATTR(torture, S_IRUGO, dev_attribute_show, NULL);
+//MTK end
static struct device_attribute dev_reserved_for_bad =
__ATTR(reserved_for_bad, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_bad_peb_count =
return ubi_num;
}
+/*
+ * MTK: "Store" method for writable files in '/<sysfs>/class/ubi/ubiX/'.
+ * Currently only 'wl_th' is writable: parses a decimal integer from
+ * @buf and updates the wear-leveling threshold.
+ *
+ * Returns @count on success, -ENODEV if the UBI device is gone,
+ * -EINVAL if @buf does not parse as an integer.
+ */
+static ssize_t dev_attribute_store(struct device *dev, struct device_attribute *attr,
+			const char *buf, size_t count)
+{
+	struct ubi_device *ubi;
+	int th;
+
+	/*
+	 * Pin the device so it cannot be detached while we touch it.
+	 * The reference MUST be dropped with ubi_put_device() on every
+	 * exit path (the original leaked it on each write).
+	 */
+	ubi = container_of(dev, struct ubi_device, dev);
+	ubi = ubi_get_device(ubi->ubi_num);
+	if (!ubi)
+		return -ENODEV;
+
+	if (attr == &dev_wl_th) {
+		/* Reject input that does not parse instead of storing garbage */
+		if (sscanf(buf, "%d", &th) != 1) {
+			ubi_put_device(ubi);
+			return -EINVAL;
+		}
+		printk(KERN_INFO "set th=%d\n", th);
+		ubi->wl_th = th;
+	}
+
+	ubi_put_device(ubi);
+	return count;
+}
/* "Show" method for files in '/<sysfs>/class/ubi/ubiX/' */
static ssize_t dev_attribute_show(struct device *dev,
struct device_attribute *attr, char *buf)
ret = sprintf(buf, "%d\n", ubi->vol_count - UBI_INT_VOL_COUNT);
else if (attr == &dev_max_ec)
ret = sprintf(buf, "%d\n", ubi->max_ec);
+//MTK start
+ else if (attr == &dev_torture)
+ ret = sprintf(buf, "torture: %d\n", ubi->torture);
+ else if (attr == &dev_wl_th)
+ ret = sprintf(buf, "wl_th: %d\n", ubi->wl_th);
+ else if (attr == &dev_wl_count)
+ ret = sprintf(buf, "wl_count: %d\n", ubi->wl_count);
+ else if (attr == &dev_wl_size)
+ ret = sprintf(buf, "wl_size: %lld\n", ubi->wl_size);
+ else if (attr == &dev_scrub_count)
+ ret = sprintf(buf, "scrub_count: %d\n", ubi->scrub_count);
+ else if (attr == &dev_scrub_size)
+ ret = sprintf(buf, "scrub_size: %lld\n", ubi->scrub_size);
+ else if (attr == &dev_move_retry)
+ ret = sprintf(buf, "move_retry: %d\n", atomic_read(&ubi->move_retry));
+ else if (attr == &dev_lbb)
+ ret = sprintf(buf, "lbb: %d\n", atomic_read(&ubi->lbb));
+ else if (attr == &dev_ec_count)
+ ret = sprintf(buf, "ec_count: %d\n", atomic_read(&ubi->ec_count));
+ else if (attr == &dev_mean_ec)
+ ret = sprintf(buf, "mean_ec: %d\n", ubi->mean_ec);
+ else if (attr == &dev_ec_sum)
+ ret = sprintf(buf, "%lld\n", ubi->ec_sum);
+ else if (attr == &dev_min_ec) {
+ struct ubi_wl_entry *e=NULL, *efree=NULL, *eused=NULL;
+ spin_lock(&ubi->wl_lock);
+ efree = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, u.rb);
+ eused = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
+ if(efree && eused) {
+ if(efree->ec < eused->ec)
+ e = efree;
+ else
+ e = eused;
+ } else if(efree){
+ e = efree;
+ } else {
+ e = eused;
+ }
+ ret = sprintf(buf, "%d\n", e->ec);
+ spin_unlock(&ubi->wl_lock);
+ }
+//MTK end
else if (attr == &dev_reserved_for_bad)
ret = sprintf(buf, "%d\n", ubi->beb_rsvd_pebs);
else if (attr == &dev_bad_peb_count)
err = device_create_file(&ubi->dev, &dev_max_ec);
if (err)
return err;
+//MTK start
+ err = device_create_file(&ubi->dev, &dev_lbb);
+ if (err)
+ return err;
+ err = device_create_file(&ubi->dev, &dev_move_retry);
+ if (err)
+ return err;
+ err = device_create_file(&ubi->dev, &dev_ec_count);
+ if (err)
+ return err;
+ err = device_create_file(&ubi->dev, &dev_mean_ec);
+ if (err)
+ return err;
+ err = device_create_file(&ubi->dev, &dev_ec_sum);
+ if (err)
+ return err;
+ err = device_create_file(&ubi->dev, &dev_min_ec);
+ if (err)
+ return err;
+ err = device_create_file(&ubi->dev, &dev_wl_count);
+ if (err)
+ return err;
+ err = device_create_file(&ubi->dev, &dev_wl_size);
+ if (err)
+ return err;
+ err = device_create_file(&ubi->dev, &dev_scrub_count);
+ if (err)
+ return err;
+ err = device_create_file(&ubi->dev, &dev_scrub_size);
+ if (err)
+ return err;
+ err = device_create_file(&ubi->dev, &dev_wl_th);
+ if (err)
+ return err;
+ err = device_create_file(&ubi->dev, &dev_torture);
+ if (err)
+ return err;
+//MTK end
err = device_create_file(&ubi->dev, &dev_reserved_for_bad);
if (err)
return err;
* physical eraseblocks maximum.
*/
+#ifdef CONFIG_MTK_COMBO_NAND_SUPPORT
+ ubi->peb_size = COMBO_NAND_BLOCK_SIZE;
+ ubi->peb_count = (int)div_u64(ubi->mtd->size, ubi->peb_size);
+#else
ubi->peb_size = ubi->mtd->erasesize;
ubi->peb_count = mtd_div_by_eb(ubi->mtd->size, ubi->mtd);
+#endif
ubi->flash_size = ubi->mtd->size;
if (mtd_can_have_bb(ubi->mtd)) {
ubi->nor_flash = 1;
}
+#ifdef CONFIG_MTK_COMBO_NAND_SUPPORT
+ ubi->min_io_size = COMBO_NAND_PAGE_SIZE;
+ ubi->hdrs_min_io_size = ubi->min_io_size >> ubi->mtd->subpage_sft;
+#else
ubi->min_io_size = ubi->mtd->writesize;
ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft;
+#endif
/*
* Make sure minimal I/O unit is power of 2. Note, there is no
ubi_assert(ubi->hdrs_min_io_size <= ubi->min_io_size);
ubi_assert(ubi->min_io_size % ubi->hdrs_min_io_size == 0);
+#ifdef CONFIG_MTK_COMBO_NAND_SUPPORT
+ ubi->max_write_size = COMBO_NAND_PAGE_SIZE;
+#else
ubi->max_write_size = ubi->mtd->writebufsize;
+#endif
+#ifdef CONFIG_MTK_MLC_NAND_SUPPORT
+ ubi->max_write_size = ubi->mtd->erasesize/4;
+#endif
/*
* Maximum write size has to be greater or equivalent to min. I/O
* size, and be multiple of min. I/O size.
ubi->ro_mode = 1;
}
+ ubi_msg("physical eraseblock size: %d bytes (%d KiB)",
+ ubi->peb_size, ubi->peb_size >> 10);
+ ubi_msg("logical eraseblock size: %d bytes", ubi->leb_size);
+ ubi_msg("smallest flash I/O unit: %d", ubi->min_io_size);
+ if (ubi->hdrs_min_io_size != ubi->min_io_size)
+ ubi_msg("sub-page size: %d",
+ ubi->hdrs_min_io_size);
+ ubi_msg("VID header offset: %d (aligned %d)",
+ ubi->vid_hdr_offset, ubi->vid_hdr_aloffset);
+ ubi_msg("data offset: %d", ubi->leb_start);
+
/*
* Note, ideally, we have to initialize @ubi->bad_peb_count here. But
* unfortunately, MTD does not provide this information. We should loop
{
struct ubi_device *ubi;
int i, err, ref = 0;
+ unsigned long long attach_time = 0;
if (max_beb_per1024 < 0 || max_beb_per1024 > MAX_MTD_UBI_BEB_LIMIT)
return -EINVAL;
ubi->ubi_num = ubi_num;
ubi->vid_hdr_offset = vid_hdr_offset;
ubi->autoresize_vol_id = -1;
+//MTK start
+ ubi->wl_th = CONFIG_MTD_UBI_WL_THRESHOLD;
+ atomic_set(&ubi->ec_count, 0);
+ atomic_set(&ubi->move_retry, 0);
+//MTK end
#ifdef CONFIG_MTD_UBI_FASTMAP
ubi->fm_pool.used = ubi->fm_pool.size = 0;
goto out_free;
err = -ENOMEM;
- ubi->peb_buf = vmalloc(ubi->peb_size);
+ ubi->peb_buf = kmalloc(ubi->peb_size, GFP_KERNEL);
if (!ubi->peb_buf)
goto out_free;
#ifdef CONFIG_MTD_UBI_FASTMAP
ubi->fm_size = ubi_calc_fm_size(ubi);
- ubi->fm_buf = vzalloc(ubi->fm_size);
+ ubi->fm_buf = kzalloc(ubi->fm_size, GFP_KERNEL);
if (!ubi->fm_buf)
goto out_free;
#endif
+ attach_time = sched_clock();
err = ubi_attach(ubi, 0);
if (err) {
ubi_err("failed to attach mtd%d, error %d", mtd->index, err);
goto out_debugfs;
}
+ attach_time = sched_clock() - attach_time;
+ do_div(attach_time, 1000000);
ubi_msg("attached mtd%d (name \"%s\", size %llu MiB) to ubi%d",
mtd->index, mtd->name, ubi->flash_size >> 20, ubi_num);
ubi_msg("PEB size: %d bytes (%d KiB), LEB size: %d bytes",
ubi_devices[ubi_num] = NULL;
ubi_wl_close(ubi);
ubi_free_internal_volumes(ubi);
- vfree(ubi->vtbl);
+ kfree(ubi->vtbl);
out_free:
- vfree(ubi->peb_buf);
- vfree(ubi->fm_buf);
+ kfree(ubi->peb_buf);
+ kfree(ubi->fm_buf);
if (ref)
put_device(&ubi->dev);
else
ubi_wl_close(ubi);
ubi_free_internal_volumes(ubi);
- vfree(ubi->vtbl);
+ kfree(ubi->vtbl);
put_mtd_device(ubi->mtd);
- vfree(ubi->peb_buf);
- vfree(ubi->fm_buf);
+#ifdef CONFIG_BLB
+ kfree(ubi->databuf);
+ kfree(ubi->oobbuf);
+#endif
+ kfree(ubi->peb_buf);
+ kfree(ubi->fm_buf);
ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num);
put_device(&ubi->dev);
return 0;