#include <linux/err.h>
#include "ubi.h"
+/* Number of physical eraseblocks reserved for the atomic LEB change operation */
+#define EBA_RESERVED_PEBS 1
+
/**
* struct ltree_entry - an entry in the lock tree.
* @rb: links RB-tree nodes
* data, which has to be aligned. This function guarantees that in case of an
* unclean reboot the old contents are preserved. Returns zero in case of
* success and a negative error code in case of failure.
+ *
+ * UBI reserves one PEB for the "atomic LEB change" operation, so only one
+ * LEB change may be done at a time. This is ensured by @ubi->alc_mutex.
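+ *
+ * A typical call looks like this (a sketch; %UBI_UNKNOWN is one of the
+ * data type hints which may be passed via @dtype):
+ *
+ *	err = ubi_eba_atomic_leb_change(ubi, vol_id, lnum, buf, len, UBI_UNKNOWN);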
*/
int ubi_eba_atomic_leb_change(struct ubi_device *ubi, int vol_id, int lnum,
const void *buf, int len, int dtype)
if (!vid_hdr)
return -ENOMEM;
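+ /* Serialize "atomic LEB change" operations; only one may run at a time */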
+ mutex_lock(&ubi->alc_mutex);
err = leb_write_lock(ubi, vol_id, lnum);
- if (err) {
- ubi_free_vid_hdr(ubi, vid_hdr);
- return err;
- }
+ if (err)
+ goto out_mutex;
vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
vid_hdr->vol_id = cpu_to_be32(vol_id);
retry:
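+ /* Pick a free PEB which will hold the new version of the LEB */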
pnum = ubi_wl_get_peb(ubi, dtype);
if (pnum < 0) {
- ubi_free_vid_hdr(ubi, vid_hdr);
- leb_write_unlock(ubi, vol_id, lnum);
- return pnum;
+ err = pnum;
+ goto out_leb_unlock;
}
dbg_eba("change LEB %d:%d, PEB %d, write VID hdr to PEB %d",
if (vol->eba_tbl[lnum] >= 0) {
err = ubi_wl_put_peb(ubi, vol->eba_tbl[lnum], 1);
- if (err) {
- ubi_free_vid_hdr(ubi, vid_hdr);
- leb_write_unlock(ubi, vol_id, lnum);
- return err;
- }
+ if (err)
+ goto out_leb_unlock;
}
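+ /* Re-map the LEB to the newly written PEB */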
vol->eba_tbl[lnum] = pnum;
+
+out_leb_unlock:
leb_write_unlock(ubi, vol_id, lnum);
+out_mutex:
+ mutex_unlock(&ubi->alc_mutex);
ubi_free_vid_hdr(ubi, vid_hdr);
- return 0;
+ return err;
write_error:
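+ /* The data write failed; see whether another PEB may be tried */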
if (err != -EIO || !ubi->bad_allowed) {
* mode just in case.
*/
ubi_ro_mode(ubi);
- leb_write_unlock(ubi, vol_id, lnum);
- ubi_free_vid_hdr(ubi, vid_hdr);
- return err;
+ goto out_leb_unlock;
}
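+ /* Return the bad PEB to the wear-leveling unit for torturing */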
err = ubi_wl_put_peb(ubi, pnum, 1);
if (err || ++tries > UBI_IO_RETRIES) {
ubi_ro_mode(ubi);
- leb_write_unlock(ubi, vol_id, lnum);
- ubi_free_vid_hdr(ubi, vid_hdr);
- return err;
+ goto out_leb_unlock;
}
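+ /* Retry with a different PEB; the VID header needs a fresh sqnum */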
vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
dbg_eba("initialize EBA unit");
spin_lock_init(&ubi->ltree_lock);
+ mutex_init(&ubi->alc_mutex);
ubi->ltree = RB_ROOT;
if (ubi_devices_cnt == 0) {
ubi->rsvd_pebs += ubi->beb_rsvd_pebs;
}
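+ /* Make sure one PEB is always left for the atomic LEB change operation */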
+ if (ubi->avail_pebs < EBA_RESERVED_PEBS) {
+ ubi_err("not enough physical eraseblocks (%d, need %d)",
+ ubi->avail_pebs, EBA_RESERVED_PEBS);
+ err = -ENOSPC;
+ goto out_free;
+ }
+ ubi->avail_pebs -= EBA_RESERVED_PEBS;
+ ubi->rsvd_pebs += EBA_RESERVED_PEBS;
+
dbg_eba("EBA unit is initialized");
return 0;
* @vtbl_slots: how many slots are available in the volume table
* @vtbl_size: size of the volume table in bytes
* @vtbl: in-RAM volume table copy
+ * @vtbl_mutex: protects on-flash volume table
*
* @max_ec: current highest erase counter value
* @mean_ec: current mean erase counter value
*
- * global_sqnum: global sequence number
+ * @global_sqnum: global sequence number
* @ltree_lock: protects the lock tree and @global_sqnum
* @ltree: the lock tree
- * @vtbl_mutex: protects on-flash volume table
+ * @alc_mutex: serializes "atomic LEB change" operations
*
* @used: RB-tree of used physical eraseblocks
* @free: RB-tree of free physical eraseblocks
unsigned long long global_sqnum;
spinlock_t ltree_lock;
struct rb_root ltree;
+ struct mutex alc_mutex;
/* Wear-leveling unit's stuff */
struct rb_root used;