return rs->md.level == 10;
}
+/* Return true, if raid set in @rs is level 6 */
+static bool rs_is_raid6(struct raid_set *rs)
+{
+ return rs->md.level == 6;
+}
+
/* Return true, if raid set in @rs is level 4, 5 or 6 */
static bool rs_is_raid456(struct raid_set *rs)
{
rs->md.layout = raid_type->algorithm;
rs->md.new_layout = rs->md.layout;
rs->md.delta_disks = 0;
- rs->md.recovery_cp = rs_is_raid0(rs) ? MaxSector : 0;
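+ /* No recovery by default; rs_setup_recovery() adjusts this in the constructor */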
+ rs->md.recovery_cp = MaxSector;
for (i = 0; i < raid_devs; i++)
md_rdev_init(&rs->dev[i].rdev);
rs->ti->error = "Only one 'nosync' argument allowed";
return -EINVAL;
}
- rs->md.recovery_cp = MaxSector;
continue;
}
if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_SYNC))) {
rs->ti->error = "Only one 'sync' argument allowed";
return -EINVAL;
}
- rs->md.recovery_cp = 0;
continue;
}
if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_USE_NEAR_SETS))) {
struct mddev *mddev = &rs->md;
struct md_rdev *rdev;
sector_t array_sectors = rs->ti->len, dev_sectors = rs->ti->len;
- sector_t cur_dev_sectors = rs->dev[0].rdev.sectors;
if (use_mddev) {
delta_disks = mddev->delta_disks;
mddev->array_sectors = array_sectors;
mddev->dev_sectors = dev_sectors;
- if (!rs_is_raid0(rs) && dev_sectors > cur_dev_sectors)
- mddev->recovery_cp = dev_sectors;
-
return 0;
bad:
rs->ti->error = "Target length not divisible by number of data devices";
return -EINVAL;
}
+/* Setup recovery on @rs */
+static void __rs_setup_recovery(struct raid_set *rs, sector_t dev_sectors)
+{
+ /* raid0 does not recover */
+ if (rs_is_raid0(rs))
+ rs->md.recovery_cp = MaxSector;
+ /*
+ * A raid6 set has to be recovered either
+ * completely or for the grown part to
+ * ensure proper parity and Q-Syndrome
+ */
+ else if (rs_is_raid6(rs))
+ rs->md.recovery_cp = dev_sectors;
+ /*
+ * Other raid set types may skip recovery
+ * depending on the 'nosync' flag.
+ */
+ else
+ rs->md.recovery_cp = test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)
+ ? MaxSector : dev_sectors;
+}
+
+/* Setup recovery on @rs based on raid type, device size and 'nosync' flag */
+static void rs_setup_recovery(struct raid_set *rs, sector_t dev_sectors)
+{
+ if (!dev_sectors)
+ /* New raid set or 'sync' flag provided */
+ __rs_setup_recovery(rs, 0);
+ else if (dev_sectors == MaxSector)
+ /* Prevent recovery */
+ __rs_setup_recovery(rs, MaxSector);
+ else if (rs->dev[0].rdev.sectors < dev_sectors)
+ /* Grown raid set */
+ __rs_setup_recovery(rs, rs->dev[0].rdev.sectors);
+ else
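+ /* Raid set not grown -> prevent recovery */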
+ __rs_setup_recovery(rs, MaxSector);
+}
+
static void do_table_event(struct work_struct *ws)
{
struct raid_set *rs = container_of(ws, struct raid_set, md.event_work);
if (new_devs == rs->raid_disks) {
DMINFO("Superblocks created for new raid set");
set_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
- mddev->recovery_cp = 0;
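+ /* Recovery of a brand-new raid set is set up in rs_setup_recovery() */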
} else if (new_devs != rebuilds &&
new_devs != rs->delta_disks) {
DMERR("New device injected into existing raid set without "
int r;
struct raid_type *rt;
unsigned num_raid_params, num_raid_devs;
+ sector_t calculated_dev_sectors;
struct raid_set *rs = NULL;
const char *arg;
struct rs_layout rs_layout;
if (r)
return r;
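+ /* Remember the calculated device size for later recovery setup */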
+ calculated_dev_sectors = rs->dev[0].rdev.sectors;
+
/*
* Backup any new raid set level, layout, ...
* requested to be able to compare to superblock
if (r)
goto bad;
+ rs_setup_recovery(rs, calculated_dev_sectors);
+
INIT_WORK(&rs->md.event_work, do_table_event);
ti->private = rs;
ti->num_flush_bios = 1;
set_bit(MD_ARRAY_FIRST_USE, &rs->md.flags);
rs_set_cur(rs);
- } else
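+ /* Prevent recovery on the freshly created raid set */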
+ rs_setup_recovery(rs, MaxSector);
+ } else {
rs_set_cur(rs);
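+ /* 'sync' forces full recovery; otherwise recover based on the calculated device size */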
+ rs_setup_recovery(rs, test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags) ?
+ 0 : calculated_dev_sectors);
+ }
/* If constructor requested it, change data and new_data offsets */
r = rs_adjust_data_offsets(rs);