* idle IO detection.
*
* you can change it via /proc/sys/dev/raid/speed_limit_min and _max.
+ * or /sys/block/mdX/md/sync_speed_{min,max}
*/
static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;
+static inline int speed_min(mddev_t *mddev)
+{
+ return mddev->sync_speed_min ?
+ mddev->sync_speed_min : sysctl_speed_limit_min;
+}
+
+static inline int speed_max(mddev_t *mddev)
+{
+ return mddev->sync_speed_max ?
+ mddev->sync_speed_max : sysctl_speed_limit_max;
+}
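+
+/*
+ * Illustrative use of the per-array override (not part of the code
+ * itself; paths assume the usual md sysfs layout, with md0 as the
+ * example array):
+ *
+ *   echo 50000  > /sys/block/md0/md/sync_speed_min   (local override)
+ *   echo system > /sys/block/md0/md/sync_speed_min   (back to sysctl)
+ *
+ * A stored value of 0 means "use the system-wide /proc/sys/dev/raid/
+ * limit", which is exactly what these helpers test for.
+ */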
static struct ctl_table_header *raid_table_header;
*/
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
-void md_new_event(mddev_t *mddev)
+static void md_new_event(mddev_t *mddev)
{
atomic_inc(&md_event_count);
wake_up(&md_event_waiters);
}
-static struct mdk_personality *find_pers(int level)
+static struct mdk_personality *find_pers(int level, char *clevel)
{
struct mdk_personality *pers;
- list_for_each_entry(pers, &pers_list, list)
- if (pers->level == level)
+ list_for_each_entry(pers, &pers_list, list) {
+ if (level != LEVEL_NONE && pers->level == level)
return pers;
+ if (strcmp(pers->name, clevel)==0)
+ return pers;
+ }
return NULL;
}
}
rdev->size = calc_dev_size(rdev, sb->chunk_size);
+ if (rdev->size < sb->size && sb->level > 1)
+ /* "this cannot possibly happen" ... */
+ ret = -EINVAL;
+
abort:
return ret;
}
mddev->ctime = sb->ctime;
mddev->utime = sb->utime;
mddev->level = sb->level;
+ mddev->clevel[0] = 0;
mddev->layout = sb->layout;
mddev->raid_disks = sb->raid_disks;
mddev->size = sb->size;
}
rdev->preferred_minor = 0xffff;
rdev->data_offset = le64_to_cpu(sb->data_offset);
+ atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1;
rdev->size = le64_to_cpu(sb->data_size)/2;
if (le32_to_cpu(sb->chunksize))
rdev->size &= ~((sector_t)le32_to_cpu(sb->chunksize)/2 - 1);
+
+ if (le64_to_cpu(sb->size) > rdev->size*2)
+ return -EINVAL;
return 0;
}
mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
mddev->level = le32_to_cpu(sb->level);
+ mddev->clevel[0] = 0;
mddev->layout = le32_to_cpu(sb->layout);
mddev->raid_disks = le32_to_cpu(sb->raid_disks);
mddev->size = le64_to_cpu(sb->size)/2;
else
sb->resync_offset = cpu_to_le64(0);
+ sb->cnt_corrected_read = atomic_read(&rdev->corrected_errors);
+
+ sb->raid_disks = cpu_to_le32(mddev->raid_disks);
+ sb->size = cpu_to_le64(mddev->size<<1); /* mddev->size is KB, sb->size is sectors */
+
if (mddev->bitmap && mddev->bitmap_file == NULL) {
sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset);
sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
mdk_rdev_t *same_pdev;
char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
struct kobject *ko;
+ char *s;
if (rdev->mddev) {
MD_BUG();
return -EINVAL;
}
+ /* make sure rdev->size is not smaller than mddev->size */
+ if (rdev->size && (mddev->size == 0 || rdev->size < mddev->size)) {
+ if (mddev->pers)
+ /* Cannot change size, so fail */
+ return -ENOSPC;
+ else
+ mddev->size = rdev->size;
+ }
same_pdev = match_dev_unit(mddev, rdev);
if (same_pdev)
printk(KERN_WARNING
bdevname(rdev->bdev,b);
if (kobject_set_name(&rdev->kobj, "dev-%s", b) < 0)
return -ENOMEM;
+ while ((s = strchr(rdev->kobj.k_name, '/')) != NULL)
+ *s = '!';
list_add(&rdev->same_set, &mddev->disks);
rdev->mddev = mddev;
}
static struct rdev_sysfs_entry rdev_super = __ATTR_RO(super);
+static ssize_t
+errors_show(mdk_rdev_t *rdev, char *page)
+{
+ return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
+}
+
+static ssize_t
+errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
+{
+ char *e;
+ unsigned long n = simple_strtoul(buf, &e, 10);
+ if (*buf && (*e == 0 || *e == '\n')) {
+ atomic_set(&rdev->corrected_errors, n);
+ return len;
+ }
+ return -EINVAL;
+}
+static struct rdev_sysfs_entry rdev_errors =
+__ATTR(errors, 0644, errors_show, errors_store);
+
+static ssize_t
+slot_show(mdk_rdev_t *rdev, char *page)
+{
+ if (rdev->raid_disk < 0)
+ return sprintf(page, "none\n");
+ else
+ return sprintf(page, "%d\n", rdev->raid_disk);
+}
+
+static ssize_t
+slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
+{
+ char *e;
+ int slot = simple_strtoul(buf, &e, 10);
+ if (strncmp(buf, "none", 4)==0)
+ slot = -1;
+ else if (e==buf || (*e && *e != '\n'))
+ return -EINVAL;
+ if (rdev->mddev->pers)
+ /* Cannot set slot in active array (yet) */
+ return -EBUSY;
+ if (slot >= rdev->mddev->raid_disks)
+ return -ENOSPC;
+ rdev->raid_disk = slot;
+ /* assume it is working */
+ rdev->flags = 0;
+ set_bit(In_sync, &rdev->flags);
+ return len;
+}
+
+
+static struct rdev_sysfs_entry rdev_slot =
+__ATTR(slot, 0644, slot_show, slot_store);
+
+static ssize_t
+offset_show(mdk_rdev_t *rdev, char *page)
+{
+ return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
+}
+
+static ssize_t
+offset_store(mdk_rdev_t *rdev, const char *buf, size_t len)
+{
+ char *e;
+ unsigned long long offset = simple_strtoull(buf, &e, 10);
+ if (e==buf || (*e && *e != '\n'))
+ return -EINVAL;
+ if (rdev->mddev->pers)
+ return -EBUSY;
+ rdev->data_offset = offset;
+ return len;
+}
+
+static struct rdev_sysfs_entry rdev_offset =
+__ATTR(offset, 0644, offset_show, offset_store);
+
+static ssize_t
+rdev_size_show(mdk_rdev_t *rdev, char *page)
+{
+ return sprintf(page, "%llu\n", (unsigned long long)rdev->size);
+}
+
+static ssize_t
+rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
+{
+ char *e;
+ unsigned long long size = simple_strtoull(buf, &e, 10);
+ if (e==buf || (*e && *e != '\n'))
+ return -EINVAL;
+ if (rdev->mddev->pers)
+ return -EBUSY;
+ rdev->size = size;
+ if (size < rdev->mddev->size || rdev->mddev->size == 0)
+ rdev->mddev->size = size;
+ return len;
+}
+
+static struct rdev_sysfs_entry rdev_size =
+__ATTR(size, 0644, rdev_size_show, rdev_size_store);
+
static struct attribute *rdev_default_attrs[] = {
&rdev_state.attr,
&rdev_super.attr,
+ &rdev_errors.attr,
+ &rdev_slot.attr,
+ &rdev_offset.attr,
+ &rdev_size.attr,
NULL,
};
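+
+/*
+ * These attributes expose each member device under the array's md
+ * directory; illustrative paths, assuming sda1 is a member of md0:
+ *
+ *   /sys/block/md0/md/dev-sda1/errors   corrected-read count (r/w)
+ *   /sys/block/md0/md/dev-sda1/slot     raid_disk number, or "none"
+ *   /sys/block/md0/md/dev-sda1/offset   data offset in sectors
+ *   /sys/block/md0/md/dev-sda1/size     component size in KB
+ */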
static ssize_t
rdev->data_offset = 0;
atomic_set(&rdev->nr_pending, 0);
atomic_set(&rdev->read_errors, 0);
+ atomic_set(&rdev->corrected_errors, 0);
size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
if (!size) {
level_show(mddev_t *mddev, char *page)
{
struct mdk_personality *p = mddev->pers;
- if (p == NULL && mddev->raid_disks == 0)
- return 0;
- if (mddev->level >= 0)
- return sprintf(page, "raid%d\n", mddev->level);
- else
+ if (p)
return sprintf(page, "%s\n", p->name);
+ else if (mddev->clevel[0])
+ return sprintf(page, "%s\n", mddev->clevel);
+ else if (mddev->level != LEVEL_NONE)
+ return sprintf(page, "%d\n", mddev->level);
+ else
+ return 0;
+}
+
+static ssize_t
+level_store(mddev_t *mddev, const char *buf, size_t len)
+{
+ int rv = len;
+ if (mddev->pers)
+ return -EBUSY;
+ if (len == 0)
+ return 0;
+ if (len >= sizeof(mddev->clevel))
+ return -ENOSPC;
+ strncpy(mddev->clevel, buf, len);
+ if (mddev->clevel[len-1] == '\n')
+ len--;
+ mddev->clevel[len] = 0;
+ mddev->level = LEVEL_NONE;
+ return rv;
}
-static struct md_sysfs_entry md_level = __ATTR_RO(level);
+static struct md_sysfs_entry md_level =
+__ATTR(level, 0644, level_show, level_store);
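+
+/*
+ * Illustrative: an inactive array can now be given a level by name
+ * before any personality module is loaded, e.g.
+ *
+ *   echo raid5 > /sys/block/md0/md/level
+ *
+ * The text is kept in clevel, so do_md_run can request_module
+ * "md-raid5" and find_pers() can match on pers->name.
+ */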
static ssize_t
raid_disks_show(mddev_t *mddev, char *page)
return sprintf(page, "%d\n", mddev->raid_disks);
}
-static struct md_sysfs_entry md_raid_disks = __ATTR_RO(raid_disks);
+static int update_raid_disks(mddev_t *mddev, int raid_disks);
+
+static ssize_t
+raid_disks_store(mddev_t *mddev, const char *buf, size_t len)
+{
+ /* can only set raid_disks if array is not yet active */
+ char *e;
+ int rv = 0;
+ unsigned long n = simple_strtoul(buf, &e, 10);
+
+ if (!*buf || (*e && *e != '\n'))
+ return -EINVAL;
+
+ if (mddev->pers)
+ rv = update_raid_disks(mddev, n);
+ else
+ mddev->raid_disks = n;
+ return rv ? rv : len;
+}
+static struct md_sysfs_entry md_raid_disks =
+__ATTR(raid_disks, 0644, raid_disks_show, raid_disks_store);
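+
+/*
+ * Illustrative: writing raid_disks reshapes an active array through
+ * pers->reshape(), or simply records the count on an inactive one:
+ *
+ *   echo 4 > /sys/block/md0/md/raid_disks
+ */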
+
+static ssize_t
+chunk_size_show(mddev_t *mddev, char *page)
+{
+ return sprintf(page, "%d\n", mddev->chunk_size);
+}
+
+static ssize_t
+chunk_size_store(mddev_t *mddev, const char *buf, size_t len)
+{
+ /* can only set chunk_size if array is not yet active */
+ char *e;
+ unsigned long n = simple_strtoul(buf, &e, 10);
+
+ if (mddev->pers)
+ return -EBUSY;
+ if (!*buf || (*e && *e != '\n'))
+ return -EINVAL;
+
+ mddev->chunk_size = n;
+ return len;
+}
+static struct md_sysfs_entry md_chunk_size =
+__ATTR(chunk_size, 0644, chunk_size_show, chunk_size_store);
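+
+/*
+ * chunk_size is in bytes; illustrative, a 64KB chunk on an inactive
+ * array:
+ *
+ *   echo 65536 > /sys/block/md0/md/chunk_size
+ */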
+
+static ssize_t
+null_show(mddev_t *mddev, char *page)
+{
+ return -EINVAL;
+}
+
+static ssize_t
+new_dev_store(mddev_t *mddev, const char *buf, size_t len)
+{
+ /* buf must be "%d:%d", optionally followed by a newline, giving major and minor numbers */
+ /* The new device is added to the array.
+ * If the array has a persistent superblock, we read the
+ * superblock to initialise info and check validity.
+ * Otherwise, only checking done is that in bind_rdev_to_array,
+ * which mainly checks size.
+ */
+ char *e;
+ int major = simple_strtoul(buf, &e, 10);
+ int minor;
+ dev_t dev;
+ mdk_rdev_t *rdev;
+ int err;
+
+ if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
+ return -EINVAL;
+ minor = simple_strtoul(e+1, &e, 10);
+ if (*e && *e != '\n')
+ return -EINVAL;
+ dev = MKDEV(major, minor);
+ if (major != MAJOR(dev) ||
+ minor != MINOR(dev))
+ return -EOVERFLOW;
+
+
+ if (mddev->persistent) {
+ rdev = md_import_device(dev, mddev->major_version,
+ mddev->minor_version);
+ if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
+ mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
+ mdk_rdev_t, same_set);
+ err = super_types[mddev->major_version]
+ .load_super(rdev, rdev0, mddev->minor_version);
+ if (err < 0)
+ goto out;
+ }
+ } else
+ rdev = md_import_device(dev, -1, -1);
+
+ if (IS_ERR(rdev))
+ return PTR_ERR(rdev);
+ err = bind_rdev_to_array(rdev, mddev);
+ out:
+ if (err)
+ export_rdev(rdev);
+ return err ? err : len;
+}
+
+static struct md_sysfs_entry md_new_device =
+__ATTR(new_dev, 0200, null_show, new_dev_store);
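+
+/*
+ * Illustrative: devices are named by major:minor, e.g. 8:16 for sdb
+ * under the usual block device numbering:
+ *
+ *   echo 8:16 > /sys/block/md0/md/new_dev
+ */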
+
+static ssize_t
+size_show(mddev_t *mddev, char *page)
+{
+ return sprintf(page, "%llu\n", (unsigned long long)mddev->size);
+}
+
+static int update_size(mddev_t *mddev, unsigned long size);
+
+static ssize_t
+size_store(mddev_t *mddev, const char *buf, size_t len)
+{
+ /* If array is inactive, we can reduce the component size, but
+ * not increase it (except from 0).
+ * If array is active, we can try an on-line resize
+ */
+ char *e;
+ int err = 0;
+ unsigned long long size = simple_strtoull(buf, &e, 10);
+ if (!*buf || *buf == '\n' ||
+ (*e && *e != '\n'))
+ return -EINVAL;
+
+ if (mddev->pers) {
+ err = update_size(mddev, size);
+ md_update_sb(mddev);
+ } else {
+ if (mddev->size == 0 ||
+ mddev->size > size)
+ mddev->size = size;
+ else
+ err = -ENOSPC;
+ }
+ return err ? err : len;
+}
+
+static struct md_sysfs_entry md_size =
+__ATTR(component_size, 0644, size_show, size_store);
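+
+/*
+ * component_size is KB per member device, not the array total.
+ * Illustrative: shrink each member of an inactive array to 1 GiB, or
+ * attempt an on-line resize (via pers->resize) if it is active:
+ *
+ *   echo 1048576 > /sys/block/md0/md/component_size
+ */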
+
+
+/* Metadata version.
+ * This is either 'none' for arrays with externally managed metadata,
+ * or N.M for internally known formats
+ */
+static ssize_t
+metadata_show(mddev_t *mddev, char *page)
+{
+ if (mddev->persistent)
+ return sprintf(page, "%d.%d\n",
+ mddev->major_version, mddev->minor_version);
+ else
+ return sprintf(page, "none\n");
+}
+
+static ssize_t
+metadata_store(mddev_t *mddev, const char *buf, size_t len)
+{
+ int major, minor;
+ char *e;
+ if (!list_empty(&mddev->disks))
+ return -EBUSY;
+
+ if (cmd_match(buf, "none")) {
+ mddev->persistent = 0;
+ mddev->major_version = 0;
+ mddev->minor_version = 90;
+ return len;
+ }
+ major = simple_strtoul(buf, &e, 10);
+ if (e==buf || *e != '.')
+ return -EINVAL;
+ buf = e+1;
+ minor = simple_strtoul(buf, &e, 10);
+ if (e==buf || *e != '\n')
+ return -EINVAL;
+ if (major >= ARRAY_SIZE(super_types) ||
+ super_types[major].name == NULL)
+ return -ENOENT;
+ mddev->major_version = major;
+ mddev->minor_version = minor;
+ mddev->persistent = 1;
+ return len;
+}
+
+static struct md_sysfs_entry md_metadata =
+__ATTR(metadata_version, 0644, metadata_show, metadata_store);
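+
+/*
+ * Illustrative: "0.90" or "1.2" select an in-kernel superblock
+ * format, "none" marks the metadata as externally managed:
+ *
+ *   echo 1.2 > /sys/block/md0/md/metadata_version
+ */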
static ssize_t
action_show(mddev_t *mddev, char *page)
mddev->sync_thread = NULL;
mddev->recovery = 0;
}
- return len;
- }
-
- if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
- test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
+ } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
+ test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
return -EBUSY;
- if (cmd_match(page, "resync") || cmd_match(page, "recover"))
+ else if (cmd_match(page, "resync") || cmd_match(page, "recover"))
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
else {
if (cmd_match(page, "check"))
return -EINVAL;
set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
- set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
}
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
return len;
}
static struct md_sysfs_entry
md_mismatches = __ATTR_RO(mismatch_cnt);
+static ssize_t
+sync_min_show(mddev_t *mddev, char *page)
+{
+ return sprintf(page, "%d (%s)\n", speed_min(mddev),
+ mddev->sync_speed_min ? "local": "system");
+}
+
+static ssize_t
+sync_min_store(mddev_t *mddev, const char *buf, size_t len)
+{
+ int min;
+ char *e;
+ if (strncmp(buf, "system", 6)==0) {
+ mddev->sync_speed_min = 0;
+ return len;
+ }
+ min = simple_strtoul(buf, &e, 10);
+ if (buf == e || (*e && *e != '\n') || min <= 0)
+ return -EINVAL;
+ mddev->sync_speed_min = min;
+ return len;
+}
+
+static struct md_sysfs_entry md_sync_min =
+__ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
+
+static ssize_t
+sync_max_show(mddev_t *mddev, char *page)
+{
+ return sprintf(page, "%d (%s)\n", speed_max(mddev),
+ mddev->sync_speed_max ? "local": "system");
+}
+
+static ssize_t
+sync_max_store(mddev_t *mddev, const char *buf, size_t len)
+{
+ int max;
+ char *e;
+ if (strncmp(buf, "system", 6)==0) {
+ mddev->sync_speed_max = 0;
+ return len;
+ }
+ max = simple_strtoul(buf, &e, 10);
+ if (buf == e || (*e && *e != '\n') || max <= 0)
+ return -EINVAL;
+ mddev->sync_speed_max = max;
+ return len;
+}
+
+static struct md_sysfs_entry md_sync_max =
+__ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
+
+
+static ssize_t
+sync_speed_show(mddev_t *mddev, char *page)
+{
+ unsigned long resync, dt, db;
+ resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active));
+ dt = ((jiffies - mddev->resync_mark) / HZ);
+ if (!dt) dt++;
+ db = resync - (mddev->resync_mark_cnt);
+ return sprintf(page, "%lu\n", db/dt/2); /* K/sec */
+}
+
+static struct md_sysfs_entry
+md_sync_speed = __ATTR_RO(sync_speed);
+
+static ssize_t
+sync_completed_show(mddev_t *mddev, char *page)
+{
+ unsigned long max_blocks, resync;
+
+ if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
+ max_blocks = mddev->resync_max_sectors;
+ else
+ max_blocks = mddev->size << 1;
+
+ resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active));
+ return sprintf(page, "%lu / %lu\n", resync, max_blocks);
+}
+
+static struct md_sysfs_entry
+md_sync_completed = __ATTR_RO(sync_completed);
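+
+/*
+ * Units, for reference: sync_speed is KB/sec (db counts sectors, dt
+ * counts seconds since resync_mark, and /2 converts sectors to KB);
+ * sync_completed is "done / total" in sectors.
+ */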
+
static struct attribute *md_default_attrs[] = {
&md_level.attr,
&md_raid_disks.attr,
+ &md_chunk_size.attr,
+ &md_size.attr,
+ &md_metadata.attr,
+ &md_new_device.attr,
NULL,
};
static struct attribute *md_redundancy_attrs[] = {
&md_scan_mode.attr,
&md_mismatches.attr,
+ &md_sync_min.attr,
+ &md_sync_max.attr,
+ &md_sync_speed.attr,
+ &md_sync_completed.attr,
NULL,
};
static struct attribute_group md_redundancy_group = {
}
#ifdef CONFIG_KMOD
- request_module("md-level-%d", mddev->level);
+ if (mddev->level != LEVEL_NONE)
+ request_module("md-level-%d", mddev->level);
+ else if (mddev->clevel[0])
+ request_module("md-%s", mddev->clevel);
#endif
/*
return -ENOMEM;
spin_lock(&pers_lock);
- pers = find_pers(mddev->level);
+ pers = find_pers(mddev->level, mddev->clevel);
if (!pers || !try_module_get(pers->owner)) {
spin_unlock(&pers_lock);
- printk(KERN_WARNING "md: personality for level %d is not loaded!\n",
- mddev->level);
+ if (mddev->level != LEVEL_NONE)
+ printk(KERN_WARNING "md: personality for level %d is not loaded!\n",
+ mddev->level);
+ else
+ printk(KERN_WARNING "md: personality for level %s is not loaded!\n",
+ mddev->clevel);
return -EINVAL;
}
mddev->pers = pers;
spin_unlock(&pers_lock);
+ mddev->level = pers->level;
+ strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
mddev->recovery = 0;
mddev->resync_max_sectors = mddev->size << 1; /* may be over-ridden by personality */
set_disk_ro(disk, 1);
}
- bitmap_destroy(mddev);
- if (mddev->bitmap_file) {
- atomic_set(&mddev->bitmap_file->f_dentry->d_inode->i_writecount, 1);
- fput(mddev->bitmap_file);
- mddev->bitmap_file = NULL;
- }
- mddev->bitmap_offset = 0;
-
/*
* Free resources if final stop
*/
struct gendisk *disk;
printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
+ bitmap_destroy(mddev);
+ if (mddev->bitmap_file) {
+ atomic_set(&mddev->bitmap_file->f_dentry->d_inode->i_writecount, 1);
+ fput(mddev->bitmap_file);
+ mddev->bitmap_file = NULL;
+ }
+ mddev->bitmap_offset = 0;
+
ITERATE_RDEV(mddev,rdev,tmp)
if (rdev->raid_disk >= 0) {
char nm[20];
info.ctime = mddev->ctime;
info.level = mddev->level;
info.size = mddev->size;
+ if (info.size != mddev->size) /* overflow */
+ info.size = -1;
info.nr_disks = nr;
info.raid_disks = mddev->raid_disks;
info.md_minor = mddev->md_minor;
if (info->state & (1<<MD_DISK_WRITEMOSTLY))
set_bit(WriteMostly, &rdev->flags);
- err = bind_rdev_to_array(rdev, mddev);
- if (err) {
- export_rdev(rdev);
- return err;
- }
-
if (!mddev->persistent) {
printk(KERN_INFO "md: nonpersistent superblock ...\n");
rdev->sb_offset = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
rdev->size = calc_dev_size(rdev, mddev->chunk_size);
- if (!mddev->size || (mddev->size > rdev->size))
- mddev->size = rdev->size;
+ err = bind_rdev_to_array(rdev, mddev);
+ if (err) {
+ export_rdev(rdev);
+ return err;
+ }
}
return 0;
size = calc_dev_size(rdev, mddev->chunk_size);
rdev->size = size;
- if (size < mddev->size) {
- printk(KERN_WARNING
- "%s: disk size %llu blocks < array size %llu\n",
- mdname(mddev), (unsigned long long)size,
- (unsigned long long)mddev->size);
- err = -ENOSPC;
- goto abort_export;
- }
-
if (test_bit(Faulty, &rdev->flags)) {
printk(KERN_WARNING
"md: can not hot-add faulty %s disk to %s!\n",
}
clear_bit(In_sync, &rdev->flags);
rdev->desc_nr = -1;
- bind_rdev_to_array(rdev, mddev);
+ err = bind_rdev_to_array(rdev, mddev);
+ if (err)
+ goto abort_export;
/*
* The rest should better be atomic, we can have disk failures
mddev->ctime = get_seconds();
mddev->level = info->level;
+ mddev->clevel[0] = 0;
mddev->size = info->size;
mddev->raid_disks = info->raid_disks;
/* don't set md_minor, it is determined by which /dev/md* was
return 0;
}
+static int update_size(mddev_t *mddev, unsigned long size)
+{
+ mdk_rdev_t * rdev;
+ int rv;
+ struct list_head *tmp;
+
+ if (mddev->pers->resize == NULL)
+ return -EINVAL;
+ /* The "size" is the amount of each device that is used.
+ * This can only make sense for arrays with redundancy.
+ * linear and raid0 always use whatever space is available
+ * We can only consider changing the size if no resync
+ * or reconstruction is happening, and if the new size
+ * is acceptable. It must fit before the sb_offset or,
+ * if that is <data_offset, it must fit before the
+ * size of each device.
+ * If size is zero, we find the largest size that fits.
+ */
+ if (mddev->sync_thread)
+ return -EBUSY;
+ ITERATE_RDEV(mddev,rdev,tmp) {
+ sector_t avail;
+ int fit = (size == 0);
+ if (rdev->sb_offset > rdev->data_offset)
+ avail = (rdev->sb_offset*2) - rdev->data_offset;
+ else
+ avail = get_capacity(rdev->bdev->bd_disk)
+ - rdev->data_offset;
+ if (fit && (size == 0 || size > avail/2))
+ size = avail/2;
+ if (avail < ((sector_t)size << 1))
+ return -ENOSPC;
+ }
+ rv = mddev->pers->resize(mddev, (sector_t)size *2);
+ if (!rv) {
+ struct block_device *bdev;
+
+ bdev = bdget_disk(mddev->gendisk, 0);
+ if (bdev) {
+ mutex_lock(&bdev->bd_inode->i_mutex);
+ i_size_write(bdev->bd_inode, (loff_t)mddev->array_size << 10);
+ mutex_unlock(&bdev->bd_inode->i_mutex);
+ bdput(bdev);
+ }
+ }
+ return rv;
+}
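+
+/*
+ * Unit note: "size" and sb_offset are in KB while data_offset and
+ * get_capacity() count 512-byte sectors, hence the *2 conversions
+ * above; array_size is in KB, hence << 10 to get the inode size in
+ * bytes.
+ */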
+
+static int update_raid_disks(mddev_t *mddev, int raid_disks)
+{
+ int rv;
+ /* change the number of raid disks */
+ if (mddev->pers->reshape == NULL)
+ return -EINVAL;
+ if (raid_disks <= 0 ||
+ raid_disks >= mddev->max_disks)
+ return -EINVAL;
+ if (mddev->sync_thread)
+ return -EBUSY;
+ rv = mddev->pers->reshape(mddev, raid_disks);
+ return rv;
+}
+
+
/*
* update_array_info is used to change the configuration of an
* on-line array.
)
return -EINVAL;
/* Check there is only one change */
- if (mddev->size != info->size) cnt++;
+ if (info->size >= 0 && mddev->size != info->size) cnt++;
if (mddev->raid_disks != info->raid_disks) cnt++;
if (mddev->layout != info->layout) cnt++;
if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) cnt++;
else
return mddev->pers->reconfig(mddev, info->layout, -1);
}
- if (mddev->size != info->size) {
- mdk_rdev_t * rdev;
- struct list_head *tmp;
- if (mddev->pers->resize == NULL)
- return -EINVAL;
- /* The "size" is the amount of each device that is used.
- * This can only make sense for arrays with redundancy.
- * linear and raid0 always use whatever space is available
- * We can only consider changing the size if no resync
- * or reconstruction is happening, and if the new size
- * is acceptable. It must fit before the sb_offset or,
- * if that is <data_offset, it must fit before the
- * size of each device.
- * If size is zero, we find the largest size that fits.
- */
- if (mddev->sync_thread)
- return -EBUSY;
- ITERATE_RDEV(mddev,rdev,tmp) {
- sector_t avail;
- int fit = (info->size == 0);
- if (rdev->sb_offset > rdev->data_offset)
- avail = (rdev->sb_offset*2) - rdev->data_offset;
- else
- avail = get_capacity(rdev->bdev->bd_disk)
- - rdev->data_offset;
- if (fit && (info->size == 0 || info->size > avail/2))
- info->size = avail/2;
- if (avail < ((sector_t)info->size << 1))
- return -ENOSPC;
- }
- rv = mddev->pers->resize(mddev, (sector_t)info->size *2);
- if (!rv) {
- struct block_device *bdev;
-
- bdev = bdget_disk(mddev->gendisk, 0);
- if (bdev) {
- down(&bdev->bd_inode->i_sem);
- i_size_write(bdev->bd_inode, mddev->array_size << 10);
- up(&bdev->bd_inode->i_sem);
- bdput(bdev);
- }
- }
- }
- if (mddev->raid_disks != info->raid_disks) {
- /* change the number of raid disks */
- if (mddev->pers->reshape == NULL)
- return -EINVAL;
- if (info->raid_disks <= 0 ||
- info->raid_disks >= mddev->max_disks)
- return -EINVAL;
- if (mddev->sync_thread)
- return -EBUSY;
- rv = mddev->pers->reshape(mddev, info->raid_disks);
- if (!rv) {
- struct block_device *bdev;
-
- bdev = bdget_disk(mddev->gendisk, 0);
- if (bdev) {
- down(&bdev->bd_inode->i_sem);
- i_size_write(bdev->bd_inode, mddev->array_size << 10);
- up(&bdev->bd_inode->i_sem);
- bdput(bdev);
- }
- }
- }
+ if (info->size >= 0 && mddev->size != info->size)
+ rv = update_size(mddev, info->size);
+
+ if (mddev->raid_disks != info->raid_disks)
+ rv = update_raid_disks(mddev, info->raid_disks);
+
if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
if (mddev->pers->quiesce == NULL)
return -EINVAL;
return 0;
}
+static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+{
+ mddev_t *mddev = bdev->bd_disk->private_data;
+
+ geo->heads = 2;
+ geo->sectors = 4;
+ geo->cylinders = get_capacity(mddev->gendisk) / 8;
+ return 0;
+}
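+
+/*
+ * The fake geometry keeps heads*sectors == 8, so cylinders is simply
+ * capacity/8; geo->start is filled in by the block layer from the
+ * partition start, which the removed HDIO_GETGEO case below did by
+ * hand with put_user().
+ */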
+
static int md_ioctl(struct inode *inode, struct file *file,
unsigned int cmd, unsigned long arg)
{
int err = 0;
void __user *argp = (void __user *)arg;
- struct hd_geometry __user *loc = argp;
mddev_t *mddev = NULL;
if (!capable(CAP_SYS_ADMIN))
* 4 sectors (with a BIG number of cylinders...). This drives
* dosfs just mad... ;-)
*/
- case HDIO_GETGEO:
- if (!loc) {
- err = -EINVAL;
- goto abort_unlock;
- }
- err = put_user (2, (char __user *) &loc->heads);
- if (err)
- goto abort_unlock;
- err = put_user (4, (char __user *) &loc->sectors);
- if (err)
- goto abort_unlock;
- err = put_user(get_capacity(mddev->gendisk)/8,
- (short __user *) &loc->cylinders);
- if (err)
- goto abort_unlock;
- err = put_user (get_start_sect(inode->i_bdev),
- (long __user *) &loc->start);
- goto done_unlock;
}
/*
.open = md_open,
.release = md_release,
.ioctl = md_ioctl,
+ .getgeo = md_getgeo,
.media_changed = md_media_changed,
.revalidate_disk= md_revalidate,
};
printk(KERN_INFO "md: syncing RAID array %s\n", mdname(mddev));
printk(KERN_INFO "md: minimum _guaranteed_ reconstruction speed:"
- " %d KB/sec/disc.\n", sysctl_speed_limit_min);
+ " %d KB/sec/disc.\n", speed_min(mddev));
printk(KERN_INFO "md: using maximum available idle IO bandwidth "
"(but not more than %d KB/sec) for reconstruction.\n",
- sysctl_speed_limit_max);
+ speed_max(mddev));
is_mddev_idle(mddev); /* this also initializes IO event counters */
/* we don't use the checkpoint if there's a bitmap */
skipped = 0;
sectors = mddev->pers->sync_request(mddev, j, &skipped,
- currspeed < sysctl_speed_limit_min);
+ currspeed < speed_min(mddev));
if (sectors == 0) {
set_bit(MD_RECOVERY_ERR, &mddev->recovery);
goto out;
currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
/((jiffies-mddev->resync_mark)/HZ +1) +1;
- if (currspeed > sysctl_speed_limit_min) {
- if ((currspeed > sysctl_speed_limit_max) ||
+ if (currspeed > speed_min(mddev)) {
+ if ((currspeed > speed_max(mddev)) ||
!is_mddev_idle(mddev)) {
msleep(500);
goto repeat;
int num = simple_strtoul(val, &e, 10);
if (*val && (*e == '\0' || *e == '\n')) {
start_readonly = num;
- return 0;;
+ return 0;
}
return -EINVAL;
}