static struct raid_type {
const char *name; /* RAID algorithm. */
const char *descr; /* Descriptor text for logging. */
- const unsigned parity_devs; /* # of parity devices. */
- const unsigned minimal_devs; /* minimal # of devices in set. */
- const unsigned level; /* RAID level. */
- const unsigned algorithm; /* RAID algorithm. */
+ const unsigned int parity_devs; /* # of parity devices. */
+ const unsigned int minimal_devs;/* minimal # of devices in set. */
+ const unsigned int level; /* RAID level. */
+ const unsigned int algorithm; /* RAID algorithm. */
} raid_types[] = {
{"raid0", "raid0 (striping)", 0, 2, 0, 0 /* NONE */},
{"raid1", "raid1 (mirroring)", 0, 2, 1, 0 /* NONE */},
};
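For context, dm-raid resolves the <raid_type> table argument against this array by name. A minimal sketch of such a lookup, assuming the kernel's ARRAY_SIZE() and strcmp() helpers (the driver's actual helper may iterate differently):

/* Illustrative lookup over raid_types[]; a sketch, not the exact driver code. */
static struct raid_type *get_raid_type(const char *name)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(raid_types); i++)
                if (!strcmp(raid_types[i].name, name))
                        return &raid_types[i];

        return NULL;
}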
static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *raid_type,
- unsigned raid_devs)
+ unsigned int raid_devs)
{
- unsigned i;
+ unsigned int i;
struct raid_set *rs;
if (raid_devs <= raid_type->parity_devs) {
*/
static int validate_raid_redundancy(struct raid_set *rs)
{
- unsigned i, rebuild_cnt = 0;
- unsigned rebuilds_per_group = 0, copies;
- unsigned group_size, last_group_start;
+ unsigned int i, rebuild_cnt = 0;
+ unsigned int rebuilds_per_group = 0, copies;
+ unsigned int group_size, last_group_start;
for (i = 0; i < rs->md.raid_disks; i++)
if (!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
* [raid10_format <near|far|offset>] Layout algorithm. (Default: near)
*/
static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as,
- unsigned num_raid_params)
+ unsigned int num_raid_params)
{
int value, raid10_format = ALGORITHM_RAID10_DEFAULT;
- unsigned raid10_copies = 2;
- unsigned i, write_mostly = 0;
- unsigned region_size = 0;
+ unsigned int raid10_copies = 2;
+ unsigned int i, write_mostly = 0;
+ unsigned int region_size = 0;
sector_t max_io_len;
const char *arg, *key;
struct raid_dev *rd;
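The optional parameters documented above (raid10_format, raid10_copies, region_size, ...) arrive as a flat key/value argument list. A sketch of how such a scan typically looks, assuming dm's dm_shift_arg() and the kernel's kstrtoint(); the flow is illustrative, not the driver's exact code:

/* Illustrative optional-parameter scan using the locals declared above. */
for (i = 0; i < num_raid_params; i++) {
        key = dm_shift_arg(as);
        if (!key)
                break;

        if (!strcasecmp(key, "raid10_copies")) {
                arg = dm_shift_arg(as);
                if (!arg || kstrtoint(arg, 10, &value) || value < 2)
                        return -EINVAL;
                raid10_copies = value;
                i++;    /* the value consumed an argument slot too */
        }
        /* ... remaining keys handled analogously ... */
}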
if (rs->raid10_copies < 2 ||
delta_disks < 0) {
rs->ti->error = "Bogus raid10 data copies or delta disks";
- return EINVAL;
+ return -EINVAL;
}
dev_sectors *= rs->raid10_copies;
return 0;
bad:
rs->ti->error = "Target length not divisible by number of data devices";
- return EINVAL;
+ return -EINVAL;
}
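Both EINVAL hunks above fix the same class of bug: kernel convention is to return a negative errno, so a bare EINVAL (a positive 22) would not register as a failure with callers that test for a negative result. An illustrative caller pattern (not the actual dm core code):

/* With 'return EINVAL;' this check would pass and the error would be lost. */
r = parse_raid_params(rs, &as, num_raid_params);
if (r < 0)
        return r;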
/* Setup recovery on @rs */
/* raid1 -> raid10_near layout */
mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_NEAR,
rs->raid_disks);
- else
+ else
return -EINVAL;
}
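For reference when reading this takeover path: md packs the raid10 layout into a single word. A simplified view of that encoding (see drivers/md/raid10.c; treat the exact bit positions as informational):

/*
 * md raid10 layout word (simplified):
 *   bits 0-7  : number of 'near' copies
 *   bits 8-15 : number of 'far' copies
 *   bit 16    : 'offset' layout flag
 * A two-leg raid1 therefore maps onto raid10 'near' with two copies.
 */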
* enforce recreation based on the passed in table parameters.
*
*/
-static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
+static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
int r;
bool resize;
struct raid_type *rt;
- unsigned num_raid_params, num_raid_devs;
+ unsigned int num_raid_params, num_raid_devs;
sector_t calculated_dev_sectors;
struct raid_set *rs = NULL;
const char *arg;
}
}
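raid_message() below forwards target messages to md's sync-action handling; dm-raid documents message strings such as "frozen", "idle", "check" and "repair". A sketch of the dispatch, assuming md's MD_RECOVERY_* recovery bits; illustrative only:

/* Illustrative message dispatch; not the driver's exact code. */
if (argc != 1)
        return -EINVAL;

if (!strcasecmp(argv[0], "frozen"))
        set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
else if (!strcasecmp(argv[0], "check"))
        set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
else
        return -EINVAL;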
-static int raid_message(struct dm_target *ti, unsigned argc, char **argv)
+static int raid_message(struct dm_target *ti, unsigned int argc, char **argv)
{
struct raid_set *rs = ti->private;
struct mddev *mddev = &rs->md;
iterate_devices_callout_fn fn, void *data)
{
struct raid_set *rs = ti->private;
- unsigned i;
+ unsigned int i;
int r = 0;
for (i = 0; !r && i < rs->md.raid_disks; i++)
static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
struct raid_set *rs = ti->private;
- unsigned chunk_size = rs->md.chunk_sectors << 9;
+ unsigned int chunk_size = rs->md.chunk_sectors << 9;
struct r5conf *conf = rs->md.private;
blk_limits_io_min(limits, chunk_size);
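One note on this last hunk: chunk_sectors counts 512-byte sectors, so the '<< 9' converts sectors to bytes before the value reaches the block layer:

/* e.g. chunk_sectors = 128  ->  128 << 9 = 65536 bytes (64 KiB) */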