md: raid0: Make raid0_run() return a proper error code.
drivers/md/raid0.c
/*
   raid0.c : Multiple Devices driver for Linux
             Copyright (C) 1994-96 Marc ZYNGIER
	     <zyngier@ufr-info-p7.ibp.fr> or
	     <maz@gloups.fdn.fr>
             Copyright (C) 1999, 2000 Ingo Molnar, Red Hat


   RAID-0 management functions.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/blkdev.h>
#include <linux/seq_file.h>
#include "md.h"
#include "raid0.h"

static void raid0_unplug(struct request_queue *q)
{
	mddev_t *mddev = q->queuedata;
	raid0_conf_t *conf = mddev_to_conf(mddev);
	mdk_rdev_t **devlist = conf->strip_zone[0].dev;
	int i;

	for (i = 0; i < mddev->raid_disks; i++) {
		struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev);

		blk_unplug(r_queue);
	}
}

static int raid0_congested(void *data, int bits)
{
	mddev_t *mddev = data;
	raid0_conf_t *conf = mddev_to_conf(mddev);
	mdk_rdev_t **devlist = conf->strip_zone[0].dev;
	int i, ret = 0;

	for (i = 0; i < mddev->raid_disks && !ret; i++) {
		struct request_queue *q = bdev_get_queue(devlist[i]->bdev);

		ret |= bdi_congested(&q->backing_dev_info, bits);
	}
	return ret;
}

static int create_strip_zones(mddev_t *mddev)
{
	int i, c, j;
	sector_t curr_zone_end;
	raid0_conf_t *conf = mddev_to_conf(mddev);
	mdk_rdev_t *smallest, *rdev1, *rdev2, *rdev;
	struct strip_zone *zone;
	int cnt;
	char b[BDEVNAME_SIZE];

	/*
	 * The number of 'same size groups'
	 */
	conf->nr_strip_zones = 0;

	list_for_each_entry(rdev1, &mddev->disks, same_set) {
		printk(KERN_INFO "raid0: looking at %s\n",
			bdevname(rdev1->bdev, b));
		c = 0;
		list_for_each_entry(rdev2, &mddev->disks, same_set) {
			printk(KERN_INFO "raid0: comparing %s(%llu)",
				bdevname(rdev1->bdev, b),
				(unsigned long long)rdev1->sectors);
			printk(KERN_INFO " with %s(%llu)\n",
				bdevname(rdev2->bdev, b),
				(unsigned long long)rdev2->sectors);
			if (rdev2 == rdev1) {
				printk(KERN_INFO "raid0: END\n");
				break;
			}
			if (rdev2->sectors == rdev1->sectors) {
				/*
				 * Not unique, don't count it as a new
				 * group
				 */
				printk(KERN_INFO "raid0: EQUAL\n");
				c = 1;
				break;
			}
			printk(KERN_INFO "raid0: NOT EQUAL\n");
		}
		if (!c) {
			printk(KERN_INFO "raid0: ==> UNIQUE\n");
			conf->nr_strip_zones++;
			printk(KERN_INFO "raid0: %d zones\n",
				conf->nr_strip_zones);
		}
	}
	printk(KERN_INFO "raid0: FINAL %d zones\n", conf->nr_strip_zones);

	conf->strip_zone = kzalloc(sizeof(struct strip_zone) *
				conf->nr_strip_zones, GFP_KERNEL);
	if (!conf->strip_zone)
		return -ENOMEM;
	conf->devlist = kzalloc(sizeof(mdk_rdev_t *) *
				conf->nr_strip_zones * mddev->raid_disks,
				GFP_KERNEL);
	if (!conf->devlist)
		return -ENOMEM;

	/* The first zone must contain all devices, so here we check that
	 * there is a proper alignment of slots to devices and find them all
	 */
	zone = &conf->strip_zone[0];
	cnt = 0;
	smallest = NULL;
	zone->dev = conf->devlist;
	list_for_each_entry(rdev1, &mddev->disks, same_set) {
		int j = rdev1->raid_disk;

		if (j < 0 || j >= mddev->raid_disks) {
			printk(KERN_ERR "raid0: bad disk number %d - "
				"aborting!\n", j);
			goto abort;
		}
		if (zone->dev[j]) {
			printk(KERN_ERR "raid0: multiple devices for %d - "
				"aborting!\n", j);
			goto abort;
		}
		zone->dev[j] = rdev1;

		blk_queue_stack_limits(mddev->queue,
				       rdev1->bdev->bd_disk->queue);
		/* as we don't honour merge_bvec_fn, we must never risk
		 * violating it, so limit ->max_sector to one PAGE, as
		 * a one page request is never in violation.
		 */

		if (rdev1->bdev->bd_disk->queue->merge_bvec_fn &&
		    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);

		if (!smallest || (rdev1->sectors < smallest->sectors))
			smallest = rdev1;
		cnt++;
	}
	if (cnt != mddev->raid_disks) {
		printk(KERN_ERR "raid0: too few disks (%d of %d) - "
			"aborting!\n", cnt, mddev->raid_disks);
		goto abort;
	}
	zone->nb_dev = cnt;
	zone->sectors = smallest->sectors * cnt;
	zone->zone_end = zone->sectors;

	curr_zone_end = zone->sectors;

	/* now do the other zones */
	for (i = 1; i < conf->nr_strip_zones; i++) {
		zone = conf->strip_zone + i;
		zone->dev = conf->strip_zone[i-1].dev + mddev->raid_disks;

		printk(KERN_INFO "raid0: zone %d\n", i);
		zone->dev_start = smallest->sectors;
		smallest = NULL;
		c = 0;

		for (j = 0; j < cnt; j++) {
			char b[BDEVNAME_SIZE];
			rdev = conf->strip_zone[0].dev[j];
			printk(KERN_INFO "raid0: checking %s ...",
				bdevname(rdev->bdev, b));
			if (rdev->sectors <= zone->dev_start) {
				printk(KERN_INFO " nope.\n");
				continue;
			}
			printk(KERN_INFO " contained as device %d\n", c);
			zone->dev[c] = rdev;
			c++;
			if (!smallest || rdev->sectors < smallest->sectors) {
				smallest = rdev;
				printk(KERN_INFO " (%llu) is smallest!\n",
					(unsigned long long)rdev->sectors);
			}
		}

		zone->nb_dev = c;
		zone->sectors = (smallest->sectors - zone->dev_start) * c;
		printk(KERN_INFO "raid0: zone->nb_dev: %d, sectors: %llu\n",
			zone->nb_dev, (unsigned long long)zone->sectors);

		curr_zone_end += zone->sectors;
		zone->zone_end = curr_zone_end;

		printk(KERN_INFO "raid0: current zone start: %llu\n",
			(unsigned long long)smallest->sectors);
	}
	mddev->queue->unplug_fn = raid0_unplug;
	mddev->queue->backing_dev_info.congested_fn = raid0_congested;
	mddev->queue->backing_dev_info.congested_data = mddev;

	printk(KERN_INFO "raid0: done.\n");
	return 0;
abort:
	return -EINVAL;
}
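
/*
 * Worked example (illustrative, not part of the original source): with
 * three members of 1000, 2000 and 2000 sectors, create_strip_zones()
 * finds two zones.  Zone 0 stripes the first 1000 sectors of all three
 * devices (zone->sectors = 3000, zone_end = 3000); zone 1 starts at
 * dev_start = 1000 and stripes the remaining 1000 sectors of the two
 * larger devices (zone->sectors = 2000, zone_end = 5000).
 */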

/**
 * raid0_mergeable_bvec -- tell bio layer if two requests can be merged
 * @q: request queue
 * @bvm: properties of new bio
 * @biovec: the request that could be merged to it
 *
 * Return the number of bytes we can accept at this offset.
 */
static int raid0_mergeable_bvec(struct request_queue *q,
				struct bvec_merge_data *bvm,
				struct bio_vec *biovec)
{
	mddev_t *mddev = q->queuedata;
	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
	int max;
	unsigned int chunk_sectors = mddev->chunk_size >> 9;
	unsigned int bio_sectors = bvm->bi_size >> 9;

	max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
	if (max < 0)
		max = 0; /* bio_add cannot handle a negative return */
	if (max <= biovec->bv_len && bio_sectors == 0)
		return biovec->bv_len;
	else
		return max;
}
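
/*
 * Worked example (illustrative, not from the original source): with a
 * 64KiB chunk (chunk_sectors = 128), a bio that already holds 4 sectors
 * at an offset of 120 sectors into its chunk can accept at most
 * (128 - (120 + 4)) << 9 = 2048 more bytes before it would cross the
 * chunk boundary.
 */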

static sector_t raid0_size(mddev_t *mddev, sector_t sectors, int raid_disks)
{
	sector_t array_sectors = 0;
	mdk_rdev_t *rdev;

	WARN_ONCE(sectors || raid_disks,
		  "%s does not support generic reshape\n", __func__);

	list_for_each_entry(rdev, &mddev->disks, same_set)
		array_sectors += rdev->sectors;

	return array_sectors;
}

static int raid0_run(mddev_t *mddev)
{
	raid0_conf_t *conf;
	int ret;

	if (mddev->chunk_size == 0) {
		printk(KERN_ERR "md/raid0: non-zero chunk size required.\n");
		return -EINVAL;
	}
	printk(KERN_INFO "%s: setting max_sectors to %d, segment boundary to %d\n",
	       mdname(mddev),
	       mddev->chunk_size >> 9,
	       (mddev->chunk_size>>1)-1);
	blk_queue_max_sectors(mddev->queue, mddev->chunk_size >> 9);
	blk_queue_segment_boundary(mddev->queue, (mddev->chunk_size>>1) - 1);
	mddev->queue->queue_lock = &mddev->queue->__queue_lock;

	conf = kmalloc(sizeof(raid0_conf_t), GFP_KERNEL);
	if (!conf)
		return -ENOMEM;
	mddev->private = (void *)conf;

	conf->strip_zone = NULL;
	conf->devlist = NULL;
	ret = create_strip_zones(mddev);
	if (ret < 0)
		goto out_free_conf;

	/* calculate array device size */
	md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));

	printk(KERN_INFO "raid0 : md_size is %llu sectors.\n",
	       (unsigned long long)mddev->array_sectors);
	/* calculate the max read-ahead size.
	 * For read-ahead of large files to be effective, we need to
	 * readahead at least twice a whole stripe, i.e. the number of
	 * devices multiplied by the chunk size times 2.
	 * If an individual device has an ra_pages greater than the
	 * chunk size, then we will not drive that device as hard as it
	 * wants. We consider this a configuration error: a larger
	 * chunksize should be used in that case.
	 */
	{
		int stripe = mddev->raid_disks * mddev->chunk_size / PAGE_SIZE;
		if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
			mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
	}
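
	/*
	 * Illustrative numbers (not from the original source): four
	 * members with a 64KiB chunk on 4KiB pages give
	 * stripe = 4 * 65536 / 4096 = 64 pages, so ra_pages is raised
	 * to at least 128 pages (512KiB), i.e. two full stripes.
	 */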

	blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec);
	return 0;

out_free_conf:
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
	mddev->private = NULL;
	return ret;
}

static int raid0_stop(mddev_t *mddev)
{
	raid0_conf_t *conf = mddev_to_conf(mddev);

	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf' */
	kfree(conf->strip_zone);
	conf->strip_zone = NULL;
	kfree(conf->devlist); /* free the device list allocated in create_strip_zones() */
	conf->devlist = NULL;
	kfree(conf);
	mddev->private = NULL;

	return 0;
}

/* Find the zone which holds a particular offset */
static struct strip_zone *find_zone(struct raid0_private_data *conf,
				    sector_t sector)
{
	int i;
	struct strip_zone *z = conf->strip_zone;

	for (i = 0; i < conf->nr_strip_zones; i++)
		if (sector < z[i].zone_end)
			return z + i;
	BUG();
}
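
/*
 * Worked example (illustrative, not from the original source): for the
 * two-zone layout sketched above (zone_end = 3000 and 5000), sector 350
 * falls in zone 0 and sector 3500 in zone 1; a sector at or beyond 5000
 * would indicate a caller bug and trip the BUG().
 */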

static int raid0_make_request(struct request_queue *q, struct bio *bio)
{
	mddev_t *mddev = q->queuedata;
	unsigned int sect_in_chunk, chunksect_bits, chunk_sects;
	raid0_conf_t *conf = mddev_to_conf(mddev);
	struct strip_zone *zone;
	mdk_rdev_t *tmp_dev;
	sector_t chunk;
	sector_t sector, rsect;
	const int rw = bio_data_dir(bio);
	int cpu;

	if (unlikely(bio_barrier(bio))) {
		bio_endio(bio, -EOPNOTSUPP);
		return 0;
	}

	cpu = part_stat_lock();
	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
		      bio_sectors(bio));
	part_stat_unlock();

	chunk_sects = mddev->chunk_size >> 9;
	chunksect_bits = ffz(~chunk_sects);
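	/* chunk_sects is a power of two, so ffz(~chunk_sects) yields
	 * log2(chunk_sects), e.g. ffz(~128) = 7 */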
	sector = bio->bi_sector;

	if (unlikely(chunk_sects < (bio->bi_sector & (chunk_sects - 1)) + (bio->bi_size >> 9))) {
		struct bio_pair *bp;
		/* Sanity check -- queue functions should prevent this happening */
		if (bio->bi_vcnt != 1 ||
		    bio->bi_idx != 0)
			goto bad_map;
		/* This is a one page bio that upper layers
		 * refuse to split for us, so we need to split it.
		 */
		bp = bio_split(bio, chunk_sects - (bio->bi_sector & (chunk_sects - 1)));
		if (raid0_make_request(q, &bp->bio1))
			generic_make_request(&bp->bio1);
		if (raid0_make_request(q, &bp->bio2))
			generic_make_request(&bp->bio2);

		bio_pair_release(bp);
		return 0;
	}
	zone = find_zone(conf, sector);
	sect_in_chunk = bio->bi_sector & (chunk_sects - 1);
	{
		sector_t x = (zone->sectors + sector - zone->zone_end)
				>> chunksect_bits;

		sector_div(x, zone->nb_dev);
		chunk = x;

		x = sector >> chunksect_bits;
		tmp_dev = zone->dev[sector_div(x, zone->nb_dev)];
	}
	rsect = (chunk << chunksect_bits) + zone->dev_start + sect_in_chunk;

	bio->bi_bdev = tmp_dev->bdev;
	bio->bi_sector = rsect + tmp_dev->data_offset;
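
	/*
	 * Worked example (illustrative, not from the original source):
	 * in a single-zone array with two members and 64KiB chunks
	 * (chunk_sects = 128, chunksect_bits = 7), array sector 1000
	 * lies in chunk 1000 >> 7 = 7, so it maps to device 7 % 2 = 1
	 * at per-device chunk 7 / 2 = 3, with sect_in_chunk =
	 * 1000 & 127 = 104 and rsect = (3 << 7) + 0 + 104 = 488.
	 */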

	/*
	 * Let the main block layer submit the IO and resolve recursion:
	 */
	return 1;

bad_map:
	printk("raid0_make_request bug: can't convert block across chunks"
	       " or bigger than %dk %llu %d\n", chunk_sects / 2,
	       (unsigned long long)bio->bi_sector, bio->bi_size >> 10);

	bio_io_error(bio);
	return 0;
}

static void raid0_status(struct seq_file *seq, mddev_t *mddev)
{
#undef MD_DEBUG
#ifdef MD_DEBUG
	int j, k, h;
	char b[BDEVNAME_SIZE];
	raid0_conf_t *conf = mddev_to_conf(mddev);

	h = 0;
	for (j = 0; j < conf->nr_strip_zones; j++) {
		seq_printf(seq, " z%d", j);
		seq_printf(seq, "=[");
		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
			seq_printf(seq, "%s/", bdevname(
				conf->strip_zone[j].dev[k]->bdev, b));

		seq_printf(seq, "] ze=%d ds=%d s=%d\n",
			conf->strip_zone[j].zone_end,
			conf->strip_zone[j].dev_start,
			conf->strip_zone[j].sectors);
	}
#endif
	seq_printf(seq, " %dk chunks", mddev->chunk_size/1024);
	return;
}

static struct mdk_personality raid0_personality =
{
	.name		= "raid0",
	.level		= 0,
	.owner		= THIS_MODULE,
	.make_request	= raid0_make_request,
	.run		= raid0_run,
	.stop		= raid0_stop,
	.status		= raid0_status,
	.size		= raid0_size,
};

static int __init raid0_init(void)
{
	return register_md_personality(&raid0_personality);
}

static void raid0_exit(void)
{
	unregister_md_personality(&raid0_personality);
}

module_init(raid0_init);
module_exit(raid0_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md-personality-2"); /* RAID0 */
MODULE_ALIAS("md-raid0");
MODULE_ALIAS("md-level-0");