/*
   linear.c : Multiple Devices driver for Linux
	      Copyright (C) 1994-96 Marc ZYNGIER
	      <zyngier@ufr-info-p7.ibp.fr> or
	      <maz@gloups.fdn.fr>

   Linear mode management functions.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
18 | ||
bff61975 N |
19 | #include <linux/blkdev.h> |
20 | #include <linux/raid/md_u.h> | |
bff61975 | 21 | #include <linux/seq_file.h> |
056075c7 | 22 | #include <linux/module.h> |
5a0e3ad6 | 23 | #include <linux/slab.h> |
43b2e5d8 | 24 | #include "md.h" |
ef740c37 | 25 | #include "linear.h" |
1da177e4 | 26 | |
1da177e4 LT |
27 | /* |
28 | * find which device holds a particular offset | |
29 | */ | |
a7120771 | 30 | static inline struct dev_info *which_dev(struct mddev *mddev, sector_t sector) |
1da177e4 | 31 | { |
aece3d1f | 32 | int lo, mid, hi; |
e849b938 | 33 | struct linear_conf *conf; |
1da177e4 | 34 | |
aece3d1f SS |
35 | lo = 0; |
36 | hi = mddev->raid_disks - 1; | |
af11c397 | 37 | conf = rcu_dereference(mddev->private); |
1da177e4 | 38 | |
aece3d1f SS |
39 | /* |
40 | * Binary Search | |
41 | */ | |
42 | ||
43 | while (hi > lo) { | |
44 | ||
45 | mid = (hi + lo) / 2; | |
46 | if (sector < conf->disks[mid].end_sector) | |
47 | hi = mid; | |
48 | else | |
49 | lo = mid + 1; | |
50 | } | |
51 | ||
52 | return conf->disks + lo; | |
1da177e4 LT |
53 | } |
54 | ||
55 | /** | |
15945fee | 56 | * linear_mergeable_bvec -- tell bio layer if two requests can be merged |
1da177e4 | 57 | * @q: request queue |
cc371e66 | 58 | * @bvm: properties of new bio |
1da177e4 LT |
59 | * @biovec: the request that could be merged to it. |
60 | * | |
61 | * Return amount of bytes we can take at this offset | |
62 | */ | |
cc371e66 AK |
63 | static int linear_mergeable_bvec(struct request_queue *q, |
64 | struct bvec_merge_data *bvm, | |
65 | struct bio_vec *biovec) | |
1da177e4 | 66 | { |
fd01b88c | 67 | struct mddev *mddev = q->queuedata; |
a7120771 | 68 | struct dev_info *dev0; |
cc371e66 AK |
69 | unsigned long maxsectors, bio_sectors = bvm->bi_size >> 9; |
70 | sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); | |
ba13da47 N |
71 | int maxbytes = biovec->bv_len; |
72 | struct request_queue *subq; | |
1da177e4 | 73 | |
af11c397 | 74 | rcu_read_lock(); |
1da177e4 | 75 | dev0 = which_dev(mddev, sector); |
4db7cdc8 | 76 | maxsectors = dev0->end_sector - sector; |
ba13da47 N |
77 | subq = bdev_get_queue(dev0->rdev->bdev); |
78 | if (subq->merge_bvec_fn) { | |
79 | bvm->bi_bdev = dev0->rdev->bdev; | |
80 | bvm->bi_sector -= dev0->end_sector - dev0->rdev->sectors; | |
81 | maxbytes = min(maxbytes, subq->merge_bvec_fn(subq, bvm, | |
82 | biovec)); | |
83 | } | |
af11c397 | 84 | rcu_read_unlock(); |
1da177e4 LT |
85 | |
86 | if (maxsectors < bio_sectors) | |
87 | maxsectors = 0; | |
88 | else | |
89 | maxsectors -= bio_sectors; | |
90 | ||
91 | if (maxsectors <= (PAGE_SIZE >> 9 ) && bio_sectors == 0) | |
ba13da47 N |
92 | return maxbytes; |
93 | ||
94 | if (maxsectors > (maxbytes >> 9)) | |
95 | return maxbytes; | |
96 | else | |
97 | return maxsectors << 9; | |
1da177e4 LT |
98 | } |
99 | ||
d45256ff | 100 | /* |
101 | * In linear_congested() conf->raid_disks is used as a copy of | |
102 | * mddev->raid_disks to iterate conf->disks[], because conf->raid_disks | |
103 | * and conf->disks[] are created in linear_conf(), they are always | |
104 | * consitent with each other, but mddev->raid_disks does not. | |
105 | */ | |
26be34dc N |
106 | static int linear_congested(void *data, int bits) |
107 | { | |
fd01b88c | 108 | struct mddev *mddev = data; |
e849b938 | 109 | struct linear_conf *conf; |
26be34dc N |
110 | int i, ret = 0; |
111 | ||
3fa841d7 N |
112 | if (mddev_congested(mddev, bits)) |
113 | return 1; | |
114 | ||
af11c397 S |
115 | rcu_read_lock(); |
116 | conf = rcu_dereference(mddev->private); | |
117 | ||
d45256ff | 118 | for (i = 0; i < conf->raid_disks && !ret ; i++) { |
165125e1 | 119 | struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev); |
26be34dc N |
120 | ret |= bdi_congested(&q->backing_dev_info, bits); |
121 | } | |
af11c397 S |
122 | |
123 | rcu_read_unlock(); | |
26be34dc N |
124 | return ret; |
125 | } | |
126 | ||
fd01b88c | 127 | static sector_t linear_size(struct mddev *mddev, sector_t sectors, int raid_disks) |
80c3a6ce | 128 | { |
e849b938 | 129 | struct linear_conf *conf; |
af11c397 | 130 | sector_t array_sectors; |
80c3a6ce | 131 | |
af11c397 S |
132 | rcu_read_lock(); |
133 | conf = rcu_dereference(mddev->private); | |
80c3a6ce DW |
134 | WARN_ONCE(sectors || raid_disks, |
135 | "%s does not support generic reshape\n", __func__); | |
af11c397 S |
136 | array_sectors = conf->array_sectors; |
137 | rcu_read_unlock(); | |
80c3a6ce | 138 | |
af11c397 | 139 | return array_sectors; |
80c3a6ce DW |
140 | } |
141 | ||
e849b938 | 142 | static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks) |
1da177e4 | 143 | { |
e849b938 | 144 | struct linear_conf *conf; |
3cb03002 | 145 | struct md_rdev *rdev; |
45d4582f | 146 | int i, cnt; |
f1cad2b6 | 147 | bool discard_supported = false; |
1da177e4 | 148 | |
a7120771 | 149 | conf = kzalloc (sizeof (*conf) + raid_disks*sizeof(struct dev_info), |
1da177e4 LT |
150 | GFP_KERNEL); |
151 | if (!conf) | |
7c7546cc N |
152 | return NULL; |
153 | ||
1da177e4 | 154 | cnt = 0; |
d6e22150 | 155 | conf->array_sectors = 0; |
1da177e4 | 156 | |
dafb20fa | 157 | rdev_for_each(rdev, mddev) { |
1da177e4 | 158 | int j = rdev->raid_disk; |
a7120771 | 159 | struct dev_info *disk = conf->disks + j; |
13f2682b | 160 | sector_t sectors; |
1da177e4 | 161 | |
13864515 | 162 | if (j < 0 || j >= raid_disks || disk->rdev) { |
2dc40f80 N |
163 | printk(KERN_ERR "md/linear:%s: disk numbering problem. Aborting!\n", |
164 | mdname(mddev)); | |
1da177e4 LT |
165 | goto out; |
166 | } | |
167 | ||
168 | disk->rdev = rdev; | |
13f2682b N |
169 | if (mddev->chunk_sectors) { |
170 | sectors = rdev->sectors; | |
171 | sector_div(sectors, mddev->chunk_sectors); | |
172 | rdev->sectors = sectors * mddev->chunk_sectors; | |
173 | } | |
1da177e4 | 174 | |
8f6c2e4b MP |
175 | disk_stack_limits(mddev->gendisk, rdev->bdev, |
176 | rdev->data_offset << 9); | |
1da177e4 | 177 | |
dd8ac336 | 178 | conf->array_sectors += rdev->sectors; |
1da177e4 | 179 | cnt++; |
4db7cdc8 | 180 | |
f1cad2b6 SL |
181 | if (blk_queue_discard(bdev_get_queue(rdev->bdev))) |
182 | discard_supported = true; | |
1da177e4 | 183 | } |
7c7546cc | 184 | if (cnt != raid_disks) { |
2dc40f80 N |
185 | printk(KERN_ERR "md/linear:%s: not enough drives present. Aborting!\n", |
186 | mdname(mddev)); | |
1da177e4 LT |
187 | goto out; |
188 | } | |
189 | ||
f1cad2b6 SL |
190 | if (!discard_supported) |
191 | queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue); | |
192 | else | |
193 | queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue); | |
194 | ||
1da177e4 | 195 | /* |
45d4582f | 196 | * Here we calculate the device offsets. |
1da177e4 | 197 | */ |
4db7cdc8 SS |
198 | conf->disks[0].end_sector = conf->disks[0].rdev->sectors; |
199 | ||
a778b73f | 200 | for (i = 1; i < raid_disks; i++) |
4db7cdc8 SS |
201 | conf->disks[i].end_sector = |
202 | conf->disks[i-1].end_sector + | |
203 | conf->disks[i].rdev->sectors; | |
15945fee | 204 | |
d45256ff | 205 | /* |
206 | * conf->raid_disks is copy of mddev->raid_disks. The reason to | |
207 | * keep a copy of mddev->raid_disks in struct linear_conf is, | |
208 | * mddev->raid_disks may not be consistent with pointers number of | |
209 | * conf->disks[] when it is updated in linear_add() and used to | |
210 | * iterate old conf->disks[] earray in linear_congested(). | |
211 | * Here conf->raid_disks is always consitent with number of | |
212 | * pointers in conf->disks[] array, and mddev->private is updated | |
213 | * with rcu_assign_pointer() in linear_addr(), such race can be | |
214 | * avoided. | |
215 | */ | |
216 | conf->raid_disks = raid_disks; | |
217 | ||
7c7546cc N |
218 | return conf; |
219 | ||
220 | out: | |
221 | kfree(conf); | |
222 | return NULL; | |
223 | } | |
224 | ||
fd01b88c | 225 | static int linear_run (struct mddev *mddev) |
7c7546cc | 226 | { |
e849b938 | 227 | struct linear_conf *conf; |
98d5561b | 228 | int ret; |
7c7546cc | 229 | |
0894cc30 AN |
230 | if (md_check_no_bitmap(mddev)) |
231 | return -EINVAL; | |
7c7546cc N |
232 | conf = linear_conf(mddev, mddev->raid_disks); |
233 | ||
234 | if (!conf) | |
235 | return 1; | |
236 | mddev->private = conf; | |
1f403624 | 237 | md_set_array_sectors(mddev, linear_size(mddev, 0, 0)); |
7c7546cc | 238 | |
1da177e4 | 239 | blk_queue_merge_bvec(mddev->queue, linear_mergeable_bvec); |
26be34dc N |
240 | mddev->queue->backing_dev_info.congested_fn = linear_congested; |
241 | mddev->queue->backing_dev_info.congested_data = mddev; | |
98d5561b | 242 | |
243 | ret = md_integrity_register(mddev); | |
244 | if (ret) { | |
245 | kfree(conf); | |
246 | mddev->private = NULL; | |
247 | } | |
248 | return ret; | |
7c7546cc | 249 | } |
1da177e4 | 250 | |
fd01b88c | 251 | static int linear_add(struct mddev *mddev, struct md_rdev *rdev) |
7c7546cc N |
252 | { |
253 | /* Adding a drive to a linear array allows the array to grow. | |
254 | * It is permitted if the new drive has a matching superblock | |
255 | * already on it, with raid_disk equal to raid_disks. | |
256 | * It is achieved by creating a new linear_private_data structure | |
257 | * and swapping it in in-place of the current one. | |
258 | * The current one is never freed until the array is stopped. | |
259 | * This avoids races. | |
260 | */ | |
e849b938 | 261 | struct linear_conf *newconf, *oldconf; |
7c7546cc | 262 | |
a778b73f | 263 | if (rdev->saved_raid_disk != mddev->raid_disks) |
7c7546cc N |
264 | return -EINVAL; |
265 | ||
a778b73f | 266 | rdev->raid_disk = rdev->saved_raid_disk; |
09cd9270 | 267 | rdev->saved_raid_disk = -1; |
a778b73f | 268 | |
7c7546cc N |
269 | newconf = linear_conf(mddev,mddev->raid_disks+1); |
270 | ||
271 | if (!newconf) | |
272 | return -ENOMEM; | |
273 | ||
d45256ff | 274 | /* newconf->raid_disks already keeps a copy of * the increased |
275 | * value of mddev->raid_disks, WARN_ONCE() is just used to make | |
276 | * sure of this. It is possible that oldconf is still referenced | |
277 | * in linear_congested(), therefore kfree_rcu() is used to free | |
278 | * oldconf until no one uses it anymore. | |
279 | */ | |
bc78c573 DE |
280 | oldconf = rcu_dereference_protected(mddev->private, |
281 | lockdep_is_held( | |
282 | &mddev->reconfig_mutex)); | |
7c7546cc | 283 | mddev->raid_disks++; |
d45256ff | 284 | WARN_ONCE(mddev->raid_disks != newconf->raid_disks, |
285 | "copied raid_disks doesn't match mddev->raid_disks"); | |
af11c397 | 286 | rcu_assign_pointer(mddev->private, newconf); |
1f403624 | 287 | md_set_array_sectors(mddev, linear_size(mddev, 0, 0)); |
f233ea5c | 288 | set_capacity(mddev->gendisk, mddev->array_sectors); |
449aad3e | 289 | revalidate_disk(mddev->gendisk); |
b119cbab | 290 | kfree_rcu(oldconf, rcu); |
7c7546cc | 291 | return 0; |
1da177e4 LT |
292 | } |
293 | ||
fd01b88c | 294 | static int linear_stop (struct mddev *mddev) |
1da177e4 | 295 | { |
bc78c573 DE |
296 | struct linear_conf *conf = |
297 | rcu_dereference_protected(mddev->private, | |
298 | lockdep_is_held( | |
299 | &mddev->reconfig_mutex)); | |
af11c397 S |
300 | |
301 | /* | |
302 | * We do not require rcu protection here since | |
303 | * we hold reconfig_mutex for both linear_add and | |
304 | * linear_stop, so they cannot race. | |
495d3573 N |
305 | * We should make sure any old 'conf's are properly |
306 | * freed though. | |
af11c397 | 307 | */ |
495d3573 | 308 | rcu_barrier(); |
1da177e4 | 309 | blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ |
495d3573 | 310 | kfree(conf); |
ef2f80ff | 311 | mddev->private = NULL; |
1da177e4 LT |
312 | |
313 | return 0; | |
314 | } | |
315 | ||
b4fdcb02 | 316 | static void linear_make_request(struct mddev *mddev, struct bio *bio) |
1da177e4 | 317 | { |
a7120771 | 318 | struct dev_info *tmp_dev; |
4db7cdc8 | 319 | sector_t start_sector; |
1da177e4 | 320 | |
e9c7469b TH |
321 | if (unlikely(bio->bi_rw & REQ_FLUSH)) { |
322 | md_flush_request(mddev, bio); | |
5a7bbad2 | 323 | return; |
e5dcdd80 N |
324 | } |
325 | ||
af11c397 | 326 | rcu_read_lock(); |
1da177e4 | 327 | tmp_dev = which_dev(mddev, bio->bi_sector); |
4db7cdc8 SS |
328 | start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors; |
329 | ||
af11c397 | 330 | |
4db7cdc8 SS |
331 | if (unlikely(bio->bi_sector >= (tmp_dev->end_sector) |
332 | || (bio->bi_sector < start_sector))) { | |
1da177e4 LT |
333 | char b[BDEVNAME_SIZE]; |
334 | ||
2dc40f80 N |
335 | printk(KERN_ERR |
336 | "md/linear:%s: make_request: Sector %llu out of bounds on " | |
337 | "dev %s: %llu sectors, offset %llu\n", | |
338 | mdname(mddev), | |
339 | (unsigned long long)bio->bi_sector, | |
340 | bdevname(tmp_dev->rdev->bdev, b), | |
341 | (unsigned long long)tmp_dev->rdev->sectors, | |
342 | (unsigned long long)start_sector); | |
af11c397 | 343 | rcu_read_unlock(); |
6712ecf8 | 344 | bio_io_error(bio); |
5a7bbad2 | 345 | return; |
1da177e4 | 346 | } |
f73a1c7d | 347 | if (unlikely(bio_end_sector(bio) > tmp_dev->end_sector)) { |
1da177e4 LT |
348 | /* This bio crosses a device boundary, so we have to |
349 | * split it. | |
350 | */ | |
351 | struct bio_pair *bp; | |
af11c397 S |
352 | sector_t end_sector = tmp_dev->end_sector; |
353 | ||
354 | rcu_read_unlock(); | |
6283815d | 355 | |
af11c397 | 356 | bp = bio_split(bio, end_sector - bio->bi_sector); |
6283815d | 357 | |
5a7bbad2 CH |
358 | linear_make_request(mddev, &bp->bio1); |
359 | linear_make_request(mddev, &bp->bio2); | |
1da177e4 | 360 | bio_pair_release(bp); |
5a7bbad2 | 361 | return; |
1da177e4 LT |
362 | } |
363 | ||
364 | bio->bi_bdev = tmp_dev->rdev->bdev; | |
4db7cdc8 | 365 | bio->bi_sector = bio->bi_sector - start_sector |
6283815d | 366 | + tmp_dev->rdev->data_offset; |
af11c397 | 367 | rcu_read_unlock(); |
f1cad2b6 SL |
368 | |
369 | if (unlikely((bio->bi_rw & REQ_DISCARD) && | |
370 | !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) { | |
371 | /* Just ignore it */ | |
372 | bio_endio(bio, 0); | |
373 | return; | |
374 | } | |
375 | ||
5a7bbad2 | 376 | generic_make_request(bio); |
1da177e4 LT |
377 | } |
378 | ||
fd01b88c | 379 | static void linear_status (struct seq_file *seq, struct mddev *mddev) |
1da177e4 LT |
380 | { |
381 | ||
9d8f0363 | 382 | seq_printf(seq, " %dk rounding", mddev->chunk_sectors / 2); |
1da177e4 LT |
383 | } |
384 | ||
385 | ||
84fc4b56 | 386 | static struct md_personality linear_personality = |
1da177e4 LT |
387 | { |
388 | .name = "linear", | |
2604b703 | 389 | .level = LEVEL_LINEAR, |
1da177e4 LT |
390 | .owner = THIS_MODULE, |
391 | .make_request = linear_make_request, | |
392 | .run = linear_run, | |
393 | .stop = linear_stop, | |
394 | .status = linear_status, | |
7c7546cc | 395 | .hot_add_disk = linear_add, |
80c3a6ce | 396 | .size = linear_size, |
1da177e4 LT |
397 | }; |
398 | ||
399 | static int __init linear_init (void) | |
400 | { | |
2604b703 | 401 | return register_md_personality (&linear_personality); |
1da177e4 LT |
402 | } |
403 | ||
404 | static void linear_exit (void) | |
405 | { | |
2604b703 | 406 | unregister_md_personality (&linear_personality); |
1da177e4 LT |
407 | } |
408 | ||
409 | ||
410 | module_init(linear_init); | |
411 | module_exit(linear_exit); | |
412 | MODULE_LICENSE("GPL"); | |
0efb9e61 | 413 | MODULE_DESCRIPTION("Linear device concatenation personality for MD"); |
d9d166c2 N |
414 | MODULE_ALIAS("md-personality-1"); /* LINEAR - deprecated*/ |
415 | MODULE_ALIAS("md-linear"); | |
2604b703 | 416 | MODULE_ALIAS("md-level--1"); |