[PATCH] md: support adding new devices to md arrays via sysfs
drivers/md/md.c
/*
   md.c : Multiple Devices driver for Linux
	  Copyright (C) 1998, 1999, 2000 Ingo Molnar

     completely rewritten, based on the MD driver code from Marc Zyngier

   Changes:

   - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
   - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
   - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
   - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
   - kmod support by: Cyrus Durgin
   - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
   - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>

   - lots of fixes and improvements to the RAID1/RAID5 and generic
     RAID code (such as request based resynchronization):

     Neil Brown <neilb@cse.unsw.edu.au>.

   - persistent bitmap code
     Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/module.h>
#include <linux/config.h>
#include <linux/kthread.h>
#include <linux/linkage.h>
#include <linux/raid/md.h>
#include <linux/raid/bitmap.h>
#include <linux/sysctl.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/buffer_head.h> /* for invalidate_bdev */
#include <linux/suspend.h>
#include <linux/poll.h>

#include <linux/init.h>

#include <linux/file.h>

#ifdef CONFIG_KMOD
#include <linux/kmod.h>
#endif

#include <asm/unaligned.h>

#define MAJOR_NR MD_MAJOR
#define MD_DRIVER

/* 63 partitions with the alternate major number (mdp) */
#define MdpMinorShift 6

#define DEBUG 0
#define dprintk(x...) ((void)(DEBUG && printk(x)))


#ifndef MODULE
static void autostart_arrays (int part);
#endif

static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);

/*
 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
 * is 1000 KB/sec, so the extra system load does not show up that much.
 * Increase it if you want to have more _guaranteed_ speed. Note that
 * the RAID driver will use the maximum available bandwidth if the IO
 * subsystem is idle. There is also an 'absolute maximum' reconstruction
 * speed limit - in case reconstruction slows down your system despite
 * idle IO detection.
 *
 * you can change it via /proc/sys/dev/raid/speed_limit_min and _max.
 */

static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;

static struct ctl_table_header *raid_table_header;

static ctl_table raid_table[] = {
	{
		.ctl_name	= DEV_RAID_SPEED_LIMIT_MIN,
		.procname	= "speed_limit_min",
		.data		= &sysctl_speed_limit_min,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= DEV_RAID_SPEED_LIMIT_MAX,
		.procname	= "speed_limit_max",
		.data		= &sysctl_speed_limit_max,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{ .ctl_name = 0 }
};

static ctl_table raid_dir_table[] = {
	{
		.ctl_name	= DEV_RAID,
		.procname	= "raid",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= raid_table,
	},
	{ .ctl_name = 0 }
};

static ctl_table raid_root_table[] = {
	{
		.ctl_name	= CTL_DEV,
		.procname	= "dev",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= raid_dir_table,
	},
	{ .ctl_name = 0 }
};
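
/*
 * Illustrative only (not part of the driver): the two limits above surface
 * as writable sysctls, so an administrator can, for example, raise the
 * guaranteed resync floor with something like
 *	echo 50000 > /proc/sys/dev/raid/speed_limit_min
 * The value 50000 is just an example figure, in KB/sec as documented above.
 */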

static struct block_device_operations md_fops;

static int start_readonly;

/*
 * We have a system wide 'event count' that is incremented
 * on any 'interesting' event, and readers of /proc/mdstat
 * can use 'poll' or 'select' to find out when the event
 * count increases.
 *
 * Events are:
 *  start array, stop array, error, add device, remove device,
 *  start build, activate spare
 */
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
static void md_new_event(mddev_t *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}

/*
 * Enables iteration over all existing md arrays.
 * all_mddevs_lock protects this list.
 */
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);


/*
 * iterates through all used mddevs in the system.
 * We take care to grab the all_mddevs_lock whenever navigating
 * the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop while still owning
 * a reference to the current mddev must mddev_put it.
 */
#define ITERATE_MDDEV(mddev,tmp)					\
									\
	for (({ spin_lock(&all_mddevs_lock); 				\
		tmp = all_mddevs.next;					\
		mddev = NULL;});					\
	     ({ if (tmp != &all_mddevs)					\
			mddev_get(list_entry(tmp, mddev_t, all_mddevs));\
		spin_unlock(&all_mddevs_lock);				\
		if (mddev) mddev_put(mddev);				\
		mddev = list_entry(tmp, mddev_t, all_mddevs);		\
		tmp != &all_mddevs;});					\
	     ({ spin_lock(&all_mddevs_lock);				\
		tmp = tmp->next;})					\
		)
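
/*
 * Minimal usage sketch (illustrative, mirroring how md_print_devices()
 * further down walks every array); 'tmp' is the scratch list cursor the
 * macro requires:
 *
 *	mddev_t *mddev;
 *	struct list_head *tmp;
 *	ITERATE_MDDEV(mddev,tmp) {
 *		printk("%s\n", mdname(mddev));
 *	}
 */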


static int md_fail_request (request_queue_t *q, struct bio *bio)
{
	bio_io_error(bio, bio->bi_size);
	return 0;
}

static inline mddev_t *mddev_get(mddev_t *mddev)
{
	atomic_inc(&mddev->active);
	return mddev;
}

static void mddev_put(mddev_t *mddev)
{
	if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
		return;
	if (!mddev->raid_disks && list_empty(&mddev->disks)) {
		list_del(&mddev->all_mddevs);
		blk_put_queue(mddev->queue);
		kobject_unregister(&mddev->kobj);
	}
	spin_unlock(&all_mddevs_lock);
}

static mddev_t * mddev_find(dev_t unit)
{
	mddev_t *mddev, *new = NULL;

 retry:
	spin_lock(&all_mddevs_lock);
	list_for_each_entry(mddev, &all_mddevs, all_mddevs)
		if (mddev->unit == unit) {
			mddev_get(mddev);
			spin_unlock(&all_mddevs_lock);
			kfree(new);
			return mddev;
		}

	if (new) {
		list_add(&new->all_mddevs, &all_mddevs);
		spin_unlock(&all_mddevs_lock);
		return new;
	}
	spin_unlock(&all_mddevs_lock);

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	new->unit = unit;
	if (MAJOR(unit) == MD_MAJOR)
		new->md_minor = MINOR(unit);
	else
		new->md_minor = MINOR(unit) >> MdpMinorShift;

	init_MUTEX(&new->reconfig_sem);
	INIT_LIST_HEAD(&new->disks);
	INIT_LIST_HEAD(&new->all_mddevs);
	init_timer(&new->safemode_timer);
	atomic_set(&new->active, 1);
	spin_lock_init(&new->write_lock);
	init_waitqueue_head(&new->sb_wait);

	new->queue = blk_alloc_queue(GFP_KERNEL);
	if (!new->queue) {
		kfree(new);
		return NULL;
	}

	blk_queue_make_request(new->queue, md_fail_request);

	goto retry;
}

static inline int mddev_lock(mddev_t * mddev)
{
	return down_interruptible(&mddev->reconfig_sem);
}

static inline void mddev_lock_uninterruptible(mddev_t * mddev)
{
	down(&mddev->reconfig_sem);
}

static inline int mddev_trylock(mddev_t * mddev)
{
	return down_trylock(&mddev->reconfig_sem);
}

static inline void mddev_unlock(mddev_t * mddev)
{
	up(&mddev->reconfig_sem);

	md_wakeup_thread(mddev->thread);
}

static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
{
	mdk_rdev_t * rdev;
	struct list_head *tmp;

	ITERATE_RDEV(mddev,rdev,tmp) {
		if (rdev->desc_nr == nr)
			return rdev;
	}
	return NULL;
}

static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev)
{
	struct list_head *tmp;
	mdk_rdev_t *rdev;

	ITERATE_RDEV(mddev,rdev,tmp) {
		if (rdev->bdev->bd_dev == dev)
			return rdev;
	}
	return NULL;
}

static struct mdk_personality *find_pers(int level, char *clevel)
{
	struct mdk_personality *pers;
	list_for_each_entry(pers, &pers_list, list) {
		if (level != LEVEL_NONE && pers->level == level)
			return pers;
		if (strcmp(pers->name, clevel)==0)
			return pers;
	}
	return NULL;
}

static inline sector_t calc_dev_sboffset(struct block_device *bdev)
{
	sector_t size = bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
	return MD_NEW_SIZE_BLOCKS(size);
}

static sector_t calc_dev_size(mdk_rdev_t *rdev, unsigned chunk_size)
{
	sector_t size;

	size = rdev->sb_offset;

	if (chunk_size)
		size &= ~((sector_t)chunk_size/1024 - 1);
	return size;
}

static int alloc_disk_sb(mdk_rdev_t * rdev)
{
	if (rdev->sb_page)
		MD_BUG();

	rdev->sb_page = alloc_page(GFP_KERNEL);
	if (!rdev->sb_page) {
		printk(KERN_ALERT "md: out of memory.\n");
		return -EINVAL;
	}

	return 0;
}

static void free_disk_sb(mdk_rdev_t * rdev)
{
	if (rdev->sb_page) {
		put_page(rdev->sb_page);
		rdev->sb_loaded = 0;
		rdev->sb_page = NULL;
		rdev->sb_offset = 0;
		rdev->size = 0;
	}
}


static int super_written(struct bio *bio, unsigned int bytes_done, int error)
{
	mdk_rdev_t *rdev = bio->bi_private;
	mddev_t *mddev = rdev->mddev;
	if (bio->bi_size)
		return 1;

	if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags))
		md_error(mddev, rdev);

	if (atomic_dec_and_test(&mddev->pending_writes))
		wake_up(&mddev->sb_wait);
	bio_put(bio);
	return 0;
}

static int super_written_barrier(struct bio *bio, unsigned int bytes_done, int error)
{
	struct bio *bio2 = bio->bi_private;
	mdk_rdev_t *rdev = bio2->bi_private;
	mddev_t *mddev = rdev->mddev;
	if (bio->bi_size)
		return 1;

	if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
	    error == -EOPNOTSUPP) {
		unsigned long flags;
		/* barriers don't appear to be supported :-( */
		set_bit(BarriersNotsupp, &rdev->flags);
		mddev->barriers_work = 0;
		spin_lock_irqsave(&mddev->write_lock, flags);
		bio2->bi_next = mddev->biolist;
		mddev->biolist = bio2;
		spin_unlock_irqrestore(&mddev->write_lock, flags);
		wake_up(&mddev->sb_wait);
		bio_put(bio);
		return 0;
	}
	bio_put(bio2);
	bio->bi_private = rdev;
	return super_written(bio, bytes_done, error);
}

void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
		   sector_t sector, int size, struct page *page)
{
	/* write first size bytes of page to sector of rdev
	 * Increment mddev->pending_writes before returning
	 * and decrement it on completion, waking up sb_wait
	 * if zero is reached.
	 * If an error occurred, call md_error
	 *
	 * As we might need to resubmit the request if BIO_RW_BARRIER
	 * causes ENOTSUPP, we allocate a spare bio...
	 */
	struct bio *bio = bio_alloc(GFP_NOIO, 1);
	int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNC);

	bio->bi_bdev = rdev->bdev;
	bio->bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	bio->bi_private = rdev;
	bio->bi_end_io = super_written;
	bio->bi_rw = rw;

	atomic_inc(&mddev->pending_writes);
	if (!test_bit(BarriersNotsupp, &rdev->flags)) {
		struct bio *rbio;
		rw |= (1<<BIO_RW_BARRIER);
		rbio = bio_clone(bio, GFP_NOIO);
		rbio->bi_private = bio;
		rbio->bi_end_io = super_written_barrier;
		submit_bio(rw, rbio);
	} else
		submit_bio(rw, bio);
}

void md_super_wait(mddev_t *mddev)
{
	/* wait for all superblock writes that were scheduled to complete.
	 * if any had to be retried (due to BARRIER problems), retry them
	 */
	DEFINE_WAIT(wq);
	for(;;) {
		prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&mddev->pending_writes)==0)
			break;
		while (mddev->biolist) {
			struct bio *bio;
			spin_lock_irq(&mddev->write_lock);
			bio = mddev->biolist;
			mddev->biolist = bio->bi_next ;
			bio->bi_next = NULL;
			spin_unlock_irq(&mddev->write_lock);
			submit_bio(bio->bi_rw, bio);
		}
		schedule();
	}
	finish_wait(&mddev->sb_wait, &wq);
}
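
/*
 * Sketch of the intended calling pattern (illustrative; this is what
 * md_update_sb() below does): queue one asynchronous superblock write per
 * device with md_super_write(), then block until all of them - including
 * any barrier retries - have completed:
 *
 *	ITERATE_RDEV(mddev,rdev,tmp)
 *		md_super_write(mddev, rdev, rdev->sb_offset<<1,
 *			       rdev->sb_size, rdev->sb_page);
 *	md_super_wait(mddev);
 */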

static int bi_complete(struct bio *bio, unsigned int bytes_done, int error)
{
	if (bio->bi_size)
		return 1;

	complete((struct completion*)bio->bi_private);
	return 0;
}

int sync_page_io(struct block_device *bdev, sector_t sector, int size,
		   struct page *page, int rw)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 1);
	struct completion event;
	int ret;

	rw |= (1 << BIO_RW_SYNC);

	bio->bi_bdev = bdev;
	bio->bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	init_completion(&event);
	bio->bi_private = &event;
	bio->bi_end_io = bi_complete;
	submit_bio(rw, bio);
	wait_for_completion(&event);

	ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL_GPL(sync_page_io);

static int read_disk_sb(mdk_rdev_t * rdev, int size)
{
	char b[BDEVNAME_SIZE];
	if (!rdev->sb_page) {
		MD_BUG();
		return -EINVAL;
	}
	if (rdev->sb_loaded)
		return 0;


	if (!sync_page_io(rdev->bdev, rdev->sb_offset<<1, size, rdev->sb_page, READ))
		goto fail;
	rdev->sb_loaded = 1;
	return 0;

fail:
	printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
		bdevname(rdev->bdev,b));
	return -EINVAL;
}

static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	if (	(sb1->set_uuid0 == sb2->set_uuid0) &&
		(sb1->set_uuid1 == sb2->set_uuid1) &&
		(sb1->set_uuid2 == sb2->set_uuid2) &&
		(sb1->set_uuid3 == sb2->set_uuid3))

		return 1;

	return 0;
}


static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	int ret;
	mdp_super_t *tmp1, *tmp2;

	tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
	tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);

	if (!tmp1 || !tmp2) {
		ret = 0;
		printk(KERN_INFO "md.c: sb1 is not equal to sb2!\n");
		goto abort;
	}

	*tmp1 = *sb1;
	*tmp2 = *sb2;

	/*
	 * nr_disks is not constant
	 */
	tmp1->nr_disks = 0;
	tmp2->nr_disks = 0;

	if (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4))
		ret = 0;
	else
		ret = 1;

abort:
	kfree(tmp1);
	kfree(tmp2);
	return ret;
}

static unsigned int calc_sb_csum(mdp_super_t * sb)
{
	unsigned int disk_csum, csum;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;
	csum = csum_partial((void *)sb, MD_SB_BYTES, 0);
	sb->sb_csum = disk_csum;
	return csum;
}


/*
 * Handle superblock details.
 * We want to be able to handle multiple superblock formats
 * so we have a common interface to them all, and an array of
 * different handlers.
 * We rely on user-space to write the initial superblock, and support
 * reading and updating of superblocks.
 * Interface methods are:
 *   int load_super(mdk_rdev_t *dev, mdk_rdev_t *refdev, int minor_version)
 *      loads and validates a superblock on dev.
 *      if refdev != NULL, compare superblocks on both devices
 *    Return:
 *      0 - dev has a superblock that is compatible with refdev
 *      1 - dev has a superblock that is compatible and newer than refdev
 *          so dev should be used as the refdev in future
 *     -EINVAL superblock incompatible or invalid
 *     -othererror e.g. -EIO
 *
 *   int validate_super(mddev_t *mddev, mdk_rdev_t *dev)
 *      Verify that dev is acceptable into mddev.
 *       The first time, mddev->raid_disks will be 0, and data from
 *       dev should be merged in.  Subsequent calls check that dev
 *       is new enough.  Return 0 or -EINVAL
 *
 *   void sync_super(mddev_t *mddev, mdk_rdev_t *dev)
 *     Update the superblock for rdev with data in mddev
 *     This does not write to disc.
 *
 */

struct super_type  {
	char		*name;
	struct module	*owner;
	int		(*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version);
	int		(*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev);
	void		(*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
};
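
/*
 * These handlers are dispatched through the super_types[] table defined
 * further down, indexed by the array's major superblock version.
 * Illustrative call, as done by analyze_sbs() below:
 *
 *	super_types[mddev->major_version].
 *		load_super(rdev, freshest, mddev->minor_version);
 */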

/*
 * load_super for 0.90.0
 */
static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	mdp_super_t *sb;
	int ret;
	sector_t sb_offset;

	/*
	 * Calculate the position of the superblock,
	 * it's at the end of the disk.
	 *
	 * It also happens to be a multiple of 4Kb.
	 */
	sb_offset = calc_dev_sboffset(rdev->bdev);
	rdev->sb_offset = sb_offset;

	ret = read_disk_sb(rdev, MD_SB_BYTES);
	if (ret) return ret;

	ret = -EINVAL;

	bdevname(rdev->bdev, b);
	sb = (mdp_super_t*)page_address(rdev->sb_page);

	if (sb->md_magic != MD_SB_MAGIC) {
		printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
		       b);
		goto abort;
	}

	if (sb->major_version != 0 ||
	    sb->minor_version != 90) {
		printk(KERN_WARNING "Bad version number %d.%d on %s\n",
			sb->major_version, sb->minor_version,
			b);
		goto abort;
	}

	if (sb->raid_disks <= 0)
		goto abort;

	if (csum_fold(calc_sb_csum(sb)) != csum_fold(sb->sb_csum)) {
		printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
			b);
		goto abort;
	}

	rdev->preferred_minor = sb->md_minor;
	rdev->data_offset = 0;
	rdev->sb_size = MD_SB_BYTES;

	if (sb->level == LEVEL_MULTIPATH)
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = sb->this_disk.number;

	if (refdev == 0)
		ret = 1;
	else {
		__u64 ev1, ev2;
		mdp_super_t *refsb = (mdp_super_t*)page_address(refdev->sb_page);
		if (!uuid_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has different UUID to %s\n",
				b, bdevname(refdev->bdev,b2));
			goto abort;
		}
		if (!sb_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has same UUID"
			       " but different superblock to %s\n",
			       b, bdevname(refdev->bdev, b2));
			goto abort;
		}
		ev1 = md_event(sb);
		ev2 = md_event(refsb);
		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	rdev->size = calc_dev_size(rdev, sb->chunk_size);

	if (rdev->size < sb->size && sb->level > 1)
		/* "this cannot possibly happen" ... */
		ret = -EINVAL;

 abort:
	return ret;
}

/*
 * validate_super for 0.90.0
 */
static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
	mdp_disk_t *desc;
	mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page);

	rdev->raid_disk = -1;
	rdev->flags = 0;
	if (mddev->raid_disks == 0) {
		mddev->major_version = 0;
		mddev->minor_version = sb->minor_version;
		mddev->patch_version = sb->patch_version;
		mddev->persistent = ! sb->not_persistent;
		mddev->chunk_size = sb->chunk_size;
		mddev->ctime = sb->ctime;
		mddev->utime = sb->utime;
		mddev->level = sb->level;
		mddev->clevel[0] = 0;
		mddev->layout = sb->layout;
		mddev->raid_disks = sb->raid_disks;
		mddev->size = sb->size;
		mddev->events = md_event(sb);
		mddev->bitmap_offset = 0;
		mddev->default_bitmap_offset = MD_SB_BYTES >> 9;

		if (sb->state & (1<<MD_SB_CLEAN))
			mddev->recovery_cp = MaxSector;
		else {
			if (sb->events_hi == sb->cp_events_hi &&
				sb->events_lo == sb->cp_events_lo) {
				mddev->recovery_cp = sb->recovery_cp;
			} else
				mddev->recovery_cp = 0;
		}

		memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
		memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
		memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
		memcpy(mddev->uuid+12,&sb->set_uuid3, 4);

		mddev->max_disks = MD_SB_DISKS;

		if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
		    mddev->bitmap_file == NULL) {
			if (mddev->level != 1 && mddev->level != 5 && mddev->level != 6
			    && mddev->level != 10) {
				/* FIXME use a better test */
				printk(KERN_WARNING "md: bitmaps not supported for this level.\n");
				return -EINVAL;
			}
			mddev->bitmap_offset = mddev->default_bitmap_offset;
		}

	} else if (mddev->pers == NULL) {
		/* Insist on good event counter while assembling */
		__u64 ev1 = md_event(sb);
		++ev1;
		if (ev1 < mddev->events)
			return -EINVAL;
	} else if (mddev->bitmap) {
		/* if adding to array with a bitmap, then we can accept an
		 * older device ... but not too old.
		 */
		__u64 ev1 = md_event(sb);
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
	} else /* just a hot-add of a new device, leave raid_disk at -1 */
		return 0;

	if (mddev->level != LEVEL_MULTIPATH) {
		desc = sb->disks + rdev->desc_nr;

		if (desc->state & (1<<MD_DISK_FAULTY))
			set_bit(Faulty, &rdev->flags);
		else if (desc->state & (1<<MD_DISK_SYNC) &&
			 desc->raid_disk < mddev->raid_disks) {
			set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = desc->raid_disk;
		}
		if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);
	return 0;
}

/*
 * sync_super for 0.90.0
 */
static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
{
	mdp_super_t *sb;
	struct list_head *tmp;
	mdk_rdev_t *rdev2;
	int next_spare = mddev->raid_disks;


	/* make rdev->sb match mddev data..
	 *
	 * 1/ zero out disks
	 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
	 * 3/ any empty disks < next_spare become removed
	 *
	 * disks[0] gets initialised to REMOVED because
	 * we cannot be sure from other fields if it has
	 * been initialised or not.
	 */
	int i;
	int active=0, working=0,failed=0,spare=0,nr_disks=0;

	rdev->sb_size = MD_SB_BYTES;

	sb = (mdp_super_t*)page_address(rdev->sb_page);

	memset(sb, 0, sizeof(*sb));

	sb->md_magic = MD_SB_MAGIC;
	sb->major_version = mddev->major_version;
	sb->minor_version = mddev->minor_version;
	sb->patch_version = mddev->patch_version;
	sb->gvalid_words  = 0; /* ignored */
	memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
	memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
	memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
	memcpy(&sb->set_uuid3, mddev->uuid+12,4);

	sb->ctime = mddev->ctime;
	sb->level = mddev->level;
	sb->size  = mddev->size;
	sb->raid_disks = mddev->raid_disks;
	sb->md_minor = mddev->md_minor;
	sb->not_persistent = !mddev->persistent;
	sb->utime = mddev->utime;
	sb->state = 0;
	sb->events_hi = (mddev->events>>32);
	sb->events_lo = (u32)mddev->events;

	if (mddev->in_sync)
	{
		sb->recovery_cp = mddev->recovery_cp;
		sb->cp_events_hi = (mddev->events>>32);
		sb->cp_events_lo = (u32)mddev->events;
		if (mddev->recovery_cp == MaxSector)
			sb->state = (1<< MD_SB_CLEAN);
	} else
		sb->recovery_cp = 0;

	sb->layout = mddev->layout;
	sb->chunk_size = mddev->chunk_size;

	if (mddev->bitmap && mddev->bitmap_file == NULL)
		sb->state |= (1<<MD_SB_BITMAP_PRESENT);

	sb->disks[0].state = (1<<MD_DISK_REMOVED);
	ITERATE_RDEV(mddev,rdev2,tmp) {
		mdp_disk_t *d;
		int desc_nr;
		if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
		    && !test_bit(Faulty, &rdev2->flags))
			desc_nr = rdev2->raid_disk;
		else
			desc_nr = next_spare++;
		rdev2->desc_nr = desc_nr;
		d = &sb->disks[rdev2->desc_nr];
		nr_disks++;
		d->number = rdev2->desc_nr;
		d->major = MAJOR(rdev2->bdev->bd_dev);
		d->minor = MINOR(rdev2->bdev->bd_dev);
		if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
		    && !test_bit(Faulty, &rdev2->flags))
			d->raid_disk = rdev2->raid_disk;
		else
			d->raid_disk = rdev2->desc_nr; /* compatibility */
		if (test_bit(Faulty, &rdev2->flags)) {
			d->state = (1<<MD_DISK_FAULTY);
			failed++;
		} else if (test_bit(In_sync, &rdev2->flags)) {
			d->state = (1<<MD_DISK_ACTIVE);
			d->state |= (1<<MD_DISK_SYNC);
			active++;
			working++;
		} else {
			d->state = 0;
			spare++;
			working++;
		}
		if (test_bit(WriteMostly, &rdev2->flags))
			d->state |= (1<<MD_DISK_WRITEMOSTLY);
	}
	/* now set the "removed" and "faulty" bits on any missing devices */
	for (i=0 ; i < mddev->raid_disks ; i++) {
		mdp_disk_t *d = &sb->disks[i];
		if (d->state == 0 && d->number == 0) {
			d->number = i;
			d->raid_disk = i;
			d->state = (1<<MD_DISK_REMOVED);
			d->state |= (1<<MD_DISK_FAULTY);
			failed++;
		}
	}
	sb->nr_disks = nr_disks;
	sb->active_disks = active;
	sb->working_disks = working;
	sb->failed_disks = failed;
	sb->spare_disks = spare;

	sb->this_disk = sb->disks[rdev->desc_nr];
	sb->sb_csum = calc_sb_csum(sb);
}

/*
 * version 1 superblock
 */

static unsigned int calc_sb_1_csum(struct mdp_superblock_1 * sb)
{
	unsigned int disk_csum, csum;
	unsigned long long newcsum;
	int size = 256 + le32_to_cpu(sb->max_dev)*2;
	unsigned int *isuper = (unsigned int*)sb;
	int i;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;
	newcsum = 0;
	for (i=0; size>=4; size -= 4 )
		newcsum += le32_to_cpu(*isuper++);

	if (size == 2)
		newcsum += le16_to_cpu(*(unsigned short*) isuper);

	csum = (newcsum & 0xffffffff) + (newcsum >> 32);
	sb->sb_csum = disk_csum;
	return cpu_to_le32(csum);
}

static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
	struct mdp_superblock_1 *sb;
	int ret;
	sector_t sb_offset;
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	int bmask;

	/*
	 * Calculate the position of the superblock.
	 * It is always aligned to a 4K boundary and
	 * depending on minor_version, it can be:
	 * 0: At least 8K, but less than 12K, from end of device
	 * 1: At start of device
	 * 2: 4K from start of device.
	 */
	switch(minor_version) {
	case 0:
		sb_offset = rdev->bdev->bd_inode->i_size >> 9;
		sb_offset -= 8*2;
		sb_offset &= ~(sector_t)(4*2-1);
		/* convert from sectors to K */
		sb_offset /= 2;
		break;
	case 1:
		sb_offset = 0;
		break;
	case 2:
		sb_offset = 4;
		break;
	default:
		return -EINVAL;
	}
	rdev->sb_offset = sb_offset;

	/* superblock is rarely larger than 1K, but it can be larger,
	 * and it is safe to read 4k, so we do that
	 */
	ret = read_disk_sb(rdev, 4096);
	if (ret) return ret;


	sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

	if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
	    sb->major_version != cpu_to_le32(1) ||
	    le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
	    le64_to_cpu(sb->super_offset) != (rdev->sb_offset<<1) ||
	    (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
		return -EINVAL;

	if (calc_sb_1_csum(sb) != sb->sb_csum) {
		printk("md: invalid superblock checksum on %s\n",
			bdevname(rdev->bdev,b));
		return -EINVAL;
	}
	if (le64_to_cpu(sb->data_size) < 10) {
		printk("md: data_size too small on %s\n",
		       bdevname(rdev->bdev,b));
		return -EINVAL;
	}
	rdev->preferred_minor = 0xffff;
	rdev->data_offset = le64_to_cpu(sb->data_offset);
	atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));

	rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
	bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1;
	if (rdev->sb_size & bmask)
		rdev->sb_size = (rdev->sb_size | bmask)+1;

	if (refdev == 0)
		return 1;
	else {
		__u64 ev1, ev2;
		struct mdp_superblock_1 *refsb =
			(struct mdp_superblock_1*)page_address(refdev->sb_page);

		if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
		    sb->level != refsb->level ||
		    sb->layout != refsb->layout ||
		    sb->chunksize != refsb->chunksize) {
			printk(KERN_WARNING "md: %s has strangely different"
				" superblock to %s\n",
				bdevname(rdev->bdev,b),
				bdevname(refdev->bdev,b2));
			return -EINVAL;
		}
		ev1 = le64_to_cpu(sb->events);
		ev2 = le64_to_cpu(refsb->events);

		if (ev1 > ev2)
			return 1;
	}
	if (minor_version)
		rdev->size = ((rdev->bdev->bd_inode->i_size>>9) - le64_to_cpu(sb->data_offset)) / 2;
	else
		rdev->size = rdev->sb_offset;
	if (rdev->size < le64_to_cpu(sb->data_size)/2)
		return -EINVAL;
	rdev->size = le64_to_cpu(sb->data_size)/2;
	if (le32_to_cpu(sb->chunksize))
		rdev->size &= ~((sector_t)le32_to_cpu(sb->chunksize)/2 - 1);

	if (le32_to_cpu(sb->size) > rdev->size*2)
		return -EINVAL;
	return 0;
}

static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
	struct mdp_superblock_1 *sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

	rdev->raid_disk = -1;
	rdev->flags = 0;
	if (mddev->raid_disks == 0) {
		mddev->major_version = 1;
		mddev->patch_version = 0;
		mddev->persistent = 1;
		mddev->chunk_size = le32_to_cpu(sb->chunksize) << 9;
		mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
		mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
		mddev->level = le32_to_cpu(sb->level);
		mddev->clevel[0] = 0;
		mddev->layout = le32_to_cpu(sb->layout);
		mddev->raid_disks = le32_to_cpu(sb->raid_disks);
		mddev->size = le64_to_cpu(sb->size)/2;
		mddev->events = le64_to_cpu(sb->events);
		mddev->bitmap_offset = 0;
		mddev->default_bitmap_offset = 1024;

		mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
		memcpy(mddev->uuid, sb->set_uuid, 16);

		mddev->max_disks =  (4096-256)/2;

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
		    mddev->bitmap_file == NULL ) {
			if (mddev->level != 1 && mddev->level != 5 && mddev->level != 6
			    && mddev->level != 10) {
				printk(KERN_WARNING "md: bitmaps not supported for this level.\n");
				return -EINVAL;
			}
			mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset);
		}
	} else if (mddev->pers == NULL) {
		/* Insist on good event counter while assembling */
		__u64 ev1 = le64_to_cpu(sb->events);
		++ev1;
		if (ev1 < mddev->events)
			return -EINVAL;
	} else if (mddev->bitmap) {
		/* If adding to array with a bitmap, then we can accept an
		 * older device, but not too old.
		 */
		__u64 ev1 = le64_to_cpu(sb->events);
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
	} else /* just a hot-add of a new device, leave raid_disk at -1 */
		return 0;

	if (mddev->level != LEVEL_MULTIPATH) {
		int role;
		rdev->desc_nr = le32_to_cpu(sb->dev_number);
		role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
		switch(role) {
		case 0xffff: /* spare */
			break;
		case 0xfffe: /* faulty */
			set_bit(Faulty, &rdev->flags);
			break;
		default:
			set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = role;
			break;
		}
		if (sb->devflags & WriteMostly1)
			set_bit(WriteMostly, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);

	return 0;
}

static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
{
	struct mdp_superblock_1 *sb;
	struct list_head *tmp;
	mdk_rdev_t *rdev2;
	int max_dev, i;
	/* make rdev->sb match mddev and rdev data. */

	sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

	sb->feature_map = 0;
	sb->pad0 = 0;
	memset(sb->pad1, 0, sizeof(sb->pad1));
	memset(sb->pad2, 0, sizeof(sb->pad2));
	memset(sb->pad3, 0, sizeof(sb->pad3));

	sb->utime = cpu_to_le64((__u64)mddev->utime);
	sb->events = cpu_to_le64(mddev->events);
	if (mddev->in_sync)
		sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
	else
		sb->resync_offset = cpu_to_le64(0);

	sb->cnt_corrected_read = atomic_read(&rdev->corrected_errors);

	if (mddev->bitmap && mddev->bitmap_file == NULL) {
		sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset);
		sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
	}

	max_dev = 0;
	ITERATE_RDEV(mddev,rdev2,tmp)
		if (rdev2->desc_nr+1 > max_dev)
			max_dev = rdev2->desc_nr+1;

	sb->max_dev = cpu_to_le32(max_dev);
	for (i=0; i<max_dev;i++)
		sb->dev_roles[i] = cpu_to_le16(0xfffe);

	ITERATE_RDEV(mddev,rdev2,tmp) {
		i = rdev2->desc_nr;
		if (test_bit(Faulty, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(0xfffe);
		else if (test_bit(In_sync, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else
			sb->dev_roles[i] = cpu_to_le16(0xffff);
	}

	sb->recovery_offset = cpu_to_le64(0); /* not supported yet */
	sb->sb_csum = calc_sb_1_csum(sb);
}


static struct super_type super_types[] = {
	[0] = {
		.name	= "0.90.0",
		.owner	= THIS_MODULE,
		.load_super	= super_90_load,
		.validate_super	= super_90_validate,
		.sync_super	= super_90_sync,
	},
	[1] = {
		.name	= "md-1",
		.owner	= THIS_MODULE,
		.load_super	= super_1_load,
		.validate_super	= super_1_validate,
		.sync_super	= super_1_sync,
	},
};

static mdk_rdev_t * match_dev_unit(mddev_t *mddev, mdk_rdev_t *dev)
{
	struct list_head *tmp;
	mdk_rdev_t *rdev;

	ITERATE_RDEV(mddev,rdev,tmp)
		if (rdev->bdev->bd_contains == dev->bdev->bd_contains)
			return rdev;

	return NULL;
}

static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
{
	struct list_head *tmp;
	mdk_rdev_t *rdev;

	ITERATE_RDEV(mddev1,rdev,tmp)
		if (match_dev_unit(mddev2, rdev))
			return 1;

	return 0;
}

static LIST_HEAD(pending_raid_disks);

static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
{
	mdk_rdev_t *same_pdev;
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	struct kobject *ko;

	if (rdev->mddev) {
		MD_BUG();
		return -EINVAL;
	}
	/* make sure rdev->size exceeds mddev->size */
	if (rdev->size && (mddev->size == 0 || rdev->size < mddev->size)) {
		if (mddev->pers)
			/* Cannot change size, so fail */
			return -ENOSPC;
		else
			mddev->size = rdev->size;
	}
	same_pdev = match_dev_unit(mddev, rdev);
	if (same_pdev)
		printk(KERN_WARNING
			"%s: WARNING: %s appears to be on the same physical"
			" disk as %s. True\n protection against single-disk"
			" failure might be compromised.\n",
			mdname(mddev), bdevname(rdev->bdev,b),
			bdevname(same_pdev->bdev,b2));

	/* Verify rdev->desc_nr is unique.
	 * If it is -1, assign a free number, else
	 * check number is not in use
	 */
	if (rdev->desc_nr < 0) {
		int choice = 0;
		if (mddev->pers) choice = mddev->raid_disks;
		while (find_rdev_nr(mddev, choice))
			choice++;
		rdev->desc_nr = choice;
	} else {
		if (find_rdev_nr(mddev, rdev->desc_nr))
			return -EBUSY;
	}
	bdevname(rdev->bdev,b);
	if (kobject_set_name(&rdev->kobj, "dev-%s", b) < 0)
		return -ENOMEM;

	list_add(&rdev->same_set, &mddev->disks);
	rdev->mddev = mddev;
	printk(KERN_INFO "md: bind<%s>\n", b);

	rdev->kobj.parent = &mddev->kobj;
	kobject_add(&rdev->kobj);

	if (rdev->bdev->bd_part)
		ko = &rdev->bdev->bd_part->kobj;
	else
		ko = &rdev->bdev->bd_disk->kobj;
	sysfs_create_link(&rdev->kobj, ko, "block");
	return 0;
}

static void unbind_rdev_from_array(mdk_rdev_t * rdev)
{
	char b[BDEVNAME_SIZE];
	if (!rdev->mddev) {
		MD_BUG();
		return;
	}
	list_del_init(&rdev->same_set);
	printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
	rdev->mddev = NULL;
	sysfs_remove_link(&rdev->kobj, "block");
	kobject_del(&rdev->kobj);
}

/*
 * prevent the device from being mounted, repartitioned or
 * otherwise reused by a RAID array (or any other kernel
 * subsystem), by bd_claiming the device.
 */
static int lock_rdev(mdk_rdev_t *rdev, dev_t dev)
{
	int err = 0;
	struct block_device *bdev;
	char b[BDEVNAME_SIZE];

	bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE);
	if (IS_ERR(bdev)) {
		printk(KERN_ERR "md: could not open %s.\n",
			__bdevname(dev, b));
		return PTR_ERR(bdev);
	}
	err = bd_claim(bdev, rdev);
	if (err) {
		printk(KERN_ERR "md: could not bd_claim %s.\n",
			bdevname(bdev, b));
		blkdev_put(bdev);
		return err;
	}
	rdev->bdev = bdev;
	return err;
}

static void unlock_rdev(mdk_rdev_t *rdev)
{
	struct block_device *bdev = rdev->bdev;
	rdev->bdev = NULL;
	if (!bdev)
		MD_BUG();
	bd_release(bdev);
	blkdev_put(bdev);
}

void md_autodetect_dev(dev_t dev);

static void export_rdev(mdk_rdev_t * rdev)
{
	char b[BDEVNAME_SIZE];
	printk(KERN_INFO "md: export_rdev(%s)\n",
		bdevname(rdev->bdev,b));
	if (rdev->mddev)
		MD_BUG();
	free_disk_sb(rdev);
	list_del_init(&rdev->same_set);
#ifndef MODULE
	md_autodetect_dev(rdev->bdev->bd_dev);
#endif
	unlock_rdev(rdev);
	kobject_put(&rdev->kobj);
}

static void kick_rdev_from_array(mdk_rdev_t * rdev)
{
	unbind_rdev_from_array(rdev);
	export_rdev(rdev);
}

static void export_array(mddev_t *mddev)
{
	struct list_head *tmp;
	mdk_rdev_t *rdev;

	ITERATE_RDEV(mddev,rdev,tmp) {
		if (!rdev->mddev) {
			MD_BUG();
			continue;
		}
		kick_rdev_from_array(rdev);
	}
	if (!list_empty(&mddev->disks))
		MD_BUG();
	mddev->raid_disks = 0;
	mddev->major_version = 0;
}

static void print_desc(mdp_disk_t *desc)
{
	printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number,
		desc->major,desc->minor,desc->raid_disk,desc->state);
}

static void print_sb(mdp_super_t *sb)
{
	int i;

	printk(KERN_INFO
		"md: SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n",
		sb->major_version, sb->minor_version, sb->patch_version,
		sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3,
		sb->ctime);
	printk(KERN_INFO "md: L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n",
		sb->level, sb->size, sb->nr_disks, sb->raid_disks,
		sb->md_minor, sb->layout, sb->chunk_size);
	printk(KERN_INFO "md: UT:%08x ST:%d AD:%d WD:%d"
		" FD:%d SD:%d CSUM:%08x E:%08lx\n",
		sb->utime, sb->state, sb->active_disks, sb->working_disks,
		sb->failed_disks, sb->spare_disks,
		sb->sb_csum, (unsigned long)sb->events_lo);

	printk(KERN_INFO);
	for (i = 0; i < MD_SB_DISKS; i++) {
		mdp_disk_t *desc;

		desc = sb->disks + i;
		if (desc->number || desc->major || desc->minor ||
		    desc->raid_disk || (desc->state && (desc->state != 4))) {
			printk(" D %2d: ", i);
			print_desc(desc);
		}
	}
	printk(KERN_INFO "md: THIS: ");
	print_desc(&sb->this_disk);

}

static void print_rdev(mdk_rdev_t *rdev)
{
	char b[BDEVNAME_SIZE];
	printk(KERN_INFO "md: rdev %s, SZ:%08llu F:%d S:%d DN:%u\n",
		bdevname(rdev->bdev,b), (unsigned long long)rdev->size,
		test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags),
		rdev->desc_nr);
	if (rdev->sb_loaded) {
		printk(KERN_INFO "md: rdev superblock:\n");
		print_sb((mdp_super_t*)page_address(rdev->sb_page));
	} else
		printk(KERN_INFO "md: no rdev superblock!\n");
}

void md_print_devices(void)
{
	struct list_head *tmp, *tmp2;
	mdk_rdev_t *rdev;
	mddev_t *mddev;
	char b[BDEVNAME_SIZE];

	printk("\n");
	printk("md: **********************************\n");
	printk("md: * <COMPLETE RAID STATE PRINTOUT> *\n");
	printk("md: **********************************\n");
	ITERATE_MDDEV(mddev,tmp) {

		if (mddev->bitmap)
			bitmap_print_sb(mddev->bitmap);
		else
			printk("%s: ", mdname(mddev));
		ITERATE_RDEV(mddev,rdev,tmp2)
			printk("<%s>", bdevname(rdev->bdev,b));
		printk("\n");

		ITERATE_RDEV(mddev,rdev,tmp2)
			print_rdev(rdev);
	}
	printk("md: **********************************\n");
	printk("\n");
}


static void sync_sbs(mddev_t * mddev)
{
	mdk_rdev_t *rdev;
	struct list_head *tmp;

	ITERATE_RDEV(mddev,rdev,tmp) {
		super_types[mddev->major_version].
			sync_super(mddev, rdev);
		rdev->sb_loaded = 1;
	}
}

static void md_update_sb(mddev_t * mddev)
{
	int err;
	struct list_head *tmp;
	mdk_rdev_t *rdev;
	int sync_req;

repeat:
	spin_lock_irq(&mddev->write_lock);
	sync_req = mddev->in_sync;
	mddev->utime = get_seconds();
	mddev->events ++;

	if (!mddev->events) {
		/*
		 * oops, this 64-bit counter should never wrap.
		 * Either we are in around ~1 trillion A.C., assuming
		 * 1 reboot per second, or we have a bug:
		 */
		MD_BUG();
		mddev->events --;
	}
	mddev->sb_dirty = 2;
	sync_sbs(mddev);

	/*
	 * do not write anything to disk if using
	 * nonpersistent superblocks
	 */
	if (!mddev->persistent) {
		mddev->sb_dirty = 0;
		spin_unlock_irq(&mddev->write_lock);
		wake_up(&mddev->sb_wait);
		return;
	}
	spin_unlock_irq(&mddev->write_lock);

	dprintk(KERN_INFO
		"md: updating %s RAID superblock on device (in sync %d)\n",
		mdname(mddev),mddev->in_sync);

	err = bitmap_update_sb(mddev->bitmap);
	ITERATE_RDEV(mddev,rdev,tmp) {
		char b[BDEVNAME_SIZE];
		dprintk(KERN_INFO "md: ");
		if (test_bit(Faulty, &rdev->flags))
			dprintk("(skipping faulty ");

		dprintk("%s ", bdevname(rdev->bdev,b));
		if (!test_bit(Faulty, &rdev->flags)) {
			md_super_write(mddev,rdev,
				       rdev->sb_offset<<1, rdev->sb_size,
				       rdev->sb_page);
			dprintk(KERN_INFO "(write) %s's sb offset: %llu\n",
				bdevname(rdev->bdev,b),
				(unsigned long long)rdev->sb_offset);

		} else
			dprintk(")\n");
		if (mddev->level == LEVEL_MULTIPATH)
			/* only need to write one superblock... */
			break;
	}
	md_super_wait(mddev);
	/* if there was a failure, sb_dirty was set to 1, and we re-write super */

	spin_lock_irq(&mddev->write_lock);
	if (mddev->in_sync != sync_req || mddev->sb_dirty == 1) {
		/* have to write it out again */
		spin_unlock_irq(&mddev->write_lock);
		goto repeat;
	}
	mddev->sb_dirty = 0;
	spin_unlock_irq(&mddev->write_lock);
	wake_up(&mddev->sb_wait);

}

/* words written to sysfs files may, or may not, be \n terminated.
 * We want to accept either. For this we use cmd_match.
 */
static int cmd_match(const char *cmd, const char *str)
{
	/* See if cmd, written into a sysfs file, matches
	 * str.  They must either be the same, or cmd can
	 * have a trailing newline
	 */
	while (*cmd && *str && *cmd == *str) {
		cmd++;
		str++;
	}
	if (*cmd == '\n')
		cmd++;
	if (*str || *cmd)
		return 0;
	return 1;
}
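
/*
 * Illustrative only: cmd_match("faulty\n", "faulty") and
 * cmd_match("faulty", "faulty") both return 1, while a partial match such
 * as cmd_match("fault", "faulty") returns 0.
 */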

struct rdev_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(mdk_rdev_t *, char *);
	ssize_t (*store)(mdk_rdev_t *, const char *, size_t);
};

static ssize_t
state_show(mdk_rdev_t *rdev, char *page)
{
	char *sep = "";
	int len=0;

	if (test_bit(Faulty, &rdev->flags)) {
		len+= sprintf(page+len, "%sfaulty",sep);
		sep = ",";
	}
	if (test_bit(In_sync, &rdev->flags)) {
		len += sprintf(page+len, "%sin_sync",sep);
		sep = ",";
	}
	if (!test_bit(Faulty, &rdev->flags) &&
	    !test_bit(In_sync, &rdev->flags)) {
		len += sprintf(page+len, "%sspare", sep);
		sep = ",";
	}
	return len+sprintf(page+len, "\n");
}

static struct rdev_sysfs_entry
rdev_state = __ATTR_RO(state);

static ssize_t
super_show(mdk_rdev_t *rdev, char *page)
{
	if (rdev->sb_loaded && rdev->sb_size) {
		memcpy(page, page_address(rdev->sb_page), rdev->sb_size);
		return rdev->sb_size;
	} else
		return 0;
}
static struct rdev_sysfs_entry rdev_super = __ATTR_RO(super);

static ssize_t
errors_show(mdk_rdev_t *rdev, char *page)
{
	return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
}

static ssize_t
errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
	char *e;
	unsigned long n = simple_strtoul(buf, &e, 10);
	if (*buf && (*e == 0 || *e == '\n')) {
		atomic_set(&rdev->corrected_errors, n);
		return len;
	}
	return -EINVAL;
}
static struct rdev_sysfs_entry rdev_errors =
__ATTR(errors, 0644, errors_show, errors_store);

static ssize_t
slot_show(mdk_rdev_t *rdev, char *page)
{
	if (rdev->raid_disk < 0)
		return sprintf(page, "none\n");
	else
		return sprintf(page, "%d\n", rdev->raid_disk);
}

static ssize_t
slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
	char *e;
	int slot = simple_strtoul(buf, &e, 10);
	if (strncmp(buf, "none", 4)==0)
		slot = -1;
	else if (e==buf || (*e && *e!= '\n'))
		return -EINVAL;
	if (rdev->mddev->pers)
		/* Cannot set slot in active array (yet) */
		return -EBUSY;
	if (slot >= rdev->mddev->raid_disks)
		return -ENOSPC;
	rdev->raid_disk = slot;
	/* assume it is working */
	rdev->flags = 0;
	set_bit(In_sync, &rdev->flags);
	return len;
}


static struct rdev_sysfs_entry rdev_slot =
__ATTR(slot, 0644, slot_show, slot_store);
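
/*
 * Illustrative usage (path assumed: the per-device directory sits under the
 * array's md directory and is named after the "dev-%s" kobject set up in
 * bind_rdev_to_array, e.g. /sys/block/md0/md/dev-sda1). Writing a number to
 * an inactive array places the device in that role:
 *	echo 2 > /sys/block/md0/md/dev-sda1/slot
 *	echo none > /sys/block/md0/md/dev-sda1/slot
 */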

static ssize_t
offset_show(mdk_rdev_t *rdev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
}

static ssize_t
offset_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
	char *e;
	unsigned long long offset = simple_strtoull(buf, &e, 10);
	if (e==buf || (*e && *e != '\n'))
		return -EINVAL;
	if (rdev->mddev->pers)
		return -EBUSY;
	rdev->data_offset = offset;
	return len;
}

static struct rdev_sysfs_entry rdev_offset =
__ATTR(offset, 0644, offset_show, offset_store);

static ssize_t
rdev_size_show(mdk_rdev_t *rdev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)rdev->size);
}

static ssize_t
rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
	char *e;
	unsigned long long size = simple_strtoull(buf, &e, 10);
	if (e==buf || (*e && *e != '\n'))
		return -EINVAL;
	if (rdev->mddev->pers)
		return -EBUSY;
	rdev->size = size;
	if (size < rdev->mddev->size || rdev->mddev->size == 0)
		rdev->mddev->size = size;
	return len;
}

static struct rdev_sysfs_entry rdev_size =
__ATTR(size, 0644, rdev_size_show, rdev_size_store);

static struct attribute *rdev_default_attrs[] = {
	&rdev_state.attr,
	&rdev_super.attr,
	&rdev_errors.attr,
	&rdev_slot.attr,
	&rdev_offset.attr,
	&rdev_size.attr,
	NULL,
};
static ssize_t
rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
	mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);

	if (!entry->show)
		return -EIO;
	return entry->show(rdev, page);
}

static ssize_t
rdev_attr_store(struct kobject *kobj, struct attribute *attr,
	      const char *page, size_t length)
{
	struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
	mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);

	if (!entry->store)
		return -EIO;
	return entry->store(rdev, page, length);
}

static void rdev_free(struct kobject *ko)
{
	mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
	kfree(rdev);
}
static struct sysfs_ops rdev_sysfs_ops = {
	.show		= rdev_attr_show,
	.store		= rdev_attr_store,
};
static struct kobj_type rdev_ktype = {
	.release	= rdev_free,
	.sysfs_ops	= &rdev_sysfs_ops,
	.default_attrs	= rdev_default_attrs,
};

/*
 * Import a device. If 'super_format' >= 0, then sanity check the superblock
 *
 * mark the device faulty if:
 *
 *   - the device is nonexistent (zero size)
 *   - the device has no valid superblock
 *
 * a faulty rdev _never_ has rdev->sb set.
 */
static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor)
{
	char b[BDEVNAME_SIZE];
	int err;
	mdk_rdev_t *rdev;
	sector_t size;

	rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
	if (!rdev) {
		printk(KERN_ERR "md: could not alloc mem for new device!\n");
		return ERR_PTR(-ENOMEM);
	}

	if ((err = alloc_disk_sb(rdev)))
		goto abort_free;

	err = lock_rdev(rdev, newdev);
	if (err)
		goto abort_free;

	rdev->kobj.parent = NULL;
	rdev->kobj.ktype = &rdev_ktype;
	kobject_init(&rdev->kobj);

	rdev->desc_nr = -1;
	rdev->flags = 0;
	rdev->data_offset = 0;
	atomic_set(&rdev->nr_pending, 0);
	atomic_set(&rdev->read_errors, 0);
	atomic_set(&rdev->corrected_errors, 0);

	size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
	if (!size) {
		printk(KERN_WARNING
			"md: %s has zero or unknown size, marking faulty!\n",
			bdevname(rdev->bdev,b));
		err = -EINVAL;
		goto abort_free;
	}

	if (super_format >= 0) {
		err = super_types[super_format].
			load_super(rdev, NULL, super_minor);
		if (err == -EINVAL) {
			printk(KERN_WARNING
				"md: %s has invalid sb, not importing!\n",
				bdevname(rdev->bdev,b));
			goto abort_free;
		}
		if (err < 0) {
			printk(KERN_WARNING
				"md: could not read %s's sb, not importing!\n",
				bdevname(rdev->bdev,b));
			goto abort_free;
		}
	}
	INIT_LIST_HEAD(&rdev->same_set);

	return rdev;

abort_free:
	if (rdev->sb_page) {
		if (rdev->bdev)
			unlock_rdev(rdev);
		free_disk_sb(rdev);
	}
	kfree(rdev);
	return ERR_PTR(err);
}

/*
 * Check a full RAID array for plausibility
 */


static void analyze_sbs(mddev_t * mddev)
{
	int i;
	struct list_head *tmp;
	mdk_rdev_t *rdev, *freshest;
	char b[BDEVNAME_SIZE];

	freshest = NULL;
	ITERATE_RDEV(mddev,rdev,tmp)
		switch (super_types[mddev->major_version].
			load_super(rdev, freshest, mddev->minor_version)) {
		case 1:
			freshest = rdev;
			break;
		case 0:
			break;
		default:
			printk( KERN_ERR \
				"md: fatal superblock inconsistency in %s"
				" -- removing from array\n",
				bdevname(rdev->bdev,b));
			kick_rdev_from_array(rdev);
		}


	super_types[mddev->major_version].
		validate_super(mddev, freshest);

	i = 0;
	ITERATE_RDEV(mddev,rdev,tmp) {
		if (rdev != freshest)
			if (super_types[mddev->major_version].
			    validate_super(mddev, rdev)) {
				printk(KERN_WARNING "md: kicking non-fresh %s"
					" from array!\n",
					bdevname(rdev->bdev,b));
				kick_rdev_from_array(rdev);
				continue;
			}
		if (mddev->level == LEVEL_MULTIPATH) {
			rdev->desc_nr = i++;
			rdev->raid_disk = rdev->desc_nr;
			set_bit(In_sync, &rdev->flags);
		}
	}



	if (mddev->recovery_cp != MaxSector &&
	    mddev->level >= 1)
		printk(KERN_ERR "md: %s: raid array is not clean"
		       " -- starting background reconstruction\n",
		       mdname(mddev));

}

eae1701f 1901static ssize_t
96de1e66 1902level_show(mddev_t *mddev, char *page)
eae1701f 1903{
2604b703 1904 struct mdk_personality *p = mddev->pers;
d9d166c2 1905 if (p)
eae1701f 1906 return sprintf(page, "%s\n", p->name);
d9d166c2
N
1907 else if (mddev->clevel[0])
1908 return sprintf(page, "%s\n", mddev->clevel);
1909 else if (mddev->level != LEVEL_NONE)
1910 return sprintf(page, "%d\n", mddev->level);
1911 else
1912 return 0;
eae1701f
N
1913}
1914
d9d166c2
N
1915static ssize_t
1916level_store(mddev_t *mddev, const char *buf, size_t len)
1917{
1918 int rv = len;
1919 if (mddev->pers)
1920 return -EBUSY;
1921 if (len == 0)
1922 return 0;
1923 if (len >= sizeof(mddev->clevel))
1924 return -ENOSPC;
1925 strncpy(mddev->clevel, buf, len);
1926 if (mddev->clevel[len-1] == '\n')
1927 len--;
1928 mddev->clevel[len] = 0;
1929 mddev->level = LEVEL_NONE;
1930 return rv;
1931}
1932
1933static struct md_sysfs_entry md_level =
1934__ATTR(level, 0644, level_show, level_store);
eae1701f
N
1935
1936static ssize_t
96de1e66 1937raid_disks_show(mddev_t *mddev, char *page)
eae1701f 1938{
bb636547
N
1939 if (mddev->raid_disks == 0)
1940 return 0;
eae1701f
N
1941 return sprintf(page, "%d\n", mddev->raid_disks);
1942}
1943
da943b99
N
1944static int update_raid_disks(mddev_t *mddev, int raid_disks);
1945
1946static ssize_t
1947raid_disks_store(mddev_t *mddev, const char *buf, size_t len)
1948{
1949 /* can only set raid_disks if array is not yet active */
1950 char *e;
1951 int rv = 0;
1952 unsigned long n = simple_strtoul(buf, &e, 10);
1953
1954 if (!*buf || (*e && *e != '\n'))
1955 return -EINVAL;
1956
1957 if (mddev->pers)
1958 rv = update_raid_disks(mddev, n);
1959 else
1960 mddev->raid_disks = n;
1961 return rv ? rv : len;
1962}
1963static struct md_sysfs_entry md_raid_disks =
1964__ATTR(raid_disks, 0644, raid_disks_show, raid_disks_store);
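/*
 * Example (illustrative only): on an inactive array
 *     echo 4 > /sys/block/md0/md/raid_disks
 * merely records the device count; on an active array the same write is
 * handed to update_raid_disks() and fails if the personality cannot reshape.
 */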
eae1701f 1965
3b34380a
N
1966static ssize_t
1967chunk_size_show(mddev_t *mddev, char *page)
1968{
1969 return sprintf(page, "%d\n", mddev->chunk_size);
1970}
1971
1972static ssize_t
1973chunk_size_store(mddev_t *mddev, const char *buf, size_t len)
1974{
1975 /* can only set chunk_size if array is not yet active */
1976 char *e;
1977 unsigned long n = simple_strtoul(buf, &e, 10);
1978
1979 if (mddev->pers)
1980 return -EBUSY;
1981 if (!*buf || (*e && *e != '\n'))
1982 return -EINVAL;
1983
1984 mddev->chunk_size = n;
1985 return len;
1986}
1987static struct md_sysfs_entry md_chunk_size =
1988__ATTR(chunk_size, 0644, chunk_size_show, chunk_size_store);
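/*
 * Example (illustrative only): the value is in bytes and is only validated
 * later, in do_md_run(), where it must be a power of two between PAGE_SIZE
 * and MAX_CHUNK_SIZE, e.g.
 *     echo 65536 > /sys/block/md0/md/chunk_size
 */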
1989
6d7ff738
N
1990static ssize_t
1991null_show(mddev_t *mddev, char *page)
1992{
1993 return -EINVAL;
1994}
1995
1996static ssize_t
1997new_dev_store(mddev_t *mddev, const char *buf, size_t len)
1998{
1999 /* buf must be %d:%d\n? giving major and minor numbers */
2000 /* The new device is added to the array.
2001 * If the array has a persistent superblock, we read the
2002 * superblock to initialise info and check validity.
 2003 * Otherwise, the only checking done is that in bind_rdev_to_array,
2004 * which mainly checks size.
2005 */
2006 char *e;
2007 int major = simple_strtoul(buf, &e, 10);
2008 int minor;
2009 dev_t dev;
2010 mdk_rdev_t *rdev;
2011 int err;
2012
2013 if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
2014 return -EINVAL;
2015 minor = simple_strtoul(e+1, &e, 10);
2016 if (*e && *e != '\n')
2017 return -EINVAL;
2018 dev = MKDEV(major, minor);
2019 if (major != MAJOR(dev) ||
2020 minor != MINOR(dev))
2021 return -EOVERFLOW;
2022
2023
2024 if (mddev->persistent) {
2025 rdev = md_import_device(dev, mddev->major_version,
2026 mddev->minor_version);
2027 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
2028 mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
2029 mdk_rdev_t, same_set);
2030 err = super_types[mddev->major_version]
2031 .load_super(rdev, rdev0, mddev->minor_version);
2032 if (err < 0)
2033 goto out;
2034 }
2035 } else
2036 rdev = md_import_device(dev, -1, -1);
2037
2038 if (IS_ERR(rdev))
2039 return PTR_ERR(rdev);
2040 err = bind_rdev_to_array(rdev, mddev);
2041 out:
2042 if (err)
2043 export_rdev(rdev);
2044 return err ? err : len;
2045}
2046
2047static struct md_sysfs_entry md_new_device =
2048__ATTR(new_dev, 0200, null_show, new_dev_store);
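/*
 * Usage sketch for the attribute above (path assumed to be the "md" kobject
 * registered in md_probe() further down): a component device is named by
 * "major:minor", so
 *     echo 8:16 > /sys/block/md0/md/new_dev
 * imports the device at dev_t 8:16 and binds it to the array; with a
 * persistent superblock the on-disk metadata is first read and compared
 * against any device already in the array.
 */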
3b34380a 2049
a35b0d69
N
2050static ssize_t
2051size_show(mddev_t *mddev, char *page)
2052{
2053 return sprintf(page, "%llu\n", (unsigned long long)mddev->size);
2054}
2055
2056static int update_size(mddev_t *mddev, unsigned long size);
2057
2058static ssize_t
2059size_store(mddev_t *mddev, const char *buf, size_t len)
2060{
2061 /* If array is inactive, we can reduce the component size, but
2062 * not increase it (except from 0).
2063 * If array is active, we can try an on-line resize
2064 */
2065 char *e;
2066 int err = 0;
2067 unsigned long long size = simple_strtoull(buf, &e, 10);
2068 if (!*buf || *buf == '\n' ||
2069 (*e && *e != '\n'))
2070 return -EINVAL;
2071
2072 if (mddev->pers) {
2073 err = update_size(mddev, size);
2074 md_update_sb(mddev);
2075 } else {
2076 if (mddev->size == 0 ||
2077 mddev->size > size)
2078 mddev->size = size;
2079 else
2080 err = -ENOSPC;
2081 }
2082 return err ? err : len;
2083}
2084
2085static struct md_sysfs_entry md_size =
2086__ATTR(component_size, 0644, size_show, size_store);
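/*
 * Example (illustrative only): the value read and written here is the
 * per-component size in 1K blocks (mddev->size), so
 *     echo 1048576 > /sys/block/md0/md/component_size
 * asks for 1GiB per device; on an active array this goes through
 * update_size() and becomes an on-line resize.
 */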
2087
8bb93aac
N
2088
 2089/* Metadata version.
2090 * This is either 'none' for arrays with externally managed metadata,
2091 * or N.M for internally known formats
2092 */
2093static ssize_t
2094metadata_show(mddev_t *mddev, char *page)
2095{
2096 if (mddev->persistent)
2097 return sprintf(page, "%d.%d\n",
2098 mddev->major_version, mddev->minor_version);
2099 else
2100 return sprintf(page, "none\n");
2101}
2102
2103static ssize_t
2104metadata_store(mddev_t *mddev, const char *buf, size_t len)
2105{
2106 int major, minor;
2107 char *e;
2108 if (!list_empty(&mddev->disks))
2109 return -EBUSY;
2110
2111 if (cmd_match(buf, "none")) {
2112 mddev->persistent = 0;
2113 mddev->major_version = 0;
2114 mddev->minor_version = 90;
2115 return len;
2116 }
2117 major = simple_strtoul(buf, &e, 10);
2118 if (e==buf || *e != '.')
2119 return -EINVAL;
2120 buf = e+1;
2121 minor = simple_strtoul(buf, &e, 10);
2122 if (e==buf || *e != '\n')
2123 return -EINVAL;
2124 if (major >= sizeof(super_types)/sizeof(super_types[0]) ||
2125 super_types[major].name == NULL)
2126 return -ENOENT;
2127 mddev->major_version = major;
2128 mddev->minor_version = minor;
2129 mddev->persistent = 1;
2130 return len;
2131}
2132
2133static struct md_sysfs_entry md_metadata =
2134__ATTR(metadata_version, 0644, metadata_show, metadata_store);
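/*
 * Example (illustrative only): while the array has no member devices,
 *     echo 0.90 > /sys/block/md0/md/metadata_version
 * selects version-0.90 superblocks, and
 *     echo none > /sys/block/md0/md/metadata_version
 * marks the metadata as externally managed (non-persistent).
 */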
2135
24dd469d 2136static ssize_t
7eec314d 2137action_show(mddev_t *mddev, char *page)
24dd469d 2138{
7eec314d 2139 char *type = "idle";
31399d9e
N
2140 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
2141 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) {
2142 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
24dd469d
N
2143 if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2144 type = "resync";
2145 else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
2146 type = "check";
2147 else
2148 type = "repair";
2149 } else
2150 type = "recover";
2151 }
2152 return sprintf(page, "%s\n", type);
2153}
2154
2155static ssize_t
7eec314d 2156action_store(mddev_t *mddev, const char *page, size_t len)
24dd469d 2157{
7eec314d
N
2158 if (!mddev->pers || !mddev->pers->sync_request)
2159 return -EINVAL;
2160
bce74dac 2161 if (cmd_match(page, "idle")) {
7eec314d
N
2162 if (mddev->sync_thread) {
2163 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2164 md_unregister_thread(mddev->sync_thread);
2165 mddev->sync_thread = NULL;
2166 mddev->recovery = 0;
2167 }
03c902e1
N
2168 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
2169 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
24dd469d 2170 return -EBUSY;
03c902e1 2171 else if (cmd_match(page, "resync") || cmd_match(page, "recover"))
7eec314d
N
2172 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2173 else {
bce74dac 2174 if (cmd_match(page, "check"))
7eec314d 2175 set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
bce74dac 2176 else if (!cmd_match(page, "repair"))
7eec314d
N
2177 return -EINVAL;
2178 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
2179 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
7eec314d 2180 }
03c902e1 2181 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
24dd469d
N
2182 md_wakeup_thread(mddev->thread);
2183 return len;
2184}
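/*
 * Usage sketch (illustrative only): on an array whose personality provides
 * sync_request, the strings parsed above correspond to writes such as
 *     echo check  > /sys/block/md0/md/sync_action    (read-only scrub)
 *     echo repair > /sys/block/md0/md/sync_action    (scrub and rewrite)
 *     echo idle   > /sys/block/md0/md/sync_action    (interrupt it again)
 * Reading the file back reports idle/resync/recover/check/repair.
 */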
2185
9d88883e 2186static ssize_t
96de1e66 2187mismatch_cnt_show(mddev_t *mddev, char *page)
9d88883e
N
2188{
2189 return sprintf(page, "%llu\n",
2190 (unsigned long long) mddev->resync_mismatches);
2191}
2192
96de1e66 2193static struct md_sysfs_entry
7eec314d 2194md_scan_mode = __ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
24dd469d 2195
96de1e66
N
2196
2197static struct md_sysfs_entry
2198md_mismatches = __ATTR_RO(mismatch_cnt);
9d88883e 2199
eae1701f
N
2200static struct attribute *md_default_attrs[] = {
2201 &md_level.attr,
2202 &md_raid_disks.attr,
3b34380a 2203 &md_chunk_size.attr,
a35b0d69 2204 &md_size.attr,
8bb93aac 2205 &md_metadata.attr,
6d7ff738 2206 &md_new_device.attr,
411036fa
N
2207 NULL,
2208};
2209
2210static struct attribute *md_redundancy_attrs[] = {
24dd469d 2211 &md_scan_mode.attr,
9d88883e 2212 &md_mismatches.attr,
eae1701f
N
2213 NULL,
2214};
411036fa
N
2215static struct attribute_group md_redundancy_group = {
2216 .name = NULL,
2217 .attrs = md_redundancy_attrs,
2218};
2219
eae1701f
N
2220
2221static ssize_t
2222md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
2223{
2224 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
2225 mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
96de1e66 2226 ssize_t rv;
eae1701f
N
2227
2228 if (!entry->show)
2229 return -EIO;
96de1e66
N
2230 mddev_lock(mddev);
2231 rv = entry->show(mddev, page);
2232 mddev_unlock(mddev);
2233 return rv;
eae1701f
N
2234}
2235
2236static ssize_t
2237md_attr_store(struct kobject *kobj, struct attribute *attr,
2238 const char *page, size_t length)
2239{
2240 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
2241 mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
96de1e66 2242 ssize_t rv;
eae1701f
N
2243
2244 if (!entry->store)
2245 return -EIO;
96de1e66
N
2246 mddev_lock(mddev);
2247 rv = entry->store(mddev, page, length);
2248 mddev_unlock(mddev);
2249 return rv;
eae1701f
N
2250}
2251
2252static void md_free(struct kobject *ko)
2253{
2254 mddev_t *mddev = container_of(ko, mddev_t, kobj);
2255 kfree(mddev);
2256}
2257
2258static struct sysfs_ops md_sysfs_ops = {
2259 .show = md_attr_show,
2260 .store = md_attr_store,
2261};
2262static struct kobj_type md_ktype = {
2263 .release = md_free,
2264 .sysfs_ops = &md_sysfs_ops,
2265 .default_attrs = md_default_attrs,
2266};
2267
1da177e4
LT
2268int mdp_major = 0;
2269
2270static struct kobject *md_probe(dev_t dev, int *part, void *data)
2271{
2272 static DECLARE_MUTEX(disks_sem);
2273 mddev_t *mddev = mddev_find(dev);
2274 struct gendisk *disk;
2275 int partitioned = (MAJOR(dev) != MD_MAJOR);
2276 int shift = partitioned ? MdpMinorShift : 0;
2277 int unit = MINOR(dev) >> shift;
2278
2279 if (!mddev)
2280 return NULL;
2281
2282 down(&disks_sem);
2283 if (mddev->gendisk) {
2284 up(&disks_sem);
2285 mddev_put(mddev);
2286 return NULL;
2287 }
2288 disk = alloc_disk(1 << shift);
2289 if (!disk) {
2290 up(&disks_sem);
2291 mddev_put(mddev);
2292 return NULL;
2293 }
2294 disk->major = MAJOR(dev);
2295 disk->first_minor = unit << shift;
2296 if (partitioned) {
2297 sprintf(disk->disk_name, "md_d%d", unit);
2298 sprintf(disk->devfs_name, "md/d%d", unit);
2299 } else {
2300 sprintf(disk->disk_name, "md%d", unit);
2301 sprintf(disk->devfs_name, "md/%d", unit);
2302 }
2303 disk->fops = &md_fops;
2304 disk->private_data = mddev;
2305 disk->queue = mddev->queue;
2306 add_disk(disk);
2307 mddev->gendisk = disk;
2308 up(&disks_sem);
9c791977 2309 mddev->kobj.parent = &disk->kobj;
eae1701f
N
2310 mddev->kobj.k_name = NULL;
2311 snprintf(mddev->kobj.name, KOBJ_NAME_LEN, "%s", "md");
2312 mddev->kobj.ktype = &md_ktype;
2313 kobject_register(&mddev->kobj);
1da177e4
LT
2314 return NULL;
2315}
2316
2317void md_wakeup_thread(mdk_thread_t *thread);
2318
2319static void md_safemode_timeout(unsigned long data)
2320{
2321 mddev_t *mddev = (mddev_t *) data;
2322
2323 mddev->safemode = 1;
2324 md_wakeup_thread(mddev->thread);
2325}
2326
6ff8d8ec 2327static int start_dirty_degraded;
1da177e4
LT
2328
2329static int do_md_run(mddev_t * mddev)
2330{
2604b703 2331 int err;
1da177e4
LT
2332 int chunk_size;
2333 struct list_head *tmp;
2334 mdk_rdev_t *rdev;
2335 struct gendisk *disk;
2604b703 2336 struct mdk_personality *pers;
1da177e4
LT
2337 char b[BDEVNAME_SIZE];
2338
a757e64c
N
2339 if (list_empty(&mddev->disks))
2340 /* cannot run an array with no devices.. */
1da177e4 2341 return -EINVAL;
1da177e4
LT
2342
2343 if (mddev->pers)
2344 return -EBUSY;
2345
2346 /*
2347 * Analyze all RAID superblock(s)
2348 */
a757e64c
N
2349 if (!mddev->raid_disks)
2350 analyze_sbs(mddev);
1da177e4
LT
2351
2352 chunk_size = mddev->chunk_size;
2604b703
N
2353
2354 if (chunk_size) {
1da177e4
LT
2355 if (chunk_size > MAX_CHUNK_SIZE) {
2356 printk(KERN_ERR "too big chunk_size: %d > %d\n",
2357 chunk_size, MAX_CHUNK_SIZE);
2358 return -EINVAL;
2359 }
2360 /*
 2361 * chunk-size has to be a power of 2 and a multiple of PAGE_SIZE;
 * (1 << ffz(~n)) is the lowest set bit of n, so it equals n only when n is a power of two
2362 */
2363 if ( (1 << ffz(~chunk_size)) != chunk_size) {
a757e64c 2364 printk(KERN_ERR "chunk_size of %d not valid\n", chunk_size);
1da177e4
LT
2365 return -EINVAL;
2366 }
2367 if (chunk_size < PAGE_SIZE) {
2368 printk(KERN_ERR "too small chunk_size: %d < %ld\n",
2369 chunk_size, PAGE_SIZE);
2370 return -EINVAL;
2371 }
2372
2373 /* devices must have minimum size of one chunk */
2374 ITERATE_RDEV(mddev,rdev,tmp) {
b2d444d7 2375 if (test_bit(Faulty, &rdev->flags))
1da177e4
LT
2376 continue;
2377 if (rdev->size < chunk_size / 1024) {
2378 printk(KERN_WARNING
2379 "md: Dev %s smaller than chunk_size:"
2380 " %lluk < %dk\n",
2381 bdevname(rdev->bdev,b),
2382 (unsigned long long)rdev->size,
2383 chunk_size / 1024);
2384 return -EINVAL;
2385 }
2386 }
2387 }
2388
1da177e4 2389#ifdef CONFIG_KMOD
d9d166c2
N
2390 if (mddev->level != LEVEL_NONE)
2391 request_module("md-level-%d", mddev->level);
2392 else if (mddev->clevel[0])
2393 request_module("md-%s", mddev->clevel);
1da177e4
LT
2394#endif
2395
2396 /*
2397 * Drop all container device buffers, from now on
2398 * the only valid external interface is through the md
2399 * device.
2400 * Also find largest hardsector size
2401 */
2402 ITERATE_RDEV(mddev,rdev,tmp) {
b2d444d7 2403 if (test_bit(Faulty, &rdev->flags))
1da177e4
LT
2404 continue;
2405 sync_blockdev(rdev->bdev);
2406 invalidate_bdev(rdev->bdev, 0);
2407 }
2408
2409 md_probe(mddev->unit, NULL, NULL);
2410 disk = mddev->gendisk;
2411 if (!disk)
2412 return -ENOMEM;
2413
2414 spin_lock(&pers_lock);
d9d166c2 2415 pers = find_pers(mddev->level, mddev->clevel);
2604b703 2416 if (!pers || !try_module_get(pers->owner)) {
1da177e4 2417 spin_unlock(&pers_lock);
d9d166c2
N
2418 if (mddev->level != LEVEL_NONE)
2419 printk(KERN_WARNING "md: personality for level %d is not loaded!\n",
2420 mddev->level);
2421 else
2422 printk(KERN_WARNING "md: personality for level %s is not loaded!\n",
2423 mddev->clevel);
1da177e4
LT
2424 return -EINVAL;
2425 }
2604b703 2426 mddev->pers = pers;
1da177e4 2427 spin_unlock(&pers_lock);
d9d166c2
N
2428 mddev->level = pers->level;
2429 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
1da177e4 2430
657390d2 2431 mddev->recovery = 0;
1da177e4 2432 mddev->resync_max_sectors = mddev->size << 1; /* may be over-ridden by personality */
a9701a30 2433 mddev->barriers_work = 1;
6ff8d8ec 2434 mddev->ok_start_degraded = start_dirty_degraded;
1da177e4 2435
f91de92e
N
2436 if (start_readonly)
2437 mddev->ro = 2; /* read-only, but switch on first write */
2438
b15c2e57
N
2439 err = mddev->pers->run(mddev);
2440 if (!err && mddev->pers->sync_request) {
2441 err = bitmap_create(mddev);
2442 if (err) {
2443 printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
2444 mdname(mddev), err);
2445 mddev->pers->stop(mddev);
2446 }
2447 }
1da177e4
LT
2448 if (err) {
2449 printk(KERN_ERR "md: pers->run() failed ...\n");
2450 module_put(mddev->pers->owner);
2451 mddev->pers = NULL;
32a7627c
N
2452 bitmap_destroy(mddev);
2453 return err;
1da177e4 2454 }
411036fa
N
2455 if (mddev->pers->sync_request)
2456 sysfs_create_group(&mddev->kobj, &md_redundancy_group);
fd9d49ca
N
2457 else if (mddev->ro == 2) /* auto-readonly not meaningful */
2458 mddev->ro = 0;
2459
1da177e4
LT
2460 atomic_set(&mddev->writes_pending,0);
2461 mddev->safemode = 0;
2462 mddev->safemode_timer.function = md_safemode_timeout;
2463 mddev->safemode_timer.data = (unsigned long) mddev;
2464 mddev->safemode_delay = (20 * HZ)/1000 +1; /* 20 msec delay */
2465 mddev->in_sync = 1;
86e6ffdd
N
2466
2467 ITERATE_RDEV(mddev,rdev,tmp)
2468 if (rdev->raid_disk >= 0) {
2469 char nm[20];
2470 sprintf(nm, "rd%d", rdev->raid_disk);
2471 sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
2472 }
1da177e4
LT
2473
2474 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
005eca5e 2475 md_wakeup_thread(mddev->thread);
1da177e4
LT
2476
2477 if (mddev->sb_dirty)
2478 md_update_sb(mddev);
2479
2480 set_capacity(disk, mddev->array_size<<1);
2481
2482 /* If we call blk_queue_make_request here, it will
2483 * re-initialise max_sectors etc which may have been
2484 * refined inside -> run. So just set the bits we need to set.
 2485 * Most initialisation happened when we called
2486 * blk_queue_make_request(..., md_fail_request)
2487 * earlier.
2488 */
2489 mddev->queue->queuedata = mddev;
2490 mddev->queue->make_request_fn = mddev->pers->make_request;
2491
2492 mddev->changed = 1;
d7603b7e 2493 md_new_event(mddev);
1da177e4
LT
2494 return 0;
2495}
2496
2497static int restart_array(mddev_t *mddev)
2498{
2499 struct gendisk *disk = mddev->gendisk;
2500 int err;
2501
2502 /*
2503 * Complain if it has no devices
2504 */
2505 err = -ENXIO;
2506 if (list_empty(&mddev->disks))
2507 goto out;
2508
2509 if (mddev->pers) {
2510 err = -EBUSY;
2511 if (!mddev->ro)
2512 goto out;
2513
2514 mddev->safemode = 0;
2515 mddev->ro = 0;
2516 set_disk_ro(disk, 0);
2517
2518 printk(KERN_INFO "md: %s switched to read-write mode.\n",
2519 mdname(mddev));
2520 /*
2521 * Kick recovery or resync if necessary
2522 */
2523 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2524 md_wakeup_thread(mddev->thread);
2525 err = 0;
2526 } else {
2527 printk(KERN_ERR "md: %s has no personality assigned.\n",
2528 mdname(mddev));
2529 err = -EINVAL;
2530 }
2531
2532out:
2533 return err;
2534}
2535
2536static int do_md_stop(mddev_t * mddev, int ro)
2537{
2538 int err = 0;
2539 struct gendisk *disk = mddev->gendisk;
2540
2541 if (mddev->pers) {
2542 if (atomic_read(&mddev->active)>2) {
2543 printk("md: %s still in use.\n",mdname(mddev));
2544 return -EBUSY;
2545 }
2546
2547 if (mddev->sync_thread) {
2548 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2549 md_unregister_thread(mddev->sync_thread);
2550 mddev->sync_thread = NULL;
2551 }
2552
2553 del_timer_sync(&mddev->safemode_timer);
2554
2555 invalidate_partition(disk, 0);
2556
2557 if (ro) {
2558 err = -ENXIO;
f91de92e 2559 if (mddev->ro==1)
1da177e4
LT
2560 goto out;
2561 mddev->ro = 1;
2562 } else {
6b8b3e8a 2563 bitmap_flush(mddev);
a9701a30 2564 md_super_wait(mddev);
1da177e4
LT
2565 if (mddev->ro)
2566 set_disk_ro(disk, 0);
2567 blk_queue_make_request(mddev->queue, md_fail_request);
2568 mddev->pers->stop(mddev);
411036fa
N
2569 if (mddev->pers->sync_request)
2570 sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
2571
1da177e4
LT
2572 module_put(mddev->pers->owner);
2573 mddev->pers = NULL;
2574 if (mddev->ro)
2575 mddev->ro = 0;
2576 }
2577 if (!mddev->in_sync) {
2578 /* mark array as shutdown cleanly */
2579 mddev->in_sync = 1;
2580 md_update_sb(mddev);
2581 }
2582 if (ro)
2583 set_disk_ro(disk, 1);
2584 }
32a7627c
N
2585
2586 bitmap_destroy(mddev);
2587 if (mddev->bitmap_file) {
2588 atomic_set(&mddev->bitmap_file->f_dentry->d_inode->i_writecount, 1);
2589 fput(mddev->bitmap_file);
2590 mddev->bitmap_file = NULL;
2591 }
9223214e 2592 mddev->bitmap_offset = 0;
32a7627c 2593
1da177e4
LT
2594 /*
2595 * Free resources if final stop
2596 */
2597 if (!ro) {
86e6ffdd
N
2598 mdk_rdev_t *rdev;
2599 struct list_head *tmp;
1da177e4
LT
2600 struct gendisk *disk;
2601 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
2602
86e6ffdd
N
2603 ITERATE_RDEV(mddev,rdev,tmp)
2604 if (rdev->raid_disk >= 0) {
2605 char nm[20];
2606 sprintf(nm, "rd%d", rdev->raid_disk);
2607 sysfs_remove_link(&mddev->kobj, nm);
2608 }
2609
1da177e4
LT
2610 export_array(mddev);
2611
2612 mddev->array_size = 0;
2613 disk = mddev->gendisk;
2614 if (disk)
2615 set_capacity(disk, 0);
2616 mddev->changed = 1;
2617 } else
2618 printk(KERN_INFO "md: %s switched to read-only mode.\n",
2619 mdname(mddev));
2620 err = 0;
d7603b7e 2621 md_new_event(mddev);
1da177e4
LT
2622out:
2623 return err;
2624}
2625
2626static void autorun_array(mddev_t *mddev)
2627{
2628 mdk_rdev_t *rdev;
2629 struct list_head *tmp;
2630 int err;
2631
a757e64c 2632 if (list_empty(&mddev->disks))
1da177e4 2633 return;
1da177e4
LT
2634
2635 printk(KERN_INFO "md: running: ");
2636
2637 ITERATE_RDEV(mddev,rdev,tmp) {
2638 char b[BDEVNAME_SIZE];
2639 printk("<%s>", bdevname(rdev->bdev,b));
2640 }
2641 printk("\n");
2642
2643 err = do_md_run (mddev);
2644 if (err) {
2645 printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
2646 do_md_stop (mddev, 0);
2647 }
2648}
2649
2650/*
 2651 * let's try to run arrays based on all disks that have arrived
2652 * until now. (those are in pending_raid_disks)
2653 *
2654 * the method: pick the first pending disk, collect all disks with
2655 * the same UUID, remove all from the pending list and put them into
2656 * the 'same_array' list. Then order this list based on superblock
2657 * update time (freshest comes first), kick out 'old' disks and
2658 * compare superblocks. If everything's fine then run it.
2659 *
2660 * If "unit" is allocated, then bump its reference count
2661 */
2662static void autorun_devices(int part)
2663{
2664 struct list_head candidates;
2665 struct list_head *tmp;
2666 mdk_rdev_t *rdev0, *rdev;
2667 mddev_t *mddev;
2668 char b[BDEVNAME_SIZE];
2669
2670 printk(KERN_INFO "md: autorun ...\n");
2671 while (!list_empty(&pending_raid_disks)) {
2672 dev_t dev;
2673 rdev0 = list_entry(pending_raid_disks.next,
2674 mdk_rdev_t, same_set);
2675
2676 printk(KERN_INFO "md: considering %s ...\n",
2677 bdevname(rdev0->bdev,b));
2678 INIT_LIST_HEAD(&candidates);
2679 ITERATE_RDEV_PENDING(rdev,tmp)
2680 if (super_90_load(rdev, rdev0, 0) >= 0) {
2681 printk(KERN_INFO "md: adding %s ...\n",
2682 bdevname(rdev->bdev,b));
2683 list_move(&rdev->same_set, &candidates);
2684 }
2685 /*
2686 * now we have a set of devices, with all of them having
2687 * mostly sane superblocks. It's time to allocate the
2688 * mddev.
2689 */
2690 if (rdev0->preferred_minor < 0 || rdev0->preferred_minor >= MAX_MD_DEVS) {
2691 printk(KERN_INFO "md: unit number in %s is bad: %d\n",
2692 bdevname(rdev0->bdev, b), rdev0->preferred_minor);
2693 break;
2694 }
2695 if (part)
2696 dev = MKDEV(mdp_major,
2697 rdev0->preferred_minor << MdpMinorShift);
2698 else
2699 dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
2700
2701 md_probe(dev, NULL, NULL);
2702 mddev = mddev_find(dev);
2703 if (!mddev) {
2704 printk(KERN_ERR
2705 "md: cannot allocate memory for md drive.\n");
2706 break;
2707 }
2708 if (mddev_lock(mddev))
2709 printk(KERN_WARNING "md: %s locked, cannot run\n",
2710 mdname(mddev));
2711 else if (mddev->raid_disks || mddev->major_version
2712 || !list_empty(&mddev->disks)) {
2713 printk(KERN_WARNING
2714 "md: %s already running, cannot run %s\n",
2715 mdname(mddev), bdevname(rdev0->bdev,b));
2716 mddev_unlock(mddev);
2717 } else {
2718 printk(KERN_INFO "md: created %s\n", mdname(mddev));
2719 ITERATE_RDEV_GENERIC(candidates,rdev,tmp) {
2720 list_del_init(&rdev->same_set);
2721 if (bind_rdev_to_array(rdev, mddev))
2722 export_rdev(rdev);
2723 }
2724 autorun_array(mddev);
2725 mddev_unlock(mddev);
2726 }
2727 /* on success, candidates will be empty, on error
2728 * it won't...
2729 */
2730 ITERATE_RDEV_GENERIC(candidates,rdev,tmp)
2731 export_rdev(rdev);
2732 mddev_put(mddev);
2733 }
2734 printk(KERN_INFO "md: ... autorun DONE.\n");
2735}
2736
2737/*
2738 * import RAID devices based on one partition
2739 * if possible, the array gets run as well.
2740 */
2741
2742static int autostart_array(dev_t startdev)
2743{
2744 char b[BDEVNAME_SIZE];
2745 int err = -EINVAL, i;
2746 mdp_super_t *sb = NULL;
2747 mdk_rdev_t *start_rdev = NULL, *rdev;
2748
2749 start_rdev = md_import_device(startdev, 0, 0);
2750 if (IS_ERR(start_rdev))
2751 return err;
2752
2753
2754 /* NOTE: this can only work for 0.90.0 superblocks */
2755 sb = (mdp_super_t*)page_address(start_rdev->sb_page);
2756 if (sb->major_version != 0 ||
2757 sb->minor_version != 90 ) {
2758 printk(KERN_WARNING "md: can only autostart 0.90.0 arrays\n");
2759 export_rdev(start_rdev);
2760 return err;
2761 }
2762
b2d444d7 2763 if (test_bit(Faulty, &start_rdev->flags)) {
1da177e4
LT
2764 printk(KERN_WARNING
2765 "md: can not autostart based on faulty %s!\n",
2766 bdevname(start_rdev->bdev,b));
2767 export_rdev(start_rdev);
2768 return err;
2769 }
2770 list_add(&start_rdev->same_set, &pending_raid_disks);
2771
2772 for (i = 0; i < MD_SB_DISKS; i++) {
2773 mdp_disk_t *desc = sb->disks + i;
2774 dev_t dev = MKDEV(desc->major, desc->minor);
2775
2776 if (!dev)
2777 continue;
2778 if (dev == startdev)
2779 continue;
2780 if (MAJOR(dev) != desc->major || MINOR(dev) != desc->minor)
2781 continue;
2782 rdev = md_import_device(dev, 0, 0);
2783 if (IS_ERR(rdev))
2784 continue;
2785
2786 list_add(&rdev->same_set, &pending_raid_disks);
2787 }
2788
2789 /*
2790 * possibly return codes
2791 */
2792 autorun_devices(0);
2793 return 0;
2794
2795}
2796
2797
2798static int get_version(void __user * arg)
2799{
2800 mdu_version_t ver;
2801
2802 ver.major = MD_MAJOR_VERSION;
2803 ver.minor = MD_MINOR_VERSION;
2804 ver.patchlevel = MD_PATCHLEVEL_VERSION;
2805
2806 if (copy_to_user(arg, &ver, sizeof(ver)))
2807 return -EFAULT;
2808
2809 return 0;
2810}
2811
2812static int get_array_info(mddev_t * mddev, void __user * arg)
2813{
2814 mdu_array_info_t info;
2815 int nr,working,active,failed,spare;
2816 mdk_rdev_t *rdev;
2817 struct list_head *tmp;
2818
2819 nr=working=active=failed=spare=0;
2820 ITERATE_RDEV(mddev,rdev,tmp) {
2821 nr++;
b2d444d7 2822 if (test_bit(Faulty, &rdev->flags))
1da177e4
LT
2823 failed++;
2824 else {
2825 working++;
b2d444d7 2826 if (test_bit(In_sync, &rdev->flags))
1da177e4
LT
2827 active++;
2828 else
2829 spare++;
2830 }
2831 }
2832
2833 info.major_version = mddev->major_version;
2834 info.minor_version = mddev->minor_version;
2835 info.patch_version = MD_PATCHLEVEL_VERSION;
2836 info.ctime = mddev->ctime;
2837 info.level = mddev->level;
2838 info.size = mddev->size;
2839 info.nr_disks = nr;
2840 info.raid_disks = mddev->raid_disks;
2841 info.md_minor = mddev->md_minor;
2842 info.not_persistent= !mddev->persistent;
2843
2844 info.utime = mddev->utime;
2845 info.state = 0;
2846 if (mddev->in_sync)
2847 info.state = (1<<MD_SB_CLEAN);
36fa3063
N
2848 if (mddev->bitmap && mddev->bitmap_offset)
2849 info.state = (1<<MD_SB_BITMAP_PRESENT);
1da177e4
LT
2850 info.active_disks = active;
2851 info.working_disks = working;
2852 info.failed_disks = failed;
2853 info.spare_disks = spare;
2854
2855 info.layout = mddev->layout;
2856 info.chunk_size = mddev->chunk_size;
2857
2858 if (copy_to_user(arg, &info, sizeof(info)))
2859 return -EFAULT;
2860
2861 return 0;
2862}
2863
87162a28 2864static int get_bitmap_file(mddev_t * mddev, void __user * arg)
32a7627c
N
2865{
2866 mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
2867 char *ptr, *buf = NULL;
2868 int err = -ENOMEM;
2869
2870 file = kmalloc(sizeof(*file), GFP_KERNEL);
2871 if (!file)
2872 goto out;
2873
2874 /* bitmap disabled, zero the first byte and copy out */
2875 if (!mddev->bitmap || !mddev->bitmap->file) {
2876 file->pathname[0] = '\0';
2877 goto copy_out;
2878 }
2879
2880 buf = kmalloc(sizeof(file->pathname), GFP_KERNEL);
2881 if (!buf)
2882 goto out;
2883
2884 ptr = file_path(mddev->bitmap->file, buf, sizeof(file->pathname));
2885 if (!ptr)
2886 goto out;
2887
2888 strcpy(file->pathname, ptr);
2889
2890copy_out:
2891 err = 0;
2892 if (copy_to_user(arg, file, sizeof(*file)))
2893 err = -EFAULT;
2894out:
2895 kfree(buf);
2896 kfree(file);
2897 return err;
2898}
2899
1da177e4
LT
2900static int get_disk_info(mddev_t * mddev, void __user * arg)
2901{
2902 mdu_disk_info_t info;
2903 unsigned int nr;
2904 mdk_rdev_t *rdev;
2905
2906 if (copy_from_user(&info, arg, sizeof(info)))
2907 return -EFAULT;
2908
2909 nr = info.number;
2910
2911 rdev = find_rdev_nr(mddev, nr);
2912 if (rdev) {
2913 info.major = MAJOR(rdev->bdev->bd_dev);
2914 info.minor = MINOR(rdev->bdev->bd_dev);
2915 info.raid_disk = rdev->raid_disk;
2916 info.state = 0;
b2d444d7 2917 if (test_bit(Faulty, &rdev->flags))
1da177e4 2918 info.state |= (1<<MD_DISK_FAULTY);
b2d444d7 2919 else if (test_bit(In_sync, &rdev->flags)) {
1da177e4
LT
2920 info.state |= (1<<MD_DISK_ACTIVE);
2921 info.state |= (1<<MD_DISK_SYNC);
2922 }
8ddf9efe
N
2923 if (test_bit(WriteMostly, &rdev->flags))
2924 info.state |= (1<<MD_DISK_WRITEMOSTLY);
1da177e4
LT
2925 } else {
2926 info.major = info.minor = 0;
2927 info.raid_disk = -1;
2928 info.state = (1<<MD_DISK_REMOVED);
2929 }
2930
2931 if (copy_to_user(arg, &info, sizeof(info)))
2932 return -EFAULT;
2933
2934 return 0;
2935}
2936
2937static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
2938{
2939 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
2940 mdk_rdev_t *rdev;
2941 dev_t dev = MKDEV(info->major,info->minor);
2942
2943 if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
2944 return -EOVERFLOW;
2945
2946 if (!mddev->raid_disks) {
2947 int err;
2948 /* expecting a device which has a superblock */
2949 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
2950 if (IS_ERR(rdev)) {
2951 printk(KERN_WARNING
2952 "md: md_import_device returned %ld\n",
2953 PTR_ERR(rdev));
2954 return PTR_ERR(rdev);
2955 }
2956 if (!list_empty(&mddev->disks)) {
2957 mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
2958 mdk_rdev_t, same_set);
2959 int err = super_types[mddev->major_version]
2960 .load_super(rdev, rdev0, mddev->minor_version);
2961 if (err < 0) {
2962 printk(KERN_WARNING
2963 "md: %s has different UUID to %s\n",
2964 bdevname(rdev->bdev,b),
2965 bdevname(rdev0->bdev,b2));
2966 export_rdev(rdev);
2967 return -EINVAL;
2968 }
2969 }
2970 err = bind_rdev_to_array(rdev, mddev);
2971 if (err)
2972 export_rdev(rdev);
2973 return err;
2974 }
2975
2976 /*
2977 * add_new_disk can be used once the array is assembled
2978 * to add "hot spares". They must already have a superblock
2979 * written
2980 */
2981 if (mddev->pers) {
2982 int err;
2983 if (!mddev->pers->hot_add_disk) {
2984 printk(KERN_WARNING
2985 "%s: personality does not support diskops!\n",
2986 mdname(mddev));
2987 return -EINVAL;
2988 }
7b1e35f6
N
2989 if (mddev->persistent)
2990 rdev = md_import_device(dev, mddev->major_version,
2991 mddev->minor_version);
2992 else
2993 rdev = md_import_device(dev, -1, -1);
1da177e4
LT
2994 if (IS_ERR(rdev)) {
2995 printk(KERN_WARNING
2996 "md: md_import_device returned %ld\n",
2997 PTR_ERR(rdev));
2998 return PTR_ERR(rdev);
2999 }
41158c7e
N
3000 /* set save_raid_disk if appropriate */
3001 if (!mddev->persistent) {
3002 if (info->state & (1<<MD_DISK_SYNC) &&
3003 info->raid_disk < mddev->raid_disks)
3004 rdev->raid_disk = info->raid_disk;
3005 else
3006 rdev->raid_disk = -1;
3007 } else
3008 super_types[mddev->major_version].
3009 validate_super(mddev, rdev);
3010 rdev->saved_raid_disk = rdev->raid_disk;
3011
b2d444d7 3012 clear_bit(In_sync, &rdev->flags); /* just to be sure */
8ddf9efe
N
3013 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
3014 set_bit(WriteMostly, &rdev->flags);
3015
1da177e4
LT
3016 rdev->raid_disk = -1;
3017 err = bind_rdev_to_array(rdev, mddev);
3018 if (err)
3019 export_rdev(rdev);
c361777f
N
3020
3021 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
005eca5e 3022 md_wakeup_thread(mddev->thread);
1da177e4
LT
3023 return err;
3024 }
3025
3026 /* otherwise, add_new_disk is only allowed
3027 * for major_version==0 superblocks
3028 */
3029 if (mddev->major_version != 0) {
3030 printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n",
3031 mdname(mddev));
3032 return -EINVAL;
3033 }
3034
3035 if (!(info->state & (1<<MD_DISK_FAULTY))) {
3036 int err;
3037 rdev = md_import_device (dev, -1, 0);
3038 if (IS_ERR(rdev)) {
3039 printk(KERN_WARNING
3040 "md: error, md_import_device() returned %ld\n",
3041 PTR_ERR(rdev));
3042 return PTR_ERR(rdev);
3043 }
3044 rdev->desc_nr = info->number;
3045 if (info->raid_disk < mddev->raid_disks)
3046 rdev->raid_disk = info->raid_disk;
3047 else
3048 rdev->raid_disk = -1;
3049
b2d444d7
N
3050 rdev->flags = 0;
3051
1da177e4 3052 if (rdev->raid_disk < mddev->raid_disks)
b2d444d7
N
3053 if (info->state & (1<<MD_DISK_SYNC))
3054 set_bit(In_sync, &rdev->flags);
1da177e4 3055
8ddf9efe
N
3056 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
3057 set_bit(WriteMostly, &rdev->flags);
3058
1da177e4
LT
3059 if (!mddev->persistent) {
3060 printk(KERN_INFO "md: nonpersistent superblock ...\n");
3061 rdev->sb_offset = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
3062 } else
3063 rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
3064 rdev->size = calc_dev_size(rdev, mddev->chunk_size);
3065
2bf071bf
N
3066 err = bind_rdev_to_array(rdev, mddev);
3067 if (err) {
3068 export_rdev(rdev);
3069 return err;
3070 }
1da177e4
LT
3071 }
3072
3073 return 0;
3074}
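/*
 * User-space sketch (illustrative only, error handling omitted): adding the
 * device at 8:17 as a hot spare to a running array would look roughly like
 *
 *     mdu_disk_info_t info = { 0 };
 *     info.major = 8;
 *     info.minor = 17;
 *     info.raid_disk = -1;
 *     int fd = open("/dev/md0", O_RDWR);
 *     ioctl(fd, ADD_NEW_DISK, &info);
 *
 * which reaches this function with mddev->pers set.
 */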
3075
3076static int hot_remove_disk(mddev_t * mddev, dev_t dev)
3077{
3078 char b[BDEVNAME_SIZE];
3079 mdk_rdev_t *rdev;
3080
3081 if (!mddev->pers)
3082 return -ENODEV;
3083
3084 rdev = find_rdev(mddev, dev);
3085 if (!rdev)
3086 return -ENXIO;
3087
3088 if (rdev->raid_disk >= 0)
3089 goto busy;
3090
3091 kick_rdev_from_array(rdev);
3092 md_update_sb(mddev);
d7603b7e 3093 md_new_event(mddev);
1da177e4
LT
3094
3095 return 0;
3096busy:
3097 printk(KERN_WARNING "md: cannot remove active disk %s from %s ... \n",
3098 bdevname(rdev->bdev,b), mdname(mddev));
3099 return -EBUSY;
3100}
3101
3102static int hot_add_disk(mddev_t * mddev, dev_t dev)
3103{
3104 char b[BDEVNAME_SIZE];
3105 int err;
3106 unsigned int size;
3107 mdk_rdev_t *rdev;
3108
3109 if (!mddev->pers)
3110 return -ENODEV;
3111
3112 if (mddev->major_version != 0) {
3113 printk(KERN_WARNING "%s: HOT_ADD may only be used with"
3114 " version-0 superblocks.\n",
3115 mdname(mddev));
3116 return -EINVAL;
3117 }
3118 if (!mddev->pers->hot_add_disk) {
3119 printk(KERN_WARNING
3120 "%s: personality does not support diskops!\n",
3121 mdname(mddev));
3122 return -EINVAL;
3123 }
3124
3125 rdev = md_import_device (dev, -1, 0);
3126 if (IS_ERR(rdev)) {
3127 printk(KERN_WARNING
3128 "md: error, md_import_device() returned %ld\n",
3129 PTR_ERR(rdev));
3130 return -EINVAL;
3131 }
3132
3133 if (mddev->persistent)
3134 rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
3135 else
3136 rdev->sb_offset =
3137 rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
3138
3139 size = calc_dev_size(rdev, mddev->chunk_size);
3140 rdev->size = size;
3141
b2d444d7 3142 if (test_bit(Faulty, &rdev->flags)) {
1da177e4
LT
3143 printk(KERN_WARNING
3144 "md: can not hot-add faulty %s disk to %s!\n",
3145 bdevname(rdev->bdev,b), mdname(mddev));
3146 err = -EINVAL;
3147 goto abort_export;
3148 }
b2d444d7 3149 clear_bit(In_sync, &rdev->flags);
1da177e4 3150 rdev->desc_nr = -1;
2bf071bf
N
3151 err = bind_rdev_to_array(rdev, mddev);
3152 if (err)
3153 goto abort_export;
1da177e4
LT
3154
3155 /*
3156 * The rest should better be atomic, we can have disk failures
3157 * noticed in interrupt contexts ...
3158 */
3159
3160 if (rdev->desc_nr == mddev->max_disks) {
3161 printk(KERN_WARNING "%s: can not hot-add to full array!\n",
3162 mdname(mddev));
3163 err = -EBUSY;
3164 goto abort_unbind_export;
3165 }
3166
3167 rdev->raid_disk = -1;
3168
3169 md_update_sb(mddev);
3170
3171 /*
3172 * Kick recovery, maybe this spare has to be added to the
3173 * array immediately.
3174 */
3175 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3176 md_wakeup_thread(mddev->thread);
d7603b7e 3177 md_new_event(mddev);
1da177e4
LT
3178 return 0;
3179
3180abort_unbind_export:
3181 unbind_rdev_from_array(rdev);
3182
3183abort_export:
3184 export_rdev(rdev);
3185 return err;
3186}
3187
32a7627c
N
3188/* similar to deny_write_access, but accounts for our holding a reference
3189 * to the file ourselves */
3190static int deny_bitmap_write_access(struct file * file)
3191{
3192 struct inode *inode = file->f_mapping->host;
3193
3194 spin_lock(&inode->i_lock);
3195 if (atomic_read(&inode->i_writecount) > 1) {
3196 spin_unlock(&inode->i_lock);
3197 return -ETXTBSY;
3198 }
3199 atomic_set(&inode->i_writecount, -1);
3200 spin_unlock(&inode->i_lock);
3201
3202 return 0;
3203}
3204
3205static int set_bitmap_file(mddev_t *mddev, int fd)
3206{
3207 int err;
3208
36fa3063
N
3209 if (mddev->pers) {
3210 if (!mddev->pers->quiesce)
3211 return -EBUSY;
3212 if (mddev->recovery || mddev->sync_thread)
3213 return -EBUSY;
3214 /* we should be able to change the bitmap.. */
3215 }
32a7627c 3216
32a7627c 3217
36fa3063
N
3218 if (fd >= 0) {
3219 if (mddev->bitmap)
3220 return -EEXIST; /* cannot add when bitmap is present */
3221 mddev->bitmap_file = fget(fd);
32a7627c 3222
36fa3063
N
3223 if (mddev->bitmap_file == NULL) {
3224 printk(KERN_ERR "%s: error: failed to get bitmap file\n",
3225 mdname(mddev));
3226 return -EBADF;
3227 }
3228
3229 err = deny_bitmap_write_access(mddev->bitmap_file);
3230 if (err) {
3231 printk(KERN_ERR "%s: error: bitmap file is already in use\n",
3232 mdname(mddev));
3233 fput(mddev->bitmap_file);
3234 mddev->bitmap_file = NULL;
3235 return err;
3236 }
a654b9d8 3237 mddev->bitmap_offset = 0; /* file overrides offset */
36fa3063
N
3238 } else if (mddev->bitmap == NULL)
3239 return -ENOENT; /* cannot remove what isn't there */
3240 err = 0;
3241 if (mddev->pers) {
3242 mddev->pers->quiesce(mddev, 1);
3243 if (fd >= 0)
3244 err = bitmap_create(mddev);
3245 if (fd < 0 || err)
3246 bitmap_destroy(mddev);
3247 mddev->pers->quiesce(mddev, 0);
3248 } else if (fd < 0) {
3249 if (mddev->bitmap_file)
3250 fput(mddev->bitmap_file);
3251 mddev->bitmap_file = NULL;
3252 }
3253
32a7627c
N
3254 return err;
3255}
3256
1da177e4
LT
3257/*
 3258 * set_array_info is used in two different ways
3259 * The original usage is when creating a new array.
3260 * In this usage, raid_disks is > 0 and it together with
 3261 * level, size, not_persistent, layout, chunk_size determine the
3262 * shape of the array.
3263 * This will always create an array with a type-0.90.0 superblock.
3264 * The newer usage is when assembling an array.
3265 * In this case raid_disks will be 0, and the major_version field is
 3266 * used to determine which style super-blocks are to be found on the devices.
 3267 * The minor and patch _version numbers are also kept in case the
3268 * super_block handler wishes to interpret them.
3269 */
3270static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
3271{
3272
3273 if (info->raid_disks == 0) {
3274 /* just setting version number for superblock loading */
3275 if (info->major_version < 0 ||
3276 info->major_version >= sizeof(super_types)/sizeof(super_types[0]) ||
3277 super_types[info->major_version].name == NULL) {
3278 /* maybe try to auto-load a module? */
3279 printk(KERN_INFO
3280 "md: superblock version %d not known\n",
3281 info->major_version);
3282 return -EINVAL;
3283 }
3284 mddev->major_version = info->major_version;
3285 mddev->minor_version = info->minor_version;
3286 mddev->patch_version = info->patch_version;
3287 return 0;
3288 }
3289 mddev->major_version = MD_MAJOR_VERSION;
3290 mddev->minor_version = MD_MINOR_VERSION;
3291 mddev->patch_version = MD_PATCHLEVEL_VERSION;
3292 mddev->ctime = get_seconds();
3293
3294 mddev->level = info->level;
3295 mddev->size = info->size;
3296 mddev->raid_disks = info->raid_disks;
3297 /* don't set md_minor, it is determined by which /dev/md* was
 3298 * opened
3299 */
3300 if (info->state & (1<<MD_SB_CLEAN))
3301 mddev->recovery_cp = MaxSector;
3302 else
3303 mddev->recovery_cp = 0;
3304 mddev->persistent = ! info->not_persistent;
3305
3306 mddev->layout = info->layout;
3307 mddev->chunk_size = info->chunk_size;
3308
3309 mddev->max_disks = MD_SB_DISKS;
3310
3311 mddev->sb_dirty = 1;
3312
b2a2703c
N
3313 mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
3314 mddev->bitmap_offset = 0;
3315
1da177e4
LT
3316 /*
3317 * Generate a 128 bit UUID
3318 */
3319 get_random_bytes(mddev->uuid, 16);
3320
3321 return 0;
3322}
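/*
 * User-space sketch (illustrative only): the "assemble" form described in
 * the comment above only pins the superblock version, e.g. with fd open on
 * /dev/md0:
 *
 *     mdu_array_info_t info = { 0 };
 *     info.major_version = 0;
 *     info.minor_version = 90;
 *     ioctl(fd, SET_ARRAY_INFO, &info);
 *
 * Because raid_disks stays 0, the remaining fields are later filled in from
 * the superblocks of the devices passed to ADD_NEW_DISK.
 */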
3323
a35b0d69
N
3324static int update_size(mddev_t *mddev, unsigned long size)
3325{
3326 mdk_rdev_t * rdev;
3327 int rv;
3328 struct list_head *tmp;
3329
3330 if (mddev->pers->resize == NULL)
3331 return -EINVAL;
3332 /* The "size" is the amount of each device that is used.
3333 * This can only make sense for arrays with redundancy.
3334 * linear and raid0 always use whatever space is available
3335 * We can only consider changing the size if no resync
3336 * or reconstruction is happening, and if the new size
3337 * is acceptable. It must fit before the sb_offset or,
3338 * if that is <data_offset, it must fit before the
3339 * size of each device.
3340 * If size is zero, we find the largest size that fits.
3341 */
3342 if (mddev->sync_thread)
3343 return -EBUSY;
3344 ITERATE_RDEV(mddev,rdev,tmp) {
3345 sector_t avail;
3346 int fit = (size == 0);
3347 if (rdev->sb_offset > rdev->data_offset)
3348 avail = (rdev->sb_offset*2) - rdev->data_offset;
3349 else
3350 avail = get_capacity(rdev->bdev->bd_disk)
3351 - rdev->data_offset;
3352 if (fit && (size == 0 || size > avail/2))
3353 size = avail/2;
3354 if (avail < ((sector_t)size << 1))
3355 return -ENOSPC;
3356 }
3357 rv = mddev->pers->resize(mddev, (sector_t)size *2);
3358 if (!rv) {
3359 struct block_device *bdev;
3360
3361 bdev = bdget_disk(mddev->gendisk, 0);
3362 if (bdev) {
3363 down(&bdev->bd_inode->i_sem);
3364 i_size_write(bdev->bd_inode, mddev->array_size << 10);
3365 up(&bdev->bd_inode->i_sem);
3366 bdput(bdev);
3367 }
3368 }
3369 return rv;
3370}
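/*
 * Note on units (restating the code above): "size" is the per-component
 * size in 1K blocks, while rdev offsets and pers->resize() work in
 * 512-byte sectors, hence the "size << 1" and "avail/2" conversions.
 */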
3371
da943b99
N
3372static int update_raid_disks(mddev_t *mddev, int raid_disks)
3373{
3374 int rv;
3375 /* change the number of raid disks */
3376 if (mddev->pers->reshape == NULL)
3377 return -EINVAL;
3378 if (raid_disks <= 0 ||
3379 raid_disks >= mddev->max_disks)
3380 return -EINVAL;
3381 if (mddev->sync_thread)
3382 return -EBUSY;
3383 rv = mddev->pers->reshape(mddev, raid_disks);
3384 if (!rv) {
3385 struct block_device *bdev;
3386
3387 bdev = bdget_disk(mddev->gendisk, 0);
3388 if (bdev) {
3389 down(&bdev->bd_inode->i_sem);
3390 i_size_write(bdev->bd_inode, mddev->array_size << 10);
3391 up(&bdev->bd_inode->i_sem);
3392 bdput(bdev);
3393 }
3394 }
3395 return rv;
3396}
3397
3398
1da177e4
LT
3399/*
3400 * update_array_info is used to change the configuration of an
3401 * on-line array.
 3402 * The version, ctime, level, size, raid_disks, not_persistent, layout, chunk_size
3403 * fields in the info are checked against the array.
3404 * Any differences that cannot be handled will cause an error.
3405 * Normally, only one change can be managed at a time.
3406 */
3407static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
3408{
3409 int rv = 0;
3410 int cnt = 0;
36fa3063
N
3411 int state = 0;
3412
 3413 /* calculate expected state, ignoring low bits */
3414 if (mddev->bitmap && mddev->bitmap_offset)
3415 state |= (1 << MD_SB_BITMAP_PRESENT);
1da177e4
LT
3416
3417 if (mddev->major_version != info->major_version ||
3418 mddev->minor_version != info->minor_version ||
3419/* mddev->patch_version != info->patch_version || */
3420 mddev->ctime != info->ctime ||
3421 mddev->level != info->level ||
3422/* mddev->layout != info->layout || */
3423 !mddev->persistent != info->not_persistent||
36fa3063
N
3424 mddev->chunk_size != info->chunk_size ||
3425 /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
3426 ((state^info->state) & 0xfffffe00)
3427 )
1da177e4
LT
3428 return -EINVAL;
3429 /* Check there is only one change */
3430 if (mddev->size != info->size) cnt++;
3431 if (mddev->raid_disks != info->raid_disks) cnt++;
3432 if (mddev->layout != info->layout) cnt++;
36fa3063 3433 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) cnt++;
1da177e4
LT
3434 if (cnt == 0) return 0;
3435 if (cnt > 1) return -EINVAL;
3436
3437 if (mddev->layout != info->layout) {
3438 /* Change layout
3439 * we don't need to do anything at the md level, the
3440 * personality will take care of it all.
3441 */
3442 if (mddev->pers->reconfig == NULL)
3443 return -EINVAL;
3444 else
3445 return mddev->pers->reconfig(mddev, info->layout, -1);
3446 }
a35b0d69
N
3447 if (mddev->size != info->size)
3448 rv = update_size(mddev, info->size);
3449
da943b99
N
3450 if (mddev->raid_disks != info->raid_disks)
3451 rv = update_raid_disks(mddev, info->raid_disks);
3452
36fa3063
N
3453 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
3454 if (mddev->pers->quiesce == NULL)
3455 return -EINVAL;
3456 if (mddev->recovery || mddev->sync_thread)
3457 return -EBUSY;
3458 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
3459 /* add the bitmap */
3460 if (mddev->bitmap)
3461 return -EEXIST;
3462 if (mddev->default_bitmap_offset == 0)
3463 return -EINVAL;
3464 mddev->bitmap_offset = mddev->default_bitmap_offset;
3465 mddev->pers->quiesce(mddev, 1);
3466 rv = bitmap_create(mddev);
3467 if (rv)
3468 bitmap_destroy(mddev);
3469 mddev->pers->quiesce(mddev, 0);
3470 } else {
3471 /* remove the bitmap */
3472 if (!mddev->bitmap)
3473 return -ENOENT;
3474 if (mddev->bitmap->file)
3475 return -EINVAL;
3476 mddev->pers->quiesce(mddev, 1);
3477 bitmap_destroy(mddev);
3478 mddev->pers->quiesce(mddev, 0);
3479 mddev->bitmap_offset = 0;
3480 }
3481 }
1da177e4
LT
3482 md_update_sb(mddev);
3483 return rv;
3484}
3485
3486static int set_disk_faulty(mddev_t *mddev, dev_t dev)
3487{
3488 mdk_rdev_t *rdev;
3489
3490 if (mddev->pers == NULL)
3491 return -ENODEV;
3492
3493 rdev = find_rdev(mddev, dev);
3494 if (!rdev)
3495 return -ENODEV;
3496
3497 md_error(mddev, rdev);
3498 return 0;
3499}
3500
3501static int md_ioctl(struct inode *inode, struct file *file,
3502 unsigned int cmd, unsigned long arg)
3503{
3504 int err = 0;
3505 void __user *argp = (void __user *)arg;
3506 struct hd_geometry __user *loc = argp;
3507 mddev_t *mddev = NULL;
3508
3509 if (!capable(CAP_SYS_ADMIN))
3510 return -EACCES;
3511
3512 /*
3513 * Commands dealing with the RAID driver but not any
3514 * particular array:
3515 */
3516 switch (cmd)
3517 {
3518 case RAID_VERSION:
3519 err = get_version(argp);
3520 goto done;
3521
3522 case PRINT_RAID_DEBUG:
3523 err = 0;
3524 md_print_devices();
3525 goto done;
3526
3527#ifndef MODULE
3528 case RAID_AUTORUN:
3529 err = 0;
3530 autostart_arrays(arg);
3531 goto done;
3532#endif
3533 default:;
3534 }
3535
3536 /*
3537 * Commands creating/starting a new array:
3538 */
3539
3540 mddev = inode->i_bdev->bd_disk->private_data;
3541
3542 if (!mddev) {
3543 BUG();
3544 goto abort;
3545 }
3546
3547
3548 if (cmd == START_ARRAY) {
3549 /* START_ARRAY doesn't need to lock the array as autostart_array
3550 * does the locking, and it could even be a different array
3551 */
3552 static int cnt = 3;
3553 if (cnt > 0 ) {
3554 printk(KERN_WARNING
3555 "md: %s(pid %d) used deprecated START_ARRAY ioctl. "
e8a00334 3556 "This will not be supported beyond July 2006\n",
1da177e4
LT
3557 current->comm, current->pid);
3558 cnt--;
3559 }
3560 err = autostart_array(new_decode_dev(arg));
3561 if (err) {
3562 printk(KERN_WARNING "md: autostart failed!\n");
3563 goto abort;
3564 }
3565 goto done;
3566 }
3567
3568 err = mddev_lock(mddev);
3569 if (err) {
3570 printk(KERN_INFO
3571 "md: ioctl lock interrupted, reason %d, cmd %d\n",
3572 err, cmd);
3573 goto abort;
3574 }
3575
3576 switch (cmd)
3577 {
3578 case SET_ARRAY_INFO:
3579 {
3580 mdu_array_info_t info;
3581 if (!arg)
3582 memset(&info, 0, sizeof(info));
3583 else if (copy_from_user(&info, argp, sizeof(info))) {
3584 err = -EFAULT;
3585 goto abort_unlock;
3586 }
3587 if (mddev->pers) {
3588 err = update_array_info(mddev, &info);
3589 if (err) {
3590 printk(KERN_WARNING "md: couldn't update"
3591 " array info. %d\n", err);
3592 goto abort_unlock;
3593 }
3594 goto done_unlock;
3595 }
3596 if (!list_empty(&mddev->disks)) {
3597 printk(KERN_WARNING
3598 "md: array %s already has disks!\n",
3599 mdname(mddev));
3600 err = -EBUSY;
3601 goto abort_unlock;
3602 }
3603 if (mddev->raid_disks) {
3604 printk(KERN_WARNING
3605 "md: array %s already initialised!\n",
3606 mdname(mddev));
3607 err = -EBUSY;
3608 goto abort_unlock;
3609 }
3610 err = set_array_info(mddev, &info);
3611 if (err) {
3612 printk(KERN_WARNING "md: couldn't set"
3613 " array info. %d\n", err);
3614 goto abort_unlock;
3615 }
3616 }
3617 goto done_unlock;
3618
3619 default:;
3620 }
3621
3622 /*
3623 * Commands querying/configuring an existing array:
3624 */
32a7627c
N
3625 /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
3626 * RUN_ARRAY, and SET_BITMAP_FILE are allowed */
3627 if (!mddev->raid_disks && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
3628 && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE) {
1da177e4
LT
3629 err = -ENODEV;
3630 goto abort_unlock;
3631 }
3632
3633 /*
3634 * Commands even a read-only array can execute:
3635 */
3636 switch (cmd)
3637 {
3638 case GET_ARRAY_INFO:
3639 err = get_array_info(mddev, argp);
3640 goto done_unlock;
3641
32a7627c 3642 case GET_BITMAP_FILE:
87162a28 3643 err = get_bitmap_file(mddev, argp);
32a7627c
N
3644 goto done_unlock;
3645
1da177e4
LT
3646 case GET_DISK_INFO:
3647 err = get_disk_info(mddev, argp);
3648 goto done_unlock;
3649
3650 case RESTART_ARRAY_RW:
3651 err = restart_array(mddev);
3652 goto done_unlock;
3653
3654 case STOP_ARRAY:
3655 err = do_md_stop (mddev, 0);
3656 goto done_unlock;
3657
3658 case STOP_ARRAY_RO:
3659 err = do_md_stop (mddev, 1);
3660 goto done_unlock;
3661
3662 /*
3663 * We have a problem here : there is no easy way to give a CHS
3664 * virtual geometry. We currently pretend that we have a 2 heads
3665 * 4 sectors (with a BIG number of cylinders...). This drives
3666 * dosfs just mad... ;-)
3667 */
3668 case HDIO_GETGEO:
3669 if (!loc) {
3670 err = -EINVAL;
3671 goto abort_unlock;
3672 }
3673 err = put_user (2, (char __user *) &loc->heads);
3674 if (err)
3675 goto abort_unlock;
3676 err = put_user (4, (char __user *) &loc->sectors);
3677 if (err)
3678 goto abort_unlock;
3679 err = put_user(get_capacity(mddev->gendisk)/8,
3680 (short __user *) &loc->cylinders);
3681 if (err)
3682 goto abort_unlock;
3683 err = put_user (get_start_sect(inode->i_bdev),
3684 (long __user *) &loc->start);
3685 goto done_unlock;
3686 }
3687
3688 /*
3689 * The remaining ioctls are changing the state of the
f91de92e
N
3690 * superblock, so we do not allow them on read-only arrays.
3691 * However non-MD ioctls (e.g. get-size) will still come through
3692 * here and hit the 'default' below, so only disallow
3693 * 'md' ioctls, and switch to rw mode if started auto-readonly.
1da177e4 3694 */
f91de92e
N
3695 if (_IOC_TYPE(cmd) == MD_MAJOR &&
3696 mddev->ro && mddev->pers) {
3697 if (mddev->ro == 2) {
3698 mddev->ro = 0;
3699 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3700 md_wakeup_thread(mddev->thread);
3701
3702 } else {
3703 err = -EROFS;
3704 goto abort_unlock;
3705 }
1da177e4
LT
3706 }
3707
3708 switch (cmd)
3709 {
3710 case ADD_NEW_DISK:
3711 {
3712 mdu_disk_info_t info;
3713 if (copy_from_user(&info, argp, sizeof(info)))
3714 err = -EFAULT;
3715 else
3716 err = add_new_disk(mddev, &info);
3717 goto done_unlock;
3718 }
3719
3720 case HOT_REMOVE_DISK:
3721 err = hot_remove_disk(mddev, new_decode_dev(arg));
3722 goto done_unlock;
3723
3724 case HOT_ADD_DISK:
3725 err = hot_add_disk(mddev, new_decode_dev(arg));
3726 goto done_unlock;
3727
3728 case SET_DISK_FAULTY:
3729 err = set_disk_faulty(mddev, new_decode_dev(arg));
3730 goto done_unlock;
3731
3732 case RUN_ARRAY:
3733 err = do_md_run (mddev);
3734 goto done_unlock;
3735
32a7627c
N
3736 case SET_BITMAP_FILE:
3737 err = set_bitmap_file(mddev, (int)arg);
3738 goto done_unlock;
3739
1da177e4
LT
3740 default:
3741 if (_IOC_TYPE(cmd) == MD_MAJOR)
3742 printk(KERN_WARNING "md: %s(pid %d) used"
3743 " obsolete MD ioctl, upgrade your"
 3744 " software to use new ioctls.\n",
3745 current->comm, current->pid);
3746 err = -EINVAL;
3747 goto abort_unlock;
3748 }
3749
3750done_unlock:
3751abort_unlock:
3752 mddev_unlock(mddev);
3753
3754 return err;
3755done:
3756 if (err)
3757 MD_BUG();
3758abort:
3759 return err;
3760}
3761
3762static int md_open(struct inode *inode, struct file *file)
3763{
3764 /*
3765 * Succeed if we can lock the mddev, which confirms that
3766 * it isn't being stopped right now.
3767 */
3768 mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
3769 int err;
3770
3771 if ((err = mddev_lock(mddev)))
3772 goto out;
3773
3774 err = 0;
3775 mddev_get(mddev);
3776 mddev_unlock(mddev);
3777
3778 check_disk_change(inode->i_bdev);
3779 out:
3780 return err;
3781}
3782
3783static int md_release(struct inode *inode, struct file * file)
3784{
3785 mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
3786
3787 if (!mddev)
3788 BUG();
3789 mddev_put(mddev);
3790
3791 return 0;
3792}
3793
3794static int md_media_changed(struct gendisk *disk)
3795{
3796 mddev_t *mddev = disk->private_data;
3797
3798 return mddev->changed;
3799}
3800
3801static int md_revalidate(struct gendisk *disk)
3802{
3803 mddev_t *mddev = disk->private_data;
3804
3805 mddev->changed = 0;
3806 return 0;
3807}
3808static struct block_device_operations md_fops =
3809{
3810 .owner = THIS_MODULE,
3811 .open = md_open,
3812 .release = md_release,
3813 .ioctl = md_ioctl,
3814 .media_changed = md_media_changed,
3815 .revalidate_disk= md_revalidate,
3816};
3817
75c96f85 3818static int md_thread(void * arg)
1da177e4
LT
3819{
3820 mdk_thread_t *thread = arg;
3821
1da177e4
LT
3822 /*
 3823 * md_thread is a 'system-thread', its priority should be very
3824 * high. We avoid resource deadlocks individually in each
3825 * raid personality. (RAID5 does preallocation) We also use RR and
3826 * the very same RT priority as kswapd, thus we will never get
3827 * into a priority inversion deadlock.
3828 *
3829 * we definitely have to have equal or higher priority than
3830 * bdflush, otherwise bdflush will deadlock if there are too
3831 * many dirty RAID5 blocks.
3832 */
1da177e4 3833
6985c43f 3834 allow_signal(SIGKILL);
a6fb0934 3835 while (!kthread_should_stop()) {
1da177e4 3836
93588e22
N
3837 /* We need to wait INTERRUPTIBLE so that
3838 * we don't add to the load-average.
3839 * That means we need to be sure no signals are
3840 * pending
3841 */
3842 if (signal_pending(current))
3843 flush_signals(current);
3844
3845 wait_event_interruptible_timeout
3846 (thread->wqueue,
3847 test_bit(THREAD_WAKEUP, &thread->flags)
3848 || kthread_should_stop(),
3849 thread->timeout);
3e1d1d28 3850 try_to_freeze();
1da177e4
LT
3851
3852 clear_bit(THREAD_WAKEUP, &thread->flags);
3853
787453c2 3854 thread->run(thread->mddev);
1da177e4 3855 }
a6fb0934 3856
1da177e4
LT
3857 return 0;
3858}
3859
3860void md_wakeup_thread(mdk_thread_t *thread)
3861{
3862 if (thread) {
3863 dprintk("md: waking up MD thread %s.\n", thread->tsk->comm);
3864 set_bit(THREAD_WAKEUP, &thread->flags);
3865 wake_up(&thread->wqueue);
3866 }
3867}
3868
3869mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
3870 const char *name)
3871{
3872 mdk_thread_t *thread;
1da177e4 3873
9ffae0cf 3874 thread = kzalloc(sizeof(mdk_thread_t), GFP_KERNEL);
1da177e4
LT
3875 if (!thread)
3876 return NULL;
3877
1da177e4
LT
3878 init_waitqueue_head(&thread->wqueue);
3879
1da177e4
LT
3880 thread->run = run;
3881 thread->mddev = mddev;
32a7627c 3882 thread->timeout = MAX_SCHEDULE_TIMEOUT;
6985c43f 3883 thread->tsk = kthread_run(md_thread, thread, name, mdname(thread->mddev));
a6fb0934 3884 if (IS_ERR(thread->tsk)) {
1da177e4
LT
3885 kfree(thread);
3886 return NULL;
3887 }
1da177e4
LT
3888 return thread;
3889}
3890
1da177e4
LT
3891void md_unregister_thread(mdk_thread_t *thread)
3892{
d28446fe 3893 dprintk("interrupting MD-thread pid %d\n", thread->tsk->pid);
a6fb0934
N
3894
3895 kthread_stop(thread->tsk);
1da177e4
LT
3896 kfree(thread);
3897}
3898
3899void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
3900{
3901 if (!mddev) {
3902 MD_BUG();
3903 return;
3904 }
3905
b2d444d7 3906 if (!rdev || test_bit(Faulty, &rdev->flags))
1da177e4 3907 return;
32a7627c 3908/*
1da177e4
LT
3909 dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n",
3910 mdname(mddev),
3911 MAJOR(rdev->bdev->bd_dev), MINOR(rdev->bdev->bd_dev),
3912 __builtin_return_address(0),__builtin_return_address(1),
3913 __builtin_return_address(2),__builtin_return_address(3));
32a7627c 3914*/
1da177e4
LT
3915 if (!mddev->pers->error_handler)
3916 return;
3917 mddev->pers->error_handler(mddev,rdev);
3918 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
3919 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3920 md_wakeup_thread(mddev->thread);
d7603b7e 3921 md_new_event(mddev);
1da177e4
LT
3922}
3923
3924/* seq_file implementation /proc/mdstat */
3925
3926static void status_unused(struct seq_file *seq)
3927{
3928 int i = 0;
3929 mdk_rdev_t *rdev;
3930 struct list_head *tmp;
3931
3932 seq_printf(seq, "unused devices: ");
3933
3934 ITERATE_RDEV_PENDING(rdev,tmp) {
3935 char b[BDEVNAME_SIZE];
3936 i++;
3937 seq_printf(seq, "%s ",
3938 bdevname(rdev->bdev,b));
3939 }
3940 if (!i)
3941 seq_printf(seq, "<none>");
3942
3943 seq_printf(seq, "\n");
3944}
3945
3946
3947static void status_resync(struct seq_file *seq, mddev_t * mddev)
3948{
3949 unsigned long max_blocks, resync, res, dt, db, rt;
3950
3951 resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active))/2;
3952
3953 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
3954 max_blocks = mddev->resync_max_sectors >> 1;
3955 else
3956 max_blocks = mddev->size;
3957
3958 /*
3959 * Should not happen.
3960 */
3961 if (!max_blocks) {
3962 MD_BUG();
3963 return;
3964 }
3965 res = (resync/1024)*1000/(max_blocks/1024 + 1);
3966 {
3967 int i, x = res/50, y = 20-x;
3968 seq_printf(seq, "[");
3969 for (i = 0; i < x; i++)
3970 seq_printf(seq, "=");
3971 seq_printf(seq, ">");
3972 for (i = 0; i < y; i++)
3973 seq_printf(seq, ".");
3974 seq_printf(seq, "] ");
3975 }
3976 seq_printf(seq, " %s =%3lu.%lu%% (%lu/%lu)",
3977 (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
3978 "resync" : "recovery"),
3979 res/10, res % 10, resync, max_blocks);
3980
3981 /*
3982 * We do not want to overflow, so the order of operands and
3983 * the * 100 / 100 trick are important. We do a +1 to be
3984 * safe against division by zero. We only estimate anyway.
3985 *
3986 * dt: time from mark until now
3987 * db: blocks written from mark until now
3988 * rt: remaining time
3989 */
3990 dt = ((jiffies - mddev->resync_mark) / HZ);
3991 if (!dt) dt++;
3992 db = resync - (mddev->resync_mark_cnt/2);
3993 rt = (dt * ((max_blocks-resync) / (db/100+1)))/100;
3994
3995 seq_printf(seq, " finish=%lu.%lumin", rt / 60, (rt % 60)/6);
3996
3997 seq_printf(seq, " speed=%ldK/sec", db/dt);
3998}
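/*
 * Worked example of the estimate above (illustrative numbers only):
 * with max_blocks = 1000000, resync = 250000, a mark taken 60 seconds
 * ago (dt = 60) and db = 30000 blocks written since that mark:
 *	rt = (60 * ((1000000 - 250000) / (30000/100 + 1))) / 100 = 1494
 * which is printed as "finish=24.9min", and speed = db/dt = 500K/sec.
 */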
3999
4000static void *md_seq_start(struct seq_file *seq, loff_t *pos)
4001{
4002 struct list_head *tmp;
4003 loff_t l = *pos;
4004 mddev_t *mddev;
4005
4006 if (l >= 0x10000)
4007 return NULL;
4008 if (!l--)
4009 /* header */
4010 return (void*)1;
4011
4012 spin_lock(&all_mddevs_lock);
4013 list_for_each(tmp,&all_mddevs)
4014 if (!l--) {
4015 mddev = list_entry(tmp, mddev_t, all_mddevs);
4016 mddev_get(mddev);
4017 spin_unlock(&all_mddevs_lock);
4018 return mddev;
4019 }
4020 spin_unlock(&all_mddevs_lock);
4021 if (!l--)
4022 return (void*)2;/* tail */
4023 return NULL;
4024}
4025
4026static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4027{
4028 struct list_head *tmp;
4029 mddev_t *next_mddev, *mddev = v;
4030
4031 ++*pos;
4032 if (v == (void*)2)
4033 return NULL;
4034
4035 spin_lock(&all_mddevs_lock);
4036 if (v == (void*)1)
4037 tmp = all_mddevs.next;
4038 else
4039 tmp = mddev->all_mddevs.next;
4040 if (tmp != &all_mddevs)
4041 next_mddev = mddev_get(list_entry(tmp,mddev_t,all_mddevs));
4042 else {
4043 next_mddev = (void*)2;
4044 *pos = 0x10000;
4045 }
4046 spin_unlock(&all_mddevs_lock);
4047
4048 if (v != (void*)1)
4049 mddev_put(mddev);
4050 return next_mddev;
4051
4052}
4053
4054static void md_seq_stop(struct seq_file *seq, void *v)
4055{
4056 mddev_t *mddev = v;
4057
4058 if (mddev && v != (void*)1 && v != (void*)2)
4059 mddev_put(mddev);
4060}
4061
d7603b7e
N
4062struct mdstat_info {
4063 int event;
4064};
4065
1da177e4
LT
4066static int md_seq_show(struct seq_file *seq, void *v)
4067{
4068 mddev_t *mddev = v;
4069 sector_t size;
4070 struct list_head *tmp2;
4071 mdk_rdev_t *rdev;
d7603b7e 4072 struct mdstat_info *mi = seq->private;
32a7627c 4073 struct bitmap *bitmap;
1da177e4
LT
4074
4075 if (v == (void*)1) {
2604b703 4076 struct mdk_personality *pers;
1da177e4
LT
4077 seq_printf(seq, "Personalities : ");
4078 spin_lock(&pers_lock);
2604b703
N
4079 list_for_each_entry(pers, &pers_list, list)
4080 seq_printf(seq, "[%s] ", pers->name);
1da177e4
LT
4081
4082 spin_unlock(&pers_lock);
4083 seq_printf(seq, "\n");
d7603b7e 4084 mi->event = atomic_read(&md_event_count);
1da177e4
LT
4085 return 0;
4086 }
4087 if (v == (void*)2) {
4088 status_unused(seq);
4089 return 0;
4090 }
4091
4092 if (mddev_lock(mddev)!=0)
4093 return -EINTR;
4094 if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
4095 seq_printf(seq, "%s : %sactive", mdname(mddev),
4096 mddev->pers ? "" : "in");
4097 if (mddev->pers) {
f91de92e 4098 if (mddev->ro==1)
1da177e4 4099 seq_printf(seq, " (read-only)");
f91de92e
N
4100 if (mddev->ro==2)
4101 seq_printf(seq, "(auto-read-only)");
1da177e4
LT
4102 seq_printf(seq, " %s", mddev->pers->name);
4103 }
4104
4105 size = 0;
4106 ITERATE_RDEV(mddev,rdev,tmp2) {
4107 char b[BDEVNAME_SIZE];
4108 seq_printf(seq, " %s[%d]",
4109 bdevname(rdev->bdev,b), rdev->desc_nr);
8ddf9efe
N
4110 if (test_bit(WriteMostly, &rdev->flags))
4111 seq_printf(seq, "(W)");
b2d444d7 4112 if (test_bit(Faulty, &rdev->flags)) {
1da177e4
LT
4113 seq_printf(seq, "(F)");
4114 continue;
b325a32e
N
4115 } else if (rdev->raid_disk < 0)
4116 seq_printf(seq, "(S)"); /* spare */
1da177e4
LT
4117 size += rdev->size;
4118 }
4119
4120 if (!list_empty(&mddev->disks)) {
4121 if (mddev->pers)
4122 seq_printf(seq, "\n %llu blocks",
4123 (unsigned long long)mddev->array_size);
4124 else
4125 seq_printf(seq, "\n %llu blocks",
4126 (unsigned long long)size);
4127 }
1cd6bf19
N
4128 if (mddev->persistent) {
4129 if (mddev->major_version != 0 ||
4130 mddev->minor_version != 90) {
4131 seq_printf(seq," super %d.%d",
4132 mddev->major_version,
4133 mddev->minor_version);
4134 }
4135 } else
4136 seq_printf(seq, " super non-persistent");
1da177e4
LT
4137
4138 if (mddev->pers) {
4139 mddev->pers->status (seq, mddev);
4140 seq_printf(seq, "\n ");
8e1b39d6
N
4141 if (mddev->pers->sync_request) {
4142 if (mddev->curr_resync > 2) {
4143 status_resync (seq, mddev);
4144 seq_printf(seq, "\n ");
4145 } else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
4146 seq_printf(seq, "\tresync=DELAYED\n ");
4147 else if (mddev->recovery_cp < MaxSector)
4148 seq_printf(seq, "\tresync=PENDING\n ");
4149 }
32a7627c
N
4150 } else
4151 seq_printf(seq, "\n ");
4152
4153 if ((bitmap = mddev->bitmap)) {
32a7627c
N
4154 unsigned long chunk_kb;
4155 unsigned long flags;
32a7627c
N
4156 spin_lock_irqsave(&bitmap->lock, flags);
4157 chunk_kb = bitmap->chunksize >> 10;
4158 seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
4159 "%lu%s chunk",
4160 bitmap->pages - bitmap->missing_pages,
4161 bitmap->pages,
4162 (bitmap->pages - bitmap->missing_pages)
4163 << (PAGE_SHIFT - 10),
4164 chunk_kb ? chunk_kb : bitmap->chunksize,
4165 chunk_kb ? "KB" : "B");
78d742d8
N
4166 if (bitmap->file) {
4167 seq_printf(seq, ", file: ");
4168 seq_path(seq, bitmap->file->f_vfsmnt,
4169 bitmap->file->f_dentry," \t\n");
32a7627c 4170 }
78d742d8 4171
32a7627c
N
4172 seq_printf(seq, "\n");
4173 spin_unlock_irqrestore(&bitmap->lock, flags);
1da177e4
LT
4174 }
4175
4176 seq_printf(seq, "\n");
4177 }
4178 mddev_unlock(mddev);
4179
4180 return 0;
4181}
4182
4183static struct seq_operations md_seq_ops = {
4184 .start = md_seq_start,
4185 .next = md_seq_next,
4186 .stop = md_seq_stop,
4187 .show = md_seq_show,
4188};
4189
4190static int md_seq_open(struct inode *inode, struct file *file)
4191{
4192 int error;
d7603b7e
N
4193 struct mdstat_info *mi = kmalloc(sizeof(*mi), GFP_KERNEL);
4194 if (mi == NULL)
4195 return -ENOMEM;
1da177e4
LT
4196
4197 error = seq_open(file, &md_seq_ops);
d7603b7e
N
4198 if (error)
4199 kfree(mi);
4200 else {
4201 struct seq_file *p = file->private_data;
4202 p->private = mi;
4203 mi->event = atomic_read(&md_event_count);
4204 }
1da177e4
LT
4205 return error;
4206}
4207
d7603b7e
N
4208static int md_seq_release(struct inode *inode, struct file *file)
4209{
4210 struct seq_file *m = file->private_data;
4211 struct mdstat_info *mi = m->private;
4212 m->private = NULL;
4213 kfree(mi);
4214 return seq_release(inode, file);
4215}
4216
4217static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
4218{
4219 struct seq_file *m = filp->private_data;
4220 struct mdstat_info *mi = m->private;
4221 int mask;
4222
4223 poll_wait(filp, &md_event_waiters, wait);
4224
4225 /* always allow read */
4226 mask = POLLIN | POLLRDNORM;
4227
4228 if (mi->event != atomic_read(&md_event_count))
4229 mask |= POLLERR | POLLPRI;
4230 return mask;
4231}
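/*
 * Illustrative userspace sketch (hypothetical, not part of the kernel):
 * a monitor can sleep in poll() on /proc/mdstat and re-read it when an
 * md event bumps md_event_count:
 *
 *	int fd = open("/proc/mdstat", O_RDONLY);
 *	char buf[4096];
 *	read(fd, buf, sizeof(buf));
 *	for (;;) {
 *		struct pollfd pfd = { .fd = fd, .events = POLLPRI };
 *		poll(&pfd, 1, -1);		// wakes on the next event
 *		lseek(fd, 0, SEEK_SET);
 *		read(fd, buf, sizeof(buf));	// fresh snapshot
 *	}
 */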
4232
1da177e4
LT
4233static struct file_operations md_seq_fops = {
4234 .open = md_seq_open,
4235 .read = seq_read,
4236 .llseek = seq_lseek,
d7603b7e
N
4237 .release = md_seq_release,
4238 .poll = mdstat_poll,
1da177e4
LT
4239};
4240
2604b703 4241int register_md_personality(struct mdk_personality *p)
1da177e4 4242{
1da177e4 4243 spin_lock(&pers_lock);
2604b703
N
4244 list_add_tail(&p->list, &pers_list);
4245 printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level);
1da177e4
LT
4246 spin_unlock(&pers_lock);
4247 return 0;
4248}
4249
2604b703 4250int unregister_md_personality(struct mdk_personality *p)
1da177e4 4251{
2604b703 4252 printk(KERN_INFO "md: %s personality unregistered\n", p->name);
1da177e4 4253 spin_lock(&pers_lock);
2604b703 4254 list_del_init(&p->list);
1da177e4
LT
4255 spin_unlock(&pers_lock);
4256 return 0;
4257}
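/*
 * Illustrative sketch: a personality module normally registers and
 * unregisters itself from its module init/exit hooks, e.g. (with
 * raidX_personality being a hypothetical struct mdk_personality):
 *
 *	static int __init raidX_init(void)
 *	{
 *		return register_md_personality(&raidX_personality);
 *	}
 *
 *	static void __exit raidX_exit(void)
 *	{
 *		unregister_md_personality(&raidX_personality);
 *	}
 */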
4258
4259static int is_mddev_idle(mddev_t *mddev)
4260{
4261 mdk_rdev_t * rdev;
4262 struct list_head *tmp;
4263 int idle;
4264 unsigned long curr_events;
4265
4266 idle = 1;
4267 ITERATE_RDEV(mddev,rdev,tmp) {
4268 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
a362357b
JA
4269 curr_events = disk_stat_read(disk, sectors[0]) +
4270 disk_stat_read(disk, sectors[1]) -
1da177e4 4271 atomic_read(&disk->sync_io);
c0e48521
N
4272 /* The difference between curr_events and last_events
4273 * will be affected by any new non-sync IO (making
4274 * curr_events bigger) and any difference in the amount of
4275 * in-flight sync IO (making curr_events bigger or smaller).
4276 * The amount in-flight is currently limited to
4277 * 32*64K in raid1/10 and 256*PAGE_SIZE in raid5/6
4278 * which is at most 4096 sectors.
4279 * These numbers are fairly fragile and should be made
4280 * more robust, probably by enforcing the
4281 * 'window size' that md_do_sync sort-of uses.
4282 *
1da177e4
LT
4283 * Note: the following is an unsigned comparison.
4284 */
c0e48521 4285 if ((curr_events - rdev->last_events + 4096) > 8192) {
1da177e4
LT
4286 rdev->last_events = curr_events;
4287 idle = 0;
4288 }
4289 }
4290 return idle;
4291}
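/*
 * Example of the window above (illustrative numbers): if non-sync IO
 * has pushed curr_events 5000 sectors past rdev->last_events, then
 * (5000 + 4096) > 8192 and the array is treated as busy; a drift of
 * up to about 4096 sectors in either direction (the allowed in-flight
 * sync IO) still counts as idle because of the unsigned +4096/8192
 * comparison.
 */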
4292
4293void md_done_sync(mddev_t *mddev, int blocks, int ok)
4294{
4295 /* another "blocks" (512byte) blocks have been synced */
4296 atomic_sub(blocks, &mddev->recovery_active);
4297 wake_up(&mddev->recovery_wait);
4298 if (!ok) {
4299 set_bit(MD_RECOVERY_ERR, &mddev->recovery);
4300 md_wakeup_thread(mddev->thread);
4301 /* stop recovery, signal do_sync */
4302 }
4303}
4304
4305
06d91a5f
N
4306/* md_write_start(mddev, bi)
4307 * If we need to update some array metadata (e.g. 'active' flag
3d310eb7
N
4308 * in superblock) before writing, schedule a superblock update
4309 * and wait for it to complete.
06d91a5f 4310 */
3d310eb7 4311void md_write_start(mddev_t *mddev, struct bio *bi)
1da177e4 4312{
06d91a5f 4313 if (bio_data_dir(bi) != WRITE)
3d310eb7 4314 return;
06d91a5f 4315
f91de92e
N
4316 BUG_ON(mddev->ro == 1);
4317 if (mddev->ro == 2) {
4318 /* need to switch to read/write */
4319 mddev->ro = 0;
4320 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4321 md_wakeup_thread(mddev->thread);
4322 }
06d91a5f 4323 atomic_inc(&mddev->writes_pending);
06d91a5f 4324 if (mddev->in_sync) {
a9701a30 4325 spin_lock_irq(&mddev->write_lock);
3d310eb7
N
4326 if (mddev->in_sync) {
4327 mddev->in_sync = 0;
4328 mddev->sb_dirty = 1;
4329 md_wakeup_thread(mddev->thread);
4330 }
a9701a30 4331 spin_unlock_irq(&mddev->write_lock);
06d91a5f 4332 }
3d310eb7 4333 wait_event(mddev->sb_wait, mddev->sb_dirty==0);
1da177e4
LT
4334}
4335
4336void md_write_end(mddev_t *mddev)
4337{
4338 if (atomic_dec_and_test(&mddev->writes_pending)) {
4339 if (mddev->safemode == 2)
4340 md_wakeup_thread(mddev->thread);
4341 else
4342 mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
4343 }
4344}
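/*
 * Illustrative sketch (not copied from a real personality): write
 * requests are expected to be bracketed by these two helpers, with
 * md_write_start() called before the write is issued and
 * md_write_end() called when it completes, e.g.
 *
 *	md_write_start(mddev, bio);	// may wait for a sb update
 *	...queue the write to the member devices...
 *	// and later, in the completion path:
 *	md_write_end(mddev);
 */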
4345
75c96f85 4346static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
1da177e4
LT
4347
4348#define SYNC_MARKS 10
4349#define SYNC_MARK_STEP (3*HZ)
4350static void md_do_sync(mddev_t *mddev)
4351{
4352 mddev_t *mddev2;
4353 unsigned int currspeed = 0,
4354 window;
57afd89f 4355 sector_t max_sectors,j, io_sectors;
1da177e4
LT
4356 unsigned long mark[SYNC_MARKS];
4357 sector_t mark_cnt[SYNC_MARKS];
4358 int last_mark,m;
4359 struct list_head *tmp;
4360 sector_t last_check;
57afd89f 4361 int skipped = 0;
1da177e4
LT
4362
4363 /* just in case the thread restarts... */
4364 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
4365 return;
4366
4367 /* we overload curr_resync somewhat here.
4368 * 0 == not engaged in resync at all
4369 * 2 == checking that there is no conflict with another sync
4370 * 1 == like 2, but have yielded to allow conflicting resync to
4371 * commence
4372 * other == active in resync - this many blocks
4373 *
4374 * Before starting a resync we must have set curr_resync to
4375 * 2, and then checked that every "conflicting" array has curr_resync
4376 * less than ours. When we find one that is the same or higher
4377 * we wait on resync_wait. To avoid deadlock, we reduce curr_resync
4378 * to 1 if we choose to yield (based arbitrarily on address of mddev structure).
4379 * This will mean we have to start checking from the beginning again.
4380 *
4381 */
4382
4383 do {
4384 mddev->curr_resync = 2;
4385
4386 try_again:
787453c2 4387 if (kthread_should_stop()) {
6985c43f 4388 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1da177e4
LT
4389 goto skip;
4390 }
4391 ITERATE_MDDEV(mddev2,tmp) {
1da177e4
LT
4392 if (mddev2 == mddev)
4393 continue;
4394 if (mddev2->curr_resync &&
4395 match_mddev_units(mddev,mddev2)) {
4396 DEFINE_WAIT(wq);
4397 if (mddev < mddev2 && mddev->curr_resync == 2) {
4398 /* arbitrarily yield */
4399 mddev->curr_resync = 1;
4400 wake_up(&resync_wait);
4401 }
4402 if (mddev > mddev2 && mddev->curr_resync == 1)
4403 /* no need to wait here, we can wait the next
4404 * time 'round when curr_resync == 2
4405 */
4406 continue;
787453c2
N
4407 prepare_to_wait(&resync_wait, &wq, TASK_UNINTERRUPTIBLE);
4408 if (!kthread_should_stop() &&
8712e553 4409 mddev2->curr_resync >= mddev->curr_resync) {
1da177e4
LT
4410 printk(KERN_INFO "md: delaying resync of %s"
4411 " until %s has finished resync (they"
4412 " share one or more physical units)\n",
4413 mdname(mddev), mdname(mddev2));
4414 mddev_put(mddev2);
4415 schedule();
4416 finish_wait(&resync_wait, &wq);
4417 goto try_again;
4418 }
4419 finish_wait(&resync_wait, &wq);
4420 }
4421 }
4422 } while (mddev->curr_resync < 2);
4423
9d88883e 4424 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
1da177e4 4425 /* resync follows the size requested by the personality,
57afd89f 4426 * which defaults to physical size, but can be virtual size
1da177e4
LT
4427 */
4428 max_sectors = mddev->resync_max_sectors;
9d88883e
N
4429 mddev->resync_mismatches = 0;
4430 } else
1da177e4
LT
4431 /* recovery follows the physical size of devices */
4432 max_sectors = mddev->size << 1;
4433
4434 printk(KERN_INFO "md: syncing RAID array %s\n", mdname(mddev));
4435 printk(KERN_INFO "md: minimum _guaranteed_ reconstruction speed:"
4436 " %d KB/sec/disc.\n", sysctl_speed_limit_min);
338cec32 4437 printk(KERN_INFO "md: using maximum available idle IO bandwidth "
1da177e4
LT
4438 "(but not more than %d KB/sec) for reconstruction.\n",
4439 sysctl_speed_limit_max);
4440
4441 is_mddev_idle(mddev); /* this also initializes IO event counters */
32a7627c 4442 /* we don't use the checkpoint if there's a bitmap */
24dd469d
N
4443 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && !mddev->bitmap
4444 && ! test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
1da177e4
LT
4445 j = mddev->recovery_cp;
4446 else
4447 j = 0;
57afd89f 4448 io_sectors = 0;
1da177e4
LT
4449 for (m = 0; m < SYNC_MARKS; m++) {
4450 mark[m] = jiffies;
57afd89f 4451 mark_cnt[m] = io_sectors;
1da177e4
LT
4452 }
4453 last_mark = 0;
4454 mddev->resync_mark = mark[last_mark];
4455 mddev->resync_mark_cnt = mark_cnt[last_mark];
4456
4457 /*
4458 * Tune reconstruction:
4459 */
4460 window = 32*(PAGE_SIZE/512);
4461 printk(KERN_INFO "md: using %dk window, over a total of %llu blocks.\n",
4462 window/2,(unsigned long long) max_sectors/2);
4463
4464 atomic_set(&mddev->recovery_active, 0);
4465 init_waitqueue_head(&mddev->recovery_wait);
4466 last_check = 0;
4467
4468 if (j>2) {
4469 printk(KERN_INFO
4470 "md: resuming recovery of %s from checkpoint.\n",
4471 mdname(mddev));
4472 mddev->curr_resync = j;
4473 }
4474
4475 while (j < max_sectors) {
57afd89f 4476 sector_t sectors;
1da177e4 4477
57afd89f
N
4478 skipped = 0;
4479 sectors = mddev->pers->sync_request(mddev, j, &skipped,
4480 currspeed < sysctl_speed_limit_min);
4481 if (sectors == 0) {
1da177e4
LT
4482 set_bit(MD_RECOVERY_ERR, &mddev->recovery);
4483 goto out;
4484 }
57afd89f
N
4485
4486 if (!skipped) { /* actual IO requested */
4487 io_sectors += sectors;
4488 atomic_add(sectors, &mddev->recovery_active);
4489 }
4490
1da177e4
LT
4491 j += sectors;
4492 if (j>1) mddev->curr_resync = j;
d7603b7e
N
4493 if (last_check == 0)
4494 /* this is the earliest that the rebuild will be
4495 * visible in /proc/mdstat
4496 */
4497 md_new_event(mddev);
57afd89f
N
4498
4499 if (last_check + window > io_sectors || j == max_sectors)
1da177e4
LT
4500 continue;
4501
57afd89f 4502 last_check = io_sectors;
1da177e4
LT
4503
4504 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery) ||
4505 test_bit(MD_RECOVERY_ERR, &mddev->recovery))
4506 break;
4507
4508 repeat:
4509 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
4510 /* step marks */
4511 int next = (last_mark+1) % SYNC_MARKS;
4512
4513 mddev->resync_mark = mark[next];
4514 mddev->resync_mark_cnt = mark_cnt[next];
4515 mark[next] = jiffies;
57afd89f 4516 mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
1da177e4
LT
4517 last_mark = next;
4518 }
4519
4520
787453c2 4521 if (kthread_should_stop()) {
1da177e4
LT
4522 /*
4523 * got a signal, exit.
4524 */
4525 printk(KERN_INFO
4526 "md: md_do_sync() got signal ... exiting\n");
1da177e4
LT
4527 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4528 goto out;
4529 }
4530
4531 /*
4532 * this loop exits only when we are slower than
4533 * the 'hard' speed limit, or the system was IO-idle for
4534 * a jiffy.
4535 * the system might be non-idle CPU-wise, but we only care
4536 * about not overloading the IO subsystem. (things like an
4537 * e2fsck being done on the RAID array should execute fast)
4538 */
4539 mddev->queue->unplug_fn(mddev->queue);
4540 cond_resched();
4541
57afd89f
N
4542 currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
4543 /((jiffies-mddev->resync_mark)/HZ +1) +1;
1da177e4
LT
4544
4545 if (currspeed > sysctl_speed_limit_min) {
4546 if ((currspeed > sysctl_speed_limit_max) ||
4547 !is_mddev_idle(mddev)) {
c0e48521 4548 msleep(500);
1da177e4
LT
4549 goto repeat;
4550 }
4551 }
4552 }
4553 printk(KERN_INFO "md: %s: sync done.\n",mdname(mddev));
4554 /*
4555 * this also signals 'finished resyncing' to md_stop
4556 */
4557 out:
4558 mddev->queue->unplug_fn(mddev->queue);
4559
4560 wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
4561
4562 /* tell personality that we are finished */
57afd89f 4563 mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);
1da177e4
LT
4564
4565 if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
4566 mddev->curr_resync > 2 &&
4567 mddev->curr_resync >= mddev->recovery_cp) {
4568 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
4569 printk(KERN_INFO
4570 "md: checkpointing recovery of %s.\n",
4571 mdname(mddev));
4572 mddev->recovery_cp = mddev->curr_resync;
4573 } else
4574 mddev->recovery_cp = MaxSector;
4575 }
4576
1da177e4
LT
4577 skip:
4578 mddev->curr_resync = 0;
4579 wake_up(&resync_wait);
4580 set_bit(MD_RECOVERY_DONE, &mddev->recovery);
4581 md_wakeup_thread(mddev->thread);
4582}
4583
4584
4585/*
4586 * This routine is regularly called by all per-raid-array threads to
4587 * deal with generic issues like resync and super-block update.
4588 * Raid personalities that don't have a thread (linear/raid0) do not
4589 * need this as they never do any recovery or update the superblock.
4590 *
4591 * It does not do any resync itself, but rather "forks" off other threads
4592 * to do that as needed.
4593 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
4594 * "->recovery" and create a thread at ->sync_thread.
4595 * When the thread finishes it sets MD_RECOVERY_DONE (and might set MD_RECOVERY_ERR)
4596 * and wakes up this thread, which will reap the sync thread and finish up.
4597 * This thread also removes any faulty devices (with nr_pending == 0).
4598 *
4599 * The overall approach is:
4600 * 1/ if the superblock needs updating, update it.
4601 * 2/ If a recovery thread is running, don't do anything else.
4602 * 3/ If recovery has finished, clean up, possibly marking spares active.
4603 * 4/ If there are any faulty devices, remove them.
4604 * 5/ If array is degraded, try to add spare devices.
4605 * 6/ If array has spares or is not in-sync, start a resync thread.
4606 */
4607void md_check_recovery(mddev_t *mddev)
4608{
4609 mdk_rdev_t *rdev;
4610 struct list_head *rtmp;
4611
4612
5f40402d
N
4613 if (mddev->bitmap)
4614 bitmap_daemon_work(mddev->bitmap);
1da177e4
LT
4615
4616 if (mddev->ro)
4617 return;
fca4d848
N
4618
4619 if (signal_pending(current)) {
4620 if (mddev->pers->sync_request) {
4621 printk(KERN_INFO "md: %s in immediate safe mode\n",
4622 mdname(mddev));
4623 mddev->safemode = 2;
4624 }
4625 flush_signals(current);
4626 }
4627
1da177e4
LT
4628 if ( ! (
4629 mddev->sb_dirty ||
4630 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
fca4d848
N
4631 test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
4632 (mddev->safemode == 1) ||
4633 (mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
4634 && !mddev->in_sync && mddev->recovery_cp == MaxSector)
1da177e4
LT
4635 ))
4636 return;
fca4d848 4637
1da177e4
LT
4638 if (mddev_trylock(mddev)==0) {
4639 int spares =0;
fca4d848 4640
a9701a30 4641 spin_lock_irq(&mddev->write_lock);
fca4d848
N
4642 if (mddev->safemode && !atomic_read(&mddev->writes_pending) &&
4643 !mddev->in_sync && mddev->recovery_cp == MaxSector) {
4644 mddev->in_sync = 1;
4645 mddev->sb_dirty = 1;
4646 }
4647 if (mddev->safemode == 1)
4648 mddev->safemode = 0;
a9701a30 4649 spin_unlock_irq(&mddev->write_lock);
fca4d848 4650
1da177e4
LT
4651 if (mddev->sb_dirty)
4652 md_update_sb(mddev);
06d91a5f 4653
06d91a5f 4654
1da177e4
LT
4655 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
4656 !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
4657 /* resync/recovery still happening */
4658 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4659 goto unlock;
4660 }
4661 if (mddev->sync_thread) {
4662 /* resync has finished, collect result */
4663 md_unregister_thread(mddev->sync_thread);
4664 mddev->sync_thread = NULL;
4665 if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
4666 !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
4667 /* success...*/
4668 /* activate any spares */
4669 mddev->pers->spare_active(mddev);
4670 }
4671 md_update_sb(mddev);
41158c7e
N
4672
4673 /* if the array is no longer degraded, then any saved_raid_disk
4674 * information must be scrapped
4675 */
4676 if (!mddev->degraded)
4677 ITERATE_RDEV(mddev,rdev,rtmp)
4678 rdev->saved_raid_disk = -1;
4679
1da177e4
LT
4680 mddev->recovery = 0;
4681 /* flag recovery needed just to double check */
4682 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
d7603b7e 4683 md_new_event(mddev);
1da177e4
LT
4684 goto unlock;
4685 }
24dd469d
N
4686 /* Clear some bits that don't mean anything, but
4687 * might be left set
4688 */
4689 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4690 clear_bit(MD_RECOVERY_ERR, &mddev->recovery);
4691 clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
4692 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
1da177e4
LT
4693
4694 /* no recovery is running.
4695 * remove any failed drives, then
4696 * add spares if possible.
4697 * Spares are also removed and re-added, to allow
4698 * the personality to fail the re-add.
4699 */
4700 ITERATE_RDEV(mddev,rdev,rtmp)
4701 if (rdev->raid_disk >= 0 &&
b2d444d7 4702 (test_bit(Faulty, &rdev->flags) || ! test_bit(In_sync, &rdev->flags)) &&
1da177e4 4703 atomic_read(&rdev->nr_pending)==0) {
86e6ffdd
N
4704 if (mddev->pers->hot_remove_disk(mddev, rdev->raid_disk)==0) {
4705 char nm[20];
4706 sprintf(nm,"rd%d", rdev->raid_disk);
4707 sysfs_remove_link(&mddev->kobj, nm);
1da177e4 4708 rdev->raid_disk = -1;
86e6ffdd 4709 }
1da177e4
LT
4710 }
4711
4712 if (mddev->degraded) {
4713 ITERATE_RDEV(mddev,rdev,rtmp)
4714 if (rdev->raid_disk < 0
b2d444d7 4715 && !test_bit(Faulty, &rdev->flags)) {
86e6ffdd
N
4716 if (mddev->pers->hot_add_disk(mddev,rdev)) {
4717 char nm[20];
4718 sprintf(nm, "rd%d", rdev->raid_disk);
4719 sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
1da177e4 4720 spares++;
d7603b7e 4721 md_new_event(mddev);
86e6ffdd 4722 } else
1da177e4
LT
4723 break;
4724 }
4725 }
4726
24dd469d
N
4727 if (spares) {
4728 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4729 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4730 } else if (mddev->recovery_cp < MaxSector) {
4731 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4732 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
4733 /* nothing to be done ... */
1da177e4 4734 goto unlock;
24dd469d 4735
1da177e4
LT
4736 if (mddev->pers->sync_request) {
4737 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
a654b9d8
N
4738 if (spares && mddev->bitmap && ! mddev->bitmap->file) {
4739 /* We are adding a device or devices to an array
4740 * which has the bitmap stored on all devices.
4741 * So make sure all bitmap pages get written
4742 */
4743 bitmap_write_all(mddev->bitmap);
4744 }
1da177e4
LT
4745 mddev->sync_thread = md_register_thread(md_do_sync,
4746 mddev,
4747 "%s_resync");
4748 if (!mddev->sync_thread) {
4749 printk(KERN_ERR "%s: could not start resync"
4750 " thread...\n",
4751 mdname(mddev));
4752 /* leave the spares where they are, it shouldn't hurt */
4753 mddev->recovery = 0;
d7603b7e 4754 } else
1da177e4 4755 md_wakeup_thread(mddev->sync_thread);
d7603b7e 4756 md_new_event(mddev);
1da177e4
LT
4757 }
4758 unlock:
4759 mddev_unlock(mddev);
4760 }
4761}
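/*
 * Illustrative sketch: a personality's per-array daemon is expected to
 * call md_check_recovery() on every wakeup, e.g. (raidXd being a
 * hypothetical thread function passed to md_register_thread()):
 *
 *	static void raidXd(mddev_t *mddev)
 *	{
 *		md_check_recovery(mddev);
 *		...process any pending per-array work...
 *	}
 */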
4762
75c96f85
AB
4763static int md_notify_reboot(struct notifier_block *this,
4764 unsigned long code, void *x)
1da177e4
LT
4765{
4766 struct list_head *tmp;
4767 mddev_t *mddev;
4768
4769 if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) {
4770
4771 printk(KERN_INFO "md: stopping all md devices.\n");
4772
4773 ITERATE_MDDEV(mddev,tmp)
4774 if (mddev_trylock(mddev)==0)
4775 do_md_stop (mddev, 1);
4776 /*
4777 * certain more exotic SCSI devices are known to be
4778 * volatile wrt too early system reboots. While the
4779 * right place to handle this issue is the given
4780 * driver, we do want to have a safe RAID driver ...
4781 */
4782 mdelay(1000*1);
4783 }
4784 return NOTIFY_DONE;
4785}
4786
75c96f85 4787static struct notifier_block md_notifier = {
1da177e4
LT
4788 .notifier_call = md_notify_reboot,
4789 .next = NULL,
4790 .priority = INT_MAX, /* before any real devices */
4791};
4792
4793static void md_geninit(void)
4794{
4795 struct proc_dir_entry *p;
4796
4797 dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
4798
4799 p = create_proc_entry("mdstat", S_IRUGO, NULL);
4800 if (p)
4801 p->proc_fops = &md_seq_fops;
4802}
4803
75c96f85 4804static int __init md_init(void)
1da177e4
LT
4805{
4806 int minor;
4807
4808 printk(KERN_INFO "md: md driver %d.%d.%d MAX_MD_DEVS=%d,"
4809 " MD_SB_DISKS=%d\n",
4810 MD_MAJOR_VERSION, MD_MINOR_VERSION,
4811 MD_PATCHLEVEL_VERSION, MAX_MD_DEVS, MD_SB_DISKS);
bd926c63 4812 printk(KERN_INFO "md: bitmap version %d.%d\n", BITMAP_MAJOR_HI,
32a7627c 4813 BITMAP_MINOR);
1da177e4
LT
4814
4815 if (register_blkdev(MAJOR_NR, "md"))
4816 return -1;
4817 if ((mdp_major=register_blkdev(0, "mdp"))<=0) {
4818 unregister_blkdev(MAJOR_NR, "md");
4819 return -1;
4820 }
4821 devfs_mk_dir("md");
4822 blk_register_region(MKDEV(MAJOR_NR, 0), MAX_MD_DEVS, THIS_MODULE,
4823 md_probe, NULL, NULL);
4824 blk_register_region(MKDEV(mdp_major, 0), MAX_MD_DEVS<<MdpMinorShift, THIS_MODULE,
4825 md_probe, NULL, NULL);
4826
4827 for (minor=0; minor < MAX_MD_DEVS; ++minor)
4828 devfs_mk_bdev(MKDEV(MAJOR_NR, minor),
4829 S_IFBLK|S_IRUSR|S_IWUSR,
4830 "md/%d", minor);
4831
4832 for (minor=0; minor < MAX_MD_DEVS; ++minor)
4833 devfs_mk_bdev(MKDEV(mdp_major, minor<<MdpMinorShift),
4834 S_IFBLK|S_IRUSR|S_IWUSR,
4835 "md/mdp%d", minor);
4836
4837
4838 register_reboot_notifier(&md_notifier);
4839 raid_table_header = register_sysctl_table(raid_root_table, 1);
4840
4841 md_geninit();
4842 return (0);
4843}
4844
4845
4846#ifndef MODULE
4847
4848/*
4849 * Searches all registered partitions for autorun RAID arrays
4850 * at boot time.
4851 */
4852static dev_t detected_devices[128];
4853static int dev_cnt;
4854
4855void md_autodetect_dev(dev_t dev)
4856{
4857 if (dev_cnt >= 0 && dev_cnt < 127)
4858 detected_devices[dev_cnt++] = dev;
4859}
4860
4861
4862static void autostart_arrays(int part)
4863{
4864 mdk_rdev_t *rdev;
4865 int i;
4866
4867 printk(KERN_INFO "md: Autodetecting RAID arrays.\n");
4868
4869 for (i = 0; i < dev_cnt; i++) {
4870 dev_t dev = detected_devices[i];
4871
4872 rdev = md_import_device(dev,0, 0);
4873 if (IS_ERR(rdev))
4874 continue;
4875
b2d444d7 4876 if (test_bit(Faulty, &rdev->flags)) {
1da177e4
LT
4877 MD_BUG();
4878 continue;
4879 }
4880 list_add(&rdev->same_set, &pending_raid_disks);
4881 }
4882 dev_cnt = 0;
4883
4884 autorun_devices(part);
4885}
4886
4887#endif
4888
4889static __exit void md_exit(void)
4890{
4891 mddev_t *mddev;
4892 struct list_head *tmp;
4893 int i;
4894 blk_unregister_region(MKDEV(MAJOR_NR,0), MAX_MD_DEVS);
4895 blk_unregister_region(MKDEV(mdp_major,0), MAX_MD_DEVS << MdpMinorShift);
4896 for (i=0; i < MAX_MD_DEVS; i++)
4897 devfs_remove("md/%d", i);
4898 for (i=0; i < MAX_MD_DEVS; i++)
4899 devfs_remove("md/d%d", i);
4900
4901 devfs_remove("md");
4902
4903 unregister_blkdev(MAJOR_NR,"md");
4904 unregister_blkdev(mdp_major, "mdp");
4905 unregister_reboot_notifier(&md_notifier);
4906 unregister_sysctl_table(raid_table_header);
4907 remove_proc_entry("mdstat", NULL);
4908 ITERATE_MDDEV(mddev,tmp) {
4909 struct gendisk *disk = mddev->gendisk;
4910 if (!disk)
4911 continue;
4912 export_array(mddev);
4913 del_gendisk(disk);
4914 put_disk(disk);
4915 mddev->gendisk = NULL;
4916 mddev_put(mddev);
4917 }
4918}
4919
4920module_init(md_init)
4921module_exit(md_exit)
4922
f91de92e
N
4923static int get_ro(char *buffer, struct kernel_param *kp)
4924{
4925 return sprintf(buffer, "%d", start_readonly);
4926}
4927static int set_ro(const char *val, struct kernel_param *kp)
4928{
4929 char *e;
4930 int num = simple_strtoul(val, &e, 10);
4931 if (*val && (*e == '\0' || *e == '\n')) {
4932 start_readonly = num;
4dbcdc75 4933 return 0;
f91de92e
N
4934 }
4935 return -EINVAL;
4936}
4937
4938module_param_call(start_ro, set_ro, get_ro, NULL, 0600);
6ff8d8ec
N
4939module_param(start_dirty_degraded, int, 0644);
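/*
 * Usage note (illustrative, assuming md is built as the md_mod module):
 * both parameters can be given at load time or changed via sysfs, e.g.
 *
 *	modprobe md-mod start_ro=1 start_dirty_degraded=1
 *	echo 1 > /sys/module/md_mod/parameters/start_ro
 *
 * so that newly assembled arrays come up auto-read-only until first
 * written.
 */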
4940
f91de92e 4941
1da177e4
LT
4942EXPORT_SYMBOL(register_md_personality);
4943EXPORT_SYMBOL(unregister_md_personality);
4944EXPORT_SYMBOL(md_error);
4945EXPORT_SYMBOL(md_done_sync);
4946EXPORT_SYMBOL(md_write_start);
4947EXPORT_SYMBOL(md_write_end);
1da177e4
LT
4948EXPORT_SYMBOL(md_register_thread);
4949EXPORT_SYMBOL(md_unregister_thread);
4950EXPORT_SYMBOL(md_wakeup_thread);
4951EXPORT_SYMBOL(md_print_devices);
4952EXPORT_SYMBOL(md_check_recovery);
4953MODULE_LICENSE("GPL");
aa1595e9 4954MODULE_ALIAS("md");
72008652 4955MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);