[PATCH] md: clean up 'page' related names in md
drivers/md/md.c
/*
   md.c : Multiple Devices driver for Linux
	  Copyright (C) 1998, 1999, 2000 Ingo Molnar

     completely rewritten, based on the MD driver code from Marc Zyngier

   Changes:

   - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
   - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
   - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
   - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
   - kmod support by: Cyrus Durgin
   - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
   - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>

   - lots of fixes and improvements to the RAID1/RAID5 and generic
     RAID code (such as request based resynchronization):

     Neil Brown <neilb@cse.unsw.edu.au>.

   - persistent bitmap code
     Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/module.h>
#include <linux/config.h>
#include <linux/kthread.h>
#include <linux/linkage.h>
#include <linux/raid/md.h>
#include <linux/raid/bitmap.h>
#include <linux/sysctl.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/buffer_head.h> /* for invalidate_bdev */
#include <linux/suspend.h>
#include <linux/poll.h>

#include <linux/init.h>

#include <linux/file.h>

#ifdef CONFIG_KMOD
#include <linux/kmod.h>
#endif

#include <asm/unaligned.h>

#define MAJOR_NR MD_MAJOR
#define MD_DRIVER

/* 63 partitions with the alternate major number (mdp) */
#define MdpMinorShift 6

#define DEBUG 0
#define dprintk(x...) ((void)(DEBUG && printk(x)))


#ifndef MODULE
static void autostart_arrays (int part);
#endif

static mdk_personality_t *pers[MAX_PERSONALITY];
static DEFINE_SPINLOCK(pers_lock);

/*
 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
 * is 1000 KB/sec, so the extra system load does not show up that much.
 * Increase it if you want to have more _guaranteed_ speed. Note that
 * the RAID driver will use the maximum available bandwidth if the IO
 * subsystem is idle. There is also an 'absolute maximum' reconstruction
 * speed limit - in case reconstruction slows down your system despite
 * idle IO detection.
 *
 * you can change it via /proc/sys/dev/raid/speed_limit_min and _max.
 */

static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;

static struct ctl_table_header *raid_table_header;

static ctl_table raid_table[] = {
	{
		.ctl_name	= DEV_RAID_SPEED_LIMIT_MIN,
		.procname	= "speed_limit_min",
		.data		= &sysctl_speed_limit_min,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= DEV_RAID_SPEED_LIMIT_MAX,
		.procname	= "speed_limit_max",
		.data		= &sysctl_speed_limit_max,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{ .ctl_name = 0 }
};

static ctl_table raid_dir_table[] = {
	{
		.ctl_name	= DEV_RAID,
		.procname	= "raid",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= raid_table,
	},
	{ .ctl_name = 0 }
};

static ctl_table raid_root_table[] = {
	{
		.ctl_name	= CTL_DEV,
		.procname	= "dev",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= raid_dir_table,
	},
	{ .ctl_name = 0 }
};

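#if 0
/*
 * Illustrative sketch, not part of the original file: the two knobs
 * above surface as /proc/sys/dev/raid/speed_limit_{min,max}.  This is
 * host code, not kernel code, and the chosen value is arbitrary.
 */
#include <stdio.h>

static int example_raise_speed_limit_min(void)
{
	FILE *f = fopen("/proc/sys/dev/raid/speed_limit_min", "w");

	if (!f)
		return -1;
	fprintf(f, "%d\n", 5000);	/* guarantee 5000 KB/sec of resync */
	return fclose(f);
}
#endif
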
static struct block_device_operations md_fops;

static int start_readonly;

/*
 * We have a system wide 'event count' that is incremented
 * on any 'interesting' event, and readers of /proc/mdstat
 * can use 'poll' or 'select' to find out when the event
 * count increases.
 *
 * Events are:
 *  start array, stop array, error, add device, remove device,
 *  start build, activate spare
 */
DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
void md_new_event(mddev_t *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}

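#if 0
/*
 * Illustrative sketch, not part of the original file: how a user-space
 * reader (host code, not kernel code) can consume the event count
 * above.  A change since the file was last read is typically flagged
 * as a priority/exceptional condition on poll(); the exact revents
 * bits are an assumption here.
 */
#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

static void example_wait_for_md_event(void)
{
	char buf[4096];
	struct pollfd pfd;

	pfd.fd = open("/proc/mdstat", O_RDONLY);
	pfd.events = POLLIN | POLLPRI;
	read(pfd.fd, buf, sizeof(buf));	/* consume the current state */
	poll(&pfd, 1, -1);		/* returns once md_new_event() fires */
	close(pfd.fd);
}
#endif
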
/*
 * Enables iteration over all existing md arrays.
 * all_mddevs_lock protects this list.
 */
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);


/*
 * iterates through all used mddevs in the system.
 * We take care to grab the all_mddevs_lock whenever navigating
 * the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop while still holding
 * a reference to the current mddev must mddev_put it.
 */
#define ITERATE_MDDEV(mddev,tmp)					\
									\
	for (({ spin_lock(&all_mddevs_lock);				\
		tmp = all_mddevs.next;					\
		mddev = NULL;});					\
	     ({ if (tmp != &all_mddevs)					\
			mddev_get(list_entry(tmp, mddev_t, all_mddevs));\
		spin_unlock(&all_mddevs_lock);				\
		if (mddev) mddev_put(mddev);				\
		mddev = list_entry(tmp, mddev_t, all_mddevs);		\
		tmp != &all_mddevs;});					\
	     ({ spin_lock(&all_mddevs_lock);				\
		tmp = tmp->next;})					\
		)


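#if 0
/*
 * Illustrative sketch, not part of the original file: a typical use of
 * ITERATE_MDDEV.  The macro takes and drops all_mddevs_lock itself and
 * holds a reference across each iteration, so the body may sleep; code
 * that breaks out early must mddev_put() the current mddev, as the
 * comment above notes.
 */
static void example_count_arrays(void)
{
	mddev_t *mddev;
	struct list_head *tmp;
	int n = 0;

	ITERATE_MDDEV(mddev,tmp)
		n++;
	printk(KERN_INFO "md: %d array(s) currently known\n", n);
}
#endif
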
static int md_fail_request (request_queue_t *q, struct bio *bio)
{
	bio_io_error(bio, bio->bi_size);
	return 0;
}

static inline mddev_t *mddev_get(mddev_t *mddev)
{
	atomic_inc(&mddev->active);
	return mddev;
}

static void mddev_put(mddev_t *mddev)
{
	if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
		return;
	if (!mddev->raid_disks && list_empty(&mddev->disks)) {
		list_del(&mddev->all_mddevs);
		blk_put_queue(mddev->queue);
		kobject_unregister(&mddev->kobj);
	}
	spin_unlock(&all_mddevs_lock);
}

static mddev_t * mddev_find(dev_t unit)
{
	mddev_t *mddev, *new = NULL;

 retry:
	spin_lock(&all_mddevs_lock);
	list_for_each_entry(mddev, &all_mddevs, all_mddevs)
		if (mddev->unit == unit) {
			mddev_get(mddev);
			spin_unlock(&all_mddevs_lock);
			kfree(new);
			return mddev;
		}

	if (new) {
		list_add(&new->all_mddevs, &all_mddevs);
		spin_unlock(&all_mddevs_lock);
		return new;
	}
	spin_unlock(&all_mddevs_lock);

	new = (mddev_t *) kmalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	memset(new, 0, sizeof(*new));

	new->unit = unit;
	if (MAJOR(unit) == MD_MAJOR)
		new->md_minor = MINOR(unit);
	else
		new->md_minor = MINOR(unit) >> MdpMinorShift;

	init_MUTEX(&new->reconfig_sem);
	INIT_LIST_HEAD(&new->disks);
	INIT_LIST_HEAD(&new->all_mddevs);
	init_timer(&new->safemode_timer);
	atomic_set(&new->active, 1);
	spin_lock_init(&new->write_lock);
	init_waitqueue_head(&new->sb_wait);

	new->queue = blk_alloc_queue(GFP_KERNEL);
	if (!new->queue) {
		kfree(new);
		return NULL;
	}

	blk_queue_make_request(new->queue, md_fail_request);

	goto retry;
}

static inline int mddev_lock(mddev_t * mddev)
{
	return down_interruptible(&mddev->reconfig_sem);
}

static inline void mddev_lock_uninterruptible(mddev_t * mddev)
{
	down(&mddev->reconfig_sem);
}

static inline int mddev_trylock(mddev_t * mddev)
{
	return down_trylock(&mddev->reconfig_sem);
}

static inline void mddev_unlock(mddev_t * mddev)
{
	up(&mddev->reconfig_sem);

	md_wakeup_thread(mddev->thread);
}

mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
{
	mdk_rdev_t * rdev;
	struct list_head *tmp;

	ITERATE_RDEV(mddev,rdev,tmp) {
		if (rdev->desc_nr == nr)
			return rdev;
	}
	return NULL;
}

static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev)
{
	struct list_head *tmp;
	mdk_rdev_t *rdev;

	ITERATE_RDEV(mddev,rdev,tmp) {
		if (rdev->bdev->bd_dev == dev)
			return rdev;
	}
	return NULL;
}

static inline sector_t calc_dev_sboffset(struct block_device *bdev)
{
	sector_t size = bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
	return MD_NEW_SIZE_BLOCKS(size);
}

static sector_t calc_dev_size(mdk_rdev_t *rdev, unsigned chunk_size)
{
	sector_t size;

	size = rdev->sb_offset;

	if (chunk_size)
		size &= ~((sector_t)chunk_size/1024 - 1);
	return size;
}

static int alloc_disk_sb(mdk_rdev_t * rdev)
{
	if (rdev->sb_page)
		MD_BUG();

	rdev->sb_page = alloc_page(GFP_KERNEL);
	if (!rdev->sb_page) {
		printk(KERN_ALERT "md: out of memory.\n");
		return -EINVAL;
	}

	return 0;
}

static void free_disk_sb(mdk_rdev_t * rdev)
{
	if (rdev->sb_page) {
		put_page(rdev->sb_page);
		rdev->sb_loaded = 0;
		rdev->sb_page = NULL;
		rdev->sb_offset = 0;
		rdev->size = 0;
	}
}


static int super_written(struct bio *bio, unsigned int bytes_done, int error)
{
	mdk_rdev_t *rdev = bio->bi_private;
	mddev_t *mddev = rdev->mddev;
	if (bio->bi_size)
		return 1;

	if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags))
		md_error(mddev, rdev);

	if (atomic_dec_and_test(&mddev->pending_writes))
		wake_up(&mddev->sb_wait);
	bio_put(bio);
	return 0;
}

static int super_written_barrier(struct bio *bio, unsigned int bytes_done, int error)
{
	struct bio *bio2 = bio->bi_private;
	mdk_rdev_t *rdev = bio2->bi_private;
	mddev_t *mddev = rdev->mddev;
	if (bio->bi_size)
		return 1;

	if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
	    error == -EOPNOTSUPP) {
		unsigned long flags;
		/* barriers don't appear to be supported :-( */
		set_bit(BarriersNotsupp, &rdev->flags);
		mddev->barriers_work = 0;
		spin_lock_irqsave(&mddev->write_lock, flags);
		bio2->bi_next = mddev->biolist;
		mddev->biolist = bio2;
		spin_unlock_irqrestore(&mddev->write_lock, flags);
		wake_up(&mddev->sb_wait);
		bio_put(bio);
		return 0;
	}
	bio_put(bio2);
	bio->bi_private = rdev;
	return super_written(bio, bytes_done, error);
}

void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
		   sector_t sector, int size, struct page *page)
{
	/* write first size bytes of page to sector of rdev
	 * Increment mddev->pending_writes before returning
	 * and decrement it on completion, waking up sb_wait
	 * if zero is reached.
	 * If an error occurred, call md_error
	 *
	 * As we might need to resubmit the request if BIO_RW_BARRIER
	 * causes ENOTSUPP, we allocate a spare bio...
	 */
	struct bio *bio = bio_alloc(GFP_NOIO, 1);
	int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNC);

	bio->bi_bdev = rdev->bdev;
	bio->bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	bio->bi_private = rdev;
	bio->bi_end_io = super_written;
	bio->bi_rw = rw;

	atomic_inc(&mddev->pending_writes);
	if (!test_bit(BarriersNotsupp, &rdev->flags)) {
		struct bio *rbio;
		rw |= (1<<BIO_RW_BARRIER);
		rbio = bio_clone(bio, GFP_NOIO);
		rbio->bi_private = bio;
		rbio->bi_end_io = super_written_barrier;
		submit_bio(rw, rbio);
	} else
		submit_bio(rw, bio);
}

void md_super_wait(mddev_t *mddev)
{
	/* wait for all superblock writes that were scheduled to complete.
	 * if any had to be retried (due to BARRIER problems), retry them
	 */
	DEFINE_WAIT(wq);
	for(;;) {
		prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&mddev->pending_writes)==0)
			break;
		while (mddev->biolist) {
			struct bio *bio;
			spin_lock_irq(&mddev->write_lock);
			bio = mddev->biolist;
			mddev->biolist = bio->bi_next ;
			bio->bi_next = NULL;
			spin_unlock_irq(&mddev->write_lock);
			submit_bio(bio->bi_rw, bio);
		}
		schedule();
	}
	finish_wait(&mddev->sb_wait, &wq);
}

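#if 0
/*
 * Illustrative sketch, not part of the original file: the intended
 * pairing of the two functions above, mirroring what md_update_sb()
 * does further down.  Writes are fired asynchronously, then
 * md_super_wait() blocks until pending_writes reaches zero,
 * resubmitting any bios whose barrier was rejected.
 */
static void example_write_sb_and_wait(mddev_t *mddev, mdk_rdev_t *rdev)
{
	md_super_write(mddev, rdev, rdev->sb_offset<<1,
		       rdev->sb_size, rdev->sb_page);
	md_super_wait(mddev);	/* superblock is on stable storage now */
}
#endif
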
static int bi_complete(struct bio *bio, unsigned int bytes_done, int error)
{
	if (bio->bi_size)
		return 1;

	complete((struct completion*)bio->bi_private);
	return 0;
}

int sync_page_io(struct block_device *bdev, sector_t sector, int size,
		   struct page *page, int rw)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 1);
	struct completion event;
	int ret;

	rw |= (1 << BIO_RW_SYNC);

	bio->bi_bdev = bdev;
	bio->bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	init_completion(&event);
	bio->bi_private = &event;
	bio->bi_end_io = bi_complete;
	submit_bio(rw, bio);
	wait_for_completion(&event);

	ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL(sync_page_io);

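#if 0
/*
 * Illustrative sketch, not part of the original file: sync_page_io()
 * returns 1 on success and 0 on failure, so a caller converts that to
 * an errno itself (read_disk_sb() below is the in-file example).
 */
static int example_read_one_page(struct block_device *bdev,
				 sector_t sector, struct page *page)
{
	if (!sync_page_io(bdev, sector, PAGE_SIZE, page, READ))
		return -EIO;
	return 0;
}
#endif
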
static int read_disk_sb(mdk_rdev_t * rdev, int size)
{
	char b[BDEVNAME_SIZE];
	if (!rdev->sb_page) {
		MD_BUG();
		return -EINVAL;
	}
	if (rdev->sb_loaded)
		return 0;


	if (!sync_page_io(rdev->bdev, rdev->sb_offset<<1, size, rdev->sb_page, READ))
		goto fail;
	rdev->sb_loaded = 1;
	return 0;

fail:
	printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
		bdevname(rdev->bdev,b));
	return -EINVAL;
}

static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	if (	(sb1->set_uuid0 == sb2->set_uuid0) &&
		(sb1->set_uuid1 == sb2->set_uuid1) &&
		(sb1->set_uuid2 == sb2->set_uuid2) &&
		(sb1->set_uuid3 == sb2->set_uuid3))

		return 1;

	return 0;
}


static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	int ret;
	mdp_super_t *tmp1, *tmp2;

	tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
	tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);

	if (!tmp1 || !tmp2) {
		ret = 0;
		printk(KERN_INFO "md.c: sb1 is not equal to sb2!\n");
		goto abort;
	}

	*tmp1 = *sb1;
	*tmp2 = *sb2;

	/*
	 * nr_disks is not constant
	 */
	tmp1->nr_disks = 0;
	tmp2->nr_disks = 0;

	if (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4))
		ret = 0;
	else
		ret = 1;

abort:
	kfree(tmp1);
	kfree(tmp2);
	return ret;
}

static unsigned int calc_sb_csum(mdp_super_t * sb)
{
	unsigned int disk_csum, csum;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;
	csum = csum_partial((void *)sb, MD_SB_BYTES, 0);
	sb->sb_csum = disk_csum;
	return csum;
}


/*
 * Handle superblock details.
 * We want to be able to handle multiple superblock formats
 * so we have a common interface to them all, and an array of
 * different handlers.
 * We rely on user-space to write the initial superblock, and support
 * reading and updating of superblocks.
 * Interface methods are:
 *   int load_super(mdk_rdev_t *dev, mdk_rdev_t *refdev, int minor_version)
 *      loads and validates a superblock on dev.
 *      if refdev != NULL, compare superblocks on both devices
 *    Return:
 *      0 - dev has a superblock that is compatible with refdev
 *      1 - dev has a superblock that is compatible and newer than refdev
 *          so dev should be used as the refdev in future
 *     -EINVAL superblock incompatible or invalid
 *     -othererror e.g. -EIO
 *
 *   int validate_super(mddev_t *mddev, mdk_rdev_t *dev)
 *      Verify that dev is acceptable into mddev.
 *       The first time, mddev->raid_disks will be 0, and data from
 *       dev should be merged in.  Subsequent calls check that dev
 *       is new enough.  Return 0 or -EINVAL
 *
 *   void sync_super(mddev_t *mddev, mdk_rdev_t *dev)
 *     Update the superblock for rdev with data in mddev
 *     This does not write to disc.
 *
 */

struct super_type  {
	char		*name;
	struct module	*owner;
	int		(*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version);
	int		(*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev);
	void		(*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
};

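#if 0
/*
 * Illustrative sketch, not part of the original file: how the handler
 * table described above is dispatched.  The concrete handlers are
 * collected in the super_types[] array defined after the version-1
 * implementation; analyze_sbs() and md_import_device() below call
 * through it exactly like this.
 */
static int example_load_super(mddev_t *mddev, mdk_rdev_t *rdev,
			      mdk_rdev_t *refdev)
{
	return super_types[mddev->major_version].
		load_super(rdev, refdev, mddev->minor_version);
}
#endif
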
/*
 * load_super for 0.90.0
 */
static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	mdp_super_t *sb;
	int ret;
	sector_t sb_offset;

	/*
	 * Calculate the position of the superblock,
	 * it's at the end of the disk.
	 *
	 * It also happens to be a multiple of 4Kb.
	 */
	sb_offset = calc_dev_sboffset(rdev->bdev);
	rdev->sb_offset = sb_offset;

	ret = read_disk_sb(rdev, MD_SB_BYTES);
	if (ret) return ret;

	ret = -EINVAL;

	bdevname(rdev->bdev, b);
	sb = (mdp_super_t*)page_address(rdev->sb_page);

	if (sb->md_magic != MD_SB_MAGIC) {
		printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
		       b);
		goto abort;
	}

	if (sb->major_version != 0 ||
	    sb->minor_version != 90) {
		printk(KERN_WARNING "Bad version number %d.%d on %s\n",
			sb->major_version, sb->minor_version,
			b);
		goto abort;
	}

	if (sb->raid_disks <= 0)
		goto abort;

	if (csum_fold(calc_sb_csum(sb)) != csum_fold(sb->sb_csum)) {
		printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
			b);
		goto abort;
	}

	rdev->preferred_minor = sb->md_minor;
	rdev->data_offset = 0;
	rdev->sb_size = MD_SB_BYTES;

	if (sb->level == LEVEL_MULTIPATH)
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = sb->this_disk.number;

	if (refdev == 0)
		ret = 1;
	else {
		__u64 ev1, ev2;
		mdp_super_t *refsb = (mdp_super_t*)page_address(refdev->sb_page);
		if (!uuid_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has different UUID to %s\n",
				b, bdevname(refdev->bdev,b2));
			goto abort;
		}
		if (!sb_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has same UUID"
			       " but different superblock to %s\n",
			       b, bdevname(refdev->bdev, b2));
			goto abort;
		}
		ev1 = md_event(sb);
		ev2 = md_event(refsb);
		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	rdev->size = calc_dev_size(rdev, sb->chunk_size);

 abort:
	return ret;
}

/*
 * validate_super for 0.90.0
 */
static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
	mdp_disk_t *desc;
	mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page);

	rdev->raid_disk = -1;
	rdev->flags = 0;
	if (mddev->raid_disks == 0) {
		mddev->major_version = 0;
		mddev->minor_version = sb->minor_version;
		mddev->patch_version = sb->patch_version;
		mddev->persistent = ! sb->not_persistent;
		mddev->chunk_size = sb->chunk_size;
		mddev->ctime = sb->ctime;
		mddev->utime = sb->utime;
		mddev->level = sb->level;
		mddev->layout = sb->layout;
		mddev->raid_disks = sb->raid_disks;
		mddev->size = sb->size;
		mddev->events = md_event(sb);
		mddev->bitmap_offset = 0;
		mddev->default_bitmap_offset = MD_SB_BYTES >> 9;

		if (sb->state & (1<<MD_SB_CLEAN))
			mddev->recovery_cp = MaxSector;
		else {
			if (sb->events_hi == sb->cp_events_hi &&
				sb->events_lo == sb->cp_events_lo) {
				mddev->recovery_cp = sb->recovery_cp;
			} else
				mddev->recovery_cp = 0;
		}

		memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
		memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
		memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
		memcpy(mddev->uuid+12,&sb->set_uuid3, 4);

		mddev->max_disks = MD_SB_DISKS;

		if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
		    mddev->bitmap_file == NULL) {
			if (mddev->level != 1 && mddev->level != 5 && mddev->level != 6
			    && mddev->level != 10) {
				/* FIXME use a better test */
				printk(KERN_WARNING "md: bitmaps not supported for this level.\n");
				return -EINVAL;
			}
			mddev->bitmap_offset = mddev->default_bitmap_offset;
		}

	} else if (mddev->pers == NULL) {
		/* Insist on good event counter while assembling */
		__u64 ev1 = md_event(sb);
		++ev1;
		if (ev1 < mddev->events)
			return -EINVAL;
	} else if (mddev->bitmap) {
		/* if adding to array with a bitmap, then we can accept an
		 * older device ... but not too old.
		 */
		__u64 ev1 = md_event(sb);
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
	} else /* just a hot-add of a new device, leave raid_disk at -1 */
		return 0;

	if (mddev->level != LEVEL_MULTIPATH) {
		desc = sb->disks + rdev->desc_nr;

		if (desc->state & (1<<MD_DISK_FAULTY))
			set_bit(Faulty, &rdev->flags);
		else if (desc->state & (1<<MD_DISK_SYNC) &&
			 desc->raid_disk < mddev->raid_disks) {
			set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = desc->raid_disk;
		}
		if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);
	return 0;
}

/*
 * sync_super for 0.90.0
 */
static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
{
	mdp_super_t *sb;
	struct list_head *tmp;
	mdk_rdev_t *rdev2;
	int next_spare = mddev->raid_disks;


	/* make rdev->sb match mddev data..
	 *
	 * 1/ zero out disks
	 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
	 * 3/ any empty disks < next_spare become removed
	 *
	 * disks[0] gets initialised to REMOVED because
	 * we cannot be sure from other fields if it has
	 * been initialised or not.
	 */
	int i;
	int active=0, working=0,failed=0,spare=0,nr_disks=0;

	rdev->sb_size = MD_SB_BYTES;

	sb = (mdp_super_t*)page_address(rdev->sb_page);

	memset(sb, 0, sizeof(*sb));

	sb->md_magic = MD_SB_MAGIC;
	sb->major_version = mddev->major_version;
	sb->minor_version = mddev->minor_version;
	sb->patch_version = mddev->patch_version;
	sb->gvalid_words  = 0; /* ignored */
	memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
	memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
	memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
	memcpy(&sb->set_uuid3, mddev->uuid+12,4);

	sb->ctime = mddev->ctime;
	sb->level = mddev->level;
	sb->size  = mddev->size;
	sb->raid_disks = mddev->raid_disks;
	sb->md_minor = mddev->md_minor;
	sb->not_persistent = !mddev->persistent;
	sb->utime = mddev->utime;
	sb->state = 0;
	sb->events_hi = (mddev->events>>32);
	sb->events_lo = (u32)mddev->events;

	if (mddev->in_sync)
	{
		sb->recovery_cp = mddev->recovery_cp;
		sb->cp_events_hi = (mddev->events>>32);
		sb->cp_events_lo = (u32)mddev->events;
		if (mddev->recovery_cp == MaxSector)
			sb->state = (1<< MD_SB_CLEAN);
	} else
		sb->recovery_cp = 0;

	sb->layout = mddev->layout;
	sb->chunk_size = mddev->chunk_size;

	if (mddev->bitmap && mddev->bitmap_file == NULL)
		sb->state |= (1<<MD_SB_BITMAP_PRESENT);

	sb->disks[0].state = (1<<MD_DISK_REMOVED);
	ITERATE_RDEV(mddev,rdev2,tmp) {
		mdp_disk_t *d;
		int desc_nr;
		if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
		    && !test_bit(Faulty, &rdev2->flags))
			desc_nr = rdev2->raid_disk;
		else
			desc_nr = next_spare++;
		rdev2->desc_nr = desc_nr;
		d = &sb->disks[rdev2->desc_nr];
		nr_disks++;
		d->number = rdev2->desc_nr;
		d->major = MAJOR(rdev2->bdev->bd_dev);
		d->minor = MINOR(rdev2->bdev->bd_dev);
		if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
		    && !test_bit(Faulty, &rdev2->flags))
			d->raid_disk = rdev2->raid_disk;
		else
			d->raid_disk = rdev2->desc_nr; /* compatibility */
		if (test_bit(Faulty, &rdev2->flags)) {
			d->state = (1<<MD_DISK_FAULTY);
			failed++;
		} else if (test_bit(In_sync, &rdev2->flags)) {
			d->state = (1<<MD_DISK_ACTIVE);
			d->state |= (1<<MD_DISK_SYNC);
			active++;
			working++;
		} else {
			d->state = 0;
			spare++;
			working++;
		}
		if (test_bit(WriteMostly, &rdev2->flags))
			d->state |= (1<<MD_DISK_WRITEMOSTLY);
	}
	/* now set the "removed" and "faulty" bits on any missing devices */
	for (i=0 ; i < mddev->raid_disks ; i++) {
		mdp_disk_t *d = &sb->disks[i];
		if (d->state == 0 && d->number == 0) {
			d->number = i;
			d->raid_disk = i;
			d->state = (1<<MD_DISK_REMOVED);
			d->state |= (1<<MD_DISK_FAULTY);
			failed++;
		}
	}
	sb->nr_disks = nr_disks;
	sb->active_disks = active;
	sb->working_disks = working;
	sb->failed_disks = failed;
	sb->spare_disks = spare;

	sb->this_disk = sb->disks[rdev->desc_nr];
	sb->sb_csum = calc_sb_csum(sb);
}

/*
 * version 1 superblock
 */

static unsigned int calc_sb_1_csum(struct mdp_superblock_1 * sb)
{
	unsigned int disk_csum, csum;
	unsigned long long newcsum;
	int size = 256 + le32_to_cpu(sb->max_dev)*2;
	unsigned int *isuper = (unsigned int*)sb;
	int i;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;
	newcsum = 0;
	for (i=0; size>=4; size -= 4 )
		newcsum += le32_to_cpu(*isuper++);

	if (size == 2)
		newcsum += le16_to_cpu(*(unsigned short*) isuper);

	csum = (newcsum & 0xffffffff) + (newcsum >> 32);
	sb->sb_csum = disk_csum;
	return cpu_to_le32(csum);
}

static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
	struct mdp_superblock_1 *sb;
	int ret;
	sector_t sb_offset;
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	int bmask;

	/*
	 * Calculate the position of the superblock.
	 * It is always aligned to a 4K boundary and
	 * depending on minor_version, it can be:
	 * 0: At least 8K, but less than 12K, from end of device
	 * 1: At start of device
	 * 2: 4K from start of device.
	 */
	switch(minor_version) {
	case 0:
		sb_offset = rdev->bdev->bd_inode->i_size >> 9;
		sb_offset -= 8*2;
		sb_offset &= ~(sector_t)(4*2-1);
		/* convert from sectors to K */
		sb_offset /= 2;
		break;
	case 1:
		sb_offset = 0;
		break;
	case 2:
		sb_offset = 4;
		break;
	default:
		return -EINVAL;
	}
	rdev->sb_offset = sb_offset;

	/* superblock is rarely larger than 1K, but it can be larger,
	 * and it is safe to read 4k, so we do that
	 */
	ret = read_disk_sb(rdev, 4096);
	if (ret) return ret;


	sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

	if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
	    sb->major_version != cpu_to_le32(1) ||
	    le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
	    le64_to_cpu(sb->super_offset) != (rdev->sb_offset<<1) ||
	    (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
		return -EINVAL;

	if (calc_sb_1_csum(sb) != sb->sb_csum) {
		printk("md: invalid superblock checksum on %s\n",
			bdevname(rdev->bdev,b));
		return -EINVAL;
	}
	if (le64_to_cpu(sb->data_size) < 10) {
		printk("md: data_size too small on %s\n",
		       bdevname(rdev->bdev,b));
		return -EINVAL;
	}
	rdev->preferred_minor = 0xffff;
	rdev->data_offset = le64_to_cpu(sb->data_offset);

	rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
	bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1;
	if (rdev->sb_size & bmask)
		rdev->sb_size = (rdev->sb_size | bmask)+1;

	if (refdev == 0)
		return 1;
	else {
		__u64 ev1, ev2;
		struct mdp_superblock_1 *refsb =
			(struct mdp_superblock_1*)page_address(refdev->sb_page);

		if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
		    sb->level != refsb->level ||
		    sb->layout != refsb->layout ||
		    sb->chunksize != refsb->chunksize) {
			printk(KERN_WARNING "md: %s has strangely different"
				" superblock to %s\n",
				bdevname(rdev->bdev,b),
				bdevname(refdev->bdev,b2));
			return -EINVAL;
		}
		ev1 = le64_to_cpu(sb->events);
		ev2 = le64_to_cpu(refsb->events);

		if (ev1 > ev2)
			return 1;
	}
	if (minor_version)
		rdev->size = ((rdev->bdev->bd_inode->i_size>>9) - le64_to_cpu(sb->data_offset)) / 2;
	else
		rdev->size = rdev->sb_offset;
	if (rdev->size < le64_to_cpu(sb->data_size)/2)
		return -EINVAL;
	rdev->size = le64_to_cpu(sb->data_size)/2;
	if (le32_to_cpu(sb->chunksize))
		rdev->size &= ~((sector_t)le32_to_cpu(sb->chunksize)/2 - 1);
	return 0;
}

static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
	struct mdp_superblock_1 *sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

	rdev->raid_disk = -1;
	rdev->flags = 0;
	if (mddev->raid_disks == 0) {
		mddev->major_version = 1;
		mddev->patch_version = 0;
		mddev->persistent = 1;
		mddev->chunk_size = le32_to_cpu(sb->chunksize) << 9;
		mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
		mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
		mddev->level = le32_to_cpu(sb->level);
		mddev->layout = le32_to_cpu(sb->layout);
		mddev->raid_disks = le32_to_cpu(sb->raid_disks);
		mddev->size = le64_to_cpu(sb->size)/2;
		mddev->events = le64_to_cpu(sb->events);
		mddev->bitmap_offset = 0;
		mddev->default_bitmap_offset = 1024;

		mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
		memcpy(mddev->uuid, sb->set_uuid, 16);

		mddev->max_disks =  (4096-256)/2;

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
		    mddev->bitmap_file == NULL ) {
			if (mddev->level != 1 && mddev->level != 5 && mddev->level != 6
			    && mddev->level != 10) {
				printk(KERN_WARNING "md: bitmaps not supported for this level.\n");
				return -EINVAL;
			}
			mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset);
		}
	} else if (mddev->pers == NULL) {
		/* Insist on good event counter while assembling */
		__u64 ev1 = le64_to_cpu(sb->events);
		++ev1;
		if (ev1 < mddev->events)
			return -EINVAL;
	} else if (mddev->bitmap) {
		/* If adding to array with a bitmap, then we can accept an
		 * older device, but not too old.
		 */
		__u64 ev1 = le64_to_cpu(sb->events);
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
	} else /* just a hot-add of a new device, leave raid_disk at -1 */
		return 0;

	if (mddev->level != LEVEL_MULTIPATH) {
		int role;
		rdev->desc_nr = le32_to_cpu(sb->dev_number);
		role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
		switch(role) {
		case 0xffff: /* spare */
			break;
		case 0xfffe: /* faulty */
			set_bit(Faulty, &rdev->flags);
			break;
		default:
			set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = role;
			break;
		}
		if (sb->devflags & WriteMostly1)
			set_bit(WriteMostly, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);

	return 0;
}

static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
{
	struct mdp_superblock_1 *sb;
	struct list_head *tmp;
	mdk_rdev_t *rdev2;
	int max_dev, i;
	/* make rdev->sb match mddev and rdev data. */

	sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

	sb->feature_map = 0;
	sb->pad0 = 0;
	memset(sb->pad1, 0, sizeof(sb->pad1));
	memset(sb->pad2, 0, sizeof(sb->pad2));
	memset(sb->pad3, 0, sizeof(sb->pad3));

	sb->utime = cpu_to_le64((__u64)mddev->utime);
	sb->events = cpu_to_le64(mddev->events);
	if (mddev->in_sync)
		sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
	else
		sb->resync_offset = cpu_to_le64(0);

	if (mddev->bitmap && mddev->bitmap_file == NULL) {
		sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset);
		sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
	}

	max_dev = 0;
	ITERATE_RDEV(mddev,rdev2,tmp)
		if (rdev2->desc_nr+1 > max_dev)
			max_dev = rdev2->desc_nr+1;

	sb->max_dev = cpu_to_le32(max_dev);
	for (i=0; i<max_dev;i++)
		sb->dev_roles[i] = cpu_to_le16(0xfffe);

	ITERATE_RDEV(mddev,rdev2,tmp) {
		i = rdev2->desc_nr;
		if (test_bit(Faulty, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(0xfffe);
		else if (test_bit(In_sync, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else
			sb->dev_roles[i] = cpu_to_le16(0xffff);
	}

	sb->recovery_offset = cpu_to_le64(0); /* not supported yet */
	sb->sb_csum = calc_sb_1_csum(sb);
}


static struct super_type super_types[] = {
	[0] = {
		.name	= "0.90.0",
		.owner	= THIS_MODULE,
		.load_super	= super_90_load,
		.validate_super	= super_90_validate,
		.sync_super	= super_90_sync,
	},
	[1] = {
		.name	= "md-1",
		.owner	= THIS_MODULE,
		.load_super	= super_1_load,
		.validate_super	= super_1_validate,
		.sync_super	= super_1_sync,
	},
};

static mdk_rdev_t * match_dev_unit(mddev_t *mddev, mdk_rdev_t *dev)
{
	struct list_head *tmp;
	mdk_rdev_t *rdev;

	ITERATE_RDEV(mddev,rdev,tmp)
		if (rdev->bdev->bd_contains == dev->bdev->bd_contains)
			return rdev;

	return NULL;
}

static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
{
	struct list_head *tmp;
	mdk_rdev_t *rdev;

	ITERATE_RDEV(mddev1,rdev,tmp)
		if (match_dev_unit(mddev2, rdev))
			return 1;

	return 0;
}

static LIST_HEAD(pending_raid_disks);

static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
{
	mdk_rdev_t *same_pdev;
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	struct kobject *ko;

	if (rdev->mddev) {
		MD_BUG();
		return -EINVAL;
	}
	same_pdev = match_dev_unit(mddev, rdev);
	if (same_pdev)
		printk(KERN_WARNING
			"%s: WARNING: %s appears to be on the same physical"
			" disk as %s. True protection against single-disk"
			" failure might be compromised.\n",
			mdname(mddev), bdevname(rdev->bdev,b),
			bdevname(same_pdev->bdev,b2));

	/* Verify rdev->desc_nr is unique.
	 * If it is -1, assign a free number, else
	 * check number is not in use
	 */
	if (rdev->desc_nr < 0) {
		int choice = 0;
		if (mddev->pers) choice = mddev->raid_disks;
		while (find_rdev_nr(mddev, choice))
			choice++;
		rdev->desc_nr = choice;
	} else {
		if (find_rdev_nr(mddev, rdev->desc_nr))
			return -EBUSY;
	}
	bdevname(rdev->bdev,b);
	if (kobject_set_name(&rdev->kobj, "dev-%s", b) < 0)
		return -ENOMEM;

	list_add(&rdev->same_set, &mddev->disks);
	rdev->mddev = mddev;
	printk(KERN_INFO "md: bind<%s>\n", b);

	rdev->kobj.parent = &mddev->kobj;
	kobject_add(&rdev->kobj);

	if (rdev->bdev->bd_part)
		ko = &rdev->bdev->bd_part->kobj;
	else
		ko = &rdev->bdev->bd_disk->kobj;
	sysfs_create_link(&rdev->kobj, ko, "block");
	return 0;
}

static void unbind_rdev_from_array(mdk_rdev_t * rdev)
{
	char b[BDEVNAME_SIZE];
	if (!rdev->mddev) {
		MD_BUG();
		return;
	}
	list_del_init(&rdev->same_set);
	printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
	rdev->mddev = NULL;
	sysfs_remove_link(&rdev->kobj, "block");
	kobject_del(&rdev->kobj);
}

/*
 * prevent the device from being mounted, repartitioned or
 * otherwise reused by a RAID array (or any other kernel
 * subsystem), by bd_claiming the device.
 */
static int lock_rdev(mdk_rdev_t *rdev, dev_t dev)
{
	int err = 0;
	struct block_device *bdev;
	char b[BDEVNAME_SIZE];

	bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE);
	if (IS_ERR(bdev)) {
		printk(KERN_ERR "md: could not open %s.\n",
			__bdevname(dev, b));
		return PTR_ERR(bdev);
	}
	err = bd_claim(bdev, rdev);
	if (err) {
		printk(KERN_ERR "md: could not bd_claim %s.\n",
			bdevname(bdev, b));
		blkdev_put(bdev);
		return err;
	}
	rdev->bdev = bdev;
	return err;
}

static void unlock_rdev(mdk_rdev_t *rdev)
{
	struct block_device *bdev = rdev->bdev;
	rdev->bdev = NULL;
	if (!bdev)
		MD_BUG();
	bd_release(bdev);
	blkdev_put(bdev);
}

void md_autodetect_dev(dev_t dev);

static void export_rdev(mdk_rdev_t * rdev)
{
	char b[BDEVNAME_SIZE];
	printk(KERN_INFO "md: export_rdev(%s)\n",
		bdevname(rdev->bdev,b));
	if (rdev->mddev)
		MD_BUG();
	free_disk_sb(rdev);
	list_del_init(&rdev->same_set);
#ifndef MODULE
	md_autodetect_dev(rdev->bdev->bd_dev);
#endif
	unlock_rdev(rdev);
	kobject_put(&rdev->kobj);
}

static void kick_rdev_from_array(mdk_rdev_t * rdev)
{
	unbind_rdev_from_array(rdev);
	export_rdev(rdev);
}

static void export_array(mddev_t *mddev)
{
	struct list_head *tmp;
	mdk_rdev_t *rdev;

	ITERATE_RDEV(mddev,rdev,tmp) {
		if (!rdev->mddev) {
			MD_BUG();
			continue;
		}
		kick_rdev_from_array(rdev);
	}
	if (!list_empty(&mddev->disks))
		MD_BUG();
	mddev->raid_disks = 0;
	mddev->major_version = 0;
}

static void print_desc(mdp_disk_t *desc)
{
	printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number,
		desc->major,desc->minor,desc->raid_disk,desc->state);
}

static void print_sb(mdp_super_t *sb)
{
	int i;

	printk(KERN_INFO
		"md:  SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n",
		sb->major_version, sb->minor_version, sb->patch_version,
		sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3,
		sb->ctime);
	printk(KERN_INFO "md:     L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n",
		sb->level, sb->size, sb->nr_disks, sb->raid_disks,
		sb->md_minor, sb->layout, sb->chunk_size);
	printk(KERN_INFO "md:     UT:%08x ST:%d AD:%d WD:%d"
		" FD:%d SD:%d CSUM:%08x E:%08lx\n",
		sb->utime, sb->state, sb->active_disks, sb->working_disks,
		sb->failed_disks, sb->spare_disks,
		sb->sb_csum, (unsigned long)sb->events_lo);

	printk(KERN_INFO);
	for (i = 0; i < MD_SB_DISKS; i++) {
		mdp_disk_t *desc;

		desc = sb->disks + i;
		if (desc->number || desc->major || desc->minor ||
		    desc->raid_disk || (desc->state && (desc->state != 4))) {
			printk("     D %2d: ", i);
			print_desc(desc);
		}
	}
	printk(KERN_INFO "md:     THIS: ");
	print_desc(&sb->this_disk);

}

static void print_rdev(mdk_rdev_t *rdev)
{
	char b[BDEVNAME_SIZE];
	printk(KERN_INFO "md: rdev %s, SZ:%08llu F:%d S:%d DN:%u\n",
		bdevname(rdev->bdev,b), (unsigned long long)rdev->size,
		test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags),
		rdev->desc_nr);
	if (rdev->sb_loaded) {
		printk(KERN_INFO "md: rdev superblock:\n");
		print_sb((mdp_super_t*)page_address(rdev->sb_page));
	} else
		printk(KERN_INFO "md: no rdev superblock!\n");
}

void md_print_devices(void)
{
	struct list_head *tmp, *tmp2;
	mdk_rdev_t *rdev;
	mddev_t *mddev;
	char b[BDEVNAME_SIZE];

	printk("\n");
	printk("md:	**********************************\n");
	printk("md:	* <COMPLETE RAID STATE PRINTOUT> *\n");
	printk("md:	**********************************\n");
	ITERATE_MDDEV(mddev,tmp) {

		if (mddev->bitmap)
			bitmap_print_sb(mddev->bitmap);
		else
			printk("%s: ", mdname(mddev));
		ITERATE_RDEV(mddev,rdev,tmp2)
			printk("<%s>", bdevname(rdev->bdev,b));
		printk("\n");

		ITERATE_RDEV(mddev,rdev,tmp2)
			print_rdev(rdev);
	}
	printk("md:	**********************************\n");
	printk("\n");
}


static void sync_sbs(mddev_t * mddev)
{
	mdk_rdev_t *rdev;
	struct list_head *tmp;

	ITERATE_RDEV(mddev,rdev,tmp) {
		super_types[mddev->major_version].
			sync_super(mddev, rdev);
		rdev->sb_loaded = 1;
	}
}

static void md_update_sb(mddev_t * mddev)
{
	int err;
	struct list_head *tmp;
	mdk_rdev_t *rdev;
	int sync_req;

repeat:
	spin_lock_irq(&mddev->write_lock);
	sync_req = mddev->in_sync;
	mddev->utime = get_seconds();
	mddev->events ++;

	if (!mddev->events) {
		/*
		 * oops, this 64-bit counter should never wrap.
		 * Either we are in around ~1 trillion A.C., assuming
		 * 1 reboot per second, or we have a bug:
		 */
		MD_BUG();
		mddev->events --;
	}
	mddev->sb_dirty = 2;
	sync_sbs(mddev);

	/*
	 * do not write anything to disk if using
	 * nonpersistent superblocks
	 */
	if (!mddev->persistent) {
		mddev->sb_dirty = 0;
		spin_unlock_irq(&mddev->write_lock);
		wake_up(&mddev->sb_wait);
		return;
	}
	spin_unlock_irq(&mddev->write_lock);

	dprintk(KERN_INFO
		"md: updating %s RAID superblock on device (in sync %d)\n",
		mdname(mddev),mddev->in_sync);

	err = bitmap_update_sb(mddev->bitmap);
	ITERATE_RDEV(mddev,rdev,tmp) {
		char b[BDEVNAME_SIZE];
		dprintk(KERN_INFO "md: ");
		if (test_bit(Faulty, &rdev->flags))
			dprintk("(skipping faulty ");

		dprintk("%s ", bdevname(rdev->bdev,b));
		if (!test_bit(Faulty, &rdev->flags)) {
			md_super_write(mddev,rdev,
				       rdev->sb_offset<<1, rdev->sb_size,
				       rdev->sb_page);
			dprintk(KERN_INFO "(write) %s's sb offset: %llu\n",
				bdevname(rdev->bdev,b),
				(unsigned long long)rdev->sb_offset);

		} else
			dprintk(")\n");
		if (mddev->level == LEVEL_MULTIPATH)
			/* only need to write one superblock... */
			break;
	}
	md_super_wait(mddev);
	/* if there was a failure, sb_dirty was set to 1, and we re-write super */

	spin_lock_irq(&mddev->write_lock);
	if (mddev->in_sync != sync_req || mddev->sb_dirty == 1) {
		/* have to write it out again */
		spin_unlock_irq(&mddev->write_lock);
		goto repeat;
	}
	mddev->sb_dirty = 0;
	spin_unlock_irq(&mddev->write_lock);
	wake_up(&mddev->sb_wait);

}

struct rdev_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(mdk_rdev_t *, char *);
	ssize_t (*store)(mdk_rdev_t *, const char *, size_t);
};

static ssize_t
state_show(mdk_rdev_t *rdev, char *page)
{
	char *sep = "";
	int len=0;

	if (test_bit(Faulty, &rdev->flags)) {
		len+= sprintf(page+len, "%sfaulty",sep);
		sep = ",";
	}
	if (test_bit(In_sync, &rdev->flags)) {
		len += sprintf(page+len, "%sin_sync",sep);
		sep = ",";
	}
	if (!test_bit(Faulty, &rdev->flags) &&
	    !test_bit(In_sync, &rdev->flags)) {
		len += sprintf(page+len, "%sspare", sep);
		sep = ",";
	}
	return len+sprintf(page+len, "\n");
}

1548
96de1e66
N
1549static struct rdev_sysfs_entry
1550rdev_state = __ATTR_RO(state);
86e6ffdd
N
1551
1552static ssize_t
96de1e66 1553super_show(mdk_rdev_t *rdev, char *page)
86e6ffdd
N
1554{
1555 if (rdev->sb_loaded && rdev->sb_size) {
1556 memcpy(page, page_address(rdev->sb_page), rdev->sb_size);
1557 return rdev->sb_size;
1558 } else
1559 return 0;
1560}
96de1e66
N
1561static struct rdev_sysfs_entry rdev_super = __ATTR_RO(super);
1562
86e6ffdd
N
1563static struct attribute *rdev_default_attrs[] = {
1564 &rdev_state.attr,
1565 &rdev_super.attr,
1566 NULL,
1567};
1568static ssize_t
1569rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
1570{
1571 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
1572 mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
1573
1574 if (!entry->show)
1575 return -EIO;
1576 return entry->show(rdev, page);
1577}
1578
1579static ssize_t
1580rdev_attr_store(struct kobject *kobj, struct attribute *attr,
1581 const char *page, size_t length)
1582{
1583 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
1584 mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
1585
1586 if (!entry->store)
1587 return -EIO;
1588 return entry->store(rdev, page, length);
1589}
1590
1591static void rdev_free(struct kobject *ko)
1592{
1593 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
1594 kfree(rdev);
1595}
1596static struct sysfs_ops rdev_sysfs_ops = {
1597 .show = rdev_attr_show,
1598 .store = rdev_attr_store,
1599};
1600static struct kobj_type rdev_ktype = {
1601 .release = rdev_free,
1602 .sysfs_ops = &rdev_sysfs_ops,
1603 .default_attrs = rdev_default_attrs,
1604};
1605
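#if 0
/*
 * Illustrative sketch, not part of the original file: the attributes
 * above appear under the per-rdev kobject that bind_rdev_to_array()
 * registers, e.g. /sys/block/md0/md/dev-sda1/state ("md0" and "sda1"
 * are hypothetical names).  Host code, not kernel code:
 */
#include <stdio.h>

static void example_print_rdev_state(void)
{
	char line[64];
	FILE *f = fopen("/sys/block/md0/md/dev-sda1/state", "r");

	if (f && fgets(line, sizeof(line), f))
		printf("state: %s", line);	/* "faulty", "in_sync" and/or "spare" */
	if (f)
		fclose(f);
}
#endif
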
/*
 * Import a device. If 'super_format' >= 0, then sanity check the superblock
 *
 * mark the device faulty if:
 *
 *   - the device is nonexistent (zero size)
 *   - the device has no valid superblock
 *
 * a faulty rdev _never_ has rdev->sb set.
 */
static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor)
{
	char b[BDEVNAME_SIZE];
	int err;
	mdk_rdev_t *rdev;
	sector_t size;

	rdev = (mdk_rdev_t *) kmalloc(sizeof(*rdev), GFP_KERNEL);
	if (!rdev) {
		printk(KERN_ERR "md: could not alloc mem for new device!\n");
		return ERR_PTR(-ENOMEM);
	}
	memset(rdev, 0, sizeof(*rdev));

	if ((err = alloc_disk_sb(rdev)))
		goto abort_free;

	err = lock_rdev(rdev, newdev);
	if (err)
		goto abort_free;

	rdev->kobj.parent = NULL;
	rdev->kobj.ktype = &rdev_ktype;
	kobject_init(&rdev->kobj);

	rdev->desc_nr = -1;
	rdev->flags = 0;
	rdev->data_offset = 0;
	atomic_set(&rdev->nr_pending, 0);
	atomic_set(&rdev->read_errors, 0);

	size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
	if (!size) {
		printk(KERN_WARNING
			"md: %s has zero or unknown size, marking faulty!\n",
			bdevname(rdev->bdev,b));
		err = -EINVAL;
		goto abort_free;
	}

	if (super_format >= 0) {
		err = super_types[super_format].
			load_super(rdev, NULL, super_minor);
		if (err == -EINVAL) {
			printk(KERN_WARNING
				"md: %s has invalid sb, not importing!\n",
				bdevname(rdev->bdev,b));
			goto abort_free;
		}
		if (err < 0) {
			printk(KERN_WARNING
				"md: could not read %s's sb, not importing!\n",
				bdevname(rdev->bdev,b));
			goto abort_free;
		}
	}
	INIT_LIST_HEAD(&rdev->same_set);

	return rdev;

abort_free:
	if (rdev->sb_page) {
		if (rdev->bdev)
			unlock_rdev(rdev);
		free_disk_sb(rdev);
	}
	kfree(rdev);
	return ERR_PTR(err);
}

/*
 * Check a full RAID array for plausibility
 */


static void analyze_sbs(mddev_t * mddev)
{
	int i;
	struct list_head *tmp;
	mdk_rdev_t *rdev, *freshest;
	char b[BDEVNAME_SIZE];

	freshest = NULL;
	ITERATE_RDEV(mddev,rdev,tmp)
		switch (super_types[mddev->major_version].
			load_super(rdev, freshest, mddev->minor_version)) {
		case 1:
			freshest = rdev;
			break;
		case 0:
			break;
		default:
			printk( KERN_ERR \
				"md: fatal superblock inconsistency in %s"
				" -- removing from array\n",
				bdevname(rdev->bdev,b));
			kick_rdev_from_array(rdev);
		}


	super_types[mddev->major_version].
		validate_super(mddev, freshest);

	i = 0;
	ITERATE_RDEV(mddev,rdev,tmp) {
		if (rdev != freshest)
			if (super_types[mddev->major_version].
			    validate_super(mddev, rdev)) {
				printk(KERN_WARNING "md: kicking non-fresh %s"
					" from array!\n",
					bdevname(rdev->bdev,b));
				kick_rdev_from_array(rdev);
				continue;
			}
		if (mddev->level == LEVEL_MULTIPATH) {
			rdev->desc_nr = i++;
			rdev->raid_disk = rdev->desc_nr;
			set_bit(In_sync, &rdev->flags);
		}
	}



	if (mddev->recovery_cp != MaxSector &&
	    mddev->level >= 1)
		printk(KERN_ERR "md: %s: raid array is not clean"
		       " -- starting background reconstruction\n",
		       mdname(mddev));

}

static ssize_t
level_show(mddev_t *mddev, char *page)
{
	mdk_personality_t *p = mddev->pers;
	if (p == NULL && mddev->raid_disks == 0)
		return 0;
	if (mddev->level >= 0)
		return sprintf(page, "raid%d\n", mddev->level);
	else
		return sprintf(page, "%s\n", p->name);
}

static struct md_sysfs_entry md_level = __ATTR_RO(level);

static ssize_t
raid_disks_show(mddev_t *mddev, char *page)
{
	if (mddev->raid_disks == 0)
		return 0;
	return sprintf(page, "%d\n", mddev->raid_disks);
}

static struct md_sysfs_entry md_raid_disks = __ATTR_RO(raid_disks);

static ssize_t
action_show(mddev_t *mddev, char *page)
{
	char *type = "idle";
	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
	    test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) {
		if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
			if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
				type = "resync";
			else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
				type = "check";
			else
				type = "repair";
		} else
			type = "recover";
	}
	return sprintf(page, "%s\n", type);
}

static ssize_t
action_store(mddev_t *mddev, const char *page, size_t len)
{
	if (!mddev->pers || !mddev->pers->sync_request)
		return -EINVAL;

	if (strcmp(page, "idle")==0 || strcmp(page, "idle\n")==0) {
		if (mddev->sync_thread) {
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			md_unregister_thread(mddev->sync_thread);
			mddev->sync_thread = NULL;
			mddev->recovery = 0;
		}
		return len;
	}

	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
	    test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
		return -EBUSY;
	if (strcmp(page, "resync")==0 || strcmp(page, "resync\n")==0 ||
	    strcmp(page, "recover")==0 || strcmp(page, "recover\n")==0)
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	else {
		if (strcmp(page, "check")==0 || strcmp(page, "check\n")==0)
			set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
		else if (strcmp(page, "repair")!=0 && strcmp(page, "repair\n")!=0)
			return -EINVAL;
		set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
		set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	}
	md_wakeup_thread(mddev->thread);
	return len;
}

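#if 0
/*
 * Illustrative sketch, not part of the original file: action_store()
 * above backs the sync_action attribute, so user space can start a
 * consistency check like this ("md0" is a hypothetical array).  Host
 * code, not kernel code:
 */
#include <stdio.h>

static int example_start_check(void)
{
	FILE *f = fopen("/sys/block/md0/md/sync_action", "w");

	if (!f)
		return -1;
	fputs("check\n", f);	/* "idle", "resync", "recover", "repair" also accepted */
	return fclose(f);
}
#endif
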
static ssize_t
mismatch_cnt_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long) mddev->resync_mismatches);
}

static struct md_sysfs_entry
md_scan_mode = __ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);


static struct md_sysfs_entry
md_mismatches = __ATTR_RO(mismatch_cnt);

static struct attribute *md_default_attrs[] = {
	&md_level.attr,
	&md_raid_disks.attr,
	NULL,
};

static struct attribute *md_redundancy_attrs[] = {
	&md_scan_mode.attr,
	&md_mismatches.attr,
	NULL,
};
static struct attribute_group md_redundancy_group = {
	.name = NULL,
	.attrs = md_redundancy_attrs,
};


static ssize_t
md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
	mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
	ssize_t rv;

	if (!entry->show)
		return -EIO;
	mddev_lock(mddev);
	rv = entry->show(mddev, page);
	mddev_unlock(mddev);
	return rv;
}

static ssize_t
md_attr_store(struct kobject *kobj, struct attribute *attr,
	      const char *page, size_t length)
{
	struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
	mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
	ssize_t rv;

	if (!entry->store)
		return -EIO;
	mddev_lock(mddev);
	rv = entry->store(mddev, page, length);
	mddev_unlock(mddev);
	return rv;
}

static void md_free(struct kobject *ko)
{
	mddev_t *mddev = container_of(ko, mddev_t, kobj);
	kfree(mddev);
}

static struct sysfs_ops md_sysfs_ops = {
	.show	= md_attr_show,
	.store	= md_attr_store,
};
static struct kobj_type md_ktype = {
	.release	= md_free,
	.sysfs_ops	= &md_sysfs_ops,
	.default_attrs	= md_default_attrs,
};

int mdp_major = 0;

static struct kobject *md_probe(dev_t dev, int *part, void *data)
{
	static DECLARE_MUTEX(disks_sem);
	mddev_t *mddev = mddev_find(dev);
	struct gendisk *disk;
	int partitioned = (MAJOR(dev) != MD_MAJOR);
	int shift = partitioned ? MdpMinorShift : 0;
	int unit = MINOR(dev) >> shift;

	if (!mddev)
		return NULL;

	down(&disks_sem);
	if (mddev->gendisk) {
		up(&disks_sem);
		mddev_put(mddev);
		return NULL;
	}
	disk = alloc_disk(1 << shift);
	if (!disk) {
		up(&disks_sem);
		mddev_put(mddev);
		return NULL;
	}
	disk->major = MAJOR(dev);
	disk->first_minor = unit << shift;
	if (partitioned) {
		sprintf(disk->disk_name, "md_d%d", unit);
		sprintf(disk->devfs_name, "md/d%d", unit);
	} else {
		sprintf(disk->disk_name, "md%d", unit);
		sprintf(disk->devfs_name, "md/%d", unit);
	}
	disk->fops = &md_fops;
	disk->private_data = mddev;
	disk->queue = mddev->queue;
	add_disk(disk);
	mddev->gendisk = disk;
	up(&disks_sem);
	mddev->kobj.parent = &disk->kobj;
	mddev->kobj.k_name = NULL;
	snprintf(mddev->kobj.name, KOBJ_NAME_LEN, "%s", "md");
	mddev->kobj.ktype = &md_ktype;
	kobject_register(&mddev->kobj);
	return NULL;
}

void md_wakeup_thread(mdk_thread_t *thread);

static void md_safemode_timeout(unsigned long data)
{
	mddev_t *mddev = (mddev_t *) data;

	mddev->safemode = 1;
	md_wakeup_thread(mddev->thread);
}

static int start_dirty_degraded;

1964static int do_md_run(mddev_t * mddev)
1965{
1966 int pnum, err;
1967 int chunk_size;
1968 struct list_head *tmp;
1969 mdk_rdev_t *rdev;
1970 struct gendisk *disk;
1971 char b[BDEVNAME_SIZE];
1972
a757e64c
N
1973 if (list_empty(&mddev->disks))
1974 /* cannot run an array with no devices.. */
1da177e4 1975 return -EINVAL;
1da177e4
LT
1976
1977 if (mddev->pers)
1978 return -EBUSY;
1979
1980 /*
1981 * Analyze all RAID superblock(s)
1982 */
a757e64c
N
1983 if (!mddev->raid_disks)
1984 analyze_sbs(mddev);
1da177e4
LT
1985
1986 chunk_size = mddev->chunk_size;
1987 pnum = level_to_pers(mddev->level);
1988
1989 if ((pnum != MULTIPATH) && (pnum != RAID1)) {
1990 if (!chunk_size) {
1991 /*
1992 * 'default chunksize' in the old md code used to
1993 * be PAGE_SIZE, baaad.
 1994 * We abort here to be on the safe side. We don't
1995 * want to continue the bad practice.
1996 */
1997 printk(KERN_ERR
1998 "no chunksize specified, see 'man raidtab'\n");
1999 return -EINVAL;
2000 }
2001 if (chunk_size > MAX_CHUNK_SIZE) {
2002 printk(KERN_ERR "too big chunk_size: %d > %d\n",
2003 chunk_size, MAX_CHUNK_SIZE);
2004 return -EINVAL;
2005 }
2006 /*
 2007 * chunk-size has to be a power of 2 and a multiple of PAGE_SIZE
2008 */
2009 if ( (1 << ffz(~chunk_size)) != chunk_size) {
a757e64c 2010 printk(KERN_ERR "chunk_size of %d not valid\n", chunk_size);
1da177e4
LT
2011 return -EINVAL;
2012 }
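		/*
		 * Worked example of the ffz() test above: ~chunk_size has
		 * its first zero bit where chunk_size has its first set bit,
		 * so (1 << ffz(~chunk_size)) recovers the lowest set bit and
		 * equals chunk_size only for powers of two:
		 *
		 *	chunk_size = 65536 (0x10000): 1 << 16 == 65536, valid
		 *	chunk_size = 65537 (0x10001): 1 << 0  == 1, rejected
		 */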
2013 if (chunk_size < PAGE_SIZE) {
2014 printk(KERN_ERR "too small chunk_size: %d < %ld\n",
2015 chunk_size, PAGE_SIZE);
2016 return -EINVAL;
2017 }
2018
2019 /* devices must have minimum size of one chunk */
2020 ITERATE_RDEV(mddev,rdev,tmp) {
b2d444d7 2021 if (test_bit(Faulty, &rdev->flags))
1da177e4
LT
2022 continue;
2023 if (rdev->size < chunk_size / 1024) {
2024 printk(KERN_WARNING
2025 "md: Dev %s smaller than chunk_size:"
2026 " %lluk < %dk\n",
2027 bdevname(rdev->bdev,b),
2028 (unsigned long long)rdev->size,
2029 chunk_size / 1024);
2030 return -EINVAL;
2031 }
2032 }
2033 }
2034
1da177e4
LT
2035#ifdef CONFIG_KMOD
2036 if (!pers[pnum])
2037 {
2038 request_module("md-personality-%d", pnum);
2039 }
2040#endif
2041
2042 /*
 2043 * Drop all container device buffers; from now on
2044 * the only valid external interface is through the md
2045 * device.
2046 * Also find largest hardsector size
2047 */
2048 ITERATE_RDEV(mddev,rdev,tmp) {
b2d444d7 2049 if (test_bit(Faulty, &rdev->flags))
1da177e4
LT
2050 continue;
2051 sync_blockdev(rdev->bdev);
2052 invalidate_bdev(rdev->bdev, 0);
2053 }
2054
2055 md_probe(mddev->unit, NULL, NULL);
2056 disk = mddev->gendisk;
2057 if (!disk)
2058 return -ENOMEM;
2059
2060 spin_lock(&pers_lock);
2061 if (!pers[pnum] || !try_module_get(pers[pnum]->owner)) {
2062 spin_unlock(&pers_lock);
2063 printk(KERN_WARNING "md: personality %d is not loaded!\n",
2064 pnum);
2065 return -EINVAL;
2066 }
2067
2068 mddev->pers = pers[pnum];
2069 spin_unlock(&pers_lock);
2070
657390d2 2071 mddev->recovery = 0;
1da177e4 2072 mddev->resync_max_sectors = mddev->size << 1; /* may be overridden by personality */
a9701a30 2073 mddev->barriers_work = 1;
6ff8d8ec 2074 mddev->ok_start_degraded = start_dirty_degraded;
1da177e4 2075
f91de92e
N
2076 if (start_readonly)
2077 mddev->ro = 2; /* read-only, but switch on first write */
2078
b15c2e57
N
2079 err = mddev->pers->run(mddev);
2080 if (!err && mddev->pers->sync_request) {
2081 err = bitmap_create(mddev);
2082 if (err) {
2083 printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
2084 mdname(mddev), err);
2085 mddev->pers->stop(mddev);
2086 }
2087 }
1da177e4
LT
2088 if (err) {
2089 printk(KERN_ERR "md: pers->run() failed ...\n");
2090 module_put(mddev->pers->owner);
2091 mddev->pers = NULL;
32a7627c
N
2092 bitmap_destroy(mddev);
2093 return err;
1da177e4 2094 }
411036fa
N
2095 if (mddev->pers->sync_request)
2096 sysfs_create_group(&mddev->kobj, &md_redundancy_group);
fd9d49ca
N
2097 else if (mddev->ro == 2) /* auto-readonly not meaningful */
2098 mddev->ro = 0;
2099
1da177e4
LT
2100 atomic_set(&mddev->writes_pending,0);
2101 mddev->safemode = 0;
2102 mddev->safemode_timer.function = md_safemode_timeout;
2103 mddev->safemode_timer.data = (unsigned long) mddev;
2104 mddev->safemode_delay = (20 * HZ)/1000 +1; /* 20 msec delay */
2105 mddev->in_sync = 1;
86e6ffdd
N
2106
2107 ITERATE_RDEV(mddev,rdev,tmp)
2108 if (rdev->raid_disk >= 0) {
2109 char nm[20];
2110 sprintf(nm, "rd%d", rdev->raid_disk);
2111 sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
2112 }
1da177e4
LT
2113
2114 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
005eca5e 2115 md_wakeup_thread(mddev->thread);
1da177e4
LT
2116
2117 if (mddev->sb_dirty)
2118 md_update_sb(mddev);
2119
2120 set_capacity(disk, mddev->array_size<<1);
2121
2122 /* If we call blk_queue_make_request here, it will
2123 * re-initialise max_sectors etc which may have been
 2124 * refined inside ->run(). So just set the bits we need to set.
 2125 * Most initialisation happened when we called
2126 * blk_queue_make_request(..., md_fail_request)
2127 * earlier.
2128 */
2129 mddev->queue->queuedata = mddev;
2130 mddev->queue->make_request_fn = mddev->pers->make_request;
2131
2132 mddev->changed = 1;
d7603b7e 2133 md_new_event(mddev);
1da177e4
LT
2134 return 0;
2135}
2136
2137static int restart_array(mddev_t *mddev)
2138{
2139 struct gendisk *disk = mddev->gendisk;
2140 int err;
2141
2142 /*
2143 * Complain if it has no devices
2144 */
2145 err = -ENXIO;
2146 if (list_empty(&mddev->disks))
2147 goto out;
2148
2149 if (mddev->pers) {
2150 err = -EBUSY;
2151 if (!mddev->ro)
2152 goto out;
2153
2154 mddev->safemode = 0;
2155 mddev->ro = 0;
2156 set_disk_ro(disk, 0);
2157
2158 printk(KERN_INFO "md: %s switched to read-write mode.\n",
2159 mdname(mddev));
2160 /*
2161 * Kick recovery or resync if necessary
2162 */
2163 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2164 md_wakeup_thread(mddev->thread);
2165 err = 0;
2166 } else {
2167 printk(KERN_ERR "md: %s has no personality assigned.\n",
2168 mdname(mddev));
2169 err = -EINVAL;
2170 }
2171
2172out:
2173 return err;
2174}
2175
2176static int do_md_stop(mddev_t * mddev, int ro)
2177{
2178 int err = 0;
2179 struct gendisk *disk = mddev->gendisk;
2180
2181 if (mddev->pers) {
2182 if (atomic_read(&mddev->active)>2) {
 2183 printk(KERN_WARNING "md: %s still in use.\n", mdname(mddev));
2184 return -EBUSY;
2185 }
2186
2187 if (mddev->sync_thread) {
2188 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2189 md_unregister_thread(mddev->sync_thread);
2190 mddev->sync_thread = NULL;
2191 }
2192
2193 del_timer_sync(&mddev->safemode_timer);
2194
2195 invalidate_partition(disk, 0);
2196
2197 if (ro) {
2198 err = -ENXIO;
f91de92e 2199 if (mddev->ro==1)
1da177e4
LT
2200 goto out;
2201 mddev->ro = 1;
2202 } else {
6b8b3e8a 2203 bitmap_flush(mddev);
a9701a30 2204 md_super_wait(mddev);
1da177e4
LT
2205 if (mddev->ro)
2206 set_disk_ro(disk, 0);
2207 blk_queue_make_request(mddev->queue, md_fail_request);
2208 mddev->pers->stop(mddev);
411036fa
N
2209 if (mddev->pers->sync_request)
2210 sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
2211
1da177e4
LT
2212 module_put(mddev->pers->owner);
2213 mddev->pers = NULL;
2214 if (mddev->ro)
2215 mddev->ro = 0;
2216 }
2217 if (!mddev->in_sync) {
2218 /* mark array as shutdown cleanly */
2219 mddev->in_sync = 1;
2220 md_update_sb(mddev);
2221 }
2222 if (ro)
2223 set_disk_ro(disk, 1);
2224 }
32a7627c
N
2225
2226 bitmap_destroy(mddev);
2227 if (mddev->bitmap_file) {
2228 atomic_set(&mddev->bitmap_file->f_dentry->d_inode->i_writecount, 1);
2229 fput(mddev->bitmap_file);
2230 mddev->bitmap_file = NULL;
2231 }
9223214e 2232 mddev->bitmap_offset = 0;
32a7627c 2233
1da177e4
LT
2234 /*
2235 * Free resources if final stop
2236 */
2237 if (!ro) {
86e6ffdd
N
2238 mdk_rdev_t *rdev;
2239 struct list_head *tmp;
1da177e4
LT
2240 struct gendisk *disk;
2241 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
2242
86e6ffdd
N
2243 ITERATE_RDEV(mddev,rdev,tmp)
2244 if (rdev->raid_disk >= 0) {
2245 char nm[20];
2246 sprintf(nm, "rd%d", rdev->raid_disk);
2247 sysfs_remove_link(&mddev->kobj, nm);
2248 }
2249
1da177e4
LT
2250 export_array(mddev);
2251
2252 mddev->array_size = 0;
2253 disk = mddev->gendisk;
2254 if (disk)
2255 set_capacity(disk, 0);
2256 mddev->changed = 1;
2257 } else
2258 printk(KERN_INFO "md: %s switched to read-only mode.\n",
2259 mdname(mddev));
2260 err = 0;
d7603b7e 2261 md_new_event(mddev);
1da177e4
LT
2262out:
2263 return err;
2264}
2265
2266static void autorun_array(mddev_t *mddev)
2267{
2268 mdk_rdev_t *rdev;
2269 struct list_head *tmp;
2270 int err;
2271
a757e64c 2272 if (list_empty(&mddev->disks))
1da177e4 2273 return;
1da177e4
LT
2274
2275 printk(KERN_INFO "md: running: ");
2276
2277 ITERATE_RDEV(mddev,rdev,tmp) {
2278 char b[BDEVNAME_SIZE];
2279 printk("<%s>", bdevname(rdev->bdev,b));
2280 }
2281 printk("\n");
2282
2283 err = do_md_run (mddev);
2284 if (err) {
2285 printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
2286 do_md_stop (mddev, 0);
2287 }
2288}
2289
2290/*
 2291 * let's try to run arrays based on all disks that have arrived
2292 * until now. (those are in pending_raid_disks)
2293 *
2294 * the method: pick the first pending disk, collect all disks with
2295 * the same UUID, remove all from the pending list and put them into
2296 * the 'same_array' list. Then order this list based on superblock
2297 * update time (freshest comes first), kick out 'old' disks and
2298 * compare superblocks. If everything's fine then run it.
2299 *
2300 * If "unit" is allocated, then bump its reference count
2301 */
2302static void autorun_devices(int part)
2303{
2304 struct list_head candidates;
2305 struct list_head *tmp;
2306 mdk_rdev_t *rdev0, *rdev;
2307 mddev_t *mddev;
2308 char b[BDEVNAME_SIZE];
2309
2310 printk(KERN_INFO "md: autorun ...\n");
2311 while (!list_empty(&pending_raid_disks)) {
2312 dev_t dev;
2313 rdev0 = list_entry(pending_raid_disks.next,
2314 mdk_rdev_t, same_set);
2315
2316 printk(KERN_INFO "md: considering %s ...\n",
2317 bdevname(rdev0->bdev,b));
2318 INIT_LIST_HEAD(&candidates);
2319 ITERATE_RDEV_PENDING(rdev,tmp)
2320 if (super_90_load(rdev, rdev0, 0) >= 0) {
2321 printk(KERN_INFO "md: adding %s ...\n",
2322 bdevname(rdev->bdev,b));
2323 list_move(&rdev->same_set, &candidates);
2324 }
2325 /*
2326 * now we have a set of devices, with all of them having
2327 * mostly sane superblocks. It's time to allocate the
2328 * mddev.
2329 */
2330 if (rdev0->preferred_minor < 0 || rdev0->preferred_minor >= MAX_MD_DEVS) {
2331 printk(KERN_INFO "md: unit number in %s is bad: %d\n",
2332 bdevname(rdev0->bdev, b), rdev0->preferred_minor);
2333 break;
2334 }
2335 if (part)
2336 dev = MKDEV(mdp_major,
2337 rdev0->preferred_minor << MdpMinorShift);
2338 else
2339 dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
2340
2341 md_probe(dev, NULL, NULL);
2342 mddev = mddev_find(dev);
2343 if (!mddev) {
2344 printk(KERN_ERR
2345 "md: cannot allocate memory for md drive.\n");
2346 break;
2347 }
2348 if (mddev_lock(mddev))
2349 printk(KERN_WARNING "md: %s locked, cannot run\n",
2350 mdname(mddev));
2351 else if (mddev->raid_disks || mddev->major_version
2352 || !list_empty(&mddev->disks)) {
2353 printk(KERN_WARNING
2354 "md: %s already running, cannot run %s\n",
2355 mdname(mddev), bdevname(rdev0->bdev,b));
2356 mddev_unlock(mddev);
2357 } else {
2358 printk(KERN_INFO "md: created %s\n", mdname(mddev));
2359 ITERATE_RDEV_GENERIC(candidates,rdev,tmp) {
2360 list_del_init(&rdev->same_set);
2361 if (bind_rdev_to_array(rdev, mddev))
2362 export_rdev(rdev);
2363 }
2364 autorun_array(mddev);
2365 mddev_unlock(mddev);
2366 }
2367 /* on success, candidates will be empty, on error
2368 * it won't...
2369 */
2370 ITERATE_RDEV_GENERIC(candidates,rdev,tmp)
2371 export_rdev(rdev);
2372 mddev_put(mddev);
2373 }
2374 printk(KERN_INFO "md: ... autorun DONE.\n");
2375}
2376
2377/*
2378 * import RAID devices based on one partition
2379 * if possible, the array gets run as well.
2380 */
2381
2382static int autostart_array(dev_t startdev)
2383{
2384 char b[BDEVNAME_SIZE];
2385 int err = -EINVAL, i;
2386 mdp_super_t *sb = NULL;
2387 mdk_rdev_t *start_rdev = NULL, *rdev;
2388
2389 start_rdev = md_import_device(startdev, 0, 0);
2390 if (IS_ERR(start_rdev))
2391 return err;
2392
2393
2394 /* NOTE: this can only work for 0.90.0 superblocks */
2395 sb = (mdp_super_t*)page_address(start_rdev->sb_page);
2396 if (sb->major_version != 0 ||
2397 sb->minor_version != 90 ) {
2398 printk(KERN_WARNING "md: can only autostart 0.90.0 arrays\n");
2399 export_rdev(start_rdev);
2400 return err;
2401 }
2402
b2d444d7 2403 if (test_bit(Faulty, &start_rdev->flags)) {
1da177e4
LT
2404 printk(KERN_WARNING
2405 "md: can not autostart based on faulty %s!\n",
2406 bdevname(start_rdev->bdev,b));
2407 export_rdev(start_rdev);
2408 return err;
2409 }
2410 list_add(&start_rdev->same_set, &pending_raid_disks);
2411
2412 for (i = 0; i < MD_SB_DISKS; i++) {
2413 mdp_disk_t *desc = sb->disks + i;
2414 dev_t dev = MKDEV(desc->major, desc->minor);
2415
2416 if (!dev)
2417 continue;
2418 if (dev == startdev)
2419 continue;
2420 if (MAJOR(dev) != desc->major || MINOR(dev) != desc->minor)
2421 continue;
2422 rdev = md_import_device(dev, 0, 0);
2423 if (IS_ERR(rdev))
2424 continue;
2425
2426 list_add(&rdev->same_set, &pending_raid_disks);
2427 }
2428
2429 /*
 2430 * possibly should return error codes here
2431 */
2432 autorun_devices(0);
2433 return 0;
2434
2435}
2436
2437
2438static int get_version(void __user * arg)
2439{
2440 mdu_version_t ver;
2441
2442 ver.major = MD_MAJOR_VERSION;
2443 ver.minor = MD_MINOR_VERSION;
2444 ver.patchlevel = MD_PATCHLEVEL_VERSION;
2445
2446 if (copy_to_user(arg, &ver, sizeof(ver)))
2447 return -EFAULT;
2448
2449 return 0;
2450}
2451
2452static int get_array_info(mddev_t * mddev, void __user * arg)
2453{
2454 mdu_array_info_t info;
2455 int nr,working,active,failed,spare;
2456 mdk_rdev_t *rdev;
2457 struct list_head *tmp;
2458
2459 nr=working=active=failed=spare=0;
2460 ITERATE_RDEV(mddev,rdev,tmp) {
2461 nr++;
b2d444d7 2462 if (test_bit(Faulty, &rdev->flags))
1da177e4
LT
2463 failed++;
2464 else {
2465 working++;
b2d444d7 2466 if (test_bit(In_sync, &rdev->flags))
1da177e4
LT
2467 active++;
2468 else
2469 spare++;
2470 }
2471 }
2472
2473 info.major_version = mddev->major_version;
2474 info.minor_version = mddev->minor_version;
2475 info.patch_version = MD_PATCHLEVEL_VERSION;
2476 info.ctime = mddev->ctime;
2477 info.level = mddev->level;
2478 info.size = mddev->size;
2479 info.nr_disks = nr;
2480 info.raid_disks = mddev->raid_disks;
2481 info.md_minor = mddev->md_minor;
2482 info.not_persistent= !mddev->persistent;
2483
2484 info.utime = mddev->utime;
2485 info.state = 0;
2486 if (mddev->in_sync)
2487 info.state = (1<<MD_SB_CLEAN);
36fa3063
N
2488 if (mddev->bitmap && mddev->bitmap_offset)
2489 info.state = (1<<MD_SB_BITMAP_PRESENT);
1da177e4
LT
2490 info.active_disks = active;
2491 info.working_disks = working;
2492 info.failed_disks = failed;
2493 info.spare_disks = spare;
2494
2495 info.layout = mddev->layout;
2496 info.chunk_size = mddev->chunk_size;
2497
2498 if (copy_to_user(arg, &info, sizeof(info)))
2499 return -EFAULT;
2500
2501 return 0;
2502}
2503
87162a28 2504static int get_bitmap_file(mddev_t * mddev, void __user * arg)
32a7627c
N
2505{
2506 mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
2507 char *ptr, *buf = NULL;
2508 int err = -ENOMEM;
2509
2510 file = kmalloc(sizeof(*file), GFP_KERNEL);
2511 if (!file)
2512 goto out;
2513
2514 /* bitmap disabled, zero the first byte and copy out */
2515 if (!mddev->bitmap || !mddev->bitmap->file) {
2516 file->pathname[0] = '\0';
2517 goto copy_out;
2518 }
2519
2520 buf = kmalloc(sizeof(file->pathname), GFP_KERNEL);
2521 if (!buf)
2522 goto out;
2523
2524 ptr = file_path(mddev->bitmap->file, buf, sizeof(file->pathname));
2525 if (!ptr)
2526 goto out;
2527
2528 strcpy(file->pathname, ptr);
2529
2530copy_out:
2531 err = 0;
2532 if (copy_to_user(arg, file, sizeof(*file)))
2533 err = -EFAULT;
2534out:
2535 kfree(buf);
2536 kfree(file);
2537 return err;
2538}
2539
1da177e4
LT
2540static int get_disk_info(mddev_t * mddev, void __user * arg)
2541{
2542 mdu_disk_info_t info;
2543 unsigned int nr;
2544 mdk_rdev_t *rdev;
2545
2546 if (copy_from_user(&info, arg, sizeof(info)))
2547 return -EFAULT;
2548
2549 nr = info.number;
2550
2551 rdev = find_rdev_nr(mddev, nr);
2552 if (rdev) {
2553 info.major = MAJOR(rdev->bdev->bd_dev);
2554 info.minor = MINOR(rdev->bdev->bd_dev);
2555 info.raid_disk = rdev->raid_disk;
2556 info.state = 0;
b2d444d7 2557 if (test_bit(Faulty, &rdev->flags))
1da177e4 2558 info.state |= (1<<MD_DISK_FAULTY);
b2d444d7 2559 else if (test_bit(In_sync, &rdev->flags)) {
1da177e4
LT
2560 info.state |= (1<<MD_DISK_ACTIVE);
2561 info.state |= (1<<MD_DISK_SYNC);
2562 }
8ddf9efe
N
2563 if (test_bit(WriteMostly, &rdev->flags))
2564 info.state |= (1<<MD_DISK_WRITEMOSTLY);
1da177e4
LT
2565 } else {
2566 info.major = info.minor = 0;
2567 info.raid_disk = -1;
2568 info.state = (1<<MD_DISK_REMOVED);
2569 }
2570
2571 if (copy_to_user(arg, &info, sizeof(info)))
2572 return -EFAULT;
2573
2574 return 0;
2575}
2576
2577static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
2578{
2579 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
2580 mdk_rdev_t *rdev;
2581 dev_t dev = MKDEV(info->major,info->minor);
2582
2583 if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
2584 return -EOVERFLOW;
2585
2586 if (!mddev->raid_disks) {
2587 int err;
2588 /* expecting a device which has a superblock */
2589 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
2590 if (IS_ERR(rdev)) {
2591 printk(KERN_WARNING
2592 "md: md_import_device returned %ld\n",
2593 PTR_ERR(rdev));
2594 return PTR_ERR(rdev);
2595 }
2596 if (!list_empty(&mddev->disks)) {
2597 mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
2598 mdk_rdev_t, same_set);
2599 int err = super_types[mddev->major_version]
2600 .load_super(rdev, rdev0, mddev->minor_version);
2601 if (err < 0) {
2602 printk(KERN_WARNING
2603 "md: %s has different UUID to %s\n",
2604 bdevname(rdev->bdev,b),
2605 bdevname(rdev0->bdev,b2));
2606 export_rdev(rdev);
2607 return -EINVAL;
2608 }
2609 }
2610 err = bind_rdev_to_array(rdev, mddev);
2611 if (err)
2612 export_rdev(rdev);
2613 return err;
2614 }
2615
2616 /*
2617 * add_new_disk can be used once the array is assembled
2618 * to add "hot spares". They must already have a superblock
2619 * written
2620 */
2621 if (mddev->pers) {
2622 int err;
2623 if (!mddev->pers->hot_add_disk) {
2624 printk(KERN_WARNING
2625 "%s: personality does not support diskops!\n",
2626 mdname(mddev));
2627 return -EINVAL;
2628 }
7b1e35f6
N
2629 if (mddev->persistent)
2630 rdev = md_import_device(dev, mddev->major_version,
2631 mddev->minor_version);
2632 else
2633 rdev = md_import_device(dev, -1, -1);
1da177e4
LT
2634 if (IS_ERR(rdev)) {
2635 printk(KERN_WARNING
2636 "md: md_import_device returned %ld\n",
2637 PTR_ERR(rdev));
2638 return PTR_ERR(rdev);
2639 }
41158c7e
N
2640 /* set save_raid_disk if appropriate */
2641 if (!mddev->persistent) {
2642 if (info->state & (1<<MD_DISK_SYNC) &&
2643 info->raid_disk < mddev->raid_disks)
2644 rdev->raid_disk = info->raid_disk;
2645 else
2646 rdev->raid_disk = -1;
2647 } else
2648 super_types[mddev->major_version].
2649 validate_super(mddev, rdev);
2650 rdev->saved_raid_disk = rdev->raid_disk;
2651
b2d444d7 2652 clear_bit(In_sync, &rdev->flags); /* just to be sure */
8ddf9efe
N
2653 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
2654 set_bit(WriteMostly, &rdev->flags);
2655
1da177e4
LT
2656 rdev->raid_disk = -1;
2657 err = bind_rdev_to_array(rdev, mddev);
2658 if (err)
2659 export_rdev(rdev);
c361777f
N
2660
2661 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
005eca5e 2662 md_wakeup_thread(mddev->thread);
1da177e4
LT
2663 return err;
2664 }
2665
2666 /* otherwise, add_new_disk is only allowed
2667 * for major_version==0 superblocks
2668 */
2669 if (mddev->major_version != 0) {
2670 printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n",
2671 mdname(mddev));
2672 return -EINVAL;
2673 }
2674
2675 if (!(info->state & (1<<MD_DISK_FAULTY))) {
2676 int err;
2677 rdev = md_import_device (dev, -1, 0);
2678 if (IS_ERR(rdev)) {
2679 printk(KERN_WARNING
2680 "md: error, md_import_device() returned %ld\n",
2681 PTR_ERR(rdev));
2682 return PTR_ERR(rdev);
2683 }
2684 rdev->desc_nr = info->number;
2685 if (info->raid_disk < mddev->raid_disks)
2686 rdev->raid_disk = info->raid_disk;
2687 else
2688 rdev->raid_disk = -1;
2689
b2d444d7
N
2690 rdev->flags = 0;
2691
1da177e4 2692 if (rdev->raid_disk < mddev->raid_disks)
b2d444d7
N
2693 if (info->state & (1<<MD_DISK_SYNC))
2694 set_bit(In_sync, &rdev->flags);
1da177e4 2695
8ddf9efe
N
2696 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
2697 set_bit(WriteMostly, &rdev->flags);
2698
1da177e4
LT
2699 err = bind_rdev_to_array(rdev, mddev);
2700 if (err) {
2701 export_rdev(rdev);
2702 return err;
2703 }
2704
2705 if (!mddev->persistent) {
2706 printk(KERN_INFO "md: nonpersistent superblock ...\n");
2707 rdev->sb_offset = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
2708 } else
2709 rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
2710 rdev->size = calc_dev_size(rdev, mddev->chunk_size);
2711
2712 if (!mddev->size || (mddev->size > rdev->size))
2713 mddev->size = rdev->size;
2714 }
2715
2716 return 0;
2717}
2718
2719static int hot_remove_disk(mddev_t * mddev, dev_t dev)
2720{
2721 char b[BDEVNAME_SIZE];
2722 mdk_rdev_t *rdev;
2723
2724 if (!mddev->pers)
2725 return -ENODEV;
2726
2727 rdev = find_rdev(mddev, dev);
2728 if (!rdev)
2729 return -ENXIO;
2730
2731 if (rdev->raid_disk >= 0)
2732 goto busy;
2733
2734 kick_rdev_from_array(rdev);
2735 md_update_sb(mddev);
d7603b7e 2736 md_new_event(mddev);
1da177e4
LT
2737
2738 return 0;
2739busy:
2740 printk(KERN_WARNING "md: cannot remove active disk %s from %s ... \n",
2741 bdevname(rdev->bdev,b), mdname(mddev));
2742 return -EBUSY;
2743}
2744
2745static int hot_add_disk(mddev_t * mddev, dev_t dev)
2746{
2747 char b[BDEVNAME_SIZE];
2748 int err;
2749 unsigned int size;
2750 mdk_rdev_t *rdev;
2751
2752 if (!mddev->pers)
2753 return -ENODEV;
2754
2755 if (mddev->major_version != 0) {
2756 printk(KERN_WARNING "%s: HOT_ADD may only be used with"
2757 " version-0 superblocks.\n",
2758 mdname(mddev));
2759 return -EINVAL;
2760 }
2761 if (!mddev->pers->hot_add_disk) {
2762 printk(KERN_WARNING
2763 "%s: personality does not support diskops!\n",
2764 mdname(mddev));
2765 return -EINVAL;
2766 }
2767
2768 rdev = md_import_device (dev, -1, 0);
2769 if (IS_ERR(rdev)) {
2770 printk(KERN_WARNING
2771 "md: error, md_import_device() returned %ld\n",
2772 PTR_ERR(rdev));
2773 return -EINVAL;
2774 }
2775
2776 if (mddev->persistent)
2777 rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
2778 else
2779 rdev->sb_offset =
2780 rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
2781
2782 size = calc_dev_size(rdev, mddev->chunk_size);
2783 rdev->size = size;
2784
2785 if (size < mddev->size) {
2786 printk(KERN_WARNING
2787 "%s: disk size %llu blocks < array size %llu\n",
2788 mdname(mddev), (unsigned long long)size,
2789 (unsigned long long)mddev->size);
2790 err = -ENOSPC;
2791 goto abort_export;
2792 }
2793
b2d444d7 2794 if (test_bit(Faulty, &rdev->flags)) {
1da177e4
LT
2795 printk(KERN_WARNING
2796 "md: can not hot-add faulty %s disk to %s!\n",
2797 bdevname(rdev->bdev,b), mdname(mddev));
2798 err = -EINVAL;
2799 goto abort_export;
2800 }
b2d444d7 2801 clear_bit(In_sync, &rdev->flags);
1da177e4
LT
2802 rdev->desc_nr = -1;
2803 bind_rdev_to_array(rdev, mddev);
2804
2805 /*
 2806 * The rest had better be atomic; disk failures can be
 2807 * noticed in interrupt context ...
2808 */
2809
2810 if (rdev->desc_nr == mddev->max_disks) {
2811 printk(KERN_WARNING "%s: can not hot-add to full array!\n",
2812 mdname(mddev));
2813 err = -EBUSY;
2814 goto abort_unbind_export;
2815 }
2816
2817 rdev->raid_disk = -1;
2818
2819 md_update_sb(mddev);
2820
2821 /*
2822 * Kick recovery, maybe this spare has to be added to the
2823 * array immediately.
2824 */
2825 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2826 md_wakeup_thread(mddev->thread);
d7603b7e 2827 md_new_event(mddev);
1da177e4
LT
2828 return 0;
2829
2830abort_unbind_export:
2831 unbind_rdev_from_array(rdev);
2832
2833abort_export:
2834 export_rdev(rdev);
2835 return err;
2836}
2837
32a7627c
N
2838/* similar to deny_write_access, but accounts for our holding a reference
2839 * to the file ourselves */
2840static int deny_bitmap_write_access(struct file * file)
2841{
2842 struct inode *inode = file->f_mapping->host;
2843
2844 spin_lock(&inode->i_lock);
2845 if (atomic_read(&inode->i_writecount) > 1) {
2846 spin_unlock(&inode->i_lock);
2847 return -ETXTBSY;
2848 }
2849 atomic_set(&inode->i_writecount, -1);
2850 spin_unlock(&inode->i_lock);
2851
2852 return 0;
2853}
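/*
 * A note on the i_writecount convention above: the writer reference
 * from the fd we were handed is expected, so a count greater than 1
 * means some other opener could still write the file.  Setting the
 * count to -1 then marks the inode write-denied - the same scheme
 * used for running executables, where get_write_access() fails with
 * -ETXTBSY while the count is negative.
 */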
2854
2855static int set_bitmap_file(mddev_t *mddev, int fd)
2856{
2857 int err;
2858
36fa3063
N
2859 if (mddev->pers) {
2860 if (!mddev->pers->quiesce)
2861 return -EBUSY;
2862 if (mddev->recovery || mddev->sync_thread)
2863 return -EBUSY;
2864 /* we should be able to change the bitmap.. */
2865 }
32a7627c 2866
32a7627c 2867
36fa3063
N
2868 if (fd >= 0) {
2869 if (mddev->bitmap)
2870 return -EEXIST; /* cannot add when bitmap is present */
2871 mddev->bitmap_file = fget(fd);
32a7627c 2872
36fa3063
N
2873 if (mddev->bitmap_file == NULL) {
2874 printk(KERN_ERR "%s: error: failed to get bitmap file\n",
2875 mdname(mddev));
2876 return -EBADF;
2877 }
2878
2879 err = deny_bitmap_write_access(mddev->bitmap_file);
2880 if (err) {
2881 printk(KERN_ERR "%s: error: bitmap file is already in use\n",
2882 mdname(mddev));
2883 fput(mddev->bitmap_file);
2884 mddev->bitmap_file = NULL;
2885 return err;
2886 }
a654b9d8 2887 mddev->bitmap_offset = 0; /* file overrides offset */
36fa3063
N
2888 } else if (mddev->bitmap == NULL)
2889 return -ENOENT; /* cannot remove what isn't there */
2890 err = 0;
2891 if (mddev->pers) {
2892 mddev->pers->quiesce(mddev, 1);
2893 if (fd >= 0)
2894 err = bitmap_create(mddev);
2895 if (fd < 0 || err)
2896 bitmap_destroy(mddev);
2897 mddev->pers->quiesce(mddev, 0);
2898 } else if (fd < 0) {
2899 if (mddev->bitmap_file)
2900 fput(mddev->bitmap_file);
2901 mddev->bitmap_file = NULL;
2902 }
2903
32a7627c
N
2904 return err;
2905}
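/*
 * Hedged userspace sketch (not kernel code) of driving the ioctl
 * above; it assumes an assembled array on /dev/md0 whose personality
 * supports quiesce and a pre-created bitmap file, and omits all
 * error handling:
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/raid/md_u.h>
 *
 *	int md = open("/dev/md0", O_RDONLY);
 *	int bm = open("/stable/md0-bitmap", O_RDWR);
 *	ioctl(md, SET_BITMAP_FILE, bm);    // fd >= 0 attaches a bitmap
 *	...
 *	ioctl(md, SET_BITMAP_FILE, -1);    // fd < 0 detaches it again
 */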
2906
1da177e4
LT
2907/*
 2908 * set_array_info is used in two different ways.
 2909 * The original usage is when creating a new array.
 2910 * In this usage, raid_disks is > 0 and it together with
 2911 * level, size, not_persistent, layout, chunk_size determine the
 2912 * shape of the array.
 2913 * This will always create an array with a type-0.90.0 superblock.
 2914 * The newer usage is when assembling an array.
 2915 * In this case raid_disks will be 0, and the major_version field is
 2916 * used to determine which style super-blocks are to be found on the devices.
 2917 * The minor and patch _version numbers are also kept in case the
 2918 * super_block handler wishes to interpret them.
2919 */
2920static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
2921{
2922
2923 if (info->raid_disks == 0) {
2924 /* just setting version number for superblock loading */
2925 if (info->major_version < 0 ||
2926 info->major_version >= sizeof(super_types)/sizeof(super_types[0]) ||
2927 super_types[info->major_version].name == NULL) {
2928 /* maybe try to auto-load a module? */
2929 printk(KERN_INFO
2930 "md: superblock version %d not known\n",
2931 info->major_version);
2932 return -EINVAL;
2933 }
2934 mddev->major_version = info->major_version;
2935 mddev->minor_version = info->minor_version;
2936 mddev->patch_version = info->patch_version;
2937 return 0;
2938 }
2939 mddev->major_version = MD_MAJOR_VERSION;
2940 mddev->minor_version = MD_MINOR_VERSION;
2941 mddev->patch_version = MD_PATCHLEVEL_VERSION;
2942 mddev->ctime = get_seconds();
2943
2944 mddev->level = info->level;
2945 mddev->size = info->size;
2946 mddev->raid_disks = info->raid_disks;
2947 /* don't set md_minor, it is determined by which /dev/md* was
 2948 * opened
2949 */
2950 if (info->state & (1<<MD_SB_CLEAN))
2951 mddev->recovery_cp = MaxSector;
2952 else
2953 mddev->recovery_cp = 0;
2954 mddev->persistent = ! info->not_persistent;
2955
2956 mddev->layout = info->layout;
2957 mddev->chunk_size = info->chunk_size;
2958
2959 mddev->max_disks = MD_SB_DISKS;
2960
2961 mddev->sb_dirty = 1;
2962
b2a2703c
N
2963 mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
2964 mddev->bitmap_offset = 0;
2965
1da177e4
LT
2966 /*
2967 * Generate a 128 bit UUID
2968 */
2969 get_random_bytes(mddev->uuid, 16);
2970
2971 return 0;
2972}
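/*
 * Hedged userspace sketch (not kernel code) of the "create" usage
 * described above: raid_disks > 0, so a fresh 0.90.0 superblock
 * layout is chosen.  Values are illustrative and error handling is
 * omitted; members would then be added with ADD_NEW_DISK and the
 * array started with RUN_ARRAY.
 *
 *	#include <string.h>
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/raid/md_u.h>
 *
 *	mdu_array_info_t info;
 *	int fd = open("/dev/md0", O_RDONLY);
 *	memset(&info, 0, sizeof(info));
 *	info.level = 1;
 *	info.raid_disks = 2;
 *	info.chunk_size = 64 * 1024;
 *	ioctl(fd, SET_ARRAY_INFO, &info);
 */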
2973
2974/*
2975 * update_array_info is used to change the configuration of an
2976 * on-line array.
 2977 * The version, ctime, level, size, raid_disks, not_persistent, layout, chunk_size
2978 * fields in the info are checked against the array.
2979 * Any differences that cannot be handled will cause an error.
2980 * Normally, only one change can be managed at a time.
2981 */
2982static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
2983{
2984 int rv = 0;
2985 int cnt = 0;
36fa3063
N
2986 int state = 0;
2987
 2988 /* calculate expected state, ignoring low bits */
2989 if (mddev->bitmap && mddev->bitmap_offset)
2990 state |= (1 << MD_SB_BITMAP_PRESENT);
1da177e4
LT
2991
2992 if (mddev->major_version != info->major_version ||
2993 mddev->minor_version != info->minor_version ||
2994/* mddev->patch_version != info->patch_version || */
2995 mddev->ctime != info->ctime ||
2996 mddev->level != info->level ||
2997/* mddev->layout != info->layout || */
2998 !mddev->persistent != info->not_persistent||
36fa3063
N
2999 mddev->chunk_size != info->chunk_size ||
3000 /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
3001 ((state^info->state) & 0xfffffe00)
3002 )
1da177e4
LT
3003 return -EINVAL;
3004 /* Check there is only one change */
3005 if (mddev->size != info->size) cnt++;
3006 if (mddev->raid_disks != info->raid_disks) cnt++;
3007 if (mddev->layout != info->layout) cnt++;
36fa3063 3008 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) cnt++;
1da177e4
LT
3009 if (cnt == 0) return 0;
3010 if (cnt > 1) return -EINVAL;
3011
3012 if (mddev->layout != info->layout) {
3013 /* Change layout
3014 * we don't need to do anything at the md level, the
3015 * personality will take care of it all.
3016 */
3017 if (mddev->pers->reconfig == NULL)
3018 return -EINVAL;
3019 else
3020 return mddev->pers->reconfig(mddev, info->layout, -1);
3021 }
3022 if (mddev->size != info->size) {
3023 mdk_rdev_t * rdev;
3024 struct list_head *tmp;
3025 if (mddev->pers->resize == NULL)
3026 return -EINVAL;
3027 /* The "size" is the amount of each device that is used.
3028 * This can only make sense for arrays with redundancy.
3029 * linear and raid0 always use whatever space is available
3030 * We can only consider changing the size if no resync
3031 * or reconstruction is happening, and if the new size
3032 * is acceptable. It must fit before the sb_offset or,
 3033 * if that is < data_offset, it must fit before the
3034 * size of each device.
3035 * If size is zero, we find the largest size that fits.
3036 */
3037 if (mddev->sync_thread)
3038 return -EBUSY;
3039 ITERATE_RDEV(mddev,rdev,tmp) {
3040 sector_t avail;
3041 int fit = (info->size == 0);
3042 if (rdev->sb_offset > rdev->data_offset)
3043 avail = (rdev->sb_offset*2) - rdev->data_offset;
3044 else
3045 avail = get_capacity(rdev->bdev->bd_disk)
3046 - rdev->data_offset;
3047 if (fit && (info->size == 0 || info->size > avail/2))
3048 info->size = avail/2;
3049 if (avail < ((sector_t)info->size << 1))
3050 return -ENOSPC;
3051 }
3052 rv = mddev->pers->resize(mddev, (sector_t)info->size *2);
3053 if (!rv) {
3054 struct block_device *bdev;
3055
3056 bdev = bdget_disk(mddev->gendisk, 0);
3057 if (bdev) {
3058 down(&bdev->bd_inode->i_sem);
3059 i_size_write(bdev->bd_inode, mddev->array_size << 10);
3060 up(&bdev->bd_inode->i_sem);
3061 bdput(bdev);
3062 }
3063 }
3064 }
3065 if (mddev->raid_disks != info->raid_disks) {
3066 /* change the number of raid disks */
3067 if (mddev->pers->reshape == NULL)
3068 return -EINVAL;
3069 if (info->raid_disks <= 0 ||
3070 info->raid_disks >= mddev->max_disks)
3071 return -EINVAL;
3072 if (mddev->sync_thread)
3073 return -EBUSY;
3074 rv = mddev->pers->reshape(mddev, info->raid_disks);
3075 if (!rv) {
3076 struct block_device *bdev;
3077
3078 bdev = bdget_disk(mddev->gendisk, 0);
3079 if (bdev) {
3080 down(&bdev->bd_inode->i_sem);
3081 i_size_write(bdev->bd_inode, mddev->array_size << 10);
3082 up(&bdev->bd_inode->i_sem);
3083 bdput(bdev);
3084 }
3085 }
3086 }
36fa3063
N
3087 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
3088 if (mddev->pers->quiesce == NULL)
3089 return -EINVAL;
3090 if (mddev->recovery || mddev->sync_thread)
3091 return -EBUSY;
3092 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
3093 /* add the bitmap */
3094 if (mddev->bitmap)
3095 return -EEXIST;
3096 if (mddev->default_bitmap_offset == 0)
3097 return -EINVAL;
3098 mddev->bitmap_offset = mddev->default_bitmap_offset;
3099 mddev->pers->quiesce(mddev, 1);
3100 rv = bitmap_create(mddev);
3101 if (rv)
3102 bitmap_destroy(mddev);
3103 mddev->pers->quiesce(mddev, 0);
3104 } else {
3105 /* remove the bitmap */
3106 if (!mddev->bitmap)
3107 return -ENOENT;
3108 if (mddev->bitmap->file)
3109 return -EINVAL;
3110 mddev->pers->quiesce(mddev, 1);
3111 bitmap_destroy(mddev);
3112 mddev->pers->quiesce(mddev, 0);
3113 mddev->bitmap_offset = 0;
3114 }
3115 }
1da177e4
LT
3116 md_update_sb(mddev);
3117 return rv;
3118}
3119
3120static int set_disk_faulty(mddev_t *mddev, dev_t dev)
3121{
3122 mdk_rdev_t *rdev;
3123
3124 if (mddev->pers == NULL)
3125 return -ENODEV;
3126
3127 rdev = find_rdev(mddev, dev);
3128 if (!rdev)
3129 return -ENODEV;
3130
3131 md_error(mddev, rdev);
3132 return 0;
3133}
3134
3135static int md_ioctl(struct inode *inode, struct file *file,
3136 unsigned int cmd, unsigned long arg)
3137{
3138 int err = 0;
3139 void __user *argp = (void __user *)arg;
3140 struct hd_geometry __user *loc = argp;
3141 mddev_t *mddev = NULL;
3142
3143 if (!capable(CAP_SYS_ADMIN))
3144 return -EACCES;
3145
3146 /*
3147 * Commands dealing with the RAID driver but not any
3148 * particular array:
3149 */
3150 switch (cmd)
3151 {
3152 case RAID_VERSION:
3153 err = get_version(argp);
3154 goto done;
3155
3156 case PRINT_RAID_DEBUG:
3157 err = 0;
3158 md_print_devices();
3159 goto done;
3160
3161#ifndef MODULE
3162 case RAID_AUTORUN:
3163 err = 0;
3164 autostart_arrays(arg);
3165 goto done;
3166#endif
3167 default:;
3168 }
3169
3170 /*
3171 * Commands creating/starting a new array:
3172 */
3173
3174 mddev = inode->i_bdev->bd_disk->private_data;
3175
3176 if (!mddev) {
3177 BUG();
3178 goto abort;
3179 }
3180
3181
3182 if (cmd == START_ARRAY) {
3183 /* START_ARRAY doesn't need to lock the array as autostart_array
3184 * does the locking, and it could even be a different array
3185 */
3186 static int cnt = 3;
3187 if (cnt > 0 ) {
3188 printk(KERN_WARNING
3189 "md: %s(pid %d) used deprecated START_ARRAY ioctl. "
e8a00334 3190 "This will not be supported beyond July 2006\n",
1da177e4
LT
3191 current->comm, current->pid);
3192 cnt--;
3193 }
3194 err = autostart_array(new_decode_dev(arg));
3195 if (err) {
3196 printk(KERN_WARNING "md: autostart failed!\n");
3197 goto abort;
3198 }
3199 goto done;
3200 }
3201
3202 err = mddev_lock(mddev);
3203 if (err) {
3204 printk(KERN_INFO
3205 "md: ioctl lock interrupted, reason %d, cmd %d\n",
3206 err, cmd);
3207 goto abort;
3208 }
3209
3210 switch (cmd)
3211 {
3212 case SET_ARRAY_INFO:
3213 {
3214 mdu_array_info_t info;
3215 if (!arg)
3216 memset(&info, 0, sizeof(info));
3217 else if (copy_from_user(&info, argp, sizeof(info))) {
3218 err = -EFAULT;
3219 goto abort_unlock;
3220 }
3221 if (mddev->pers) {
3222 err = update_array_info(mddev, &info);
3223 if (err) {
3224 printk(KERN_WARNING "md: couldn't update"
3225 " array info. %d\n", err);
3226 goto abort_unlock;
3227 }
3228 goto done_unlock;
3229 }
3230 if (!list_empty(&mddev->disks)) {
3231 printk(KERN_WARNING
3232 "md: array %s already has disks!\n",
3233 mdname(mddev));
3234 err = -EBUSY;
3235 goto abort_unlock;
3236 }
3237 if (mddev->raid_disks) {
3238 printk(KERN_WARNING
3239 "md: array %s already initialised!\n",
3240 mdname(mddev));
3241 err = -EBUSY;
3242 goto abort_unlock;
3243 }
3244 err = set_array_info(mddev, &info);
3245 if (err) {
3246 printk(KERN_WARNING "md: couldn't set"
3247 " array info. %d\n", err);
3248 goto abort_unlock;
3249 }
3250 }
3251 goto done_unlock;
3252
3253 default:;
3254 }
3255
3256 /*
3257 * Commands querying/configuring an existing array:
3258 */
32a7627c
N
3259 /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
3260 * RUN_ARRAY, and SET_BITMAP_FILE are allowed */
3261 if (!mddev->raid_disks && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
3262 && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE) {
1da177e4
LT
3263 err = -ENODEV;
3264 goto abort_unlock;
3265 }
3266
3267 /*
3268 * Commands even a read-only array can execute:
3269 */
3270 switch (cmd)
3271 {
3272 case GET_ARRAY_INFO:
3273 err = get_array_info(mddev, argp);
3274 goto done_unlock;
3275
32a7627c 3276 case GET_BITMAP_FILE:
87162a28 3277 err = get_bitmap_file(mddev, argp);
32a7627c
N
3278 goto done_unlock;
3279
1da177e4
LT
3280 case GET_DISK_INFO:
3281 err = get_disk_info(mddev, argp);
3282 goto done_unlock;
3283
3284 case RESTART_ARRAY_RW:
3285 err = restart_array(mddev);
3286 goto done_unlock;
3287
3288 case STOP_ARRAY:
3289 err = do_md_stop (mddev, 0);
3290 goto done_unlock;
3291
3292 case STOP_ARRAY_RO:
3293 err = do_md_stop (mddev, 1);
3294 goto done_unlock;
3295
3296 /*
 3297 * We have a problem here: there is no easy way to give a CHS
 3298 * virtual geometry. We currently pretend that we have a 2-head,
 3299 * 4-sector geometry (with a BIG number of cylinders...). This drives
3300 * dosfs just mad... ;-)
3301 */
3302 case HDIO_GETGEO:
3303 if (!loc) {
3304 err = -EINVAL;
3305 goto abort_unlock;
3306 }
3307 err = put_user (2, (char __user *) &loc->heads);
3308 if (err)
3309 goto abort_unlock;
3310 err = put_user (4, (char __user *) &loc->sectors);
3311 if (err)
3312 goto abort_unlock;
3313 err = put_user(get_capacity(mddev->gendisk)/8,
3314 (short __user *) &loc->cylinders);
3315 if (err)
3316 goto abort_unlock;
3317 err = put_user (get_start_sect(inode->i_bdev),
3318 (long __user *) &loc->start);
3319 goto done_unlock;
3320 }
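	/*
	 * Worked example of the fake HDIO_GETGEO geometry above: with
	 * 2 heads and 4 sectors per track a cylinder is 8 sectors, so a
	 * 127MB array of 260096 sectors reports 32512 cylinders.  The
	 * numbers only need to multiply out to the capacity; note the
	 * cylinder field is a short, so it wraps on large arrays.
	 */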
3321
3322 /*
3323 * The remaining ioctls are changing the state of the
f91de92e
N
3324 * superblock, so we do not allow them on read-only arrays.
3325 * However non-MD ioctls (e.g. get-size) will still come through
3326 * here and hit the 'default' below, so only disallow
3327 * 'md' ioctls, and switch to rw mode if started auto-readonly.
1da177e4 3328 */
f91de92e
N
3329 if (_IOC_TYPE(cmd) == MD_MAJOR &&
3330 mddev->ro && mddev->pers) {
3331 if (mddev->ro == 2) {
3332 mddev->ro = 0;
3333 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3334 md_wakeup_thread(mddev->thread);
3335
3336 } else {
3337 err = -EROFS;
3338 goto abort_unlock;
3339 }
1da177e4
LT
3340 }
3341
3342 switch (cmd)
3343 {
3344 case ADD_NEW_DISK:
3345 {
3346 mdu_disk_info_t info;
3347 if (copy_from_user(&info, argp, sizeof(info)))
3348 err = -EFAULT;
3349 else
3350 err = add_new_disk(mddev, &info);
3351 goto done_unlock;
3352 }
3353
3354 case HOT_REMOVE_DISK:
3355 err = hot_remove_disk(mddev, new_decode_dev(arg));
3356 goto done_unlock;
3357
3358 case HOT_ADD_DISK:
3359 err = hot_add_disk(mddev, new_decode_dev(arg));
3360 goto done_unlock;
3361
3362 case SET_DISK_FAULTY:
3363 err = set_disk_faulty(mddev, new_decode_dev(arg));
3364 goto done_unlock;
3365
3366 case RUN_ARRAY:
3367 err = do_md_run (mddev);
3368 goto done_unlock;
3369
32a7627c
N
3370 case SET_BITMAP_FILE:
3371 err = set_bitmap_file(mddev, (int)arg);
3372 goto done_unlock;
3373
1da177e4
LT
3374 default:
3375 if (_IOC_TYPE(cmd) == MD_MAJOR)
3376 printk(KERN_WARNING "md: %s(pid %d) used"
3377 " obsolete MD ioctl, upgrade your"
 3378 " software to use new ioctls.\n"
3379 current->comm, current->pid);
3380 err = -EINVAL;
3381 goto abort_unlock;
3382 }
3383
3384done_unlock:
3385abort_unlock:
3386 mddev_unlock(mddev);
3387
3388 return err;
3389done:
3390 if (err)
3391 MD_BUG();
3392abort:
3393 return err;
3394}
3395
3396static int md_open(struct inode *inode, struct file *file)
3397{
3398 /*
3399 * Succeed if we can lock the mddev, which confirms that
3400 * it isn't being stopped right now.
3401 */
3402 mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
3403 int err;
3404
3405 if ((err = mddev_lock(mddev)))
3406 goto out;
3407
3408 err = 0;
3409 mddev_get(mddev);
3410 mddev_unlock(mddev);
3411
3412 check_disk_change(inode->i_bdev);
3413 out:
3414 return err;
3415}
3416
3417static int md_release(struct inode *inode, struct file * file)
3418{
3419 mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
3420
3421 if (!mddev)
3422 BUG();
3423 mddev_put(mddev);
3424
3425 return 0;
3426}
3427
3428static int md_media_changed(struct gendisk *disk)
3429{
3430 mddev_t *mddev = disk->private_data;
3431
3432 return mddev->changed;
3433}
3434
3435static int md_revalidate(struct gendisk *disk)
3436{
3437 mddev_t *mddev = disk->private_data;
3438
3439 mddev->changed = 0;
3440 return 0;
3441}
3442static struct block_device_operations md_fops =
3443{
3444 .owner = THIS_MODULE,
3445 .open = md_open,
3446 .release = md_release,
3447 .ioctl = md_ioctl,
3448 .media_changed = md_media_changed,
3449 .revalidate_disk= md_revalidate,
3450};
3451
75c96f85 3452static int md_thread(void * arg)
1da177e4
LT
3453{
3454 mdk_thread_t *thread = arg;
3455
1da177e4
LT
3456 /*
 3457 * md_thread is a 'system-thread'; its priority should be very
3458 * high. We avoid resource deadlocks individually in each
3459 * raid personality. (RAID5 does preallocation) We also use RR and
3460 * the very same RT priority as kswapd, thus we will never get
3461 * into a priority inversion deadlock.
3462 *
3463 * we definitely have to have equal or higher priority than
3464 * bdflush, otherwise bdflush will deadlock if there are too
3465 * many dirty RAID5 blocks.
3466 */
1da177e4 3467
6985c43f 3468 allow_signal(SIGKILL);
a6fb0934 3469 while (!kthread_should_stop()) {
1da177e4 3470
93588e22
N
3471 /* We need to wait INTERRUPTIBLE so that
3472 * we don't add to the load-average.
3473 * That means we need to be sure no signals are
3474 * pending
3475 */
3476 if (signal_pending(current))
3477 flush_signals(current);
3478
3479 wait_event_interruptible_timeout
3480 (thread->wqueue,
3481 test_bit(THREAD_WAKEUP, &thread->flags)
3482 || kthread_should_stop(),
3483 thread->timeout);
3e1d1d28 3484 try_to_freeze();
1da177e4
LT
3485
3486 clear_bit(THREAD_WAKEUP, &thread->flags);
3487
787453c2 3488 thread->run(thread->mddev);
1da177e4 3489 }
a6fb0934 3490
1da177e4
LT
3491 return 0;
3492}
3493
3494void md_wakeup_thread(mdk_thread_t *thread)
3495{
3496 if (thread) {
3497 dprintk("md: waking up MD thread %s.\n", thread->tsk->comm);
3498 set_bit(THREAD_WAKEUP, &thread->flags);
3499 wake_up(&thread->wqueue);
3500 }
3501}
3502
3503mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
3504 const char *name)
3505{
3506 mdk_thread_t *thread;
1da177e4 3507
a6fb0934 3508 thread = kmalloc(sizeof(mdk_thread_t), GFP_KERNEL);
1da177e4
LT
3509 if (!thread)
3510 return NULL;
3511
3512 memset(thread, 0, sizeof(mdk_thread_t));
3513 init_waitqueue_head(&thread->wqueue);
3514
1da177e4
LT
3515 thread->run = run;
3516 thread->mddev = mddev;
32a7627c 3517 thread->timeout = MAX_SCHEDULE_TIMEOUT;
6985c43f 3518 thread->tsk = kthread_run(md_thread, thread, name, mdname(thread->mddev));
a6fb0934 3519 if (IS_ERR(thread->tsk)) {
1da177e4
LT
3520 kfree(thread);
3521 return NULL;
3522 }
1da177e4
LT
3523 return thread;
3524}
3525
1da177e4
LT
3526void md_unregister_thread(mdk_thread_t *thread)
3527{
d28446fe 3528 dprintk("interrupting MD-thread pid %d\n", thread->tsk->pid);
a6fb0934
N
3529
3530 kthread_stop(thread->tsk);
1da177e4
LT
3531 kfree(thread);
3532}
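/*
 * Illustrative only: a personality drives the thread helpers above
 * roughly as below ("myraidd" is a made-up daemon function; the name
 * argument is a printf-style format that receives mdname(mddev)):
 *
 *	mddev->thread = md_register_thread(myraidd, mddev, "%s_myraid");
 *	...
 *	md_wakeup_thread(mddev->thread);	// after queueing new work
 *	...
 *	md_unregister_thread(mddev->thread);
 */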
3533
3534void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
3535{
3536 if (!mddev) {
3537 MD_BUG();
3538 return;
3539 }
3540
b2d444d7 3541 if (!rdev || test_bit(Faulty, &rdev->flags))
1da177e4 3542 return;
32a7627c 3543/*
1da177e4
LT
3544 dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n",
3545 mdname(mddev),
3546 MAJOR(rdev->bdev->bd_dev), MINOR(rdev->bdev->bd_dev),
3547 __builtin_return_address(0),__builtin_return_address(1),
3548 __builtin_return_address(2),__builtin_return_address(3));
32a7627c 3549*/
1da177e4
LT
3550 if (!mddev->pers->error_handler)
3551 return;
3552 mddev->pers->error_handler(mddev,rdev);
3553 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
3554 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3555 md_wakeup_thread(mddev->thread);
d7603b7e 3556 md_new_event(mddev);
1da177e4
LT
3557}
3558
3559/* seq_file implementation /proc/mdstat */
3560
3561static void status_unused(struct seq_file *seq)
3562{
3563 int i = 0;
3564 mdk_rdev_t *rdev;
3565 struct list_head *tmp;
3566
3567 seq_printf(seq, "unused devices: ");
3568
3569 ITERATE_RDEV_PENDING(rdev,tmp) {
3570 char b[BDEVNAME_SIZE];
3571 i++;
3572 seq_printf(seq, "%s ",
3573 bdevname(rdev->bdev,b));
3574 }
3575 if (!i)
3576 seq_printf(seq, "<none>");
3577
3578 seq_printf(seq, "\n");
3579}
3580
3581
3582static void status_resync(struct seq_file *seq, mddev_t * mddev)
3583{
3584 unsigned long max_blocks, resync, res, dt, db, rt;
3585
3586 resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active))/2;
3587
3588 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
3589 max_blocks = mddev->resync_max_sectors >> 1;
3590 else
3591 max_blocks = mddev->size;
3592
3593 /*
3594 * Should not happen.
3595 */
3596 if (!max_blocks) {
3597 MD_BUG();
3598 return;
3599 }
3600 res = (resync/1024)*1000/(max_blocks/1024 + 1);
3601 {
3602 int i, x = res/50, y = 20-x;
3603 seq_printf(seq, "[");
3604 for (i = 0; i < x; i++)
3605 seq_printf(seq, "=");
3606 seq_printf(seq, ">");
3607 for (i = 0; i < y; i++)
3608 seq_printf(seq, ".");
3609 seq_printf(seq, "] ");
3610 }
3611 seq_printf(seq, " %s =%3lu.%lu%% (%lu/%lu)",
3612 (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
3613 "resync" : "recovery"),
3614 res/10, res % 10, resync, max_blocks);
3615
3616 /*
3617 * We do not want to overflow, so the order of operands and
3618 * the * 100 / 100 trick are important. We do a +1 to be
3619 * safe against division by zero. We only estimate anyway.
3620 *
3621 * dt: time from mark until now
3622 * db: blocks written from mark until now
3623 * rt: remaining time
3624 */
3625 dt = ((jiffies - mddev->resync_mark) / HZ);
3626 if (!dt) dt++;
3627 db = resync - (mddev->resync_mark_cnt/2);
3628 rt = (dt * ((max_blocks-resync) / (db/100+1)))/100;
3629
3630 seq_printf(seq, " finish=%lu.%lumin", rt / 60, (rt % 60)/6);
3631
3632 seq_printf(seq, " speed=%ldK/sec", db/dt);
3633}
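/*
 * Worked example of the integer arithmetic above: with resync =
 * 1000000 blocks done of max_blocks = 4000000,
 * res = (1000000/1024)*1000/(4000000/1024 + 1) = 976000/3907 = 249,
 * printed as "24.9%" with x = 249/50 = 4 of the 20 bar slots as '='.
 * If db = 50000 blocks were written in dt = 10 seconds, the speed
 * line shows 5000K/sec and rt = (10 * (3000000/(50000/100 + 1)))/100
 * = 598, close to the exact 600 seconds remaining.
 */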
3634
3635static void *md_seq_start(struct seq_file *seq, loff_t *pos)
3636{
3637 struct list_head *tmp;
3638 loff_t l = *pos;
3639 mddev_t *mddev;
3640
3641 if (l >= 0x10000)
3642 return NULL;
3643 if (!l--)
3644 /* header */
3645 return (void*)1;
3646
3647 spin_lock(&all_mddevs_lock);
3648 list_for_each(tmp,&all_mddevs)
3649 if (!l--) {
3650 mddev = list_entry(tmp, mddev_t, all_mddevs);
3651 mddev_get(mddev);
3652 spin_unlock(&all_mddevs_lock);
3653 return mddev;
3654 }
3655 spin_unlock(&all_mddevs_lock);
3656 if (!l--)
3657 return (void*)2;/* tail */
3658 return NULL;
3659}
3660
3661static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3662{
3663 struct list_head *tmp;
3664 mddev_t *next_mddev, *mddev = v;
3665
3666 ++*pos;
3667 if (v == (void*)2)
3668 return NULL;
3669
3670 spin_lock(&all_mddevs_lock);
3671 if (v == (void*)1)
3672 tmp = all_mddevs.next;
3673 else
3674 tmp = mddev->all_mddevs.next;
3675 if (tmp != &all_mddevs)
3676 next_mddev = mddev_get(list_entry(tmp,mddev_t,all_mddevs));
3677 else {
3678 next_mddev = (void*)2;
3679 *pos = 0x10000;
3680 }
3681 spin_unlock(&all_mddevs_lock);
3682
3683 if (v != (void*)1)
3684 mddev_put(mddev);
3685 return next_mddev;
3686
3687}
3688
3689static void md_seq_stop(struct seq_file *seq, void *v)
3690{
3691 mddev_t *mddev = v;
3692
3693 if (mddev && v != (void*)1 && v != (void*)2)
3694 mddev_put(mddev);
3695}
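/*
 * A note on the sentinel cursors above: (void*)1 stands for the
 * "Personalities" header row and (void*)2 for the trailing "unused
 * devices" row, with real mddev pointers in between; no valid kernel
 * pointer can equal 1 or 2, so the values are unambiguous.  Walking
 * three arrays yields: (void*)1, md0, md1, md2, (void*)2, NULL.
 */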
3696
d7603b7e
N
3697struct mdstat_info {
3698 int event;
3699};
3700
1da177e4
LT
3701static int md_seq_show(struct seq_file *seq, void *v)
3702{
3703 mddev_t *mddev = v;
3704 sector_t size;
3705 struct list_head *tmp2;
3706 mdk_rdev_t *rdev;
d7603b7e 3707 struct mdstat_info *mi = seq->private;
1da177e4 3708 int i;
32a7627c 3709 struct bitmap *bitmap;
1da177e4
LT
3710
3711 if (v == (void*)1) {
3712 seq_printf(seq, "Personalities : ");
3713 spin_lock(&pers_lock);
3714 for (i = 0; i < MAX_PERSONALITY; i++)
3715 if (pers[i])
3716 seq_printf(seq, "[%s] ", pers[i]->name);
3717
3718 spin_unlock(&pers_lock);
3719 seq_printf(seq, "\n");
d7603b7e 3720 mi->event = atomic_read(&md_event_count);
1da177e4
LT
3721 return 0;
3722 }
3723 if (v == (void*)2) {
3724 status_unused(seq);
3725 return 0;
3726 }
3727
3728 if (mddev_lock(mddev)!=0)
3729 return -EINTR;
3730 if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
3731 seq_printf(seq, "%s : %sactive", mdname(mddev),
3732 mddev->pers ? "" : "in");
3733 if (mddev->pers) {
f91de92e 3734 if (mddev->ro==1)
1da177e4 3735 seq_printf(seq, " (read-only)");
f91de92e
N
3736 if (mddev->ro==2)
3737 seq_printf(seq, "(auto-read-only)");
1da177e4
LT
3738 seq_printf(seq, " %s", mddev->pers->name);
3739 }
3740
3741 size = 0;
3742 ITERATE_RDEV(mddev,rdev,tmp2) {
3743 char b[BDEVNAME_SIZE];
3744 seq_printf(seq, " %s[%d]",
3745 bdevname(rdev->bdev,b), rdev->desc_nr);
8ddf9efe
N
3746 if (test_bit(WriteMostly, &rdev->flags))
3747 seq_printf(seq, "(W)");
b2d444d7 3748 if (test_bit(Faulty, &rdev->flags)) {
1da177e4
LT
3749 seq_printf(seq, "(F)");
3750 continue;
b325a32e
N
3751 } else if (rdev->raid_disk < 0)
3752 seq_printf(seq, "(S)"); /* spare */
1da177e4
LT
3753 size += rdev->size;
3754 }
3755
3756 if (!list_empty(&mddev->disks)) {
3757 if (mddev->pers)
3758 seq_printf(seq, "\n %llu blocks",
3759 (unsigned long long)mddev->array_size);
3760 else
3761 seq_printf(seq, "\n %llu blocks",
3762 (unsigned long long)size);
3763 }
1cd6bf19
N
3764 if (mddev->persistent) {
3765 if (mddev->major_version != 0 ||
3766 mddev->minor_version != 90) {
3767 seq_printf(seq," super %d.%d",
3768 mddev->major_version,
3769 mddev->minor_version);
3770 }
3771 } else
3772 seq_printf(seq, " super non-persistent");
1da177e4
LT
3773
3774 if (mddev->pers) {
3775 mddev->pers->status (seq, mddev);
3776 seq_printf(seq, "\n ");
8e1b39d6
N
3777 if (mddev->pers->sync_request) {
3778 if (mddev->curr_resync > 2) {
3779 status_resync (seq, mddev);
3780 seq_printf(seq, "\n ");
3781 } else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
3782 seq_printf(seq, "\tresync=DELAYED\n ");
3783 else if (mddev->recovery_cp < MaxSector)
3784 seq_printf(seq, "\tresync=PENDING\n ");
3785 }
32a7627c
N
3786 } else
3787 seq_printf(seq, "\n ");
3788
3789 if ((bitmap = mddev->bitmap)) {
32a7627c
N
3790 unsigned long chunk_kb;
3791 unsigned long flags;
32a7627c
N
3792 spin_lock_irqsave(&bitmap->lock, flags);
3793 chunk_kb = bitmap->chunksize >> 10;
3794 seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
3795 "%lu%s chunk",
3796 bitmap->pages - bitmap->missing_pages,
3797 bitmap->pages,
3798 (bitmap->pages - bitmap->missing_pages)
3799 << (PAGE_SHIFT - 10),
3800 chunk_kb ? chunk_kb : bitmap->chunksize,
3801 chunk_kb ? "KB" : "B");
78d742d8
N
3802 if (bitmap->file) {
3803 seq_printf(seq, ", file: ");
3804 seq_path(seq, bitmap->file->f_vfsmnt,
3805 bitmap->file->f_dentry," \t\n");
32a7627c 3806 }
78d742d8 3807
32a7627c
N
3808 seq_printf(seq, "\n");
3809 spin_unlock_irqrestore(&bitmap->lock, flags);
1da177e4
LT
3810 }
3811
3812 seq_printf(seq, "\n");
3813 }
3814 mddev_unlock(mddev);
3815
3816 return 0;
3817}
3818
3819static struct seq_operations md_seq_ops = {
3820 .start = md_seq_start,
3821 .next = md_seq_next,
3822 .stop = md_seq_stop,
3823 .show = md_seq_show,
3824};
3825
3826static int md_seq_open(struct inode *inode, struct file *file)
3827{
3828 int error;
d7603b7e
N
3829 struct mdstat_info *mi = kmalloc(sizeof(*mi), GFP_KERNEL);
3830 if (mi == NULL)
3831 return -ENOMEM;
1da177e4
LT
3832
3833 error = seq_open(file, &md_seq_ops);
d7603b7e
N
3834 if (error)
3835 kfree(mi);
3836 else {
3837 struct seq_file *p = file->private_data;
3838 p->private = mi;
3839 mi->event = atomic_read(&md_event_count);
3840 }
1da177e4
LT
3841 return error;
3842}
3843
d7603b7e
N
3844static int md_seq_release(struct inode *inode, struct file *file)
3845{
3846 struct seq_file *m = file->private_data;
3847 struct mdstat_info *mi = m->private;
3848 m->private = NULL;
3849 kfree(mi);
3850 return seq_release(inode, file);
3851}
3852
3853static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
3854{
3855 struct seq_file *m = filp->private_data;
3856 struct mdstat_info *mi = m->private;
3857 int mask;
3858
3859 poll_wait(filp, &md_event_waiters, wait);
3860
3861 /* always allow read */
3862 mask = POLLIN | POLLRDNORM;
3863
3864 if (mi->event != atomic_read(&md_event_count))
3865 mask |= POLLERR | POLLPRI;
3866 return mask;
3867}
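/*
 * Hedged userspace sketch (not kernel code): a monitor waiting on
 * the poll support above.  POLLPRI is raised when md_event_count
 * has moved since this reader's last pass over the file, so the
 * loop rewinds and re-reads after every wakeup.  Error handling
 * is omitted.
 *
 *	#include <poll.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	char buf[4096];
 *	struct pollfd pfd = { .events = POLLPRI };
 *	pfd.fd = open("/proc/mdstat", O_RDONLY);
 *	read(pfd.fd, buf, sizeof(buf));		// baseline snapshot
 *	while (poll(&pfd, 1, -1) > 0) {
 *		lseek(pfd.fd, 0, SEEK_SET);
 *		read(pfd.fd, buf, sizeof(buf));	// updated status
 *	}
 */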
3868
1da177e4
LT
3869static struct file_operations md_seq_fops = {
3870 .open = md_seq_open,
3871 .read = seq_read,
3872 .llseek = seq_lseek,
d7603b7e
N
3873 .release = md_seq_release,
3874 .poll = mdstat_poll,
1da177e4
LT
3875};
3876
3877int register_md_personality(int pnum, mdk_personality_t *p)
3878{
3879 if (pnum >= MAX_PERSONALITY) {
3880 printk(KERN_ERR
3881 "md: tried to install personality %s as nr %d, but max is %lu\n",
3882 p->name, pnum, MAX_PERSONALITY-1);
3883 return -EINVAL;
3884 }
3885
3886 spin_lock(&pers_lock);
3887 if (pers[pnum]) {
3888 spin_unlock(&pers_lock);
1da177e4
LT
3889 return -EBUSY;
3890 }
3891
3892 pers[pnum] = p;
3893 printk(KERN_INFO "md: %s personality registered as nr %d\n", p->name, pnum);
3894 spin_unlock(&pers_lock);
3895 return 0;
3896}
3897
3898int unregister_md_personality(int pnum)
3899{
a757e64c 3900 if (pnum >= MAX_PERSONALITY)
1da177e4 3901 return -EINVAL;
1da177e4
LT
3902
3903 printk(KERN_INFO "md: %s personality unregistered\n", pers[pnum]->name);
3904 spin_lock(&pers_lock);
3905 pers[pnum] = NULL;
3906 spin_unlock(&pers_lock);
3907 return 0;
3908}
3909
3910static int is_mddev_idle(mddev_t *mddev)
3911{
3912 mdk_rdev_t * rdev;
3913 struct list_head *tmp;
3914 int idle;
3915 unsigned long curr_events;
3916
3917 idle = 1;
3918 ITERATE_RDEV(mddev,rdev,tmp) {
3919 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
a362357b
JA
3920 curr_events = disk_stat_read(disk, sectors[0]) +
3921 disk_stat_read(disk, sectors[1]) -
1da177e4 3922 atomic_read(&disk->sync_io);
c0e48521
N
3923 /* The difference between curr_events and last_events
3924 * will be affected by any new non-sync IO (making
3925 * curr_events bigger) and any difference in the amount of
 3926 * in-flight sync IO (making curr_events bigger or smaller)
3927 * The amount in-flight is currently limited to
3928 * 32*64K in raid1/10 and 256*PAGE_SIZE in raid5/6
3929 * which is at most 4096 sectors.
3930 * These numbers are fairly fragile and should be made
3931 * more robust, probably by enforcing the
3932 * 'window size' that md_do_sync sort-of uses.
3933 *
1da177e4
LT
3934 * Note: the following is an unsigned comparison.
3935 */
c0e48521 3936 if ((curr_events - rdev->last_events + 4096) > 8192) {
1da177e4
LT
3937 rdev->last_events = curr_events;
3938 idle = 0;
3939 }
3940 }
3941 return idle;
3942}
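/*
 * A note on the unsigned comparison above: with d = curr_events -
 * last_events, the device counts as idle only while -4096 <= d <=
 * 4096.  E.g. d = -100 (sync IO draining) biases to 3996 and stays
 * under 8192, while d = 5000 of genuine new IO biases to 9096 and
 * flags the device busy.
 */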
3943
3944void md_done_sync(mddev_t *mddev, int blocks, int ok)
3945{
3946 /* another "blocks" (512byte) blocks have been synced */
3947 atomic_sub(blocks, &mddev->recovery_active);
3948 wake_up(&mddev->recovery_wait);
3949 if (!ok) {
3950 set_bit(MD_RECOVERY_ERR, &mddev->recovery);
3951 md_wakeup_thread(mddev->thread);
3952 // stop recovery, signal do_sync ....
3953 }
3954}
3955
3956
/* md_write_start(mddev, bi)
 * If we need to update some array metadata (e.g. 'active' flag
 * in superblock) before writing, schedule a superblock update
 * and wait for it to complete.
 */
void md_write_start(mddev_t *mddev, struct bio *bi)
{
	if (bio_data_dir(bi) != WRITE)
		return;

	BUG_ON(mddev->ro == 1);
	if (mddev->ro == 2) {
		/* need to switch to read/write */
		mddev->ro = 0;
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
	}
	atomic_inc(&mddev->writes_pending);
	if (mddev->in_sync) {
		spin_lock_irq(&mddev->write_lock);
		if (mddev->in_sync) {
			mddev->in_sync = 0;
			mddev->sb_dirty = 1;
			md_wakeup_thread(mddev->thread);
		}
		spin_unlock_irq(&mddev->write_lock);
	}
	wait_event(mddev->sb_wait, mddev->sb_dirty==0);
}

void md_write_end(mddev_t *mddev)
{
	if (atomic_dec_and_test(&mddev->writes_pending)) {
		if (mddev->safemode == 2)
			md_wakeup_thread(mddev->thread);
		else
			mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
	}
}

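/* Illustrative pairing (a sketch, not taken from any personality):
 * a personality's make_request path brackets array writes with
 *
 *	md_write_start(mddev, bio);	// may block until sb is updated
 *	... submit the write ...
 *	md_write_end(mddev);		// from the completion path
 *
 * so the superblock records the array as active before the first write
 * hits disk, and the safemode timer can mark it clean again once
 * writes_pending drains to zero.
 */
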
static DECLARE_WAIT_QUEUE_HEAD(resync_wait);

#define SYNC_MARKS	10
#define	SYNC_MARK_STEP	(3*HZ)
static void md_do_sync(mddev_t *mddev)
{
	mddev_t *mddev2;
	unsigned int currspeed = 0,
		 window;
	sector_t max_sectors,j, io_sectors;
	unsigned long mark[SYNC_MARKS];
	sector_t mark_cnt[SYNC_MARKS];
	int last_mark,m;
	struct list_head *tmp;
	sector_t last_check;
	int skipped = 0;

	/* just in case thread restarts... */
	if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
		return;

	/* we overload curr_resync somewhat here.
	 * 0 == not engaged in resync at all
	 * 2 == checking that there is no conflict with another sync
	 * 1 == like 2, but have yielded to allow conflicting resync to
	 *		commence
	 * other == active in resync - this many blocks
	 *
	 * Before starting a resync we must have set curr_resync to
	 * 2, and then checked that every "conflicting" array has curr_resync
	 * less than ours.  When we find one that is the same or higher
	 * we wait on resync_wait.  To avoid deadlock, we reduce curr_resync
	 * to 1 if we choose to yield (based arbitrarily on address of mddev structure).
	 * This will mean we have to start checking from the beginning again.
	 */
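	/* Worked scenario (illustrative): arrays A and B share a disk and
	 * both reach curr_resync == 2.  The lower-addressed one, say A,
	 * drops to 1 and wakes resync_wait; B then sees no conflict at
	 * level >= 2 and proceeds, while A sleeps on resync_wait and
	 * rescans from the top once B's resync completes.
	 */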

	do {
		mddev->curr_resync = 2;

	try_again:
		if (kthread_should_stop()) {
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			goto skip;
		}
		ITERATE_MDDEV(mddev2,tmp) {
			if (mddev2 == mddev)
				continue;
			if (mddev2->curr_resync &&
			    match_mddev_units(mddev,mddev2)) {
				DEFINE_WAIT(wq);
				if (mddev < mddev2 && mddev->curr_resync == 2) {
					/* arbitrarily yield */
					mddev->curr_resync = 1;
					wake_up(&resync_wait);
				}
				if (mddev > mddev2 && mddev->curr_resync == 1)
					/* no need to wait here, we can wait the next
					 * time 'round when curr_resync == 2
					 */
					continue;
				prepare_to_wait(&resync_wait, &wq, TASK_UNINTERRUPTIBLE);
				if (!kthread_should_stop() &&
				    mddev2->curr_resync >= mddev->curr_resync) {
					printk(KERN_INFO "md: delaying resync of %s"
					       " until %s has finished resync (they"
					       " share one or more physical units)\n",
					       mdname(mddev), mdname(mddev2));
					mddev_put(mddev2);
					schedule();
					finish_wait(&resync_wait, &wq);
					goto try_again;
				}
				finish_wait(&resync_wait, &wq);
			}
		}
	} while (mddev->curr_resync < 2);

	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
		/* resync follows the size requested by the personality,
		 * which defaults to physical size, but can be virtual size
		 */
		max_sectors = mddev->resync_max_sectors;
		mddev->resync_mismatches = 0;
	} else
		/* recovery follows the physical size of devices */
		max_sectors = mddev->size << 1;

	printk(KERN_INFO "md: syncing RAID array %s\n", mdname(mddev));
	printk(KERN_INFO "md: minimum _guaranteed_ reconstruction speed:"
		" %d KB/sec/disc.\n", sysctl_speed_limit_min);
	printk(KERN_INFO "md: using maximum available idle IO bandwidth "
	       "(but not more than %d KB/sec) for reconstruction.\n",
	       sysctl_speed_limit_max);

	is_mddev_idle(mddev); /* this also initializes IO event counters */
	/* we don't use the checkpoint if there's a bitmap */
	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && !mddev->bitmap
	    && ! test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
		j = mddev->recovery_cp;
	else
		j = 0;
	io_sectors = 0;
	for (m = 0; m < SYNC_MARKS; m++) {
		mark[m] = jiffies;
		mark_cnt[m] = io_sectors;
	}
	last_mark = 0;
	mddev->resync_mark = mark[last_mark];
	mddev->resync_mark_cnt = mark_cnt[last_mark];

	/*
	 * Tune reconstruction:
	 */
	window = 32*(PAGE_SIZE/512);
	printk(KERN_INFO "md: using %dk window, over a total of %llu blocks.\n",
		window/2,(unsigned long long) max_sectors/2);
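	/* Illustrative arithmetic: with 4K pages the window is
	 * 32*(4096/512) = 256 sectors, so the printk above reports a
	 * 128k window (window/2 converts 512-byte sectors to KB).
	 */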

	atomic_set(&mddev->recovery_active, 0);
	init_waitqueue_head(&mddev->recovery_wait);
	last_check = 0;

	if (j>2) {
		printk(KERN_INFO
			"md: resuming recovery of %s from checkpoint.\n",
			mdname(mddev));
		mddev->curr_resync = j;
	}

	while (j < max_sectors) {
		sector_t sectors;

		skipped = 0;
		sectors = mddev->pers->sync_request(mddev, j, &skipped,
					    currspeed < sysctl_speed_limit_min);
		if (sectors == 0) {
			set_bit(MD_RECOVERY_ERR, &mddev->recovery);
			goto out;
		}

		if (!skipped) { /* actual IO requested */
			io_sectors += sectors;
			atomic_add(sectors, &mddev->recovery_active);
		}

		j += sectors;
		if (j>1) mddev->curr_resync = j;

		if (last_check == 0)
			/* this is the earliest that rebuild will be
			 * visible in /proc/mdstat
			 */
			md_new_event(mddev);

		if (last_check + window > io_sectors || j == max_sectors)
			continue;

		last_check = io_sectors;

		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery) ||
		    test_bit(MD_RECOVERY_ERR, &mddev->recovery))
			break;

	repeat:
		if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
			/* step marks */
			int next = (last_mark+1) % SYNC_MARKS;

			mddev->resync_mark = mark[next];
			mddev->resync_mark_cnt = mark_cnt[next];
			mark[next] = jiffies;
			mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
			last_mark = next;
		}


		if (kthread_should_stop()) {
			/*
			 * got a signal, exit.
			 */
			printk(KERN_INFO
				"md: md_do_sync() got signal ... exiting\n");
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			goto out;
		}

		/*
		 * this loop exits only if we are slower than
		 * the 'hard' speed limit, or the system was IO-idle for
		 * a jiffy.
		 * the system might be non-idle CPU-wise, but we only care
		 * about not overloading the IO subsystem. (things like an
		 * e2fsck being done on the RAID array should execute fast)
		 */
		mddev->queue->unplug_fn(mddev->queue);
		cond_resched();

		currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
			/((jiffies-mddev->resync_mark)/HZ +1) +1;
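		/* Units check (illustrative): io_sectors and resync_mark_cnt
		 * are 512-byte sectors, so dividing by 2 yields KB; the
		 * elapsed time is rounded up by at least one second and the
		 * result bumped by 1, so currspeed is a non-zero KB/sec
		 * figure comparable with speed_limit_{min,max}.
		 */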

		if (currspeed > sysctl_speed_limit_min) {
			if ((currspeed > sysctl_speed_limit_max) ||
					!is_mddev_idle(mddev)) {
				msleep(500);
				goto repeat;
			}
		}
	}
	printk(KERN_INFO "md: %s: sync done.\n",mdname(mddev));
	/*
	 * this also signals 'finished resyncing' to md_stop
	 */
 out:
	mddev->queue->unplug_fn(mddev->queue);

	wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));

	/* tell personality that we are finished */
	mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);

	if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
	    mddev->curr_resync > 2 &&
	    mddev->curr_resync >= mddev->recovery_cp) {
		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
			printk(KERN_INFO
				"md: checkpointing recovery of %s.\n",
				mdname(mddev));
			mddev->recovery_cp = mddev->curr_resync;
		} else
			mddev->recovery_cp = MaxSector;
	}

 skip:
	mddev->curr_resync = 0;
	wake_up(&resync_wait);
	set_bit(MD_RECOVERY_DONE, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
}


/*
 * This routine is regularly called by all per-raid-array threads to
 * deal with generic issues like resync and super-block update.
 * Raid personalities that don't have a thread (linear/raid0) do not
 * need this as they never do any recovery or update the superblock.
 *
 * It does not do any resync itself, but rather "forks" off other threads
 * to do that as needed.
 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
 * "->recovery" and create a thread at ->sync_thread.
 * When the thread finishes it sets MD_RECOVERY_DONE (and might set MD_RECOVERY_ERR)
 * and wakes up this thread which will reap the thread and finish up.
 * This thread also removes any faulty devices (with nr_pending == 0).
 *
 * The overall approach is:
 *  1/ if the superblock needs updating, update it.
 *  2/ If a recovery thread is running, don't do anything else.
 *  3/ If recovery has finished, clean up, possibly marking spares active.
 *  4/ If there are any faulty devices, remove them.
 *  5/ If array is degraded, try to add spare devices.
 *  6/ If array has spares or is not in-sync, start a resync thread.
 */
void md_check_recovery(mddev_t *mddev)
{
	mdk_rdev_t *rdev;
	struct list_head *rtmp;


	if (mddev->bitmap)
		bitmap_daemon_work(mddev->bitmap);

	if (mddev->ro)
		return;

	if (signal_pending(current)) {
		if (mddev->pers->sync_request) {
			printk(KERN_INFO "md: %s in immediate safe mode\n",
			       mdname(mddev));
			mddev->safemode = 2;
		}
		flush_signals(current);
	}

	if ( ! (
		mddev->sb_dirty ||
		test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
		test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
		(mddev->safemode == 1) ||
		(mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
		 && !mddev->in_sync && mddev->recovery_cp == MaxSector)
		))
		return;

	if (mddev_trylock(mddev)==0) {
		int spares =0;

		spin_lock_irq(&mddev->write_lock);
		if (mddev->safemode && !atomic_read(&mddev->writes_pending) &&
		    !mddev->in_sync && mddev->recovery_cp == MaxSector) {
			mddev->in_sync = 1;
			mddev->sb_dirty = 1;
		}
		if (mddev->safemode == 1)
			mddev->safemode = 0;
		spin_unlock_irq(&mddev->write_lock);

		if (mddev->sb_dirty)
			md_update_sb(mddev);


		if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
		    !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
			/* resync/recovery still happening */
			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			goto unlock;
		}
		if (mddev->sync_thread) {
			/* resync has finished, collect result */
			md_unregister_thread(mddev->sync_thread);
			mddev->sync_thread = NULL;
			if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
			    !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
				/* success...*/
				/* activate any spares */
				mddev->pers->spare_active(mddev);
			}
			md_update_sb(mddev);

			/* if array is no longer degraded, then any saved_raid_disk
			 * information must be scrapped
			 */
			if (!mddev->degraded)
				ITERATE_RDEV(mddev,rdev,rtmp)
					rdev->saved_raid_disk = -1;

			mddev->recovery = 0;
			/* flag recovery needed just to double check */
			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			md_new_event(mddev);
			goto unlock;
		}
		/* Clear some bits that don't mean anything, but
		 * might be left set
		 */
		clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		clear_bit(MD_RECOVERY_ERR, &mddev->recovery);
		clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
		clear_bit(MD_RECOVERY_DONE, &mddev->recovery);

		/* no recovery is running.
		 * remove any failed drives, then
		 * add spares if possible.
		 * Spares are also removed and re-added, to allow
		 * the personality to fail the re-add.
		 */
		ITERATE_RDEV(mddev,rdev,rtmp)
			if (rdev->raid_disk >= 0 &&
			    (test_bit(Faulty, &rdev->flags) || ! test_bit(In_sync, &rdev->flags)) &&
			    atomic_read(&rdev->nr_pending)==0) {
				if (mddev->pers->hot_remove_disk(mddev, rdev->raid_disk)==0) {
					char nm[20];
					sprintf(nm,"rd%d", rdev->raid_disk);
					sysfs_remove_link(&mddev->kobj, nm);
					rdev->raid_disk = -1;
				}
			}

		if (mddev->degraded) {
			ITERATE_RDEV(mddev,rdev,rtmp)
				if (rdev->raid_disk < 0
				    && !test_bit(Faulty, &rdev->flags)) {
					if (mddev->pers->hot_add_disk(mddev,rdev)) {
						char nm[20];
						sprintf(nm, "rd%d", rdev->raid_disk);
						sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
						spares++;
						md_new_event(mddev);
					} else
						break;
				}
		}

		if (spares) {
			clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
			clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
		} else if (mddev->recovery_cp < MaxSector) {
			set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
		} else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
			/* nothing to be done ... */
			goto unlock;

		if (mddev->pers->sync_request) {
			set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
			if (spares && mddev->bitmap && ! mddev->bitmap->file) {
				/* We are adding a device or devices to an array
				 * which has the bitmap stored on all devices.
				 * So make sure all bitmap pages get written
				 */
				bitmap_write_all(mddev->bitmap);
			}
			mddev->sync_thread = md_register_thread(md_do_sync,
								mddev,
								"%s_resync");
			if (!mddev->sync_thread) {
				printk(KERN_ERR "%s: could not start resync"
					" thread...\n",
					mdname(mddev));
				/* leave the spares where they are, it shouldn't hurt */
				mddev->recovery = 0;
			} else
				md_wakeup_thread(mddev->sync_thread);
			md_new_event(mddev);
		}
	unlock:
		mddev_unlock(mddev);
	}
}

static int md_notify_reboot(struct notifier_block *this,
			    unsigned long code, void *x)
{
	struct list_head *tmp;
	mddev_t *mddev;

	if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) {

		printk(KERN_INFO "md: stopping all md devices.\n");

		ITERATE_MDDEV(mddev,tmp)
			if (mddev_trylock(mddev)==0)
				do_md_stop (mddev, 1);
		/*
		 * certain more exotic SCSI devices are known to be
		 * volatile wrt too early system reboots. While the
		 * right place to handle this issue is the given
		 * driver, we do want to have a safe RAID driver ...
		 */
		mdelay(1000*1);
	}
	return NOTIFY_DONE;
}

static struct notifier_block md_notifier = {
	.notifier_call	= md_notify_reboot,
	.next		= NULL,
	.priority	= INT_MAX, /* before any real devices */
};

static void md_geninit(void)
{
	struct proc_dir_entry *p;

	dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));

	p = create_proc_entry("mdstat", S_IRUGO, NULL);
	if (p)
		p->proc_fops = &md_seq_fops;
}

static int __init md_init(void)
{
	int minor;

	printk(KERN_INFO "md: md driver %d.%d.%d MAX_MD_DEVS=%d,"
			" MD_SB_DISKS=%d\n",
			MD_MAJOR_VERSION, MD_MINOR_VERSION,
			MD_PATCHLEVEL_VERSION, MAX_MD_DEVS, MD_SB_DISKS);
	printk(KERN_INFO "md: bitmap version %d.%d\n", BITMAP_MAJOR_HI,
			BITMAP_MINOR);

	if (register_blkdev(MAJOR_NR, "md"))
		return -1;
	if ((mdp_major=register_blkdev(0, "mdp"))<=0) {
		unregister_blkdev(MAJOR_NR, "md");
		return -1;
	}
	devfs_mk_dir("md");
	blk_register_region(MKDEV(MAJOR_NR, 0), MAX_MD_DEVS, THIS_MODULE,
				md_probe, NULL, NULL);
	blk_register_region(MKDEV(mdp_major, 0), MAX_MD_DEVS<<MdpMinorShift, THIS_MODULE,
			    md_probe, NULL, NULL);

	for (minor=0; minor < MAX_MD_DEVS; ++minor)
		devfs_mk_bdev(MKDEV(MAJOR_NR, minor),
				S_IFBLK|S_IRUSR|S_IWUSR,
				"md/%d", minor);

	for (minor=0; minor < MAX_MD_DEVS; ++minor)
		devfs_mk_bdev(MKDEV(mdp_major, minor<<MdpMinorShift),
			      S_IFBLK|S_IRUSR|S_IWUSR,
			      "md/mdp%d", minor);

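	/* Minor-number layout (a note, not code): with MdpMinorShift == 6,
	 * partitionable device i owns minors i<<6 .. (i<<6)+63 under the
	 * mdp major: the first minor is the whole device and the remaining
	 * 63 are its partitions, hence the minor<<MdpMinorShift above.
	 */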

	register_reboot_notifier(&md_notifier);
	raid_table_header = register_sysctl_table(raid_root_table, 1);

	md_geninit();
	return (0);
}

#ifndef MODULE

/*
 * Searches all registered partitions for autorun RAID arrays
 * at boot time.
 */
static dev_t detected_devices[128];
static int dev_cnt;

void md_autodetect_dev(dev_t dev)
{
	if (dev_cnt >= 0 && dev_cnt < 127)
		detected_devices[dev_cnt++] = dev;
}


static void autostart_arrays(int part)
{
	mdk_rdev_t *rdev;
	int i;

	printk(KERN_INFO "md: Autodetecting RAID arrays.\n");

	for (i = 0; i < dev_cnt; i++) {
		dev_t dev = detected_devices[i];

		rdev = md_import_device(dev,0, 0);
		if (IS_ERR(rdev))
			continue;

		if (test_bit(Faulty, &rdev->flags)) {
			MD_BUG();
			continue;
		}
		list_add(&rdev->same_set, &pending_raid_disks);
	}
	dev_cnt = 0;

	autorun_devices(part);
}

#endif

static __exit void md_exit(void)
{
	mddev_t *mddev;
	struct list_head *tmp;
	int i;
	blk_unregister_region(MKDEV(MAJOR_NR,0), MAX_MD_DEVS);
	blk_unregister_region(MKDEV(mdp_major,0), MAX_MD_DEVS << MdpMinorShift);
	for (i=0; i < MAX_MD_DEVS; i++)
		devfs_remove("md/%d", i);
	for (i=0; i < MAX_MD_DEVS; i++)
		devfs_remove("md/d%d", i);

	devfs_remove("md");

	unregister_blkdev(MAJOR_NR,"md");
	unregister_blkdev(mdp_major, "mdp");
	unregister_reboot_notifier(&md_notifier);
	unregister_sysctl_table(raid_table_header);
	remove_proc_entry("mdstat", NULL);
	ITERATE_MDDEV(mddev,tmp) {
		struct gendisk *disk = mddev->gendisk;
		if (!disk)
			continue;
		export_array(mddev);
		del_gendisk(disk);
		put_disk(disk);
		mddev->gendisk = NULL;
		mddev_put(mddev);
	}
}

module_init(md_init)
module_exit(md_exit)

static int get_ro(char *buffer, struct kernel_param *kp)
{
	return sprintf(buffer, "%d", start_readonly);
}
static int set_ro(const char *val, struct kernel_param *kp)
{
	char *e;
	int num = simple_strtoul(val, &e, 10);
	if (*val && (*e == '\0' || *e == '\n')) {
		start_readonly = num;
		return 0;
	}
	return -EINVAL;
}

module_param_call(start_ro, set_ro, get_ro, NULL, 0600);
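/* Usage sketch (assuming the usual "md-mod" module build): start_ro can
 * be set at load time or through sysfs, e.g.
 *
 *	modprobe md-mod start_ro=1
 *	echo 1 > /sys/module/md_mod/parameters/start_ro
 *
 * set_ro() accepts a decimal value optionally terminated by a newline,
 * which is exactly what the echo above writes.
 */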
module_param(start_dirty_degraded, int, 0644);

EXPORT_SYMBOL(register_md_personality);
EXPORT_SYMBOL(unregister_md_personality);
EXPORT_SYMBOL(md_error);
EXPORT_SYMBOL(md_done_sync);
EXPORT_SYMBOL(md_write_start);
EXPORT_SYMBOL(md_write_end);
EXPORT_SYMBOL(md_register_thread);
EXPORT_SYMBOL(md_unregister_thread);
EXPORT_SYMBOL(md_wakeup_thread);
EXPORT_SYMBOL(md_print_devices);
EXPORT_SYMBOL(md_check_recovery);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md");
MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);