/*
   md.c : Multiple Devices driver for Linux
	  Copyright (C) 1998, 1999, 2000 Ingo Molnar

     completely rewritten, based on the MD driver code from Marc Zyngier

   Changes:

   - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
   - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
   - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
   - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
   - kmod support by: Cyrus Durgin
   - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
   - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>

   - lots of fixes and improvements to the RAID1/RAID5 and generic
     RAID code (such as request based resynchronization):

     Neil Brown <neilb@cse.unsw.edu.au>.

   - persistent bitmap code
     Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/linkage.h>
#include <linux/raid/md.h>
#include <linux/raid/bitmap.h>
#include <linux/sysctl.h>
#include <linux/buffer_head.h> /* for invalidate_bdev */
#include <linux/poll.h>
#include <linux/mutex.h>
#include <linux/ctype.h>
#include <linux/freezer.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/kmod.h>
#include <asm/unaligned.h>
#define MAJOR_NR MD_MAJOR

/* 63 partitions with the alternate major number (mdp) */
#define MdpMinorShift 6

#define dprintk(x...) ((void)(DEBUG && printk(x)))

static void autostart_arrays(int part);

static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);

static void md_print_devices(void);

static DECLARE_WAIT_QUEUE_HEAD(resync_wait);

#define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }
/*
 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
 * is 1000 KB/sec, so the extra system load does not show up that much.
 * Increase it if you want to have more _guaranteed_ speed. Note that
 * the RAID driver will use the maximum available bandwidth if the IO
 * subsystem is idle. There is also an 'absolute maximum' reconstruction
 * speed limit - in case reconstruction slows down your system despite
 * idle IO detection.
 *
 * You can change it via /proc/sys/dev/raid/speed_limit_{min,max}
 * or /sys/block/mdX/md/sync_speed_{min,max}.
 */

static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;
static inline int speed_min(mddev_t *mddev)
{
	return mddev->sync_speed_min ?
		mddev->sync_speed_min : sysctl_speed_limit_min;
}

static inline int speed_max(mddev_t *mddev)
{
	return mddev->sync_speed_max ?
		mddev->sync_speed_max : sysctl_speed_limit_max;
}
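/*
 * Illustrative sketch (not part of the original file): the resync loop
 * in md_do_sync() consults these helpers roughly like the following,
 * throttling once throughput exceeds the guaranteed minimum while the
 * array is busy, or once it exceeds the absolute maximum:
 *
 *	if (currspeed > speed_min(mddev) &&
 *	    (currspeed > speed_max(mddev) || !is_mddev_idle(mddev)))
 *		msleep(500);
 *
 * 'currspeed' stands in for the loop's own throughput bookkeeping.
 */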
static struct ctl_table_header *raid_table_header;

static ctl_table raid_table[] = {
	{
		.ctl_name	= DEV_RAID_SPEED_LIMIT_MIN,
		.procname	= "speed_limit_min",
		.data		= &sysctl_speed_limit_min,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= DEV_RAID_SPEED_LIMIT_MAX,
		.procname	= "speed_limit_max",
		.data		= &sysctl_speed_limit_max,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= &proc_dointvec,
	},
	{ .ctl_name = 0 }
};

static ctl_table raid_dir_table[] = {
	{
		.ctl_name	= DEV_RAID,
		.procname	= "raid",
		.maxlen		= 0,
		.mode		= S_IRUGO|S_IXUGO,
		.child		= raid_table,
	},
	{ .ctl_name = 0 }
};

static ctl_table raid_root_table[] = {
	{
		.ctl_name	= CTL_DEV,
		.procname	= "dev",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= raid_dir_table,
	},
	{ .ctl_name = 0 }
};
static struct block_device_operations md_fops;

static int start_readonly;

/*
 * We have a system wide 'event count' that is incremented
 * on any 'interesting' event, and readers of /proc/mdstat
 * can use 'poll' or 'select' to find out when the event
 * count increases.
 *
 * Events are:
 *  start array, stop array, error, add device, remove device,
 *  start build, activate spare
 */
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
void md_new_event(mddev_t *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
	sysfs_notify(&mddev->kobj, NULL, "sync_action");
}
EXPORT_SYMBOL_GPL(md_new_event);
/* Alternate version that can be called from interrupts
 * when calling sysfs_notify isn't needed.
 */
static void md_new_event_inintr(mddev_t *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}
/*
 * Enables iteration over all existing md arrays.
 * all_mddevs_lock protects this list.
 */
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);

/*
 * iterates through all used mddevs in the system.
 * We take care to grab the all_mddevs_lock whenever navigating
 * the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop while owning
 * a reference to the current mddev must mddev_put it.
 */
#define for_each_mddev(mddev,tmp)					\
									\
	for (({ spin_lock(&all_mddevs_lock);				\
		tmp = all_mddevs.next;					\
		mddev = NULL;});					\
	     ({ if (tmp != &all_mddevs)					\
			mddev_get(list_entry(tmp, mddev_t, all_mddevs));\
		spin_unlock(&all_mddevs_lock);				\
		if (mddev) mddev_put(mddev);				\
		mddev = list_entry(tmp, mddev_t, all_mddevs);		\
		tmp != &all_mddevs;});					\
	     ({ spin_lock(&all_mddevs_lock);				\
		tmp = tmp->next;})					\
		)
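/*
 * Usage sketch (illustrative, not from the original source): the caller
 * supplies both cursors and the macro manages locking and refcounts:
 *
 *	mddev_t *mddev;
 *	struct list_head *tmp;
 *
 *	for_each_mddev(mddev, tmp)
 *		printk("md: %s\n", mdname(mddev));
 *
 * Breaking out early leaves a reference held, which the caller must
 * drop with mddev_put(mddev).
 */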
static int md_fail_request (struct request_queue *q, struct bio *bio)
{
	bio_io_error(bio);
	return 0;
}

static inline mddev_t *mddev_get(mddev_t *mddev)
{
	atomic_inc(&mddev->active);
	return mddev;
}

static void mddev_put(mddev_t *mddev)
{
	if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
		return;
	if (!mddev->raid_disks && list_empty(&mddev->disks)) {
		list_del(&mddev->all_mddevs);
		spin_unlock(&all_mddevs_lock);
		blk_cleanup_queue(mddev->queue);
		kobject_put(&mddev->kobj);
	} else
		spin_unlock(&all_mddevs_lock);
}
static mddev_t * mddev_find(dev_t unit)
{
	mddev_t *mddev, *new = NULL;

 retry:
	spin_lock(&all_mddevs_lock);
	list_for_each_entry(mddev, &all_mddevs, all_mddevs)
		if (mddev->unit == unit) {
			mddev_get(mddev);
			spin_unlock(&all_mddevs_lock);
			kfree(new);
			return mddev;
		}

	if (new) {
		list_add(&new->all_mddevs, &all_mddevs);
		spin_unlock(&all_mddevs_lock);
		return new;
	}
	spin_unlock(&all_mddevs_lock);

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	new->unit = unit;
	if (MAJOR(unit) == MD_MAJOR)
		new->md_minor = MINOR(unit);
	else
		new->md_minor = MINOR(unit) >> MdpMinorShift;

	mutex_init(&new->reconfig_mutex);
	INIT_LIST_HEAD(&new->disks);
	INIT_LIST_HEAD(&new->all_mddevs);
	init_timer(&new->safemode_timer);
	atomic_set(&new->active, 1);
	spin_lock_init(&new->write_lock);
	init_waitqueue_head(&new->sb_wait);
	init_waitqueue_head(&new->recovery_wait);
	new->reshape_position = MaxSector;
	new->resync_min = 0;
	new->resync_max = MaxSector;
	new->level = LEVEL_NONE;

	new->queue = blk_alloc_queue(GFP_KERNEL);
	if (!new->queue) {
		kfree(new);
		return NULL;
	}
	/* Can be unlocked because the queue is new: no concurrency */
	queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, new->queue);

	blk_queue_make_request(new->queue, md_fail_request);

	goto retry;
}
static inline int mddev_lock(mddev_t * mddev)
{
	return mutex_lock_interruptible(&mddev->reconfig_mutex);
}

static inline int mddev_trylock(mddev_t * mddev)
{
	return mutex_trylock(&mddev->reconfig_mutex);
}

static inline void mddev_unlock(mddev_t * mddev)
{
	mutex_unlock(&mddev->reconfig_mutex);

	md_wakeup_thread(mddev->thread);
}
static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
{
	mdk_rdev_t *rdev;
	struct list_head *tmp;

	rdev_for_each(rdev, tmp, mddev) {
		if (rdev->desc_nr == nr)
			return rdev;
	}
	return NULL;
}
static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev)
{
	mdk_rdev_t *rdev;
	struct list_head *tmp;

	rdev_for_each(rdev, tmp, mddev) {
		if (rdev->bdev->bd_dev == dev)
			return rdev;
	}
	return NULL;
}
static struct mdk_personality *find_pers(int level, char *clevel)
{
	struct mdk_personality *pers;
	list_for_each_entry(pers, &pers_list, list) {
		if (level != LEVEL_NONE && pers->level == level)
			return pers;
		if (strcmp(pers->name, clevel)==0)
			return pers;
	}
	return NULL;
}
static inline sector_t calc_dev_sboffset(struct block_device *bdev)
{
	sector_t size = bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
	return MD_NEW_SIZE_BLOCKS(size);
}
static sector_t calc_dev_size(mdk_rdev_t *rdev, unsigned chunk_size)
{
	sector_t size;

	size = rdev->sb_offset;

	if (chunk_size)
		size &= ~((sector_t)chunk_size/1024 - 1);
	return size;
}
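/*
 * Worked example (illustrative): with sb_offset = 1000050 (KB) and a
 * 64KB chunk_size, the mask is ~(64-1), so the usable size is rounded
 * down to 1000000KB -- a whole number of chunks.
 */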
static int alloc_disk_sb(mdk_rdev_t * rdev)
{
	if (rdev->sb_page)
		MD_BUG();

	rdev->sb_page = alloc_page(GFP_KERNEL);
	if (!rdev->sb_page) {
		printk(KERN_ALERT "md: out of memory.\n");
		return -ENOMEM;
	}

	return 0;
}
static void free_disk_sb(mdk_rdev_t * rdev)
{
	if (rdev->sb_page) {
		put_page(rdev->sb_page);
		rdev->sb_loaded = 0;
		rdev->sb_page = NULL;
		rdev->sb_offset = 0;
		rdev->size = 0;
	}
}
static void super_written(struct bio *bio, int error)
{
	mdk_rdev_t *rdev = bio->bi_private;
	mddev_t *mddev = rdev->mddev;

	if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
		printk("md: super_written gets error=%d, uptodate=%d\n",
		       error, test_bit(BIO_UPTODATE, &bio->bi_flags));
		WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags));
		md_error(mddev, rdev);
	}

	if (atomic_dec_and_test(&mddev->pending_writes))
		wake_up(&mddev->sb_wait);
	bio_put(bio);
}
static void super_written_barrier(struct bio *bio, int error)
{
	struct bio *bio2 = bio->bi_private;
	mdk_rdev_t *rdev = bio2->bi_private;
	mddev_t *mddev = rdev->mddev;

	if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
	    error == -EOPNOTSUPP) {
		unsigned long flags;

		/* barriers don't appear to be supported :-( */
		set_bit(BarriersNotsupp, &rdev->flags);
		mddev->barriers_work = 0;
		spin_lock_irqsave(&mddev->write_lock, flags);
		bio2->bi_next = mddev->biolist;
		mddev->biolist = bio2;
		spin_unlock_irqrestore(&mddev->write_lock, flags);
		wake_up(&mddev->sb_wait);
		bio_put(bio);
	} else {
		bio_put(bio2);
		bio->bi_private = rdev;
		super_written(bio, error);
	}
}
void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
		   sector_t sector, int size, struct page *page)
{
	/* write first size bytes of page to sector of rdev
	 * Increment mddev->pending_writes before returning
	 * and decrement it on completion, waking up sb_wait
	 * if zero is reached.
	 * If an error occurred, call md_error
	 *
	 * As we might need to resubmit the request if BIO_RW_BARRIER
	 * causes ENOTSUPP, we allocate a spare bio...
	 */
	struct bio *bio = bio_alloc(GFP_NOIO, 1);
	int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNC);

	bio->bi_bdev = rdev->bdev;
	bio->bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	bio->bi_private = rdev;
	bio->bi_end_io = super_written;
	bio->bi_rw = rw;

	atomic_inc(&mddev->pending_writes);
	if (!test_bit(BarriersNotsupp, &rdev->flags)) {
		struct bio *rbio;
		rw |= (1<<BIO_RW_BARRIER);
		rbio = bio_clone(bio, GFP_NOIO);
		rbio->bi_private = bio;
		rbio->bi_end_io = super_written_barrier;
		submit_bio(rw, rbio);
	} else
		submit_bio(rw, bio);
}
void md_super_wait(mddev_t *mddev)
{
	/* wait for all superblock writes that were scheduled to complete.
	 * if any had to be retried (due to BARRIER problems), retry them
	 */
	DEFINE_WAIT(wq);
	for(;;) {
		prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&mddev->pending_writes)==0)
			break;
		while (mddev->biolist) {
			struct bio *bio;
			spin_lock_irq(&mddev->write_lock);
			bio = mddev->biolist;
			mddev->biolist = bio->bi_next;
			bio->bi_next = NULL;
			spin_unlock_irq(&mddev->write_lock);
			submit_bio(bio->bi_rw, bio);
		}
		schedule();
	}
	finish_wait(&mddev->sb_wait, &wq);
}
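/*
 * Pairing sketch (illustrative): md_update_sb() drives these two as a
 * batch -- one md_super_write() per device, then a single wait:
 *
 *	rdev_for_each(rdev, tmp, mddev)
 *		md_super_write(mddev, rdev, rdev->sb_offset<<1,
 *			       rdev->sb_size, rdev->sb_page);
 *	md_super_wait(mddev);
 *
 * pending_writes counts the batch; sb_wait wakes when it hits zero.
 */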
static void bi_complete(struct bio *bio, int error)
{
	complete((struct completion*)bio->bi_private);
}
int sync_page_io(struct block_device *bdev, sector_t sector, int size,
		   struct page *page, int rw)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 1);
	struct completion event;
	int ret;

	rw |= (1 << BIO_RW_SYNC);

	bio->bi_bdev = bdev;
	bio->bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	init_completion(&event);
	bio->bi_private = &event;
	bio->bi_end_io = bi_complete;
	submit_bio(rw, bio);
	wait_for_completion(&event);

	ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL_GPL(sync_page_io);
static int read_disk_sb(mdk_rdev_t * rdev, int size)
{
	char b[BDEVNAME_SIZE];
	if (!rdev->sb_page) {
		MD_BUG();
		return -EINVAL;
	}
	if (rdev->sb_loaded)
		return 0;

	if (!sync_page_io(rdev->bdev, rdev->sb_offset<<1, size, rdev->sb_page, READ))
		goto fail;
	rdev->sb_loaded = 1;
	return 0;

fail:
	printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
		bdevname(rdev->bdev,b));
	return -EINVAL;
}
static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	if (	(sb1->set_uuid0 == sb2->set_uuid0) &&
		(sb1->set_uuid1 == sb2->set_uuid1) &&
		(sb1->set_uuid2 == sb2->set_uuid2) &&
		(sb1->set_uuid3 == sb2->set_uuid3))
		return 1;
	return 0;
}
static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	int ret;
	mdp_super_t *tmp1, *tmp2;

	tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
	tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);

	if (!tmp1 || !tmp2) {
		ret = 0;
		printk(KERN_INFO "md.c: sb1 is not equal to sb2!\n");
		goto abort;
	}

	*tmp1 = *sb1;
	*tmp2 = *sb2;

	/*
	 * nr_disks is not constant
	 */
	tmp1->nr_disks = 0;
	tmp2->nr_disks = 0;

	if (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4))
		ret = 0;
	else
		ret = 1;

abort:
	kfree(tmp1);
	kfree(tmp2);
	return ret;
}
static u32 md_csum_fold(u32 csum)
{
	csum = (csum & 0xffff) + (csum >> 16);
	return (csum & 0xffff) + (csum >> 16);
}
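/*
 * Worked example (illustrative): md_csum_fold(0x89abcdef)
 *   first pass:  0xcdef + 0x89ab = 0x1579a
 *   second pass: 0x579a + 0x1    = 0x579b
 * so any carry out of the low 16 bits is folded back in.
 */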
static unsigned int calc_sb_csum(mdp_super_t * sb)
{
	u64 newcsum = 0;
	u32 *sb32 = (u32*)sb;
	int i;
	unsigned int disk_csum, csum;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;

	for (i = 0; i < MD_SB_BYTES/4 ; i++)
		newcsum += sb32[i];
	csum = (newcsum & 0xffffffff) + (newcsum>>32);

#ifdef CONFIG_ALPHA
	/* This used to use csum_partial, which was wrong for several
	 * reasons including that different results are returned on
	 * different architectures.  It isn't critical that we get exactly
	 * the same return value as before (we always csum_fold before
	 * testing, and that removes any differences).  However as we
	 * know that csum_partial always returned a 16bit value on
	 * alphas, do a fold to maximise conformity to previous behaviour.
	 */
	sb->sb_csum = md_csum_fold(disk_csum);
#else
	sb->sb_csum = disk_csum;
#endif
	return csum;
}
/*
 * Handle superblock details.
 * We want to be able to handle multiple superblock formats
 * so we have a common interface to them all, and an array of
 * different handlers.
 * We rely on user-space to write the initial superblock, and support
 * reading and updating of superblocks.
 * Interface methods are:
 *   int load_super(mdk_rdev_t *dev, mdk_rdev_t *refdev, int minor_version)
 *      loads and validates a superblock on dev.
 *      if refdev != NULL, compare superblocks on both devices
 *    Return:
 *      0 - dev has a superblock that is compatible with refdev
 *      1 - dev has a superblock that is compatible and newer than refdev
 *          so dev should be used as the refdev in future
 *      -EINVAL superblock incompatible or invalid
 *      -othererror e.g. -EIO
 *
 *   int validate_super(mddev_t *mddev, mdk_rdev_t *dev)
 *      Verify that dev is acceptable into mddev.
 *       The first time, mddev->raid_disks will be 0, and data from
 *       dev should be merged in.  Subsequent calls check that dev
 *       is new enough.  Return 0 or -EINVAL
 *
 *   void sync_super(mddev_t *mddev, mdk_rdev_t *dev)
 *     Update the superblock for rdev with data in mddev
 *     This does not write to disc.
 *
 */

struct super_type  {
	char		*name;
	struct module	*owner;
	int		(*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev,
				      int minor_version);
	int		(*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev);
	void		(*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
};
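/*
 * Dispatch sketch (illustrative): callers index super_types[] by the
 * array's metadata major version and go through the ops, e.g.:
 *
 *	err = super_types[mddev->major_version].
 *		load_super(rdev, refdev, mddev->minor_version);
 *
 * analyze_sbs() below uses this pattern for every member device.
 */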
/*
 * load_super for 0.90.0
 */
static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	mdp_super_t *sb;
	int ret;
	sector_t sb_offset;

	/*
	 * Calculate the position of the superblock,
	 * it's at the end of the disk.
	 *
	 * It also happens to be a multiple of 4Kb.
	 */
	sb_offset = calc_dev_sboffset(rdev->bdev);
	rdev->sb_offset = sb_offset;

	ret = read_disk_sb(rdev, MD_SB_BYTES);
	if (ret) return ret;

	ret = -EINVAL;

	bdevname(rdev->bdev, b);
	sb = (mdp_super_t*)page_address(rdev->sb_page);

	if (sb->md_magic != MD_SB_MAGIC) {
		printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
		       b);
		goto abort;
	}

	if (sb->major_version != 0 ||
	    sb->minor_version < 90 ||
	    sb->minor_version > 91) {
		printk(KERN_WARNING "Bad version number %d.%d on %s\n",
			sb->major_version, sb->minor_version,
			b);
		goto abort;
	}

	if (sb->raid_disks <= 0)
		goto abort;

	if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
		printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
			b);
		goto abort;
	}

	rdev->preferred_minor = sb->md_minor;
	rdev->data_offset = 0;
	rdev->sb_size = MD_SB_BYTES;

	if (sb->state & (1<<MD_SB_BITMAP_PRESENT)) {
		if (sb->level != 1 && sb->level != 4
		    && sb->level != 5 && sb->level != 6
		    && sb->level != 10) {
			/* FIXME use a better test */
			printk(KERN_WARNING
			       "md: bitmaps not supported for this level.\n");
			goto abort;
		}
	}

	if (sb->level == LEVEL_MULTIPATH)
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = sb->this_disk.number;

	if (!refdev) {
		ret = 1;
	} else {
		__u64 ev1, ev2;
		mdp_super_t *refsb = (mdp_super_t*)page_address(refdev->sb_page);
		if (!uuid_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has different UUID to %s\n",
				b, bdevname(refdev->bdev,b2));
			goto abort;
		}
		if (!sb_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has same UUID"
			       " but different superblock to %s\n",
			       b, bdevname(refdev->bdev, b2));
			goto abort;
		}
		ev1 = md_event(sb);
		ev2 = md_event(refsb);
		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	rdev->size = calc_dev_size(rdev, sb->chunk_size);

	if (rdev->size < sb->size && sb->level > 1)
		/* "this cannot possibly happen" ... */
		ret = -EINVAL;

 abort:
	return ret;
}
/*
 * validate_super for 0.90.0
 */
static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
	mdp_disk_t *desc;
	mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page);
	__u64 ev1 = md_event(sb);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);
	clear_bit(BarriersNotsupp, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 0;
		mddev->minor_version = sb->minor_version;
		mddev->patch_version = sb->patch_version;
		mddev->external = 0;
		mddev->chunk_size = sb->chunk_size;
		mddev->ctime = sb->ctime;
		mddev->utime = sb->utime;
		mddev->level = sb->level;
		mddev->clevel[0] = 0;
		mddev->layout = sb->layout;
		mddev->raid_disks = sb->raid_disks;
		mddev->size = sb->size;
		mddev->events = ev1;
		mddev->bitmap_offset = 0;
		mddev->default_bitmap_offset = MD_SB_BYTES >> 9;

		if (mddev->minor_version >= 91) {
			mddev->reshape_position = sb->reshape_position;
			mddev->delta_disks = sb->delta_disks;
			mddev->new_level = sb->new_level;
			mddev->new_layout = sb->new_layout;
			mddev->new_chunk = sb->new_chunk;
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk = mddev->chunk_size;
		}

		if (sb->state & (1<<MD_SB_CLEAN))
			mddev->recovery_cp = MaxSector;
		else {
			if (sb->events_hi == sb->cp_events_hi &&
			    sb->events_lo == sb->cp_events_lo) {
				mddev->recovery_cp = sb->recovery_cp;
			} else
				mddev->recovery_cp = 0;
		}

		memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
		memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
		memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
		memcpy(mddev->uuid+12,&sb->set_uuid3, 4);

		mddev->max_disks = MD_SB_DISKS;

		if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
		    mddev->bitmap_file == NULL)
			mddev->bitmap_offset = mddev->default_bitmap_offset;

	} else if (mddev->pers == NULL) {
		/* Insist on good event counter while assembling */
		++ev1;
		if (ev1 < mddev->events)
			return -EINVAL;
	} else if (mddev->bitmap) {
		/* if adding to array with a bitmap, then we can accept an
		 * older device ... but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}

	if (mddev->level != LEVEL_MULTIPATH) {
		desc = sb->disks + rdev->desc_nr;

		if (desc->state & (1<<MD_DISK_FAULTY))
			set_bit(Faulty, &rdev->flags);
		else if (desc->state & (1<<MD_DISK_SYNC) /* &&
			    desc->raid_disk < mddev->raid_disks */) {
			set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = desc->raid_disk;
		}
		if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);
	return 0;
}
/*
 * sync_super for 0.90.0
 */
static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
{
	mdp_super_t *sb;
	struct list_head *tmp;
	mdk_rdev_t *rdev2;
	int next_spare = mddev->raid_disks;

	/* make rdev->sb match mddev data..
	 *
	 * 1/ zero out disks
	 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
	 * 3/ any empty disks < next_spare become removed
	 *
	 * disks[0] gets initialised to REMOVED because
	 * we cannot be sure from other fields if it has
	 * been initialised or not.
	 */
	int i;
	int active=0, working=0,failed=0,spare=0,nr_disks=0;

	rdev->sb_size = MD_SB_BYTES;

	sb = (mdp_super_t*)page_address(rdev->sb_page);

	memset(sb, 0, sizeof(*sb));

	sb->md_magic = MD_SB_MAGIC;
	sb->major_version = mddev->major_version;
	sb->patch_version = mddev->patch_version;
	sb->gvalid_words  = 0; /* ignored */
	memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
	memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
	memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
	memcpy(&sb->set_uuid3, mddev->uuid+12,4);

	sb->ctime = mddev->ctime;
	sb->level = mddev->level;
	sb->size  = mddev->size;
	sb->raid_disks = mddev->raid_disks;
	sb->md_minor = mddev->md_minor;
	sb->not_persistent = 0;
	sb->utime = mddev->utime;
	sb->state = 0;
	sb->events_hi = (mddev->events>>32);
	sb->events_lo = (u32)mddev->events;

	if (mddev->reshape_position == MaxSector)
		sb->minor_version = 90;
	else {
		sb->minor_version = 91;
		sb->reshape_position = mddev->reshape_position;
		sb->new_level = mddev->new_level;
		sb->delta_disks = mddev->delta_disks;
		sb->new_layout = mddev->new_layout;
		sb->new_chunk = mddev->new_chunk;
	}
	mddev->minor_version = sb->minor_version;
	if (mddev->in_sync)
	{
		sb->recovery_cp = mddev->recovery_cp;
		sb->cp_events_hi = (mddev->events>>32);
		sb->cp_events_lo = (u32)mddev->events;
		if (mddev->recovery_cp == MaxSector)
			sb->state = (1<< MD_SB_CLEAN);
	} else
		sb->recovery_cp = 0;

	sb->layout = mddev->layout;
	sb->chunk_size = mddev->chunk_size;

	if (mddev->bitmap && mddev->bitmap_file == NULL)
		sb->state |= (1<<MD_SB_BITMAP_PRESENT);

	sb->disks[0].state = (1<<MD_DISK_REMOVED);
	rdev_for_each(rdev2, tmp, mddev) {
		mdp_disk_t *d;
		int desc_nr;
		if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
		    && !test_bit(Faulty, &rdev2->flags))
			desc_nr = rdev2->raid_disk;
		else
			desc_nr = next_spare++;
		rdev2->desc_nr = desc_nr;
		d = &sb->disks[rdev2->desc_nr];
		nr_disks++;
		d->number = rdev2->desc_nr;
		d->major = MAJOR(rdev2->bdev->bd_dev);
		d->minor = MINOR(rdev2->bdev->bd_dev);
		if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
		    && !test_bit(Faulty, &rdev2->flags))
			d->raid_disk = rdev2->raid_disk;
		else
			d->raid_disk = rdev2->desc_nr; /* compatibility */
		if (test_bit(Faulty, &rdev2->flags))
			d->state = (1<<MD_DISK_FAULTY);
		else if (test_bit(In_sync, &rdev2->flags)) {
			d->state = (1<<MD_DISK_ACTIVE);
			d->state |= (1<<MD_DISK_SYNC);
			active++;
			working++;
		} else {
			d->state = 0;
			spare++;
			working++;
		}
		if (test_bit(WriteMostly, &rdev2->flags))
			d->state |= (1<<MD_DISK_WRITEMOSTLY);
	}
	/* now set the "removed" and "faulty" bits on any missing devices */
	for (i=0 ; i < mddev->raid_disks ; i++) {
		mdp_disk_t *d = &sb->disks[i];
		if (d->state == 0 && d->number == 0) {
			d->number = i;
			d->raid_disk = i;
			d->state = (1<<MD_DISK_REMOVED);
			d->state |= (1<<MD_DISK_FAULTY);
			failed++;
		}
	}
	sb->nr_disks = nr_disks;
	sb->active_disks = active;
	sb->working_disks = working;
	sb->failed_disks = failed;
	sb->spare_disks = spare;

	sb->this_disk = sb->disks[rdev->desc_nr];
	sb->sb_csum = calc_sb_csum(sb);
}
/*
 * version 1 superblock
 */

static __le32 calc_sb_1_csum(struct mdp_superblock_1 * sb)
{
	__le32 disk_csum;
	u32 csum;
	unsigned long long newcsum;
	int size = 256 + le32_to_cpu(sb->max_dev)*2;
	__le32 *isuper = (__le32*)sb;
	int i;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;
	newcsum = 0;
	for (i=0; size>=4; size -= 4 )
		newcsum += le32_to_cpu(*isuper++);

	if (size == 2)
		newcsum += le16_to_cpu(*(__le16*) isuper);

	csum = (newcsum & 0xffffffff) + (newcsum >> 32);
	sb->sb_csum = disk_csum;
	return cpu_to_le32(csum);
}
*rdev
, mdk_rdev_t
*refdev
, int minor_version
)
1036 struct mdp_superblock_1
*sb
;
1039 char b
[BDEVNAME_SIZE
], b2
[BDEVNAME_SIZE
];
1043 * Calculate the position of the superblock.
1044 * It is always aligned to a 4K boundary and
1045 * depeding on minor_version, it can be:
1046 * 0: At least 8K, but less than 12K, from end of device
1047 * 1: At start of device
1048 * 2: 4K from start of device.
1050 switch(minor_version
) {
1052 sb_offset
= rdev
->bdev
->bd_inode
->i_size
>> 9;
1054 sb_offset
&= ~(sector_t
)(4*2-1);
1055 /* convert from sectors to K */
1067 rdev
->sb_offset
= sb_offset
;
1069 /* superblock is rarely larger than 1K, but it can be larger,
1070 * and it is safe to read 4k, so we do that
1072 ret
= read_disk_sb(rdev
, 4096);
1073 if (ret
) return ret
;
1076 sb
= (struct mdp_superblock_1
*)page_address(rdev
->sb_page
);
1078 if (sb
->magic
!= cpu_to_le32(MD_SB_MAGIC
) ||
1079 sb
->major_version
!= cpu_to_le32(1) ||
1080 le32_to_cpu(sb
->max_dev
) > (4096-256)/2 ||
1081 le64_to_cpu(sb
->super_offset
) != (rdev
->sb_offset
<<1) ||
1082 (le32_to_cpu(sb
->feature_map
) & ~MD_FEATURE_ALL
) != 0)
1085 if (calc_sb_1_csum(sb
) != sb
->sb_csum
) {
1086 printk("md: invalid superblock checksum on %s\n",
1087 bdevname(rdev
->bdev
,b
));
1090 if (le64_to_cpu(sb
->data_size
) < 10) {
1091 printk("md: data_size too small on %s\n",
1092 bdevname(rdev
->bdev
,b
));
1095 if ((le32_to_cpu(sb
->feature_map
) & MD_FEATURE_BITMAP_OFFSET
)) {
1096 if (sb
->level
!= cpu_to_le32(1) &&
1097 sb
->level
!= cpu_to_le32(4) &&
1098 sb
->level
!= cpu_to_le32(5) &&
1099 sb
->level
!= cpu_to_le32(6) &&
1100 sb
->level
!= cpu_to_le32(10)) {
1102 "md: bitmaps not supported for this level.\n");
1107 rdev
->preferred_minor
= 0xffff;
1108 rdev
->data_offset
= le64_to_cpu(sb
->data_offset
);
1109 atomic_set(&rdev
->corrected_errors
, le32_to_cpu(sb
->cnt_corrected_read
));
1111 rdev
->sb_size
= le32_to_cpu(sb
->max_dev
) * 2 + 256;
1112 bmask
= queue_hardsect_size(rdev
->bdev
->bd_disk
->queue
)-1;
1113 if (rdev
->sb_size
& bmask
)
1114 rdev
->sb_size
= (rdev
->sb_size
| bmask
) + 1;
1117 && rdev
->data_offset
< sb_offset
+ (rdev
->sb_size
/512))
1120 if (sb
->level
== cpu_to_le32(LEVEL_MULTIPATH
))
1123 rdev
->desc_nr
= le32_to_cpu(sb
->dev_number
);
1129 struct mdp_superblock_1
*refsb
=
1130 (struct mdp_superblock_1
*)page_address(refdev
->sb_page
);
1132 if (memcmp(sb
->set_uuid
, refsb
->set_uuid
, 16) != 0 ||
1133 sb
->level
!= refsb
->level
||
1134 sb
->layout
!= refsb
->layout
||
1135 sb
->chunksize
!= refsb
->chunksize
) {
1136 printk(KERN_WARNING
"md: %s has strangely different"
1137 " superblock to %s\n",
1138 bdevname(rdev
->bdev
,b
),
1139 bdevname(refdev
->bdev
,b2
));
1142 ev1
= le64_to_cpu(sb
->events
);
1143 ev2
= le64_to_cpu(refsb
->events
);
1151 rdev
->size
= ((rdev
->bdev
->bd_inode
->i_size
>>9) - le64_to_cpu(sb
->data_offset
)) / 2;
1153 rdev
->size
= rdev
->sb_offset
;
1154 if (rdev
->size
< le64_to_cpu(sb
->data_size
)/2)
1156 rdev
->size
= le64_to_cpu(sb
->data_size
)/2;
1157 if (le32_to_cpu(sb
->chunksize
))
1158 rdev
->size
&= ~((sector_t
)le32_to_cpu(sb
->chunksize
)/2 - 1);
1160 if (le64_to_cpu(sb
->size
) > rdev
->size
*2)
static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
	struct mdp_superblock_1 *sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
	__u64 ev1 = le64_to_cpu(sb->events);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);
	clear_bit(BarriersNotsupp, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 1;
		mddev->patch_version = 0;
		mddev->external = 0;
		mddev->chunk_size = le32_to_cpu(sb->chunksize) << 9;
		mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
		mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
		mddev->level = le32_to_cpu(sb->level);
		mddev->clevel[0] = 0;
		mddev->layout = le32_to_cpu(sb->layout);
		mddev->raid_disks = le32_to_cpu(sb->raid_disks);
		mddev->size = le64_to_cpu(sb->size)/2;
		mddev->events = ev1;
		mddev->bitmap_offset = 0;
		mddev->default_bitmap_offset = 1024 >> 9;

		mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
		memcpy(mddev->uuid, sb->set_uuid, 16);

		mddev->max_disks = (4096-256)/2;

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
		    mddev->bitmap_file == NULL)
			mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset);

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
			mddev->reshape_position = le64_to_cpu(sb->reshape_position);
			mddev->delta_disks = le32_to_cpu(sb->delta_disks);
			mddev->new_level = le32_to_cpu(sb->new_level);
			mddev->new_layout = le32_to_cpu(sb->new_layout);
			mddev->new_chunk = le32_to_cpu(sb->new_chunk)<<9;
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk = mddev->chunk_size;
		}

	} else if (mddev->pers == NULL) {
		/* Insist on good event counter while assembling */
		++ev1;
		if (ev1 < mddev->events)
			return -EINVAL;
	} else if (mddev->bitmap) {
		/* If adding to array with a bitmap, then we can accept an
		 * older device, but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}
	if (mddev->level != LEVEL_MULTIPATH) {
		int role;
		role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
		switch(role) {
		case 0xffff: /* spare */
			break;
		case 0xfffe: /* faulty */
			set_bit(Faulty, &rdev->flags);
			break;
		default:
			if ((le32_to_cpu(sb->feature_map) &
			     MD_FEATURE_RECOVERY_OFFSET))
				rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
			else
				set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = role;
			break;
		}
		if (sb->devflags & WriteMostly1)
			set_bit(WriteMostly, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);

	return 0;
}
static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
{
	struct mdp_superblock_1 *sb;
	struct list_head *tmp;
	mdk_rdev_t *rdev2;
	int max_dev, i;
	/* make rdev->sb match mddev and rdev data. */

	sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

	sb->feature_map = 0;
	sb->pad0 = 0;
	sb->recovery_offset = cpu_to_le64(0);
	memset(sb->pad1, 0, sizeof(sb->pad1));
	memset(sb->pad2, 0, sizeof(sb->pad2));
	memset(sb->pad3, 0, sizeof(sb->pad3));

	sb->utime = cpu_to_le64((__u64)mddev->utime);
	sb->events = cpu_to_le64(mddev->events);
	if (mddev->in_sync)
		sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
	else
		sb->resync_offset = cpu_to_le64(0);

	sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));

	sb->raid_disks = cpu_to_le32(mddev->raid_disks);
	sb->size = cpu_to_le64(mddev->size<<1);

	if (mddev->bitmap && mddev->bitmap_file == NULL) {
		sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset);
		sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
	}

	if (rdev->raid_disk >= 0 &&
	    !test_bit(In_sync, &rdev->flags) &&
	    rdev->recovery_offset > 0) {
		sb->feature_map |= cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
		sb->recovery_offset = cpu_to_le64(rdev->recovery_offset);
	}

	if (mddev->reshape_position != MaxSector) {
		sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
		sb->reshape_position = cpu_to_le64(mddev->reshape_position);
		sb->new_layout = cpu_to_le32(mddev->new_layout);
		sb->delta_disks = cpu_to_le32(mddev->delta_disks);
		sb->new_level = cpu_to_le32(mddev->new_level);
		sb->new_chunk = cpu_to_le32(mddev->new_chunk>>9);
	}

	max_dev = 0;
	rdev_for_each(rdev2, tmp, mddev)
		if (rdev2->desc_nr+1 > max_dev)
			max_dev = rdev2->desc_nr+1;

	if (max_dev > le32_to_cpu(sb->max_dev))
		sb->max_dev = cpu_to_le32(max_dev);
	for (i=0; i<max_dev;i++)
		sb->dev_roles[i] = cpu_to_le16(0xfffe);

	rdev_for_each(rdev2, tmp, mddev) {
		i = rdev2->desc_nr;
		if (test_bit(Faulty, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(0xfffe);
		else if (test_bit(In_sync, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else if (rdev2->raid_disk >= 0 && rdev2->recovery_offset > 0)
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else
			sb->dev_roles[i] = cpu_to_le16(0xffff);
	}

	sb->sb_csum = calc_sb_1_csum(sb);
}
static struct super_type super_types[] = {
	[0] = {
		.name	= "0.90.0",
		.owner	= THIS_MODULE,
		.load_super	= super_90_load,
		.validate_super	= super_90_validate,
		.sync_super	= super_90_sync,
	},
	[1] = {
		.name	= "md-1",
		.owner	= THIS_MODULE,
		.load_super	= super_1_load,
		.validate_super	= super_1_validate,
		.sync_super	= super_1_sync,
	},
};
static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
{
	struct list_head *tmp, *tmp2;
	mdk_rdev_t *rdev, *rdev2;

	rdev_for_each(rdev, tmp, mddev1)
		rdev_for_each(rdev2, tmp2, mddev2)
			if (rdev->bdev->bd_contains ==
			    rdev2->bdev->bd_contains)
				return 1;

	return 0;
}

static LIST_HEAD(pending_raid_disks);
static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
{
	char b[BDEVNAME_SIZE];
	struct kobject *ko;
	char *s;
	int err;

	if (rdev->mddev) {
		MD_BUG();
		return -EINVAL;
	}

	/* prevent duplicates */
	if (find_rdev(mddev, rdev->bdev->bd_dev))
		return -EEXIST;

	/* make sure rdev->size exceeds mddev->size */
	if (rdev->size && (mddev->size == 0 || rdev->size < mddev->size)) {
		if (mddev->pers) {
			/* Cannot change size, so fail
			 * If mddev->level <= 0, then we don't care
			 * about aligning sizes (e.g. linear)
			 */
			if (mddev->level > 0)
				return -ENOSPC;
		} else
			mddev->size = rdev->size;
	}

	/* Verify rdev->desc_nr is unique.
	 * If it is -1, assign a free number, else
	 * check number is not in use
	 */
	if (rdev->desc_nr < 0) {
		int choice = 0;
		if (mddev->pers) choice = mddev->raid_disks;
		while (find_rdev_nr(mddev, choice))
			choice++;
		rdev->desc_nr = choice;
	} else {
		if (find_rdev_nr(mddev, rdev->desc_nr))
			return -EBUSY;
	}
	bdevname(rdev->bdev,b);
	while ( (s=strchr(b, '/')) != NULL)
		*s = '!';

	rdev->mddev = mddev;
	printk(KERN_INFO "md: bind<%s>\n", b);

	if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
		goto fail;

	if (rdev->bdev->bd_part)
		ko = &rdev->bdev->bd_part->dev.kobj;
	else
		ko = &rdev->bdev->bd_disk->dev.kobj;
	if ((err = sysfs_create_link(&rdev->kobj, ko, "block"))) {
		kobject_del(&rdev->kobj);
		goto fail;
	}
	list_add(&rdev->same_set, &mddev->disks);
	bd_claim_by_disk(rdev->bdev, rdev->bdev->bd_holder, mddev->gendisk);
	return 0;

 fail:
	printk(KERN_WARNING "md: failed to register dev-%s for %s\n",
	       b, mdname(mddev));
	return err;
}
static void md_delayed_delete(struct work_struct *ws)
{
	mdk_rdev_t *rdev = container_of(ws, mdk_rdev_t, del_work);
	kobject_del(&rdev->kobj);
	kobject_put(&rdev->kobj);
}

static void unbind_rdev_from_array(mdk_rdev_t * rdev)
{
	char b[BDEVNAME_SIZE];
	if (!rdev->mddev) {
		MD_BUG();
		return;
	}
	bd_release_from_disk(rdev->bdev, rdev->mddev->gendisk);
	list_del_init(&rdev->same_set);
	printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
	rdev->mddev = NULL;
	sysfs_remove_link(&rdev->kobj, "block");

	/* We need to delay this, otherwise we can deadlock when
	 * writing 'remove' to "dev/state"
	 */
	INIT_WORK(&rdev->del_work, md_delayed_delete);
	kobject_get(&rdev->kobj);
	schedule_work(&rdev->del_work);
}
/*
 * prevent the device from being mounted, repartitioned or
 * otherwise reused by a RAID array (or any other kernel
 * subsystem), by bd_claiming the device.
 */
static int lock_rdev(mdk_rdev_t *rdev, dev_t dev, int shared)
{
	int err = 0;
	struct block_device *bdev;
	char b[BDEVNAME_SIZE];

	bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE);
	if (IS_ERR(bdev)) {
		printk(KERN_ERR "md: could not open %s.\n",
			__bdevname(dev, b));
		return PTR_ERR(bdev);
	}
	err = bd_claim(bdev, shared ? (mdk_rdev_t *)lock_rdev : rdev);
	if (err) {
		printk(KERN_ERR "md: could not bd_claim %s.\n",
			bdevname(bdev, b));
		blkdev_put(bdev);
		return err;
	}
	if (!shared)
		set_bit(AllReserved, &rdev->flags);
	rdev->bdev = bdev;
	return err;
}

static void unlock_rdev(mdk_rdev_t *rdev)
{
	struct block_device *bdev = rdev->bdev;
	rdev->bdev = NULL;
	if (!bdev)
		MD_BUG();
	bd_release(bdev);
	blkdev_put(bdev);
}
void md_autodetect_dev(dev_t dev);

static void export_rdev(mdk_rdev_t * rdev)
{
	char b[BDEVNAME_SIZE];
	printk(KERN_INFO "md: export_rdev(%s)\n",
		bdevname(rdev->bdev,b));
	if (rdev->mddev)
		MD_BUG();
	free_disk_sb(rdev);
	list_del_init(&rdev->same_set);
#ifndef MODULE
	if (test_bit(AutoDetected, &rdev->flags))
		md_autodetect_dev(rdev->bdev->bd_dev);
#endif
	unlock_rdev(rdev);
	kobject_put(&rdev->kobj);
}

static void kick_rdev_from_array(mdk_rdev_t * rdev)
{
	unbind_rdev_from_array(rdev);
	export_rdev(rdev);
}
static void export_array(mddev_t *mddev)
{
	struct list_head *tmp;
	mdk_rdev_t *rdev;

	rdev_for_each(rdev, tmp, mddev) {
		if (!rdev->mddev) {
			MD_BUG();
			continue;
		}
		kick_rdev_from_array(rdev);
	}
	if (!list_empty(&mddev->disks))
		MD_BUG();
	mddev->raid_disks = 0;
	mddev->major_version = 0;
}
static void print_desc(mdp_disk_t *desc)
{
	printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number,
		desc->major,desc->minor,desc->raid_disk,desc->state);
}
static void print_sb(mdp_super_t *sb)
{
	int i;

	printk(KERN_INFO
		"md:  SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n",
		sb->major_version, sb->minor_version, sb->patch_version,
		sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3,
		sb->ctime);
	printk(KERN_INFO "md:     L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n",
		sb->level, sb->size, sb->nr_disks, sb->raid_disks,
		sb->md_minor, sb->layout, sb->chunk_size);
	printk(KERN_INFO "md:     UT:%08x ST:%d AD:%d WD:%d"
		" FD:%d SD:%d CSUM:%08x E:%08lx\n",
		sb->utime, sb->state, sb->active_disks, sb->working_disks,
		sb->failed_disks, sb->spare_disks,
		sb->sb_csum, (unsigned long)sb->events_lo);

	for (i = 0; i < MD_SB_DISKS; i++) {
		mdp_disk_t *desc;

		desc = sb->disks + i;
		if (desc->number || desc->major || desc->minor ||
		    desc->raid_disk || (desc->state && (desc->state != 4))) {
			printk("     D %2d: ", i);
			print_desc(desc);
		}
	}
	printk(KERN_INFO "md:     THIS: ");
	print_desc(&sb->this_disk);
}
static void print_rdev(mdk_rdev_t *rdev)
{
	char b[BDEVNAME_SIZE];
	printk(KERN_INFO "md: rdev %s, SZ:%08llu F:%d S:%d DN:%u\n",
		bdevname(rdev->bdev,b), (unsigned long long)rdev->size,
		test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags),
		rdev->desc_nr);
	if (rdev->sb_loaded) {
		printk(KERN_INFO "md: rdev superblock:\n");
		print_sb((mdp_super_t*)page_address(rdev->sb_page));
	} else
		printk(KERN_INFO "md: no rdev superblock!\n");
}
static void md_print_devices(void)
{
	struct list_head *tmp, *tmp2;
	mdk_rdev_t *rdev;
	mddev_t *mddev;
	char b[BDEVNAME_SIZE];

	printk("\n");
	printk("md:	**********************************\n");
	printk("md:	* <COMPLETE RAID STATE PRINTOUT> *\n");
	printk("md:	**********************************\n");
	for_each_mddev(mddev, tmp) {

		if (mddev->bitmap)
			bitmap_print_sb(mddev->bitmap);
		else
			printk("%s: ", mdname(mddev));
		rdev_for_each(rdev, tmp2, mddev)
			printk("<%s>", bdevname(rdev->bdev,b));
		printk("\n");

		rdev_for_each(rdev, tmp2, mddev)
			print_rdev(rdev);
	}
	printk("md:	**********************************\n");
	printk("\n");
}
static void sync_sbs(mddev_t * mddev, int nospares)
{
	/* Update each superblock (in-memory image), but
	 * if we are allowed to, skip spares which already
	 * have the right event counter, or have one earlier
	 * (which would mean they aren't being marked as dirty
	 * with the rest of the array)
	 */
	mdk_rdev_t *rdev;
	struct list_head *tmp;

	rdev_for_each(rdev, tmp, mddev) {
		if (rdev->sb_events == mddev->events ||
		    (nospares &&
		     rdev->raid_disk < 0 &&
		     (rdev->sb_events&1)==0 &&
		     rdev->sb_events+1 == mddev->events)) {
			/* Don't update this superblock */
			rdev->sb_loaded = 2;
		} else {
			super_types[mddev->major_version].
				sync_super(mddev, rdev);
			rdev->sb_loaded = 1;
		}
	}
}
static void md_update_sb(mddev_t * mddev, int force_change)
{
	struct list_head *tmp;
	mdk_rdev_t *rdev;
	int sync_req;
	int nospares = 0;

	if (mddev->external)
		return;
repeat:
	spin_lock_irq(&mddev->write_lock);

	set_bit(MD_CHANGE_PENDING, &mddev->flags);
	if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
		force_change = 1;
	if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
		/* just a clean<-> dirty transition, possibly leave spares alone,
		 * though if events isn't the right even/odd, we will have to do
		 * that anyway.
		 */
		nospares = 1;
	if (force_change)
		nospares = 0;
	if (mddev->degraded)
		/* If the array is degraded, then skipping spares is both
		 * dangerous and fairly pointless.
		 * Dangerous because a device that was removed from the array
		 * might have an event_count that still looks up-to-date,
		 * so it can be re-added without a resync.
		 * Pointless because if there are any spares to skip,
		 * then a recovery will happen and soon that array won't
		 * be degraded any more and the spare can go back to sleep then.
		 */
		nospares = 0;

	sync_req = mddev->in_sync;
	mddev->utime = get_seconds();

	/* If this is just a dirty<->clean transition, and the array is clean
	 * and 'events' is odd, we can roll back to the previous clean state */
	if (nospares
	    && (mddev->in_sync && mddev->recovery_cp == MaxSector)
	    && (mddev->events & 1)
	    && mddev->events != 1)
		mddev->events--;
	else {
		/* otherwise we have to go forward and ... */
		mddev->events ++;
		if (!mddev->in_sync || mddev->recovery_cp != MaxSector) { /* not clean */
			/* .. if the array isn't clean, insist on an odd 'events' */
			if ((mddev->events&1)==0) {
				mddev->events++;
				nospares = 0;
			}
		} else {
			/* otherwise insist on an even 'events' (for clean states) */
			if ((mddev->events&1)) {
				mddev->events++;
				nospares = 0;
			}
		}
	}

	if (!mddev->events) {
		/*
		 * oops, this 64-bit counter should never wrap.
		 * Either we are in around ~1 trillion A.C., assuming
		 * 1 reboot per second, or we have a bug:
		 */
		MD_BUG();
		mddev->events --;
	}

	/*
	 * do not write anything to disk if using
	 * nonpersistent superblocks
	 */
	if (!mddev->persistent) {
		if (!mddev->external)
			clear_bit(MD_CHANGE_PENDING, &mddev->flags);

		spin_unlock_irq(&mddev->write_lock);
		wake_up(&mddev->sb_wait);
		return;
	}
	sync_sbs(mddev, nospares);
	spin_unlock_irq(&mddev->write_lock);

	dprintk(KERN_INFO
		"md: updating %s RAID superblock on device (in sync %d)\n",
		mdname(mddev),mddev->in_sync);

	bitmap_update_sb(mddev->bitmap);
	rdev_for_each(rdev, tmp, mddev) {
		char b[BDEVNAME_SIZE];
		dprintk(KERN_INFO "md: ");
		if (rdev->sb_loaded != 1)
			continue; /* no noise on spare devices */
		if (test_bit(Faulty, &rdev->flags))
			dprintk("(skipping faulty ");

		dprintk("%s ", bdevname(rdev->bdev,b));
		if (!test_bit(Faulty, &rdev->flags)) {
			md_super_write(mddev,rdev,
				       rdev->sb_offset<<1, rdev->sb_size,
				       rdev->sb_page);
			dprintk(KERN_INFO "(write) %s's sb offset: %llu\n",
				bdevname(rdev->bdev,b),
				(unsigned long long)rdev->sb_offset);
			rdev->sb_events = mddev->events;

		} else
			dprintk(")\n");
		if (mddev->level == LEVEL_MULTIPATH)
			/* only need to write one superblock... */
			break;
	}
	md_super_wait(mddev);
	/* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */

	spin_lock_irq(&mddev->write_lock);
	if (mddev->in_sync != sync_req ||
	    test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
		/* have to write it out again */
		spin_unlock_irq(&mddev->write_lock);
		goto repeat;
	}
	clear_bit(MD_CHANGE_PENDING, &mddev->flags);
	spin_unlock_irq(&mddev->write_lock);
	wake_up(&mddev->sb_wait);
}
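/*
 * Worked example of the even/odd 'events' convention (illustrative):
 * a clean array sits at an even count, say events == 4.  A write makes
 * it dirty, bumping events to the next odd value, 5.  Going clean again
 * would bump to 6; but if nothing else changed (nospares), we can
 * instead roll back 5 -> 4, sparing the spares a superblock rewrite.
 */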
/* words written to sysfs files may, or may not, be \n terminated.
 * We want to accept either case. For this we use cmd_match.
 */
static int cmd_match(const char *cmd, const char *str)
{
	/* See if cmd, written into a sysfs file, matches
	 * str.  They must either be the same, or cmd can
	 * have a trailing newline
	 */
	while (*cmd && *str && *cmd == *str) {
		cmd++;
		str++;
	}
	if (*cmd == '\n')
		cmd++;
	if (*str || *cmd)
		return 0;
	return 1;
}
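/*
 * Examples (illustrative): cmd_match("faulty\n", "faulty") and
 * cmd_match("faulty", "faulty") both return 1; a mere prefix such as
 * cmd_match("fault", "faulty") returns 0.
 */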
struct rdev_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(mdk_rdev_t *, char *);
	ssize_t (*store)(mdk_rdev_t *, const char *, size_t);
};
static ssize_t
state_show(mdk_rdev_t *rdev, char *page)
{
	char *sep = "";
	size_t len = 0;

	if (test_bit(Faulty, &rdev->flags)) {
		len += sprintf(page+len, "%sfaulty",sep);
		sep = ",";
	}
	if (test_bit(In_sync, &rdev->flags)) {
		len += sprintf(page+len, "%sin_sync",sep);
		sep = ",";
	}
	if (test_bit(WriteMostly, &rdev->flags)) {
		len += sprintf(page+len, "%swrite_mostly",sep);
		sep = ",";
	}
	if (test_bit(Blocked, &rdev->flags)) {
		len += sprintf(page+len, "%sblocked", sep);
		sep = ",";
	}
	if (!test_bit(Faulty, &rdev->flags) &&
	    !test_bit(In_sync, &rdev->flags)) {
		len += sprintf(page+len, "%sspare", sep);
		sep = ",";
	}
	return len+sprintf(page+len, "\n");
}
static ssize_t
state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
	/* can write
	 *  faulty  - simulates an error
	 *  remove  - disconnects the device
	 *  writemostly - sets write_mostly
	 *  -writemostly - clears write_mostly
	 *  blocked - sets the Blocked flag
	 *  -blocked - clears the Blocked flag
	 */
	int err = -EINVAL;
	if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
		md_error(rdev->mddev, rdev);
		err = 0;
	} else if (cmd_match(buf, "remove")) {
		if (rdev->raid_disk >= 0)
			err = -EBUSY;
		else {
			mddev_t *mddev = rdev->mddev;
			kick_rdev_from_array(rdev);
			if (mddev->pers)
				md_update_sb(mddev, 1);
			md_new_event(mddev);
			err = 0;
		}
	} else if (cmd_match(buf, "writemostly")) {
		set_bit(WriteMostly, &rdev->flags);
		err = 0;
	} else if (cmd_match(buf, "-writemostly")) {
		clear_bit(WriteMostly, &rdev->flags);
		err = 0;
	} else if (cmd_match(buf, "blocked")) {
		set_bit(Blocked, &rdev->flags);
		err = 0;
	} else if (cmd_match(buf, "-blocked")) {
		clear_bit(Blocked, &rdev->flags);
		wake_up(&rdev->blocked_wait);
		set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
		md_wakeup_thread(rdev->mddev->thread);

		err = 0;
	}
	return err ? err : len;
}
static struct rdev_sysfs_entry rdev_state =
__ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
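/*
 * Usage note (illustrative): with the attribute registered, userspace
 * drives state_store() through sysfs, e.g.
 *
 *	echo faulty > /sys/block/md0/md/dev-sda1/state
 *
 * and reads the comma-separated flag list back from the same file.
 */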
static ssize_t
errors_show(mdk_rdev_t *rdev, char *page)
{
	return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
}

static ssize_t
errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
	char *e;
	unsigned long n = simple_strtoul(buf, &e, 10);
	if (*buf && (*e == 0 || *e == '\n')) {
		atomic_set(&rdev->corrected_errors, n);
		return len;
	}
	return -EINVAL;
}
static struct rdev_sysfs_entry rdev_errors =
__ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
static ssize_t
slot_show(mdk_rdev_t *rdev, char *page)
{
	if (rdev->raid_disk < 0)
		return sprintf(page, "none\n");
	else
		return sprintf(page, "%d\n", rdev->raid_disk);
}
static ssize_t
slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
	char *e;
	int err;
	char nm[20];
	int slot = simple_strtoul(buf, &e, 10);
	if (strncmp(buf, "none", 4)==0)
		slot = -1;
	else if (e==buf || (*e && *e!= '\n'))
		return -EINVAL;
	if (rdev->mddev->pers && slot == -1) {
		/* Setting 'slot' on an active array requires also
		 * updating the 'rd%d' link, and communicating
		 * with the personality with ->hot_*_disk.
		 * For now we only support removing
		 * failed/spare devices.  This normally happens automatically,
		 * but not when the metadata is externally managed.
		 */
		if (rdev->raid_disk == -1)
			return -EEXIST;
		/* personality does all needed checks */
		if (rdev->mddev->pers->hot_add_disk == NULL)
			return -EINVAL;
		err = rdev->mddev->pers->
			hot_remove_disk(rdev->mddev, rdev->raid_disk);
		if (err)
			return err;
		sprintf(nm, "rd%d", rdev->raid_disk);
		sysfs_remove_link(&rdev->mddev->kobj, nm);
		set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
		md_wakeup_thread(rdev->mddev->thread);
	} else if (rdev->mddev->pers) {
		mdk_rdev_t *rdev2;
		struct list_head *tmp;
		/* Activating a spare .. or possibly reactivating
		 * if we ever get bitmaps working here.
		 */

		if (rdev->raid_disk != -1)
			return -EBUSY;

		if (rdev->mddev->pers->hot_add_disk == NULL)
			return -EINVAL;

		rdev_for_each(rdev2, tmp, rdev->mddev)
			if (rdev2->raid_disk == slot)
				return -EEXIST;

		rdev->raid_disk = slot;
		if (test_bit(In_sync, &rdev->flags))
			rdev->saved_raid_disk = slot;
		else
			rdev->saved_raid_disk = -1;
		err = rdev->mddev->pers->
			hot_add_disk(rdev->mddev, rdev);
		if (err) {
			rdev->raid_disk = -1;
			return err;
		}
		sprintf(nm, "rd%d", rdev->raid_disk);
		if (sysfs_create_link(&rdev->mddev->kobj, &rdev->kobj, nm))
			printk(KERN_WARNING
			       "md: cannot register "
			       "%s for %s\n",
			       nm, mdname(rdev->mddev));

		/* don't wakeup anyone, leave that to userspace. */
	} else {
		if (slot >= rdev->mddev->raid_disks)
			return -ENOSPC;
		rdev->raid_disk = slot;
		/* assume it is working */
		clear_bit(Faulty, &rdev->flags);
		clear_bit(WriteMostly, &rdev->flags);
		set_bit(In_sync, &rdev->flags);
	}
	return len;
}

static struct rdev_sysfs_entry rdev_slot =
__ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
static ssize_t
offset_show(mdk_rdev_t *rdev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
}

static ssize_t
offset_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
	char *e;
	unsigned long long offset = simple_strtoull(buf, &e, 10);
	if (e==buf || (*e && *e != '\n'))
		return -EINVAL;
	if (rdev->mddev->pers && rdev->raid_disk >= 0)
		return -EBUSY;
	if (rdev->size && rdev->mddev->external)
		/* Must set offset before size, so overlap checks
		 * can be sane */
		return -EBUSY;
	rdev->data_offset = offset;
	return len;
}

static struct rdev_sysfs_entry rdev_offset =
__ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
static ssize_t
rdev_size_show(mdk_rdev_t *rdev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)rdev->size);
}

static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
{
	/* check if two start/length pairs overlap */
	if (s1+l1 <= s2)
		return 0;
	if (s2+l2 <= s1)
		return 0;
	return 1;
}
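/*
 * Worked example (illustrative): overlaps(0, 100, 50, 100) is 1, since
 * [0,100) and [50,150) share [50,100); overlaps(0, 100, 100, 50) is 0,
 * because the ranges merely touch at sector 100.
 */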
static ssize_t
rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
	char *e;
	unsigned long long size = simple_strtoull(buf, &e, 10);
	unsigned long long oldsize = rdev->size;
	mddev_t *my_mddev = rdev->mddev;

	if (e==buf || (*e && *e != '\n'))
		return -EINVAL;
	if (my_mddev->pers && rdev->raid_disk >= 0)
		return -EBUSY;
	rdev->size = size;
	if (size > oldsize && rdev->mddev->external) {
		/* need to check that all other rdevs with the same ->bdev
		 * do not overlap.  We need to unlock the mddev to avoid
		 * a deadlock.  We have already changed rdev->size, and if
		 * we have to change it back, we will have the lock again.
		 */
		mddev_t *mddev;
		int overlap = 0;
		struct list_head *tmp, *tmp2;

		mddev_unlock(my_mddev);
		for_each_mddev(mddev, tmp) {
			mdk_rdev_t *rdev2;

			mddev_lock(mddev);
			rdev_for_each(rdev2, tmp2, mddev)
				if (test_bit(AllReserved, &rdev2->flags) ||
				    (rdev->bdev == rdev2->bdev &&
				     rdev != rdev2 &&
				     overlaps(rdev->data_offset, rdev->size,
					      rdev2->data_offset, rdev2->size))) {
					overlap = 1;
					break;
				}
			mddev_unlock(mddev);
			if (overlap) {
				mddev_put(mddev);
				break;
			}
		}
		mddev_lock(my_mddev);
		if (overlap) {
			/* Someone else could have slipped in a size
			 * change here, but doing so is just silly.
			 * We put oldsize back because we *know* it is
			 * safe, and trust userspace not to race with
			 * itself
			 */
			rdev->size = oldsize;
			return -EBUSY;
		}
	}
	if (size < my_mddev->size || my_mddev->size == 0)
		my_mddev->size = size;
	return len;
}

static struct rdev_sysfs_entry rdev_size =
__ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
static struct attribute *rdev_default_attrs[] = {
	&rdev_state.attr,
	&rdev_errors.attr,
	&rdev_slot.attr,
	&rdev_offset.attr,
	&rdev_size.attr,
	NULL,
};
static ssize_t
rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
	mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
	mddev_t *mddev = rdev->mddev;
	ssize_t rv;

	if (!entry->show)
		return -EIO;

	rv = mddev ? mddev_lock(mddev) : -EBUSY;
	if (!rv) {
		if (rdev->mddev == NULL)
			rv = -EBUSY;
		else
			rv = entry->show(rdev, page);
		mddev_unlock(mddev);
	}
	return rv;
}
static ssize_t
rdev_attr_store(struct kobject *kobj, struct attribute *attr,
	      const char *page, size_t length)
{
	struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
	mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
	ssize_t rv;
	mddev_t *mddev = rdev->mddev;

	if (!entry->store)
		return -EIO;
	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	rv = mddev ? mddev_lock(mddev): -EBUSY;
	if (!rv) {
		if (rdev->mddev == NULL)
			rv = -EBUSY;
		else
			rv = entry->store(rdev, page, length);
		mddev_unlock(mddev);
	}
	return rv;
}
static void rdev_free(struct kobject *ko)
{
	mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
	kfree(rdev);
}
static struct sysfs_ops rdev_sysfs_ops = {
	.show		= rdev_attr_show,
	.store		= rdev_attr_store,
};
static struct kobj_type rdev_ktype = {
	.release	= rdev_free,
	.sysfs_ops	= &rdev_sysfs_ops,
	.default_attrs	= rdev_default_attrs,
};
/*
 * Import a device. If 'super_format' >= 0, then sanity check the superblock
 *
 * mark the device faulty if:
 *
 *   - the device is nonexistent (zero size)
 *   - the device has no valid superblock
 *
 * a faulty rdev _never_ has rdev->sb set.
 */
static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor)
{
	char b[BDEVNAME_SIZE];
	int err;
	mdk_rdev_t *rdev;
	sector_t size;

	rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
	if (!rdev) {
		printk(KERN_ERR "md: could not alloc mem for new device!\n");
		return ERR_PTR(-ENOMEM);
	}

	if ((err = alloc_disk_sb(rdev)))
		goto abort_free;

	err = lock_rdev(rdev, newdev, super_format == -2);
	if (err)
		goto abort_free;

	kobject_init(&rdev->kobj, &rdev_ktype);

	rdev->desc_nr = -1;
	rdev->saved_raid_disk = -1;
	rdev->raid_disk = -1;
	rdev->flags = 0;
	rdev->data_offset = 0;
	rdev->sb_events = 0;
	atomic_set(&rdev->nr_pending, 0);
	atomic_set(&rdev->read_errors, 0);
	atomic_set(&rdev->corrected_errors, 0);

	size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
	if (!size) {
		printk(KERN_WARNING
			"md: %s has zero or unknown size, marking faulty!\n",
			bdevname(rdev->bdev,b));
		err = -EINVAL;
		goto abort_free;
	}

	if (super_format >= 0) {
		err = super_types[super_format].
			load_super(rdev, NULL, super_minor);
		if (err == -EINVAL) {
			printk(KERN_WARNING
				"md: %s does not have a valid v%d.%d "
				"superblock, not importing!\n",
				bdevname(rdev->bdev,b),
				super_format, super_minor);
			goto abort_free;
		}
		if (err < 0) {
			printk(KERN_WARNING
				"md: could not read %s's sb, not importing!\n",
				bdevname(rdev->bdev,b));
			goto abort_free;
		}
	}
	INIT_LIST_HEAD(&rdev->same_set);
	init_waitqueue_head(&rdev->blocked_wait);

	return rdev;

abort_free:
	if (rdev->sb_page) {
		if (rdev->bdev)
			unlock_rdev(rdev);
		free_disk_sb(rdev);
	}
	kfree(rdev);
	return ERR_PTR(err);
}
/*
 * Check a full RAID array for plausibility
 */

static void analyze_sbs(mddev_t * mddev)
{
	int i;
	struct list_head *tmp;
	mdk_rdev_t *rdev, *freshest;
	char b[BDEVNAME_SIZE];

	freshest = NULL;
	rdev_for_each(rdev, tmp, mddev)
		switch (super_types[mddev->major_version].
			load_super(rdev, freshest, mddev->minor_version)) {
		case 1:
			freshest = rdev;
			break;
		case 0:
			break;
		default:
			printk(KERN_ERR
				"md: fatal superblock inconsistency in %s"
				" -- removing from array\n",
				bdevname(rdev->bdev,b));
			kick_rdev_from_array(rdev);
		}

	super_types[mddev->major_version].
		validate_super(mddev, freshest);

	i = 0;
	rdev_for_each(rdev, tmp, mddev) {
		if (rdev != freshest)
			if (super_types[mddev->major_version].
			    validate_super(mddev, rdev)) {
				printk(KERN_WARNING "md: kicking non-fresh %s"
					" from array!\n",
					bdevname(rdev->bdev,b));
				kick_rdev_from_array(rdev);
				continue;
			}
		if (mddev->level == LEVEL_MULTIPATH) {
			rdev->desc_nr = i++;
			rdev->raid_disk = rdev->desc_nr;
			set_bit(In_sync, &rdev->flags);
		} else if (rdev->raid_disk >= mddev->raid_disks) {
			rdev->raid_disk = -1;
			clear_bit(In_sync, &rdev->flags);
		}
	}

	if (mddev->recovery_cp != MaxSector &&
	    mddev->level >= 1)
		printk(KERN_ERR "md: %s: raid array is not clean"
		       " -- starting background reconstruction\n",
		       mdname(mddev));
}
static ssize_t
safe_delay_show(mddev_t *mddev, char *page)
{
	int msec = (mddev->safemode_delay*1000)/HZ;
	return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
}
static ssize_t
safe_delay_store(mddev_t *mddev, const char *cbuf, size_t len)
{
	int scale=1;
	int dot=0;
	int i;
	unsigned long msec;
	char buf[30];
	char *e;
	/* remove a period, and count digits after it */
	if (len >= sizeof(buf))
		return -EINVAL;
	strlcpy(buf, cbuf, len);
	buf[len] = 0;
	for (i=0; i<len; i++) {
		if (dot) {
			if (isdigit(buf[i])) {
				buf[i-1] = buf[i];
				scale *= 10;
			}
			buf[i] = 0;
		} else if (buf[i] == '.') {
			dot=1;
			buf[i] = 0;
		}
	}
	msec = simple_strtoul(buf, &e, 10);
	if (e == buf || (*e && *e != '\n'))
		return -EINVAL;
	msec = (msec * 1000) / scale;
	if (msec == 0)
		mddev->safemode_delay = 0;
	else {
		mddev->safemode_delay = (msec*HZ)/1000;
		if (mddev->safemode_delay == 0)
			mddev->safemode_delay = 1;
	}
	return len;
}
static struct md_sysfs_entry md_safe_delay =
__ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
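/*
 * Example (illustrative): writing "0.200" yields scale=1000 and
 * msec=200 after the digit shuffle, so safemode_delay becomes
 * 200*HZ/1000 -- i.e. a 200ms safe-mode delay.
 */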
static ssize_t
level_show(mddev_t *mddev, char *page)
{
	struct mdk_personality *p = mddev->pers;
	if (p)
		return sprintf(page, "%s\n", p->name);
	else if (mddev->clevel[0])
		return sprintf(page, "%s\n", mddev->clevel);
	else if (mddev->level != LEVEL_NONE)
		return sprintf(page, "%d\n", mddev->level);
	else
		return 0;
}

static ssize_t
level_store(mddev_t *mddev, const char *buf, size_t len)
{
	ssize_t rv = len;
	if (mddev->pers)
		return -EBUSY;
	if (len == 0)
		return 0;
	if (len >= sizeof(mddev->clevel))
		return -ENOSPC;
	strncpy(mddev->clevel, buf, len);
	if (mddev->clevel[len-1] == '\n')
		len--;
	mddev->clevel[len] = 0;
	mddev->level = LEVEL_NONE;
	return rv;
}

static struct md_sysfs_entry md_level =
__ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
static ssize_t
layout_show(mddev_t *mddev, char *page)
{
	/* just a number, not meaningful for all levels */
	if (mddev->reshape_position != MaxSector &&
	    mddev->layout != mddev->new_layout)
		return sprintf(page, "%d (%d)\n",
			       mddev->new_layout, mddev->layout);
	return sprintf(page, "%d\n", mddev->layout);
}

static ssize_t
layout_store(mddev_t *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long n = simple_strtoul(buf, &e, 10);

	if (!*buf || (*e && *e != '\n'))
		return -EINVAL;

	if (mddev->pers)
		return -EBUSY;
	if (mddev->reshape_position != MaxSector)
		mddev->new_layout = n;
	else
		mddev->layout = n;
	return len;
}
static struct md_sysfs_entry md_layout =
__ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
static ssize_t
raid_disks_show(mddev_t *mddev, char *page)
{
	if (mddev->raid_disks == 0)
		return 0;
	if (mddev->reshape_position != MaxSector &&
	    mddev->delta_disks != 0)
		return sprintf(page, "%d (%d)\n", mddev->raid_disks,
			       mddev->raid_disks - mddev->delta_disks);
	return sprintf(page, "%d\n", mddev->raid_disks);
}

static int update_raid_disks(mddev_t *mddev, int raid_disks);

static ssize_t
raid_disks_store(mddev_t *mddev, const char *buf, size_t len)
{
	char *e;
	int rv = 0;
	unsigned long n = simple_strtoul(buf, &e, 10);

	if (!*buf || (*e && *e != '\n'))
		return -EINVAL;

	if (mddev->pers)
		rv = update_raid_disks(mddev, n);
	else if (mddev->reshape_position != MaxSector) {
		int olddisks = mddev->raid_disks - mddev->delta_disks;
		mddev->delta_disks = n - olddisks;
		mddev->raid_disks = n;
	} else
		mddev->raid_disks = n;
	return rv ? rv : len;
}
static struct md_sysfs_entry md_raid_disks =
__ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
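/*
 * Editorial note (illustration, not in the original): while a reshape is
 * pending, raid_disks_show() reports "new (old)"; e.g. a grow from 3 to 4
 * devices would read "4 (3)" until delta_disks returns to zero.
 */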
static ssize_t
chunk_size_show(mddev_t *mddev, char *page)
{
	if (mddev->reshape_position != MaxSector &&
	    mddev->chunk_size != mddev->new_chunk)
		return sprintf(page, "%d (%d)\n", mddev->new_chunk,
			       mddev->chunk_size);
	return sprintf(page, "%d\n", mddev->chunk_size);
}

static ssize_t
chunk_size_store(mddev_t *mddev, const char *buf, size_t len)
{
	/* can only set chunk_size if array is not yet active */
	char *e;
	unsigned long n = simple_strtoul(buf, &e, 10);

	if (!*buf || (*e && *e != '\n'))
		return -EINVAL;

	if (mddev->pers)
		return -EBUSY;
	else if (mddev->reshape_position != MaxSector)
		mddev->new_chunk = n;
	else
		mddev->chunk_size = n;
	return len;
}
static struct md_sysfs_entry md_chunk_size =
__ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
static ssize_t
resync_start_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
}

static ssize_t
resync_start_store(mddev_t *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long long n = simple_strtoull(buf, &e, 10);

	if (mddev->pers)
		return -EBUSY;
	if (!*buf || (*e && *e != '\n'))
		return -EINVAL;

	mddev->recovery_cp = n;
	return len;
}
static struct md_sysfs_entry md_resync_start =
__ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store);
/*
 * The array state can be:
 *
 * clear
 *     No devices, no size, no level
 *     Equivalent to STOP_ARRAY ioctl
 * inactive
 *     May have some settings, but array is not active
 *        all IO results in error
 *     When written, doesn't tear down array, but just stops it
 * suspended (not supported yet)
 *     All IO requests will block. The array can be reconfigured.
 *     Writing this, if accepted, will block until array is quiescent
 * readonly
 *     no resync can happen.  no superblocks get written.
 *     write requests fail
 * read-auto
 *     like readonly, but behaves like 'clean' on a write request.
 *
 * clean - no pending writes, but otherwise active.
 *     When written to inactive array, starts without resync
 *     If a write request arrives then
 *       if metadata is known, mark 'dirty' and switch to 'active'.
 *       if not known, block and switch to write-pending
 *     If written to an active array that has pending writes, then fails.
 * active
 *     fully active: IO and resync can be happening.
 *     When written to inactive array, starts with resync
 *
 * write-pending
 *     clean, but writes are blocked waiting for 'active' to be written.
 *
 * active-idle
 *     like active, but no writes have been seen for a while (100msec).
 *
 */
enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
		   write_pending, active_idle, bad_word};
static char *array_states[] = {
	"clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
	"write-pending", "active-idle", NULL };
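/*
 * Usage sketch (hypothetical shell session, not from the source): the states
 * above map one-to-one onto the strings in array_states[], so
 * "cat /sys/block/md0/md/array_state" might print "clean", while
 * "echo readonly > /sys/block/md0/md/array_state" asks array_state_store()
 * below to move the array into the readonly state.
 */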
static int match_word(const char *word, char **list)
{
	int n;
	for (n=0; list[n]; n++)
		if (cmd_match(word, list[n]))
			break;
	return n;
}

static ssize_t
array_state_show(mddev_t *mddev, char *page)
{
	enum array_state st = inactive;

	if (mddev->pers)
		switch(mddev->ro) {
		case 1:
			st = readonly;
			break;
		case 2:
			st = read_auto;
			break;
		case 0:
			if (mddev->in_sync)
				st = clean;
			else if (test_bit(MD_CHANGE_CLEAN, &mddev->flags))
				st = write_pending;
			else if (mddev->safemode)
				st = active_idle;
			else
				st = active;
		}
	else {
		if (list_empty(&mddev->disks) &&
		    mddev->raid_disks == 0 &&
		    mddev->size == 0)
			st = clear;
		else
			st = inactive;
	}
	return sprintf(page, "%s\n", array_states[st]);
}
static int do_md_stop(mddev_t * mddev, int ro);
static int do_md_run(mddev_t * mddev);
static int restart_array(mddev_t *mddev);

static ssize_t
array_state_store(mddev_t *mddev, const char *buf, size_t len)
{
	int err = -EINVAL;
	enum array_state st = match_word(buf, array_states);
	switch(st) {
	case bad_word:
		break;
	case clear:
		/* stopping an active array */
		if (atomic_read(&mddev->active) > 1)
			return -EBUSY;
		err = do_md_stop(mddev, 0);
		break;
	case inactive:
		/* stopping an active array */
		if (mddev->pers) {
			if (atomic_read(&mddev->active) > 1)
				return -EBUSY;
			err = do_md_stop(mddev, 2);
		} else
			err = 0; /* already inactive */
		break;
	case suspended:
		break; /* not supported yet */
	case readonly:
		if (mddev->pers)
			err = do_md_stop(mddev, 1);
		else {
			mddev->ro = 1;
			set_disk_ro(mddev->gendisk, 1);
			err = do_md_run(mddev);
		}
		break;
	case read_auto:
		if (mddev->pers) {
			if (mddev->ro != 1)
				err = do_md_stop(mddev, 1);
			else
				err = restart_array(mddev);
			if (err == 0) {
				mddev->ro = 2;
				set_disk_ro(mddev->gendisk, 0);
			}
		} else {
			mddev->ro = 2;
			err = do_md_run(mddev);
		}
		break;
	case clean:
		if (mddev->pers) {
			restart_array(mddev);
			spin_lock_irq(&mddev->write_lock);
			if (atomic_read(&mddev->writes_pending) == 0) {
				if (mddev->in_sync == 0) {
					mddev->in_sync = 1;
					if (mddev->safemode == 1)
						mddev->safemode = 0;
					if (mddev->persistent)
						set_bit(MD_CHANGE_CLEAN,
							&mddev->flags);
				}
				err = 0;
			} else
				err = -EBUSY;
			spin_unlock_irq(&mddev->write_lock);
		} else {
			mddev->ro = 0;
			mddev->recovery_cp = MaxSector;
			err = do_md_run(mddev);
		}
		break;
	case active:
		if (mddev->pers) {
			restart_array(mddev);
			if (mddev->external)
				clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
			wake_up(&mddev->sb_wait);
			err = 0;
		} else {
			mddev->ro = 0;
			set_disk_ro(mddev->gendisk, 0);
			err = do_md_run(mddev);
		}
		break;
	case write_pending:
	case active_idle:
		/* these cannot be set */
		break;
	}
	if (err)
		return err;
	else
		return len;
}
static struct md_sysfs_entry md_array_state =
__ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
static ssize_t
null_show(mddev_t *mddev, char *page)
{
	return -EINVAL;
}

static ssize_t
new_dev_store(mddev_t *mddev, const char *buf, size_t len)
{
	/* buf must be %d:%d\n? giving major and minor numbers */
	/* The new device is added to the array.
	 * If the array has a persistent superblock, we read the
	 * superblock to initialise info and check validity.
	 * Otherwise, only checking done is that in bind_rdev_to_array,
	 * which mainly checks size.
	 */
	char *e;
	int major = simple_strtoul(buf, &e, 10);
	int minor;
	dev_t dev;
	mdk_rdev_t *rdev;
	int err;

	if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
		return -EINVAL;
	minor = simple_strtoul(e+1, &e, 10);
	if (*e && *e != '\n')
		return -EINVAL;
	dev = MKDEV(major, minor);
	if (major != MAJOR(dev) ||
	    minor != MINOR(dev))
		return -EOVERFLOW;

	if (mddev->persistent) {
		rdev = md_import_device(dev, mddev->major_version,
					mddev->minor_version);
		if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
			mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
						       mdk_rdev_t, same_set);
			err = super_types[mddev->major_version]
				.load_super(rdev, rdev0, mddev->minor_version);
			if (err < 0)
				goto out;
		}
	} else if (mddev->external)
		rdev = md_import_device(dev, -2, -1);
	else
		rdev = md_import_device(dev, -1, -1);

	if (IS_ERR(rdev))
		return PTR_ERR(rdev);
	err = bind_rdev_to_array(rdev, mddev);
 out:
	if (err)
		export_rdev(rdev);
	return err ? err : len;
}

static struct md_sysfs_entry md_new_device =
__ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
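/*
 * Example (hypothetical device numbers, for illustration only):
 * "echo 8:16 > new_dev" would ask new_dev_store() to import the block
 * device with major 8, minor 16 and bind it to this array.
 */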
static ssize_t
bitmap_store(mddev_t *mddev, const char *buf, size_t len)
{
	char *end;
	unsigned long chunk, end_chunk;

	if (!mddev->bitmap)
		goto out;
	/* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
	while (*buf) {
		chunk = end_chunk = simple_strtoul(buf, &end, 0);
		if (buf == end) break;
		if (*end == '-') { /* range */
			buf = end + 1;
			end_chunk = simple_strtoul(buf, &end, 0);
			if (buf == end) break;
		}
		if (*end && !isspace(*end)) break;
		bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
		buf = end;
		while (isspace(*buf)) buf++;
	}
	bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
out:
	return len;
}

static struct md_sysfs_entry md_bitmap =
__ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
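/*
 * Example (illustrative values): writing "10 20-25" to bitmap_set_bits
 * marks chunk 10 and the range 20..25 dirty via bitmap_dirty_bits(), so
 * those chunks will be included in the next resync pass.
 */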
static ssize_t
size_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)mddev->size);
}

static int update_size(mddev_t *mddev, unsigned long size);

static ssize_t
size_store(mddev_t *mddev, const char *buf, size_t len)
{
	/* If array is inactive, we can reduce the component size, but
	 * not increase it (except from 0).
	 * If array is active, we can try an on-line resize
	 */
	int err = 0;
	char *e;
	unsigned long long size = simple_strtoull(buf, &e, 10);
	if (!*buf || *buf == '\n' ||
	    (*e && *e != '\n'))
		return -EINVAL;

	if (mddev->pers) {
		err = update_size(mddev, size);
		md_update_sb(mddev, 1);
	} else {
		if (mddev->size == 0 ||
		    mddev->size > size)
			mddev->size = size;
		else
			err = -ENOSPC;
	}
	return err ? err : len;
}

static struct md_sysfs_entry md_size =
__ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
/* Metadata version.
 * This is one of
 *   'none' for arrays with no metadata (good luck...)
 *   'external' for arrays with externally managed metadata,
 * or N.M for internally known formats
 */
static ssize_t
metadata_show(mddev_t *mddev, char *page)
{
	if (mddev->persistent)
		return sprintf(page, "%d.%d\n",
			       mddev->major_version, mddev->minor_version);
	else if (mddev->external)
		return sprintf(page, "external:%s\n", mddev->metadata_type);
	else
		return sprintf(page, "none\n");
}

static ssize_t
metadata_store(mddev_t *mddev, const char *buf, size_t len)
{
	int major, minor;
	char *e;
	if (!list_empty(&mddev->disks))
		return -EBUSY;

	if (cmd_match(buf, "none")) {
		mddev->persistent = 0;
		mddev->external = 0;
		mddev->major_version = 0;
		mddev->minor_version = 90;
		return len;
	}
	if (strncmp(buf, "external:", 9) == 0) {
		size_t namelen = len-9;
		if (namelen >= sizeof(mddev->metadata_type))
			namelen = sizeof(mddev->metadata_type)-1;
		strncpy(mddev->metadata_type, buf+9, namelen);
		mddev->metadata_type[namelen] = 0;
		if (namelen && mddev->metadata_type[namelen-1] == '\n')
			mddev->metadata_type[--namelen] = 0;
		mddev->persistent = 0;
		mddev->external = 1;
		mddev->major_version = 0;
		mddev->minor_version = 90;
		return len;
	}
	major = simple_strtoul(buf, &e, 10);
	if (e==buf || *e != '.')
		return -EINVAL;
	buf = e+1;
	minor = simple_strtoul(buf, &e, 10);
	if (e==buf || (*e && *e != '\n') )
		return -EINVAL;
	if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
		return -ENOENT;
	mddev->major_version = major;
	mddev->minor_version = minor;
	mddev->persistent = 1;
	mddev->external = 0;
	return len;
}

static struct md_sysfs_entry md_metadata =
__ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
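/*
 * Editorial illustration (not from the source): metadata_version accepts
 * "none", "external:<type>", or an internal N.M format such as "0.90";
 * anything else, or any write while member disks are attached, fails.
 */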
static ssize_t
action_show(mddev_t *mddev, char *page)
{
	char *type = "idle";
	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
	    (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) {
		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
			type = "reshape";
		else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
			if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
				type = "resync";
			else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
				type = "check";
			else
				type = "repair";
		} else
			type = "recover";
	}
	return sprintf(page, "%s\n", type);
}

static ssize_t
action_store(mddev_t *mddev, const char *page, size_t len)
{
	if (!mddev->pers || !mddev->pers->sync_request)
		return -EINVAL;

	if (cmd_match(page, "idle")) {
		if (mddev->sync_thread) {
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			md_unregister_thread(mddev->sync_thread);
			mddev->sync_thread = NULL;
			mddev->recovery = 0;
		}
	} else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
		   test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
		return -EBUSY;
	else if (cmd_match(page, "resync") || cmd_match(page, "recover"))
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	else if (cmd_match(page, "reshape")) {
		int err;
		if (mddev->pers->start_reshape == NULL)
			return -EINVAL;
		err = mddev->pers->start_reshape(mddev);
		if (err)
			return err;
	} else {
		if (cmd_match(page, "check"))
			set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
		else if (!cmd_match(page, "repair"))
			return -EINVAL;
		set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
		set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
	}
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	return len;
}

static ssize_t
mismatch_cnt_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long) mddev->resync_mismatches);
}

static struct md_sysfs_entry md_scan_mode =
__ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);


static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
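/*
 * Usage sketch (hypothetical): "echo check > sync_action" requests a
 * read-only consistency scan (MD_RECOVERY_CHECK|REQUESTED|SYNC), while
 * "echo repair > sync_action" additionally corrects mismatched blocks;
 * discrepancies found either way are counted in mismatch_cnt above.
 */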
static ssize_t
sync_min_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%d (%s)\n", speed_min(mddev),
		       mddev->sync_speed_min ? "local": "system");
}

static ssize_t
sync_min_store(mddev_t *mddev, const char *buf, size_t len)
{
	int min;
	char *e;
	if (strncmp(buf, "system", 6)==0) {
		mddev->sync_speed_min = 0;
		return len;
	}
	min = simple_strtoul(buf, &e, 10);
	if (buf == e || (*e && *e != '\n') || min <= 0)
		return -EINVAL;
	mddev->sync_speed_min = min;
	return len;
}

static struct md_sysfs_entry md_sync_min =
__ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);

static ssize_t
sync_max_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%d (%s)\n", speed_max(mddev),
		       mddev->sync_speed_max ? "local": "system");
}

static ssize_t
sync_max_store(mddev_t *mddev, const char *buf, size_t len)
{
	int max;
	char *e;
	if (strncmp(buf, "system", 6)==0) {
		mddev->sync_speed_max = 0;
		return len;
	}
	max = simple_strtoul(buf, &e, 10);
	if (buf == e || (*e && *e != '\n') || max <= 0)
		return -EINVAL;
	mddev->sync_speed_max = max;
	return len;
}

static struct md_sysfs_entry md_sync_max =
__ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
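/*
 * Editorial note: a stored value of 0 in sync_speed_min/sync_speed_max
 * means "system", i.e. the array falls back to the global defaults; any
 * positive value is a per-array ("local") override, as the "%d (%s)"
 * show format above indicates.
 */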
static ssize_t
degraded_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%d\n", mddev->degraded);
}
static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);

static ssize_t
sync_force_parallel_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%d\n", mddev->parallel_resync);
}

static ssize_t
sync_force_parallel_store(mddev_t *mddev, const char *buf, size_t len)
{
	long n;

	if (strict_strtol(buf, 10, &n))
		return -EINVAL;

	if (n != 0 && n != 1)
		return -EINVAL;

	mddev->parallel_resync = n;

	if (mddev->sync_thread)
		wake_up(&resync_wait);

	return len;
}

/* force parallel resync, even with shared block devices */
static struct md_sysfs_entry md_sync_force_parallel =
__ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
       sync_force_parallel_show, sync_force_parallel_store);
static ssize_t
sync_speed_show(mddev_t *mddev, char *page)
{
	unsigned long resync, dt, db;
	resync = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active));
	dt = ((jiffies - mddev->resync_mark) / HZ);
	if (!dt) dt++;
	db = resync - (mddev->resync_mark_cnt);
	return sprintf(page, "%ld\n", db/dt/2); /* K/sec */
}

static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);

static ssize_t
sync_completed_show(mddev_t *mddev, char *page)
{
	unsigned long max_blocks, resync;

	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
		max_blocks = mddev->resync_max_sectors;
	else
		max_blocks = mddev->size << 1;

	resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active));
	return sprintf(page, "%lu / %lu\n", resync, max_blocks);
}

static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed);
static ssize_t
min_sync_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)mddev->resync_min);
}
static ssize_t
min_sync_store(mddev_t *mddev, const char *buf, size_t len)
{
	unsigned long long min;
	if (strict_strtoull(buf, 10, &min))
		return -EINVAL;
	if (min > mddev->resync_max)
		return -EINVAL;
	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		return -EBUSY;

	/* Must be a multiple of chunk_size */
	if (mddev->chunk_size) {
		if (min & (sector_t)((mddev->chunk_size>>9)-1))
			return -EINVAL;
	}
	mddev->resync_min = min;

	return len;
}

static struct md_sysfs_entry md_min_sync =
__ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);
static ssize_t
max_sync_show(mddev_t *mddev, char *page)
{
	if (mddev->resync_max == MaxSector)
		return sprintf(page, "max\n");
	else
		return sprintf(page, "%llu\n",
			       (unsigned long long)mddev->resync_max);
}
static ssize_t
max_sync_store(mddev_t *mddev, const char *buf, size_t len)
{
	if (strncmp(buf, "max", 3) == 0)
		mddev->resync_max = MaxSector;
	else {
		unsigned long long max;
		if (strict_strtoull(buf, 10, &max))
			return -EINVAL;
		if (max < mddev->resync_min)
			return -EINVAL;
		if (max < mddev->resync_max &&
		    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
			return -EBUSY;

		/* Must be a multiple of chunk_size */
		if (mddev->chunk_size) {
			if (max & (sector_t)((mddev->chunk_size>>9)-1))
				return -EINVAL;
		}
		mddev->resync_max = max;
	}
	wake_up(&mddev->recovery_wait);
	return len;
}

static struct md_sysfs_entry md_max_sync =
__ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
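/*
 * Worked example (illustrative): with chunk_size = 65536 bytes a chunk
 * covers 65536>>9 = 128 sectors, so the masks above reject any sync_min
 * or sync_max that is not a multiple of 128; e.g. 1024 is accepted while
 * 1000 would return -EINVAL.
 */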
static ssize_t
suspend_lo_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
}

static ssize_t
suspend_lo_store(mddev_t *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long long new = simple_strtoull(buf, &e, 10);

	if (mddev->pers->quiesce == NULL)
		return -EINVAL;
	if (buf == e || (*e && *e != '\n'))
		return -EINVAL;
	if (new >= mddev->suspend_hi ||
	    (new > mddev->suspend_lo && new < mddev->suspend_hi)) {
		mddev->suspend_lo = new;
		mddev->pers->quiesce(mddev, 2);
		return len;
	} else
		return -EINVAL;
}
static struct md_sysfs_entry md_suspend_lo =
__ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);

static ssize_t
suspend_hi_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
}

static ssize_t
suspend_hi_store(mddev_t *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long long new = simple_strtoull(buf, &e, 10);

	if (mddev->pers->quiesce == NULL)
		return -EINVAL;
	if (buf == e || (*e && *e != '\n'))
		return -EINVAL;
	if ((new <= mddev->suspend_lo && mddev->suspend_lo >= mddev->suspend_hi) ||
	    (new > mddev->suspend_lo && new > mddev->suspend_hi)) {
		mddev->suspend_hi = new;
		mddev->pers->quiesce(mddev, 1);
		mddev->pers->quiesce(mddev, 0);
		return len;
	} else
		return -EINVAL;
}
static struct md_sysfs_entry md_suspend_hi =
__ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
static ssize_t
reshape_position_show(mddev_t *mddev, char *page)
{
	if (mddev->reshape_position != MaxSector)
		return sprintf(page, "%llu\n",
			       (unsigned long long)mddev->reshape_position);
	strcpy(page, "none\n");
	return 5;
}

static ssize_t
reshape_position_store(mddev_t *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long long new = simple_strtoull(buf, &e, 10);
	if (mddev->pers)
		return -EBUSY;
	if (buf == e || (*e && *e != '\n'))
		return -EINVAL;
	mddev->reshape_position = new;
	mddev->delta_disks = 0;
	mddev->new_level = mddev->level;
	mddev->new_layout = mddev->layout;
	mddev->new_chunk = mddev->chunk_size;
	return len;
}

static struct md_sysfs_entry md_reshape_position =
__ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
       reshape_position_store);
static struct attribute *md_default_attrs[] = {
	&md_level.attr,
	&md_layout.attr,
	&md_raid_disks.attr,
	&md_chunk_size.attr,
	&md_size.attr,
	&md_resync_start.attr,
	&md_metadata.attr,
	&md_new_device.attr,
	&md_safe_delay.attr,
	&md_array_state.attr,
	&md_reshape_position.attr,
	NULL,
};

static struct attribute *md_redundancy_attrs[] = {
	&md_scan_mode.attr,
	&md_mismatches.attr,
	&md_sync_min.attr,
	&md_sync_max.attr,
	&md_sync_speed.attr,
	&md_sync_force_parallel.attr,
	&md_sync_completed.attr,
	&md_min_sync.attr,
	&md_max_sync.attr,
	&md_suspend_lo.attr,
	&md_suspend_hi.attr,
	&md_bitmap.attr,
	&md_degraded.attr,
	NULL,
};
static struct attribute_group md_redundancy_group = {
	.name = NULL,
	.attrs = md_redundancy_attrs,
};
static ssize_t
md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
	mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
	ssize_t rv;

	if (!entry->show)
		return -EIO;
	rv = mddev_lock(mddev);
	if (!rv) {
		rv = entry->show(mddev, page);
		mddev_unlock(mddev);
	}
	return rv;
}

static ssize_t
md_attr_store(struct kobject *kobj, struct attribute *attr,
	      const char *page, size_t length)
{
	struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
	mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
	ssize_t rv;

	if (!entry->store)
		return -EIO;
	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	rv = mddev_lock(mddev);
	if (!rv) {
		rv = entry->store(mddev, page, length);
		mddev_unlock(mddev);
	}
	return rv;
}

static void md_free(struct kobject *ko)
{
	mddev_t *mddev = container_of(ko, mddev_t, kobj);
	kfree(mddev);
}

static struct sysfs_ops md_sysfs_ops = {
	.show	= md_attr_show,
	.store	= md_attr_store,
};
static struct kobj_type md_ktype = {
	.release	= md_free,
	.sysfs_ops	= &md_sysfs_ops,
	.default_attrs	= md_default_attrs,
};
static struct kobject *md_probe(dev_t dev, int *part, void *data)
{
	static DEFINE_MUTEX(disks_mutex);
	mddev_t *mddev = mddev_find(dev);
	struct gendisk *disk;
	int partitioned = (MAJOR(dev) != MD_MAJOR);
	int shift = partitioned ? MdpMinorShift : 0;
	int unit = MINOR(dev) >> shift;
	int error;

	if (!mddev)
		return NULL;

	mutex_lock(&disks_mutex);
	if (mddev->gendisk) {
		mutex_unlock(&disks_mutex);
		mddev_put(mddev);
		return NULL;
	}
	disk = alloc_disk(1 << shift);
	if (!disk) {
		mutex_unlock(&disks_mutex);
		mddev_put(mddev);
		return NULL;
	}
	disk->major = MAJOR(dev);
	disk->first_minor = unit << shift;
	if (partitioned)
		sprintf(disk->disk_name, "md_d%d", unit);
	else
		sprintf(disk->disk_name, "md%d", unit);
	disk->fops = &md_fops;
	disk->private_data = mddev;
	disk->queue = mddev->queue;
	add_disk(disk);
	mddev->gendisk = disk;
	error = kobject_init_and_add(&mddev->kobj, &md_ktype, &disk->dev.kobj,
				     "%s", "md");
	mutex_unlock(&disks_mutex);
	if (error)
		printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
		       disk->disk_name);
	else
		kobject_uevent(&mddev->kobj, KOBJ_ADD);
	return NULL;
}
static void md_safemode_timeout(unsigned long data)
{
	mddev_t *mddev = (mddev_t *) data;

	mddev->safemode = 1;
	md_wakeup_thread(mddev->thread);
}

static int start_dirty_degraded;
static int do_md_run(mddev_t * mddev)
{
	int err;
	int chunk_size;
	struct list_head *tmp;
	mdk_rdev_t *rdev;
	struct gendisk *disk;
	struct mdk_personality *pers;
	char b[BDEVNAME_SIZE];

	if (list_empty(&mddev->disks))
		/* cannot run an array with no devices.. */
		return -EINVAL;

	if (mddev->pers)
		return -EBUSY;

	/*
	 * Analyze all RAID superblock(s)
	 */
	if (!mddev->raid_disks) {
		if (!mddev->persistent)
			return -EINVAL;
		analyze_sbs(mddev);
	}

	chunk_size = mddev->chunk_size;

	if (chunk_size) {
		if (chunk_size > MAX_CHUNK_SIZE) {
			printk(KERN_ERR "too big chunk_size: %d > %d\n",
				chunk_size, MAX_CHUNK_SIZE);
			return -EINVAL;
		}
		/*
		 * chunk-size has to be a power of 2 and multiples of PAGE_SIZE
		 */
		if ( (1 << ffz(~chunk_size)) != chunk_size) {
			printk(KERN_ERR "chunk_size of %d not valid\n", chunk_size);
			return -EINVAL;
		}
		if (chunk_size < PAGE_SIZE) {
			printk(KERN_ERR "too small chunk_size: %d < %ld\n",
				chunk_size, PAGE_SIZE);
			return -EINVAL;
		}

		/* devices must have minimum size of one chunk */
		rdev_for_each(rdev, tmp, mddev) {
			if (test_bit(Faulty, &rdev->flags))
				continue;
			if (rdev->size < chunk_size / 1024) {
				printk(KERN_WARNING
					"md: Dev %s smaller than chunk_size:"
					" %lluk < %dk\n",
					bdevname(rdev->bdev,b),
					(unsigned long long)rdev->size,
					chunk_size / 1024);
				return -EINVAL;
			}
		}
	}

#ifdef CONFIG_KMOD
	if (mddev->level != LEVEL_NONE)
		request_module("md-level-%d", mddev->level);
	else if (mddev->clevel[0])
		request_module("md-%s", mddev->clevel);
#endif

	/*
	 * Drop all container device buffers, from now on
	 * the only valid external interface is through the md
	 * device.
	 */
	rdev_for_each(rdev, tmp, mddev) {
		if (test_bit(Faulty, &rdev->flags))
			continue;
		sync_blockdev(rdev->bdev);
		invalidate_bdev(rdev->bdev);

		/* perform some consistency tests on the device.
		 * We don't want the data to overlap the metadata,
		 * Internal Bitmap issues are handled elsewhere.
		 */
		if (rdev->data_offset < rdev->sb_offset) {
			if (mddev->size &&
			    rdev->data_offset + mddev->size*2
			    > rdev->sb_offset*2) {
				printk("md: %s: data overlaps metadata\n",
				       mdname(mddev));
				return -EINVAL;
			}
		} else {
			if (rdev->sb_offset*2 + rdev->sb_size/512
			    > rdev->data_offset) {
				printk("md: %s: metadata overlaps data\n",
				       mdname(mddev));
				return -EINVAL;
			}
		}
	}

	md_probe(mddev->unit, NULL, NULL);
	disk = mddev->gendisk;
	if (!disk)
		return -ENOMEM;

	spin_lock(&pers_lock);
	pers = find_pers(mddev->level, mddev->clevel);
	if (!pers || !try_module_get(pers->owner)) {
		spin_unlock(&pers_lock);
		if (mddev->level != LEVEL_NONE)
			printk(KERN_WARNING "md: personality for level %d is not loaded!\n",
			       mddev->level);
		else
			printk(KERN_WARNING "md: personality for level %s is not loaded!\n",
			       mddev->clevel);
		return -EINVAL;
	}
	mddev->pers = pers;
	spin_unlock(&pers_lock);
	mddev->level = pers->level;
	strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));

	if (mddev->reshape_position != MaxSector &&
	    pers->start_reshape == NULL) {
		/* This personality cannot handle reshaping... */
		mddev->pers = NULL;
		module_put(pers->owner);
		return -EINVAL;
	}

	if (pers->sync_request) {
		/* Warn if this is a potentially silly
		 * configuration.
		 */
		char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
		mdk_rdev_t *rdev2;
		struct list_head *tmp2;
		int warned = 0;
		rdev_for_each(rdev, tmp, mddev) {
			rdev_for_each(rdev2, tmp2, mddev) {
				if (rdev < rdev2 &&
				    rdev->bdev->bd_contains ==
				    rdev2->bdev->bd_contains) {
					printk(KERN_WARNING
					       "%s: WARNING: %s appears to be"
					       " on the same physical disk as"
					       " %s.\n",
					       mdname(mddev),
					       bdevname(rdev->bdev,b),
					       bdevname(rdev2->bdev,b2));
					warned = 1;
				}
			}
		}
		if (warned)
			printk(KERN_WARNING
			       "True protection against single-disk"
			       " failure might be compromised.\n");
	}

	mddev->recovery = 0;
	mddev->resync_max_sectors = mddev->size << 1; /* may be over-ridden by personality */
	mddev->barriers_work = 1;
	mddev->ok_start_degraded = start_dirty_degraded;

	if (start_readonly)
		mddev->ro = 2; /* read-only, but switch on first write */

	err = mddev->pers->run(mddev);
	if (!err && mddev->pers->sync_request) {
		err = bitmap_create(mddev);
		if (err) {
			printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
			       mdname(mddev), err);
			mddev->pers->stop(mddev);
		}
	}
	if (err) {
		printk(KERN_ERR "md: pers->run() failed ...\n");
		module_put(mddev->pers->owner);
		mddev->pers = NULL;
		bitmap_destroy(mddev);
		return err;
	}
	if (mddev->pers->sync_request) {
		if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
			printk(KERN_WARNING
			       "md: cannot register extra attributes for %s\n",
			       mdname(mddev));
	} else if (mddev->ro == 2) /* auto-readonly not meaningful */
		mddev->ro = 0;

	atomic_set(&mddev->writes_pending,0);
	mddev->safemode = 0;
	mddev->safemode_timer.function = md_safemode_timeout;
	mddev->safemode_timer.data = (unsigned long) mddev;
	mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
	mddev->in_sync = 1;

	rdev_for_each(rdev, tmp, mddev)
		if (rdev->raid_disk >= 0) {
			char nm[20];
			sprintf(nm, "rd%d", rdev->raid_disk);
			if (sysfs_create_link(&mddev->kobj, &rdev->kobj, nm))
				printk("md: cannot register %s for %s\n",
				       nm, mdname(mddev));
		}

	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);

	if (mddev->flags)
		md_update_sb(mddev, 0);

	set_capacity(disk, mddev->array_size<<1);

	/* If we call blk_queue_make_request here, it will
	 * re-initialise max_sectors etc which may have been
	 * refined inside -> run.  So just set the bits we need to set.
	 * Most initialisation happened when we called
	 * blk_queue_make_request(..., md_fail_request)
	 * earlier.
	 */
	mddev->queue->queuedata = mddev;
	mddev->queue->make_request_fn = mddev->pers->make_request;

	/* If there is a partially-recovered drive we need to
	 * start recovery here.  If we leave it to md_check_recovery,
	 * it will remove the drives and not do the right thing
	 */
	if (mddev->degraded && !mddev->sync_thread) {
		struct list_head *rtmp;
		int spares = 0;
		rdev_for_each(rdev, rtmp, mddev)
			if (rdev->raid_disk >= 0 &&
			    !test_bit(In_sync, &rdev->flags) &&
			    !test_bit(Faulty, &rdev->flags))
				/* complete an interrupted recovery */
				spares++;
		if (spares && mddev->pers->sync_request) {
			mddev->recovery = 0;
			set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
			mddev->sync_thread = md_register_thread(md_do_sync,
								mddev,
								"%s_resync");
			if (!mddev->sync_thread) {
				printk(KERN_ERR "%s: could not start resync"
				       " thread...\n",
				       mdname(mddev));
				/* leave the spares where they are, it shouldn't hurt */
				mddev->recovery = 0;
			}
		}
	}
	md_wakeup_thread(mddev->thread);
	md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */

	mddev->changed = 1;
	md_new_event(mddev);
	kobject_uevent(&mddev->gendisk->dev.kobj, KOBJ_CHANGE);
	return 0;
}
static int restart_array(mddev_t *mddev)
{
	struct gendisk *disk = mddev->gendisk;
	int err;

	/*
	 * Complain if it has no devices
	 */
	err = -ENXIO;
	if (list_empty(&mddev->disks))
		goto out;

	if (mddev->pers) {
		err = -EBUSY;
		if (!mddev->ro)
			goto out;

		mddev->safemode = 0;
		mddev->ro = 0;
		set_disk_ro(disk, 0);

		printk(KERN_INFO "md: %s switched to read-write mode.\n",
			mdname(mddev));
		/*
		 * Kick recovery or resync if necessary
		 */
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
		md_wakeup_thread(mddev->sync_thread);
		err = 0;
	} else
		err = -EINVAL;

out:
	return err;
}
/* similar to deny_write_access, but accounts for our holding a reference
 * to the file ourselves */
static int deny_bitmap_write_access(struct file * file)
{
	struct inode *inode = file->f_mapping->host;

	spin_lock(&inode->i_lock);
	if (atomic_read(&inode->i_writecount) > 1) {
		spin_unlock(&inode->i_lock);
		return -ETXTBSY;
	}
	atomic_set(&inode->i_writecount, -1);
	spin_unlock(&inode->i_lock);

	return 0;
}

static void restore_bitmap_write_access(struct file *file)
{
	struct inode *inode = file->f_mapping->host;

	spin_lock(&inode->i_lock);
	atomic_set(&inode->i_writecount, 1);
	spin_unlock(&inode->i_lock);
}
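/*
 * Editorial note: as with the VFS deny_write_access(), an i_writecount of
 * -1 marks the inode "writes denied"; because md itself already holds the
 * bitmap file open, the check above tolerates exactly one existing write
 * reference (md's own) before flipping the count negative.
 */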
/* mode:
 *   0 - completely stop and dis-assemble array
 *   1 - switch to readonly
 *   2 - stop but do not disassemble array
 */
static int do_md_stop(mddev_t * mddev, int mode)
{
	int err = 0;
	struct gendisk *disk = mddev->gendisk;

	if (mddev->pers) {
		if (atomic_read(&mddev->active)>2) {
			printk("md: %s still in use.\n",mdname(mddev));
			return -EBUSY;
		}

		if (mddev->sync_thread) {
			set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			md_unregister_thread(mddev->sync_thread);
			mddev->sync_thread = NULL;
		}

		del_timer_sync(&mddev->safemode_timer);

		invalidate_partition(disk, 0);

		switch(mode) {
		case 1: /* readonly */
			err = -ENXIO;
			if (mddev->ro==1)
				goto out;
			mddev->ro = 1;
			break;
		case 0: /* disassemble */
		case 2: /* stop */
			bitmap_flush(mddev);
			md_super_wait(mddev);
			if (mddev->ro)
				set_disk_ro(disk, 0);
			blk_queue_make_request(mddev->queue, md_fail_request);
			mddev->pers->stop(mddev);
			mddev->queue->merge_bvec_fn = NULL;
			mddev->queue->unplug_fn = NULL;
			mddev->queue->backing_dev_info.congested_fn = NULL;
			if (mddev->pers->sync_request)
				sysfs_remove_group(&mddev->kobj, &md_redundancy_group);

			module_put(mddev->pers->owner);
			mddev->pers = NULL;
			/* tell userspace to handle 'inactive' */
			sysfs_notify(&mddev->kobj, NULL, "array_state");

			set_capacity(disk, 0);
			mddev->changed = 1;

			if (mddev->ro)
				mddev->ro = 0;
		}
		if (!mddev->in_sync || mddev->flags) {
			/* mark array as shutdown cleanly */
			mddev->in_sync = 1;
			md_update_sb(mddev, 1);
		}
		if (mode == 1)
			set_disk_ro(disk, 1);
		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
	}

	/*
	 * Free resources if final stop
	 */
	if (mode == 0) {
		mdk_rdev_t *rdev;
		struct list_head *tmp;

		printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));

		bitmap_destroy(mddev);
		if (mddev->bitmap_file) {
			restore_bitmap_write_access(mddev->bitmap_file);
			fput(mddev->bitmap_file);
			mddev->bitmap_file = NULL;
		}
		mddev->bitmap_offset = 0;

		rdev_for_each(rdev, tmp, mddev)
			if (rdev->raid_disk >= 0) {
				char nm[20];
				sprintf(nm, "rd%d", rdev->raid_disk);
				sysfs_remove_link(&mddev->kobj, nm);
			}

		/* make sure all md_delayed_delete calls have finished */
		flush_scheduled_work();

		export_array(mddev);

		mddev->array_size = 0;
		mddev->size = 0;
		mddev->raid_disks = 0;
		mddev->recovery_cp = 0;
		mddev->resync_min = 0;
		mddev->resync_max = MaxSector;
		mddev->reshape_position = MaxSector;
		mddev->external = 0;
		mddev->persistent = 0;
		mddev->level = LEVEL_NONE;
		mddev->clevel[0] = 0;
		mddev->flags = 0;
		mddev->ro = 0;
		mddev->metadata_type[0] = 0;
		mddev->chunk_size = 0;
		mddev->ctime = mddev->utime = 0;
		mddev->layout = 0;
		mddev->max_disks = 0;
		mddev->events = 0;
		mddev->delta_disks = 0;
		mddev->new_level = LEVEL_NONE;
		mddev->new_layout = 0;
		mddev->new_chunk = 0;
		mddev->curr_resync = 0;
		mddev->resync_mismatches = 0;
		mddev->suspend_lo = mddev->suspend_hi = 0;
		mddev->sync_speed_min = mddev->sync_speed_max = 0;
		mddev->recovery = 0;
		mddev->in_sync = 0;
		mddev->changed = 0;
		mddev->degraded = 0;
		mddev->barriers_work = 0;
		mddev->safemode = 0;

	} else if (mddev->pers)
		printk(KERN_INFO "md: %s switched to read-only mode.\n",
			mdname(mddev));
	err = 0;
	md_new_event(mddev);
out:
	return err;
}
#ifndef MODULE
static void autorun_array(mddev_t *mddev)
{
	mdk_rdev_t *rdev;
	struct list_head *tmp;
	int err;

	if (list_empty(&mddev->disks))
		return;

	printk(KERN_INFO "md: running: ");

	rdev_for_each(rdev, tmp, mddev) {
		char b[BDEVNAME_SIZE];
		printk("<%s>", bdevname(rdev->bdev,b));
	}
	printk("\n");

	err = do_md_run (mddev);
	if (err) {
		printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
		do_md_stop (mddev, 0);
	}
}
/*
 * let's try to run arrays based on all disks that have arrived
 * until now. (those are in pending_raid_disks)
 *
 * the method: pick the first pending disk, collect all disks with
 * the same UUID, remove all from the pending list and put them into
 * the 'same_array' list. Then order this list based on superblock
 * update time (freshest comes first), kick out 'old' disks and
 * compare superblocks. If everything's fine then run it.
 *
 * If "unit" is allocated, then bump its reference count
 */
static void autorun_devices(int part)
{
	struct list_head *tmp;
	mdk_rdev_t *rdev0, *rdev;
	mddev_t *mddev;
	char b[BDEVNAME_SIZE];

	printk(KERN_INFO "md: autorun ...\n");
	while (!list_empty(&pending_raid_disks)) {
		int unit;
		dev_t dev;
		LIST_HEAD(candidates);
		rdev0 = list_entry(pending_raid_disks.next,
				   mdk_rdev_t, same_set);

		printk(KERN_INFO "md: considering %s ...\n",
			bdevname(rdev0->bdev,b));
		INIT_LIST_HEAD(&candidates);
		rdev_for_each_list(rdev, tmp, pending_raid_disks)
			if (super_90_load(rdev, rdev0, 0) >= 0) {
				printk(KERN_INFO "md:  adding %s ...\n",
					bdevname(rdev->bdev,b));
				list_move(&rdev->same_set, &candidates);
			}
		/*
		 * now we have a set of devices, with all of them having
		 * mostly sane superblocks. It's time to allocate the
		 * mddev.
		 */
		if (part) {
			dev = MKDEV(mdp_major,
				    rdev0->preferred_minor << MdpMinorShift);
			unit = MINOR(dev) >> MdpMinorShift;
		} else {
			dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
			unit = MINOR(dev);
		}
		if (rdev0->preferred_minor != unit) {
			printk(KERN_INFO "md: unit number in %s is bad: %d\n",
			       bdevname(rdev0->bdev, b), rdev0->preferred_minor);
			break;
		}

		md_probe(dev, NULL, NULL);
		mddev = mddev_find(dev);
		if (!mddev || !mddev->gendisk) {
			if (mddev)
				mddev_put(mddev);
			printk(KERN_ERR
				"md: cannot allocate memory for md drive.\n");
			break;
		}
		if (mddev_lock(mddev))
			printk(KERN_WARNING "md: %s locked, cannot run\n",
			       mdname(mddev));
		else if (mddev->raid_disks || mddev->major_version
			 || !list_empty(&mddev->disks)) {
			printk(KERN_WARNING
				"md: %s already running, cannot run %s\n",
				mdname(mddev), bdevname(rdev0->bdev,b));
			mddev_unlock(mddev);
		} else {
			printk(KERN_INFO "md: created %s\n", mdname(mddev));
			mddev->persistent = 1;
			rdev_for_each_list(rdev, tmp, candidates) {
				list_del_init(&rdev->same_set);
				if (bind_rdev_to_array(rdev, mddev))
					export_rdev(rdev);
			}
			autorun_array(mddev);
			mddev_unlock(mddev);
		}
		/* on success, candidates will be empty, on error
		 * it won't...
		 */
		rdev_for_each_list(rdev, tmp, candidates)
			export_rdev(rdev);
		mddev_put(mddev);
	}
	printk(KERN_INFO "md: ... autorun DONE.\n");
}
#endif /* !MODULE */
static int get_version(void __user * arg)
{
	mdu_version_t ver;

	ver.major = MD_MAJOR_VERSION;
	ver.minor = MD_MINOR_VERSION;
	ver.patchlevel = MD_PATCHLEVEL_VERSION;

	if (copy_to_user(arg, &ver, sizeof(ver)))
		return -EFAULT;

	return 0;
}
static int get_array_info(mddev_t * mddev, void __user * arg)
{
	mdu_array_info_t info;
	int nr,working,active,failed,spare;
	mdk_rdev_t *rdev;
	struct list_head *tmp;

	nr=working=active=failed=spare=0;
	rdev_for_each(rdev, tmp, mddev) {
		nr++;
		if (test_bit(Faulty, &rdev->flags))
			failed++;
		else {
			working++;
			if (test_bit(In_sync, &rdev->flags))
				active++;
			else
				spare++;
		}
	}

	info.major_version = mddev->major_version;
	info.minor_version = mddev->minor_version;
	info.patch_version = MD_PATCHLEVEL_VERSION;
	info.ctime         = mddev->ctime;
	info.level         = mddev->level;
	info.size          = mddev->size;
	if (info.size != mddev->size) /* overflow */
		info.size = -1;
	info.nr_disks      = nr;
	info.raid_disks    = mddev->raid_disks;
	info.md_minor      = mddev->md_minor;
	info.not_persistent= !mddev->persistent;

	info.utime         = mddev->utime;
	info.state         = 0;
	if (mddev->in_sync)
		info.state = (1<<MD_SB_CLEAN);
	if (mddev->bitmap && mddev->bitmap_offset)
		info.state |= (1<<MD_SB_BITMAP_PRESENT);
	info.active_disks  = active;
	info.working_disks = working;
	info.failed_disks  = failed;
	info.spare_disks   = spare;

	info.layout        = mddev->layout;
	info.chunk_size    = mddev->chunk_size;

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}
static int get_bitmap_file(mddev_t * mddev, void __user * arg)
{
	mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
	char *ptr, *buf = NULL;
	int err = -ENOMEM;

	md_allow_write(mddev);

	file = kmalloc(sizeof(*file), GFP_KERNEL);
	if (!file)
		goto out;

	/* bitmap disabled, zero the first byte and copy out */
	if (!mddev->bitmap || !mddev->bitmap->file) {
		file->pathname[0] = '\0';
		goto copy_out;
	}

	buf = kmalloc(sizeof(file->pathname), GFP_KERNEL);
	if (!buf)
		goto out;

	ptr = d_path(&mddev->bitmap->file->f_path, buf, sizeof(file->pathname));
	if (IS_ERR(ptr))
		goto out;

	strcpy(file->pathname, ptr);

copy_out:
	err = 0;
	if (copy_to_user(arg, file, sizeof(*file)))
		err = -EFAULT;
out:
	kfree(buf);
	kfree(file);
	return err;
}
static int get_disk_info(mddev_t * mddev, void __user * arg)
{
	mdu_disk_info_t info;
	unsigned int nr;
	mdk_rdev_t *rdev;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	nr = info.number;

	rdev = find_rdev_nr(mddev, nr);
	if (rdev) {
		info.major = MAJOR(rdev->bdev->bd_dev);
		info.minor = MINOR(rdev->bdev->bd_dev);
		info.raid_disk = rdev->raid_disk;
		info.state = 0;
		if (test_bit(Faulty, &rdev->flags))
			info.state |= (1<<MD_DISK_FAULTY);
		else if (test_bit(In_sync, &rdev->flags)) {
			info.state |= (1<<MD_DISK_ACTIVE);
			info.state |= (1<<MD_DISK_SYNC);
		}
		if (test_bit(WriteMostly, &rdev->flags))
			info.state |= (1<<MD_DISK_WRITEMOSTLY);
	} else {
		info.major = info.minor = 0;
		info.raid_disk = -1;
		info.state = (1<<MD_DISK_REMOVED);
	}

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}
static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
{
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	mdk_rdev_t *rdev;
	dev_t dev = MKDEV(info->major,info->minor);

	if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
		return -EOVERFLOW;

	if (!mddev->raid_disks) {
		int err;
		/* expecting a device which has a superblock */
		rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
		if (IS_ERR(rdev)) {
			printk(KERN_WARNING
				"md: md_import_device returned %ld\n",
				PTR_ERR(rdev));
			return PTR_ERR(rdev);
		}
		if (!list_empty(&mddev->disks)) {
			mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
							mdk_rdev_t, same_set);
			int err = super_types[mddev->major_version]
				.load_super(rdev, rdev0, mddev->minor_version);
			if (err < 0) {
				printk(KERN_WARNING
					"md: %s has different UUID to %s\n",
					bdevname(rdev->bdev,b),
					bdevname(rdev0->bdev,b2));
				export_rdev(rdev);
				return -EINVAL;
			}
		}
		err = bind_rdev_to_array(rdev, mddev);
		if (err)
			export_rdev(rdev);
		return err;
	}

	/*
	 * add_new_disk can be used once the array is assembled
	 * to add "hot spares".  They must already have a superblock
	 * written
	 */
	if (mddev->pers) {
		int err;
		if (!mddev->pers->hot_add_disk) {
			printk(KERN_WARNING
				"%s: personality does not support diskops!\n",
			       mdname(mddev));
			return -EINVAL;
		}
		if (mddev->persistent)
			rdev = md_import_device(dev, mddev->major_version,
						mddev->minor_version);
		else
			rdev = md_import_device(dev, -1, -1);
		if (IS_ERR(rdev)) {
			printk(KERN_WARNING
				"md: md_import_device returned %ld\n",
				PTR_ERR(rdev));
			return PTR_ERR(rdev);
		}
		/* set save_raid_disk if appropriate */
		if (!mddev->persistent) {
			if (info->state & (1<<MD_DISK_SYNC)  &&
			    info->raid_disk < mddev->raid_disks)
				rdev->raid_disk = info->raid_disk;
			else
				rdev->raid_disk = -1;
		} else
			super_types[mddev->major_version].
				validate_super(mddev, rdev);
		rdev->saved_raid_disk = rdev->raid_disk;

		clear_bit(In_sync, &rdev->flags); /* just to be sure */
		if (info->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);

		rdev->raid_disk = -1;
		err = bind_rdev_to_array(rdev, mddev);
		if (!err && !mddev->pers->hot_remove_disk) {
			/* If there is hot_add_disk but no hot_remove_disk
			 * then added disks for geometry changes,
			 * and should be added immediately.
			 */
			super_types[mddev->major_version].
				validate_super(mddev, rdev);
			err = mddev->pers->hot_add_disk(mddev, rdev);
			if (err)
				unbind_rdev_from_array(rdev);
		}
		if (err)
			export_rdev(rdev);

		md_update_sb(mddev, 1);
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
		return err;
	}

	/* otherwise, add_new_disk is only allowed
	 * for major_version==0 superblocks
	 */
	if (mddev->major_version != 0) {
		printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n",
		       mdname(mddev));
		return -EINVAL;
	}

	if (!(info->state & (1<<MD_DISK_FAULTY))) {
		int err;
		rdev = md_import_device (dev, -1, 0);
		if (IS_ERR(rdev)) {
			printk(KERN_WARNING
				"md: error, md_import_device() returned %ld\n",
				PTR_ERR(rdev));
			return PTR_ERR(rdev);
		}
		rdev->desc_nr = info->number;
		if (info->raid_disk < mddev->raid_disks)
			rdev->raid_disk = info->raid_disk;
		else
			rdev->raid_disk = -1;

		if (rdev->raid_disk < mddev->raid_disks)
			if (info->state & (1<<MD_DISK_SYNC))
				set_bit(In_sync, &rdev->flags);

		if (info->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);

		if (!mddev->persistent) {
			printk(KERN_INFO "md: nonpersistent superblock ...\n");
			rdev->sb_offset = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
		} else
			rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
		rdev->size = calc_dev_size(rdev, mddev->chunk_size);

		err = bind_rdev_to_array(rdev, mddev);
		if (err) {
			export_rdev(rdev);
			return err;
		}
	}

	return 0;
}
static int hot_remove_disk(mddev_t * mddev, dev_t dev)
{
	char b[BDEVNAME_SIZE];
	mdk_rdev_t *rdev;

	if (!mddev->pers)
		return -ENODEV;

	rdev = find_rdev(mddev, dev);
	if (!rdev)
		return -ENXIO;

	if (rdev->raid_disk >= 0)
		goto busy;

	kick_rdev_from_array(rdev);
	md_update_sb(mddev, 1);
	md_new_event(mddev);

	return 0;
busy:
	printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n",
		bdevname(rdev->bdev,b), mdname(mddev));
	return -EBUSY;
}
static int hot_add_disk(mddev_t * mddev, dev_t dev)
{
	char b[BDEVNAME_SIZE];
	int err;
	unsigned int size;
	mdk_rdev_t *rdev;

	if (!mddev->pers)
		return -ENODEV;

	if (mddev->major_version != 0) {
		printk(KERN_WARNING "%s: HOT_ADD may only be used with"
			" version-0 superblocks.\n",
			mdname(mddev));
		return -EINVAL;
	}
	if (!mddev->pers->hot_add_disk) {
		printk(KERN_WARNING
			"%s: personality does not support diskops!\n",
			mdname(mddev));
		return -EINVAL;
	}

	rdev = md_import_device (dev, -1, 0);
	if (IS_ERR(rdev)) {
		printk(KERN_WARNING
			"md: error, md_import_device() returned %ld\n",
			PTR_ERR(rdev));
		return -EINVAL;
	}

	if (mddev->persistent)
		rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
	else
		rdev->sb_offset =
			rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;

	size = calc_dev_size(rdev, mddev->chunk_size);
	rdev->size = size;

	if (test_bit(Faulty, &rdev->flags)) {
		printk(KERN_WARNING
			"md: can not hot-add faulty %s disk to %s!\n",
			bdevname(rdev->bdev,b), mdname(mddev));
		err = -EINVAL;
		goto abort_export;
	}
	clear_bit(In_sync, &rdev->flags);
	rdev->desc_nr = -1;
	rdev->saved_raid_disk = -1;
	err = bind_rdev_to_array(rdev, mddev);
	if (err)
		goto abort_export;

	/*
	 * The rest should better be atomic, we can have disk failures
	 * noticed in interrupt contexts ...
	 */

	if (rdev->desc_nr == mddev->max_disks) {
		printk(KERN_WARNING "%s: can not hot-add to full array!\n",
			mdname(mddev));
		err = -EBUSY;
		goto abort_unbind_export;
	}

	rdev->raid_disk = -1;

	md_update_sb(mddev, 1);

	/*
	 * Kick recovery, maybe this spare has to be added to the
	 * array immediately.
	 */
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	md_new_event(mddev);
	return 0;

abort_unbind_export:
	unbind_rdev_from_array(rdev);

abort_export:
	export_rdev(rdev);
	return err;
}
static int set_bitmap_file(mddev_t *mddev, int fd)
{
	int err;

	if (mddev->pers) {
		if (!mddev->pers->quiesce)
			return -EBUSY;
		if (mddev->recovery || mddev->sync_thread)
			return -EBUSY;
		/* we should be able to change the bitmap.. */
	}

	if (fd >= 0) {
		if (mddev->bitmap)
			return -EEXIST; /* cannot add when bitmap is present */
		mddev->bitmap_file = fget(fd);

		if (mddev->bitmap_file == NULL) {
			printk(KERN_ERR "%s: error: failed to get bitmap file\n",
			       mdname(mddev));
			return -EBADF;
		}

		err = deny_bitmap_write_access(mddev->bitmap_file);
		if (err) {
			printk(KERN_ERR "%s: error: bitmap file is already in use\n",
			       mdname(mddev));
			fput(mddev->bitmap_file);
			mddev->bitmap_file = NULL;
			return err;
		}
		mddev->bitmap_offset = 0; /* file overrides offset */
	} else if (mddev->bitmap == NULL)
		return -ENOENT; /* cannot remove what isn't there */
	err = 0;
	if (mddev->pers) {
		mddev->pers->quiesce(mddev, 1);
		if (fd >= 0)
			err = bitmap_create(mddev);
		if (fd < 0 || err) {
			bitmap_destroy(mddev);
			fd = -1; /* make sure to put the file */
		}
		mddev->pers->quiesce(mddev, 0);
	}
	if (fd < 0) {
		if (mddev->bitmap_file) {
			restore_bitmap_write_access(mddev->bitmap_file);
			fput(mddev->bitmap_file);
		}
		mddev->bitmap_file = NULL;
	}

	return err;
}
/*
 * set_array_info is used two different ways
 * The original usage is when creating a new array.
 * In this usage, raid_disks is > 0 and it together with
 *  level, size, not_persistent,layout,chunksize determine the
 *  shape of the array.
 *  This will always create an array with a type-0.90.0 superblock.
 * The newer usage is when assembling an array.
 *  In this case raid_disks will be 0, and the major_version field is
 *  used to determine which style super-blocks are to be found on the devices.
 *  The minor and patch _version numbers are also kept in case the
 *  super_block handler wishes to interpret them.
 */
static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
{

	if (info->raid_disks == 0) {
		/* just setting version number for superblock loading */
		if (info->major_version < 0 ||
		    info->major_version >= ARRAY_SIZE(super_types) ||
		    super_types[info->major_version].name == NULL) {
			/* maybe try to auto-load a module? */
			printk(KERN_INFO
				"md: superblock version %d not known\n",
				info->major_version);
			return -EINVAL;
		}
		mddev->major_version = info->major_version;
		mddev->minor_version = info->minor_version;
		mddev->patch_version = info->patch_version;
		mddev->persistent = !info->not_persistent;
		return 0;
	}
	mddev->major_version = MD_MAJOR_VERSION;
	mddev->minor_version = MD_MINOR_VERSION;
	mddev->patch_version = MD_PATCHLEVEL_VERSION;
	mddev->ctime         = get_seconds();

	mddev->level         = info->level;
	mddev->clevel[0]     = 0;
	mddev->size          = info->size;
	mddev->raid_disks    = info->raid_disks;
	/* don't set md_minor, it is determined by which /dev/md* was
	 * opened
	 */
	if (info->state & (1<<MD_SB_CLEAN))
		mddev->recovery_cp = MaxSector;
	else
		mddev->recovery_cp = 0;
	mddev->persistent    = ! info->not_persistent;
	mddev->external	     = 0;

	mddev->layout        = info->layout;
	mddev->chunk_size    = info->chunk_size;

	mddev->max_disks     = MD_SB_DISKS;

	if (mddev->persistent)
		mddev->flags         = 0;
	set_bit(MD_CHANGE_DEVS, &mddev->flags);

	mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
	mddev->bitmap_offset = 0;

	mddev->reshape_position = MaxSector;

	/*
	 * Generate a 128 bit UUID
	 */
	get_random_bytes(mddev->uuid, 16);

	mddev->new_level = mddev->level;
	mddev->new_chunk = mddev->chunk_size;
	mddev->new_layout = mddev->layout;
	mddev->delta_disks = 0;

	return 0;
}
static int update_size(mddev_t *mddev, unsigned long size)
{
	mdk_rdev_t * rdev;
	int rv;
	struct list_head *tmp;
	int fit = (size == 0);

	if (mddev->pers->resize == NULL)
		return -EINVAL;
	/* The "size" is the amount of each device that is used.
	 * This can only make sense for arrays with redundancy.
	 * linear and raid0 always use whatever space is available
	 * We can only consider changing the size if no resync
	 * or reconstruction is happening, and if the new size
	 * is acceptable. It must fit before the sb_offset or,
	 * if that is <data_offset, it must fit before the
	 * size of each device.
	 * If size is zero, we find the largest size that fits.
	 */
	if (mddev->sync_thread)
		return -EBUSY;
	rdev_for_each(rdev, tmp, mddev) {
		sector_t avail;
		avail = rdev->size * 2;

		if (fit && (size == 0 || size > avail/2))
			size = avail/2;
		if (avail < ((sector_t)size << 1))
			return -ENOSPC;
	}
	rv = mddev->pers->resize(mddev, (sector_t)size *2);
	if (!rv) {
		struct block_device *bdev;

		bdev = bdget_disk(mddev->gendisk, 0);
		if (bdev) {
			mutex_lock(&bdev->bd_inode->i_mutex);
			i_size_write(bdev->bd_inode, (loff_t)mddev->array_size << 10);
			mutex_unlock(&bdev->bd_inode->i_mutex);
			bdput(bdev);
		}
	}
	return rv;
}
static int update_raid_disks(mddev_t *mddev, int raid_disks)
{
	int rv;
	/* change the number of raid disks */
	if (mddev->pers->check_reshape == NULL)
		return -EINVAL;
	if (raid_disks <= 0 ||
	    raid_disks >= mddev->max_disks)
		return -EINVAL;
	if (mddev->sync_thread || mddev->reshape_position != MaxSector)
		return -EBUSY;
	mddev->delta_disks = raid_disks - mddev->raid_disks;

	rv = mddev->pers->check_reshape(mddev);
	return rv;
}
/*
 * update_array_info is used to change the configuration of an
 * on-line array.
 * The version, ctime,level,size,raid_disks,not_persistent, layout,chunk_size
 * fields in the info are checked against the array.
 * Any differences that cannot be handled will cause an error.
 * Normally, only one change can be managed at a time.
 */
static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
{
	int rv = 0;
	int cnt = 0;
	int state = 0;

	/* calculate expected state,ignoring low bits */
	if (mddev->bitmap && mddev->bitmap_offset)
		state |= (1 << MD_SB_BITMAP_PRESENT);

	if (mddev->major_version != info->major_version ||
	    mddev->minor_version != info->minor_version ||
/* mddev->patch_version != info->patch_version || */
	    mddev->ctime         != info->ctime         ||
	    mddev->level         != info->level         ||
/* mddev->layout        != info->layout        || */
	    !mddev->persistent	 != info->not_persistent||
	    mddev->chunk_size    != info->chunk_size    ||
	    /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
	    ((state^info->state) & 0xfffffe00)
		)
		return -EINVAL;
	/* Check there is only one change */
	if (info->size >= 0 && mddev->size != info->size) cnt++;
	if (mddev->raid_disks != info->raid_disks) cnt++;
	if (mddev->layout != info->layout) cnt++;
	if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) cnt++;
	if (cnt == 0) return 0;
	if (cnt > 1) return -EINVAL;

	if (mddev->layout != info->layout) {
		/* Change layout
		 * we don't need to do anything at the md level, the
		 * personality will take care of it all.
		 */
		if (mddev->pers->reconfig == NULL)
			return -EINVAL;
		else
			return mddev->pers->reconfig(mddev, info->layout, -1);
	}
	if (info->size >= 0 && mddev->size != info->size)
		rv = update_size(mddev, info->size);

	if (mddev->raid_disks    != info->raid_disks)
		rv = update_raid_disks(mddev, info->raid_disks);

	if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
		if (mddev->pers->quiesce == NULL)
			return -EINVAL;
		if (mddev->recovery || mddev->sync_thread)
			return -EBUSY;
		if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
			/* add the bitmap */
			if (mddev->bitmap)
				return -EEXIST;
			if (mddev->default_bitmap_offset == 0)
				return -EINVAL;
			mddev->bitmap_offset = mddev->default_bitmap_offset;
			mddev->pers->quiesce(mddev, 1);
			rv = bitmap_create(mddev);
			if (rv)
				bitmap_destroy(mddev);
			mddev->pers->quiesce(mddev, 0);
		} else {
			/* remove the bitmap */
			if (!mddev->bitmap)
				return -ENOENT;
			if (mddev->bitmap->file)
				return -EINVAL;
			mddev->pers->quiesce(mddev, 1);
			bitmap_destroy(mddev);
			mddev->pers->quiesce(mddev, 0);
			mddev->bitmap_offset = 0;
		}
	}
	md_update_sb(mddev, 1);
	return rv;
}
static int set_disk_faulty(mddev_t *mddev, dev_t dev)
{
	mdk_rdev_t *rdev;

	if (mddev->pers == NULL)
		return -ENODEV;

	rdev = find_rdev(mddev, dev);
	if (!rdev)
		return -ENODEV;

	md_error(mddev, rdev);
	return 0;
}
static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	mddev_t *mddev = bdev->bd_disk->private_data;

	geo->heads = 2;
	geo->sectors = 4;
	geo->cylinders = get_capacity(mddev->gendisk) / 8;
	return 0;
}
static int md_ioctl(struct inode *inode, struct file *file,
			unsigned int cmd, unsigned long arg)
{
	int err = 0;
	void __user *argp = (void __user *)arg;
	mddev_t *mddev = NULL;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	/*
	 * Commands dealing with the RAID driver but not any
	 * particular array:
	 */
	switch (cmd)
	{
		case RAID_VERSION:
			err = get_version(argp);
			goto done;

		case PRINT_RAID_DEBUG:
			err = 0;
			md_print_devices();
			goto done;

#ifndef MODULE
		case RAID_AUTORUN:
			err = 0;
			autostart_arrays(arg);
			goto done;
#endif
		default:;
	}

	/*
	 * Commands creating/starting a new array:
	 */

	mddev = inode->i_bdev->bd_disk->private_data;

	if (!mddev) {
		BUG();
		goto abort;
	}

	err = mddev_lock(mddev);
	if (err) {
		printk(KERN_INFO
			"md: ioctl lock interrupted, reason %d, cmd %d\n",
			err, cmd);
		goto abort;
	}

	switch (cmd)
	{
		case SET_ARRAY_INFO:
			{
				mdu_array_info_t info;
				if (!arg)
					memset(&info, 0, sizeof(info));
				else if (copy_from_user(&info, argp, sizeof(info))) {
					err = -EFAULT;
					goto abort_unlock;
				}
				if (mddev->pers) {
					err = update_array_info(mddev, &info);
					if (err) {
						printk(KERN_WARNING "md: couldn't update"
						       " array info. %d\n", err);
						goto abort_unlock;
					}
					goto done_unlock;
				}
				if (!list_empty(&mddev->disks)) {
					printk(KERN_WARNING
					       "md: array %s already has disks!\n",
					       mdname(mddev));
					err = -EBUSY;
					goto abort_unlock;
				}
				if (mddev->raid_disks) {
					printk(KERN_WARNING
					       "md: array %s already initialised!\n",
					       mdname(mddev));
					err = -EBUSY;
					goto abort_unlock;
				}
				err = set_array_info(mddev, &info);
				if (err) {
					printk(KERN_WARNING "md: couldn't set"
					       " array info. %d\n", err);
					goto abort_unlock;
				}
			}
			goto done_unlock;

		default:;
	}

	/*
	 * Commands querying/configuring an existing array:
	 */
	/* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
	 * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
	if ((!mddev->raid_disks && !mddev->external)
	    && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
	    && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
	    && cmd != GET_BITMAP_FILE) {
		err = -ENODEV;
		goto abort_unlock;
	}

	/*
	 * Commands even a read-only array can execute:
	 */
	switch (cmd)
	{
		case GET_ARRAY_INFO:
			err = get_array_info(mddev, argp);
			goto done_unlock;

		case GET_BITMAP_FILE:
			err = get_bitmap_file(mddev, argp);
			goto done_unlock;

		case GET_DISK_INFO:
			err = get_disk_info(mddev, argp);
			goto done_unlock;

		case RESTART_ARRAY_RW:
			err = restart_array(mddev);
			goto done_unlock;

		case STOP_ARRAY:
			err = do_md_stop (mddev, 0);
			goto done_unlock;

		case STOP_ARRAY_RO:
			err = do_md_stop (mddev, 1);
			goto done_unlock;

	/*
	 * We have a problem here : there is no easy way to give a CHS
	 * virtual geometry. We currently pretend that we have a 2 heads
	 * 4 sectors (with a BIG number of cylinders...). This drives
	 * dosfs just mad... ;-)
	 */
	}

	/*
	 * The remaining ioctls are changing the state of the
	 * superblock, so we do not allow them on read-only arrays.
	 * However non-MD ioctls (e.g. get-size) will still come through
	 * here and hit the 'default' below, so only disallow
	 * 'md' ioctls, and switch to rw mode if started auto-readonly.
	 */
	if (_IOC_TYPE(cmd) == MD_MAJOR &&
	    mddev->ro && mddev->pers) {
		if (mddev->ro == 2) {
			mddev->ro = 0;
			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			md_wakeup_thread(mddev->thread);
		} else {
			err = -EROFS;
			goto abort_unlock;
		}
	}

	switch (cmd)
	{
		case ADD_NEW_DISK:
		{
			mdu_disk_info_t info;
			if (copy_from_user(&info, argp, sizeof(info)))
				err = -EFAULT;
			else
				err = add_new_disk(mddev, &info);
			goto done_unlock;
		}

		case HOT_REMOVE_DISK:
			err = hot_remove_disk(mddev, new_decode_dev(arg));
			goto done_unlock;

		case HOT_ADD_DISK:
			err = hot_add_disk(mddev, new_decode_dev(arg));
			goto done_unlock;

		case SET_DISK_FAULTY:
			err = set_disk_faulty(mddev, new_decode_dev(arg));
			goto done_unlock;

		case RUN_ARRAY:
			err = do_md_run (mddev);
			goto done_unlock;

		case SET_BITMAP_FILE:
			err = set_bitmap_file(mddev, (int)arg);
			goto done_unlock;

		default:
			err = -EINVAL;
			goto abort_unlock;
	}

done_unlock:
abort_unlock:
	mddev_unlock(mddev);

	return err;
done:
	if (err)
		MD_BUG();
abort:
	return err;
}
static int md_open(struct inode *inode, struct file *file)
{
	/*
	 * Succeed if we can lock the mddev, which confirms that
	 * it isn't being stopped right now.
	 */
	mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
	int err;

	if ((err = mutex_lock_interruptible_nested(&mddev->reconfig_mutex, 1)))
		goto out;

	err = 0;
	mddev_get(mddev);
	mddev_unlock(mddev);

	check_disk_change(inode->i_bdev);
 out:
	return err;
}

static int md_release(struct inode *inode, struct file * file)
{
	mddev_t *mddev = inode->i_bdev->bd_disk->private_data;

	BUG_ON(!mddev);
	mddev_put(mddev);

	return 0;
}

static int md_media_changed(struct gendisk *disk)
{
	mddev_t *mddev = disk->private_data;

	return mddev->changed;
}

static int md_revalidate(struct gendisk *disk)
{
	mddev_t *mddev = disk->private_data;

	mddev->changed = 0;
	return 0;
}
static struct block_device_operations md_fops =
{
	.owner		= THIS_MODULE,
	.open		= md_open,
	.release	= md_release,
	.ioctl		= md_ioctl,
	.getgeo		= md_getgeo,
	.media_changed	= md_media_changed,
	.revalidate_disk= md_revalidate,
};
static int md_thread(void * arg)
{
	mdk_thread_t *thread = arg;

	/*
	 * md_thread is a 'system-thread', its priority should be very
	 * high. We avoid resource deadlocks individually in each
	 * raid personality. (RAID5 does preallocation) We also use RR and
	 * the very same RT priority as kswapd, thus we will never get
	 * into a priority inversion deadlock.
	 *
	 * we definitely have to have equal or higher priority than
	 * bdflush, otherwise bdflush will deadlock if there are too
	 * many dirty RAID5 blocks.
	 */

	allow_signal(SIGKILL);
	while (!kthread_should_stop()) {

		/* We need to wait INTERRUPTIBLE so that
		 * we don't add to the load-average.
		 * That means we need to be sure no signals are
		 * pending
		 */
		if (signal_pending(current))
			flush_signals(current);

		wait_event_interruptible_timeout
			(thread->wqueue,
			 test_bit(THREAD_WAKEUP, &thread->flags)
			 || kthread_should_stop(),
			 thread->timeout);

		clear_bit(THREAD_WAKEUP, &thread->flags);

		thread->run(thread->mddev);
	}

	return 0;
}

void md_wakeup_thread(mdk_thread_t *thread)
{
	if (thread) {
		dprintk("md: waking up MD thread %s.\n", thread->tsk->comm);
		set_bit(THREAD_WAKEUP, &thread->flags);
		wake_up(&thread->wqueue);
	}
}
5046 mdk_thread_t
*md_register_thread(void (*run
) (mddev_t
*), mddev_t
*mddev
,
5049 mdk_thread_t
*thread
;
5051 thread
= kzalloc(sizeof(mdk_thread_t
), GFP_KERNEL
);
5055 init_waitqueue_head(&thread
->wqueue
);
5058 thread
->mddev
= mddev
;
5059 thread
->timeout
= MAX_SCHEDULE_TIMEOUT
;
5060 thread
->tsk
= kthread_run(md_thread
, thread
, name
, mdname(thread
->mddev
));
5061 if (IS_ERR(thread
->tsk
)) {
5068 void md_unregister_thread(mdk_thread_t
*thread
)
5070 dprintk("interrupting MD-thread pid %d\n", task_pid_nr(thread
->tsk
));
5072 kthread_stop(thread
->tsk
);
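/*
 * Example (illustrative sketch, not from a real personality): a raid
 * personality typically creates its service thread when the array is
 * started and tears it down on stop; "raidXd" is a placeholder for the
 * personality's daemon function:
 *
 *	mddev->thread = md_register_thread(raidXd, mddev, "%s_raidX");
 *	if (!mddev->thread)
 *		return -ENOMEM;
 *	...
 *	md_unregister_thread(mddev->thread);
 *	mddev->thread = NULL;
 */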
void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
{
	if (!mddev) {
		MD_BUG();
		return;
	}

	if (!rdev || test_bit(Faulty, &rdev->flags))
		return;

	if (mddev->external)
		set_bit(Blocked, &rdev->flags);

	dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n",
		mdname(mddev),
		MAJOR(rdev->bdev->bd_dev), MINOR(rdev->bdev->bd_dev),
		__builtin_return_address(0),__builtin_return_address(1),
		__builtin_return_address(2),__builtin_return_address(3));

	if (!mddev->pers->error_handler)
		return;
	mddev->pers->error_handler(mddev,rdev);
	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	md_new_event_inintr(mddev);
}
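/*
 * Example (illustrative sketch; names invented): personalities normally
 * call md_error() from a bio completion handler when a member device
 * fails a request, letting the code above mark the rdev and kick off
 * recovery:
 *
 *	static void example_end_write(struct bio *bio, int error)
 *	{
 *		mdk_rdev_t *rdev = bio->bi_private;
 *
 *		if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
 *			md_error(rdev->mddev, rdev);
 *		rdev_dec_pending(rdev, rdev->mddev);
 *	}
 */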
/* seq_file implementation for /proc/mdstat */

static void status_unused(struct seq_file *seq)
{
	int i = 0;
	mdk_rdev_t *rdev;
	struct list_head *tmp;

	seq_printf(seq, "unused devices: ");

	rdev_for_each_list(rdev, tmp, pending_raid_disks) {
		char b[BDEVNAME_SIZE];
		i++;
		seq_printf(seq, "%s ",
			   bdevname(rdev->bdev,b));
	}
	if (!i)
		seq_printf(seq, "<none>");

	seq_printf(seq, "\n");
}
static void status_resync(struct seq_file *seq, mddev_t * mddev)
{
	sector_t max_blocks, resync, res;
	unsigned long dt, db, rt;
	int scale;
	unsigned int per_milli;

	resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active))/2;

	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
		max_blocks = mddev->resync_max_sectors >> 1;
	else
		max_blocks = mddev->size;

	/*
	 * Should not happen.
	 */
	if (!max_blocks) {
		MD_BUG();
		return;
	}
	/* Pick 'scale' such that (resync>>scale)*1000 will fit
	 * in a sector_t, and (max_blocks>>scale) will fit in a
	 * u32, as those are the requirements for sector_div.
	 * Thus 'scale' must be at least 10.
	 */
	scale = 10;
	if (sizeof(sector_t) > sizeof(unsigned long)) {
		while ( max_blocks/2 > (1ULL<<(scale+32)))
			scale++;
	}
	res = (resync>>scale)*1000;
	sector_div(res, (u32)((max_blocks>>scale)+1));

	per_milli = res;
	{
		int i, x = per_milli/50, y = 20-x;
		seq_printf(seq, "[");
		for (i = 0; i < x; i++)
			seq_printf(seq, "=");
		seq_printf(seq, ">");
		for (i = 0; i < y; i++)
			seq_printf(seq, ".");
		seq_printf(seq, "] ");
	}
	seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
		   (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ?
		    "reshape" :
		    (test_bit(MD_RECOVERY_CHECK, &mddev->recovery) ?
		     "check" :
		     (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
		      "resync" : "recovery"))),
		   per_milli/10, per_milli % 10,
		   (unsigned long long) resync,
		   (unsigned long long) max_blocks);

	/*
	 * We do not want to overflow, so the order of operands and
	 * the * 100 / 100 trick are important. We do a +1 to be
	 * safe against division by zero. We only estimate anyway.
	 *
	 * dt: time from mark until now
	 * db: blocks written from mark until now
	 * rt: remaining time
	 */
	dt = ((jiffies - mddev->resync_mark) / HZ);
	if (!dt) dt++;
	db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active))
		- mddev->resync_mark_cnt;
	rt = (dt * ((unsigned long)(max_blocks-resync) / (db/2/100+1)))/100;

	seq_printf(seq, " finish=%lu.%lumin", rt / 60, (rt % 60)/6);

	seq_printf(seq, " speed=%ldK/sec", db/2/dt);
}
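/*
 * For reference, the progress line assembled above renders roughly as
 * follows in /proc/mdstat (all values invented for illustration):
 *
 *	[=====>...............]  resync = 26.2% (12345678/47101184)
 *	finish=43.5min speed=13312K/sec
 */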
static void *md_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct list_head *tmp;
	loff_t l = *pos;
	mddev_t *mddev;

	if (l >= 0x10000)
		return NULL;
	if (!l--)
		/* header */
		return (void*)1;

	spin_lock(&all_mddevs_lock);
	list_for_each(tmp,&all_mddevs)
		if (!l--) {
			mddev = list_entry(tmp, mddev_t, all_mddevs);
			mddev_get(mddev);
			spin_unlock(&all_mddevs_lock);
			return mddev;
		}
	spin_unlock(&all_mddevs_lock);
	if (!l--)
		return (void*)2;/* tail */
	return NULL;
}

static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct list_head *tmp;
	mddev_t *next_mddev, *mddev = v;

	++*pos;
	if (v == (void*)2)
		return NULL;

	spin_lock(&all_mddevs_lock);
	if (v == (void*)1)
		tmp = all_mddevs.next;
	else
		tmp = mddev->all_mddevs.next;
	if (tmp != &all_mddevs)
		next_mddev = mddev_get(list_entry(tmp,mddev_t,all_mddevs));
	else {
		next_mddev = (void*)2;
		*pos = 0x10000;
	}
	spin_unlock(&all_mddevs_lock);

	if (v != (void*)1)
		mddev_put(mddev);
	return next_mddev;
}

static void md_seq_stop(struct seq_file *seq, void *v)
{
	mddev_t *mddev = v;

	if (mddev && v != (void*)1 && v != (void*)2)
		mddev_put(mddev);
}

struct mdstat_info {
	int event;
};
static int md_seq_show(struct seq_file *seq, void *v)
{
	mddev_t *mddev = v;
	sector_t size;
	struct list_head *tmp2;
	mdk_rdev_t *rdev;
	struct mdstat_info *mi = seq->private;
	struct bitmap *bitmap;

	if (v == (void*)1) {
		struct mdk_personality *pers;
		seq_printf(seq, "Personalities : ");
		spin_lock(&pers_lock);
		list_for_each_entry(pers, &pers_list, list)
			seq_printf(seq, "[%s] ", pers->name);

		spin_unlock(&pers_lock);
		seq_printf(seq, "\n");
		mi->event = atomic_read(&md_event_count);
		return 0;
	}
	if (v == (void*)2) {
		status_unused(seq);
		return 0;
	}

	if (mddev_lock(mddev) < 0)
		return -EINTR;

	if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
		seq_printf(seq, "%s : %sactive", mdname(mddev),
			   mddev->pers ? "" : "in");
		if (mddev->pers) {
			if (mddev->ro == 1)
				seq_printf(seq, " (read-only)");
			if (mddev->ro == 2)
				seq_printf(seq, " (auto-read-only)");
			seq_printf(seq, " %s", mddev->pers->name);
		}

		size = 0;
		rdev_for_each(rdev, tmp2, mddev) {
			char b[BDEVNAME_SIZE];
			seq_printf(seq, " %s[%d]",
				   bdevname(rdev->bdev,b), rdev->desc_nr);
			if (test_bit(WriteMostly, &rdev->flags))
				seq_printf(seq, "(W)");
			if (test_bit(Faulty, &rdev->flags)) {
				seq_printf(seq, "(F)");
				continue;
			} else if (rdev->raid_disk < 0)
				seq_printf(seq, "(S)"); /* spare */
			size += rdev->size;
		}

		if (!list_empty(&mddev->disks)) {
			if (mddev->pers)
				seq_printf(seq, "\n      %llu blocks",
					   (unsigned long long)mddev->array_size);
			else
				seq_printf(seq, "\n      %llu blocks",
					   (unsigned long long)size);
		}
		if (mddev->persistent) {
			if (mddev->major_version != 0 ||
			    mddev->minor_version != 90) {
				seq_printf(seq," super %d.%d",
					   mddev->major_version,
					   mddev->minor_version);
			}
		} else if (mddev->external)
			seq_printf(seq, " super external:%s",
				   mddev->metadata_type);
		else
			seq_printf(seq, " super non-persistent");

		if (mddev->pers) {
			mddev->pers->status(seq, mddev);
			seq_printf(seq, "\n      ");
			if (mddev->pers->sync_request) {
				if (mddev->curr_resync > 2) {
					status_resync(seq, mddev);
					seq_printf(seq, "\n      ");
				} else if (mddev->curr_resync == 1 ||
					   mddev->curr_resync == 2)
					seq_printf(seq, "\tresync=DELAYED\n      ");
				else if (mddev->recovery_cp < MaxSector)
					seq_printf(seq, "\tresync=PENDING\n      ");
			}
		} else
			seq_printf(seq, "\n       ");

		if ((bitmap = mddev->bitmap)) {
			unsigned long chunk_kb;
			unsigned long flags;
			spin_lock_irqsave(&bitmap->lock, flags);
			chunk_kb = bitmap->chunksize >> 10;
			seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
				   "%lu%s chunk",
				   bitmap->pages - bitmap->missing_pages,
				   bitmap->pages,
				   (bitmap->pages - bitmap->missing_pages)
				   << (PAGE_SHIFT - 10),
				   chunk_kb ? chunk_kb : bitmap->chunksize,
				   chunk_kb ? "KB" : "B");
			if (bitmap->file) {
				seq_printf(seq, ", file: ");
				seq_path(seq, &bitmap->file->f_path, " \t\n");
			}

			seq_printf(seq, "\n");
			spin_unlock_irqrestore(&bitmap->lock, flags);
		}

		seq_printf(seq, "\n");

	}
	mddev_unlock(mddev);

	return 0;
}
static struct seq_operations md_seq_ops = {
	.start  = md_seq_start,
	.next   = md_seq_next,
	.stop   = md_seq_stop,
	.show   = md_seq_show,
};

static int md_seq_open(struct inode *inode, struct file *file)
{
	int error;
	struct mdstat_info *mi = kmalloc(sizeof(*mi), GFP_KERNEL);
	if (mi == NULL)
		return -ENOMEM;

	error = seq_open(file, &md_seq_ops);
	if (error)
		kfree(mi);
	else {
		struct seq_file *p = file->private_data;
		p->private = mi;
		mi->event = atomic_read(&md_event_count);
	}
	return error;
}
static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
{
	struct seq_file *m = filp->private_data;
	struct mdstat_info *mi = m->private;
	int mask;

	poll_wait(filp, &md_event_waiters, wait);

	/* always allow read */
	mask = POLLIN | POLLRDNORM;

	if (mi->event != atomic_read(&md_event_count))
		mask |= POLLERR | POLLPRI;
	return mask;
}
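/*
 * Example (userspace sketch, not part of the driver): mdstat_poll() is
 * what lets a monitor sleep until the array state changes instead of
 * re-reading /proc/mdstat in a loop:
 *
 *	#include <poll.h>
 *	#include <fcntl.h>
 *
 *	int fd = open("/proc/mdstat", O_RDONLY);
 *	struct pollfd pfd = { .fd = fd, .events = POLLPRI };
 *	poll(&pfd, 1, -1);
 *
 * poll() reports POLLERR|POLLPRI whenever md_new_event() has bumped
 * md_event_count since the file was opened; the caller then re-reads
 * the file to see what changed.
 */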
static const struct file_operations md_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= md_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
	.poll		= mdstat_poll,
};
int register_md_personality(struct mdk_personality *p)
{
	spin_lock(&pers_lock);
	list_add_tail(&p->list, &pers_list);
	printk(KERN_INFO "md: %s personality registered for level %d\n",
	       p->name, p->level);
	spin_unlock(&pers_lock);
	return 0;
}

int unregister_md_personality(struct mdk_personality *p)
{
	printk(KERN_INFO "md: %s personality unregistered\n", p->name);
	spin_lock(&pers_lock);
	list_del_init(&p->list);
	spin_unlock(&pers_lock);
	return 0;
}
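/*
 * Example (illustrative sketch; "example_personality" is a placeholder):
 * a personality module pairs these two calls in its init/exit hooks:
 *
 *	static int __init example_init(void)
 *	{
 *		return register_md_personality(&example_personality);
 *	}
 *	static void __exit example_exit(void)
 *	{
 *		unregister_md_personality(&example_personality);
 *	}
 *	module_init(example_init);
 *	module_exit(example_exit);
 */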
static int is_mddev_idle(mddev_t *mddev)
{
	mdk_rdev_t *rdev;
	struct list_head *tmp;
	int idle;
	long curr_events;

	idle = 1;
	rdev_for_each(rdev, tmp, mddev) {
		struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
		curr_events = disk_stat_read(disk, sectors[0]) +
				disk_stat_read(disk, sectors[1]) -
				atomic_read(&disk->sync_io);
		/* sync IO will cause sync_io to increase before the disk_stats
		 * as sync_io is counted when a request starts, and
		 * disk_stats is counted when it completes.
		 * So resync activity will cause curr_events to be smaller than
		 * when there was no such activity.
		 * non-sync IO will cause disk_stat to increase without
		 * increasing sync_io so curr_events will (eventually)
		 * be larger than it was before.  Once it becomes
		 * substantially larger, the test below will cause
		 * the array to appear non-idle, and resync will slow
		 * down.
		 * If there is a lot of outstanding resync activity when
		 * we set last_event to curr_events, then all that activity
		 * completing might cause the array to appear non-idle
		 * and resync will be slowed down even though there might
		 * not have been non-resync activity.  This will only
		 * happen once though.  'last_events' will soon reflect
		 * the state where there is little or no outstanding
		 * resync requests, and further resync activity will
		 * always make curr_events less than last_events.
		 */
		if (curr_events - rdev->last_events > 4096) {
			rdev->last_events = curr_events;
			idle = 0;
		}
	}
	return idle;
}
void md_done_sync(mddev_t *mddev, int blocks, int ok)
{
	/* another "blocks" (512 byte) blocks have been synced */
	atomic_sub(blocks, &mddev->recovery_active);
	wake_up(&mddev->recovery_wait);
	if (!ok) {
		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
		/* stop recovery, signal do_sync ... */
	}
}
/* md_write_start(mddev, bi)
 * If we need to update some array metadata (e.g. 'active' flag
 * in superblock) before writing, schedule a superblock update
 * and wait for it to complete.
 */
void md_write_start(mddev_t *mddev, struct bio *bi)
{
	if (bio_data_dir(bi) != WRITE)
		return;

	BUG_ON(mddev->ro == 1);
	if (mddev->ro == 2) {
		/* need to switch to read/write */
		mddev->ro = 0;
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
		md_wakeup_thread(mddev->sync_thread);
	}
	atomic_inc(&mddev->writes_pending);
	if (mddev->safemode == 1)
		mddev->safemode = 0;
	if (mddev->in_sync) {
		spin_lock_irq(&mddev->write_lock);
		if (mddev->in_sync) {
			mddev->in_sync = 0;
			set_bit(MD_CHANGE_CLEAN, &mddev->flags);
			md_wakeup_thread(mddev->thread);
		}
		spin_unlock_irq(&mddev->write_lock);
		sysfs_notify(&mddev->kobj, NULL, "array_state");
	}
	wait_event(mddev->sb_wait,
		   !test_bit(MD_CHANGE_CLEAN, &mddev->flags) &&
		   !test_bit(MD_CHANGE_PENDING, &mddev->flags));
}
void md_write_end(mddev_t *mddev)
{
	if (atomic_dec_and_test(&mddev->writes_pending)) {
		if (mddev->safemode == 2)
			md_wakeup_thread(mddev->thread);
		else if (mddev->safemode_delay)
			mod_timer(&mddev->safemode_timer,
				  jiffies + mddev->safemode_delay);
	}
}
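/*
 * Example (illustrative sketch; names invented): a personality brackets
 * every user write with the two calls above, so writes_pending is
 * non-zero exactly while writes are in flight and the superblock is
 * marked 'active' before any data hits the member devices:
 *
 *	in the make_request path:
 *		md_write_start(mddev, bio);	(may block on a sb update)
 *		...map and submit the bio to member devices...
 *
 *	in the completion path, once the last member write finishes:
 *		md_write_end(mddev);
 */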
/* md_allow_write(mddev)
 * Calling this ensures that the array is marked 'active' so that writes
 * may proceed without blocking.  It is important to call this before
 * attempting a GFP_KERNEL allocation while holding the mddev lock.
 * Must be called with mddev_lock held.
 */
void md_allow_write(mddev_t *mddev)
{
	if (!mddev->pers)
		return;
	if (mddev->ro)
		return;
	if (!mddev->pers->sync_request)
		return;

	spin_lock_irq(&mddev->write_lock);
	if (mddev->in_sync) {
		mddev->in_sync = 0;
		set_bit(MD_CHANGE_CLEAN, &mddev->flags);
		if (mddev->safemode_delay &&
		    mddev->safemode == 0)
			mddev->safemode = 1;
		spin_unlock_irq(&mddev->write_lock);
		md_update_sb(mddev, 0);

		sysfs_notify(&mddev->kobj, NULL, "array_state");
		/* wait for the dirty state to be recorded in the metadata */
		wait_event(mddev->sb_wait,
			   !test_bit(MD_CHANGE_CLEAN, &mddev->flags) &&
			   !test_bit(MD_CHANGE_PENDING, &mddev->flags));
	} else
		spin_unlock_irq(&mddev->write_lock);
}
EXPORT_SYMBOL_GPL(md_allow_write);
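/*
 * Example (illustrative sketch): the intended caller pattern is a
 * thread that holds the mddev lock and is about to perform a
 * GFP_KERNEL allocation, which could otherwise block waiting for
 * writeback to this same array:
 *
 *	mddev_lock(mddev);
 *	md_allow_write(mddev);
 *	new = kzalloc(size, GFP_KERNEL);
 *	...
 *	mddev_unlock(mddev);
 */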
#define SYNC_MARKS	10
#define	SYNC_MARK_STEP	(3*HZ)
void md_do_sync(mddev_t *mddev)
{
	mddev_t *mddev2;
	unsigned int currspeed = 0,
		 window;
	sector_t max_sectors, j, io_sectors;
	unsigned long mark[SYNC_MARKS];
	sector_t mark_cnt[SYNC_MARKS];
	int last_mark, m;
	struct list_head *tmp;
	sector_t last_check;
	int skipped = 0;
	struct list_head *rtmp;
	mdk_rdev_t *rdev;
	char *desc;

	/* just in case thread restarts... */
	if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
		return;
	if (mddev->ro) /* never try to sync a read-only array */
		return;

	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
		if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
			desc = "data-check";
		else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
			desc = "requested-resync";
		else
			desc = "resync";
	} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
		desc = "reshape";
	else
		desc = "recovery";

	/* we overload curr_resync somewhat here.
	 * 0 == not engaged in resync at all
	 * 2 == checking that there is no conflict with another sync
	 * 1 == like 2, but have yielded to allow conflicting resync to
	 *		commence
	 * other == active in resync - this many blocks
	 *
	 * Before starting a resync we must have set curr_resync to
	 * 2, and then checked that every "conflicting" array has curr_resync
	 * less than ours.  When we find one that is the same or higher
	 * we wait on resync_wait.  To avoid deadlock, we reduce curr_resync
	 * to 1 if we choose to yield (based arbitrarily on address of mddev
	 * structure).  This will mean we have to start checking from the
	 * beginning again.
	 */

	do {
		mddev->curr_resync = 2;
	try_again:
		if (kthread_should_stop()) {
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			goto skip;
		}
		for_each_mddev(mddev2, tmp) {
			if (mddev2 == mddev)
				continue;
			if (!mddev->parallel_resync
			&&  mddev2->curr_resync
			&&  match_mddev_units(mddev, mddev2)) {
				DEFINE_WAIT(wq);
				if (mddev < mddev2 && mddev->curr_resync == 2) {
					/* arbitrarily yield */
					mddev->curr_resync = 1;
					wake_up(&resync_wait);
				}
				if (mddev > mddev2 && mddev->curr_resync == 1)
					/* no need to wait here, we can wait the next
					 * time 'round when curr_resync == 2
					 */
					continue;
				prepare_to_wait(&resync_wait, &wq, TASK_UNINTERRUPTIBLE);
				if (!kthread_should_stop() &&
				    mddev2->curr_resync >= mddev->curr_resync) {
					printk(KERN_INFO "md: delaying %s of %s"
					       " until %s has finished (they"
					       " share one or more physical units)\n",
					       desc, mdname(mddev), mdname(mddev2));
					mddev_put(mddev2);
					schedule();
					finish_wait(&resync_wait, &wq);
					goto try_again;
				}
				finish_wait(&resync_wait, &wq);
			}
		}
	} while (mddev->curr_resync < 2);
	j = 0;
	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
		/* resync follows the size requested by the personality,
		 * which defaults to physical size, but can be virtual size
		 */
		max_sectors = mddev->resync_max_sectors;
		mddev->resync_mismatches = 0;
		/* we don't use the checkpoint if there's a bitmap */
		if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
			j = mddev->resync_min;
		else if (!mddev->bitmap)
			j = mddev->recovery_cp;

	} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
		max_sectors = mddev->size << 1;
	else {
		/* recovery follows the physical size of devices */
		max_sectors = mddev->size << 1;
		j = MaxSector;
		rdev_for_each(rdev, rtmp, mddev)
			if (rdev->raid_disk >= 0 &&
			    !test_bit(Faulty, &rdev->flags) &&
			    !test_bit(In_sync, &rdev->flags) &&
			    rdev->recovery_offset < j)
				j = rdev->recovery_offset;
	}

	printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
	printk(KERN_INFO "md: minimum _guaranteed_ speed:"
	       " %d KB/sec/disk.\n", speed_min(mddev));
	printk(KERN_INFO "md: using maximum available idle IO bandwidth "
	       "(but not more than %d KB/sec) for %s.\n",
	       speed_max(mddev), desc);

	is_mddev_idle(mddev); /* this also initializes IO event counters */
	io_sectors = 0;
	for (m = 0; m < SYNC_MARKS; m++) {
		mark[m] = jiffies;
		mark_cnt[m] = io_sectors;
	}
	last_mark = 0;
	mddev->resync_mark = mark[last_mark];
	mddev->resync_mark_cnt = mark_cnt[last_mark];

	/*
	 * Tune reconstruction:
	 */
	window = 32*(PAGE_SIZE/512);
	printk(KERN_INFO "md: using %dk window, over a total of %llu blocks.\n",
	       window/2, (unsigned long long) max_sectors/2);

	atomic_set(&mddev->recovery_active, 0);
	last_check = 0;

	if (j>2) {
		printk(KERN_INFO
		       "md: resuming %s of %s from checkpoint.\n",
		       desc, mdname(mddev));
		mddev->curr_resync = j;
	}
	while (j < max_sectors) {
		sector_t sectors;

		skipped = 0;
		if (j >= mddev->resync_max) {
			sysfs_notify(&mddev->kobj, NULL, "sync_completed");
			wait_event(mddev->recovery_wait,
				   mddev->resync_max > j
				   || kthread_should_stop());
		}
		if (kthread_should_stop())
			goto interrupted;
		sectors = mddev->pers->sync_request(mddev, j, &skipped,
						    currspeed < speed_min(mddev));
		if (sectors == 0) {
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			goto out;
		}

		if (!skipped) { /* actual IO requested */
			io_sectors += sectors;
			atomic_add(sectors, &mddev->recovery_active);
		}

		j += sectors;
		if (j>1) mddev->curr_resync = j;
		mddev->curr_mark_cnt = io_sectors;
		if (last_check == 0)
			/* this is the earliest that rebuild will be
			 * visible in /proc/mdstat
			 */
			md_new_event(mddev);

		if (last_check + window > io_sectors || j == max_sectors)
			continue;

		last_check = io_sectors;
		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
			break;

	repeat:
		if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP)) {
			/* step marks */
			int next = (last_mark+1) % SYNC_MARKS;

			mddev->resync_mark = mark[next];
			mddev->resync_mark_cnt = mark_cnt[next];
			mark[next] = jiffies;
			mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
			last_mark = next;
		}

		if (kthread_should_stop())
			goto interrupted;

		/*
		 * this loop exits only when we are slower than
		 * the 'hard' speed limit, or the system was IO-idle for
		 * a jiffy.
		 * the system might be non-idle CPU-wise, but we only care
		 * about not overloading the IO subsystem. (things like an
		 * e2fsck being done on the RAID array should execute fast)
		 */
		blk_unplug(mddev->queue);
		cond_resched();

		currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
			/((jiffies-mddev->resync_mark)/HZ +1) +1;

		if (currspeed > speed_min(mddev)) {
			if ((currspeed > speed_max(mddev)) ||
			    !is_mddev_idle(mddev)) {
				msleep(500);
				goto repeat;
			}
		}
	}
	printk(KERN_INFO "md: %s: %s done.\n", mdname(mddev), desc);
	/*
	 * this also signals 'finished resyncing' to md_stop
	 */
 out:
	blk_unplug(mddev->queue);

	wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));

	/* tell personality that we are finished */
	mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);
	if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
	    mddev->curr_resync > 2) {
		if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
			if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
				if (mddev->curr_resync >= mddev->recovery_cp) {
					printk(KERN_INFO
					       "md: checkpointing %s of %s.\n",
					       desc, mdname(mddev));
					mddev->recovery_cp = mddev->curr_resync;
				}
			} else
				mddev->recovery_cp = MaxSector;
		} else {
			if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
				mddev->curr_resync = MaxSector;
			rdev_for_each(rdev, rtmp, mddev)
				if (rdev->raid_disk >= 0 &&
				    !test_bit(Faulty, &rdev->flags) &&
				    !test_bit(In_sync, &rdev->flags) &&
				    rdev->recovery_offset < mddev->curr_resync)
					rdev->recovery_offset = mddev->curr_resync;
		}
	}
	set_bit(MD_CHANGE_DEVS, &mddev->flags);

 skip:
	mddev->curr_resync = 0;
	mddev->resync_min = 0;
	mddev->resync_max = MaxSector;
	sysfs_notify(&mddev->kobj, NULL, "sync_completed");
	wake_up(&resync_wait);
	set_bit(MD_RECOVERY_DONE, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	return;

 interrupted:
	/*
	 * got a signal, exit.
	 */
	printk(KERN_INFO
	       "md: md_do_sync() got signal ... exiting\n");
	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
	goto out;

}
EXPORT_SYMBOL_GPL(md_do_sync);
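/*
 * Worked example of the throttle arithmetic in the loop above (numbers
 * invented): if 30720 sectors of resync I/O completed since the current
 * mark was taken ~3 seconds ago, then
 *
 *	currspeed = (30720/2) / (3 + 1) + 1 = 3841 KB/sec
 *
 * Below speed_min() the next sync_request call runs unthrottled; above
 * speed_max(), or whenever the array is seeing other I/O, md_do_sync
 * sleeps 500 ms and re-checks.
 */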
static int remove_and_add_spares(mddev_t *mddev)
{
	mdk_rdev_t *rdev;
	struct list_head *rtmp;
	int spares = 0;

	rdev_for_each(rdev, rtmp, mddev)
		if (rdev->raid_disk >= 0 &&
		    !test_bit(Blocked, &rdev->flags) &&
		    (test_bit(Faulty, &rdev->flags) ||
		     ! test_bit(In_sync, &rdev->flags)) &&
		    atomic_read(&rdev->nr_pending)==0) {
			if (mddev->pers->hot_remove_disk(
				    mddev, rdev->raid_disk)==0) {
				char nm[20];
				sprintf(nm,"rd%d", rdev->raid_disk);
				sysfs_remove_link(&mddev->kobj, nm);
				rdev->raid_disk = -1;
			}
		}

	if (mddev->degraded) {
		rdev_for_each(rdev, rtmp, mddev) {
			if (rdev->raid_disk >= 0 &&
			    !test_bit(In_sync, &rdev->flags))
				spares++;
			if (rdev->raid_disk < 0
			    && !test_bit(Faulty, &rdev->flags)) {
				rdev->recovery_offset = 0;
				if (mddev->pers->
				    hot_add_disk(mddev, rdev) == 0) {
					char nm[20];
					sprintf(nm, "rd%d", rdev->raid_disk);
					if (sysfs_create_link(&mddev->kobj,
							      &rdev->kobj, nm))
						printk(KERN_WARNING
						       "md: cannot register "
						       "%s for %s\n",
						       nm, mdname(mddev));
					spares++;
					md_new_event(mddev);
				} else
					break;
			}
		}
	}
	return spares;
}
 * This routine is regularly called by all per-raid-array threads to
 * deal with generic issues like resync and super-block update.
 * Raid personalities that don't have a thread (linear/raid0) do not
 * need this as they never do any recovery or update the superblock.
 *
 * It does not do any resync itself, but rather "forks" off other threads
 * to do that as needed.
 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
 * "->recovery" and create a thread at ->sync_thread.
 * When the thread finishes it sets MD_RECOVERY_DONE
 * and wakes up this thread, which will reap the thread and finish up.
 * This thread also removes any faulty devices (with nr_pending == 0).
 *
 * The overall approach is:
 *  1/ if the superblock needs updating, update it.
 *  2/ If a recovery thread is running, don't do anything else.
 *  3/ If recovery has finished, clean up, possibly marking spares active.
 *  4/ If there are any faulty devices, remove them.
 *  5/ If array is degraded, try to add spare devices.
 *  6/ If array has spares or is not in-sync, start a resync thread.
 */
void md_check_recovery(mddev_t *mddev)
{
	mdk_rdev_t *rdev;
	struct list_head *rtmp;


	if (mddev->bitmap)
		bitmap_daemon_work(mddev->bitmap);

	if (mddev->ro)
		return;

	if (signal_pending(current)) {
		if (mddev->pers->sync_request && !mddev->external) {
			printk(KERN_INFO "md: %s in immediate safe mode\n",
			       mdname(mddev));
			mddev->safemode = 2;
		}
		flush_signals(current);
	}

	if ( ! (
		(mddev->flags && !mddev->external) ||
		test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
		test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
		(mddev->external == 0 && mddev->safemode == 1) ||
		(mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
		 && !mddev->in_sync && mddev->recovery_cp == MaxSector)
		))
		return;
)) {
5996 if (!mddev
->external
) {
5997 spin_lock_irq(&mddev
->write_lock
);
5998 if (mddev
->safemode
&&
5999 !atomic_read(&mddev
->writes_pending
) &&
6001 mddev
->recovery_cp
== MaxSector
) {
6003 if (mddev
->persistent
)
6004 set_bit(MD_CHANGE_CLEAN
, &mddev
->flags
);
6006 if (mddev
->safemode
== 1)
6007 mddev
->safemode
= 0;
6008 spin_unlock_irq(&mddev
->write_lock
);
6012 md_update_sb(mddev
, 0);
6015 if (test_bit(MD_RECOVERY_RUNNING
, &mddev
->recovery
) &&
6016 !test_bit(MD_RECOVERY_DONE
, &mddev
->recovery
)) {
6017 /* resync/recovery still happening */
6018 clear_bit(MD_RECOVERY_NEEDED
, &mddev
->recovery
);
6021 if (mddev
->sync_thread
) {
6022 /* resync has finished, collect result */
6023 md_unregister_thread(mddev
->sync_thread
);
6024 mddev
->sync_thread
= NULL
;
6025 if (!test_bit(MD_RECOVERY_INTR
, &mddev
->recovery
)) {
6027 /* activate any spares */
6028 mddev
->pers
->spare_active(mddev
);
6030 md_update_sb(mddev
, 1);
6032 /* if array is no-longer degraded, then any saved_raid_disk
6033 * information must be scrapped
6035 if (!mddev
->degraded
)
6036 rdev_for_each(rdev
, rtmp
, mddev
)
6037 rdev
->saved_raid_disk
= -1;
6039 mddev
->recovery
= 0;
6040 /* flag recovery needed just to double check */
6041 set_bit(MD_RECOVERY_NEEDED
, &mddev
->recovery
);
6042 md_new_event(mddev
);
		/* Clear some bits that don't mean anything, but
		 * might be left set
		 */
		clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
		clear_bit(MD_RECOVERY_DONE, &mddev->recovery);

		if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
			goto unlock;
		/* no recovery is running.
		 * remove any failed drives, then
		 * add spares if possible.
		 * Spares are also removed and re-added, to allow
		 * the personality to fail the re-add.
		 */

		if (mddev->reshape_position != MaxSector) {
			if (mddev->pers->check_reshape(mddev) != 0)
				/* Cannot proceed */
				goto unlock;
			set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
		} else if ((spares = remove_and_add_spares(mddev))) {
			clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
			clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
		} else if (mddev->recovery_cp < MaxSector) {
			set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
		} else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
			/* nothing to be done ... */
			goto unlock;

		if (mddev->pers->sync_request) {
			set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
			if (spares && mddev->bitmap && !mddev->bitmap->file) {
				/* We are adding a device or devices to an array
				 * which has the bitmap stored on all devices.
				 * So make sure all bitmap pages get written.
				 */
				bitmap_write_all(mddev->bitmap);
			}
			mddev->sync_thread = md_register_thread(md_do_sync,
								mddev,
								"%s_resync");
			if (!mddev->sync_thread) {
				printk(KERN_ERR "%s: could not start resync"
				       " thread...\n",
				       mdname(mddev));
				/* leave the spares where they are, it shouldn't hurt */
				mddev->recovery = 0;
			} else
				md_wakeup_thread(mddev->sync_thread);
			md_new_event(mddev);
		}
	unlock:
		mddev_unlock(mddev);
	}
}
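/*
 * Example (illustrative sketch; "raidXd" is a placeholder): every
 * personality's daemon routine calls this on each wakeup, so the
 * generic state machine above runs in the context of e.g. raid1d:
 *
 *	static void raidXd(mddev_t *mddev)
 *	{
 *		md_check_recovery(mddev);
 *		...personality-specific work...
 *	}
 */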
void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
{
	sysfs_notify(&rdev->kobj, NULL, "state");
	wait_event_timeout(rdev->blocked_wait,
			   !test_bit(Blocked, &rdev->flags),
			   msecs_to_jiffies(5000));
	rdev_dec_pending(rdev, mddev);
}
EXPORT_SYMBOL(md_wait_for_blocked_rdev);
static int md_notify_reboot(struct notifier_block *this,
			    unsigned long code, void *x)
{
	struct list_head *tmp;
	mddev_t *mddev;

	if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) {

		printk(KERN_INFO "md: stopping all md devices.\n");

		for_each_mddev(mddev, tmp)
			if (mddev_trylock(mddev)) {
				do_md_stop(mddev, 1);
				mddev_unlock(mddev);
			}
		/*
		 * certain more exotic SCSI devices are known to be
		 * volatile wrt too early system reboots. While the
		 * right place to handle this issue is the given
		 * driver, we do want to have a safe RAID driver ...
		 */
		mdelay(1000*1);
	}
	return NOTIFY_DONE;
}

static struct notifier_block md_notifier = {
	.notifier_call	= md_notify_reboot,
	.next		= NULL,
	.priority	= INT_MAX, /* before any real devices */
};
static void md_geninit(void)
{
	dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));

	proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
}
static int __init md_init(void)
{
	if (register_blkdev(MAJOR_NR, "md"))
		return -1;
	if ((mdp_major = register_blkdev(0, "mdp")) <= 0) {
		unregister_blkdev(MAJOR_NR, "md");
		return -1;
	}
	blk_register_region(MKDEV(MAJOR_NR, 0), 1UL<<MINORBITS, THIS_MODULE,
			    md_probe, NULL, NULL);
	blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
			    md_probe, NULL, NULL);

	register_reboot_notifier(&md_notifier);
	raid_table_header = register_sysctl_table(raid_root_table);

	md_geninit();
	return 0;
}
#ifndef MODULE

/*
 * Searches all registered partitions for autorun RAID arrays
 * at boot time.
 */

static LIST_HEAD(all_detected_devices);
struct detected_devices_node {
	struct list_head list;
	dev_t dev;
};

void md_autodetect_dev(dev_t dev)
{
	struct detected_devices_node *node_detected_dev;

	node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
	if (node_detected_dev) {
		node_detected_dev->dev = dev;
		list_add_tail(&node_detected_dev->list, &all_detected_devices);
	} else {
		printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed"
		       ", skipping dev(%d,%d)\n", MAJOR(dev), MINOR(dev));
	}
}
static void autostart_arrays(int part)
{
	mdk_rdev_t *rdev;
	struct detected_devices_node *node_detected_dev;
	dev_t dev;
	int i_scanned, i_passed;

	i_scanned = 0;
	i_passed = 0;

	printk(KERN_INFO "md: Autodetecting RAID arrays.\n");

	while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
		i_scanned++;
		node_detected_dev = list_entry(all_detected_devices.next,
					struct detected_devices_node, list);
		list_del(&node_detected_dev->list);
		dev = node_detected_dev->dev;
		kfree(node_detected_dev);
		rdev = md_import_device(dev,0, 90);
		if (IS_ERR(rdev))
			continue;

		if (test_bit(Faulty, &rdev->flags)) {
			MD_BUG();
			continue;
		}
		set_bit(AutoDetected, &rdev->flags);
		list_add(&rdev->same_set, &pending_raid_disks);
		i_passed++;
	}

	printk(KERN_INFO "md: Scanned %d and added %d devices.\n",
	       i_scanned, i_passed);

	autorun_devices(part);
}

#endif /* !MODULE */
static __exit void md_exit(void)
{
	mddev_t *mddev;
	struct list_head *tmp;

	blk_unregister_region(MKDEV(MAJOR_NR,0), 1U << MINORBITS);
	blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS);

	unregister_blkdev(MAJOR_NR,"md");
	unregister_blkdev(mdp_major, "mdp");
	unregister_reboot_notifier(&md_notifier);
	unregister_sysctl_table(raid_table_header);
	remove_proc_entry("mdstat", NULL);
	for_each_mddev(mddev, tmp) {
		struct gendisk *disk = mddev->gendisk;
		if (!disk)
			continue;
		export_array(mddev);
		del_gendisk(disk);
		put_disk(disk);
		mddev->gendisk = NULL;
		mddev_put(mddev);
	}
}

subsys_initcall(md_init);
module_exit(md_exit)
static int get_ro(char *buffer, struct kernel_param *kp)
{
	return sprintf(buffer, "%d", start_readonly);
}
static int set_ro(const char *val, struct kernel_param *kp)
{
	char *e;
	int num = simple_strtoul(val, &e, 10);
	if (*val && (*e == '\0' || *e == '\n')) {
		start_readonly = num;
		return 0;
	}
	return -EINVAL;
}

module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
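/*
 * Usage note: start_ro=1 makes newly assembled arrays come up
 * auto-read-only (mddev->ro == 2) until the first write.  It can be
 * set at module load or at runtime, e.g.:
 *
 *	modprobe md-mod start_ro=1
 *	echo 1 > /sys/module/md_mod/parameters/start_ro
 */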
EXPORT_SYMBOL(register_md_personality);
EXPORT_SYMBOL(unregister_md_personality);
EXPORT_SYMBOL(md_error);
EXPORT_SYMBOL(md_done_sync);
EXPORT_SYMBOL(md_write_start);
EXPORT_SYMBOL(md_write_end);
EXPORT_SYMBOL(md_register_thread);
EXPORT_SYMBOL(md_unregister_thread);
EXPORT_SYMBOL(md_wakeup_thread);
EXPORT_SYMBOL(md_check_recovery);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md");
MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);