/*
 * Copyright (C) 2015 Shaohua Li <shli@fb.com>
 * Copyright (C) 2016 Song Liu <songliubraving@fb.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/raid/md_p.h>
#include <linux/crc32c.h>
#include <linux/random.h>
#include <linux/kthread.h>
#include <linux/types.h>
/*
 * metadata/data are stored on disk in 4k units (blocks), regardless of the
 * underlying hardware sector size. This only works with PAGE_SIZE == 4096.
 */
#define BLOCK_SECTORS (8)
#define BLOCK_SECTOR_SHIFT (3)
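/*
 * For illustration (values are hypothetical): with the constants above a log
 * block covers BLOCK_SECTORS 512-byte sectors, so sector/block conversion is
 * a simple shift:
 *
 *	sector_t sect = 24;				// some sector count
 *	sector_t blocks = sect >> BLOCK_SECTOR_SHIFT;	// == 3 blocks
 *	sector_t back = blocks << BLOCK_SECTOR_SHIFT;	// == 24 sectors
 */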
/*
 * log->max_free_space is min(1/4 disk size, 10G reclaimable space).
 *
 * In write-through mode, reclaim runs every log->max_free_space.
 * This prevents recovery from having to scan the log for too long.
 */
#define RECLAIM_MAX_FREE_SPACE (10 * 1024 * 1024 * 2) /* sector */
#define RECLAIM_MAX_FREE_SPACE_SHIFT (2)
/* wake up reclaim thread periodically */
#define R5C_RECLAIM_WAKEUP_INTERVAL (30 * HZ)
/* start flush with these full stripes */
#define R5C_FULL_STRIPE_FLUSH_BATCH(conf) (conf->max_nr_stripes / 4)
/* reclaim stripes in groups */
#define R5C_RECLAIM_STRIPE_GROUP (NR_STRIPE_HASH_LOCKS * 2)

/*
 * We only need 2 bios per I/O unit to make progress, but ensure we
 * have a few more available to not get too tight.
 */
#define R5L_POOL_SIZE 4
static char *r5c_journal_mode_str[] = {"write-through",
				       "write-back"};
/*
 * raid5 cache state machine
 *
 * With the RAID cache, each stripe works in two phases:
 *	- caching phase
 *	- writing-out phase
 *
 * These two phases are controlled by bit STRIPE_R5C_CACHING:
 *   if STRIPE_R5C_CACHING == 0, the stripe is in writing-out phase
 *   if STRIPE_R5C_CACHING == 1, the stripe is in caching phase
 *
 * When there is no journal, or the journal is in write-through mode,
 * the stripe is always in writing-out phase.
 *
 * For write-back journal, the stripe is sent to caching phase on write
 * (r5c_try_caching_write). r5c_make_stripe_write_out() kicks off
 * the write-out phase by clearing STRIPE_R5C_CACHING.
 *
 * Stripes in caching phase do not write the raid disks. Instead, all
 * writes are committed from the log device. Therefore, a stripe in
 * caching phase handles writes as:
 *	- write to log device
 *	- return IO
 *
 * Stripes in writing-out phase handle writes as:
 *	- calculate parity
 *	- write pending data and parity to journal
 *	- write data and parity to raid disks
 *	- return IO for pending writes
 */
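/*
 * A minimal sketch (illustrative only) of how the phase is queried; the real
 * transitions are performed by r5c_try_caching_write() and
 * r5c_make_stripe_write_out() further below:
 *
 *	if (test_bit(STRIPE_R5C_CACHING, &sh->state))
 *		;	// caching phase: writes only go to the log device
 *	else
 *		;	// writing-out phase: data/parity also go to raid disks
 */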
struct r5l_log {
	struct md_rdev *rdev;

	u32 uuid_checksum;

	sector_t device_size;		/* log device size, round to
					 * BLOCK_SECTORS */
	sector_t max_free_space;	/* reclaim run if free space is at
					 * this size */

	sector_t last_checkpoint;	/* log tail. where recovery scan
					 * starts from */
	u64 last_cp_seq;		/* log tail sequence */

	sector_t log_start;		/* log head. where new data appends */
	u64 seq;			/* log head sequence */

	sector_t next_checkpoint;

	struct mutex io_mutex;
	struct r5l_io_unit *current_io;	/* current io_unit accepting new data */

	spinlock_t io_list_lock;
	struct list_head running_ios;	/* io_units which are still running,
					 * and have not yet been completely
					 * written to the log */
	struct list_head io_end_ios;	/* io_units which have been completely
					 * written to the log but not yet written
					 * to the RAID */
	struct list_head flushing_ios;	/* io_units which are waiting for log
					 * cache flush */
	struct list_head finished_ios;	/* io_units which settle down in log disk */
	struct bio flush_bio;

	struct list_head no_mem_stripes;   /* pending stripes, -ENOMEM */

	struct kmem_cache *io_kc;
	mempool_t *io_pool;
	struct bio_set *bs;
	mempool_t *meta_pool;

	struct md_thread *reclaim_thread;
	unsigned long reclaim_target;	/* number of space that need to be
					 * reclaimed. if it's 0, reclaim spaces
					 * used by io_units which are in
					 * IO_UNIT_STRIPE_END state (eg, reclaim
					 * doesn't wait for a specific io_unit
					 * switching to IO_UNIT_STRIPE_END
					 * state) */
	wait_queue_head_t iounit_wait;

	struct list_head no_space_stripes; /* pending stripes, log has no space */
	spinlock_t no_space_stripes_lock;

	bool need_cache_flush;

	/* for r5c_cache */
	enum r5c_journal_mode r5c_journal_mode;

	/* all stripes in r5cache, in the order of seq at sh->log_start */
	struct list_head stripe_in_journal_list;

	spinlock_t stripe_in_journal_lock;
	atomic_t stripe_in_journal_count;

	/* to submit async io_units, to fulfill ordering of flush */
	struct work_struct deferred_io_work;
	/* to disable write back while the array is degraded */
	struct work_struct disable_writeback_work;

	/* for chunk_aligned_read in writeback mode, details below */
	spinlock_t tree_lock;
	struct radix_tree_root big_stripe_tree;
};
/*
 * Enable chunk_aligned_read() with write back cache.
 *
 * Each chunk may contain more than one stripe (for example, a 256kB
 * chunk contains 64 4kB pages, so this chunk contains 64 stripes). For
 * chunk_aligned_read, these stripes are grouped into one "big_stripe".
 * For each big_stripe, we count how many stripes of this big_stripe
 * are in the write back cache. These data are tracked in a radix tree
 * (big_stripe_tree). We use the radix_tree item pointer as the counter.
 * r5c_tree_index() is used to calculate keys for the radix tree.
 *
 * chunk_aligned_read() calls r5c_big_stripe_cached() to look up the
 * big_stripe of each chunk in the tree. If this big_stripe is in the
 * tree, chunk_aligned_read() aborts. This look up is protected by
 * tree_lock.
 *
 * It is necessary to remember whether a stripe is counted in
 * big_stripe_tree. Instead of adding a new flag, we reuse existing flags:
 * STRIPE_R5C_PARTIAL_STRIPE and STRIPE_R5C_FULL_STRIPE. If either of these
 * two flags is set, the stripe is counted in big_stripe_tree. This
 * requires moving set_bit(STRIPE_R5C_PARTIAL_STRIPE) to
 * r5c_try_caching_write(); and moving clear_bit of
 * STRIPE_R5C_PARTIAL_STRIPE and STRIPE_R5C_FULL_STRIPE to
 * r5c_finish_stripe_write_out().
 *
 * The radix tree requires the lowest 2 bits of the item pointer to be 0,
 * so it is necessary to shift the counter left by 2 bits before using it
 * as the item pointer of the tree.
 */
#define R5C_RADIX_COUNT_SHIFT 2
/*
 * calculate key for big_stripe_tree
 *
 * sect: align_bi->bi_iter.bi_sector or sh->sector
 */
static inline sector_t r5c_tree_index(struct r5conf *conf,
				      sector_t sect)
{
	sector_t offset;

	offset = sector_div(sect, conf->chunk_sectors);
	return sect;
}
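/*
 * Illustrative sketch (hypothetical local names) of the counter encoding
 * described above: the per-big_stripe count is shifted left by
 * R5C_RADIX_COUNT_SHIFT so the low two bits of the stored item pointer
 * stay zero:
 *
 *	uintptr_t count = 3;					// cached stripes
 *	void *item = (void *)(count << R5C_RADIX_COUNT_SHIFT);	// store in tree
 *	uintptr_t back = (uintptr_t)item >> R5C_RADIX_COUNT_SHIFT; // == 3
 */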
/*
 * an IO range starts from a meta data block and ends at the next meta data
 * block. The io_unit's meta data block tracks the data/parity that follows
 * it. io_units are written to the log disk with normal writes; since we
 * always flush the log disk first and only then start moving data to the
 * raid disks, there is no need to write the io_unit with FLUSH/FUA.
 */
struct r5l_io_unit {
	struct r5l_log *log;

	struct page *meta_page;	/* store meta block */
	int meta_offset;	/* current offset in meta_page */

	struct bio *current_bio;/* current_bio accepting new data */

	atomic_t pending_stripe;/* how many stripes not flushed to raid */
	u64 seq;		/* seq number of the metablock */
	sector_t log_start;	/* where the io_unit starts */
	sector_t log_end;	/* where the io_unit ends */
	struct list_head log_sibling; /* log->running_ios */
	struct list_head stripe_list; /* stripes added to the io_unit */

	int state;
	bool need_split_bio;
	struct bio *split_bio;

	unsigned int has_flush:1;	/* include flush request */
	unsigned int has_fua:1;		/* include fua request */
	unsigned int has_null_flush:1;	/* include empty flush request */
	/*
	 * io isn't sent yet; flush/fua requests can only be submitted once
	 * this is the first IO in the running_ios list
	 */
	unsigned int io_deferred:1;

	struct bio_list flush_barriers;   /* size == 0 flush bios */
};
/* r5l_io_unit state */
enum r5l_io_unit_state {
	IO_UNIT_RUNNING = 0,	/* accepting new IO */
	IO_UNIT_IO_START = 1,	/* io_unit bio start writing to log,
				 * no longer accepting new bio */
	IO_UNIT_IO_END = 2,	/* io_unit bio finish writing to log */
	IO_UNIT_STRIPE_END = 3,	/* stripes data finished writing to raid */
};
bool r5c_is_writeback(struct r5l_log *log)
{
	return (log != NULL &&
		log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK);
}
static sector_t r5l_ring_add(struct r5l_log *log, sector_t start, sector_t inc)
{
	start += inc;
	if (start >= log->device_size)
		start = start - log->device_size;
	return start;
}
static sector_t r5l_ring_distance(struct r5l_log *log, sector_t start,
				  sector_t end)
{
	if (end >= start)
		return end - start;
	else
		return end + log->device_size - start;
}
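/*
 * A worked example of the ring arithmetic above (illustrative numbers): with
 * log->device_size == 1000, r5l_ring_add(log, 990, 16) wraps around to 6,
 * and r5l_ring_distance(log, 990, 6) == 6 + 1000 - 990 == 16.
 */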
static bool r5l_has_free_space(struct r5l_log *log, sector_t size)
{
	sector_t used_size;

	used_size = r5l_ring_distance(log, log->last_checkpoint,
				      log->log_start);

	return log->device_size > used_size + size;
}
static void __r5l_set_io_unit_state(struct r5l_io_unit *io,
				    enum r5l_io_unit_state state)
{
	if (WARN_ON(io->state >= state))
		return;
	io->state = state;
}
301 r5c_return_dev_pending_writes(struct r5conf
*conf
, struct r5dev
*dev
)
303 struct bio
*wbi
, *wbi2
;
307 while (wbi
&& wbi
->bi_iter
.bi_sector
<
308 dev
->sector
+ STRIPE_SECTORS
) {
309 wbi2
= r5_next_bio(wbi
, dev
->sector
);
310 md_write_end(conf
->mddev
);
316 void r5c_handle_cached_data_endio(struct r5conf
*conf
,
317 struct stripe_head
*sh
, int disks
)
321 for (i
= sh
->disks
; i
--; ) {
322 if (sh
->dev
[i
].written
) {
323 set_bit(R5_UPTODATE
, &sh
->dev
[i
].flags
);
324 r5c_return_dev_pending_writes(conf
, &sh
->dev
[i
]);
325 bitmap_endwrite(conf
->mddev
->bitmap
, sh
->sector
,
327 !test_bit(STRIPE_DEGRADED
, &sh
->state
),
333 void r5l_wake_reclaim(struct r5l_log
*log
, sector_t space
);
335 /* Check whether we should flush some stripes to free up stripe cache */
336 void r5c_check_stripe_cache_usage(struct r5conf
*conf
)
340 if (!r5c_is_writeback(conf
->log
))
343 total_cached
= atomic_read(&conf
->r5c_cached_partial_stripes
) +
344 atomic_read(&conf
->r5c_cached_full_stripes
);
347 * The following condition is true for either of the following:
348 * - stripe cache pressure high:
349 * total_cached > 3/4 min_nr_stripes ||
350 * empty_inactive_list_nr > 0
351 * - stripe cache pressure moderate:
352 * total_cached > 1/2 min_nr_stripes
354 if (total_cached
> conf
->min_nr_stripes
* 1 / 2 ||
355 atomic_read(&conf
->empty_inactive_list_nr
) > 0)
356 r5l_wake_reclaim(conf
->log
, 0);
360 * flush cache when there are R5C_FULL_STRIPE_FLUSH_BATCH or more full
361 * stripes in the cache
363 void r5c_check_cached_full_stripe(struct r5conf
*conf
)
365 if (!r5c_is_writeback(conf
->log
))
369 * wake up reclaim for R5C_FULL_STRIPE_FLUSH_BATCH cached stripes
370 * or a full stripe (chunk size / 4k stripes).
372 if (atomic_read(&conf
->r5c_cached_full_stripes
) >=
373 min(R5C_FULL_STRIPE_FLUSH_BATCH(conf
),
374 conf
->chunk_sectors
>> STRIPE_SHIFT
))
375 r5l_wake_reclaim(conf
->log
, 0);
/*
 * Total log space (in sectors) needed to flush all data in cache
 *
 * To avoid deadlock due to log space, it is necessary to reserve log
 * space to flush critical stripes (stripes occupying log space near
 * last_checkpoint). This function helps check how much log space is
 * required to flush all cached stripes.
 *
 * To reduce log space requirements, two mechanisms are used to give cache
 * flush higher priorities:
 *    1. In handle_stripe_dirtying() and schedule_reconstruction(),
 *       stripes ALREADY in journal can be flushed w/o pending writes;
 *    2. In r5l_write_stripe() and r5c_cache_data(), stripes NOT in journal
 *       can be delayed (r5l_add_no_space_stripe).
 *
 * In cache flush, the stripe goes through 1 and then 2. For a stripe that
 * already passed 1, flushing it requires at most (conf->max_degraded + 1)
 * pages of journal space. For stripes that have not passed 1, flushing
 * requires (conf->raid_disks + 1) pages of journal space. There are at
 * most (conf->group_cnt + 1) stripes that passed 1. So the total journal
 * space required to flush all cached stripes (in pages) is:
 *
 *    (stripe_in_journal_count - group_cnt - 1) * (max_degraded + 1) +
 *    (group_cnt + 1) * (raid_disks + 1)
 *
 * which can be rewritten as
 *
 *    (stripe_in_journal_count) * (max_degraded + 1) +
 *    (group_cnt + 1) * (raid_disks - max_degraded)
 */
static sector_t r5c_log_required_to_flush_cache(struct r5conf *conf)
{
	struct r5l_log *log = conf->log;

	if (!r5c_is_writeback(log))
		return 0;

	return BLOCK_SECTORS *
		((conf->max_degraded + 1) * atomic_read(&log->stripe_in_journal_count) +
		 (conf->raid_disks - conf->max_degraded) * (conf->group_cnt + 1));
}
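/*
 * A worked example with illustrative numbers: for RAID5 (max_degraded == 1)
 * over 6 disks, with group_cnt == 0 and 100 stripes in the journal, the
 * bound is BLOCK_SECTORS * ((1 + 1) * 100 + (6 - 1) * (0 + 1)) =
 * 8 * 205 = 1640 sectors (~820 KiB) of journal space.
 */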
419 * evaluate log space usage and update R5C_LOG_TIGHT and R5C_LOG_CRITICAL
421 * R5C_LOG_TIGHT is set when free space on the log device is less than 3x of
422 * reclaim_required_space. R5C_LOG_CRITICAL is set when free space on the log
423 * device is less than 2x of reclaim_required_space.
425 static inline void r5c_update_log_state(struct r5l_log
*log
)
427 struct r5conf
*conf
= log
->rdev
->mddev
->private;
429 sector_t reclaim_space
;
430 bool wake_reclaim
= false;
432 if (!r5c_is_writeback(log
))
435 free_space
= r5l_ring_distance(log
, log
->log_start
,
436 log
->last_checkpoint
);
437 reclaim_space
= r5c_log_required_to_flush_cache(conf
);
438 if (free_space
< 2 * reclaim_space
)
439 set_bit(R5C_LOG_CRITICAL
, &conf
->cache_state
);
441 if (test_bit(R5C_LOG_CRITICAL
, &conf
->cache_state
))
443 clear_bit(R5C_LOG_CRITICAL
, &conf
->cache_state
);
445 if (free_space
< 3 * reclaim_space
)
446 set_bit(R5C_LOG_TIGHT
, &conf
->cache_state
);
448 clear_bit(R5C_LOG_TIGHT
, &conf
->cache_state
);
451 r5l_wake_reclaim(log
, 0);
455 * Put the stripe into writing-out phase by clearing STRIPE_R5C_CACHING.
456 * This function should only be called in write-back mode.
458 void r5c_make_stripe_write_out(struct stripe_head
*sh
)
460 struct r5conf
*conf
= sh
->raid_conf
;
461 struct r5l_log
*log
= conf
->log
;
463 BUG_ON(!r5c_is_writeback(log
));
465 WARN_ON(!test_bit(STRIPE_R5C_CACHING
, &sh
->state
));
466 clear_bit(STRIPE_R5C_CACHING
, &sh
->state
);
468 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE
, &sh
->state
))
469 atomic_inc(&conf
->preread_active_stripes
);
472 static void r5c_handle_data_cached(struct stripe_head
*sh
)
476 for (i
= sh
->disks
; i
--; )
477 if (test_and_clear_bit(R5_Wantwrite
, &sh
->dev
[i
].flags
)) {
478 set_bit(R5_InJournal
, &sh
->dev
[i
].flags
);
479 clear_bit(R5_LOCKED
, &sh
->dev
[i
].flags
);
481 clear_bit(STRIPE_LOG_TRAPPED
, &sh
->state
);
485 * this journal write must contain full parity,
486 * it may also contain some data pages
488 static void r5c_handle_parity_cached(struct stripe_head
*sh
)
492 for (i
= sh
->disks
; i
--; )
493 if (test_bit(R5_InJournal
, &sh
->dev
[i
].flags
))
494 set_bit(R5_Wantwrite
, &sh
->dev
[i
].flags
);
498 * Setting proper flags after writing (or flushing) data and/or parity to the
499 * log device. This is called from r5l_log_endio() or r5l_log_flush_endio().
501 static void r5c_finish_cache_stripe(struct stripe_head
*sh
)
503 struct r5l_log
*log
= sh
->raid_conf
->log
;
505 if (log
->r5c_journal_mode
== R5C_JOURNAL_MODE_WRITE_THROUGH
) {
506 BUG_ON(test_bit(STRIPE_R5C_CACHING
, &sh
->state
));
508 * Set R5_InJournal for parity dev[pd_idx]. This means
509 * all data AND parity in the journal. For RAID 6, it is
510 * NOT necessary to set the flag for dev[qd_idx], as the
511 * two parities are written out together.
513 set_bit(R5_InJournal
, &sh
->dev
[sh
->pd_idx
].flags
);
514 } else if (test_bit(STRIPE_R5C_CACHING
, &sh
->state
)) {
515 r5c_handle_data_cached(sh
);
517 r5c_handle_parity_cached(sh
);
518 set_bit(R5_InJournal
, &sh
->dev
[sh
->pd_idx
].flags
);
522 static void r5l_io_run_stripes(struct r5l_io_unit
*io
)
524 struct stripe_head
*sh
, *next
;
526 list_for_each_entry_safe(sh
, next
, &io
->stripe_list
, log_list
) {
527 list_del_init(&sh
->log_list
);
529 r5c_finish_cache_stripe(sh
);
531 set_bit(STRIPE_HANDLE
, &sh
->state
);
532 raid5_release_stripe(sh
);
536 static void r5l_log_run_stripes(struct r5l_log
*log
)
538 struct r5l_io_unit
*io
, *next
;
540 assert_spin_locked(&log
->io_list_lock
);
542 list_for_each_entry_safe(io
, next
, &log
->running_ios
, log_sibling
) {
543 /* don't change list order */
544 if (io
->state
< IO_UNIT_IO_END
)
547 list_move_tail(&io
->log_sibling
, &log
->finished_ios
);
548 r5l_io_run_stripes(io
);
552 static void r5l_move_to_end_ios(struct r5l_log
*log
)
554 struct r5l_io_unit
*io
, *next
;
556 assert_spin_locked(&log
->io_list_lock
);
558 list_for_each_entry_safe(io
, next
, &log
->running_ios
, log_sibling
) {
559 /* don't change list order */
560 if (io
->state
< IO_UNIT_IO_END
)
562 list_move_tail(&io
->log_sibling
, &log
->io_end_ios
);
566 static void __r5l_stripe_write_finished(struct r5l_io_unit
*io
);
567 static void r5l_log_endio(struct bio
*bio
)
569 struct r5l_io_unit
*io
= bio
->bi_private
;
570 struct r5l_io_unit
*io_deferred
;
571 struct r5l_log
*log
= io
->log
;
575 md_error(log
->rdev
->mddev
, log
->rdev
);
578 mempool_free(io
->meta_page
, log
->meta_pool
);
580 spin_lock_irqsave(&log
->io_list_lock
, flags
);
581 __r5l_set_io_unit_state(io
, IO_UNIT_IO_END
);
582 if (log
->need_cache_flush
&& !list_empty(&io
->stripe_list
))
583 r5l_move_to_end_ios(log
);
585 r5l_log_run_stripes(log
);
586 if (!list_empty(&log
->running_ios
)) {
588 * FLUSH/FUA io_unit is deferred because of ordering, now we
591 io_deferred
= list_first_entry(&log
->running_ios
,
592 struct r5l_io_unit
, log_sibling
);
593 if (io_deferred
->io_deferred
)
594 schedule_work(&log
->deferred_io_work
);
597 spin_unlock_irqrestore(&log
->io_list_lock
, flags
);
599 if (log
->need_cache_flush
)
600 md_wakeup_thread(log
->rdev
->mddev
->thread
);
602 if (io
->has_null_flush
) {
605 WARN_ON(bio_list_empty(&io
->flush_barriers
));
606 while ((bi
= bio_list_pop(&io
->flush_barriers
)) != NULL
) {
608 atomic_dec(&io
->pending_stripe
);
612 /* finish flush only io_unit and PAYLOAD_FLUSH only io_unit */
613 if (atomic_read(&io
->pending_stripe
) == 0)
614 __r5l_stripe_write_finished(io
);
617 static void r5l_do_submit_io(struct r5l_log
*log
, struct r5l_io_unit
*io
)
621 spin_lock_irqsave(&log
->io_list_lock
, flags
);
622 __r5l_set_io_unit_state(io
, IO_UNIT_IO_START
);
623 spin_unlock_irqrestore(&log
->io_list_lock
, flags
);
626 io
->current_bio
->bi_opf
|= REQ_PREFLUSH
;
628 io
->current_bio
->bi_opf
|= REQ_FUA
;
629 submit_bio(io
->current_bio
);
635 io
->split_bio
->bi_opf
|= REQ_PREFLUSH
;
637 io
->split_bio
->bi_opf
|= REQ_FUA
;
638 submit_bio(io
->split_bio
);
641 /* deferred io_unit will be dispatched here */
642 static void r5l_submit_io_async(struct work_struct
*work
)
644 struct r5l_log
*log
= container_of(work
, struct r5l_log
,
646 struct r5l_io_unit
*io
= NULL
;
649 spin_lock_irqsave(&log
->io_list_lock
, flags
);
650 if (!list_empty(&log
->running_ios
)) {
651 io
= list_first_entry(&log
->running_ios
, struct r5l_io_unit
,
653 if (!io
->io_deferred
)
658 spin_unlock_irqrestore(&log
->io_list_lock
, flags
);
660 r5l_do_submit_io(log
, io
);
663 static void r5c_disable_writeback_async(struct work_struct
*work
)
665 struct r5l_log
*log
= container_of(work
, struct r5l_log
,
666 disable_writeback_work
);
667 struct mddev
*mddev
= log
->rdev
->mddev
;
669 if (log
->r5c_journal_mode
== R5C_JOURNAL_MODE_WRITE_THROUGH
)
671 pr_info("md/raid:%s: Disabling writeback cache for degraded array.\n",
673 mddev_suspend(mddev
);
674 log
->r5c_journal_mode
= R5C_JOURNAL_MODE_WRITE_THROUGH
;
678 static void r5l_submit_current_io(struct r5l_log
*log
)
680 struct r5l_io_unit
*io
= log
->current_io
;
682 struct r5l_meta_block
*block
;
685 bool do_submit
= true;
690 block
= page_address(io
->meta_page
);
691 block
->meta_size
= cpu_to_le32(io
->meta_offset
);
692 crc
= crc32c_le(log
->uuid_checksum
, block
, PAGE_SIZE
);
693 block
->checksum
= cpu_to_le32(crc
);
694 bio
= io
->current_bio
;
696 log
->current_io
= NULL
;
697 spin_lock_irqsave(&log
->io_list_lock
, flags
);
698 if (io
->has_flush
|| io
->has_fua
) {
699 if (io
!= list_first_entry(&log
->running_ios
,
700 struct r5l_io_unit
, log_sibling
)) {
705 spin_unlock_irqrestore(&log
->io_list_lock
, flags
);
707 r5l_do_submit_io(log
, io
);
710 static struct bio
*r5l_bio_alloc(struct r5l_log
*log
)
712 struct bio
*bio
= bio_alloc_bioset(GFP_NOIO
, BIO_MAX_PAGES
, log
->bs
);
714 bio_set_op_attrs(bio
, REQ_OP_WRITE
, 0);
715 bio
->bi_bdev
= log
->rdev
->bdev
;
716 bio
->bi_iter
.bi_sector
= log
->rdev
->data_offset
+ log
->log_start
;
721 static void r5_reserve_log_entry(struct r5l_log
*log
, struct r5l_io_unit
*io
)
723 log
->log_start
= r5l_ring_add(log
, log
->log_start
, BLOCK_SECTORS
);
725 r5c_update_log_state(log
);
	/*
	 * If we filled up the log device, start from the beginning again,
	 * which will require a new bio.
	 *
	 * Note: for this to work properly the log size needs to be a multiple
	 * of BLOCK_SECTORS.
	 */
733 if (log
->log_start
== 0)
734 io
->need_split_bio
= true;
736 io
->log_end
= log
->log_start
;
739 static struct r5l_io_unit
*r5l_new_meta(struct r5l_log
*log
)
741 struct r5l_io_unit
*io
;
742 struct r5l_meta_block
*block
;
744 io
= mempool_alloc(log
->io_pool
, GFP_ATOMIC
);
747 memset(io
, 0, sizeof(*io
));
750 INIT_LIST_HEAD(&io
->log_sibling
);
751 INIT_LIST_HEAD(&io
->stripe_list
);
752 bio_list_init(&io
->flush_barriers
);
753 io
->state
= IO_UNIT_RUNNING
;
755 io
->meta_page
= mempool_alloc(log
->meta_pool
, GFP_NOIO
);
756 block
= page_address(io
->meta_page
);
758 block
->magic
= cpu_to_le32(R5LOG_MAGIC
);
759 block
->version
= R5LOG_VERSION
;
760 block
->seq
= cpu_to_le64(log
->seq
);
761 block
->position
= cpu_to_le64(log
->log_start
);
763 io
->log_start
= log
->log_start
;
764 io
->meta_offset
= sizeof(struct r5l_meta_block
);
765 io
->seq
= log
->seq
++;
767 io
->current_bio
= r5l_bio_alloc(log
);
768 io
->current_bio
->bi_end_io
= r5l_log_endio
;
769 io
->current_bio
->bi_private
= io
;
770 bio_add_page(io
->current_bio
, io
->meta_page
, PAGE_SIZE
, 0);
772 r5_reserve_log_entry(log
, io
);
774 spin_lock_irq(&log
->io_list_lock
);
775 list_add_tail(&io
->log_sibling
, &log
->running_ios
);
776 spin_unlock_irq(&log
->io_list_lock
);
781 static int r5l_get_meta(struct r5l_log
*log
, unsigned int payload_size
)
783 if (log
->current_io
&&
784 log
->current_io
->meta_offset
+ payload_size
> PAGE_SIZE
)
785 r5l_submit_current_io(log
);
787 if (!log
->current_io
) {
788 log
->current_io
= r5l_new_meta(log
);
789 if (!log
->current_io
)
796 static void r5l_append_payload_meta(struct r5l_log
*log
, u16 type
,
798 u32 checksum1
, u32 checksum2
,
799 bool checksum2_valid
)
801 struct r5l_io_unit
*io
= log
->current_io
;
802 struct r5l_payload_data_parity
*payload
;
804 payload
= page_address(io
->meta_page
) + io
->meta_offset
;
805 payload
->header
.type
= cpu_to_le16(type
);
806 payload
->header
.flags
= cpu_to_le16(0);
807 payload
->size
= cpu_to_le32((1 + !!checksum2_valid
) <<
809 payload
->location
= cpu_to_le64(location
);
810 payload
->checksum
[0] = cpu_to_le32(checksum1
);
812 payload
->checksum
[1] = cpu_to_le32(checksum2
);
814 io
->meta_offset
+= sizeof(struct r5l_payload_data_parity
) +
815 sizeof(__le32
) * (1 + !!checksum2_valid
);
818 static void r5l_append_payload_page(struct r5l_log
*log
, struct page
*page
)
820 struct r5l_io_unit
*io
= log
->current_io
;
822 if (io
->need_split_bio
) {
823 BUG_ON(io
->split_bio
);
824 io
->split_bio
= io
->current_bio
;
825 io
->current_bio
= r5l_bio_alloc(log
);
826 bio_chain(io
->current_bio
, io
->split_bio
);
827 io
->need_split_bio
= false;
830 if (!bio_add_page(io
->current_bio
, page
, PAGE_SIZE
, 0))
833 r5_reserve_log_entry(log
, io
);
836 static void r5l_append_flush_payload(struct r5l_log
*log
, sector_t sect
)
838 struct mddev
*mddev
= log
->rdev
->mddev
;
839 struct r5conf
*conf
= mddev
->private;
840 struct r5l_io_unit
*io
;
841 struct r5l_payload_flush
*payload
;
845 * payload_flush requires extra writes to the journal.
846 * To avoid handling the extra IO in quiesce, just skip
852 mutex_lock(&log
->io_mutex
);
853 meta_size
= sizeof(struct r5l_payload_flush
) + sizeof(__le64
);
855 if (r5l_get_meta(log
, meta_size
)) {
856 mutex_unlock(&log
->io_mutex
);
860 /* current implementation is one stripe per flush payload */
861 io
= log
->current_io
;
862 payload
= page_address(io
->meta_page
) + io
->meta_offset
;
863 payload
->header
.type
= cpu_to_le16(R5LOG_PAYLOAD_FLUSH
);
864 payload
->header
.flags
= cpu_to_le16(0);
865 payload
->size
= cpu_to_le32(sizeof(__le64
));
866 payload
->flush_stripes
[0] = cpu_to_le64(sect
);
867 io
->meta_offset
+= meta_size
;
868 mutex_unlock(&log
->io_mutex
);
871 static int r5l_log_stripe(struct r5l_log
*log
, struct stripe_head
*sh
,
872 int data_pages
, int parity_pages
)
877 struct r5l_io_unit
*io
;
880 ((sizeof(struct r5l_payload_data_parity
) + sizeof(__le32
))
882 sizeof(struct r5l_payload_data_parity
) +
883 sizeof(__le32
) * parity_pages
;
885 ret
= r5l_get_meta(log
, meta_size
);
889 io
= log
->current_io
;
891 if (test_and_clear_bit(STRIPE_R5C_PREFLUSH
, &sh
->state
))
894 for (i
= 0; i
< sh
->disks
; i
++) {
895 if (!test_bit(R5_Wantwrite
, &sh
->dev
[i
].flags
) ||
896 test_bit(R5_InJournal
, &sh
->dev
[i
].flags
))
898 if (i
== sh
->pd_idx
|| i
== sh
->qd_idx
)
900 if (test_bit(R5_WantFUA
, &sh
->dev
[i
].flags
) &&
901 log
->r5c_journal_mode
== R5C_JOURNAL_MODE_WRITE_BACK
) {
904 * we need to flush journal to make sure recovery can
905 * reach the data with fua flag
909 r5l_append_payload_meta(log
, R5LOG_PAYLOAD_DATA
,
910 raid5_compute_blocknr(sh
, i
, 0),
911 sh
->dev
[i
].log_checksum
, 0, false);
912 r5l_append_payload_page(log
, sh
->dev
[i
].page
);
915 if (parity_pages
== 2) {
916 r5l_append_payload_meta(log
, R5LOG_PAYLOAD_PARITY
,
917 sh
->sector
, sh
->dev
[sh
->pd_idx
].log_checksum
,
918 sh
->dev
[sh
->qd_idx
].log_checksum
, true);
919 r5l_append_payload_page(log
, sh
->dev
[sh
->pd_idx
].page
);
920 r5l_append_payload_page(log
, sh
->dev
[sh
->qd_idx
].page
);
921 } else if (parity_pages
== 1) {
922 r5l_append_payload_meta(log
, R5LOG_PAYLOAD_PARITY
,
923 sh
->sector
, sh
->dev
[sh
->pd_idx
].log_checksum
,
925 r5l_append_payload_page(log
, sh
->dev
[sh
->pd_idx
].page
);
926 } else /* Just writing data, not parity, in caching phase */
927 BUG_ON(parity_pages
!= 0);
929 list_add_tail(&sh
->log_list
, &io
->stripe_list
);
930 atomic_inc(&io
->pending_stripe
);
933 if (log
->r5c_journal_mode
== R5C_JOURNAL_MODE_WRITE_THROUGH
)
936 if (sh
->log_start
== MaxSector
) {
937 BUG_ON(!list_empty(&sh
->r5c
));
938 sh
->log_start
= io
->log_start
;
939 spin_lock_irq(&log
->stripe_in_journal_lock
);
940 list_add_tail(&sh
->r5c
,
941 &log
->stripe_in_journal_list
);
942 spin_unlock_irq(&log
->stripe_in_journal_lock
);
943 atomic_inc(&log
->stripe_in_journal_count
);
948 /* add stripe to no_space_stripes, and then wake up reclaim */
949 static inline void r5l_add_no_space_stripe(struct r5l_log
*log
,
950 struct stripe_head
*sh
)
952 spin_lock(&log
->no_space_stripes_lock
);
953 list_add_tail(&sh
->log_list
, &log
->no_space_stripes
);
954 spin_unlock(&log
->no_space_stripes_lock
);
958 * running in raid5d, where reclaim could wait for raid5d too (when it flushes
959 * data from log to raid disks), so we shouldn't wait for reclaim here
961 int r5l_write_stripe(struct r5l_log
*log
, struct stripe_head
*sh
)
963 struct r5conf
*conf
= sh
->raid_conf
;
965 int data_pages
, parity_pages
;
969 bool wake_reclaim
= false;
973 /* Don't support stripe batch */
974 if (sh
->log_io
|| !test_bit(R5_Wantwrite
, &sh
->dev
[sh
->pd_idx
].flags
) ||
975 test_bit(STRIPE_SYNCING
, &sh
->state
)) {
976 /* the stripe is written to log, we start writing it to raid */
977 clear_bit(STRIPE_LOG_TRAPPED
, &sh
->state
);
981 WARN_ON(test_bit(STRIPE_R5C_CACHING
, &sh
->state
));
983 for (i
= 0; i
< sh
->disks
; i
++) {
986 if (!test_bit(R5_Wantwrite
, &sh
->dev
[i
].flags
) ||
987 test_bit(R5_InJournal
, &sh
->dev
[i
].flags
))
991 /* checksum is already calculated in last run */
992 if (test_bit(STRIPE_LOG_TRAPPED
, &sh
->state
))
994 addr
= kmap_atomic(sh
->dev
[i
].page
);
995 sh
->dev
[i
].log_checksum
= crc32c_le(log
->uuid_checksum
,
999 parity_pages
= 1 + !!(sh
->qd_idx
>= 0);
1000 data_pages
= write_disks
- parity_pages
;
1002 set_bit(STRIPE_LOG_TRAPPED
, &sh
->state
);
1004 * The stripe must enter state machine again to finish the write, so
1007 clear_bit(STRIPE_DELAYED
, &sh
->state
);
1008 atomic_inc(&sh
->count
);
1010 mutex_lock(&log
->io_mutex
);
1012 reserve
= (1 + write_disks
) << (PAGE_SHIFT
- 9);
1014 if (log
->r5c_journal_mode
== R5C_JOURNAL_MODE_WRITE_THROUGH
) {
1015 if (!r5l_has_free_space(log
, reserve
)) {
1016 r5l_add_no_space_stripe(log
, sh
);
1017 wake_reclaim
= true;
1019 ret
= r5l_log_stripe(log
, sh
, data_pages
, parity_pages
);
1021 spin_lock_irq(&log
->io_list_lock
);
1022 list_add_tail(&sh
->log_list
,
1023 &log
->no_mem_stripes
);
1024 spin_unlock_irq(&log
->io_list_lock
);
1027 } else { /* R5C_JOURNAL_MODE_WRITE_BACK */
1029 * log space critical, do not process stripes that are
1030 * not in cache yet (sh->log_start == MaxSector).
1032 if (test_bit(R5C_LOG_CRITICAL
, &conf
->cache_state
) &&
1033 sh
->log_start
== MaxSector
) {
1034 r5l_add_no_space_stripe(log
, sh
);
1035 wake_reclaim
= true;
1037 } else if (!r5l_has_free_space(log
, reserve
)) {
1038 if (sh
->log_start
== log
->last_checkpoint
)
1041 r5l_add_no_space_stripe(log
, sh
);
1043 ret
= r5l_log_stripe(log
, sh
, data_pages
, parity_pages
);
1045 spin_lock_irq(&log
->io_list_lock
);
1046 list_add_tail(&sh
->log_list
,
1047 &log
->no_mem_stripes
);
1048 spin_unlock_irq(&log
->io_list_lock
);
1053 mutex_unlock(&log
->io_mutex
);
1055 r5l_wake_reclaim(log
, reserve
);
1059 void r5l_write_stripe_run(struct r5l_log
*log
)
1063 mutex_lock(&log
->io_mutex
);
1064 r5l_submit_current_io(log
);
1065 mutex_unlock(&log
->io_mutex
);
1068 int r5l_handle_flush_request(struct r5l_log
*log
, struct bio
*bio
)
1073 if (log
->r5c_journal_mode
== R5C_JOURNAL_MODE_WRITE_THROUGH
) {
		/*
		 * in write through (journal only)
		 * we flush log disk cache first, then write stripe data to
		 * raid disks. So if bio is finished, the log disk cache is
		 * flushed already. Recovery guarantees we can recover
		 * the bio from the log disk, so we don't need to flush again.
		 */
1081 if (bio
->bi_iter
.bi_size
== 0) {
1085 bio
->bi_opf
&= ~REQ_PREFLUSH
;
1087 /* write back (with cache) */
1088 if (bio
->bi_iter
.bi_size
== 0) {
1089 mutex_lock(&log
->io_mutex
);
1090 r5l_get_meta(log
, 0);
1091 bio_list_add(&log
->current_io
->flush_barriers
, bio
);
1092 log
->current_io
->has_flush
= 1;
1093 log
->current_io
->has_null_flush
= 1;
1094 atomic_inc(&log
->current_io
->pending_stripe
);
1095 r5l_submit_current_io(log
);
1096 mutex_unlock(&log
->io_mutex
);
1103 /* This will run after log space is reclaimed */
1104 static void r5l_run_no_space_stripes(struct r5l_log
*log
)
1106 struct stripe_head
*sh
;
1108 spin_lock(&log
->no_space_stripes_lock
);
1109 while (!list_empty(&log
->no_space_stripes
)) {
1110 sh
= list_first_entry(&log
->no_space_stripes
,
1111 struct stripe_head
, log_list
);
1112 list_del_init(&sh
->log_list
);
1113 set_bit(STRIPE_HANDLE
, &sh
->state
);
1114 raid5_release_stripe(sh
);
1116 spin_unlock(&log
->no_space_stripes_lock
);
1120 * calculate new last_checkpoint
1121 * for write through mode, returns log->next_checkpoint
1122 * for write back, returns log_start of first sh in stripe_in_journal_list
1124 static sector_t
r5c_calculate_new_cp(struct r5conf
*conf
)
1126 struct stripe_head
*sh
;
1127 struct r5l_log
*log
= conf
->log
;
1129 unsigned long flags
;
1131 if (log
->r5c_journal_mode
== R5C_JOURNAL_MODE_WRITE_THROUGH
)
1132 return log
->next_checkpoint
;
1134 spin_lock_irqsave(&log
->stripe_in_journal_lock
, flags
);
1135 if (list_empty(&conf
->log
->stripe_in_journal_list
)) {
1136 /* all stripes flushed */
1137 spin_unlock_irqrestore(&log
->stripe_in_journal_lock
, flags
);
1138 return log
->next_checkpoint
;
1140 sh
= list_first_entry(&conf
->log
->stripe_in_journal_list
,
1141 struct stripe_head
, r5c
);
1142 new_cp
= sh
->log_start
;
1143 spin_unlock_irqrestore(&log
->stripe_in_journal_lock
, flags
);
1147 static sector_t
r5l_reclaimable_space(struct r5l_log
*log
)
1149 struct r5conf
*conf
= log
->rdev
->mddev
->private;
1151 return r5l_ring_distance(log
, log
->last_checkpoint
,
1152 r5c_calculate_new_cp(conf
));
1155 static void r5l_run_no_mem_stripe(struct r5l_log
*log
)
1157 struct stripe_head
*sh
;
1159 assert_spin_locked(&log
->io_list_lock
);
1161 if (!list_empty(&log
->no_mem_stripes
)) {
1162 sh
= list_first_entry(&log
->no_mem_stripes
,
1163 struct stripe_head
, log_list
);
1164 list_del_init(&sh
->log_list
);
1165 set_bit(STRIPE_HANDLE
, &sh
->state
);
1166 raid5_release_stripe(sh
);
1170 static bool r5l_complete_finished_ios(struct r5l_log
*log
)
1172 struct r5l_io_unit
*io
, *next
;
1175 assert_spin_locked(&log
->io_list_lock
);
1177 list_for_each_entry_safe(io
, next
, &log
->finished_ios
, log_sibling
) {
1178 /* don't change list order */
1179 if (io
->state
< IO_UNIT_STRIPE_END
)
1182 log
->next_checkpoint
= io
->log_start
;
1184 list_del(&io
->log_sibling
);
1185 mempool_free(io
, log
->io_pool
);
1186 r5l_run_no_mem_stripe(log
);
1194 static void __r5l_stripe_write_finished(struct r5l_io_unit
*io
)
1196 struct r5l_log
*log
= io
->log
;
1197 struct r5conf
*conf
= log
->rdev
->mddev
->private;
1198 unsigned long flags
;
1200 spin_lock_irqsave(&log
->io_list_lock
, flags
);
1201 __r5l_set_io_unit_state(io
, IO_UNIT_STRIPE_END
);
1203 if (!r5l_complete_finished_ios(log
)) {
1204 spin_unlock_irqrestore(&log
->io_list_lock
, flags
);
1208 if (r5l_reclaimable_space(log
) > log
->max_free_space
||
1209 test_bit(R5C_LOG_TIGHT
, &conf
->cache_state
))
1210 r5l_wake_reclaim(log
, 0);
1212 spin_unlock_irqrestore(&log
->io_list_lock
, flags
);
1213 wake_up(&log
->iounit_wait
);
1216 void r5l_stripe_write_finished(struct stripe_head
*sh
)
1218 struct r5l_io_unit
*io
;
1223 if (io
&& atomic_dec_and_test(&io
->pending_stripe
))
1224 __r5l_stripe_write_finished(io
);
1227 static void r5l_log_flush_endio(struct bio
*bio
)
1229 struct r5l_log
*log
= container_of(bio
, struct r5l_log
,
1231 unsigned long flags
;
1232 struct r5l_io_unit
*io
;
1235 md_error(log
->rdev
->mddev
, log
->rdev
);
1237 spin_lock_irqsave(&log
->io_list_lock
, flags
);
1238 list_for_each_entry(io
, &log
->flushing_ios
, log_sibling
)
1239 r5l_io_run_stripes(io
);
1240 list_splice_tail_init(&log
->flushing_ios
, &log
->finished_ios
);
1241 spin_unlock_irqrestore(&log
->io_list_lock
, flags
);
/*
 * Starting dispatch IO to raid.
 * io_unit(meta) consists of a log. There is one situation we want to avoid: a
 * broken meta block in the middle of the log means recovery cannot find the
 * meta blocks at the head of the log. If an operation requires the meta block
 * at the head of the log to be persistent in the log, we must make sure the
 * meta blocks before it are persistent in the log too. A case is:
 *
 * stripe data/parity is in the log and we start writing the stripe to the
 * raid disks. The stripe data/parity must be persistent in the log before we
 * do the write to the raid disks.
 *
 * The solution is to strictly maintain io_unit list order. In this case, we
 * only write stripes of an io_unit to the raid disks until the io_unit is the
 * first one whose data/parity is in the log.
 */
1258 void r5l_flush_stripe_to_raid(struct r5l_log
*log
)
1262 if (!log
|| !log
->need_cache_flush
)
1265 spin_lock_irq(&log
->io_list_lock
);
1266 /* flush bio is running */
1267 if (!list_empty(&log
->flushing_ios
)) {
1268 spin_unlock_irq(&log
->io_list_lock
);
1271 list_splice_tail_init(&log
->io_end_ios
, &log
->flushing_ios
);
1272 do_flush
= !list_empty(&log
->flushing_ios
);
1273 spin_unlock_irq(&log
->io_list_lock
);
1277 bio_reset(&log
->flush_bio
);
1278 log
->flush_bio
.bi_bdev
= log
->rdev
->bdev
;
1279 log
->flush_bio
.bi_end_io
= r5l_log_flush_endio
;
1280 log
->flush_bio
.bi_opf
= REQ_OP_WRITE
| REQ_PREFLUSH
;
1281 submit_bio(&log
->flush_bio
);
1284 static void r5l_write_super(struct r5l_log
*log
, sector_t cp
);
1285 static void r5l_write_super_and_discard_space(struct r5l_log
*log
,
1288 struct block_device
*bdev
= log
->rdev
->bdev
;
1289 struct mddev
*mddev
;
1291 r5l_write_super(log
, end
);
1293 if (!blk_queue_discard(bdev_get_queue(bdev
)))
1296 mddev
= log
->rdev
->mddev
;
	/*
	 * Discard could zero data, so before discard we must make sure
	 * the superblock is updated to the new log tail. Updating the
	 * superblock (either by calling md_update_sb() directly or depending
	 * on the md thread) must hold the reconfig mutex. On the other hand,
	 * raid5_quiesce is called with reconfig_mutex held. The first step of
	 * raid5_quiesce() is waiting for all IO to finish, hence waiting for
	 * the reclaim thread, while the reclaim thread is calling this
	 * function and waiting for the reconfig mutex. So there is a deadlock.
	 * We work around this issue with a trylock.
	 * FIXME: we could miss discard if we can't take reconfig mutex
	 */
1308 set_mask_bits(&mddev
->sb_flags
, 0,
1309 BIT(MD_SB_CHANGE_DEVS
) | BIT(MD_SB_CHANGE_PENDING
));
1310 if (!mddev_trylock(mddev
))
1312 md_update_sb(mddev
, 1);
1313 mddev_unlock(mddev
);
1315 /* discard IO error really doesn't matter, ignore it */
1316 if (log
->last_checkpoint
< end
) {
1317 blkdev_issue_discard(bdev
,
1318 log
->last_checkpoint
+ log
->rdev
->data_offset
,
1319 end
- log
->last_checkpoint
, GFP_NOIO
, 0);
1321 blkdev_issue_discard(bdev
,
1322 log
->last_checkpoint
+ log
->rdev
->data_offset
,
1323 log
->device_size
- log
->last_checkpoint
,
1325 blkdev_issue_discard(bdev
, log
->rdev
->data_offset
, end
,
1331 * r5c_flush_stripe moves stripe from cached list to handle_list. When called,
1332 * the stripe must be on r5c_cached_full_stripes or r5c_cached_partial_stripes.
1334 * must hold conf->device_lock
1336 static void r5c_flush_stripe(struct r5conf
*conf
, struct stripe_head
*sh
)
1338 BUG_ON(list_empty(&sh
->lru
));
1339 BUG_ON(!test_bit(STRIPE_R5C_CACHING
, &sh
->state
));
1340 BUG_ON(test_bit(STRIPE_HANDLE
, &sh
->state
));
1343 * The stripe is not ON_RELEASE_LIST, so it is safe to call
1344 * raid5_release_stripe() while holding conf->device_lock
1346 BUG_ON(test_bit(STRIPE_ON_RELEASE_LIST
, &sh
->state
));
1347 assert_spin_locked(&conf
->device_lock
);
1349 list_del_init(&sh
->lru
);
1350 atomic_inc(&sh
->count
);
1352 set_bit(STRIPE_HANDLE
, &sh
->state
);
1353 atomic_inc(&conf
->active_stripes
);
1354 r5c_make_stripe_write_out(sh
);
1356 if (test_bit(STRIPE_R5C_PARTIAL_STRIPE
, &sh
->state
))
1357 atomic_inc(&conf
->r5c_flushing_partial_stripes
);
1359 atomic_inc(&conf
->r5c_flushing_full_stripes
);
1360 raid5_release_stripe(sh
);
/*
 * if num == 0, flush all full stripes
 * if num > 0, flush all full stripes. If fewer than num full stripes are
 * flushed, flush some partial stripes until a total of num stripes are
 * flushed or there are no more cached stripes.
 */
1369 void r5c_flush_cache(struct r5conf
*conf
, int num
)
1372 struct stripe_head
*sh
, *next
;
1374 assert_spin_locked(&conf
->device_lock
);
1379 list_for_each_entry_safe(sh
, next
, &conf
->r5c_full_stripe_list
, lru
) {
1380 r5c_flush_stripe(conf
, sh
);
1386 list_for_each_entry_safe(sh
, next
,
1387 &conf
->r5c_partial_stripe_list
, lru
) {
1388 r5c_flush_stripe(conf
, sh
);
1394 static void r5c_do_reclaim(struct r5conf
*conf
)
1396 struct r5l_log
*log
= conf
->log
;
1397 struct stripe_head
*sh
;
1399 unsigned long flags
;
1401 int stripes_to_flush
;
1402 int flushing_partial
, flushing_full
;
1404 if (!r5c_is_writeback(log
))
1407 flushing_partial
= atomic_read(&conf
->r5c_flushing_partial_stripes
);
1408 flushing_full
= atomic_read(&conf
->r5c_flushing_full_stripes
);
1409 total_cached
= atomic_read(&conf
->r5c_cached_partial_stripes
) +
1410 atomic_read(&conf
->r5c_cached_full_stripes
) -
1411 flushing_full
- flushing_partial
;
1413 if (total_cached
> conf
->min_nr_stripes
* 3 / 4 ||
1414 atomic_read(&conf
->empty_inactive_list_nr
) > 0)
1416 * if stripe cache pressure high, flush all full stripes and
1417 * some partial stripes
1419 stripes_to_flush
= R5C_RECLAIM_STRIPE_GROUP
;
1420 else if (total_cached
> conf
->min_nr_stripes
* 1 / 2 ||
1421 atomic_read(&conf
->r5c_cached_full_stripes
) - flushing_full
>
1422 R5C_FULL_STRIPE_FLUSH_BATCH(conf
))
		/*
		 * if stripe cache pressure is moderate, or if there are many
		 * full stripes, flush all full stripes
		 */
1427 stripes_to_flush
= 0;
1429 /* no need to flush */
1430 stripes_to_flush
= -1;
1432 if (stripes_to_flush
>= 0) {
1433 spin_lock_irqsave(&conf
->device_lock
, flags
);
1434 r5c_flush_cache(conf
, stripes_to_flush
);
1435 spin_unlock_irqrestore(&conf
->device_lock
, flags
);
1438 /* if log space is tight, flush stripes on stripe_in_journal_list */
1439 if (test_bit(R5C_LOG_TIGHT
, &conf
->cache_state
)) {
1440 spin_lock_irqsave(&log
->stripe_in_journal_lock
, flags
);
1441 spin_lock(&conf
->device_lock
);
1442 list_for_each_entry(sh
, &log
->stripe_in_journal_list
, r5c
) {
1444 * stripes on stripe_in_journal_list could be in any
1445 * state of the stripe_cache state machine. In this
1446 * case, we only want to flush stripe on
1447 * r5c_cached_full/partial_stripes. The following
1448 * condition makes sure the stripe is on one of the
1451 if (!list_empty(&sh
->lru
) &&
1452 !test_bit(STRIPE_HANDLE
, &sh
->state
) &&
1453 atomic_read(&sh
->count
) == 0) {
1454 r5c_flush_stripe(conf
, sh
);
1455 if (count
++ >= R5C_RECLAIM_STRIPE_GROUP
)
1459 spin_unlock(&conf
->device_lock
);
1460 spin_unlock_irqrestore(&log
->stripe_in_journal_lock
, flags
);
1463 if (!test_bit(R5C_LOG_CRITICAL
, &conf
->cache_state
))
1464 r5l_run_no_space_stripes(log
);
1466 md_wakeup_thread(conf
->mddev
->thread
);
1469 static void r5l_do_reclaim(struct r5l_log
*log
)
1471 struct r5conf
*conf
= log
->rdev
->mddev
->private;
1472 sector_t reclaim_target
= xchg(&log
->reclaim_target
, 0);
1473 sector_t reclaimable
;
1474 sector_t next_checkpoint
;
1477 spin_lock_irq(&log
->io_list_lock
);
1478 write_super
= r5l_reclaimable_space(log
) > log
->max_free_space
||
1479 reclaim_target
!= 0 || !list_empty(&log
->no_space_stripes
);
1481 * move proper io_unit to reclaim list. We should not change the order.
1482 * reclaimable/unreclaimable io_unit can be mixed in the list, we
1483 * shouldn't reuse space of an unreclaimable io_unit
1486 reclaimable
= r5l_reclaimable_space(log
);
1487 if (reclaimable
>= reclaim_target
||
1488 (list_empty(&log
->running_ios
) &&
1489 list_empty(&log
->io_end_ios
) &&
1490 list_empty(&log
->flushing_ios
) &&
1491 list_empty(&log
->finished_ios
)))
1494 md_wakeup_thread(log
->rdev
->mddev
->thread
);
1495 wait_event_lock_irq(log
->iounit_wait
,
1496 r5l_reclaimable_space(log
) > reclaimable
,
1500 next_checkpoint
= r5c_calculate_new_cp(conf
);
1501 spin_unlock_irq(&log
->io_list_lock
);
1503 if (reclaimable
== 0 || !write_super
)
1507 * write_super will flush cache of each raid disk. We must write super
1508 * here, because the log area might be reused soon and we don't want to
1511 r5l_write_super_and_discard_space(log
, next_checkpoint
);
1513 mutex_lock(&log
->io_mutex
);
1514 log
->last_checkpoint
= next_checkpoint
;
1515 r5c_update_log_state(log
);
1516 mutex_unlock(&log
->io_mutex
);
1518 r5l_run_no_space_stripes(log
);
1521 static void r5l_reclaim_thread(struct md_thread
*thread
)
1523 struct mddev
*mddev
= thread
->mddev
;
1524 struct r5conf
*conf
= mddev
->private;
1525 struct r5l_log
*log
= conf
->log
;
1529 r5c_do_reclaim(conf
);
1530 r5l_do_reclaim(log
);
1533 void r5l_wake_reclaim(struct r5l_log
*log
, sector_t space
)
1535 unsigned long target
;
1536 unsigned long new = (unsigned long)space
; /* overflow in theory */
1541 target
= log
->reclaim_target
;
1544 } while (cmpxchg(&log
->reclaim_target
, target
, new) != target
);
1545 md_wakeup_thread(log
->reclaim_thread
);
1548 void r5l_quiesce(struct r5l_log
*log
, int state
)
1550 struct mddev
*mddev
;
1551 if (!log
|| state
== 2)
1554 kthread_unpark(log
->reclaim_thread
->tsk
);
1555 else if (state
== 1) {
1556 /* make sure r5l_write_super_and_discard_space exits */
1557 mddev
= log
->rdev
->mddev
;
1558 wake_up(&mddev
->sb_wait
);
1559 kthread_park(log
->reclaim_thread
->tsk
);
1560 r5l_wake_reclaim(log
, MaxSector
);
1561 r5l_do_reclaim(log
);
1565 bool r5l_log_disk_error(struct r5conf
*conf
)
1567 struct r5l_log
*log
;
1569 /* don't allow write if journal disk is missing */
1571 log
= rcu_dereference(conf
->log
);
1574 ret
= test_bit(MD_HAS_JOURNAL
, &conf
->mddev
->flags
);
1576 ret
= test_bit(Faulty
, &log
->rdev
->flags
);
#define R5L_RECOVERY_PAGE_POOL_SIZE 256

struct r5l_recovery_ctx {
	struct page *meta_page;		/* current meta */
	sector_t meta_total_blocks;	/* total size of current meta and data */
	sector_t pos;			/* recovery position */
	u64 seq;			/* recovery position seq */
	int data_parity_stripes;	/* number of data_parity stripes */
	int data_only_stripes;		/* number of data_only stripes */
	struct list_head cached_list;

	/*
	 * read ahead page pool (ra_pool)
	 * In recovery, the log is read sequentially. It is not efficient to
	 * read every page with sync_page_io(). The read ahead page pool
	 * reads multiple pages with one IO, so further log reads can
	 * just copy data from the pool.
	 */
	struct page *ra_pool[R5L_RECOVERY_PAGE_POOL_SIZE];
	sector_t pool_offset;	/* offset of first page in the pool */
	int total_pages;	/* total allocated pages */
	int valid_pages;	/* pages with valid data */
	struct bio *ra_bio;	/* bio to do the read ahead */
};
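/*
 * Illustrative sketch of the pool lookup done by r5l_recovery_read_page()
 * below: a log offset inside [pool_offset, pool_offset + valid_pages *
 * BLOCK_SECTORS) maps to a pool page as
 *
 *	idx = (offset - ctx->pool_offset) >> BLOCK_SECTOR_SHIFT;
 *	memcpy(page_address(page), page_address(ctx->ra_pool[idx]), PAGE_SIZE);
 *
 * otherwise the pool is refilled starting at "offset" first.
 */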
1606 static int r5l_recovery_allocate_ra_pool(struct r5l_log
*log
,
1607 struct r5l_recovery_ctx
*ctx
)
1611 ctx
->ra_bio
= bio_alloc_bioset(GFP_KERNEL
, BIO_MAX_PAGES
, log
->bs
);
1615 ctx
->valid_pages
= 0;
1616 ctx
->total_pages
= 0;
1617 while (ctx
->total_pages
< R5L_RECOVERY_PAGE_POOL_SIZE
) {
1618 page
= alloc_page(GFP_KERNEL
);
1622 ctx
->ra_pool
[ctx
->total_pages
] = page
;
1623 ctx
->total_pages
+= 1;
1626 if (ctx
->total_pages
== 0) {
1627 bio_put(ctx
->ra_bio
);
1631 ctx
->pool_offset
= 0;
1635 static void r5l_recovery_free_ra_pool(struct r5l_log
*log
,
1636 struct r5l_recovery_ctx
*ctx
)
1640 for (i
= 0; i
< ctx
->total_pages
; ++i
)
1641 put_page(ctx
->ra_pool
[i
]);
1642 bio_put(ctx
->ra_bio
);
1646 * fetch ctx->valid_pages pages from offset
1647 * In normal cases, ctx->valid_pages == ctx->total_pages after the call.
1648 * However, if the offset is close to the end of the journal device,
1649 * ctx->valid_pages could be smaller than ctx->total_pages
1651 static int r5l_recovery_fetch_ra_pool(struct r5l_log
*log
,
1652 struct r5l_recovery_ctx
*ctx
,
1655 bio_reset(ctx
->ra_bio
);
1656 ctx
->ra_bio
->bi_bdev
= log
->rdev
->bdev
;
1657 bio_set_op_attrs(ctx
->ra_bio
, REQ_OP_READ
, 0);
1658 ctx
->ra_bio
->bi_iter
.bi_sector
= log
->rdev
->data_offset
+ offset
;
1660 ctx
->valid_pages
= 0;
1661 ctx
->pool_offset
= offset
;
1663 while (ctx
->valid_pages
< ctx
->total_pages
) {
1664 bio_add_page(ctx
->ra_bio
,
1665 ctx
->ra_pool
[ctx
->valid_pages
], PAGE_SIZE
, 0);
1666 ctx
->valid_pages
+= 1;
1668 offset
= r5l_ring_add(log
, offset
, BLOCK_SECTORS
);
1670 if (offset
== 0) /* reached end of the device */
1674 return submit_bio_wait(ctx
->ra_bio
);
1678 * try read a page from the read ahead page pool, if the page is not in the
1679 * pool, call r5l_recovery_fetch_ra_pool
1681 static int r5l_recovery_read_page(struct r5l_log
*log
,
1682 struct r5l_recovery_ctx
*ctx
,
1688 if (offset
< ctx
->pool_offset
||
1689 offset
>= ctx
->pool_offset
+ ctx
->valid_pages
* BLOCK_SECTORS
) {
1690 ret
= r5l_recovery_fetch_ra_pool(log
, ctx
, offset
);
1695 BUG_ON(offset
< ctx
->pool_offset
||
1696 offset
>= ctx
->pool_offset
+ ctx
->valid_pages
* BLOCK_SECTORS
);
1698 memcpy(page_address(page
),
1699 page_address(ctx
->ra_pool
[(offset
- ctx
->pool_offset
) >>
1700 BLOCK_SECTOR_SHIFT
]),
1705 static int r5l_recovery_read_meta_block(struct r5l_log
*log
,
1706 struct r5l_recovery_ctx
*ctx
)
1708 struct page
*page
= ctx
->meta_page
;
1709 struct r5l_meta_block
*mb
;
1710 u32 crc
, stored_crc
;
1713 ret
= r5l_recovery_read_page(log
, ctx
, page
, ctx
->pos
);
1717 mb
= page_address(page
);
1718 stored_crc
= le32_to_cpu(mb
->checksum
);
1721 if (le32_to_cpu(mb
->magic
) != R5LOG_MAGIC
||
1722 le64_to_cpu(mb
->seq
) != ctx
->seq
||
1723 mb
->version
!= R5LOG_VERSION
||
1724 le64_to_cpu(mb
->position
) != ctx
->pos
)
1727 crc
= crc32c_le(log
->uuid_checksum
, mb
, PAGE_SIZE
);
1728 if (stored_crc
!= crc
)
1731 if (le32_to_cpu(mb
->meta_size
) > PAGE_SIZE
)
1734 ctx
->meta_total_blocks
= BLOCK_SECTORS
;
1740 r5l_recovery_create_empty_meta_block(struct r5l_log
*log
,
1742 sector_t pos
, u64 seq
)
1744 struct r5l_meta_block
*mb
;
1746 mb
= page_address(page
);
1748 mb
->magic
= cpu_to_le32(R5LOG_MAGIC
);
1749 mb
->version
= R5LOG_VERSION
;
1750 mb
->meta_size
= cpu_to_le32(sizeof(struct r5l_meta_block
));
1751 mb
->seq
= cpu_to_le64(seq
);
1752 mb
->position
= cpu_to_le64(pos
);
1755 static int r5l_log_write_empty_meta_block(struct r5l_log
*log
, sector_t pos
,
1759 struct r5l_meta_block
*mb
;
1761 page
= alloc_page(GFP_KERNEL
);
1764 r5l_recovery_create_empty_meta_block(log
, page
, pos
, seq
);
1765 mb
= page_address(page
);
1766 mb
->checksum
= cpu_to_le32(crc32c_le(log
->uuid_checksum
,
1768 if (!sync_page_io(log
->rdev
, pos
, PAGE_SIZE
, page
, REQ_OP_WRITE
,
/*
 * r5l_recovery_load_data and r5l_recovery_load_parity use flag R5_Wantwrite
 * to mark valid (potentially not flushed) data in the journal.
 *
 * We already verified the checksum in r5l_recovery_verify_data_checksum_for_mb,
 * so there should not be any mismatch here.
 */
1784 static void r5l_recovery_load_data(struct r5l_log
*log
,
1785 struct stripe_head
*sh
,
1786 struct r5l_recovery_ctx
*ctx
,
1787 struct r5l_payload_data_parity
*payload
,
1788 sector_t log_offset
)
1790 struct mddev
*mddev
= log
->rdev
->mddev
;
1791 struct r5conf
*conf
= mddev
->private;
1794 raid5_compute_sector(conf
,
1795 le64_to_cpu(payload
->location
), 0,
1797 r5l_recovery_read_page(log
, ctx
, sh
->dev
[dd_idx
].page
, log_offset
);
1798 sh
->dev
[dd_idx
].log_checksum
=
1799 le32_to_cpu(payload
->checksum
[0]);
1800 ctx
->meta_total_blocks
+= BLOCK_SECTORS
;
1802 set_bit(R5_Wantwrite
, &sh
->dev
[dd_idx
].flags
);
1803 set_bit(STRIPE_R5C_CACHING
, &sh
->state
);
1806 static void r5l_recovery_load_parity(struct r5l_log
*log
,
1807 struct stripe_head
*sh
,
1808 struct r5l_recovery_ctx
*ctx
,
1809 struct r5l_payload_data_parity
*payload
,
1810 sector_t log_offset
)
1812 struct mddev
*mddev
= log
->rdev
->mddev
;
1813 struct r5conf
*conf
= mddev
->private;
1815 ctx
->meta_total_blocks
+= BLOCK_SECTORS
* conf
->max_degraded
;
1816 r5l_recovery_read_page(log
, ctx
, sh
->dev
[sh
->pd_idx
].page
, log_offset
);
1817 sh
->dev
[sh
->pd_idx
].log_checksum
=
1818 le32_to_cpu(payload
->checksum
[0]);
1819 set_bit(R5_Wantwrite
, &sh
->dev
[sh
->pd_idx
].flags
);
1821 if (sh
->qd_idx
>= 0) {
1822 r5l_recovery_read_page(
1823 log
, ctx
, sh
->dev
[sh
->qd_idx
].page
,
1824 r5l_ring_add(log
, log_offset
, BLOCK_SECTORS
));
1825 sh
->dev
[sh
->qd_idx
].log_checksum
=
1826 le32_to_cpu(payload
->checksum
[1]);
1827 set_bit(R5_Wantwrite
, &sh
->dev
[sh
->qd_idx
].flags
);
1829 clear_bit(STRIPE_R5C_CACHING
, &sh
->state
);
1832 static void r5l_recovery_reset_stripe(struct stripe_head
*sh
)
1837 sh
->log_start
= MaxSector
;
1838 for (i
= sh
->disks
; i
--; )
1839 sh
->dev
[i
].flags
= 0;
1843 r5l_recovery_replay_one_stripe(struct r5conf
*conf
,
1844 struct stripe_head
*sh
,
1845 struct r5l_recovery_ctx
*ctx
)
1847 struct md_rdev
*rdev
, *rrdev
;
1851 for (disk_index
= 0; disk_index
< sh
->disks
; disk_index
++) {
1852 if (!test_bit(R5_Wantwrite
, &sh
->dev
[disk_index
].flags
))
1854 if (disk_index
== sh
->qd_idx
|| disk_index
== sh
->pd_idx
)
	/*
	 * stripes that only have parity must have been flushed
	 * before the crash that we are now recovering from, so
	 * there is nothing more to recover.
	 */
1864 if (data_count
== 0)
1867 for (disk_index
= 0; disk_index
< sh
->disks
; disk_index
++) {
1868 if (!test_bit(R5_Wantwrite
, &sh
->dev
[disk_index
].flags
))
1871 /* in case device is broken */
1873 rdev
= rcu_dereference(conf
->disks
[disk_index
].rdev
);
1875 atomic_inc(&rdev
->nr_pending
);
1877 sync_page_io(rdev
, sh
->sector
, PAGE_SIZE
,
1878 sh
->dev
[disk_index
].page
, REQ_OP_WRITE
, 0,
1880 rdev_dec_pending(rdev
, rdev
->mddev
);
1883 rrdev
= rcu_dereference(conf
->disks
[disk_index
].replacement
);
1885 atomic_inc(&rrdev
->nr_pending
);
1887 sync_page_io(rrdev
, sh
->sector
, PAGE_SIZE
,
1888 sh
->dev
[disk_index
].page
, REQ_OP_WRITE
, 0,
1890 rdev_dec_pending(rrdev
, rrdev
->mddev
);
1895 ctx
->data_parity_stripes
++;
1897 r5l_recovery_reset_stripe(sh
);
1900 static struct stripe_head
*
1901 r5c_recovery_alloc_stripe(struct r5conf
*conf
,
1902 sector_t stripe_sect
)
1904 struct stripe_head
*sh
;
1906 sh
= raid5_get_active_stripe(conf
, stripe_sect
, 0, 1, 0);
1908 return NULL
; /* no more stripe available */
1910 r5l_recovery_reset_stripe(sh
);
1915 static struct stripe_head
*
1916 r5c_recovery_lookup_stripe(struct list_head
*list
, sector_t sect
)
1918 struct stripe_head
*sh
;
1920 list_for_each_entry(sh
, list
, lru
)
1921 if (sh
->sector
== sect
)
1927 r5c_recovery_drop_stripes(struct list_head
*cached_stripe_list
,
1928 struct r5l_recovery_ctx
*ctx
)
1930 struct stripe_head
*sh
, *next
;
1932 list_for_each_entry_safe(sh
, next
, cached_stripe_list
, lru
) {
1933 r5l_recovery_reset_stripe(sh
);
1934 list_del_init(&sh
->lru
);
1935 raid5_release_stripe(sh
);
1940 r5c_recovery_replay_stripes(struct list_head
*cached_stripe_list
,
1941 struct r5l_recovery_ctx
*ctx
)
1943 struct stripe_head
*sh
, *next
;
1945 list_for_each_entry_safe(sh
, next
, cached_stripe_list
, lru
)
1946 if (!test_bit(STRIPE_R5C_CACHING
, &sh
->state
)) {
1947 r5l_recovery_replay_one_stripe(sh
->raid_conf
, sh
, ctx
);
1948 list_del_init(&sh
->lru
);
1949 raid5_release_stripe(sh
);
1953 /* if matches return 0; otherwise return -EINVAL */
1955 r5l_recovery_verify_data_checksum(struct r5l_log
*log
,
1956 struct r5l_recovery_ctx
*ctx
,
1958 sector_t log_offset
, __le32 log_checksum
)
1963 r5l_recovery_read_page(log
, ctx
, page
, log_offset
);
1964 addr
= kmap_atomic(page
);
1965 checksum
= crc32c_le(log
->uuid_checksum
, addr
, PAGE_SIZE
);
1966 kunmap_atomic(addr
);
1967 return (le32_to_cpu(log_checksum
) == checksum
) ? 0 : -EINVAL
;
/*
 * before loading data to the stripe cache, we need to verify the checksum for
 * all data; if there is a mismatch for any data page, we drop all data in the
 * meta block
 */
1975 r5l_recovery_verify_data_checksum_for_mb(struct r5l_log
*log
,
1976 struct r5l_recovery_ctx
*ctx
)
1978 struct mddev
*mddev
= log
->rdev
->mddev
;
1979 struct r5conf
*conf
= mddev
->private;
1980 struct r5l_meta_block
*mb
= page_address(ctx
->meta_page
);
1981 sector_t mb_offset
= sizeof(struct r5l_meta_block
);
1982 sector_t log_offset
= r5l_ring_add(log
, ctx
->pos
, BLOCK_SECTORS
);
1984 struct r5l_payload_data_parity
*payload
;
1985 struct r5l_payload_flush
*payload_flush
;
1987 page
= alloc_page(GFP_KERNEL
);
1991 while (mb_offset
< le32_to_cpu(mb
->meta_size
)) {
1992 payload
= (void *)mb
+ mb_offset
;
1993 payload_flush
= (void *)mb
+ mb_offset
;
1995 if (le16_to_cpu(payload
->header
.type
) == R5LOG_PAYLOAD_DATA
) {
1996 if (r5l_recovery_verify_data_checksum(
1997 log
, ctx
, page
, log_offset
,
1998 payload
->checksum
[0]) < 0)
2000 } else if (le16_to_cpu(payload
->header
.type
) == R5LOG_PAYLOAD_PARITY
) {
2001 if (r5l_recovery_verify_data_checksum(
2002 log
, ctx
, page
, log_offset
,
2003 payload
->checksum
[0]) < 0)
2005 if (conf
->max_degraded
== 2 && /* q for RAID 6 */
2006 r5l_recovery_verify_data_checksum(
2008 r5l_ring_add(log
, log_offset
,
2010 payload
->checksum
[1]) < 0)
2012 } else if (le16_to_cpu(payload
->header
.type
) == R5LOG_PAYLOAD_FLUSH
) {
2013 /* nothing to do for R5LOG_PAYLOAD_FLUSH here */
2014 } else /* not R5LOG_PAYLOAD_DATA/PARITY/FLUSH */
2017 if (le16_to_cpu(payload
->header
.type
) == R5LOG_PAYLOAD_FLUSH
) {
2018 mb_offset
+= sizeof(struct r5l_payload_flush
) +
2019 le32_to_cpu(payload_flush
->size
);
2021 /* DATA or PARITY payload */
2022 log_offset
= r5l_ring_add(log
, log_offset
,
2023 le32_to_cpu(payload
->size
));
2024 mb_offset
+= sizeof(struct r5l_payload_data_parity
) +
2026 (le32_to_cpu(payload
->size
) >> (PAGE_SHIFT
- 9));
/*
 * Analyze all data/parity pages in one meta block
 *
 * Returns:
 * -EINVAL for unknown payload type
 * -EAGAIN for checksum mismatch of data page
 * -ENOMEM when running out of memory (alloc_page failed or out of stripes)
 */
2048 r5c_recovery_analyze_meta_block(struct r5l_log
*log
,
2049 struct r5l_recovery_ctx
*ctx
,
2050 struct list_head
*cached_stripe_list
)
2052 struct mddev
*mddev
= log
->rdev
->mddev
;
2053 struct r5conf
*conf
= mddev
->private;
2054 struct r5l_meta_block
*mb
;
2055 struct r5l_payload_data_parity
*payload
;
2056 struct r5l_payload_flush
*payload_flush
;
2058 sector_t log_offset
;
2059 sector_t stripe_sect
;
2060 struct stripe_head
*sh
;
2064 * for mismatch in data blocks, we will drop all data in this mb, but
2065 * we will still read next mb for other data with FLUSH flag, as
2066 * io_unit could finish out of order.
2068 ret
= r5l_recovery_verify_data_checksum_for_mb(log
, ctx
);
2072 return ret
; /* -ENOMEM duo to alloc_page() failed */
2074 mb
= page_address(ctx
->meta_page
);
2075 mb_offset
= sizeof(struct r5l_meta_block
);
2076 log_offset
= r5l_ring_add(log
, ctx
->pos
, BLOCK_SECTORS
);
2078 while (mb_offset
< le32_to_cpu(mb
->meta_size
)) {
2081 payload
= (void *)mb
+ mb_offset
;
2082 payload_flush
= (void *)mb
+ mb_offset
;
2084 if (le16_to_cpu(payload
->header
.type
) == R5LOG_PAYLOAD_FLUSH
) {
2087 count
= le32_to_cpu(payload_flush
->size
) / sizeof(__le64
);
2088 for (i
= 0; i
< count
; ++i
) {
2089 stripe_sect
= le64_to_cpu(payload_flush
->flush_stripes
[i
]);
2090 sh
= r5c_recovery_lookup_stripe(cached_stripe_list
,
2093 WARN_ON(test_bit(STRIPE_R5C_CACHING
, &sh
->state
));
2094 r5l_recovery_reset_stripe(sh
);
2095 list_del_init(&sh
->lru
);
2096 raid5_release_stripe(sh
);
2100 mb_offset
+= sizeof(struct r5l_payload_flush
) +
2101 le32_to_cpu(payload_flush
->size
);
2105 /* DATA or PARITY payload */
2106 stripe_sect
= (le16_to_cpu(payload
->header
.type
) == R5LOG_PAYLOAD_DATA
) ?
2107 raid5_compute_sector(
2108 conf
, le64_to_cpu(payload
->location
), 0, &dd
,
2110 : le64_to_cpu(payload
->location
);
2112 sh
= r5c_recovery_lookup_stripe(cached_stripe_list
,
2116 sh
= r5c_recovery_alloc_stripe(conf
, stripe_sect
);
2118 * cannot get stripe from raid5_get_active_stripe
2119 * try replay some stripes
2122 r5c_recovery_replay_stripes(
2123 cached_stripe_list
, ctx
);
2124 sh
= r5c_recovery_alloc_stripe(
2128 pr_debug("md/raid:%s: Increasing stripe cache size to %d to recovery data on journal.\n",
2130 conf
->min_nr_stripes
* 2);
2131 raid5_set_cache_size(mddev
,
2132 conf
->min_nr_stripes
* 2);
2133 sh
= r5c_recovery_alloc_stripe(conf
,
2137 pr_err("md/raid:%s: Cannot get enough stripes due to memory pressure. Recovery failed.\n",
2141 list_add_tail(&sh
->lru
, cached_stripe_list
);
2144 if (le16_to_cpu(payload
->header
.type
) == R5LOG_PAYLOAD_DATA
) {
2145 if (!test_bit(STRIPE_R5C_CACHING
, &sh
->state
) &&
2146 test_bit(R5_Wantwrite
, &sh
->dev
[sh
->pd_idx
].flags
)) {
2147 r5l_recovery_replay_one_stripe(conf
, sh
, ctx
);
2148 list_move_tail(&sh
->lru
, cached_stripe_list
);
2150 r5l_recovery_load_data(log
, sh
, ctx
, payload
,
2152 } else if (le16_to_cpu(payload
->header
.type
) == R5LOG_PAYLOAD_PARITY
)
2153 r5l_recovery_load_parity(log
, sh
, ctx
, payload
,
2158 log_offset
= r5l_ring_add(log
, log_offset
,
2159 le32_to_cpu(payload
->size
));
2161 mb_offset
+= sizeof(struct r5l_payload_data_parity
) +
2163 (le32_to_cpu(payload
->size
) >> (PAGE_SHIFT
- 9));
2170 * Load the stripe into cache. The stripe will be written out later by
2171 * the stripe cache state machine.
2173 static void r5c_recovery_load_one_stripe(struct r5l_log
*log
,
2174 struct stripe_head
*sh
)
2179 for (i
= sh
->disks
; i
--; ) {
2181 if (test_and_clear_bit(R5_Wantwrite
, &dev
->flags
)) {
2182 set_bit(R5_InJournal
, &dev
->flags
);
2183 set_bit(R5_UPTODATE
, &dev
->flags
);
2189 * Scan through the log for all to-be-flushed data
2191 * For stripes with data and parity, namely Data-Parity stripe
2192 * (STRIPE_R5C_CACHING == 0), we simply replay all the writes.
2194 * For stripes with only data, namely Data-Only stripe
2195 * (STRIPE_R5C_CACHING == 1), we load them to stripe cache state machine.
2197 * For a stripe, if we see data after parity, we should discard all previous
2198 * data and parity for this stripe, as these data are already flushed to
2201 * At the end of the scan, we return the new journal_tail, which points to
2202 * first data-only stripe on the journal device, or next invalid meta block.
2204 static int r5c_recovery_flush_log(struct r5l_log
*log
,
2205 struct r5l_recovery_ctx
*ctx
)
2207 struct stripe_head
*sh
;
2210 /* scan through the log */
2212 if (r5l_recovery_read_meta_block(log
, ctx
))
2215 ret
= r5c_recovery_analyze_meta_block(log
, ctx
,
2218 * -EAGAIN means mismatch in data block, in this case, we still
2219 * try scan the next metablock
2221 if (ret
&& ret
!= -EAGAIN
)
2222 break; /* ret == -EINVAL or -ENOMEM */
2224 ctx
->pos
= r5l_ring_add(log
, ctx
->pos
, ctx
->meta_total_blocks
);
2227 if (ret
== -ENOMEM
) {
2228 r5c_recovery_drop_stripes(&ctx
->cached_list
, ctx
);
2232 /* replay data-parity stripes */
2233 r5c_recovery_replay_stripes(&ctx
->cached_list
, ctx
);
2235 /* load data-only stripes to stripe cache */
2236 list_for_each_entry(sh
, &ctx
->cached_list
, lru
) {
2237 WARN_ON(!test_bit(STRIPE_R5C_CACHING
, &sh
->state
));
2238 r5c_recovery_load_one_stripe(log
, sh
);
2239 ctx
->data_only_stripes
++;
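/*
 * Both ctx->pos updates above and the per-payload walk go through
 * r5l_ring_add(), i.e. positions advance modulo the journal device size, so
 * the scan transparently wraps from the end of the log device back to its
 * start. A minimal sketch of that wraparound (illustrative only; the real
 * helper is defined earlier in this file and takes the log as argument):
 *
 *	static sector_t ring_add(sector_t device_size, sector_t start,
 *				 sector_t inc)
 *	{
 *		start += inc;
 *		if (start >= device_size)
 *			start -= device_size;
 *		return start;
 *	}
 */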
/*
 * We did a recovery. Now ctx.pos points to an invalid meta block. The new
 * log will start here, but we can't let the superblock point to the last
 * valid meta block. The log might look like:
 * | meta 1| meta 2| meta 3|
 * meta 1 is valid, meta 2 is invalid and meta 3 could be valid. If the
 * superblock points to meta 1, we write a new valid meta 2n. If a crash
 * happens again, the new recovery will start from meta 1. Since meta 2n is
 * valid now, recovery will think meta 3 is valid, which is wrong.
 * The solution is to create a new meta in meta 2 with its seq == meta
 * 1's seq + 10000 and let the superblock point to meta 2. That recovery
 * will not think meta 3 is a valid meta, because its seq doesn't match.
 */

/*
 * Before recovery, the log looks like the following
 *
 *   ---------------------------------------------
 *   |           valid log        | invalid log  |
 *   ---------------------------------------------
 *   ^
 *   |- log->last_checkpoint
 *   |- log->last_cp_seq
 *
 * Now we scan through the log until we see an invalid entry
 *
 *   ---------------------------------------------
 *   |           valid log        | invalid log  |
 *   ---------------------------------------------
 *   ^                            ^
 *   |- log->last_checkpoint      |- ctx->pos
 *   |- log->last_cp_seq          |- ctx->seq
 *
 * From this point, we need to increase the seq number by 10000 to avoid
 * confusing the next recovery.
 *
 *   ---------------------------------------------
 *   |           valid log        | invalid log  |
 *   ---------------------------------------------
 *   ^                            ^
 *   |- log->last_checkpoint      |- ctx->pos+1
 *   |- log->last_cp_seq          |- ctx->seq+10001
 *
 * However, it is not safe to start the state machine yet, because data-only
 * parities are not yet secured in RAID. To save these data-only parities, we
 * rewrite them starting at seq+10001.
 *
 *   -----------------------------------------------------------------
 *   |           valid log        | data only stripes | invalid log  |
 *   -----------------------------------------------------------------
 *   ^                                                ^
 *   |- log->last_checkpoint                          |- ctx->pos+n
 *   |- log->last_cp_seq                              |- ctx->seq+10000+n
 *
 * If failure happens again during this process, the recovery can safely
 * start again from log->last_checkpoint.
 *
 * Once the data-only stripes are rewritten to the journal, we move the
 * log tail:
 *
 *   -----------------------------------------------------------------
 *   |     old log        | data only stripes | invalid log  |
 *   -----------------------------------------------------------------
 *                        ^                   ^
 *                        |- log->last_checkpoint   |- ctx->pos+n
 *                        |- log->last_cp_seq       |- ctx->seq+10000+n
 *
 * Then we can safely start the state machine. If failure happens from this
 * point on, the recovery will start from the new log->last_checkpoint.
 */
static int
r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
				       struct r5l_recovery_ctx *ctx)
{
	struct stripe_head *sh;
	struct mddev *mddev = log->rdev->mddev;
	struct page *page;
	sector_t next_checkpoint = MaxSector;

	page = alloc_page(GFP_KERNEL);
	if (!page) {
		pr_err("md/raid:%s: cannot allocate memory to rewrite data only stripes\n",
		       mdname(mddev));
		return -ENOMEM;
	}

	WARN_ON(list_empty(&ctx->cached_list));

	list_for_each_entry(sh, &ctx->cached_list, lru) {
		struct r5l_meta_block *mb;
		int i;
		int offset;
		sector_t write_pos;

		WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
		r5l_recovery_create_empty_meta_block(log, page,
						     ctx->pos, ctx->seq);
		mb = page_address(page);
		offset = le32_to_cpu(mb->meta_size);
		write_pos = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);

		for (i = sh->disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			struct r5l_payload_data_parity *payload;
			void *addr;

			if (test_bit(R5_InJournal, &dev->flags)) {
				payload = (void *)mb + offset;
				payload->header.type = cpu_to_le16(
					R5LOG_PAYLOAD_DATA);
				payload->size = cpu_to_le32(BLOCK_SECTORS);
				payload->location = cpu_to_le64(
					raid5_compute_blocknr(sh, i, 0));
				addr = kmap_atomic(dev->page);
				payload->checksum[0] = cpu_to_le32(
					crc32c_le(log->uuid_checksum, addr,
						  PAGE_SIZE));
				kunmap_atomic(addr);
				sync_page_io(log->rdev, write_pos, PAGE_SIZE,
					     dev->page, REQ_OP_WRITE, 0, false);
				write_pos = r5l_ring_add(log, write_pos,
							 BLOCK_SECTORS);
				offset += sizeof(__le32) +
					sizeof(struct r5l_payload_data_parity);
			}
		}

		mb->meta_size = cpu_to_le32(offset);
		mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
						     mb, PAGE_SIZE));
		sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page,
			     REQ_OP_WRITE, REQ_FUA, false);
		sh->log_start = ctx->pos;
		list_add_tail(&sh->r5c, &log->stripe_in_journal_list);
		atomic_inc(&log->stripe_in_journal_count);
		ctx->pos = write_pos;
		ctx->seq += 1;
		next_checkpoint = sh->log_start;
	}
	log->next_checkpoint = next_checkpoint;
	__free_page(page);
	return 0;
}
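/*
 * A small worked example of what the rewrite above produces: if the scan
 * ended with ctx->seq == S, r5l_recovery_log() below first bumps it to
 * S + 10000; the loop then emits one meta block per data-only stripe at
 * seq S + 10000, S + 10001, ..., each followed by its R5_InJournal data
 * pages written with sync_page_io(), and log->next_checkpoint is taken from
 * the log_start of a rewritten stripe. A crash anywhere during the rewrite
 * therefore still recovers from the old log->last_checkpoint. (The numbers
 * are illustrative; the exact values come from the code, not this comment.)
 */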
static void r5c_recovery_flush_data_only_stripes(struct r5l_log *log,
						 struct r5l_recovery_ctx *ctx)
{
	struct mddev *mddev = log->rdev->mddev;
	struct r5conf *conf = mddev->private;
	struct stripe_head *sh, *next;

	if (ctx->data_only_stripes == 0)
		return;

	log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_BACK;

	list_for_each_entry_safe(sh, next, &ctx->cached_list, lru) {
		r5c_make_stripe_write_out(sh);
		set_bit(STRIPE_HANDLE, &sh->state);
		list_del_init(&sh->lru);
		raid5_release_stripe(sh);
	}

	md_wakeup_thread(conf->mddev->thread);
	/* reuse conf->wait_for_quiescent in recovery */
	wait_event(conf->wait_for_quiescent,
		   atomic_read(&conf->active_stripes) == 0);

	log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
}
static int r5l_recovery_log(struct r5l_log *log)
{
	struct mddev *mddev = log->rdev->mddev;
	struct r5l_recovery_ctx *ctx;
	int ret;
	sector_t pos;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->pos = log->last_checkpoint;
	ctx->seq = log->last_cp_seq;
	INIT_LIST_HEAD(&ctx->cached_list);
	ctx->meta_page = alloc_page(GFP_KERNEL);

	if (!ctx->meta_page) {
		ret = -ENOMEM;
		goto meta_page;
	}

	if (r5l_recovery_allocate_ra_pool(log, ctx) != 0) {
		ret = -ENOMEM;
		goto ra_pool;
	}

	ret = r5c_recovery_flush_log(log, ctx);
	if (ret)
		goto error;

	pos = ctx->pos;
	ctx->seq += 10000;

	if ((ctx->data_only_stripes == 0) && (ctx->data_parity_stripes == 0))
		pr_debug("md/raid:%s: starting from clean shutdown\n",
			 mdname(mddev));
	else
		pr_debug("md/raid:%s: recovering %d data-only stripes and %d data-parity stripes\n",
			 mdname(mddev), ctx->data_only_stripes,
			 ctx->data_parity_stripes);

	if (ctx->data_only_stripes == 0) {
		log->next_checkpoint = ctx->pos;
		r5l_log_write_empty_meta_block(log, ctx->pos, ctx->seq++);
		ctx->pos = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
	} else if (r5c_recovery_rewrite_data_only_stripes(log, ctx)) {
		pr_err("md/raid:%s: failed to rewrite stripes to journal\n",
		       mdname(mddev));
		ret = -EIO;
		goto error;
	}

	log->log_start = ctx->pos;
	log->seq = ctx->seq;
	log->last_checkpoint = pos;
	r5l_write_super(log, pos);

	r5c_recovery_flush_data_only_stripes(log, ctx);
	ret = 0;
error:
	r5l_recovery_free_ra_pool(log, ctx);
ra_pool:
	__free_page(ctx->meta_page);
meta_page:
	kfree(ctx);
	return ret;
}
static void r5l_write_super(struct r5l_log *log, sector_t cp)
{
	struct mddev *mddev = log->rdev->mddev;

	log->rdev->journal_tail = cp;
	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
}
static ssize_t r5c_journal_mode_show(struct mddev *mddev, char *page)
{
	struct r5conf *conf = mddev->private;
	int ret;

	if (!conf->log)
		return 0;

	switch (conf->log->r5c_journal_mode) {
	case R5C_JOURNAL_MODE_WRITE_THROUGH:
		ret = snprintf(
			page, PAGE_SIZE, "[%s] %s\n",
			r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_THROUGH],
			r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_BACK]);
		break;
	case R5C_JOURNAL_MODE_WRITE_BACK:
		ret = snprintf(
			page, PAGE_SIZE, "%s [%s]\n",
			r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_THROUGH],
			r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_BACK]);
		break;
	default:
		ret = 0;
	}
	return ret;
}
/*
 * Set the journal cache mode on @mddev (external API initially needed by
 * dm-raid).
 *
 * @mode as defined in 'enum r5c_journal_mode'.
 */
int r5c_journal_mode_set(struct mddev *mddev, int mode)
{
	struct r5conf *conf = mddev->private;
	struct r5l_log *log = conf->log;

	if (!log)
		return -ENODEV;

	if (mode < R5C_JOURNAL_MODE_WRITE_THROUGH ||
	    mode > R5C_JOURNAL_MODE_WRITE_BACK)
		return -EINVAL;

	if (raid5_calc_degraded(conf) > 0 &&
	    mode == R5C_JOURNAL_MODE_WRITE_BACK)
		return -EINVAL;

	mddev_suspend(mddev);
	conf->log->r5c_journal_mode = mode;
	mddev_resume(mddev);

	pr_debug("md/raid:%s: setting r5c cache mode to %d: %s\n",
		 mdname(mddev), mode, r5c_journal_mode_str[mode]);
	return 0;
}
EXPORT_SYMBOL(r5c_journal_mode_set);
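/*
 * A minimal sketch of how an in-kernel caller such as dm-raid might use the
 * exported helper above (the surrounding error handling is an illustrative
 * assumption, not code from any particular caller):
 *
 *	int err = r5c_journal_mode_set(mddev, R5C_JOURNAL_MODE_WRITE_BACK);
 *	if (err)
 *		pr_warn("md/raid: cannot enable write-back journal: %d\n", err);
 */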
static ssize_t r5c_journal_mode_store(struct mddev *mddev,
				      const char *page, size_t length)
{
	int mode = ARRAY_SIZE(r5c_journal_mode_str);
	size_t len = length;

	if (len < 2)
		return -EINVAL;

	if (page[len - 1] == '\n')
		len--;

	while (mode--)
		if (strlen(r5c_journal_mode_str[mode]) == len &&
		    !strncmp(page, r5c_journal_mode_str[mode], len))
			break;

	return r5c_journal_mode_set(mddev, mode) ?: length;
}

struct md_sysfs_entry
r5c_journal_mode = __ATTR(journal_mode, 0644,
			  r5c_journal_mode_show, r5c_journal_mode_store);
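/*
 * The attribute above shows up as "journal_mode" in the array's md sysfs
 * directory. A minimal userspace sketch (illustrative only; the md0 device
 * name and the sysfs path are assumptions about a particular setup):
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		FILE *f = fopen("/sys/block/md0/md/journal_mode", "r+");
 *		char buf[64];
 *
 *		if (!f)
 *			return 1;
 *		if (fgets(buf, sizeof(buf), f))
 *			printf("current: %s", buf);	// e.g. "[write-through] write-back"
 *		rewind(f);
 *		fputs("write-back", f);			// switch to write-back caching
 *		fclose(f);
 *		return 0;
 *	}
 */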
/*
 * Try to handle a write operation in the caching phase. This function should
 * only be called in write-back mode.
 *
 * If all outstanding writes can be handled in the caching phase, return 0.
 * If the writes require the write-out phase, call r5c_make_stripe_write_out()
 * and return -EAGAIN.
 */
int r5c_try_caching_write(struct r5conf *conf,
			  struct stripe_head *sh,
			  struct stripe_head_state *s,
			  int disks)
{
	struct r5l_log *log = conf->log;
	int i;
	struct r5dev *dev;
	int to_cache = 0;
	void **pslot;
	sector_t tree_index;
	int ret;
	uintptr_t refcount;

	BUG_ON(!r5c_is_writeback(log));

	if (!test_bit(STRIPE_R5C_CACHING, &sh->state)) {
		/*
		 * There are two different scenarios here:
		 *  1. The stripe has some data cached, and it is sent to
		 *     the write-out phase for reclaim
		 *  2. The stripe is clean, and this is the first write
		 *
		 * For 1, return -EAGAIN, so we continue with
		 * handle_stripe_dirtying().
		 *
		 * For 2, set STRIPE_R5C_CACHING and continue with the caching
		 * write.
		 */

		/* case 1: anything injournal or anything in written */
		if (s->injournal > 0 || s->written > 0)
			return -EAGAIN;
		/* case 2 */
		set_bit(STRIPE_R5C_CACHING, &sh->state);
	}

	/*
	 * When run in degraded mode, the array is set to write-through mode.
	 * This check helps drain pending writes safely in the transition to
	 * write-through mode.
	 */
	if (s->failed) {
		r5c_make_stripe_write_out(sh);
		return -EAGAIN;
	}

	for (i = disks; i--; ) {
		dev = &sh->dev[i];
		/* if non-overwrite, use the writing-out phase */
		if (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags) &&
		    !test_bit(R5_InJournal, &dev->flags)) {
			r5c_make_stripe_write_out(sh);
			return -EAGAIN;
		}
	}

	/* if the stripe is not counted in big_stripe_tree, add it now */
	if (!test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state) &&
	    !test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) {
		tree_index = r5c_tree_index(conf, sh->sector);
		spin_lock(&log->tree_lock);
		pslot = radix_tree_lookup_slot(&log->big_stripe_tree,
					       tree_index);
		if (pslot) {
			refcount = (uintptr_t)radix_tree_deref_slot_protected(
				pslot, &log->tree_lock) >>
				R5C_RADIX_COUNT_SHIFT;
			radix_tree_replace_slot(
				&log->big_stripe_tree, pslot,
				(void *)((refcount + 1) << R5C_RADIX_COUNT_SHIFT));
		} else {
			/*
			 * this radix_tree_insert can fail safely, so no
			 * need to call radix_tree_preload()
			 */
			ret = radix_tree_insert(
				&log->big_stripe_tree, tree_index,
				(void *)(1 << R5C_RADIX_COUNT_SHIFT));
			if (ret) {
				spin_unlock(&log->tree_lock);
				r5c_make_stripe_write_out(sh);
				return -EAGAIN;
			}
		}
		spin_unlock(&log->tree_lock);

		/*
		 * set STRIPE_R5C_PARTIAL_STRIPE, this shows the stripe is
		 * counted in the radix tree
		 */
		set_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state);
		atomic_inc(&conf->r5c_cached_partial_stripes);
	}

	for (i = disks; i--; ) {
		dev = &sh->dev[i];
		if (dev->towrite) {
			set_bit(R5_Wantwrite, &dev->flags);
			set_bit(R5_Wantdrain, &dev->flags);
			set_bit(R5_LOCKED, &dev->flags);
			to_cache++;
		}
	}

	if (to_cache) {
		set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
		/*
		 * set STRIPE_LOG_TRAPPED, which triggers r5c_cache_data()
		 * in ops_run_io(). STRIPE_LOG_TRAPPED will be cleared in
		 * r5c_handle_data_cached()
		 */
		set_bit(STRIPE_LOG_TRAPPED, &sh->state);
	}

	return 0;
}
/*
 * free extra pages (orig_page) we allocated for prexor
 */
void r5c_release_extra_page(struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;
	int i;
	bool using_disk_info_extra_page;

	using_disk_info_extra_page =
		sh->dev[0].orig_page == conf->disks[0].extra_page;

	for (i = sh->disks; i--; )
		if (sh->dev[i].page != sh->dev[i].orig_page) {
			struct page *p = sh->dev[i].orig_page;

			sh->dev[i].orig_page = sh->dev[i].page;
			clear_bit(R5_OrigPageUPTDODATE, &sh->dev[i].flags);

			if (!using_disk_info_extra_page)
				put_page(p);
		}

	if (using_disk_info_extra_page) {
		clear_bit(R5C_EXTRA_PAGE_IN_USE, &conf->cache_state);
		md_wakeup_thread(conf->mddev->thread);
	}
}

void r5c_use_extra_page(struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;
	int i;
	struct r5dev *dev;

	for (i = sh->disks; i--; ) {
		dev = &sh->dev[i];
		if (dev->orig_page != dev->page)
			put_page(dev->orig_page);
		dev->orig_page = conf->disks[i].extra_page;
	}
}
/*
 * clean up the stripe (clear R5_InJournal for dev[pd_idx] etc.) after the
 * stripe is committed to the RAID disks.
 */
void r5c_finish_stripe_write_out(struct r5conf *conf,
				 struct stripe_head *sh,
				 struct stripe_head_state *s)
{
	struct r5l_log *log = conf->log;
	int i;
	int do_wakeup = 0;
	sector_t tree_index;
	void **pslot;
	uintptr_t refcount;

	if (!log || !test_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags))
		return;

	WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
	clear_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);

	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
		return;

	for (i = sh->disks; i--; ) {
		clear_bit(R5_InJournal, &sh->dev[i].flags);
		if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
			do_wakeup = 1;
	}

	/*
	 * analyse_stripe() runs before r5c_finish_stripe_write_out();
	 * we updated R5_InJournal, so we also update s->injournal.
	 */
	s->injournal = 0;

	if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
		if (atomic_dec_and_test(&conf->pending_full_writes))
			md_wakeup_thread(conf->mddev->thread);

	if (do_wakeup)
		wake_up(&conf->wait_for_overlap);

	spin_lock_irq(&log->stripe_in_journal_lock);
	list_del_init(&sh->r5c);
	spin_unlock_irq(&log->stripe_in_journal_lock);
	sh->log_start = MaxSector;

	atomic_dec(&log->stripe_in_journal_count);
	r5c_update_log_state(log);

	/* stop counting this stripe in big_stripe_tree */
	if (test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state) ||
	    test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) {
		tree_index = r5c_tree_index(conf, sh->sector);
		spin_lock(&log->tree_lock);
		pslot = radix_tree_lookup_slot(&log->big_stripe_tree,
					       tree_index);
		BUG_ON(pslot == NULL);
		refcount = (uintptr_t)radix_tree_deref_slot_protected(
			pslot, &log->tree_lock) >>
			R5C_RADIX_COUNT_SHIFT;
		if (refcount == 1)
			radix_tree_delete(&log->big_stripe_tree, tree_index);
		else
			radix_tree_replace_slot(
				&log->big_stripe_tree, pslot,
				(void *)((refcount - 1) << R5C_RADIX_COUNT_SHIFT));
		spin_unlock(&log->tree_lock);
	}

	if (test_and_clear_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state)) {
		BUG_ON(atomic_read(&conf->r5c_cached_partial_stripes) == 0);
		atomic_dec(&conf->r5c_flushing_partial_stripes);
		atomic_dec(&conf->r5c_cached_partial_stripes);
	}

	if (test_and_clear_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) {
		BUG_ON(atomic_read(&conf->r5c_cached_full_stripes) == 0);
		atomic_dec(&conf->r5c_flushing_full_stripes);
		atomic_dec(&conf->r5c_cached_full_stripes);
	}

	r5l_append_flush_payload(log, sh->sector);
}
int r5c_cache_data(struct r5l_log *log, struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;
	int pages = 0;
	int reserve;
	int i;
	int ret = 0;

	BUG_ON(!log);

	for (i = 0; i < sh->disks; i++) {
		void *addr;

		if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
			continue;
		addr = kmap_atomic(sh->dev[i].page);
		sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
						    addr, PAGE_SIZE);
		kunmap_atomic(addr);
		pages++;
	}
	WARN_ON(pages == 0);

	/*
	 * The stripe must enter the state machine again to call endio, so
	 * don't delay.
	 */
	clear_bit(STRIPE_DELAYED, &sh->state);
	atomic_inc(&sh->count);

	mutex_lock(&log->io_mutex);
	/* meta + data */
	reserve = (1 + pages) << (PAGE_SHIFT - 9);

	if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) &&
	    sh->log_start == MaxSector)
		r5l_add_no_space_stripe(log, sh);
	else if (!r5l_has_free_space(log, reserve)) {
		if (sh->log_start == log->last_checkpoint)
			BUG();
		else
			r5l_add_no_space_stripe(log, sh);
	} else {
		ret = r5l_log_stripe(log, sh, pages, 0);
		if (ret) {
			spin_lock_irq(&log->io_list_lock);
			list_add_tail(&sh->log_list, &log->no_mem_stripes);
			spin_unlock_irq(&log->io_list_lock);
		}
	}

	mutex_unlock(&log->io_mutex);
	return 0;
}
/* check whether this big stripe is in the write-back cache. */
bool r5c_big_stripe_cached(struct r5conf *conf, sector_t sect)
{
	struct r5l_log *log = conf->log;
	sector_t tree_index;
	void *slot;

	if (!log)
		return false;

	WARN_ON_ONCE(!rcu_read_lock_held());
	tree_index = r5c_tree_index(conf, sect);
	slot = radix_tree_lookup(&log->big_stripe_tree, tree_index);
	return slot != NULL;
}
static int r5l_load_log(struct r5l_log *log)
{
	struct md_rdev *rdev = log->rdev;
	struct page *page;
	struct r5l_meta_block *mb;
	sector_t cp = log->rdev->journal_tail;
	u32 stored_crc, expected_crc;
	bool create_super = false;
	int ret = 0;

	/* Make sure it's valid */
	if (cp >= rdev->sectors || round_down(cp, BLOCK_SECTORS) != cp)
		cp = 0;
	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	if (!sync_page_io(rdev, cp, PAGE_SIZE, page, REQ_OP_READ, 0, false)) {
		ret = -EIO;
		goto ioerr;
	}
	mb = page_address(page);

	if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
	    mb->version != R5LOG_VERSION) {
		create_super = true;
		goto create;
	}
	stored_crc = le32_to_cpu(mb->checksum);
	mb->checksum = 0;
	expected_crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
	if (stored_crc != expected_crc) {
		create_super = true;
		goto create;
	}
	if (le64_to_cpu(mb->position) != cp) {
		create_super = true;
		goto create;
	}
create:
	if (create_super) {
		log->last_cp_seq = prandom_u32();
		cp = 0;
		r5l_log_write_empty_meta_block(log, cp, log->last_cp_seq);
		/*
		 * Make sure the superblock points to the correct address.
		 * The log might have data very soon. If the superblock
		 * doesn't have the correct log tail address, recovery can't
		 * find the log.
		 */
		r5l_write_super(log, cp);
	} else
		log->last_cp_seq = le64_to_cpu(mb->seq);

	log->device_size = round_down(rdev->sectors, BLOCK_SECTORS);
	log->max_free_space = log->device_size >> RECLAIM_MAX_FREE_SPACE_SHIFT;
	if (log->max_free_space > RECLAIM_MAX_FREE_SPACE)
		log->max_free_space = RECLAIM_MAX_FREE_SPACE;
	log->last_checkpoint = cp;

	__free_page(page);

	if (create_super) {
		log->log_start = r5l_ring_add(log, cp, BLOCK_SECTORS);
		log->seq = log->last_cp_seq + 1;
		log->next_checkpoint = cp;
	} else
		ret = r5l_recovery_log(log);

	r5c_update_log_state(log);
	return ret;
ioerr:
	__free_page(page);
	return ret;
}
void r5c_update_on_rdev_error(struct mddev *mddev)
{
	struct r5conf *conf = mddev->private;
	struct r5l_log *log = conf->log;

	if (!log)
		return;

	if (raid5_calc_degraded(conf) > 0 &&
	    conf->log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK)
		schedule_work(&log->disable_writeback_work);
}
int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
{
	struct request_queue *q = bdev_get_queue(rdev->bdev);
	struct r5l_log *log;
	char b[BDEVNAME_SIZE];

	pr_debug("md/raid:%s: using device %s as journal\n",
		 mdname(conf->mddev), bdevname(rdev->bdev, b));

	if (PAGE_SIZE != 4096)
		return -EINVAL;

	/*
	 * The PAGE_SIZE must be big enough to hold 1 r5l_meta_block and
	 * raid_disks r5l_payload_data_parity.
	 *
	 * Write journal and cache do not work for very big arrays
	 * (raid_disks > 203)
	 */
	if (sizeof(struct r5l_meta_block) +
	    ((sizeof(struct r5l_payload_data_parity) + sizeof(__le32)) *
	     conf->raid_disks) > PAGE_SIZE) {
		pr_err("md/raid:%s: write journal/cache doesn't work for array with %d disks\n",
		       mdname(conf->mddev), conf->raid_disks);
		return -EINVAL;
	}

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		return -ENOMEM;
	log->rdev = rdev;

	log->need_cache_flush = test_bit(QUEUE_FLAG_WC, &q->queue_flags) != 0;

	log->uuid_checksum = crc32c_le(~0, rdev->mddev->uuid,
				       sizeof(rdev->mddev->uuid));

	mutex_init(&log->io_mutex);

	spin_lock_init(&log->io_list_lock);
	INIT_LIST_HEAD(&log->running_ios);
	INIT_LIST_HEAD(&log->io_end_ios);
	INIT_LIST_HEAD(&log->flushing_ios);
	INIT_LIST_HEAD(&log->finished_ios);
	bio_init(&log->flush_bio, NULL, 0);

	log->io_kc = KMEM_CACHE(r5l_io_unit, 0);
	if (!log->io_kc)
		goto io_kc;

	log->io_pool = mempool_create_slab_pool(R5L_POOL_SIZE, log->io_kc);
	if (!log->io_pool)
		goto io_pool;

	log->bs = bioset_create(R5L_POOL_SIZE, 0);
	if (!log->bs)
		goto io_bs;

	log->meta_pool = mempool_create_page_pool(R5L_POOL_SIZE, 0);
	if (!log->meta_pool)
		goto out_mempool;

	spin_lock_init(&log->tree_lock);
	INIT_RADIX_TREE(&log->big_stripe_tree, GFP_NOWAIT | __GFP_NOWARN);

	log->reclaim_thread = md_register_thread(r5l_reclaim_thread,
						 log->rdev->mddev, "reclaim");
	if (!log->reclaim_thread)
		goto reclaim_thread;
	log->reclaim_thread->timeout = R5C_RECLAIM_WAKEUP_INTERVAL;

	init_waitqueue_head(&log->iounit_wait);

	INIT_LIST_HEAD(&log->no_mem_stripes);

	INIT_LIST_HEAD(&log->no_space_stripes);
	spin_lock_init(&log->no_space_stripes_lock);

	INIT_WORK(&log->deferred_io_work, r5l_submit_io_async);
	INIT_WORK(&log->disable_writeback_work, r5c_disable_writeback_async);

	log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
	INIT_LIST_HEAD(&log->stripe_in_journal_list);
	spin_lock_init(&log->stripe_in_journal_lock);
	atomic_set(&log->stripe_in_journal_count, 0);

	rcu_assign_pointer(conf->log, log);

	if (r5l_load_log(log))
		goto error;

	set_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
	return 0;

error:
	rcu_assign_pointer(conf->log, NULL);
	md_unregister_thread(&log->reclaim_thread);
reclaim_thread:
	mempool_destroy(log->meta_pool);
out_mempool:
	bioset_free(log->bs);
io_bs:
	mempool_destroy(log->io_pool);
io_pool:
	kmem_cache_destroy(log->io_kc);
io_kc:
	kfree(log);
	return -EINVAL;
}
void r5l_exit_log(struct r5conf *conf)
{
	struct r5l_log *log = conf->log;

	conf->log = NULL;
	synchronize_rcu();

	flush_work(&log->disable_writeback_work);
	md_unregister_thread(&log->reclaim_thread);
	mempool_destroy(log->meta_pool);
	bioset_free(log->bs);
	mempool_destroy(log->io_pool);
	kmem_cache_destroy(log->io_kc);
	kfree(log);
}