/*
 * Copyright (C) 2015 Shaohua Li <shli@fb.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/raid/md_p.h>
#include <linux/crc32c.h>
#include <linux/random.h>
#include "md.h"
#include "raid5.h"
/*
 * metadata/data are stored on disk in 4k units (a block) regardless of the
 * underlying hardware sector size. Only works with PAGE_SIZE == 4096.
 */
#define BLOCK_SECTORS (8)

/*
 * reclaim runs every 1/4 of the disk size or every 10G of reclaimable space,
 * whichever is smaller. This prevents recovery from scanning a very long log.
 */
#define RECLAIM_MAX_FREE_SPACE (10 * 1024 * 1024 * 2) /* sector */
#define RECLAIM_MAX_FREE_SPACE_SHIFT (2)

/*
 * We only need 2 bios per I/O unit to make progress, but ensure we
 * have a few more available to not get too tight.
 */
#define R5L_POOL_SIZE	4
struct r5l_log {
	struct md_rdev *rdev;

	u32 uuid_checksum;

	sector_t device_size;		/* log device size, round to
					 * BLOCK_SECTORS */
	sector_t max_free_space;	/* reclaim run if free space is at
					 * this size */

	sector_t last_checkpoint;	/* log tail. where recovery scan
					 * starts from */
	u64 last_cp_seq;		/* log tail sequence */

	sector_t log_start;		/* log head. where new data appends */
	u64 seq;			/* log head sequence */

	sector_t next_checkpoint;
	u64 next_cp_seq;

	struct mutex io_mutex;
	struct r5l_io_unit *current_io;	/* current io_unit accepting new data */

	spinlock_t io_list_lock;
	struct list_head running_ios;	/* io_units which are still running,
					 * and have not yet been completely
					 * written to the log */
	struct list_head io_end_ios;	/* io_units which have been completely
					 * written to the log but not yet
					 * written to the RAID */
	struct list_head flushing_ios;	/* io_units which are waiting for log
					 * cache flush */
	struct list_head finished_ios;	/* io_units which settle down in log disk */
	struct bio flush_bio;

	struct kmem_cache *io_kc;
	mempool_t *meta_pool;
	struct bio_set *bs;

	struct md_thread *reclaim_thread;
	unsigned long reclaim_target;	/* number of space that need to be
					 * reclaimed. if it's 0, reclaim spaces
					 * used by io_units which are in
					 * IO_UNIT_STRIPE_END state (i.e. reclaim
					 * doesn't wait for a specific io_unit
					 * switching to IO_UNIT_STRIPE_END
					 * state) */
	wait_queue_head_t iounit_wait;

	struct list_head no_space_stripes; /* pending stripes, log has no space */
	spinlock_t no_space_stripes_lock;

	bool need_cache_flush;
	bool in_teardown;
};
/*
 * an IO range starts from a meta data block and ends at the next meta data
 * block. The io_unit's meta data block tracks the data/parity that follows it.
 * The io unit is written to the log disk with a normal write; as we always
 * flush the log disk first and only then start moving data to the raid disks,
 * there is no requirement to write the io unit with FLUSH/FUA.
 */
struct r5l_io_unit {
	struct r5l_log *log;

	struct page *meta_page;	/* store meta block */
	int meta_offset;	/* current offset in meta_page */

	struct bio *current_bio;/* current_bio accepting new data */

	atomic_t pending_stripe;/* how many stripes not flushed to raid */
	u64 seq;		/* seq number of the metablock */
	sector_t log_start;	/* where the io_unit starts */
	sector_t log_end;	/* where the io_unit ends */
	struct list_head log_sibling; /* log->running_ios */
	struct list_head stripe_list; /* stripes added to the io_unit */

	int state;
	bool need_split_bio;
};
/* r5l_io_unit state */
enum r5l_io_unit_state {
	IO_UNIT_RUNNING = 0,	/* accepting new IO */
	IO_UNIT_IO_START = 1,	/* io_unit bio started writing to log,
				 * no longer accepting new bio */
	IO_UNIT_IO_END = 2,	/* io_unit bio finished writing to log */
	IO_UNIT_STRIPE_END = 3,	/* stripes data finished writing to raid */
};
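
/*
 * The log device is treated as a circular buffer of BLOCK_SECTORS-sized
 * blocks; the helpers below wrap positions and compute distances modulo the
 * (block-aligned) device size.
 */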
static sector_t r5l_ring_add(struct r5l_log *log, sector_t start, sector_t inc)
{
	start += inc;
	if (start >= log->device_size)
		start = start - log->device_size;
	return start;
}
static sector_t r5l_ring_distance(struct r5l_log *log, sector_t start,
				  sector_t end)
{
	if (end >= start)
		return end - start;
	else
		return end + log->device_size - start;
}
static bool r5l_has_free_space(struct r5l_log *log, sector_t size)
{
	sector_t used_size;

	used_size = r5l_ring_distance(log, log->last_checkpoint,
				      log->log_start);

	return log->device_size > used_size + size;
}
static void __r5l_set_io_unit_state(struct r5l_io_unit *io,
				    enum r5l_io_unit_state state)
{
	if (WARN_ON(io->state >= state))
		return;
	io->state = state;
}
static void r5l_io_run_stripes(struct r5l_io_unit *io)
{
	struct stripe_head *sh, *next;

	list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) {
		list_del_init(&sh->log_list);
		set_bit(STRIPE_HANDLE, &sh->state);
		raid5_release_stripe(sh);
	}
}
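
/*
 * The two walkers below scan running_ios in submission order and stop at the
 * first io_unit whose log write has not completed yet, so io_units are only
 * ever retired in the order they were issued.
 */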
static void r5l_log_run_stripes(struct r5l_log *log)
{
	struct r5l_io_unit *io, *next;

	assert_spin_locked(&log->io_list_lock);

	list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
		/* don't change list order */
		if (io->state < IO_UNIT_IO_END)
			break;

		list_move_tail(&io->log_sibling, &log->finished_ios);
		r5l_io_run_stripes(io);
	}
}
static void r5l_move_to_end_ios(struct r5l_log *log)
{
	struct r5l_io_unit *io, *next;

	assert_spin_locked(&log->io_list_lock);

	list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
		/* don't change list order */
		if (io->state < IO_UNIT_IO_END)
			break;
		list_move_tail(&io->log_sibling, &log->io_end_ios);
	}
}
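
/*
 * Completion handler for an io_unit's log write: mark it IO_UNIT_IO_END and
 * either queue it for a log-disk cache flush (need_cache_flush) or release
 * its stripes back to the raid5 state machine right away.
 */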
static void r5l_log_endio(struct bio *bio)
{
	struct r5l_io_unit *io = bio->bi_private;
	struct r5l_log *log = io->log;
	unsigned long flags;

	if (bio->bi_error)
		md_error(log->rdev->mddev, log->rdev);

	bio_put(bio);
	mempool_free(io->meta_page, log->meta_pool);

	spin_lock_irqsave(&log->io_list_lock, flags);
	__r5l_set_io_unit_state(io, IO_UNIT_IO_END);
	if (log->need_cache_flush)
		r5l_move_to_end_ios(log);
	else
		r5l_log_run_stripes(log);
	spin_unlock_irqrestore(&log->io_list_lock, flags);

	if (log->need_cache_flush)
		md_wakeup_thread(log->rdev->mddev->thread);
}
static void r5l_submit_current_io(struct r5l_log *log)
{
	struct r5l_io_unit *io = log->current_io;
	struct r5l_meta_block *block;
	unsigned long flags;
	u32 crc;

	if (!io)
		return;

	block = page_address(io->meta_page);
	block->meta_size = cpu_to_le32(io->meta_offset);
	crc = crc32c_le(log->uuid_checksum, block, PAGE_SIZE);
	block->checksum = cpu_to_le32(crc);

	log->current_io = NULL;
	spin_lock_irqsave(&log->io_list_lock, flags);
	__r5l_set_io_unit_state(io, IO_UNIT_IO_START);
	spin_unlock_irqrestore(&log->io_list_lock, flags);

	submit_bio(WRITE, io->current_bio);
}
static struct bio *r5l_bio_alloc(struct r5l_log *log)
{
	struct bio *bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES, log->bs);

	bio->bi_bdev = log->rdev->bdev;
	bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start;

	return bio;
}
static void r5_reserve_log_entry(struct r5l_log *log, struct r5l_io_unit *io)
{
	log->log_start = r5l_ring_add(log, log->log_start, BLOCK_SECTORS);

	/*
	 * If we filled up the log device start from the beginning again,
	 * which will require a new bio.
	 *
	 * Note: for this to work properly the log size needs to be a multiple
	 * of BLOCK_SECTORS.
	 */
	if (log->log_start == 0)
		io->need_split_bio = true;

	io->log_end = log->log_start;
}
static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log)
{
	struct r5l_io_unit *io;
	struct r5l_meta_block *block;

	/* We can't handle memory allocation failure so far */
	io = kmem_cache_zalloc(log->io_kc, GFP_NOIO | __GFP_NOFAIL);
	io->log = log;
	INIT_LIST_HEAD(&io->log_sibling);
	INIT_LIST_HEAD(&io->stripe_list);
	io->state = IO_UNIT_RUNNING;

	io->meta_page = mempool_alloc(log->meta_pool, GFP_NOIO);
	block = page_address(io->meta_page);
	clear_page(block);
	block->magic = cpu_to_le32(R5LOG_MAGIC);
	block->version = R5LOG_VERSION;
	block->seq = cpu_to_le64(log->seq);
	block->position = cpu_to_le64(log->log_start);

	io->log_start = log->log_start;
	io->meta_offset = sizeof(struct r5l_meta_block);
	io->seq = log->seq++;

	io->current_bio = r5l_bio_alloc(log);
	io->current_bio->bi_end_io = r5l_log_endio;
	io->current_bio->bi_private = io;
	bio_add_page(io->current_bio, io->meta_page, PAGE_SIZE, 0);

	r5_reserve_log_entry(log, io);

	spin_lock_irq(&log->io_list_lock);
	list_add_tail(&io->log_sibling, &log->running_ios);
	spin_unlock_irq(&log->io_list_lock);

	return io;
}
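
/*
 * Make sure the current io_unit has room for a payload of payload_size bytes
 * in its meta page; submit it and open a new io_unit otherwise.
 */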
static int r5l_get_meta(struct r5l_log *log, unsigned int payload_size)
{
	if (log->current_io &&
	    log->current_io->meta_offset + payload_size > PAGE_SIZE)
		r5l_submit_current_io(log);

	if (!log->current_io)
		log->current_io = r5l_new_meta(log);

	return 0;
}
static void r5l_append_payload_meta(struct r5l_log *log, u16 type,
				    sector_t location,
				    u32 checksum1, u32 checksum2,
				    bool checksum2_valid)
{
	struct r5l_io_unit *io = log->current_io;
	struct r5l_payload_data_parity *payload;

	payload = page_address(io->meta_page) + io->meta_offset;
	payload->header.type = cpu_to_le16(type);
	payload->header.flags = cpu_to_le16(0);
	payload->size = cpu_to_le32((1 + !!checksum2_valid) <<
				    (PAGE_SHIFT - 9));
	payload->location = cpu_to_le64(location);
	payload->checksum[0] = cpu_to_le32(checksum1);
	if (checksum2_valid)
		payload->checksum[1] = cpu_to_le32(checksum2);

	io->meta_offset += sizeof(struct r5l_payload_data_parity) +
		sizeof(__le32) * (1 + !!checksum2_valid);
}
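
/*
 * Data/parity pages are appended to the io_unit's current bio. When the log
 * wrapped in r5_reserve_log_entry, need_split_bio forces a fresh bio chained
 * to the previous one so a single bio never crosses the end of the device.
 */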
static void r5l_append_payload_page(struct r5l_log *log, struct page *page)
{
	struct r5l_io_unit *io = log->current_io;

	if (io->need_split_bio) {
		struct bio *prev = io->current_bio;

		io->current_bio = r5l_bio_alloc(log);
		bio_chain(io->current_bio, prev);

		submit_bio(WRITE, prev);
	}

	if (!bio_add_page(io->current_bio, page, PAGE_SIZE, 0))
		BUG();

	r5_reserve_log_entry(log, io);
}
static void r5l_log_stripe(struct r5l_log *log, struct stripe_head *sh,
			   int data_pages, int parity_pages)
{
	int i;
	int meta_size;
	struct r5l_io_unit *io;

	meta_size =
		((sizeof(struct r5l_payload_data_parity) + sizeof(__le32))
		 * data_pages) +
		sizeof(struct r5l_payload_data_parity) +
		sizeof(__le32) * parity_pages;

	r5l_get_meta(log, meta_size);
	io = log->current_io;

	for (i = 0; i < sh->disks; i++) {
		if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
			continue;
		if (i == sh->pd_idx || i == sh->qd_idx)
			continue;
		r5l_append_payload_meta(log, R5LOG_PAYLOAD_DATA,
					raid5_compute_blocknr(sh, i, 0),
					sh->dev[i].log_checksum, 0, false);
		r5l_append_payload_page(log, sh->dev[i].page);
	}

	if (sh->qd_idx >= 0) {
		r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
					sh->sector, sh->dev[sh->pd_idx].log_checksum,
					sh->dev[sh->qd_idx].log_checksum, true);
		r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
		r5l_append_payload_page(log, sh->dev[sh->qd_idx].page);
	} else {
		r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
					sh->sector, sh->dev[sh->pd_idx].log_checksum,
					0, false);
		r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
	}

	list_add_tail(&sh->log_list, &io->stripe_list);
	atomic_inc(&io->pending_stripe);
	sh->log_io = io;
}
static void r5l_wake_reclaim(struct r5l_log *log, sector_t space);

/*
 * running in raid5d, where reclaim could wait for raid5d too (when it flushes
 * data from log to raid disks), so we shouldn't wait for reclaim here
 */
int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
{
	int write_disks = 0;
	int data_pages, parity_pages;
	int meta_size;
	int reserve;
	int i;

	if (!log)
		return -EAGAIN;
	/* Don't support stripe batch */
	if (sh->log_io || !test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags) ||
	    test_bit(STRIPE_SYNCING, &sh->state)) {
		/* the stripe is written to log, we start writing it to raid */
		clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
		return -EAGAIN;
	}

	for (i = 0; i < sh->disks; i++) {
		void *addr;

		if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
			continue;
		write_disks++;
		/* checksum is already calculated in last run */
		if (test_bit(STRIPE_LOG_TRAPPED, &sh->state))
			continue;
		addr = kmap_atomic(sh->dev[i].page);
		sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
						    addr, PAGE_SIZE);
		kunmap_atomic(addr);
	}
	parity_pages = 1 + !!(sh->qd_idx >= 0);
	data_pages = write_disks - parity_pages;

	meta_size =
		((sizeof(struct r5l_payload_data_parity) + sizeof(__le32))
		 * data_pages) +
		sizeof(struct r5l_payload_data_parity) +
		sizeof(__le32) * parity_pages;
	/* Doesn't work with very big raid array */
	if (meta_size + sizeof(struct r5l_meta_block) > PAGE_SIZE)
		return -EINVAL;

	set_bit(STRIPE_LOG_TRAPPED, &sh->state);
	/*
	 * The stripe must enter the state machine again to finish the write,
	 * so don't delay.
	 */
	clear_bit(STRIPE_DELAYED, &sh->state);
	atomic_inc(&sh->count);

	mutex_lock(&log->io_mutex);
	/* meta + data */
	reserve = (1 + write_disks) << (PAGE_SHIFT - 9);
	if (r5l_has_free_space(log, reserve))
		r5l_log_stripe(log, sh, data_pages, parity_pages);
	else {
		spin_lock(&log->no_space_stripes_lock);
		list_add_tail(&sh->log_list, &log->no_space_stripes);
		spin_unlock(&log->no_space_stripes_lock);

		r5l_wake_reclaim(log, reserve);
	}
	mutex_unlock(&log->io_mutex);

	return 0;
}
void r5l_write_stripe_run(struct r5l_log *log)
{
	if (!log)
		return;
	mutex_lock(&log->io_mutex);
	r5l_submit_current_io(log);
	mutex_unlock(&log->io_mutex);
}
int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio)
{
	if (!log)
		return -ENODEV;
	/*
	 * we flush the log disk cache first, then write stripe data to the
	 * raid disks. So if bio is finished, the log disk cache is flushed
	 * already. The recovery guarantees we can recover the bio from the log
	 * disk, so we don't need to flush again.
	 */
	if (bio->bi_iter.bi_size == 0) {
		bio_endio(bio);
		return 0;
	}
	bio->bi_rw &= ~REQ_FLUSH;
	return -EAGAIN;
}
/* This will run after log space is reclaimed */
static void r5l_run_no_space_stripes(struct r5l_log *log)
{
	struct stripe_head *sh;

	spin_lock(&log->no_space_stripes_lock);
	while (!list_empty(&log->no_space_stripes)) {
		sh = list_first_entry(&log->no_space_stripes,
				      struct stripe_head, log_list);
		list_del_init(&sh->log_list);
		set_bit(STRIPE_HANDLE, &sh->state);
		raid5_release_stripe(sh);
	}
	spin_unlock(&log->no_space_stripes_lock);
}
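
/*
 * Space between the log tail (last_checkpoint) and the next checkpoint
 * candidate is backed only by io_units that already reached
 * IO_UNIT_STRIPE_END, so it can be reclaimed safely.
 */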
static sector_t r5l_reclaimable_space(struct r5l_log *log)
{
	return r5l_ring_distance(log, log->last_checkpoint,
				 log->next_checkpoint);
}
static bool r5l_complete_finished_ios(struct r5l_log *log)
{
	struct r5l_io_unit *io, *next;
	bool found = false;

	assert_spin_locked(&log->io_list_lock);

	list_for_each_entry_safe(io, next, &log->finished_ios, log_sibling) {
		/* don't change list order */
		if (io->state < IO_UNIT_STRIPE_END)
			break;

		log->next_checkpoint = io->log_start;
		log->next_cp_seq = io->seq;

		list_del(&io->log_sibling);
		kmem_cache_free(log->io_kc, io);

		found = true;
	}

	return found;
}
static void __r5l_stripe_write_finished(struct r5l_io_unit *io)
{
	struct r5l_log *log = io->log;
	unsigned long flags;

	spin_lock_irqsave(&log->io_list_lock, flags);
	__r5l_set_io_unit_state(io, IO_UNIT_STRIPE_END);

	if (!r5l_complete_finished_ios(log)) {
		spin_unlock_irqrestore(&log->io_list_lock, flags);
		return;
	}

	if (r5l_reclaimable_space(log) > log->max_free_space)
		r5l_wake_reclaim(log, 0);

	spin_unlock_irqrestore(&log->io_list_lock, flags);
	wake_up(&log->iounit_wait);
}
void r5l_stripe_write_finished(struct stripe_head *sh)
{
	struct r5l_io_unit *io;

	io = sh->log_io;
	sh->log_io = NULL;

	if (io && atomic_dec_and_test(&io->pending_stripe))
		__r5l_stripe_write_finished(io);
}
static void r5l_log_flush_endio(struct bio *bio)
{
	struct r5l_log *log = container_of(bio, struct r5l_log,
		flush_bio);
	unsigned long flags;
	struct r5l_io_unit *io;

	if (bio->bi_error)
		md_error(log->rdev->mddev, log->rdev);

	spin_lock_irqsave(&log->io_list_lock, flags);
	list_for_each_entry(io, &log->flushing_ios, log_sibling)
		r5l_io_run_stripes(io);
	list_splice_tail_init(&log->flushing_ios, &log->finished_ios);
	spin_unlock_irqrestore(&log->io_list_lock, flags);
}
/*
 * Starting to dispatch IO to raid.
 * An io_unit (meta) makes up one chunk of the log. There is one situation we
 * want to avoid: a broken meta in the middle of the log means recovery cannot
 * find any meta at the head of the log. If an operation requires the meta at
 * the head to be persistent in the log, we must make sure the meta before it
 * is persistent in the log too. A case is:
 *
 * stripe data/parity is in the log, and we start writing the stripe to the
 * raid disks. The stripe data/parity must be persistent in the log before we
 * do the write to the raid disks.
 *
 * The solution is that we strictly maintain io_unit list order. In this case,
 * we only write stripes of an io_unit to the raid disks until the io_unit is
 * the first one whose data/parity is in the log.
 */
void r5l_flush_stripe_to_raid(struct r5l_log *log)
{
	bool do_flush;

	if (!log || !log->need_cache_flush)
		return;

	spin_lock_irq(&log->io_list_lock);
	/* flush bio is running */
	if (!list_empty(&log->flushing_ios)) {
		spin_unlock_irq(&log->io_list_lock);
		return;
	}
	list_splice_tail_init(&log->io_end_ios, &log->flushing_ios);
	do_flush = !list_empty(&log->flushing_ios);
	spin_unlock_irq(&log->io_list_lock);

	if (!do_flush)
		return;
	bio_reset(&log->flush_bio);
	log->flush_bio.bi_bdev = log->rdev->bdev;
	log->flush_bio.bi_end_io = r5l_log_flush_endio;
	submit_bio(WRITE_FLUSH, &log->flush_bio);
}
static void r5l_write_super(struct r5l_log *log, sector_t cp);
static void r5l_write_super_and_discard_space(struct r5l_log *log,
	sector_t end)
{
	struct block_device *bdev = log->rdev->bdev;
	struct mddev *mddev;

	r5l_write_super(log, end);

	if (!blk_queue_discard(bdev_get_queue(bdev)))
		return;

	mddev = log->rdev->mddev;
	/*
	 * This is to avoid a deadlock. r5l_quiesce holds reconfig_mutex and
	 * waits for this thread to finish. This thread waits for
	 * MD_CHANGE_PENDING clear, which is supposed to be done in
	 * md_check_recovery(). md_check_recovery() tries to get
	 * reconfig_mutex. Since r5l_quiesce already holds the mutex,
	 * md_check_recovery() fails, so the PENDING flag never gets cleared.
	 * The in_teardown check works around this issue.
	 */
	if (!log->in_teardown) {
		set_bit(MD_CHANGE_DEVS, &mddev->flags);
		set_bit(MD_CHANGE_PENDING, &mddev->flags);
		md_wakeup_thread(mddev->thread);
		wait_event(mddev->sb_wait,
			!test_bit(MD_CHANGE_PENDING, &mddev->flags) ||
			log->in_teardown);
		/*
		 * r5l_quiesce could run after the in_teardown check and hold
		 * the mutex first. The superblock might get updated twice.
		 */
		if (log->in_teardown)
			md_update_sb(mddev, 1);
	} else {
		WARN_ON(!mddev_is_locked(mddev));
		md_update_sb(mddev, 1);
	}

	/* discard IO error really doesn't matter, ignore it */
	if (log->last_checkpoint < end) {
		blkdev_issue_discard(bdev,
				log->last_checkpoint + log->rdev->data_offset,
				end - log->last_checkpoint, GFP_NOIO, 0);
	} else {
		blkdev_issue_discard(bdev,
				log->last_checkpoint + log->rdev->data_offset,
				log->device_size - log->last_checkpoint,
				GFP_NOIO, 0);
		blkdev_issue_discard(bdev, log->rdev->data_offset, end,
				GFP_NOIO, 0);
	}
}
static void r5l_do_reclaim(struct r5l_log *log)
{
	sector_t reclaim_target = xchg(&log->reclaim_target, 0);
	sector_t reclaimable;
	sector_t next_checkpoint;
	u64 next_cp_seq;

	spin_lock_irq(&log->io_list_lock);
	/*
	 * move the proper io_units to the reclaim list. We should not change
	 * the order. reclaimable/unreclaimable io_units can be mixed in the
	 * list, and we shouldn't reuse the space of an unreclaimable io_unit.
	 */
	while (1) {
		reclaimable = r5l_reclaimable_space(log);
		if (reclaimable >= reclaim_target ||
		    (list_empty(&log->running_ios) &&
		     list_empty(&log->io_end_ios) &&
		     list_empty(&log->flushing_ios) &&
		     list_empty(&log->finished_ios)))
			break;

		md_wakeup_thread(log->rdev->mddev->thread);
		wait_event_lock_irq(log->iounit_wait,
				    r5l_reclaimable_space(log) > reclaimable,
				    log->io_list_lock);
	}

	next_checkpoint = log->next_checkpoint;
	next_cp_seq = log->next_cp_seq;
	spin_unlock_irq(&log->io_list_lock);

	BUG_ON(reclaimable < 0);
	if (reclaimable == 0)
		return;

	/*
	 * write_super will flush the cache of each raid disk. We must write
	 * the super here, because the log area might be reused soon and we
	 * don't want to confuse recovery.
	 */
	r5l_write_super_and_discard_space(log, next_checkpoint);

	mutex_lock(&log->io_mutex);
	log->last_checkpoint = next_checkpoint;
	log->last_cp_seq = next_cp_seq;
	mutex_unlock(&log->io_mutex);

	r5l_run_no_space_stripes(log);
}
static void r5l_reclaim_thread(struct md_thread *thread)
{
	struct mddev *mddev = thread->mddev;
	struct r5conf *conf = mddev->private;
	struct r5l_log *log = conf->log;

	if (!log)
		return;
	r5l_do_reclaim(log);
}
static void r5l_wake_reclaim(struct r5l_log *log, sector_t space)
{
	unsigned long target;
	unsigned long new = (unsigned long)space; /* overflow in theory */

	do {
		target = log->reclaim_target;
		if (new < target)
			return;
	} while (cmpxchg(&log->reclaim_target, target, new) != target);
	md_wakeup_thread(log->reclaim_thread);
}
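
/*
 * state == 0: resume, re-register the reclaim thread.
 * state == 1: suspend/teardown, flush outstanding reclaim work and stop the
 *             reclaim thread.
 * state == 2: ignored here.
 */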
void r5l_quiesce(struct r5l_log *log, int state)
{
	struct mddev *mddev;

	if (!log || state == 2)
		return;
	if (state == 0) {
		log->in_teardown = 0;
		log->reclaim_thread = md_register_thread(r5l_reclaim_thread,
					log->rdev->mddev, "reclaim");
	} else if (state == 1) {
		/*
		 * at this point all stripes are finished, so the io_units are
		 * at least in STRIPE_END state
		 */
		log->in_teardown = 1;
		/* make sure r5l_write_super_and_discard_space exits */
		mddev = log->rdev->mddev;
		wake_up(&mddev->sb_wait);
		r5l_wake_reclaim(log, -1L);
		md_unregister_thread(&log->reclaim_thread);
		r5l_do_reclaim(log);
	}
}
bool r5l_log_disk_error(struct r5conf *conf)
{
	struct r5l_log *log;
	bool ret;

	/* don't allow write if journal disk is missing */
	rcu_read_lock();
	log = rcu_dereference(conf->log);

	if (!log)
		ret = test_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
	else
		ret = test_bit(Faulty, &log->rdev->flags);
	rcu_read_unlock();
	return ret;
}
struct r5l_recovery_ctx {
	struct page *meta_page;		/* current meta */
	sector_t meta_total_blocks;	/* total size of current meta and data */
	sector_t pos;			/* recovery position */
	u64 seq;			/* recovery position seq */
};
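
/*
 * Recovery walks the log from last_checkpoint, validating each meta block
 * (magic, version, sequence, position and crc) before replaying the
 * data/parity payloads it describes onto the raid disks.
 */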
static int r5l_read_meta_block(struct r5l_log *log,
			       struct r5l_recovery_ctx *ctx)
{
	struct page *page = ctx->meta_page;
	struct r5l_meta_block *mb;
	u32 crc, stored_crc;

	if (!sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page, READ, false))
		return -EIO;

	mb = page_address(page);
	stored_crc = le32_to_cpu(mb->checksum);
	mb->checksum = 0;

	if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
	    le64_to_cpu(mb->seq) != ctx->seq ||
	    mb->version != R5LOG_VERSION ||
	    le64_to_cpu(mb->position) != ctx->pos)
		return -EINVAL;

	crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
	if (stored_crc != crc)
		return -EINVAL;

	if (le32_to_cpu(mb->meta_size) > PAGE_SIZE)
		return -EINVAL;

	ctx->meta_total_blocks = BLOCK_SECTORS;

	return 0;
}
static int r5l_recovery_flush_one_stripe(struct r5l_log *log,
					 struct r5l_recovery_ctx *ctx,
					 sector_t stripe_sect,
					 int *offset, sector_t *log_offset)
{
	struct r5conf *conf = log->rdev->mddev->private;
	struct stripe_head *sh;
	struct r5l_payload_data_parity *payload;
	int disk_index;

	sh = raid5_get_active_stripe(conf, stripe_sect, 0, 0, 0);
	while (1) {
		payload = page_address(ctx->meta_page) + *offset;

		if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) {
			raid5_compute_sector(conf,
					     le64_to_cpu(payload->location), 0,
					     &disk_index, sh);

			sync_page_io(log->rdev, *log_offset, PAGE_SIZE,
				     sh->dev[disk_index].page, READ, false);
			sh->dev[disk_index].log_checksum =
				le32_to_cpu(payload->checksum[0]);
			set_bit(R5_Wantwrite, &sh->dev[disk_index].flags);
			ctx->meta_total_blocks += BLOCK_SECTORS;
		} else {
			disk_index = sh->pd_idx;
			sync_page_io(log->rdev, *log_offset, PAGE_SIZE,
				     sh->dev[disk_index].page, READ, false);
			sh->dev[disk_index].log_checksum =
				le32_to_cpu(payload->checksum[0]);
			set_bit(R5_Wantwrite, &sh->dev[disk_index].flags);

			if (sh->qd_idx >= 0) {
				disk_index = sh->qd_idx;
				sync_page_io(log->rdev,
					     r5l_ring_add(log, *log_offset, BLOCK_SECTORS),
					     PAGE_SIZE, sh->dev[disk_index].page,
					     READ, false);
				sh->dev[disk_index].log_checksum =
					le32_to_cpu(payload->checksum[1]);
				set_bit(R5_Wantwrite,
					&sh->dev[disk_index].flags);
			}
			ctx->meta_total_blocks += BLOCK_SECTORS * conf->max_degraded;
		}

		*log_offset = r5l_ring_add(log, *log_offset,
					   le32_to_cpu(payload->size));
		*offset += sizeof(struct r5l_payload_data_parity) +
			sizeof(__le32) *
			(le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
		if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY)
			break;
	}

	for (disk_index = 0; disk_index < sh->disks; disk_index++) {
		void *addr;
		u32 checksum;

		if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags))
			continue;
		addr = kmap_atomic(sh->dev[disk_index].page);
		checksum = crc32c_le(log->uuid_checksum, addr, PAGE_SIZE);
		kunmap_atomic(addr);
		if (checksum != sh->dev[disk_index].log_checksum)
			goto error;
	}

	for (disk_index = 0; disk_index < sh->disks; disk_index++) {
		struct md_rdev *rdev, *rrdev;

		if (!test_and_clear_bit(R5_Wantwrite,
					&sh->dev[disk_index].flags))
			continue;

		/* in case device is broken */
		rdev = rcu_dereference(conf->disks[disk_index].rdev);
		if (rdev)
			sync_page_io(rdev, stripe_sect, PAGE_SIZE,
				     sh->dev[disk_index].page, WRITE, false);
		rrdev = rcu_dereference(conf->disks[disk_index].replacement);
		if (rrdev)
			sync_page_io(rrdev, stripe_sect, PAGE_SIZE,
				     sh->dev[disk_index].page, WRITE, false);
	}
	raid5_release_stripe(sh);
	return 0;

error:
	for (disk_index = 0; disk_index < sh->disks; disk_index++)
		sh->dev[disk_index].flags = 0;
	raid5_release_stripe(sh);
	return -EINVAL;
}
static int r5l_recovery_flush_one_meta(struct r5l_log *log,
				       struct r5l_recovery_ctx *ctx)
{
	struct r5conf *conf = log->rdev->mddev->private;
	struct r5l_payload_data_parity *payload;
	struct r5l_meta_block *mb;
	int offset;
	sector_t log_offset;
	sector_t stripe_sector;

	mb = page_address(ctx->meta_page);
	offset = sizeof(struct r5l_meta_block);
	log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);

	while (offset < le32_to_cpu(mb->meta_size)) {
		int dd;

		payload = (void *)mb + offset;
		stripe_sector = raid5_compute_sector(conf,
				le64_to_cpu(payload->location), 0, &dd, NULL);
		if (r5l_recovery_flush_one_stripe(log, ctx, stripe_sector,
						  &offset, &log_offset))
			return -EINVAL;
	}
	return 0;
}
/* copy data/parity from log to raid disks */
static void r5l_recovery_flush_log(struct r5l_log *log,
				   struct r5l_recovery_ctx *ctx)
{
	while (1) {
		if (r5l_read_meta_block(log, ctx))
			return;
		if (r5l_recovery_flush_one_meta(log, ctx))
			return;
		ctx->seq++;
		ctx->pos = r5l_ring_add(log, ctx->pos, ctx->meta_total_blocks);
	}
}
static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
					  u64 seq)
{
	struct page *page;
	struct r5l_meta_block *mb;
	u32 crc;

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return -ENOMEM;
	mb = page_address(page);
	mb->magic = cpu_to_le32(R5LOG_MAGIC);
	mb->version = R5LOG_VERSION;
	mb->meta_size = cpu_to_le32(sizeof(struct r5l_meta_block));
	mb->seq = cpu_to_le64(seq);
	mb->position = cpu_to_le64(pos);
	crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
	mb->checksum = cpu_to_le32(crc);

	if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, WRITE_FUA, false)) {
		__free_page(page);
		return -EIO;
	}
	__free_page(page);
	return 0;
}
static int r5l_recovery_log(struct r5l_log *log)
{
	struct r5l_recovery_ctx ctx;

	ctx.pos = log->last_checkpoint;
	ctx.seq = log->last_cp_seq;
	ctx.meta_page = alloc_page(GFP_KERNEL);
	if (!ctx.meta_page)
		return -ENOMEM;

	r5l_recovery_flush_log(log, &ctx);
	__free_page(ctx.meta_page);

	/*
	 * we did a recovery. Now ctx.pos points to an invalid meta block. The
	 * new log will start here, but we can't let the superblock point to
	 * the last valid meta block. The log might look like:
	 * | meta 1| meta 2| meta 3|
	 * meta 1 is valid, meta 2 is invalid. meta 3 could be valid. If the
	 * superblock points to meta 1 and we write a new valid meta 2n, then
	 * if a crash happens again, the new recovery will start from meta 1.
	 * Since meta 2n is valid now, recovery will think meta 3 is valid,
	 * which is wrong.
	 * The solution is that we create a new meta in meta2 with its seq ==
	 * meta 1's seq + 10 and let the superblock point to meta2. The same
	 * recovery will not think meta 3 is a valid meta, because its seq
	 * doesn't match.
	 */
	if (ctx.seq > log->last_cp_seq + 1) {
		int ret;

		ret = r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq + 10);
		if (ret)
			return ret;
		log->seq = ctx.seq + 11;
		log->log_start = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS);
		r5l_write_super(log, ctx.pos);
	} else {
		log->log_start = ctx.pos;
		log->seq = ctx.seq;
	}
	return 0;
}
static void r5l_write_super(struct r5l_log *log, sector_t cp)
{
	struct mddev *mddev = log->rdev->mddev;

	log->rdev->journal_tail = cp;
	set_bit(MD_CHANGE_DEVS, &mddev->flags);
}
static int r5l_load_log(struct r5l_log *log)
{
	struct md_rdev *rdev = log->rdev;
	struct page *page;
	struct r5l_meta_block *mb;
	sector_t cp = log->rdev->journal_tail;
	u32 stored_crc, expected_crc;
	bool create_super = false;
	int ret;

	/* Make sure it's valid */
	if (cp >= rdev->sectors || round_down(cp, BLOCK_SECTORS) != cp)
		cp = 0;
	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	if (!sync_page_io(rdev, cp, PAGE_SIZE, page, READ, false)) {
		ret = -EIO;
		goto ioerr;
	}
	mb = page_address(page);

	if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
	    mb->version != R5LOG_VERSION) {
		create_super = true;
		goto create;
	}
	stored_crc = le32_to_cpu(mb->checksum);
	mb->checksum = 0;
	expected_crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
	if (stored_crc != expected_crc) {
		create_super = true;
		goto create;
	}
	if (le64_to_cpu(mb->position) != cp) {
		create_super = true;
		goto create;
	}
create:
	if (create_super) {
		log->last_cp_seq = prandom_u32();
		cp = 0;
		/*
		 * Make sure the super points to the correct address. The log
		 * might have data very soon. If the super doesn't have the
		 * correct log tail address, recovery can't find the log.
		 */
		r5l_write_super(log, cp);
	} else
		log->last_cp_seq = le64_to_cpu(mb->seq);

	log->device_size = round_down(rdev->sectors, BLOCK_SECTORS);
	log->max_free_space = log->device_size >> RECLAIM_MAX_FREE_SPACE_SHIFT;
	if (log->max_free_space > RECLAIM_MAX_FREE_SPACE)
		log->max_free_space = RECLAIM_MAX_FREE_SPACE;
	log->last_checkpoint = cp;

	__free_page(page);

	return r5l_recovery_log(log);
ioerr:
	__free_page(page);
	return ret;
}
int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
{
	struct r5l_log *log;

	if (PAGE_SIZE != 4096)
		return -EINVAL;
	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		return -ENOMEM;
	log->rdev = rdev;

	log->need_cache_flush = (rdev->bdev->bd_disk->queue->flush_flags != 0);

	log->uuid_checksum = crc32c_le(~0, rdev->mddev->uuid,
				       sizeof(rdev->mddev->uuid));

	mutex_init(&log->io_mutex);

	spin_lock_init(&log->io_list_lock);
	INIT_LIST_HEAD(&log->running_ios);
	INIT_LIST_HEAD(&log->io_end_ios);
	INIT_LIST_HEAD(&log->flushing_ios);
	INIT_LIST_HEAD(&log->finished_ios);
	bio_init(&log->flush_bio);

	log->io_kc = KMEM_CACHE(r5l_io_unit, 0);
	if (!log->io_kc)
		goto io_kc;

	log->bs = bioset_create(R5L_POOL_SIZE, 0);
	if (!log->bs)
		goto io_bs;

	log->meta_pool = mempool_create_page_pool(R5L_POOL_SIZE, 0);
	if (!log->meta_pool)
		goto out_mempool;

	log->reclaim_thread = md_register_thread(r5l_reclaim_thread,
						 log->rdev->mddev, "reclaim");
	if (!log->reclaim_thread)
		goto reclaim_thread;
	init_waitqueue_head(&log->iounit_wait);

	INIT_LIST_HEAD(&log->no_space_stripes);
	spin_lock_init(&log->no_space_stripes_lock);

	if (r5l_load_log(log))
		goto error;

	rcu_assign_pointer(conf->log, log);
	return 0;

error:
	md_unregister_thread(&log->reclaim_thread);
reclaim_thread:
	mempool_destroy(log->meta_pool);
out_mempool:
	bioset_free(log->bs);
io_bs:
	kmem_cache_destroy(log->io_kc);
io_kc:
	kfree(log);
	return -EINVAL;
}
void r5l_exit_log(struct r5l_log *log)
{
	md_unregister_thread(&log->reclaim_thread);
	mempool_destroy(log->meta_pool);
	bioset_free(log->bs);
	kmem_cache_destroy(log->io_kc);
	kfree(log);
}