/*
 * raid1.c : Multiple Devices driver for Linux
 *
 * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat
 *
 * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *
 * RAID-1 management functions.
 *
 * Better read-balancing code written by Mika Kuoppala <miku@iki.fi>, 2000
 *
 * Fixes to reconstruction by Jakob Østergaard <jakob@ostenfeld.dk>
 * Various fixes by Neil Brown <neilb@cse.unsw.edu.au>
 *
 * Changes by Peter T. Breuer <ptb@it.uc3m.es> 31/1/2003 to support
 * bitmapped intelligence in resync:
 *
 * - bitmap marked during normal i/o
 * - bitmap used to skip nondirty blocks during sync
 *
 * Additions to bitmap code, (C) 2003-2004 Paul Clements, SteelEye Technology:
 * - persistent bitmap code
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
#include <linux/sched/signal.h>

#include <trace/events/block.h>

#define UNSUPPORTED_MDDEV_FLAGS		\
	((1L << MD_HAS_JOURNAL) |	\
	 (1L << MD_JOURNAL_CLEAN) |	\

/*
 * Number of guaranteed r1bios in case of extreme VM load:
 */
#define NR_RAID1_BIOS 256

/* when we get a read error on a read-only array, we redirect to another
 * device without failing the first device, or trying to over-write to
 * correct the read error. To keep track of bad blocks on a per-bio
 * level, we store IO_BLOCKED in the appropriate 'bios' pointer
 */
#define IO_BLOCKED ((struct bio *)1)
/* When we successfully write to a known bad-block, we need to remove the
 * bad-block marking which must be done from process context. So we record
 * the success by setting devs[n].bio to IO_MADE_GOOD
 */
#define IO_MADE_GOOD ((struct bio *)2)

#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)

/* When there are this many requests queued to be written by
 * the raid1 thread, we become 'congested' to provide back-pressure
 */
static int max_queued_requests = 1024;

static void allow_barrier(struct r1conf *conf, sector_t sector_nr);
static void lower_barrier(struct r1conf *conf, sector_t sector_nr);

#define raid1_log(md, fmt, args...) \
	do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid1 " fmt, ##args); } while (0)
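/*
 * Note on the sentinel values above: IO_BLOCKED and IO_MADE_GOOD are not
 * real bio pointers, just the integer values 1 and 2 cast to a pointer
 * type.  Storing them in r1_bio->bios[] lets a per-device slot carry
 * "skip this device" / "clear the bad block on this device" state without
 * a separate flags array, and BIO_SPECIAL() is simply the test for either
 * sentinel.  Code that walks the bios[] array must therefore check
 * BIO_SPECIAL() before dereferencing or bio_put()-ing an entry.
 */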
/*
 * 'struct resync_pages' stores the actual pages used for doing the resync
 * IO, and it is per-bio, so make .bi_private point to it.
 */
static inline struct resync_pages *get_resync_pages(struct bio *bio)
{
	return bio->bi_private;
}

/*
 * for resync bio, r1bio pointer can be retrieved from the per-bio
 * 'struct resync_pages'.
 */
static inline struct r1bio *get_resync_r1bio(struct bio *bio)
{
	return get_resync_pages(bio)->raid_bio;
}
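/*
 * For resync/recovery bios the ownership chain is therefore:
 * bio->bi_private -> struct resync_pages -> rp->raid_bio -> struct r1bio.
 * Regular (non-resync) bios instead point bi_private straight at the
 * r1bio, which is why these helpers are only used on the resync path.
 */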
static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct pool_info *pi = data;
	int size = offsetof(struct r1bio, bios[pi->raid_disks]);

	/* allocate a r1bio with room for raid_disks entries in the bios array */
	return kzalloc(size, gfp_flags);
}

static void r1bio_pool_free(void *r1_bio, void *data)
{
	kfree(r1_bio);
}
#define RESYNC_DEPTH 32
#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
#define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH)
#define RESYNC_WINDOW_SECTORS (RESYNC_WINDOW >> 9)
#define CLUSTER_RESYNC_WINDOW (16 * RESYNC_WINDOW)
#define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9)
static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct pool_info *pi = data;
	struct r1bio *r1_bio;
	struct resync_pages *rps;

	r1_bio = r1bio_pool_alloc(gfp_flags, pi);

	rps = kmalloc(sizeof(struct resync_pages) * pi->raid_disks,
		      gfp_flags);

	/*
	 * Allocate bios : 1 for reading, n-1 for writing
	 */
	for (j = pi->raid_disks ; j-- ; ) {
		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
		r1_bio->bios[j] = bio;
	}

	/*
	 * Allocate RESYNC_PAGES data pages and attach them to
	 * the first bio.
	 * If this is a user-requested check/repair, allocate
	 * RESYNC_PAGES for each bio.
	 */
	if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
		need_pages = pi->raid_disks;

	for (j = 0; j < pi->raid_disks; j++) {
		struct resync_pages *rp = &rps[j];

		bio = r1_bio->bios[j];

		if (j < need_pages) {
			if (resync_alloc_pages(rp, gfp_flags))

			memcpy(rp, &rps[0], sizeof(*rp));
			resync_get_all_pages(rp);
		}

		rp->raid_bio = r1_bio;
		bio->bi_private = rp;
	}

	r1_bio->master_bio = NULL;

	resync_free_pages(&rps[j]);

	while (++j < pi->raid_disks)
		bio_put(r1_bio->bios[j]);

	r1bio_pool_free(r1_bio, data);
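/*
 * r1buf_pool_alloc()/r1buf_pool_free() back the r1buf mempool used for
 * resync/recovery buffers (conf->r1buf_pool), while the simpler
 * r1bio_pool_alloc()/r1bio_pool_free() pair backs conf->r1bio_pool used
 * for normal I/O.  The resync variant additionally hangs a
 * struct resync_pages off each bio's bi_private, which is what
 * get_resync_pages()/get_resync_r1bio() above rely on.
 */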
static void r1buf_pool_free(void *__r1_bio, void *data)
{
	struct pool_info *pi = data;
	struct r1bio *r1bio = __r1_bio;
	struct resync_pages *rp = NULL;

	for (i = pi->raid_disks; i--; ) {
		rp = get_resync_pages(r1bio->bios[i]);
		resync_free_pages(rp);
		bio_put(r1bio->bios[i]);
	}

	/* resync pages array stored in the 1st bio's .bi_private */

	r1bio_pool_free(r1bio, data);
}
static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio)
{
	for (i = 0; i < conf->raid_disks * 2; i++) {
		struct bio **bio = r1_bio->bios + i;
		if (!BIO_SPECIAL(*bio))
static void free_r1bio(struct r1bio *r1_bio)
{
	struct r1conf *conf = r1_bio->mddev->private;

	put_all_bios(conf, r1_bio);
	mempool_free(r1_bio, conf->r1bio_pool);
}
static void put_buf(struct r1bio *r1_bio)
{
	struct r1conf *conf = r1_bio->mddev->private;
	sector_t sect = r1_bio->sector;

	for (i = 0; i < conf->raid_disks * 2; i++) {
		struct bio *bio = r1_bio->bios[i];

		rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
	}

	mempool_free(r1_bio, conf->r1buf_pool);

	lower_barrier(conf, sect);
}
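/*
 * put_buf() is the tear-down path for resync r1bios: it drops the
 * per-device references taken when the buffer was set up, returns the
 * buffer to conf->r1buf_pool, and only then lowers the barrier bucket
 * that raise_barrier() claimed for this sector range, allowing regular
 * I/O to that barrier unit to proceed again.
 */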
static void reschedule_retry(struct r1bio *r1_bio)
{
	struct mddev *mddev = r1_bio->mddev;
	struct r1conf *conf = mddev->private;

	idx = sector_to_idx(r1_bio->sector);
	spin_lock_irqsave(&conf->device_lock, flags);
	list_add(&r1_bio->retry_list, &conf->retry_list);
	atomic_inc(&conf->nr_queued[idx]);
	spin_unlock_irqrestore(&conf->device_lock, flags);

	wake_up(&conf->wait_barrier);
	md_wakeup_thread(mddev->thread);
}
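/*
 * reschedule_retry() hands an r1bio that needs process-context work
 * (read-error recovery, bad-block clearing, write retries) over to the
 * raid1d thread: the bio is queued on conf->retry_list, the matching
 * nr_queued[] bucket counter is bumped so freeze_array() can account for
 * it, and the array thread is woken to process the list.
 */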
/*
 * raid_end_bio_io() is called when we have finished servicing a mirrored
 * operation and are ready to return a success/failure code to the buffer
 * cache layer.
 */
static void call_bio_endio(struct r1bio *r1_bio)
{
	struct bio *bio = r1_bio->master_bio;
	struct r1conf *conf = r1_bio->mddev->private;

	if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
		bio->bi_status = BLK_STS_IOERR;

	/*
	 * Wake up any possible resync thread that waits for the device
	 * to go idle.
	 */
	allow_barrier(conf, r1_bio->sector);
}
static void raid_end_bio_io(struct r1bio *r1_bio)
{
	struct bio *bio = r1_bio->master_bio;

	/* if nobody has done the final endio yet, do it now */
	if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
		pr_debug("raid1: sync end %s on sectors %llu-%llu\n",
			 (bio_data_dir(bio) == WRITE) ? "write" : "read",
			 (unsigned long long) bio->bi_iter.bi_sector,
			 (unsigned long long) bio_end_sector(bio) - 1);

		call_bio_endio(r1_bio);
	}
}
/*
 * Update disk head position estimator based on IRQ completion info.
 */
static inline void update_head_pos(int disk, struct r1bio *r1_bio)
{
	struct r1conf *conf = r1_bio->mddev->private;

	conf->mirrors[disk].head_position =
		r1_bio->sector + (r1_bio->sectors);
}
/*
 * Find the disk number which triggered the given bio
 */
static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
{
	struct r1conf *conf = r1_bio->mddev->private;
	int raid_disks = conf->raid_disks;

	for (mirror = 0; mirror < raid_disks * 2; mirror++)
		if (r1_bio->bios[mirror] == bio)

	BUG_ON(mirror == raid_disks * 2);
	update_head_pos(mirror, r1_bio);
}
static void raid1_end_read_request(struct bio *bio)
{
	int uptodate = !bio->bi_status;
	struct r1bio *r1_bio = bio->bi_private;
	struct r1conf *conf = r1_bio->mddev->private;
	struct md_rdev *rdev = conf->mirrors[r1_bio->read_disk].rdev;

	/*
	 * this branch is our 'one mirror IO has finished' event handler:
	 */
	update_head_pos(r1_bio->read_disk, r1_bio);

		set_bit(R1BIO_Uptodate, &r1_bio->state);
	else if (test_bit(FailFast, &rdev->flags) &&
		 test_bit(R1BIO_FailFast, &r1_bio->state))
		/* This was a fail-fast read so we definitely
		 * want to retry */

	/* If all other devices have failed, we want to return
	 * the error upwards rather than fail the last device.
	 * Here we redefine "uptodate" to mean "Don't want to retry"
	 */
	spin_lock_irqsave(&conf->device_lock, flags);
	if (r1_bio->mddev->degraded == conf->raid_disks ||
	    (r1_bio->mddev->degraded == conf->raid_disks-1 &&
	     test_bit(In_sync, &rdev->flags)))

	spin_unlock_irqrestore(&conf->device_lock, flags);

	raid_end_bio_io(r1_bio);
	rdev_dec_pending(rdev, conf->mddev);

	char b[BDEVNAME_SIZE];
	pr_err_ratelimited("md/raid1:%s: %s: rescheduling sector %llu\n",
			   bdevname(rdev->bdev, b),
			   (unsigned long long)r1_bio->sector);
	set_bit(R1BIO_ReadError, &r1_bio->state);
	reschedule_retry(r1_bio);
	/* don't drop the reference on read_disk yet */
}
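/*
 * Note the asymmetry in the two completion outcomes above: on success (or
 * when the error is not worth retrying) the r1bio is finished immediately
 * and the rdev reference is dropped here; on a retryable failure the r1bio
 * is marked R1BIO_ReadError and handed to raid1d via reschedule_retry(),
 * and the read_disk reference is kept so the retry path can still reach
 * the failing device.
 */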
static void close_write(struct r1bio *r1_bio)
{
	/* it really is the end of this request */
	if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
		bio_free_pages(r1_bio->behind_master_bio);
		bio_put(r1_bio->behind_master_bio);
		r1_bio->behind_master_bio = NULL;
	}
	/* clear the bitmap if all writes complete successfully */
	bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
			!test_bit(R1BIO_Degraded, &r1_bio->state),
			test_bit(R1BIO_BehindIO, &r1_bio->state));
	md_write_end(r1_bio->mddev);
}
static void r1_bio_write_done(struct r1bio *r1_bio)
{
	if (!atomic_dec_and_test(&r1_bio->remaining))

	if (test_bit(R1BIO_WriteError, &r1_bio->state))
		reschedule_retry(r1_bio);

	if (test_bit(R1BIO_MadeGood, &r1_bio->state))
		reschedule_retry(r1_bio);

	raid_end_bio_io(r1_bio);
}
static void raid1_end_write_request(struct bio *bio)
{
	struct r1bio *r1_bio = bio->bi_private;
	int behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
	struct r1conf *conf = r1_bio->mddev->private;
	struct bio *to_put = NULL;
	int mirror = find_bio_disk(r1_bio, bio);
	struct md_rdev *rdev = conf->mirrors[mirror].rdev;

	discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD;

	/*
	 * 'one mirror IO has finished' event handler:
	 */
	if (bio->bi_status && !discard_error) {
		set_bit(WriteErrorSeen, &rdev->flags);
		if (!test_and_set_bit(WantReplacement, &rdev->flags))
			set_bit(MD_RECOVERY_NEEDED, &
				conf->mddev->recovery);

		if (test_bit(FailFast, &rdev->flags) &&
		    (bio->bi_opf & MD_FAILFAST) &&
		    /* We never try FailFast to WriteMostly devices */
		    !test_bit(WriteMostly, &rdev->flags)) {
			md_error(r1_bio->mddev, rdev);
			if (!test_bit(Faulty, &rdev->flags))
				/* This is the only remaining device,
				 * We need to retry the write without
				 * FailFast
				 */
				set_bit(R1BIO_WriteError, &r1_bio->state);

			/* Finished with this branch */
			r1_bio->bios[mirror] = NULL;
		}

		set_bit(R1BIO_WriteError, &r1_bio->state);
	}

	/*
	 * Set R1BIO_Uptodate in our master bio, so that we
	 * will return a good error code to the higher
	 * levels even if IO on some other mirrored buffer
	 * fails.
	 *
	 * The 'master' represents the composite IO operation
	 * to user-side. So if something waits for IO, then it
	 * will wait for the 'master' bio.
	 */
	r1_bio->bios[mirror] = NULL;

	/*
	 * Do not set R1BIO_Uptodate if the current device is
	 * rebuilding or Faulty. This is because we cannot use
	 * such device for properly reading the data back (we could
	 * potentially use it, if the current write fell
	 * before rdev->recovery_offset, but for simplicity we don't
	 * check this here.
	 */
	if (test_bit(In_sync, &rdev->flags) &&
	    !test_bit(Faulty, &rdev->flags))
		set_bit(R1BIO_Uptodate, &r1_bio->state);

	/* Maybe we can clear some bad blocks. */
	if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
			&first_bad, &bad_sectors) && !discard_error) {
		r1_bio->bios[mirror] = IO_MADE_GOOD;
		set_bit(R1BIO_MadeGood, &r1_bio->state);
	}

	/* we release behind master bio when all writes are done */
	if (r1_bio->behind_master_bio == bio)

	if (test_bit(WriteMostly, &rdev->flags))
		atomic_dec(&r1_bio->behind_remaining);

	/*
	 * In behind mode, we ACK the master bio once the I/O
	 * has safely reached all non-writemostly
	 * disks. Setting the Returned bit ensures that this
	 * gets done only once -- we don't ever want to return
	 * -EIO here, instead we'll wait
	 */
	if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) &&
	    test_bit(R1BIO_Uptodate, &r1_bio->state)) {
		/* Maybe we can return now */
		if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
			struct bio *mbio = r1_bio->master_bio;
			pr_debug("raid1: behind end write sectors"
				 " %llu-%llu\n",
				 (unsigned long long) mbio->bi_iter.bi_sector,
				 (unsigned long long) bio_end_sector(mbio) - 1);
			call_bio_endio(r1_bio);
		}
	}
	if (r1_bio->bios[mirror] == NULL)
		rdev_dec_pending(rdev, conf->mddev);

	/*
	 * Let's see if all mirrored write operations have finished
	 * already.
	 */
	r1_bio_write_done(r1_bio);
}
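/*
 * Write completion thus has three duties: record per-device failure state
 * (WriteErrorSeen / WantReplacement / R1BIO_WriteError), decide whether
 * the master bio can already be acknowledged (the write-behind fast path
 * above), and drop this device's reference before letting
 * r1_bio_write_done() check whether the whole mirrored write is complete.
 */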
static sector_t align_to_barrier_unit_end(sector_t start_sector,
					  sector_t sectors)
{
	WARN_ON(sectors == 0);
	/*
	 * len is the number of sectors from start_sector to end of the
	 * barrier unit which start_sector belongs to.
	 */
	len = round_up(start_sector + 1, BARRIER_UNIT_SECTOR_SIZE) -
		start_sector;
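/*
 * For illustration (the numbers are made up, not the driver's actual
 * BARRIER_UNIT_SECTOR_SIZE): with a barrier unit of 8 sectors and
 * start_sector == 13, round_up(14, 8) == 16, so len == 3 -- sectors
 * 13, 14 and 15 remain before the unit boundary.  The caller then clamps
 * the request to min(len, sectors) so a single r1bio never straddles two
 * barrier buckets, which is what lets raise_barrier()/wait_barrier()
 * operate on independent per-bucket counters.
 */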
/*
 * This routine returns the disk from which the requested read should
 * be done. There is a per-array 'next expected sequential IO' sector
 * number - if this matches on the next IO then we use the last disk.
 * There is also a per-disk 'last known head position' sector that is
 * maintained from IRQ contexts, both the normal and the resync IO
 * completion handlers update this position correctly. If there is no
 * perfect sequential match then we pick the disk whose head is closest.
 *
 * If there are 2 mirrors in the same 2 devices, performance degrades
 * because position is mirror, not device based.
 *
 * The rdev for the device selected will have nr_pending incremented.
 */
static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors)
{
	const sector_t this_sector = r1_bio->sector;
	int best_good_sectors;
	int best_disk, best_dist_disk, best_pending_disk;
	unsigned int min_pending;
	struct md_rdev *rdev;
	int choose_next_idle;

	/*
	 * Check if we can balance. We can balance on the whole
	 * device if no resync is going on, or below the resync window.
	 * We take the first readable disk when above the resync window.
	 */
	sectors = r1_bio->sectors;
	best_dist = MaxSector;
	best_pending_disk = -1;
	min_pending = UINT_MAX;
	best_good_sectors = 0;
	choose_next_idle = 0;
	clear_bit(R1BIO_FailFast, &r1_bio->state);

	if ((conf->mddev->recovery_cp < this_sector + sectors) ||
	    (mddev_is_clustered(conf->mddev) &&
	     md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector,
		    this_sector + sectors)))

	for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
		unsigned int pending;

		rdev = rcu_dereference(conf->mirrors[disk].rdev);
		if (r1_bio->bios[disk] == IO_BLOCKED
		    || test_bit(Faulty, &rdev->flags))

		if (!test_bit(In_sync, &rdev->flags) &&
		    rdev->recovery_offset < this_sector + sectors)

		if (test_bit(WriteMostly, &rdev->flags)) {
			/* Don't balance among write-mostly, just
			 * use the first as a last resort */
			if (best_dist_disk < 0) {
				if (is_badblock(rdev, this_sector, sectors,
						&first_bad, &bad_sectors)) {
					if (first_bad <= this_sector)
						/* Cannot use this */

					best_good_sectors = first_bad - this_sector;

					best_good_sectors = sectors;
				best_dist_disk = disk;
				best_pending_disk = disk;
			}
		}
		/* This is a reasonable device to use.  It might
		 * even be the best.
		 */
		if (is_badblock(rdev, this_sector, sectors,
				&first_bad, &bad_sectors)) {
			if (best_dist < MaxSector)
				/* already have a better device */

			if (first_bad <= this_sector) {
				/* cannot read here. If this is the 'primary'
				 * device, then we must not read beyond
				 * bad_sectors from another device..
				 */
				bad_sectors -= (this_sector - first_bad);
				if (choose_first && sectors > bad_sectors)
					sectors = bad_sectors;
				if (best_good_sectors > sectors)
					best_good_sectors = sectors;
			} else {
				sector_t good_sectors = first_bad - this_sector;
				if (good_sectors > best_good_sectors) {
					best_good_sectors = good_sectors;
				}
			}
		}

		if ((sectors > best_good_sectors) && (best_disk >= 0))

		best_good_sectors = sectors;

		/* At least two disks to choose from so failfast is OK */
		set_bit(R1BIO_FailFast, &r1_bio->state);

		nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
		has_nonrot_disk |= nonrot;
		pending = atomic_read(&rdev->nr_pending);
		dist = abs(this_sector - conf->mirrors[disk].head_position);

		/* Don't change to another disk for sequential reads */
		if (conf->mirrors[disk].next_seq_sect == this_sector
			int opt_iosize = bdev_io_opt(rdev->bdev) >> 9;
			struct raid1_info *mirror = &conf->mirrors[disk];

			/*
			 * If buffered sequential IO size exceeds optimal
			 * iosize, check if there is idle disk. If yes, choose
			 * the idle disk. read_balance could already choose an
			 * idle disk before noticing it's a sequential IO in
			 * this disk. This doesn't matter because this disk
			 * will idle, next time it will be utilized after the
			 * first disk has IO size exceeds optimal iosize. In
			 * this way, iosize of the first disk will be optimal
			 * iosize at least. iosize of the second disk might be
			 * small, but not a big deal since when the second disk
			 * starts IO, the first disk is likely still busy.
			 */
			if (nonrot && opt_iosize > 0 &&
			    mirror->seq_start != MaxSector &&
			    mirror->next_seq_sect > opt_iosize &&
			    mirror->next_seq_sect - opt_iosize >=
				choose_next_idle = 1;

		if (choose_next_idle)

		if (min_pending > pending) {
			min_pending = pending;
			best_pending_disk = disk;
		}

		if (dist < best_dist) {
			best_dist_disk = disk;
		}
	}

	/*
	 * If all disks are rotational, choose the closest disk. If any disk is
	 * non-rotational, choose the disk with fewer pending requests even if
	 * the disk is rotational, which might/might not be optimal for raids
	 * with mixed rotational/non-rotational disks depending on workload.
	 */
	if (best_disk == -1) {
		if (has_nonrot_disk || min_pending == 0)
			best_disk = best_pending_disk;

			best_disk = best_dist_disk;
	}

	if (best_disk >= 0) {
		rdev = rcu_dereference(conf->mirrors[best_disk].rdev);

		atomic_inc(&rdev->nr_pending);
		sectors = best_good_sectors;

		if (conf->mirrors[best_disk].next_seq_sect != this_sector)
			conf->mirrors[best_disk].seq_start = this_sector;

		conf->mirrors[best_disk].next_seq_sect = this_sector + sectors;
	}

	*max_sectors = sectors;
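/*
 * In short, read_balance() prefers, in order: the only readable copy when
 * the array is resyncing above this sector, a disk already streaming a
 * sequential read at this offset, then either the least-loaded disk (when
 * any non-rotational device is present) or the disk whose recorded head
 * position is closest.  Write-mostly devices are only ever used as a last
 * resort, and *max_sectors is trimmed so the read stops before any known
 * bad block on the chosen device.
 */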
static int raid1_congested(struct mddev *mddev, int bits)
{
	struct r1conf *conf = mddev->private;

	if ((bits & (1 << WB_async_congested)) &&
	    conf->pending_count >= max_queued_requests)

	for (i = 0; i < conf->raid_disks * 2; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags)) {
			struct request_queue *q = bdev_get_queue(rdev->bdev);

			/* Note the '|| 1' - when read_balance prefers
			 * non-congested targets, it can be removed
			 */
			if ((bits & (1 << WB_async_congested)) || 1)
				ret |= bdi_congested(q->backing_dev_info, bits);

				ret &= bdi_congested(q->backing_dev_info, bits);
		}
	}
}
static void flush_bio_list(struct r1conf *conf, struct bio *bio)
{
	/* flush any pending bitmap writes to disk before proceeding w/ I/O */
	bitmap_unplug(conf->mddev->bitmap);
	wake_up(&conf->wait_barrier);

	while (bio) { /* submit pending writes */
		struct bio *next = bio->bi_next;
		struct md_rdev *rdev = (void*)bio->bi_bdev;

		bio->bi_bdev = rdev->bdev;
		if (test_bit(Faulty, &rdev->flags)) {
			bio->bi_status = BLK_STS_IOERR;

		} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
				    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))

			generic_make_request(bio);
	}
}
static void flush_pending_writes(struct r1conf *conf)
{
	/* Any writes that have been queued but are awaiting
	 * bitmap updates get flushed here.
	 */
	spin_lock_irq(&conf->device_lock);

	if (conf->pending_bio_list.head) {
		bio = bio_list_get(&conf->pending_bio_list);
		conf->pending_count = 0;
		spin_unlock_irq(&conf->device_lock);
		flush_bio_list(conf, bio);
	} else
		spin_unlock_irq(&conf->device_lock);
}
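/*
 * Queued writes reach the disks through one of two paths: raid1d calls
 * flush_pending_writes() for bios parked on conf->pending_bio_list, and
 * raid1_unplug() below flushes the per-task plug list when a plugging
 * context unwinds.  Both funnel into flush_bio_list(), which is why the
 * bitmap is unplugged there before any of the queued writes are issued.
 */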
/*
 * Sometimes we need to suspend IO while we do something else,
 * either some resync/recovery, or reconfigure the array.
 * To do this we raise a 'barrier'.
 * The 'barrier' is a counter that can be raised multiple times
 * to count how many activities are happening which preclude
 * normal IO.
 * We can only raise the barrier if there is no pending IO.
 * i.e. if nr_pending == 0.
 * We choose only to raise the barrier if no-one is waiting for the
 * barrier to go down.  This means that as soon as an IO request
 * is ready, no other operations which require a barrier will start
 * until the IO request has had a chance.
 *
 * So: regular IO calls 'wait_barrier'.  When that returns there
 * is no background IO happening.  It must arrange to call
 * allow_barrier when it has finished its IO.
 * Background IO calls must call raise_barrier.  Once that returns
 * there is no normal IO happening.  It must arrange to call
 * lower_barrier when the particular background IO completes.
 */
static void raise_barrier(struct r1conf *conf, sector_t sector_nr)
{
	int idx = sector_to_idx(sector_nr);

	spin_lock_irq(&conf->resync_lock);

	/* Wait until no block IO is waiting */
	wait_event_lock_irq(conf->wait_barrier,
			    !atomic_read(&conf->nr_waiting[idx]),

	/* block any new IO from starting */
	atomic_inc(&conf->barrier[idx]);
	/*
	 * In raise_barrier() we firstly increase conf->barrier[idx] then
	 * check conf->nr_pending[idx]. In _wait_barrier() we firstly
	 * increase conf->nr_pending[idx] then check conf->barrier[idx].
	 * A memory barrier here to make sure conf->nr_pending[idx] won't
	 * be fetched before conf->barrier[idx] is increased. Otherwise
	 * there will be a race between raise_barrier() and _wait_barrier().
	 */
	smp_mb__after_atomic();

	/* For these conditions we must wait:
	 * A: while the array is in frozen state
	 * B: while conf->nr_pending[idx] is not 0, meaning regular I/O
	 *    existing in corresponding I/O barrier bucket.
	 * C: while conf->barrier[idx] >= RESYNC_DEPTH, meaning we have
	 *    reached the max resync count allowed on the current I/O
	 *    barrier bucket.
	 */
	wait_event_lock_irq(conf->wait_barrier,
			    !conf->array_frozen &&
			     !atomic_read(&conf->nr_pending[idx]) &&
			     atomic_read(&conf->barrier[idx]) < RESYNC_DEPTH,

	atomic_inc(&conf->nr_sync_pending);
	spin_unlock_irq(&conf->resync_lock);
}
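/*
 * Unlike the older single-counter scheme, the barrier here is bucketed:
 * sector_to_idx() hashes a sector's barrier unit into one of
 * BARRIER_BUCKETS_NR slots, and barrier[], nr_pending[], nr_waiting[] and
 * nr_queued[] are all per-bucket counters.  Resync on one region of the
 * array therefore only stalls regular I/O that targets the same bucket,
 * not the whole device.
 */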
static void lower_barrier(struct r1conf *conf, sector_t sector_nr)
{
	int idx = sector_to_idx(sector_nr);

	BUG_ON(atomic_read(&conf->barrier[idx]) <= 0);

	atomic_dec(&conf->barrier[idx]);
	atomic_dec(&conf->nr_sync_pending);
	wake_up(&conf->wait_barrier);
}
static void _wait_barrier(struct r1conf *conf, int idx)
{
	/*
	 * We need to increase conf->nr_pending[idx] very early here,
	 * then raise_barrier() can be blocked when it waits for
	 * conf->nr_pending[idx] to be 0. Then we can avoid holding
	 * conf->resync_lock when there is no barrier raised in same
	 * barrier unit bucket. Also if the array is frozen, I/O
	 * should be blocked until array is unfrozen.
	 */
	atomic_inc(&conf->nr_pending[idx]);
	/*
	 * In _wait_barrier() we firstly increase conf->nr_pending[idx], then
	 * check conf->barrier[idx]. In raise_barrier() we firstly increase
	 * conf->barrier[idx], then check conf->nr_pending[idx]. A memory
	 * barrier is necessary here to make sure conf->barrier[idx] won't be
	 * fetched before conf->nr_pending[idx] is increased. Otherwise there
	 * will be a race between _wait_barrier() and raise_barrier().
	 */
	smp_mb__after_atomic();

	/*
	 * Don't worry about checking two atomic_t variables at same time
	 * here. If, while we check conf->barrier[idx], the array is
	 * frozen (conf->array_frozen is 1) and conf->barrier[idx] is
	 * 0, it is safe to return and make the I/O continue. Because the
	 * array is frozen, all I/O returned here will eventually complete
	 * or be queued, no race will happen. See code comment in
	 */
	if (!READ_ONCE(conf->array_frozen) &&
	    !atomic_read(&conf->barrier[idx]))

	/*
	 * After holding conf->resync_lock, conf->nr_pending[idx]
	 * should be decreased before waiting for barrier to drop.
	 * Otherwise, we may encounter a race condition because
	 * raise_barrier() might be waiting for conf->nr_pending[idx]
	 * to be 0 at same time.
	 */
	spin_lock_irq(&conf->resync_lock);
	atomic_inc(&conf->nr_waiting[idx]);
	atomic_dec(&conf->nr_pending[idx]);
	/*
	 * In case freeze_array() is waiting for
	 * get_unqueued_pending() == extra
	 */
	wake_up(&conf->wait_barrier);
	/* Wait for the barrier in same barrier unit bucket to drop. */
	wait_event_lock_irq(conf->wait_barrier,
			    !conf->array_frozen &&
			     !atomic_read(&conf->barrier[idx]),

	atomic_inc(&conf->nr_pending[idx]);
	atomic_dec(&conf->nr_waiting[idx]);
	spin_unlock_irq(&conf->resync_lock);
}
static void wait_read_barrier(struct r1conf *conf, sector_t sector_nr)
{
	int idx = sector_to_idx(sector_nr);

	/*
	 * Very similar to _wait_barrier(). The difference is, for read
	 * I/O we don't need to wait for sync I/O, but if the whole array
	 * is frozen, the read I/O still has to wait until the array is
	 * unfrozen. Since there is no ordering requirement with
	 * conf->barrier[idx] here, memory barrier is unnecessary as well.
	 */
	atomic_inc(&conf->nr_pending[idx]);

	if (!READ_ONCE(conf->array_frozen))

	spin_lock_irq(&conf->resync_lock);
	atomic_inc(&conf->nr_waiting[idx]);
	atomic_dec(&conf->nr_pending[idx]);
	/*
	 * In case freeze_array() is waiting for
	 * get_unqueued_pending() == extra
	 */
	wake_up(&conf->wait_barrier);
	/* Wait for array to be unfrozen */
	wait_event_lock_irq(conf->wait_barrier,

	atomic_inc(&conf->nr_pending[idx]);
	atomic_dec(&conf->nr_waiting[idx]);
	spin_unlock_irq(&conf->resync_lock);
}
static void wait_barrier(struct r1conf *conf, sector_t sector_nr)
{
	int idx = sector_to_idx(sector_nr);

	_wait_barrier(conf, idx);
}

static void wait_all_barriers(struct r1conf *conf)
{
	for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++)
		_wait_barrier(conf, idx);
}

static void _allow_barrier(struct r1conf *conf, int idx)
{
	atomic_dec(&conf->nr_pending[idx]);
	wake_up(&conf->wait_barrier);
}

static void allow_barrier(struct r1conf *conf, sector_t sector_nr)
{
	int idx = sector_to_idx(sector_nr);

	_allow_barrier(conf, idx);
}

static void allow_all_barriers(struct r1conf *conf)
{
	for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++)
		_allow_barrier(conf, idx);
}

/* conf->resync_lock should be held */
static int get_unqueued_pending(struct r1conf *conf)
{
	ret = atomic_read(&conf->nr_sync_pending);
	for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++)
		ret += atomic_read(&conf->nr_pending[idx]) -
			atomic_read(&conf->nr_queued[idx]);
}
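/*
 * get_unqueued_pending() counts I/O that is still "in flight" from the
 * barrier code's point of view: every sync request plus, per bucket, the
 * normal requests that have entered wait_barrier()/wait_read_barrier()
 * but have not been parked on conf->retry_list (nr_queued).
 * freeze_array() below waits for this number to fall to 'extra'.
 */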
static void freeze_array(struct r1conf *conf, int extra)
{
	/* Stop sync I/O and normal I/O and wait for everything to
	 * complete.
	 * This is called in two situations:
	 * 1) management command handlers (reshape, remove disk, quiesce).
	 * 2) one normal I/O request failed.
	 *
	 * After array_frozen is set to 1, new sync IO will be blocked at
	 * raise_barrier(), and new normal I/O will be blocked at _wait_barrier()
	 * or wait_read_barrier(). The flying I/Os will either complete or be
	 * queued. When everything goes quiet, there are only queued I/Os left.
	 *
	 * Every flying I/O contributes to a conf->nr_pending[idx], idx is the
	 * barrier bucket index which this I/O request hits. When all sync and
	 * normal I/O are queued, sum of all conf->nr_pending[] will match sum
	 * of all conf->nr_queued[]. But normal I/O failure is an exception,
	 * in handle_read_error(), we may call freeze_array() before trying to
	 * fix the read error. In this case, the error read I/O is not queued,
	 * so get_unqueued_pending() == 1.
	 *
	 * Therefore before this function returns, we need to wait until
	 * get_unqueued_pending(conf) gets equal to extra. For
	 * normal I/O context, extra is 1; in all other situations extra is 0.
	 */
	spin_lock_irq(&conf->resync_lock);
	conf->array_frozen = 1;
	raid1_log(conf->mddev, "wait freeze");
	wait_event_lock_irq_cmd(
		get_unqueued_pending(conf) == extra,
		flush_pending_writes(conf));
	spin_unlock_irq(&conf->resync_lock);
}

static void unfreeze_array(struct r1conf *conf)
{
	/* reverse the effect of the freeze */
	spin_lock_irq(&conf->resync_lock);
	conf->array_frozen = 0;
	spin_unlock_irq(&conf->resync_lock);
	wake_up(&conf->wait_barrier);
}
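/*
 * freeze_array()/unfreeze_array() bracket operations that must see the
 * array quiescent across *all* barrier buckets (device removal, error
 * handling, reconfiguration), whereas raise_barrier() only excludes I/O
 * from a single bucket.  The read-error handler passes extra == 1 because
 * the failed read it is about to repair is itself still counted as
 * pending, as the comment above explains.
 */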
static struct bio *alloc_behind_master_bio(struct r1bio *r1_bio,
					   struct bio *bio)
{
	int size = bio->bi_iter.bi_size;
	unsigned vcnt = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct bio *behind_bio = NULL;

	behind_bio = bio_alloc_mddev(GFP_NOIO, vcnt, r1_bio->mddev);

	/* discard op, we don't support writezero/writesame yet */
	if (!bio_has_data(bio))

	while (i < vcnt && size) {
		int len = min_t(int, PAGE_SIZE, size);

		page = alloc_page(GFP_NOIO);
		if (unlikely(!page))

		bio_add_page(behind_bio, page, len, 0);
	}

	bio_copy_data(behind_bio, bio);

	r1_bio->behind_master_bio = behind_bio;
	set_bit(R1BIO_BehindIO, &r1_bio->state);

	pr_debug("%dB behind alloc failed, doing sync I/O\n",
		 bio->bi_iter.bi_size);
	bio_free_pages(behind_bio);
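/*
 * Write-behind thus operates on a private copy of the caller's data: the
 * pages allocated here are filled via bio_copy_data(), so the master bio
 * can be completed as soon as the non-write-mostly mirrors are safe,
 * while the slow (write-mostly) device keeps writing from the copy.  If
 * allocation fails, the caller falls back to ordinary synchronous writes,
 * as the pr_debug() message above notes.
 */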
struct raid1_plug_cb {
	struct blk_plug_cb	cb;
	struct bio_list		pending;
};

static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb,
						  cb);
	struct mddev *mddev = plug->cb.data;
	struct r1conf *conf = mddev->private;

	if (from_schedule || current->bio_list) {
		spin_lock_irq(&conf->device_lock);
		bio_list_merge(&conf->pending_bio_list, &plug->pending);
		conf->pending_count += plug->pending_cnt;
		spin_unlock_irq(&conf->device_lock);
		wake_up(&conf->wait_barrier);
		md_wakeup_thread(mddev->thread);
	}

	/* we aren't scheduling, so we can do the write-out directly. */
	bio = bio_list_get(&plug->pending);
	flush_bio_list(conf, bio);
}
static void init_r1bio(struct r1bio *r1_bio, struct mddev *mddev, struct bio *bio)
{
	r1_bio->master_bio = bio;
	r1_bio->sectors = bio_sectors(bio);
	r1_bio->mddev = mddev;
	r1_bio->sector = bio->bi_iter.bi_sector;
}

static inline struct r1bio *
alloc_r1bio(struct mddev *mddev, struct bio *bio)
{
	struct r1conf *conf = mddev->private;
	struct r1bio *r1_bio;

	r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
	/* Ensure no bio records IO_BLOCKED */
	memset(r1_bio->bios, 0, conf->raid_disks * sizeof(r1_bio->bios[0]));
	init_r1bio(r1_bio, mddev, bio);
}
static void raid1_read_request(struct mddev *mddev, struct bio *bio,
			       int max_read_sectors, struct r1bio *r1_bio)
{
	struct r1conf *conf = mddev->private;
	struct raid1_info *mirror;
	struct bio *read_bio;
	struct bitmap *bitmap = mddev->bitmap;
	const int op = bio_op(bio);
	const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
	bool print_msg = !!r1_bio;
	char b[BDEVNAME_SIZE];

	/*
	 * If r1_bio is set, we are blocking the raid1d thread
	 * so there is a tiny risk of deadlock. So ask for
	 * emergency memory if needed.
	 */
	gfp_t gfp = r1_bio ? (GFP_NOIO | __GFP_HIGH) : GFP_NOIO;

	/* Need to get the block device name carefully */
	struct md_rdev *rdev;
	rdev = rcu_dereference(conf->mirrors[r1_bio->read_disk].rdev);
	bdevname(rdev->bdev, b);

	/*
	 * Still need barrier for READ in case that whole
	 * array is frozen.
	 */
	wait_read_barrier(conf, bio->bi_iter.bi_sector);

	r1_bio = alloc_r1bio(mddev, bio);

	init_r1bio(r1_bio, mddev, bio);
	r1_bio->sectors = max_read_sectors;

	/*
	 * make_request() can abort the operation when read-ahead is being
	 * used and no empty request is available.
	 */
	rdisk = read_balance(conf, r1_bio, &max_sectors);

	/* couldn't find anywhere to read from */
	pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
			    (unsigned long long)r1_bio->sector);

	raid_end_bio_io(r1_bio);

	mirror = conf->mirrors + rdisk;

	pr_info_ratelimited("md/raid1:%s: redirecting sector %llu to other mirror: %s\n",
			    (unsigned long long)r1_bio->sector,
			    bdevname(mirror->rdev->bdev, b));

	if (test_bit(WriteMostly, &mirror->rdev->flags) &&
		/*
		 * Reading from a write-mostly device must take care not to
		 * over-take any writes that are 'behind'
		 */
		raid1_log(mddev, "wait behind writes");
		wait_event(bitmap->behind_wait,
			   atomic_read(&bitmap->behind_writes) == 0);

	if (max_sectors < bio_sectors(bio)) {
		struct bio *split = bio_split(bio, max_sectors,
					      gfp, conf->bio_split);
		bio_chain(split, bio);
		generic_make_request(bio);

		r1_bio->master_bio = bio;
		r1_bio->sectors = max_sectors;
	}

	r1_bio->read_disk = rdisk;

	read_bio = bio_clone_fast(bio, gfp, mddev->bio_set);

	r1_bio->bios[rdisk] = read_bio;

	read_bio->bi_iter.bi_sector = r1_bio->sector +
		mirror->rdev->data_offset;
	read_bio->bi_bdev = mirror->rdev->bdev;
	read_bio->bi_end_io = raid1_end_read_request;
	bio_set_op_attrs(read_bio, op, do_sync);
	if (test_bit(FailFast, &mirror->rdev->flags) &&
	    test_bit(R1BIO_FailFast, &r1_bio->state))
		read_bio->bi_opf |= MD_FAILFAST;
	read_bio->bi_private = r1_bio;

	trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev),
			      read_bio, disk_devt(mddev->gendisk),

	generic_make_request(read_bio);
}
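/*
 * The read path in one sentence: pick a mirror with read_balance(), split
 * the master bio if the choice is only good for part of the range, clone
 * the remaining front portion, point the clone at the chosen rdev (adding
 * data_offset), and submit it with raid1_end_read_request() as the
 * completion handler.  When this function is re-entered from raid1d after
 * a read error, the incoming r1_bio is non-NULL and GFP_NOIO | __GFP_HIGH
 * is used to reduce the deadlock risk noted above.
 */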
static void raid1_write_request(struct mddev *mddev, struct bio *bio,
				int max_write_sectors)
{
	struct r1conf *conf = mddev->private;
	struct r1bio *r1_bio;
	struct bitmap *bitmap = mddev->bitmap;
	unsigned long flags;
	struct md_rdev *blocked_rdev;
	struct blk_plug_cb *cb;
	struct raid1_plug_cb *plug = NULL;

	/*
	 * Register the new request and wait if the reconstruction
	 * thread has put up a bar for new requests.
	 * Continue immediately if no resync is active currently.
	 */
	if ((bio_end_sector(bio) > mddev->suspend_lo &&
	     bio->bi_iter.bi_sector < mddev->suspend_hi) ||
	    (mddev_is_clustered(mddev) &&
	     md_cluster_ops->area_resyncing(mddev, WRITE,
		     bio->bi_iter.bi_sector, bio_end_sector(bio)))) {

		/*
		 * As the suspend_* range is controlled by userspace, we want
		 * an interruptible wait.
		 */
		prepare_to_wait(&conf->wait_barrier,
				&w, TASK_INTERRUPTIBLE);
		if (bio_end_sector(bio) <= mddev->suspend_lo ||
		    bio->bi_iter.bi_sector >= mddev->suspend_hi ||
		    (mddev_is_clustered(mddev) &&
		     !md_cluster_ops->area_resyncing(mddev, WRITE,
						     bio->bi_iter.bi_sector,
						     bio_end_sector(bio))))

		sigprocmask(SIG_BLOCK, &full, &old);

		sigprocmask(SIG_SETMASK, &old, NULL);

		finish_wait(&conf->wait_barrier, &w);
	}
	wait_barrier(conf, bio->bi_iter.bi_sector);

	r1_bio = alloc_r1bio(mddev, bio);
	r1_bio->sectors = max_write_sectors;

	if (conf->pending_count >= max_queued_requests) {
		md_wakeup_thread(mddev->thread);
		raid1_log(mddev, "wait queued");
		wait_event(conf->wait_barrier,
			   conf->pending_count < max_queued_requests);
	}
	/* first select target devices under rcu_lock and
	 * inc refcount on their rdev. Record them by setting
	 * If there are known/acknowledged bad blocks on any device on
	 * which we have seen a write error, we want to avoid writing those
	 * This potentially requires several writes to write around
	 * the bad blocks. Each set of writes gets its own r1bio
	 * with a set of bios attached.
	 */

	disks = conf->raid_disks * 2;

	blocked_rdev = NULL;

	max_sectors = r1_bio->sectors;
	for (i = 0; i < disks; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
			atomic_inc(&rdev->nr_pending);
			blocked_rdev = rdev;
		}
		r1_bio->bios[i] = NULL;
		if (!rdev || test_bit(Faulty, &rdev->flags)) {
			if (i < conf->raid_disks)
				set_bit(R1BIO_Degraded, &r1_bio->state);
		}

		atomic_inc(&rdev->nr_pending);
		if (test_bit(WriteErrorSeen, &rdev->flags)) {

			is_bad = is_badblock(rdev, r1_bio->sector, max_sectors,
					     &first_bad, &bad_sectors);

			/* mustn't write here until the bad block is
			 * acknowledged
			 */
			set_bit(BlockedBadBlocks, &rdev->flags);
			blocked_rdev = rdev;

			if (is_bad && first_bad <= r1_bio->sector) {
				/* Cannot write here at all */
				bad_sectors -= (r1_bio->sector - first_bad);
				if (bad_sectors < max_sectors)
					/* mustn't write more than bad_sectors
					 * to other devices yet
					 */
					max_sectors = bad_sectors;
				rdev_dec_pending(rdev, mddev);
				/* We don't set R1BIO_Degraded as that
				 * only applies if the disk is
				 * missing, so it might be re-added,
				 * and we want to know to recover this
				 * chunk.
				 * In this case the device is here,
				 * and the fact that this chunk is not
				 * in-sync is recorded in the bad
				 * block log.
				 */
			} else {
				int good_sectors = first_bad - r1_bio->sector;
				if (good_sectors < max_sectors)
					max_sectors = good_sectors;
			}
		}
		r1_bio->bios[i] = bio;
	}

	if (unlikely(blocked_rdev)) {
		/* Wait for this device to become unblocked */

		for (j = 0; j < i; j++)
			if (r1_bio->bios[j])
				rdev_dec_pending(conf->mirrors[j].rdev, mddev);

		allow_barrier(conf, bio->bi_iter.bi_sector);
		raid1_log(mddev, "wait rdev %d blocked", blocked_rdev->raid_disk);
		md_wait_for_blocked_rdev(blocked_rdev, mddev);
		wait_barrier(conf, bio->bi_iter.bi_sector);
	}

	if (max_sectors < bio_sectors(bio)) {
		struct bio *split = bio_split(bio, max_sectors,
					      GFP_NOIO, conf->bio_split);
		bio_chain(split, bio);
		generic_make_request(bio);

		r1_bio->master_bio = bio;
		r1_bio->sectors = max_sectors;
	}

	atomic_set(&r1_bio->remaining, 1);
	atomic_set(&r1_bio->behind_remaining, 0);

	for (i = 0; i < disks; i++) {
		struct bio *mbio = NULL;
		if (!r1_bio->bios[i])

		/* Not if there are too many, or cannot
		 * allocate memory, or a reader on WriteMostly
		 * is waiting for behind writes to flush */
		    (atomic_read(&bitmap->behind_writes)
		     < mddev->bitmap_info.max_write_behind) &&
		    !waitqueue_active(&bitmap->behind_wait)) {
			mbio = alloc_behind_master_bio(r1_bio, bio);
		}

		bitmap_startwrite(bitmap, r1_bio->sector,
				  test_bit(R1BIO_BehindIO,

		if (r1_bio->behind_master_bio)
			mbio = bio_clone_fast(r1_bio->behind_master_bio,

			mbio = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set);

		if (r1_bio->behind_master_bio) {
			if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
				atomic_inc(&r1_bio->behind_remaining);
		}

		r1_bio->bios[i] = mbio;

		mbio->bi_iter.bi_sector = (r1_bio->sector +
					   conf->mirrors[i].rdev->data_offset);
		mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
		mbio->bi_end_io = raid1_end_write_request;
		mbio->bi_opf = bio_op(bio) | (bio->bi_opf & (REQ_SYNC | REQ_FUA));
		if (test_bit(FailFast, &conf->mirrors[i].rdev->flags) &&
		    !test_bit(WriteMostly, &conf->mirrors[i].rdev->flags) &&
		    conf->raid_disks - mddev->degraded > 1)
			mbio->bi_opf |= MD_FAILFAST;
		mbio->bi_private = r1_bio;

		atomic_inc(&r1_bio->remaining);

		trace_block_bio_remap(bdev_get_queue(mbio->bi_bdev),
				      mbio, disk_devt(mddev->gendisk),

		/* flush_pending_writes() needs access to the rdev so...*/
		mbio->bi_bdev = (void*)conf->mirrors[i].rdev;

		cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));

		plug = container_of(cb, struct raid1_plug_cb, cb);

		bio_list_add(&plug->pending, mbio);
		plug->pending_cnt++;

		spin_lock_irqsave(&conf->device_lock, flags);
		bio_list_add(&conf->pending_bio_list, mbio);
		conf->pending_count++;
		spin_unlock_irqrestore(&conf->device_lock, flags);
		md_wakeup_thread(mddev->thread);
	}

	r1_bio_write_done(r1_bio);

	/* In case raid1d snuck in to freeze_array */
	wake_up(&conf->wait_barrier);
}
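/*
 * Unlike reads, a write fans out to every usable mirror: the loop above
 * clones one mbio per rdev (from the behind master copy when write-behind
 * applies), trims max_sectors around any acknowledged bad blocks so a
 * separate r1bio can cover the remainder, and queues the clones through
 * the plug or conf->pending_bio_list rather than issuing them directly,
 * so raid1d/flush_bio_list() can submit them after the bitmap has been
 * committed.
 */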
static bool raid1_make_request(struct mddev *mddev, struct bio *bio)
{
	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
		md_flush_request(mddev, bio);
	}

	/*
	 * There is a limit to the maximum size, but
	 * the read/write handler might find a lower limit
	 * due to bad blocks. To avoid multiple splits,
	 * we pass the maximum number of sectors down
	 * and let the lower level perform the split.
	 */
	sectors = align_to_barrier_unit_end(
		bio->bi_iter.bi_sector, bio_sectors(bio));

	if (bio_data_dir(bio) == READ)
		raid1_read_request(mddev, bio, sectors, NULL);

	if (!md_write_start(mddev, bio))

	raid1_write_request(mddev, bio, sectors);
}
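/*
 * raid1_make_request() is the entry point from the block layer: PREFLUSH
 * requests are handed straight to md_flush_request(), reads go to
 * raid1_read_request(), and writes must pass md_write_start() (which
 * marks the array as actively being written) before raid1_write_request()
 * is invoked.
 */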
static void raid1_status(struct seq_file *seq, struct mddev *mddev)
{
	struct r1conf *conf = mddev->private;

	seq_printf(seq, " [%d/%d] [", conf->raid_disks,
		   conf->raid_disks - mddev->degraded);
	for (i = 0; i < conf->raid_disks; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
		seq_printf(seq, "%s",
			   rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
	}
	seq_printf(seq, "]");
}
static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
{
	char b[BDEVNAME_SIZE];
	struct r1conf *conf = mddev->private;
	unsigned long flags;

	/*
	 * If it is not operational, then we have already marked it as dead
	 * else if it is the last working disk, ignore the error, let the
	 * next level up know.
	 * else mark the drive as failed
	 */
	spin_lock_irqsave(&conf->device_lock, flags);
	if (test_bit(In_sync, &rdev->flags)
	    && (conf->raid_disks - mddev->degraded) == 1) {
		/*
		 * Don't fail the drive, act as though we were just a
		 * normal single drive.
		 * However don't try a recovery from this drive as
		 * it is very likely to fail.
		 */
		conf->recovery_disabled = mddev->recovery_disabled;
		spin_unlock_irqrestore(&conf->device_lock, flags);
	}
	set_bit(Blocked, &rdev->flags);
	if (test_and_clear_bit(In_sync, &rdev->flags)) {
		set_bit(Faulty, &rdev->flags);

		set_bit(Faulty, &rdev->flags);
	spin_unlock_irqrestore(&conf->device_lock, flags);
	/*
	 * if recovery is running, make sure it aborts.
	 */
	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
	set_mask_bits(&mddev->sb_flags, 0,
		      BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
	pr_crit("md/raid1:%s: Disk failure on %s, disabling device.\n"
		"md/raid1:%s: Operation continuing on %d devices.\n",
		mdname(mddev), bdevname(rdev->bdev, b),
		mdname(mddev), conf->raid_disks - mddev->degraded);
}
static void print_conf(struct r1conf *conf)
{
	pr_debug("RAID1 conf printout:\n");

	pr_debug("(!conf)\n");

	pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,

	for (i = 0; i < conf->raid_disks; i++) {
		char b[BDEVNAME_SIZE];
		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);

		pr_debug(" disk %d, wo:%d, o:%d, dev:%s\n",
			 i, !test_bit(In_sync, &rdev->flags),
			 !test_bit(Faulty, &rdev->flags),
			 bdevname(rdev->bdev, b));
	}
}
static void close_sync(struct r1conf *conf)
{
	wait_all_barriers(conf);
	allow_all_barriers(conf);

	mempool_destroy(conf->r1buf_pool);
	conf->r1buf_pool = NULL;
}
static int raid1_spare_active(struct mddev *mddev)
{
	struct r1conf *conf = mddev->private;
	unsigned long flags;

	/*
	 * Find all failed disks within the RAID1 configuration
	 * and mark them readable.
	 * Called under mddev lock, so rcu protection not needed.
	 * device_lock used to avoid races with raid1_end_read_request
	 * which expects 'In_sync' flags and ->degraded to be consistent.
	 */
	spin_lock_irqsave(&conf->device_lock, flags);
	for (i = 0; i < conf->raid_disks; i++) {
		struct md_rdev *rdev = conf->mirrors[i].rdev;
		struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;

		    && !test_bit(Candidate, &repl->flags)
		    && repl->recovery_offset == MaxSector
		    && !test_bit(Faulty, &repl->flags)
		    && !test_and_set_bit(In_sync, &repl->flags)) {
			/* replacement has just become active */
			    !test_and_clear_bit(In_sync, &rdev->flags))

			/* Replaced device not technically
			 * faulty, but we need to be sure
			 * it gets removed and never re-added
			 */
			set_bit(Faulty, &rdev->flags);
			sysfs_notify_dirent_safe(
		}

		    && rdev->recovery_offset == MaxSector
		    && !test_bit(Faulty, &rdev->flags)
		    && !test_and_set_bit(In_sync, &rdev->flags)) {

			sysfs_notify_dirent_safe(rdev->sysfs_state);
		}
	}
	mddev->degraded -= count;
	spin_unlock_irqrestore(&conf->device_lock, flags);
}
static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
{
	struct r1conf *conf = mddev->private;
	struct raid1_info *p;
	int last = conf->raid_disks - 1;

	if (mddev->recovery_disabled == conf->recovery_disabled)

	if (md_integrity_add_rdev(rdev, mddev))

	if (rdev->raid_disk >= 0)
		first = last = rdev->raid_disk;

	/*
	 * find the disk ... but prefer rdev->saved_raid_disk
	 * if possible.
	 */
	if (rdev->saved_raid_disk >= 0 &&
	    rdev->saved_raid_disk >= first &&
	    conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
		first = last = rdev->saved_raid_disk;

	for (mirror = first; mirror <= last; mirror++) {
		p = conf->mirrors + mirror;

		disk_stack_limits(mddev->gendisk, rdev->bdev,
				  rdev->data_offset << 9);

		p->head_position = 0;
		rdev->raid_disk = mirror;

		/* As all devices are equivalent, we don't need a full recovery
		 * if this was recently any drive of the array
		 */
		if (rdev->saved_raid_disk < 0)

		rcu_assign_pointer(p->rdev, rdev);

		if (test_bit(WantReplacement, &p->rdev->flags) &&
		    p[conf->raid_disks].rdev == NULL) {
			/* Add this device as a replacement */
			clear_bit(In_sync, &rdev->flags);
			set_bit(Replacement, &rdev->flags);
			rdev->raid_disk = mirror;

			rcu_assign_pointer(p[conf->raid_disks].rdev, rdev);
		}
	}
	if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
}
static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
{
	struct r1conf *conf = mddev->private;
	int number = rdev->raid_disk;
	struct raid1_info *p = conf->mirrors + number;

	if (rdev != p->rdev)
		p = conf->mirrors + conf->raid_disks + number;

	if (rdev == p->rdev) {
		if (test_bit(In_sync, &rdev->flags) ||
		    atomic_read(&rdev->nr_pending)) {

		/* Only remove non-faulty devices if recovery
		 * is not possible.
		 */
		if (!test_bit(Faulty, &rdev->flags) &&
		    mddev->recovery_disabled != conf->recovery_disabled &&
		    mddev->degraded < conf->raid_disks) {

		if (!test_bit(RemoveSynchronized, &rdev->flags)) {

			if (atomic_read(&rdev->nr_pending)) {
				/* lost the race, try later */

		if (conf->mirrors[conf->raid_disks + number].rdev) {
			/* We just removed a device that is being replaced.
			 * Move down the replacement. We drain all IO before
			 * doing this to avoid confusion.
			 */
			struct md_rdev *repl =
				conf->mirrors[conf->raid_disks + number].rdev;
			freeze_array(conf, 0);
			clear_bit(Replacement, &repl->flags);

			conf->mirrors[conf->raid_disks + number].rdev = NULL;
			unfreeze_array(conf);
		}

		clear_bit(WantReplacement, &rdev->flags);
		err = md_integrity_register(mddev);
	}
}
static void end_sync_read(struct bio *bio)
{
	struct r1bio *r1_bio = get_resync_r1bio(bio);

	update_head_pos(r1_bio->read_disk, r1_bio);

	/*
	 * we have read a block, now it needs to be re-written,
	 * or re-read if the read failed.
	 * We don't do much here, just schedule handling by raid1d
	 */
	if (!bio->bi_status)
		set_bit(R1BIO_Uptodate, &r1_bio->state);

	if (atomic_dec_and_test(&r1_bio->remaining))
		reschedule_retry(r1_bio);
}
static void end_sync_write(struct bio *bio)
{
	int uptodate = !bio->bi_status;
	struct r1bio *r1_bio = get_resync_r1bio(bio);
	struct mddev *mddev = r1_bio->mddev;
	struct r1conf *conf = mddev->private;

	struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev;

	sector_t sync_blocks = 0;
	sector_t s = r1_bio->sector;
	long sectors_to_go = r1_bio->sectors;
	/* make sure these bits don't get cleared. */
	bitmap_end_sync(mddev->bitmap, s,

	sectors_to_go -= sync_blocks;
	} while (sectors_to_go > 0);

	set_bit(WriteErrorSeen, &rdev->flags);
	if (!test_and_set_bit(WantReplacement, &rdev->flags))
		set_bit(MD_RECOVERY_NEEDED, &

	set_bit(R1BIO_WriteError, &r1_bio->state);
	} else if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
			       &first_bad, &bad_sectors) &&
		   !is_badblock(conf->mirrors[r1_bio->read_disk].rdev,
				&first_bad, &bad_sectors)
		set_bit(R1BIO_MadeGood, &r1_bio->state);

	if (atomic_dec_and_test(&r1_bio->remaining)) {
		int s = r1_bio->sectors;
		if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
		    test_bit(R1BIO_WriteError, &r1_bio->state))
			reschedule_retry(r1_bio);

		md_done_sync(mddev, s, uptodate);
	}
}
static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector,
			   int sectors, struct page *page, int rw)
{
	if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false))

	set_bit(WriteErrorSeen, &rdev->flags);
	if (!test_and_set_bit(WantReplacement,
		set_bit(MD_RECOVERY_NEEDED, &
			rdev->mddev->recovery);

	/* need to record an error - either for the block or the device */
	if (!rdev_set_badblocks(rdev, sector, sectors, 0))
		md_error(rdev->mddev, rdev);
}
static int fix_sync_read_error(struct r1bio *r1_bio)
{
	/* Try some synchronous reads of other devices to get
	 * good data, much like with normal read errors. Only
	 * read into the pages we already have so we don't
	 * need to re-issue the read request.
	 * We don't need to freeze the array, because being in an
	 * active sync request, there is no normal IO, and
	 * no overlapping syncs.
	 * We don't need to check is_badblock() again as we
	 * made sure that anything with a bad block in range
	 * will have bi_end_io clear.
	 */
	struct mddev *mddev = r1_bio->mddev;
	struct r1conf *conf = mddev->private;
	struct bio *bio = r1_bio->bios[r1_bio->read_disk];
	struct page **pages = get_resync_pages(bio)->pages;
	sector_t sect = r1_bio->sector;
	int sectors = r1_bio->sectors;
	struct md_rdev *rdev;

	rdev = conf->mirrors[r1_bio->read_disk].rdev;
	if (test_bit(FailFast, &rdev->flags)) {
		/* Don't try recovering from here - just fail it
		 * ... unless it is the last working device of course */
		md_error(mddev, rdev);
		if (test_bit(Faulty, &rdev->flags))
			/* Don't try to read from here, but make sure
			 * put_buf does its thing
			 */
			bio->bi_end_io = end_sync_write;
	}

		int d = r1_bio->read_disk;

		if (s > (PAGE_SIZE>>9))

		if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
			/* No rcu protection needed here devices
			 * can only be removed when no resync is
			 * active, and resync is currently active
			 */
			rdev = conf->mirrors[d].rdev;
			if (sync_page_io(rdev, sect, s<<9,
					 REQ_OP_READ, 0, false)) {

			if (d == conf->raid_disks * 2)

		} while (!success && d != r1_bio->read_disk);

			char b[BDEVNAME_SIZE];

			/* Cannot read from anywhere, this block is lost.
			 * Record a bad block on each device. If that doesn't
			 * work just disable and interrupt the recovery.
			 * Don't fail devices as that won't really help.
			 */
			pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
					    bdevname(bio->bi_bdev, b),
					    (unsigned long long)r1_bio->sector);
			for (d = 0; d < conf->raid_disks * 2; d++) {
				rdev = conf->mirrors[d].rdev;
				if (!rdev || test_bit(Faulty, &rdev->flags))

				if (!rdev_set_badblocks(rdev, sect, s, 0))

			}
			conf->recovery_disabled =
				mddev->recovery_disabled;
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			md_done_sync(mddev, r1_bio->sectors, 0);

		/* write it back and re-read */
		while (d != r1_bio->read_disk) {

			d = conf->raid_disks * 2;

			if (r1_bio->bios[d]->bi_end_io != end_sync_read)

			rdev = conf->mirrors[d].rdev;
			if (r1_sync_page_io(rdev, sect, s,

				r1_bio->bios[d]->bi_end_io = NULL;
				rdev_dec_pending(rdev, mddev);
		}
		while (d != r1_bio->read_disk) {

			d = conf->raid_disks * 2;

			if (r1_bio->bios[d]->bi_end_io != end_sync_read)

			rdev = conf->mirrors[d].rdev;
			if (r1_sync_page_io(rdev, sect, s,

				atomic_add(s, &rdev->corrected_errors);
		}

	set_bit(R1BIO_Uptodate, &r1_bio->state);
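/*
 * The recovery strategy here mirrors fix_read_error() further below: read
 * the same range synchronously from each other mirror until one succeeds,
 * then write the good data back to the devices that failed (recording bad
 * blocks where the write-back also fails) and finally re-read to account
 * the corrected errors.  Only if no mirror at all can supply the data is
 * the recovery interrupted.
 */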
static void process_checks(struct r1bio *r1_bio)
{
	/* We have read all readable devices.  If we haven't
	 * got the block, then there is no hope left.
	 * If we have, then we want to do a comparison
	 * and skip the write if everything is the same.
	 * If any blocks failed to read, then we need to
	 * attempt an over-write
	 */
	struct mddev *mddev = r1_bio->mddev;
	struct r1conf *conf = mddev->private;
	int primary;
	int i;
	int vcnt;

	/* Fix variable parts of all bios */
	vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
	for (i = 0; i < conf->raid_disks * 2; i++) {
		blk_status_t status;
		struct bio *b = r1_bio->bios[i];
		struct resync_pages *rp = get_resync_pages(b);
		if (b->bi_end_io != end_sync_read)
			continue;
		/* fixup the bio for reuse, but preserve errno */
		status = b->bi_status;
		bio_reset(b);
		b->bi_status = status;
		b->bi_iter.bi_sector = r1_bio->sector +
			conf->mirrors[i].rdev->data_offset;
		b->bi_bdev = conf->mirrors[i].rdev->bdev;
		b->bi_end_io = end_sync_read;
		rp->raid_bio = r1_bio;
		b->bi_private = rp;

		/* initialize bvec table again */
		md_bio_reset_resync_pages(b, rp, r1_bio->sectors << 9);
	}
	for (primary = 0; primary < conf->raid_disks * 2; primary++)
		if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
		    !r1_bio->bios[primary]->bi_status) {
			r1_bio->bios[primary]->bi_end_io = NULL;
			rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
			break;
		}
	r1_bio->read_disk = primary;
	for (i = 0; i < conf->raid_disks * 2; i++) {
		int j;
		struct bio *pbio = r1_bio->bios[primary];
		struct bio *sbio = r1_bio->bios[i];
		blk_status_t status = sbio->bi_status;
		struct page **ppages = get_resync_pages(pbio)->pages;
		struct page **spages = get_resync_pages(sbio)->pages;
		struct bio_vec *bi;
		int page_len[RESYNC_PAGES] = { 0 };

		if (sbio->bi_end_io != end_sync_read)
			continue;
		/* Now we can 'fixup' the error value */
		sbio->bi_status = 0;

		bio_for_each_segment_all(bi, sbio, j)
			page_len[j] = bi->bv_len;

		if (!status) {
			for (j = vcnt; j-- ; ) {
				if (memcmp(page_address(ppages[j]),
					   page_address(spages[j]),
					   page_len[j]))
					break;
			}
		} else
			j = 0;
		if (j >= 0)
			atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
		if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
			      && !status)) {
			/* No need to write to this device. */
			sbio->bi_end_io = NULL;
			rdev_dec_pending(conf->mirrors[i].rdev, mddev);
			continue;
		}

		bio_copy_data(sbio, pbio);
	}
}
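
/*
 * Illustrative sketch, not part of the driver: process_checks() sizes its
 * per-page comparison with
 *	vcnt = (sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9),
 * i.e. the number of pages needed to cover 'sectors' 512-byte sectors,
 * rounded up.  A stand-alone check of that arithmetic, assuming a 4 KiB page:
 *
 *	#include <assert.h>
 *	int main(void)
 *	{
 *		const unsigned long page_size = 4096, page_shift = 12;
 *		unsigned long sectors = 17;	// hypothetical resync range
 *		unsigned long vcnt = (sectors + page_size / 512 - 1) >>
 *				     (page_shift - 9);
 *		assert(vcnt == 3);	// 17 sectors = 8.5 KiB -> 3 pages
 *		return 0;
 *	}
 */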
static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
{
	struct r1conf *conf = mddev->private;
	int i;
	int disks = conf->raid_disks * 2;
	struct bio *wbio;

	if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
		/* ouch - failed to read all of that. */
		if (!fix_sync_read_error(r1_bio))
			return;

	if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
		process_checks(r1_bio);

	/*
	 * schedule writes
	 */
	atomic_set(&r1_bio->remaining, 1);
	for (i = 0; i < disks ; i++) {
		wbio = r1_bio->bios[i];
		if (wbio->bi_end_io == NULL ||
		    (wbio->bi_end_io == end_sync_read &&
		     (i == r1_bio->read_disk ||
		      !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
			continue;
		if (test_bit(Faulty, &conf->mirrors[i].rdev->flags))
			continue;

		bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
		if (test_bit(FailFast, &conf->mirrors[i].rdev->flags))
			wbio->bi_opf |= MD_FAILFAST;

		wbio->bi_end_io = end_sync_write;
		atomic_inc(&r1_bio->remaining);
		md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));

		generic_make_request(wbio);
	}

	if (atomic_dec_and_test(&r1_bio->remaining)) {
		/* if we're here, all write(s) have completed, so clean up */
		int s = r1_bio->sectors;
		if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
		    test_bit(R1BIO_WriteError, &r1_bio->state))
			reschedule_retry(r1_bio);
		else {
			put_buf(r1_bio);
			md_done_sync(mddev, s, 1);
		}
	}
}
/*
 * This is a kernel thread which:
 *
 *	1.	Retries failed read operations on working mirrors.
 *	2.	Updates the raid superblock when problems are encountered.
 *	3.	Performs writes following reads for array synchronising.
 */
static void fix_read_error(struct r1conf *conf, int read_disk,
			   sector_t sect, int sectors)
{
	struct mddev *mddev = conf->mddev;
	while (sectors) {
		int s = sectors;
		int d = read_disk;
		int success = 0;
		int start;
		struct md_rdev *rdev;

		if (s > (PAGE_SIZE>>9))
			s = PAGE_SIZE >> 9;

		do {
			sector_t first_bad;
			int bad_sectors;

			rcu_read_lock();
			rdev = rcu_dereference(conf->mirrors[d].rdev);
			if (rdev &&
			    (test_bit(In_sync, &rdev->flags) ||
			     (!test_bit(Faulty, &rdev->flags) &&
			      rdev->recovery_offset >= sect + s)) &&
			    is_badblock(rdev, sect, s,
					&first_bad, &bad_sectors) == 0) {
				atomic_inc(&rdev->nr_pending);
				rcu_read_unlock();
				if (sync_page_io(rdev, sect, s<<9,
						 conf->tmppage, REQ_OP_READ, 0, false))
					success = 1;
				rdev_dec_pending(rdev, mddev);
				if (success)
					break;
			} else
				rcu_read_unlock();
			d++;
			if (d == conf->raid_disks * 2)
				d = 0;
		} while (!success && d != read_disk);

		if (!success) {
			/* Cannot read from anywhere - mark it bad */
			struct md_rdev *rdev = conf->mirrors[read_disk].rdev;
			if (!rdev_set_badblocks(rdev, sect, s, 0))
				md_error(mddev, rdev);
			break;
		}
		/* write it back and re-read */
		start = d;
		while (d != read_disk) {
			if (d == 0)
				d = conf->raid_disks * 2;
			d--;
			rcu_read_lock();
			rdev = rcu_dereference(conf->mirrors[d].rdev);
			if (rdev &&
			    !test_bit(Faulty, &rdev->flags)) {
				atomic_inc(&rdev->nr_pending);
				rcu_read_unlock();
				r1_sync_page_io(rdev, sect, s,
						conf->tmppage, WRITE);
				rdev_dec_pending(rdev, mddev);
			} else
				rcu_read_unlock();
		}
		d = start;
		while (d != read_disk) {
			char b[BDEVNAME_SIZE];
			if (d == 0)
				d = conf->raid_disks * 2;
			d--;
			rcu_read_lock();
			rdev = rcu_dereference(conf->mirrors[d].rdev);
			if (rdev &&
			    !test_bit(Faulty, &rdev->flags)) {
				atomic_inc(&rdev->nr_pending);
				rcu_read_unlock();
				if (r1_sync_page_io(rdev, sect, s,
						    conf->tmppage, READ)) {
					atomic_add(s, &rdev->corrected_errors);
					pr_info("md/raid1:%s: read error corrected (%d sectors at %llu on %s)\n",
						mdname(mddev), s,
						(unsigned long long)(sect +
								     rdev->data_offset),
						bdevname(rdev->bdev, b));
				}
				rdev_dec_pending(rdev, mddev);
			} else
				rcu_read_unlock();
		}
		sectors -= s;
		sect += s;
	}
}
static int narrow_write_error(struct r1bio *r1_bio, int i)
{
	struct mddev *mddev = r1_bio->mddev;
	struct r1conf *conf = mddev->private;
	struct md_rdev *rdev = conf->mirrors[i].rdev;

	/* bio has the data to be written to device 'i' where
	 * we just recently had a write error.
	 * We repeatedly clone the bio and trim down to one block,
	 * then try the write.  Where the write fails we record
	 * a bad block.
	 * It is conceivable that the bio doesn't exactly align with
	 * blocks.  We must handle this somehow.
	 *
	 * We currently own a reference on the rdev.
	 */

	int block_sectors;
	sector_t sector;
	int sectors;
	int sect_to_write = r1_bio->sectors;
	int ok = 1;

	if (rdev->badblocks.shift < 0)
		return 0;

	block_sectors = roundup(1 << rdev->badblocks.shift,
				bdev_logical_block_size(rdev->bdev) >> 9);
	sector = r1_bio->sector;
	sectors = ((sector + block_sectors)
		   & ~(sector_t)(block_sectors - 1))
		- sector;

	while (sect_to_write) {
		struct bio *wbio;
		if (sectors > sect_to_write)
			sectors = sect_to_write;
		/* Write at 'sector' for 'sectors'*/

		if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
			wbio = bio_clone_fast(r1_bio->behind_master_bio,
					      GFP_NOIO,
					      mddev->bio_set);
			/* We really need a _all clone */
			wbio->bi_iter = (struct bvec_iter){ 0 };
		} else {
			wbio = bio_clone_fast(r1_bio->master_bio, GFP_NOIO,
					      mddev->bio_set);
		}

		bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
		wbio->bi_iter.bi_sector = r1_bio->sector;
		wbio->bi_iter.bi_size = r1_bio->sectors << 9;

		bio_trim(wbio, sector - r1_bio->sector, sectors);
		wbio->bi_iter.bi_sector += rdev->data_offset;
		wbio->bi_bdev = rdev->bdev;

		if (submit_bio_wait(wbio) < 0)
			/* failure! */
			ok = rdev_set_badblocks(rdev, sector,
						sectors, 0)
				&& ok;

		bio_put(wbio);
		sect_to_write -= sectors;
		sector += sectors;
		sectors = block_sectors;
	}
	return ok;
}
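
/*
 * Illustrative sketch, not part of the driver: narrow_write_error() first
 * writes only up to the next badblock-granularity boundary, then continues in
 * whole blocks.  Modelling that arithmetic with an assumed 8-sector (4 KiB)
 * badblock unit and a misaligned starting sector:
 *
 *	#include <stdio.h>
 *	int main(void)
 *	{
 *		unsigned long long block_sectors = 8;		// assumed granularity
 *		unsigned long long sector = 1003;		// hypothetical start
 *		unsigned long long sect_to_write = 20;		// hypothetical length
 *		// first chunk only reaches the next block_sectors boundary
 *		unsigned long long sectors = ((sector + block_sectors) &
 *					      ~(block_sectors - 1)) - sector;
 *		while (sect_to_write) {
 *			if (sectors > sect_to_write)
 *				sectors = sect_to_write;
 *			printf("write %llu sectors at %llu\n", sectors, sector);
 *			sect_to_write -= sectors;
 *			sector += sectors;
 *			sectors = block_sectors;	// later chunks are full blocks
 *		}
 *		return 0;	// prints 5 at 1003, 8 at 1008, 7 at 1016
 *	}
 */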
static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
{
	int m;
	int s = r1_bio->sectors;
	for (m = 0; m < conf->raid_disks * 2 ; m++) {
		struct md_rdev *rdev = conf->mirrors[m].rdev;
		struct bio *bio = r1_bio->bios[m];
		if (bio->bi_end_io == NULL)
			continue;
		if (!bio->bi_status &&
		    test_bit(R1BIO_MadeGood, &r1_bio->state)) {
			rdev_clear_badblocks(rdev, r1_bio->sector, s, 0);
		}
		if (bio->bi_status &&
		    test_bit(R1BIO_WriteError, &r1_bio->state)) {
			if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0))
				md_error(conf->mddev, rdev);
		}
	}
	put_buf(r1_bio);
	md_done_sync(conf->mddev, s, 1);
}
static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
{
	int m, idx;
	bool fail = false;

	for (m = 0; m < conf->raid_disks * 2 ; m++)
		if (r1_bio->bios[m] == IO_MADE_GOOD) {
			struct md_rdev *rdev = conf->mirrors[m].rdev;
			rdev_clear_badblocks(rdev,
					     r1_bio->sector,
					     r1_bio->sectors, 0);
			rdev_dec_pending(rdev, conf->mddev);
		} else if (r1_bio->bios[m] != NULL) {
			/* This drive got a write error.  We need to
			 * narrow down and record precise write
			 * errors.
			 */
			fail = true;
			if (!narrow_write_error(r1_bio, m)) {
				md_error(conf->mddev,
					 conf->mirrors[m].rdev);
				/* an I/O failed, we can't clear the bitmap */
				set_bit(R1BIO_Degraded, &r1_bio->state);
			}
			rdev_dec_pending(conf->mirrors[m].rdev,
					 conf->mddev);
		}
	if (fail) {
		spin_lock_irq(&conf->device_lock);
		list_add(&r1_bio->retry_list, &conf->bio_end_io_list);
		idx = sector_to_idx(r1_bio->sector);
		atomic_inc(&conf->nr_queued[idx]);
		spin_unlock_irq(&conf->device_lock);
		/*
		 * In case freeze_array() is waiting for condition
		 * get_unqueued_pending() == extra to be true.
		 */
		wake_up(&conf->wait_barrier);
		md_wakeup_thread(conf->mddev->thread);
	} else {
		if (test_bit(R1BIO_WriteError, &r1_bio->state))
			close_write(r1_bio);
		raid_end_bio_io(r1_bio);
	}
}
static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
{
	struct mddev *mddev = conf->mddev;
	struct bio *bio;
	struct md_rdev *rdev;
	dev_t bio_dev;
	sector_t bio_sector;

	clear_bit(R1BIO_ReadError, &r1_bio->state);
	/* we got a read error. Maybe the drive is bad.  Maybe just
	 * the block and we can fix it.
	 * We freeze all other IO, and try reading the block from
	 * other devices.  When we find one, we re-write
	 * and check whether that fixes the read error.
	 * This is all done synchronously while the array is
	 * frozen
	 */

	bio = r1_bio->bios[r1_bio->read_disk];
	bio_dev = bio->bi_bdev->bd_dev;
	bio_sector = conf->mirrors[r1_bio->read_disk].rdev->data_offset + r1_bio->sector;
	bio_put(bio);
	r1_bio->bios[r1_bio->read_disk] = NULL;

	rdev = conf->mirrors[r1_bio->read_disk].rdev;
	if (mddev->ro == 0
	    && !test_bit(FailFast, &rdev->flags)) {
		freeze_array(conf, 1);
		fix_read_error(conf, r1_bio->read_disk,
			       r1_bio->sector, r1_bio->sectors);
		unfreeze_array(conf);
	} else if (mddev->ro == 0 && test_bit(FailFast, &rdev->flags)) {
		md_error(mddev, rdev);
	} else {
		r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED;
	}

	rdev_dec_pending(rdev, conf->mddev);
	allow_barrier(conf, r1_bio->sector);
	bio = r1_bio->master_bio;

	/* Reuse the old r1_bio so that the IO_BLOCKED settings are preserved */
	r1_bio->state = 0;
	raid1_read_request(mddev, bio, r1_bio->sectors, r1_bio);
}
static void raid1d(struct md_thread *thread)
{
	struct mddev *mddev = thread->mddev;
	struct r1bio *r1_bio;
	unsigned long flags;
	struct r1conf *conf = mddev->private;
	struct list_head *head = &conf->retry_list;
	struct blk_plug plug;
	int idx;

	md_check_recovery(mddev);

	if (!list_empty_careful(&conf->bio_end_io_list) &&
	    !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
		LIST_HEAD(tmp);
		spin_lock_irqsave(&conf->device_lock, flags);
		if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
			list_splice_init(&conf->bio_end_io_list, &tmp);
		spin_unlock_irqrestore(&conf->device_lock, flags);
		while (!list_empty(&tmp)) {
			r1_bio = list_first_entry(&tmp, struct r1bio,
						  retry_list);
			list_del(&r1_bio->retry_list);
			idx = sector_to_idx(r1_bio->sector);
			atomic_dec(&conf->nr_queued[idx]);
			if (mddev->degraded)
				set_bit(R1BIO_Degraded, &r1_bio->state);
			if (test_bit(R1BIO_WriteError, &r1_bio->state))
				close_write(r1_bio);
			raid_end_bio_io(r1_bio);
		}
	}

	blk_start_plug(&plug);
	for (;;) {

		flush_pending_writes(conf);

		spin_lock_irqsave(&conf->device_lock, flags);
		if (list_empty(head)) {
			spin_unlock_irqrestore(&conf->device_lock, flags);
			break;
		}
		r1_bio = list_entry(head->prev, struct r1bio, retry_list);
		list_del(head->prev);
		idx = sector_to_idx(r1_bio->sector);
		atomic_dec(&conf->nr_queued[idx]);
		spin_unlock_irqrestore(&conf->device_lock, flags);

		mddev = r1_bio->mddev;
		conf = mddev->private;
		if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
			if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
			    test_bit(R1BIO_WriteError, &r1_bio->state))
				handle_sync_write_finished(conf, r1_bio);
			else
				sync_request_write(mddev, r1_bio);
		} else if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
			   test_bit(R1BIO_WriteError, &r1_bio->state))
			handle_write_finished(conf, r1_bio);
		else if (test_bit(R1BIO_ReadError, &r1_bio->state))
			handle_read_error(conf, r1_bio);

		cond_resched();
		if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING))
			md_check_recovery(mddev);
	}
	blk_finish_plug(&plug);
}
static int init_resync(struct r1conf *conf)
{
	int buffs;

	buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
	BUG_ON(conf->r1buf_pool);
	conf->r1buf_pool = mempool_create(buffs, r1buf_pool_alloc, r1buf_pool_free,
					  conf->poolinfo);
	if (!conf->r1buf_pool)
		return -ENOMEM;
	return 0;
}
/*
 * perform a "sync" on one "block"
 *
 * We need to make sure that no normal I/O request - particularly write
 * requests - conflict with active sync requests.
 *
 * This is achieved by tracking pending requests and a 'barrier' concept
 * that can be installed to exclude normal IO requests.
 */

static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
				   int *skipped)
{
	struct r1conf *conf = mddev->private;
	struct r1bio *r1_bio;
	struct bio *bio;
	sector_t max_sector, nr_sectors;
	int disk = -1;
	int i;
	int wonly = -1;
	int write_targets = 0, read_targets = 0;
	sector_t sync_blocks;
	int still_degraded = 0;
	int good_sectors = RESYNC_SECTORS;
	int min_bad = 0; /* number of sectors that are bad in all devices */
	int idx = sector_to_idx(sector_nr);
	int page_idx = 0;

	if (!conf->r1buf_pool)
		if (init_resync(conf))
			return 0;

	max_sector = mddev->dev_sectors;
	if (sector_nr >= max_sector) {
		/* If we aborted, we need to abort the
		 * sync on the 'current' bitmap chunk (there will
		 * only be one in raid1 resync).
		 * We can find the current address in mddev->curr_resync
		 */
		if (mddev->curr_resync < max_sector) /* aborted */
			bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
					&sync_blocks, 1);
		else /* completed sync */
			conf->fullsync = 0;

		bitmap_close_sync(mddev->bitmap);
		close_sync(conf);

		if (mddev_is_clustered(mddev)) {
			conf->cluster_sync_low = 0;
			conf->cluster_sync_high = 0;
		}
		return 0;
	}

	if (mddev->bitmap == NULL &&
	    mddev->recovery_cp == MaxSector &&
	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
	    conf->fullsync == 0) {
		*skipped = 1;
		return max_sector - sector_nr;
	}
	/* before building a request, check if we can skip these blocks..
	 * This call to bitmap_start_sync doesn't actually record anything
	 */
	if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
	    !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
		/* We can skip this block, and probably several more */
		*skipped = 1;
		return sync_blocks;
	}

	/*
	 * If there is non-resync activity waiting for a turn, then let it
	 * through before starting on this new sync request.
	 */
	if (atomic_read(&conf->nr_waiting[idx]))
		schedule_timeout_uninterruptible(1);

	/* we are incrementing sector_nr below. To be safe, we check against
	 * sector_nr + two times RESYNC_SECTORS
	 */

	bitmap_cond_end_sync(mddev->bitmap, sector_nr,
		mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
	r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);

	raise_barrier(conf, sector_nr);

	rcu_read_lock();
	/*
	 * If we get a correctable read error during resync or recovery,
	 * we might want to read from a different device.  So we
	 * flag all drives that could conceivably be read from for READ,
	 * and any others (which will be non-In_sync devices) for WRITE.
	 * If a read fails, we try reading from something else for which READ
	 * is OK.
	 */

	r1_bio->mddev = mddev;
	r1_bio->sector = sector_nr;
	r1_bio->state = 0;
	set_bit(R1BIO_IsSync, &r1_bio->state);
	/* make sure good_sectors won't go across barrier unit boundary */
	good_sectors = align_to_barrier_unit_end(sector_nr, good_sectors);

	for (i = 0; i < conf->raid_disks * 2; i++) {
		struct md_rdev *rdev;
		bio = r1_bio->bios[i];

		rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev == NULL ||
		    test_bit(Faulty, &rdev->flags)) {
			if (i < conf->raid_disks)
				still_degraded = 1;
		} else if (!test_bit(In_sync, &rdev->flags)) {
			bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
			bio->bi_end_io = end_sync_write;
			write_targets++;
		} else {
			/* may need to read from here */
			sector_t first_bad = MaxSector;
			int bad_sectors;

			if (is_badblock(rdev, sector_nr, good_sectors,
					&first_bad, &bad_sectors)) {
				if (first_bad > sector_nr)
					good_sectors = first_bad - sector_nr;
				else {
					bad_sectors -= (sector_nr - first_bad);
					if (min_bad == 0 ||
					    min_bad > bad_sectors)
						min_bad = bad_sectors;
				}
			}
			if (sector_nr < first_bad) {
				if (test_bit(WriteMostly, &rdev->flags)) {
					if (wonly < 0)
						wonly = i;
				} else {
					if (disk < 0)
						disk = i;
				}
				bio_set_op_attrs(bio, REQ_OP_READ, 0);
				bio->bi_end_io = end_sync_read;
				read_targets++;
			} else if (!test_bit(WriteErrorSeen, &rdev->flags) &&
				   test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
				   !test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
				/*
				 * The device is suitable for reading (InSync),
				 * but has bad block(s) here. Let's try to correct them,
				 * if we are doing resync or repair. Otherwise, leave
				 * this device alone for this sync request.
				 */
				bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
				bio->bi_end_io = end_sync_write;
				write_targets++;
			}
		}
		if (bio->bi_end_io) {
			atomic_inc(&rdev->nr_pending);
			bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
			bio->bi_bdev = rdev->bdev;
			if (test_bit(FailFast, &rdev->flags))
				bio->bi_opf |= MD_FAILFAST;
		}
	}
	rcu_read_unlock();
	if (disk < 0)
		disk = wonly;
	r1_bio->read_disk = disk;

	if (read_targets == 0 && min_bad > 0) {
		/* These sectors are bad on all InSync devices, so we
		 * need to mark them bad on all write targets
		 */
		int ok = 1;
		for (i = 0 ; i < conf->raid_disks * 2 ; i++)
			if (r1_bio->bios[i]->bi_end_io == end_sync_write) {
				struct md_rdev *rdev = conf->mirrors[i].rdev;
				ok = rdev_set_badblocks(rdev, sector_nr,
							min_bad, 0
					) && ok;
			}
		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
		*skipped = 1;
		put_buf(r1_bio);

		if (!ok) {
			/* Cannot record the badblocks, so need to
			 * abort the resync.
			 * If there are multiple read targets, could just
			 * fail the really bad ones ???
			 */
			conf->recovery_disabled = mddev->recovery_disabled;
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			return 0;
		} else
			return min_bad;
	}
	if (min_bad > 0 && min_bad < good_sectors) {
		/* only resync enough to reach the next bad->good
		 * transition */
		good_sectors = min_bad;
	}

	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
		/* extra read targets are also write targets */
		write_targets += read_targets-1;

	if (write_targets == 0 || read_targets == 0) {
		/* There is nowhere to write, so all non-sync
		 * drives must be failed - so we are finished
		 */
		sector_t rv;
		if (min_bad > 0)
			max_sector = sector_nr + min_bad;
		rv = max_sector - sector_nr;
		*skipped = 1;
		put_buf(r1_bio);
		return rv;
	}

	if (max_sector > mddev->resync_max)
		max_sector = mddev->resync_max; /* Don't do IO beyond here */
	if (max_sector > sector_nr + good_sectors)
		max_sector = sector_nr + good_sectors;
	nr_sectors = 0;
	sync_blocks = 0;
	do {
		struct page *page;
		int len = PAGE_SIZE;
		if (sector_nr + (len>>9) > max_sector)
			len = (max_sector - sector_nr) << 9;
		if (len == 0)
			break;
		if (sync_blocks == 0) {
			if (!bitmap_start_sync(mddev->bitmap, sector_nr,
					       &sync_blocks, still_degraded) &&
			    !conf->fullsync &&
			    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
				break;
			if ((len >> 9) > sync_blocks)
				len = sync_blocks<<9;
		}

		for (i = 0 ; i < conf->raid_disks * 2; i++) {
			struct resync_pages *rp;

			bio = r1_bio->bios[i];
			rp = get_resync_pages(bio);
			if (bio->bi_end_io) {
				page = resync_fetch_page(rp, page_idx);

				/*
				 * won't fail because the vec table is big
				 * enough to hold all these pages
				 */
				bio_add_page(bio, page, len, 0);
			}
		}
		nr_sectors += len>>9;
		sector_nr += len>>9;
		sync_blocks -= (len>>9);
	} while (++page_idx < RESYNC_PAGES);

	r1_bio->sectors = nr_sectors;

	if (mddev_is_clustered(mddev) &&
	    conf->cluster_sync_high < sector_nr + nr_sectors) {
		conf->cluster_sync_low = mddev->curr_resync_completed;
		conf->cluster_sync_high = conf->cluster_sync_low + CLUSTER_RESYNC_WINDOW_SECTORS;
		/* Send resync message */
		md_cluster_ops->resync_info_update(mddev,
				conf->cluster_sync_low,
				conf->cluster_sync_high);
	}

	/* For a user-requested sync, we read all readable devices and do a
	 * compare
	 */
	if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
		atomic_set(&r1_bio->remaining, read_targets);
		for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) {
			bio = r1_bio->bios[i];
			if (bio->bi_end_io == end_sync_read) {
				read_targets--;
				md_sync_acct(bio->bi_bdev, nr_sectors);
				if (read_targets == 1)
					bio->bi_opf &= ~MD_FAILFAST;
				generic_make_request(bio);
			}
		}
	} else {
		atomic_set(&r1_bio->remaining, 1);
		bio = r1_bio->bios[r1_bio->read_disk];
		md_sync_acct(bio->bi_bdev, nr_sectors);
		if (read_targets == 1)
			bio->bi_opf &= ~MD_FAILFAST;
		generic_make_request(bio);
	}
	return nr_sectors;
}
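
/*
 * Illustrative sketch, not part of the driver: the do/while loop above fills
 * each resync bio one page at a time, clamping the final chunk to both the
 * device end (max_sector) and the bitmap's sync_blocks budget.  A stand-alone,
 * simplified model of that length clamping (the real loop refreshes
 * sync_blocks from the bitmap; this sketch just stops when the budget runs
 * out), with hypothetical numbers:
 *
 *	#include <stdio.h>
 *	int main(void)
 *	{
 *		unsigned long long sector_nr = 1000, max_sector = 1013;
 *		unsigned long long sync_blocks = 10;	// assumed bitmap budget
 *		int resync_pages = 4;			// stand-in for RESYNC_PAGES
 *		unsigned long long nr_sectors = 0;
 *		int page_idx = 0;
 *		do {
 *			unsigned long long len = 4096;	// one page
 *			if (sector_nr + (len >> 9) > max_sector)
 *				len = (max_sector - sector_nr) << 9;
 *			if ((len >> 9) > sync_blocks)
 *				len = sync_blocks << 9;
 *			if (len == 0)
 *				break;
 *			printf("page %d: %llu sectors\n", page_idx, len >> 9);
 *			nr_sectors += len >> 9;
 *			sector_nr += len >> 9;
 *			sync_blocks -= len >> 9;
 *		} while (++page_idx < resync_pages);
 *		printf("total %llu sectors\n", nr_sectors);	// 8 + 2 = 10
 *		return 0;
 *	}
 */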
static sector_t raid1_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
	if (sectors)
		return sectors;

	return mddev->dev_sectors;
}
static struct r1conf *setup_conf(struct mddev *mddev)
{
	struct r1conf *conf;
	int i;
	struct raid1_info *disk;
	struct md_rdev *rdev;
	int err = -ENOMEM;

	conf = kzalloc(sizeof(struct r1conf), GFP_KERNEL);
	if (!conf)
		goto abort;

	conf->nr_pending = kcalloc(BARRIER_BUCKETS_NR,
				   sizeof(atomic_t), GFP_KERNEL);
	if (!conf->nr_pending)
		goto abort;

	conf->nr_waiting = kcalloc(BARRIER_BUCKETS_NR,
				   sizeof(atomic_t), GFP_KERNEL);
	if (!conf->nr_waiting)
		goto abort;

	conf->nr_queued = kcalloc(BARRIER_BUCKETS_NR,
				  sizeof(atomic_t), GFP_KERNEL);
	if (!conf->nr_queued)
		goto abort;

	conf->barrier = kcalloc(BARRIER_BUCKETS_NR,
				sizeof(atomic_t), GFP_KERNEL);
	if (!conf->barrier)
		goto abort;

	conf->mirrors = kzalloc(sizeof(struct raid1_info)
				* mddev->raid_disks * 2,
				GFP_KERNEL);
	if (!conf->mirrors)
		goto abort;

	conf->tmppage = alloc_page(GFP_KERNEL);
	if (!conf->tmppage)
		goto abort;

	conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
	if (!conf->poolinfo)
		goto abort;
	conf->poolinfo->raid_disks = mddev->raid_disks * 2;
	conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
					  r1bio_pool_free,
					  conf->poolinfo);
	if (!conf->r1bio_pool)
		goto abort;

	conf->bio_split = bioset_create(BIO_POOL_SIZE, 0, 0);
	if (!conf->bio_split)
		goto abort;

	conf->poolinfo->mddev = mddev;

	err = -EINVAL;
	spin_lock_init(&conf->device_lock);
	rdev_for_each(rdev, mddev) {
		int disk_idx = rdev->raid_disk;
		if (disk_idx >= mddev->raid_disks
		    || disk_idx < 0)
			continue;
		if (test_bit(Replacement, &rdev->flags))
			disk = conf->mirrors + mddev->raid_disks + disk_idx;
		else
			disk = conf->mirrors + disk_idx;

		if (disk->rdev)
			goto abort;
		disk->rdev = rdev;
		disk->head_position = 0;
		disk->seq_start = MaxSector;
	}
	conf->raid_disks = mddev->raid_disks;
	conf->mddev = mddev;
	INIT_LIST_HEAD(&conf->retry_list);
	INIT_LIST_HEAD(&conf->bio_end_io_list);

	spin_lock_init(&conf->resync_lock);
	init_waitqueue_head(&conf->wait_barrier);

	bio_list_init(&conf->pending_bio_list);
	conf->pending_count = 0;
	conf->recovery_disabled = mddev->recovery_disabled - 1;

	err = -EIO;
	for (i = 0; i < conf->raid_disks * 2; i++) {

		disk = conf->mirrors + i;

		if (i < conf->raid_disks &&
		    disk[conf->raid_disks].rdev) {
			/* This slot has a replacement. */
			if (!disk->rdev) {
				/* No original, just make the replacement
				 * a recovering spare
				 */
				disk->rdev =
					disk[conf->raid_disks].rdev;
				disk[conf->raid_disks].rdev = NULL;
			} else if (!test_bit(In_sync, &disk->rdev->flags))
				/* Original is not in_sync - bad */
				goto abort;
		}

		if (!disk->rdev ||
		    !test_bit(In_sync, &disk->rdev->flags)) {
			disk->head_position = 0;
			if (disk->rdev &&
			    (disk->rdev->saved_raid_disk < 0))
				conf->fullsync = 1;
		}
	}

	err = -ENOMEM;
	conf->thread = md_register_thread(raid1d, mddev, "raid1");
	if (!conf->thread)
		goto abort;

	return conf;

 abort:
	if (conf) {
		mempool_destroy(conf->r1bio_pool);
		kfree(conf->mirrors);
		safe_put_page(conf->tmppage);
		kfree(conf->poolinfo);
		kfree(conf->nr_pending);
		kfree(conf->nr_waiting);
		kfree(conf->nr_queued);
		kfree(conf->barrier);
		if (conf->bio_split)
			bioset_free(conf->bio_split);
		kfree(conf);
	}
	return ERR_PTR(err);
}
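
/*
 * Illustrative sketch, not part of the driver: setup_conf() sizes
 * conf->mirrors at raid_disks * 2 entries; slot i holds the primary device
 * for disk i and slot raid_disks + i holds its replacement, which is why the
 * per-disk loops in this file run to conf->raid_disks * 2.  A stand-alone
 * picture of that indexing, assuming a hypothetical 3-disk array:
 *
 *	#include <stdio.h>
 *	int main(void)
 *	{
 *		int raid_disks = 3;	// assumed example value
 *		for (int i = 0; i < raid_disks * 2; i++) {
 *			if (i < raid_disks)
 *				printf("slot %d: primary for disk %d\n", i, i);
 *			else
 *				printf("slot %d: replacement for disk %d\n",
 *				       i, i - raid_disks);
 *		}
 *		return 0;
 *	}
 */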
static void raid1_free(struct mddev *mddev, void *priv);
static int raid1_run(struct mddev *mddev)
{
	struct r1conf *conf;
	int i;
	struct md_rdev *rdev;
	int ret;
	bool discard_supported = false;

	if (mddev->level != 1) {
		pr_warn("md/raid1:%s: raid level not set to mirroring (%d)\n",
			mdname(mddev), mddev->level);
		return -EIO;
	}
	if (mddev->reshape_position != MaxSector) {
		pr_warn("md/raid1:%s: reshape_position set but not supported\n",
			mdname(mddev));
		return -EIO;
	}
	if (mddev_init_writes_pending(mddev) < 0)
		return -ENOMEM;
	/*
	 * copy the already verified devices into our private RAID1
	 * bookkeeping area. [whatever we allocate in run(),
	 * should be freed in raid1_free()]
	 */
	if (mddev->private == NULL)
		conf = setup_conf(mddev);
	else
		conf = mddev->private;

	if (IS_ERR(conf))
		return PTR_ERR(conf);

	if (mddev->queue) {
		blk_queue_max_write_same_sectors(mddev->queue, 0);
		blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
	}

	rdev_for_each(rdev, mddev) {
		if (!mddev->gendisk)
			continue;
		disk_stack_limits(mddev->gendisk, rdev->bdev,
				  rdev->data_offset << 9);
		if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
			discard_supported = true;
	}

	mddev->degraded = 0;
	for (i = 0; i < conf->raid_disks; i++)
		if (conf->mirrors[i].rdev == NULL ||
		    !test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
		    test_bit(Faulty, &conf->mirrors[i].rdev->flags))
			mddev->degraded++;

	if (conf->raid_disks - mddev->degraded == 1)
		mddev->recovery_cp = MaxSector;

	if (mddev->recovery_cp != MaxSector)
		pr_info("md/raid1:%s: not clean -- starting background reconstruction\n",
			mdname(mddev));
	pr_info("md/raid1:%s: active with %d out of %d mirrors\n",
		mdname(mddev), mddev->raid_disks - mddev->degraded,
		mddev->raid_disks);

	/*
	 * Ok, everything is just fine now
	 */
	mddev->thread = conf->thread;
	conf->thread = NULL;
	mddev->private = conf;
	set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);

	md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));

	if (mddev->queue) {
		if (discard_supported)
			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
						mddev->queue);
		else
			queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
						  mddev->queue);
	}

	ret = md_integrity_register(mddev);
	if (ret) {
		md_unregister_thread(&mddev->thread);
		raid1_free(mddev, conf);
	}
	return ret;
}
static void raid1_free(struct mddev *mddev, void *priv)
{
	struct r1conf *conf = priv;

	mempool_destroy(conf->r1bio_pool);
	kfree(conf->mirrors);
	safe_put_page(conf->tmppage);
	kfree(conf->poolinfo);
	kfree(conf->nr_pending);
	kfree(conf->nr_waiting);
	kfree(conf->nr_queued);
	kfree(conf->barrier);
	if (conf->bio_split)
		bioset_free(conf->bio_split);
	kfree(conf);
}
static int raid1_resize(struct mddev *mddev, sector_t sectors)
{
	/* no resync is happening, and there is enough space
	 * on all devices, so we can resize.
	 * We need to make sure resync covers any new space.
	 * If the array is shrinking we should possibly wait until
	 * any io in the removed space completes, but it hardly seems
	 * worth it.
	 */
	sector_t newsize = raid1_size(mddev, sectors, 0);
	if (mddev->external_size &&
	    mddev->array_sectors > newsize)
		return -EINVAL;
	if (mddev->bitmap) {
		int ret = bitmap_resize(mddev->bitmap, newsize, 0, 0);
		if (ret)
			return ret;
	}
	md_set_array_sectors(mddev, newsize);
	if (sectors > mddev->dev_sectors &&
	    mddev->recovery_cp > mddev->dev_sectors) {
		mddev->recovery_cp = mddev->dev_sectors;
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	}
	mddev->dev_sectors = sectors;
	mddev->resync_max_sectors = sectors;
	return 0;
}
static int raid1_reshape(struct mddev *mddev)
{
	/* We need to:
	 * 1/ resize the r1bio_pool
	 * 2/ resize conf->mirrors
	 *
	 * We allocate a new r1bio_pool if we can.
	 * Then raise a device barrier and wait until all IO stops.
	 * Then resize conf->mirrors and swap in the new r1bio pool.
	 *
	 * At the same time, we "pack" the devices so that all the missing
	 * devices have the higher raid_disk numbers.
	 */
	mempool_t *newpool, *oldpool;
	struct pool_info *newpoolinfo;
	struct raid1_info *newmirrors;
	struct r1conf *conf = mddev->private;
	int cnt, raid_disks;
	unsigned long flags;
	int d, d2;

	/* Cannot change chunk_size, layout, or level */
	if (mddev->chunk_sectors != mddev->new_chunk_sectors ||
	    mddev->layout != mddev->new_layout ||
	    mddev->level != mddev->new_level) {
		mddev->new_chunk_sectors = mddev->chunk_sectors;
		mddev->new_layout = mddev->layout;
		mddev->new_level = mddev->level;
		return -EINVAL;
	}

	if (!mddev_is_clustered(mddev))
		md_allow_write(mddev);

	raid_disks = mddev->raid_disks + mddev->delta_disks;

	if (raid_disks < conf->raid_disks) {
		cnt = 0;
		for (d = 0; d < conf->raid_disks; d++)
			if (conf->mirrors[d].rdev)
				cnt++;
		if (cnt > raid_disks)
			return -EBUSY;
	}

	newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL);
	if (!newpoolinfo)
		return -ENOMEM;
	newpoolinfo->mddev = mddev;
	newpoolinfo->raid_disks = raid_disks * 2;

	newpool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
				 r1bio_pool_free, newpoolinfo);
	if (!newpool) {
		kfree(newpoolinfo);
		return -ENOMEM;
	}
	newmirrors = kzalloc(sizeof(struct raid1_info) * raid_disks * 2,
			     GFP_KERNEL);
	if (!newmirrors) {
		kfree(newpoolinfo);
		mempool_destroy(newpool);
		return -ENOMEM;
	}

	freeze_array(conf, 0);

	/* ok, everything is stopped */
	oldpool = conf->r1bio_pool;
	conf->r1bio_pool = newpool;

	for (d = d2 = 0; d < conf->raid_disks; d++) {
		struct md_rdev *rdev = conf->mirrors[d].rdev;
		if (rdev && rdev->raid_disk != d2) {
			sysfs_unlink_rdev(mddev, rdev);
			rdev->raid_disk = d2;
			sysfs_unlink_rdev(mddev, rdev);
			if (sysfs_link_rdev(mddev, rdev))
				pr_warn("md/raid1:%s: cannot register rd%d\n",
					mdname(mddev), rdev->raid_disk);
		}
		if (rdev)
			newmirrors[d2++].rdev = rdev;
	}
	kfree(conf->mirrors);
	conf->mirrors = newmirrors;
	kfree(conf->poolinfo);
	conf->poolinfo = newpoolinfo;

	spin_lock_irqsave(&conf->device_lock, flags);
	mddev->degraded += (raid_disks - conf->raid_disks);
	spin_unlock_irqrestore(&conf->device_lock, flags);
	conf->raid_disks = mddev->raid_disks = raid_disks;
	mddev->delta_disks = 0;

	unfreeze_array(conf);

	set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);

	mempool_destroy(oldpool);
	return 0;
}
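
/*
 * Illustrative sketch, not part of the driver: the reshape loop above "packs"
 * surviving devices into the low raid_disk numbers by copying each non-NULL
 * rdev to the next free slot d2.  A stand-alone model of that compaction,
 * with a hypothetical device map:
 *
 *	#include <stdio.h>
 *	int main(void)
 *	{
 *		// 1 marks a slot with a device, 0 an empty slot (assumed layout)
 *		int have_rdev[5] = { 1, 0, 1, 1, 0 };
 *		int d, d2 = 0;
 *		for (d = 0; d < 5; d++)
 *			if (have_rdev[d])
 *				printf("device from slot %d -> slot %d\n", d, d2++);
 *		return 0;	// slots 0, 2, 3 map to 0, 1, 2
 *	}
 */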
static void raid1_quiesce(struct mddev *mddev, int state)
{
	struct r1conf *conf = mddev->private;

	switch (state) {
	case 2: /* wake for suspend */
		wake_up(&conf->wait_barrier);
		break;
	case 1:
		freeze_array(conf, 0);
		break;
	case 0:
		unfreeze_array(conf);
		break;
	}
}
static void *raid1_takeover(struct mddev *mddev)
{
	/* raid1 can take over:
	 *  raid5 with 2 devices, any layout or chunk size
	 */
	if (mddev->level == 5 && mddev->raid_disks == 2) {
		struct r1conf *conf;
		mddev->new_level = 1;
		mddev->new_layout = 0;
		mddev->new_chunk_sectors = 0;
		conf = setup_conf(mddev);
		if (!IS_ERR(conf)) {
			/* Array must appear to be quiesced */
			conf->array_frozen = 1;
			mddev_clear_unsupported_flags(mddev,
				UNSUPPORTED_MDDEV_FLAGS);
		}
		return conf;
	}
	return ERR_PTR(-EINVAL);
}
static struct md_personality raid1_personality =
{
	.name		= "raid1",
	.level		= 1,
	.owner		= THIS_MODULE,
	.make_request	= raid1_make_request,
	.run		= raid1_run,
	.free		= raid1_free,
	.status		= raid1_status,
	.error_handler	= raid1_error,
	.hot_add_disk	= raid1_add_disk,
	.hot_remove_disk= raid1_remove_disk,
	.spare_active	= raid1_spare_active,
	.sync_request	= raid1_sync_request,
	.resize		= raid1_resize,
	.size		= raid1_size,
	.check_reshape	= raid1_reshape,
	.quiesce	= raid1_quiesce,
	.takeover	= raid1_takeover,
	.congested	= raid1_congested,
};
static int __init raid_init(void)
{
	return register_md_personality(&raid1_personality);
}

static void raid_exit(void)
{
	unregister_md_personality(&raid1_personality);
}

module_init(raid_init);
module_exit(raid_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID1 (mirroring) personality for MD");
MODULE_ALIAS("md-personality-3"); /* RAID1 */
MODULE_ALIAS("md-raid1");
MODULE_ALIAS("md-level-1");

module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);