/*
 * raid5.c : Multiple Devices driver for Linux
 *	   Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *	   Copyright (C) 1999, 2000 Ingo Molnar
 *	   Copyright (C) 2002, 2003 H. Peter Anvin
 *
 * RAID-4/5/6 management functions.
 * Thanks to Penguin Computing for making the RAID-6 development possible
 * by donating a test server!
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * BITMAP UNPLUGGING:
 *
 * The sequencing for updating the bitmap reliably is a little
 * subtle (and I got it wrong the first time) so it deserves some
 * explanation.
 *
 * We group bitmap updates into batches.  Each batch has a number.
 * We may write out several batches at once, but that isn't very important.
 * conf->seq_write is the number of the last batch successfully written.
 * conf->seq_flush is the number of the last batch that was closed to
 *    new additions.
 * When we discover that we will need to write to any block in a stripe
 * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
 * the number of the batch it will be in. This is seq_flush+1.
 * When we are ready to do a write, if that batch hasn't been written yet,
 *   we plug the array and queue the stripe for later.
 * When an unplug happens, we increment bm_flush, thus closing the current
 *   batch.
 * When we notice that bm_flush > bm_write, we write out all pending updates
 * to the bitmap, and advance bm_write to where bm_flush was.
 * This may occasionally write a bit out twice, but is sure never to
 * lose them.
 */
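/*
 * Illustrative walk-through (not in the original source): suppose
 * seq_flush == seq_write == 7.  A write arriving in add_stripe_bio is
 * tagged sh->bm_seq = 8 (seq_flush + 1).  The stripe waits on
 * conf->bitmap_list until an unplug closes the batch (seq_flush
 * becomes 8) and the bitmap writeout advances seq_write to 8; only
 * then may the data write proceed, which guarantees the bitmap bit is
 * on stable storage before the data it describes.
 */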
#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
#include <linux/module.h>
#include <linux/async.h>
#include <linux/seq_file.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include "md.h"
#include "raid5.h"
#include "raid0.h"
#include "bitmap.h"

/*
 * Stripe cache
 */

#define NR_STRIPES		256
#define STRIPE_SIZE		PAGE_SIZE
#define STRIPE_SHIFT		(PAGE_SHIFT - 9)
#define STRIPE_SECTORS		(STRIPE_SIZE>>9)
#define	IO_THRESHOLD		1
#define BYPASS_THRESHOLD	1
#define NR_HASH			(PAGE_SIZE / sizeof(struct hlist_head))
#define HASH_MASK		(NR_HASH - 1)

static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect)
{
	int hash = (sect >> STRIPE_SHIFT) & HASH_MASK;
	return &conf->stripe_hashtbl[hash];
}

/* bio's attached to a stripe+device for I/O are linked together in bi_sector
 * order without overlap.  There may be several bio's per stripe+device, and
 * a bio could span several devices.
 * When walking this list for a particular stripe+device, we must never proceed
 * beyond a bio that extends past this device, as the next bio might no longer
 * be valid.
 * This function is used to determine the 'next' bio in the list, given the sector
 * of the current stripe+device
 */
static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
{
	int sectors = bio->bi_size >> 9;
	if (bio->bi_sector + sectors < sector + STRIPE_SECTORS)
		return bio->bi_next;
	else
		return NULL;
}
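/*
 * Example of the stopping rule above (illustrative, not in the
 * original source): with STRIPE_SECTORS == 8 and dev->sector == 0, a
 * bio at bi_sector 0 covering 8 sectors ends exactly at the stripe
 * boundary, so r5_next_bio() returns NULL and the walk stops rather
 * than following bi_next into a bio belonging to the next stripe.
 */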
/*
 * We maintain a biased count of active stripes in the bottom 16 bits of
 * bi_phys_segments, and a count of processed stripes in the upper 16 bits
 */
static inline int raid5_bi_processed_stripes(struct bio *bio)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
	return (atomic_read(segments) >> 16) & 0xffff;
}

static inline int raid5_dec_bi_active_stripes(struct bio *bio)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
	return atomic_sub_return(1, segments) & 0xffff;
}

static inline void raid5_inc_bi_active_stripes(struct bio *bio)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
	atomic_inc(segments);
}

static inline void raid5_set_bi_processed_stripes(struct bio *bio,
	unsigned int cnt)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
	int old, new;

	do {
		old = atomic_read(segments);
		new = (old & 0xffff) | (cnt << 16);
	} while (atomic_cmpxchg(segments, old, new) != old);
}

static inline void raid5_set_bi_stripes(struct bio *bio, unsigned int cnt)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
	atomic_set(segments, cnt);
}
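/*
 * Usage sketch (illustrative, not in the original source): the active
 * count is "biased" because the submission path holds one extra
 * reference while it splits a bio across stripes, roughly:
 *
 *	raid5_set_bi_stripes(bi, 1);		// take the bias
 *	...raid5_inc_bi_active_stripes(bi) once per stripe...
 *	if (!raid5_dec_bi_active_stripes(bi))	// drop the bias
 *		bio_endio(bi, 0);
 *
 * so the bio cannot complete while stripes are still being attached.
 */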
/* Find first data disk in a raid6 stripe */
static inline int raid6_d0(struct stripe_head *sh)
{
	if (sh->ddf_layout)
		/* ddf always start from first device */
		return 0;
	/* md starts just after Q block */
	if (sh->qd_idx == sh->disks - 1)
		return 0;
	else
		return sh->qd_idx + 1;
}
static inline int raid6_next_disk(int disk, int raid_disks)
{
	disk++;
	return (disk < raid_disks) ? disk : 0;
}

/* When walking through the disks in a raid5, starting at raid6_d0,
 * We need to map each disk to a 'slot', where the data disks are slot
 * 0 .. raid_disks-3, the parity disk is raid_disks-2 and the Q disk
 * is raid_disks-1.
 * This helper does that mapping.
 */
static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
			     int *count, int syndrome_disks)
{
	int slot = (*count);

	if (sh->ddf_layout)
		(*count)++;
	if (idx == sh->pd_idx)
		return syndrome_disks;
	if (idx == sh->qd_idx)
		return syndrome_disks + 1;
	if (!sh->ddf_layout)
		(*count)++;
	return slot;
}
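/*
 * Worked example (illustrative, not in the original source): md-native
 * layout, 5 disks, pd_idx == 3, qd_idx == 4.  raid6_d0() returns 0
 * since Q is the last device, and walking disks 0,1,2,3,4 through
 * raid6_idx_to_slot() yields slots 0,1,2 for the data disks,
 * syndrome_disks (== 3) for P and syndrome_disks+1 (== 4) for Q --
 * exactly the layout async_gen_syndrome() expects.
 */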
static void return_io(struct bio *return_bi)
{
	struct bio *bi = return_bi;
	while (bi) {

		return_bi = bi->bi_next;
		bi->bi_next = NULL;
		bi->bi_size = 0;
		bio_endio(bi, 0);
		bi = return_bi;
	}
}

static void print_raid5_conf(struct r5conf *conf);

static int stripe_operations_active(struct stripe_head *sh)
{
	return sh->check_state || sh->reconstruct_state ||
	       test_bit(STRIPE_BIOFILL_RUN, &sh->state) ||
	       test_bit(STRIPE_COMPUTE_RUN, &sh->state);
}
static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh)
{
	BUG_ON(!list_empty(&sh->lru));
	BUG_ON(atomic_read(&conf->active_stripes) == 0);
	if (test_bit(STRIPE_HANDLE, &sh->state)) {
		if (test_bit(STRIPE_DELAYED, &sh->state) &&
		    !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
			list_add_tail(&sh->lru, &conf->delayed_list);
		else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
			 sh->bm_seq - conf->seq_write > 0)
			list_add_tail(&sh->lru, &conf->bitmap_list);
		else {
			clear_bit(STRIPE_DELAYED, &sh->state);
			clear_bit(STRIPE_BIT_DELAY, &sh->state);
			list_add_tail(&sh->lru, &conf->handle_list);
		}
		md_wakeup_thread(conf->mddev->thread);
	} else {
		BUG_ON(stripe_operations_active(sh));
		if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
			if (atomic_dec_return(&conf->preread_active_stripes)
			    < IO_THRESHOLD)
				md_wakeup_thread(conf->mddev->thread);
		atomic_dec(&conf->active_stripes);
		if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
			list_add_tail(&sh->lru, &conf->inactive_list);
			wake_up(&conf->wait_for_stripe);
			if (conf->retry_read_aligned)
				md_wakeup_thread(conf->mddev->thread);
		}
	}
}

static void __release_stripe(struct r5conf *conf, struct stripe_head *sh)
{
	if (atomic_dec_and_test(&sh->count))
		do_release_stripe(conf, sh);
}

static void release_stripe(struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;
	unsigned long flags;

	local_irq_save(flags);
	if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) {
		do_release_stripe(conf, sh);
		spin_unlock(&conf->device_lock);
	}
	local_irq_restore(flags);
}
static inline void remove_hash(struct stripe_head *sh)
{
	pr_debug("remove_hash(), stripe %llu\n",
		(unsigned long long)sh->sector);

	hlist_del_init(&sh->hash);
}

static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh)
{
	struct hlist_head *hp = stripe_hash(conf, sh->sector);

	pr_debug("insert_hash(), stripe %llu\n",
		(unsigned long long)sh->sector);

	hlist_add_head(&sh->hash, hp);
}


/* find an idle stripe, make sure it is unhashed, and return it. */
static struct stripe_head *get_free_stripe(struct r5conf *conf)
{
	struct stripe_head *sh = NULL;
	struct list_head *first;

	if (list_empty(&conf->inactive_list))
		goto out;
	first = conf->inactive_list.next;
	sh = list_entry(first, struct stripe_head, lru);
	list_del_init(first);
	remove_hash(sh);
	atomic_inc(&conf->active_stripes);
out:
	return sh;
}
static void shrink_buffers(struct stripe_head *sh)
{
	struct page *p;
	int i;
	int num = sh->raid_conf->pool_size;

	for (i = 0; i < num ; i++) {
		p = sh->dev[i].page;
		if (!p)
			continue;
		sh->dev[i].page = NULL;
		put_page(p);
	}
}

static int grow_buffers(struct stripe_head *sh)
{
	int i;
	int num = sh->raid_conf->pool_size;

	for (i = 0; i < num; i++) {
		struct page *page;

		if (!(page = alloc_page(GFP_KERNEL))) {
			return 1;
		}
		sh->dev[i].page = page;
	}
	return 0;
}

static void raid5_build_block(struct stripe_head *sh, int i, int previous);
static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
			    struct stripe_head *sh);
static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
{
	struct r5conf *conf = sh->raid_conf;
	int i;

	BUG_ON(atomic_read(&sh->count) != 0);
	BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
	BUG_ON(stripe_operations_active(sh));

	pr_debug("init_stripe called, stripe %llu\n",
		(unsigned long long)sh->sector);

	remove_hash(sh);

	sh->generation = conf->generation - previous;
	sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks;
	sh->sector = sector;
	stripe_set_idx(sector, conf, previous, sh);
	sh->state = 0;


	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		if (dev->toread || dev->read || dev->towrite || dev->written ||
		    test_bit(R5_LOCKED, &dev->flags)) {
			printk(KERN_ERR "sector=%llx i=%d %p %p %p %p %d\n",
			       (unsigned long long)sh->sector, i, dev->toread,
			       dev->read, dev->towrite, dev->written,
			       test_bit(R5_LOCKED, &dev->flags));
			WARN_ON(1);
		}
		dev->flags = 0;
		raid5_build_block(sh, i, previous);
	}
	insert_hash(conf, sh);
}
static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
					 short generation)
{
	struct stripe_head *sh;
	struct hlist_node *hn;

	pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector);
	hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash)
		if (sh->sector == sector && sh->generation == generation)
			return sh;
	pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector);
	return NULL;
}

/*
 * Need to check if array has failed when deciding whether to:
 *  - start an array
 *  - remove non-faulty devices
 *  - add a spare
 *  - allow a reshape
 * This determination is simple when no reshape is happening.
 * However if there is a reshape, we need to carefully check
 * both the before and after sections.
 * This is because some failed devices may only affect one
 * of the two sections, and some non-in_sync devices may
 * be insync in the section most affected by failed devices.
 */
static int calc_degraded(struct r5conf *conf)
{
	int degraded, degraded2;
	int i;

	rcu_read_lock();
	degraded = 0;
	for (i = 0; i < conf->previous_raid_disks; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
		if (!rdev || test_bit(Faulty, &rdev->flags))
			degraded++;
		else if (test_bit(In_sync, &rdev->flags))
			;
		else
			/* not in-sync or faulty.
			 * If the reshape increases the number of devices,
			 * this is being recovered by the reshape, so
			 * this 'previous' section is not in_sync.
			 * If the number of devices is being reduced however,
			 * the device can only be part of the array if
			 * we are reverting a reshape, so this section will
			 * be in_sync.
			 */
			if (conf->raid_disks >= conf->previous_raid_disks)
				degraded++;
	}
	rcu_read_unlock();
	if (conf->raid_disks == conf->previous_raid_disks)
		return degraded;
	rcu_read_lock();
	degraded2 = 0;
	for (i = 0; i < conf->raid_disks; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
		if (!rdev || test_bit(Faulty, &rdev->flags))
			degraded2++;
		else if (test_bit(In_sync, &rdev->flags))
			;
		else
			/* not in-sync or faulty.
			 * If reshape increases the number of devices, this
			 * section has already been recovered, else it
			 * almost certainly hasn't.
			 */
			if (conf->raid_disks <= conf->previous_raid_disks)
				degraded2++;
	}
	rcu_read_unlock();
	if (degraded2 > degraded)
		return degraded2;
	return degraded;
}
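/*
 * Illustrative example (not in the original source): while growing a
 * 4-drive array to 5 drives, a device that is not in_sync counts as
 * degraded in the "previous" 4-drive section (its data must come from
 * the reshape), but not in the "new" 5-drive section, which has
 * already been recovered up to reshape_progress.  Returning the larger
 * of the two counts is therefore the conservative answer.
 */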
static int has_failed(struct r5conf *conf)
{
	int degraded;

	if (conf->mddev->reshape_position == MaxSector)
		return conf->mddev->degraded > conf->max_degraded;

	degraded = calc_degraded(conf);
	if (degraded > conf->max_degraded)
		return 1;
	return 0;
}
static struct stripe_head *
get_active_stripe(struct r5conf *conf, sector_t sector,
		  int previous, int noblock, int noquiesce)
{
	struct stripe_head *sh;

	pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);

	spin_lock_irq(&conf->device_lock);

	do {
		wait_event_lock_irq(conf->wait_for_stripe,
				    conf->quiesce == 0 || noquiesce,
				    conf->device_lock, /* nothing */);
		sh = __find_stripe(conf, sector, conf->generation - previous);
		if (!sh) {
			if (!conf->inactive_blocked)
				sh = get_free_stripe(conf);
			if (noblock && sh == NULL)
				break;
			if (!sh) {
				conf->inactive_blocked = 1;
				wait_event_lock_irq(conf->wait_for_stripe,
						    !list_empty(&conf->inactive_list) &&
						    (atomic_read(&conf->active_stripes)
						     < (conf->max_nr_stripes *3/4)
						     || !conf->inactive_blocked),
						    conf->device_lock, /* nothing */);
				conf->inactive_blocked = 0;
			} else
				init_stripe(sh, sector, previous);
		} else {
			if (atomic_read(&sh->count)) {
				BUG_ON(!list_empty(&sh->lru)
				    && !test_bit(STRIPE_EXPANDING, &sh->state)
				    && !test_bit(STRIPE_ON_UNPLUG_LIST, &sh->state));
			} else {
				if (!test_bit(STRIPE_HANDLE, &sh->state))
					atomic_inc(&conf->active_stripes);
				if (list_empty(&sh->lru) &&
				    !test_bit(STRIPE_EXPANDING, &sh->state))
					BUG();
				list_del_init(&sh->lru);
			}
		}
	} while (sh == NULL);

	if (sh)
		atomic_inc(&sh->count);

	spin_unlock_irq(&conf->device_lock);
	return sh;
}
/* Determine if 'data_offset' or 'new_data_offset' should be used
 * in this stripe_head.
 */
static int use_new_offset(struct r5conf *conf, struct stripe_head *sh)
{
	sector_t progress = conf->reshape_progress;
	/* Need a memory barrier to make sure we see the value
	 * of conf->generation, or ->data_offset that was set before
	 * reshape_progress was updated.
	 */
	smp_rmb();
	if (progress == MaxSector)
		return 0;
	if (sh->generation == conf->generation - 1)
		return 0;
	/* We are in a reshape, and this is a new-generation stripe,
	 * so use new_data_offset.
	 */
	return 1;
}
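/*
 * Illustrative note (not in the original source): a reshape relocates
 * data to a different data_offset on each device.  Stripes from the
 * previous geometry (generation - 1) still map through the old
 * data_offset; only stripes created in the current generation while a
 * reshape is active use new_data_offset.  The smp_rmb() above is
 * assumed to pair with a write barrier in the reshape code that
 * publishes the new offsets before updating reshape_progress.
 */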
static void
raid5_end_read_request(struct bio *bi, int error);
static void
raid5_end_write_request(struct bio *bi, int error);
static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
{
	struct r5conf *conf = sh->raid_conf;
	int i, disks = sh->disks;

	might_sleep();

	for (i = disks; i--; ) {
		int rw;
		int replace_only = 0;
		struct bio *bi, *rbi;
		struct md_rdev *rdev, *rrdev = NULL;
		if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
			if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags))
				rw = WRITE_FUA;
			else
				rw = WRITE;
		} else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
			rw = READ;
		else if (test_and_clear_bit(R5_WantReplace,
					    &sh->dev[i].flags)) {
			rw = WRITE;
			replace_only = 1;
		} else
			continue;
		if (test_and_clear_bit(R5_SyncIO, &sh->dev[i].flags))
			rw |= REQ_SYNC;

		bi = &sh->dev[i].req;
		rbi = &sh->dev[i].rreq; /* For writing to replacement */

		bi->bi_rw = rw;
		rbi->bi_rw = rw;
		if (rw & WRITE) {
			bi->bi_end_io = raid5_end_write_request;
			rbi->bi_end_io = raid5_end_write_request;
		} else
			bi->bi_end_io = raid5_end_read_request;

		rcu_read_lock();
		rrdev = rcu_dereference(conf->disks[i].replacement);
		smp_mb(); /* Ensure that if rrdev is NULL, rdev won't be */
		rdev = rcu_dereference(conf->disks[i].rdev);
		if (!rdev) {
			rdev = rrdev;
			rrdev = NULL;
		}
		if (rw & WRITE) {
			if (replace_only)
				rdev = NULL;
			if (rdev == rrdev)
				/* We raced and saw duplicates */
				rrdev = NULL;
		} else {
			if (test_bit(R5_ReadRepl, &sh->dev[i].flags) && rrdev)
				rdev = rrdev;
			rrdev = NULL;
		}

		if (rdev && test_bit(Faulty, &rdev->flags))
			rdev = NULL;
		if (rdev)
			atomic_inc(&rdev->nr_pending);
		if (rrdev && test_bit(Faulty, &rrdev->flags))
			rrdev = NULL;
		if (rrdev)
			atomic_inc(&rrdev->nr_pending);
		rcu_read_unlock();

		/* We have already checked bad blocks for reads.  Now
		 * need to check for writes.  We never accept write errors
		 * on the replacement, so we don't need to check rrdev.
		 */
		while ((rw & WRITE) && rdev &&
		       test_bit(WriteErrorSeen, &rdev->flags)) {
			sector_t first_bad;
			int bad_sectors;
			int bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
					      &first_bad, &bad_sectors);
			if (!bad)
				break;

			if (bad < 0) {
				set_bit(BlockedBadBlocks, &rdev->flags);
				if (!conf->mddev->external &&
				    conf->mddev->flags) {
					/* It is very unlikely, but we might
					 * still need to write out the
					 * bad block log - better give it
					 * a chance*/
					md_check_recovery(conf->mddev);
				}
				/*
				 * Because md_wait_for_blocked_rdev
				 * will dec nr_pending, we must
				 * increment it first.
				 */
				atomic_inc(&rdev->nr_pending);
				md_wait_for_blocked_rdev(rdev, conf->mddev);
			} else {
				/* Acknowledged bad block - skip the write */
				rdev_dec_pending(rdev, conf->mddev);
				rdev = NULL;
			}
		}

		if (rdev) {
			if (s->syncing || s->expanding || s->expanded
			    || s->replacing)
				md_sync_acct(rdev->bdev, STRIPE_SECTORS);

			set_bit(STRIPE_IO_STARTED, &sh->state);

			bi->bi_bdev = rdev->bdev;
			pr_debug("%s: for %llu schedule op %ld on disc %d\n",
				__func__, (unsigned long long)sh->sector,
				bi->bi_rw, i);
			atomic_inc(&sh->count);
			if (use_new_offset(conf, sh))
				bi->bi_sector = (sh->sector
						 + rdev->new_data_offset);
			else
				bi->bi_sector = (sh->sector
						 + rdev->data_offset);
			if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
				bi->bi_rw |= REQ_FLUSH;

			bi->bi_flags = 1 << BIO_UPTODATE;
			bi->bi_idx = 0;
			bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
			bi->bi_io_vec[0].bv_offset = 0;
			bi->bi_size = STRIPE_SIZE;
			bi->bi_next = NULL;
			if (rrdev)
				set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
			generic_make_request(bi);
		}
		if (rrdev) {
			if (s->syncing || s->expanding || s->expanded
			    || s->replacing)
				md_sync_acct(rrdev->bdev, STRIPE_SECTORS);

			set_bit(STRIPE_IO_STARTED, &sh->state);

			rbi->bi_bdev = rrdev->bdev;
			pr_debug("%s: for %llu schedule op %ld on "
				 "replacement disc %d\n",
				__func__, (unsigned long long)sh->sector,
				rbi->bi_rw, i);
			atomic_inc(&sh->count);
			if (use_new_offset(conf, sh))
				rbi->bi_sector = (sh->sector
						  + rrdev->new_data_offset);
			else
				rbi->bi_sector = (sh->sector
						  + rrdev->data_offset);
			rbi->bi_flags = 1 << BIO_UPTODATE;
			rbi->bi_idx = 0;
			rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
			rbi->bi_io_vec[0].bv_offset = 0;
			rbi->bi_size = STRIPE_SIZE;
			rbi->bi_next = NULL;
			generic_make_request(rbi);
		}
		if (!rdev && !rrdev) {
			if (rw & WRITE)
				set_bit(STRIPE_DEGRADED, &sh->state);
			pr_debug("skip op %ld on disc %d for sector %llu\n",
				bi->bi_rw, i, (unsigned long long)sh->sector);
			clear_bit(R5_LOCKED, &sh->dev[i].flags);
			set_bit(STRIPE_HANDLE, &sh->state);
		}
	}
}
static struct dma_async_tx_descriptor *
async_copy_data(int frombio, struct bio *bio, struct page *page,
	sector_t sector, struct dma_async_tx_descriptor *tx)
{
	struct bio_vec *bvl;
	struct page *bio_page;
	int i;
	int page_offset;
	struct async_submit_ctl submit;
	enum async_tx_flags flags = 0;

	if (bio->bi_sector >= sector)
		page_offset = (signed)(bio->bi_sector - sector) * 512;
	else
		page_offset = (signed)(sector - bio->bi_sector) * -512;

	if (frombio)
		flags |= ASYNC_TX_FENCE;
	init_async_submit(&submit, flags, tx, NULL, NULL, NULL);

	bio_for_each_segment(bvl, bio, i) {
		int len = bvl->bv_len;
		int clen;
		int b_offset = 0;

		if (page_offset < 0) {
			b_offset = -page_offset;
			page_offset += b_offset;
			len -= b_offset;
		}

		if (len > 0 && page_offset + len > STRIPE_SIZE)
			clen = STRIPE_SIZE - page_offset;
		else
			clen = len;

		if (clen > 0) {
			b_offset += bvl->bv_offset;
			bio_page = bvl->bv_page;
			if (frombio)
				tx = async_memcpy(page, bio_page, page_offset,
						  b_offset, clen, &submit);
			else
				tx = async_memcpy(bio_page, page, b_offset,
						  page_offset, clen, &submit);
		}
		/* chain the operations */
		submit.depend_tx = tx;

		if (clen < len) /* hit end of page */
			break;
		page_offset += len;
	}

	return tx;
}
static void ops_complete_biofill(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;
	struct bio *return_bi = NULL;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	/* clear completed biofills */
	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		/* acknowledge completion of a biofill operation */
		/* and check if we need to reply to a read request,
		 * new R5_Wantfill requests are held off until
		 * !STRIPE_BIOFILL_RUN
		 */
		if (test_and_clear_bit(R5_Wantfill, &dev->flags)) {
			struct bio *rbi, *rbi2;

			BUG_ON(!dev->read);
			rbi = dev->read;
			dev->read = NULL;
			while (rbi && rbi->bi_sector <
				dev->sector + STRIPE_SECTORS) {
				rbi2 = r5_next_bio(rbi, dev->sector);
				if (!raid5_dec_bi_active_stripes(rbi)) {
					rbi->bi_next = return_bi;
					return_bi = rbi;
				}
				rbi = rbi2;
			}
		}
	}
	clear_bit(STRIPE_BIOFILL_RUN, &sh->state);

	return_io(return_bi);

	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}
static void ops_run_biofill(struct stripe_head *sh)
{
	struct dma_async_tx_descriptor *tx = NULL;
	struct async_submit_ctl submit;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		if (test_bit(R5_Wantfill, &dev->flags)) {
			struct bio *rbi;
			spin_lock_irq(&sh->stripe_lock);
			dev->read = rbi = dev->toread;
			dev->toread = NULL;
			spin_unlock_irq(&sh->stripe_lock);
			while (rbi && rbi->bi_sector <
				dev->sector + STRIPE_SECTORS) {
				tx = async_copy_data(0, rbi, dev->page,
						     dev->sector, tx);
				rbi = r5_next_bio(rbi, dev->sector);
			}
		}
	}

	atomic_inc(&sh->count);
	init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL);
	async_trigger_callback(&submit);
}
static void mark_target_uptodate(struct stripe_head *sh, int target)
{
	struct r5dev *tgt;

	if (target < 0)
		return;

	tgt = &sh->dev[target];
	set_bit(R5_UPTODATE, &tgt->flags);
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
	clear_bit(R5_Wantcompute, &tgt->flags);
}
static void ops_complete_compute(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	/* mark the computed target(s) as uptodate */
	mark_target_uptodate(sh, sh->ops.target);
	mark_target_uptodate(sh, sh->ops.target2);

	clear_bit(STRIPE_COMPUTE_RUN, &sh->state);
	if (sh->check_state == check_state_compute_run)
		sh->check_state = check_state_compute_result;
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}
/* return a pointer to the address conversion region of the scribble buffer */
static addr_conv_t *to_addr_conv(struct stripe_head *sh,
				 struct raid5_percpu *percpu)
{
	return percpu->scribble + sizeof(struct page *) * (sh->disks + 2);
}
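/*
 * Layout sketch (illustrative, not in the original source) of the
 * per-cpu scribble buffer that to_addr_conv() indexes into:
 *
 *	percpu->scribble:
 *	[ struct page * x (disks + 2) ][ addr_conv_t x (disks + 2) ]
 *	  source/destination page list   dma address conversion area
 *
 * The same region is reused as the xor_srcs/blocks arrays by the
 * ops_run_* routines below, which is why scribble_len() sizes it for
 * the largest disk count the array may reach.
 */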
static struct dma_async_tx_descriptor *
ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu)
{
	int disks = sh->disks;
	struct page **xor_srcs = percpu->scribble;
	int target = sh->ops.target;
	struct r5dev *tgt = &sh->dev[target];
	struct page *xor_dest = tgt->page;
	int count = 0;
	struct dma_async_tx_descriptor *tx;
	struct async_submit_ctl submit;
	int i;

	pr_debug("%s: stripe %llu block: %d\n",
		__func__, (unsigned long long)sh->sector, target);
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));

	for (i = disks; i--; )
		if (i != target)
			xor_srcs[count++] = sh->dev[i].page;

	atomic_inc(&sh->count);

	init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, NULL,
			  ops_complete_compute, sh, to_addr_conv(sh, percpu));
	if (unlikely(count == 1))
		tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
	else
		tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);

	return tx;
}
/* set_syndrome_sources - populate source buffers for gen_syndrome
 * @srcs - (struct page *) array of size sh->disks
 * @sh - stripe_head to parse
 *
 * Populates srcs in proper layout order for the stripe and returns the
 * 'count' of sources to be used in a call to async_gen_syndrome.  The P
 * destination buffer is recorded in srcs[count] and the Q destination
 * is recorded in srcs[count+1].
 */
static int set_syndrome_sources(struct page **srcs, struct stripe_head *sh)
{
	int disks = sh->disks;
	int syndrome_disks = sh->ddf_layout ? disks : (disks - 2);
	int d0_idx = raid6_d0(sh);
	int count;
	int i;

	for (i = 0; i < disks; i++)
		srcs[i] = NULL;

	count = 0;
	i = d0_idx;
	do {
		int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);

		srcs[slot] = sh->dev[i].page;
		i = raid6_next_disk(i, disks);
	} while (i != d0_idx);

	return syndrome_disks;
}
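/*
 * Illustrative call sequence (not in the original source): for a
 * 6-disk md-layout raid6 stripe, set_syndrome_sources() places the
 * four data pages in srcs[0..3] in syndrome order, records P in
 * srcs[4] and Q in srcs[5], and returns 4, so a full P+Q generation
 * is simply:
 *
 *	count = set_syndrome_sources(blocks, sh);
 *	tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
 */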
static struct dma_async_tx_descriptor *
ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu)
{
	int disks = sh->disks;
	struct page **blocks = percpu->scribble;
	int target;
	int qd_idx = sh->qd_idx;
	struct dma_async_tx_descriptor *tx;
	struct async_submit_ctl submit;
	struct r5dev *tgt;
	struct page *dest;
	int i;
	int count;

	if (sh->ops.target < 0)
		target = sh->ops.target2;
	else if (sh->ops.target2 < 0)
		target = sh->ops.target;
	else
		/* we should only have one valid target */
		BUG();
	BUG_ON(target < 0);
	pr_debug("%s: stripe %llu block: %d\n",
		__func__, (unsigned long long)sh->sector, target);

	tgt = &sh->dev[target];
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
	dest = tgt->page;

	atomic_inc(&sh->count);

	if (target == qd_idx) {
		count = set_syndrome_sources(blocks, sh);
		blocks[count] = NULL; /* regenerating p is not necessary */
		BUG_ON(blocks[count+1] != dest); /* q should already be set */
		init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
				  ops_complete_compute, sh,
				  to_addr_conv(sh, percpu));
		tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
	} else {
		/* Compute any data- or p-drive using XOR */
		count = 0;
		for (i = disks; i-- ; ) {
			if (i == target || i == qd_idx)
				continue;
			blocks[count++] = sh->dev[i].page;
		}

		init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
				  NULL, ops_complete_compute, sh,
				  to_addr_conv(sh, percpu));
		tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, &submit);
	}

	return tx;
}
static struct dma_async_tx_descriptor *
ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
{
	int i, count, disks = sh->disks;
	int syndrome_disks = sh->ddf_layout ? disks : disks-2;
	int d0_idx = raid6_d0(sh);
	int faila = -1, failb = -1;
	int target = sh->ops.target;
	int target2 = sh->ops.target2;
	struct r5dev *tgt = &sh->dev[target];
	struct r5dev *tgt2 = &sh->dev[target2];
	struct dma_async_tx_descriptor *tx;
	struct page **blocks = percpu->scribble;
	struct async_submit_ctl submit;

	pr_debug("%s: stripe %llu block1: %d block2: %d\n",
		 __func__, (unsigned long long)sh->sector, target, target2);
	BUG_ON(target < 0 || target2 < 0);
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
	BUG_ON(!test_bit(R5_Wantcompute, &tgt2->flags));

	/* we need to open-code set_syndrome_sources to handle the
	 * slot number conversion for 'faila' and 'failb'
	 */
	for (i = 0; i < disks ; i++)
		blocks[i] = NULL;
	count = 0;
	i = d0_idx;
	do {
		int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);

		blocks[slot] = sh->dev[i].page;

		if (i == target)
			faila = slot;
		if (i == target2)
			failb = slot;
		i = raid6_next_disk(i, disks);
	} while (i != d0_idx);

	BUG_ON(faila == failb);
	if (failb < faila)
		swap(faila, failb);
	pr_debug("%s: stripe: %llu faila: %d failb: %d\n",
		 __func__, (unsigned long long)sh->sector, faila, failb);

	atomic_inc(&sh->count);

	if (failb == syndrome_disks+1) {
		/* Q disk is one of the missing disks */
		if (faila == syndrome_disks) {
			/* Missing P+Q, just recompute */
			init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
					  ops_complete_compute, sh,
					  to_addr_conv(sh, percpu));
			return async_gen_syndrome(blocks, 0, syndrome_disks+2,
						  STRIPE_SIZE, &submit);
		} else {
			struct page *dest;
			int data_target;
			int qd_idx = sh->qd_idx;

			/* Missing D+Q: recompute D from P, then recompute Q */
			if (target == qd_idx)
				data_target = target2;
			else
				data_target = target;

			count = 0;
			for (i = disks; i-- ; ) {
				if (i == data_target || i == qd_idx)
					continue;
				blocks[count++] = sh->dev[i].page;
			}
			dest = sh->dev[data_target].page;
			init_async_submit(&submit,
					  ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
					  NULL, NULL, NULL,
					  to_addr_conv(sh, percpu));
			tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE,
				       &submit);

			count = set_syndrome_sources(blocks, sh);
			init_async_submit(&submit, ASYNC_TX_FENCE, tx,
					  ops_complete_compute, sh,
					  to_addr_conv(sh, percpu));
			return async_gen_syndrome(blocks, 0, count+2,
						  STRIPE_SIZE, &submit);
		}
	} else {
		init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
				  ops_complete_compute, sh,
				  to_addr_conv(sh, percpu));
		if (failb == syndrome_disks) {
			/* We're missing D+P. */
			return async_raid6_datap_recov(syndrome_disks+2,
						       STRIPE_SIZE, faila,
						       blocks, &submit);
		} else {
			/* We're missing D+D. */
			return async_raid6_2data_recov(syndrome_disks+2,
						       STRIPE_SIZE, faila, failb,
						       blocks, &submit);
		}
	}
}
static void ops_complete_prexor(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);
}

static struct dma_async_tx_descriptor *
ops_run_prexor(struct stripe_head *sh, struct raid5_percpu *percpu,
	       struct dma_async_tx_descriptor *tx)
{
	int disks = sh->disks;
	struct page **xor_srcs = percpu->scribble;
	int count = 0, pd_idx = sh->pd_idx, i;
	struct async_submit_ctl submit;

	/* existing parity data subtracted */
	struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		/* Only process blocks that are known to be uptodate */
		if (test_bit(R5_Wantdrain, &dev->flags))
			xor_srcs[count++] = dev->page;
	}

	init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
			  ops_complete_prexor, sh, to_addr_conv(sh, percpu));
	tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);

	return tx;
}
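/*
 * Background (illustrative, not in the original source): prexor is the
 * read-modify-write shortcut for RAID5 parity,
 *
 *	P_new = P_old ^ D_old ^ D_new
 *
 * ops_run_prexor() xors the old contents of every block about to be
 * drained (R5_Wantdrain) into the parity page, and the reconstruct
 * pass later xors in the new data, so a small write needs only the
 * old data and parity instead of the whole stripe.
 */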
static struct dma_async_tx_descriptor *
ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
{
	int disks = sh->disks;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		struct bio *chosen;

		if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) {
			struct bio *wbi;

			spin_lock_irq(&sh->stripe_lock);
			chosen = dev->towrite;
			dev->towrite = NULL;
			BUG_ON(dev->written);
			wbi = dev->written = chosen;
			spin_unlock_irq(&sh->stripe_lock);

			while (wbi && wbi->bi_sector <
				dev->sector + STRIPE_SECTORS) {
				if (wbi->bi_rw & REQ_FUA)
					set_bit(R5_WantFUA, &dev->flags);
				if (wbi->bi_rw & REQ_SYNC)
					set_bit(R5_SyncIO, &dev->flags);
				tx = async_copy_data(1, wbi, dev->page,
					dev->sector, tx);
				wbi = r5_next_bio(wbi, dev->sector);
			}
		}
	}

	return tx;
}
static void ops_complete_reconstruct(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;
	int disks = sh->disks;
	int pd_idx = sh->pd_idx;
	int qd_idx = sh->qd_idx;
	int i;
	bool fua = false, sync = false;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = disks; i--; ) {
		fua |= test_bit(R5_WantFUA, &sh->dev[i].flags);
		sync |= test_bit(R5_SyncIO, &sh->dev[i].flags);
	}

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		if (dev->written || i == pd_idx || i == qd_idx) {
			set_bit(R5_UPTODATE, &dev->flags);
			if (fua)
				set_bit(R5_WantFUA, &dev->flags);
			if (sync)
				set_bit(R5_SyncIO, &dev->flags);
		}
	}

	if (sh->reconstruct_state == reconstruct_state_drain_run)
		sh->reconstruct_state = reconstruct_state_drain_result;
	else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run)
		sh->reconstruct_state = reconstruct_state_prexor_drain_result;
	else {
		BUG_ON(sh->reconstruct_state != reconstruct_state_run);
		sh->reconstruct_state = reconstruct_state_result;
	}

	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}
static void
ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu,
		     struct dma_async_tx_descriptor *tx)
{
	int disks = sh->disks;
	struct page **xor_srcs = percpu->scribble;
	struct async_submit_ctl submit;
	int count = 0, pd_idx = sh->pd_idx, i;
	struct page *xor_dest;
	int prexor = 0;
	unsigned long flags;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	/* check if prexor is active which means only process blocks
	 * that are part of a read-modify-write (written)
	 */
	if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
		prexor = 1;
		xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (dev->written)
				xor_srcs[count++] = dev->page;
		}
	} else {
		xor_dest = sh->dev[pd_idx].page;
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (i != pd_idx)
				xor_srcs[count++] = dev->page;
		}
	}

	/* 1/ if we prexor'd then the dest is reused as a source
	 * 2/ if we did not prexor then we are redoing the parity
	 * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST
	 * for the synchronous xor case
	 */
	flags = ASYNC_TX_ACK |
		(prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST);

	atomic_inc(&sh->count);

	init_async_submit(&submit, flags, tx, ops_complete_reconstruct, sh,
			  to_addr_conv(sh, percpu));
	if (unlikely(count == 1))
		tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
	else
		tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
}
static void
ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu,
		     struct dma_async_tx_descriptor *tx)
{
	struct async_submit_ctl submit;
	struct page **blocks = percpu->scribble;
	int count;

	pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector);

	count = set_syndrome_sources(blocks, sh);

	atomic_inc(&sh->count);

	init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_reconstruct,
			  sh, to_addr_conv(sh, percpu));
	async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
}
static void ops_complete_check(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	sh->check_state = check_state_check_result;
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}
static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu)
{
	int disks = sh->disks;
	int pd_idx = sh->pd_idx;
	int qd_idx = sh->qd_idx;
	struct page *xor_dest;
	struct page **xor_srcs = percpu->scribble;
	struct dma_async_tx_descriptor *tx;
	struct async_submit_ctl submit;
	int count;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	count = 0;
	xor_dest = sh->dev[pd_idx].page;
	xor_srcs[count++] = xor_dest;
	for (i = disks; i--; ) {
		if (i == pd_idx || i == qd_idx)
			continue;
		xor_srcs[count++] = sh->dev[i].page;
	}

	init_async_submit(&submit, 0, NULL, NULL, NULL,
			  to_addr_conv(sh, percpu));
	tx = async_xor_val(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
			   &sh->ops.zero_sum_result, &submit);

	atomic_inc(&sh->count);
	init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL);
	tx = async_trigger_callback(&submit);
}
static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp)
{
	struct page **srcs = percpu->scribble;
	struct async_submit_ctl submit;
	int count;

	pr_debug("%s: stripe %llu checkp: %d\n", __func__,
		(unsigned long long)sh->sector, checkp);

	count = set_syndrome_sources(srcs, sh);
	if (!checkp)
		srcs[count] = NULL;

	atomic_inc(&sh->count);
	init_async_submit(&submit, ASYNC_TX_ACK, NULL, ops_complete_check,
			  sh, to_addr_conv(sh, percpu));
	async_syndrome_val(srcs, 0, count+2, STRIPE_SIZE,
			   &sh->ops.zero_sum_result, percpu->spare_page, &submit);
}
static void __raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
{
	int overlap_clear = 0, i, disks = sh->disks;
	struct dma_async_tx_descriptor *tx = NULL;
	struct r5conf *conf = sh->raid_conf;
	int level = conf->level;
	struct raid5_percpu *percpu;
	unsigned long cpu;

	cpu = get_cpu();
	percpu = per_cpu_ptr(conf->percpu, cpu);
	if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
		ops_run_biofill(sh);
		overlap_clear++;
	}

	if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) {
		if (level < 6)
			tx = ops_run_compute5(sh, percpu);
		else {
			if (sh->ops.target2 < 0 || sh->ops.target < 0)
				tx = ops_run_compute6_1(sh, percpu);
			else
				tx = ops_run_compute6_2(sh, percpu);
		}
		/* terminate the chain if reconstruct is not set to be run */
		if (tx && !test_bit(STRIPE_OP_RECONSTRUCT, &ops_request))
			async_tx_ack(tx);
	}

	if (test_bit(STRIPE_OP_PREXOR, &ops_request))
		tx = ops_run_prexor(sh, percpu, tx);

	if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) {
		tx = ops_run_biodrain(sh, tx);
		overlap_clear++;
	}

	if (test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) {
		if (level < 6)
			ops_run_reconstruct5(sh, percpu, tx);
		else
			ops_run_reconstruct6(sh, percpu, tx);
	}

	if (test_bit(STRIPE_OP_CHECK, &ops_request)) {
		if (sh->check_state == check_state_run)
			ops_run_check_p(sh, percpu);
		else if (sh->check_state == check_state_run_q)
			ops_run_check_pq(sh, percpu, 0);
		else if (sh->check_state == check_state_run_pq)
			ops_run_check_pq(sh, percpu, 1);
		else
			BUG();
	}

	if (overlap_clear)
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (test_and_clear_bit(R5_Overlap, &dev->flags))
				wake_up(&sh->raid_conf->wait_for_overlap);
		}
	put_cpu();
}
#ifdef CONFIG_MULTICORE_RAID456
static void async_run_ops(void *param, async_cookie_t cookie)
{
	struct stripe_head *sh = param;
	unsigned long ops_request = sh->ops.request;

	clear_bit_unlock(STRIPE_OPS_REQ_PENDING, &sh->state);
	wake_up(&sh->ops.wait_for_ops);

	__raid_run_ops(sh, ops_request);
	release_stripe(sh);
}

static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
{
	/* since handle_stripe can be called outside of raid5d context
	 * we need to ensure sh->ops.request is de-staged before another
	 * request arrives
	 */
	wait_event(sh->ops.wait_for_ops,
		   !test_and_set_bit_lock(STRIPE_OPS_REQ_PENDING, &sh->state));
	sh->ops.request = ops_request;

	atomic_inc(&sh->count);
	async_schedule(async_run_ops, sh);
}
#else
#define raid_run_ops __raid_run_ops
#endif
static int grow_one_stripe(struct r5conf *conf)
{
	struct stripe_head *sh;
	sh = kmem_cache_zalloc(conf->slab_cache, GFP_KERNEL);
	if (!sh)
		return 0;

	sh->raid_conf = conf;
#ifdef CONFIG_MULTICORE_RAID456
	init_waitqueue_head(&sh->ops.wait_for_ops);
#endif

	spin_lock_init(&sh->stripe_lock);

	if (grow_buffers(sh)) {
		shrink_buffers(sh);
		kmem_cache_free(conf->slab_cache, sh);
		return 0;
	}
	/* we just created an active stripe so... */
	atomic_set(&sh->count, 1);
	atomic_inc(&conf->active_stripes);
	INIT_LIST_HEAD(&sh->lru);
	release_stripe(sh);
	return 1;
}
static int grow_stripes(struct r5conf *conf, int num)
{
	struct kmem_cache *sc;
	int devs = max(conf->raid_disks, conf->previous_raid_disks);

	if (conf->mddev->gendisk)
		sprintf(conf->cache_name[0],
			"raid%d-%s", conf->level, mdname(conf->mddev));
	else
		sprintf(conf->cache_name[0],
			"raid%d-%p", conf->level, conf->mddev);
	sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);

	conf->active_name = 0;
	sc = kmem_cache_create(conf->cache_name[conf->active_name],
			       sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
			       0, 0, NULL);
	if (!sc)
		return 1;
	conf->slab_cache = sc;
	conf->pool_size = devs;
	while (num--)
		if (!grow_one_stripe(conf))
			return 1;
	return 0;
}
/**
 * scribble_len - return the required size of the scribble region
 * @num - total number of disks in the array
 *
 * The size must be enough to contain:
 * 1/ a struct page pointer for each device in the array +2
 * 2/ room to convert each entry in (1) to its corresponding dma
 *    (dma_map_page()) or page (page_address()) address.
 *
 * Note: the +2 is for the destination buffers of the ddf/raid6 case where we
 * calculate over all devices (not just the data blocks), using zeros in place
 * of the P and Q blocks.
 */
static size_t scribble_len(int num)
{
	size_t len;

	len = sizeof(struct page *) * (num+2) + sizeof(addr_conv_t) * (num+2);

	return len;
}
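/*
 * Concrete example (illustrative, not in the original source): on a
 * 64-bit machine with 8-byte pointers and 8-byte addr_conv_t, a
 * 10-disk array needs scribble_len(10) = 8*12 + 8*12 = 192 bytes of
 * scribble space per cpu.
 */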
static int resize_stripes(struct r5conf *conf, int newsize)
{
	/* Make all the stripes able to hold 'newsize' devices.
	 * New slots in each stripe get 'page' set to a new page.
	 *
	 * This happens in stages:
	 * 1/ create a new kmem_cache and allocate the required number of
	 *    stripe_heads.
	 * 2/ gather all the old stripe_heads and transfer the pages across
	 *    to the new stripe_heads.  This will have the side effect of
	 *    freezing the array as once all stripe_heads have been collected,
	 *    no IO will be possible.  Old stripe heads are freed once their
	 *    pages have been transferred over, and the old kmem_cache is
	 *    freed when all stripes are done.
	 * 3/ reallocate conf->disks to be suitably bigger.  If this fails,
	 *    we simply return a failure status - no need to clean anything up.
	 * 4/ allocate new pages for the new slots in the new stripe_heads.
	 *    If this fails, we don't bother trying to shrink the
	 *    stripe_heads down again, we just leave them as they are.
	 *    As each stripe_head is processed the new one is released into
	 *    service.
	 *
	 * Once step2 is started, we cannot afford to wait for a write,
	 * so we use GFP_NOIO allocations.
	 */
	struct stripe_head *osh, *nsh;
	LIST_HEAD(newstripes);
	struct disk_info *ndisks;
	unsigned long cpu;
	int err;
	struct kmem_cache *sc;
	int i;

	if (newsize <= conf->pool_size)
		return 0; /* never bother to shrink */

	err = md_allow_write(conf->mddev);
	if (err)
		return err;

	/* Step 1 */
	sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
			       sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
			       0, 0, NULL);
	if (!sc)
		return -ENOMEM;

	for (i = conf->max_nr_stripes; i; i--) {
		nsh = kmem_cache_zalloc(sc, GFP_KERNEL);
		if (!nsh)
			break;

		nsh->raid_conf = conf;
#ifdef CONFIG_MULTICORE_RAID456
		init_waitqueue_head(&nsh->ops.wait_for_ops);
#endif
		spin_lock_init(&nsh->stripe_lock);

		list_add(&nsh->lru, &newstripes);
	}
	if (i) {
		/* didn't get enough, give up */
		while (!list_empty(&newstripes)) {
			nsh = list_entry(newstripes.next, struct stripe_head, lru);
			list_del(&nsh->lru);
			kmem_cache_free(sc, nsh);
		}
		kmem_cache_destroy(sc);
		return -ENOMEM;
	}
	/* Step 2 - Must use GFP_NOIO now.
	 * OK, we have enough stripes, start collecting inactive
	 * stripes and copying them over
	 */
	list_for_each_entry(nsh, &newstripes, lru) {
		spin_lock_irq(&conf->device_lock);
		wait_event_lock_irq(conf->wait_for_stripe,
				    !list_empty(&conf->inactive_list),
				    conf->device_lock, /* nothing */);
		osh = get_free_stripe(conf);
		spin_unlock_irq(&conf->device_lock);
		atomic_set(&nsh->count, 1);
		for(i=0; i<conf->pool_size; i++)
			nsh->dev[i].page = osh->dev[i].page;
		for( ; i<newsize; i++)
			nsh->dev[i].page = NULL;
		kmem_cache_free(conf->slab_cache, osh);
	}
	kmem_cache_destroy(conf->slab_cache);

	/* Step 3.
	 * At this point, we are holding all the stripes so the array
	 * is completely stalled, so now is a good time to resize
	 * conf->disks and the scribble region
	 */
	ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO);
	if (ndisks) {
		for (i=0; i<conf->raid_disks; i++)
			ndisks[i] = conf->disks[i];
		kfree(conf->disks);
		conf->disks = ndisks;
	} else
		err = -ENOMEM;

	get_online_cpus();
	conf->scribble_len = scribble_len(newsize);
	for_each_present_cpu(cpu) {
		struct raid5_percpu *percpu;
		void *scribble;

		percpu = per_cpu_ptr(conf->percpu, cpu);
		scribble = kmalloc(conf->scribble_len, GFP_NOIO);

		if (scribble) {
			kfree(percpu->scribble);
			percpu->scribble = scribble;
		} else {
			err = -ENOMEM;
			break;
		}
	}
	put_online_cpus();

	/* Step 4, return new stripes to service */
	while(!list_empty(&newstripes)) {
		nsh = list_entry(newstripes.next, struct stripe_head, lru);
		list_del_init(&nsh->lru);

		for (i=conf->raid_disks; i < newsize; i++)
			if (nsh->dev[i].page == NULL) {
				struct page *p = alloc_page(GFP_NOIO);
				nsh->dev[i].page = p;
				if (!p)
					err = -ENOMEM;
			}
		release_stripe(nsh);
	}
	/* critical section passed, GFP_NOIO no longer needed */

	conf->slab_cache = sc;
	conf->active_name = 1-conf->active_name;
	conf->pool_size = newsize;
	return err;
}
static int drop_one_stripe(struct r5conf *conf)
{
	struct stripe_head *sh;

	spin_lock_irq(&conf->device_lock);
	sh = get_free_stripe(conf);
	spin_unlock_irq(&conf->device_lock);
	if (!sh)
		return 0;
	BUG_ON(atomic_read(&sh->count));
	shrink_buffers(sh);
	kmem_cache_free(conf->slab_cache, sh);
	atomic_dec(&conf->active_stripes);
	return 1;
}

static void shrink_stripes(struct r5conf *conf)
{
	while (drop_one_stripe(conf))
		;

	if (conf->slab_cache)
		kmem_cache_destroy(conf->slab_cache);
	conf->slab_cache = NULL;
}
static void raid5_end_read_request(struct bio * bi, int error)
{
	struct stripe_head *sh = bi->bi_private;
	struct r5conf *conf = sh->raid_conf;
	int disks = sh->disks, i;
	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
	char b[BDEVNAME_SIZE];
	struct md_rdev *rdev = NULL;
	sector_t s;

	for (i=0 ; i<disks; i++)
		if (bi == &sh->dev[i].req)
			break;

	pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n",
		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
		uptodate);
	if (i == disks) {
		BUG();
		return;
	}
	if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
		/* If replacement finished while this request was outstanding,
		 * 'replacement' might be NULL already.
		 * In that case it moved down to 'rdev'.
		 * rdev is not removed until all requests are finished.
		 */
		rdev = conf->disks[i].replacement;
	if (!rdev)
		rdev = conf->disks[i].rdev;

	if (use_new_offset(conf, sh))
		s = sh->sector + rdev->new_data_offset;
	else
		s = sh->sector + rdev->data_offset;
	if (uptodate) {
		set_bit(R5_UPTODATE, &sh->dev[i].flags);
		if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
			/* Note that this cannot happen on a
			 * replacement device.  We just fail those on
			 * any device.
			 */
			printk_ratelimited(
				KERN_INFO
				"md/raid:%s: read error corrected"
				" (%lu sectors at %llu on %s)\n",
				mdname(conf->mddev), STRIPE_SECTORS,
				(unsigned long long)s,
				bdevname(rdev->bdev, b));
			atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
			clear_bit(R5_ReadError, &sh->dev[i].flags);
			clear_bit(R5_ReWrite, &sh->dev[i].flags);
		} else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
			clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);

		if (atomic_read(&rdev->read_errors))
			atomic_set(&rdev->read_errors, 0);
	} else {
		const char *bdn = bdevname(rdev->bdev, b);
		int retry = 0;
		int set_bad = 0;

		clear_bit(R5_UPTODATE, &sh->dev[i].flags);
		atomic_inc(&rdev->read_errors);
		if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
			printk_ratelimited(
				KERN_WARNING
				"md/raid:%s: read error on replacement device "
				"(sector %llu on %s).\n",
				mdname(conf->mddev),
				(unsigned long long)s,
				bdn);
		else if (conf->mddev->degraded >= conf->max_degraded) {
			set_bad = 1;
			printk_ratelimited(
				KERN_WARNING
				"md/raid:%s: read error not correctable "
				"(sector %llu on %s).\n",
				mdname(conf->mddev),
				(unsigned long long)s,
				bdn);
		} else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) {
			/* Oh, no!!! */
			set_bad = 1;
			printk_ratelimited(
				KERN_WARNING
				"md/raid:%s: read error NOT corrected!! "
				"(sector %llu on %s).\n",
				mdname(conf->mddev),
				(unsigned long long)s,
				bdn);
		} else if (atomic_read(&rdev->read_errors)
			 > conf->max_nr_stripes)
			printk(KERN_WARNING
			       "md/raid:%s: Too many read errors, failing device %s.\n",
			       mdname(conf->mddev), bdn);
		else
			retry = 1;
		if (retry)
			if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) {
				set_bit(R5_ReadError, &sh->dev[i].flags);
				clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
			} else
				set_bit(R5_ReadNoMerge, &sh->dev[i].flags);
		else {
			clear_bit(R5_ReadError, &sh->dev[i].flags);
			clear_bit(R5_ReWrite, &sh->dev[i].flags);
			if (!(set_bad
			      && test_bit(In_sync, &rdev->flags)
			      && rdev_set_badblocks(
				      rdev, sh->sector, STRIPE_SECTORS, 0)))
				md_error(conf->mddev, rdev);
		}
	}
	rdev_dec_pending(rdev, conf->mddev);
	clear_bit(R5_LOCKED, &sh->dev[i].flags);
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}
static void raid5_end_write_request(struct bio *bi, int error)
{
	struct stripe_head *sh = bi->bi_private;
	struct r5conf *conf = sh->raid_conf;
	int disks = sh->disks, i;
	struct md_rdev *uninitialized_var(rdev);
	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
	sector_t first_bad;
	int bad_sectors;
	int replacement = 0;

	for (i = 0 ; i < disks; i++) {
		if (bi == &sh->dev[i].req) {
			rdev = conf->disks[i].rdev;
			break;
		}
		if (bi == &sh->dev[i].rreq) {
			rdev = conf->disks[i].replacement;
			if (rdev)
				replacement = 1;
			else
				/* rdev was removed and 'replacement'
				 * replaced it.  rdev is not removed
				 * until all requests are finished.
				 */
				rdev = conf->disks[i].rdev;
			break;
		}
	}
	pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n",
		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
		uptodate);
	if (i == disks) {
		BUG();
		return;
	}

	if (replacement) {
		if (!uptodate)
			md_error(conf->mddev, rdev);
		else if (is_badblock(rdev, sh->sector,
				     STRIPE_SECTORS,
				     &first_bad, &bad_sectors))
			set_bit(R5_MadeGoodRepl, &sh->dev[i].flags);
	} else {
		if (!uptodate) {
			set_bit(WriteErrorSeen, &rdev->flags);
			set_bit(R5_WriteError, &sh->dev[i].flags);
			if (!test_and_set_bit(WantReplacement, &rdev->flags))
				set_bit(MD_RECOVERY_NEEDED,
					&rdev->mddev->recovery);
		} else if (is_badblock(rdev, sh->sector,
				       STRIPE_SECTORS,
				       &first_bad, &bad_sectors))
			set_bit(R5_MadeGood, &sh->dev[i].flags);
	}
	rdev_dec_pending(rdev, conf->mddev);

	if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags))
		clear_bit(R5_LOCKED, &sh->dev[i].flags);
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}
static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous);

static void raid5_build_block(struct stripe_head *sh, int i, int previous)
{
	struct r5dev *dev = &sh->dev[i];

	bio_init(&dev->req);
	dev->req.bi_io_vec = &dev->vec;
	dev->req.bi_vcnt++;
	dev->req.bi_max_vecs++;
	dev->req.bi_private = sh;
	dev->vec.bv_page = dev->page;

	bio_init(&dev->rreq);
	dev->rreq.bi_io_vec = &dev->rvec;
	dev->rreq.bi_vcnt++;
	dev->rreq.bi_max_vecs++;
	dev->rreq.bi_private = sh;
	dev->rvec.bv_page = dev->page;

	dev->flags = 0;
	dev->sector = compute_blocknr(sh, i, previous);
}
1910 dev
->sector
= compute_blocknr(sh
, i
, previous
);
1913 static void error(struct mddev
*mddev
, struct md_rdev
*rdev
)
1915 char b
[BDEVNAME_SIZE
];
1916 struct r5conf
*conf
= mddev
->private;
1917 unsigned long flags
;
1918 pr_debug("raid456: error called\n");
1920 spin_lock_irqsave(&conf
->device_lock
, flags
);
1921 clear_bit(In_sync
, &rdev
->flags
);
1922 mddev
->degraded
= calc_degraded(conf
);
1923 spin_unlock_irqrestore(&conf
->device_lock
, flags
);
1924 set_bit(MD_RECOVERY_INTR
, &mddev
->recovery
);
1926 set_bit(Blocked
, &rdev
->flags
);
1927 set_bit(Faulty
, &rdev
->flags
);
1928 set_bit(MD_CHANGE_DEVS
, &mddev
->flags
);
1930 "md/raid:%s: Disk failure on %s, disabling device.\n"
1931 "md/raid:%s: Operation continuing on %d devices.\n",
1933 bdevname(rdev
->bdev
, b
),
1935 conf
->raid_disks
- mddev
->degraded
);
/*
 * Input: a 'big' sector number,
 * Output: index of the data and parity disk, and the sector # in them.
 */
static sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
				     int previous, int *dd_idx,
				     struct stripe_head *sh)
{
	sector_t stripe, stripe2;
	sector_t chunk_number;
	unsigned int chunk_offset;
	int pd_idx, qd_idx;
	int ddf_layout = 0;
	sector_t new_sector;
	int algorithm = previous ? conf->prev_algo
				 : conf->algorithm;
	int sectors_per_chunk = previous ? conf->prev_chunk_sectors
					 : conf->chunk_sectors;
	int raid_disks = previous ? conf->previous_raid_disks
				  : conf->raid_disks;
	int data_disks = raid_disks - conf->max_degraded;

	/* First compute the information on this sector */

	/*
	 * Compute the chunk number and the sector offset inside the chunk
	 */
	chunk_offset = sector_div(r_sector, sectors_per_chunk);
	chunk_number = r_sector;

	/*
	 * Compute the stripe number
	 */
	stripe = chunk_number;
	*dd_idx = sector_div(stripe, data_disks);
	stripe2 = stripe;
	/*
	 * Select the parity disk based on the user selected algorithm.
	 */
	pd_idx = qd_idx = -1;
	switch(conf->level) {
	case 4:
		pd_idx = data_disks;
		break;
	case 5:
		switch (algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
			pd_idx = data_disks - sector_div(stripe2, raid_disks);
			if (*dd_idx >= pd_idx)
				(*dd_idx)++;
			break;
		case ALGORITHM_RIGHT_ASYMMETRIC:
			pd_idx = sector_div(stripe2, raid_disks);
			if (*dd_idx >= pd_idx)
				(*dd_idx)++;
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
			pd_idx = data_disks - sector_div(stripe2, raid_disks);
			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
			break;
		case ALGORITHM_RIGHT_SYMMETRIC:
			pd_idx = sector_div(stripe2, raid_disks);
			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
			break;
		case ALGORITHM_PARITY_0:
			pd_idx = 0;
			(*dd_idx)++;
			break;
		case ALGORITHM_PARITY_N:
			pd_idx = data_disks;
			break;
		default:
			BUG();
		}
		break;
	case 6:

		switch (algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
			qd_idx = pd_idx + 1;
			if (pd_idx == raid_disks-1) {
				(*dd_idx)++;	/* Q D D D P */
				qd_idx = 0;
			} else if (*dd_idx >= pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			break;
		case ALGORITHM_RIGHT_ASYMMETRIC:
			pd_idx = sector_div(stripe2, raid_disks);
			qd_idx = pd_idx + 1;
			if (pd_idx == raid_disks-1) {
				(*dd_idx)++;	/* Q D D D P */
				qd_idx = 0;
			} else if (*dd_idx >= pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
			qd_idx = (pd_idx + 1) % raid_disks;
			*dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
			break;
		case ALGORITHM_RIGHT_SYMMETRIC:
			pd_idx = sector_div(stripe2, raid_disks);
			qd_idx = (pd_idx + 1) % raid_disks;
			*dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
			break;
		case ALGORITHM_PARITY_0:
			pd_idx = 0;
			qd_idx = 1;
			(*dd_idx) += 2;
			break;
		case ALGORITHM_PARITY_N:
			pd_idx = data_disks;
			qd_idx = data_disks + 1;
			break;

		case ALGORITHM_ROTATING_ZERO_RESTART:
			/* Exactly the same as RIGHT_ASYMMETRIC, but the
			 * order of blocks for computing Q is different.
			 */
			pd_idx = sector_div(stripe2, raid_disks);
			qd_idx = pd_idx + 1;
			if (pd_idx == raid_disks-1) {
				(*dd_idx)++;	/* Q D D D P */
				qd_idx = 0;
			} else if (*dd_idx >= pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			ddf_layout = 1;
			break;

		case ALGORITHM_ROTATING_N_RESTART:
			/* Same as left_asymmetric, but first stripe is
			 * D D D P Q  rather than
			 * Q D D D P
			 */
			stripe2 += 1;
			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
			qd_idx = pd_idx + 1;
			if (pd_idx == raid_disks-1) {
				(*dd_idx)++;	/* Q D D D P */
				qd_idx = 0;
			} else if (*dd_idx >= pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			ddf_layout = 1;
			break;

		case ALGORITHM_ROTATING_N_CONTINUE:
			/* Same as left_symmetric but Q is before P */
			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
			qd_idx = (pd_idx + raid_disks - 1) % raid_disks;
			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
			ddf_layout = 1;
			break;

		case ALGORITHM_LEFT_ASYMMETRIC_6:
			/* RAID5 left_asymmetric, with Q on last device */
			pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
			if (*dd_idx >= pd_idx)
				(*dd_idx)++;
			qd_idx = raid_disks - 1;
			break;

		case ALGORITHM_RIGHT_ASYMMETRIC_6:
			pd_idx = sector_div(stripe2, raid_disks-1);
			if (*dd_idx >= pd_idx)
				(*dd_idx)++;
			qd_idx = raid_disks - 1;
			break;

		case ALGORITHM_LEFT_SYMMETRIC_6:
			pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
			*dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
			qd_idx = raid_disks - 1;
			break;

		case ALGORITHM_RIGHT_SYMMETRIC_6:
			pd_idx = sector_div(stripe2, raid_disks-1);
			*dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
			qd_idx = raid_disks - 1;
			break;

		case ALGORITHM_PARITY_0_6:
			pd_idx = 0;
			(*dd_idx)++;
			qd_idx = raid_disks - 1;
			break;

		default:
			BUG();
		}
		break;
	}

	if (sh) {
		sh->pd_idx = pd_idx;
		sh->qd_idx = qd_idx;
		sh->ddf_layout = ddf_layout;
	}
	/*
	 * Finally, compute the new sector number
	 */
	new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset;
	return new_sector;
}
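/*
 * Worked example (illustrative, not in the original source): RAID5,
 * 4 disks, 64-sector chunks, left-symmetric.  For r_sector = 200:
 * chunk_offset = 200 % 64 = 8, chunk_number = 3, so stripe = 1 and
 * dd_idx = 0 before adjustment.  pd_idx = 3 - (1 % 4) = 2, giving
 * *dd_idx = (2 + 1 + 0) % 4 = 3 and new_sector = 1*64 + 8 = 72: the
 * block lives at sector 72 of disk 3, with parity on disk 2.
 */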
static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
{
	struct r5conf *conf = sh->raid_conf;
	int raid_disks = sh->disks;
	int data_disks = raid_disks - conf->max_degraded;
	sector_t new_sector = sh->sector, check;
	int sectors_per_chunk = previous ? conf->prev_chunk_sectors
					 : conf->chunk_sectors;
	int algorithm = previous ? conf->prev_algo
				 : conf->algorithm;
	sector_t stripe;
	int chunk_offset;
	sector_t chunk_number;
	int dummy1, dd_idx = i;
	sector_t r_sector;
	struct stripe_head sh2;

	chunk_offset = sector_div(new_sector, sectors_per_chunk);
	stripe = new_sector;

	if (i == sh->pd_idx)
		return 0;
	switch(conf->level) {
	case 4: break;
	case 5:
		switch (algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
		case ALGORITHM_RIGHT_ASYMMETRIC:
			if (i > sh->pd_idx)
				i--;
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
		case ALGORITHM_RIGHT_SYMMETRIC:
			if (i < sh->pd_idx)
				i += raid_disks;
			i -= (sh->pd_idx + 1);
			break;
		case ALGORITHM_PARITY_0:
			i -= 1;
			break;
		case ALGORITHM_PARITY_N:
			break;
		default:
			BUG();
		}
		break;
	case 6:
		if (i == sh->qd_idx)
			return 0; /* It is the Q disk */
		switch (algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
		case ALGORITHM_RIGHT_ASYMMETRIC:
		case ALGORITHM_ROTATING_ZERO_RESTART:
		case ALGORITHM_ROTATING_N_RESTART:
			if (sh->pd_idx == raid_disks-1)
				i--;	/* Q D D D P */
			else if (i > sh->pd_idx)
				i -= 2; /* D D P Q D */
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
		case ALGORITHM_RIGHT_SYMMETRIC:
			if (sh->pd_idx == raid_disks-1)
				i--; /* Q D D D P */
			else {
				/* D D P Q D */
				if (i < sh->pd_idx)
					i += raid_disks;
				i -= (sh->pd_idx + 2);
			}
			break;
		case ALGORITHM_PARITY_0:
			i -= 2;
			break;
		case ALGORITHM_PARITY_N:
			break;
		case ALGORITHM_ROTATING_N_CONTINUE:
			/* Like left_symmetric, but P is before Q */
			if (sh->pd_idx == 0)
				i--;	/* P D D D Q */
			else {
				/* D D Q P D */
				if (i < sh->pd_idx)
					i += raid_disks;
				i -= (sh->pd_idx + 1);
			}
			break;
		case ALGORITHM_LEFT_ASYMMETRIC_6:
		case ALGORITHM_RIGHT_ASYMMETRIC_6:
			if (i > sh->pd_idx)
				i--;
			break;
		case ALGORITHM_LEFT_SYMMETRIC_6:
		case ALGORITHM_RIGHT_SYMMETRIC_6:
			if (i < sh->pd_idx)
				i += data_disks + 1;
			i -= (sh->pd_idx + 1);
			break;
		case ALGORITHM_PARITY_0_6:
			i -= 1;
			break;
		default:
			BUG();
		}
		break;
	}

	chunk_number = stripe * data_disks + i;
	r_sector = chunk_number * sectors_per_chunk + chunk_offset;

	check = raid5_compute_sector(conf, r_sector,
				     previous, &dummy1, &sh2);
	if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx
	    || sh2.qd_idx != sh->qd_idx) {
		printk(KERN_ERR "md/raid:%s: compute_blocknr: map not correct\n",
		       mdname(conf->mddev));
		return 0;
	}
	return r_sector;
}
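
/*
 * schedule_reconstruction() sets up the async operations needed to bring
 * parity up to date for a write.  With 'rcw' (reconstruct-write) the new
 * data is drained into the stripe cache and parity is recomputed from all
 * blocks; otherwise a prexor pass first subtracts the old data from the
 * parity before the drain, which is the read-modify-write path (RAID6
 * always uses rcw in this implementation).
 */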
static void
schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
			 int rcw, int expand)
{
	int i, pd_idx = sh->pd_idx, disks = sh->disks;
	struct r5conf *conf = sh->raid_conf;
	int level = conf->level;

	if (rcw) {
		/* if we are not expanding this is a proper write request, and
		 * there will be bios with new data to be drained into the
		 * stripe cache
		 */
		if (!expand) {
			sh->reconstruct_state = reconstruct_state_drain_run;
			set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
		} else
			sh->reconstruct_state = reconstruct_state_run;

		set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);

		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];

			if (dev->towrite) {
				set_bit(R5_LOCKED, &dev->flags);
				set_bit(R5_Wantdrain, &dev->flags);
				if (!expand)
					clear_bit(R5_UPTODATE, &dev->flags);
				s->locked++;
			}
		}
		if (s->locked + conf->max_degraded == disks)
			if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
				atomic_inc(&conf->pending_full_writes);
	} else {
		BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) ||
			test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags)));

		sh->reconstruct_state = reconstruct_state_prexor_drain_run;
		set_bit(STRIPE_OP_PREXOR, &s->ops_request);
		set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
		set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);

		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (i == pd_idx)
				continue;

			if (dev->towrite &&
			    (test_bit(R5_UPTODATE, &dev->flags) ||
			     test_bit(R5_Wantcompute, &dev->flags))) {
				set_bit(R5_Wantdrain, &dev->flags);
				set_bit(R5_LOCKED, &dev->flags);
				clear_bit(R5_UPTODATE, &dev->flags);
				s->locked++;
			}
		}
	}

	/* keep the parity disk(s) locked while asynchronous operations
	 * are in flight
	 */
	set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);
	clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
	s->locked++;

	if (level == 6) {
		int qd_idx = sh->qd_idx;
		struct r5dev *dev = &sh->dev[qd_idx];

		set_bit(R5_LOCKED, &dev->flags);
		clear_bit(R5_UPTODATE, &dev->flags);
		s->locked++;
	}

	pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n",
		__func__, (unsigned long long)sh->sector,
		s->locked, s->ops_request);
}
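
/*
 * add_stripe_bio() below returns 1 when the bio has been queued on the
 * stripe and 0 when it overlapped an already-queued request, in which
 * case R5_Overlap is set and the caller is expected to wait on
 * conf->wait_for_overlap before retrying.
 */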
/*
 * Each stripe/dev can have one or more bion attached.
 * toread/towrite point to the first in a chain.
 * The bi_next chain must be in order.
 */
static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite)
{
	struct bio **bip;
	struct r5conf *conf = sh->raid_conf;
	int firstwrite = 0;

	pr_debug("adding bi b#%llu to stripe s#%llu\n",
		(unsigned long long)bi->bi_sector,
		(unsigned long long)sh->sector);

	/*
	 * If several bio share a stripe, the bio bi_phys_segments acts as a
	 * reference count to avoid race. The reference count should already be
	 * increased before this function is called (for example, in
	 * make_request()), so other bio sharing this stripe will not free the
	 * stripe. If a stripe is owned by a single bio, the stripe lock will
	 * protect it.
	 */
	spin_lock_irq(&sh->stripe_lock);
	if (forwrite) {
		bip = &sh->dev[dd_idx].towrite;
		if (*bip == NULL)
			firstwrite = 1;
	} else
		bip = &sh->dev[dd_idx].toread;
	while (*bip && (*bip)->bi_sector < bi->bi_sector) {
		if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector)
			goto overlap;
		bip = & (*bip)->bi_next;
	}
	if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
		goto overlap;

	BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
	if (*bip)
		bi->bi_next = *bip;
	*bip = bi;
	raid5_inc_bi_active_stripes(bi);

	if (forwrite) {
		/* check if page is covered */
		sector_t sector = sh->dev[dd_idx].sector;
		for (bi=sh->dev[dd_idx].towrite;
		     sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
			     bi && bi->bi_sector <= sector;
		     bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
			if (bi->bi_sector + (bi->bi_size>>9) >= sector)
				sector = bi->bi_sector + (bi->bi_size>>9);
		}
		if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
			set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
	}
	spin_unlock_irq(&sh->stripe_lock);

	pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
		(unsigned long long)(*bip)->bi_sector,
		(unsigned long long)sh->sector, dd_idx);

	if (conf->mddev->bitmap && firstwrite) {
		bitmap_startwrite(conf->mddev->bitmap, sh->sector,
				  STRIPE_SECTORS, 0);
		sh->bm_seq = conf->seq_flush+1;
		set_bit(STRIPE_BIT_DELAY, &sh->state);
	}
	return 1;

 overlap:
	set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
	spin_unlock_irq(&sh->stripe_lock);
	return 0;
}
static void end_reshape(struct r5conf *conf);
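
/*
 * stripe_set_idx() initializes sh->pd_idx and sh->qd_idx for the stripe
 * that holds the given (data-)stripe number, by mapping the first data
 * block of that stripe through raid5_compute_sector().
 */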
static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
			    struct stripe_head *sh)
{
	int sectors_per_chunk =
		previous ? conf->prev_chunk_sectors : conf->chunk_sectors;
	int dd_idx;
	int chunk_offset = sector_div(stripe, sectors_per_chunk);
	int disks = previous ? conf->previous_raid_disks : conf->raid_disks;

	raid5_compute_sector(conf,
			     stripe * (disks - conf->max_degraded)
			     *sectors_per_chunk + chunk_offset,
			     previous,
			     &dd_idx, sh);
}
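
/*
 * handle_failed_stripe() is called when more devices have failed than the
 * array can tolerate: every pending write, 'written' and (on dead devices)
 * read bio attached to the stripe is completed with an error and handed
 * back to the caller via *return_bi.
 */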
static void
handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
				struct stripe_head_state *s, int disks,
				struct bio **return_bi)
{
	int i;
	for (i = disks; i--; ) {
		struct bio *bi;
		int bitmap_end = 0;

		if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
			struct md_rdev *rdev;
			rcu_read_lock();
			rdev = rcu_dereference(conf->disks[i].rdev);
			if (rdev && test_bit(In_sync, &rdev->flags))
				atomic_inc(&rdev->nr_pending);
			else
				rdev = NULL;
			rcu_read_unlock();
			if (rdev) {
				if (!rdev_set_badblocks(
					    rdev,
					    sh->sector,
					    STRIPE_SECTORS, 0))
					md_error(conf->mddev, rdev);
				rdev_dec_pending(rdev, conf->mddev);
			}
		}
		spin_lock_irq(&sh->stripe_lock);
		/* fail all writes first */
		bi = sh->dev[i].towrite;
		sh->dev[i].towrite = NULL;
		spin_unlock_irq(&sh->stripe_lock);
		if (bi)
			bitmap_end = 1;

		if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
			wake_up(&conf->wait_for_overlap);

		while (bi && bi->bi_sector <
			sh->dev[i].sector + STRIPE_SECTORS) {
			struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
			clear_bit(BIO_UPTODATE, &bi->bi_flags);
			if (!raid5_dec_bi_active_stripes(bi)) {
				md_write_end(conf->mddev);
				bi->bi_next = *return_bi;
				*return_bi = bi;
			}
			bi = nextbi;
		}
		if (bitmap_end)
			bitmap_endwrite(conf->mddev->bitmap, sh->sector,
					STRIPE_SECTORS, 0, 0);
		bitmap_end = 0;
		/* and fail all 'written' */
		bi = sh->dev[i].written;
		sh->dev[i].written = NULL;
		if (bi) bitmap_end = 1;
		while (bi && bi->bi_sector <
		       sh->dev[i].sector + STRIPE_SECTORS) {
			struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
			clear_bit(BIO_UPTODATE, &bi->bi_flags);
			if (!raid5_dec_bi_active_stripes(bi)) {
				md_write_end(conf->mddev);
				bi->bi_next = *return_bi;
				*return_bi = bi;
			}
			bi = bi2;
		}

		/* fail any reads if this device is non-operational and
		 * the data has not reached the cache yet.
		 */
		if (!test_bit(R5_Wantfill, &sh->dev[i].flags) &&
		    (!test_bit(R5_Insync, &sh->dev[i].flags) ||
		      test_bit(R5_ReadError, &sh->dev[i].flags))) {
			bi = sh->dev[i].toread;
			sh->dev[i].toread = NULL;
			if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
				wake_up(&conf->wait_for_overlap);
			if (bi) s->to_read--;
			while (bi && bi->bi_sector <
			       sh->dev[i].sector + STRIPE_SECTORS) {
				struct bio *nextbi =
					r5_next_bio(bi, sh->dev[i].sector);
				clear_bit(BIO_UPTODATE, &bi->bi_flags);
				if (!raid5_dec_bi_active_stripes(bi)) {
					bi->bi_next = *return_bi;
					*return_bi = bi;
				}
				bi = nextbi;
			}
		}
		if (bitmap_end)
			bitmap_endwrite(conf->mddev->bitmap, sh->sector,
					STRIPE_SECTORS, 0, 0);
		/* If we were in the middle of a write the parity block might
		 * still be locked - so just clear all R5_LOCKED flags
		 */
		clear_bit(R5_LOCKED, &sh->dev[i].flags);
	}

	if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
		if (atomic_dec_and_test(&conf->pending_full_writes))
			md_wakeup_thread(conf->mddev->thread);
}
static void
handle_failed_sync(struct r5conf *conf, struct stripe_head *sh,
		   struct stripe_head_state *s)
{
	int abort = 0;
	int i;

	clear_bit(STRIPE_SYNCING, &sh->state);
	s->syncing = 0;
	s->replacing = 0;
	/* There is nothing more to do for sync/check/repair.
	 * Don't even need to abort as that is handled elsewhere
	 * if needed, and not always wanted e.g. if there is a known
	 * bad block here.
	 * For recover/replace we need to record a bad block on all
	 * non-sync devices, or abort the recovery
	 */
	if (test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery)) {
		/* During recovery devices cannot be removed, so
		 * locking and refcounting of rdevs is not needed
		 */
		for (i = 0; i < conf->raid_disks; i++) {
			struct md_rdev *rdev = conf->disks[i].rdev;
			if (rdev
			    && !test_bit(Faulty, &rdev->flags)
			    && !test_bit(In_sync, &rdev->flags)
			    && !rdev_set_badblocks(rdev, sh->sector,
						   STRIPE_SECTORS, 0))
				abort = 1;
			rdev = conf->disks[i].replacement;
			if (rdev
			    && !test_bit(Faulty, &rdev->flags)
			    && !test_bit(In_sync, &rdev->flags)
			    && !rdev_set_badblocks(rdev, sh->sector,
						   STRIPE_SECTORS, 0))
				abort = 1;
		}
		if (abort)
			conf->recovery_disabled =
				conf->mddev->recovery_disabled;
	}
	md_done_sync(conf->mddev, STRIPE_SECTORS, !abort);
}
static int want_replace(struct stripe_head *sh, int disk_idx)
{
	struct md_rdev *rdev;
	int rv = 0;
	/* Doing recovery so rcu locking not required */
	rdev = sh->raid_conf->disks[disk_idx].replacement;
	if (rdev
	    && !test_bit(Faulty, &rdev->flags)
	    && !test_bit(In_sync, &rdev->flags)
	    && (rdev->recovery_offset <= sh->sector
		|| rdev->mddev->recovery_cp <= sh->sector))
		rv = 1;

	return rv;
}
/* fetch_block - checks the given member device to see if its data needs
 * to be read or computed to satisfy a request.
 *
 * Returns 1 when no more member devices need to be checked, otherwise returns
 * 0 to tell the loop in handle_stripe_fill to continue
 */
static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
		       int disk_idx, int disks)
{
	struct r5dev *dev = &sh->dev[disk_idx];
	struct r5dev *fdev[2] = { &sh->dev[s->failed_num[0]],
				  &sh->dev[s->failed_num[1]] };

	/* is the data in this block needed, and can we get it? */
	if (!test_bit(R5_LOCKED, &dev->flags) &&
	    !test_bit(R5_UPTODATE, &dev->flags) &&
	    (dev->toread ||
	     (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
	     s->syncing || s->expanding ||
	     (s->replacing && want_replace(sh, disk_idx)) ||
	     (s->failed >= 1 && fdev[0]->toread) ||
	     (s->failed >= 2 && fdev[1]->toread) ||
	     (sh->raid_conf->level <= 5 && s->failed && fdev[0]->towrite &&
	      !test_bit(R5_OVERWRITE, &fdev[0]->flags)) ||
	     (sh->raid_conf->level == 6 && s->failed && s->to_write))) {
		/* we would like to get this block, possibly by computing it,
		 * otherwise read it if the backing disk is insync
		 */
		BUG_ON(test_bit(R5_Wantcompute, &dev->flags));
		BUG_ON(test_bit(R5_Wantread, &dev->flags));
		if ((s->uptodate == disks - 1) &&
		    (s->failed && (disk_idx == s->failed_num[0] ||
				   disk_idx == s->failed_num[1]))) {
			/* have disk failed, and we're requested to fetch it;
			 * do compute it
			 */
			pr_debug("Computing stripe %llu block %d\n",
			       (unsigned long long)sh->sector, disk_idx);
			set_bit(STRIPE_COMPUTE_RUN, &sh->state);
			set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
			set_bit(R5_Wantcompute, &dev->flags);
			sh->ops.target = disk_idx;
			sh->ops.target2 = -1; /* no 2nd target */
			s->req_compute = 1;
			/* Careful: from this point on 'uptodate' is in the eye
			 * of raid_run_ops which services 'compute' operations
			 * before writes. R5_Wantcompute flags a block that will
			 * be R5_UPTODATE by the time it is needed for a
			 * subsequent operation.
			 */
			s->uptodate++;
			return 1;
		} else if (s->uptodate == disks-2 && s->failed >= 2) {
			/* Computing 2-failure is *very* expensive; only
			 * do it if failed >= 2
			 */
			int other;
			for (other = disks; other--; ) {
				if (other == disk_idx)
					continue;
				if (!test_bit(R5_UPTODATE,
				      &sh->dev[other].flags))
					break;
			}
			BUG_ON(other < 0);
			pr_debug("Computing stripe %llu blocks %d,%d\n",
			       (unsigned long long)sh->sector,
			       disk_idx, other);
			set_bit(STRIPE_COMPUTE_RUN, &sh->state);
			set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
			set_bit(R5_Wantcompute, &sh->dev[disk_idx].flags);
			set_bit(R5_Wantcompute, &sh->dev[other].flags);
			sh->ops.target = disk_idx;
			sh->ops.target2 = other;
			s->uptodate += 2;
			s->req_compute = 1;
			return 1;
		} else if (test_bit(R5_Insync, &dev->flags)) {
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantread, &dev->flags);
			s->locked++;
			pr_debug("Reading block %d (sync=%d)\n",
				disk_idx, s->syncing);
		}
	}

	return 0;
}
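
/*
 * Note on the thresholds above: with s->uptodate == disks - 1 the single
 * missing block is just the XOR (or syndrome computation) of the blocks
 * already in the cache, so one compute replaces a read.  Recovering two
 * blocks needs all of the remaining disks - 2 blocks to be up to date,
 * hence the stricter test for the 2-failure case.
 */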
/**
 * handle_stripe_fill - read or compute data to satisfy pending requests.
 */
static void handle_stripe_fill(struct stripe_head *sh,
			       struct stripe_head_state *s,
			       int disks)
{
	int i;

	/* look for blocks to read/compute, skip this if a compute
	 * is already in flight, or if the stripe contents are in the
	 * midst of changing due to a write
	 */
	if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
	    !sh->reconstruct_state)
		for (i = disks; i--; )
			if (fetch_block(sh, s, i, disks))
				break;
	set_bit(STRIPE_HANDLE, &sh->state);
}
/* handle_stripe_clean_event
 * any written block on an uptodate or failed drive can be returned.
 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
 * never LOCKED, so we don't need to test 'failed' directly.
 */
static void handle_stripe_clean_event(struct r5conf *conf,
	struct stripe_head *sh, int disks, struct bio **return_bi)
{
	int i;
	struct r5dev *dev;

	for (i = disks; i--; )
		if (sh->dev[i].written) {
			dev = &sh->dev[i];
			if (!test_bit(R5_LOCKED, &dev->flags) &&
			    test_bit(R5_UPTODATE, &dev->flags)) {
				/* We can return any write requests */
				struct bio *wbi, *wbi2;
				pr_debug("Return write for disc %d\n", i);
				wbi = dev->written;
				dev->written = NULL;
				while (wbi && wbi->bi_sector <
					dev->sector + STRIPE_SECTORS) {
					wbi2 = r5_next_bio(wbi, dev->sector);
					if (!raid5_dec_bi_active_stripes(wbi)) {
						md_write_end(conf->mddev);
						wbi->bi_next = *return_bi;
						*return_bi = wbi;
					}
					wbi = wbi2;
				}
				bitmap_endwrite(conf->mddev->bitmap, sh->sector,
						STRIPE_SECTORS,
					 !test_bit(STRIPE_DEGRADED, &sh->state),
						0);
			}
		}

	if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
		if (atomic_dec_and_test(&conf->pending_full_writes))
			md_wakeup_thread(conf->mddev->thread);
}
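
/*
 * handle_stripe_dirtying() chooses between read-modify-write (rmw) and
 * reconstruct-write (rcw) by counting the pre-reads each would need.  A
 * rough worked example (hypothetical layout): writing 2 of 7 data blocks
 * on an 8-disk RAID5 costs 3 pre-reads for rmw (the 2 old data blocks
 * plus parity) but 5 for rcw (the untouched data blocks), so rmw wins; a
 * near-full-stripe write tips the other way.  A device that cannot be
 * read is charged 2*disks so that the other method is always preferred.
 */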
static void handle_stripe_dirtying(struct r5conf *conf,
				   struct stripe_head *sh,
				   struct stripe_head_state *s,
				   int disks)
{
	int rmw = 0, rcw = 0, i;
	if (conf->max_degraded == 2) {
		/* RAID6 requires 'rcw' in current implementation
		 * Calculate the real rcw later - for now fake it
		 * look like rcw is cheaper
		 */
		rcw = 1; rmw = 2;
	} else for (i = disks; i--; ) {
		/* would I have to read this buffer for read_modify_write */
		struct r5dev *dev = &sh->dev[i];
		if ((dev->towrite || i == sh->pd_idx) &&
		    !test_bit(R5_LOCKED, &dev->flags) &&
		    !(test_bit(R5_UPTODATE, &dev->flags) ||
		      test_bit(R5_Wantcompute, &dev->flags))) {
			if (test_bit(R5_Insync, &dev->flags))
				rmw++;
			else
				rmw += 2*disks;  /* cannot read it */
		}
		/* Would I have to read this buffer for reconstruct_write */
		if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
		    !test_bit(R5_LOCKED, &dev->flags) &&
		    !(test_bit(R5_UPTODATE, &dev->flags) ||
		      test_bit(R5_Wantcompute, &dev->flags))) {
			if (test_bit(R5_Insync, &dev->flags))
				rcw++;
			else
				rcw += 2*disks;
		}
	}
	pr_debug("for sector %llu, rmw=%d rcw=%d\n",
		(unsigned long long)sh->sector, rmw, rcw);
	set_bit(STRIPE_HANDLE, &sh->state);
	if (rmw < rcw && rmw > 0)
		/* prefer read-modify-write, but need to get some data */
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if ((dev->towrite || i == sh->pd_idx) &&
			    !test_bit(R5_LOCKED, &dev->flags) &&
			    !(test_bit(R5_UPTODATE, &dev->flags) ||
			      test_bit(R5_Wantcompute, &dev->flags)) &&
			    test_bit(R5_Insync, &dev->flags)) {
				if (
				  test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
					pr_debug("Read_old block "
						 "%d for r-m-w\n", i);
					set_bit(R5_LOCKED, &dev->flags);
					set_bit(R5_Wantread, &dev->flags);
					s->locked++;
				} else {
					set_bit(STRIPE_DELAYED, &sh->state);
					set_bit(STRIPE_HANDLE, &sh->state);
				}
			}
		}
	if (rcw <= rmw && rcw > 0) {
		/* want reconstruct write, but need to get some data */
		rcw = 0;
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (!test_bit(R5_OVERWRITE, &dev->flags) &&
			    i != sh->pd_idx && i != sh->qd_idx &&
			    !test_bit(R5_LOCKED, &dev->flags) &&
			    !(test_bit(R5_UPTODATE, &dev->flags) ||
			      test_bit(R5_Wantcompute, &dev->flags))) {
				rcw++;
				if (!test_bit(R5_Insync, &dev->flags))
					continue; /* it's a failed drive */
				if (
				  test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
					pr_debug("Read_old block "
						 "%d for Reconstruct\n", i);
					set_bit(R5_LOCKED, &dev->flags);
					set_bit(R5_Wantread, &dev->flags);
					s->locked++;
				} else {
					set_bit(STRIPE_DELAYED, &sh->state);
					set_bit(STRIPE_HANDLE, &sh->state);
				}
			}
		}
	}
	/* now if nothing is locked, and if we have enough data,
	 * we can start a write request
	 */
	/* since handle_stripe can be called at any time we need to handle the
	 * case where a compute block operation has been submitted and then a
	 * subsequent call wants to start a write request.  raid_run_ops only
	 * handles the case where compute block and reconstruct are requested
	 * simultaneously.  If this is not the case then new writes need to be
	 * held off until the compute completes.
	 */
	if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) &&
	    (s->locked == 0 && (rcw == 0 || rmw == 0) &&
	    !test_bit(STRIPE_BIT_DELAY, &sh->state)))
		schedule_reconstruction(sh, s, rcw == 0, 0);
}
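
/*
 * Parity checking is a small state machine driven by sh->check_state:
 * idle -> run (async check in flight) -> check_result; if the parity was
 * wrong and repair is allowed, compute_run regenerates the block and
 * compute_result writes it back.  handle_parity_checks5() walks these
 * states for the single RAID4/5 parity block.
 */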
static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh,
				struct stripe_head_state *s, int disks)
{
	struct r5dev *dev = NULL;

	set_bit(STRIPE_HANDLE, &sh->state);

	switch (sh->check_state) {
	case check_state_idle:
		/* start a new check operation if there are no failures */
		if (s->failed == 0) {
			BUG_ON(s->uptodate != disks);
			sh->check_state = check_state_run;
			set_bit(STRIPE_OP_CHECK, &s->ops_request);
			clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
			s->uptodate--;
			break;
		}
		dev = &sh->dev[s->failed_num[0]];
		/* fall through */
	case check_state_compute_result:
		sh->check_state = check_state_idle;
		if (!dev)
			dev = &sh->dev[sh->pd_idx];

		/* check that a write has not made the stripe insync */
		if (test_bit(STRIPE_INSYNC, &sh->state))
			break;

		/* either failed parity check, or recovery is happening */
		BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
		BUG_ON(s->uptodate != disks);

		set_bit(R5_LOCKED, &dev->flags);
		s->locked++;
		set_bit(R5_Wantwrite, &dev->flags);

		clear_bit(STRIPE_DEGRADED, &sh->state);
		set_bit(STRIPE_INSYNC, &sh->state);
		break;
	case check_state_run:
		break; /* we will be called again upon completion */
	case check_state_check_result:
		sh->check_state = check_state_idle;

		/* if a failure occurred during the check operation, leave
		 * STRIPE_INSYNC not set and let the stripe be handled again
		 */
		if (s->failed)
			break;

		/* handle a successful check operation, if parity is correct
		 * we are done.  Otherwise update the mismatch count and repair
		 * parity if !MD_RECOVERY_CHECK
		 */
		if ((sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) == 0)
			/* parity is correct (on disc,
			 * not in buffer any more)
			 */
			set_bit(STRIPE_INSYNC, &sh->state);
		else {
			conf->mddev->resync_mismatches += STRIPE_SECTORS;
			if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
				/* don't try to repair!! */
				set_bit(STRIPE_INSYNC, &sh->state);
			else {
				sh->check_state = check_state_compute_run;
				set_bit(STRIPE_COMPUTE_RUN, &sh->state);
				set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
				set_bit(R5_Wantcompute,
					&sh->dev[sh->pd_idx].flags);
				sh->ops.target = sh->pd_idx;
				sh->ops.target2 = -1;
				s->uptodate++;
			}
		}
		break;
	case check_state_compute_run:
		break;
	default:
		printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
		       __func__, sh->check_state,
		       (unsigned long long) sh->sector);
		BUG();
	}
}
static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
				  struct stripe_head_state *s,
				  int disks)
{
	int pd_idx = sh->pd_idx;
	int qd_idx = sh->qd_idx;
	struct r5dev *dev;

	set_bit(STRIPE_HANDLE, &sh->state);

	BUG_ON(s->failed > 2);

	/* Want to check and possibly repair P and Q.
	 * However there could be one 'failed' device, in which
	 * case we can only check one of them, possibly using the
	 * other to generate missing data
	 */

	switch (sh->check_state) {
	case check_state_idle:
		/* start a new check operation if there are < 2 failures */
		if (s->failed == s->q_failed) {
			/* The only possible failed device holds Q, so it
			 * makes sense to check P (If anything else were failed,
			 * we would have used P to recreate it).
			 */
			sh->check_state = check_state_run;
		}
		if (!s->q_failed && s->failed < 2) {
			/* Q is not failed, and we didn't use it to generate
			 * anything, so it makes sense to check it
			 */
			if (sh->check_state == check_state_run)
				sh->check_state = check_state_run_pq;
			else
				sh->check_state = check_state_run_q;
		}

		/* discard potentially stale zero_sum_result */
		sh->ops.zero_sum_result = 0;

		if (sh->check_state == check_state_run) {
			/* async_xor_zero_sum destroys the contents of P */
			clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
			s->uptodate--;
		}
		if (sh->check_state >= check_state_run &&
		    sh->check_state <= check_state_run_pq) {
			/* async_syndrome_zero_sum preserves P and Q, so
			 * no need to mark them !uptodate here
			 */
			set_bit(STRIPE_OP_CHECK, &s->ops_request);
			break;
		}

		/* we have 2-disk failure */
		BUG_ON(s->failed != 2);
		/* fall through */
	case check_state_compute_result:
		sh->check_state = check_state_idle;

		/* check that a write has not made the stripe insync */
		if (test_bit(STRIPE_INSYNC, &sh->state))
			break;

		/* now write out any block on a failed drive,
		 * or P or Q if they were recomputed
		 */
		BUG_ON(s->uptodate < disks - 1); /* We don't need Q to recover */
		if (s->failed == 2) {
			dev = &sh->dev[s->failed_num[1]];
			s->locked++;
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantwrite, &dev->flags);
		}
		if (s->failed >= 1) {
			dev = &sh->dev[s->failed_num[0]];
			s->locked++;
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantwrite, &dev->flags);
		}
		if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) {
			dev = &sh->dev[pd_idx];
			s->locked++;
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantwrite, &dev->flags);
		}
		if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) {
			dev = &sh->dev[qd_idx];
			s->locked++;
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantwrite, &dev->flags);
		}
		clear_bit(STRIPE_DEGRADED, &sh->state);

		set_bit(STRIPE_INSYNC, &sh->state);
		break;
	case check_state_run:
	case check_state_run_q:
	case check_state_run_pq:
		break; /* we will be called again upon completion */
	case check_state_check_result:
		sh->check_state = check_state_idle;

		/* handle a successful check operation, if parity is correct
		 * we are done.  Otherwise update the mismatch count and repair
		 * parity if !MD_RECOVERY_CHECK
		 */
		if (sh->ops.zero_sum_result == 0) {
			/* both parities are correct */
			if (!s->failed)
				set_bit(STRIPE_INSYNC, &sh->state);
			else {
				/* in contrast to the raid5 case we can validate
				 * parity, but still have a failure to write
				 * back
				 */
				sh->check_state = check_state_compute_result;
				/* Returning at this point means that we may go
				 * off and bring p and/or q uptodate again so
				 * we make sure to check zero_sum_result again
				 * to verify if p or q need writeback
				 */
			}
		} else {
			conf->mddev->resync_mismatches += STRIPE_SECTORS;
			if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
				/* don't try to repair!! */
				set_bit(STRIPE_INSYNC, &sh->state);
			else {
				int *target = &sh->ops.target;

				sh->ops.target = -1;
				sh->ops.target2 = -1;
				sh->check_state = check_state_compute_run;
				set_bit(STRIPE_COMPUTE_RUN, &sh->state);
				set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
				if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) {
					set_bit(R5_Wantcompute,
						&sh->dev[pd_idx].flags);
					*target = pd_idx;
					target = &sh->ops.target2;
					s->uptodate++;
				}
				if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) {
					set_bit(R5_Wantcompute,
						&sh->dev[qd_idx].flags);
					*target = qd_idx;
					s->uptodate++;
				}
			}
		}
		break;
	case check_state_compute_run:
		break;
	default:
		printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
		       __func__, sh->check_state,
		       (unsigned long long) sh->sector);
		BUG();
	}
}
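
/*
 * handle_stripe_expansion() runs while a reshape is under way: each data
 * block of this (old-geometry) stripe is located in the new geometry and
 * copied into the matching target stripe with async_memcpy(); targets
 * whose blocks are all present are marked STRIPE_EXPAND_READY.
 */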
static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh)
{
	int i;

	/* We have read all the blocks in this stripe and now we need to
	 * copy some of them into a target stripe for expand.
	 */
	struct dma_async_tx_descriptor *tx = NULL;
	clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
	for (i = 0; i < sh->disks; i++)
		if (i != sh->pd_idx && i != sh->qd_idx) {
			int dd_idx, j;
			struct stripe_head *sh2;
			struct async_submit_ctl submit;

			sector_t bn = compute_blocknr(sh, i, 1);
			sector_t s = raid5_compute_sector(conf, bn, 0,
							  &dd_idx, NULL);
			sh2 = get_active_stripe(conf, s, 0, 1, 1);
			if (sh2 == NULL)
				/* so far only the early blocks of this stripe
				 * have been requested.  When later blocks
				 * get requested, we will try again
				 */
				continue;
			if (!test_bit(STRIPE_EXPANDING, &sh2->state) ||
			    test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) {
				/* must have already done this block */
				release_stripe(sh2);
				continue;
			}

			/* place all the copies on one channel */
			init_async_submit(&submit, 0, tx, NULL, NULL, NULL);
			tx = async_memcpy(sh2->dev[dd_idx].page,
					  sh->dev[i].page, 0, 0, STRIPE_SIZE,
					  &submit);

			set_bit(R5_Expanded, &sh2->dev[dd_idx].flags);
			set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
			for (j = 0; j < conf->raid_disks; j++)
				if (j != sh2->pd_idx &&
				    j != sh2->qd_idx &&
				    !test_bit(R5_Expanded, &sh2->dev[j].flags))
					break;
			if (j == conf->raid_disks) {
				set_bit(STRIPE_EXPAND_READY, &sh2->state);
				set_bit(STRIPE_HANDLE, &sh2->state);
			}
			release_stripe(sh2);
		}
	/* done submitting copies, wait for them to complete */
	if (tx) {
		async_tx_ack(tx);
		dma_wait_for_async_tx(tx);
	}
}
/*
 * handle_stripe - do things to a stripe.
 *
 * We lock the stripe by setting STRIPE_ACTIVE and then examine the
 * state of various bits to see what needs to be done.
 * Possible results:
 *    return some read requests which now have data
 *    return some write requests which are safely on storage
 *    schedule a read on some buffers
 *    schedule a write of some buffers
 *    return confirmation of parity correctness
 *
 */
static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
{
	struct r5conf *conf = sh->raid_conf;
	int disks = sh->disks;
	struct r5dev *dev;
	int i;
	int do_recovery = 0;

	memset(s, 0, sizeof(*s));

	s->expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
	s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
	s->failed_num[0] = -1;
	s->failed_num[1] = -1;

	/* Now to look around and see what can be done */
	rcu_read_lock();
	for (i=disks; i--; ) {
		struct md_rdev *rdev;
		sector_t first_bad;
		int bad_sectors;
		int is_bad = 0;

		dev = &sh->dev[i];

		pr_debug("check %d: state 0x%lx read %p write %p written %p\n",
			 i, dev->flags,
			 dev->toread, dev->towrite, dev->written);
		/* maybe we can reply to a read
		 *
		 * new wantfill requests are only permitted while
		 * ops_complete_biofill is guaranteed to be inactive
		 */
		if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread &&
		    !test_bit(STRIPE_BIOFILL_RUN, &sh->state))
			set_bit(R5_Wantfill, &dev->flags);

		/* now count some things */
		if (test_bit(R5_LOCKED, &dev->flags))
			s->locked++;
		if (test_bit(R5_UPTODATE, &dev->flags))
			s->uptodate++;
		if (test_bit(R5_Wantcompute, &dev->flags)) {
			s->compute++;
			BUG_ON(s->compute > 2);
		}

		if (test_bit(R5_Wantfill, &dev->flags))
			s->to_fill++;
		else if (dev->toread)
			s->to_read++;
		if (dev->towrite) {
			s->to_write++;
			if (!test_bit(R5_OVERWRITE, &dev->flags))
				s->non_overwrite++;
		}
		if (dev->written)
			s->written++;
		/* Prefer to use the replacement for reads, but only
		 * if it is recovered enough and has no bad blocks.
		 */
		rdev = rcu_dereference(conf->disks[i].replacement);
		if (rdev && !test_bit(Faulty, &rdev->flags) &&
		    rdev->recovery_offset >= sh->sector + STRIPE_SECTORS &&
		    !is_badblock(rdev, sh->sector, STRIPE_SECTORS,
				 &first_bad, &bad_sectors))
			set_bit(R5_ReadRepl, &dev->flags);
		else {
			if (rdev)
				set_bit(R5_NeedReplace, &dev->flags);
			rdev = rcu_dereference(conf->disks[i].rdev);
			clear_bit(R5_ReadRepl, &dev->flags);
		}
		if (rdev && test_bit(Faulty, &rdev->flags))
			rdev = NULL;
		if (rdev) {
			is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
					     &first_bad, &bad_sectors);
			if (s->blocked_rdev == NULL
			    && (test_bit(Blocked, &rdev->flags)
				|| is_bad < 0)) {
				if (is_bad < 0)
					set_bit(BlockedBadBlocks,
						&rdev->flags);
				s->blocked_rdev = rdev;
				atomic_inc(&rdev->nr_pending);
			}
		}
		clear_bit(R5_Insync, &dev->flags);
		if (!rdev)
			/* Not in-sync */;
		else if (is_bad) {
			/* also not in-sync */
			if (!test_bit(WriteErrorSeen, &rdev->flags) &&
			    test_bit(R5_UPTODATE, &dev->flags)) {
				/* treat as in-sync, but with a read error
				 * which we can now try to correct
				 */
				set_bit(R5_Insync, &dev->flags);
				set_bit(R5_ReadError, &dev->flags);
			}
		} else if (test_bit(In_sync, &rdev->flags))
			set_bit(R5_Insync, &dev->flags);
		else if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
			/* in sync if before recovery_offset */
			set_bit(R5_Insync, &dev->flags);
		else if (test_bit(R5_UPTODATE, &dev->flags) &&
			 test_bit(R5_Expanded, &dev->flags))
			/* If we've reshaped into here, we assume it is Insync.
			 * We will shortly update recovery_offset to make
			 * it official.
			 */
			set_bit(R5_Insync, &dev->flags);

		if (rdev && test_bit(R5_WriteError, &dev->flags)) {
			/* This flag does not apply to '.replacement'
			 * only to .rdev, so make sure to check that*/
			struct md_rdev *rdev2 = rcu_dereference(
				conf->disks[i].rdev);
			if (rdev2 == rdev)
				clear_bit(R5_Insync, &dev->flags);
			if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
				s->handle_bad_blocks = 1;
				atomic_inc(&rdev2->nr_pending);
			} else
				clear_bit(R5_WriteError, &dev->flags);
		}
		if (rdev && test_bit(R5_MadeGood, &dev->flags)) {
			/* This flag does not apply to '.replacement'
			 * only to .rdev, so make sure to check that*/
			struct md_rdev *rdev2 = rcu_dereference(
				conf->disks[i].rdev);
			if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
				s->handle_bad_blocks = 1;
				atomic_inc(&rdev2->nr_pending);
			} else
				clear_bit(R5_MadeGood, &dev->flags);
		}
		if (test_bit(R5_MadeGoodRepl, &dev->flags)) {
			struct md_rdev *rdev2 = rcu_dereference(
				conf->disks[i].replacement);
			if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
				s->handle_bad_blocks = 1;
				atomic_inc(&rdev2->nr_pending);
			} else
				clear_bit(R5_MadeGoodRepl, &dev->flags);
		}
		if (!test_bit(R5_Insync, &dev->flags)) {
			/* The ReadError flag will just be confusing now */
			clear_bit(R5_ReadError, &dev->flags);
			clear_bit(R5_ReWrite, &dev->flags);
		}
		if (test_bit(R5_ReadError, &dev->flags))
			clear_bit(R5_Insync, &dev->flags);
		if (!test_bit(R5_Insync, &dev->flags)) {
			if (s->failed < 2)
				s->failed_num[s->failed] = i;
			s->failed++;
			if (rdev && !test_bit(Faulty, &rdev->flags))
				do_recovery = 1;
		}
	}
	if (test_bit(STRIPE_SYNCING, &sh->state)) {
		/* If there is a failed device being replaced,
		 *     we must be recovering.
		 * else if we are after recovery_cp, we must be syncing
		 * else if MD_RECOVERY_REQUESTED is set, we also are syncing.
		 * else we can only be replacing
		 * sync and recovery both need to read all devices, and so
		 * use the same flag.
		 */
		if (do_recovery ||
		    sh->sector >= conf->mddev->recovery_cp ||
		    test_bit(MD_RECOVERY_REQUESTED, &(conf->mddev->recovery)))
			s->syncing = 1;
		else
			s->replacing = 1;
	}
	rcu_read_unlock();
}
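
/*
 * The ordering in handle_stripe() matters: the stripe is analysed first,
 * then failure cases are dealt with, then completed writes are returned,
 * then missing blocks are read or computed, then new writes and parity
 * checks are scheduled, and only at the end is raid_run_ops() called to
 * kick off whatever was requested.
 */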
static void handle_stripe(struct stripe_head *sh)
{
	struct stripe_head_state s;
	struct r5conf *conf = sh->raid_conf;
	int i;
	int prexor;
	int disks = sh->disks;
	struct r5dev *pdev, *qdev;

	clear_bit(STRIPE_HANDLE, &sh->state);
	if (test_and_set_bit_lock(STRIPE_ACTIVE, &sh->state)) {
		/* already being handled, ensure it gets handled
		 * again when current action finishes */
		set_bit(STRIPE_HANDLE, &sh->state);
		return;
	}

	if (test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
		set_bit(STRIPE_SYNCING, &sh->state);
		clear_bit(STRIPE_INSYNC, &sh->state);
	}
	clear_bit(STRIPE_DELAYED, &sh->state);

	pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
		"pd_idx=%d, qd_idx=%d, check:%d, reconstruct:%d\n",
	       (unsigned long long)sh->sector, sh->state,
	       atomic_read(&sh->count), sh->pd_idx, sh->qd_idx,
	       sh->check_state, sh->reconstruct_state);

	analyse_stripe(sh, &s);

	if (s.handle_bad_blocks) {
		set_bit(STRIPE_HANDLE, &sh->state);
		goto finish;
	}

	if (unlikely(s.blocked_rdev)) {
		if (s.syncing || s.expanding || s.expanded ||
		    s.replacing || s.to_write || s.written) {
			set_bit(STRIPE_HANDLE, &sh->state);
			goto finish;
		}
		/* There is nothing for the blocked_rdev to block */
		rdev_dec_pending(s.blocked_rdev, conf->mddev);
		s.blocked_rdev = NULL;
	}

	if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) {
		set_bit(STRIPE_OP_BIOFILL, &s.ops_request);
		set_bit(STRIPE_BIOFILL_RUN, &sh->state);
	}

	pr_debug("locked=%d uptodate=%d to_read=%d"
	       " to_write=%d failed=%d failed_num=%d,%d\n",
	       s.locked, s.uptodate, s.to_read, s.to_write, s.failed,
	       s.failed_num[0], s.failed_num[1]);
	/* check if the array has lost more than max_degraded devices and,
	 * if so, some requests might need to be failed.
	 */
	if (s.failed > conf->max_degraded) {
		sh->check_state = 0;
		sh->reconstruct_state = 0;
		if (s.to_read+s.to_write+s.written)
			handle_failed_stripe(conf, sh, &s, disks, &s.return_bi);
		if (s.syncing + s.replacing)
			handle_failed_sync(conf, sh, &s);
	}

	/*
	 * might be able to return some write requests if the parity blocks
	 * are safe, or on a failed drive
	 */
	pdev = &sh->dev[sh->pd_idx];
	s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx)
		|| (s.failed >= 2 && s.failed_num[1] == sh->pd_idx);
	qdev = &sh->dev[sh->qd_idx];
	s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx)
		|| (s.failed >= 2 && s.failed_num[1] == sh->qd_idx)
		|| conf->level < 6;

	if (s.written &&
	    (s.p_failed || ((test_bit(R5_Insync, &pdev->flags)
			     && !test_bit(R5_LOCKED, &pdev->flags)
			     && test_bit(R5_UPTODATE, &pdev->flags)))) &&
	    (s.q_failed || ((test_bit(R5_Insync, &qdev->flags)
			     && !test_bit(R5_LOCKED, &qdev->flags)
			     && test_bit(R5_UPTODATE, &qdev->flags)))))
		handle_stripe_clean_event(conf, sh, disks, &s.return_bi);

	/* Now we might consider reading some blocks, either to check/generate
	 * parity, or to satisfy requests
	 * or to load a block that is being partially written.
	 */
	if (s.to_read || s.non_overwrite
	    || (conf->level == 6 && s.to_write && s.failed)
	    || (s.syncing && (s.uptodate + s.compute < disks))
	    || s.replacing
	    || s.expanding)
		handle_stripe_fill(sh, &s, disks);

	/* Now we check to see if any write operations have recently
	 * completed
	 */
	prexor = 0;
	if (sh->reconstruct_state == reconstruct_state_prexor_drain_result)
		prexor = 1;
	if (sh->reconstruct_state == reconstruct_state_drain_result ||
	    sh->reconstruct_state == reconstruct_state_prexor_drain_result) {
		sh->reconstruct_state = reconstruct_state_idle;

		/* All the 'written' buffers and the parity block are ready to
		 * be written back to disk
		 */
		BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags));
		BUG_ON(sh->qd_idx >= 0 &&
		       !test_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags));
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (test_bit(R5_LOCKED, &dev->flags) &&
				(i == sh->pd_idx || i == sh->qd_idx ||
				 dev->written)) {
				pr_debug("Writing block %d\n", i);
				set_bit(R5_Wantwrite, &dev->flags);
				if (prexor)
					continue;
				if (!test_bit(R5_Insync, &dev->flags) ||
				    ((i == sh->pd_idx || i == sh->qd_idx) &&
				     s.failed == 0))
					set_bit(STRIPE_INSYNC, &sh->state);
			}
		}
		if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
			s.dec_preread_active = 1;
	}

	/* Now to consider new write requests and what else, if anything
	 * should be read.  We do not handle new writes when:
	 * 1/ A 'write' operation (copy+xor) is already in flight.
	 * 2/ A 'check' operation is in flight, as it may clobber the parity
	 *    block.
	 */
	if (s.to_write && !sh->reconstruct_state && !sh->check_state)
		handle_stripe_dirtying(conf, sh, &s, disks);

	/* maybe we need to check and possibly fix the parity for this stripe
	 * Any reads will already have been scheduled, so we just see if enough
	 * data is available.  The parity check is held off while parity
	 * dependent operations are in flight.
	 */
	if (sh->check_state ||
	    (s.syncing && s.locked == 0 &&
	     !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
	     !test_bit(STRIPE_INSYNC, &sh->state))) {
		if (conf->level == 6)
			handle_parity_checks6(conf, sh, &s, disks);
		else
			handle_parity_checks5(conf, sh, &s, disks);
	}

	if (s.replacing && s.locked == 0
	    && !test_bit(STRIPE_INSYNC, &sh->state)) {
		/* Write out to replacement devices where possible */
		for (i = 0; i < conf->raid_disks; i++)
			if (test_bit(R5_UPTODATE, &sh->dev[i].flags) &&
			    test_bit(R5_NeedReplace, &sh->dev[i].flags)) {
				set_bit(R5_WantReplace, &sh->dev[i].flags);
				set_bit(R5_LOCKED, &sh->dev[i].flags);
				s.locked++;
			}
		set_bit(STRIPE_INSYNC, &sh->state);
	}
	if ((s.syncing || s.replacing) && s.locked == 0 &&
	    test_bit(STRIPE_INSYNC, &sh->state)) {
		md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
		clear_bit(STRIPE_SYNCING, &sh->state);
	}

	/* If the failed drives are just a ReadError, then we might need
	 * to progress the repair/check process
	 */
	if (s.failed <= conf->max_degraded && !conf->mddev->ro)
		for (i = 0; i < s.failed; i++) {
			struct r5dev *dev = &sh->dev[s.failed_num[i]];
			if (test_bit(R5_ReadError, &dev->flags)
			    && !test_bit(R5_LOCKED, &dev->flags)
			    && test_bit(R5_UPTODATE, &dev->flags)
				) {
				if (!test_bit(R5_ReWrite, &dev->flags)) {
					set_bit(R5_Wantwrite, &dev->flags);
					set_bit(R5_ReWrite, &dev->flags);
					set_bit(R5_LOCKED, &dev->flags);
					s.locked++;
				} else {
					/* let's read it back */
					set_bit(R5_Wantread, &dev->flags);
					set_bit(R5_LOCKED, &dev->flags);
					s.locked++;
				}
			}
		}

	/* Finish reconstruct operations initiated by the expansion process */
	if (sh->reconstruct_state == reconstruct_state_result) {
		struct stripe_head *sh_src
			= get_active_stripe(conf, sh->sector, 1, 1, 1);
		if (sh_src && test_bit(STRIPE_EXPAND_SOURCE, &sh_src->state)) {
			/* sh cannot be written until sh_src has been read.
			 * so arrange for sh to be delayed a little
			 */
			set_bit(STRIPE_DELAYED, &sh->state);
			set_bit(STRIPE_HANDLE, &sh->state);
			if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE,
					      &sh_src->state))
				atomic_inc(&conf->preread_active_stripes);
			release_stripe(sh_src);
			goto finish;
		}
		if (sh_src)
			release_stripe(sh_src);

		sh->reconstruct_state = reconstruct_state_idle;
		clear_bit(STRIPE_EXPANDING, &sh->state);
		for (i = conf->raid_disks; i--; ) {
			set_bit(R5_Wantwrite, &sh->dev[i].flags);
			set_bit(R5_LOCKED, &sh->dev[i].flags);
			s.locked++;
		}
	}

	if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
	    !sh->reconstruct_state) {
		/* Need to write out all blocks after computing parity */
		sh->disks = conf->raid_disks;
		stripe_set_idx(sh->sector, conf, 0, sh);
		schedule_reconstruction(sh, &s, 1, 1);
	} else if (s.expanded && !sh->reconstruct_state && s.locked == 0) {
		clear_bit(STRIPE_EXPAND_READY, &sh->state);
		atomic_dec(&conf->reshape_stripes);
		wake_up(&conf->wait_for_overlap);
		md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
	}

	if (s.expanding && s.locked == 0 &&
	    !test_bit(STRIPE_COMPUTE_RUN, &sh->state))
		handle_stripe_expansion(conf, sh);

finish:
	/* wait for this device to become unblocked */
	if (unlikely(s.blocked_rdev)) {
		if (conf->mddev->external)
			md_wait_for_blocked_rdev(s.blocked_rdev,
						 conf->mddev);
		else
			/* Internal metadata will immediately
			 * be written by raid5d, so we don't
			 * need to wait here.
			 */
			rdev_dec_pending(s.blocked_rdev,
					 conf->mddev);
	}

	if (s.handle_bad_blocks)
		for (i = disks; i--; ) {
			struct md_rdev *rdev;
			struct r5dev *dev = &sh->dev[i];
			if (test_and_clear_bit(R5_WriteError, &dev->flags)) {
				/* We own a safe reference to the rdev */
				rdev = conf->disks[i].rdev;
				if (!rdev_set_badblocks(rdev, sh->sector,
							STRIPE_SECTORS, 0))
					md_error(conf->mddev, rdev);
				rdev_dec_pending(rdev, conf->mddev);
			}
			if (test_and_clear_bit(R5_MadeGood, &dev->flags)) {
				rdev = conf->disks[i].rdev;
				rdev_clear_badblocks(rdev, sh->sector,
						     STRIPE_SECTORS, 0);
				rdev_dec_pending(rdev, conf->mddev);
			}
			if (test_and_clear_bit(R5_MadeGoodRepl, &dev->flags)) {
				rdev = conf->disks[i].replacement;
				if (!rdev)
					/* rdev have been moved down */
					rdev = conf->disks[i].rdev;
				rdev_clear_badblocks(rdev, sh->sector,
						     STRIPE_SECTORS, 0);
				rdev_dec_pending(rdev, conf->mddev);
			}
		}

	if (s.ops_request)
		raid_run_ops(sh, s.ops_request);

	ops_run_io(sh, &s);

	if (s.dec_preread_active) {
		/* We delay this until after ops_run_io so that if make_request
		 * is waiting on a flush, it won't continue until the writes
		 * have actually been submitted.
		 */
		atomic_dec(&conf->preread_active_stripes);
		if (atomic_read(&conf->preread_active_stripes) <
		    IO_THRESHOLD)
			md_wakeup_thread(conf->mddev->thread);
	}

	return_io(s.return_bi);

	clear_bit_unlock(STRIPE_ACTIVE, &sh->state);
}
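
/*
 * Delayed stripes (see STRIPE_DELAYED) are only activated once the number
 * of preread-active stripes falls below IO_THRESHOLD, which batches the
 * pre-reads needed for partial-stripe writes instead of issuing them the
 * moment each write arrives.
 */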
static void raid5_activate_delayed(struct r5conf *conf)
{
	if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
		while (!list_empty(&conf->delayed_list)) {
			struct list_head *l = conf->delayed_list.next;
			struct stripe_head *sh;
			sh = list_entry(l, struct stripe_head, lru);
			list_del_init(l);
			clear_bit(STRIPE_DELAYED, &sh->state);
			if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
				atomic_inc(&conf->preread_active_stripes);
			list_add_tail(&sh->lru, &conf->hold_list);
		}
	}
}
static void activate_bit_delay(struct r5conf *conf)
{
	/* device_lock is held */
	struct list_head head;
	list_add(&head, &conf->bitmap_list);
	list_del_init(&conf->bitmap_list);
	while (!list_empty(&head)) {
		struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru);
		list_del_init(&sh->lru);
		atomic_inc(&sh->count);
		__release_stripe(conf, sh);
	}
}
int md_raid5_congested(struct mddev *mddev, int bits)
{
	struct r5conf *conf = mddev->private;

	/* No difference between reads and writes.  Just check
	 * how busy the stripe_cache is
	 */

	if (conf->inactive_blocked)
		return 1;
	if (list_empty_careful(&conf->inactive_list))
		return 1;

	return 0;
}
EXPORT_SYMBOL_GPL(md_raid5_congested);

static int raid5_congested(void *data, int bits)
{
	struct mddev *mddev = data;

	return mddev_congested(mddev, bits) ||
		md_raid5_congested(mddev, bits);
}
/* We want read requests to align with chunks where possible,
 * but write requests don't need to.
 */
static int raid5_mergeable_bvec(struct request_queue *q,
				struct bvec_merge_data *bvm,
				struct bio_vec *biovec)
{
	struct mddev *mddev = q->queuedata;
	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
	int max;
	unsigned int chunk_sectors = mddev->chunk_sectors;
	unsigned int bio_sectors = bvm->bi_size >> 9;

	if ((bvm->bi_rw & 1) == WRITE)
		return biovec->bv_len; /* always allow writes to be mergeable */

	if (mddev->new_chunk_sectors < mddev->chunk_sectors)
		chunk_sectors = mddev->new_chunk_sectors;
	max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
	if (max < 0) max = 0;
	if (max <= biovec->bv_len && bio_sectors == 0)
		return biovec->bv_len;
	else
		return max;
}
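
/*
 * Since chunk_sectors is a power of two, (sector & (chunk_sectors - 1)) is
 * the offset of 'sector' within its chunk, and a bio stays inside one
 * chunk exactly when that offset plus its length fits in chunk_sectors.
 * For example (hypothetical numbers), with 128-sector chunks a 16-sector
 * bio at in-chunk offset 120 would span a boundary (120 + 16 > 128) and
 * is therefore rejected.
 */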
static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
{
	sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
	unsigned int chunk_sectors = mddev->chunk_sectors;
	unsigned int bio_sectors = bio->bi_size >> 9;

	if (mddev->new_chunk_sectors < mddev->chunk_sectors)
		chunk_sectors = mddev->new_chunk_sectors;
	return  chunk_sectors >=
		((sector & (chunk_sectors - 1)) + bio_sectors);
}
/*
 *  add bio to the retry LIFO  ( in O(1) ... we are in interrupt )
 *  later sampled by raid5d.
 */
static void add_bio_to_retry(struct bio *bi,struct r5conf *conf)
{
	unsigned long flags;

	spin_lock_irqsave(&conf->device_lock, flags);

	bi->bi_next = conf->retry_read_aligned_list;
	conf->retry_read_aligned_list = bi;

	spin_unlock_irqrestore(&conf->device_lock, flags);
	md_wakeup_thread(conf->mddev->thread);
}

static struct bio *remove_bio_from_retry(struct r5conf *conf)
{
	struct bio *bi;

	bi = conf->retry_read_aligned;
	if (bi) {
		conf->retry_read_aligned = NULL;
		return bi;
	}
	bi = conf->retry_read_aligned_list;
	if (bi) {
		conf->retry_read_aligned_list = bi->bi_next;
		bi->bi_next = NULL;
		/*
		 * this sets the active stripe count to 1 and the processed
		 * stripe count to zero (upper 16 bits)
		 */
		raid5_set_bi_stripes(bi, 1); /* biased count of active stripes */
	}

	return bi;
}
/*
 *  The "raid5_align_endio" should check if the read succeeded and if it
 *  did, call bio_endio on the original bio (having bio_put the new bio
 *  first).
 *  If the read failed..
 */
static void raid5_align_endio(struct bio *bi, int error)
{
	struct bio* raid_bi  = bi->bi_private;
	struct mddev *mddev;
	struct r5conf *conf;
	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
	struct md_rdev *rdev;

	bio_put(bi);

	rdev = (void*)raid_bi->bi_next;
	raid_bi->bi_next = NULL;
	mddev = rdev->mddev;
	conf = mddev->private;

	rdev_dec_pending(rdev, conf->mddev);

	if (!error && uptodate) {
		bio_endio(raid_bi, 0);
		if (atomic_dec_and_test(&conf->active_aligned_reads))
			wake_up(&conf->wait_for_stripe);
		return;
	}

	pr_debug("raid5_align_endio : io error...handing IO for a retry\n");

	add_bio_to_retry(raid_bi, conf);
}
static int bio_fits_rdev(struct bio *bi)
{
	struct request_queue *q = bdev_get_queue(bi->bi_bdev);

	if ((bi->bi_size>>9) > queue_max_sectors(q))
		return 0;
	blk_recount_segments(q, bi);
	if (bi->bi_phys_segments > queue_max_segments(q))
		return 0;

	if (q->merge_bvec_fn)
		/* it's too hard to apply the merge_bvec_fn at this stage,
		 * just give up
		 */
		return 0;

	return 1;
}
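
/*
 * chunk_aligned_read() lets a read that fits entirely inside one chunk
 * bypass the stripe cache: the bio is cloned, retargeted at the one member
 * disk that holds the data, and submitted directly; on failure
 * raid5_align_endio() falls back to the normal retry path.
 */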
static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
{
	struct r5conf *conf = mddev->private;
	int dd_idx;
	struct bio* align_bi;
	struct md_rdev *rdev;
	sector_t end_sector;

	if (!in_chunk_boundary(mddev, raid_bio)) {
		pr_debug("chunk_aligned_read : non aligned\n");
		return 0;
	}
	/*
	 * use bio_clone_mddev to make a copy of the bio
	 */
	align_bi = bio_clone_mddev(raid_bio, GFP_NOIO, mddev);
	if (!align_bi)
		return 0;
	/*
	 *   set bi_end_io to a new function, and set bi_private to the
	 *   original bio.
	 */
	align_bi->bi_end_io  = raid5_align_endio;
	align_bi->bi_private = raid_bio;
	/*
	 *	compute position
	 */
	align_bi->bi_sector = raid5_compute_sector(conf, raid_bio->bi_sector,
						   0,
						   &dd_idx, NULL);

	end_sector = align_bi->bi_sector + (align_bi->bi_size >> 9);
	rcu_read_lock();
	rdev = rcu_dereference(conf->disks[dd_idx].replacement);
	if (!rdev || test_bit(Faulty, &rdev->flags) ||
	    rdev->recovery_offset < end_sector) {
		rdev = rcu_dereference(conf->disks[dd_idx].rdev);
		if (rdev &&
		    (test_bit(Faulty, &rdev->flags) ||
		    !(test_bit(In_sync, &rdev->flags) ||
		      rdev->recovery_offset >= end_sector)))
			rdev = NULL;
	}
	if (rdev) {
		sector_t first_bad;
		int bad_sectors;

		atomic_inc(&rdev->nr_pending);
		rcu_read_unlock();
		raid_bio->bi_next = (void*)rdev;
		align_bi->bi_bdev =  rdev->bdev;
		align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);

		if (!bio_fits_rdev(align_bi) ||
		    is_badblock(rdev, align_bi->bi_sector, align_bi->bi_size>>9,
				&first_bad, &bad_sectors)) {
			/* too big in some way, or has a known bad block */
			bio_put(align_bi);
			rdev_dec_pending(rdev, mddev);
			return 0;
		}

		/* No reshape active, so we can trust rdev->data_offset */
		align_bi->bi_sector += rdev->data_offset;

		spin_lock_irq(&conf->device_lock);
		wait_event_lock_irq(conf->wait_for_stripe,
				    conf->quiesce == 0,
				    conf->device_lock, /* nothing */);
		atomic_inc(&conf->active_aligned_reads);
		spin_unlock_irq(&conf->device_lock);

		generic_make_request(align_bi);
		return 1;
	} else {
		rcu_read_unlock();
		bio_put(align_bi);
		return 0;
	}
}
/* __get_priority_stripe - get the next stripe to process
 *
 * Full stripe writes are allowed to pass preread active stripes up until
 * the bypass_threshold is exceeded. In general the bypass_count
 * increments when the handle_list is handled before the hold_list; however, it
 * will not be incremented when STRIPE_IO_STARTED is sampled set signifying a
 * stripe with in flight i/o. The bypass_count will be reset when the
 * head of the hold_list has changed, i.e. the head was promoted to the
 * handle_list.
 */
static struct stripe_head *__get_priority_stripe(struct r5conf *conf)
{
	struct stripe_head *sh;

	pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n",
		  __func__,
		  list_empty(&conf->handle_list) ? "empty" : "busy",
		  list_empty(&conf->hold_list) ? "empty" : "busy",
		  atomic_read(&conf->pending_full_writes), conf->bypass_count);

	if (!list_empty(&conf->handle_list)) {
		sh = list_entry(conf->handle_list.next, typeof(*sh), lru);

		if (list_empty(&conf->hold_list))
			conf->bypass_count = 0;
		else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) {
			if (conf->hold_list.next == conf->last_hold)
				conf->bypass_count++;
			else {
				conf->last_hold = conf->hold_list.next;
				conf->bypass_count -= conf->bypass_threshold;
				if (conf->bypass_count < 0)
					conf->bypass_count = 0;
			}
		}
	} else if (!list_empty(&conf->hold_list) &&
		   ((conf->bypass_threshold &&
		     conf->bypass_count > conf->bypass_threshold) ||
		    atomic_read(&conf->pending_full_writes) == 0)) {
		sh = list_entry(conf->hold_list.next,
				typeof(*sh), lru);
		conf->bypass_count -= conf->bypass_threshold;
		if (conf->bypass_count < 0)
			conf->bypass_count = 0;
	} else
		return NULL;

	list_del_init(&sh->lru);
	atomic_inc(&sh->count);
	BUG_ON(atomic_read(&sh->count) != 1);
	return sh;
}
struct raid5_plug_cb {
	struct blk_plug_cb	cb;
	struct list_head	list;
};

static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
{
	struct raid5_plug_cb *cb = container_of(
		blk_cb, struct raid5_plug_cb, cb);
	struct stripe_head *sh;
	struct mddev *mddev = cb->cb.data;
	struct r5conf *conf = mddev->private;

	if (cb->list.next && !list_empty(&cb->list)) {
		spin_lock_irq(&conf->device_lock);
		while (!list_empty(&cb->list)) {
			sh = list_first_entry(&cb->list, struct stripe_head, lru);
			list_del_init(&sh->lru);
			/*
			 * avoid race release_stripe_plug() sees
			 * STRIPE_ON_UNPLUG_LIST clear but the stripe
			 * is still in our list
			 */
			smp_mb__before_clear_bit();
			clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state);
			__release_stripe(conf, sh);
		}
		spin_unlock_irq(&conf->device_lock);
	}
	kfree(cb);
}

static void release_stripe_plug(struct mddev *mddev,
				struct stripe_head *sh)
{
	struct blk_plug_cb *blk_cb = blk_check_plugged(
		raid5_unplug, mddev,
		sizeof(struct raid5_plug_cb));
	struct raid5_plug_cb *cb;

	if (!blk_cb) {
		release_stripe(sh);
		return;
	}

	cb = container_of(blk_cb, struct raid5_plug_cb, cb);

	if (cb->list.next == NULL)
		INIT_LIST_HEAD(&cb->list);

	if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state))
		list_add_tail(&sh->lru, &cb->list);
	else
		release_stripe(sh);
}
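
/*
 * make_request() splits each incoming bio into STRIPE_SECTORS-sized
 * pieces, maps every piece to its stripe with raid5_compute_sector(),
 * attaches it with add_stripe_bio() and releases the stripe for handling;
 * bi_phys_segments counts the stripes still holding a reference so the
 * bio can be completed when the last one finishes.
 */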
4070 static void make_request(struct mddev
*mddev
, struct bio
* bi
)
4072 struct r5conf
*conf
= mddev
->private;
4074 sector_t new_sector
;
4075 sector_t logical_sector
, last_sector
;
4076 struct stripe_head
*sh
;
4077 const int rw
= bio_data_dir(bi
);
4080 if (unlikely(bi
->bi_rw
& REQ_FLUSH
)) {
4081 md_flush_request(mddev
, bi
);
4085 md_write_start(mddev
, bi
);
4088 mddev
->reshape_position
== MaxSector
&&
4089 chunk_aligned_read(mddev
,bi
))
4092 logical_sector
= bi
->bi_sector
& ~((sector_t
)STRIPE_SECTORS
-1);
4093 last_sector
= bi
->bi_sector
+ (bi
->bi_size
>>9);
4095 bi
->bi_phys_segments
= 1; /* over-loaded to count active stripes */
	for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
		DEFINE_WAIT(w);
		int previous;

	retry:
		previous = 0;
		prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
		if (unlikely(conf->reshape_progress != MaxSector)) {
			/* spinlock is needed as reshape_progress may be
			 * 64bit on a 32bit platform, and so it might be
			 * possible to see a half-updated value.
			 * Of course reshape_progress could change after
			 * the lock is dropped, so once we get a reference
			 * to the stripe that we think it is, we will have
			 * to check again.
			 */
			spin_lock_irq(&conf->device_lock);
			if (mddev->reshape_backwards
			    ? logical_sector < conf->reshape_progress
			    : logical_sector >= conf->reshape_progress) {
				previous = 1;
			} else {
				if (mddev->reshape_backwards
				    ? logical_sector < conf->reshape_safe
				    : logical_sector >= conf->reshape_safe) {
					spin_unlock_irq(&conf->device_lock);
					schedule();
					goto retry;
				}
			}
			spin_unlock_irq(&conf->device_lock);
		}

		new_sector = raid5_compute_sector(conf, logical_sector,
						  previous,
						  &dd_idx, NULL);
		pr_debug("raid456: make_request, sector %llu logical %llu\n",
			 (unsigned long long)new_sector,
			 (unsigned long long)logical_sector);

		sh = get_active_stripe(conf, new_sector, previous,
				       (bi->bi_rw&RWA_MASK), 0);
		if (sh) {
			if (unlikely(previous)) {
				/* expansion might have moved on while waiting for a
				 * stripe, so we must do the range check again.
				 * Expansion could still move past after this
				 * test, but as we are holding a reference to
				 * 'sh', we know that if that happens,
				 * STRIPE_EXPANDING will get set and the expansion
				 * won't proceed until we finish with the stripe.
				 */
				int must_retry = 0;
				spin_lock_irq(&conf->device_lock);
				if (mddev->reshape_backwards
				    ? logical_sector >= conf->reshape_progress
				    : logical_sector < conf->reshape_progress)
					/* mismatch, need to try again */
					must_retry = 1;
				spin_unlock_irq(&conf->device_lock);
				if (must_retry) {
					release_stripe(sh);
					schedule();
					goto retry;
				}
			}

			if (rw == WRITE &&
			    logical_sector >= mddev->suspend_lo &&
			    logical_sector < mddev->suspend_hi) {
				release_stripe(sh);
				/* As the suspend_* range is controlled by
				 * userspace, we want an interruptible
				 * wait.
				 */
				flush_signals(current);
				prepare_to_wait(&conf->wait_for_overlap,
						&w, TASK_INTERRUPTIBLE);
				if (logical_sector >= mddev->suspend_lo &&
				    logical_sector < mddev->suspend_hi)
					schedule();
				goto retry;
			}

			if (test_bit(STRIPE_EXPANDING, &sh->state) ||
			    !add_stripe_bio(sh, bi, dd_idx, rw)) {
				/* Stripe is busy expanding or
				 * add failed due to overlap.  Flush everything
				 * and wait a while.
				 */
				md_wakeup_thread(mddev->thread);
				release_stripe(sh);
				schedule();
				goto retry;
			}
			finish_wait(&conf->wait_for_overlap, &w);
			set_bit(STRIPE_HANDLE, &sh->state);
			clear_bit(STRIPE_DELAYED, &sh->state);
			if ((bi->bi_rw & REQ_NOIDLE) &&
			    !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
				atomic_inc(&conf->preread_active_stripes);
			release_stripe_plug(mddev, sh);
		} else {
			/* cannot get stripe for read-ahead, just give-up */
			clear_bit(BIO_UPTODATE, &bi->bi_flags);
			finish_wait(&conf->wait_for_overlap, &w);
			break;
		}
	}

	remaining = raid5_dec_bi_active_stripes(bi);
	if (remaining == 0) {
		if (rw == WRITE)
			md_write_end(mddev);
		bio_endio(bi, 0);
	}
}
static sector_t raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks);

static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *skipped)
{
	/* reshaping is quite different to recovery/resync so it is
	 * handled quite separately ... here.
	 *
	 * On each call to sync_request, we gather one chunk worth of
	 * destination stripes and flag them as expanding.
	 * Then we find all the source stripes and request reads.
	 * As the reads complete, handle_stripe will copy the data
	 * into the destination stripe and release that stripe.
	 */
	struct r5conf *conf = mddev->private;
	struct stripe_head *sh;
	sector_t first_sector, last_sector;
	int raid_disks = conf->previous_raid_disks;
	int data_disks = raid_disks - conf->max_degraded;
	int new_data_disks = conf->raid_disks - conf->max_degraded;
	int i;
	int dd_idx;
	sector_t writepos, readpos, safepos;
	sector_t stripe_addr;
	int reshape_sectors;
	struct list_head stripes;

	if (sector_nr == 0) {
		/* If restarting in the middle, skip the initial sectors */
		if (mddev->reshape_backwards &&
		    conf->reshape_progress < raid5_size(mddev, 0, 0)) {
			sector_nr = raid5_size(mddev, 0, 0)
				- conf->reshape_progress;
		} else if (!mddev->reshape_backwards &&
			   conf->reshape_progress > 0)
			sector_nr = conf->reshape_progress;
		sector_div(sector_nr, new_data_disks);
		if (sector_nr) {
			mddev->curr_resync_completed = sector_nr;
			sysfs_notify(&mddev->kobj, NULL, "sync_completed");
			*skipped = 1;
			return sector_nr;
		}
	}
	/* We need to process a full chunk at a time.
	 * If old and new chunk sizes differ, we need to process the
	 * largest of these.
	 */
	if (mddev->new_chunk_sectors > mddev->chunk_sectors)
		reshape_sectors = mddev->new_chunk_sectors;
	else
		reshape_sectors = mddev->chunk_sectors;
	/* We update the metadata at least every 10 seconds, or when
	 * the data about to be copied would over-write the source of
	 * the data at the front of the range.  i.e. one new_stripe
	 * along from reshape_progress new_maps to after where
	 * reshape_safe old_maps to.
	 */
	writepos = conf->reshape_progress;
	sector_div(writepos, new_data_disks);
	readpos = conf->reshape_progress;
	sector_div(readpos, data_disks);
	safepos = conf->reshape_safe;
	sector_div(safepos, data_disks);
	if (mddev->reshape_backwards) {
		writepos -= min_t(sector_t, reshape_sectors, writepos);
		readpos += reshape_sectors;
		safepos += reshape_sectors;
	} else {
		writepos += reshape_sectors;
		readpos -= min_t(sector_t, reshape_sectors, readpos);
		safepos -= min_t(sector_t, reshape_sectors, safepos);
	}
	/* Having calculated the 'writepos' possibly use it
	 * to set 'stripe_addr' which is where we will write to.
	 */
	if (mddev->reshape_backwards) {
		BUG_ON(conf->reshape_progress == 0);
		stripe_addr = writepos;
		BUG_ON((mddev->dev_sectors &
			~((sector_t)reshape_sectors - 1))
		       - reshape_sectors - stripe_addr
		       != sector_nr);
	} else {
		BUG_ON(writepos != sector_nr + reshape_sectors);
		stripe_addr = sector_nr;
	}
	/* 'writepos' is the most advanced device address we might write.
	 * 'readpos' is the least advanced device address we might read.
	 * 'safepos' is the least address recorded in the metadata as having
	 * been reshaped.
	 * If there is a min_offset_diff, these are adjusted either by
	 * increasing the safepos/readpos if diff is negative, or
	 * increasing writepos if diff is positive.
	 * If 'readpos' is then behind 'writepos', there is no way that we can
	 * ensure safety in the face of a crash - that must be done by userspace
	 * making a backup of the data.  So in that case there is no particular
	 * rush to update metadata.
	 * Otherwise if 'safepos' is behind 'writepos', then we really need to
	 * update the metadata to advance 'safepos' to match 'readpos' so that
	 * we can be safe in the event of a crash.
	 * So we insist on updating metadata if safepos is behind writepos and
	 * readpos is beyond writepos.
	 * In any case, update the metadata every 10 seconds.
	 * Maybe that number should be configurable, but I'm not sure it is
	 * worth it.... maybe it could be a multiple of safemode_delay???
	 */
	if (conf->min_offset_diff < 0) {
		safepos += -conf->min_offset_diff;
		readpos += -conf->min_offset_diff;
	} else
		writepos += conf->min_offset_diff;

	if ((mddev->reshape_backwards
	     ? (safepos > writepos && readpos < writepos)
	     : (safepos < writepos && readpos > writepos)) ||
	    time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
		/* Cannot proceed until we've updated the superblock... */
		wait_event(conf->wait_for_overlap,
			   atomic_read(&conf->reshape_stripes)==0);
		mddev->reshape_position = conf->reshape_progress;
		mddev->curr_resync_completed = sector_nr;
		conf->reshape_checkpoint = jiffies;
		set_bit(MD_CHANGE_DEVS, &mddev->flags);
		md_wakeup_thread(mddev->thread);
		wait_event(mddev->sb_wait, mddev->flags == 0 ||
			   kthread_should_stop());
		spin_lock_irq(&conf->device_lock);
		conf->reshape_safe = mddev->reshape_position;
		spin_unlock_irq(&conf->device_lock);
		wake_up(&conf->wait_for_overlap);
		sysfs_notify(&mddev->kobj, NULL, "sync_completed");
	}
	INIT_LIST_HEAD(&stripes);
	for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) {
		int j;
		int skipped_disk = 0;
		sh = get_active_stripe(conf, stripe_addr+i, 0, 0, 1);
		set_bit(STRIPE_EXPANDING, &sh->state);
		atomic_inc(&conf->reshape_stripes);
		/* If any of this stripe is beyond the end of the old
		 * array, then we need to zero those blocks.
		 */
		for (j = sh->disks; j--;) {
			sector_t s;
			if (j == sh->pd_idx)
				continue;
			if (conf->level == 6 &&
			    j == sh->qd_idx)
				continue;
			s = compute_blocknr(sh, j, 0);
			if (s < raid5_size(mddev, 0, 0)) {
				skipped_disk = 1;
				continue;
			}
			memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE);
			set_bit(R5_Expanded, &sh->dev[j].flags);
			set_bit(R5_UPTODATE, &sh->dev[j].flags);
		}
		if (!skipped_disk) {
			set_bit(STRIPE_EXPAND_READY, &sh->state);
			set_bit(STRIPE_HANDLE, &sh->state);
		}
		list_add(&sh->lru, &stripes);
	}
	spin_lock_irq(&conf->device_lock);
	if (mddev->reshape_backwards)
		conf->reshape_progress -= reshape_sectors * new_data_disks;
	else
		conf->reshape_progress += reshape_sectors * new_data_disks;
	spin_unlock_irq(&conf->device_lock);
	/* Ok, those stripes are ready.  We can start scheduling
	 * reads on the source stripes.
	 * The source stripes are determined by mapping the first and last
	 * block on the destination stripes.
	 */
	first_sector =
		raid5_compute_sector(conf, stripe_addr*(new_data_disks),
				     1, &dd_idx, NULL);
	last_sector =
		raid5_compute_sector(conf, ((stripe_addr+reshape_sectors)
					    * new_data_disks - 1),
				     1, &dd_idx, NULL);
	if (last_sector >= mddev->dev_sectors)
		last_sector = mddev->dev_sectors - 1;
	while (first_sector <= last_sector) {
		sh = get_active_stripe(conf, first_sector, 1, 0, 1);
		set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
		set_bit(STRIPE_HANDLE, &sh->state);
		release_stripe(sh);
		first_sector += STRIPE_SECTORS;
	}
	/* Now that the sources are clearly marked, we can release
	 * the destination stripes.
	 */
	while (!list_empty(&stripes)) {
		sh = list_entry(stripes.next, struct stripe_head, lru);
		list_del_init(&sh->lru);
		release_stripe(sh);
	}
	/* If this takes us to the resync_max point where we have to pause,
	 * then we need to write out the superblock.
	 */
	sector_nr += reshape_sectors;
	if ((sector_nr - mddev->curr_resync_completed) * 2
	    >= mddev->resync_max - mddev->curr_resync_completed) {
		/* Cannot proceed until we've updated the superblock... */
		wait_event(conf->wait_for_overlap,
			   atomic_read(&conf->reshape_stripes) == 0);
		mddev->reshape_position = conf->reshape_progress;
		mddev->curr_resync_completed = sector_nr;
		conf->reshape_checkpoint = jiffies;
		set_bit(MD_CHANGE_DEVS, &mddev->flags);
		md_wakeup_thread(mddev->thread);
		wait_event(mddev->sb_wait,
			   !test_bit(MD_CHANGE_DEVS, &mddev->flags)
			   || kthread_should_stop());
		spin_lock_irq(&conf->device_lock);
		conf->reshape_safe = mddev->reshape_position;
		spin_unlock_irq(&conf->device_lock);
		wake_up(&conf->wait_for_overlap);
		sysfs_notify(&mddev->kobj, NULL, "sync_completed");
	}
	return reshape_sectors;
}
/* FIXME go_faster isn't used */
static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster)
{
	struct r5conf *conf = mddev->private;
	struct stripe_head *sh;
	sector_t max_sector = mddev->dev_sectors;
	sector_t sync_blocks;
	int still_degraded = 0;
	int i;

	if (sector_nr >= max_sector) {
		/* just being told to finish up .. nothing much to do */

		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
			end_reshape(conf);
			return 0;
		}

		if (mddev->curr_resync < max_sector) /* aborted */
			bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
					&sync_blocks, 1);
		else /* completed sync */
			conf->fullsync = 0;
		bitmap_close_sync(mddev->bitmap);

		return 0;
	}

	/* Allow raid5_quiesce to complete */
	wait_event(conf->wait_for_overlap, conf->quiesce != 2);

	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
		return reshape_request(mddev, sector_nr, skipped);

	/* No need to check resync_max as we never do more than one
	 * stripe, and as resync_max will always be on a chunk boundary,
	 * if the check in md_do_sync didn't fire, there is no chance
	 * of overstepping resync_max here.
	 */

	/* if there are too many failed drives and we are trying
	 * to resync, then assert that we are finished, because there is
	 * nothing we can do.
	 */
	if (mddev->degraded >= conf->max_degraded &&
	    test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
		sector_t rv = mddev->dev_sectors - sector_nr;
		*skipped = 1;
		return rv;
	}
	if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
	    !conf->fullsync && sync_blocks >= STRIPE_SECTORS) {
		/* we can skip this block, and probably more */
		sync_blocks /= STRIPE_SECTORS;
		*skipped = 1;
		return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
	}

	bitmap_cond_end_sync(mddev->bitmap, sector_nr);

	sh = get_active_stripe(conf, sector_nr, 0, 1, 0);
	if (sh == NULL) {
		sh = get_active_stripe(conf, sector_nr, 0, 0, 0);
		/* make sure we don't swamp the stripe cache if someone else
		 * is trying to get access.
		 */
		schedule_timeout_uninterruptible(1);
	}
	/* Need to check if array will still be degraded after recovery/resync.
	 * We don't need to check the 'failed' flag as when that gets set,
	 * recovery aborts.
	 */
	for (i = 0; i < conf->raid_disks; i++)
		if (conf->disks[i].rdev == NULL)
			still_degraded = 1;

	bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);

	set_bit(STRIPE_SYNC_REQUESTED, &sh->state);

	handle_stripe(sh);
	release_stripe(sh);

	return STRIPE_SECTORS;
}
static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
{
	/* We may not be able to submit a whole bio at once as there
	 * may not be enough stripe_heads available.
	 * We cannot pre-allocate enough stripe_heads as we may need
	 * more than exist in the cache (if we allow ever large chunks).
	 * So we do one stripe head at a time and record in
	 * ->bi_hw_segments how many have been done.
	 *
	 * We *know* that this entire raid_bio is in one chunk, so
	 * it will be only one 'dd_idx' and only need one call to raid5_compute_sector.
	 */
	struct stripe_head *sh;
	int dd_idx;
	sector_t sector, logical_sector, last_sector;
	int scnt = 0;
	int remaining;
	int handled = 0;

	logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
	sector = raid5_compute_sector(conf, logical_sector,
				      0, &dd_idx, NULL);
	last_sector = raid_bio->bi_sector + (raid_bio->bi_size>>9);

	for (; logical_sector < last_sector;
	     logical_sector += STRIPE_SECTORS,
		     sector += STRIPE_SECTORS,
		     scnt++) {

		if (scnt < raid5_bi_processed_stripes(raid_bio))
			/* already done this stripe */
			continue;

		sh = get_active_stripe(conf, sector, 0, 1, 0);

		if (!sh) {
			/* failed to get a stripe - must wait */
			raid5_set_bi_processed_stripes(raid_bio, scnt);
			conf->retry_read_aligned = raid_bio;
			return handled;
		}

		if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) {
			release_stripe(sh);
			raid5_set_bi_processed_stripes(raid_bio, scnt);
			conf->retry_read_aligned = raid_bio;
			return handled;
		}

		set_bit(R5_ReadNoMerge, &sh->dev[dd_idx].flags);
		handle_stripe(sh);
		release_stripe(sh);
		handled++;
	}
	remaining = raid5_dec_bi_active_stripes(raid_bio);
	if (remaining == 0)
		bio_endio(raid_bio, 0);
	if (atomic_dec_and_test(&conf->active_aligned_reads))
		wake_up(&conf->wait_for_stripe);
	return handled;
}
#define MAX_STRIPE_BATCH 8
static int handle_active_stripes(struct r5conf *conf)
{
	struct stripe_head *batch[MAX_STRIPE_BATCH], *sh;
	int i, batch_size = 0;

	while (batch_size < MAX_STRIPE_BATCH &&
	       (sh = __get_priority_stripe(conf)) != NULL)
		batch[batch_size++] = sh;

	if (batch_size == 0)
		return batch_size;
	spin_unlock_irq(&conf->device_lock);

	for (i = 0; i < batch_size; i++)
		handle_stripe(batch[i]);

	cond_resched();

	spin_lock_irq(&conf->device_lock);
	for (i = 0; i < batch_size; i++)
		__release_stripe(conf, batch[i]);
	return batch_size;
}
/*
 * This is our raid5 kernel thread.
 *
 * We scan the hash table for stripes which can be handled now.
 * During the scan, completed stripes are saved for us by the interrupt
 * handler, so that they will not have to wait for our next wakeup.
 */
static void raid5d(struct mddev *mddev)
{
	struct r5conf *conf = mddev->private;
	int handled;
	struct blk_plug plug;

	pr_debug("+++ raid5d active\n");

	md_check_recovery(mddev);

	blk_start_plug(&plug);
	handled = 0;
	spin_lock_irq(&conf->device_lock);
	while (1) {
		struct bio *bio;
		int batch_size;

		if (!list_empty(&conf->bitmap_list)) {
			/* Now is a good time to flush some bitmap updates */
			conf->seq_flush++;
			spin_unlock_irq(&conf->device_lock);
			bitmap_unplug(mddev->bitmap);
			spin_lock_irq(&conf->device_lock);
			conf->seq_write = conf->seq_flush;
			activate_bit_delay(conf);
		}
		raid5_activate_delayed(conf);

		while ((bio = remove_bio_from_retry(conf))) {
			int ok;
			spin_unlock_irq(&conf->device_lock);
			ok = retry_aligned_read(conf, bio);
			spin_lock_irq(&conf->device_lock);
			if (!ok)
				break;
			handled++;
		}

		batch_size = handle_active_stripes(conf);
		if (!batch_size)
			break;
		handled += batch_size;

		if (mddev->flags & ~(1<<MD_CHANGE_PENDING)) {
			spin_unlock_irq(&conf->device_lock);
			md_check_recovery(mddev);
			spin_lock_irq(&conf->device_lock);
		}
	}
	pr_debug("%d stripes handled\n", handled);

	spin_unlock_irq(&conf->device_lock);

	async_tx_issue_pending_all();
	blk_finish_plug(&plug);

	pr_debug("--- raid5d inactive\n");
}
static ssize_t
raid5_show_stripe_cache_size(struct mddev *mddev, char *page)
{
	struct r5conf *conf = mddev->private;
	if (conf)
		return sprintf(page, "%d\n", conf->max_nr_stripes);
	else
		return 0;
}
int
raid5_set_cache_size(struct mddev *mddev, int size)
{
	struct r5conf *conf = mddev->private;
	int err;

	if (size <= 16 || size > 32768)
		return -EINVAL;
	while (size < conf->max_nr_stripes) {
		if (drop_one_stripe(conf))
			conf->max_nr_stripes--;
		else
			break;
	}
	err = md_allow_write(mddev);
	if (err)
		return err;
	while (size > conf->max_nr_stripes) {
		if (grow_one_stripe(conf))
			conf->max_nr_stripes++;
		else
			break;
	}
	return 0;
}
EXPORT_SYMBOL(raid5_set_cache_size);
static ssize_t
raid5_store_stripe_cache_size(struct mddev *mddev, const char *page, size_t len)
{
	struct r5conf *conf = mddev->private;
	unsigned long new;
	int err;

	if (len >= PAGE_SIZE)
		return -EINVAL;
	if (!conf)
		return -ENODEV;

	if (strict_strtoul(page, 10, &new))
		return -EINVAL;
	err = raid5_set_cache_size(mddev, new);
	if (err)
		return err;
	return len;
}

static struct md_sysfs_entry
raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
				raid5_show_stripe_cache_size,
				raid5_store_stripe_cache_size);
static ssize_t
raid5_show_preread_threshold(struct mddev *mddev, char *page)
{
	struct r5conf *conf = mddev->private;
	if (conf)
		return sprintf(page, "%d\n", conf->bypass_threshold);
	else
		return 0;
}

static ssize_t
raid5_store_preread_threshold(struct mddev *mddev, const char *page, size_t len)
{
	struct r5conf *conf = mddev->private;
	unsigned long new;
	if (len >= PAGE_SIZE)
		return -EINVAL;
	if (!conf)
		return -ENODEV;

	if (strict_strtoul(page, 10, &new))
		return -EINVAL;
	if (new > conf->max_nr_stripes)
		return -EINVAL;
	conf->bypass_threshold = new;
	return len;
}

static struct md_sysfs_entry
raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold,
					S_IRUGO | S_IWUSR,
					raid5_show_preread_threshold,
					raid5_store_preread_threshold);
static ssize_t
stripe_cache_active_show(struct mddev *mddev, char *page)
{
	struct r5conf *conf = mddev->private;
	if (conf)
		return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
	else
		return 0;
}

static struct md_sysfs_entry
raid5_stripecache_active = __ATTR_RO(stripe_cache_active);

static struct attribute *raid5_attrs[] =  {
	&raid5_stripecache_size.attr,
	&raid5_stripecache_active.attr,
	&raid5_preread_bypass_threshold.attr,
	NULL,
};
static struct attribute_group raid5_attrs_group = {
	.name = NULL,
	.attrs = raid5_attrs,
};
static sector_t
raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
	struct r5conf *conf = mddev->private;

	if (!sectors)
		sectors = mddev->dev_sectors;
	if (!raid_disks)
		/* size is defined by the smallest of previous and new size */
		raid_disks = min(conf->raid_disks, conf->previous_raid_disks);

	sectors &= ~((sector_t)mddev->chunk_sectors - 1);
	sectors &= ~((sector_t)mddev->new_chunk_sectors - 1);
	return sectors * (raid_disks - conf->max_degraded);
}
static void raid5_free_percpu(struct r5conf *conf)
{
	unsigned long cpu;
	struct raid5_percpu *percpu;

	if (!conf->percpu)
		return;

	get_online_cpus();
	for_each_possible_cpu(cpu) {
		percpu = per_cpu_ptr(conf->percpu, cpu);
		safe_put_page(percpu->spare_page);
		kfree(percpu->scribble);
	}
#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu_notifier(&conf->cpu_notify);
#endif
	put_online_cpus();

	free_percpu(conf->percpu);
}
static void free_conf(struct r5conf *conf)
{
	shrink_stripes(conf);
	raid5_free_percpu(conf);
	kfree(conf->disks);
	kfree(conf->stripe_hashtbl);
	kfree(conf);
}
#ifdef CONFIG_HOTPLUG_CPU
static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
			      void *hcpu)
{
	struct r5conf *conf = container_of(nfb, struct r5conf, cpu_notify);
	long cpu = (long)hcpu;
	struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (conf->level == 6 && !percpu->spare_page)
			percpu->spare_page = alloc_page(GFP_KERNEL);
		if (!percpu->scribble)
			percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);

		if (!percpu->scribble ||
		    (conf->level == 6 && !percpu->spare_page)) {
			safe_put_page(percpu->spare_page);
			kfree(percpu->scribble);
			pr_err("%s: failed memory allocation for cpu%ld\n",
			       __func__, cpu);
			return notifier_from_errno(-ENOMEM);
		}
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		safe_put_page(percpu->spare_page);
		kfree(percpu->scribble);
		percpu->spare_page = NULL;
		percpu->scribble = NULL;
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}
#endif
static int raid5_alloc_percpu(struct r5conf *conf)
{
	unsigned long cpu;
	struct page *spare_page;
	struct raid5_percpu __percpu *allcpus;
	void *scribble;
	int err;

	allcpus = alloc_percpu(struct raid5_percpu);
	if (!allcpus)
		return -ENOMEM;
	conf->percpu = allcpus;

	get_online_cpus();
	err = 0;
	for_each_present_cpu(cpu) {
		if (conf->level == 6) {
			spare_page = alloc_page(GFP_KERNEL);
			if (!spare_page) {
				err = -ENOMEM;
				break;
			}
			per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page;
		}
		scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
		if (!scribble) {
			err = -ENOMEM;
			break;
		}
		per_cpu_ptr(conf->percpu, cpu)->scribble = scribble;
	}
#ifdef CONFIG_HOTPLUG_CPU
	conf->cpu_notify.notifier_call = raid456_cpu_notify;
	conf->cpu_notify.priority = 0;
	if (err == 0)
		err = register_cpu_notifier(&conf->cpu_notify);
#endif
	put_online_cpus();

	return err;
}
static struct r5conf *setup_conf(struct mddev *mddev)
{
	struct r5conf *conf;
	int raid_disk, memory, max_disks;
	struct md_rdev *rdev;
	struct disk_info *disk;
	char pers_name[6];

	if (mddev->new_level != 5
	    && mddev->new_level != 4
	    && mddev->new_level != 6) {
		printk(KERN_ERR "md/raid:%s: raid level not set to 4/5/6 (%d)\n",
		       mdname(mddev), mddev->new_level);
		return ERR_PTR(-EIO);
	}
	if ((mddev->new_level == 5
	     && !algorithm_valid_raid5(mddev->new_layout)) ||
	    (mddev->new_level == 6
	     && !algorithm_valid_raid6(mddev->new_layout))) {
		printk(KERN_ERR "md/raid:%s: layout %d not supported\n",
		       mdname(mddev), mddev->new_layout);
		return ERR_PTR(-EIO);
	}
	if (mddev->new_level == 6 && mddev->raid_disks < 4) {
		printk(KERN_ERR "md/raid:%s: not enough configured devices (%d, minimum 4)\n",
		       mdname(mddev), mddev->raid_disks);
		return ERR_PTR(-EINVAL);
	}

	if (!mddev->new_chunk_sectors ||
	    (mddev->new_chunk_sectors << 9) % PAGE_SIZE ||
	    !is_power_of_2(mddev->new_chunk_sectors)) {
		printk(KERN_ERR "md/raid:%s: invalid chunk size %d\n",
		       mdname(mddev), mddev->new_chunk_sectors << 9);
		return ERR_PTR(-EINVAL);
	}

	conf = kzalloc(sizeof(struct r5conf), GFP_KERNEL);
	if (conf == NULL)
		goto abort;
	spin_lock_init(&conf->device_lock);
	init_waitqueue_head(&conf->wait_for_stripe);
	init_waitqueue_head(&conf->wait_for_overlap);
	INIT_LIST_HEAD(&conf->handle_list);
	INIT_LIST_HEAD(&conf->hold_list);
	INIT_LIST_HEAD(&conf->delayed_list);
	INIT_LIST_HEAD(&conf->bitmap_list);
	INIT_LIST_HEAD(&conf->inactive_list);
	atomic_set(&conf->active_stripes, 0);
	atomic_set(&conf->preread_active_stripes, 0);
	atomic_set(&conf->active_aligned_reads, 0);
	conf->bypass_threshold = BYPASS_THRESHOLD;
	conf->recovery_disabled = mddev->recovery_disabled - 1;

	conf->raid_disks = mddev->raid_disks;
	if (mddev->reshape_position == MaxSector)
		conf->previous_raid_disks = mddev->raid_disks;
	else
		conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks;
	max_disks = max(conf->raid_disks, conf->previous_raid_disks);
	conf->scribble_len = scribble_len(max_disks);

	conf->disks = kzalloc(max_disks * sizeof(struct disk_info),
			      GFP_KERNEL);
	if (!conf->disks)
		goto abort;

	conf->mddev = mddev;

	if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
		goto abort;

	conf->level = mddev->new_level;
	if (raid5_alloc_percpu(conf) != 0)
		goto abort;

	pr_debug("raid456: run(%s) called.\n", mdname(mddev));

	rdev_for_each(rdev, mddev) {
		raid_disk = rdev->raid_disk;
		if (raid_disk >= max_disks
		    || raid_disk < 0)
			continue;
		disk = conf->disks + raid_disk;

		if (test_bit(Replacement, &rdev->flags)) {
			if (disk->replacement)
				goto abort;
			disk->replacement = rdev;
		} else {
			if (disk->rdev)
				goto abort;
			disk->rdev = rdev;
		}

		if (test_bit(In_sync, &rdev->flags)) {
			char b[BDEVNAME_SIZE];
			printk(KERN_INFO "md/raid:%s: device %s operational as raid"
			       " disk %d\n",
			       mdname(mddev), bdevname(rdev->bdev, b), raid_disk);
		} else if (rdev->saved_raid_disk != raid_disk)
			/* Cannot rely on bitmap to complete recovery */
			conf->fullsync = 1;
	}

	conf->chunk_sectors = mddev->new_chunk_sectors;
	conf->level = mddev->new_level;
	if (conf->level == 6)
		conf->max_degraded = 2;
	else
		conf->max_degraded = 1;
	conf->algorithm = mddev->new_layout;
	conf->max_nr_stripes = NR_STRIPES;
	conf->reshape_progress = mddev->reshape_position;
	if (conf->reshape_progress != MaxSector) {
		conf->prev_chunk_sectors = mddev->chunk_sectors;
		conf->prev_algo = mddev->layout;
	}

	memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
		 max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
	if (grow_stripes(conf, conf->max_nr_stripes)) {
		printk(KERN_ERR
		       "md/raid:%s: couldn't allocate %dkB for buffers\n",
		       mdname(mddev), memory);
		goto abort;
	} else
		printk(KERN_INFO "md/raid:%s: allocated %dkB\n",
		       mdname(mddev), memory);

	sprintf(pers_name, "raid%d", mddev->new_level);
	conf->thread = md_register_thread(raid5d, mddev, pers_name);
	if (!conf->thread) {
		printk(KERN_ERR
		       "md/raid:%s: couldn't allocate thread.\n",
		       mdname(mddev));
		goto abort;
	}

	return conf;

 abort:
	if (conf) {
		free_conf(conf);
		return ERR_PTR(-EIO);
	} else
		return ERR_PTR(-ENOMEM);
}
static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded)
{
	switch (algo) {
	case ALGORITHM_PARITY_0:
		if (raid_disk < max_degraded)
			return 1;
		break;
	case ALGORITHM_PARITY_N:
		if (raid_disk >= raid_disks - max_degraded)
			return 1;
		break;
	case ALGORITHM_PARITY_0_6:
		if (raid_disk == 0 ||
		    raid_disk == raid_disks - 1)
			return 1;
		break;
	case ALGORITHM_LEFT_ASYMMETRIC_6:
	case ALGORITHM_RIGHT_ASYMMETRIC_6:
	case ALGORITHM_LEFT_SYMMETRIC_6:
	case ALGORITHM_RIGHT_SYMMETRIC_6:
		if (raid_disk == raid_disks - 1)
			return 1;
	}
	return 0;
}
static int run(struct mddev *mddev)
{
	struct r5conf *conf;
	int working_disks = 0;
	int dirty_parity_disks = 0;
	struct md_rdev *rdev;
	sector_t reshape_offset = 0;
	int i;
	long long min_offset_diff = 0;
	int first = 1;

	if (mddev->recovery_cp != MaxSector)
		printk(KERN_NOTICE "md/raid:%s: not clean"
		       " -- starting background reconstruction\n",
		       mdname(mddev));

	rdev_for_each(rdev, mddev) {
		long long diff;
		if (rdev->raid_disk < 0)
			continue;
		diff = (rdev->new_data_offset - rdev->data_offset);
		if (first) {
			min_offset_diff = diff;
			first = 0;
		} else if (mddev->reshape_backwards &&
			 diff < min_offset_diff)
			min_offset_diff = diff;
		else if (!mddev->reshape_backwards &&
			 diff > min_offset_diff)
			min_offset_diff = diff;
	}

	if (mddev->reshape_position != MaxSector) {
		/* Check that we can continue the reshape.
		 * Difficulties arise if the stripe we would write to
		 * next is at or after the stripe we would read from next.
		 * For a reshape that changes the number of devices, this
		 * is only possible for a very short time, and mdadm makes
		 * sure that time appears to have passed before assembling
		 * the array.  So we fail if that time hasn't passed.
		 * For a reshape that keeps the number of devices the same
		 * mdadm must be monitoring the reshape and keeping the
		 * critical areas read-only and backed up.  It will start
		 * the array in read-only mode, so we check for that.
		 */
		sector_t here_new, here_old;
		int old_disks;
		int max_degraded = (mddev->level == 6 ? 2 : 1);

		if (mddev->new_level != mddev->level) {
			printk(KERN_ERR "md/raid:%s: unsupported reshape "
			       "required - aborting.\n",
			       mdname(mddev));
			return -EINVAL;
		}
		old_disks = mddev->raid_disks - mddev->delta_disks;
		/* reshape_position must be on a new-stripe boundary, and one
		 * further up in new geometry must map after here in old
		 * geometry.
		 */
		here_new = mddev->reshape_position;
		if (sector_div(here_new, mddev->new_chunk_sectors *
			       (mddev->raid_disks - max_degraded))) {
			printk(KERN_ERR "md/raid:%s: reshape_position not "
			       "on a stripe boundary\n", mdname(mddev));
			return -EINVAL;
		}
		reshape_offset = here_new * mddev->new_chunk_sectors;
		/* here_new is the stripe we will write to */
		here_old = mddev->reshape_position;
		sector_div(here_old, mddev->chunk_sectors *
			   (old_disks-max_degraded));
		/* here_old is the first stripe that we might need to read
		 * from */
		if (mddev->delta_disks == 0) {
			if ((here_new * mddev->new_chunk_sectors !=
			     here_old * mddev->chunk_sectors)) {
				printk(KERN_ERR "md/raid:%s: reshape position is"
				       " confused - aborting\n", mdname(mddev));
				return -EINVAL;
			}
			/* We cannot be sure it is safe to start an in-place
			 * reshape.  It is only safe if user-space is monitoring
			 * and taking constant backups.
			 * mdadm always starts a situation like this in
			 * readonly mode so it can take control before
			 * allowing any writes.  So just check for that.
			 */
			if (abs(min_offset_diff) >= mddev->chunk_sectors &&
			    abs(min_offset_diff) >= mddev->new_chunk_sectors)
				/* not really in-place - so OK */;
			else if (mddev->ro == 0) {
				printk(KERN_ERR "md/raid:%s: in-place reshape "
				       "must be started in read-only mode "
				       "- aborting\n",
				       mdname(mddev));
				return -EINVAL;
			}
		} else if (mddev->reshape_backwards
			? (here_new * mddev->new_chunk_sectors + min_offset_diff <=
			   here_old * mddev->chunk_sectors)
			: (here_new * mddev->new_chunk_sectors >=
			   here_old * mddev->chunk_sectors + (-min_offset_diff))) {
			/* Reading from the same stripe as writing to - bad */
			printk(KERN_ERR "md/raid:%s: reshape_position too early for "
			       "auto-recovery - aborting.\n",
			       mdname(mddev));
			return -EINVAL;
		}
		printk(KERN_INFO "md/raid:%s: reshape will continue\n",
		       mdname(mddev));
		/* OK, we should be able to continue; */
	} else {
		BUG_ON(mddev->level != mddev->new_level);
		BUG_ON(mddev->layout != mddev->new_layout);
		BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors);
		BUG_ON(mddev->delta_disks != 0);
	}

	if (mddev->private == NULL)
		conf = setup_conf(mddev);
	else
		conf = mddev->private;

	if (IS_ERR(conf))
		return PTR_ERR(conf);

	conf->min_offset_diff = min_offset_diff;
	mddev->thread = conf->thread;
	conf->thread = NULL;
	mddev->private = conf;

	for (i = 0; i < conf->raid_disks && conf->previous_raid_disks;
	     i++) {
		rdev = conf->disks[i].rdev;
		if (!rdev && conf->disks[i].replacement) {
			/* The replacement is all we have yet */
			rdev = conf->disks[i].replacement;
			conf->disks[i].replacement = NULL;
			clear_bit(Replacement, &rdev->flags);
			conf->disks[i].rdev = rdev;
		}
		if (!rdev)
			continue;
		if (conf->disks[i].replacement &&
		    conf->reshape_progress != MaxSector) {
			/* replacements and reshape simply do not mix. */
			printk(KERN_ERR "md: cannot handle concurrent "
			       "replacement and reshape.\n");
			goto abort;
		}
		if (test_bit(In_sync, &rdev->flags)) {
			working_disks++;
			continue;
		}
		/* This disc is not fully in-sync.  However if it
		 * just stored parity (beyond the recovery_offset),
		 * then we don't need to be concerned about the
		 * array being dirty.
		 * When reshape goes 'backwards', we never have
		 * partially completed devices, so we only need
		 * to worry about reshape going forwards.
		 */
		/* Hack because v0.91 doesn't store recovery_offset properly. */
		if (mddev->major_version == 0 &&
		    mddev->minor_version > 90)
			rdev->recovery_offset = reshape_offset;

		if (rdev->recovery_offset < reshape_offset) {
			/* We need to check old and new layout */
			if (!only_parity(rdev->raid_disk,
					 conf->algorithm,
					 conf->raid_disks,
					 conf->max_degraded))
				continue;
		}
		if (!only_parity(rdev->raid_disk,
				 conf->prev_algo,
				 conf->previous_raid_disks,
				 conf->max_degraded))
			continue;
		dirty_parity_disks++;
	}

	/*
	 * 0 for a fully functional array, 1 or 2 for a degraded array.
	 */
	mddev->degraded = calc_degraded(conf);

	if (has_failed(conf)) {
		printk(KERN_ERR "md/raid:%s: not enough operational devices"
		       " (%d/%d failed)\n",
		       mdname(mddev), mddev->degraded, conf->raid_disks);
		goto abort;
	}

	/* device size must be a multiple of chunk size */
	mddev->dev_sectors &= ~(mddev->chunk_sectors - 1);
	mddev->resync_max_sectors = mddev->dev_sectors;

	if (mddev->degraded > dirty_parity_disks &&
	    mddev->recovery_cp != MaxSector) {
		if (mddev->ok_start_degraded)
			printk(KERN_WARNING
			       "md/raid:%s: starting dirty degraded array"
			       " - data corruption possible.\n",
			       mdname(mddev));
		else {
			printk(KERN_ERR
			       "md/raid:%s: cannot start dirty degraded array.\n",
			       mdname(mddev));
			goto abort;
		}
	}

	if (mddev->degraded == 0)
		printk(KERN_INFO "md/raid:%s: raid level %d active with %d out of %d"
		       " devices, algorithm %d\n", mdname(mddev), conf->level,
		       mddev->raid_disks-mddev->degraded, mddev->raid_disks,
		       mddev->new_layout);
	else
		printk(KERN_ALERT "md/raid:%s: raid level %d active with %d"
		       " out of %d devices, algorithm %d\n",
		       mdname(mddev), conf->level,
		       mddev->raid_disks - mddev->degraded,
		       mddev->raid_disks, mddev->new_layout);

	print_raid5_conf(conf);

	if (conf->reshape_progress != MaxSector) {
		conf->reshape_safe = conf->reshape_progress;
		atomic_set(&conf->reshape_stripes, 0);
		clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
		clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
		set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
		set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
		mddev->sync_thread = md_register_thread(md_do_sync, mddev,
							"reshape");
	}

	/* Ok, everything is just fine now */
	if (mddev->to_remove == &raid5_attrs_group)
		mddev->to_remove = NULL;
	else if (mddev->kobj.sd &&
		 sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
		printk(KERN_WARNING
		       "raid5: failed to create sysfs attributes for %s\n",
		       mdname(mddev));
	md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));

	if (mddev->queue) {
		int chunk_size;
		/* read-ahead size must cover two whole stripes, which
		 * is 2 * (datadisks) * chunksize where 'n' is the
		 * number of raid devices.
		 */
		int data_disks = conf->previous_raid_disks - conf->max_degraded;
		int stripe = data_disks *
			((mddev->chunk_sectors << 9) / PAGE_SIZE);
		if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
			mddev->queue->backing_dev_info.ra_pages = 2 * stripe;

		blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec);

		mddev->queue->backing_dev_info.congested_data = mddev;
		mddev->queue->backing_dev_info.congested_fn = raid5_congested;

		chunk_size = mddev->chunk_sectors << 9;
		blk_queue_io_min(mddev->queue, chunk_size);
		blk_queue_io_opt(mddev->queue, chunk_size *
				 (conf->raid_disks - conf->max_degraded));

		rdev_for_each(rdev, mddev) {
			disk_stack_limits(mddev->gendisk, rdev->bdev,
					  rdev->data_offset << 9);
			disk_stack_limits(mddev->gendisk, rdev->bdev,
					  rdev->new_data_offset << 9);
		}
	}

	return 0;
abort:
	md_unregister_thread(&mddev->thread);
	print_raid5_conf(conf);
	free_conf(conf);
	mddev->private = NULL;
	printk(KERN_ALERT "md/raid:%s: failed to run raid set.\n", mdname(mddev));
	return -EIO;
}
static int stop(struct mddev *mddev)
{
	struct r5conf *conf = mddev->private;

	md_unregister_thread(&mddev->thread);
	if (mddev->queue)
		mddev->queue->backing_dev_info.congested_fn = NULL;
	free_conf(conf);
	mddev->private = NULL;
	mddev->to_remove = &raid5_attrs_group;
	return 0;
}
static void status(struct seq_file *seq, struct mddev *mddev)
{
	struct r5conf *conf = mddev->private;
	int i;

	seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level,
		   mddev->chunk_sectors / 2, mddev->layout);
	seq_printf(seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded);
	for (i = 0; i < conf->raid_disks; i++)
		seq_printf(seq, "%s",
			   conf->disks[i].rdev &&
			   test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_");
	seq_printf(seq, "]");
}
static void print_raid5_conf(struct r5conf *conf)
{
	int i;
	struct disk_info *tmp;

	printk(KERN_DEBUG "RAID conf printout:\n");
	if (!conf) {
		printk("(conf==NULL)\n");
		return;
	}
	printk(KERN_DEBUG " --- level:%d rd:%d wd:%d\n", conf->level,
	       conf->raid_disks,
	       conf->raid_disks - conf->mddev->degraded);

	for (i = 0; i < conf->raid_disks; i++) {
		char b[BDEVNAME_SIZE];
		tmp = conf->disks + i;
		if (tmp->rdev)
			printk(KERN_DEBUG " disk %d, o:%d, dev:%s\n",
			       i, !test_bit(Faulty, &tmp->rdev->flags),
			       bdevname(tmp->rdev->bdev, b));
	}
}
static int raid5_spare_active(struct mddev *mddev)
{
	int i;
	struct r5conf *conf = mddev->private;
	struct disk_info *tmp;
	int count = 0;
	unsigned long flags;

	for (i = 0; i < conf->raid_disks; i++) {
		tmp = conf->disks + i;
		if (tmp->replacement
		    && tmp->replacement->recovery_offset == MaxSector
		    && !test_bit(Faulty, &tmp->replacement->flags)
		    && !test_and_set_bit(In_sync, &tmp->replacement->flags)) {
			/* Replacement has just become active. */
			if (!tmp->rdev
			    || !test_and_clear_bit(In_sync, &tmp->rdev->flags))
				count++;
			if (tmp->rdev) {
				/* Replaced device not technically faulty,
				 * but we need to be sure it gets removed
				 * and never re-added.
				 */
				set_bit(Faulty, &tmp->rdev->flags);
				sysfs_notify_dirent_safe(
					tmp->rdev->sysfs_state);
			}
			sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
		} else if (tmp->rdev
			   && tmp->rdev->recovery_offset == MaxSector
			   && !test_bit(Faulty, &tmp->rdev->flags)
			   && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
			count++;
			sysfs_notify_dirent_safe(tmp->rdev->sysfs_state);
		}
	}
	spin_lock_irqsave(&conf->device_lock, flags);
	mddev->degraded = calc_degraded(conf);
	spin_unlock_irqrestore(&conf->device_lock, flags);
	print_raid5_conf(conf);
	return count;
}
static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
{
	struct r5conf *conf = mddev->private;
	int err = 0;
	int number = rdev->raid_disk;
	struct md_rdev **rdevp;
	struct disk_info *p = conf->disks + number;

	print_raid5_conf(conf);
	if (rdev == p->rdev)
		rdevp = &p->rdev;
	else if (rdev == p->replacement)
		rdevp = &p->replacement;
	else
		return 0;

	if (number >= conf->raid_disks &&
	    conf->reshape_progress == MaxSector)
		clear_bit(In_sync, &rdev->flags);

	if (test_bit(In_sync, &rdev->flags) ||
	    atomic_read(&rdev->nr_pending)) {
		err = -EBUSY;
		goto abort;
	}
	/* Only remove non-faulty devices if recovery
	 * isn't possible.
	 */
	if (!test_bit(Faulty, &rdev->flags) &&
	    mddev->recovery_disabled != conf->recovery_disabled &&
	    !has_failed(conf) &&
	    (!p->replacement || p->replacement == rdev) &&
	    number < conf->raid_disks) {
		err = -EBUSY;
		goto abort;
	}
	*rdevp = NULL;
	synchronize_rcu();
	if (atomic_read(&rdev->nr_pending)) {
		/* lost the race, try later */
		err = -EBUSY;
		*rdevp = rdev;
	} else if (p->replacement) {
		/* We must have just cleared 'rdev' */
		p->rdev = p->replacement;
		clear_bit(Replacement, &p->replacement->flags);
		smp_mb(); /* Make sure other CPUs may see both as identical
			   * but will never see neither - if they are careful
			   */
		p->replacement = NULL;
		clear_bit(WantReplacement, &rdev->flags);
	} else
		/* We might have just removed the Replacement as faulty -
		 * clear the bit just in case.
		 */
		clear_bit(WantReplacement, &rdev->flags);
abort:

	print_raid5_conf(conf);
	return err;
}
static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
{
	struct r5conf *conf = mddev->private;
	int err = -EEXIST;
	int disk;
	struct disk_info *p;
	int first = 0;
	int last = conf->raid_disks - 1;

	if (mddev->recovery_disabled == conf->recovery_disabled)
		return -EBUSY;

	if (rdev->saved_raid_disk < 0 && has_failed(conf))
		/* no point adding a device */
		return -EINVAL;

	if (rdev->raid_disk >= 0)
		first = last = rdev->raid_disk;

	/*
	 * find the disk ... but prefer rdev->saved_raid_disk
	 * if possible.
	 */
	if (rdev->saved_raid_disk >= 0 &&
	    rdev->saved_raid_disk >= first &&
	    conf->disks[rdev->saved_raid_disk].rdev == NULL)
		first = rdev->saved_raid_disk;

	for (disk = first; disk <= last; disk++) {
		p = conf->disks + disk;
		if (p->rdev == NULL) {
			clear_bit(In_sync, &rdev->flags);
			rdev->raid_disk = disk;
			err = 0;
			if (rdev->saved_raid_disk != disk)
				conf->fullsync = 1;
			rcu_assign_pointer(p->rdev, rdev);
			goto out;
		}
	}
	for (disk = first; disk <= last; disk++) {
		p = conf->disks + disk;
		if (test_bit(WantReplacement, &p->rdev->flags) &&
		    p->replacement == NULL) {
			clear_bit(In_sync, &rdev->flags);
			set_bit(Replacement, &rdev->flags);
			rdev->raid_disk = disk;
			err = 0;
			conf->fullsync = 1;
			rcu_assign_pointer(p->replacement, rdev);
			break;
		}
	}
out:
	print_raid5_conf(conf);
	return err;
}
static int raid5_resize(struct mddev *mddev, sector_t sectors)
{
	/* no resync is happening, and there is enough space
	 * on all devices, so we can resize.
	 * We need to make sure resync covers any new space.
	 * If the array is shrinking we should possibly wait until
	 * any io in the removed space completes, but it hardly seems
	 * worth it.
	 */
	sector_t newsize;
	sectors &= ~((sector_t)mddev->chunk_sectors - 1);
	newsize = raid5_size(mddev, sectors, mddev->raid_disks);
	if (mddev->external_size &&
	    mddev->array_sectors > newsize)
		return -EINVAL;
	if (mddev->bitmap) {
		int ret = bitmap_resize(mddev->bitmap, sectors, 0, 0);
		if (ret)
			return ret;
	}
	md_set_array_sectors(mddev, newsize);
	set_capacity(mddev->gendisk, mddev->array_sectors);
	revalidate_disk(mddev->gendisk);
	if (sectors > mddev->dev_sectors &&
	    mddev->recovery_cp > mddev->dev_sectors) {
		mddev->recovery_cp = mddev->dev_sectors;
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	}
	mddev->dev_sectors = sectors;
	mddev->resync_max_sectors = sectors;
	return 0;
}
static int check_stripe_cache(struct mddev *mddev)
{
	/* Can only proceed if there are plenty of stripe_heads.
	 * We need a minimum of one full stripe, and for sensible progress
	 * it is best to have about 4 times that.
	 * If we require 4 times, then the default 256 4K stripe_heads will
	 * allow for chunk sizes up to 256K, which is probably OK.
	 * If the chunk size is greater, user-space should request more
	 * stripe_heads first.
	 */
	struct r5conf *conf = mddev->private;
	if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4
	    > conf->max_nr_stripes ||
	    ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4
	    > conf->max_nr_stripes) {
		printk(KERN_WARNING "md/raid:%s: reshape: not enough stripes.  Needed %lu\n",
		       mdname(mddev),
		       ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9)
			/ STRIPE_SIZE)*4);
		return 0;
	}
	return 1;
}
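/*
 * Illustration (not part of the driver): the test above requires the stripe
 * cache to hold four chunks' worth of 4K stripe_heads.  With the default
 * max_nr_stripes of 256 that allows chunks up to 256K, since
 * (262144 / 4096) * 4 == 256.  A sketch of the arithmetic with a
 * hypothetical helper name, hard-coding STRIPE_SIZE == 4096:
 */
#if 0	/* illustrative only */
static unsigned long stripes_needed(unsigned long chunk_sectors)
{
	return ((chunk_sectors << 9) / 4096) * 4;
}
#endif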
static int check_reshape(struct mddev *mddev)
{
	struct r5conf *conf = mddev->private;

	if (mddev->delta_disks == 0 &&
	    mddev->new_layout == mddev->layout &&
	    mddev->new_chunk_sectors == mddev->chunk_sectors)
		return 0; /* nothing to do */
	if (has_failed(conf))
		return -EINVAL;
	if (mddev->delta_disks < 0) {
		/* We might be able to shrink, but the devices must
		 * be made bigger first.
		 * For raid6, 4 is the minimum size.
		 * Otherwise 2 is the minimum.
		 */
		int min = 2;
		if (mddev->level == 6)
			min = 4;
		if (mddev->raid_disks + mddev->delta_disks < min)
			return -EINVAL;
	}

	if (!check_stripe_cache(mddev))
		return -ENOSPC;

	return resize_stripes(conf, conf->raid_disks + mddev->delta_disks);
}
static int raid5_start_reshape(struct mddev *mddev)
{
	struct r5conf *conf = mddev->private;
	struct md_rdev *rdev;
	int spares = 0;
	unsigned long flags;

	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		return -EBUSY;

	if (!check_stripe_cache(mddev))
		return -ENOSPC;

	if (has_failed(conf))
		return -EINVAL;

	rdev_for_each(rdev, mddev) {
		if (!test_bit(In_sync, &rdev->flags)
		    && !test_bit(Faulty, &rdev->flags))
			spares++;
	}

	if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded)
		/* Not enough devices even to make a degraded array
		 * of that size.
		 */
		return -EINVAL;

	/* Refuse to reduce size of the array.  Any reductions in
	 * array size must be through explicit setting of array_size
	 * attribute.
	 */
	if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks)
	    < mddev->array_sectors) {
		printk(KERN_ERR "md/raid:%s: array size must be reduced "
		       "before number of disks\n", mdname(mddev));
		return -EINVAL;
	}

	atomic_set(&conf->reshape_stripes, 0);
	spin_lock_irq(&conf->device_lock);
	conf->previous_raid_disks = conf->raid_disks;
	conf->raid_disks += mddev->delta_disks;
	conf->prev_chunk_sectors = conf->chunk_sectors;
	conf->chunk_sectors = mddev->new_chunk_sectors;
	conf->prev_algo = conf->algorithm;
	conf->algorithm = mddev->new_layout;
	conf->generation++;
	/* Code that selects data_offset needs to see the generation update
	 * if reshape_progress has been set - so a memory barrier needed.
	 */
	smp_mb();
	if (mddev->reshape_backwards)
		conf->reshape_progress = raid5_size(mddev, 0, 0);
	else
		conf->reshape_progress = 0;
	conf->reshape_safe = conf->reshape_progress;
	spin_unlock_irq(&conf->device_lock);

	/* Add some new drives, as many as will fit.
	 * We know there are enough to make the newly sized array work.
	 * Don't add devices if we are reducing the number of
	 * devices in the array.  This is because it is not possible
	 * to correctly record the "partially reconstructed" state of
	 * such devices during the reshape and confusion could result.
	 */
	if (mddev->delta_disks >= 0) {
		rdev_for_each(rdev, mddev)
			if (rdev->raid_disk < 0 &&
			    !test_bit(Faulty, &rdev->flags)) {
				if (raid5_add_disk(mddev, rdev) == 0) {
					if (rdev->raid_disk
					    >= conf->previous_raid_disks)
						set_bit(In_sync, &rdev->flags);
					else
						rdev->recovery_offset = 0;

					if (sysfs_link_rdev(mddev, rdev))
						/* Failure here is OK */;
				}
			} else if (rdev->raid_disk >= conf->previous_raid_disks
				   && !test_bit(Faulty, &rdev->flags)) {
				/* This is a spare that was manually added */
				set_bit(In_sync, &rdev->flags);
			}

		/* When a reshape changes the number of devices,
		 * ->degraded is measured against the larger of the
		 * pre and post number of devices.
		 */
		spin_lock_irqsave(&conf->device_lock, flags);
		mddev->degraded = calc_degraded(conf);
		spin_unlock_irqrestore(&conf->device_lock, flags);
	}
	mddev->raid_disks = conf->raid_disks;
	mddev->reshape_position = conf->reshape_progress;
	set_bit(MD_CHANGE_DEVS, &mddev->flags);

	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
	set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
	set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
	mddev->sync_thread = md_register_thread(md_do_sync, mddev,
						"reshape");
	if (!mddev->sync_thread) {
		mddev->recovery = 0;
		spin_lock_irq(&conf->device_lock);
		mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks;
		rdev_for_each(rdev, mddev)
			rdev->new_data_offset = rdev->data_offset;
		smp_wmb();
		conf->reshape_progress = MaxSector;
		mddev->reshape_position = MaxSector;
		spin_unlock_irq(&conf->device_lock);
		return -EAGAIN;
	}
	conf->reshape_checkpoint = jiffies;
	md_wakeup_thread(mddev->sync_thread);
	md_new_event(mddev);
	return 0;
}
/* This is called from the reshape thread and should make any
 * changes needed in 'conf'.
 */
static void end_reshape(struct r5conf *conf)
{

	if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
		struct md_rdev *rdev;

		spin_lock_irq(&conf->device_lock);
		conf->previous_raid_disks = conf->raid_disks;
		rdev_for_each(rdev, conf->mddev)
			rdev->data_offset = rdev->new_data_offset;
		smp_wmb();
		conf->reshape_progress = MaxSector;
		spin_unlock_irq(&conf->device_lock);
		wake_up(&conf->wait_for_overlap);

		/* read-ahead size must cover two whole stripes, which is
		 * 2 * (datadisks) * chunksize where 'n' is the number of raid devices.
		 */
		if (conf->mddev->queue) {
			int data_disks = conf->raid_disks - conf->max_degraded;
			int stripe = data_disks * ((conf->chunk_sectors << 9)
						   / PAGE_SIZE);
			if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
				conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
		}
	}
}
/* This is called from the raid5d thread with mddev_lock held.
 * It makes config changes to the device.
 */
static void raid5_finish_reshape(struct mddev *mddev)
{
	struct r5conf *conf = mddev->private;

	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {

		if (mddev->delta_disks > 0) {
			md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
			set_capacity(mddev->gendisk, mddev->array_sectors);
			revalidate_disk(mddev->gendisk);
		} else {
			int d;
			spin_lock_irq(&conf->device_lock);
			mddev->degraded = calc_degraded(conf);
			spin_unlock_irq(&conf->device_lock);
			for (d = conf->raid_disks;
			     d < conf->raid_disks - mddev->delta_disks;
			     d++) {
				struct md_rdev *rdev = conf->disks[d].rdev;
				if (rdev)
					clear_bit(In_sync, &rdev->flags);
				rdev = conf->disks[d].replacement;
				if (rdev)
					clear_bit(In_sync, &rdev->flags);
			}
		}
		mddev->layout = conf->algorithm;
		mddev->chunk_sectors = conf->chunk_sectors;
		mddev->reshape_position = MaxSector;
		mddev->delta_disks = 0;
		mddev->reshape_backwards = 0;
	}
}
static void raid5_quiesce(struct mddev *mddev, int state)
{
	struct r5conf *conf = mddev->private;

	switch(state) {
	case 2: /* resume for a suspend */
		wake_up(&conf->wait_for_overlap);
		break;

	case 1: /* stop all writes */
		spin_lock_irq(&conf->device_lock);
		/* '2' tells resync/reshape to pause so that all
		 * active stripes can drain.
		 */
		conf->quiesce = 2;
		wait_event_lock_irq(conf->wait_for_stripe,
				    atomic_read(&conf->active_stripes) == 0 &&
				    atomic_read(&conf->active_aligned_reads) == 0,
				    conf->device_lock, /* nothing */);
		conf->quiesce = 1;
		spin_unlock_irq(&conf->device_lock);
		/* allow reshape to continue */
		wake_up(&conf->wait_for_overlap);
		break;

	case 0: /* re-enable writes */
		spin_lock_irq(&conf->device_lock);
		conf->quiesce = 0;
		wake_up(&conf->wait_for_stripe);
		wake_up(&conf->wait_for_overlap);
		spin_unlock_irq(&conf->device_lock);
		break;
	}
}
static void *raid45_takeover_raid0(struct mddev *mddev, int level)
{
	struct r0conf *raid0_conf = mddev->private;
	sector_t sectors;

	/* for raid0 takeover only one zone is supported */
	if (raid0_conf->nr_strip_zones > 1) {
		printk(KERN_ERR "md/raid:%s: cannot takeover raid0 with more than one zone.\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	sectors = raid0_conf->strip_zone[0].zone_end;
	sector_div(sectors, raid0_conf->strip_zone[0].nb_dev);
	mddev->dev_sectors = sectors;
	mddev->new_level = level;
	mddev->new_layout = ALGORITHM_PARITY_N;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->raid_disks += 1;
	mddev->delta_disks = 1;
	/* make sure it will not be marked as dirty */
	mddev->recovery_cp = MaxSector;

	return setup_conf(mddev);
}
static void *raid5_takeover_raid1(struct mddev *mddev)
{
	int chunksect;

	if (mddev->raid_disks != 2 ||
	    mddev->degraded > 1)
		return ERR_PTR(-EINVAL);

	/* Should check if there are write-behind devices? */

	chunksect = 64*2; /* 64K by default */

	/* The array must be an exact multiple of chunksize */
	while (chunksect && (mddev->array_sectors & (chunksect-1)))
		chunksect >>= 1;

	if ((chunksect<<9) < STRIPE_SIZE)
		/* array size does not allow a suitable chunk size */
		return ERR_PTR(-EINVAL);

	mddev->new_level = 5;
	mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC;
	mddev->new_chunk_sectors = chunksect;

	return setup_conf(mddev);
}
static void *raid5_takeover_raid6(struct mddev *mddev)
{
	int new_layout;

	switch (mddev->layout) {
	case ALGORITHM_LEFT_ASYMMETRIC_6:
		new_layout = ALGORITHM_LEFT_ASYMMETRIC;
		break;
	case ALGORITHM_RIGHT_ASYMMETRIC_6:
		new_layout = ALGORITHM_RIGHT_ASYMMETRIC;
		break;
	case ALGORITHM_LEFT_SYMMETRIC_6:
		new_layout = ALGORITHM_LEFT_SYMMETRIC;
		break;
	case ALGORITHM_RIGHT_SYMMETRIC_6:
		new_layout = ALGORITHM_RIGHT_SYMMETRIC;
		break;
	case ALGORITHM_PARITY_0_6:
		new_layout = ALGORITHM_PARITY_0;
		break;
	case ALGORITHM_PARITY_N:
		new_layout = ALGORITHM_PARITY_N;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}
	mddev->new_level = 5;
	mddev->new_layout = new_layout;
	mddev->delta_disks = -1;
	mddev->raid_disks -= 1;
	return setup_conf(mddev);
}
static int raid5_check_reshape(struct mddev *mddev)
{
	/* For a 2-drive array, the layout and chunk size can be changed
	 * immediately as no restriping is needed.
	 * For larger arrays we record the new value - after validation -
	 * to be used by a reshape pass.
	 */
	struct r5conf *conf = mddev->private;
	int new_chunk = mddev->new_chunk_sectors;

	if (mddev->new_layout >= 0 && !algorithm_valid_raid5(mddev->new_layout))
		return -EINVAL;
	if (new_chunk > 0) {
		if (!is_power_of_2(new_chunk))
			return -EINVAL;
		if (new_chunk < (PAGE_SIZE>>9))
			return -EINVAL;
		if (mddev->array_sectors & (new_chunk-1))
			/* not factor of array size */
			return -EINVAL;
	}

	/* They look valid */

	if (mddev->raid_disks == 2) {
		/* can make the change immediately */
		if (mddev->new_layout >= 0) {
			conf->algorithm = mddev->new_layout;
			mddev->layout = mddev->new_layout;
		}
		if (new_chunk > 0) {
			conf->chunk_sectors = new_chunk;
			mddev->chunk_sectors = new_chunk;
		}
		set_bit(MD_CHANGE_DEVS, &mddev->flags);
		md_wakeup_thread(mddev->thread);
	}
	return check_reshape(mddev);
}
static int raid6_check_reshape(struct mddev *mddev)
{
	int new_chunk = mddev->new_chunk_sectors;

	if (mddev->new_layout >= 0 && !algorithm_valid_raid6(mddev->new_layout))
		return -EINVAL;
	if (new_chunk > 0) {
		if (!is_power_of_2(new_chunk))
			return -EINVAL;
		if (new_chunk < (PAGE_SIZE >> 9))
			return -EINVAL;
		if (mddev->array_sectors & (new_chunk-1))
			/* not factor of array size */
			return -EINVAL;
	}

	/* They look valid */
	return check_reshape(mddev);
}
static void *raid5_takeover(struct mddev *mddev)
{
	/* raid5 can take over:
	 *  raid0 - if there is only one strip zone - make it a raid4 layout
	 *  raid1 - if there are two drives.  We need to know the chunk size
	 *  raid4 - trivial - just use a raid4 layout.
	 *  raid6 - Providing it is a *_6 layout
	 */
	if (mddev->level == 0)
		return raid45_takeover_raid0(mddev, 5);
	if (mddev->level == 1)
		return raid5_takeover_raid1(mddev);
	if (mddev->level == 4) {
		mddev->new_layout = ALGORITHM_PARITY_N;
		mddev->new_level = 5;
		return setup_conf(mddev);
	}
	if (mddev->level == 6)
		return raid5_takeover_raid6(mddev);

	return ERR_PTR(-EINVAL);
}
static void *raid4_takeover(struct mddev *mddev)
{
	/* raid4 can take over:
	 *  raid0 - if there is only one strip zone
	 *  raid5 - if layout is right
	 */
	if (mddev->level == 0)
		return raid45_takeover_raid0(mddev, 4);
	if (mddev->level == 5 &&
	    mddev->layout == ALGORITHM_PARITY_N) {
		mddev->new_layout = 0;
		mddev->new_level = 4;
		return setup_conf(mddev);
	}
	return ERR_PTR(-EINVAL);
}
static struct md_personality raid5_personality;

static void *raid6_takeover(struct mddev *mddev)
{
	/* Currently can only take over a raid5.  We map the
	 * personality to an equivalent raid6 personality
	 * with the Q block at the end.
	 */
	int new_layout;

	if (mddev->pers != &raid5_personality)
		return ERR_PTR(-EINVAL);
	if (mddev->degraded > 1)
		return ERR_PTR(-EINVAL);
	if (mddev->raid_disks > 253)
		return ERR_PTR(-EINVAL);
	if (mddev->raid_disks < 3)
		return ERR_PTR(-EINVAL);

	switch (mddev->layout) {
	case ALGORITHM_LEFT_ASYMMETRIC:
		new_layout = ALGORITHM_LEFT_ASYMMETRIC_6;
		break;
	case ALGORITHM_RIGHT_ASYMMETRIC:
		new_layout = ALGORITHM_RIGHT_ASYMMETRIC_6;
		break;
	case ALGORITHM_LEFT_SYMMETRIC:
		new_layout = ALGORITHM_LEFT_SYMMETRIC_6;
		break;
	case ALGORITHM_RIGHT_SYMMETRIC:
		new_layout = ALGORITHM_RIGHT_SYMMETRIC_6;
		break;
	case ALGORITHM_PARITY_0:
		new_layout = ALGORITHM_PARITY_0_6;
		break;
	case ALGORITHM_PARITY_N:
		new_layout = ALGORITHM_PARITY_N;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}
	mddev->new_level = 6;
	mddev->new_layout = new_layout;
	mddev->delta_disks = 1;
	mddev->raid_disks += 1;
	return setup_conf(mddev);
}
static struct md_personality raid6_personality =
{
	.name		= "raid6",
	.level		= 6,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid5_add_disk,
	.hot_remove_disk= raid5_remove_disk,
	.spare_active	= raid5_spare_active,
	.sync_request	= sync_request,
	.resize		= raid5_resize,
	.size		= raid5_size,
	.check_reshape	= raid6_check_reshape,
	.start_reshape  = raid5_start_reshape,
	.finish_reshape = raid5_finish_reshape,
	.quiesce	= raid5_quiesce,
	.takeover	= raid6_takeover,
};
static struct md_personality raid5_personality =
{
	.name		= "raid5",
	.level		= 5,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid5_add_disk,
	.hot_remove_disk= raid5_remove_disk,
	.spare_active	= raid5_spare_active,
	.sync_request	= sync_request,
	.resize		= raid5_resize,
	.size		= raid5_size,
	.check_reshape	= raid5_check_reshape,
	.start_reshape  = raid5_start_reshape,
	.finish_reshape = raid5_finish_reshape,
	.quiesce	= raid5_quiesce,
	.takeover	= raid5_takeover,
};

static struct md_personality raid4_personality =
{
	.name		= "raid4",
	.level		= 4,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid5_add_disk,
	.hot_remove_disk= raid5_remove_disk,
	.spare_active	= raid5_spare_active,
	.sync_request	= sync_request,
	.resize		= raid5_resize,
	.size		= raid5_size,
	.check_reshape	= raid5_check_reshape,
	.start_reshape  = raid5_start_reshape,
	.finish_reshape = raid5_finish_reshape,
	.quiesce	= raid5_quiesce,
	.takeover	= raid4_takeover,
};

static int __init raid5_init(void)
{
	register_md_personality(&raid6_personality);
	register_md_personality(&raid5_personality);
	register_md_personality(&raid4_personality);
	return 0;
}

static void raid5_exit(void)
{
	unregister_md_personality(&raid6_personality);
	unregister_md_personality(&raid5_personality);
	unregister_md_personality(&raid4_personality);
}

module_init(raid5_init);
module_exit(raid5_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID4/5/6 (striping with parity) personality for MD");
MODULE_ALIAS("md-personality-4"); /* RAID5 */
MODULE_ALIAS("md-raid5");
MODULE_ALIAS("md-raid4");
MODULE_ALIAS("md-level-5");
MODULE_ALIAS("md-level-4");
MODULE_ALIAS("md-personality-8"); /* RAID6 */
MODULE_ALIAS("md-raid6");
MODULE_ALIAS("md-level-6");

/* This used to be two separate modules, they were: */
MODULE_ALIAS("raid5");
MODULE_ALIAS("raid6");