/*
 * raid5.c : Multiple Devices driver for Linux
 *	   Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *	   Copyright (C) 1999, 2000 Ingo Molnar
 *	   Copyright (C) 2002, 2003 H. Peter Anvin
 *
 * RAID-4/5/6 management functions.
 * Thanks to Penguin Computing for making the RAID-6 development possible
 * by donating a test server!
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
/*
 * BITMAP UNPLUGGING:
 *
 * The sequencing for updating the bitmap reliably is a little
 * subtle (and I got it wrong the first time) so it deserves some
 * explanation.
 *
 * We group bitmap updates into batches.  Each batch has a number.
 * We may write out several batches at once, but that isn't very important.
 * conf->bm_write is the number of the last batch successfully written.
 * conf->bm_flush is the number of the last batch that was closed to
 *    new additions.
 * When we discover that we will need to write to any block in a stripe
 * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
 * the number of the batch it will be in. This is bm_flush+1.
 * When we are ready to do a write, if that batch hasn't been written yet,
 *   we plug the array and queue the stripe for later.
 * When an unplug happens, we increment bm_flush, thus closing the current
 *   batch.
 * When we notice that bm_flush > bm_write, we write out all pending updates
 * to the bitmap, and advance bm_write to where bm_flush was.
 * This may occasionally write a bit out twice, but is sure never to
 * miss any bits.
 */
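/*
 * Illustrative walk-through (not part of the original comment): suppose
 * bm_write == 5 and bm_flush == 5.  A write arriving now is tagged with
 * bm_seq = 6.  On the next unplug bm_flush becomes 6; the daemon then
 * sees bm_flush > bm_write, writes out the pending bitmap updates and
 * advances bm_write to 6, after which stripes tagged with bm_seq <= 6
 * may be written.
 */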
#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/async_tx.h>
#include <linux/seq_file.h>
#define NR_STRIPES		256
#define STRIPE_SIZE		PAGE_SIZE
#define STRIPE_SHIFT		(PAGE_SHIFT - 9)
#define STRIPE_SECTORS		(STRIPE_SIZE>>9)
#define	IO_THRESHOLD		1
#define BYPASS_THRESHOLD	1
#define NR_HASH			(PAGE_SIZE / sizeof(struct hlist_head))
#define HASH_MASK		(NR_HASH - 1)

#define stripe_hash(conf, sect)	(&((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK]))
/* bio's attached to a stripe+device for I/O are linked together in bi_sector
 * order without overlap.  There may be several bio's per stripe+device, and
 * a bio could span several devices.
 * When walking this list for a particular stripe+device, we must never proceed
 * beyond a bio that extends past this device, as the next bio might no longer
 * be valid.
 * This macro is used to determine the 'next' bio in the list, given the sector
 * of the current stripe+device
 */
#define r5_next_bio(bio, sect) ( ( (bio)->bi_sector + ((bio)->bi_size>>9) < sect + STRIPE_SECTORS) ? (bio)->bi_next : NULL)
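/*
 * Worked example (illustrative, assuming 4K pages so STRIPE_SECTORS == 8):
 * for a stripe+device starting at sector 16, a bio at bi_sector 16 with
 * bi_size 2048 ends at sector 20 (< 16 + 8), so r5_next_bio() yields
 * bio->bi_next; a bio covering sectors 16-23 reaches the end of this
 * device's part of the stripe, so the walk stops with NULL.
 */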
/*
 * The following can be used to debug the driver
 */
#define RAID5_PARANOIA	1
#if RAID5_PARANOIA && defined(CONFIG_SMP)
# define CHECK_DEVLOCK() assert_spin_locked(&conf->device_lock)
#else
# define CHECK_DEVLOCK()
#endif

#define printk_rl(args...) ((void) (printk_ratelimit() && printk(args)))
#if !RAID6_USE_EMPTY_ZERO_PAGE
/* In .bss so it's zeroed */
const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(256)));
#endif
/*
 * We maintain a biased count of active stripes in the bottom 16 bits of
 * bi_phys_segments, and a count of processed stripes in the upper 16 bits
 */
static inline int raid5_bi_phys_segments(struct bio *bio)
{
	return bio->bi_phys_segments & 0xffff;
}

static inline int raid5_bi_hw_segments(struct bio *bio)
{
	return (bio->bi_phys_segments >> 16) & 0xffff;
}

static inline int raid5_dec_bi_phys_segments(struct bio *bio)
{
	--bio->bi_phys_segments;
	return raid5_bi_phys_segments(bio);
}

static inline int raid5_dec_bi_hw_segments(struct bio *bio)
{
	unsigned short val = raid5_bi_hw_segments(bio);

	--val;
	bio->bi_phys_segments = (val << 16) | raid5_bi_phys_segments(bio);
	return val;
}

static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt)
{
	bio->bi_phys_segments = raid5_bi_phys_segments(bio) | (cnt << 16);
}
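/*
 * Illustrative sketch (not part of the driver): with 3 active stripes
 * and 2 processed stripes recorded against a bio, bi_phys_segments holds
 * (2 << 16) | 3 == 0x00020003, so raid5_bi_phys_segments() returns 3 and
 * raid5_bi_hw_segments() returns 2.  The "bias" is an initial reference
 * held while the bio is still being split across stripes, so the low
 * half cannot drop to zero prematurely.
 */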
/* Find first data disk in a raid6 stripe */
static inline int raid6_d0(struct stripe_head *sh)
{
	if (sh->qd_idx == sh->disks - 1)
		return 0;
	else
		return sh->qd_idx + 1;
}
static inline int raid6_next_disk(int disk, int raid_disks)
{
	disk++;
	return (disk < raid_disks) ? disk : 0;
}
/* When walking through the disks in a raid5, starting at raid6_d0,
 * we need to map each disk to a 'slot', where the data disks are slot
 * 0 .. raid_disks-3, the parity disk is raid_disks-2 and the Q disk
 * is raid_disks-1.  This helper does that mapping.
 */
static int raid6_idx_to_slot(int idx, struct stripe_head *sh, int *count)
{
	int slot;

	if (idx == sh->pd_idx)
		return sh->disks - 2;
	if (idx == sh->qd_idx)
		return sh->disks - 1;
	slot = (*count)++;
	return slot;
}
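/*
 * Worked example (illustrative): with disks == 5, pd_idx == 3 and
 * qd_idx == 4, walking from raid6_d0() maps disks 0, 1 and 2 to slots
 * 0, 1 and 2 via (*count)++, disk 3 to slot 3 (disks - 2, the P slot)
 * and disk 4 to slot 4 (disks - 1, the Q slot).
 */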
static void return_io(struct bio *return_bi)
{
	struct bio *bi = return_bi;

	while (bi) {

		return_bi = bi->bi_next;
		bi->bi_next = NULL;
		bi->bi_size = 0;
		bio_endio(bi, 0);
		bi = return_bi;
	}
}
static void print_raid5_conf(raid5_conf_t *conf);

static int stripe_operations_active(struct stripe_head *sh)
{
	return sh->check_state || sh->reconstruct_state ||
	       test_bit(STRIPE_BIOFILL_RUN, &sh->state) ||
	       test_bit(STRIPE_COMPUTE_RUN, &sh->state);
}
static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
{
	if (atomic_dec_and_test(&sh->count)) {
		BUG_ON(!list_empty(&sh->lru));
		BUG_ON(atomic_read(&conf->active_stripes)==0);
		if (test_bit(STRIPE_HANDLE, &sh->state)) {
			if (test_bit(STRIPE_DELAYED, &sh->state)) {
				list_add_tail(&sh->lru, &conf->delayed_list);
				blk_plug_device(conf->mddev->queue);
			} else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
				   sh->bm_seq - conf->seq_write > 0) {
				list_add_tail(&sh->lru, &conf->bitmap_list);
				blk_plug_device(conf->mddev->queue);
			} else {
				clear_bit(STRIPE_BIT_DELAY, &sh->state);
				list_add_tail(&sh->lru, &conf->handle_list);
			}
			md_wakeup_thread(conf->mddev->thread);
		} else {
			BUG_ON(stripe_operations_active(sh));
			if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
				atomic_dec(&conf->preread_active_stripes);
				if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
					md_wakeup_thread(conf->mddev->thread);
			}
			atomic_dec(&conf->active_stripes);
			if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
				list_add_tail(&sh->lru, &conf->inactive_list);
				wake_up(&conf->wait_for_stripe);
				if (conf->retry_read_aligned)
					md_wakeup_thread(conf->mddev->thread);
			}
		}
	}
}
static void release_stripe(struct stripe_head *sh)
{
	raid5_conf_t *conf = sh->raid_conf;
	unsigned long flags;

	spin_lock_irqsave(&conf->device_lock, flags);
	__release_stripe(conf, sh);
	spin_unlock_irqrestore(&conf->device_lock, flags);
}
static inline void remove_hash(struct stripe_head *sh)
{
	pr_debug("remove_hash(), stripe %llu\n",
		(unsigned long long)sh->sector);

	hlist_del_init(&sh->hash);
}

static inline void insert_hash(raid5_conf_t *conf, struct stripe_head *sh)
{
	struct hlist_head *hp = stripe_hash(conf, sh->sector);

	pr_debug("insert_hash(), stripe %llu\n",
		(unsigned long long)sh->sector);

	CHECK_DEVLOCK();
	hlist_add_head(&sh->hash, hp);
}
/* find an idle stripe, make sure it is unhashed, and return it. */
static struct stripe_head *get_free_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh = NULL;
	struct list_head *first;

	CHECK_DEVLOCK();
	if (list_empty(&conf->inactive_list))
		goto out;
	first = conf->inactive_list.next;
	sh = list_entry(first, struct stripe_head, lru);
	list_del_init(first);
	remove_hash(sh);
	atomic_inc(&conf->active_stripes);
out:
	return sh;
}
static void shrink_buffers(struct stripe_head *sh, int num)
{
	struct page *p;
	int i;

	for (i=0; i<num ; i++) {
		p = sh->dev[i].page;
		if (!p)
			continue;
		sh->dev[i].page = NULL;
		put_page(p);
	}
}

static int grow_buffers(struct stripe_head *sh, int num)
{
	int i;

	for (i=0; i<num; i++) {
		struct page *page;

		if (!(page = alloc_page(GFP_KERNEL))) {
			return 1;
		}
		sh->dev[i].page = page;
	}
	return 0;
}
static void raid5_build_block(struct stripe_head *sh, int i);
static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous,
			   struct stripe_head *sh);
static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
{
	raid5_conf_t *conf = sh->raid_conf;
	int i;

	BUG_ON(atomic_read(&sh->count) != 0);
	BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
	BUG_ON(stripe_operations_active(sh));

	CHECK_DEVLOCK();
	pr_debug("init_stripe called, stripe %llu\n",
		(unsigned long long)sh->sector);

	remove_hash(sh);

	sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks;
	sh->sector = sector;
	stripe_set_idx(sector, conf, previous, sh);
	sh->state = 0;

	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		if (dev->toread || dev->read || dev->towrite || dev->written ||
		    test_bit(R5_LOCKED, &dev->flags)) {
			printk(KERN_ERR "sector=%llx i=%d %p %p %p %p %d\n",
			       (unsigned long long)sh->sector, i, dev->toread,
			       dev->read, dev->towrite, dev->written,
			       test_bit(R5_LOCKED, &dev->flags));
			BUG();
		}
		dev->flags = 0;
		raid5_build_block(sh, i);
	}
	insert_hash(conf, sh);
}
static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector, int disks)
{
	struct stripe_head *sh;
	struct hlist_node *hn;

	CHECK_DEVLOCK();
	pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector);
	hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash)
		if (sh->sector == sector && sh->disks == disks)
			return sh;
	pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector);
	return NULL;
}
static void unplug_slaves(mddev_t *mddev);
static void raid5_unplug_device(struct request_queue *q);

static struct stripe_head *
get_active_stripe(raid5_conf_t *conf, sector_t sector,
		  int previous, int noblock)
{
	struct stripe_head *sh;
	int disks = previous ? conf->previous_raid_disks : conf->raid_disks;

	pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);

	spin_lock_irq(&conf->device_lock);

	do {
		wait_event_lock_irq(conf->wait_for_stripe,
				    conf->quiesce == 0,
				    conf->device_lock, /* nothing */);
		sh = __find_stripe(conf, sector, disks);
		if (!sh) {
			if (!conf->inactive_blocked)
				sh = get_free_stripe(conf);
			if (noblock && sh == NULL)
				break;
			if (!sh) {
				conf->inactive_blocked = 1;
				wait_event_lock_irq(conf->wait_for_stripe,
						    !list_empty(&conf->inactive_list) &&
						    (atomic_read(&conf->active_stripes)
						     < (conf->max_nr_stripes *3/4)
						     || !conf->inactive_blocked),
						    conf->device_lock,
						    raid5_unplug_device(conf->mddev->queue)
					);
				conf->inactive_blocked = 0;
			} else
				init_stripe(sh, sector, previous);
		} else {
			if (atomic_read(&sh->count)) {
				BUG_ON(!list_empty(&sh->lru));
			} else {
				if (!test_bit(STRIPE_HANDLE, &sh->state))
					atomic_inc(&conf->active_stripes);
				if (list_empty(&sh->lru) &&
				    !test_bit(STRIPE_EXPANDING, &sh->state))
					BUG();
				list_del_init(&sh->lru);
			}
		}
	} while (sh == NULL);

	if (sh)
		atomic_inc(&sh->count);

	spin_unlock_irq(&conf->device_lock);
	return sh;
}
static void
raid5_end_read_request(struct bio *bi, int error);
static void
raid5_end_write_request(struct bio *bi, int error);
static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
{
	raid5_conf_t *conf = sh->raid_conf;
	int i, disks = sh->disks;

	might_sleep();

	for (i = disks; i--; ) {
		int rw;
		struct bio *bi;
		mdk_rdev_t *rdev;
		if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags))
			rw = WRITE;
		else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
			rw = READ;
		else
			continue;

		bi = &sh->dev[i].req;

		bi->bi_rw = rw;
		if (rw == WRITE)
			bi->bi_end_io = raid5_end_write_request;
		else
			bi->bi_end_io = raid5_end_read_request;

		rcu_read_lock();
		rdev = rcu_dereference(conf->disks[i].rdev);
		if (rdev && test_bit(Faulty, &rdev->flags))
			rdev = NULL;
		if (rdev)
			atomic_inc(&rdev->nr_pending);
		rcu_read_unlock();

		if (rdev) {
			if (s->syncing || s->expanding || s->expanded)
				md_sync_acct(rdev->bdev, STRIPE_SECTORS);

			set_bit(STRIPE_IO_STARTED, &sh->state);

			bi->bi_bdev = rdev->bdev;
			pr_debug("%s: for %llu schedule op %ld on disc %d\n",
				__func__, (unsigned long long)sh->sector,
				bi->bi_rw, i);
			atomic_inc(&sh->count);
			bi->bi_sector = sh->sector + rdev->data_offset;
			bi->bi_flags = 1 << BIO_UPTODATE;
			bi->bi_vcnt = 1;
			bi->bi_max_vecs = 1;
			bi->bi_idx = 0;
			bi->bi_io_vec = &sh->dev[i].vec;
			bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
			bi->bi_io_vec[0].bv_offset = 0;
			bi->bi_size = STRIPE_SIZE;
			bi->bi_next = NULL;
			if (rw == WRITE &&
			    test_bit(R5_ReWrite, &sh->dev[i].flags))
				atomic_add(STRIPE_SECTORS,
					&rdev->corrected_errors);
			generic_make_request(bi);
		} else {
			if (rw == WRITE)
				set_bit(STRIPE_DEGRADED, &sh->state);
			pr_debug("skip op %ld on disc %d for sector %llu\n",
				bi->bi_rw, i, (unsigned long long)sh->sector);
			clear_bit(R5_LOCKED, &sh->dev[i].flags);
			set_bit(STRIPE_HANDLE, &sh->state);
		}
	}
}
static struct dma_async_tx_descriptor *
async_copy_data(int frombio, struct bio *bio, struct page *page,
	sector_t sector, struct dma_async_tx_descriptor *tx)
{
	struct bio_vec *bvl;
	struct page *bio_page;
	int i;
	int page_offset;

	if (bio->bi_sector >= sector)
		page_offset = (signed)(bio->bi_sector - sector) * 512;
	else
		page_offset = (signed)(sector - bio->bi_sector) * -512;
	bio_for_each_segment(bvl, bio, i) {
		int len = bio_iovec_idx(bio, i)->bv_len;
		int clen;
		int b_offset = 0;

		if (page_offset < 0) {
			b_offset = -page_offset;
			page_offset += b_offset;
			len -= b_offset;
		}

		if (len > 0 && page_offset + len > STRIPE_SIZE)
			clen = STRIPE_SIZE - page_offset;
		else
			clen = len;

		if (clen > 0) {
			b_offset += bio_iovec_idx(bio, i)->bv_offset;
			bio_page = bio_iovec_idx(bio, i)->bv_page;
			if (frombio)
				tx = async_memcpy(page, bio_page, page_offset,
					b_offset, clen,
					ASYNC_TX_DEP_ACK,
					tx, NULL, NULL);
			else
				tx = async_memcpy(bio_page, page, b_offset,
					page_offset, clen,
					ASYNC_TX_DEP_ACK,
					tx, NULL, NULL);
		}
		if (clen < len) /* hit end of page */
			break;
		page_offset += len;
	}

	return tx;
}
static void ops_complete_biofill(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;
	struct bio *return_bi = NULL;
	raid5_conf_t *conf = sh->raid_conf;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	/* clear completed biofills */
	spin_lock_irq(&conf->device_lock);
	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		/* acknowledge completion of a biofill operation */
		/* and check if we need to reply to a read request,
		 * new R5_Wantfill requests are held off until
		 * !STRIPE_BIOFILL_RUN
		 */
		if (test_and_clear_bit(R5_Wantfill, &dev->flags)) {
			struct bio *rbi, *rbi2;

			BUG_ON(!dev->read);
			rbi = dev->read;
			dev->read = NULL;
			while (rbi && rbi->bi_sector <
				dev->sector + STRIPE_SECTORS) {
				rbi2 = r5_next_bio(rbi, dev->sector);
				if (!raid5_dec_bi_phys_segments(rbi)) {
					rbi->bi_next = return_bi;
					return_bi = rbi;
				}
				rbi = rbi2;
			}
		}
	}
	spin_unlock_irq(&conf->device_lock);
	clear_bit(STRIPE_BIOFILL_RUN, &sh->state);

	return_io(return_bi);

	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}
static void ops_run_biofill(struct stripe_head *sh)
{
	struct dma_async_tx_descriptor *tx = NULL;
	raid5_conf_t *conf = sh->raid_conf;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		if (test_bit(R5_Wantfill, &dev->flags)) {
			struct bio *rbi;
			spin_lock_irq(&conf->device_lock);
			dev->read = rbi = dev->toread;
			dev->toread = NULL;
			spin_unlock_irq(&conf->device_lock);
			while (rbi && rbi->bi_sector <
				dev->sector + STRIPE_SECTORS) {
				tx = async_copy_data(0, rbi, dev->page,
					dev->sector, tx);
				rbi = r5_next_bio(rbi, dev->sector);
			}
		}
	}

	atomic_inc(&sh->count);
	async_trigger_callback(ASYNC_TX_DEP_ACK | ASYNC_TX_ACK, tx,
		ops_complete_biofill, sh);
}
static void ops_complete_compute5(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;
	int target = sh->ops.target;
	struct r5dev *tgt = &sh->dev[target];

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	set_bit(R5_UPTODATE, &tgt->flags);
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
	clear_bit(R5_Wantcompute, &tgt->flags);
	clear_bit(STRIPE_COMPUTE_RUN, &sh->state);
	if (sh->check_state == check_state_compute_run)
		sh->check_state = check_state_compute_result;
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}
static struct dma_async_tx_descriptor *ops_run_compute5(struct stripe_head *sh)
{
	/* kernel stack size limits the total number of disks */
	int disks = sh->disks;
	struct page *xor_srcs[disks];
	int target = sh->ops.target;
	struct r5dev *tgt = &sh->dev[target];
	struct page *xor_dest = tgt->page;
	int count = 0;
	struct dma_async_tx_descriptor *tx;
	int i;

	pr_debug("%s: stripe %llu block: %d\n",
		__func__, (unsigned long long)sh->sector, target);
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));

	for (i = disks; i--; )
		if (i != target)
			xor_srcs[count++] = sh->dev[i].page;

	atomic_inc(&sh->count);

	if (unlikely(count == 1))
		tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE,
			0, NULL, ops_complete_compute5, sh);
	else
		tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
			ASYNC_TX_XOR_ZERO_DST, NULL,
			ops_complete_compute5, sh);

	return tx;
}
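/*
 * Minimal synchronous sketch of the job ops_run_compute5() hands to
 * async_xor() (illustrative only, not used by the driver): the missing
 * block is just the XOR of every other block in the stripe.
 */
#if 0
static void compute_block_sketch(u8 *dest, u8 **srcs, int count, size_t len)
{
	size_t j;
	int k;

	memset(dest, 0, len);		/* ASYNC_TX_XOR_ZERO_DST */
	for (k = 0; k < count; k++)
		for (j = 0; j < len; j++)
			dest[j] ^= srcs[k][j];
}
#endif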
static void ops_complete_prexor(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);
}
static struct dma_async_tx_descriptor *
ops_run_prexor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
{
	/* kernel stack size limits the total number of disks */
	int disks = sh->disks;
	struct page *xor_srcs[disks];
	int count = 0, pd_idx = sh->pd_idx, i;

	/* existing parity data subtracted */
	struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		/* Only process blocks that are known to be uptodate */
		if (test_bit(R5_Wantdrain, &dev->flags))
			xor_srcs[count++] = dev->page;
	}

	tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
		       ASYNC_TX_DEP_ACK | ASYNC_TX_XOR_DROP_DST, tx,
		       ops_complete_prexor, sh);

	return tx;
}
static struct dma_async_tx_descriptor *
ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
{
	int disks = sh->disks;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		struct bio *chosen;

		if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) {
			struct bio *wbi;

			spin_lock(&sh->lock);
			chosen = dev->towrite;
			dev->towrite = NULL;
			BUG_ON(dev->written);
			wbi = dev->written = chosen;
			spin_unlock(&sh->lock);

			while (wbi && wbi->bi_sector <
				dev->sector + STRIPE_SECTORS) {
				tx = async_copy_data(1, wbi, dev->page,
					dev->sector, tx);
				wbi = r5_next_bio(wbi, dev->sector);
			}
		}
	}

	return tx;
}
static void ops_complete_postxor(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;
	int disks = sh->disks, i, pd_idx = sh->pd_idx;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		if (dev->written || i == pd_idx)
			set_bit(R5_UPTODATE, &dev->flags);
	}

	if (sh->reconstruct_state == reconstruct_state_drain_run)
		sh->reconstruct_state = reconstruct_state_drain_result;
	else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run)
		sh->reconstruct_state = reconstruct_state_prexor_drain_result;
	else {
		BUG_ON(sh->reconstruct_state != reconstruct_state_run);
		sh->reconstruct_state = reconstruct_state_result;
	}

	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}
static void
ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
{
	/* kernel stack size limits the total number of disks */
	int disks = sh->disks;
	struct page *xor_srcs[disks];

	int count = 0, pd_idx = sh->pd_idx, i;
	struct page *xor_dest;
	int prexor = 0;
	unsigned long flags;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	/* check if prexor is active which means only process blocks
	 * that are part of a read-modify-write (written)
	 */
	if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
		prexor = 1;
		xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (dev->written)
				xor_srcs[count++] = dev->page;
		}
	} else {
		xor_dest = sh->dev[pd_idx].page;
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (i != pd_idx)
				xor_srcs[count++] = dev->page;
		}
	}

	/* 1/ if we prexor'd then the dest is reused as a source
	 * 2/ if we did not prexor then we are redoing the parity
	 * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST
	 * for the synchronous xor case
	 */
	flags = ASYNC_TX_DEP_ACK | ASYNC_TX_ACK |
		(prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST);

	atomic_inc(&sh->count);

	if (unlikely(count == 1)) {
		flags &= ~(ASYNC_TX_XOR_DROP_DST | ASYNC_TX_XOR_ZERO_DST);
		tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE,
			flags, tx, ops_complete_postxor, sh);
	} else
		tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
			flags, tx, ops_complete_postxor, sh);
}
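/*
 * Illustrative arithmetic for the two write paths (not in the original
 * source): in a read-modify-write, the prexor pass computes
 * P' = P ^ old_data (ASYNC_TX_XOR_DROP_DST keeps the old P as a source),
 * and the postxor above folds in the drained data: P = P' ^ new_data.
 * In a reconstruct-write the postxor instead starts from a zeroed
 * destination (ASYNC_TX_XOR_ZERO_DST) and XORs every data block afresh.
 */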
static void ops_complete_check(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	sh->check_state = check_state_check_result;
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}
static void ops_run_check(struct stripe_head *sh)
{
	/* kernel stack size limits the total number of disks */
	int disks = sh->disks;
	struct page *xor_srcs[disks];
	struct dma_async_tx_descriptor *tx;

	int count = 0, pd_idx = sh->pd_idx, i;
	struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		if (i != pd_idx)
			xor_srcs[count++] = dev->page;
	}

	tx = async_xor_zero_sum(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
		&sh->ops.zero_sum_result, 0, NULL, NULL, NULL);

	atomic_inc(&sh->count);
	tx = async_trigger_callback(ASYNC_TX_DEP_ACK | ASYNC_TX_ACK, tx,
		ops_complete_check, sh);
}
static void raid5_run_ops(struct stripe_head *sh, unsigned long ops_request)
{
	int overlap_clear = 0, i, disks = sh->disks;
	struct dma_async_tx_descriptor *tx = NULL;

	if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
		ops_run_biofill(sh);
		overlap_clear++;
	}

	if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) {
		tx = ops_run_compute5(sh);
		/* terminate the chain if postxor is not set to be run */
		if (tx && !test_bit(STRIPE_OP_POSTXOR, &ops_request))
			async_tx_ack(tx);
	}

	if (test_bit(STRIPE_OP_PREXOR, &ops_request))
		tx = ops_run_prexor(sh, tx);

	if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) {
		tx = ops_run_biodrain(sh, tx);
		overlap_clear++;
	}

	if (test_bit(STRIPE_OP_POSTXOR, &ops_request))
		ops_run_postxor(sh, tx);

	if (test_bit(STRIPE_OP_CHECK, &ops_request))
		ops_run_check(sh);

	if (overlap_clear)
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (test_and_clear_bit(R5_Overlap, &dev->flags))
				wake_up(&sh->raid_conf->wait_for_overlap);
		}
}
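/*
 * Illustrative sketch (not part of the driver): a typical
 * read-modify-write pass sets STRIPE_OP_PREXOR, STRIPE_OP_BIODRAIN and
 * STRIPE_OP_POSTXOR, so the descriptor chain built above is
 * prexor -> biodrain -> postxor, each stage depending on the previous
 * tx.
 */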
static int grow_one_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh;
	sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL);
	if (!sh)
		return 0;
	memset(sh, 0, sizeof(*sh) + (conf->raid_disks-1)*sizeof(struct r5dev));
	sh->raid_conf = conf;
	spin_lock_init(&sh->lock);

	if (grow_buffers(sh, conf->raid_disks)) {
		shrink_buffers(sh, conf->raid_disks);
		kmem_cache_free(conf->slab_cache, sh);
		return 0;
	}
	sh->disks = conf->raid_disks;
	/* we just created an active stripe so... */
	atomic_set(&sh->count, 1);
	atomic_inc(&conf->active_stripes);
	INIT_LIST_HEAD(&sh->lru);
	release_stripe(sh);
	return 1;
}
static int grow_stripes(raid5_conf_t *conf, int num)
{
	struct kmem_cache *sc;
	int devs = conf->raid_disks;

	sprintf(conf->cache_name[0], "raid5-%s", mdname(conf->mddev));
	sprintf(conf->cache_name[1], "raid5-%s-alt", mdname(conf->mddev));
	conf->active_name = 0;
	sc = kmem_cache_create(conf->cache_name[conf->active_name],
			       sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
			       0, 0, NULL);
	if (!sc)
		return 1;
	conf->slab_cache = sc;
	conf->pool_size = devs;
	while (num--)
		if (!grow_one_stripe(conf))
			return 1;
	return 0;
}
#ifdef CONFIG_MD_RAID5_RESHAPE
static int resize_stripes(raid5_conf_t *conf, int newsize)
{
	/* Make all the stripes able to hold 'newsize' devices.
	 * New slots in each stripe get 'page' set to a new page.
	 *
	 * This happens in stages:
	 * 1/ create a new kmem_cache and allocate the required number of
	 *    stripe_heads.
	 * 2/ gather all the old stripe_heads and transfer the pages across
	 *    to the new stripe_heads.  This will have the side effect of
	 *    freezing the array as once all stripe_heads have been collected,
	 *    no IO will be possible.  Old stripe heads are freed once their
	 *    pages have been transferred over, and the old kmem_cache is
	 *    freed when all stripes are done.
	 * 3/ reallocate conf->disks to be suitably bigger.  If this fails,
	 *    we simply return a failure status - no need to clean anything up.
	 * 4/ allocate new pages for the new slots in the new stripe_heads.
	 *    If this fails, we don't bother trying to shrink the
	 *    stripe_heads down again, we just leave them as they are.
	 *    As each stripe_head is processed the new one is released into
	 *    service.
	 *
	 * Once step2 is started, we cannot afford to wait for a write,
	 * so we use GFP_NOIO allocations.
	 */
	struct stripe_head *osh, *nsh;
	LIST_HEAD(newstripes);
	struct disk_info *ndisks;
	int err;
	struct kmem_cache *sc;
	int i;

	if (newsize <= conf->pool_size)
		return 0; /* never bother to shrink */

	err = md_allow_write(conf->mddev);
	if (err)
		return err;

	/* Step 1 */
	sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
			       sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
			       0, 0, NULL);
	if (!sc)
		return -ENOMEM;

	for (i = conf->max_nr_stripes; i; i--) {
		nsh = kmem_cache_alloc(sc, GFP_KERNEL);
		if (!nsh)
			break;

		memset(nsh, 0, sizeof(*nsh) + (newsize-1)*sizeof(struct r5dev));

		nsh->raid_conf = conf;
		spin_lock_init(&nsh->lock);

		list_add(&nsh->lru, &newstripes);
	}
	if (i) {
		/* didn't get enough, give up */
		while (!list_empty(&newstripes)) {
			nsh = list_entry(newstripes.next, struct stripe_head, lru);
			list_del(&nsh->lru);
			kmem_cache_free(sc, nsh);
		}
		kmem_cache_destroy(sc);
		return -ENOMEM;
	}
	/* Step 2 - Must use GFP_NOIO now.
	 * OK, we have enough stripes, start collecting inactive
	 * stripes and copying them over
	 */
	list_for_each_entry(nsh, &newstripes, lru) {
		spin_lock_irq(&conf->device_lock);
		wait_event_lock_irq(conf->wait_for_stripe,
				    !list_empty(&conf->inactive_list),
				    conf->device_lock,
				    unplug_slaves(conf->mddev)
			);
		osh = get_free_stripe(conf);
		spin_unlock_irq(&conf->device_lock);
		atomic_set(&nsh->count, 1);
		for(i=0; i<conf->pool_size; i++)
			nsh->dev[i].page = osh->dev[i].page;
		for( ; i<newsize; i++)
			nsh->dev[i].page = NULL;
		kmem_cache_free(conf->slab_cache, osh);
	}
	kmem_cache_destroy(conf->slab_cache);

	/* Step 3.
	 * At this point, we are holding all the stripes so the array
	 * is completely stalled, so now is a good time to resize
	 * conf->disks.
	 */
	ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO);
	if (ndisks) {
		for (i=0; i<conf->raid_disks; i++)
			ndisks[i] = conf->disks[i];
		kfree(conf->disks);
		conf->disks = ndisks;
	} else
		err = -ENOMEM;

	/* Step 4, return new stripes to service */
	while(!list_empty(&newstripes)) {
		nsh = list_entry(newstripes.next, struct stripe_head, lru);
		list_del_init(&nsh->lru);
		for (i=conf->raid_disks; i < newsize; i++)
			if (nsh->dev[i].page == NULL) {
				struct page *p = alloc_page(GFP_NOIO);
				nsh->dev[i].page = p;
				if (!p)
					err = -ENOMEM;
			}
		release_stripe(nsh);
	}
	/* critical section passed, GFP_NOIO no longer needed */

	conf->slab_cache = sc;
	conf->active_name = 1-conf->active_name;
	conf->pool_size = newsize;
	return err;
}
#endif
static int drop_one_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh;

	spin_lock_irq(&conf->device_lock);
	sh = get_free_stripe(conf);
	spin_unlock_irq(&conf->device_lock);
	if (!sh)
		return 0;
	BUG_ON(atomic_read(&sh->count));
	shrink_buffers(sh, conf->pool_size);
	kmem_cache_free(conf->slab_cache, sh);
	atomic_dec(&conf->active_stripes);
	return 1;
}
static void shrink_stripes(raid5_conf_t *conf)
{
	while (drop_one_stripe(conf))
		;

	if (conf->slab_cache)
		kmem_cache_destroy(conf->slab_cache);
	conf->slab_cache = NULL;
}
static void raid5_end_read_request(struct bio * bi, int error)
{
	struct stripe_head *sh = bi->bi_private;
	raid5_conf_t *conf = sh->raid_conf;
	int disks = sh->disks, i;
	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
	char b[BDEVNAME_SIZE];
	mdk_rdev_t *rdev;

	for (i=0 ; i<disks; i++)
		if (bi == &sh->dev[i].req)
			break;

	pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n",
		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
		uptodate);
	if (i == disks) {
		BUG();
		return;
	}

	if (uptodate) {
		set_bit(R5_UPTODATE, &sh->dev[i].flags);
		if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
			rdev = conf->disks[i].rdev;
			printk_rl(KERN_INFO "raid5:%s: read error corrected"
				  " (%lu sectors at %llu on %s)\n",
				  mdname(conf->mddev), STRIPE_SECTORS,
				  (unsigned long long)(sh->sector
						       + rdev->data_offset),
				  bdevname(rdev->bdev, b));
			clear_bit(R5_ReadError, &sh->dev[i].flags);
			clear_bit(R5_ReWrite, &sh->dev[i].flags);
		}
		if (atomic_read(&conf->disks[i].rdev->read_errors))
			atomic_set(&conf->disks[i].rdev->read_errors, 0);
	} else {
		const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
		int retry = 0;
		rdev = conf->disks[i].rdev;

		clear_bit(R5_UPTODATE, &sh->dev[i].flags);
		atomic_inc(&rdev->read_errors);
		if (conf->mddev->degraded)
			printk_rl(KERN_WARNING
				  "raid5:%s: read error not correctable "
				  "(sector %llu on %s).\n",
				  mdname(conf->mddev),
				  (unsigned long long)(sh->sector
						       + rdev->data_offset),
				  bdn);
		else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
			/* Oh, no!!! */
			printk_rl(KERN_WARNING
				  "raid5:%s: read error NOT corrected!! "
				  "(sector %llu on %s).\n",
				  mdname(conf->mddev),
				  (unsigned long long)(sh->sector
						       + rdev->data_offset),
				  bdn);
		else if (atomic_read(&rdev->read_errors)
			 > conf->max_nr_stripes)
			printk(KERN_WARNING
			       "raid5:%s: Too many read errors, failing device %s.\n",
			       mdname(conf->mddev), bdn);
		else
			retry = 1;
		if (retry)
			set_bit(R5_ReadError, &sh->dev[i].flags);
		else {
			clear_bit(R5_ReadError, &sh->dev[i].flags);
			clear_bit(R5_ReWrite, &sh->dev[i].flags);
			md_error(conf->mddev, rdev);
		}
	}
	rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
	clear_bit(R5_LOCKED, &sh->dev[i].flags);
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}
static void raid5_end_write_request(struct bio *bi, int error)
{
	struct stripe_head *sh = bi->bi_private;
	raid5_conf_t *conf = sh->raid_conf;
	int disks = sh->disks, i;
	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);

	for (i=0 ; i<disks; i++)
		if (bi == &sh->dev[i].req)
			break;

	pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n",
		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
		uptodate);
	if (i == disks) {
		BUG();
		return;
	}

	if (!uptodate)
		md_error(conf->mddev, conf->disks[i].rdev);

	rdev_dec_pending(conf->disks[i].rdev, conf->mddev);

	clear_bit(R5_LOCKED, &sh->dev[i].flags);
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}
static sector_t compute_blocknr(struct stripe_head *sh, int i);

static void raid5_build_block(struct stripe_head *sh, int i)
{
	struct r5dev *dev = &sh->dev[i];

	bio_init(&dev->req);
	dev->req.bi_io_vec = &dev->vec;
	dev->req.bi_vcnt++;
	dev->req.bi_max_vecs++;
	dev->vec.bv_page = dev->page;
	dev->vec.bv_len = STRIPE_SIZE;
	dev->vec.bv_offset = 0;

	dev->req.bi_sector = sh->sector;
	dev->req.bi_private = sh;

	dev->flags = 0;
	dev->sector = compute_blocknr(sh, i);
}
static void error(mddev_t *mddev, mdk_rdev_t *rdev)
{
	char b[BDEVNAME_SIZE];
	raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
	pr_debug("raid5: error called\n");

	if (!test_bit(Faulty, &rdev->flags)) {
		set_bit(MD_CHANGE_DEVS, &mddev->flags);
		if (test_and_clear_bit(In_sync, &rdev->flags)) {
			unsigned long flags;
			spin_lock_irqsave(&conf->device_lock, flags);
			mddev->degraded++;
			spin_unlock_irqrestore(&conf->device_lock, flags);
			/*
			 * if recovery was running, make sure it aborts.
			 */
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
		}
		set_bit(Faulty, &rdev->flags);
		printk(KERN_ALERT
		       "raid5: Disk failure on %s, disabling device.\n"
		       "raid5: Operation continuing on %d devices.\n",
		       bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded);
	}
}
/*
 * Input: a 'big' sector number,
 * Output: index of the data and parity disk, and the sector # in them.
 */
static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
				     int previous, int *dd_idx,
				     struct stripe_head *sh)
{
	long stripe;
	unsigned long chunk_number;
	unsigned int chunk_offset;
	int pd_idx, qd_idx;
	sector_t new_sector;
	int sectors_per_chunk = conf->chunk_size >> 9;
	int raid_disks = previous ? conf->previous_raid_disks
				  : conf->raid_disks;
	int data_disks = raid_disks - conf->max_degraded;

	/* First compute the information on this sector */

	/*
	 * Compute the chunk number and the sector offset inside the chunk
	 */
	chunk_offset = sector_div(r_sector, sectors_per_chunk);
	chunk_number = r_sector;
	BUG_ON(r_sector != chunk_number);

	/*
	 * Compute the stripe number
	 */
	stripe = chunk_number / data_disks;

	/*
	 * Compute the data disk and parity disk indexes inside the stripe
	 */
	*dd_idx = chunk_number % data_disks;

	/*
	 * Select the parity disk based on the user selected algorithm.
	 */
	pd_idx = qd_idx = ~0;
	switch(conf->level) {
	case 4:
		pd_idx = data_disks;
		break;
	case 5:
		switch (conf->algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
			pd_idx = data_disks - stripe % raid_disks;
			if (*dd_idx >= pd_idx)
				(*dd_idx)++;
			break;
		case ALGORITHM_RIGHT_ASYMMETRIC:
			pd_idx = stripe % raid_disks;
			if (*dd_idx >= pd_idx)
				(*dd_idx)++;
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
			pd_idx = data_disks - stripe % raid_disks;
			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
			break;
		case ALGORITHM_RIGHT_SYMMETRIC:
			pd_idx = stripe % raid_disks;
			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
			break;
		case ALGORITHM_PARITY_0:
			pd_idx = 0;
			(*dd_idx)++;
			break;
		case ALGORITHM_PARITY_N:
			pd_idx = data_disks;
			break;
		default:
			printk(KERN_ERR "raid5: unsupported algorithm %d\n",
				conf->algorithm);
			BUG();
		}
		break;
	case 6:
		switch (conf->algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
			pd_idx = raid_disks - 1 - (stripe % raid_disks);
			qd_idx = pd_idx + 1;
			if (pd_idx == raid_disks-1) {
				(*dd_idx)++;	/* Q D D D P */
				qd_idx = 0;
			} else if (*dd_idx >= pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			break;
		case ALGORITHM_RIGHT_ASYMMETRIC:
			pd_idx = stripe % raid_disks;
			qd_idx = pd_idx + 1;
			if (pd_idx == raid_disks-1) {
				(*dd_idx)++;	/* Q D D D P */
				qd_idx = 0;
			} else if (*dd_idx >= pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
			pd_idx = raid_disks - 1 - (stripe % raid_disks);
			qd_idx = (pd_idx + 1) % raid_disks;
			*dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
			break;
		case ALGORITHM_RIGHT_SYMMETRIC:
			pd_idx = stripe % raid_disks;
			qd_idx = (pd_idx + 1) % raid_disks;
			*dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
			break;

		case ALGORITHM_PARITY_0:
			pd_idx = 0;
			qd_idx = 1;
			(*dd_idx) += 2;
			break;
		case ALGORITHM_PARITY_N:
			pd_idx = data_disks;
			qd_idx = data_disks + 1;
			break;

		case ALGORITHM_ROTATING_ZERO_RESTART:
			/* Exactly the same as RIGHT_ASYMMETRIC, but the
			 * order of blocks for computing Q is different.
			 */
			pd_idx = stripe % raid_disks;
			qd_idx = pd_idx + 1;
			if (pd_idx == raid_disks-1) {
				(*dd_idx)++;	/* Q D D D P */
				qd_idx = 0;
			} else if (*dd_idx >= pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			break;

		case ALGORITHM_ROTATING_N_RESTART:
			/* Same as left_asymmetric, but the first stripe is
			 * D D D P Q  rather than
			 * Q D D D P
			 */
			pd_idx = raid_disks - 1 - ((stripe + 1) % raid_disks);
			qd_idx = pd_idx + 1;
			if (pd_idx == raid_disks-1) {
				(*dd_idx)++;	/* Q D D D P */
				qd_idx = 0;
			} else if (*dd_idx >= pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			break;

		case ALGORITHM_ROTATING_N_CONTINUE:
			/* Same as left_symmetric but Q is before P */
			pd_idx = raid_disks - 1 - (stripe % raid_disks);
			qd_idx = (pd_idx + raid_disks - 1) % raid_disks;
			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
			break;

		case ALGORITHM_LEFT_ASYMMETRIC_6:
			/* RAID5 left_asymmetric, with Q on last device */
			pd_idx = data_disks - stripe % (raid_disks-1);
			if (*dd_idx >= pd_idx)
				(*dd_idx)++;
			qd_idx = raid_disks - 1;
			break;

		case ALGORITHM_RIGHT_ASYMMETRIC_6:
			pd_idx = stripe % (raid_disks-1);
			if (*dd_idx >= pd_idx)
				(*dd_idx)++;
			qd_idx = raid_disks - 1;
			break;

		case ALGORITHM_LEFT_SYMMETRIC_6:
			pd_idx = data_disks - stripe % (raid_disks-1);
			*dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
			qd_idx = raid_disks - 1;
			break;

		case ALGORITHM_RIGHT_SYMMETRIC_6:
			pd_idx = stripe % (raid_disks-1);
			*dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
			qd_idx = raid_disks - 1;
			break;

		case ALGORITHM_PARITY_0_6:
			pd_idx = 0;
			(*dd_idx)++;
			qd_idx = raid_disks - 1;
			break;

		default:
			printk(KERN_CRIT "raid6: unsupported algorithm %d\n",
			       conf->algorithm);
			BUG();
		}
		break;
	}

	if (sh) {
		sh->pd_idx = pd_idx;
		sh->qd_idx = qd_idx;
	}
	/*
	 * Finally, compute the new sector number
	 */
	new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset;
	return new_sector;
}
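/*
 * Worked example (illustrative): 5-disk RAID-5, 64KiB chunks
 * (sectors_per_chunk == 128), ALGORITHM_LEFT_ASYMMETRIC, r_sector == 1000.
 * chunk_offset == 104, chunk_number == 7, data_disks == 4, so stripe == 1
 * and *dd_idx == 3.  pd_idx == 4 - (1 % 5) == 3; since *dd_idx >= pd_idx
 * it is bumped to 4, and new_sector == 1 * 128 + 104 == 232.
 */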
static sector_t compute_blocknr(struct stripe_head *sh, int i)
{
	raid5_conf_t *conf = sh->raid_conf;
	int raid_disks = sh->disks;
	int data_disks = raid_disks - conf->max_degraded;
	sector_t new_sector = sh->sector, check;
	int sectors_per_chunk = conf->chunk_size >> 9;
	sector_t stripe;
	int chunk_offset;
	int chunk_number, dummy1, dd_idx = i;
	sector_t r_sector;
	struct stripe_head sh2;


	chunk_offset = sector_div(new_sector, sectors_per_chunk);
	stripe = new_sector;
	BUG_ON(new_sector != stripe);

	if (i == sh->pd_idx)
		return 0;
	switch(conf->level) {
	case 4: break;
	case 5:
		switch (conf->algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
		case ALGORITHM_RIGHT_ASYMMETRIC:
			if (i > sh->pd_idx)
				i--;
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
		case ALGORITHM_RIGHT_SYMMETRIC:
			if (i < sh->pd_idx)
				i += raid_disks;
			i -= (sh->pd_idx + 1);
			break;
		case ALGORITHM_PARITY_0:
			i -= 1;
			break;
		case ALGORITHM_PARITY_N:
			break;
		default:
			printk(KERN_ERR "raid5: unsupported algorithm %d\n",
			       conf->algorithm);
			BUG();
		}
		break;
	case 6:
		if (i == sh->qd_idx)
			return 0; /* It is the Q disk */
		switch (conf->algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
		case ALGORITHM_RIGHT_ASYMMETRIC:
		case ALGORITHM_ROTATING_ZERO_RESTART:
		case ALGORITHM_ROTATING_N_RESTART:
			if (sh->pd_idx == raid_disks-1)
				i--;	/* Q D D D P */
			else if (i > sh->pd_idx)
				i -= 2; /* D D P Q D */
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
		case ALGORITHM_RIGHT_SYMMETRIC:
			if (sh->pd_idx == raid_disks-1)
				i--; /* Q D D D P */
			else {
				/* D D P Q D */
				if (i < sh->pd_idx)
					i += raid_disks;
				i -= (sh->pd_idx + 2);
			}
			break;
		case ALGORITHM_PARITY_0:
			i -= 2;
			break;
		case ALGORITHM_PARITY_N:
			break;
		case ALGORITHM_ROTATING_N_CONTINUE:
			if (sh->pd_idx == 0)
				i--;	/* P D D D Q */
			else if (i > sh->pd_idx)
				i -= 2; /* D D Q P D */
			break;
		case ALGORITHM_LEFT_ASYMMETRIC_6:
		case ALGORITHM_RIGHT_ASYMMETRIC_6:
			if (i > sh->pd_idx)
				i--;
			break;
		case ALGORITHM_LEFT_SYMMETRIC_6:
		case ALGORITHM_RIGHT_SYMMETRIC_6:
			if (i < sh->pd_idx)
				i += data_disks + 1;
			i -= (sh->pd_idx + 1);
			break;
		case ALGORITHM_PARITY_0_6:
			i -= 1;
			break;
		default:
			printk(KERN_CRIT "raid6: unsupported algorithm %d\n",
			       conf->algorithm);
			BUG();
		}
		break;
	}

	chunk_number = stripe * data_disks + i;
	r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset;

	check = raid5_compute_sector(conf, r_sector,
				     (raid_disks != conf->raid_disks),
				     &dummy1, &sh2);
	if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx
		|| sh2.qd_idx != sh->qd_idx) {
		printk(KERN_ERR "compute_blocknr: map not correct\n");
		return 0;
	}
	return r_sector;
}
/*
 * Copy data between a page in the stripe cache, and one or more bion
 * The page could align with the middle of the bio, or there could be
 * several bion, each with several bio_vecs, which cover part of the page
 * Multiple bion are linked together on bi_next.  There may be extras
 * at the end of this list.  We ignore them.
 */
static void copy_data(int frombio, struct bio *bio,
		     struct page *page,
		     sector_t sector)
{
	char *pa = page_address(page);
	struct bio_vec *bvl;
	int i;
	int page_offset;

	if (bio->bi_sector >= sector)
		page_offset = (signed)(bio->bi_sector - sector) * 512;
	else
		page_offset = (signed)(sector - bio->bi_sector) * -512;
	bio_for_each_segment(bvl, bio, i) {
		int len = bio_iovec_idx(bio,i)->bv_len;
		int clen;
		int b_offset = 0;

		if (page_offset < 0) {
			b_offset = -page_offset;
			page_offset += b_offset;
			len -= b_offset;
		}

		if (len > 0 && page_offset + len > STRIPE_SIZE)
			clen = STRIPE_SIZE - page_offset;
		else
			clen = len;

		if (clen > 0) {
			char *ba = __bio_kmap_atomic(bio, i, KM_USER0);
			if (frombio)
				memcpy(pa+page_offset, ba+b_offset, clen);
			else
				memcpy(ba+b_offset, pa+page_offset, clen);
			__bio_kunmap_atomic(ba, KM_USER0);
		}
		if (clen < len) /* hit end of page */
			break;
		page_offset += len;
	}
}
#define check_xor()	do {						  \
				if (count == MAX_XOR_BLOCKS) {		  \
					xor_blocks(count, STRIPE_SIZE, dest, ptr);\
					count = 0;			  \
				}					  \
			} while(0)
static void compute_parity6(struct stripe_head *sh, int method)
{
	raid5_conf_t *conf = sh->raid_conf;
	int i, pd_idx, qd_idx, d0_idx, disks = sh->disks, count;
	struct bio *chosen;
	/**** FIX THIS: This could be very bad if disks is close to 256 ****/
	void *ptrs[disks];

	pd_idx = sh->pd_idx;
	qd_idx = sh->qd_idx;
	d0_idx = raid6_d0(sh);

	pr_debug("compute_parity, stripe %llu, method %d\n",
		(unsigned long long)sh->sector, method);

	switch(method) {
	case READ_MODIFY_WRITE:
		BUG();		/* READ_MODIFY_WRITE N/A for RAID-6 */
	case RECONSTRUCT_WRITE:
		for (i = disks; i-- ;)
			if ( i != pd_idx && i != qd_idx && sh->dev[i].towrite ) {
				chosen = sh->dev[i].towrite;
				sh->dev[i].towrite = NULL;

				if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
					wake_up(&conf->wait_for_overlap);

				BUG_ON(sh->dev[i].written);
				sh->dev[i].written = chosen;
			}
		break;
	case CHECK_PARITY:
		BUG();		/* Not implemented yet */
	}

	for (i = disks; i--;)
		if (sh->dev[i].written) {
			sector_t sector = sh->dev[i].sector;
			struct bio *wbi = sh->dev[i].written;
			while (wbi && wbi->bi_sector < sector + STRIPE_SECTORS) {
				copy_data(1, wbi, sh->dev[i].page, sector);
				wbi = r5_next_bio(wbi, sector);
			}

			set_bit(R5_LOCKED, &sh->dev[i].flags);
			set_bit(R5_UPTODATE, &sh->dev[i].flags);
		}

	/* Note that unlike RAID-5, the ordering of the disks matters greatly.*/
	/* FIX: Is this ordering of drives even remotely optimal? */
	count = 0;
	i = d0_idx;
	do {
		int slot = raid6_idx_to_slot(i, sh, &count);
		ptrs[slot] = page_address(sh->dev[i].page);
		if (slot < sh->disks - 2 &&
		    !test_bit(R5_UPTODATE, &sh->dev[i].flags)) {
			printk(KERN_ERR "block %d/%d not uptodate "
			       "on parity calc\n", i, count);
			BUG();
		}
		i = raid6_next_disk(i, disks);
	} while (i != d0_idx);
	BUG_ON(count+2 != disks);

	raid6_call.gen_syndrome(disks, STRIPE_SIZE, ptrs);

	switch(method) {
	case RECONSTRUCT_WRITE:
		set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
		set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags);
		set_bit(R5_LOCKED,   &sh->dev[pd_idx].flags);
		set_bit(R5_LOCKED,   &sh->dev[qd_idx].flags);
		break;
	case UPDATE_PARITY:
		set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
		set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags);
		break;
	}
}
/* Compute one missing block */
static void compute_block_1(struct stripe_head *sh, int dd_idx, int nozero)
{
	int i, count, disks = sh->disks;
	void *ptr[MAX_XOR_BLOCKS], *dest, *p;
	int qd_idx = sh->qd_idx;

	pr_debug("compute_block_1, stripe %llu, idx %d\n",
		(unsigned long long)sh->sector, dd_idx);

	if ( dd_idx == qd_idx ) {
		/* We're actually computing the Q drive */
		compute_parity6(sh, UPDATE_PARITY);
	} else {
		dest = page_address(sh->dev[dd_idx].page);
		if (!nozero) memset(dest, 0, STRIPE_SIZE);
		count = 0;
		for (i = disks ; i--; ) {
			if (i == dd_idx || i == qd_idx)
				continue;
			p = page_address(sh->dev[i].page);
			if (test_bit(R5_UPTODATE, &sh->dev[i].flags))
				ptr[count++] = p;
			else
				printk("compute_block() %d, stripe %llu, %d"
				       " not present\n", dd_idx,
				       (unsigned long long)sh->sector, i);

			check_xor();
		}
		if (count)
			xor_blocks(count, STRIPE_SIZE, dest, ptr);
		if (!nozero) set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
		else clear_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
	}
}
/* Compute two missing blocks */
static void compute_block_2(struct stripe_head *sh, int dd_idx1, int dd_idx2)
{
	int i, count, disks = sh->disks;
	int d0_idx = raid6_d0(sh);
	int faila = -1, failb = -1;
	/**** FIX THIS: This could be very bad if disks is close to 256 ****/
	void *ptrs[disks];

	count = 0;
	i = d0_idx;
	do {
		int slot;
		slot = raid6_idx_to_slot(i, sh, &count);
		ptrs[slot] = page_address(sh->dev[i].page);
		if (i == dd_idx1)
			faila = slot;
		if (i == dd_idx2)
			failb = slot;
		i = raid6_next_disk(i, disks);
	} while (i != d0_idx);
	BUG_ON(count+2 != disks);

	BUG_ON(faila == failb);
	if ( failb < faila ) { int tmp = faila; faila = failb; failb = tmp; }

	pr_debug("compute_block_2, stripe %llu, idx %d,%d (%d,%d)\n",
		 (unsigned long long)sh->sector, dd_idx1, dd_idx2,
		 faila, failb);

	if ( failb == disks-1 ) {
		/* Q disk is one of the missing disks */
		if ( faila == disks-2 ) {
			/* Missing P+Q, just recompute */
			compute_parity6(sh, UPDATE_PARITY);
			return;
		} else {
			/* We're missing D+Q; recompute D from P */
			compute_block_1(sh, ((dd_idx1 == sh->qd_idx) ?
					     dd_idx2 : dd_idx1), 0);
			compute_parity6(sh, UPDATE_PARITY); /* Is this necessary? */
			return;
		}
	}

	/* We're missing D+P or D+D; */
	if (failb == disks-2) {
		/* We're missing D+P. */
		raid6_datap_recov(disks, STRIPE_SIZE, faila, ptrs);
	} else {
		/* We're missing D+D. */
		raid6_2data_recov(disks, STRIPE_SIZE, faila, failb, ptrs);
	}

	/* Both the above update both missing blocks */
	set_bit(R5_UPTODATE, &sh->dev[dd_idx1].flags);
	set_bit(R5_UPTODATE, &sh->dev[dd_idx2].flags);
}
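/*
 * Illustrative summary (not in the original source): with disks == 6 the
 * slots are D0..D3, P (slot 4) and Q (slot 5).  failb == 5 means Q is one
 * of the failures: P+Q lost is a plain parity recompute, and D+Q lost
 * recovers D from P by simple XOR before Q is regenerated.  Otherwise
 * failb == 4 is the D+P case (raid6_datap_recov) and anything else is
 * D+D (raid6_2data_recov), both solved from the P/Q syndromes.
 */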
static void
schedule_reconstruction5(struct stripe_head *sh, struct stripe_head_state *s,
			 int rcw, int expand)
{
	int i, pd_idx = sh->pd_idx, disks = sh->disks;

	if (rcw) {
		/* if we are not expanding this is a proper write request, and
		 * there will be bios with new data to be drained into the
		 * stripe cache
		 */
		if (!expand) {
			sh->reconstruct_state = reconstruct_state_drain_run;
			set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
		} else
			sh->reconstruct_state = reconstruct_state_run;

		set_bit(STRIPE_OP_POSTXOR, &s->ops_request);

		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];

			if (dev->towrite) {
				set_bit(R5_LOCKED, &dev->flags);
				set_bit(R5_Wantdrain, &dev->flags);
				if (!expand)
					clear_bit(R5_UPTODATE, &dev->flags);
				s->locked++;
			}
		}
		if (s->locked + 1 == disks)
			if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
				atomic_inc(&sh->raid_conf->pending_full_writes);
	} else {
		BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) ||
			test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags)));

		sh->reconstruct_state = reconstruct_state_prexor_drain_run;
		set_bit(STRIPE_OP_PREXOR, &s->ops_request);
		set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
		set_bit(STRIPE_OP_POSTXOR, &s->ops_request);

		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (i == pd_idx)
				continue;

			if (dev->towrite &&
			    (test_bit(R5_UPTODATE, &dev->flags) ||
			     test_bit(R5_Wantcompute, &dev->flags))) {
				set_bit(R5_Wantdrain, &dev->flags);
				set_bit(R5_LOCKED, &dev->flags);
				clear_bit(R5_UPTODATE, &dev->flags);
				s->locked++;
			}
		}
	}

	/* keep the parity disk locked while asynchronous operations
	 * are in flight
	 */
	set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);
	clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
	s->locked++;

	pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n",
		__func__, (unsigned long long)sh->sector,
		s->locked, s->ops_request);
}
/*
 * Each stripe/dev can have one or more bion attached.
 * toread/towrite point to the first in a chain.
 * The bi_next chain must be in order.
 */
static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite)
{
	struct bio **bip;
	raid5_conf_t *conf = sh->raid_conf;
	int firstwrite=0;

	pr_debug("adding bh b#%llu to stripe s#%llu\n",
		(unsigned long long)bi->bi_sector,
		(unsigned long long)sh->sector);


	spin_lock(&sh->lock);
	spin_lock_irq(&conf->device_lock);
	if (forwrite) {
		bip = &sh->dev[dd_idx].towrite;
		if (*bip == NULL && sh->dev[dd_idx].written == NULL)
			firstwrite = 1;
	} else
		bip = &sh->dev[dd_idx].toread;
	while (*bip && (*bip)->bi_sector < bi->bi_sector) {
		if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector)
			goto overlap;
		bip = & (*bip)->bi_next;
	}
	if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
		goto overlap;

	BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
	if (*bip)
		bi->bi_next = *bip;
	*bip = bi;
	bi->bi_phys_segments++;
	spin_unlock_irq(&conf->device_lock);
	spin_unlock(&sh->lock);

	pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
		(unsigned long long)bi->bi_sector,
		(unsigned long long)sh->sector, dd_idx);

	if (conf->mddev->bitmap && firstwrite) {
		bitmap_startwrite(conf->mddev->bitmap, sh->sector,
				  STRIPE_SECTORS, 0);
		sh->bm_seq = conf->seq_flush+1;
		set_bit(STRIPE_BIT_DELAY, &sh->state);
	}

	if (forwrite) {
		/* check if page is covered */
		sector_t sector = sh->dev[dd_idx].sector;
		for (bi=sh->dev[dd_idx].towrite;
		     sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
			     bi && bi->bi_sector <= sector;
		     bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
			if (bi->bi_sector + (bi->bi_size>>9) >= sector)
				sector = bi->bi_sector + (bi->bi_size>>9);
		}
		if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
			set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
	}
	return 1;

 overlap:
	set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
	spin_unlock_irq(&conf->device_lock);
	spin_unlock(&sh->lock);
	return 0;
}
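/*
 * Illustrative sketch of the overlap rules above (assuming
 * STRIPE_SECTORS == 8): with an existing write at sectors 8-11 queued on
 * this stripe+device, a new bio at sectors 10-13 trips the
 * "(*bip)->bi_sector + size > bi->bi_sector" test, so R5_Overlap is set
 * and the caller must wait; a bio at sectors 12-15 chains in cleanly,
 * and since 8-15 is then fully covered R5_OVERWRITE is set.
 */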
static void end_reshape(raid5_conf_t *conf);

static int page_is_zero(struct page *p)
{
	char *a = page_address(p);
	return ((*(u32*)a) == 0 &&
		memcmp(a, a+4, STRIPE_SIZE-4)==0);
}
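/*
 * Design note (illustrative, not from the original source): checking the
 * first word and then memcmp()ing the page against itself shifted by
 * four bytes proves every byte is zero by induction, without needing a
 * second zero-filled buffer to compare against.
 */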
static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous,
			    struct stripe_head *sh)
{
	int sectors_per_chunk = conf->chunk_size >> 9;
	int dd_idx;
	int chunk_offset = sector_div(stripe, sectors_per_chunk);
	int disks = previous ? conf->previous_raid_disks : conf->raid_disks;

	raid5_compute_sector(conf,
			     stripe * (disks - conf->max_degraded)
			     *sectors_per_chunk + chunk_offset,
			     previous,
			     &dd_idx, sh);
}
static void
handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh,
				struct stripe_head_state *s, int disks,
				struct bio **return_bi)
{
	int i;
	for (i = disks; i--; ) {
		struct bio *bi;
		int bitmap_end = 0;

		if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
			mdk_rdev_t *rdev;
			rcu_read_lock();
			rdev = rcu_dereference(conf->disks[i].rdev);
			if (rdev && test_bit(In_sync, &rdev->flags))
				/* multiple read failures in one stripe */
				md_error(conf->mddev, rdev);
			rcu_read_unlock();
		}
		spin_lock_irq(&conf->device_lock);
		/* fail all writes first */
		bi = sh->dev[i].towrite;
		sh->dev[i].towrite = NULL;
		if (bi) {
			s->to_write--;
			bitmap_end = 1;
		}

		if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
			wake_up(&conf->wait_for_overlap);

		while (bi && bi->bi_sector <
			sh->dev[i].sector + STRIPE_SECTORS) {
			struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
			clear_bit(BIO_UPTODATE, &bi->bi_flags);
			if (!raid5_dec_bi_phys_segments(bi)) {
				md_write_end(conf->mddev);
				bi->bi_next = *return_bi;
				*return_bi = bi;
			}
			bi = nextbi;
		}
		/* and fail all 'written' */
		bi = sh->dev[i].written;
		sh->dev[i].written = NULL;
		if (bi) bitmap_end = 1;
		while (bi && bi->bi_sector <
		       sh->dev[i].sector + STRIPE_SECTORS) {
			struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
			clear_bit(BIO_UPTODATE, &bi->bi_flags);
			if (!raid5_dec_bi_phys_segments(bi)) {
				md_write_end(conf->mddev);
				bi->bi_next = *return_bi;
				*return_bi = bi;
			}
			bi = bi2;
		}

		/* fail any reads if this device is non-operational and
		 * the data has not reached the cache yet.
		 */
		if (!test_bit(R5_Wantfill, &sh->dev[i].flags) &&
		    (!test_bit(R5_Insync, &sh->dev[i].flags) ||
		      test_bit(R5_ReadError, &sh->dev[i].flags))) {
			bi = sh->dev[i].toread;
			sh->dev[i].toread = NULL;
			if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
				wake_up(&conf->wait_for_overlap);
			if (bi) s->to_read--;
			while (bi && bi->bi_sector <
			       sh->dev[i].sector + STRIPE_SECTORS) {
				struct bio *nextbi =
					r5_next_bio(bi, sh->dev[i].sector);
				clear_bit(BIO_UPTODATE, &bi->bi_flags);
				if (!raid5_dec_bi_phys_segments(bi)) {
					bi->bi_next = *return_bi;
					*return_bi = bi;
				}
				bi = nextbi;
			}
		}
		spin_unlock_irq(&conf->device_lock);
		if (bitmap_end)
			bitmap_endwrite(conf->mddev->bitmap, sh->sector,
					STRIPE_SECTORS, 0, 0);
	}

	if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
		if (atomic_dec_and_test(&conf->pending_full_writes))
			md_wakeup_thread(conf->mddev->thread);
}
/* fetch_block5 - checks the given member device to see if its data needs
 * to be read or computed to satisfy a request.
 *
 * Returns 1 when no more member devices need to be checked, otherwise returns
 * 0 to tell the loop in handle_stripe_fill5 to continue
 */
static int fetch_block5(struct stripe_head *sh, struct stripe_head_state *s,
			int disk_idx, int disks)
{
	struct r5dev *dev = &sh->dev[disk_idx];
	struct r5dev *failed_dev = &sh->dev[s->failed_num];

	/* is the data in this block needed, and can we get it? */
	if (!test_bit(R5_LOCKED, &dev->flags) &&
	    !test_bit(R5_UPTODATE, &dev->flags) &&
	    (dev->toread ||
	     (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
	     s->syncing || s->expanding ||
	     (s->failed &&
	      (failed_dev->toread ||
	       (failed_dev->towrite &&
		!test_bit(R5_OVERWRITE, &failed_dev->flags)))))) {
		/* We would like to get this block, possibly by computing it,
		 * otherwise read it if the backing disk is insync
		 */
		if ((s->uptodate == disks - 1) &&
		    (s->failed && disk_idx == s->failed_num)) {
			set_bit(STRIPE_COMPUTE_RUN, &sh->state);
			set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
			set_bit(R5_Wantcompute, &dev->flags);
			sh->ops.target = disk_idx;
			s->req_compute = 1;
			/* Careful: from this point on 'uptodate' is in the eye
			 * of raid5_run_ops which services 'compute' operations
			 * before writes. R5_Wantcompute flags a block that will
			 * be R5_UPTODATE by the time it is needed for a
			 * subsequent operation.
			 */
			s->uptodate++;
			return 1; /* uptodate + compute == disks */
		} else if (test_bit(R5_Insync, &dev->flags)) {
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantread, &dev->flags);
			s->locked++;
			pr_debug("Reading block %d (sync=%d)\n", disk_idx,
				s->syncing);
		}
	}

	return 0;
}
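/*
 * Illustrative sketch (not in the original source): on a 4-disk array
 * with one failed device, s->uptodate reaches disks - 1 == 3 once the
 * three healthy blocks are in the cache, so fetch_block5() schedules
 * STRIPE_OP_COMPUTE_BLK for the failed slot instead of issuing a read
 * that cannot succeed.
 */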
/**
 * handle_stripe_fill5 - read or compute data to satisfy pending requests.
 */
static void handle_stripe_fill5(struct stripe_head *sh,
			struct stripe_head_state *s, int disks)
{
	int i;

	/* look for blocks to read/compute, skip this if a compute
	 * is already in flight, or if the stripe contents are in the
	 * midst of changing due to a write
	 */
	if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
	    !sh->reconstruct_state)
		for (i = disks; i--; )
			if (fetch_block5(sh, s, i, disks))
				break;
	set_bit(STRIPE_HANDLE, &sh->state);
}
static void handle_stripe_fill6(struct stripe_head *sh,
			struct stripe_head_state *s, struct r6_state *r6s,
			int disks)
{
	int i;
	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		if (!test_bit(R5_LOCKED, &dev->flags) &&
		    !test_bit(R5_UPTODATE, &dev->flags) &&
		    (dev->toread || (dev->towrite &&
		     !test_bit(R5_OVERWRITE, &dev->flags)) ||
		     s->syncing || s->expanding ||
		     (s->failed >= 1 &&
		      (sh->dev[r6s->failed_num[0]].toread ||
		       s->to_write)) ||
		     (s->failed >= 2 &&
		      (sh->dev[r6s->failed_num[1]].toread ||
		       s->to_write)))) {
			/* we would like to get this block, possibly
			 * by computing it, but we might not be able to
			 */
			if ((s->uptodate == disks - 1) &&
			    (s->failed && (i == r6s->failed_num[0] ||
					   i == r6s->failed_num[1]))) {
				pr_debug("Computing stripe %llu block %d\n",
				       (unsigned long long)sh->sector, i);
				compute_block_1(sh, i, 0);
				s->uptodate++;
			} else if (s->uptodate == disks-2 && s->failed >= 2) {
				/* Computing 2-failure is *very* expensive; only
				 * do it if failed >= 2
				 */
				int other;
				for (other = disks; other--; ) {
					if (other == i)
						continue;
					if (!test_bit(R5_UPTODATE,
					      &sh->dev[other].flags))
						break;
				}
				BUG_ON(other < 0);
				pr_debug("Computing stripe %llu blocks %d,%d\n",
				       (unsigned long long)sh->sector,
				       i, other);
				compute_block_2(sh, i, other);
				s->uptodate += 2;
			} else if (test_bit(R5_Insync, &dev->flags)) {
				set_bit(R5_LOCKED, &dev->flags);
				set_bit(R5_Wantread, &dev->flags);
				s->locked++;
				pr_debug("Reading block %d (sync=%d)\n",
					i, s->syncing);
			}
		}
	}
	set_bit(STRIPE_HANDLE, &sh->state);
}
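/* Illustrative note: compute_block_1() is a single XOR pass, while
 * compute_block_2() has to run the Galois-field Q-syndrome math across all
 * surviving blocks, which is why the loop above only resorts to it when
 * s->uptodate == disks - 2 and failed >= 2, i.e. when no cheaper
 * single-block recovery can make progress.
 */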
/* handle_stripe_clean_event
 * any written block on an uptodate or failed drive can be returned.
 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
 * never LOCKED, so we don't need to test 'failed' directly.
 */
static void handle_stripe_clean_event(raid5_conf_t *conf,
	struct stripe_head *sh, int disks, struct bio **return_bi)
{
	int i;
	struct r5dev *dev;

	for (i = disks; i--; )
		if (sh->dev[i].written) {
			dev = &sh->dev[i];
			if (!test_bit(R5_LOCKED, &dev->flags) &&
				test_bit(R5_UPTODATE, &dev->flags)) {
				/* We can return any write requests */
				struct bio *wbi, *wbi2;
				int bitmap_end = 0;
				pr_debug("Return write for disc %d\n", i);
				spin_lock_irq(&conf->device_lock);
				wbi = dev->written;
				dev->written = NULL;
				while (wbi && wbi->bi_sector <
					dev->sector + STRIPE_SECTORS) {
					wbi2 = r5_next_bio(wbi, dev->sector);
					if (!raid5_dec_bi_phys_segments(wbi)) {
						md_write_end(conf->mddev);
						wbi->bi_next = *return_bi;
						*return_bi = wbi;
					}
					wbi = wbi2;
				}
				if (dev->towrite == NULL)
					bitmap_end = 1;
				spin_unlock_irq(&conf->device_lock);
				if (bitmap_end)
					bitmap_endwrite(conf->mddev->bitmap,
							sh->sector,
							STRIPE_SECTORS,
					 !test_bit(STRIPE_DEGRADED, &sh->state),
							0);
			}
		}

	if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
		if (atomic_dec_and_test(&conf->pending_full_writes))
			md_wakeup_thread(conf->mddev->thread);
}
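/* Worked example (hypothetical bio): for a write spanning three stripes,
 * the biased bi_phys_segments count evolves as
 *
 *	1 (bias set in make_request)
 *	-> 4 after three add_stripe_bio() calls
 *	-> 3 when make_request() drops its bias
 *	-> 2 -> 1 -> 0 as each stripe's write is returned here,
 *
 * and only the decrement that reaches zero calls md_write_end() and
 * chains the bio onto *return_bi.
 */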
static void handle_stripe_dirtying5(raid5_conf_t *conf,
		struct stripe_head *sh,	struct stripe_head_state *s, int disks)
{
	int rmw = 0, rcw = 0, i;
	for (i = disks; i--; ) {
		/* would I have to read this buffer for read_modify_write */
		struct r5dev *dev = &sh->dev[i];
		if ((dev->towrite || i == sh->pd_idx) &&
		    !test_bit(R5_LOCKED, &dev->flags) &&
		    !(test_bit(R5_UPTODATE, &dev->flags) ||
		      test_bit(R5_Wantcompute, &dev->flags))) {
			if (test_bit(R5_Insync, &dev->flags))
				rmw++;
			else
				rmw += 2*disks;  /* cannot read it */
		}
		/* Would I have to read this buffer for reconstruct_write */
		if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
		    !test_bit(R5_LOCKED, &dev->flags) &&
		    !(test_bit(R5_UPTODATE, &dev->flags) ||
		      test_bit(R5_Wantcompute, &dev->flags))) {
			if (test_bit(R5_Insync, &dev->flags)) rcw++;
			else
				rcw += 2*disks;
		}
	}
	pr_debug("for sector %llu, rmw=%d rcw=%d\n",
		(unsigned long long)sh->sector, rmw, rcw);
	set_bit(STRIPE_HANDLE, &sh->state);
	if (rmw < rcw && rmw > 0)
		/* prefer read-modify-write, but need to get some data */
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if ((dev->towrite || i == sh->pd_idx) &&
			    !test_bit(R5_LOCKED, &dev->flags) &&
			    !(test_bit(R5_UPTODATE, &dev->flags) ||
			      test_bit(R5_Wantcompute, &dev->flags)) &&
			    test_bit(R5_Insync, &dev->flags)) {
				if (test_bit(STRIPE_PREREAD_ACTIVE,
					     &sh->state)) {
					pr_debug("Read_old block "
						"%d for r-m-w\n", i);
					set_bit(R5_LOCKED, &dev->flags);
					set_bit(R5_Wantread, &dev->flags);
					s->locked++;
				} else {
					set_bit(STRIPE_DELAYED, &sh->state);
					set_bit(STRIPE_HANDLE, &sh->state);
				}
			}
		}
	if (rcw <= rmw && rcw > 0)
		/* want reconstruct write, but need to get some data */
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (!test_bit(R5_OVERWRITE, &dev->flags) &&
			    i != sh->pd_idx &&
			    !test_bit(R5_LOCKED, &dev->flags) &&
			    !(test_bit(R5_UPTODATE, &dev->flags) ||
			      test_bit(R5_Wantcompute, &dev->flags)) &&
			    test_bit(R5_Insync, &dev->flags)) {
				if (test_bit(STRIPE_PREREAD_ACTIVE,
					     &sh->state)) {
					pr_debug("Read_old block "
						"%d for Reconstruct\n", i);
					set_bit(R5_LOCKED, &dev->flags);
					set_bit(R5_Wantread, &dev->flags);
					s->locked++;
				} else {
					set_bit(STRIPE_DELAYED, &sh->state);
					set_bit(STRIPE_HANDLE, &sh->state);
				}
			}
		}
	/* now if nothing is locked, and if we have enough data,
	 * we can start a write request
	 */
	/* since handle_stripe can be called at any time we need to handle the
	 * case where a compute block operation has been submitted and then a
	 * subsequent call wants to start a write request.  raid5_run_ops only
	 * handles the case where compute block and postxor are requested
	 * simultaneously.  If this is not the case then new writes need to be
	 * held off until the compute completes.
	 */
	if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) &&
	    (s->locked == 0 && (rcw == 0 || rmw == 0) &&
	    !test_bit(STRIPE_BIT_DELAY, &sh->state)))
		schedule_reconstruction5(sh, s, rcw == 0, 0);
}
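/* Worked example (hypothetical 5-disk layout, one data block written,
 * nothing cached): read-modify-write needs the old data block plus the
 * parity block, so rmw == 2; reconstruct-write needs the three untouched
 * data blocks, so rcw == 3, and the r-m-w path above wins.  A full-stripe
 * write instead leaves rcw == 0 and falls straight through to
 * schedule_reconstruction5() with no preread at all.
 */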
static void handle_stripe_dirtying6(raid5_conf_t *conf,
		struct stripe_head *sh,	struct stripe_head_state *s,
		struct r6_state *r6s, int disks)
{
	int rcw = 0, must_compute = 0, pd_idx = sh->pd_idx, i;
	int qd_idx = r6s->qd_idx;
	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		/* Would I have to read this buffer for reconstruct_write */
		if (!test_bit(R5_OVERWRITE, &dev->flags)
		    && i != pd_idx && i != qd_idx
		    && !test_bit(R5_LOCKED, &dev->flags) &&
		    !test_bit(R5_UPTODATE, &dev->flags)) {
			if (test_bit(R5_Insync, &dev->flags)) rcw++;
			else {
				pr_debug("raid6: must_compute: "
					"disk %d flags=%#lx\n", i, dev->flags);
				must_compute++;
			}
		}
	}
	pr_debug("for sector %llu, rcw=%d, must_compute=%d\n",
	       (unsigned long long)sh->sector, rcw, must_compute);
	set_bit(STRIPE_HANDLE, &sh->state);

	if (rcw > 0)
		/* want reconstruct write, but need to get some data */
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (!test_bit(R5_OVERWRITE, &dev->flags)
			    && !(s->failed == 0 && (i == pd_idx || i == qd_idx))
			    && !test_bit(R5_LOCKED, &dev->flags) &&
			    !test_bit(R5_UPTODATE, &dev->flags) &&
			    test_bit(R5_Insync, &dev->flags)) {
				if (test_bit(STRIPE_PREREAD_ACTIVE,
					     &sh->state)) {
					pr_debug("Read_old stripe %llu "
						"block %d for Reconstruct\n",
					     (unsigned long long)sh->sector, i);
					set_bit(R5_LOCKED, &dev->flags);
					set_bit(R5_Wantread, &dev->flags);
					s->locked++;
				} else {
					pr_debug("Request delayed stripe %llu "
						"block %d for Reconstruct\n",
					     (unsigned long long)sh->sector, i);
					set_bit(STRIPE_DELAYED, &sh->state);
					set_bit(STRIPE_HANDLE, &sh->state);
				}
			}
		}
	/* now if nothing is locked, and if we have enough data, we can start a
	 * write request
	 */
	if (s->locked == 0 && rcw == 0 &&
	    !test_bit(STRIPE_BIT_DELAY, &sh->state)) {
		if (must_compute > 0) {
			/* We have failed blocks and need to compute them */
			switch (s->failed) {
			case 0:
				BUG();
			case 1:
				compute_block_1(sh, r6s->failed_num[0], 0);
				break;
			case 2:
				compute_block_2(sh, r6s->failed_num[0],
						r6s->failed_num[1]);
				break;
			default: /* This request should have been failed? */
				BUG();
			}
		}

		pr_debug("Computing parity for stripe %llu\n",
			(unsigned long long)sh->sector);
		compute_parity6(sh, RECONSTRUCT_WRITE);
		/* now every locked buffer is ready to be written */
		for (i = disks; i--; )
			if (test_bit(R5_LOCKED, &sh->dev[i].flags)) {
				pr_debug("Writing stripe %llu block %d\n",
				       (unsigned long long)sh->sector, i);
				s->locked++;
				set_bit(R5_Wantwrite, &sh->dev[i].flags);
			}
		if (s->locked == disks)
			if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
				atomic_inc(&conf->pending_full_writes);
		/* after a RECONSTRUCT_WRITE, the stripe MUST be in-sync */
		set_bit(STRIPE_INSYNC, &sh->state);

		if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
			atomic_dec(&conf->preread_active_stripes);
			if (atomic_read(&conf->preread_active_stripes) <
			    IO_THRESHOLD)
				md_wakeup_thread(conf->mddev->thread);
		}
	}
}
static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh,
				struct stripe_head_state *s, int disks)
{
	struct r5dev *dev = NULL;

	set_bit(STRIPE_HANDLE, &sh->state);

	switch (sh->check_state) {
	case check_state_idle:
		/* start a new check operation if there are no failures */
		if (s->failed == 0) {
			BUG_ON(s->uptodate != disks);
			sh->check_state = check_state_run;
			set_bit(STRIPE_OP_CHECK, &s->ops_request);
			clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
			s->uptodate--;
			break;
		}
		dev = &sh->dev[s->failed_num];
		/* fall through */
	case check_state_compute_result:
		sh->check_state = check_state_idle;
		if (!dev)
			dev = &sh->dev[sh->pd_idx];

		/* check that a write has not made the stripe insync */
		if (test_bit(STRIPE_INSYNC, &sh->state))
			break;

		/* either failed parity check, or recovery is happening */
		BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
		BUG_ON(s->uptodate != disks);

		set_bit(R5_LOCKED, &dev->flags);
		s->locked++;
		set_bit(R5_Wantwrite, &dev->flags);

		clear_bit(STRIPE_DEGRADED, &sh->state);
		set_bit(STRIPE_INSYNC, &sh->state);
		break;
	case check_state_run:
		break; /* we will be called again upon completion */
	case check_state_check_result:
		sh->check_state = check_state_idle;

		/* if a failure occurred during the check operation, leave
		 * STRIPE_INSYNC not set and let the stripe be handled again
		 */
		if (s->failed)
			break;

		/* handle a successful check operation, if parity is correct
		 * we are done.  Otherwise update the mismatch count and repair
		 * parity if !MD_RECOVERY_CHECK
		 */
		if (sh->ops.zero_sum_result == 0)
			/* parity is correct (on disc,
			 * not in buffer any more)
			 */
			set_bit(STRIPE_INSYNC, &sh->state);
		else {
			conf->mddev->resync_mismatches += STRIPE_SECTORS;
			if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
				/* don't try to repair!! */
				set_bit(STRIPE_INSYNC, &sh->state);
			else {
				sh->check_state = check_state_compute_run;
				set_bit(STRIPE_COMPUTE_RUN, &sh->state);
				set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
				set_bit(R5_Wantcompute,
					&sh->dev[sh->pd_idx].flags);
				sh->ops.target = sh->pd_idx;
				s->uptodate++;
			}
		}
		break;
	case check_state_compute_run:
		break;
	default:
		printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
		       __func__, sh->check_state,
		       (unsigned long long) sh->sector);
		BUG();
	}
}
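/* State sketch (informal summary of the switch above): a clean resync
 * pass runs
 *
 *	check_state_idle -> check_state_run -> check_state_check_result
 *	  --(zero_sum_result == 0)--> STRIPE_INSYNC
 *
 * while a parity mismatch being repaired (no MD_RECOVERY_CHECK) detours
 * through check_state_compute_run / check_state_compute_result to
 * regenerate the parity block before the rewrite is scheduled.
 */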
static void handle_parity_checks6(raid5_conf_t *conf, struct stripe_head *sh,
				struct stripe_head_state *s,
				struct r6_state *r6s, struct page *tmp_page,
				int disks)
{
	int update_p = 0, update_q = 0;
	struct r5dev *dev;
	int pd_idx = sh->pd_idx;
	int qd_idx = r6s->qd_idx;

	set_bit(STRIPE_HANDLE, &sh->state);

	BUG_ON(s->failed > 2);
	BUG_ON(s->uptodate < disks);
	/* Want to check and possibly repair P and Q.
	 * However there could be one 'failed' device, in which
	 * case we can only check one of them, possibly using the
	 * other to generate missing data
	 */

	/* If !tmp_page, we cannot do the calculations,
	 * but as we have set STRIPE_HANDLE, we will soon be called
	 * by stripe_handle with a tmp_page - just wait until then.
	 */
	if (tmp_page) {
		if (s->failed == r6s->q_failed) {
			/* The only possible failed device holds 'Q', so it
			 * makes sense to check P (If anything else were failed,
			 * we would have used P to recreate it).
			 */
			compute_block_1(sh, pd_idx, 1);
			if (!page_is_zero(sh->dev[pd_idx].page)) {
				compute_block_1(sh, pd_idx, 0);
				update_p = 1;
			}
		}
		if (!r6s->q_failed && s->failed < 2) {
			/* q is not failed, and we didn't use it to generate
			 * anything, so it makes sense to check it
			 */
			memcpy(page_address(tmp_page),
			       page_address(sh->dev[qd_idx].page),
			       STRIPE_SIZE);
			compute_parity6(sh, UPDATE_PARITY);
			if (memcmp(page_address(tmp_page),
				   page_address(sh->dev[qd_idx].page),
				   STRIPE_SIZE) != 0) {
				clear_bit(STRIPE_INSYNC, &sh->state);
				update_q = 1;
			}
		}
		if (update_p || update_q) {
			conf->mddev->resync_mismatches += STRIPE_SECTORS;
			if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
				/* don't try to repair!! */
				update_p = update_q = 0;
		}

		/* now write out any block on a failed drive,
		 * or P or Q if they need it
		 */

		if (s->failed == 2) {
			dev = &sh->dev[r6s->failed_num[1]];
			s->locked++;
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantwrite, &dev->flags);
		}
		if (s->failed >= 1) {
			dev = &sh->dev[r6s->failed_num[0]];
			s->locked++;
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantwrite, &dev->flags);
		}

		if (update_p) {
			dev = &sh->dev[pd_idx];
			s->locked++;
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantwrite, &dev->flags);
		}
		if (update_q) {
			dev = &sh->dev[qd_idx];
			s->locked++;
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantwrite, &dev->flags);
		}
		clear_bit(STRIPE_DEGRADED, &sh->state);

		set_bit(STRIPE_INSYNC, &sh->state);
	}
}
static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
				struct r6_state *r6s)
{
	int i;

	/* We have read all the blocks in this stripe and now we need to
	 * copy some of them into a target stripe for expand.
	 */
	struct dma_async_tx_descriptor *tx = NULL;
	clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
	for (i = 0; i < sh->disks; i++)
		if (i != sh->pd_idx && (!r6s || i != r6s->qd_idx)) {
			int dd_idx, j;
			struct stripe_head *sh2;

			sector_t bn = compute_blocknr(sh, i);
			sector_t s = raid5_compute_sector(conf, bn, 0,
							  &dd_idx, NULL);
			sh2 = get_active_stripe(conf, s, 0, 1);
			if (sh2 == NULL)
				/* so far only the early blocks of this stripe
				 * have been requested.  When later blocks
				 * get requested, we will try again
				 */
				continue;
			if (!test_bit(STRIPE_EXPANDING, &sh2->state) ||
			   test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) {
				/* must have already done this block */
				release_stripe(sh2);
				continue;
			}

			/* place all the copies on one channel */
			tx = async_memcpy(sh2->dev[dd_idx].page,
				sh->dev[i].page, 0, 0, STRIPE_SIZE,
				ASYNC_TX_DEP_ACK, tx, NULL, NULL);

			set_bit(R5_Expanded, &sh2->dev[dd_idx].flags);
			set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
			for (j = 0; j < conf->raid_disks; j++)
				if (j != sh2->pd_idx &&
				    (!r6s || j != sh2->qd_idx) &&
				    !test_bit(R5_Expanded, &sh2->dev[j].flags))
					break;
			if (j == conf->raid_disks) {
				set_bit(STRIPE_EXPAND_READY, &sh2->state);
				set_bit(STRIPE_HANDLE, &sh2->state);
			}
			release_stripe(sh2);

		}
	/* done submitting copies, wait for them to complete */
	if (tx) {
		async_tx_ack(tx);
		dma_wait_for_async_tx(tx);
	}
}
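/* Illustrative note: each async_memcpy() above passes the previous
 * descriptor back in as its dependency, which keeps the whole copy chain
 * on one channel; the single dma_wait_for_async_tx(tx) on the last
 * descriptor is then sufficient to know every earlier copy has finished.
 */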
/*
 * handle_stripe - do things to a stripe.
 *
 * We lock the stripe and then examine the state of various bits
 * to see what needs to be done.
 * Possible results:
 *    return some read requests which now have data
 *    return some write requests which are safely on disc
 *    schedule a read on some buffers
 *    schedule a write of some buffers
 *    return confirmation of parity correctness
 *
 * buffers are taken off read_list or write_list, and bh_cache buffers
 * get BH_Lock set before the stripe lock is released.
 *
 */

static bool handle_stripe5(struct stripe_head *sh)
{
	raid5_conf_t *conf = sh->raid_conf;
	int disks = sh->disks, i;
	struct bio *return_bi = NULL;
	struct stripe_head_state s;
	struct r5dev *dev;
	mdk_rdev_t *blocked_rdev = NULL;
	int prexor;

	memset(&s, 0, sizeof(s));
	pr_debug("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d check:%d "
		 "reconstruct:%d\n", (unsigned long long)sh->sector, sh->state,
		 atomic_read(&sh->count), sh->pd_idx, sh->check_state,
		 sh->reconstruct_state);

	spin_lock(&sh->lock);
	clear_bit(STRIPE_HANDLE, &sh->state);
	clear_bit(STRIPE_DELAYED, &sh->state);

	s.syncing = test_bit(STRIPE_SYNCING, &sh->state);
	s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
	s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);

	/* Now to look around and see what can be done */
	rcu_read_lock();
	for (i = disks; i--; ) {
		mdk_rdev_t *rdev;
		struct r5dev *dev = &sh->dev[i];
		clear_bit(R5_Insync, &dev->flags);

		pr_debug("check %d: state 0x%lx toread %p read %p write %p "
			"written %p\n",	i, dev->flags, dev->toread, dev->read,
			dev->towrite, dev->written);

		/* maybe we can request a biofill operation
		 *
		 * new wantfill requests are only permitted while
		 * ops_complete_biofill is guaranteed to be inactive
		 */
		if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread &&
		    !test_bit(STRIPE_BIOFILL_RUN, &sh->state))
			set_bit(R5_Wantfill, &dev->flags);

		/* now count some things */
		if (test_bit(R5_LOCKED, &dev->flags)) s.locked++;
		if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++;
		if (test_bit(R5_Wantcompute, &dev->flags)) s.compute++;

		if (test_bit(R5_Wantfill, &dev->flags))
			s.to_fill++;
		else if (dev->toread)
			s.to_read++;
		if (dev->towrite) {
			s.to_write++;
			if (!test_bit(R5_OVERWRITE, &dev->flags))
				s.non_overwrite++;
		}
		if (dev->written)
			s.written++;
		rdev = rcu_dereference(conf->disks[i].rdev);
		if (blocked_rdev == NULL &&
		    rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
			blocked_rdev = rdev;
			atomic_inc(&rdev->nr_pending);
		}
		if (!rdev || !test_bit(In_sync, &rdev->flags)) {
			/* The ReadError flag will just be confusing now */
			clear_bit(R5_ReadError, &dev->flags);
			clear_bit(R5_ReWrite, &dev->flags);
		}
		if (!rdev || !test_bit(In_sync, &rdev->flags)
		    || test_bit(R5_ReadError, &dev->flags)) {
			s.failed++;
			s.failed_num = i;
		} else
			set_bit(R5_Insync, &dev->flags);
	}
	rcu_read_unlock();

	if (unlikely(blocked_rdev)) {
		if (s.syncing || s.expanding || s.expanded ||
		    s.to_write || s.written) {
			set_bit(STRIPE_HANDLE, &sh->state);
			goto unlock;
		}
		/* There is nothing for the blocked_rdev to block */
		rdev_dec_pending(blocked_rdev, conf->mddev);
		blocked_rdev = NULL;
	}

	if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) {
		set_bit(STRIPE_OP_BIOFILL, &s.ops_request);
		set_bit(STRIPE_BIOFILL_RUN, &sh->state);
	}

	pr_debug("locked=%d uptodate=%d to_read=%d"
		" to_write=%d failed=%d failed_num=%d\n",
		s.locked, s.uptodate, s.to_read, s.to_write,
		s.failed, s.failed_num);
	/* check if the array has lost two devices and, if so, some requests
	 * might need to be failed
	 */
	if (s.failed > 1 && s.to_read+s.to_write+s.written)
		handle_failed_stripe(conf, sh, &s, disks, &return_bi);
	if (s.failed > 1 && s.syncing) {
		md_done_sync(conf->mddev, STRIPE_SECTORS,0);
		clear_bit(STRIPE_SYNCING, &sh->state);
		s.syncing = 0;
	}

	/* might be able to return some write requests if the parity block
	 * is safe, or on a failed drive
	 */
	dev = &sh->dev[sh->pd_idx];
	if ( s.written &&
	     ((test_bit(R5_Insync, &dev->flags) &&
	       !test_bit(R5_LOCKED, &dev->flags) &&
	       test_bit(R5_UPTODATE, &dev->flags)) ||
	       (s.failed == 1 && s.failed_num == sh->pd_idx)))
		handle_stripe_clean_event(conf, sh, disks, &return_bi);

	/* Now we might consider reading some blocks, either to check/generate
	 * parity, or to satisfy requests
	 * or to load a block that is being partially written.
	 */
	if (s.to_read || s.non_overwrite ||
	    (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding)
		handle_stripe_fill5(sh, &s, disks);

	/* Now we check to see if any write operations have recently
	 * completed
	 */
	prexor = 0;
	if (sh->reconstruct_state == reconstruct_state_prexor_drain_result)
		prexor = 1;
	if (sh->reconstruct_state == reconstruct_state_drain_result ||
	    sh->reconstruct_state == reconstruct_state_prexor_drain_result) {
		sh->reconstruct_state = reconstruct_state_idle;

		/* All the 'written' buffers and the parity block are ready to
		 * be written back to disk
		 */
		BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags));
		for (i = disks; i--; ) {
			dev = &sh->dev[i];
			if (test_bit(R5_LOCKED, &dev->flags) &&
				(i == sh->pd_idx || dev->written)) {
				pr_debug("Writing block %d\n", i);
				set_bit(R5_Wantwrite, &dev->flags);
				if (prexor)
					continue;
				if (!test_bit(R5_Insync, &dev->flags) ||
				    (i == sh->pd_idx && s.failed == 0))
					set_bit(STRIPE_INSYNC, &sh->state);
			}
		}
		if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
			atomic_dec(&conf->preread_active_stripes);
			if (atomic_read(&conf->preread_active_stripes) <
				IO_THRESHOLD)
				md_wakeup_thread(conf->mddev->thread);
		}
	}

	/* Now to consider new write requests and what else, if anything
	 * should be read.  We do not handle new writes when:
	 * 1/ A 'write' operation (copy+xor) is already in flight.
	 * 2/ A 'check' operation is in flight, as it may clobber the parity
	 *    block.
	 */
	if (s.to_write && !sh->reconstruct_state && !sh->check_state)
		handle_stripe_dirtying5(conf, sh, &s, disks);

	/* maybe we need to check and possibly fix the parity for this stripe
	 * Any reads will already have been scheduled, so we just see if enough
	 * data is available.  The parity check is held off while parity
	 * dependent operations are in flight.
	 */
	if (sh->check_state ||
	    (s.syncing && s.locked == 0 &&
	     !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
	     !test_bit(STRIPE_INSYNC, &sh->state)))
		handle_parity_checks5(conf, sh, &s, disks);

	if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
		md_done_sync(conf->mddev, STRIPE_SECTORS,1);
		clear_bit(STRIPE_SYNCING, &sh->state);
	}

	/* If the failed drive is just a ReadError, then we might need to
	 * progress the repair/check process
	 */
	if (s.failed == 1 && !conf->mddev->ro &&
	    test_bit(R5_ReadError, &sh->dev[s.failed_num].flags)
	    && !test_bit(R5_LOCKED, &sh->dev[s.failed_num].flags)
	    && test_bit(R5_UPTODATE, &sh->dev[s.failed_num].flags)
		) {
		dev = &sh->dev[s.failed_num];
		if (!test_bit(R5_ReWrite, &dev->flags)) {
			set_bit(R5_Wantwrite, &dev->flags);
			set_bit(R5_ReWrite, &dev->flags);
			set_bit(R5_LOCKED, &dev->flags);
			s.locked++;
		} else {
			/* let's read it back */
			set_bit(R5_Wantread, &dev->flags);
			set_bit(R5_LOCKED, &dev->flags);
			s.locked++;
		}
	}

	/* Finish reconstruct operations initiated by the expansion process */
	if (sh->reconstruct_state == reconstruct_state_result) {
		sh->reconstruct_state = reconstruct_state_idle;
		clear_bit(STRIPE_EXPANDING, &sh->state);
		for (i = conf->raid_disks; i--; ) {
			set_bit(R5_Wantwrite, &sh->dev[i].flags);
			set_bit(R5_LOCKED, &sh->dev[i].flags);
			s.locked++;
		}
	}

	if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
	    !sh->reconstruct_state) {
		/* Need to write out all blocks after computing parity */
		sh->disks = conf->raid_disks;
		stripe_set_idx(sh->sector, conf, 0, sh);
		schedule_reconstruction5(sh, &s, 1, 1);
	} else if (s.expanded && !sh->reconstruct_state && s.locked == 0) {
		clear_bit(STRIPE_EXPAND_READY, &sh->state);
		atomic_dec(&conf->reshape_stripes);
		wake_up(&conf->wait_for_overlap);
		md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
	}

	if (s.expanding && s.locked == 0 &&
	    !test_bit(STRIPE_COMPUTE_RUN, &sh->state))
		handle_stripe_expansion(conf, sh, NULL);

 unlock:
	spin_unlock(&sh->lock);

	/* wait for this device to become unblocked */
	if (unlikely(blocked_rdev))
		md_wait_for_blocked_rdev(blocked_rdev, conf->mddev);

	if (s.ops_request)
		raid5_run_ops(sh, s.ops_request);

	ops_run_io(sh, &s);

	return_io(return_bi);

	return blocked_rdev == NULL;
}
static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
{
	raid5_conf_t *conf = sh->raid_conf;
	int disks = sh->disks;
	struct bio *return_bi = NULL;
	int i, pd_idx = sh->pd_idx;
	struct stripe_head_state s;
	struct r6_state r6s;
	struct r5dev *dev, *pdev, *qdev;
	mdk_rdev_t *blocked_rdev = NULL;

	r6s.qd_idx = sh->qd_idx;
	pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
		"pd_idx=%d, qd_idx=%d\n",
	       (unsigned long long)sh->sector, sh->state,
	       atomic_read(&sh->count), pd_idx, r6s.qd_idx);
	memset(&s, 0, sizeof(s));

	spin_lock(&sh->lock);
	clear_bit(STRIPE_HANDLE, &sh->state);
	clear_bit(STRIPE_DELAYED, &sh->state);

	s.syncing = test_bit(STRIPE_SYNCING, &sh->state);
	s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
	s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
	/* Now to look around and see what can be done */

	rcu_read_lock();
	for (i = disks; i--; ) {
		mdk_rdev_t *rdev;
		dev = &sh->dev[i];
		clear_bit(R5_Insync, &dev->flags);

		pr_debug("check %d: state 0x%lx read %p write %p written %p\n",
			i, dev->flags, dev->toread, dev->towrite, dev->written);
		/* maybe we can reply to a read */
		if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread) {
			struct bio *rbi, *rbi2;
			pr_debug("Return read for disc %d\n", i);
			spin_lock_irq(&conf->device_lock);
			rbi = dev->toread;
			dev->toread = NULL;
			if (test_and_clear_bit(R5_Overlap, &dev->flags))
				wake_up(&conf->wait_for_overlap);
			spin_unlock_irq(&conf->device_lock);
			while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) {
				copy_data(0, rbi, dev->page, dev->sector);
				rbi2 = r5_next_bio(rbi, dev->sector);
				spin_lock_irq(&conf->device_lock);
				if (!raid5_dec_bi_phys_segments(rbi)) {
					rbi->bi_next = return_bi;
					return_bi = rbi;
				}
				spin_unlock_irq(&conf->device_lock);
				rbi = rbi2;
			}
		}

		/* now count some things */
		if (test_bit(R5_LOCKED, &dev->flags)) s.locked++;
		if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++;

		if (dev->toread)
			s.to_read++;
		if (dev->towrite) {
			s.to_write++;
			if (!test_bit(R5_OVERWRITE, &dev->flags))
				s.non_overwrite++;
		}
		if (dev->written)
			s.written++;
		rdev = rcu_dereference(conf->disks[i].rdev);
		if (blocked_rdev == NULL &&
		    rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
			blocked_rdev = rdev;
			atomic_inc(&rdev->nr_pending);
		}
		if (!rdev || !test_bit(In_sync, &rdev->flags)) {
			/* The ReadError flag will just be confusing now */
			clear_bit(R5_ReadError, &dev->flags);
			clear_bit(R5_ReWrite, &dev->flags);
		}
		if (!rdev || !test_bit(In_sync, &rdev->flags)
		    || test_bit(R5_ReadError, &dev->flags)) {
			if (s.failed < 2)
				r6s.failed_num[s.failed] = i;
			s.failed++;
		} else
			set_bit(R5_Insync, &dev->flags);
	}
	rcu_read_unlock();

	if (unlikely(blocked_rdev)) {
		if (s.syncing || s.expanding || s.expanded ||
		    s.to_write || s.written) {
			set_bit(STRIPE_HANDLE, &sh->state);
			goto unlock;
		}
		/* There is nothing for the blocked_rdev to block */
		rdev_dec_pending(blocked_rdev, conf->mddev);
		blocked_rdev = NULL;
	}

	pr_debug("locked=%d uptodate=%d to_read=%d"
	       " to_write=%d failed=%d failed_num=%d,%d\n",
	       s.locked, s.uptodate, s.to_read, s.to_write, s.failed,
	       r6s.failed_num[0], r6s.failed_num[1]);
	/* check if the array has lost >2 devices and, if so, some requests
	 * might need to be failed
	 */
	if (s.failed > 2 && s.to_read+s.to_write+s.written)
		handle_failed_stripe(conf, sh, &s, disks, &return_bi);
	if (s.failed > 2 && s.syncing) {
		md_done_sync(conf->mddev, STRIPE_SECTORS,0);
		clear_bit(STRIPE_SYNCING, &sh->state);
		s.syncing = 0;
	}

	/*
	 * might be able to return some write requests if the parity blocks
	 * are safe, or on a failed drive
	 */
	pdev = &sh->dev[pd_idx];
	r6s.p_failed = (s.failed >= 1 && r6s.failed_num[0] == pd_idx)
		|| (s.failed >= 2 && r6s.failed_num[1] == pd_idx);
	qdev = &sh->dev[r6s.qd_idx];
	r6s.q_failed = (s.failed >= 1 && r6s.failed_num[0] == r6s.qd_idx)
		|| (s.failed >= 2 && r6s.failed_num[1] == r6s.qd_idx);

	if ( s.written &&
	     ( r6s.p_failed || ((test_bit(R5_Insync, &pdev->flags)
			     && !test_bit(R5_LOCKED, &pdev->flags)
			     && test_bit(R5_UPTODATE, &pdev->flags)))) &&
	     ( r6s.q_failed || ((test_bit(R5_Insync, &qdev->flags)
			     && !test_bit(R5_LOCKED, &qdev->flags)
			     && test_bit(R5_UPTODATE, &qdev->flags)))))
		handle_stripe_clean_event(conf, sh, disks, &return_bi);

	/* Now we might consider reading some blocks, either to check/generate
	 * parity, or to satisfy requests
	 * or to load a block that is being partially written.
	 */
	if (s.to_read || s.non_overwrite || (s.to_write && s.failed) ||
	    (s.syncing && (s.uptodate < disks)) || s.expanding)
		handle_stripe_fill6(sh, &s, &r6s, disks);

	/* now to consider writing and what else, if anything should be read */
	if (s.to_write)
		handle_stripe_dirtying6(conf, sh, &s, &r6s, disks);

	/* maybe we need to check and possibly fix the parity for this stripe
	 * Any reads will already have been scheduled, so we just see if enough
	 * data is available
	 */
	if (s.syncing && s.locked == 0 && !test_bit(STRIPE_INSYNC, &sh->state))
		handle_parity_checks6(conf, sh, &s, &r6s, tmp_page, disks);

	if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
		md_done_sync(conf->mddev, STRIPE_SECTORS,1);
		clear_bit(STRIPE_SYNCING, &sh->state);
	}

	/* If the failed drives are just a ReadError, then we might need
	 * to progress the repair/check process
	 */
	if (s.failed <= 2 && !conf->mddev->ro)
		for (i = 0; i < s.failed; i++) {
			dev = &sh->dev[r6s.failed_num[i]];
			if (test_bit(R5_ReadError, &dev->flags)
			    && !test_bit(R5_LOCKED, &dev->flags)
			    && test_bit(R5_UPTODATE, &dev->flags)
				) {
				if (!test_bit(R5_ReWrite, &dev->flags)) {
					set_bit(R5_Wantwrite, &dev->flags);
					set_bit(R5_ReWrite, &dev->flags);
					set_bit(R5_LOCKED, &dev->flags);
				} else {
					/* let's read it back */
					set_bit(R5_Wantread, &dev->flags);
					set_bit(R5_LOCKED, &dev->flags);
				}
			}
		}

	if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state)) {
		/* Need to write out all blocks after computing P&Q */
		sh->disks = conf->raid_disks;
		stripe_set_idx(sh->sector, conf, 0, sh);
		compute_parity6(sh, RECONSTRUCT_WRITE);
		for (i = conf->raid_disks ; i-- ;  ) {
			set_bit(R5_LOCKED, &sh->dev[i].flags);
			s.locked++;
			set_bit(R5_Wantwrite, &sh->dev[i].flags);
		}
		clear_bit(STRIPE_EXPANDING, &sh->state);
	} else if (s.expanded) {
		clear_bit(STRIPE_EXPAND_READY, &sh->state);
		atomic_dec(&conf->reshape_stripes);
		wake_up(&conf->wait_for_overlap);
		md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
	}

	if (s.expanding && s.locked == 0 &&
	    !test_bit(STRIPE_COMPUTE_RUN, &sh->state))
		handle_stripe_expansion(conf, sh, &r6s);

 unlock:
	spin_unlock(&sh->lock);

	/* wait for this device to become unblocked */
	if (unlikely(blocked_rdev))
		md_wait_for_blocked_rdev(blocked_rdev, conf->mddev);

	ops_run_io(sh, &s);

	return_io(return_bi);

	return blocked_rdev == NULL;
}
/* returns true if the stripe was handled */
static bool handle_stripe(struct stripe_head *sh, struct page *tmp_page)
{
	if (sh->raid_conf->level == 6)
		return handle_stripe6(sh, tmp_page);
	else
		return handle_stripe5(sh);
}
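/* Usage sketch (hypothetical caller): owners of a stripe reference drive
 * it the same way raid5d does, e.g.
 *
 *	set_bit(STRIPE_HANDLE, &sh->state);
 *	handle_stripe(sh, conf->spare_page);	// tmp_page is RAID-6 only
 *	release_stripe(sh);
 *
 * A false return means a blocked rdev prevented handling, so callers such
 * as sync_request() simply loop until the stripe is handled.
 */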
static void raid5_activate_delayed(raid5_conf_t *conf)
{
	if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
		while (!list_empty(&conf->delayed_list)) {
			struct list_head *l = conf->delayed_list.next;
			struct stripe_head *sh;
			sh = list_entry(l, struct stripe_head, lru);
			list_del_init(l);
			clear_bit(STRIPE_DELAYED, &sh->state);
			if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
				atomic_inc(&conf->preread_active_stripes);
			list_add_tail(&sh->lru, &conf->hold_list);
		}
	} else
		blk_plug_device(conf->mddev->queue);
}
static void activate_bit_delay(raid5_conf_t *conf)
{
	/* device_lock is held */
	struct list_head head;
	list_add(&head, &conf->bitmap_list);
	list_del_init(&conf->bitmap_list);
	while (!list_empty(&head)) {
		struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru);
		list_del_init(&sh->lru);
		atomic_inc(&sh->count);
		__release_stripe(conf, sh);
	}
}
static void unplug_slaves(mddev_t *mddev)
{
	raid5_conf_t *conf = mddev_to_conf(mddev);
	int i;

	rcu_read_lock();
	for (i = 0; i < mddev->raid_disks; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
			struct request_queue *r_queue = bdev_get_queue(rdev->bdev);

			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();

			blk_unplug(r_queue);

			rdev_dec_pending(rdev, mddev);
			rcu_read_lock();
		}
	}
	rcu_read_unlock();
}
static void raid5_unplug_device(struct request_queue *q)
{
	mddev_t *mddev = q->queuedata;
	raid5_conf_t *conf = mddev_to_conf(mddev);
	unsigned long flags;

	spin_lock_irqsave(&conf->device_lock, flags);

	if (blk_remove_plug(q)) {
		conf->seq_flush++;
		raid5_activate_delayed(conf);
	}
	md_wakeup_thread(mddev->thread);

	spin_unlock_irqrestore(&conf->device_lock, flags);

	unplug_slaves(mddev);
}
static int raid5_congested(void *data, int bits)
{
	mddev_t *mddev = data;
	raid5_conf_t *conf = mddev_to_conf(mddev);

	/* No difference between reads and writes.  Just check
	 * how busy the stripe_cache is
	 */
	if (conf->inactive_blocked)
		return 1;
	if (conf->quiesce)
		return 1;
	if (list_empty_careful(&conf->inactive_list))
		return 1;

	return 0;
}
/* We want read requests to align with chunks where possible,
 * but write requests don't need to.
 */
static int raid5_mergeable_bvec(struct request_queue *q,
				struct bvec_merge_data *bvm,
				struct bio_vec *biovec)
{
	mddev_t *mddev = q->queuedata;
	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
	int max;
	unsigned int chunk_sectors = mddev->chunk_size >> 9;
	unsigned int bio_sectors = bvm->bi_size >> 9;

	if ((bvm->bi_rw & 1) == WRITE)
		return biovec->bv_len; /* always allow writes to be mergeable */

	max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
	if (max < 0) max = 0;
	if (max <= biovec->bv_len && bio_sectors == 0)
		return biovec->bv_len;
	else
		return max;
}
static int in_chunk_boundary(mddev_t *mddev, struct bio *bio)
{
	sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
	unsigned int chunk_sectors = mddev->chunk_size >> 9;
	unsigned int bio_sectors = bio->bi_size >> 9;

	return  chunk_sectors >=
		((sector & (chunk_sectors - 1)) + bio_sectors);
}
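/* Worked example (hypothetical values): with chunk_sectors == 128, an
 * 8-sector bio at sector 120 gives (120 & 127) + 8 == 128 <= 128 and is
 * accepted, while the same bio at sector 124 gives 132 > 128 and is
 * rejected from the aligned-read fast path.  The "& (chunk_sectors - 1)"
 * trick assumes a power-of-two chunk size.
 */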
/*
 *  add bio to the retry LIFO  ( in O(1) ... we are in interrupt )
 *  later sampled by raid5d.
 */
static void add_bio_to_retry(struct bio *bi, raid5_conf_t *conf)
{
	unsigned long flags;

	spin_lock_irqsave(&conf->device_lock, flags);

	bi->bi_next = conf->retry_read_aligned_list;
	conf->retry_read_aligned_list = bi;

	spin_unlock_irqrestore(&conf->device_lock, flags);
	md_wakeup_thread(conf->mddev->thread);
}
static struct bio *remove_bio_from_retry(raid5_conf_t *conf)
{
	struct bio *bi;

	bi = conf->retry_read_aligned;
	if (bi) {
		conf->retry_read_aligned = NULL;
		return bi;
	}
	bi = conf->retry_read_aligned_list;
	if (bi) {
		conf->retry_read_aligned_list = bi->bi_next;
		bi->bi_next = NULL;
		/*
		 * this sets the active stripe count to 1 and the processed
		 * stripe count to zero (upper 16 bits)
		 */
		bi->bi_phys_segments = 1; /* biased count of active stripes */
	}

	return bi;
}
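/* Illustrative sketch of the packed counters (see the bi_phys_segments
 * comment near the top of the file):
 *
 *	active    = bi->bi_phys_segments & 0xffff;	// stripes in flight
 *	processed = bi->bi_phys_segments >> 16;		// stripes finished
 *
 * so writing 1 here restores the single "active" bias and zeroes the
 * resume cursor before the aligned read is re-driven stripe by stripe.
 */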
/*
 *  The "raid5_align_endio" should check if the read succeeded and if it
 *  did, call bio_endio on the original bio (having bio_put the new bio
 *  first).
 *  If the read failed..
 */
static void raid5_align_endio(struct bio *bi, int error)
{
	struct bio *raid_bi = bi->bi_private;
	mddev_t *mddev;
	raid5_conf_t *conf;
	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
	mdk_rdev_t *rdev;

	bio_put(bi);

	mddev = raid_bi->bi_bdev->bd_disk->queue->queuedata;
	conf = mddev_to_conf(mddev);
	rdev = (void*)raid_bi->bi_next;
	raid_bi->bi_next = NULL;

	rdev_dec_pending(rdev, conf->mddev);

	if (!error && uptodate) {
		bio_endio(raid_bi, 0);
		if (atomic_dec_and_test(&conf->active_aligned_reads))
			wake_up(&conf->wait_for_stripe);
		return;
	}

	pr_debug("raid5_align_endio : io error...handing IO for a retry\n");

	add_bio_to_retry(raid_bi, conf);
}
static int bio_fits_rdev(struct bio *bi)
{
	struct request_queue *q = bdev_get_queue(bi->bi_bdev);

	if ((bi->bi_size>>9) > q->max_sectors)
		return 0;
	blk_recount_segments(q, bi);
	if (bi->bi_phys_segments > q->max_phys_segments)
		return 0;

	if (q->merge_bvec_fn)
		/* it's too hard to apply the merge_bvec_fn at this stage,
		 * so just give up
		 */
		return 0;

	return 1;
}
static int chunk_aligned_read(struct request_queue *q, struct bio *raid_bio)
{
	mddev_t *mddev = q->queuedata;
	raid5_conf_t *conf = mddev_to_conf(mddev);
	unsigned int dd_idx;
	struct bio *align_bi;
	mdk_rdev_t *rdev;

	if (!in_chunk_boundary(mddev, raid_bio)) {
		pr_debug("chunk_aligned_read : non aligned\n");
		return 0;
	}
	/*
	 * use bio_clone to make a copy of the bio
	 */
	align_bi = bio_clone(raid_bio, GFP_NOIO);
	if (!align_bi)
		return 0;
	/*
	 *   set bi_end_io to a new function, and set bi_private to the
	 *     original bio.
	 */
	align_bi->bi_end_io  = raid5_align_endio;
	align_bi->bi_private = raid_bio;
	/*
	 *	compute position
	 */
	align_bi->bi_sector = raid5_compute_sector(conf, raid_bio->bi_sector,
						   0,
						   &dd_idx, NULL);

	rcu_read_lock();
	rdev = rcu_dereference(conf->disks[dd_idx].rdev);
	if (rdev && test_bit(In_sync, &rdev->flags)) {
		atomic_inc(&rdev->nr_pending);
		rcu_read_unlock();
		raid_bio->bi_next = (void*)rdev;
		align_bi->bi_bdev =  rdev->bdev;
		align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
		align_bi->bi_sector += rdev->data_offset;

		if (!bio_fits_rdev(align_bi)) {
			/* too big in some way */
			bio_put(align_bi);
			rdev_dec_pending(rdev, mddev);
			return 0;
		}

		spin_lock_irq(&conf->device_lock);
		wait_event_lock_irq(conf->wait_for_stripe,
				    conf->quiesce == 0,
				    conf->device_lock, /* nothing */);
		atomic_inc(&conf->active_aligned_reads);
		spin_unlock_irq(&conf->device_lock);

		generic_make_request(align_bi);
		return 1;
	} else {
		rcu_read_unlock();
		bio_put(align_bi);
		return 0;
	}
}
/* __get_priority_stripe - get the next stripe to process
 *
 * Full stripe writes are allowed to pass preread active stripes up until
 * the bypass_threshold is exceeded. In general the bypass_count
 * increments when the handle_list is handled before the hold_list; however, it
 * will not be incremented when STRIPE_IO_STARTED is sampled set signifying a
 * stripe with in flight i/o. The bypass_count will be reset when the
 * head of the hold_list has changed, i.e. the head was promoted to the
 * handle_list.
 */
static struct stripe_head *__get_priority_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh;

	pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n",
		  __func__,
		  list_empty(&conf->handle_list) ? "empty" : "busy",
		  list_empty(&conf->hold_list) ? "empty" : "busy",
		  atomic_read(&conf->pending_full_writes), conf->bypass_count);

	if (!list_empty(&conf->handle_list)) {
		sh = list_entry(conf->handle_list.next, typeof(*sh), lru);

		if (list_empty(&conf->hold_list))
			conf->bypass_count = 0;
		else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) {
			if (conf->hold_list.next == conf->last_hold)
				conf->bypass_count++;
			else {
				conf->last_hold = conf->hold_list.next;
				conf->bypass_count -= conf->bypass_threshold;
				if (conf->bypass_count < 0)
					conf->bypass_count = 0;
			}
		}
	} else if (!list_empty(&conf->hold_list) &&
		   ((conf->bypass_threshold &&
		     conf->bypass_count > conf->bypass_threshold) ||
		    atomic_read(&conf->pending_full_writes) == 0)) {
		sh = list_entry(conf->hold_list.next,
				typeof(*sh), lru);
		conf->bypass_count -= conf->bypass_threshold;
		if (conf->bypass_count < 0)
			conf->bypass_count = 0;
	} else
		return NULL;

	list_del_init(&sh->lru);
	atomic_inc(&sh->count);
	BUG_ON(atomic_read(&sh->count) != 1);
	return sh;
}
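/* Illustrative walk-through (hypothetical, bypass_threshold == 1): while
 * the same stripe stays at the head of hold_list, every handle_list
 * stripe serviced without in-flight i/o bumps bypass_count; after two
 * such passes bypass_count (2) exceeds the threshold, the hold_list head
 * is promoted despite the preread delay, and the count is charged back
 * down by the threshold.
 */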
static int make_request(struct request_queue *q, struct bio *bi)
{
	mddev_t *mddev = q->queuedata;
	raid5_conf_t *conf = mddev_to_conf(mddev);
	int dd_idx;
	sector_t new_sector;
	sector_t logical_sector, last_sector;
	struct stripe_head *sh;
	const int rw = bio_data_dir(bi);
	int cpu, remaining;

	if (unlikely(bio_barrier(bi))) {
		bio_endio(bi, -EOPNOTSUPP);
		return 0;
	}

	md_write_start(mddev, bi);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
		      bio_sectors(bi));
	part_stat_unlock();

	if (rw == READ &&
	     mddev->reshape_position == MaxSector &&
	     chunk_aligned_read(q,bi))
		return 0;

	logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
	last_sector = bi->bi_sector + (bi->bi_size>>9);
	bi->bi_next = NULL;
	bi->bi_phys_segments = 1;	/* over-loaded to count active stripes */

	for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
		DEFINE_WAIT(w);
		int disks, data_disks;
		int previous;

	retry:
		previous = 0;
		prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
		if (likely(conf->expand_progress == MaxSector))
			disks = conf->raid_disks;
		else {
			/* spinlock is needed as expand_progress may be
			 * 64bit on a 32bit platform, and so it might be
			 * possible to see a half-updated value
			 * Of course expand_progress could change after
			 * the lock is dropped, so once we get a reference
			 * to the stripe that we think it is, we will have
			 * to check again.
			 */
			spin_lock_irq(&conf->device_lock);
			disks = conf->raid_disks;
			if (logical_sector >= conf->expand_progress) {
				disks = conf->previous_raid_disks;
				previous = 1;
			} else {
				if (logical_sector >= conf->expand_lo) {
					spin_unlock_irq(&conf->device_lock);
					schedule();
					goto retry;
				}
			}
			spin_unlock_irq(&conf->device_lock);
		}
		data_disks = disks - conf->max_degraded;

		new_sector = raid5_compute_sector(conf, logical_sector,
						  previous,
						  &dd_idx, NULL);
		pr_debug("raid5: make_request, sector %llu logical %llu\n",
			(unsigned long long)new_sector,
			(unsigned long long)logical_sector);

		sh = get_active_stripe(conf, new_sector, previous,
				       (bi->bi_rw&RWA_MASK));
		if (sh) {
			if (unlikely(conf->expand_progress != MaxSector)) {
				/* expansion might have moved on while waiting for a
				 * stripe, so we must do the range check again.
				 * Expansion could still move past after this
				 * test, but as we are holding a reference to
				 * 'sh', we know that if that happens,
				 *  STRIPE_EXPANDING will get set and the expansion
				 * won't proceed until we finish with the stripe.
				 */
				int must_retry = 0;
				spin_lock_irq(&conf->device_lock);
				if (logical_sector <  conf->expand_progress &&
				    disks == conf->previous_raid_disks)
					/* mismatch, need to try again */
					must_retry = 1;
				spin_unlock_irq(&conf->device_lock);
				if (must_retry) {
					release_stripe(sh);
					goto retry;
				}
			}
			/* FIXME what if we get a false positive because these
			 * are being updated.
			 */
			if (logical_sector >= mddev->suspend_lo &&
			    logical_sector < mddev->suspend_hi) {
				release_stripe(sh);
				schedule();
				goto retry;
			}

			if (test_bit(STRIPE_EXPANDING, &sh->state) ||
			    !add_stripe_bio(sh, bi, dd_idx, (bi->bi_rw&RW_MASK))) {
				/* Stripe is busy expanding or
				 * add failed due to overlap.  Flush everything
				 * and wait a while
				 */
				raid5_unplug_device(mddev->queue);
				release_stripe(sh);
				schedule();
				goto retry;
			}
			finish_wait(&conf->wait_for_overlap, &w);
			set_bit(STRIPE_HANDLE, &sh->state);
			clear_bit(STRIPE_DELAYED, &sh->state);
			release_stripe(sh);
		} else {
			/* cannot get stripe for read-ahead, just give-up */
			clear_bit(BIO_UPTODATE, &bi->bi_flags);
			finish_wait(&conf->wait_for_overlap, &w);
			break;
		}

	}
	spin_lock_irq(&conf->device_lock);
	remaining = raid5_dec_bi_phys_segments(bi);
	spin_unlock_irq(&conf->device_lock);
	if (remaining == 0) {

		if ( rw == WRITE )
			md_write_end(mddev);

		bio_endio(bi, 0);
	}
	return 0;
}
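/* Illustrative sketch (hypothetical request): a 16-sector write starting
 * at sector 4 is rounded down to logical sector 0 and walked in
 * STRIPE_SECTORS (8-sector) steps, touching stripes [0,8), [8,16) and
 * [16,24).  Each add_stripe_bio() bumps the biased bi_phys_segments
 * count, and the final decrement above is what allows the last completion
 * to reach bio_endio().
 */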
static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped)
{
	/* reshaping is quite different to recovery/resync so it is
	 * handled quite separately ... here.
	 *
	 * On each call to sync_request, we gather one chunk worth of
	 * destination stripes and flag them as expanding.
	 * Then we find all the source stripes and request reads.
	 * As the reads complete, handle_stripe will copy the data
	 * into the destination stripe and release that stripe.
	 */
	raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
	struct stripe_head *sh;
	sector_t first_sector, last_sector;
	int raid_disks = conf->previous_raid_disks;
	int data_disks = raid_disks - conf->max_degraded;
	int new_data_disks = conf->raid_disks - conf->max_degraded;
	int i;
	int dd_idx;
	sector_t writepos, safepos, gap;

	if (sector_nr == 0 &&
	    conf->expand_progress != 0) {
		/* restarting in the middle, skip the initial sectors */
		sector_nr = conf->expand_progress;
		sector_div(sector_nr, new_data_disks);
		*skipped = 1;
		return sector_nr;
	}

	/* we update the metadata when there is more than 3Meg
	 * in the block range (that is rather arbitrary, should
	 * probably be time based) or when the data about to be
	 * copied would over-write the source of the data at
	 * the front of the range.
	 * i.e. one new_stripe forward from expand_progress new_maps
	 * to after where expand_lo old_maps to
	 */
	writepos = conf->expand_progress +
		conf->chunk_size/512*(new_data_disks);
	sector_div(writepos, new_data_disks);
	safepos = conf->expand_lo;
	sector_div(safepos, data_disks);
	gap = conf->expand_progress - conf->expand_lo;

	if (writepos >= safepos ||
	    gap > (new_data_disks)*3000*2 /*3Meg*/) {
		/* Cannot proceed until we've updated the superblock... */
		wait_event(conf->wait_for_overlap,
			   atomic_read(&conf->reshape_stripes)==0);
		mddev->reshape_position = conf->expand_progress;
		set_bit(MD_CHANGE_DEVS, &mddev->flags);
		md_wakeup_thread(mddev->thread);
		wait_event(mddev->sb_wait, mddev->flags == 0 ||
			   kthread_should_stop());
		spin_lock_irq(&conf->device_lock);
		conf->expand_lo = mddev->reshape_position;
		spin_unlock_irq(&conf->device_lock);
		wake_up(&conf->wait_for_overlap);
	}

	for (i = 0; i < conf->chunk_size/512; i += STRIPE_SECTORS) {
		int j;
		int skipped = 0;
		sh = get_active_stripe(conf, sector_nr+i, 0, 0);
		set_bit(STRIPE_EXPANDING, &sh->state);
		atomic_inc(&conf->reshape_stripes);
		/* If any of this stripe is beyond the end of the old
		 * array, then we need to zero those blocks
		 */
		for (j = sh->disks; j--;) {
			sector_t s;
			if (j == sh->pd_idx)
				continue;
			if (conf->level == 6 &&
			    j == sh->qd_idx)
				continue;
			s = compute_blocknr(sh, j);
			if (s < mddev->array_sectors) {
				skipped = 1;
				continue;
			}
			memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE);
			set_bit(R5_Expanded, &sh->dev[j].flags);
			set_bit(R5_UPTODATE, &sh->dev[j].flags);
		}
		if (!skipped) {
			set_bit(STRIPE_EXPAND_READY, &sh->state);
			set_bit(STRIPE_HANDLE, &sh->state);
		}
		release_stripe(sh);
	}
	spin_lock_irq(&conf->device_lock);
	conf->expand_progress = (sector_nr + i) * new_data_disks;
	spin_unlock_irq(&conf->device_lock);
	/* OK, those stripes are ready. We can start scheduling
	 * reads on the source stripes.
	 * The source stripes are determined by mapping the first and last
	 * block on the destination stripes.
	 */
	first_sector =
		raid5_compute_sector(conf, sector_nr*(new_data_disks),
				     1, &dd_idx, NULL);
	last_sector =
		raid5_compute_sector(conf, ((sector_nr+conf->chunk_size/512)
					    *(new_data_disks) - 1),
				     1, &dd_idx, NULL);
	if (last_sector >= mddev->dev_sectors)
		last_sector = mddev->dev_sectors - 1;
	while (first_sector <= last_sector) {
		sh = get_active_stripe(conf, first_sector, 1, 0);
		set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
		set_bit(STRIPE_HANDLE, &sh->state);
		release_stripe(sh);
		first_sector += STRIPE_SECTORS;
	}
	/* If this takes us to the resync_max point where we have to pause,
	 * then we need to write out the superblock.
	 */
	sector_nr += conf->chunk_size>>9;
	if (sector_nr >= mddev->resync_max) {
		/* Cannot proceed until we've updated the superblock... */
		wait_event(conf->wait_for_overlap,
			   atomic_read(&conf->reshape_stripes) == 0);
		mddev->reshape_position = conf->expand_progress;
		set_bit(MD_CHANGE_DEVS, &mddev->flags);
		md_wakeup_thread(mddev->thread);
		wait_event(mddev->sb_wait,
			   !test_bit(MD_CHANGE_DEVS, &mddev->flags)
			   || kthread_should_stop());
		spin_lock_irq(&conf->device_lock);
		conf->expand_lo = mddev->reshape_position;
		spin_unlock_irq(&conf->device_lock);
		wake_up(&conf->wait_for_overlap);
	}
	return conf->chunk_size>>9;
}
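/* Worked example (hypothetical geometry, growing 4 -> 5 data disks with
 * 128-sector chunks): after each chunk of progress,
 *
 *	writepos = (expand_progress + 128*5) / 5	(new geometry)
 *	safepos  = expand_lo / 4			(old geometry)
 *
 * and the superblock is flushed once writepos catches up with safepos or
 * the unrecorded gap exceeds the code's rough "3Meg" bound, so a crash
 * can never leave written destination stripes ahead of the recorded
 * reshape position.
 */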
/* FIXME go_faster isn't used */
static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
{
	raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
	struct stripe_head *sh;
	sector_t max_sector = mddev->dev_sectors;
	int sync_blocks;
	int still_degraded = 0;
	int i;

	if (sector_nr >= max_sector) {
		/* just being told to finish up .. nothing much to do */
		unplug_slaves(mddev);
		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
			end_reshape(conf);
			return 0;
		}

		if (mddev->curr_resync < max_sector) /* aborted */
			bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
					&sync_blocks, 1);
		else /* completed sync */
			conf->fullsync = 0;
		bitmap_close_sync(mddev->bitmap);

		return 0;
	}

	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
		return reshape_request(mddev, sector_nr, skipped);

	/* No need to check resync_max as we never do more than one
	 * stripe, and as resync_max will always be on a chunk boundary,
	 * if the check in md_do_sync didn't fire, there is no chance
	 * of overstepping resync_max here
	 */

	/* if there are too many failed drives and we are trying
	 * to resync, then assert that we are finished, because there is
	 * nothing we can do.
	 */
	if (mddev->degraded >= conf->max_degraded &&
	    test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
		sector_t rv = mddev->dev_sectors - sector_nr;
		*skipped = 1;
		return rv;
	}
	if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
	    !conf->fullsync && sync_blocks >= STRIPE_SECTORS) {
		/* we can skip this block, and probably more */
		sync_blocks /= STRIPE_SECTORS;
		*skipped = 1;
		return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
	}

	bitmap_cond_end_sync(mddev->bitmap, sector_nr);

	sh = get_active_stripe(conf, sector_nr, 0, 1);
	if (sh == NULL) {
		sh = get_active_stripe(conf, sector_nr, 0, 0);
		/* make sure we don't swamp the stripe cache if someone else
		 * is trying to get access
		 */
		schedule_timeout_uninterruptible(1);
	}
	/* Need to check if array will still be degraded after recovery/resync
	 * We don't need to check the 'failed' flag as when that gets set,
	 * recovery aborts.
	 */
	for (i = 0; i < mddev->raid_disks; i++)
		if (conf->disks[i].rdev == NULL)
			still_degraded = 1;

	bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);

	spin_lock(&sh->lock);
	set_bit(STRIPE_SYNCING, &sh->state);
	clear_bit(STRIPE_INSYNC, &sh->state);
	spin_unlock(&sh->lock);

	/* wait for any blocked device to be handled */
	while (unlikely(!handle_stripe(sh, NULL)))
		;
	release_stripe(sh);

	return STRIPE_SECTORS;
}
static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
{
	/* We may not be able to submit a whole bio at once as there
	 * may not be enough stripe_heads available.
	 * We cannot pre-allocate enough stripe_heads as we may need
	 * more than exist in the cache (if we allow ever large chunks).
	 * So we do one stripe head at a time and record in
	 * ->bi_hw_segments how many have been done.
	 *
	 * We *know* that this entire raid_bio is in one chunk, so
	 * it will be only one 'dd_idx' and only need one call to raid5_compute_sector.
	 */
	struct stripe_head *sh;
	int dd_idx;
	sector_t sector, logical_sector, last_sector;
	int scnt = 0;
	int remaining;
	int handled = 0;

	logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
	sector = raid5_compute_sector(conf, logical_sector,
				      0, &dd_idx, NULL);
	last_sector = raid_bio->bi_sector + (raid_bio->bi_size>>9);

	for (; logical_sector < last_sector;
	     logical_sector += STRIPE_SECTORS,
		     sector += STRIPE_SECTORS,
		     scnt++) {

		if (scnt < raid5_bi_hw_segments(raid_bio))
			/* already done this stripe */
			continue;

		sh = get_active_stripe(conf, sector, 0, 1);

		if (!sh) {
			/* failed to get a stripe - must wait */
			raid5_set_bi_hw_segments(raid_bio, scnt);
			conf->retry_read_aligned = raid_bio;
			return handled;
		}

		set_bit(R5_ReadError, &sh->dev[dd_idx].flags);
		if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) {
			release_stripe(sh);
			raid5_set_bi_hw_segments(raid_bio, scnt);
			conf->retry_read_aligned = raid_bio;
			return handled;
		}

		handle_stripe(sh, NULL);
		release_stripe(sh);
		handled++;
	}
	spin_lock_irq(&conf->device_lock);
	remaining = raid5_dec_bi_phys_segments(raid_bio);
	spin_unlock_irq(&conf->device_lock);
	if (remaining == 0)
		bio_endio(raid_bio, 0);
	if (atomic_dec_and_test(&conf->active_aligned_reads))
		wake_up(&conf->wait_for_stripe);
	return handled;
}
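/* Illustrative note: raid5_bi_hw_segments() (the upper half of the packed
 * bi_phys_segments word) acts as a resume cursor here.  If, say, stripes
 * 0 and 1 of an aligned read were handled before the stripe cache ran
 * dry, the cursor is parked at 2 and the next raid5d pass skips straight
 * to the third stripe instead of redoing the first two.
 */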
/*
 * This is our raid5 kernel thread.
 *
 * We scan the hash table for stripes which can be handled now.
 * During the scan, completed stripes are saved for us by the interrupt
 * handler, so that they will not have to wait for our next wakeup.
 */
static void raid5d(mddev_t *mddev)
{
	struct stripe_head *sh;
	raid5_conf_t *conf = mddev_to_conf(mddev);
	int handled;

	pr_debug("+++ raid5d active\n");

	md_check_recovery(mddev);

	handled = 0;
	spin_lock_irq(&conf->device_lock);
	while (1) {
		struct bio *bio;

		if (conf->seq_flush != conf->seq_write) {
			int seq = conf->seq_flush;
			spin_unlock_irq(&conf->device_lock);
			bitmap_unplug(mddev->bitmap);
			spin_lock_irq(&conf->device_lock);
			conf->seq_write = seq;
			activate_bit_delay(conf);
		}

		while ((bio = remove_bio_from_retry(conf))) {
			int ok;
			spin_unlock_irq(&conf->device_lock);
			ok = retry_aligned_read(conf, bio);
			spin_lock_irq(&conf->device_lock);
			if (!ok)
				break;
			handled++;
		}

		sh = __get_priority_stripe(conf);

		if (!sh)
			break;
		spin_unlock_irq(&conf->device_lock);

		handled++;
		handle_stripe(sh, conf->spare_page);
		release_stripe(sh);

		spin_lock_irq(&conf->device_lock);
	}
	pr_debug("%d stripes handled\n", handled);

	spin_unlock_irq(&conf->device_lock);

	async_tx_issue_pending_all();
	unplug_slaves(mddev);

	pr_debug("--- raid5d inactive\n");
}
static ssize_t
raid5_show_stripe_cache_size(mddev_t *mddev, char *page)
{
	raid5_conf_t *conf = mddev_to_conf(mddev);
	if (conf)
		return sprintf(page, "%d\n", conf->max_nr_stripes);
	else
		return 0;
}

static ssize_t
raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
{
	raid5_conf_t *conf = mddev_to_conf(mddev);
	unsigned long new;
	int err;

	if (len >= PAGE_SIZE)
		return -EINVAL;
	if (!conf)
		return -ENODEV;

	if (strict_strtoul(page, 10, &new))
		return -EINVAL;
	if (new <= 16 || new > 32768)
		return -EINVAL;
	while (new < conf->max_nr_stripes) {
		if (drop_one_stripe(conf))
			conf->max_nr_stripes--;
		else
			break;
	}
	err = md_allow_write(mddev);
	if (err)
		return err;
	while (new > conf->max_nr_stripes) {
		if (grow_one_stripe(conf))
			conf->max_nr_stripes++;
		else
			break;
	}
	return len;
}

static struct md_sysfs_entry
raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
				raid5_show_stripe_cache_size,
				raid5_store_stripe_cache_size);
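/* Usage note (illustrative; the device name is hypothetical): the entry
 * above is exposed through sysfs, so the cache can be resized at runtime:
 *
 *	# cat /sys/block/md0/md/stripe_cache_size
 *	# echo 1024 > /sys/block/md0/md/stripe_cache_size
 *
 * subject to the bounds enforced in the store method (> 16, <= 32768).
 */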
static ssize_t
raid5_show_preread_threshold(mddev_t *mddev, char *page)
{
	raid5_conf_t *conf = mddev_to_conf(mddev);
	if (conf)
		return sprintf(page, "%d\n", conf->bypass_threshold);
	else
		return 0;
}

static ssize_t
raid5_store_preread_threshold(mddev_t *mddev, const char *page, size_t len)
{
	raid5_conf_t *conf = mddev_to_conf(mddev);
	unsigned long new;
	if (len >= PAGE_SIZE)
		return -EINVAL;
	if (!conf)
		return -ENODEV;

	if (strict_strtoul(page, 10, &new))
		return -EINVAL;
	if (new > conf->max_nr_stripes)
		return -EINVAL;
	conf->bypass_threshold = new;
	return len;
}

static struct md_sysfs_entry
raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold,
					S_IRUGO | S_IWUSR,
					raid5_show_preread_threshold,
					raid5_store_preread_threshold);
static ssize_t
stripe_cache_active_show(mddev_t *mddev, char *page)
{
	raid5_conf_t *conf = mddev_to_conf(mddev);
	if (conf)
		return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
	else
		return 0;
}

static struct md_sysfs_entry
raid5_stripecache_active = __ATTR_RO(stripe_cache_active);

static struct attribute *raid5_attrs[] =  {
	&raid5_stripecache_size.attr,
	&raid5_stripecache_active.attr,
	&raid5_preread_bypass_threshold.attr,
	NULL,
};
static struct attribute_group raid5_attrs_group = {
	.name = NULL,
	.attrs = raid5_attrs,
};
4145 static int run(mddev_t
*mddev
)
4148 int raid_disk
, memory
;
4150 struct disk_info
*disk
;
4151 int working_disks
= 0;
4153 if (mddev
->level
!= 5 && mddev
->level
!= 4 && mddev
->level
!= 6) {
4154 printk(KERN_ERR
"raid5: %s: raid level not set to 4/5/6 (%d)\n",
4155 mdname(mddev
), mddev
->level
);
4158 if ((mddev
->level
== 5 && !algorithm_valid_raid5(mddev
->layout
)) ||
4159 (mddev
->level
== 6 && !algorithm_valid_raid6(mddev
->layout
))) {
4160 printk(KERN_ERR
"raid5: %s: layout %d not supported\n",
4161 mdname(mddev
), mddev
->layout
);
4165 if (mddev
->chunk_size
< PAGE_SIZE
) {
4166 printk(KERN_ERR
"md/raid5: chunk_size must be at least "
4167 "PAGE_SIZE but %d < %ld\n",
4168 mddev
->chunk_size
, PAGE_SIZE
);
	if (mddev->reshape_position != MaxSector) {
		/* Check that we can continue the reshape.
		 * Currently only the number of disks can change: it must
		 * increase, and we must be past the point where a stripe
		 * over-writes itself.
		 */
		sector_t here_new, here_old;
		int old_disks;
		int max_degraded = (mddev->level == 5 ? 1 : 2);

		if (mddev->new_level != mddev->level ||
		    mddev->new_layout != mddev->layout ||
		    mddev->new_chunk != mddev->chunk_size) {
			printk(KERN_ERR "raid5: %s: unsupported reshape "
			       "required - aborting.\n", mdname(mddev));
			return -EINVAL;
		}
		if (mddev->delta_disks <= 0) {
			printk(KERN_ERR "raid5: %s: unsupported reshape "
			       "(reduce disks) required - aborting.\n",
			       mdname(mddev));
			return -EINVAL;
		}
		old_disks = mddev->raid_disks - mddev->delta_disks;
		/* reshape_position must be on a new-stripe boundary, and one
		 * stripe further up in the new geometry must map after it in
		 * the old geometry.
		 */
		here_new = mddev->reshape_position;
		if (sector_div(here_new, (mddev->chunk_size>>9)*
			       (mddev->raid_disks - max_degraded))) {
			printk(KERN_ERR "raid5: reshape_position not "
			       "on a stripe boundary\n");
			return -EINVAL;
		}
		/* here_new is the stripe we will write to */
		here_old = mddev->reshape_position;
		sector_div(here_old, (mddev->chunk_size>>9)*
			   (old_disks - max_degraded));
		/* here_old is the first stripe that we might need to read
		 * from */
		if (here_new >= here_old) {
			/* Reading from the same stripe as writing to - bad */
			printk(KERN_ERR "raid5: reshape_position too early for "
			       "auto-recovery - aborting.\n");
			return -EINVAL;
		}
		printk(KERN_INFO "raid5: reshape will continue\n");
		/* OK, we should be able to continue; */
	}
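
	/*
	 * Worked example (illustrative, not part of the original driver):
	 * with a 64K chunk (128 sectors), growing a 5-disk RAID5 (4 data +
	 * 1 parity) to 6 disks, an old stripe spans 4 * 128 = 512 sectors
	 * of data and a new one 5 * 128 = 640.  At reshape_position = 640
	 * (one new stripe), here_new = 1 and here_old = 640/512 = 1, so
	 * here_new >= here_old and the restart is refused: writing would
	 * overlap data not yet read.  At reshape_position = 3200,
	 * here_new = 5 < here_old = 6, and the reshape can safely continue.
	 */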

	mddev->private = kzalloc(sizeof (raid5_conf_t), GFP_KERNEL);
	if ((conf = mddev->private) == NULL)
		goto abort;
	if (mddev->reshape_position == MaxSector) {
		conf->previous_raid_disks = conf->raid_disks = mddev->raid_disks;
	} else {
		conf->raid_disks = mddev->raid_disks;
		conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks;
	}

	conf->disks = kzalloc(conf->raid_disks * sizeof(struct disk_info),
			      GFP_KERNEL);
	if (!conf->disks)
		goto abort;

	conf->mddev = mddev;

	if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
		goto abort;

	if (mddev->level == 6) {
		conf->spare_page = alloc_page(GFP_KERNEL);
		if (!conf->spare_page)
			goto abort;
	}
	spin_lock_init(&conf->device_lock);
	mddev->queue->queue_lock = &conf->device_lock;
	init_waitqueue_head(&conf->wait_for_stripe);
	init_waitqueue_head(&conf->wait_for_overlap);
	INIT_LIST_HEAD(&conf->handle_list);
	INIT_LIST_HEAD(&conf->hold_list);
	INIT_LIST_HEAD(&conf->delayed_list);
	INIT_LIST_HEAD(&conf->bitmap_list);
	INIT_LIST_HEAD(&conf->inactive_list);
	atomic_set(&conf->active_stripes, 0);
	atomic_set(&conf->preread_active_stripes, 0);
	atomic_set(&conf->active_aligned_reads, 0);
	conf->bypass_threshold = BYPASS_THRESHOLD;

	pr_debug("raid5: run(%s) called.\n", mdname(mddev));

	list_for_each_entry(rdev, &mddev->disks, same_set) {
		raid_disk = rdev->raid_disk;
		if (raid_disk >= conf->raid_disks
		    || raid_disk < 0)
			continue;
		disk = conf->disks + raid_disk;

		disk->rdev = rdev;

		if (test_bit(In_sync, &rdev->flags)) {
			char b[BDEVNAME_SIZE];
			printk(KERN_INFO "raid5: device %s operational as raid"
			       " disk %d\n", bdevname(rdev->bdev, b),
			       raid_disk);
			working_disks++;
		} else
			/* Cannot rely on bitmap to complete recovery */
			conf->fullsync = 1;
	}

	/*
	 * 0 for a fully functional array, 1 or 2 for a degraded array.
	 */
	mddev->degraded = conf->raid_disks - working_disks;
	conf->mddev = mddev;
	conf->chunk_size = mddev->chunk_size;
	conf->level = mddev->level;
	if (conf->level == 6)
		conf->max_degraded = 2;
	else
		conf->max_degraded = 1;
	conf->algorithm = mddev->layout;
	conf->max_nr_stripes = NR_STRIPES;
	conf->expand_progress = mddev->reshape_position;

	/* device size must be a multiple of chunk size */
	mddev->dev_sectors &= ~(mddev->chunk_size / 512 - 1);
	mddev->resync_max_sectors = mddev->dev_sectors;

	if (conf->level == 6 && conf->raid_disks < 4) {
		printk(KERN_ERR "raid6: not enough configured devices for %s (%d, minimum 4)\n",
		       mdname(mddev), conf->raid_disks);
		goto abort;
	}
	if (!conf->chunk_size || conf->chunk_size % 4) {
		printk(KERN_ERR "raid5: invalid chunk size %d for %s\n",
		       conf->chunk_size, mdname(mddev));
		goto abort;
	}
	if (mddev->degraded > conf->max_degraded) {
		printk(KERN_ERR "raid5: not enough operational devices for %s"
		       " (%d/%d failed)\n",
		       mdname(mddev), mddev->degraded, conf->raid_disks);
		goto abort;
	}

	if (mddev->degraded > 0 &&
	    mddev->recovery_cp != MaxSector) {
		if (mddev->ok_start_degraded)
			printk(KERN_WARNING
			       "raid5: starting dirty degraded array: %s"
			       " - data corruption possible.\n",
			       mdname(mddev));
		else {
			printk(KERN_ERR
			       "raid5: cannot start dirty degraded array for %s\n",
			       mdname(mddev));
			goto abort;
		}
	}

	mddev->thread = md_register_thread(raid5d, mddev, "%s_raid5");
	if (!mddev->thread) {
		printk(KERN_ERR
		       "raid5: couldn't allocate thread for %s\n",
		       mdname(mddev));
		goto abort;
	}

	memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
		 conf->raid_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
	if (grow_stripes(conf, conf->max_nr_stripes)) {
		printk(KERN_ERR
		       "raid5: couldn't allocate %dkB for buffers\n", memory);
		shrink_stripes(conf);
		md_unregister_thread(mddev->thread);
		goto abort;
	} else
		printk(KERN_INFO "raid5: allocated %dkB for %s\n",
		       memory, mdname(mddev));
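
	/*
	 * Worked example (illustrative, not part of the original driver):
	 * with the default 256 stripes on a 6-disk array and 4K pages, the
	 * calculation above is roughly
	 * 256 * (sizeof(struct stripe_head) + 6 * (sizeof(struct bio) + 4096))
	 * bytes, i.e. on the order of 6MB, which matches the %dkB figure
	 * printed.
	 */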

	if (mddev->degraded == 0)
		printk(KERN_INFO "raid5: raid level %d set %s active with %d"
		       " out of %d devices, algorithm %d\n", conf->level,
		       mdname(mddev), mddev->raid_disks - mddev->degraded,
		       mddev->raid_disks, conf->algorithm);
	else
		printk(KERN_ALERT "raid5: raid level %d set %s active with %d"
		       " out of %d devices, algorithm %d\n", conf->level,
		       mdname(mddev), mddev->raid_disks - mddev->degraded,
		       mddev->raid_disks, conf->algorithm);

	print_raid5_conf(conf);

	if (conf->expand_progress != MaxSector) {
		printk(KERN_INFO "raid5: ok, starting reshape thread\n");
		conf->expand_lo = conf->expand_progress;
		atomic_set(&conf->reshape_stripes, 0);
		clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
		clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
		set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
		set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
		mddev->sync_thread = md_register_thread(md_do_sync, mddev,
							"%s_reshape");
	}

	/* The read-ahead size must cover two whole stripes, which is
	 * 2 * (number of data disks) * chunksize, where the number of data
	 * disks is the raid disks minus the parity disks.
	 */
	{
		int data_disks = conf->previous_raid_disks - conf->max_degraded;
		int stripe = data_disks *
			(mddev->chunk_size / PAGE_SIZE);
		if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
			mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
	}
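
	/*
	 * Worked example (illustrative, not part of the original driver):
	 * ra_pages is measured in PAGE_SIZE units, so for 4 data disks and
	 * a 64K chunk, stripe = 4 * (65536 / 4096) = 64 pages and
	 * read-ahead is raised to at least 128 pages (512K), i.e. two full
	 * stripes of data.
	 */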

	/* Ok, everything is just fine now */
	if (sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
		printk(KERN_WARNING
		       "raid5: failed to create sysfs attributes for %s\n",
		       mdname(mddev));

	mddev->queue->unplug_fn = raid5_unplug_device;
	mddev->queue->backing_dev_info.congested_data = mddev;
	mddev->queue->backing_dev_info.congested_fn = raid5_congested;

	mddev->array_sectors = mddev->dev_sectors *
		(conf->previous_raid_disks - conf->max_degraded);

	blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec);

	return 0;

abort:
	if (conf) {
		print_raid5_conf(conf);
		safe_put_page(conf->spare_page);
		kfree(conf->disks);
		kfree(conf->stripe_hashtbl);
		kfree(conf);
	}
	mddev->private = NULL;
	printk(KERN_ALERT "raid5: failed to run raid set %s\n", mdname(mddev));
	return -EIO;
}

static int stop(mddev_t *mddev)
{
	raid5_conf_t *conf = (raid5_conf_t *) mddev->private;

	md_unregister_thread(mddev->thread);
	mddev->thread = NULL;
	shrink_stripes(conf);
	kfree(conf->stripe_hashtbl);
	mddev->queue->backing_dev_info.congested_fn = NULL;
	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf' */
	sysfs_remove_group(&mddev->kobj, &raid5_attrs_group);
	kfree(conf->disks);
	kfree(conf);
	mddev->private = NULL;
	return 0;
}

static void print_sh(struct seq_file *seq, struct stripe_head *sh)
{
	int i;

	seq_printf(seq, "sh %llu, pd_idx %d, state %ld.\n",
		   (unsigned long long)sh->sector, sh->pd_idx, sh->state);
	seq_printf(seq, "sh %llu, count %d.\n",
		   (unsigned long long)sh->sector, atomic_read(&sh->count));
	seq_printf(seq, "sh %llu, ", (unsigned long long)sh->sector);
	for (i = 0; i < sh->disks; i++) {
		seq_printf(seq, "(cache%d: %p %ld) ",
			   i, sh->dev[i].page, sh->dev[i].flags);
	}
	seq_printf(seq, "\n");
}

static void printall(struct seq_file *seq, raid5_conf_t *conf)
{
	struct stripe_head *sh;
	struct hlist_node *hn;
	int i;

	spin_lock_irq(&conf->device_lock);
	for (i = 0; i < NR_HASH; i++) {
		hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) {
			if (sh->raid_conf != conf)
				continue;
			print_sh(seq, sh);
		}
	}
	spin_unlock_irq(&conf->device_lock);
}

static void status(struct seq_file *seq, mddev_t *mddev)
{
	raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
	int i;

	seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level,
		   mddev->chunk_size >> 10, mddev->layout);
	seq_printf(seq, " [%d/%d] [", conf->raid_disks,
		   conf->raid_disks - mddev->degraded);
	for (i = 0; i < conf->raid_disks; i++)
		seq_printf(seq, "%s",
			   conf->disks[i].rdev &&
			   test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_");
	seq_printf(seq, "]");

	seq_printf(seq, "\n");
	printall(seq, conf);
}

static void print_raid5_conf(raid5_conf_t *conf)
{
	int i;
	struct disk_info *tmp;

	printk("RAID5 conf printout:\n");
	if (!conf) {
		printk("(conf==NULL)\n");
		return;
	}
	printk(" --- rd:%d wd:%d\n", conf->raid_disks,
	       conf->raid_disks - conf->mddev->degraded);

	for (i = 0; i < conf->raid_disks; i++) {
		char b[BDEVNAME_SIZE];
		tmp = conf->disks + i;
		if (tmp->rdev)
			printk(" disk %d, o:%d, dev:%s\n",
			       i, !test_bit(Faulty, &tmp->rdev->flags),
			       bdevname(tmp->rdev->bdev, b));
	}
}

static int raid5_spare_active(mddev_t *mddev)
{
	int i;
	raid5_conf_t *conf = mddev->private;
	struct disk_info *tmp;

	for (i = 0; i < conf->raid_disks; i++) {
		tmp = conf->disks + i;
		if (tmp->rdev
		    && !test_bit(Faulty, &tmp->rdev->flags)
		    && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
			unsigned long flags;
			spin_lock_irqsave(&conf->device_lock, flags);
			mddev->degraded--;
			spin_unlock_irqrestore(&conf->device_lock, flags);
		}
	}
	print_raid5_conf(conf);
	return 0;
}

static int raid5_remove_disk(mddev_t *mddev, int number)
{
	raid5_conf_t *conf = mddev->private;
	int err = 0;
	mdk_rdev_t *rdev;
	struct disk_info *p = conf->disks + number;

	print_raid5_conf(conf);
	rdev = p->rdev;
	if (rdev) {
		if (test_bit(In_sync, &rdev->flags) ||
		    atomic_read(&rdev->nr_pending)) {
			err = -EBUSY;
			goto abort;
		}
		/* Only remove non-faulty devices if recovery
		 * is not possible.
		 */
		if (!test_bit(Faulty, &rdev->flags) &&
		    mddev->degraded <= conf->max_degraded) {
			err = -EBUSY;
			goto abort;
		}
		p->rdev = NULL;
		synchronize_rcu();
		if (atomic_read(&rdev->nr_pending)) {
			/* lost the race, try later */
			err = -EBUSY;
			p->rdev = rdev;
		}
	}
abort:
	print_raid5_conf(conf);
	return err;
}

static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
{
	raid5_conf_t *conf = mddev->private;
	int err = -EEXIST;
	int disk;
	struct disk_info *p;
	int first = 0;
	int last = conf->raid_disks - 1;

	if (mddev->degraded > conf->max_degraded)
		/* no point adding a device */
		return -EINVAL;

	if (rdev->raid_disk >= 0)
		first = last = rdev->raid_disk;

	/*
	 * find the disk ... but prefer rdev->saved_raid_disk
	 * if possible.
	 */
	if (rdev->saved_raid_disk >= 0 &&
	    rdev->saved_raid_disk >= first &&
	    conf->disks[rdev->saved_raid_disk].rdev == NULL)
		disk = rdev->saved_raid_disk;
	else
		disk = first;
	for ( ; disk <= last; disk++)
		if ((p = conf->disks + disk)->rdev == NULL) {
			clear_bit(In_sync, &rdev->flags);
			rdev->raid_disk = disk;
			err = 0;
			if (rdev->saved_raid_disk != disk)
				conf->fullsync = 1;
			rcu_assign_pointer(p->rdev, rdev);
			break;
		}
	print_raid5_conf(conf);
	return err;
}

static int raid5_resize(mddev_t *mddev, sector_t sectors)
{
	/* no resync is happening, and there is enough space
	 * on all devices, so we can resize.
	 * We need to make sure resync covers any new space.
	 * If the array is shrinking we should possibly wait until
	 * any io in the removed space completes, but it hardly seems
	 * worth it.
	 */
	raid5_conf_t *conf = mddev_to_conf(mddev);

	sectors &= ~((sector_t)mddev->chunk_size/512 - 1);
	mddev->array_sectors = sectors * (mddev->raid_disks
					  - conf->max_degraded);
	set_capacity(mddev->gendisk, mddev->array_sectors);
	mddev->changed = 1;
	if (sectors > mddev->dev_sectors && mddev->recovery_cp == MaxSector) {
		mddev->recovery_cp = mddev->dev_sectors;
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	}
	mddev->dev_sectors = sectors;
	mddev->resync_max_sectors = sectors;
	return 0;
}
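
/*
 * Worked example (illustrative, not part of the original driver): the
 * mask above rounds the per-device size down to a whole number of
 * chunks.  With a 64K chunk (128 sectors), chunk_size/512 - 1 = 127, so
 * sectors &= ~127 turns e.g. 1000000 sectors into 999936 = 7812 * 128.
 */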

#ifdef CONFIG_MD_RAID5_RESHAPE
static int raid5_check_reshape(mddev_t *mddev)
{
	raid5_conf_t *conf = mddev_to_conf(mddev);
	int err;

	if (mddev->delta_disks < 0 ||
	    mddev->new_level != mddev->level)
		return -EINVAL; /* Cannot shrink array or change level yet */
	if (mddev->delta_disks == 0)
		return 0; /* nothing to do */
	if (mddev->bitmap)
		/* Cannot grow a bitmap yet */
		return -EBUSY;

	/* Can only proceed if there are plenty of stripe_heads.
	 * We need a minimum of one full stripe, and for sensible progress
	 * it is best to have about 4 times that.
	 * If we require 4 times, then the default 256 4K stripe_heads will
	 * allow for chunk sizes up to 256K, which is probably OK.
	 * If the chunk size is greater, user-space should request more
	 * stripe_heads first.
	 */
	if ((mddev->chunk_size / STRIPE_SIZE) * 4 > conf->max_nr_stripes ||
	    (mddev->new_chunk / STRIPE_SIZE) * 4 > conf->max_nr_stripes) {
		printk(KERN_WARNING "raid5: reshape: not enough stripes. Needed %lu\n",
		       (mddev->chunk_size / STRIPE_SIZE)*4);
		return -ENOSPC;
	}

	err = resize_stripes(conf, conf->raid_disks + mddev->delta_disks);
	if (err)
		return err;

	if (mddev->degraded > conf->max_degraded)
		return -EINVAL;
	/* looks like we might be able to manage this */
	return 0;
}
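
/*
 * Worked example (illustrative, not part of the original driver; the
 * device name md0 is hypothetical): with 512K chunks,
 * (524288 / 4096) * 4 = 512 stripe_heads are needed, so the default
 * cache of 256 fails the test above and the reshape is refused until
 * the admin raises it, e.g.
 *
 *	echo 512 > /sys/block/md0/md/stripe_cache_size
 */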

static int raid5_start_reshape(mddev_t *mddev)
{
	raid5_conf_t *conf = mddev_to_conf(mddev);
	mdk_rdev_t *rdev;
	int spares = 0;
	int added_devices = 0;
	unsigned long flags;

	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		return -EBUSY;

	list_for_each_entry(rdev, &mddev->disks, same_set)
		if (rdev->raid_disk < 0 &&
		    !test_bit(Faulty, &rdev->flags))
			spares++;

	if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded)
		/* Not enough devices even to make a degraded array
		 * of that size
		 */
		return -EINVAL;

	atomic_set(&conf->reshape_stripes, 0);
	spin_lock_irq(&conf->device_lock);
	conf->previous_raid_disks = conf->raid_disks;
	conf->raid_disks += mddev->delta_disks;
	conf->expand_progress = 0;
	conf->expand_lo = 0;
	spin_unlock_irq(&conf->device_lock);

	/* Add some new drives, as many as will fit.
	 * We know there are enough to make the newly sized array work.
	 */
	list_for_each_entry(rdev, &mddev->disks, same_set)
		if (rdev->raid_disk < 0 &&
		    !test_bit(Faulty, &rdev->flags)) {
			if (raid5_add_disk(mddev, rdev) == 0) {
				char nm[20];
				set_bit(In_sync, &rdev->flags);
				added_devices++;
				rdev->recovery_offset = 0;
				sprintf(nm, "rd%d", rdev->raid_disk);
				if (sysfs_create_link(&mddev->kobj,
						      &rdev->kobj, nm))
					printk(KERN_WARNING
					       "raid5: failed to create"
					       " link %s for %s\n",
					       nm, mdname(mddev));
			} else
				break;
		}

	spin_lock_irqsave(&conf->device_lock, flags);
	mddev->degraded = (conf->raid_disks - conf->previous_raid_disks)
			  - added_devices;
	spin_unlock_irqrestore(&conf->device_lock, flags);
	mddev->raid_disks = conf->raid_disks;
	mddev->reshape_position = 0;
	set_bit(MD_CHANGE_DEVS, &mddev->flags);

	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
	set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
	set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
	mddev->sync_thread = md_register_thread(md_do_sync, mddev,
						"%s_reshape");
	if (!mddev->sync_thread) {
		mddev->recovery = 0;
		spin_lock_irq(&conf->device_lock);
		mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks;
		conf->expand_progress = MaxSector;
		spin_unlock_irq(&conf->device_lock);
		return -EAGAIN;
	}
	md_wakeup_thread(mddev->sync_thread);
	md_new_event(mddev);
	return 0;
}
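
/*
 * Illustrative note (editor's addition, not from the original source):
 * after the add loop, degraded counts the new slots no spare could
 * fill.  Growing a 4-disk RAID5 by delta_disks = 2 with only one spare
 * available leaves mddev->degraded = (6 - 4) - 1 = 1, which the spares
 * check above allowed precisely because it stays within max_degraded.
 */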

static void end_reshape(raid5_conf_t *conf)
{
	struct block_device *bdev;

	if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
		conf->mddev->array_sectors = conf->mddev->dev_sectors *
			(conf->raid_disks - conf->max_degraded);
		set_capacity(conf->mddev->gendisk, conf->mddev->array_sectors);
		conf->mddev->changed = 1;

		bdev = bdget_disk(conf->mddev->gendisk, 0);
		if (bdev) {
			mutex_lock(&bdev->bd_inode->i_mutex);
			i_size_write(bdev->bd_inode,
				     (loff_t)conf->mddev->array_sectors << 9);
			mutex_unlock(&bdev->bd_inode->i_mutex);
			bdput(bdev);
		}
		spin_lock_irq(&conf->device_lock);
		conf->expand_progress = MaxSector;
		spin_unlock_irq(&conf->device_lock);
		conf->mddev->reshape_position = MaxSector;

		/* The read-ahead size must cover two whole stripes, which is
		 * 2 * (number of data disks) * chunksize.
		 */
		{
			int data_disks = conf->previous_raid_disks -
				conf->max_degraded;
			int stripe = data_disks *
				(conf->mddev->chunk_size / PAGE_SIZE);
			if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
				conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
		}
	}
}

static void raid5_quiesce(mddev_t *mddev, int state)
{
	raid5_conf_t *conf = mddev_to_conf(mddev);

	switch (state) {
	case 2: /* resume for a suspend */
		wake_up(&conf->wait_for_overlap);
		break;

	case 1: /* stop all writes */
		spin_lock_irq(&conf->device_lock);
		conf->quiesce = 1;
		wait_event_lock_irq(conf->wait_for_stripe,
				    atomic_read(&conf->active_stripes) == 0 &&
				    atomic_read(&conf->active_aligned_reads) == 0,
				    conf->device_lock, /* nothing */);
		spin_unlock_irq(&conf->device_lock);
		break;

	case 0: /* re-enable writes */
		spin_lock_irq(&conf->device_lock);
		conf->quiesce = 0;
		wake_up(&conf->wait_for_stripe);
		wake_up(&conf->wait_for_overlap);
		spin_unlock_irq(&conf->device_lock);
		break;
	}
}
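
/*
 * Illustrative note (editor's addition, not from the original source):
 * a caller wanting exclusive access to a quiescent array is expected to
 * bracket its critical section as
 *
 *	raid5_quiesce(mddev, 1);	// drain and block all new stripes
 *	... operate on the quiescent array ...
 *	raid5_quiesce(mddev, 0);	// let writers proceed again
 *
 * with state 2 available as a lighter wakeup for overlap waiters only.
 */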

static struct mdk_personality raid6_personality =
{
	.name		= "raid6",
	.level		= 6,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid5_add_disk,
	.hot_remove_disk = raid5_remove_disk,
	.spare_active	= raid5_spare_active,
	.sync_request	= sync_request,
	.resize		= raid5_resize,
#ifdef CONFIG_MD_RAID5_RESHAPE
	.check_reshape	= raid5_check_reshape,
	.start_reshape	= raid5_start_reshape,
#endif
	.quiesce	= raid5_quiesce,
};
static struct mdk_personality raid5_personality =
{
	.name		= "raid5",
	.level		= 5,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid5_add_disk,
	.hot_remove_disk = raid5_remove_disk,
	.spare_active	= raid5_spare_active,
	.sync_request	= sync_request,
	.resize		= raid5_resize,
#ifdef CONFIG_MD_RAID5_RESHAPE
	.check_reshape	= raid5_check_reshape,
	.start_reshape	= raid5_start_reshape,
#endif
	.quiesce	= raid5_quiesce,
};
static struct mdk_personality raid4_personality =
{
	.name		= "raid4",
	.level		= 4,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid5_add_disk,
	.hot_remove_disk = raid5_remove_disk,
	.spare_active	= raid5_spare_active,
	.sync_request	= sync_request,
	.resize		= raid5_resize,
#ifdef CONFIG_MD_RAID5_RESHAPE
	.check_reshape	= raid5_check_reshape,
	.start_reshape	= raid5_start_reshape,
#endif
	.quiesce	= raid5_quiesce,
};

static int __init raid5_init(void)
{
	int e;

	e = raid6_select_algo();
	if (e)
		return e;
	register_md_personality(&raid6_personality);
	register_md_personality(&raid5_personality);
	register_md_personality(&raid4_personality);
	return 0;
}

static void raid5_exit(void)
{
	unregister_md_personality(&raid6_personality);
	unregister_md_personality(&raid5_personality);
	unregister_md_personality(&raid4_personality);
}

module_init(raid5_init);
module_exit(raid5_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md-personality-4"); /* RAID5 */
MODULE_ALIAS("md-raid5");
MODULE_ALIAS("md-raid4");
MODULE_ALIAS("md-level-5");
MODULE_ALIAS("md-level-4");
MODULE_ALIAS("md-personality-8"); /* RAID6 */
MODULE_ALIAS("md-raid6");
MODULE_ALIAS("md-level-6");

/* This driver used to be two separate modules: */
MODULE_ALIAS("raid5");
MODULE_ALIAS("raid6");